diff --git a/go.mod b/go.mod
index 6f6e826636..f736922030 100644
--- a/go.mod
+++ b/go.mod
@@ -5,7 +5,7 @@ go 1.24.4
require (
github.com/goodhosts/hostsfile v0.1.6
github.com/google/go-containerregistry v0.20.6
- github.com/rancher/wharfie v0.6.2
+ github.com/rancher/wharfie v0.7.0
github.com/spf13/pflag v1.0.6
github.com/stretchr/testify v1.10.0
go4.org/netipx v0.0.0-20231129151722-fdeea329fbba
@@ -17,13 +17,18 @@ require (
require (
github.com/containerd/errdefs v1.0.0 // indirect
github.com/containerd/errdefs/pkg v0.3.0 // indirect
+ github.com/corpix/uarand v0.1.1 // indirect
+ github.com/fxamacker/cbor/v2 v2.7.0 // indirect
github.com/mattn/go-runewidth v0.0.16 // indirect
github.com/moby/go-archive v0.1.0 // indirect
github.com/moby/sys/atomicwriter v0.1.0 // indirect
github.com/moby/sys/userns v0.1.0 // indirect
github.com/morikuni/aec v1.0.0 // indirect
+ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/rivo/uniseg v0.2.0 // indirect
+ github.com/x448/float16 v0.8.4 // indirect
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
+ sigs.k8s.io/randfill v1.0.0 // indirect
)
require (
@@ -51,10 +56,9 @@ require (
github.com/go-test/deep v1.1.0
github.com/gogo/protobuf v1.3.2 // indirect
github.com/google/go-cmp v0.7.0 // indirect
- github.com/google/gofuzz v1.2.0 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/gorilla/mux v1.8.0 // indirect
- github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
+ github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
github.com/imdario/mergo v0.3.14
github.com/inconshreveable/mousetrap v1.1.0 // indirect
@@ -80,9 +84,6 @@ require (
github.com/pelletier/go-toml/v2 v2.1.0 // indirect
github.com/pkg/errors v0.9.1
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
- github.com/prometheus/client_golang v1.14.0 // indirect
- github.com/prometheus/common v0.39.0 // indirect
- github.com/prometheus/procfs v0.9.0 // indirect
github.com/sagikazarmark/locafero v0.4.0 // indirect
github.com/sagikazarmark/slog-shim v0.1.0 // indirect
github.com/sirupsen/logrus v1.9.3
@@ -100,35 +101,34 @@ require (
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect
go.opentelemetry.io/otel v1.36.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.28.0 // indirect
- go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 // indirect
- go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 // indirect
go.opentelemetry.io/otel/metric v1.36.0 // indirect
go.opentelemetry.io/otel/sdk v1.36.0 // indirect
go.opentelemetry.io/otel/sdk/metric v1.36.0 // indirect
go.opentelemetry.io/otel/trace v1.36.0 // indirect
- go.opentelemetry.io/proto/otlp v1.3.1 // indirect
- go.uber.org/atomic v1.9.0 // indirect
- go.uber.org/multierr v1.9.0 // indirect
- golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect
- golang.org/x/net v0.28.0 // indirect
+ go.opentelemetry.io/proto/otlp v1.4.0 // indirect
+ go.uber.org/multierr v1.11.0 // indirect
+ golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
+ golang.org/x/net v0.41.0 // indirect
golang.org/x/oauth2 v0.30.0 // indirect
golang.org/x/sync v0.15.0
golang.org/x/sys v0.33.0 // indirect
- golang.org/x/term v0.23.0 // indirect
- golang.org/x/text v0.17.0 // indirect
- golang.org/x/time v0.5.0 // indirect
- google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142 // indirect
- google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect
- google.golang.org/grpc v1.67.0 // indirect
- google.golang.org/protobuf v1.36.3 // indirect
+ golang.org/x/term v0.32.0 // indirect
+ golang.org/x/text v0.26.0 // indirect
+ golang.org/x/time v0.9.0 // indirect
+ google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 // indirect
+ google.golang.org/grpc v1.68.1 // indirect
+ google.golang.org/protobuf v1.36.5 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/yaml.v2 v2.4.0
gotest.tools v2.2.0+incompatible
- k8s.io/apimachinery v0.30.2 // indirect
- k8s.io/client-go v0.30.2
- k8s.io/klog/v2 v2.120.1 // indirect
- sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
- sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
+ k8s.io/apimachinery v0.33.0 // indirect
+ k8s.io/client-go v0.33.0
+ k8s.io/klog/v2 v2.130.1 // indirect
+ sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
+ sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect
sigs.k8s.io/yaml v1.4.0
)
diff --git a/go.sum b/go.sum
index 6d8424718f..bb5ad2a339 100644
--- a/go.sum
+++ b/go.sum
@@ -34,8 +34,7 @@ github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
github.com/containerd/stargz-snapshotter/estargz v0.16.3 h1:7evrXtoh1mSbGj/pfRccTampEyKpjpOnS3CyiV1Ebr8=
github.com/containerd/stargz-snapshotter/estargz v0.16.3/go.mod h1:uyr4BfYfOj3G9WBVE8cOlQmXAbPN9VEQpBBeJIuOipU=
-github.com/corpix/uarand v0.0.0-20170723150923-031be390f409 h1:9A+mfQmwzZ6KwUXPc8nHxFtKgn9VIvO3gXAOspIcE3s=
-github.com/corpix/uarand v0.0.0-20170723150923-031be390f409/go.mod h1:JSm890tOkDN+M1jqN8pUGDKnzJrsVbJwSMHBY4zwz7M=
+github.com/corpix/uarand v0.1.1 h1:RMr1TWc9F4n5jiPDzFHtmaUXLKLNUFK0SgCLo4BhX/U=
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
@@ -82,6 +81,8 @@ github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nos
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
github.com/fvbommel/sortorder v1.1.0 h1:fUmoe+HLsBTctBDoaBwpQo5N+nrCp8g/BjKb/6ZQmYw=
github.com/fvbommel/sortorder v1.1.0/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0=
+github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
+github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
@@ -90,12 +91,10 @@ github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
-github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE=
-github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
+github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
-github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g=
-github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
+github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
github.com/go-sql-driver/mysql v1.3.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-test/deep v1.1.0 h1:WOcxcdHcvdgThNXjw0t76K42FXTU7HpNQWHpA2HHNlg=
@@ -114,8 +113,7 @@ github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6
github.com/goodhosts/hostsfile v0.1.6 h1:aK6DxpNV6pZ1NbdvNE2vYBMTnvIJF5O2J/8ZOlp2eMY=
github.com/goodhosts/hostsfile v0.1.6/go.mod h1:bkCocEIf3Ca0hcBustUZoWYhOgKUaIK+47m8fBjoBx8=
github.com/google/certificate-transparency-go v1.0.10-0.20180222191210-5ab67e519c93/go.mod h1:QeJfpSbVSfYc7RgB3gJFj9cbuQMMchQxrWXz8Ruopmg=
-github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
-github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
+github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
@@ -123,15 +121,13 @@ github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX
github.com/google/go-containerregistry v0.20.6 h1:cvWX87UxxLgaH76b4hIvya6Dzz9qHB31qAwjAohdSTU=
github.com/google/go-containerregistry v0.20.6/go.mod h1:T0x8MuoAoKX/873bkeSfLD2FAkwCDf9/HZgsFJ02E2Y=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
-github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/mux v1.7.0/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 h1:TmHmbvxPmaegwhDubVz0lICL0J5Ka2vwTzhoePEXsGE=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0/go.mod h1:qztMSjm835F2bXf+5HKAPIS5qsmQDqZna/PgVt4rWtI=
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
@@ -182,8 +178,6 @@ github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6T
github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mattn/go-sqlite3 v1.6.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
-github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/miekg/pkcs11 v1.0.2/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU=
github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
@@ -249,28 +243,26 @@ github.com/prometheus/client_golang v0.9.0-pre1.0.20180209125602-c332b6f63c06/go
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
-github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
-github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
+github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
+github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
-github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
+github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
-github.com/prometheus/common v0.39.0 h1:oOyhkDq05hPZKItWVBkJ6g6AtGxi+fy7F4JvUV8uhsI=
-github.com/prometheus/common v0.39.0/go.mod h1:6XBZ7lYdLCbkAVhwRsWTZn+IN5AB9F/NXd5w0BbEX0Y=
+github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io=
+github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I=
github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
-github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI=
-github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY=
-github.com/rancher/dynamiclistener v0.3.5 h1:5TaIHvkDGmZKvc96Huur16zfTKOiLhDtK4S+WV0JA6A=
-github.com/rancher/dynamiclistener v0.3.5/go.mod h1:dW/YF6/m2+uEyJ5VtEcd9THxda599HP6N9dSXk81+k0=
-github.com/rancher/wharfie v0.6.2 h1:ZTrZ0suU0abWwLLf2zaqjhwpxK8+BkbnMocnU2u1bSQ=
-github.com/rancher/wharfie v0.6.2/go.mod h1:7ii0+eehBwUEFaJMiRHWCbvN11bsfVHT1oc+P/6IBSg=
+github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
+github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
+github.com/rancher/dynamiclistener v0.3.6 h1:iAFWeiFNra6tYlt4k+jINrK3hOxZ8mjW2S/9nA6sxKs=
+github.com/rancher/wharfie v0.7.0 h1:M+OHMkE+tfafY59E5RuZ/Q4IorKNJGVqhtZRksTpOWo=
+github.com/rancher/wharfie v0.7.0/go.mod h1:wSQoRNUM58z0Qb9kmAT1L6ia2ys0LWHRH+7Vix/rkD8=
github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
@@ -307,6 +299,7 @@ github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
+github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
@@ -323,6 +316,8 @@ github.com/theupdateframework/notary v0.7.0 h1:QyagRZ7wlSpjT5N2qQAh/pN+DVqgekv4D
github.com/theupdateframework/notary v0.7.0/go.mod h1:c9DRxcmhHmVLDay4/2fUYdISnHqbFDGRSlXPO0AhYWw=
github.com/vbatts/tar-split v0.12.1 h1:CqKoORW7BUWBe7UL/iqTVvkTBOF8UvOMKOIZykxnnbo=
github.com/vbatts/tar-split v0.12.1/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA=
+github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
+github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
@@ -340,10 +335,10 @@ go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg=
go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E=
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.28.0 h1:U2guen0GhqH8o/G2un8f/aG/y++OuW6MyCo6hT9prXk=
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.28.0/go.mod h1:yeGZANgEcpdx/WK0IvvRFC+2oLiMS2u4L/0Rj2M2Qr0=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 h1:3d+S281UTjM+AbF31XSOYn1qXn3BgIdWl8HNEpx08Jk=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0/go.mod h1:0+KuTDyKL4gjKCF75pHOX4wuzYDUZYfAQdSu43o+Z2I=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 h1:Vh5HayB/0HHfOQA7Ctx69E/Y/DcQSMPpKANYVMQ7fBA=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0/go.mod h1:cpgtDBaqD/6ok/UG0jT15/uKjAY8mRA53diogHBg3UI=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 h1:5pojmb1U1AogINhN3SurB+zm/nIcusopeBNp42f45QM=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0/go.mod h1:57gTHJSE5S1tqg+EKsLPlTWhpHMsWlVmer+LA926XiA=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.33.0 h1:wpMfgF8E1rkrT1Z6meFh1NDtownE9Ii3n3X2GJYjsaU=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.33.0/go.mod h1:wAy0T/dUbs468uOlkT31xjvqQgEVXv58BRFWEgn5v/0=
go.opentelemetry.io/otel/metric v1.36.0 h1:MoWPKVhQvJ+eeXWHFBOPoBOi20jh6Iq2CcCREuTYufE=
@@ -354,14 +349,12 @@ go.opentelemetry.io/otel/sdk/metric v1.36.0 h1:r0ntwwGosWGaa0CrSt8cuNuTcccMXERFw
go.opentelemetry.io/otel/sdk/metric v1.36.0/go.mod h1:qTNOhFDfKRwX0yXOqJYegL5WRaW376QbB7P4Pb0qva4=
go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w=
go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA=
-go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0=
-go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8=
-go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE=
-go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
+go.opentelemetry.io/proto/otlp v1.4.0 h1:TA9WRvW6zMwP+Ssb6fLoUIuirti1gGbP28GcKG1jgeg=
+go.opentelemetry.io/proto/otlp v1.4.0/go.mod h1:PPBWZIP98o2ElSqI35IHfu7hIhSwvc5N38Jw8pXuGFY=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
-go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI=
-go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ=
+go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
+go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go4.org/netipx v0.0.0-20231129151722-fdeea329fbba h1:0b9z3AuHCjxk0x/opv64kcgZLBseWJUpBw5I82+2U4M=
go4.org/netipx v0.0.0-20231129151722-fdeea329fbba/go.mod h1:PLyyIXexvUFg3Owu6p/WfdlivPbZJsZdgWZlrGope/Y=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
@@ -371,10 +364,9 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201117144127-c1f2f97bffc9/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
-golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw=
-golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54=
-golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g=
-golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k=
+golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM=
+golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8=
+golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.25.0 h1:n7a+ZbQKQA/Ysbyb0/6IbB1H/X41mKgbhfv7AfG/44w=
@@ -386,8 +378,7 @@ golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE=
-golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg=
+golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw=
golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=
golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -412,14 +403,12 @@ golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw=
golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
-golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU=
-golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk=
+golang.org/x/term v0.32.0 h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc=
-golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
-golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
-golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
+golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M=
+golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY=
+golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
@@ -428,15 +417,15 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142 h1:wKguEg1hsxI2/L3hUYrpo1RVi48K+uTyzKqprwLXsb8=
-google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142/go.mod h1:d6be+8HhtEtucleCbxpPW9PA9XwISACu8nvpPqF0BVo=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU=
+google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 h1:CkkIfIt50+lT6NHAVoRYEyAvQGFM7xEwXUUywFvEb3Q=
+google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576/go.mod h1:1R3kvZ1dtP3+4p4d3G8uJ8rFk/fWlScl38vanWACI08=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 h1:8ZmaLZE4XWrtU3MyClkYqqtl6Oegr3235h7jxsDyqCY=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576/go.mod h1:5uTbfoYQed2U9p3KIj2/Zzm02PYhndfdmML0qC3q3FU=
google.golang.org/grpc v1.0.5/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
-google.golang.org/grpc v1.67.0 h1:IdH9y6PF5MPSdAntIcpjQ+tXO41pcQsfZV2RxtQgVcw=
-google.golang.org/grpc v1.67.0/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA=
-google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU=
-google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
+google.golang.org/grpc v1.68.1 h1:oI5oTa11+ng8r8XMMN7jAOmWfPZWbYpCFaMUTACxkM0=
+google.golang.org/grpc v1.68.1/go.mod h1:+q1XYFJjShcqn0QZHvCyeR4CXPA+llXIeUIfIe00waw=
+google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM=
+google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/cenkalti/backoff.v2 v2.2.1/go.mod h1:S0QdOvT2AlerfSBkp0O+dk+bbIMaNbEmVk876gPCthU=
@@ -444,6 +433,7 @@ gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
+gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
@@ -465,21 +455,22 @@ gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q=
gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA=
-k8s.io/api v0.30.2 h1:+ZhRj+28QT4UOH+BKznu4CBgPWgkXO7XAvMcMl0qKvI=
-k8s.io/api v0.30.2/go.mod h1:ULg5g9JvOev2dG0u2hig4Z7tQ2hHIuS+m8MNZ+X6EmI=
-k8s.io/apimachinery v0.30.2 h1:fEMcnBj6qkzzPGSVsAZtQThU62SmQ4ZymlXRC5yFSCg=
-k8s.io/apimachinery v0.30.2/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc=
-k8s.io/client-go v0.30.2 h1:sBIVJdojUNPDU/jObC+18tXWcTJVcwyqS9diGdWHk50=
-k8s.io/client-go v0.30.2/go.mod h1:JglKSWULm9xlJLx4KCkfLLQ7XwtlbflV6uFFSHTMgVs=
-k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw=
-k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
-k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag=
-k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98=
+k8s.io/api v0.33.0 h1:yTgZVn1XEe6opVpP1FylmNrIFWuDqe2H0V8CT5gxfIU=
+k8s.io/apimachinery v0.33.0 h1:1a6kHrJxb2hs4t8EE5wuR/WxKDwGN1FKH3JvDtA0CIQ=
+k8s.io/apimachinery v0.33.0/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM=
+k8s.io/client-go v0.33.0 h1:UASR0sAYVUzs2kYuKn/ZakZlcs2bEHaizrrHUZg0G98=
+k8s.io/client-go v0.33.0/go.mod h1:kGkd+l/gNGg8GYWAPr0xF1rRKvVWvzh9vmZAMXtaKOg=
+k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
+k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
+k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4=
k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y=
k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
-sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
-sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
-sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
-sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
+sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8=
+sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo=
+sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
+sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
+sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
+sigs.k8s.io/structured-merge-diff/v4 v4.6.0 h1:IUA9nvMmnKWcj5jl84xn+T5MnlZKThmUW1TdblaLVAc=
+sigs.k8s.io/structured-merge-diff/v4 v4.6.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps=
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
diff --git a/go.work.sum b/go.work.sum
index 7bf9162072..f331fd24d5 100644
--- a/go.work.sum
+++ b/go.work.sum
@@ -1,5 +1,6 @@
bazil.org/fuse v0.0.0-20200407214033-5883e5a4b512/go.mod h1:FbcW6z/2VytnFDhZfumh8Ss8zxHE6qpMP5sHTRe0EaM=
cel.dev/expr v0.16.1/go.mod h1:AsGA5zb3WruAEQeQng1RZdGEXmBj0jvMWh6l5SnNuC8=
+cel.dev/expr v0.19.1/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw=
cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
@@ -726,6 +727,8 @@ github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMo
github.com/OneOfOne/xxhash v1.2.8/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q=
github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM=
github.com/akavel/rsrc v0.10.2/go.mod h1:uLoCtb9J+EyAqh+26kdrTgmzRBFPGOolLWKpdxkKq+c=
+github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE=
+github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE=
github.com/alexflint/go-filemutex v1.1.0/go.mod h1:7P4iRhttt/nUvUOrYIhcpMzv2G6CY9UnI16Z+UJqRyk=
github.com/alexflint/go-filemutex v1.2.0/go.mod h1:mYyQSWvw9Tx2/H2n9qXPb52tTYfE0pZAWcBq5mK025c=
github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
@@ -760,6 +763,7 @@ github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91
github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
+github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E=
github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA=
github.com/cilium/ebpf v0.9.1/go.mod h1:+OhNOIXx/Fnu1IE8bJz2dzOA+VSfyTfdNUVdlQnxUFY=
@@ -836,6 +840,7 @@ github.com/containers/ocicrypt v1.1.10/go.mod h1:YfzSSr06PTHQwSTUKqDSjish9BeW1E4
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
github.com/coreos/go-iptables v0.6.0/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q=
+github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk=
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
@@ -914,7 +919,9 @@ github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ4
github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.0/go.mod h1:YkVgnZu1ZjjL7xTxrfm/LLZBfkhTqSR1ydtm6jTKKwI=
github.com/go-logr/zapr v1.2.3/go.mod h1:eIauM6P8qSvTw5o2ez6UEAfGjQKrxQTl5EoK+Qa2oG4=
+github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo=
+github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
@@ -940,10 +947,12 @@ github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEW
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/gomodule/redigo v1.8.2/go.mod h1:P9dn9mFrCBvWhGE1wpxx6fgq7BAeLBk+UUUzlpkBYO0=
github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
+github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
github.com/google/cel-go v0.12.6/go.mod h1:Jk7ljRzLBhkmiAwBoUxB1sZSCVBAzkqPF25olK/iRDw=
github.com/google/flatbuffers v1.12.1/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
github.com/google/flatbuffers v2.0.8+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
github.com/google/flatbuffers v23.5.26+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
+github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw=
github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
github.com/google/go-containerregistry v0.13.0 h1:y1C7Z3e149OJbOPDBxLYR8ITPz8dTKqQwjErKVHJC8k=
github.com/google/go-containerregistry v0.13.0/go.mod h1:J9FQ+eSS4a1aC2GNZxvNpbWhgp0487v+cgiilB4FqDo=
@@ -957,6 +966,7 @@ github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLe
github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
github.com/google/s2a-go v0.1.3/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A=
github.com/google/s2a-go v0.1.4/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A=
github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw=
@@ -992,14 +1002,14 @@ github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+
github.com/googleapis/google-cloud-go-testing v0.0.0-20210719221736-1c9a4c676720/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q=
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
+github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA=
+github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y=
github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0/go.mod h1:qmOFXW2epJhM0qSnUUYpldc7gVz2KMQwJ/QYCDIa7XU=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 h1:TmHmbvxPmaegwhDubVz0lICL0J5Ka2vwTzhoePEXsGE=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0/go.mod h1:qztMSjm835F2bXf+5HKAPIS5qsmQDqZna/PgVt4rWtI=
github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
github.com/hashicorp/consul/api v1.12.0/go.mod h1:6pVBMo0ebnYdt2S3H87XhekM/HHrUoTD2XXb/VrZVy0=
github.com/hashicorp/consul/api v1.18.0/go.mod h1:owRRGJ9M5xReDC5nfT8FTJrNAPbT4NM6p/k+d03q2v4=
@@ -1068,6 +1078,7 @@ github.com/klauspost/compress v1.17.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQs
github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
+github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/lestrrat-go/backoff/v2 v2.0.8/go.mod h1:rHP/q/r9aT27n24JQLa7JhSQZCKBBOiM/uP402WwN8Y=
github.com/lestrrat-go/blackmagic v1.0.0/go.mod h1:TNgH//0vYSs8VXDCfkZLgIrVTTXQELZffUV0tz3MtdQ=
github.com/lestrrat-go/httpcc v1.0.1/go.mod h1:qiltp3Mt56+55GPVCbTdM9MlqhvzyuL6W/NMDA8vA5E=
@@ -1113,6 +1124,7 @@ github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:F
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mndrix/tap-go v0.0.0-20171203230836-629fa407e90b/go.mod h1:pzzDgJWZ34fGzaAZGFW22KVZDfyrYW+QABMrWnJBnSs=
github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
+github.com/moby/spdystream v0.5.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI=
github.com/moby/sys/mount v0.3.4/go.mod h1:KcQJMbQdJHPlq5lcYT+/CjatWM4PuxKe+XLSVS4J6Os=
github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI=
github.com/moby/sys/mountinfo v0.7.2/go.mod h1:1YOa8w8Ih7uW0wALDUgT1dTTSBrZ+HiBLGws92L2RU4=
@@ -1143,6 +1155,7 @@ github.com/onsi/ginkgo/v2 v2.6.1/go.mod h1:yjiuMwPokqY1XauOgju45q3sJt6VzQ/Fict1L
github.com/onsi/ginkgo/v2 v2.9.1/go.mod h1:FEcmzVcCHl+4o9bQZVab+4dC9+j+91t2FHSzmGAPfuo=
github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o=
github.com/onsi/ginkgo/v2 v2.15.0/go.mod h1:HlxMHtYF57y6Dpf+mc5529KKmSq9h2FpCF+/ZkwUxKM=
+github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.15.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0=
github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
@@ -1154,6 +1167,7 @@ github.com/onsi/gomega v1.24.2/go.mod h1:gs3J10IS7Z7r7eXRoNJIrNqU4ToQukCJhFtKrWg
github.com/onsi/gomega v1.27.4/go.mod h1:riYq/GJKh8hhoM01HN6Vmuy93AarCXCBGpvFDK3q3fQ=
github.com/onsi/gomega v1.29.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ=
github.com/onsi/gomega v1.31.0/go.mod h1:DW9aCi7U6Yi40wNVAvT6kzFnEVEI5n3DloYBiKiT6zk=
+github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
github.com/open-policy-agent/opa v0.42.2/go.mod h1:MrmoTi/BsKWT58kXlVayBb+rYVeaMwuBm3nYAN3923s=
github.com/opencontainers/image-spec v1.0.2-0.20211117181255-693428a734f5/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
@@ -1191,11 +1205,13 @@ github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P
github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
+github.com/prometheus/client_golang v1.20.4/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY=
github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU=
github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw=
github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
github.com/prometheus/client_model v0.6.0/go.mod h1:NTQHnmxFpouOD0DpvP4XujX3CdOAGQPoaGhyTchlyt8=
+github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
@@ -1261,7 +1277,7 @@ github.com/veraison/go-cose v1.0.0-rc.1/go.mod h1:7ziE85vSq4ScFTg6wyoMXjucIGOf4J
github.com/vishvananda/netlink v1.1.1-0.20210330154013-f5de75959ad5/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho=
github.com/vishvananda/netlink v1.2.1-beta.2/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho=
github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
-github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
+github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU=
github.com/yashtewari/glob-intersection v0.1.0/go.mod h1:LK7pIC3piUjovexikBbJ26Yml7g8xa5bsjfx2v1fwok=
github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
@@ -1274,11 +1290,13 @@ go.etcd.io/etcd/api/v3 v3.5.4/go.mod h1:5GB2vv4A4AOn3yk7MftYGHkUfGtDHnEraIjym4dY
go.etcd.io/etcd/api/v3 v3.5.6/go.mod h1:KFtNaxGDw4Yx/BA4iPPwevUTAuqcsPxzyX8PHydchN8=
go.etcd.io/etcd/api/v3 v3.5.9/go.mod h1:uyAal843mC8uUVSLWz6eHa/d971iDGnCRpmKd2Z+X8k=
go.etcd.io/etcd/api/v3 v3.5.10/go.mod h1:TidfmT4Uycad3NM/o25fG3J07odo4GBB9hoxaodFCtI=
+go.etcd.io/etcd/api/v3 v3.5.21/go.mod h1:c3aH5wcvXv/9dqIw2Y810LDXJfhSYdHQ0vxmP3CCHVY=
go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
go.etcd.io/etcd/client/pkg/v3 v3.5.4/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
go.etcd.io/etcd/client/pkg/v3 v3.5.6/go.mod h1:ggrwbk069qxpKPq8/FKkQ3Xq9y39kbFR4LnKszpRXeQ=
go.etcd.io/etcd/client/pkg/v3 v3.5.9/go.mod h1:y+CzeSmkMpWN2Jyu1npecjB9BBnABxGM4pN8cGuJeL4=
go.etcd.io/etcd/client/pkg/v3 v3.5.10/go.mod h1:DYivfIviIuQ8+/lCq4vcxuseg2P2XbHygkKwFo9fc8U=
+go.etcd.io/etcd/client/pkg/v3 v3.5.21/go.mod h1:BgqT/IXPjK9NkeSDjbzwsHySX3yIle2+ndz28nVsjUs=
go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ=
go.etcd.io/etcd/client/v2 v2.305.4/go.mod h1:Ud+VUwIi9/uQHOMA+4ekToJ12lTxlv0zB/+DHwTGEbU=
go.etcd.io/etcd/client/v2 v2.305.6/go.mod h1:BHha8XJGe8vCIBfWBpbBLVZ4QjOIlfoouvOwydu63E0=
@@ -1289,6 +1307,7 @@ go.etcd.io/etcd/client/v3 v3.5.4/go.mod h1:ZaRkVgBZC+L+dLCjTcF1hRXpgZXQPOvnA/Ak/
go.etcd.io/etcd/client/v3 v3.5.6/go.mod h1:f6GRinRMCsFVv9Ht42EyY7nfsVGwrNO0WEoS2pRKzQk=
go.etcd.io/etcd/client/v3 v3.5.9/go.mod h1:i/Eo5LrZ5IKqpbtpPDuaUnDOUv471oDg8cjQaUr2MbA=
go.etcd.io/etcd/client/v3 v3.5.10/go.mod h1:RVeBnDz2PUEZqTpgqwAtUd8nAPf5kjyFyND7P1VkOKc=
+go.etcd.io/etcd/client/v3 v3.5.21/go.mod h1:mFYy67IOqmbRf/kRUvsHixzo3iG+1OF2W2+jVIQRAnU=
go.etcd.io/etcd/pkg/v3 v3.5.0/go.mod h1:UzJGatBQ1lXChBkQF0AuAtkRQMYnHubxAEYIrC3MSsE=
go.etcd.io/etcd/pkg/v3 v3.5.5/go.mod h1:6ksYFxttiUGzC2uxyqiyOEvhAiD0tuIqSZkX3TyPdaE=
go.etcd.io/etcd/raft/v3 v3.5.0/go.mod h1:UFOHSIvO/nKwd4lhkwabrTD3cqW5yVyYYf/KlD00Szc=
@@ -1333,8 +1352,6 @@ go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.14.0/go.mod h1:UFG7EBM
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.3.0/go.mod h1:hO1KLR7jcKaDDKDkvI9dP/FIhpmna5lkqPUQdEjFAM8=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.7.0/go.mod h1:ceUgdyfNv4h4gLxHR0WNfDiiVmZFodZhZSbOLhpxqXE=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.14.0/go.mod h1:HrbCVv40OOLTABmOn1ZWty6CHXkU8DK/Urc43tHug70=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 h1:Vh5HayB/0HHfOQA7Ctx69E/Y/DcQSMPpKANYVMQ7fBA=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0/go.mod h1:cpgtDBaqD/6ok/UG0jT15/uKjAY8mRA53diogHBg3UI=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.3.0/go.mod h1:keUU7UfnwWTWpJ+FWnyqmogPa82nuU5VUANFq49hlMY=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.7.0/go.mod h1:E+/KKhwOSw8yoPxSSuUHG6vKppkvhN+S1Jc7Nib3k3o=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.14.0/go.mod h1:5w41DY6S9gZrbjuq6Y+753e96WfPha5IcsOSZTtullM=
@@ -1379,16 +1396,16 @@ go.opentelemetry.io/proto/otlp v0.16.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI
go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U=
go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM=
go.opentelemetry.io/proto/otlp v1.1.0/go.mod h1:GpBHCBWiqvVLDqmHZsoMM3C5ySeKTC7ej/RNTae6MdY=
-go.opentelemetry.io/proto/otlp v1.4.0 h1:TA9WRvW6zMwP+Ssb6fLoUIuirti1gGbP28GcKG1jgeg=
-go.opentelemetry.io/proto/otlp v1.4.0/go.mod h1:PPBWZIP98o2ElSqI35IHfu7hIhSwvc5N38Jw8pXuGFY=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/automaxprocs v1.5.1/go.mod h1:BF4eumQw0P9GtnuxxovUd06vwm1o18oMzFtK66vU6XU=
go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
+go.uber.org/multierr v1.10.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw=
+go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
go4.org/intern v0.0.0-20230205224052-192e9f60865c h1:b8WZ7Ja8nKegYxfwDLLwT00ZKv4lXAQrw8LYPK+cHSI=
go4.org/intern v0.0.0-20230205224052-192e9f60865c/go.mod h1:RJ0SVrOMpxLhgb5noIV+09zI1RsRlMsbUcSxpWHqbrE=
go4.org/unsafe/assume-no-moving-gc v0.0.0-20230204201903-c31fa085b70e h1:AY/D6WBvaYJLmXK9VTIAX0tokDhrkkqdvIUwOU2nxio=
@@ -1422,6 +1439,7 @@ golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDf
golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
golang.org/x/crypto v0.30.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc=
+golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U=
golang.org/x/exp/typeparams v0.0.0-20221208152030-732eee02a75a/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
@@ -1438,6 +1456,7 @@ golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/mod v0.19.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@@ -1487,6 +1506,7 @@ golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0=
golang.org/x/net v0.31.0/go.mod h1:P4fl1q7dY2hnZFxEk4pPSkDHF+QqjitcnDjUQyMM+pM=
golang.org/x/net v0.32.0/go.mod h1:CwU0IoeOlnQQWJ6ioyFrfRuomB8GKF6KbYXZVyeXNfs=
+golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw=
golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA=
golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
@@ -1523,6 +1543,7 @@ golang.org/x/oauth2 v0.17.0/go.mod h1:OzPDGQiuQMguemayvdylqddI7qcD9lnSDb+1FiwQ5H
golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8=
golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/oauth2 v0.24.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
+golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 h1:uVc8UZUe6tr40fFVnUP5Oj+veunVezqYl9z7DYw9xzw=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -1533,8 +1554,10 @@ golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -1609,6 +1632,8 @@ golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/telemetry v0.0.0-20240208230135-b75ee8823808/go.mod h1:KG1lNk5ZFNssSZLrpVb4sMXKMpGwGXOxSG3rnu2gZQQ=
golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE=
golang.org/x/telemetry v0.0.0-20240521205824-bda55230c457/go.mod h1:pRgIJT+bRLFKnoM1ldnzKoxTIn14Yxz928LQRYYgIN0=
@@ -1677,6 +1702,8 @@ golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58
golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c=
+golang.org/x/tools v0.23.0/go.mod h1:pnu6ufv6vQkll6szChhK3C3L/ruaIv5eBeztNG8wtsI=
+golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0=
golang.org/x/tools v0.33.0/go.mod h1:CIJMaWEY88juyUfo7UbgPqbC8rU2OqfAV1h2Qp0oMYI=
golang.org/x/tools v0.34.0/go.mod h1:pAP9OwEaY1CAW3HOmg3hLZC5Z0CCmzjAF2UQMSqNARg=
golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -1809,6 +1836,7 @@ google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d/go.mod h1:yZTlhN0t
google.golang.org/genproto v0.0.0-20230920204549-e6e6cdab5c13/go.mod h1:CCviP9RmpZ1mxVr8MUjCnSiY09IbAXZxhLE6EhHIdPU=
google.golang.org/genproto v0.0.0-20231016165738-49dd2c1f3d0b/go.mod h1:CgAqfJo+Xmu0GwA0411Ht3OU3OntXwsGmrmjI8ioGXI=
google.golang.org/genproto v0.0.0-20231030173426-d783a09b4405/go.mod h1:3WDQMjmJk36UQhjQ89emUzb1mdaHcPeeAh4SCBKznB4=
+google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17 h1:wpZ8pe2x1Q3f2KyT5f8oP/fa9rHAKgFPr/HZdNuS+PQ=
google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:J7XzRzVy1+IPwWHZUzoD0IccYZIrXILAQpc+Qy9CMhY=
google.golang.org/genproto v0.0.0-20231212172506-995d672761c0/go.mod h1:l/k7rMz0vFTBPy+tFSGvXEd3z+BcoG1k7EHbqm+YBsY=
google.golang.org/genproto v0.0.0-20240116215550-a9fa1716bcac/go.mod h1:+Rvu7ElI+aLzyDQhpHMFMMltsD6m7nqpuWDd2CwJw3k=
@@ -1838,8 +1866,6 @@ google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237/go.
google.golang.org/genproto/googleapis/api v0.0.0-20240513163218-0867130af1f8/go.mod h1:vPrPUTsDCYxXWjP7clS81mZ6/803D8K4iM9Ma27VKas=
google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:qpvKtACPCQhAdu3PyQgV4l3LMXZEtft7y8QcarRsp9I=
google.golang.org/genproto/googleapis/api v0.0.0-20241118233622-e639e219e697/go.mod h1:+D9ySVjN8nY8YCVjc5O7PZDIdZporIDY3KaGfJunh88=
-google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 h1:CkkIfIt50+lT6NHAVoRYEyAvQGFM7xEwXUUywFvEb3Q=
-google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576/go.mod h1:1R3kvZ1dtP3+4p4d3G8uJ8rFk/fWlScl38vanWACI08=
google.golang.org/genproto/googleapis/bytestream v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:ylj+BE99M198VPbBh6A8d9n3w8fChvyLK3wwBOjXBFA=
google.golang.org/genproto/googleapis/bytestream v0.0.0-20231120223509-83a465c0220f/go.mod h1:iIgEblxoG4klcXsG0d9cpoxJ4xndv6+1FkDROCHhPRI=
google.golang.org/genproto/googleapis/bytestream v0.0.0-20240125205218-1f4bbc51befe/go.mod h1:SCz6T5xjNXM4QFPRwxHcfChp7V+9DcXR3ay2TkHR8Tg=
@@ -1867,10 +1893,9 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda/go.
google.golang.org/genproto/googleapis/rpc v0.0.0-20240513163218-0867130af1f8/go.mod h1:I7Y+G38R2bu5j1aLzfFmQfTcU/WnFuqDwLZAbvKTKpM=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240624140628-dc46fd24d27d/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240730163845-b1a4ccb954bf/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU=
google.golang.org/genproto/googleapis/rpc v0.0.0-20241118233622-e639e219e697/go.mod h1:5uTbfoYQed2U9p3KIj2/Zzm02PYhndfdmML0qC3q3FU=
google.golang.org/genproto/googleapis/rpc v0.0.0-20241206012308-a4fef0638583/go.mod h1:5uTbfoYQed2U9p3KIj2/Zzm02PYhndfdmML0qC3q3FU=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 h1:8ZmaLZE4XWrtU3MyClkYqqtl6Oegr3235h7jxsDyqCY=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576/go.mod h1:5uTbfoYQed2U9p3KIj2/Zzm02PYhndfdmML0qC3q3FU=
google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
@@ -1908,8 +1933,6 @@ google.golang.org/grpc v1.63.0/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDom
google.golang.org/grpc v1.64.1/go.mod h1:hiQF4LFZelK2WKaP6W0L92zGHtiQdZxk8CrSdvyjeP0=
google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA=
google.golang.org/grpc v1.68.0/go.mod h1:fmSPC5AsjSBCK54MyHRx48kpOti1/jRfOlwEWywNjWA=
-google.golang.org/grpc v1.68.1 h1:oI5oTa11+ng8r8XMMN7jAOmWfPZWbYpCFaMUTACxkM0=
-google.golang.org/grpc v1.68.1/go.mod h1:+q1XYFJjShcqn0QZHvCyeR4CXPA+llXIeUIfIe00waw=
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
@@ -1919,6 +1942,8 @@ google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqw
google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
+google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
+gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
@@ -1929,6 +1954,7 @@ k8s.io/api v0.26.1/go.mod h1:xd/GBNgR0f707+ATNyPmQ1oyKSgndzXij81FzWGsejg=
k8s.io/api v0.26.2/go.mod h1:1kjMQsFE+QHPfskEcVNgL3+Hp88B80uj0QtSOlj8itU=
k8s.io/api v0.27.1/go.mod h1:z5g/BpAiD+f6AArpqNjkY+cji8ueZDU/WV1jcj5Jk4E=
k8s.io/api v0.27.2/go.mod h1:ENmbocXfBT2ADujUXcBhHV55RIT31IIEvkntP6vZKS4=
+k8s.io/api v0.33.0/go.mod h1:CTO61ECK/KU7haa3qq8sarQ0biLq2ju405IZAd9zsiM=
k8s.io/apimachinery v0.22.1/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0=
k8s.io/apimachinery v0.22.5/go.mod h1:xziclGKwuuJ2RM5/rSFQSYAj0zdbci3DH8kj+WvyN0U=
k8s.io/apimachinery v0.26.2/go.mod h1:ats7nN1LExKHvJ9TmwootT00Yz05MuYqPXEXaVeOy5I=
@@ -1954,6 +1980,7 @@ k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2R
k8s.io/kube-openapi v0.0.0-20211109043538-20434351676c/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw=
k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280/go.mod h1:+Axhij7bCpeqhklhUTe3xmOn6bWxolyZEeyaFpjGtl4=
k8s.io/kube-openapi v0.0.0-20230308215209-15aac26d736a/go.mod h1:y5VtZWM9sHHc2ZodIH/6SHzXj+TPU5USoA8lcIeKEKY=
+k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8=
k8s.io/kubelet v0.30.2/go.mod h1:DSwwTbLQmdNkebAU7ypIALR4P9aXZNFwgRmedojUE94=
k8s.io/kubernetes v1.21.0/go.mod h1:Yx6XZ8zalyqEk7but+j4+5SvLzdyH1eeqZ4cwO+5dD4=
k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
@@ -1965,6 +1992,7 @@ k8s.io/utils v0.0.0-20230202215443-34013725500c/go.mod h1:OLgZIPagt7ERELqWJFomSt
k8s.io/utils v0.0.0-20230209194617-a36077c30491/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
k8s.io/utils v0.0.0-20230220204549-a5ecb0141aa5/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
oras.land/oras-go v1.2.0/go.mod h1:pFNs7oHp2dYsYMSS82HaX5l4mpnGO7hbpPN6EWH2ltc=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.22/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.35/go.mod h1:WxjusMwXlKzfAs4p9km6XJRndVt2FROgMVCE4cdohFo=
diff --git a/vendor/github.com/fxamacker/cbor/v2/.gitignore b/vendor/github.com/fxamacker/cbor/v2/.gitignore
new file mode 100644
index 0000000000..f1c181ec9c
--- /dev/null
+++ b/vendor/github.com/fxamacker/cbor/v2/.gitignore
@@ -0,0 +1,12 @@
+# Binaries for programs and plugins
+*.exe
+*.exe~
+*.dll
+*.so
+*.dylib
+
+# Test binary, build with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
diff --git a/vendor/github.com/fxamacker/cbor/v2/.golangci.yml b/vendor/github.com/fxamacker/cbor/v2/.golangci.yml
new file mode 100644
index 0000000000..38cb9ae101
--- /dev/null
+++ b/vendor/github.com/fxamacker/cbor/v2/.golangci.yml
@@ -0,0 +1,104 @@
+# Do not delete linter settings. Linters like gocritic can be enabled on the command line.
+
+linters-settings:
+ depguard:
+ rules:
+ prevent_unmaintained_packages:
+ list-mode: strict
+ files:
+ - $all
+ - "!$test"
+ allow:
+ - $gostd
+ - github.com/x448/float16
+ deny:
+ - pkg: io/ioutil
+ desc: "replaced by io and os packages since Go 1.16: https://tip.golang.org/doc/go1.16#ioutil"
+ dupl:
+ threshold: 100
+ funlen:
+ lines: 100
+ statements: 50
+ goconst:
+ ignore-tests: true
+ min-len: 2
+ min-occurrences: 3
+ gocritic:
+ enabled-tags:
+ - diagnostic
+ - experimental
+ - opinionated
+ - performance
+ - style
+ disabled-checks:
+ - commentedOutCode
+ - dupImport # https://github.com/go-critic/go-critic/issues/845
+ - ifElseChain
+ - octalLiteral
+ - paramTypeCombine
+ - whyNoLint
+ gofmt:
+ simplify: false
+ goimports:
+ local-prefixes: github.com/fxamacker/cbor
+ golint:
+ min-confidence: 0
+ govet:
+ check-shadowing: true
+ lll:
+ line-length: 140
+ maligned:
+ suggest-new: true
+ misspell:
+ locale: US
+ staticcheck:
+ checks: ["all"]
+
+linters:
+ disable-all: true
+ enable:
+ - asciicheck
+ - bidichk
+ - depguard
+ - errcheck
+ - exportloopref
+ - goconst
+ - gocritic
+ - gocyclo
+ - gofmt
+ - goimports
+ - goprintffuncname
+ - gosec
+ - gosimple
+ - govet
+ - ineffassign
+ - misspell
+ - nilerr
+ - revive
+ - staticcheck
+ - stylecheck
+ - typecheck
+ - unconvert
+ - unused
+
+issues:
+ # max-issues-per-linter default is 50. Set to 0 to disable limit.
+ max-issues-per-linter: 0
+ # max-same-issues default is 3. Set to 0 to disable limit.
+ max-same-issues: 0
+
+ exclude-rules:
+ - path: decode.go
+ text: "string ` overflows ` has (\\d+) occurrences, make it a constant"
+ - path: decode.go
+ text: "string ` \\(range is \\[` has (\\d+) occurrences, make it a constant"
+ - path: decode.go
+ text: "string `, ` has (\\d+) occurrences, make it a constant"
+ - path: decode.go
+ text: "string ` overflows Go's int64` has (\\d+) occurrences, make it a constant"
+ - path: decode.go
+ text: "string `\\]\\)` has (\\d+) occurrences, make it a constant"
+ - path: valid.go
+ text: "string ` for type ` has (\\d+) occurrences, make it a constant"
+ - path: valid.go
+ text: "string `cbor: ` has (\\d+) occurrences, make it a constant"
diff --git a/vendor/github.com/fxamacker/cbor/v2/CODE_OF_CONDUCT.md b/vendor/github.com/fxamacker/cbor/v2/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000000..c794b2b0c6
--- /dev/null
+++ b/vendor/github.com/fxamacker/cbor/v2/CODE_OF_CONDUCT.md
@@ -0,0 +1,133 @@
+
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+We as members, contributors, and leaders pledge to make participation in our
+community a harassment-free experience for everyone, regardless of age, body
+size, visible or invisible disability, ethnicity, sex characteristics, gender
+identity and expression, level of experience, education, socio-economic status,
+nationality, personal appearance, race, caste, color, religion, or sexual
+identity and orientation.
+
+We pledge to act and interact in ways that contribute to an open, welcoming,
+diverse, inclusive, and healthy community.
+
+## Our Standards
+
+Examples of behavior that contributes to a positive environment for our
+community include:
+
+* Demonstrating empathy and kindness toward other people
+* Being respectful of differing opinions, viewpoints, and experiences
+* Giving and gracefully accepting constructive feedback
+* Accepting responsibility and apologizing to those affected by our mistakes,
+ and learning from the experience
+* Focusing on what is best not just for us as individuals, but for the overall
+ community
+
+Examples of unacceptable behavior include:
+
+* The use of sexualized language or imagery, and sexual attention or advances of
+ any kind
+* Trolling, insulting or derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or email address,
+ without their explicit permission
+* Other conduct which could reasonably be considered inappropriate in a
+ professional setting
+
+## Enforcement Responsibilities
+
+Community leaders are responsible for clarifying and enforcing our standards of
+acceptable behavior and will take appropriate and fair corrective action in
+response to any behavior that they deem inappropriate, threatening, offensive,
+or harmful.
+
+Community leaders have the right and responsibility to remove, edit, or reject
+comments, commits, code, wiki edits, issues, and other contributions that are
+not aligned to this Code of Conduct, and will communicate reasons for moderation
+decisions when appropriate.
+
+## Scope
+
+This Code of Conduct applies within all community spaces, and also applies when
+an individual is officially representing the community in public spaces.
+Examples of representing our community include using an official e-mail address,
+posting via an official social media account, or acting as an appointed
+representative at an online or offline event.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be
+reported to the community leaders responsible for enforcement at
+faye.github@gmail.com.
+All complaints will be reviewed and investigated promptly and fairly.
+
+All community leaders are obligated to respect the privacy and security of the
+reporter of any incident.
+
+## Enforcement Guidelines
+
+Community leaders will follow these Community Impact Guidelines in determining
+the consequences for any action they deem in violation of this Code of Conduct:
+
+### 1. Correction
+
+**Community Impact**: Use of inappropriate language or other behavior deemed
+unprofessional or unwelcome in the community.
+
+**Consequence**: A private, written warning from community leaders, providing
+clarity around the nature of the violation and an explanation of why the
+behavior was inappropriate. A public apology may be requested.
+
+### 2. Warning
+
+**Community Impact**: A violation through a single incident or series of
+actions.
+
+**Consequence**: A warning with consequences for continued behavior. No
+interaction with the people involved, including unsolicited interaction with
+those enforcing the Code of Conduct, for a specified period of time. This
+includes avoiding interactions in community spaces as well as external channels
+like social media. Violating these terms may lead to a temporary or permanent
+ban.
+
+### 3. Temporary Ban
+
+**Community Impact**: A serious violation of community standards, including
+sustained inappropriate behavior.
+
+**Consequence**: A temporary ban from any sort of interaction or public
+communication with the community for a specified period of time. No public or
+private interaction with the people involved, including unsolicited interaction
+with those enforcing the Code of Conduct, is allowed during this period.
+Violating these terms may lead to a permanent ban.
+
+### 4. Permanent Ban
+
+**Community Impact**: Demonstrating a pattern of violation of community
+standards, including sustained inappropriate behavior, harassment of an
+individual, or aggression toward or disparagement of classes of individuals.
+
+**Consequence**: A permanent ban from any sort of public interaction within the
+community.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage],
+version 2.1, available at
+[https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1].
+
+Community Impact Guidelines were inspired by
+[Mozilla's code of conduct enforcement ladder][Mozilla CoC].
+
+For answers to common questions about this code of conduct, see the FAQ at
+[https://www.contributor-covenant.org/faq][FAQ]. Translations are available at
+[https://www.contributor-covenant.org/translations][translations].
+
+[homepage]: https://www.contributor-covenant.org
+[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html
+[Mozilla CoC]: https://github.com/mozilla/diversity
+[FAQ]: https://www.contributor-covenant.org/faq
+[translations]: https://www.contributor-covenant.org/translations
diff --git a/vendor/github.com/fxamacker/cbor/v2/CONTRIBUTING.md b/vendor/github.com/fxamacker/cbor/v2/CONTRIBUTING.md
new file mode 100644
index 0000000000..de0965e12d
--- /dev/null
+++ b/vendor/github.com/fxamacker/cbor/v2/CONTRIBUTING.md
@@ -0,0 +1,41 @@
+# How to contribute
+
+You can contribute by using the library, opening issues, or opening pull requests.
+
+## Bug reports and security vulnerabilities
+
+Most issues are tracked publicly on [GitHub](https://github.com/fxamacker/cbor/issues).
+
+To report security vulnerabilities, please email faye.github@gmail.com and allow time for the problem to be resolved before disclosing it to the public. For more info, see [Security Policy](https://github.com/fxamacker/cbor#security-policy).
+
+Please do not send data that might contain personally identifiable information, even if you think you have permission. That type of support requires payment and a signed contract where I'm indemnified, held harmless, and defended by you for any data you send to me.
+
+## Pull requests
+
+Please [create an issue](https://github.com/fxamacker/cbor/issues/new/choose) before you begin work on a PR. The improvement may have already been considered, etc.
+
+Pull requests have signing requirements and must not be anonymous. Exceptions are usually made for docs and CI scripts.
+
+See the [Pull Request Template](https://github.com/fxamacker/cbor/blob/master/.github/pull_request_template.md) for details.
+
+A pull request has a greater chance of being approved if:
+- it does not reduce speed, increase memory use, reduce security, etc. for people not using the new option or feature.
+- it has > 97% code coverage.
+
+## Describe your issue
+
+Clearly describe the issue:
+* If it's a bug, please provide: **version of this library** and **Go** (`go version`), **unmodified error message**, and describe **how to reproduce it**. Also state **what you expected to happen** instead of the error.
+* If you propose a change or addition, try to give an example of how the improved code could look or how to use it.
+* If you found a compilation error, please confirm you're using a supported version of Go. If you are, then provide the output of `go version` first, followed by the complete error message.
+
+## Please don't
+
+Please don't send data containing personally identifiable information, even if you think you have permission. That type of support requires payment and a contract where I'm indemnified, held harmless, and defended for any data you send to me.
+
+Please don't send CBOR data larger than 1024 bytes by email. If you want to send crash-producing CBOR data > 1024 bytes by email, please get my permission before sending it to me.
+
+## Credits
+
+- This guide used nlohmann/json contribution guidelines for inspiration as suggested in issue #22.
+- Special thanks to @lukseven for pointing out the contribution guidelines didn't mention signing requirements.
diff --git a/vendor/go.uber.org/atomic/LICENSE.txt b/vendor/github.com/fxamacker/cbor/v2/LICENSE
similarity index 87%
rename from vendor/go.uber.org/atomic/LICENSE.txt
rename to vendor/github.com/fxamacker/cbor/v2/LICENSE
index 8765c9fbc6..eaa8504921 100644
--- a/vendor/go.uber.org/atomic/LICENSE.txt
+++ b/vendor/github.com/fxamacker/cbor/v2/LICENSE
@@ -1,4 +1,6 @@
-Copyright (c) 2016 Uber Technologies, Inc.
+MIT License
+
+Copyright (c) 2019-present Faye Amacker
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
@@ -7,13 +9,13 @@ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/vendor/github.com/fxamacker/cbor/v2/README.md b/vendor/github.com/fxamacker/cbor/v2/README.md
new file mode 100644
index 0000000000..af0a79507e
--- /dev/null
+++ b/vendor/github.com/fxamacker/cbor/v2/README.md
@@ -0,0 +1,691 @@
+# CBOR Codec in Go
+
+
+
+[fxamacker/cbor](https://github.com/fxamacker/cbor) is a library for encoding and decoding [CBOR](https://www.rfc-editor.org/info/std94) and [CBOR Sequences](https://www.rfc-editor.org/rfc/rfc8742.html).
+
+CBOR is a [trusted alternative](https://www.rfc-editor.org/rfc/rfc8949.html#name-comparison-of-other-binary-) to JSON, MessagePack, Protocol Buffers, etc. CBOR is an Internet Standard defined by [IETF STD 94 (RFC 8949)](https://www.rfc-editor.org/info/std94) and is designed to be relevant for decades.
+
+`fxamacker/cbor` is used in projects by Arm Ltd., Cisco, EdgeX Foundry, Flow Foundation, Fraunhofer‑AISEC, Kubernetes, Let's Encrypt (ISRG), Linux Foundation, Microsoft, Mozilla, Oasis Protocol, Tailscale, Teleport, [etc](https://github.com/fxamacker/cbor#who-uses-fxamackercbor).
+
+See [Quick Start](#quick-start) and [Releases](https://github.com/fxamacker/cbor/releases/). 🆕 `UnmarshalFirst` and `DiagnoseFirst` can decode CBOR Sequences. `cbor.MarshalToBuffer()` and `UserBufferEncMode` accept a user-specified buffer.
+
+## fxamacker/cbor
+
+[ci](https://github.com/fxamacker/cbor/actions?query=workflow%3Aci)
+[cover ≥96%](https://github.com/fxamacker/cbor/actions?query=workflow%3A%22cover+%E2%89%A596%25%22)
+[CodeQL](https://github.com/fxamacker/cbor/actions/workflows/codeql-analysis.yml)
+[fuzzing](#fuzzing-and-code-coverage)
+[Go Report Card](https://goreportcard.com/report/github.com/fxamacker/cbor)
+
+`fxamacker/cbor` is a CBOR codec in full conformance with [IETF STD 94 (RFC 8949)](https://www.rfc-editor.org/info/std94). It also supports CBOR Sequences ([RFC 8742](https://www.rfc-editor.org/rfc/rfc8742.html)) and Extended Diagnostic Notation ([Appendix G of RFC 8610](https://www.rfc-editor.org/rfc/rfc8610.html#appendix-G)).
+
+Features include full support for CBOR tags, [Core Deterministic Encoding](https://www.rfc-editor.org/rfc/rfc8949.html#name-core-deterministic-encoding), duplicate map key detection, etc.
+
+Design balances trade-offs between security, speed, concurrency, encoded data size, usability, etc.
+
+Highlights
+
+__🚀 Speed__
+
+Encoding and decoding is fast without using Go's `unsafe` package. Slower settings are opt-in. Default limits allow very fast and memory efficient rejection of malformed CBOR data.
+
+__🔒 Security__
+
+Decoder has configurable limits that defend against malicious inputs. Duplicate map key detection is supported. By contrast, `encoding/gob` is [not designed to be hardened against adversarial inputs](https://pkg.go.dev/encoding/gob#hdr-Security).
+
+Codec passed multiple confidential security assessments in 2022. No vulnerabilities found in subset of codec in a [nonconfidential security assessment](https://github.com/veraison/go-cose/blob/v1.0.0-rc.1/reports/NCC_Microsoft-go-cose-Report_2022-05-26_v1.0.pdf) prepared by NCC Group for Microsoft Corporation.
+
+__🗜️ Data Size__
+
+Struct tags (`toarray`, `keyasint`, `omitempty`) automatically reduce size of encoded structs. Encoding optionally shrinks float64→32→16 when values fit.
+
+__:jigsaw: Usability__
+
+API is mostly the same as `encoding/json`, plus interfaces that simplify concurrency for CBOR options. Encoding and decoding modes can be created at startup and reused by any goroutines.
+
+Presets include Core Deterministic Encoding, Preferred Serialization, CTAP2 Canonical CBOR, etc.
+
+__📆 Extensibility__
+
+Features include CBOR [extension points](https://www.rfc-editor.org/rfc/rfc8949.html#section-7.1) (e.g. CBOR tags) and extensive settings. API has interfaces that allow users to create custom encoding and decoding without modifying this library.
+
+
+
+
+
+### Secure Decoding with Configurable Settings
+
+`fxamacker/cbor` has configurable limits, etc. that defend against malicious CBOR data.
+
+By contrast, `encoding/gob` is [not designed to be hardened against adversarial inputs](https://pkg.go.dev/encoding/gob#hdr-Security).
+
+Example decoding with encoding/gob 💥 fatal error (out of memory)
+
+```Go
+// Example of encoding/gob having "fatal error: runtime: out of memory"
+// while decoding 181 bytes.
+package main
+import (
+ "bytes"
+ "encoding/gob"
+ "encoding/hex"
+ "fmt"
+)
+
+// Example data is from https://github.com/golang/go/issues/24446
+// (shortened to 181 bytes).
+const data = "4dffb503010102303001ff30000109010130010800010130010800010130" +
+ "01ffb80001014a01ffb60001014b01ff860001013001ff860001013001ff" +
+ "860001013001ff860001013001ffb80000001eff850401010e3030303030" +
+ "30303030303030303001ff3000010c0104000016ffb70201010830303030" +
+ "3030303001ff3000010c000030ffb6040405fcff00303030303030303030" +
+ "303030303030303030303030303030303030303030303030303030303030" +
+ "30"
+
+type X struct {
+ J *X
+ K map[string]int
+}
+
+func main() {
+ raw, _ := hex.DecodeString(data)
+ decoder := gob.NewDecoder(bytes.NewReader(raw))
+
+ var x X
+ decoder.Decode(&x) // fatal error: runtime: out of memory
+ fmt.Println("Decoding finished.")
+}
+```
+
+
+
+
+
+`fxamacker/cbor` is fast at rejecting malformed CBOR data. E.g. attempts to
+decode 10 bytes of malicious CBOR data to `[]byte` (with default settings):
+
+| Codec | Speed (ns/op) | Memory | Allocs |
+| :---- | ------------: | -----: | -----: |
+| fxamacker/cbor 2.5.0 | 44 ± 5% | 32 B/op | 2 allocs/op |
+| ugorji/go 1.2.11 | 5353261 ± 4% | 67111321 B/op | 13 allocs/op |
+
+Benchmark details
+
+Latest comparison used:
+- Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}`
+- go1.19.10, linux/amd64, i5-13600K (disabled all e-cores, DDR4 @2933)
+- go test -bench=. -benchmem -count=20
+
+#### Prior comparisons
+
+| Codec | Speed (ns/op) | Memory | Allocs |
+| :---- | ------------: | -----: | -----: |
+| fxamacker/cbor 2.5.0-beta2 | 44.33 ± 2% | 32 B/op | 2 allocs/op |
+| fxamacker/cbor 0.1.0 - 2.4.0 | ~44.68 ± 6% | 32 B/op | 2 allocs/op |
+| ugorji/go 1.2.10 | 5524792.50 ± 3% | 67110491 B/op | 12 allocs/op |
+| ugorji/go 1.1.0 - 1.2.6 | 💥 runtime: | out of memory: | cannot allocate |
+
+- Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}`
+- go1.19.6, linux/amd64, i5-13600K (DDR4)
+- go test -bench=. -benchmem -count=20
+
+
+
+
+
+### Smaller Encodings with Struct Tags
+
+Struct tags (`toarray`, `keyasint`, `omitempty`) reduce encoded size of structs.
+
+Example encoding 3-level nested Go struct to 1 byte CBOR
+
+https://go.dev/play/p/YxwvfPdFQG2
+
+```Go
+// Example encoding nested struct (with omitempty tag)
+// - encoding/json: 18 byte JSON
+// - fxamacker/cbor: 1 byte CBOR
+package main
+
+import (
+ "encoding/hex"
+ "encoding/json"
+ "fmt"
+
+ "github.com/fxamacker/cbor/v2"
+)
+
+type GrandChild struct {
+ Quux int `json:",omitempty"`
+}
+
+type Child struct {
+ Baz int `json:",omitempty"`
+ Qux GrandChild `json:",omitempty"`
+}
+
+type Parent struct {
+ Foo Child `json:",omitempty"`
+ Bar int `json:",omitempty"`
+}
+
+func cb() {
+ results, _ := cbor.Marshal(Parent{})
+ fmt.Println("hex(CBOR): " + hex.EncodeToString(results))
+
+ text, _ := cbor.Diagnose(results) // Diagnostic Notation
+ fmt.Println("DN: " + text)
+}
+
+func js() {
+ results, _ := json.Marshal(Parent{})
+ fmt.Println("hex(JSON): " + hex.EncodeToString(results))
+
+ text := string(results) // JSON
+ fmt.Println("JSON: " + text)
+}
+
+func main() {
+ cb()
+ fmt.Println("-------------")
+ js()
+}
+```
+
+Output (DN is Diagnostic Notation):
+```
+hex(CBOR): a0
+DN: {}
+-------------
+hex(JSON): 7b22466f6f223a7b22517578223a7b7d7d7d
+JSON: {"Foo":{"Qux":{}}}
+```
+
+
+
+
+
+Example using different struct tags together:
+
+
+
+API is mostly the same as `encoding/json`, plus interfaces that simplify concurrency for CBOR options.
+
+## Quick Start
+
+__Install__: `go get github.com/fxamacker/cbor/v2` and `import "github.com/fxamacker/cbor/v2"`.
+
+### Key Points
+
+This library can encode and decode CBOR (RFC 8949) and CBOR Sequences (RFC 8742).
+
+- __CBOR data item__ is a single piece of CBOR data and its structure may contain 0 or more nested data items.
+- __CBOR sequence__ is a concatenation of 0 or more encoded CBOR data items.
+
+Configurable limits and options can be used to balance trade-offs.
+
+- Encoding and decoding modes are created from options (settings).
+- Modes can be created at startup and reused.
+- Modes are safe for concurrent use.
+
+### Default Mode
+
+Package level functions only use this library's default settings.
+They provide the "default mode" of encoding and decoding.
+
+```go
+// API matches encoding/json for Marshal, Unmarshal, Encode, Decode, etc.
+b, err = cbor.Marshal(v) // encode v to []byte b
+err = cbor.Unmarshal(b, &v) // decode []byte b to v
+decoder = cbor.NewDecoder(r) // create decoder with io.Reader r
+err = decoder.Decode(&v) // decode a CBOR data item to v
+
+// v2.7.0 added MarshalToBuffer() and UserBufferEncMode interface.
+err = cbor.MarshalToBuffer(v, b) // encode v to b instead of using built-in buf pool.
+
+// v2.5.0 added new functions that return remaining bytes.
+
+// UnmarshalFirst decodes first CBOR data item and returns remaining bytes.
+rest, err = cbor.UnmarshalFirst(b, &v) // decode []byte b to v
+
+// DiagnoseFirst translates first CBOR data item to text and returns remaining bytes.
+text, rest, err = cbor.DiagnoseFirst(b) // decode []byte b to Diagnostic Notation text
+
+// NOTE: Unmarshal returns ExtraneousDataError if there are remaining bytes,
+// but new funcs UnmarshalFirst and DiagnoseFirst do not.
+```
+
+__IMPORTANT__: 👉 CBOR settings allow trade-offs between speed, security, encoding size, etc.
+
+- Different CBOR libraries may use different default settings.
+- CBOR-based formats or protocols usually require specific settings.
+
+For example, WebAuthn uses "CTAP2 Canonical CBOR" which is available as a preset.
+
+### Presets
+
+Presets can be used as-is or as a starting point for custom settings.
+
+```go
+// EncOptions is a struct of encoder settings.
+func CoreDetEncOptions() EncOptions // RFC 8949 Core Deterministic Encoding
+func PreferredUnsortedEncOptions() EncOptions // RFC 8949 Preferred Serialization
+func CanonicalEncOptions() EncOptions // RFC 7049 Canonical CBOR
+func CTAP2EncOptions() EncOptions // FIDO2 CTAP2 Canonical CBOR
+```
+
+Presets are used to create custom modes.
+
+### Custom Modes
+
+Modes are created from settings. Once created, modes have immutable settings.
+
+💡 Create the mode at startup and reuse it. It is safe for concurrent use.
+
+```Go
+// Create encoding mode.
+opts := cbor.CoreDetEncOptions() // use preset options as a starting point
+opts.Time = cbor.TimeUnix // change any settings if needed
+em, err := opts.EncMode() // create an immutable encoding mode
+
+// Reuse the encoding mode. It is safe for concurrent use.
+
+// API matches encoding/json.
+b, err := em.Marshal(v) // encode v to []byte b
+encoder := em.NewEncoder(w) // create encoder with io.Writer w
+err := encoder.Encode(v) // encode v to io.Writer w
+```
+
+Default mode and custom modes automatically apply struct tags.
+
+### User Specified Buffer for Encoding (v2.7.0)
+
+The `UserBufferEncMode` interface extends the `EncMode` interface to add `MarshalToBuffer()`, which accepts a user-specified buffer instead of using the built-in buffer pool.
+
+```Go
+em, err := myEncOptions.UserBufferEncMode() // create UserBufferEncMode mode
+
+var buf bytes.Buffer
+err = em.MarshalToBuffer(v, &buf) // encode v to provided buf
+```
+
+### Struct Tags
+
+Struct tags (`toarray`, `keyasint`, `omitempty`) reduce encoded size of structs.
+
+Example encoding 3-level nested Go struct to 1 byte CBOR
+
+https://go.dev/play/p/YxwvfPdFQG2
+
+```Go
+// Example encoding nested struct (with omitempty tag)
+// - encoding/json: 18 byte JSON
+// - fxamacker/cbor: 1 byte CBOR
+package main
+
+import (
+ "encoding/hex"
+ "encoding/json"
+ "fmt"
+
+ "github.com/fxamacker/cbor/v2"
+)
+
+type GrandChild struct {
+ Quux int `json:",omitempty"`
+}
+
+type Child struct {
+ Baz int `json:",omitempty"`
+ Qux GrandChild `json:",omitempty"`
+}
+
+type Parent struct {
+ Foo Child `json:",omitempty"`
+ Bar int `json:",omitempty"`
+}
+
+func cb() {
+ results, _ := cbor.Marshal(Parent{})
+ fmt.Println("hex(CBOR): " + hex.EncodeToString(results))
+
+ text, _ := cbor.Diagnose(results) // Diagnostic Notation
+ fmt.Println("DN: " + text)
+}
+
+func js() {
+ results, _ := json.Marshal(Parent{})
+ fmt.Println("hex(JSON): " + hex.EncodeToString(results))
+
+ text := string(results) // JSON
+ fmt.Println("JSON: " + text)
+}
+
+func main() {
+ cb()
+ fmt.Println("-------------")
+ js()
+}
+```
+
+Output (DN is Diagnostic Notation):
+```
+hex(CBOR): a0
+DN: {}
+-------------
+hex(JSON): 7b22466f6f223a7b22517578223a7b7d7d7d
+JSON: {"Foo":{"Qux":{}}}
+```
+
+
+
+
+
+Example using several struct tags
+
+
+
+
+
+Struct tags simplify use of CBOR-based protocols that require CBOR arrays or maps with integer keys.
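+
+For illustration, here is a minimal sketch of the `toarray` and `keyasint` tags (the type and field names are made up, not from any real protocol):
+
+```Go
+// toarray: encode the struct as a fixed-length CBOR array, dropping field names.
+type Point struct {
+    _ struct{} `cbor:",toarray"`
+    X int
+    Y int
+}
+
+// keyasint: encode struct fields as map entries with integer keys.
+type Claims struct {
+    Issuer  string `cbor:"1,keyasint,omitempty"`
+    Subject string `cbor:"2,keyasint,omitempty"`
+}
+
+arr, _ := cbor.Marshal(Point{X: 1, Y: 2})       // encodes as CBOR array [1, 2]
+m, _ := cbor.Marshal(Claims{Issuer: "example"}) // encodes as CBOR map {1: "example"}
+```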
+
+### CBOR Tags
+
+CBOR tags are specified in a `TagSet`.
+
+Custom modes can be created with a `TagSet` to handle CBOR tags.
+
+```go
+em, err := opts.EncMode() // no CBOR tags
+em, err := opts.EncModeWithTags(ts) // immutable CBOR tags
+em, err := opts.EncModeWithSharedTags(ts) // mutable shared CBOR tags
+```
+
+`TagSet` and modes using it are safe for concurrent use. Equivalent API is available for `DecMode`.
+
+Example using TagSet and TagOptions
+
+```go
+// Use signedCWT struct defined in "Decoding CWT" example.
+
+// Create TagSet (safe for concurrency).
+tags := cbor.NewTagSet()
+// Register tag COSE_Sign1 18 with signedCWT type.
+tags.Add(
+ cbor.TagOptions{EncTag: cbor.EncTagRequired, DecTag: cbor.DecTagRequired},
+ reflect.TypeOf(signedCWT{}),
+ 18)
+
+// Create DecMode with immutable tags.
+dm, _ := cbor.DecOptions{}.DecModeWithTags(tags)
+
+// Unmarshal to signedCWT with tag support.
+var v signedCWT
+if err := dm.Unmarshal(data, &v); err != nil {
+ return err
+}
+
+// Create EncMode with immutable tags.
+em, _ := cbor.EncOptions{}.EncModeWithTags(tags)
+
+// Marshal signedCWT with tag number.
+if data, err := em.Marshal(v); err != nil {
+ return err
+}
+```
+
+
+
+### Functions and Interfaces
+
+Functions and interfaces at a glance
+
+Common functions with same API as `encoding/json`:
+- `Marshal`, `Unmarshal`
+- `NewEncoder`, `(*Encoder).Encode`
+- `NewDecoder`, `(*Decoder).Decode`
+
+NOTE: `Unmarshal` will return `ExtraneousDataError` if there are remaining bytes
+because RFC 8949 treats a CBOR data item with remaining bytes as malformed.
+- 💡 Use `UnmarshalFirst` to decode the first CBOR data item and return any remaining bytes.
+
+Other useful functions:
+- `Diagnose`, `DiagnoseFirst` produce human-readable [Extended Diagnostic Notation](https://www.rfc-editor.org/rfc/rfc8610.html#appendix-G) from CBOR data.
+- `UnmarshalFirst` decodes the first CBOR data item and returns any remaining bytes.
+- `Wellformed` returns true if the CBOR data item is well-formed.
+
+Interfaces identical or comparable to Go `encoding` packages include:
+`Marshaler`, `Unmarshaler`, `BinaryMarshaler`, and `BinaryUnmarshaler`.
+
+The `RawMessage` type can be used to delay CBOR decoding or precompute CBOR encoding.
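+
+A minimal sketch of decoding a CBOR Sequence with `UnmarshalFirst` (assuming `seq` is a `[]byte` holding zero or more concatenated CBOR data items):
+
+```Go
+rest := seq
+for len(rest) > 0 {
+    var item interface{}
+    var err error
+    // Decode one data item and keep the remaining bytes for the next iteration.
+    rest, err = cbor.UnmarshalFirst(rest, &item)
+    if err != nil {
+        return err
+    }
+    fmt.Println(item)
+}
+```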
+
+
+
+### Security Tips
+
+🔒 Use Go's `io.LimitReader` to limit size when decoding very large or indefinite size data.
+
+Default limits may need to be increased for systems handling very large data (e.g. blockchains).
+
+`DecOptions` can be used to modify default limits for `MaxArrayElements`, `MaxMapPairs`, and `MaxNestedLevels`.
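+
+A minimal sketch combining both tips (the limit values are arbitrary examples, `r` is assumed to be an untrusted `io.Reader`, and `myMessage` is a placeholder for your own type):
+
+```Go
+opts := cbor.DecOptions{
+    MaxArrayElements: 65536,
+    MaxMapPairs:      65536,
+    MaxNestedLevels:  16,
+}
+dm, err := opts.DecMode() // create an immutable decoding mode
+if err != nil {
+    panic(err)
+}
+
+// Cap the total number of bytes read while decoding one message.
+limited := io.LimitReader(r, 1<<20) // 1 MiB
+
+var v myMessage
+err = dm.NewDecoder(limited).Decode(&v)
+```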
+
+## Status
+
+v2.7.0 (June 23, 2024) adds features and improvements that help large projects (e.g. Kubernetes) use CBOR as an alternative to JSON and Protocol Buffers. Other improvements include speedups, improved memory use, bug fixes, new serialization options, etc. It passed fuzz tests (5+ billion executions) and is production quality.
+
+For more details, see [release notes](https://github.com/fxamacker/cbor/releases).
+
+### Prior Release
+
+[v2.6.0](https://github.com/fxamacker/cbor/releases/tag/v2.6.0) (February 2024) adds important new features, optimizations, and bug fixes. It is especially useful to systems that need to convert data between CBOR and JSON. New options and optimizations improve handling of bignum, integers, maps, and strings.
+
+v2.5.0 was released on Sunday, August 13, 2023 with new features and important bug fixes. It is fuzz tested and production quality after extended beta [v2.5.0-beta](https://github.com/fxamacker/cbor/releases/tag/v2.5.0-beta) (Dec 2022) -> [v2.5.0](https://github.com/fxamacker/cbor/releases/tag/v2.5.0) (Aug 2023).
+
+__IMPORTANT__: 👉 Before upgrading from v2.4 or older release, please read the notable changes highlighted in the release notes. v2.5.0 is a large release with bug fixes to error handling for extraneous data in `Unmarshal`, etc. that should be reviewed before upgrading.
+
+See [v2.5.0 release notes](https://github.com/fxamacker/cbor/releases/tag/v2.5.0) for list of new features, improvements, and bug fixes.
+
+See ["Version and API Changes"](https://github.com/fxamacker/cbor#versions-and-api-changes) section for more info about version numbering, etc.
+
+
+
+## Who uses fxamacker/cbor
+
+`fxamacker/cbor` is used in projects by Arm Ltd., Berlin Institute of Health at Charité, Chainlink, Cisco, Confidential Computing Consortium, ConsenSys, Dapper Labs, EdgeX Foundry, F5, FIDO Alliance, Fraunhofer‑AISEC, Kubernetes, Let's Encrypt (ISRG), Linux Foundation, Matrix.org, Microsoft, Mozilla, National Cybersecurity Agency of France (govt), Netherlands (govt), Oasis Protocol, Smallstep, Tailscale, Taurus SA, Teleport, TIBCO, and others.
+
+`fxamacker/cbor` passed multiple confidential security assessments. A [nonconfidential security assessment](https://github.com/veraison/go-cose/blob/v1.0.0-rc.1/reports/NCC_Microsoft-go-cose-Report_2022-05-26_v1.0.pdf) (prepared by NCC Group for Microsoft Corporation) includes a subset of fxamacker/cbor v2.4.0 in its scope.
+
+## Standards
+
+`fxamacker/cbor` is a CBOR codec in full conformance with [IETF STD 94 (RFC 8949)](https://www.rfc-editor.org/info/std94). It also supports CBOR Sequences ([RFC 8742](https://www.rfc-editor.org/rfc/rfc8742.html)) and Extended Diagnostic Notation ([Appendix G of RFC 8610](https://www.rfc-editor.org/rfc/rfc8610.html#appendix-G)).
+
+Notable CBOR features include:
+
+| CBOR Feature | Description |
+| :--- | :--- |
+| CBOR tags | API supports built-in and user-defined tags. |
+| Preferred serialization | Integers encode to fewest bytes. Optional float64 → float32 → float16. |
+| Map key sorting | Unsorted, length-first (Canonical CBOR), and bytewise-lexicographic (CTAP2). |
+| Duplicate map keys | Always forbidden when encoding; option to allow/forbid when decoding. |
+| Indefinite length data | Option to allow/forbid for encoding and decoding. |
+| Well-formedness | Always checked and enforced. |
+| Basic validity checks | Optionally check UTF-8 validity and duplicate map keys. |
+| Security considerations | Prevent integer overflow and resource exhaustion (RFC 8949 Section 10). |
+
+Known limitations are noted in the [Limitations section](#limitations).
+
+Go nil values for slices, maps, pointers, etc. are encoded as CBOR null. Empty slices, maps, etc. are encoded as empty CBOR arrays and maps.
+
+Decoder checks for all required well-formedness errors, including all "subkinds" of syntax errors and too little data.
+
+After well-formedness is verified, basic validity errors are handled as follows:
+
+* Invalid UTF-8 string: Decoder has option to check and return invalid UTF-8 string error. This check is enabled by default.
+* Duplicate keys in a map: Decoder has options to ignore or enforce rejection of duplicate map keys.
+
+When decoding well-formed CBOR arrays and maps, decoder saves the first error it encounters and continues with the next item. Options to handle this differently may be added in the future.
+
+By default, decoder treats time values of floating-point NaN and Infinity as if they are CBOR Null or CBOR Undefined.
+
+__Click to expand topic:__
+
+
+ Duplicate Map Keys
+
+This library provides options for fast detection and rejection of duplicate map keys based on applying a Go-specific data model to CBOR's extended generic data model in order to determine duplicate vs distinct map keys. Detection relies on whether the CBOR map key would be a duplicate "key" when decoded and applied to the user-provided Go map or struct.
+
+`DupMapKeyQuiet` turns off detection of duplicate map keys. It tries to use a "keep fastest" method by choosing either "keep first" or "keep last" depending on the Go data type.
+
+`DupMapKeyEnforcedAPF` enforces detection and rejection of duplicate map keys. Decoding stops immediately and returns `DupMapKeyError` when the first duplicate key is detected. The error includes the duplicate map key and the index number.
+
+APF suffix means "Allow Partial Fill" so the destination map or struct can contain some decoded values at the time of error. It is the caller's responsibility to respond to the `DupMapKeyError` by discarding the partially filled result if that's required by their protocol.
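+
+A minimal sketch of enforcing rejection of duplicate map keys (assuming `data` is untrusted CBOR and `dst` is the destination map or struct):
+
+```Go
+dm, err := cbor.DecOptions{DupMapKey: cbor.DupMapKeyEnforcedAPF}.DecMode()
+if err != nil {
+    panic(err)
+}
+if err := dm.Unmarshal(data, &dst); err != nil {
+    // On the first duplicate key, DupMapKeyError is returned and dst may be
+    // partially filled; discard it if the protocol requires that.
+    return err
+}
+```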
+
+
+
+
+ Tag Validity
+
+This library checks tag validity for built-in tags (currently tag numbers 0, 1, 2, 3, and 55799):
+
+* Inadmissible type for tag content
+* Inadmissible value for tag content
+
+Unknown tag data items (not tag number 0, 1, 2, 3, or 55799) are handled in two ways:
+
+* When decoding into an empty interface, unknown tag data item will be decoded into `cbor.Tag` data type, which contains tag number and tag content. The tag content will be decoded into the default Go data type for the CBOR data type.
+* When decoding into other Go types, unknown tag data item is decoded into the specified Go type. If Go type is registered with a tag number, the tag number can optionally be verified.
+
+Decoder also has an option to forbid tag data items (treat any tag data item as error) which is specified by protocols such as CTAP2 Canonical CBOR.
+
+For more information, see [decoding options](#decoding-options-1) and [tag options](#tag-options).
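+
+A minimal sketch of the first case above, decoding a data item with an unregistered tag number into an empty interface (assuming `data` holds such an item):
+
+```Go
+var v interface{}
+if err := cbor.Unmarshal(data, &v); err != nil {
+    return err
+}
+if t, ok := v.(cbor.Tag); ok {
+    fmt.Println(t.Number, t.Content) // tag number and decoded tag content
+}
+```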
+
+
+
+## Limitations
+
+If any of these limitations prevent you from using this library, please open an issue along with a link to your project.
+
+* CBOR `Undefined` (0xf7) value decodes to Go's `nil` value. CBOR `Null` (0xf6) more closely matches Go's `nil`.
+* CBOR map keys with data types not supported by Go for map keys are ignored and an error is returned after continuing to decode remaining items.
+* When decoding registered CBOR tag data to interface type, decoder creates a pointer to registered Go type matching CBOR tag number. Requiring a pointer for this is a Go limitation.
+
+## Fuzzing and Code Coverage
+
+__Code coverage__ is always 95% or higher (with `go test -cover`) when tagging a release.
+
+__Coverage-guided fuzzing__ must pass billions of execs before tagging a release. Fuzzing is done using nonpublic code which may eventually get merged into this project. Until then, reports like OpenSSF Scorecard can't detect fuzz tests being used by this project.
+
+
+
+## Versions and API Changes
+This project uses [Semantic Versioning](https://semver.org), so the API is always backwards compatible unless the major version number changes.
+
+These functions have signatures identical to encoding/json and their API will continue to match `encoding/json` even after major new releases:
+`Marshal`, `Unmarshal`, `NewEncoder`, `NewDecoder`, `(*Encoder).Encode`, and `(*Decoder).Decode`.
+
+Exclusions from SemVer:
+- Newly added API documented as "subject to change".
+- Newly added API in the master branch that has never been tagged in a non-beta release.
+- If function parameters are unchanged, bug fixes that change behavior (e.g. returning an error for an edge case that was missed in a prior version). We try to highlight these in the release notes and add an extended beta period. E.g. [v2.5.0-beta](https://github.com/fxamacker/cbor/releases/tag/v2.5.0-beta) (Dec 2022) -> [v2.5.0](https://github.com/fxamacker/cbor/releases/tag/v2.5.0) (Aug 2023).
+
+This project avoids breaking changes to behavior of encoding and decoding functions unless required to improve conformance with supported RFCs (e.g. RFC 8949, RFC 8742, etc.). Visible changes that don't improve conformance to standards are typically made available as new opt-in settings or new functions.
+
+## Code of Conduct
+
+This project has adopted the [Contributor Covenant Code of Conduct](CODE_OF_CONDUCT.md). Contact [faye.github@gmail.com](mailto:faye.github@gmail.com) with any questions or comments.
+
+## Contributing
+
+Please open an issue before beginning work on a PR. The improvement may have already been considered, etc.
+
+For more info, see [How to Contribute](CONTRIBUTING.md).
+
+## Security Policy
+
+Security fixes are provided for the latest released version of fxamacker/cbor.
+
+For the full text of the Security Policy, see [SECURITY.md](SECURITY.md).
+
+## Acknowledgements
+
+Many thanks to all the contributors on this project!
+
+I'm especially grateful to Bastian Müller and Dieter Shirley for suggesting and collaborating on CBOR stream mode, and much more.
+
+I'm very grateful to Stefan Tatschner, Yawning Angel, Jernej Kos, x448, ZenGround0, and Jakob Borg for their contributions or support in the very early days.
+
+Big thanks to Ben Luddy for his contributions in v2.6.0 and v2.7.0.
+
+This library clearly wouldn't be possible without Carsten Bormann authoring CBOR RFCs.
+
+Special thanks to Laurence Lundblade and Jeffrey Yasskin for their help on IETF mailing list or at [7049bis](https://github.com/cbor-wg/CBORbis).
+
+Huge thanks to The Go Authors for creating a fun and practical programming language with batteries included!
+
+This library uses `x448/float16` which used to be included. As a standalone package, `x448/float16` is useful to other projects as well.
+
+## License
+
+Copyright © 2019-2024 [Faye Amacker](https://github.com/fxamacker).
+
+fxamacker/cbor is licensed under the MIT License. See [LICENSE](LICENSE) for the full license text.
+
+
diff --git a/vendor/github.com/fxamacker/cbor/v2/SECURITY.md b/vendor/github.com/fxamacker/cbor/v2/SECURITY.md
new file mode 100644
index 0000000000..9c05146d16
--- /dev/null
+++ b/vendor/github.com/fxamacker/cbor/v2/SECURITY.md
@@ -0,0 +1,7 @@
+# Security Policy
+
+Security fixes are provided for the latest released version of fxamacker/cbor.
+
+If the security vulnerability is already known to the public, then you can open an issue as a bug report.
+
+To report security vulnerabilities not yet known to the public, please email faye.github@gmail.com and allow time for the problem to be resolved before reporting it to the public.
diff --git a/vendor/github.com/fxamacker/cbor/v2/bytestring.go b/vendor/github.com/fxamacker/cbor/v2/bytestring.go
new file mode 100644
index 0000000000..823bff12ce
--- /dev/null
+++ b/vendor/github.com/fxamacker/cbor/v2/bytestring.go
@@ -0,0 +1,63 @@
+// Copyright (c) Faye Amacker. All rights reserved.
+// Licensed under the MIT License. See LICENSE in the project root for license information.
+
+package cbor
+
+import (
+ "errors"
+)
+
+// ByteString represents CBOR byte string (major type 2). ByteString can be used
+// when using a Go []byte is not possible or convenient. For example, Go doesn't
+// allow []byte as map key, so ByteString can be used to support data formats
+// having CBOR map with byte string keys. ByteString can also be used to
+// encode invalid UTF-8 string as CBOR byte string.
+// See DecOption.MapKeyByteStringMode for more details.
+type ByteString string
+
+// Bytes returns bytes representing ByteString.
+func (bs ByteString) Bytes() []byte {
+ return []byte(bs)
+}
+
+// MarshalCBOR encodes ByteString as CBOR byte string (major type 2).
+func (bs ByteString) MarshalCBOR() ([]byte, error) {
+ e := getEncodeBuffer()
+ defer putEncodeBuffer(e)
+
+ // Encode length
+ encodeHead(e, byte(cborTypeByteString), uint64(len(bs)))
+
+ // Encode data
+ buf := make([]byte, e.Len()+len(bs))
+ n := copy(buf, e.Bytes())
+ copy(buf[n:], bs)
+
+ return buf, nil
+}
+
+// UnmarshalCBOR decodes CBOR byte string (major type 2) to ByteString.
+// Decoding CBOR null and CBOR undefined sets ByteString to be empty.
+func (bs *ByteString) UnmarshalCBOR(data []byte) error {
+ if bs == nil {
+ return errors.New("cbor.ByteString: UnmarshalCBOR on nil pointer")
+ }
+
+ // Decoding CBOR null and CBOR undefined to ByteString resets data.
+ // This behavior is similar to decoding CBOR null and CBOR undefined to []byte.
+ if len(data) == 1 && (data[0] == 0xf6 || data[0] == 0xf7) {
+ *bs = ""
+ return nil
+ }
+
+ d := decoder{data: data, dm: defaultDecMode}
+
+ // Check if CBOR data type is byte string
+ if typ := d.nextCBORType(); typ != cborTypeByteString {
+ return &UnmarshalTypeError{CBORType: typ.String(), GoType: typeByteString.String()}
+ }
+
+ b, _ := d.parseByteString()
+ *bs = ByteString(b)
+ return nil
+}
diff --git a/vendor/github.com/fxamacker/cbor/v2/cache.go b/vendor/github.com/fxamacker/cbor/v2/cache.go
new file mode 100644
index 0000000000..ea0f39e24f
--- /dev/null
+++ b/vendor/github.com/fxamacker/cbor/v2/cache.go
@@ -0,0 +1,363 @@
+// Copyright (c) Faye Amacker. All rights reserved.
+// Licensed under the MIT License. See LICENSE in the project root for license information.
+
+package cbor
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+)
+
+type encodeFuncs struct {
+ ef encodeFunc
+ ief isEmptyFunc
+}
+
+var (
+ decodingStructTypeCache sync.Map // map[reflect.Type]*decodingStructType
+ encodingStructTypeCache sync.Map // map[reflect.Type]*encodingStructType
+ encodeFuncCache sync.Map // map[reflect.Type]encodeFuncs
+ typeInfoCache sync.Map // map[reflect.Type]*typeInfo
+)
+
+type specialType int
+
+const (
+ specialTypeNone specialType = iota
+ specialTypeUnmarshalerIface
+ specialTypeEmptyIface
+ specialTypeIface
+ specialTypeTag
+ specialTypeTime
+)
+
+type typeInfo struct {
+ elemTypeInfo *typeInfo
+ keyTypeInfo *typeInfo
+ typ reflect.Type
+ kind reflect.Kind
+ nonPtrType reflect.Type
+ nonPtrKind reflect.Kind
+ spclType specialType
+}
+
+func newTypeInfo(t reflect.Type) *typeInfo {
+ tInfo := typeInfo{typ: t, kind: t.Kind()}
+
+ for t.Kind() == reflect.Ptr {
+ t = t.Elem()
+ }
+
+ k := t.Kind()
+
+ tInfo.nonPtrType = t
+ tInfo.nonPtrKind = k
+
+ if k == reflect.Interface {
+ if t.NumMethod() == 0 {
+ tInfo.spclType = specialTypeEmptyIface
+ } else {
+ tInfo.spclType = specialTypeIface
+ }
+ } else if t == typeTag {
+ tInfo.spclType = specialTypeTag
+ } else if t == typeTime {
+ tInfo.spclType = specialTypeTime
+ } else if reflect.PtrTo(t).Implements(typeUnmarshaler) {
+ tInfo.spclType = specialTypeUnmarshalerIface
+ }
+
+ switch k {
+ case reflect.Array, reflect.Slice:
+ tInfo.elemTypeInfo = getTypeInfo(t.Elem())
+ case reflect.Map:
+ tInfo.keyTypeInfo = getTypeInfo(t.Key())
+ tInfo.elemTypeInfo = getTypeInfo(t.Elem())
+ }
+
+ return &tInfo
+}
+
+type decodingStructType struct {
+ fields fields
+ fieldIndicesByName map[string]int
+ err error
+ toArray bool
+}
+
+// The stdlib errors.Join was introduced in Go 1.20, and we still support Go 1.17, so instead,
+// here's a very basic implementation of an aggregated error.
+type multierror []error
+
+func (m multierror) Error() string {
+ var sb strings.Builder
+ for i, err := range m {
+ sb.WriteString(err.Error())
+ if i < len(m)-1 {
+ sb.WriteString(", ")
+ }
+ }
+ return sb.String()
+}
+
+func getDecodingStructType(t reflect.Type) *decodingStructType {
+ if v, _ := decodingStructTypeCache.Load(t); v != nil {
+ return v.(*decodingStructType)
+ }
+
+ flds, structOptions := getFields(t)
+
+ toArray := hasToArrayOption(structOptions)
+
+ var errs []error
+ for i := 0; i < len(flds); i++ {
+ if flds[i].keyAsInt {
+ nameAsInt, numErr := strconv.Atoi(flds[i].name)
+ if numErr != nil {
+ errs = append(errs, errors.New("cbor: failed to parse field name \""+flds[i].name+"\" to int ("+numErr.Error()+")"))
+ break
+ }
+ flds[i].nameAsInt = int64(nameAsInt)
+ }
+
+ flds[i].typInfo = getTypeInfo(flds[i].typ)
+ }
+
+ fieldIndicesByName := make(map[string]int, len(flds))
+ for i, fld := range flds {
+ if _, ok := fieldIndicesByName[fld.name]; ok {
+ errs = append(errs, fmt.Errorf("cbor: two or more fields of %v have the same name %q", t, fld.name))
+ continue
+ }
+ fieldIndicesByName[fld.name] = i
+ }
+
+ var err error
+ {
+ var multi multierror
+ for _, each := range errs {
+ if each != nil {
+ multi = append(multi, each)
+ }
+ }
+ if len(multi) == 1 {
+ err = multi[0]
+ } else if len(multi) > 1 {
+ err = multi
+ }
+ }
+
+ structType := &decodingStructType{
+ fields: flds,
+ fieldIndicesByName: fieldIndicesByName,
+ err: err,
+ toArray: toArray,
+ }
+ decodingStructTypeCache.Store(t, structType)
+ return structType
+}
+
+type encodingStructType struct {
+ fields fields
+ bytewiseFields fields
+ lengthFirstFields fields
+ omitEmptyFieldsIdx []int
+ err error
+ toArray bool
+}
+
+func (st *encodingStructType) getFields(em *encMode) fields {
+ switch em.sort {
+ case SortNone, SortFastShuffle:
+ return st.fields
+ case SortLengthFirst:
+ return st.lengthFirstFields
+ default:
+ return st.bytewiseFields
+ }
+}
+
+type bytewiseFieldSorter struct {
+ fields fields
+}
+
+func (x *bytewiseFieldSorter) Len() int {
+ return len(x.fields)
+}
+
+func (x *bytewiseFieldSorter) Swap(i, j int) {
+ x.fields[i], x.fields[j] = x.fields[j], x.fields[i]
+}
+
+func (x *bytewiseFieldSorter) Less(i, j int) bool {
+ return bytes.Compare(x.fields[i].cborName, x.fields[j].cborName) <= 0
+}
+
+type lengthFirstFieldSorter struct {
+ fields fields
+}
+
+func (x *lengthFirstFieldSorter) Len() int {
+ return len(x.fields)
+}
+
+func (x *lengthFirstFieldSorter) Swap(i, j int) {
+ x.fields[i], x.fields[j] = x.fields[j], x.fields[i]
+}
+
+func (x *lengthFirstFieldSorter) Less(i, j int) bool {
+ if len(x.fields[i].cborName) != len(x.fields[j].cborName) {
+ return len(x.fields[i].cborName) < len(x.fields[j].cborName)
+ }
+ return bytes.Compare(x.fields[i].cborName, x.fields[j].cborName) <= 0
+}
+
+func getEncodingStructType(t reflect.Type) (*encodingStructType, error) {
+ if v, _ := encodingStructTypeCache.Load(t); v != nil {
+ structType := v.(*encodingStructType)
+ return structType, structType.err
+ }
+
+ flds, structOptions := getFields(t)
+
+ if hasToArrayOption(structOptions) {
+ return getEncodingStructToArrayType(t, flds)
+ }
+
+ var err error
+ var hasKeyAsInt bool
+ var hasKeyAsStr bool
+ var omitEmptyIdx []int
+ e := getEncodeBuffer()
+ for i := 0; i < len(flds); i++ {
+ // Get field's encodeFunc
+ flds[i].ef, flds[i].ief = getEncodeFunc(flds[i].typ)
+ if flds[i].ef == nil {
+ err = &UnsupportedTypeError{t}
+ break
+ }
+
+ // Encode field name
+ if flds[i].keyAsInt {
+ nameAsInt, numErr := strconv.Atoi(flds[i].name)
+ if numErr != nil {
+ err = errors.New("cbor: failed to parse field name \"" + flds[i].name + "\" to int (" + numErr.Error() + ")")
+ break
+ }
+ flds[i].nameAsInt = int64(nameAsInt)
+ if nameAsInt >= 0 {
+ encodeHead(e, byte(cborTypePositiveInt), uint64(nameAsInt))
+ } else {
+ n := nameAsInt*(-1) - 1
+ encodeHead(e, byte(cborTypeNegativeInt), uint64(n))
+ }
+ flds[i].cborName = make([]byte, e.Len())
+ copy(flds[i].cborName, e.Bytes())
+ e.Reset()
+
+ hasKeyAsInt = true
+ } else {
+ encodeHead(e, byte(cborTypeTextString), uint64(len(flds[i].name)))
+ flds[i].cborName = make([]byte, e.Len()+len(flds[i].name))
+ n := copy(flds[i].cborName, e.Bytes())
+ copy(flds[i].cborName[n:], flds[i].name)
+ e.Reset()
+
+ // If cborName contains a text string, then cborNameByteString contains a
+ // string that has the byte string major type but is otherwise identical to
+ // cborName.
+ flds[i].cborNameByteString = make([]byte, len(flds[i].cborName))
+ copy(flds[i].cborNameByteString, flds[i].cborName)
+ // Reset encoded CBOR type to byte string, preserving the "additional
+ // information" bits:
+ flds[i].cborNameByteString[0] = byte(cborTypeByteString) |
+ getAdditionalInformation(flds[i].cborNameByteString[0])
+
+ hasKeyAsStr = true
+ }
+
+ // Check if field can be omitted when empty
+ if flds[i].omitEmpty {
+ omitEmptyIdx = append(omitEmptyIdx, i)
+ }
+ }
+ putEncodeBuffer(e)
+
+ if err != nil {
+ structType := &encodingStructType{err: err}
+ encodingStructTypeCache.Store(t, structType)
+ return structType, structType.err
+ }
+
+ // Sort fields by canonical order
+ bytewiseFields := make(fields, len(flds))
+ copy(bytewiseFields, flds)
+ sort.Sort(&bytewiseFieldSorter{bytewiseFields})
+
+ lengthFirstFields := bytewiseFields
+ if hasKeyAsInt && hasKeyAsStr {
+ lengthFirstFields = make(fields, len(flds))
+ copy(lengthFirstFields, flds)
+ sort.Sort(&lengthFirstFieldSorter{lengthFirstFields})
+ }
+
+ structType := &encodingStructType{
+ fields: flds,
+ bytewiseFields: bytewiseFields,
+ lengthFirstFields: lengthFirstFields,
+ omitEmptyFieldsIdx: omitEmptyIdx,
+ }
+
+ encodingStructTypeCache.Store(t, structType)
+ return structType, structType.err
+}
+
+func getEncodingStructToArrayType(t reflect.Type, flds fields) (*encodingStructType, error) {
+ for i := 0; i < len(flds); i++ {
+ // Get field's encodeFunc
+ flds[i].ef, flds[i].ief = getEncodeFunc(flds[i].typ)
+ if flds[i].ef == nil {
+ structType := &encodingStructType{err: &UnsupportedTypeError{t}}
+ encodingStructTypeCache.Store(t, structType)
+ return structType, structType.err
+ }
+ }
+
+ structType := &encodingStructType{
+ fields: flds,
+ toArray: true,
+ }
+ encodingStructTypeCache.Store(t, structType)
+ return structType, structType.err
+}
+
+func getEncodeFunc(t reflect.Type) (encodeFunc, isEmptyFunc) {
+ if v, _ := encodeFuncCache.Load(t); v != nil {
+ fs := v.(encodeFuncs)
+ return fs.ef, fs.ief
+ }
+ ef, ief := getEncodeFuncInternal(t)
+ encodeFuncCache.Store(t, encodeFuncs{ef, ief})
+ return ef, ief
+}
+
+func getTypeInfo(t reflect.Type) *typeInfo {
+ if v, _ := typeInfoCache.Load(t); v != nil {
+ return v.(*typeInfo)
+ }
+ tInfo := newTypeInfo(t)
+ typeInfoCache.Store(t, tInfo)
+ return tInfo
+}
+
+func hasToArrayOption(tag string) bool {
+ s := ",toarray"
+ idx := strings.Index(tag, s)
+ return idx >= 0 && (len(tag) == idx+len(s) || tag[idx+len(s)] == ',')
+}
diff --git a/vendor/github.com/fxamacker/cbor/v2/common.go b/vendor/github.com/fxamacker/cbor/v2/common.go
new file mode 100644
index 0000000000..ec038a49ec
--- /dev/null
+++ b/vendor/github.com/fxamacker/cbor/v2/common.go
@@ -0,0 +1,182 @@
+// Copyright (c) Faye Amacker. All rights reserved.
+// Licensed under the MIT License. See LICENSE in the project root for license information.
+
+package cbor
+
+import (
+ "fmt"
+ "strconv"
+)
+
+type cborType uint8
+
+const (
+ cborTypePositiveInt cborType = 0x00
+ cborTypeNegativeInt cborType = 0x20
+ cborTypeByteString cborType = 0x40
+ cborTypeTextString cborType = 0x60
+ cborTypeArray cborType = 0x80
+ cborTypeMap cborType = 0xa0
+ cborTypeTag cborType = 0xc0
+ cborTypePrimitives cborType = 0xe0
+)
+
+func (t cborType) String() string {
+ switch t {
+ case cborTypePositiveInt:
+ return "positive integer"
+ case cborTypeNegativeInt:
+ return "negative integer"
+ case cborTypeByteString:
+ return "byte string"
+ case cborTypeTextString:
+ return "UTF-8 text string"
+ case cborTypeArray:
+ return "array"
+ case cborTypeMap:
+ return "map"
+ case cborTypeTag:
+ return "tag"
+ case cborTypePrimitives:
+ return "primitives"
+ default:
+ return "Invalid type " + strconv.Itoa(int(t))
+ }
+}
+
+type additionalInformation uint8
+
+const (
+ maxAdditionalInformationWithoutArgument = 23
+ additionalInformationWith1ByteArgument = 24
+ additionalInformationWith2ByteArgument = 25
+ additionalInformationWith4ByteArgument = 26
+ additionalInformationWith8ByteArgument = 27
+
+ // For major type 7.
+ additionalInformationAsFalse = 20
+ additionalInformationAsTrue = 21
+ additionalInformationAsNull = 22
+ additionalInformationAsUndefined = 23
+ additionalInformationAsFloat16 = 25
+ additionalInformationAsFloat32 = 26
+ additionalInformationAsFloat64 = 27
+
+ // For major types 2, 3, 4, and 5.
+ additionalInformationAsIndefiniteLengthFlag = 31
+)
+
+const (
+ maxSimpleValueInAdditionalInformation = 23
+ minSimpleValueIn1ByteArgument = 32
+)
+
+func (ai additionalInformation) isIndefiniteLength() bool {
+ return ai == additionalInformationAsIndefiniteLengthFlag
+}
+
+const (
+ // From RFC 8949 Section 3:
+ // "The initial byte of each encoded data item contains both information about the major type
+ // (the high-order 3 bits, described in Section 3.1) and additional information
+ // (the low-order 5 bits)."
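+ //
+ // For example, the initial byte 0x83 carries major type 4 (0b100, array) in its
+ // high-order 3 bits and additional information 3 in its low-order 5 bits, i.e.
+ // it starts an array of length 3.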
+
+ // typeMask is used to extract major type in initial byte of encoded data item.
+ typeMask = 0xe0
+
+ // additionalInformationMask is used to extract additional information in initial byte of encoded data item.
+ additionalInformationMask = 0x1f
+)
+
+func getType(raw byte) cborType {
+ return cborType(raw & typeMask)
+}
+
+func getAdditionalInformation(raw byte) byte {
+ return raw & additionalInformationMask
+}
+
+func isBreakFlag(raw byte) bool {
+ return raw == cborBreakFlag
+}
+
+func parseInitialByte(b byte) (t cborType, ai byte) {
+ return getType(b), getAdditionalInformation(b)
+}
+
+const (
+ tagNumRFC3339Time = 0
+ tagNumEpochTime = 1
+ tagNumUnsignedBignum = 2
+ tagNumNegativeBignum = 3
+ tagNumExpectedLaterEncodingBase64URL = 21
+ tagNumExpectedLaterEncodingBase64 = 22
+ tagNumExpectedLaterEncodingBase16 = 23
+ tagNumSelfDescribedCBOR = 55799
+)
+
+const (
+ cborBreakFlag = byte(0xff)
+ cborByteStringWithIndefiniteLengthHead = byte(0x5f)
+ cborTextStringWithIndefiniteLengthHead = byte(0x7f)
+ cborArrayWithIndefiniteLengthHead = byte(0x9f)
+ cborMapWithIndefiniteLengthHead = byte(0xbf)
+)
+
+var (
+ cborFalse = []byte{0xf4}
+ cborTrue = []byte{0xf5}
+ cborNil = []byte{0xf6}
+ cborNaN = []byte{0xf9, 0x7e, 0x00}
+ cborPositiveInfinity = []byte{0xf9, 0x7c, 0x00}
+ cborNegativeInfinity = []byte{0xf9, 0xfc, 0x00}
+)
+
+// validBuiltinTag checks that supported built-in tag numbers are followed by expected content types.
+func validBuiltinTag(tagNum uint64, contentHead byte) error {
+ t := getType(contentHead)
+ switch tagNum {
+ case tagNumRFC3339Time:
+ // Tag content (date/time text string in RFC 3339 format) must be string type.
+ if t != cborTypeTextString {
+ return newInadmissibleTagContentTypeError(
+ tagNumRFC3339Time,
+ "text string",
+ t.String())
+ }
+ return nil
+
+ case tagNumEpochTime:
+ // Tag content (epoch date/time) must be uint, int, or float type.
+ if t != cborTypePositiveInt && t != cborTypeNegativeInt && (contentHead < 0xf9 || contentHead > 0xfb) {
+ return newInadmissibleTagContentTypeError(
+ tagNumEpochTime,
+ "integer or floating-point number",
+ t.String())
+ }
+ return nil
+
+ case tagNumUnsignedBignum, tagNumNegativeBignum:
+ // Tag content (bignum) must be byte type.
+ if t != cborTypeByteString {
+ return newInadmissibleTagContentTypeErrorf(
+ fmt.Sprintf(
+ "tag number %d or %d must be followed by byte string, got %s",
+ tagNumUnsignedBignum,
+ tagNumNegativeBignum,
+ t.String(),
+ ))
+ }
+ return nil
+
+ case tagNumExpectedLaterEncodingBase64URL, tagNumExpectedLaterEncodingBase64, tagNumExpectedLaterEncodingBase16:
+ // From RFC 8949 3.4.5.2:
+ // The data item tagged can be a byte string or any other data item. In the latter
+ // case, the tag applies to all of the byte string data items contained in the data
+ // item, except for those contained in a nested data item tagged with an expected
+ // conversion.
+ return nil
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/fxamacker/cbor/v2/decode.go b/vendor/github.com/fxamacker/cbor/v2/decode.go
new file mode 100644
index 0000000000..85842ac736
--- /dev/null
+++ b/vendor/github.com/fxamacker/cbor/v2/decode.go
@@ -0,0 +1,3187 @@
+// Copyright (c) Faye Amacker. All rights reserved.
+// Licensed under the MIT License. See LICENSE in the project root for license information.
+
+package cbor
+
+import (
+ "encoding"
+ "encoding/base64"
+ "encoding/binary"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "math/big"
+ "reflect"
+ "strconv"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "github.com/x448/float16"
+)
+
+// Unmarshal parses the CBOR-encoded data into the value pointed to by v
+// using default decoding options. If v is nil, not a pointer, or
+// a nil pointer, Unmarshal returns an error.
+//
+// To unmarshal CBOR into a value implementing the Unmarshaler interface,
+// Unmarshal calls that value's UnmarshalCBOR method with a valid
+// CBOR value.
+//
+// To unmarshal CBOR byte string into a value implementing the
+// encoding.BinaryUnmarshaler interface, Unmarshal calls that value's
+// UnmarshalBinary method with decoded CBOR byte string.
+//
+// To unmarshal CBOR into a pointer, Unmarshal sets the pointer to nil
+// if CBOR data is null (0xf6) or undefined (0xf7). Otherwise, Unmarshal
+// unmarshals CBOR into the value pointed to by the pointer. If the
+// pointer is nil, Unmarshal creates a new value for it to point to.
+//
+// To unmarshal CBOR into an empty interface value, Unmarshal uses the
+// following rules:
+//
+// CBOR booleans decode to bool.
+// CBOR positive integers decode to uint64.
+// CBOR negative integers decode to int64 (big.Int if value overflows).
+// CBOR floating points decode to float64.
+// CBOR byte strings decode to []byte.
+// CBOR text strings decode to string.
+// CBOR arrays decode to []interface{}.
+// CBOR maps decode to map[interface{}]interface{}.
+// CBOR null and undefined values decode to nil.
+// CBOR times (tag 0 and 1) decode to time.Time.
+// CBOR bignums (tag 2 and 3) decode to big.Int.
+// CBOR tags with an unrecognized number decode to cbor.Tag.
+//
+// To unmarshal a CBOR array into a slice, Unmarshal allocates a new slice
+// if the CBOR array is empty or slice capacity is less than CBOR array length.
+// Otherwise Unmarshal overwrites existing elements, and sets slice length
+// to CBOR array length.
+//
+// To unmarshal a CBOR array into a Go array, Unmarshal decodes CBOR array
+// elements into Go array elements. If the Go array is smaller than the
+// CBOR array, the extra CBOR array elements are discarded. If the CBOR
+// array is smaller than the Go array, the extra Go array elements are
+// set to zero values.
+//
+// To unmarshal a CBOR array into a struct, the struct must have a special field "_"
+// with struct tag `cbor:",toarray"`. CBOR array elements are decoded into struct
+// fields. Any "omitempty" struct field tag option is ignored in this case.
+//
+// To unmarshal a CBOR map into a map, Unmarshal allocates a new map only if the
+// map is nil. Otherwise Unmarshal reuses the existing map and keeps existing
+// entries. Unmarshal stores key-value pairs from the CBOR map into Go map.
+// See DecOptions.DupMapKey to enable duplicate map key detection.
+//
+// To unmarshal a CBOR map into a struct, Unmarshal matches CBOR map keys to the
+// keys in the following priority:
+//
+// 1. "cbor" key in struct field tag,
+// 2. "json" key in struct field tag,
+// 3. struct field name.
+//
+// Unmarshal tries an exact match for field name, then a case-insensitive match.
+// Map key-value pairs without corresponding struct fields are ignored. See
+// DecOptions.ExtraReturnErrors to return error at unknown field.
+//
+// To unmarshal a CBOR text string into a time.Time value, Unmarshal parses text
+// string formatted in RFC3339. To unmarshal a CBOR integer/float into a
+// time.Time value, Unmarshal creates a Unix time with the integer/float as seconds
+// and fractional seconds since January 1, 1970 UTC. As a special case, infinite
+// and NaN float values decode to time.Time's zero value.
+//
+// To unmarshal CBOR null (0xf6) and undefined (0xf7) values into a
+// slice/map/pointer, Unmarshal sets Go value to nil. Because null is often
+// used to mean "not present", unmarshalling CBOR null and undefined value
+// into any other Go type has no effect and returns no error.
+//
+// Unmarshal supports CBOR tag 55799 (self-describe CBOR), tag 0 and 1 (time),
+// and tag 2 and 3 (bignum).
+//
+// Unmarshal returns ExtraneousDataError error (without decoding into v)
+// if there are any remaining bytes following the first valid CBOR data item.
+// See UnmarshalFirst, if you want to unmarshal only the first
+// CBOR data item without ExtraneousDataError caused by remaining bytes.
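+//
+// A minimal illustrative example (the struct and variable names here are
+// arbitrary, not part of the API):
+//
+//	type point struct {
+//		X int64 `cbor:"x"`
+//		Y int64 `cbor:"y"`
+//	}
+//	var p point
+//	if err := cbor.Unmarshal(data, &p); err != nil {
+//		// handle malformed input or a type mismatch
+//	}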
+func Unmarshal(data []byte, v interface{}) error {
+ return defaultDecMode.Unmarshal(data, v)
+}
+
+// UnmarshalFirst parses the first CBOR data item into the value pointed to by v
+// using default decoding options. Any remaining bytes are returned in rest.
+//
+// If v is nil, not a pointer, or a nil pointer, UnmarshalFirst returns an error.
+//
+// See the documentation for Unmarshal for details.
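+//
+// For example (illustrative), a buffer holding several concatenated CBOR data
+// items can be drained like this:
+//
+//	for len(data) > 0 {
+//		var v interface{}
+//		rest, err := cbor.UnmarshalFirst(data, &v)
+//		if err != nil {
+//			break
+//		}
+//		data = rest
+//	}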
+func UnmarshalFirst(data []byte, v interface{}) (rest []byte, err error) {
+ return defaultDecMode.UnmarshalFirst(data, v)
+}
+
+// Valid checks whether data is a well-formed encoded CBOR data item and
+// that it complies with default restrictions such as MaxNestedLevels,
+// MaxArrayElements, MaxMapPairs, etc.
+//
+// If there are any remaining bytes after the CBOR data item,
+// an ExtraneousDataError is returned.
+//
+// WARNING: Valid doesn't check if encoded CBOR data item is valid (i.e. validity)
+// and RFC 8949 distinctly defines what is "Valid" and what is "Well-formed".
+//
+// Deprecated: Valid is kept for compatibility and should not be used.
+// Use Wellformed instead because it has a more appropriate name.
+func Valid(data []byte) error {
+ return defaultDecMode.Valid(data)
+}
+
+// Wellformed checks whether data is a well-formed encoded CBOR data item and
+// that it complies with default restrictions such as MaxNestedLevels,
+// MaxArrayElements, MaxMapPairs, etc.
+//
+// If there are any remaining bytes after the CBOR data item,
+// an ExtraneousDataError is returned.
+func Wellformed(data []byte) error {
+ return defaultDecMode.Wellformed(data)
+}
+
+// Unmarshaler is the interface implemented by types that wish to unmarshal
+// CBOR data themselves. The input is a valid CBOR value. UnmarshalCBOR
+// must copy the CBOR data if it needs to use it after returning.
+type Unmarshaler interface {
+ UnmarshalCBOR([]byte) error
+}
+
+// InvalidUnmarshalError describes an invalid argument passed to Unmarshal.
+type InvalidUnmarshalError struct {
+ s string
+}
+
+func (e *InvalidUnmarshalError) Error() string {
+ return e.s
+}
+
+// UnmarshalTypeError describes a CBOR value that can't be decoded to a Go type.
+type UnmarshalTypeError struct {
+ CBORType string // type of CBOR value
+ GoType string // type of Go value it could not be decoded into
+ StructFieldName string // name of the struct field holding the Go value (optional)
+ errorMsg string // additional error message (optional)
+}
+
+func (e *UnmarshalTypeError) Error() string {
+ var s string
+ if e.StructFieldName != "" {
+ s = "cbor: cannot unmarshal " + e.CBORType + " into Go struct field " + e.StructFieldName + " of type " + e.GoType
+ } else {
+ s = "cbor: cannot unmarshal " + e.CBORType + " into Go value of type " + e.GoType
+ }
+ if e.errorMsg != "" {
+ s += " (" + e.errorMsg + ")"
+ }
+ return s
+}
+
+// InvalidMapKeyTypeError describes invalid Go map key type when decoding CBOR map.
+// For example, Go doesn't allow slice as map key.
+type InvalidMapKeyTypeError struct {
+ GoType string
+}
+
+func (e *InvalidMapKeyTypeError) Error() string {
+ return "cbor: invalid map key type: " + e.GoType
+}
+
+// DupMapKeyError describes detected duplicate map key in CBOR map.
+type DupMapKeyError struct {
+ Key interface{}
+ Index int
+}
+
+func (e *DupMapKeyError) Error() string {
+ return fmt.Sprintf("cbor: found duplicate map key \"%v\" at map element index %d", e.Key, e.Index)
+}
+
+// UnknownFieldError describes detected unknown field in CBOR map when decoding to Go struct.
+type UnknownFieldError struct {
+ Index int
+}
+
+func (e *UnknownFieldError) Error() string {
+ return fmt.Sprintf("cbor: found unknown field at map element index %d", e.Index)
+}
+
+// UnacceptableDataItemError is returned when unmarshaling a CBOR input that contains a data item
+// that is not acceptable to a specific CBOR-based application protocol ("invalid or unexpected" as
+// described in RFC 8949 Section 5 Paragraph 3).
+type UnacceptableDataItemError struct {
+ CBORType string
+ Message string
+}
+
+func (e UnacceptableDataItemError) Error() string {
+ return fmt.Sprintf("cbor: data item of cbor type %s is not accepted by protocol: %s", e.CBORType, e.Message)
+}
+
+// ByteStringExpectedFormatError is returned when unmarshaling CBOR byte string fails when
+// using non-default ByteStringExpectedFormat decoding option that makes decoder expect
+// a specified format such as base64, hex, etc.
+type ByteStringExpectedFormatError struct {
+ expectedFormatOption ByteStringExpectedFormatMode
+ err error
+}
+
+func newByteStringExpectedFormatError(expectedFormatOption ByteStringExpectedFormatMode, err error) *ByteStringExpectedFormatError {
+ return &ByteStringExpectedFormatError{expectedFormatOption, err}
+}
+
+func (e *ByteStringExpectedFormatError) Error() string {
+ switch e.expectedFormatOption {
+ case ByteStringExpectedBase64URL:
+ return fmt.Sprintf("cbor: failed to decode base64url from byte string: %s", e.err)
+
+ case ByteStringExpectedBase64:
+ return fmt.Sprintf("cbor: failed to decode base64 from byte string: %s", e.err)
+
+ case ByteStringExpectedBase16:
+ return fmt.Sprintf("cbor: failed to decode hex from byte string: %s", e.err)
+
+ default:
+ return fmt.Sprintf("cbor: failed to decode byte string in expected format %d: %s", e.expectedFormatOption, e.err)
+ }
+}
+
+func (e *ByteStringExpectedFormatError) Unwrap() error {
+ return e.err
+}
+
+// InadmissibleTagContentTypeError is returned when unmarshaling built-in CBOR tags
+// fails because of inadmissible type for tag content. Currently, the built-in
+// CBOR tags in this codec are tags 0-3 and 21-23.
+// See "Tag validity" in RFC 8949 Section 5.3.2.
+type InadmissibleTagContentTypeError struct {
+ s string
+ tagNum int
+ expectedTagContentType string
+ gotTagContentType string
+}
+
+func newInadmissibleTagContentTypeError(
+ tagNum int,
+ expectedTagContentType string,
+ gotTagContentType string,
+) *InadmissibleTagContentTypeError {
+ return &InadmissibleTagContentTypeError{
+ tagNum: tagNum,
+ expectedTagContentType: expectedTagContentType,
+ gotTagContentType: gotTagContentType,
+ }
+}
+
+func newInadmissibleTagContentTypeErrorf(s string) *InadmissibleTagContentTypeError {
+ return &InadmissibleTagContentTypeError{s: "cbor: " + s} //nolint:goconst // ignore "cbor"
+}
+
+func (e *InadmissibleTagContentTypeError) Error() string {
+ if e.s == "" {
+ return fmt.Sprintf(
+ "cbor: tag number %d must be followed by %s, got %s",
+ e.tagNum,
+ e.expectedTagContentType,
+ e.gotTagContentType,
+ )
+ }
+ return e.s
+}
+
+// DupMapKeyMode specifies how duplicate map keys are handled. Two map keys are considered duplicates if:
+// 1. When decoding into a struct, both keys match the same struct field. The keys are also
+// considered duplicates if neither matches any field and decoding to interface{} would produce
+// equal (==) values for both keys.
+// 2. When decoding into a map, both keys are equal (==) when decoded into values of the
+// destination map's key type.
+type DupMapKeyMode int
+
+const (
+ // DupMapKeyQuiet doesn't enforce duplicate map key. Decoder quietly (no error)
+ // uses faster of "keep first" or "keep last" depending on Go data type and other factors.
+ DupMapKeyQuiet DupMapKeyMode = iota
+
+ // DupMapKeyEnforcedAPF enforces detection and rejection of duplicate map keys.
+ // APF means "Allow Partial Fill" and the destination map or struct can be partially filled.
+ // If a duplicate map key is detected, DupMapKeyError is returned without further decoding
+ // of the map. It's the caller's responsibility to respond to DupMapKeyError by
+ // discarding the partially filled result if their protocol requires it.
+ // WARNING: using DupMapKeyEnforcedAPF will decrease performance and increase memory use.
+ DupMapKeyEnforcedAPF
+
+ maxDupMapKeyMode
+)
+
+func (dmkm DupMapKeyMode) valid() bool {
+ return dmkm >= 0 && dmkm < maxDupMapKeyMode
+}
+
+// IndefLengthMode specifies whether to allow indefinite length items.
+type IndefLengthMode int
+
+const (
+ // IndefLengthAllowed allows indefinite length items.
+ IndefLengthAllowed IndefLengthMode = iota
+
+ // IndefLengthForbidden disallows indefinite length items.
+ IndefLengthForbidden
+
+ maxIndefLengthMode
+)
+
+func (m IndefLengthMode) valid() bool {
+ return m >= 0 && m < maxIndefLengthMode
+}
+
+// TagsMode specifies whether to allow CBOR tags.
+type TagsMode int
+
+const (
+ // TagsAllowed allows CBOR tags.
+ TagsAllowed TagsMode = iota
+
+ // TagsForbidden disallows CBOR tags.
+ TagsForbidden
+
+ maxTagsMode
+)
+
+func (tm TagsMode) valid() bool {
+ return tm >= 0 && tm < maxTagsMode
+}
+
+// IntDecMode specifies which Go type (int64, uint64, or big.Int) should
+// be used when decoding CBOR integers (major type 0 and 1) to Go interface{}.
+type IntDecMode int
+
+const (
+ // IntDecConvertNone affects how CBOR integers (major type 0 and 1) decode to Go interface{}.
+ // It decodes CBOR unsigned integer (major type 0) to:
+ // - uint64
+ // It decodes CBOR negative integer (major type 1) to:
+ // - int64 if value fits
+ // - big.Int or *big.Int (see BigIntDecMode) if value doesn't fit into int64
+ IntDecConvertNone IntDecMode = iota
+
+ // IntDecConvertSigned affects how CBOR integers (major type 0 and 1) decode to Go interface{}.
+ // It decodes CBOR integers (major type 0 and 1) to:
+ // - int64 if value fits
+ // - big.Int or *big.Int (see BigIntDecMode) if value < math.MinInt64
+ // - return UnmarshalTypeError if value > math.MaxInt64
+ // Deprecated: IntDecConvertSigned should not be used.
+ // Please use other options, such as IntDecConvertSignedOrFail, IntDecConvertSignedOrBigInt, IntDecConvertNone.
+ IntDecConvertSigned
+
+ // IntDecConvertSignedOrFail affects how CBOR integers (major type 0 and 1) decode to Go interface{}.
+ // It decodes CBOR integers (major type 0 and 1) to:
+ // - int64 if value fits
+ // - return UnmarshalTypeError if value doesn't fit into int64
+ IntDecConvertSignedOrFail
+
+ // IntDecConvertSignedOrBigInt affects how CBOR integers (major type 0 and 1) decode to Go interface{}.
+ // It makes CBOR integers (major type 0 and 1) decode to:
+ // - int64 if value fits
+ // - big.Int or *big.Int (see BigIntDecMode) if value doesn't fit into int64
+ IntDecConvertSignedOrBigInt
+
+ maxIntDec
+)
+
+func (idm IntDecMode) valid() bool {
+ return idm >= 0 && idm < maxIntDec
+}
+
+// MapKeyByteStringMode specifies how to decode CBOR byte string (major type 2)
+// as Go map key when decoding CBOR map key into an empty Go interface value.
+// Specifically, this option applies when decoding CBOR map into
+// - Go empty interface, or
+// - Go map with empty interface as key type.
+// The CBOR map key types handled by this option are
+// - byte string
+// - tagged byte string
+// - nested tagged byte string
+type MapKeyByteStringMode int
+
+const (
+ // MapKeyByteStringAllowed allows CBOR byte string to be decoded as Go map key.
+ // Since Go doesn't allow []byte as map key, CBOR byte string is decoded to
+ // ByteString which has underlying string type.
+ // This is the default setting.
+ MapKeyByteStringAllowed MapKeyByteStringMode = iota
+
+ // MapKeyByteStringForbidden forbids CBOR byte string being decoded as Go map key.
+ // Attempting to decode CBOR byte string as map key into empty interface value
+ // returns a decoding error.
+ MapKeyByteStringForbidden
+
+ maxMapKeyByteStringMode
+)
+
+func (mkbsm MapKeyByteStringMode) valid() bool {
+ return mkbsm >= 0 && mkbsm < maxMapKeyByteStringMode
+}
+
+// ExtraDecErrorCond specifies extra conditions that should be treated as errors.
+type ExtraDecErrorCond uint
+
+// ExtraDecErrorNone indicates no extra error condition.
+const ExtraDecErrorNone ExtraDecErrorCond = 0
+
+const (
+ // ExtraDecErrorUnknownField indicates error condition when destination
+ // Go struct doesn't have a field matching a CBOR map key.
+ ExtraDecErrorUnknownField ExtraDecErrorCond = 1 << iota
+
+ maxExtraDecError
+)
+
+func (ec ExtraDecErrorCond) valid() bool {
+ return ec < maxExtraDecError
+}
+
+// UTF8Mode option specifies if decoder should
+// decode CBOR Text containing invalid UTF-8 string.
+type UTF8Mode int
+
+const (
+ // UTF8RejectInvalid rejects CBOR Text containing
+ // invalid UTF-8 string.
+ UTF8RejectInvalid UTF8Mode = iota
+
+ // UTF8DecodeInvalid allows decoding CBOR Text containing
+ // invalid UTF-8 string.
+ UTF8DecodeInvalid
+
+ maxUTF8Mode
+)
+
+func (um UTF8Mode) valid() bool {
+ return um >= 0 && um < maxUTF8Mode
+}
+
+// FieldNameMatchingMode specifies how string keys in CBOR maps are matched to Go struct field names.
+type FieldNameMatchingMode int
+
+const (
+ // FieldNameMatchingPreferCaseSensitive prefers to decode map items into struct fields whose names (or tag
+ // names) exactly match the item's key. If there is no such field, a map item will be decoded into a field whose
+ // name is a case-insensitive match for the item's key.
+ FieldNameMatchingPreferCaseSensitive FieldNameMatchingMode = iota
+
+ // FieldNameMatchingCaseSensitive decodes map items only into a struct field whose name (or tag name) is an
+ // exact match for the item's key.
+ FieldNameMatchingCaseSensitive
+
+ maxFieldNameMatchingMode
+)
+
+func (fnmm FieldNameMatchingMode) valid() bool {
+ return fnmm >= 0 && fnmm < maxFieldNameMatchingMode
+}
+
+// BigIntDecMode specifies how to decode CBOR bignum to Go interface{}.
+type BigIntDecMode int
+
+const (
+ // BigIntDecodeValue makes CBOR bignum decode to big.Int (instead of *big.Int)
+ // when unmarshalling into a Go interface{}.
+ BigIntDecodeValue BigIntDecMode = iota
+
+ // BigIntDecodePointer makes CBOR bignum decode to *big.Int when
+ // unmarshalling into a Go interface{}.
+ BigIntDecodePointer
+
+ maxBigIntDecMode
+)
+
+func (bidm BigIntDecMode) valid() bool {
+ return bidm >= 0 && bidm < maxBigIntDecMode
+}
+
+// ByteStringToStringMode specifies the behavior when decoding a CBOR byte string into a Go string.
+type ByteStringToStringMode int
+
+const (
+ // ByteStringToStringForbidden generates an error on an attempt to decode a CBOR byte string into a Go string.
+ ByteStringToStringForbidden ByteStringToStringMode = iota
+
+ // ByteStringToStringAllowed permits decoding a CBOR byte string into a Go string.
+ ByteStringToStringAllowed
+
+ // ByteStringToStringAllowedWithExpectedLaterEncoding permits decoding a CBOR byte string
+ // into a Go string. Also, if the byte string is enclosed (directly or indirectly) by one of
+ // the "expected later encoding" tags (numbers 21 through 23), the destination string will
+ // be populated by applying the designated text encoding to the contents of the input byte
+ // string.
+ ByteStringToStringAllowedWithExpectedLaterEncoding
+
+ maxByteStringToStringMode
+)
+
+func (bstsm ByteStringToStringMode) valid() bool {
+ return bstsm >= 0 && bstsm < maxByteStringToStringMode
+}
+
+// FieldNameByteStringMode specifies the behavior when decoding a CBOR byte string map key as a Go struct field name.
+type FieldNameByteStringMode int
+
+const (
+ // FieldNameByteStringForbidden generates an error on an attempt to decode a CBOR byte string map key as a Go struct field name.
+ FieldNameByteStringForbidden FieldNameByteStringMode = iota
+
+ // FieldNameByteStringAllowed permits CBOR byte string map keys to be recognized as Go struct field names.
+ FieldNameByteStringAllowed
+
+ maxFieldNameByteStringMode
+)
+
+func (fnbsm FieldNameByteStringMode) valid() bool {
+ return fnbsm >= 0 && fnbsm < maxFieldNameByteStringMode
+}
+
+// UnrecognizedTagToAnyMode specifies how to decode unrecognized CBOR tag into an empty interface (any).
+// Currently, recognized CBOR tag numbers are 0, 1, 2, 3, and those registered by TagSet.
+type UnrecognizedTagToAnyMode int
+
+const (
+ // UnrecognizedTagNumAndContentToAny decodes CBOR tag number and tag content to cbor.Tag
+ // when decoding unrecognized CBOR tag into an empty interface.
+ UnrecognizedTagNumAndContentToAny UnrecognizedTagToAnyMode = iota
+
+ // UnrecognizedTagContentToAny decodes only CBOR tag content (into its default type)
+ // when decoding unrecognized CBOR tag into an empty interface.
+ UnrecognizedTagContentToAny
+
+ maxUnrecognizedTagToAny
+)
+
+func (uttam UnrecognizedTagToAnyMode) valid() bool {
+ return uttam >= 0 && uttam < maxUnrecognizedTagToAny
+}
+
+// TimeTagToAnyMode specifies how to decode CBOR tag 0 and 1 into an empty interface (any).
+// Based on the specified mode, Unmarshal can return a time.Time value or a time string in a specific format.
+type TimeTagToAnyMode int
+
+const (
+ // TimeTagToTime decodes CBOR tag 0 and 1 into a time.Time value
+ // when decoding tag 0 or 1 into an empty interface.
+ TimeTagToTime TimeTagToAnyMode = iota
+
+ // TimeTagToRFC3339 decodes CBOR tag 0 and 1 into a time string in RFC3339 format
+ // when decoding tag 0 or 1 into an empty interface.
+ TimeTagToRFC3339
+
+ // TimeTagToRFC3339Nano decodes CBOR tag 0 and 1 into a time string in RFC3339Nano format
+ // when decoding tag 0 or 1 into an empty interface.
+ TimeTagToRFC3339Nano
+
+ maxTimeTagToAnyMode
+)
+
+func (tttam TimeTagToAnyMode) valid() bool {
+ return tttam >= 0 && tttam < maxTimeTagToAnyMode
+}
+
+// SimpleValueRegistry is a registry of unmarshaling behaviors for each possible CBOR simple value
+// number (0...23 and 32...255).
+type SimpleValueRegistry struct {
+ rejected [256]bool
+}
+
+// WithRejectedSimpleValue registers the given simple value as rejected. If the simple value is
+// encountered in a CBOR input during unmarshaling, an UnacceptableDataItemError is returned.
+func WithRejectedSimpleValue(sv SimpleValue) func(*SimpleValueRegistry) error {
+ return func(r *SimpleValueRegistry) error {
+ if sv >= 24 && sv <= 31 {
+ return fmt.Errorf("cbor: cannot set analog for reserved simple value %d", sv)
+ }
+ r.rejected[sv] = true
+ return nil
+ }
+}
+
+// NewSimpleValueRegistryFromDefaults creates a new SimpleValueRegistry. The registry state is initialized by executing the provided
+// functions in order against a registry that is pre-populated with the defaults for all well-formed
+// simple value numbers.
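+//
+// For example (illustrative), a registry that rejects the "undefined" simple
+// value (23) while keeping the defaults for every other simple value:
+//
+//	r, err := cbor.NewSimpleValueRegistryFromDefaults(
+//		cbor.WithRejectedSimpleValue(cbor.SimpleValue(23)))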
+func NewSimpleValueRegistryFromDefaults(fns ...func(*SimpleValueRegistry) error) (*SimpleValueRegistry, error) {
+ var r SimpleValueRegistry
+ for _, fn := range fns {
+ if err := fn(&r); err != nil {
+ return nil, err
+ }
+ }
+ return &r, nil
+}
+
+// NaNMode specifies how to decode floating-point values (major type 7, additional information 25
+// through 27) representing NaN (not-a-number).
+type NaNMode int
+
+const (
+ // NaNDecodeAllowed will decode NaN values to Go float32 or float64.
+ NaNDecodeAllowed NaNMode = iota
+
+ // NaNDecodeForbidden will return an UnacceptableDataItemError on an attempt to decode a NaN value.
+ NaNDecodeForbidden
+
+ maxNaNDecode
+)
+
+func (ndm NaNMode) valid() bool {
+ return ndm >= 0 && ndm < maxNaNDecode
+}
+
+// InfMode specifies how to decode floating-point values (major type 7, additional information 25
+// through 27) representing positive or negative infinity.
+type InfMode int
+
+const (
+ // InfDecodeAllowed will decode infinite values to Go float32 or float64.
+ InfDecodeAllowed InfMode = iota
+
+ // InfDecodeForbidden will return an UnacceptableDataItemError on an attempt to decode an
+ // infinite value.
+ InfDecodeForbidden
+
+ maxInfDecode
+)
+
+func (idm InfMode) valid() bool {
+ return idm >= 0 && idm < maxInfDecode
+}
+
+// ByteStringToTimeMode specifies the behavior when decoding a CBOR byte string into a Go time.Time.
+type ByteStringToTimeMode int
+
+const (
+ // ByteStringToTimeForbidden generates an error on an attempt to decode a CBOR byte string into a Go time.Time.
+ ByteStringToTimeForbidden ByteStringToTimeMode = iota
+
+ // ByteStringToTimeAllowed permits decoding a CBOR byte string into a Go time.Time.
+ ByteStringToTimeAllowed
+
+ maxByteStringToTimeMode
+)
+
+func (bttm ByteStringToTimeMode) valid() bool {
+ return bttm >= 0 && bttm < maxByteStringToTimeMode
+}
+
+// ByteStringExpectedFormatMode specifies how to decode CBOR byte string into Go byte slice
+// when the byte string is NOT enclosed in CBOR tag 21, 22, or 23. An error is returned if
+// the CBOR byte string does not contain the expected format (e.g. base64) specified.
+// For tags 21-23, see "Expected Later Encoding for CBOR-to-JSON Converters"
+// in RFC 8949 Section 3.4.5.2.
+type ByteStringExpectedFormatMode int
+
+const (
+ // ByteStringExpectedFormatNone copies the unmodified CBOR byte string into Go byte slice
+ // if the byte string is not tagged by CBOR tag 21-23.
+ ByteStringExpectedFormatNone ByteStringExpectedFormatMode = iota
+
+ // ByteStringExpectedBase64URL expects CBOR byte strings to contain base64url-encoded bytes
+ // if the byte string is not tagged by CBOR tag 21-23. The decoder will attempt to decode
+ // the base64url-encoded bytes into Go slice.
+ ByteStringExpectedBase64URL
+
+ // ByteStringExpectedBase64 expects CBOR byte strings to contain base64-encoded bytes
+ // if the byte string is not tagged by CBOR tag 21-23. The decoder will attempt to decode
+ // the base64-encoded bytes into Go slice.
+ ByteStringExpectedBase64
+
+ // ByteStringExpectedBase16 expects CBOR byte strings to contain base16-encoded bytes
+ // if the byte string is not tagged by CBOR tag 21-23. The decoder will attempt to decode
+ // the base16-encoded bytes into Go slice.
+ ByteStringExpectedBase16
+
+ maxByteStringExpectedFormatMode
+)
+
+func (bsefm ByteStringExpectedFormatMode) valid() bool {
+ return bsefm >= 0 && bsefm < maxByteStringExpectedFormatMode
+}
+
+// BignumTagMode specifies whether or not the "bignum" tags 2 and 3 (RFC 8949 Section 3.4.3) can be
+// decoded.
+type BignumTagMode int
+
+const (
+ // BignumTagAllowed allows bignum tags to be decoded.
+ BignumTagAllowed BignumTagMode = iota
+
+ // BignumTagForbidden produces an UnacceptableDataItemError during Unmarshal if a bignum tag
+ // is encountered in the input.
+ BignumTagForbidden
+
+ maxBignumTag
+)
+
+func (btm BignumTagMode) valid() bool {
+ return btm >= 0 && btm < maxBignumTag
+}
+
+// BinaryUnmarshalerMode specifies how to decode into types that implement
+// encoding.BinaryUnmarshaler.
+type BinaryUnmarshalerMode int
+
+const (
+ // BinaryUnmarshalerByteString will invoke UnmarshalBinary on the contents of a CBOR byte
+ // string when decoding into a value that implements BinaryUnmarshaler.
+ BinaryUnmarshalerByteString BinaryUnmarshalerMode = iota
+
+ // BinaryUnmarshalerNone does not recognize BinaryUnmarshaler implementations during decode.
+ BinaryUnmarshalerNone
+
+ maxBinaryUnmarshalerMode
+)
+
+func (bum BinaryUnmarshalerMode) valid() bool {
+ return bum >= 0 && bum < maxBinaryUnmarshalerMode
+}
+
+// DecOptions specifies decoding options.
+type DecOptions struct {
+ // DupMapKey specifies whether to enforce duplicate map key detection.
+ DupMapKey DupMapKeyMode
+
+ // TimeTag specifies whether or not untagged data items, or tags other
+ // than tag 0 and tag 1, can be decoded to time.Time. If tag 0 or tag 1
+ // appears in an input, the type of its content is always validated as
+ // specified in RFC 8949. That behavior is not controlled by this
+ // option. The behaviors of the supported modes are:
+ //
+ // DecTagIgnored (default): Untagged text strings and text strings
+ // enclosed in tags other than 0 and 1 are decoded as though enclosed
+ // in tag 0. Untagged unsigned integers, negative integers, and
+ // floating-point numbers (or those enclosed in tags other than 0 and
+ // 1) are decoded as though enclosed in tag 1. Decoding a tag other
+ // than 0 or 1 enclosing simple values null or undefined into a
+ // time.Time does not modify the destination value.
+ //
+ // DecTagOptional: Untagged text strings are decoded as though
+ // enclosed in tag 0. Untagged unsigned integers, negative integers,
+ // and floating-point numbers are decoded as though enclosed in tag
+ // 1. Tags other than 0 and 1 will produce an error on attempts to
+ // decode them into a time.Time.
+ //
+ // DecTagRequired: Only tags 0 and 1 can be decoded to time.Time. Any
+ // other input will produce an error.
+ TimeTag DecTagMode
+
+ // MaxNestedLevels specifies the max nested levels allowed for any combination of CBOR arrays, maps, and tags.
+ // Default is 32 levels and it can be set to [4, 65535]. Note that higher maximum levels of nesting can
+ // require larger amounts of stack to deserialize. Don't increase this higher than you require.
+ MaxNestedLevels int
+
+ // MaxArrayElements specifies the max number of elements for CBOR arrays.
+ // Default is 128*1024=131072 and it can be set to [16, 2147483647]
+ MaxArrayElements int
+
+ // MaxMapPairs specifies the max number of key-value pairs for CBOR maps.
+ // Default is 128*1024=131072 and it can be set to [16, 2147483647]
+ MaxMapPairs int
+
+ // IndefLength specifies whether to allow indefinite length CBOR items.
+ IndefLength IndefLengthMode
+
+ // TagsMd specifies whether to allow CBOR tags (major type 6).
+ TagsMd TagsMode
+
+ // IntDec specifies which Go integer type (int64 or uint64) to use
+ // when decoding CBOR int (major type 0 and 1) to Go interface{}.
+ IntDec IntDecMode
+
+ // MapKeyByteString specifies how to decode CBOR byte string as map key
+ // when decoding CBOR map with byte string key into an empty interface value.
+ // By default, an error is returned when attempting to decode CBOR byte string
+ // as map key because Go doesn't allow []byte as map key.
+ MapKeyByteString MapKeyByteStringMode
+
+ // ExtraReturnErrors specifies extra conditions that should be treated as errors.
+ ExtraReturnErrors ExtraDecErrorCond
+
+ // DefaultMapType specifies Go map type to create and decode to
+ // when unmarshalling CBOR into an empty interface value.
+ // By default, unmarshal uses map[interface{}]interface{}.
+ DefaultMapType reflect.Type
+
+ // UTF8 specifies if decoder should decode CBOR Text containing invalid UTF-8.
+ // By default, unmarshal rejects CBOR text containing invalid UTF-8.
+ UTF8 UTF8Mode
+
+ // FieldNameMatching specifies how string keys in CBOR maps are matched to Go struct field names.
+ FieldNameMatching FieldNameMatchingMode
+
+ // BigIntDec specifies how to decode CBOR bignum to Go interface{}.
+ BigIntDec BigIntDecMode
+
+ // DefaultByteStringType is the Go type that should be produced when decoding a CBOR byte
+ // string into an empty interface value. Types to which a []byte is convertible are valid
+ // for this option, except for array and pointer-to-array types. If nil, the default is
+ // []byte.
+ DefaultByteStringType reflect.Type
+
+ // ByteStringToString specifies the behavior when decoding a CBOR byte string into a Go string.
+ ByteStringToString ByteStringToStringMode
+
+ // FieldNameByteString specifies the behavior when decoding a CBOR byte string map key as a
+ // Go struct field name.
+ FieldNameByteString FieldNameByteStringMode
+
+ // UnrecognizedTagToAny specifies how to decode unrecognized CBOR tag into an empty interface.
+ // Currently, recognized CBOR tag numbers are 0, 1, 2, 3, and those registered by TagSet.
+ UnrecognizedTagToAny UnrecognizedTagToAnyMode
+
+ // TimeTagToAny specifies how to decode CBOR tag 0 and 1 into an empty interface (any).
+ // Based on the specified mode, Unmarshal can return a time.Time value or a time string in a specific format.
+ TimeTagToAny TimeTagToAnyMode
+
+ // SimpleValues is an immutable mapping from each CBOR simple value to a corresponding
+ // unmarshal behavior. If nil, the simple values false, true, null, and undefined are mapped
+ // to the Go analog values false, true, nil, and nil, respectively, and all other simple
+ // values N (except the reserved simple values 24 through 31) are mapped to
+ // cbor.SimpleValue(N). In other words, all well-formed simple values can be decoded.
+ //
+ // Users may provide a custom SimpleValueRegistry constructed via
+ // NewSimpleValueRegistryFromDefaults.
+ SimpleValues *SimpleValueRegistry
+
+ // NaN specifies how to decode floating-point values (major type 7, additional information
+ // 25 through 27) representing NaN (not-a-number).
+ NaN NaNMode
+
+ // Inf specifies how to decode floating-point values (major type 7, additional information
+ // 25 through 27) representing positive or negative infinity.
+ Inf InfMode
+
+ // ByteStringToTime specifies how to decode CBOR byte string into Go time.Time.
+ ByteStringToTime ByteStringToTimeMode
+
+ // ByteStringExpectedFormat specifies how to decode CBOR byte string into Go byte slice
+ // when the byte string is NOT enclosed in CBOR tag 21, 22, or 23. An error is returned if
+ // the CBOR byte string does not contain the expected format (e.g. base64) specified.
+ // For tags 21-23, see "Expected Later Encoding for CBOR-to-JSON Converters"
+ // in RFC 8949 Section 3.4.5.2.
+ ByteStringExpectedFormat ByteStringExpectedFormatMode
+
+ // BignumTag specifies whether or not the "bignum" tags 2 and 3 (RFC 8949 Section 3.4.3) can
+ // be decoded. Unlike BigIntDec, this option applies to all bignum tags encountered in a
+ // CBOR input, independent of the type of the destination value of a particular Unmarshal
+ // operation.
+ BignumTag BignumTagMode
+
+ // BinaryUnmarshaler specifies how to decode into types that implement
+ // encoding.BinaryUnmarshaler.
+ BinaryUnmarshaler BinaryUnmarshalerMode
+}
+
+// DecMode returns DecMode with immutable options and no tags (safe for concurrency).
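+//
+// For example (illustrative), a mode that rejects duplicate map keys and
+// forbids indefinite-length items:
+//
+//	dm, err := cbor.DecOptions{
+//		DupMapKey:   cbor.DupMapKeyEnforcedAPF,
+//		IndefLength: cbor.IndefLengthForbidden,
+//	}.DecMode()
+//
+// The returned DecMode can then be used in place of the package-level
+// Unmarshal, UnmarshalFirst, and Wellformed functions.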
+func (opts DecOptions) DecMode() (DecMode, error) { //nolint:gocritic // ignore hugeParam
+ return opts.decMode()
+}
+
+// validForTags checks that the provided tag set is compatible with these options and returns a
+// non-nil error if and only if the provided tag set is incompatible.
+func (opts DecOptions) validForTags(tags TagSet) error { //nolint:gocritic // ignore hugeParam
+ if opts.TagsMd == TagsForbidden {
+ return errors.New("cbor: cannot create DecMode with TagSet when TagsMd is TagsForbidden")
+ }
+ if tags == nil {
+ return errors.New("cbor: cannot create DecMode with nil value as TagSet")
+ }
+ if opts.ByteStringToString == ByteStringToStringAllowedWithExpectedLaterEncoding ||
+ opts.ByteStringExpectedFormat != ByteStringExpectedFormatNone {
+ for _, tagNum := range []uint64{
+ tagNumExpectedLaterEncodingBase64URL,
+ tagNumExpectedLaterEncodingBase64,
+ tagNumExpectedLaterEncodingBase16,
+ } {
+ if rt := tags.getTypeFromTagNum([]uint64{tagNum}); rt != nil {
+ return fmt.Errorf("cbor: DecMode with non-default StringExpectedEncoding or ByteSliceExpectedEncoding treats tag %d as built-in and conflicts with the provided TagSet's registration of %v", tagNum, rt)
+ }
+ }
+
+ }
+ return nil
+}
+
+// DecModeWithTags returns DecMode with options and tags that are both immutable (safe for concurrency).
+func (opts DecOptions) DecModeWithTags(tags TagSet) (DecMode, error) { //nolint:gocritic // ignore hugeParam
+ if err := opts.validForTags(tags); err != nil {
+ return nil, err
+ }
+ dm, err := opts.decMode()
+ if err != nil {
+ return nil, err
+ }
+
+ // Copy tags
+ ts := tagSet(make(map[reflect.Type]*tagItem))
+ syncTags := tags.(*syncTagSet)
+ syncTags.RLock()
+ for contentType, tag := range syncTags.t {
+ if tag.opts.DecTag != DecTagIgnored {
+ ts[contentType] = tag
+ }
+ }
+ syncTags.RUnlock()
+
+ if len(ts) > 0 {
+ dm.tags = ts
+ }
+
+ return dm, nil
+}
+
+// DecModeWithSharedTags returns DecMode with immutable options and mutable shared tags (safe for concurrency).
+func (opts DecOptions) DecModeWithSharedTags(tags TagSet) (DecMode, error) { //nolint:gocritic // ignore hugeParam
+ if err := opts.validForTags(tags); err != nil {
+ return nil, err
+ }
+ dm, err := opts.decMode()
+ if err != nil {
+ return nil, err
+ }
+ dm.tags = tags
+ return dm, nil
+}
+
+const (
+ defaultMaxArrayElements = 131072
+ minMaxArrayElements = 16
+ maxMaxArrayElements = 2147483647
+
+ defaultMaxMapPairs = 131072
+ minMaxMapPairs = 16
+ maxMaxMapPairs = 2147483647
+
+ defaultMaxNestedLevels = 32
+ minMaxNestedLevels = 4
+ maxMaxNestedLevels = 65535
+)
+
+var defaultSimpleValues = func() *SimpleValueRegistry {
+ registry, err := NewSimpleValueRegistryFromDefaults()
+ if err != nil {
+ panic(err)
+ }
+ return registry
+}()
+
+//nolint:gocyclo // Each option comes with some manageable boilerplate
+func (opts DecOptions) decMode() (*decMode, error) { //nolint:gocritic // ignore hugeParam
+ if !opts.DupMapKey.valid() {
+ return nil, errors.New("cbor: invalid DupMapKey " + strconv.Itoa(int(opts.DupMapKey)))
+ }
+
+ if !opts.TimeTag.valid() {
+ return nil, errors.New("cbor: invalid TimeTag " + strconv.Itoa(int(opts.TimeTag)))
+ }
+
+ if !opts.IndefLength.valid() {
+ return nil, errors.New("cbor: invalid IndefLength " + strconv.Itoa(int(opts.IndefLength)))
+ }
+
+ if !opts.TagsMd.valid() {
+ return nil, errors.New("cbor: invalid TagsMd " + strconv.Itoa(int(opts.TagsMd)))
+ }
+
+ if !opts.IntDec.valid() {
+ return nil, errors.New("cbor: invalid IntDec " + strconv.Itoa(int(opts.IntDec)))
+ }
+
+ if !opts.MapKeyByteString.valid() {
+ return nil, errors.New("cbor: invalid MapKeyByteString " + strconv.Itoa(int(opts.MapKeyByteString)))
+ }
+
+ if opts.MaxNestedLevels == 0 {
+ opts.MaxNestedLevels = defaultMaxNestedLevels
+ } else if opts.MaxNestedLevels < minMaxNestedLevels || opts.MaxNestedLevels > maxMaxNestedLevels {
+ return nil, errors.New("cbor: invalid MaxNestedLevels " + strconv.Itoa(opts.MaxNestedLevels) +
+ " (range is [" + strconv.Itoa(minMaxNestedLevels) + ", " + strconv.Itoa(maxMaxNestedLevels) + "])")
+ }
+
+ if opts.MaxArrayElements == 0 {
+ opts.MaxArrayElements = defaultMaxArrayElements
+ } else if opts.MaxArrayElements < minMaxArrayElements || opts.MaxArrayElements > maxMaxArrayElements {
+ return nil, errors.New("cbor: invalid MaxArrayElements " + strconv.Itoa(opts.MaxArrayElements) +
+ " (range is [" + strconv.Itoa(minMaxArrayElements) + ", " + strconv.Itoa(maxMaxArrayElements) + "])")
+ }
+
+ if opts.MaxMapPairs == 0 {
+ opts.MaxMapPairs = defaultMaxMapPairs
+ } else if opts.MaxMapPairs < minMaxMapPairs || opts.MaxMapPairs > maxMaxMapPairs {
+ return nil, errors.New("cbor: invalid MaxMapPairs " + strconv.Itoa(opts.MaxMapPairs) +
+ " (range is [" + strconv.Itoa(minMaxMapPairs) + ", " + strconv.Itoa(maxMaxMapPairs) + "])")
+ }
+
+ if !opts.ExtraReturnErrors.valid() {
+ return nil, errors.New("cbor: invalid ExtraReturnErrors " + strconv.Itoa(int(opts.ExtraReturnErrors)))
+ }
+
+ if opts.DefaultMapType != nil && opts.DefaultMapType.Kind() != reflect.Map {
+ return nil, fmt.Errorf("cbor: invalid DefaultMapType %s", opts.DefaultMapType)
+ }
+
+ if !opts.UTF8.valid() {
+ return nil, errors.New("cbor: invalid UTF8 " + strconv.Itoa(int(opts.UTF8)))
+ }
+
+ if !opts.FieldNameMatching.valid() {
+ return nil, errors.New("cbor: invalid FieldNameMatching " + strconv.Itoa(int(opts.FieldNameMatching)))
+ }
+
+ if !opts.BigIntDec.valid() {
+ return nil, errors.New("cbor: invalid BigIntDec " + strconv.Itoa(int(opts.BigIntDec)))
+ }
+
+ if opts.DefaultByteStringType != nil &&
+ opts.DefaultByteStringType.Kind() != reflect.String &&
+ (opts.DefaultByteStringType.Kind() != reflect.Slice || opts.DefaultByteStringType.Elem().Kind() != reflect.Uint8) {
+ return nil, fmt.Errorf("cbor: invalid DefaultByteStringType: %s is not of kind string or []uint8", opts.DefaultByteStringType)
+ }
+
+ if !opts.ByteStringToString.valid() {
+ return nil, errors.New("cbor: invalid ByteStringToString " + strconv.Itoa(int(opts.ByteStringToString)))
+ }
+
+ if !opts.FieldNameByteString.valid() {
+ return nil, errors.New("cbor: invalid FieldNameByteString " + strconv.Itoa(int(opts.FieldNameByteString)))
+ }
+
+ if !opts.UnrecognizedTagToAny.valid() {
+ return nil, errors.New("cbor: invalid UnrecognizedTagToAnyMode " + strconv.Itoa(int(opts.UnrecognizedTagToAny)))
+ }
+ simpleValues := opts.SimpleValues
+ if simpleValues == nil {
+ simpleValues = defaultSimpleValues
+ }
+
+ if !opts.TimeTagToAny.valid() {
+ return nil, errors.New("cbor: invalid TimeTagToAny " + strconv.Itoa(int(opts.TimeTagToAny)))
+ }
+
+ if !opts.NaN.valid() {
+ return nil, errors.New("cbor: invalid NaNDec " + strconv.Itoa(int(opts.NaN)))
+ }
+
+ if !opts.Inf.valid() {
+ return nil, errors.New("cbor: invalid InfDec " + strconv.Itoa(int(opts.Inf)))
+ }
+
+ if !opts.ByteStringToTime.valid() {
+ return nil, errors.New("cbor: invalid ByteStringToTime " + strconv.Itoa(int(opts.ByteStringToTime)))
+ }
+
+ if !opts.ByteStringExpectedFormat.valid() {
+ return nil, errors.New("cbor: invalid ByteStringExpectedFormat " + strconv.Itoa(int(opts.ByteStringExpectedFormat)))
+ }
+
+ if !opts.BignumTag.valid() {
+ return nil, errors.New("cbor: invalid BignumTag " + strconv.Itoa(int(opts.BignumTag)))
+ }
+
+ if !opts.BinaryUnmarshaler.valid() {
+ return nil, errors.New("cbor: invalid BinaryUnmarshaler " + strconv.Itoa(int(opts.BinaryUnmarshaler)))
+ }
+
+ dm := decMode{
+ dupMapKey: opts.DupMapKey,
+ timeTag: opts.TimeTag,
+ maxNestedLevels: opts.MaxNestedLevels,
+ maxArrayElements: opts.MaxArrayElements,
+ maxMapPairs: opts.MaxMapPairs,
+ indefLength: opts.IndefLength,
+ tagsMd: opts.TagsMd,
+ intDec: opts.IntDec,
+ mapKeyByteString: opts.MapKeyByteString,
+ extraReturnErrors: opts.ExtraReturnErrors,
+ defaultMapType: opts.DefaultMapType,
+ utf8: opts.UTF8,
+ fieldNameMatching: opts.FieldNameMatching,
+ bigIntDec: opts.BigIntDec,
+ defaultByteStringType: opts.DefaultByteStringType,
+ byteStringToString: opts.ByteStringToString,
+ fieldNameByteString: opts.FieldNameByteString,
+ unrecognizedTagToAny: opts.UnrecognizedTagToAny,
+ timeTagToAny: opts.TimeTagToAny,
+ simpleValues: simpleValues,
+ nanDec: opts.NaN,
+ infDec: opts.Inf,
+ byteStringToTime: opts.ByteStringToTime,
+ byteStringExpectedFormat: opts.ByteStringExpectedFormat,
+ bignumTag: opts.BignumTag,
+ binaryUnmarshaler: opts.BinaryUnmarshaler,
+ }
+
+ return &dm, nil
+}
+
+// DecMode is the main interface for CBOR decoding.
+type DecMode interface {
+ // Unmarshal parses the CBOR-encoded data into the value pointed to by v
+ // using the decoding mode. If v is nil, not a pointer, or a nil pointer,
+ // Unmarshal returns an error.
+ //
+ // See the documentation for Unmarshal for details.
+ Unmarshal(data []byte, v interface{}) error
+
+ // UnmarshalFirst parses the first CBOR data item into the value pointed to by v
+ // using the decoding mode. Any remaining bytes are returned in rest.
+ //
+ // If v is nil, not a pointer, or a nil pointer, UnmarshalFirst returns an error.
+ //
+ // See the documentation for Unmarshal for details.
+ UnmarshalFirst(data []byte, v interface{}) (rest []byte, err error)
+
+ // Valid checks whether data is a well-formed encoded CBOR data item and
+ // that it complies with configurable restrictions such as MaxNestedLevels,
+ // MaxArrayElements, MaxMapPairs, etc.
+ //
+ // If there are any remaining bytes after the CBOR data item,
+ // an ExtraneousDataError is returned.
+ //
+ // WARNING: Valid doesn't check if encoded CBOR data item is valid (i.e. validity)
+ // and RFC 8949 distinctly defines what is "Valid" and what is "Well-formed".
+ //
+ // Deprecated: Valid is kept for compatibility and should not be used.
+ // Use Wellformed instead because it has a more appropriate name.
+ Valid(data []byte) error
+
+ // Wellformed checks whether data is a well-formed encoded CBOR data item and
+ // that it complies with configurable restrictions such as MaxNestedLevels,
+ // MaxArrayElements, MaxMapPairs, etc.
+ //
+ // If there are any remaining bytes after the CBOR data item,
+ // an ExtraneousDataError is returned.
+ Wellformed(data []byte) error
+
+ // NewDecoder returns a new decoder that reads from r using dm DecMode.
+ NewDecoder(r io.Reader) *Decoder
+
+ // DecOptions returns user specified options used to create this DecMode.
+ DecOptions() DecOptions
+}
+
+type decMode struct {
+ tags tagProvider
+ dupMapKey DupMapKeyMode
+ timeTag DecTagMode
+ maxNestedLevels int
+ maxArrayElements int
+ maxMapPairs int
+ indefLength IndefLengthMode
+ tagsMd TagsMode
+ intDec IntDecMode
+ mapKeyByteString MapKeyByteStringMode
+ extraReturnErrors ExtraDecErrorCond
+ defaultMapType reflect.Type
+ utf8 UTF8Mode
+ fieldNameMatching FieldNameMatchingMode
+ bigIntDec BigIntDecMode
+ defaultByteStringType reflect.Type
+ byteStringToString ByteStringToStringMode
+ fieldNameByteString FieldNameByteStringMode
+ unrecognizedTagToAny UnrecognizedTagToAnyMode
+ timeTagToAny TimeTagToAnyMode
+ simpleValues *SimpleValueRegistry
+ nanDec NaNMode
+ infDec InfMode
+ byteStringToTime ByteStringToTimeMode
+ byteStringExpectedFormat ByteStringExpectedFormatMode
+ bignumTag BignumTagMode
+ binaryUnmarshaler BinaryUnmarshalerMode
+}
+
+var defaultDecMode, _ = DecOptions{}.decMode()
+
+// DecOptions returns user specified options used to create this DecMode.
+func (dm *decMode) DecOptions() DecOptions {
+ simpleValues := dm.simpleValues
+ if simpleValues == defaultSimpleValues {
+ // Users can't explicitly set this to defaultSimpleValues. It must have been nil in
+ // the original DecOptions.
+ simpleValues = nil
+ }
+
+ return DecOptions{
+ DupMapKey: dm.dupMapKey,
+ TimeTag: dm.timeTag,
+ MaxNestedLevels: dm.maxNestedLevels,
+ MaxArrayElements: dm.maxArrayElements,
+ MaxMapPairs: dm.maxMapPairs,
+ IndefLength: dm.indefLength,
+ TagsMd: dm.tagsMd,
+ IntDec: dm.intDec,
+ MapKeyByteString: dm.mapKeyByteString,
+ ExtraReturnErrors: dm.extraReturnErrors,
+ DefaultMapType: dm.defaultMapType,
+ UTF8: dm.utf8,
+ FieldNameMatching: dm.fieldNameMatching,
+ BigIntDec: dm.bigIntDec,
+ DefaultByteStringType: dm.defaultByteStringType,
+ ByteStringToString: dm.byteStringToString,
+ FieldNameByteString: dm.fieldNameByteString,
+ UnrecognizedTagToAny: dm.unrecognizedTagToAny,
+ TimeTagToAny: dm.timeTagToAny,
+ SimpleValues: simpleValues,
+ NaN: dm.nanDec,
+ Inf: dm.infDec,
+ ByteStringToTime: dm.byteStringToTime,
+ ByteStringExpectedFormat: dm.byteStringExpectedFormat,
+ BignumTag: dm.bignumTag,
+ BinaryUnmarshaler: dm.binaryUnmarshaler,
+ }
+}
+
+// Unmarshal parses the CBOR-encoded data into the value pointed to by v
+// using dm decoding mode. If v is nil, not a pointer, or a nil pointer,
+// Unmarshal returns an error.
+//
+// See the documentation for Unmarshal for details.
+func (dm *decMode) Unmarshal(data []byte, v interface{}) error {
+ d := decoder{data: data, dm: dm}
+
+ // Check well-formedness.
+ off := d.off // Save offset before data validation
+ err := d.wellformed(false, false) // don't allow any extra data after valid data item.
+ d.off = off // Restore offset
+ if err != nil {
+ return err
+ }
+
+ return d.value(v)
+}
+
+// UnmarshalFirst parses the first CBOR data item into the value pointed to by v
+// using dm decoding mode. Any remaining bytes are returned in rest.
+//
+// If v is nil, not a pointer, or a nil pointer, UnmarshalFirst returns an error.
+//
+// See the documentation for Unmarshal for details.
+func (dm *decMode) UnmarshalFirst(data []byte, v interface{}) (rest []byte, err error) {
+ d := decoder{data: data, dm: dm}
+
+ // check well-formedness.
+ off := d.off // Save offset before data validation
+ err = d.wellformed(true, false) // allow extra data after well-formed data item
+ d.off = off // Restore offset
+
+ // If it is well-formed, parse the value. The code is structured this way to
+ // allow better test coverage.
+ if err == nil {
+ err = d.value(v)
+ }
+
+ // If either wellformed or value returned an error, do not return rest bytes
+ if err != nil {
+ return nil, err
+ }
+
+ // Return the rest of the data slice (which might be len 0)
+ return d.data[d.off:], nil
+}
+
+// Valid checks whether data is a well-formed encoded CBOR data item and
+// that it complies with configurable restrictions such as MaxNestedLevels,
+// MaxArrayElements, MaxMapPairs, etc.
+//
+// If there are any remaining bytes after the CBOR data item,
+// an ExtraneousDataError is returned.
+//
+// WARNING: Valid doesn't check if encoded CBOR data item is valid (i.e. validity)
+// and RFC 8949 distinctly defines what is "Valid" and what is "Well-formed".
+//
+// Deprecated: Valid is kept for compatibility and should not be used.
+// Use Wellformed instead because it has a more appropriate name.
+func (dm *decMode) Valid(data []byte) error {
+ return dm.Wellformed(data)
+}
+
+// Wellformed checks whether data is a well-formed encoded CBOR data item and
+// that it complies with configurable restrictions such as MaxNestedLevels,
+// MaxArrayElements, MaxMapPairs, etc.
+//
+// If there are any remaining bytes after the CBOR data item,
+// an ExtraneousDataError is returned.
+func (dm *decMode) Wellformed(data []byte) error {
+ d := decoder{data: data, dm: dm}
+ return d.wellformed(false, false)
+}
+
+// NewDecoder returns a new decoder that reads from r using dm DecMode.
+func (dm *decMode) NewDecoder(r io.Reader) *Decoder {
+ return &Decoder{r: r, d: decoder{dm: dm}}
+}
+
+type decoder struct {
+ data []byte
+ off int // next read offset in data
+ dm *decMode
+
+ // expectedLaterEncodingTags stores a stack of encountered "Expected Later Encoding" tags,
+ // if any.
+ //
+ // The "Expected Later Encoding" tags (21 to 23) are valid for any data item. When decoding
+ // byte strings, the effective encoding comes from the tag nearest to the byte string being
+ // decoded. For example, the effective encoding of the byte string 21(22(h'41')) would be
+ // controlled by tag 22, and in the data item 23([h'42', 22([21(h'43')])]) the effective
+ // encoding of the byte strings h'42' and h'43' would be controlled by tag 23 and 21,
+ // respectively.
+ expectedLaterEncodingTags []uint64
+}
+
+// value decodes CBOR data item into the value pointed to by v.
+// If CBOR data item fails to be decoded into v,
+// error is returned and offset is moved to the next CBOR data item.
+// Precondition: d.data contains at least one well-formed CBOR data item.
+func (d *decoder) value(v interface{}) error {
+ // v can't be nil, non-pointer, or nil pointer value.
+ if v == nil {
+ return &InvalidUnmarshalError{"cbor: Unmarshal(nil)"}
+ }
+ rv := reflect.ValueOf(v)
+ if rv.Kind() != reflect.Ptr {
+ return &InvalidUnmarshalError{"cbor: Unmarshal(non-pointer " + rv.Type().String() + ")"}
+ } else if rv.IsNil() {
+ return &InvalidUnmarshalError{"cbor: Unmarshal(nil " + rv.Type().String() + ")"}
+ }
+ rv = rv.Elem()
+ return d.parseToValue(rv, getTypeInfo(rv.Type()))
+}
+
+// parseToValue decodes CBOR data to value. It assumes data is well-formed,
+// and does not perform bounds checking.
+func (d *decoder) parseToValue(v reflect.Value, tInfo *typeInfo) error { //nolint:gocyclo
+
+ // Decode CBOR nil or CBOR undefined to pointer value by setting pointer value to nil.
+ if d.nextCBORNil() && v.Kind() == reflect.Ptr {
+ d.skip()
+ v.Set(reflect.Zero(v.Type()))
+ return nil
+ }
+
+ if tInfo.spclType == specialTypeIface {
+ if !v.IsNil() {
+ // Use value type
+ v = v.Elem()
+ tInfo = getTypeInfo(v.Type())
+ } else { //nolint:gocritic
+ // Create and use registered type if CBOR data is registered tag
+ if d.dm.tags != nil && d.nextCBORType() == cborTypeTag {
+
+ off := d.off
+ var tagNums []uint64
+ for d.nextCBORType() == cborTypeTag {
+ _, _, tagNum := d.getHead()
+ tagNums = append(tagNums, tagNum)
+ }
+ d.off = off
+
+ registeredType := d.dm.tags.getTypeFromTagNum(tagNums)
+ if registeredType != nil {
+ if registeredType.Implements(tInfo.nonPtrType) ||
+ reflect.PtrTo(registeredType).Implements(tInfo.nonPtrType) {
+ v.Set(reflect.New(registeredType))
+ v = v.Elem()
+ tInfo = getTypeInfo(registeredType)
+ }
+ }
+ }
+ }
+ }
+
+ // Create new value for the pointer v to point to.
+ // At this point, CBOR value is not nil/undefined if v is a pointer.
+ for v.Kind() == reflect.Ptr {
+ if v.IsNil() {
+ if !v.CanSet() {
+ d.skip()
+ return errors.New("cbor: cannot set new value for " + v.Type().String())
+ }
+ v.Set(reflect.New(v.Type().Elem()))
+ }
+ v = v.Elem()
+ }
+
+ // Strip self-described CBOR tag number.
+ for d.nextCBORType() == cborTypeTag {
+ off := d.off
+ _, _, tagNum := d.getHead()
+ if tagNum != tagNumSelfDescribedCBOR {
+ d.off = off
+ break
+ }
+ }
+
+ // Check validity of supported built-in tags.
+ off := d.off
+ for d.nextCBORType() == cborTypeTag {
+ _, _, tagNum := d.getHead()
+ if err := validBuiltinTag(tagNum, d.data[d.off]); err != nil {
+ d.skip()
+ return err
+ }
+ }
+ d.off = off
+
+ if tInfo.spclType != specialTypeNone {
+ switch tInfo.spclType {
+ case specialTypeEmptyIface:
+ iv, err := d.parse(false) // Skipped self-described CBOR tag number already.
+ if iv != nil {
+ v.Set(reflect.ValueOf(iv))
+ }
+ return err
+
+ case specialTypeTag:
+ return d.parseToTag(v)
+
+ case specialTypeTime:
+ if d.nextCBORNil() {
+ // Decoding CBOR null and undefined to time.Time is no-op.
+ d.skip()
+ return nil
+ }
+ tm, ok, err := d.parseToTime()
+ if err != nil {
+ return err
+ }
+ if ok {
+ v.Set(reflect.ValueOf(tm))
+ }
+ return nil
+
+ case specialTypeUnmarshalerIface:
+ return d.parseToUnmarshaler(v)
+ }
+ }
+
+ // Check registered tag number
+ if tagItem := d.getRegisteredTagItem(tInfo.nonPtrType); tagItem != nil {
+ t := d.nextCBORType()
+ if t != cborTypeTag {
+ if tagItem.opts.DecTag == DecTagRequired {
+ d.skip() // Required tag number is absent, skip entire tag
+ return &UnmarshalTypeError{
+ CBORType: t.String(),
+ GoType: tInfo.typ.String(),
+ errorMsg: "expect CBOR tag value"}
+ }
+ } else if err := d.validRegisteredTagNums(tagItem); err != nil {
+ d.skip() // Skip tag content
+ return err
+ }
+ }
+
+ t := d.nextCBORType()
+
+ switch t {
+ case cborTypePositiveInt:
+ _, _, val := d.getHead()
+ return fillPositiveInt(t, val, v)
+
+ case cborTypeNegativeInt:
+ _, _, val := d.getHead()
+ if val > math.MaxInt64 {
+ // CBOR negative integer overflows int64, use big.Int to store value.
+ bi := new(big.Int)
+ bi.SetUint64(val)
+ bi.Add(bi, big.NewInt(1))
+ bi.Neg(bi)
+
+ if tInfo.nonPtrType == typeBigInt {
+ v.Set(reflect.ValueOf(*bi))
+ return nil
+ }
+ return &UnmarshalTypeError{
+ CBORType: t.String(),
+ GoType: tInfo.nonPtrType.String(),
+ errorMsg: bi.String() + " overflows Go's int64",
+ }
+ }
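+		// For val <= math.MaxInt64, int64(-1) ^ int64(val) equals -(val+1), which is the
+		// integer value encoded by this CBOR negative integer, computed without overflow.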
+ nValue := int64(-1) ^ int64(val)
+ return fillNegativeInt(t, nValue, v)
+
+ case cborTypeByteString:
+ b, copied := d.parseByteString()
+ b, converted, err := d.applyByteStringTextConversion(b, v.Type())
+ if err != nil {
+ return err
+ }
+ copied = copied || converted
+ return fillByteString(t, b, !copied, v, d.dm.byteStringToString, d.dm.binaryUnmarshaler)
+
+ case cborTypeTextString:
+ b, err := d.parseTextString()
+ if err != nil {
+ return err
+ }
+ return fillTextString(t, b, v)
+
+ case cborTypePrimitives:
+ _, ai, val := d.getHead()
+ switch ai {
+ case additionalInformationAsFloat16:
+ f := float64(float16.Frombits(uint16(val)).Float32())
+ return fillFloat(t, f, v)
+
+ case additionalInformationAsFloat32:
+ f := float64(math.Float32frombits(uint32(val)))
+ return fillFloat(t, f, v)
+
+ case additionalInformationAsFloat64:
+ f := math.Float64frombits(val)
+ return fillFloat(t, f, v)
+
+ default: // ai <= 24
+ if d.dm.simpleValues.rejected[SimpleValue(val)] {
+ return &UnacceptableDataItemError{
+ CBORType: t.String(),
+ Message: "simple value " + strconv.FormatInt(int64(val), 10) + " is not recognized",
+ }
+ }
+
+ switch ai {
+ case additionalInformationAsFalse,
+ additionalInformationAsTrue:
+ return fillBool(t, ai == additionalInformationAsTrue, v)
+
+ case additionalInformationAsNull,
+ additionalInformationAsUndefined:
+ return fillNil(t, v)
+
+ default:
+ return fillPositiveInt(t, val, v)
+ }
+ }
+
+ case cborTypeTag:
+ _, _, tagNum := d.getHead()
+ switch tagNum {
+ case tagNumUnsignedBignum:
+ // Bignum (tag 2) can be decoded to uint, int, float, slice, array, or big.Int.
+ b, copied := d.parseByteString()
+ bi := new(big.Int).SetBytes(b)
+
+ if tInfo.nonPtrType == typeBigInt {
+ v.Set(reflect.ValueOf(*bi))
+ return nil
+ }
+ if tInfo.nonPtrKind == reflect.Slice || tInfo.nonPtrKind == reflect.Array {
+ return fillByteString(t, b, !copied, v, ByteStringToStringForbidden, d.dm.binaryUnmarshaler)
+ }
+ if bi.IsUint64() {
+ return fillPositiveInt(t, bi.Uint64(), v)
+ }
+ return &UnmarshalTypeError{
+ CBORType: t.String(),
+ GoType: tInfo.nonPtrType.String(),
+ errorMsg: bi.String() + " overflows " + v.Type().String(),
+ }
+
+ case tagNumNegativeBignum:
+ // Bignum (tag 3) can be decoded to int, float, slice, array, or big.Int.
+ b, copied := d.parseByteString()
+ bi := new(big.Int).SetBytes(b)
+ bi.Add(bi, big.NewInt(1))
+ bi.Neg(bi)
+
+ if tInfo.nonPtrType == typeBigInt {
+ v.Set(reflect.ValueOf(*bi))
+ return nil
+ }
+ if tInfo.nonPtrKind == reflect.Slice || tInfo.nonPtrKind == reflect.Array {
+ return fillByteString(t, b, !copied, v, ByteStringToStringForbidden, d.dm.binaryUnmarshaler)
+ }
+ if bi.IsInt64() {
+ return fillNegativeInt(t, bi.Int64(), v)
+ }
+ return &UnmarshalTypeError{
+ CBORType: t.String(),
+ GoType: tInfo.nonPtrType.String(),
+ errorMsg: bi.String() + " overflows " + v.Type().String(),
+ }
+
+ case tagNumExpectedLaterEncodingBase64URL, tagNumExpectedLaterEncodingBase64, tagNumExpectedLaterEncodingBase16:
+ // If conversion for interoperability with text encodings is not configured,
+ // treat tags 21-23 as unregistered tags.
+ if d.dm.byteStringToString == ByteStringToStringAllowedWithExpectedLaterEncoding || d.dm.byteStringExpectedFormat != ByteStringExpectedFormatNone {
+ d.expectedLaterEncodingTags = append(d.expectedLaterEncodingTags, tagNum)
+ defer func() {
+ d.expectedLaterEncodingTags = d.expectedLaterEncodingTags[:len(d.expectedLaterEncodingTags)-1]
+ }()
+ }
+ }
+
+ return d.parseToValue(v, tInfo)
+
+ case cborTypeArray:
+ if tInfo.nonPtrKind == reflect.Slice {
+ return d.parseArrayToSlice(v, tInfo)
+ } else if tInfo.nonPtrKind == reflect.Array {
+ return d.parseArrayToArray(v, tInfo)
+ } else if tInfo.nonPtrKind == reflect.Struct {
+ return d.parseArrayToStruct(v, tInfo)
+ }
+ d.skip()
+ return &UnmarshalTypeError{CBORType: t.String(), GoType: tInfo.nonPtrType.String()}
+
+ case cborTypeMap:
+ if tInfo.nonPtrKind == reflect.Struct {
+ return d.parseMapToStruct(v, tInfo)
+ } else if tInfo.nonPtrKind == reflect.Map {
+ return d.parseMapToMap(v, tInfo)
+ }
+ d.skip()
+ return &UnmarshalTypeError{CBORType: t.String(), GoType: tInfo.nonPtrType.String()}
+ }
+
+ return nil
+}
+
+func (d *decoder) parseToTag(v reflect.Value) error {
+ if d.nextCBORNil() {
+ // Decoding CBOR null and undefined to cbor.Tag is no-op.
+ d.skip()
+ return nil
+ }
+
+ t := d.nextCBORType()
+ if t != cborTypeTag {
+ d.skip()
+ return &UnmarshalTypeError{CBORType: t.String(), GoType: typeTag.String()}
+ }
+
+ // Unmarshal tag number
+ _, _, num := d.getHead()
+
+ // Unmarshal tag content
+ content, err := d.parse(false)
+ if err != nil {
+ return err
+ }
+
+ v.Set(reflect.ValueOf(Tag{num, content}))
+ return nil
+}
+
+// parseToTime decodes the current data item as a time.Time. The bool return value is false if and
+// only if the destination value should remain unmodified.
+func (d *decoder) parseToTime() (time.Time, bool, error) {
+ // Verify that tag number or absence of tag number is acceptable to specified timeTag.
+ if t := d.nextCBORType(); t == cborTypeTag {
+ if d.dm.timeTag == DecTagIgnored {
+ // Skip all enclosing tags
+ for t == cborTypeTag {
+ d.getHead()
+ t = d.nextCBORType()
+ }
+ if d.nextCBORNil() {
+ d.skip()
+ return time.Time{}, false, nil
+ }
+ } else {
+ // Read tag number
+ _, _, tagNum := d.getHead()
+ if tagNum != 0 && tagNum != 1 {
+ d.skip() // skip tag content
+ return time.Time{}, false, errors.New("cbor: wrong tag number for time.Time, got " + strconv.Itoa(int(tagNum)) + ", expect 0 or 1")
+ }
+ }
+ } else {
+ if d.dm.timeTag == DecTagRequired {
+ d.skip()
+ return time.Time{}, false, &UnmarshalTypeError{CBORType: t.String(), GoType: typeTime.String(), errorMsg: "expect CBOR tag value"}
+ }
+ }
+
+ switch t := d.nextCBORType(); t {
+ case cborTypeByteString:
+ if d.dm.byteStringToTime == ByteStringToTimeAllowed {
+ b, _ := d.parseByteString()
+ t, err := time.Parse(time.RFC3339, string(b))
+ if err != nil {
+ return time.Time{}, false, fmt.Errorf("cbor: cannot set %q for time.Time: %w", string(b), err)
+ }
+ return t, true, nil
+ }
+ return time.Time{}, false, &UnmarshalTypeError{CBORType: t.String(), GoType: typeTime.String()}
+
+ case cborTypeTextString:
+ s, err := d.parseTextString()
+ if err != nil {
+ return time.Time{}, false, err
+ }
+ t, err := time.Parse(time.RFC3339, string(s))
+ if err != nil {
+ return time.Time{}, false, errors.New("cbor: cannot set " + string(s) + " for time.Time: " + err.Error())
+ }
+ return t, true, nil
+
+ case cborTypePositiveInt:
+ _, _, val := d.getHead()
+ if val > math.MaxInt64 {
+ return time.Time{}, false, &UnmarshalTypeError{
+ CBORType: t.String(),
+ GoType: typeTime.String(),
+ errorMsg: fmt.Sprintf("%d overflows Go's int64", val),
+ }
+ }
+ return time.Unix(int64(val), 0), true, nil
+
+ case cborTypeNegativeInt:
+ _, _, val := d.getHead()
+ if val > math.MaxInt64 {
+ if val == math.MaxUint64 {
+ // Maximum absolute value representable by negative integer is 2^64,
+ // not 2^64-1, so it overflows uint64.
+ return time.Time{}, false, &UnmarshalTypeError{
+ CBORType: t.String(),
+ GoType: typeTime.String(),
+ errorMsg: "-18446744073709551616 overflows Go's int64",
+ }
+ }
+ return time.Time{}, false, &UnmarshalTypeError{
+ CBORType: t.String(),
+ GoType: typeTime.String(),
+ errorMsg: fmt.Sprintf("-%d overflows Go's int64", val+1),
+ }
+ }
+ return time.Unix(int64(-1)^int64(val), 0), true, nil
+
+ case cborTypePrimitives:
+ _, ai, val := d.getHead()
+ var f float64
+ switch ai {
+ case additionalInformationAsFloat16:
+ f = float64(float16.Frombits(uint16(val)).Float32())
+
+ case additionalInformationAsFloat32:
+ f = float64(math.Float32frombits(uint32(val)))
+
+ case additionalInformationAsFloat64:
+ f = math.Float64frombits(val)
+
+ default:
+ return time.Time{}, false, &UnmarshalTypeError{CBORType: t.String(), GoType: typeTime.String()}
+ }
+
+ if math.IsNaN(f) || math.IsInf(f, 0) {
+ // https://www.rfc-editor.org/rfc/rfc8949.html#section-3.4.2-6
+ return time.Time{}, true, nil
+ }
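+		// Split the epoch float into whole seconds and the fractional part, converting the
+		// fraction to nanoseconds for time.Unix.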
+ seconds, fractional := math.Modf(f)
+ return time.Unix(int64(seconds), int64(fractional*1e9)), true, nil
+
+ default:
+ return time.Time{}, false, &UnmarshalTypeError{CBORType: t.String(), GoType: typeTime.String()}
+ }
+}
+
+// parseToUnmarshaler parses CBOR data to value implementing Unmarshaler interface.
+// It assumes data is well-formed, and does not perform bounds checking.
+func (d *decoder) parseToUnmarshaler(v reflect.Value) error {
+ if d.nextCBORNil() && v.Kind() == reflect.Ptr && v.IsNil() {
+ d.skip()
+ return nil
+ }
+
+ if v.Kind() != reflect.Ptr && v.CanAddr() {
+ v = v.Addr()
+ }
+ if u, ok := v.Interface().(Unmarshaler); ok {
+ start := d.off
+ d.skip()
+ return u.UnmarshalCBOR(d.data[start:d.off])
+ }
+ d.skip()
+ return errors.New("cbor: failed to assert " + v.Type().String() + " as cbor.Unmarshaler")
+}
+
+// parse parses CBOR data and returns value in default Go type.
+// It assumes data is well-formed, and does not perform bounds checking.
+func (d *decoder) parse(skipSelfDescribedTag bool) (interface{}, error) { //nolint:gocyclo
+ // Strip self-described CBOR tag number.
+ if skipSelfDescribedTag {
+ for d.nextCBORType() == cborTypeTag {
+ off := d.off
+ _, _, tagNum := d.getHead()
+ if tagNum != tagNumSelfDescribedCBOR {
+ d.off = off
+ break
+ }
+ }
+ }
+
+ // Check validity of supported built-in tags.
+ off := d.off
+ for d.nextCBORType() == cborTypeTag {
+ _, _, tagNum := d.getHead()
+ if err := validBuiltinTag(tagNum, d.data[d.off]); err != nil {
+ d.skip()
+ return nil, err
+ }
+ }
+ d.off = off
+
+ t := d.nextCBORType()
+ switch t {
+ case cborTypePositiveInt:
+ _, _, val := d.getHead()
+
+ switch d.dm.intDec {
+ case IntDecConvertNone:
+ return val, nil
+
+ case IntDecConvertSigned, IntDecConvertSignedOrFail:
+ if val > math.MaxInt64 {
+ return nil, &UnmarshalTypeError{
+ CBORType: t.String(),
+ GoType: reflect.TypeOf(int64(0)).String(),
+ errorMsg: strconv.FormatUint(val, 10) + " overflows Go's int64",
+ }
+ }
+
+ return int64(val), nil
+
+ case IntDecConvertSignedOrBigInt:
+ if val > math.MaxInt64 {
+ bi := new(big.Int).SetUint64(val)
+ if d.dm.bigIntDec == BigIntDecodePointer {
+ return bi, nil
+ }
+ return *bi, nil
+ }
+
+ return int64(val), nil
+
+ default:
+ // not reachable
+ }
+
+ case cborTypeNegativeInt:
+ _, _, val := d.getHead()
+
+ if val > math.MaxInt64 {
+ // CBOR negative integer value overflows Go int64, use big.Int instead.
+ bi := new(big.Int).SetUint64(val)
+ bi.Add(bi, big.NewInt(1))
+ bi.Neg(bi)
+
+ if d.dm.intDec == IntDecConvertSignedOrFail {
+ return nil, &UnmarshalTypeError{
+ CBORType: t.String(),
+ GoType: reflect.TypeOf(int64(0)).String(),
+ errorMsg: bi.String() + " overflows Go's int64",
+ }
+ }
+
+ if d.dm.bigIntDec == BigIntDecodePointer {
+ return bi, nil
+ }
+ return *bi, nil
+ }
+
+ nValue := int64(-1) ^ int64(val)
+ return nValue, nil
+
+ case cborTypeByteString:
+ b, copied := d.parseByteString()
+ var effectiveByteStringType = d.dm.defaultByteStringType
+ if effectiveByteStringType == nil {
+ effectiveByteStringType = typeByteSlice
+ }
+ b, converted, err := d.applyByteStringTextConversion(b, effectiveByteStringType)
+ if err != nil {
+ return nil, err
+ }
+ copied = copied || converted
+
+ switch effectiveByteStringType {
+ case typeByteSlice:
+ if copied {
+ return b, nil
+ }
+ clone := make([]byte, len(b))
+ copy(clone, b)
+ return clone, nil
+
+ case typeString:
+ return string(b), nil
+
+ default:
+ if copied || d.dm.defaultByteStringType.Kind() == reflect.String {
+ // Avoid an unnecessary copy since the conversion to string must
+ // copy the underlying bytes.
+ return reflect.ValueOf(b).Convert(d.dm.defaultByteStringType).Interface(), nil
+ }
+ clone := make([]byte, len(b))
+ copy(clone, b)
+ return reflect.ValueOf(clone).Convert(d.dm.defaultByteStringType).Interface(), nil
+ }
+
+ case cborTypeTextString:
+ b, err := d.parseTextString()
+ if err != nil {
+ return nil, err
+ }
+ return string(b), nil
+
+ case cborTypeTag:
+ tagOff := d.off
+ _, _, tagNum := d.getHead()
+ contentOff := d.off
+
+ switch tagNum {
+ case tagNumRFC3339Time, tagNumEpochTime:
+ d.off = tagOff
+ tm, _, err := d.parseToTime()
+ if err != nil {
+ return nil, err
+ }
+
+ switch d.dm.timeTagToAny {
+ case TimeTagToTime:
+ return tm, nil
+
+ case TimeTagToRFC3339:
+ if tagNum == 1 {
+ tm = tm.UTC()
+ }
+ // Call time.MarshalText() to format decoded time to RFC3339 format,
+ // and return error on time value that cannot be represented in
+ // RFC3339 format. E.g. year cannot exceed 9999, etc.
+ text, err := tm.Truncate(time.Second).MarshalText()
+ if err != nil {
+ return nil, fmt.Errorf("cbor: decoded time cannot be represented in RFC3339 format: %v", err)
+ }
+ return string(text), nil
+
+ case TimeTagToRFC3339Nano:
+ if tagNum == 1 {
+ tm = tm.UTC()
+ }
+ // Call time.MarshalText() to format decoded time to RFC3339 format,
+ // and return error on time value that cannot be represented in
+ // RFC3339 format with sub-second precision.
+ text, err := tm.MarshalText()
+ if err != nil {
+ return nil, fmt.Errorf("cbor: decoded time cannot be represented in RFC3339 format with sub-second precision: %v", err)
+ }
+ return string(text), nil
+
+ default:
+ // not reachable
+ }
+
+ case tagNumUnsignedBignum:
+ b, _ := d.parseByteString()
+ bi := new(big.Int).SetBytes(b)
+
+ if d.dm.bigIntDec == BigIntDecodePointer {
+ return bi, nil
+ }
+ return *bi, nil
+
+ case tagNumNegativeBignum:
+ b, _ := d.parseByteString()
+ bi := new(big.Int).SetBytes(b)
+ bi.Add(bi, big.NewInt(1))
+ bi.Neg(bi)
+
+ if d.dm.bigIntDec == BigIntDecodePointer {
+ return bi, nil
+ }
+ return *bi, nil
+
+ case tagNumExpectedLaterEncodingBase64URL, tagNumExpectedLaterEncodingBase64, tagNumExpectedLaterEncodingBase16:
+ // If conversion for interoperability with text encodings is not configured,
+ // treat tags 21-23 as unregistered tags.
+ if d.dm.byteStringToString == ByteStringToStringAllowedWithExpectedLaterEncoding ||
+ d.dm.byteStringExpectedFormat != ByteStringExpectedFormatNone {
+ d.expectedLaterEncodingTags = append(d.expectedLaterEncodingTags, tagNum)
+ defer func() {
+ d.expectedLaterEncodingTags = d.expectedLaterEncodingTags[:len(d.expectedLaterEncodingTags)-1]
+ }()
+ return d.parse(false)
+ }
+ }
+
+ if d.dm.tags != nil {
+ // Parse to specified type if tag number is registered.
+ tagNums := []uint64{tagNum}
+ for d.nextCBORType() == cborTypeTag {
+ _, _, num := d.getHead()
+ tagNums = append(tagNums, num)
+ }
+ registeredType := d.dm.tags.getTypeFromTagNum(tagNums)
+ if registeredType != nil {
+ d.off = tagOff
+ rv := reflect.New(registeredType)
+ if err := d.parseToValue(rv.Elem(), getTypeInfo(registeredType)); err != nil {
+ return nil, err
+ }
+ return rv.Elem().Interface(), nil
+ }
+ }
+
+ // Parse tag content
+ d.off = contentOff
+ content, err := d.parse(false)
+ if err != nil {
+ return nil, err
+ }
+ if d.dm.unrecognizedTagToAny == UnrecognizedTagContentToAny {
+ return content, nil
+ }
+ return Tag{tagNum, content}, nil
+
+ case cborTypePrimitives:
+ _, ai, val := d.getHead()
+ if ai <= 24 && d.dm.simpleValues.rejected[SimpleValue(val)] {
+ return nil, &UnacceptableDataItemError{
+ CBORType: t.String(),
+ Message: "simple value " + strconv.FormatInt(int64(val), 10) + " is not recognized",
+ }
+ }
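+		// ai < 20 covers simple values 0..19; ai == 24 means the simple value (32..255) is in the following byte.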
+ if ai < 20 || ai == 24 {
+ return SimpleValue(val), nil
+ }
+
+ switch ai {
+ case additionalInformationAsFalse,
+ additionalInformationAsTrue:
+ return (ai == additionalInformationAsTrue), nil
+
+ case additionalInformationAsNull,
+ additionalInformationAsUndefined:
+ return nil, nil
+
+ case additionalInformationAsFloat16:
+ f := float64(float16.Frombits(uint16(val)).Float32())
+ return f, nil
+
+ case additionalInformationAsFloat32:
+ f := float64(math.Float32frombits(uint32(val)))
+ return f, nil
+
+ case additionalInformationAsFloat64:
+ f := math.Float64frombits(val)
+ return f, nil
+ }
+
+ case cborTypeArray:
+ return d.parseArray()
+
+ case cborTypeMap:
+ if d.dm.defaultMapType != nil {
+ m := reflect.New(d.dm.defaultMapType)
+ err := d.parseToValue(m, getTypeInfo(m.Elem().Type()))
+ if err != nil {
+ return nil, err
+ }
+ return m.Elem().Interface(), nil
+ }
+ return d.parseMap()
+ }
+
+ return nil, nil
+}
+
+// parseByteString parses a CBOR encoded byte string. The returned byte slice
+// may be backed directly by the input. The second return value will be true if
+// and only if the slice is backed by a copy of the input. Callers are
+// responsible for making a copy if necessary.
+func (d *decoder) parseByteString() ([]byte, bool) {
+ _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag()
+ if !indefiniteLength {
+ b := d.data[d.off : d.off+int(val)]
+ d.off += int(val)
+ return b, false
+ }
+ // Process indefinite length string chunks.
+ b := []byte{}
+ for !d.foundBreak() {
+ _, _, val = d.getHead()
+ b = append(b, d.data[d.off:d.off+int(val)]...)
+ d.off += int(val)
+ }
+ return b, true
+}
+
+// applyByteStringTextConversion converts bytes read from a byte string to or from a configured text
+// encoding. If no transformation was performed (because it was not required), the original byte
+// slice is returned and the bool return value is false. Otherwise, a new slice containing the
+// converted bytes is returned along with the bool value true.
+func (d *decoder) applyByteStringTextConversion(
+ src []byte,
+ dstType reflect.Type,
+) (
+ dst []byte,
+ transformed bool,
+ err error,
+) {
+ switch dstType.Kind() {
+ case reflect.String:
+ if d.dm.byteStringToString != ByteStringToStringAllowedWithExpectedLaterEncoding || len(d.expectedLaterEncodingTags) == 0 {
+ return src, false, nil
+ }
+
+ switch d.expectedLaterEncodingTags[len(d.expectedLaterEncodingTags)-1] {
+ case tagNumExpectedLaterEncodingBase64URL:
+ encoded := make([]byte, base64.RawURLEncoding.EncodedLen(len(src)))
+ base64.RawURLEncoding.Encode(encoded, src)
+ return encoded, true, nil
+
+ case tagNumExpectedLaterEncodingBase64:
+ encoded := make([]byte, base64.StdEncoding.EncodedLen(len(src)))
+ base64.StdEncoding.Encode(encoded, src)
+ return encoded, true, nil
+
+ case tagNumExpectedLaterEncodingBase16:
+ encoded := make([]byte, hex.EncodedLen(len(src)))
+ hex.Encode(encoded, src)
+ return encoded, true, nil
+
+ default:
+ // If this happens, there is a bug: the decoder has pushed an invalid
+ // "expected later encoding" tag to the stack.
+ panic(fmt.Sprintf("unrecognized expected later encoding tag: %d", d.expectedLaterEncodingTags))
+ }
+
+ case reflect.Slice:
+ if dstType.Elem().Kind() != reflect.Uint8 || len(d.expectedLaterEncodingTags) > 0 {
+ // Either the destination is not a slice of bytes, or the encoder that
+ // produced the input indicated an expected text encoding tag and therefore
+ // the content of the byte string has NOT been text encoded.
+ return src, false, nil
+ }
+
+ switch d.dm.byteStringExpectedFormat {
+ case ByteStringExpectedBase64URL:
+ decoded := make([]byte, base64.RawURLEncoding.DecodedLen(len(src)))
+ n, err := base64.RawURLEncoding.Decode(decoded, src)
+ if err != nil {
+ return nil, false, newByteStringExpectedFormatError(ByteStringExpectedBase64URL, err)
+ }
+ return decoded[:n], true, nil
+
+ case ByteStringExpectedBase64:
+ decoded := make([]byte, base64.StdEncoding.DecodedLen(len(src)))
+ n, err := base64.StdEncoding.Decode(decoded, src)
+ if err != nil {
+ return nil, false, newByteStringExpectedFormatError(ByteStringExpectedBase64, err)
+ }
+ return decoded[:n], true, nil
+
+ case ByteStringExpectedBase16:
+ decoded := make([]byte, hex.DecodedLen(len(src)))
+ n, err := hex.Decode(decoded, src)
+ if err != nil {
+ return nil, false, newByteStringExpectedFormatError(ByteStringExpectedBase16, err)
+ }
+ return decoded[:n], true, nil
+ }
+ }
+
+ return src, false, nil
+}
+
+// parseTextString parses a CBOR encoded text string. It returns a byte slice
+// to avoid creating an extra copy of the string. The caller should convert the
+// returned byte slice to a string when needed.
+func (d *decoder) parseTextString() ([]byte, error) {
+ _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag()
+ if !indefiniteLength {
+ b := d.data[d.off : d.off+int(val)]
+ d.off += int(val)
+ if d.dm.utf8 == UTF8RejectInvalid && !utf8.Valid(b) {
+ return nil, &SemanticError{"cbor: invalid UTF-8 string"}
+ }
+ return b, nil
+ }
+ // Process indefinite length string chunks.
+ b := []byte{}
+ for !d.foundBreak() {
+ _, _, val = d.getHead()
+ x := d.data[d.off : d.off+int(val)]
+ d.off += int(val)
+ if d.dm.utf8 == UTF8RejectInvalid && !utf8.Valid(x) {
+ for !d.foundBreak() {
+ d.skip() // Skip remaining chunk on error
+ }
+ return nil, &SemanticError{"cbor: invalid UTF-8 string"}
+ }
+ b = append(b, x...)
+ }
+ return b, nil
+}
+
+func (d *decoder) parseArray() ([]interface{}, error) {
+ _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag()
+ hasSize := !indefiniteLength
+ count := int(val)
+ if !hasSize {
+ count = d.numOfItemsUntilBreak() // peek ahead to get array size to preallocate slice for better performance
+ }
+ v := make([]interface{}, count)
+ var e interface{}
+ var err, lastErr error
+ for i := 0; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ {
+ if e, lastErr = d.parse(true); lastErr != nil {
+ if err == nil {
+ err = lastErr
+ }
+ continue
+ }
+ v[i] = e
+ }
+ return v, err
+}
+
+func (d *decoder) parseArrayToSlice(v reflect.Value, tInfo *typeInfo) error {
+ _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag()
+ hasSize := !indefiniteLength
+ count := int(val)
+ if !hasSize {
+ count = d.numOfItemsUntilBreak() // peek ahead to get array size to preallocate slice for better performance
+ }
+ if v.IsNil() || v.Cap() < count || count == 0 {
+ v.Set(reflect.MakeSlice(tInfo.nonPtrType, count, count))
+ }
+ v.SetLen(count)
+ var err error
+ for i := 0; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ {
+ if lastErr := d.parseToValue(v.Index(i), tInfo.elemTypeInfo); lastErr != nil {
+ if err == nil {
+ err = lastErr
+ }
+ }
+ }
+ return err
+}
+
+func (d *decoder) parseArrayToArray(v reflect.Value, tInfo *typeInfo) error {
+ _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag()
+ hasSize := !indefiniteLength
+ count := int(val)
+ gi := 0
+ vLen := v.Len()
+ var err error
+ for ci := 0; (hasSize && ci < count) || (!hasSize && !d.foundBreak()); ci++ {
+ if gi < vLen {
+ // Read CBOR array element and set array element
+ if lastErr := d.parseToValue(v.Index(gi), tInfo.elemTypeInfo); lastErr != nil {
+ if err == nil {
+ err = lastErr
+ }
+ }
+ gi++
+ } else {
+ d.skip() // Skip remaining CBOR array element
+ }
+ }
+ // Set remaining Go array elements to zero values.
+ if gi < vLen {
+ zeroV := reflect.Zero(tInfo.elemTypeInfo.typ)
+ for ; gi < vLen; gi++ {
+ v.Index(gi).Set(zeroV)
+ }
+ }
+ return err
+}
+
+func (d *decoder) parseMap() (interface{}, error) {
+ _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag()
+ hasSize := !indefiniteLength
+ count := int(val)
+ m := make(map[interface{}]interface{})
+ var k, e interface{}
+ var err, lastErr error
+ keyCount := 0
+ for i := 0; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ {
+ // Parse CBOR map key.
+ if k, lastErr = d.parse(true); lastErr != nil {
+ if err == nil {
+ err = lastErr
+ }
+ d.skip()
+ continue
+ }
+
+ // Detect if CBOR map key can be used as Go map key.
+ rv := reflect.ValueOf(k)
+ if !isHashableValue(rv) {
+ var converted bool
+ if d.dm.mapKeyByteString == MapKeyByteStringAllowed {
+ k, converted = convertByteSliceToByteString(k)
+ }
+ if !converted {
+ if err == nil {
+ err = &InvalidMapKeyTypeError{rv.Type().String()}
+ }
+ d.skip()
+ continue
+ }
+ }
+
+ // Parse CBOR map value.
+ if e, lastErr = d.parse(true); lastErr != nil {
+ if err == nil {
+ err = lastErr
+ }
+ continue
+ }
+
+ // Add key-value pair to Go map.
+ m[k] = e
+
+ // Detect duplicate map key.
+ if d.dm.dupMapKey == DupMapKeyEnforcedAPF {
+ newKeyCount := len(m)
+ if newKeyCount == keyCount {
+ m[k] = nil
+ err = &DupMapKeyError{k, i}
+ i++
+ // skip the rest of the map
+ for ; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ {
+ d.skip() // Skip map key
+ d.skip() // Skip map value
+ }
+ return m, err
+ }
+ keyCount = newKeyCount
+ }
+ }
+ return m, err
+}
+
+func (d *decoder) parseMapToMap(v reflect.Value, tInfo *typeInfo) error { //nolint:gocyclo
+ _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag()
+ hasSize := !indefiniteLength
+ count := int(val)
+ if v.IsNil() {
+ mapsize := count
+ if !hasSize {
+ mapsize = 0
+ }
+ v.Set(reflect.MakeMapWithSize(tInfo.nonPtrType, mapsize))
+ }
+ keyType, eleType := tInfo.keyTypeInfo.typ, tInfo.elemTypeInfo.typ
+ reuseKey, reuseEle := isImmutableKind(tInfo.keyTypeInfo.kind), isImmutableKind(tInfo.elemTypeInfo.kind)
+ var keyValue, eleValue, zeroKeyValue, zeroEleValue reflect.Value
+ keyIsInterfaceType := keyType == typeIntf // If key type is interface{}, need to check if key value is hashable.
+ var err, lastErr error
+ keyCount := v.Len()
+ var existingKeys map[interface{}]bool // Store existing map keys, used for detecting duplicate map key.
+ if d.dm.dupMapKey == DupMapKeyEnforcedAPF {
+ existingKeys = make(map[interface{}]bool, keyCount)
+ if keyCount > 0 {
+ vKeys := v.MapKeys()
+ for i := 0; i < len(vKeys); i++ {
+ existingKeys[vKeys[i].Interface()] = true
+ }
+ }
+ }
+ for i := 0; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ {
+ // Parse CBOR map key.
+ if !keyValue.IsValid() {
+ keyValue = reflect.New(keyType).Elem()
+ } else if !reuseKey {
+ if !zeroKeyValue.IsValid() {
+ zeroKeyValue = reflect.Zero(keyType)
+ }
+ keyValue.Set(zeroKeyValue)
+ }
+ if lastErr = d.parseToValue(keyValue, tInfo.keyTypeInfo); lastErr != nil {
+ if err == nil {
+ err = lastErr
+ }
+ d.skip()
+ continue
+ }
+
+ // Detect if CBOR map key can be used as Go map key.
+ if keyIsInterfaceType && keyValue.Elem().IsValid() {
+ if !isHashableValue(keyValue.Elem()) {
+ var converted bool
+ if d.dm.mapKeyByteString == MapKeyByteStringAllowed {
+ var k interface{}
+ k, converted = convertByteSliceToByteString(keyValue.Elem().Interface())
+ if converted {
+ keyValue.Set(reflect.ValueOf(k))
+ }
+ }
+ if !converted {
+ if err == nil {
+ err = &InvalidMapKeyTypeError{keyValue.Elem().Type().String()}
+ }
+ d.skip()
+ continue
+ }
+ }
+ }
+
+ // Parse CBOR map value.
+ if !eleValue.IsValid() {
+ eleValue = reflect.New(eleType).Elem()
+ } else if !reuseEle {
+ if !zeroEleValue.IsValid() {
+ zeroEleValue = reflect.Zero(eleType)
+ }
+ eleValue.Set(zeroEleValue)
+ }
+ if lastErr := d.parseToValue(eleValue, tInfo.elemTypeInfo); lastErr != nil {
+ if err == nil {
+ err = lastErr
+ }
+ continue
+ }
+
+ // Add key-value pair to Go map.
+ v.SetMapIndex(keyValue, eleValue)
+
+ // Detect duplicate map key.
+ if d.dm.dupMapKey == DupMapKeyEnforcedAPF {
+ newKeyCount := v.Len()
+ if newKeyCount == keyCount {
+ kvi := keyValue.Interface()
+ if !existingKeys[kvi] {
+ v.SetMapIndex(keyValue, reflect.New(eleType).Elem())
+ err = &DupMapKeyError{kvi, i}
+ i++
+ // skip the rest of the map
+ for ; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ {
+ d.skip() // skip map key
+ d.skip() // skip map value
+ }
+ return err
+ }
+ delete(existingKeys, kvi)
+ }
+ keyCount = newKeyCount
+ }
+ }
+ return err
+}
+
+func (d *decoder) parseArrayToStruct(v reflect.Value, tInfo *typeInfo) error {
+ structType := getDecodingStructType(tInfo.nonPtrType)
+ if structType.err != nil {
+ return structType.err
+ }
+
+ if !structType.toArray {
+ t := d.nextCBORType()
+ d.skip()
+ return &UnmarshalTypeError{
+ CBORType: t.String(),
+ GoType: tInfo.nonPtrType.String(),
+ errorMsg: "cannot decode CBOR array to struct without toarray option",
+ }
+ }
+
+ start := d.off
+ _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag()
+ hasSize := !indefiniteLength
+ count := int(val)
+ if !hasSize {
+ count = d.numOfItemsUntilBreak() // peek ahead to get array size
+ }
+ if count != len(structType.fields) {
+ d.off = start
+ d.skip()
+ return &UnmarshalTypeError{
+ CBORType: cborTypeArray.String(),
+ GoType: tInfo.typ.String(),
+ errorMsg: "cannot decode CBOR array to struct with different number of elements",
+ }
+ }
+ var err, lastErr error
+ for i := 0; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ {
+ f := structType.fields[i]
+
+ // Get field value by index
+ var fv reflect.Value
+ if len(f.idx) == 1 {
+ fv = v.Field(f.idx[0])
+ } else {
+ fv, lastErr = getFieldValue(v, f.idx, func(v reflect.Value) (reflect.Value, error) {
+ // Return a new value for embedded field null pointer to point to, or return error.
+ if !v.CanSet() {
+ return reflect.Value{}, errors.New("cbor: cannot set embedded pointer to unexported struct: " + v.Type().String())
+ }
+ v.Set(reflect.New(v.Type().Elem()))
+ return v, nil
+ })
+ if lastErr != nil && err == nil {
+ err = lastErr
+ }
+ if !fv.IsValid() {
+ d.skip()
+ continue
+ }
+ }
+
+ if lastErr = d.parseToValue(fv, f.typInfo); lastErr != nil {
+ if err == nil {
+ if typeError, ok := lastErr.(*UnmarshalTypeError); ok {
+ typeError.StructFieldName = tInfo.typ.String() + "." + f.name
+ err = typeError
+ } else {
+ err = lastErr
+ }
+ }
+ }
+ }
+ return err
+}
+
+// parseMapToStruct needs to be fast so gocyclo can be ignored for now.
+func (d *decoder) parseMapToStruct(v reflect.Value, tInfo *typeInfo) error { //nolint:gocyclo
+ structType := getDecodingStructType(tInfo.nonPtrType)
+ if structType.err != nil {
+ return structType.err
+ }
+
+ if structType.toArray {
+ t := d.nextCBORType()
+ d.skip()
+ return &UnmarshalTypeError{
+ CBORType: t.String(),
+ GoType: tInfo.nonPtrType.String(),
+ errorMsg: "cannot decode CBOR map to struct with toarray option",
+ }
+ }
+
+ var err, lastErr error
+
+ // Get CBOR map size
+ _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag()
+ hasSize := !indefiniteLength
+ count := int(val)
+
+ // Keeps track of matched struct fields
+ var foundFldIdx []bool
+ {
+ const maxStackFields = 128
+ if nfields := len(structType.fields); nfields <= maxStackFields {
+ // For structs with typical field counts, expect that this can be
+ // stack-allocated.
+ var a [maxStackFields]bool
+ foundFldIdx = a[:nfields]
+ } else {
+ foundFldIdx = make([]bool, len(structType.fields))
+ }
+ }
+
+ // Keeps track of CBOR map keys to detect duplicate map key
+ keyCount := 0
+ var mapKeys map[interface{}]struct{}
+
+ errOnUnknownField := (d.dm.extraReturnErrors & ExtraDecErrorUnknownField) > 0
+
+MapEntryLoop:
+ for j := 0; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ {
+ var f *field
+
+ // If duplicate field detection is enabled and the key at index j did not match any
+ // field, k will hold the map key.
+ var k interface{}
+
+ t := d.nextCBORType()
+ if t == cborTypeTextString || (t == cborTypeByteString && d.dm.fieldNameByteString == FieldNameByteStringAllowed) {
+ var keyBytes []byte
+ if t == cborTypeTextString {
+ keyBytes, lastErr = d.parseTextString()
+ if lastErr != nil {
+ if err == nil {
+ err = lastErr
+ }
+ d.skip() // skip value
+ continue
+ }
+ } else { // cborTypeByteString
+ keyBytes, _ = d.parseByteString()
+ }
+
+ // Check for exact match on field name.
+ if i, ok := structType.fieldIndicesByName[string(keyBytes)]; ok {
+ fld := structType.fields[i]
+
+ if !foundFldIdx[i] {
+ f = fld
+ foundFldIdx[i] = true
+ } else if d.dm.dupMapKey == DupMapKeyEnforcedAPF {
+ err = &DupMapKeyError{fld.name, j}
+ d.skip() // skip value
+ j++
+ // skip the rest of the map
+ for ; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ {
+ d.skip()
+ d.skip()
+ }
+ return err
+ } else {
+ // discard repeated match
+ d.skip()
+ continue MapEntryLoop
+ }
+ }
+
+ // Find field with case-insensitive match
+ if f == nil && d.dm.fieldNameMatching == FieldNameMatchingPreferCaseSensitive {
+ keyLen := len(keyBytes)
+ keyString := string(keyBytes)
+ for i := 0; i < len(structType.fields); i++ {
+ fld := structType.fields[i]
+ if len(fld.name) == keyLen && strings.EqualFold(fld.name, keyString) {
+ if !foundFldIdx[i] {
+ f = fld
+ foundFldIdx[i] = true
+ } else if d.dm.dupMapKey == DupMapKeyEnforcedAPF {
+ err = &DupMapKeyError{keyString, j}
+ d.skip() // skip value
+ j++
+ // skip the rest of the map
+ for ; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ {
+ d.skip()
+ d.skip()
+ }
+ return err
+ } else {
+ // discard repeated match
+ d.skip()
+ continue MapEntryLoop
+ }
+ break
+ }
+ }
+ }
+
+ if d.dm.dupMapKey == DupMapKeyEnforcedAPF && f == nil {
+ k = string(keyBytes)
+ }
+ } else if t <= cborTypeNegativeInt { // uint/int
+ var nameAsInt int64
+
+ if t == cborTypePositiveInt {
+ _, _, val := d.getHead()
+ nameAsInt = int64(val)
+ } else {
+ _, _, val := d.getHead()
+ if val > math.MaxInt64 {
+ if err == nil {
+ err = &UnmarshalTypeError{
+ CBORType: t.String(),
+ GoType: reflect.TypeOf(int64(0)).String(),
+ errorMsg: "-1-" + strconv.FormatUint(val, 10) + " overflows Go's int64",
+ }
+ }
+ d.skip() // skip value
+ continue
+ }
+ nameAsInt = int64(-1) ^ int64(val)
+ }
+
+ // Find field
+ for i := 0; i < len(structType.fields); i++ {
+ fld := structType.fields[i]
+ if fld.keyAsInt && fld.nameAsInt == nameAsInt {
+ if !foundFldIdx[i] {
+ f = fld
+ foundFldIdx[i] = true
+ } else if d.dm.dupMapKey == DupMapKeyEnforcedAPF {
+ err = &DupMapKeyError{nameAsInt, j}
+ d.skip() // skip value
+ j++
+ // skip the rest of the map
+ for ; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ {
+ d.skip()
+ d.skip()
+ }
+ return err
+ } else {
+ // discard repeated match
+ d.skip()
+ continue MapEntryLoop
+ }
+ break
+ }
+ }
+
+ if d.dm.dupMapKey == DupMapKeyEnforcedAPF && f == nil {
+ k = nameAsInt
+ }
+ } else {
+ if err == nil {
+ err = &UnmarshalTypeError{
+ CBORType: t.String(),
+ GoType: reflect.TypeOf("").String(),
+ errorMsg: "map key is of type " + t.String() + " and cannot be used to match struct field name",
+ }
+ }
+ if d.dm.dupMapKey == DupMapKeyEnforcedAPF {
+ // parse key
+ k, lastErr = d.parse(true)
+ if lastErr != nil {
+ d.skip() // skip value
+ continue
+ }
+ // Detect if CBOR map key can be used as Go map key.
+ if !isHashableValue(reflect.ValueOf(k)) {
+ d.skip() // skip value
+ continue
+ }
+ } else {
+ d.skip() // skip key
+ }
+ }
+
+ if f == nil {
+ if errOnUnknownField {
+ err = &UnknownFieldError{j}
+ d.skip() // Skip value
+ j++
+ // skip the rest of the map
+ for ; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ {
+ d.skip()
+ d.skip()
+ }
+ return err
+ }
+
+ // Two map keys that match the same struct field are immediately considered
+ // duplicates. This check detects duplicates between two map keys that do
+ // not match a struct field. If unknown field errors are enabled, then this
+ // check is never reached.
+ if d.dm.dupMapKey == DupMapKeyEnforcedAPF {
+ if mapKeys == nil {
+ mapKeys = make(map[interface{}]struct{}, 1)
+ }
+ mapKeys[k] = struct{}{}
+ newKeyCount := len(mapKeys)
+ if newKeyCount == keyCount {
+ err = &DupMapKeyError{k, j}
+ d.skip() // skip value
+ j++
+ // skip the rest of the map
+ for ; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ {
+ d.skip()
+ d.skip()
+ }
+ return err
+ }
+ keyCount = newKeyCount
+ }
+
+ d.skip() // Skip value
+ continue
+ }
+
+ // Get field value by index
+ var fv reflect.Value
+ if len(f.idx) == 1 {
+ fv = v.Field(f.idx[0])
+ } else {
+ fv, lastErr = getFieldValue(v, f.idx, func(v reflect.Value) (reflect.Value, error) {
+ // Return a new value for embedded field null pointer to point to, or return error.
+ if !v.CanSet() {
+ return reflect.Value{}, errors.New("cbor: cannot set embedded pointer to unexported struct: " + v.Type().String())
+ }
+ v.Set(reflect.New(v.Type().Elem()))
+ return v, nil
+ })
+ if lastErr != nil && err == nil {
+ err = lastErr
+ }
+ if !fv.IsValid() {
+ d.skip()
+ continue
+ }
+ }
+
+ if lastErr = d.parseToValue(fv, f.typInfo); lastErr != nil {
+ if err == nil {
+ if typeError, ok := lastErr.(*UnmarshalTypeError); ok {
+ typeError.StructFieldName = tInfo.nonPtrType.String() + "." + f.name
+ err = typeError
+ } else {
+ err = lastErr
+ }
+ }
+ }
+ }
+ return err
+}
+
+// validRegisteredTagNums verifies that the encoded tag numbers match the tag numbers registered for the expected type.
+// validRegisteredTagNums assumes the next CBOR data item is a tag. It scans all tag numbers and stops at the tag content.
+func (d *decoder) validRegisteredTagNums(registeredTag *tagItem) error {
+ // Scan until next cbor data is tag content.
+ tagNums := make([]uint64, 0, 1)
+ for d.nextCBORType() == cborTypeTag {
+ _, _, val := d.getHead()
+ tagNums = append(tagNums, val)
+ }
+
+ if !registeredTag.equalTagNum(tagNums) {
+ return &WrongTagError{registeredTag.contentType, registeredTag.num, tagNums}
+ }
+ return nil
+}
+
+func (d *decoder) getRegisteredTagItem(vt reflect.Type) *tagItem {
+ if d.dm.tags != nil {
+ return d.dm.tags.getTagItemFromType(vt)
+ }
+ return nil
+}
+
+// skip moves data offset to the next item. skip assumes data is well-formed,
+// and does not perform bounds checking.
+func (d *decoder) skip() {
+ t, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag()
+
+ if indefiniteLength {
+ switch t {
+ case cborTypeByteString, cborTypeTextString, cborTypeArray, cborTypeMap:
+ for {
+ if isBreakFlag(d.data[d.off]) {
+ d.off++
+ return
+ }
+ d.skip()
+ }
+ }
+ }
+
+ switch t {
+ case cborTypeByteString, cborTypeTextString:
+ d.off += int(val)
+
+ case cborTypeArray:
+ for i := 0; i < int(val); i++ {
+ d.skip()
+ }
+
+ case cborTypeMap:
+ for i := 0; i < int(val)*2; i++ {
+ d.skip()
+ }
+
+ case cborTypeTag:
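+		// A tag header is followed by exactly one data item (the tag content); skip it.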
+ d.skip()
+ }
+}
+
+func (d *decoder) getHeadWithIndefiniteLengthFlag() (
+ t cborType,
+ ai byte,
+ val uint64,
+ indefiniteLength bool,
+) {
+ t, ai, val = d.getHead()
+ indefiniteLength = additionalInformation(ai).isIndefiniteLength()
+ return
+}
+
+// getHead assumes data is well-formed, and does not perform bounds checking.
+func (d *decoder) getHead() (t cborType, ai byte, val uint64) {
+ t, ai = parseInitialByte(d.data[d.off])
+ val = uint64(ai)
+ d.off++
+
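+	// Additional information 0..23 encodes the argument directly in the initial byte,
+	// so no further bytes need to be read.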
+ if ai <= maxAdditionalInformationWithoutArgument {
+ return
+ }
+
+ if ai == additionalInformationWith1ByteArgument {
+ val = uint64(d.data[d.off])
+ d.off++
+ return
+ }
+
+ if ai == additionalInformationWith2ByteArgument {
+ const argumentSize = 2
+ val = uint64(binary.BigEndian.Uint16(d.data[d.off : d.off+argumentSize]))
+ d.off += argumentSize
+ return
+ }
+
+ if ai == additionalInformationWith4ByteArgument {
+ const argumentSize = 4
+ val = uint64(binary.BigEndian.Uint32(d.data[d.off : d.off+argumentSize]))
+ d.off += argumentSize
+ return
+ }
+
+ if ai == additionalInformationWith8ByteArgument {
+ const argumentSize = 8
+ val = binary.BigEndian.Uint64(d.data[d.off : d.off+argumentSize])
+ d.off += argumentSize
+ return
+ }
+ return
+}
+
+func (d *decoder) numOfItemsUntilBreak() int {
+ savedOff := d.off
+ i := 0
+ for !d.foundBreak() {
+ d.skip()
+ i++
+ }
+ d.off = savedOff
+ return i
+}
+
+// foundBreak returns true if next byte is CBOR break code and moves cursor by 1,
+// otherwise it returns false.
+// foundBreak assumes data is well-formed, and does not perform bounds checking.
+func (d *decoder) foundBreak() bool {
+ if isBreakFlag(d.data[d.off]) {
+ d.off++
+ return true
+ }
+ return false
+}
+
+func (d *decoder) reset(data []byte) {
+ d.data = data
+ d.off = 0
+ d.expectedLaterEncodingTags = d.expectedLaterEncodingTags[:0]
+}
+
+func (d *decoder) nextCBORType() cborType {
+ return getType(d.data[d.off])
+}
+
+func (d *decoder) nextCBORNil() bool {
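+	// 0xf6 and 0xf7 are the initial bytes of CBOR null and undefined, respectively.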
+ return d.data[d.off] == 0xf6 || d.data[d.off] == 0xf7
+}
+
+var (
+ typeIntf = reflect.TypeOf([]interface{}(nil)).Elem()
+ typeTime = reflect.TypeOf(time.Time{})
+ typeBigInt = reflect.TypeOf(big.Int{})
+ typeUnmarshaler = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
+ typeBinaryUnmarshaler = reflect.TypeOf((*encoding.BinaryUnmarshaler)(nil)).Elem()
+ typeString = reflect.TypeOf("")
+ typeByteSlice = reflect.TypeOf([]byte(nil))
+)
+
+func fillNil(_ cborType, v reflect.Value) error {
+ switch v.Kind() {
+ case reflect.Slice, reflect.Map, reflect.Interface, reflect.Ptr:
+ v.Set(reflect.Zero(v.Type()))
+ return nil
+ }
+ return nil
+}
+
+func fillPositiveInt(t cborType, val uint64, v reflect.Value) error {
+ switch v.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ if val > math.MaxInt64 {
+ return &UnmarshalTypeError{
+ CBORType: t.String(),
+ GoType: v.Type().String(),
+ errorMsg: strconv.FormatUint(val, 10) + " overflows " + v.Type().String(),
+ }
+ }
+ if v.OverflowInt(int64(val)) {
+ return &UnmarshalTypeError{
+ CBORType: t.String(),
+ GoType: v.Type().String(),
+ errorMsg: strconv.FormatUint(val, 10) + " overflows " + v.Type().String(),
+ }
+ }
+ v.SetInt(int64(val))
+ return nil
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ if v.OverflowUint(val) {
+ return &UnmarshalTypeError{
+ CBORType: t.String(),
+ GoType: v.Type().String(),
+ errorMsg: strconv.FormatUint(val, 10) + " overflows " + v.Type().String(),
+ }
+ }
+ v.SetUint(val)
+ return nil
+
+ case reflect.Float32, reflect.Float64:
+ f := float64(val)
+ v.SetFloat(f)
+ return nil
+ }
+
+ if v.Type() == typeBigInt {
+ i := new(big.Int).SetUint64(val)
+ v.Set(reflect.ValueOf(*i))
+ return nil
+ }
+ return &UnmarshalTypeError{CBORType: t.String(), GoType: v.Type().String()}
+}
+
+func fillNegativeInt(t cborType, val int64, v reflect.Value) error {
+ switch v.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ if v.OverflowInt(val) {
+ return &UnmarshalTypeError{
+ CBORType: t.String(),
+ GoType: v.Type().String(),
+ errorMsg: strconv.FormatInt(val, 10) + " overflows " + v.Type().String(),
+ }
+ }
+ v.SetInt(val)
+ return nil
+
+ case reflect.Float32, reflect.Float64:
+ f := float64(val)
+ v.SetFloat(f)
+ return nil
+ }
+ if v.Type() == typeBigInt {
+ i := new(big.Int).SetInt64(val)
+ v.Set(reflect.ValueOf(*i))
+ return nil
+ }
+ return &UnmarshalTypeError{CBORType: t.String(), GoType: v.Type().String()}
+}
+
+func fillBool(t cborType, val bool, v reflect.Value) error {
+ if v.Kind() == reflect.Bool {
+ v.SetBool(val)
+ return nil
+ }
+ return &UnmarshalTypeError{CBORType: t.String(), GoType: v.Type().String()}
+}
+
+func fillFloat(t cborType, val float64, v reflect.Value) error {
+ switch v.Kind() {
+ case reflect.Float32, reflect.Float64:
+ if v.OverflowFloat(val) {
+ return &UnmarshalTypeError{
+ CBORType: t.String(),
+ GoType: v.Type().String(),
+ errorMsg: strconv.FormatFloat(val, 'E', -1, 64) + " overflows " + v.Type().String(),
+ }
+ }
+ v.SetFloat(val)
+ return nil
+ }
+ return &UnmarshalTypeError{CBORType: t.String(), GoType: v.Type().String()}
+}
+
+func fillByteString(t cborType, val []byte, shared bool, v reflect.Value, bsts ByteStringToStringMode, bum BinaryUnmarshalerMode) error {
+ if bum == BinaryUnmarshalerByteString && reflect.PtrTo(v.Type()).Implements(typeBinaryUnmarshaler) {
+ if v.CanAddr() {
+ v = v.Addr()
+ if u, ok := v.Interface().(encoding.BinaryUnmarshaler); ok {
+ // The contract of BinaryUnmarshaler forbids
+ // retaining the input bytes, so no copying is
+ // required even if val is shared.
+ return u.UnmarshalBinary(val)
+ }
+ }
+ return errors.New("cbor: cannot set new value for " + v.Type().String())
+ }
+ if bsts != ByteStringToStringForbidden && v.Kind() == reflect.String {
+ v.SetString(string(val))
+ return nil
+ }
+ if v.Kind() == reflect.Slice && v.Type().Elem().Kind() == reflect.Uint8 {
+ src := val
+ if shared {
+ // SetBytes shares the underlying bytes of the source slice.
+ src = make([]byte, len(val))
+ copy(src, val)
+ }
+ v.SetBytes(src)
+ return nil
+ }
+ if v.Kind() == reflect.Array && v.Type().Elem().Kind() == reflect.Uint8 {
+ vLen := v.Len()
+ i := 0
+ for ; i < vLen && i < len(val); i++ {
+ v.Index(i).SetUint(uint64(val[i]))
+ }
+ // Set remaining Go array elements to zero values.
+ if i < vLen {
+ zeroV := reflect.Zero(reflect.TypeOf(byte(0)))
+ for ; i < vLen; i++ {
+ v.Index(i).Set(zeroV)
+ }
+ }
+ return nil
+ }
+ return &UnmarshalTypeError{CBORType: t.String(), GoType: v.Type().String()}
+}
+
+func fillTextString(t cborType, val []byte, v reflect.Value) error {
+ if v.Kind() == reflect.String {
+ v.SetString(string(val))
+ return nil
+ }
+ return &UnmarshalTypeError{CBORType: t.String(), GoType: v.Type().String()}
+}
+
+func isImmutableKind(k reflect.Kind) bool {
+ switch k {
+ case reflect.Bool,
+ reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+ reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
+ reflect.Float32, reflect.Float64,
+ reflect.String:
+ return true
+
+ default:
+ return false
+ }
+}
+
+func isHashableValue(rv reflect.Value) bool {
+ switch rv.Kind() {
+ case reflect.Slice, reflect.Map, reflect.Func:
+ return false
+
+ case reflect.Struct:
+ switch rv.Type() {
+ case typeTag:
+ tag := rv.Interface().(Tag)
+ return isHashableValue(reflect.ValueOf(tag.Content))
+ case typeBigInt:
+ return false
+ }
+ }
+ return true
+}
+
+// convertByteSliceToByteString converts []byte to ByteString if
+// - v is []byte type, or
+// - v is Tag type and tag content type is []byte
+// This function also handles nested tags.
+// CBOR data is already verified to be well-formed before this function is used,
+// so the recursion won't exceed max nested levels.
+func convertByteSliceToByteString(v interface{}) (interface{}, bool) {
+ switch v := v.(type) {
+ case []byte:
+ return ByteString(v), true
+
+ case Tag:
+ content, converted := convertByteSliceToByteString(v.Content)
+ if converted {
+ return Tag{Number: v.Number, Content: content}, true
+ }
+ }
+ return v, false
+}
diff --git a/vendor/github.com/fxamacker/cbor/v2/diagnose.go b/vendor/github.com/fxamacker/cbor/v2/diagnose.go
new file mode 100644
index 0000000000..44afb86608
--- /dev/null
+++ b/vendor/github.com/fxamacker/cbor/v2/diagnose.go
@@ -0,0 +1,724 @@
+// Copyright (c) Faye Amacker. All rights reserved.
+// Licensed under the MIT License. See LICENSE in the project root for license information.
+
+package cbor
+
+import (
+ "bytes"
+ "encoding/base32"
+ "encoding/base64"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "math/big"
+ "strconv"
+ "unicode/utf16"
+ "unicode/utf8"
+
+ "github.com/x448/float16"
+)
+
+// DiagMode is the main interface for CBOR diagnostic notation.
+type DiagMode interface {
+ // Diagnose returns extended diagnostic notation (EDN) of CBOR data items using this DiagMode.
+ Diagnose([]byte) (string, error)
+
+ // DiagnoseFirst returns extended diagnostic notation (EDN) of the first CBOR data item using the DiagMode. Any remaining bytes are returned in rest.
+ DiagnoseFirst([]byte) (string, []byte, error)
+
+ // DiagOptions returns user specified options used to create this DiagMode.
+ DiagOptions() DiagOptions
+}
+
+// ByteStringEncoding specifies the base encoding in which byte strings are notated.
+type ByteStringEncoding uint8
+
+const (
+ // ByteStringBase16Encoding encodes byte strings in base16, without padding.
+ ByteStringBase16Encoding ByteStringEncoding = iota
+
+ // ByteStringBase32Encoding encodes byte strings in base32, without padding.
+ ByteStringBase32Encoding
+
+ // ByteStringBase32HexEncoding encodes byte strings in base32hex, without padding.
+ ByteStringBase32HexEncoding
+
+ // ByteStringBase64Encoding encodes byte strings in base64url, without padding.
+ ByteStringBase64Encoding
+
+ maxByteStringEncoding
+)
+
+func (bse ByteStringEncoding) valid() error {
+ if bse >= maxByteStringEncoding {
+ return errors.New("cbor: invalid ByteStringEncoding " + strconv.Itoa(int(bse)))
+ }
+ return nil
+}
+
+// DiagOptions specifies Diag options.
+type DiagOptions struct {
+	// ByteStringEncoding specifies the base encoding in which byte strings are notated.
+ // Default is ByteStringBase16Encoding.
+ ByteStringEncoding ByteStringEncoding
+
+ // ByteStringHexWhitespace specifies notating with whitespace in byte string
+ // when ByteStringEncoding is ByteStringBase16Encoding.
+ ByteStringHexWhitespace bool
+
+	// ByteStringText specifies notating a byte string as text
+	// when it is valid UTF-8 text.
+ ByteStringText bool
+
+	// ByteStringEmbeddedCBOR specifies notating embedded CBOR in a byte string
+	// when it contains valid CBOR data.
+ ByteStringEmbeddedCBOR bool
+
+	// CBORSequence specifies notating CBOR sequences;
+	// otherwise, an error is returned if there are more bytes after the first CBOR data item.
+ CBORSequence bool
+
+ // FloatPrecisionIndicator specifies appending a suffix to indicate float precision.
+ // Refer to https://www.rfc-editor.org/rfc/rfc8949.html#name-encoding-indicators.
+ FloatPrecisionIndicator bool
+
+ // MaxNestedLevels specifies the max nested levels allowed for any combination of CBOR array, maps, and tags.
+ // Default is 32 levels and it can be set to [4, 65535]. Note that higher maximum levels of nesting can
+ // require larger amounts of stack to deserialize. Don't increase this higher than you require.
+ MaxNestedLevels int
+
+ // MaxArrayElements specifies the max number of elements for CBOR arrays.
+ // Default is 128*1024=131072 and it can be set to [16, 2147483647]
+ MaxArrayElements int
+
+ // MaxMapPairs specifies the max number of key-value pairs for CBOR maps.
+ // Default is 128*1024=131072 and it can be set to [16, 2147483647]
+ MaxMapPairs int
+}
+
+// DiagMode returns a DiagMode with immutable options.
+func (opts DiagOptions) DiagMode() (DiagMode, error) {
+ return opts.diagMode()
+}
+
+func (opts DiagOptions) diagMode() (*diagMode, error) {
+ if err := opts.ByteStringEncoding.valid(); err != nil {
+ return nil, err
+ }
+
+ decMode, err := DecOptions{
+ MaxNestedLevels: opts.MaxNestedLevels,
+ MaxArrayElements: opts.MaxArrayElements,
+ MaxMapPairs: opts.MaxMapPairs,
+ }.decMode()
+ if err != nil {
+ return nil, err
+ }
+
+ return &diagMode{
+ byteStringEncoding: opts.ByteStringEncoding,
+ byteStringHexWhitespace: opts.ByteStringHexWhitespace,
+ byteStringText: opts.ByteStringText,
+ byteStringEmbeddedCBOR: opts.ByteStringEmbeddedCBOR,
+ cborSequence: opts.CBORSequence,
+ floatPrecisionIndicator: opts.FloatPrecisionIndicator,
+ decMode: decMode,
+ }, nil
+}
+
+type diagMode struct {
+ byteStringEncoding ByteStringEncoding
+ byteStringHexWhitespace bool
+ byteStringText bool
+ byteStringEmbeddedCBOR bool
+ cborSequence bool
+ floatPrecisionIndicator bool
+ decMode *decMode
+}
+
+// DiagOptions returns user specified options used to create this DiagMode.
+func (dm *diagMode) DiagOptions() DiagOptions {
+ return DiagOptions{
+ ByteStringEncoding: dm.byteStringEncoding,
+ ByteStringHexWhitespace: dm.byteStringHexWhitespace,
+ ByteStringText: dm.byteStringText,
+ ByteStringEmbeddedCBOR: dm.byteStringEmbeddedCBOR,
+ CBORSequence: dm.cborSequence,
+ FloatPrecisionIndicator: dm.floatPrecisionIndicator,
+ MaxNestedLevels: dm.decMode.maxNestedLevels,
+ MaxArrayElements: dm.decMode.maxArrayElements,
+ MaxMapPairs: dm.decMode.maxMapPairs,
+ }
+}
+
+// Diagnose returns extended diagnostic notation (EDN) of CBOR data items using the DiagMode.
+func (dm *diagMode) Diagnose(data []byte) (string, error) {
+ return newDiagnose(data, dm.decMode, dm).diag(dm.cborSequence)
+}
+
+// DiagnoseFirst returns extended diagnostic notation (EDN) of the first CBOR data item using the DiagMode. Any remaining bytes are returned in rest.
+func (dm *diagMode) DiagnoseFirst(data []byte) (diagNotation string, rest []byte, err error) {
+ return newDiagnose(data, dm.decMode, dm).diagFirst()
+}
+
+var defaultDiagMode, _ = DiagOptions{}.diagMode()
+
+// Diagnose returns extended diagnostic notation (EDN) of CBOR data items
+// using the default diagnostic mode.
+//
+// Refer to https://www.rfc-editor.org/rfc/rfc8949.html#name-diagnostic-notation.
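+//
+// For example, Diagnose([]byte{0x83, 0x01, 0x02, 0x03}) returns "[1, 2, 3]".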
+func Diagnose(data []byte) (string, error) {
+ return defaultDiagMode.Diagnose(data)
+}
+
+// DiagnoseFirst returns extended diagnostic notation (EDN) of the first CBOR data item using the default diagnostic mode. Any remaining bytes are returned in rest.
+func DiagnoseFirst(data []byte) (diagNotation string, rest []byte, err error) {
+ return defaultDiagMode.DiagnoseFirst(data)
+}
+
+type diagnose struct {
+ dm *diagMode
+ d *decoder
+ w *bytes.Buffer
+}
+
+func newDiagnose(data []byte, decm *decMode, diagm *diagMode) *diagnose {
+ return &diagnose{
+ dm: diagm,
+ d: &decoder{data: data, dm: decm},
+ w: &bytes.Buffer{},
+ }
+}
+
+func (di *diagnose) diag(cborSequence bool) (string, error) {
+ // CBOR Sequence
+ firstItem := true
+ for {
+ switch err := di.wellformed(cborSequence); err {
+ case nil:
+ if !firstItem {
+ di.w.WriteString(", ")
+ }
+ firstItem = false
+ if itemErr := di.item(); itemErr != nil {
+ return di.w.String(), itemErr
+ }
+
+ case io.EOF:
+ if firstItem {
+ return di.w.String(), err
+ }
+ return di.w.String(), nil
+
+ default:
+ return di.w.String(), err
+ }
+ }
+}
+
+func (di *diagnose) diagFirst() (diagNotation string, rest []byte, err error) {
+ err = di.wellformed(true)
+ if err == nil {
+ err = di.item()
+ }
+
+ if err == nil {
+ // Return EDN and the rest of the data slice (which might be len 0)
+ return di.w.String(), di.d.data[di.d.off:], nil
+ }
+
+ return di.w.String(), nil, err
+}
+
+func (di *diagnose) wellformed(allowExtraData bool) error {
+ off := di.d.off
+ err := di.d.wellformed(allowExtraData, false)
+ di.d.off = off
+ return err
+}
+
+func (di *diagnose) item() error { //nolint:gocyclo
+ initialByte := di.d.data[di.d.off]
+ switch initialByte {
+ case cborByteStringWithIndefiniteLengthHead,
+ cborTextStringWithIndefiniteLengthHead: // indefinite-length byte/text string
+ di.d.off++
+ if isBreakFlag(di.d.data[di.d.off]) {
+ di.d.off++
+ switch initialByte {
+ case cborByteStringWithIndefiniteLengthHead:
+ // indefinite-length bytes with no chunks.
+ di.w.WriteString(`''_`)
+ return nil
+ case cborTextStringWithIndefiniteLengthHead:
+ // indefinite-length text with no chunks.
+ di.w.WriteString(`""_`)
+ return nil
+ }
+ }
+
+ di.w.WriteString("(_ ")
+
+ i := 0
+ for !di.d.foundBreak() {
+ if i > 0 {
+ di.w.WriteString(", ")
+ }
+
+ i++
+ // wellformedIndefiniteString() already checked that the next item is a byte/text string.
+ if err := di.item(); err != nil {
+ return err
+ }
+ }
+
+ di.w.WriteByte(')')
+ return nil
+
+ case cborArrayWithIndefiniteLengthHead: // indefinite-length array
+ di.d.off++
+ di.w.WriteString("[_ ")
+
+ i := 0
+ for !di.d.foundBreak() {
+ if i > 0 {
+ di.w.WriteString(", ")
+ }
+
+ i++
+ if err := di.item(); err != nil {
+ return err
+ }
+ }
+
+ di.w.WriteByte(']')
+ return nil
+
+ case cborMapWithIndefiniteLengthHead: // indefinite-length map
+ di.d.off++
+ di.w.WriteString("{_ ")
+
+ i := 0
+ for !di.d.foundBreak() {
+ if i > 0 {
+ di.w.WriteString(", ")
+ }
+
+ i++
+ // key
+ if err := di.item(); err != nil {
+ return err
+ }
+
+ di.w.WriteString(": ")
+
+ // value
+ if err := di.item(); err != nil {
+ return err
+ }
+ }
+
+ di.w.WriteByte('}')
+ return nil
+ }
+
+ t := di.d.nextCBORType()
+ switch t {
+ case cborTypePositiveInt:
+ _, _, val := di.d.getHead()
+ di.w.WriteString(strconv.FormatUint(val, 10))
+ return nil
+
+ case cborTypeNegativeInt:
+ _, _, val := di.d.getHead()
+ if val > math.MaxInt64 {
+ // CBOR negative integer overflows int64, use big.Int to store value.
+ bi := new(big.Int)
+ bi.SetUint64(val)
+ bi.Add(bi, big.NewInt(1))
+ bi.Neg(bi)
+ di.w.WriteString(bi.String())
+ return nil
+ }
+
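+		// nValue equals -1 - val: XOR with -1 (all bits set) is the bitwise
+		// complement of val, i.e. -(val+1) in two's complement, computed
+		// without risking int64 overflow (val <= math.MaxInt64 here).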
+ nValue := int64(-1) ^ int64(val)
+ di.w.WriteString(strconv.FormatInt(nValue, 10))
+ return nil
+
+ case cborTypeByteString:
+ b, _ := di.d.parseByteString()
+ return di.encodeByteString(b)
+
+ case cborTypeTextString:
+ b, err := di.d.parseTextString()
+ if err != nil {
+ return err
+ }
+ return di.encodeTextString(string(b), '"')
+
+ case cborTypeArray:
+ _, _, val := di.d.getHead()
+ count := int(val)
+ di.w.WriteByte('[')
+
+ for i := 0; i < count; i++ {
+ if i > 0 {
+ di.w.WriteString(", ")
+ }
+ if err := di.item(); err != nil {
+ return err
+ }
+ }
+ di.w.WriteByte(']')
+ return nil
+
+ case cborTypeMap:
+ _, _, val := di.d.getHead()
+ count := int(val)
+ di.w.WriteByte('{')
+
+ for i := 0; i < count; i++ {
+ if i > 0 {
+ di.w.WriteString(", ")
+ }
+ // key
+ if err := di.item(); err != nil {
+ return err
+ }
+ di.w.WriteString(": ")
+ // value
+ if err := di.item(); err != nil {
+ return err
+ }
+ }
+ di.w.WriteByte('}')
+ return nil
+
+ case cborTypeTag:
+ _, _, tagNum := di.d.getHead()
+ switch tagNum {
+ case tagNumUnsignedBignum:
+ if nt := di.d.nextCBORType(); nt != cborTypeByteString {
+ return newInadmissibleTagContentTypeError(
+ tagNumUnsignedBignum,
+ "byte string",
+ nt.String())
+ }
+
+ b, _ := di.d.parseByteString()
+ bi := new(big.Int).SetBytes(b)
+ di.w.WriteString(bi.String())
+ return nil
+
+ case tagNumNegativeBignum:
+ if nt := di.d.nextCBORType(); nt != cborTypeByteString {
+ return newInadmissibleTagContentTypeError(
+ tagNumNegativeBignum,
+ "byte string",
+ nt.String(),
+ )
+ }
+
+ b, _ := di.d.parseByteString()
+ bi := new(big.Int).SetBytes(b)
+ bi.Add(bi, big.NewInt(1))
+ bi.Neg(bi)
+ di.w.WriteString(bi.String())
+ return nil
+
+ default:
+ di.w.WriteString(strconv.FormatUint(tagNum, 10))
+ di.w.WriteByte('(')
+ if err := di.item(); err != nil {
+ return err
+ }
+ di.w.WriteByte(')')
+ return nil
+ }
+
+ case cborTypePrimitives:
+ _, ai, val := di.d.getHead()
+ switch ai {
+ case additionalInformationAsFalse:
+ di.w.WriteString("false")
+ return nil
+
+ case additionalInformationAsTrue:
+ di.w.WriteString("true")
+ return nil
+
+ case additionalInformationAsNull:
+ di.w.WriteString("null")
+ return nil
+
+ case additionalInformationAsUndefined:
+ di.w.WriteString("undefined")
+ return nil
+
+ case additionalInformationAsFloat16,
+ additionalInformationAsFloat32,
+ additionalInformationAsFloat64:
+ return di.encodeFloat(ai, val)
+
+ default:
+ di.w.WriteString("simple(")
+ di.w.WriteString(strconv.FormatUint(val, 10))
+ di.w.WriteByte(')')
+ return nil
+ }
+ }
+
+ return nil
+}
+
+// writeU16 formats a rune as "\uxxxx".
+func (di *diagnose) writeU16(val rune) {
+ di.w.WriteString("\\u")
+ var in [2]byte
+ in[0] = byte(val >> 8)
+ in[1] = byte(val)
+ sz := hex.EncodedLen(len(in))
+ di.w.Grow(sz)
+ dst := di.w.Bytes()[di.w.Len() : di.w.Len()+sz]
+ hex.Encode(dst, in[:])
+ di.w.Write(dst)
+}
+
+var rawBase32Encoding = base32.StdEncoding.WithPadding(base32.NoPadding)
+var rawBase32HexEncoding = base32.HexEncoding.WithPadding(base32.NoPadding)
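+
+// For illustration, the two-byte string 0x12 0x34 renders as h'1234' with
+// ByteStringBase16Encoding, b32'CI2A' with ByteStringBase32Encoding,
+// h32'28Q0' with ByteStringBase32HexEncoding, and b64'EjQ' with
+// ByteStringBase64Encoding (example bytes chosen arbitrarily).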
+
+func (di *diagnose) encodeByteString(val []byte) error {
+ if len(val) > 0 {
+ if di.dm.byteStringText && utf8.Valid(val) {
+ return di.encodeTextString(string(val), '\'')
+ }
+
+ if di.dm.byteStringEmbeddedCBOR {
+ di2 := newDiagnose(val, di.dm.decMode, di.dm)
+			// Embedded CBOR data items are always notated as a CBOR sequence.
+ if str, err := di2.diag(true); err == nil {
+ di.w.WriteString("<<")
+ di.w.WriteString(str)
+ di.w.WriteString(">>")
+ return nil
+ }
+ }
+ }
+
+ switch di.dm.byteStringEncoding {
+ case ByteStringBase16Encoding:
+ di.w.WriteString("h'")
+ if di.dm.byteStringHexWhitespace {
+ sz := hex.EncodedLen(len(val))
+ if len(val) > 0 {
+ sz += len(val) - 1
+ }
+ di.w.Grow(sz)
+
+ dst := di.w.Bytes()[di.w.Len():]
+ for i := range val {
+ if i > 0 {
+ dst = append(dst, ' ')
+ }
+ hex.Encode(dst[len(dst):len(dst)+2], val[i:i+1])
+ dst = dst[:len(dst)+2]
+ }
+ di.w.Write(dst)
+ } else {
+ sz := hex.EncodedLen(len(val))
+ di.w.Grow(sz)
+ dst := di.w.Bytes()[di.w.Len() : di.w.Len()+sz]
+ hex.Encode(dst, val)
+ di.w.Write(dst)
+ }
+ di.w.WriteByte('\'')
+ return nil
+
+ case ByteStringBase32Encoding:
+ di.w.WriteString("b32'")
+ sz := rawBase32Encoding.EncodedLen(len(val))
+ di.w.Grow(sz)
+ dst := di.w.Bytes()[di.w.Len() : di.w.Len()+sz]
+ rawBase32Encoding.Encode(dst, val)
+ di.w.Write(dst)
+ di.w.WriteByte('\'')
+ return nil
+
+ case ByteStringBase32HexEncoding:
+ di.w.WriteString("h32'")
+ sz := rawBase32HexEncoding.EncodedLen(len(val))
+ di.w.Grow(sz)
+ dst := di.w.Bytes()[di.w.Len() : di.w.Len()+sz]
+ rawBase32HexEncoding.Encode(dst, val)
+ di.w.Write(dst)
+ di.w.WriteByte('\'')
+ return nil
+
+ case ByteStringBase64Encoding:
+ di.w.WriteString("b64'")
+ sz := base64.RawURLEncoding.EncodedLen(len(val))
+ di.w.Grow(sz)
+ dst := di.w.Bytes()[di.w.Len() : di.w.Len()+sz]
+ base64.RawURLEncoding.Encode(dst, val)
+ di.w.Write(dst)
+ di.w.WriteByte('\'')
+ return nil
+
+ default:
+ // It should not be possible for users to construct a *diagMode with an invalid byte
+ // string encoding.
+ panic(fmt.Sprintf("diagmode has invalid ByteStringEncoding %v", di.dm.byteStringEncoding))
+ }
+}
+
+const utf16SurrSelf = rune(0x10000)
+
+// quote should be either `'` or `"`
+func (di *diagnose) encodeTextString(val string, quote byte) error {
+ di.w.WriteByte(quote)
+
+ for i := 0; i < len(val); {
+ if b := val[i]; b < utf8.RuneSelf {
+ switch {
+ case b == '\t', b == '\n', b == '\r', b == '\\', b == quote:
+ di.w.WriteByte('\\')
+
+ switch b {
+ case '\t':
+ b = 't'
+ case '\n':
+ b = 'n'
+ case '\r':
+ b = 'r'
+ }
+ di.w.WriteByte(b)
+
+ case b >= ' ' && b <= '~':
+ di.w.WriteByte(b)
+
+ default:
+ di.writeU16(rune(b))
+ }
+
+ i++
+ continue
+ }
+
+ c, size := utf8.DecodeRuneInString(val[i:])
+ switch {
+ case c == utf8.RuneError:
+ return &SemanticError{"cbor: invalid UTF-8 string"}
+
+ case c < utf16SurrSelf:
+ di.writeU16(c)
+
+ default:
+ c1, c2 := utf16.EncodeRune(c)
+ di.writeU16(c1)
+ di.writeU16(c2)
+ }
+
+ i += size
+ }
+
+ di.w.WriteByte(quote)
+ return nil
+}
+
+func (di *diagnose) encodeFloat(ai byte, val uint64) error {
+ f64 := float64(0)
+ switch ai {
+ case additionalInformationAsFloat16:
+ f16 := float16.Frombits(uint16(val))
+ switch {
+ case f16.IsNaN():
+ di.w.WriteString("NaN")
+ return nil
+ case f16.IsInf(1):
+ di.w.WriteString("Infinity")
+ return nil
+ case f16.IsInf(-1):
+ di.w.WriteString("-Infinity")
+ return nil
+ default:
+ f64 = float64(f16.Float32())
+ }
+
+ case additionalInformationAsFloat32:
+ f32 := math.Float32frombits(uint32(val))
+ switch {
+ case f32 != f32:
+ di.w.WriteString("NaN")
+ return nil
+ case f32 > math.MaxFloat32:
+ di.w.WriteString("Infinity")
+ return nil
+ case f32 < -math.MaxFloat32:
+ di.w.WriteString("-Infinity")
+ return nil
+ default:
+ f64 = float64(f32)
+ }
+
+ case additionalInformationAsFloat64:
+ f64 = math.Float64frombits(val)
+ switch {
+ case f64 != f64:
+ di.w.WriteString("NaN")
+ return nil
+ case f64 > math.MaxFloat64:
+ di.w.WriteString("Infinity")
+ return nil
+ case f64 < -math.MaxFloat64:
+ di.w.WriteString("-Infinity")
+ return nil
+ }
+ }
+ // Use ES6 number to string conversion which should match most JSON generators.
+ // Inspired by https://github.com/golang/go/blob/4df10fba1687a6d4f51d7238a403f8f2298f6a16/src/encoding/json/encode.go#L585
+ const bitSize = 64
+ b := make([]byte, 0, 32)
+ if abs := math.Abs(f64); abs != 0 && (abs < 1e-6 || abs >= 1e21) {
+ b = strconv.AppendFloat(b, f64, 'e', -1, bitSize)
+ // clean up e-09 to e-9
+ n := len(b)
+ if n >= 4 && string(b[n-4:n-1]) == "e-0" {
+ b = append(b[:n-2], b[n-1])
+ }
+ } else {
+ b = strconv.AppendFloat(b, f64, 'f', -1, bitSize)
+ }
+
+ // add decimal point and trailing zero if needed
+ if bytes.IndexByte(b, '.') < 0 {
+ if i := bytes.IndexByte(b, 'e'); i < 0 {
+ b = append(b, '.', '0')
+ } else {
+ b = append(b[:i+2], b[i:]...)
+ b[i] = '.'
+ b[i+1] = '0'
+ }
+ }
+
+ di.w.WriteString(string(b))
+
+ if di.dm.floatPrecisionIndicator {
+ switch ai {
+ case additionalInformationAsFloat16:
+ di.w.WriteString("_1")
+ return nil
+
+ case additionalInformationAsFloat32:
+ di.w.WriteString("_2")
+ return nil
+
+ case additionalInformationAsFloat64:
+ di.w.WriteString("_3")
+ return nil
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/fxamacker/cbor/v2/doc.go b/vendor/github.com/fxamacker/cbor/v2/doc.go
new file mode 100644
index 0000000000..23f68b984c
--- /dev/null
+++ b/vendor/github.com/fxamacker/cbor/v2/doc.go
@@ -0,0 +1,129 @@
+// Copyright (c) Faye Amacker. All rights reserved.
+// Licensed under the MIT License. See LICENSE in the project root for license information.
+
+/*
+Package cbor is a modern CBOR codec (RFC 8949 & RFC 7049) with CBOR tags,
+Go struct tags (toarray/keyasint/omitempty), Core Deterministic Encoding,
+CTAP2, Canonical CBOR, float64->32->16, and duplicate map key detection.
+
+Encoding options allow "preferred serialization" by encoding integers and floats
+to their smallest forms (e.g. float16) when values fit.
+
+Struct tags like "keyasint", "toarray" and "omitempty" make CBOR data smaller
+and easier to use with structs.
+
+For example, "toarray" tag makes struct fields encode to CBOR array elements. And
+"keyasint" makes a field encode to an element of CBOR map with specified int key.
+
+Latest docs can be viewed at https://github.com/fxamacker/cbor#cbor-library-in-go
+
+# Basics
+
+The Quick Start guide is at https://github.com/fxamacker/cbor#quick-start
+
+Function signatures identical to encoding/json include:
+
+ Marshal, Unmarshal, NewEncoder, NewDecoder, (*Encoder).Encode, (*Decoder).Decode.
+
+Standard interfaces include:
+
+ BinaryMarshaler, BinaryUnmarshaler, Marshaler, and Unmarshaler.
+
+Custom encoding and decoding is possible by implementing standard interfaces for
+user-defined Go types.
+
+Codec functions are available at package level (using default options) or by
+creating modes from options at runtime.
+
+"Mode" in this API means definite way of encoding (EncMode) or decoding (DecMode).
+
+EncMode and DecMode interfaces are created from EncOptions or DecOptions structs.
+
+ em, err := cbor.EncOptions{...}.EncMode()
+ em, err := cbor.CanonicalEncOptions().EncMode()
+ em, err := cbor.CTAP2EncOptions().EncMode()
+
+Modes use immutable options to avoid side-effects and simplify concurrency. Behavior of
+modes won't accidentally change at runtime after they're created.
+
+Modes are intended to be reused and are safe for concurrent use.
+
+EncMode and DecMode Interfaces
+
+ // EncMode interface uses immutable options and is safe for concurrent use.
+ type EncMode interface {
+ Marshal(v interface{}) ([]byte, error)
+ NewEncoder(w io.Writer) *Encoder
+ EncOptions() EncOptions // returns copy of options
+ }
+
+ // DecMode interface uses immutable options and is safe for concurrent use.
+ type DecMode interface {
+ Unmarshal(data []byte, v interface{}) error
+ NewDecoder(r io.Reader) *Decoder
+ DecOptions() DecOptions // returns copy of options
+ }
+
+Using Default Encoding Mode
+
+ b, err := cbor.Marshal(v)
+
+ encoder := cbor.NewEncoder(w)
+ err = encoder.Encode(v)
+
+Using Default Decoding Mode
+
+ err := cbor.Unmarshal(b, &v)
+
+ decoder := cbor.NewDecoder(r)
+ err = decoder.Decode(&v)
+
+Creating and Using Encoding Modes
+
+ // Create EncOptions using either struct literal or a function.
+ opts := cbor.CanonicalEncOptions()
+
+ // If needed, modify encoding options
+ opts.Time = cbor.TimeUnix
+
+ // Create reusable EncMode interface with immutable options, safe for concurrent use.
+ em, err := opts.EncMode()
+
+ // Use EncMode like encoding/json, with same function signatures.
+ b, err := em.Marshal(v)
+ // or
+ encoder := em.NewEncoder(w)
+ err := encoder.Encode(v)
+
+ // NOTE: Both em.Marshal(v) and encoder.Encode(v) use encoding options
+ // specified during creation of em (encoding mode).
+
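+Creating and Using Decoding Modes follows the same pattern; a minimal sketch
+(the option value below is only illustrative):
+
+	// Create DecOptions using a struct literal and adjust options as needed.
+	opts := cbor.DecOptions{MaxNestedLevels: 16}
+
+	// Create reusable DecMode interface with immutable options, safe for concurrent use.
+	dm, err := opts.DecMode()
+
+	// Use DecMode like encoding/json, with same function signatures.
+	err = dm.Unmarshal(b, &v)
+	// or
+	decoder := dm.NewDecoder(r)
+	err = decoder.Decode(&v)
+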
+# CBOR Options
+
+Predefined Encoding Options: https://github.com/fxamacker/cbor#predefined-encoding-options
+
+Encoding Options: https://github.com/fxamacker/cbor#encoding-options
+
+Decoding Options: https://github.com/fxamacker/cbor#decoding-options
+
+# Struct Tags
+
+Struct tags like `cbor:"name,omitempty"` and `json:"name,omitempty"` work as expected.
+If both struct tags are specified then `cbor` is used.
+
+Struct tags like "keyasint", "toarray", and "omitempty" make it easy to use
+very compact formats like COSE and CWT (CBOR Web Tokens) with structs.
+
+For example, "toarray" makes struct fields encode to array elements. And "keyasint"
+makes struct fields encode to elements of CBOR map with int keys.
+
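+A minimal sketch (hypothetical type and field names, not part of this package):
+
+	// Claims encodes to a CBOR map with integer keys 1 and 4; zero values are omitted.
+	type Claims struct {
+		Iss string `cbor:"1,keyasint,omitempty"`
+		Exp uint64 `cbor:"4,keyasint,omitempty"`
+	}
+
+	// Record encodes to a 2-element CBOR array instead of a map.
+	type Record struct {
+		_    struct{} `cbor:",toarray"`
+		Name string
+		Tags []string
+	}
+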
+https://raw.githubusercontent.com/fxamacker/images/master/cbor/v2.0.0/cbor_easy_api.png
+
+Struct tags are listed at https://github.com/fxamacker/cbor#struct-tags-1
+
+# Tests and Fuzzing
+
+Over 375 tests are included in this package. Cover-guided fuzzing is handled by
+a private fuzzer that replaced fxamacker/cbor-fuzz years ago.
+*/
+package cbor
diff --git a/vendor/github.com/fxamacker/cbor/v2/encode.go b/vendor/github.com/fxamacker/cbor/v2/encode.go
new file mode 100644
index 0000000000..6508e291d6
--- /dev/null
+++ b/vendor/github.com/fxamacker/cbor/v2/encode.go
@@ -0,0 +1,1989 @@
+// Copyright (c) Faye Amacker. All rights reserved.
+// Licensed under the MIT License. See LICENSE in the project root for license information.
+
+package cbor
+
+import (
+ "bytes"
+ "encoding"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "math/big"
+ "math/rand"
+ "reflect"
+ "sort"
+ "strconv"
+ "sync"
+ "time"
+
+ "github.com/x448/float16"
+)
+
+// Marshal returns the CBOR encoding of v using default encoding options.
+// See EncOptions for encoding options.
+//
+// Marshal uses the following encoding rules:
+//
+// If value implements the Marshaler interface, Marshal calls its
+// MarshalCBOR method.
+//
+// If value implements encoding.BinaryMarshaler, Marshal calls its
+// MarshalBinary method and encodes the result as a CBOR byte string.
+//
+// Boolean values encode as CBOR booleans (type 7).
+//
+// Positive integer values encode as CBOR positive integers (type 0).
+//
+// Negative integer values encode as CBOR negative integers (type 1).
+//
+// Floating point values encode as CBOR floating points (type 7).
+//
+// String values encode as CBOR text strings (type 3).
+//
+// []byte values encode as CBOR byte strings (type 2).
+//
+// Array and slice values encode as CBOR arrays (type 4).
+//
+// Map values encode as CBOR maps (type 5).
+//
+// Struct values encode as CBOR maps (type 5). Each exported struct field
+// becomes a pair with field name encoded as CBOR text string (type 3) and
+// field value encoded based on its type. See struct tag option "keyasint"
+// to encode field name as CBOR integer (type 0 and 1). Also see struct
+// tag option "toarray" for special field "_" to encode struct values as
+// CBOR array (type 4).
+//
+// Marshal supports a format string stored under the "cbor" key in the struct
+// field's tag. The format string can specify the name of the field, the
+// "omitempty" and "keyasint" options, and the special case "-" for field omission.
+// If the "cbor" key is absent, Marshal uses the "json" key.
+//
+// A struct field name is treated as an integer if the "keyasint" option is
+// present in its format string. In that case, the format string must specify
+// an integer as the field name.
+//
+// The special struct field "_" is used to specify struct-level options, such
+// as "toarray". The "toarray" option encodes the Go struct as a CBOR array.
+// "omitempty" is disabled by "toarray" to ensure that the same number of
+// elements is encoded every time.
+//
+// Anonymous struct fields are marshaled as if their exported fields
+// were fields in the outer struct. Marshal follows the same struct fields
+// visibility rules used by JSON encoding package.
+//
+// time.Time values encode as text strings specified in RFC3339 or numerical
+// representation of seconds since January 1, 1970 UTC depending on
+// EncOptions.Time setting. Also see EncOptions.TimeTag to encode
+// time.Time as CBOR tag with tag number 0 or 1.
+//
+// big.Int values encode as CBOR integers (type 0 and 1) if values fit.
+// Otherwise, big.Int values encode as CBOR bignums (tag 2 and 3). See
+// EncOptions.BigIntConvert to always encode big.Int values as CBOR
+// bignums.
+//
+// Pointer values encode as the value pointed to.
+//
+// Interface values encode as the value stored in the interface.
+//
+// Nil slice/map/pointer/interface values encode as CBOR nulls (type 7).
+//
+// Values of other types cannot be encoded in CBOR. Attempting
+// to encode such a value causes Marshal to return an UnsupportedTypeError.
+func Marshal(v interface{}) ([]byte, error) {
+ return defaultEncMode.Marshal(v)
+}
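+
+// For illustration, a few arbitrary values and their encodings under the
+// default options:
+//
+//	b, _ := Marshal(true)          // b == []byte{0xf5}
+//	b, _ = Marshal("a")            // b == []byte{0x61, 0x61}
+//	b, _ = Marshal([]int{1, 2, 3}) // b == []byte{0x83, 0x01, 0x02, 0x03}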
+
+// MarshalToBuffer encodes v into the provided buffer (instead of using the built-in buffer pool)
+// and uses default encoding options.
+//
+// NOTE: Unlike Marshal, the buffer provided to MarshalToBuffer can contain
+// partially encoded data if an error is returned.
+//
+// See Marshal for more details.
+func MarshalToBuffer(v interface{}, buf *bytes.Buffer) error {
+ return defaultEncMode.MarshalToBuffer(v, buf)
+}
+
+// Marshaler is the interface implemented by types that can marshal themselves
+// into valid CBOR.
+type Marshaler interface {
+ MarshalCBOR() ([]byte, error)
+}
+
+// MarshalerError represents error from checking encoded CBOR data item
+// returned from MarshalCBOR for well-formedness and some very limited tag validation.
+type MarshalerError struct {
+ typ reflect.Type
+ err error
+}
+
+func (e *MarshalerError) Error() string {
+ return "cbor: error calling MarshalCBOR for type " +
+ e.typ.String() +
+ ": " + e.err.Error()
+}
+
+func (e *MarshalerError) Unwrap() error {
+ return e.err
+}
+
+// UnsupportedTypeError is returned by Marshal when attempting to encode value
+// of an unsupported type.
+type UnsupportedTypeError struct {
+ Type reflect.Type
+}
+
+func (e *UnsupportedTypeError) Error() string {
+ return "cbor: unsupported type: " + e.Type.String()
+}
+
+// UnsupportedValueError is returned by Marshal when attempting to encode an
+// unsupported value.
+type UnsupportedValueError struct {
+ msg string
+}
+
+func (e *UnsupportedValueError) Error() string {
+ return "cbor: unsupported value: " + e.msg
+}
+
+// SortMode identifies supported sorting order.
+type SortMode int
+
+const (
+ // SortNone encodes map pairs and struct fields in an arbitrary order.
+ SortNone SortMode = 0
+
+ // SortLengthFirst causes map keys or struct fields to be sorted such that:
+ // - If two keys have different lengths, the shorter one sorts earlier;
+ // - If two keys have the same length, the one with the lower value in
+ // (byte-wise) lexical order sorts earlier.
+ // It is used in "Canonical CBOR" encoding in RFC 7049 3.9.
+ SortLengthFirst SortMode = 1
+
+ // SortBytewiseLexical causes map keys or struct fields to be sorted in the
+ // bytewise lexicographic order of their deterministic CBOR encodings.
+ // It is used in "CTAP2 Canonical CBOR" and "Core Deterministic Encoding"
+ // in RFC 7049bis.
+ SortBytewiseLexical SortMode = 2
+
+	// SortFastShuffle encodes map pairs and struct fields in a shuffled
+ // order. This mode does not guarantee an unbiased permutation, but it
+ // does guarantee that the runtime of the shuffle algorithm used will be
+ // constant.
+ SortFastShuffle SortMode = 3
+
+ // SortCanonical is used in "Canonical CBOR" encoding in RFC 7049 3.9.
+ SortCanonical SortMode = SortLengthFirst
+
+ // SortCTAP2 is used in "CTAP2 Canonical CBOR".
+ SortCTAP2 SortMode = SortBytewiseLexical
+
+ // SortCoreDeterministic is used in "Core Deterministic Encoding" in RFC 7049bis.
+ SortCoreDeterministic SortMode = SortBytewiseLexical
+
+ maxSortMode SortMode = 4
+)
+
+func (sm SortMode) valid() bool {
+ return sm >= 0 && sm < maxSortMode
+}
+
+// StringMode specifies how to encode Go string values.
+type StringMode int
+
+const (
+ // StringToTextString encodes Go string to CBOR text string (major type 3).
+ StringToTextString StringMode = iota
+
+ // StringToByteString encodes Go string to CBOR byte string (major type 2).
+ StringToByteString
+)
+
+func (st StringMode) cborType() (cborType, error) {
+ switch st {
+ case StringToTextString:
+ return cborTypeTextString, nil
+
+ case StringToByteString:
+ return cborTypeByteString, nil
+ }
+ return 0, errors.New("cbor: invalid StringType " + strconv.Itoa(int(st)))
+}
+
+// ShortestFloatMode specifies which floating-point format should
+// be used as the shortest possible format for CBOR encoding.
+// It is not used for encoding Infinity and NaN values.
+type ShortestFloatMode int
+
+const (
+ // ShortestFloatNone makes float values encode without any conversion.
+ // This is the default for ShortestFloatMode in v1.
+ // E.g. a float32 in Go will encode to CBOR float32. And
+ // a float64 in Go will encode to CBOR float64.
+ ShortestFloatNone ShortestFloatMode = iota
+
+ // ShortestFloat16 specifies float16 as the shortest form that preserves value.
+ // E.g. if float64 can convert to float32 while preserving value, then
+ // encoding will also try to convert float32 to float16. So a float64 might
+ // encode as CBOR float64, float32 or float16 depending on the value.
+ ShortestFloat16
+
+ maxShortestFloat
+)
+
+func (sfm ShortestFloatMode) valid() bool {
+ return sfm >= 0 && sfm < maxShortestFloat
+}
+
+// NaNConvertMode specifies how to encode NaN and overrides ShortestFloatMode.
+// ShortestFloatMode is not used for encoding Infinity and NaN values.
+type NaNConvertMode int
+
+const (
+ // NaNConvert7e00 always encodes NaN to 0xf97e00 (CBOR float16 = 0x7e00).
+ NaNConvert7e00 NaNConvertMode = iota
+
+ // NaNConvertNone never modifies or converts NaN to other representations
+ // (float64 NaN stays float64, etc. even if it can use float16 without losing
+ // any bits).
+ NaNConvertNone
+
+ // NaNConvertPreserveSignal converts NaN to the smallest form that preserves
+ // value (quiet bit + payload) as described in RFC 7049bis Draft 12.
+ NaNConvertPreserveSignal
+
+ // NaNConvertQuiet always forces quiet bit = 1 and shortest form that preserves
+ // NaN payload.
+ NaNConvertQuiet
+
+ // NaNConvertReject returns UnsupportedValueError on attempts to encode a NaN value.
+ NaNConvertReject
+
+ maxNaNConvert
+)
+
+func (ncm NaNConvertMode) valid() bool {
+ return ncm >= 0 && ncm < maxNaNConvert
+}
+
+// InfConvertMode specifies how to encode Infinity and overrides ShortestFloatMode.
+// ShortestFloatMode is not used for encoding Infinity and NaN values.
+type InfConvertMode int
+
+const (
+ // InfConvertFloat16 always converts Inf to lossless IEEE binary16 (float16).
+ InfConvertFloat16 InfConvertMode = iota
+
+ // InfConvertNone never converts (used by CTAP2 Canonical CBOR).
+ InfConvertNone
+
+ // InfConvertReject returns UnsupportedValueError on attempts to encode an infinite value.
+ InfConvertReject
+
+ maxInfConvert
+)
+
+func (icm InfConvertMode) valid() bool {
+ return icm >= 0 && icm < maxInfConvert
+}
+
+// TimeMode specifies how to encode time.Time values.
+type TimeMode int
+
+const (
+ // TimeUnix causes time.Time to be encoded as epoch time in integer with second precision.
+ TimeUnix TimeMode = iota
+
+	// TimeUnixMicro causes time.Time to be encoded as epoch time in floating-point rounded to microsecond precision.
+ TimeUnixMicro
+
+	// TimeUnixDynamic causes time.Time to be encoded as an integer if it has no fractional seconds,
+	// otherwise as floating-point rounded to microsecond precision.
+ TimeUnixDynamic
+
+ // TimeRFC3339 causes time.Time to be encoded as RFC3339 formatted string with second precision.
+ TimeRFC3339
+
+ // TimeRFC3339Nano causes time.Time to be encoded as RFC3339 formatted string with nanosecond precision.
+ TimeRFC3339Nano
+
+ maxTimeMode
+)
+
+func (tm TimeMode) valid() bool {
+ return tm >= 0 && tm < maxTimeMode
+}
+
+// BigIntConvertMode specifies how to encode big.Int values.
+type BigIntConvertMode int
+
+const (
+ // BigIntConvertShortest makes big.Int encode to CBOR integer if value fits.
+ // E.g. if big.Int value can be converted to CBOR integer while preserving
+ // value, encoder will encode it to CBOR integer (major type 0 or 1).
+ BigIntConvertShortest BigIntConvertMode = iota
+
+ // BigIntConvertNone makes big.Int encode to CBOR bignum (tag 2 or 3) without
+ // converting it to another CBOR type.
+ BigIntConvertNone
+
+ // BigIntConvertReject returns an UnsupportedTypeError instead of marshaling a big.Int.
+ BigIntConvertReject
+
+ maxBigIntConvert
+)
+
+func (bim BigIntConvertMode) valid() bool {
+ return bim >= 0 && bim < maxBigIntConvert
+}
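+
+// For illustration (an arbitrary value): with BigIntConvertShortest, big.NewInt(1)
+// encodes as 0x01 (a CBOR integer), while with BigIntConvertNone it encodes as
+// 0xc2 0x41 0x01 (tag 2 wrapping a one-byte bignum magnitude).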
+
+// NilContainersMode specifies how to encode nil slices and maps.
+type NilContainersMode int
+
+const (
+ // NilContainerAsNull encodes nil slices and maps as CBOR null.
+ // This is the default.
+ NilContainerAsNull NilContainersMode = iota
+
+ // NilContainerAsEmpty encodes nil slices and maps as
+ // empty container (CBOR bytestring, array, or map).
+ NilContainerAsEmpty
+
+ maxNilContainersMode
+)
+
+func (m NilContainersMode) valid() bool {
+ return m >= 0 && m < maxNilContainersMode
+}
+
+// OmitEmptyMode specifies how to encode struct fields with the omitempty tag.
+// The default behavior omits a field if its value would encode as an empty CBOR value.
+type OmitEmptyMode int
+
+const (
+ // OmitEmptyCBORValue specifies that struct fields tagged with "omitempty"
+ // should be omitted from encoding if the field would be encoded as an empty
+ // CBOR value, such as CBOR false, 0, 0.0, nil, empty byte, empty string,
+ // empty array, or empty map.
+ OmitEmptyCBORValue OmitEmptyMode = iota
+
+ // OmitEmptyGoValue specifies that struct fields tagged with "omitempty"
+ // should be omitted from encoding if the field has an empty Go value,
+ // defined as false, 0, 0.0, a nil pointer, a nil interface value, and
+ // any empty array, slice, map, or string.
+ // This behavior is the same as the current (aka v1) encoding/json package
+ // included in Go.
+ OmitEmptyGoValue
+
+ maxOmitEmptyMode
+)
+
+func (om OmitEmptyMode) valid() bool {
+ return om >= 0 && om < maxOmitEmptyMode
+}
+
+// FieldNameMode specifies the CBOR type to use when encoding struct field names.
+type FieldNameMode int
+
+const (
+	// FieldNameToTextString encodes struct field names to CBOR text string (major type 3).
+ FieldNameToTextString FieldNameMode = iota
+
+	// FieldNameToByteString encodes struct field names to CBOR byte string (major type 2).
+ FieldNameToByteString
+
+ maxFieldNameMode
+)
+
+func (fnm FieldNameMode) valid() bool {
+ return fnm >= 0 && fnm < maxFieldNameMode
+}
+
+// ByteSliceLaterFormatMode specifies which later format conversion hint (CBOR tag 21-23)
+// to include (if any) when encoding Go byte slice to CBOR byte string. The encoder will
+// always encode unmodified bytes from the byte slice and just wrap it within
+// CBOR tag 21, 22, or 23 if specified.
+// See "Expected Later Encoding for CBOR-to-JSON Converters" in RFC 8949 Section 3.4.5.2.
+type ByteSliceLaterFormatMode int
+
+const (
+ // ByteSliceLaterFormatNone encodes unmodified bytes from Go byte slice to CBOR byte string (major type 2)
+ // without adding CBOR tag 21, 22, or 23.
+ ByteSliceLaterFormatNone ByteSliceLaterFormatMode = iota
+
+ // ByteSliceLaterFormatBase64URL encodes unmodified bytes from Go byte slice to CBOR byte string (major type 2)
+ // inside CBOR tag 21 (expected later conversion to base64url encoding, see RFC 8949 Section 3.4.5.2).
+ ByteSliceLaterFormatBase64URL
+
+ // ByteSliceLaterFormatBase64 encodes unmodified bytes from Go byte slice to CBOR byte string (major type 2)
+ // inside CBOR tag 22 (expected later conversion to base64 encoding, see RFC 8949 Section 3.4.5.2).
+ ByteSliceLaterFormatBase64
+
+ // ByteSliceLaterFormatBase16 encodes unmodified bytes from Go byte slice to CBOR byte string (major type 2)
+ // inside CBOR tag 23 (expected later conversion to base16 encoding, see RFC 8949 Section 3.4.5.2).
+ ByteSliceLaterFormatBase16
+)
+
+func (bsefm ByteSliceLaterFormatMode) encodingTag() (uint64, error) {
+ switch bsefm {
+ case ByteSliceLaterFormatNone:
+ return 0, nil
+
+ case ByteSliceLaterFormatBase64URL:
+ return tagNumExpectedLaterEncodingBase64URL, nil
+
+ case ByteSliceLaterFormatBase64:
+ return tagNumExpectedLaterEncodingBase64, nil
+
+ case ByteSliceLaterFormatBase16:
+ return tagNumExpectedLaterEncodingBase16, nil
+ }
+ return 0, errors.New("cbor: invalid ByteSliceLaterFormat " + strconv.Itoa(int(bsefm)))
+}
+
+// ByteArrayMode specifies how to encode byte arrays.
+type ByteArrayMode int
+
+const (
+ // ByteArrayToByteSlice encodes byte arrays the same way that a byte slice with identical
+ // length and contents is encoded.
+ ByteArrayToByteSlice ByteArrayMode = iota
+
+ // ByteArrayToArray encodes byte arrays to the CBOR array type with one unsigned integer
+ // item for each byte in the array.
+ ByteArrayToArray
+
+ maxByteArrayMode
+)
+
+func (bam ByteArrayMode) valid() bool {
+ return bam >= 0 && bam < maxByteArrayMode
+}
+
+// BinaryMarshalerMode specifies how to encode types that implement encoding.BinaryMarshaler.
+type BinaryMarshalerMode int
+
+const (
+ // BinaryMarshalerByteString encodes the output of MarshalBinary to a CBOR byte string.
+ BinaryMarshalerByteString BinaryMarshalerMode = iota
+
+ // BinaryMarshalerNone does not recognize BinaryMarshaler implementations during encode.
+ BinaryMarshalerNone
+
+ maxBinaryMarshalerMode
+)
+
+func (bmm BinaryMarshalerMode) valid() bool {
+ return bmm >= 0 && bmm < maxBinaryMarshalerMode
+}
+
+// EncOptions specifies encoding options.
+type EncOptions struct {
+ // Sort specifies sorting order.
+ Sort SortMode
+
+ // ShortestFloat specifies the shortest floating-point encoding that preserves
+ // the value being encoded.
+ ShortestFloat ShortestFloatMode
+
+ // NaNConvert specifies how to encode NaN and it overrides ShortestFloatMode.
+ NaNConvert NaNConvertMode
+
+ // InfConvert specifies how to encode Inf and it overrides ShortestFloatMode.
+ InfConvert InfConvertMode
+
+ // BigIntConvert specifies how to encode big.Int values.
+ BigIntConvert BigIntConvertMode
+
+ // Time specifies how to encode time.Time.
+ Time TimeMode
+
+ // TimeTag allows time.Time to be encoded with a tag number.
+ // RFC3339 format gets tag number 0, and numeric epoch time tag number 1.
+ TimeTag EncTagMode
+
+ // IndefLength specifies whether to allow indefinite length CBOR items.
+ IndefLength IndefLengthMode
+
+ // NilContainers specifies how to encode nil slices and maps.
+ NilContainers NilContainersMode
+
+ // TagsMd specifies whether to allow CBOR tags (major type 6).
+ TagsMd TagsMode
+
+	// OmitEmpty specifies how to encode struct fields with the omitempty tag.
+ OmitEmpty OmitEmptyMode
+
+ // String specifies which CBOR type to use when encoding Go strings.
+ // - CBOR text string (major type 3) is default
+ // - CBOR byte string (major type 2)
+ String StringMode
+
+ // FieldName specifies the CBOR type to use when encoding struct field names.
+ FieldName FieldNameMode
+
+ // ByteSliceLaterFormat specifies which later format conversion hint (CBOR tag 21-23)
+ // to include (if any) when encoding Go byte slice to CBOR byte string. The encoder will
+ // always encode unmodified bytes from the byte slice and just wrap it within
+ // CBOR tag 21, 22, or 23 if specified.
+ // See "Expected Later Encoding for CBOR-to-JSON Converters" in RFC 8949 Section 3.4.5.2.
+ ByteSliceLaterFormat ByteSliceLaterFormatMode
+
+ // ByteArray specifies how to encode byte arrays.
+ ByteArray ByteArrayMode
+
+ // BinaryMarshaler specifies how to encode types that implement encoding.BinaryMarshaler.
+ BinaryMarshaler BinaryMarshalerMode
+}
+
+// CanonicalEncOptions returns EncOptions for "Canonical CBOR" encoding,
+// defined in RFC 7049 Section 3.9 with the following rules:
+//
+// 1. "Integers must be as small as possible."
+// 2. "The expression of lengths in major types 2 through 5 must be as short as possible."
+// 3. The keys in every map must be sorted in length-first sorting order.
+// See SortLengthFirst for details.
+// 4. "Indefinite-length items must be made into definite-length items."
+// 5. "If a protocol allows for IEEE floats, then additional canonicalization rules might
+// need to be added. One example rule might be to have all floats start as a 64-bit
+// float, then do a test conversion to a 32-bit float; if the result is the same numeric
+// value, use the shorter value and repeat the process with a test conversion to a
+// 16-bit float. (This rule selects 16-bit float for positive and negative Infinity
+// as well.) Also, there are many representations for NaN. If NaN is an allowed value,
+// it must always be represented as 0xf97e00."
+func CanonicalEncOptions() EncOptions {
+ return EncOptions{
+ Sort: SortCanonical,
+ ShortestFloat: ShortestFloat16,
+ NaNConvert: NaNConvert7e00,
+ InfConvert: InfConvertFloat16,
+ IndefLength: IndefLengthForbidden,
+ }
+}
+
+// CTAP2EncOptions returns EncOptions for "CTAP2 Canonical CBOR" encoding,
+// defined in CTAP specification, with the following rules:
+//
+// 1. "Integers must be encoded as small as possible."
+// 2. "The representations of any floating-point values are not changed."
+// 3. "The expression of lengths in major types 2 through 5 must be as short as possible."
+// 4. "Indefinite-length items must be made into definite-length items.""
+// 5. The keys in every map must be sorted in bytewise lexicographic order.
+// See SortBytewiseLexical for details.
+// 6. "Tags as defined in Section 2.4 in [RFC7049] MUST NOT be present."
+func CTAP2EncOptions() EncOptions {
+ return EncOptions{
+ Sort: SortCTAP2,
+ ShortestFloat: ShortestFloatNone,
+ NaNConvert: NaNConvertNone,
+ InfConvert: InfConvertNone,
+ IndefLength: IndefLengthForbidden,
+ TagsMd: TagsForbidden,
+ }
+}
+
+// CoreDetEncOptions returns EncOptions for "Core Deterministic" encoding,
+// defined in RFC 7049bis with the following rules:
+//
+// 1. "Preferred serialization MUST be used. In particular, this means that arguments
+// (see Section 3) for integers, lengths in major types 2 through 5, and tags MUST
+// be as short as possible"
+// "Floating point values also MUST use the shortest form that preserves the value"
+// 2. "Indefinite-length items MUST NOT appear."
+// 3. "The keys in every map MUST be sorted in the bytewise lexicographic order of
+// their deterministic encodings."
+func CoreDetEncOptions() EncOptions {
+ return EncOptions{
+ Sort: SortCoreDeterministic,
+ ShortestFloat: ShortestFloat16,
+ NaNConvert: NaNConvert7e00,
+ InfConvert: InfConvertFloat16,
+ IndefLength: IndefLengthForbidden,
+ }
+}
+
+// PreferredUnsortedEncOptions returns EncOptions for "Preferred Serialization" encoding,
+// defined in RFC 7049bis with the following rules:
+//
+// 1. "The preferred serialization always uses the shortest form of representing the argument
+// (Section 3);"
+// 2. "it also uses the shortest floating-point encoding that preserves the value being
+// encoded (see Section 5.5)."
+// "The preferred encoding for a floating-point value is the shortest floating-point encoding
+// that preserves its value, e.g., 0xf94580 for the number 5.5, and 0xfa45ad9c00 for the
+// number 5555.5, unless the CBOR-based protocol specifically excludes the use of the shorter
+// floating-point encodings. For NaN values, a shorter encoding is preferred if zero-padding
+// the shorter significand towards the right reconstitutes the original NaN value (for many
+// applications, the single NaN encoding 0xf97e00 will suffice)."
+// 3. "Definite length encoding is preferred whenever the length is known at the time the
+// serialization of the item starts."
+func PreferredUnsortedEncOptions() EncOptions {
+ return EncOptions{
+ Sort: SortNone,
+ ShortestFloat: ShortestFloat16,
+ NaNConvert: NaNConvert7e00,
+ InfConvert: InfConvertFloat16,
+ }
+}
+
+// EncMode returns EncMode with immutable options and no tags (safe for concurrency).
+func (opts EncOptions) EncMode() (EncMode, error) { //nolint:gocritic // ignore hugeParam
+ return opts.encMode()
+}
+
+// UserBufferEncMode returns UserBufferEncMode with immutable options and no tags (safe for concurrency).
+func (opts EncOptions) UserBufferEncMode() (UserBufferEncMode, error) { //nolint:gocritic // ignore hugeParam
+ return opts.encMode()
+}
+
+// EncModeWithTags returns EncMode with options and tags that are both immutable (safe for concurrency).
+func (opts EncOptions) EncModeWithTags(tags TagSet) (EncMode, error) { //nolint:gocritic // ignore hugeParam
+ return opts.UserBufferEncModeWithTags(tags)
+}
+
+// UserBufferEncModeWithTags returns UserBufferEncMode with options and tags that are both immutable (safe for concurrency).
+func (opts EncOptions) UserBufferEncModeWithTags(tags TagSet) (UserBufferEncMode, error) { //nolint:gocritic // ignore hugeParam
+ if opts.TagsMd == TagsForbidden {
+ return nil, errors.New("cbor: cannot create EncMode with TagSet when TagsMd is TagsForbidden")
+ }
+ if tags == nil {
+ return nil, errors.New("cbor: cannot create EncMode with nil value as TagSet")
+ }
+ em, err := opts.encMode()
+ if err != nil {
+ return nil, err
+ }
+ // Copy tags
+ ts := tagSet(make(map[reflect.Type]*tagItem))
+ syncTags := tags.(*syncTagSet)
+ syncTags.RLock()
+ for contentType, tag := range syncTags.t {
+ if tag.opts.EncTag != EncTagNone {
+ ts[contentType] = tag
+ }
+ }
+ syncTags.RUnlock()
+ if len(ts) > 0 {
+ em.tags = ts
+ }
+ return em, nil
+}
+
+// EncModeWithSharedTags returns EncMode with immutable options and mutable shared tags (safe for concurrency).
+func (opts EncOptions) EncModeWithSharedTags(tags TagSet) (EncMode, error) { //nolint:gocritic // ignore hugeParam
+ return opts.UserBufferEncModeWithSharedTags(tags)
+}
+
+// UserBufferEncModeWithSharedTags returns UserBufferEncMode with immutable options and mutable shared tags (safe for concurrency).
+func (opts EncOptions) UserBufferEncModeWithSharedTags(tags TagSet) (UserBufferEncMode, error) { //nolint:gocritic // ignore hugeParam
+ if opts.TagsMd == TagsForbidden {
+ return nil, errors.New("cbor: cannot create EncMode with TagSet when TagsMd is TagsForbidden")
+ }
+ if tags == nil {
+ return nil, errors.New("cbor: cannot create EncMode with nil value as TagSet")
+ }
+ em, err := opts.encMode()
+ if err != nil {
+ return nil, err
+ }
+ em.tags = tags
+ return em, nil
+}
+
+func (opts EncOptions) encMode() (*encMode, error) { //nolint:gocritic // ignore hugeParam
+ if !opts.Sort.valid() {
+ return nil, errors.New("cbor: invalid SortMode " + strconv.Itoa(int(opts.Sort)))
+ }
+ if !opts.ShortestFloat.valid() {
+ return nil, errors.New("cbor: invalid ShortestFloatMode " + strconv.Itoa(int(opts.ShortestFloat)))
+ }
+ if !opts.NaNConvert.valid() {
+ return nil, errors.New("cbor: invalid NaNConvertMode " + strconv.Itoa(int(opts.NaNConvert)))
+ }
+ if !opts.InfConvert.valid() {
+ return nil, errors.New("cbor: invalid InfConvertMode " + strconv.Itoa(int(opts.InfConvert)))
+ }
+ if !opts.BigIntConvert.valid() {
+ return nil, errors.New("cbor: invalid BigIntConvertMode " + strconv.Itoa(int(opts.BigIntConvert)))
+ }
+ if !opts.Time.valid() {
+ return nil, errors.New("cbor: invalid TimeMode " + strconv.Itoa(int(opts.Time)))
+ }
+ if !opts.TimeTag.valid() {
+ return nil, errors.New("cbor: invalid TimeTag " + strconv.Itoa(int(opts.TimeTag)))
+ }
+ if !opts.IndefLength.valid() {
+ return nil, errors.New("cbor: invalid IndefLength " + strconv.Itoa(int(opts.IndefLength)))
+ }
+ if !opts.NilContainers.valid() {
+ return nil, errors.New("cbor: invalid NilContainers " + strconv.Itoa(int(opts.NilContainers)))
+ }
+ if !opts.TagsMd.valid() {
+ return nil, errors.New("cbor: invalid TagsMd " + strconv.Itoa(int(opts.TagsMd)))
+ }
+ if opts.TagsMd == TagsForbidden && opts.TimeTag == EncTagRequired {
+ return nil, errors.New("cbor: cannot set TagsMd to TagsForbidden when TimeTag is EncTagRequired")
+ }
+ if !opts.OmitEmpty.valid() {
+ return nil, errors.New("cbor: invalid OmitEmpty " + strconv.Itoa(int(opts.OmitEmpty)))
+ }
+ stringMajorType, err := opts.String.cborType()
+ if err != nil {
+ return nil, err
+ }
+ if !opts.FieldName.valid() {
+ return nil, errors.New("cbor: invalid FieldName " + strconv.Itoa(int(opts.FieldName)))
+ }
+ byteSliceLaterEncodingTag, err := opts.ByteSliceLaterFormat.encodingTag()
+ if err != nil {
+ return nil, err
+ }
+ if !opts.ByteArray.valid() {
+ return nil, errors.New("cbor: invalid ByteArray " + strconv.Itoa(int(opts.ByteArray)))
+ }
+ if !opts.BinaryMarshaler.valid() {
+ return nil, errors.New("cbor: invalid BinaryMarshaler " + strconv.Itoa(int(opts.BinaryMarshaler)))
+ }
+ em := encMode{
+ sort: opts.Sort,
+ shortestFloat: opts.ShortestFloat,
+ nanConvert: opts.NaNConvert,
+ infConvert: opts.InfConvert,
+ bigIntConvert: opts.BigIntConvert,
+ time: opts.Time,
+ timeTag: opts.TimeTag,
+ indefLength: opts.IndefLength,
+ nilContainers: opts.NilContainers,
+ tagsMd: opts.TagsMd,
+ omitEmpty: opts.OmitEmpty,
+ stringType: opts.String,
+ stringMajorType: stringMajorType,
+ fieldName: opts.FieldName,
+ byteSliceLaterFormat: opts.ByteSliceLaterFormat,
+ byteSliceLaterEncodingTag: byteSliceLaterEncodingTag,
+ byteArray: opts.ByteArray,
+ binaryMarshaler: opts.BinaryMarshaler,
+ }
+ return &em, nil
+}
+
+// EncMode is the main interface for CBOR encoding.
+type EncMode interface {
+ Marshal(v interface{}) ([]byte, error)
+ NewEncoder(w io.Writer) *Encoder
+ EncOptions() EncOptions
+}
+
+// UserBufferEncMode is an interface for CBOR encoding, which extends EncMode by
+// adding MarshalToBuffer to support user specified buffer rather than encoding
+// into the built-in buffer pool.
+type UserBufferEncMode interface {
+ EncMode
+ MarshalToBuffer(v interface{}, buf *bytes.Buffer) error
+
+	// This private method prevents users from implementing this
+	// interface, so future additions to it will not be
+	// breaking changes.
+	// See https://go.dev/blog/module-compatibility
+ unexport()
+}
+
+type encMode struct {
+ tags tagProvider
+ sort SortMode
+ shortestFloat ShortestFloatMode
+ nanConvert NaNConvertMode
+ infConvert InfConvertMode
+ bigIntConvert BigIntConvertMode
+ time TimeMode
+ timeTag EncTagMode
+ indefLength IndefLengthMode
+ nilContainers NilContainersMode
+ tagsMd TagsMode
+ omitEmpty OmitEmptyMode
+ stringType StringMode
+ stringMajorType cborType
+ fieldName FieldNameMode
+ byteSliceLaterFormat ByteSliceLaterFormatMode
+ byteSliceLaterEncodingTag uint64
+ byteArray ByteArrayMode
+ binaryMarshaler BinaryMarshalerMode
+}
+
+var defaultEncMode, _ = EncOptions{}.encMode()
+
+// These four decoding modes are used by getMarshalerDecMode.
+// maxNestedLevels, maxArrayElements, and maxMapPairs are
+// set to max allowed limits to avoid rejecting Marshaler
+// output that would have been the allowable output of a
+// non-Marshaler object that exceeds default limits.
+var (
+ marshalerForbidIndefLengthForbidTagsDecMode = decMode{
+ maxNestedLevels: maxMaxNestedLevels,
+ maxArrayElements: maxMaxArrayElements,
+ maxMapPairs: maxMaxMapPairs,
+ indefLength: IndefLengthForbidden,
+ tagsMd: TagsForbidden,
+ }
+
+ marshalerAllowIndefLengthForbidTagsDecMode = decMode{
+ maxNestedLevels: maxMaxNestedLevels,
+ maxArrayElements: maxMaxArrayElements,
+ maxMapPairs: maxMaxMapPairs,
+ indefLength: IndefLengthAllowed,
+ tagsMd: TagsForbidden,
+ }
+
+ marshalerForbidIndefLengthAllowTagsDecMode = decMode{
+ maxNestedLevels: maxMaxNestedLevels,
+ maxArrayElements: maxMaxArrayElements,
+ maxMapPairs: maxMaxMapPairs,
+ indefLength: IndefLengthForbidden,
+ tagsMd: TagsAllowed,
+ }
+
+ marshalerAllowIndefLengthAllowTagsDecMode = decMode{
+ maxNestedLevels: maxMaxNestedLevels,
+ maxArrayElements: maxMaxArrayElements,
+ maxMapPairs: maxMaxMapPairs,
+ indefLength: IndefLengthAllowed,
+ tagsMd: TagsAllowed,
+ }
+)
+
+// getMarshalerDecMode returns one of four existing decoding modes
+// which can be reused (safe for parallel use) for the purpose of
+// checking if data returned by Marshaler is well-formed.
+func getMarshalerDecMode(indefLength IndefLengthMode, tagsMd TagsMode) *decMode {
+ switch {
+ case indefLength == IndefLengthAllowed && tagsMd == TagsAllowed:
+ return &marshalerAllowIndefLengthAllowTagsDecMode
+
+ case indefLength == IndefLengthAllowed && tagsMd == TagsForbidden:
+ return &marshalerAllowIndefLengthForbidTagsDecMode
+
+ case indefLength == IndefLengthForbidden && tagsMd == TagsAllowed:
+ return &marshalerForbidIndefLengthAllowTagsDecMode
+
+ case indefLength == IndefLengthForbidden && tagsMd == TagsForbidden:
+ return &marshalerForbidIndefLengthForbidTagsDecMode
+
+ default:
+ // This should never happen, unless we add new options to
+ // IndefLengthMode or TagsMode without updating this function.
+ return &decMode{
+ maxNestedLevels: maxMaxNestedLevels,
+ maxArrayElements: maxMaxArrayElements,
+ maxMapPairs: maxMaxMapPairs,
+ indefLength: indefLength,
+ tagsMd: tagsMd,
+ }
+ }
+}
+
+// EncOptions returns user specified options used to create this EncMode.
+func (em *encMode) EncOptions() EncOptions {
+ return EncOptions{
+ Sort: em.sort,
+ ShortestFloat: em.shortestFloat,
+ NaNConvert: em.nanConvert,
+ InfConvert: em.infConvert,
+ BigIntConvert: em.bigIntConvert,
+ Time: em.time,
+ TimeTag: em.timeTag,
+ IndefLength: em.indefLength,
+ NilContainers: em.nilContainers,
+ TagsMd: em.tagsMd,
+ OmitEmpty: em.omitEmpty,
+ String: em.stringType,
+ FieldName: em.fieldName,
+ ByteSliceLaterFormat: em.byteSliceLaterFormat,
+ ByteArray: em.byteArray,
+ BinaryMarshaler: em.binaryMarshaler,
+ }
+}
+
+func (em *encMode) unexport() {}
+
+func (em *encMode) encTagBytes(t reflect.Type) []byte {
+ if em.tags != nil {
+ if tagItem := em.tags.getTagItemFromType(t); tagItem != nil {
+ return tagItem.cborTagNum
+ }
+ }
+ return nil
+}
+
+// Marshal returns the CBOR encoding of v using em encoding mode.
+//
+// See the documentation for Marshal for details.
+func (em *encMode) Marshal(v interface{}) ([]byte, error) {
+ e := getEncodeBuffer()
+
+ if err := encode(e, em, reflect.ValueOf(v)); err != nil {
+ putEncodeBuffer(e)
+ return nil, err
+ }
+
+ buf := make([]byte, e.Len())
+ copy(buf, e.Bytes())
+
+ putEncodeBuffer(e)
+ return buf, nil
+}
+
+// MarshalToBuffer encodes v into the provided buffer (instead of using the built-in buffer pool)
+// and uses em encoding mode.
+//
+// NOTE: Unlike Marshal, the buffer provided to MarshalToBuffer can contain
+// partially encoded data if an error is returned.
+//
+// See Marshal for more details.
+func (em *encMode) MarshalToBuffer(v interface{}, buf *bytes.Buffer) error {
+ if buf == nil {
+ return fmt.Errorf("cbor: encoding buffer provided by user is nil")
+ }
+ return encode(buf, em, reflect.ValueOf(v))
+}
+
+// NewEncoder returns a new encoder that writes to w using em EncMode.
+func (em *encMode) NewEncoder(w io.Writer) *Encoder {
+ return &Encoder{w: w, em: em}
+}
+
+// encodeBufferPool caches unused bytes.Buffer objects for later reuse.
+var encodeBufferPool = sync.Pool{
+ New: func() interface{} {
+ e := new(bytes.Buffer)
+ e.Grow(32) // TODO: make this configurable
+ return e
+ },
+}
+
+func getEncodeBuffer() *bytes.Buffer {
+ return encodeBufferPool.Get().(*bytes.Buffer)
+}
+
+func putEncodeBuffer(e *bytes.Buffer) {
+ e.Reset()
+ encodeBufferPool.Put(e)
+}
+
+type encodeFunc func(e *bytes.Buffer, em *encMode, v reflect.Value) error
+type isEmptyFunc func(em *encMode, v reflect.Value) (empty bool, err error)
+
+func encode(e *bytes.Buffer, em *encMode, v reflect.Value) error {
+ if !v.IsValid() {
+ // v is zero value
+ e.Write(cborNil)
+ return nil
+ }
+ vt := v.Type()
+ f, _ := getEncodeFunc(vt)
+ if f == nil {
+ return &UnsupportedTypeError{vt}
+ }
+
+ return f(e, em, v)
+}
+
+func encodeBool(e *bytes.Buffer, em *encMode, v reflect.Value) error {
+ if b := em.encTagBytes(v.Type()); b != nil {
+ e.Write(b)
+ }
+ b := cborFalse
+ if v.Bool() {
+ b = cborTrue
+ }
+ e.Write(b)
+ return nil
+}
+
+func encodeInt(e *bytes.Buffer, em *encMode, v reflect.Value) error {
+ if b := em.encTagBytes(v.Type()); b != nil {
+ e.Write(b)
+ }
+ i := v.Int()
+ if i >= 0 {
+ encodeHead(e, byte(cborTypePositiveInt), uint64(i))
+ return nil
+ }
+ i = i*(-1) - 1
+ encodeHead(e, byte(cborTypeNegativeInt), uint64(i))
+ return nil
+}
+
+func encodeUint(e *bytes.Buffer, em *encMode, v reflect.Value) error {
+ if b := em.encTagBytes(v.Type()); b != nil {
+ e.Write(b)
+ }
+ encodeHead(e, byte(cborTypePositiveInt), v.Uint())
+ return nil
+}
+
+func encodeFloat(e *bytes.Buffer, em *encMode, v reflect.Value) error {
+ if b := em.encTagBytes(v.Type()); b != nil {
+ e.Write(b)
+ }
+ f64 := v.Float()
+ if math.IsNaN(f64) {
+ return encodeNaN(e, em, v)
+ }
+ if math.IsInf(f64, 0) {
+ return encodeInf(e, em, v)
+ }
+ fopt := em.shortestFloat
+ if v.Kind() == reflect.Float64 && (fopt == ShortestFloatNone || cannotFitFloat32(f64)) {
+ // Encode float64
+ // Don't use encodeFloat64() because it cannot be inlined.
+ const argumentSize = 8
+ const headSize = 1 + argumentSize
+ var scratch [headSize]byte
+ scratch[0] = byte(cborTypePrimitives) | byte(additionalInformationAsFloat64)
+ binary.BigEndian.PutUint64(scratch[1:], math.Float64bits(f64))
+ e.Write(scratch[:])
+ return nil
+ }
+
+ f32 := float32(f64)
+ if fopt == ShortestFloat16 {
+ var f16 float16.Float16
+ p := float16.PrecisionFromfloat32(f32)
+ if p == float16.PrecisionExact {
+ // Roundtrip float32->float16->float32 test isn't needed.
+ f16 = float16.Fromfloat32(f32)
+ } else if p == float16.PrecisionUnknown {
+ // Try roundtrip float32->float16->float32 to determine if float32 can fit into float16.
+ f16 = float16.Fromfloat32(f32)
+ if f16.Float32() == f32 {
+ p = float16.PrecisionExact
+ }
+ }
+ if p == float16.PrecisionExact {
+ // Encode float16
+ // Don't use encodeFloat16() because it cannot be inlined.
+ const argumentSize = 2
+ const headSize = 1 + argumentSize
+ var scratch [headSize]byte
+ scratch[0] = byte(cborTypePrimitives) | additionalInformationAsFloat16
+ binary.BigEndian.PutUint16(scratch[1:], uint16(f16))
+ e.Write(scratch[:])
+ return nil
+ }
+ }
+
+ // Encode float32
+ // Don't use encodeFloat32() because it cannot be inlined.
+ const argumentSize = 4
+ const headSize = 1 + argumentSize
+ var scratch [headSize]byte
+ scratch[0] = byte(cborTypePrimitives) | additionalInformationAsFloat32
+ binary.BigEndian.PutUint32(scratch[1:], math.Float32bits(f32))
+ e.Write(scratch[:])
+ return nil
+}
+
+func encodeInf(e *bytes.Buffer, em *encMode, v reflect.Value) error {
+ f64 := v.Float()
+ switch em.infConvert {
+ case InfConvertReject:
+ return &UnsupportedValueError{msg: "floating-point infinity"}
+
+ case InfConvertFloat16:
+ if f64 > 0 {
+ e.Write(cborPositiveInfinity)
+ } else {
+ e.Write(cborNegativeInfinity)
+ }
+ return nil
+ }
+ if v.Kind() == reflect.Float64 {
+ return encodeFloat64(e, f64)
+ }
+ return encodeFloat32(e, float32(f64))
+}
+
+func encodeNaN(e *bytes.Buffer, em *encMode, v reflect.Value) error {
+ switch em.nanConvert {
+ case NaNConvert7e00:
+ e.Write(cborNaN)
+ return nil
+
+ case NaNConvertNone:
+ if v.Kind() == reflect.Float64 {
+ return encodeFloat64(e, v.Float())
+ }
+ f32 := float32NaNFromReflectValue(v)
+ return encodeFloat32(e, f32)
+
+ case NaNConvertReject:
+ return &UnsupportedValueError{msg: "floating-point NaN"}
+
+ default: // NaNConvertPreserveSignal, NaNConvertQuiet
+ if v.Kind() == reflect.Float64 {
+ f64 := v.Float()
+ f64bits := math.Float64bits(f64)
+ if em.nanConvert == NaNConvertQuiet && f64bits&(1<<51) == 0 {
+ f64bits |= 1 << 51 // Set quiet bit = 1
+ f64 = math.Float64frombits(f64bits)
+ }
+ // The lower 29 bits are dropped when converting from float64 to float32.
+ if f64bits&0x1fffffff != 0 {
+ // Encode NaN as float64 because dropped coef bits from float64 to float32 are not all 0s.
+ return encodeFloat64(e, f64)
+ }
+ // Create float32 from float64 manually because float32(f64) always turns on NaN's quiet bits.
+ sign := uint32(f64bits>>32) & (1 << 31)
+ exp := uint32(0x7f800000)
+ coef := uint32((f64bits & 0xfffffffffffff) >> 29)
+ f32bits := sign | exp | coef
+ f32 := math.Float32frombits(f32bits)
+ // The lower 13 bits are dropped when converting from float32 to float16.
+ if f32bits&0x1fff != 0 {
+ // Encode NaN as float32 because dropped coef bits from float32 to float16 are not all 0s.
+ return encodeFloat32(e, f32)
+ }
+ // Encode NaN as float16
+ f16, _ := float16.FromNaN32ps(f32) // Ignore err because it only returns error when f32 is not a NaN.
+ return encodeFloat16(e, f16)
+ }
+
+ f32 := float32NaNFromReflectValue(v)
+ f32bits := math.Float32bits(f32)
+ if em.nanConvert == NaNConvertQuiet && f32bits&(1<<22) == 0 {
+ f32bits |= 1 << 22 // Set quiet bit = 1
+ f32 = math.Float32frombits(f32bits)
+ }
+ // The lower 13 bits are dropped coef bits when converting from float32 to float16.
+ if f32bits&0x1fff != 0 {
+ // Encode NaN as float32 because dropped coef bits from float32 to float16 are not all 0s.
+ return encodeFloat32(e, f32)
+ }
+ f16, _ := float16.FromNaN32ps(f32) // Ignore err because it only returns error when f32 is not a NaN.
+ return encodeFloat16(e, f16)
+ }
+}
+
+func encodeFloat16(e *bytes.Buffer, f16 float16.Float16) error {
+ const argumentSize = 2
+ const headSize = 1 + argumentSize
+ var scratch [headSize]byte
+ scratch[0] = byte(cborTypePrimitives) | additionalInformationAsFloat16
+ binary.BigEndian.PutUint16(scratch[1:], uint16(f16))
+ e.Write(scratch[:])
+ return nil
+}
+
+func encodeFloat32(e *bytes.Buffer, f32 float32) error {
+ const argumentSize = 4
+ const headSize = 1 + argumentSize
+ var scratch [headSize]byte
+ scratch[0] = byte(cborTypePrimitives) | additionalInformationAsFloat32
+ binary.BigEndian.PutUint32(scratch[1:], math.Float32bits(f32))
+ e.Write(scratch[:])
+ return nil
+}
+
+func encodeFloat64(e *bytes.Buffer, f64 float64) error {
+ const argumentSize = 8
+ const headSize = 1 + argumentSize
+ var scratch [headSize]byte
+ scratch[0] = byte(cborTypePrimitives) | additionalInformationAsFloat64
+ binary.BigEndian.PutUint64(scratch[1:], math.Float64bits(f64))
+ e.Write(scratch[:])
+ return nil
+}
+
+func encodeByteString(e *bytes.Buffer, em *encMode, v reflect.Value) error {
+ vk := v.Kind()
+ if vk == reflect.Slice && v.IsNil() && em.nilContainers == NilContainerAsNull {
+ e.Write(cborNil)
+ return nil
+ }
+ if vk == reflect.Slice && v.Type().Elem().Kind() == reflect.Uint8 && em.byteSliceLaterEncodingTag != 0 {
+ encodeHead(e, byte(cborTypeTag), em.byteSliceLaterEncodingTag)
+ }
+ if b := em.encTagBytes(v.Type()); b != nil {
+ e.Write(b)
+ }
+ slen := v.Len()
+ if slen == 0 {
+ return e.WriteByte(byte(cborTypeByteString))
+ }
+ encodeHead(e, byte(cborTypeByteString), uint64(slen))
+ if vk == reflect.Array {
+ for i := 0; i < slen; i++ {
+ e.WriteByte(byte(v.Index(i).Uint()))
+ }
+ return nil
+ }
+ e.Write(v.Bytes())
+ return nil
+}
+
+func encodeString(e *bytes.Buffer, em *encMode, v reflect.Value) error {
+ if b := em.encTagBytes(v.Type()); b != nil {
+ e.Write(b)
+ }
+ s := v.String()
+ encodeHead(e, byte(em.stringMajorType), uint64(len(s)))
+ e.WriteString(s)
+ return nil
+}
+
+type arrayEncodeFunc struct {
+ f encodeFunc
+}
+
+func (ae arrayEncodeFunc) encode(e *bytes.Buffer, em *encMode, v reflect.Value) error {
+ if em.byteArray == ByteArrayToByteSlice && v.Type().Elem().Kind() == reflect.Uint8 {
+ return encodeByteString(e, em, v)
+ }
+ if v.Kind() == reflect.Slice && v.IsNil() && em.nilContainers == NilContainerAsNull {
+ e.Write(cborNil)
+ return nil
+ }
+ if b := em.encTagBytes(v.Type()); b != nil {
+ e.Write(b)
+ }
+ alen := v.Len()
+ if alen == 0 {
+ return e.WriteByte(byte(cborTypeArray))
+ }
+ encodeHead(e, byte(cborTypeArray), uint64(alen))
+ for i := 0; i < alen; i++ {
+ if err := ae.f(e, em, v.Index(i)); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// encodeKeyValueFunc encodes key/value pairs in map (v).
+// If kvs is provided (with the same length as v), the offsets of each encoded key and value are stored in kvs.
+// kvs is used for canonical encoding of map.
+type encodeKeyValueFunc func(e *bytes.Buffer, em *encMode, v reflect.Value, kvs []keyValue) error
+
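+// mapEncodeFunc encodes Go maps as CBOR maps, sorting the encoded key/value pairs when the
+// encode mode requires a deterministic order.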
+type mapEncodeFunc struct {
+ e encodeKeyValueFunc
+}
+
+func (me mapEncodeFunc) encode(e *bytes.Buffer, em *encMode, v reflect.Value) error {
+ if v.IsNil() && em.nilContainers == NilContainerAsNull {
+ e.Write(cborNil)
+ return nil
+ }
+ if b := em.encTagBytes(v.Type()); b != nil {
+ e.Write(b)
+ }
+ mlen := v.Len()
+ if mlen == 0 {
+ return e.WriteByte(byte(cborTypeMap))
+ }
+
+ encodeHead(e, byte(cborTypeMap), uint64(mlen))
+ if em.sort == SortNone || em.sort == SortFastShuffle || mlen <= 1 {
+ return me.e(e, em, v, nil)
+ }
+
+ kvsp := getKeyValues(v.Len()) // for sorting keys
+ defer putKeyValues(kvsp)
+ kvs := *kvsp
+
+ kvBeginOffset := e.Len()
+ if err := me.e(e, em, v, kvs); err != nil {
+ return err
+ }
+ kvTotalLen := e.Len() - kvBeginOffset
+
+ // Use the capacity at the tail of the encode buffer as a staging area to rearrange the
+ // encoded pairs into sorted order.
+ e.Grow(kvTotalLen)
+ tmp := e.Bytes()[e.Len() : e.Len()+kvTotalLen] // Can use e.AvailableBuffer() in Go 1.21+.
+ dst := e.Bytes()[kvBeginOffset:]
+
+ if em.sort == SortBytewiseLexical {
+ sort.Sort(&bytewiseKeyValueSorter{kvs: kvs, data: dst})
+ } else {
+ sort.Sort(&lengthFirstKeyValueSorter{kvs: kvs, data: dst})
+ }
+
+ // This is where the encoded bytes are actually rearranged in the output buffer to reflect
+ // the desired order.
+ sortedOffset := 0
+ for _, kv := range kvs {
+ copy(tmp[sortedOffset:], dst[kv.offset:kv.nextOffset])
+ sortedOffset += kv.nextOffset - kv.offset
+ }
+ copy(dst, tmp[:kvTotalLen])
+
+ return nil
+
+}
+
+// keyValue is the position of an encoded pair in a buffer. All offsets are zero-based and relative
+// to the first byte of the first encoded pair.
+type keyValue struct {
+ offset int
+ valueOffset int
+ nextOffset int
+}
+
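+// bytewiseKeyValueSorter orders encoded map keys bytewise-lexicographically, as required by the
+// RFC 8949 core deterministic encoding.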
+type bytewiseKeyValueSorter struct {
+ kvs []keyValue
+ data []byte
+}
+
+func (x *bytewiseKeyValueSorter) Len() int {
+ return len(x.kvs)
+}
+
+func (x *bytewiseKeyValueSorter) Swap(i, j int) {
+ x.kvs[i], x.kvs[j] = x.kvs[j], x.kvs[i]
+}
+
+func (x *bytewiseKeyValueSorter) Less(i, j int) bool {
+ kvi, kvj := x.kvs[i], x.kvs[j]
+ return bytes.Compare(x.data[kvi.offset:kvi.valueOffset], x.data[kvj.offset:kvj.valueOffset]) <= 0
+}
+
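+// lengthFirstKeyValueSorter orders encoded map keys by length first and bytewise second, as
+// required by the RFC 7049 canonical encoding.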
+type lengthFirstKeyValueSorter struct {
+ kvs []keyValue
+ data []byte
+}
+
+func (x *lengthFirstKeyValueSorter) Len() int {
+ return len(x.kvs)
+}
+
+func (x *lengthFirstKeyValueSorter) Swap(i, j int) {
+ x.kvs[i], x.kvs[j] = x.kvs[j], x.kvs[i]
+}
+
+func (x *lengthFirstKeyValueSorter) Less(i, j int) bool {
+ kvi, kvj := x.kvs[i], x.kvs[j]
+ if keyLengthDifference := (kvi.valueOffset - kvi.offset) - (kvj.valueOffset - kvj.offset); keyLengthDifference != 0 {
+ return keyLengthDifference < 0
+ }
+ return bytes.Compare(x.data[kvi.offset:kvi.valueOffset], x.data[kvj.offset:kvj.valueOffset]) <= 0
+}
+
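+// keyValuePool reuses keyValue slices across map encodings so that sorting map entries does not
+// allocate a fresh scratch slice every time.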
+var keyValuePool = sync.Pool{}
+
+func getKeyValues(length int) *[]keyValue {
+ v := keyValuePool.Get()
+ if v == nil {
+ y := make([]keyValue, length)
+ return &y
+ }
+ x := v.(*[]keyValue)
+ if cap(*x) >= length {
+ *x = (*x)[:length]
+ return x
+ }
+ // []keyValue from the pool does not have enough capacity.
+ // Return it back to the pool and create a new one.
+ keyValuePool.Put(x)
+ y := make([]keyValue, length)
+ return &y
+}
+
+func putKeyValues(x *[]keyValue) {
+ *x = (*x)[:0]
+ keyValuePool.Put(x)
+}
+
+func encodeStructToArray(e *bytes.Buffer, em *encMode, v reflect.Value) (err error) {
+ structType, err := getEncodingStructType(v.Type())
+ if err != nil {
+ return err
+ }
+
+ if b := em.encTagBytes(v.Type()); b != nil {
+ e.Write(b)
+ }
+
+ flds := structType.fields
+
+ encodeHead(e, byte(cborTypeArray), uint64(len(flds)))
+ for i := 0; i < len(flds); i++ {
+ f := flds[i]
+
+ var fv reflect.Value
+ if len(f.idx) == 1 {
+ fv = v.Field(f.idx[0])
+ } else {
+ // Get embedded field value. No error is expected.
+ fv, _ = getFieldValue(v, f.idx, func(reflect.Value) (reflect.Value, error) {
+ // Write CBOR nil for null pointer to embedded struct
+ e.Write(cborNil)
+ return reflect.Value{}, nil
+ })
+ if !fv.IsValid() {
+ continue
+ }
+ }
+
+ if err := f.ef(e, em, fv); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func encodeStruct(e *bytes.Buffer, em *encMode, v reflect.Value) (err error) {
+ structType, err := getEncodingStructType(v.Type())
+ if err != nil {
+ return err
+ }
+
+ flds := structType.getFields(em)
+
+ start := 0
+ if em.sort == SortFastShuffle && len(flds) > 0 {
+ start = rand.Intn(len(flds)) //nolint:gosec // Don't need a CSPRNG for deck cutting.
+ }
+
+ if b := em.encTagBytes(v.Type()); b != nil {
+ e.Write(b)
+ }
+
+ // Encode head with struct field count.
+ // Head is rewritten later if actual encoded field count is different from struct field count.
+ encodedHeadLen := encodeHead(e, byte(cborTypeMap), uint64(len(flds)))
+
+ kvbegin := e.Len()
+ kvcount := 0
+ for offset := 0; offset < len(flds); offset++ {
+ f := flds[(start+offset)%len(flds)]
+
+ var fv reflect.Value
+ if len(f.idx) == 1 {
+ fv = v.Field(f.idx[0])
+ } else {
+ // Get embedded field value. No error is expected.
+ fv, _ = getFieldValue(v, f.idx, func(reflect.Value) (reflect.Value, error) {
+ // Skip null pointer to embedded struct
+ return reflect.Value{}, nil
+ })
+ if !fv.IsValid() {
+ continue
+ }
+ }
+ if f.omitEmpty {
+ empty, err := f.ief(em, fv)
+ if err != nil {
+ return err
+ }
+ if empty {
+ continue
+ }
+ }
+
+ if !f.keyAsInt && em.fieldName == FieldNameToByteString {
+ e.Write(f.cborNameByteString)
+ } else { // int or text string
+ e.Write(f.cborName)
+ }
+
+ if err := f.ef(e, em, fv); err != nil {
+ return err
+ }
+
+ kvcount++
+ }
+
+ if len(flds) == kvcount {
+ // Encoded element count in head is the same as actual element count.
+ return nil
+ }
+
+ // Overwrite the bytes that were reserved for the head before encoding the map entries.
+ var actualHeadLen int
+ {
+ headbuf := *bytes.NewBuffer(e.Bytes()[kvbegin-encodedHeadLen : kvbegin-encodedHeadLen : kvbegin])
+ actualHeadLen = encodeHead(&headbuf, byte(cborTypeMap), uint64(kvcount))
+ }
+
+ if actualHeadLen == encodedHeadLen {
+ // The bytes reserved for the encoded head were exactly the right size, so the
+ // encoded entries are already in their final positions.
+ return nil
+ }
+
+ // We reserved more bytes than needed for the encoded head, based on the number of fields
+ // encoded. The encoded entries are offset to the right by the number of excess reserved
+ // bytes. Shift the entries left to remove the gap.
+ excessReservedBytes := encodedHeadLen - actualHeadLen
+ dst := e.Bytes()[kvbegin-excessReservedBytes : e.Len()-excessReservedBytes]
+ src := e.Bytes()[kvbegin:e.Len()]
+ copy(dst, src)
+
+ // After shifting, the excess bytes are at the end of the output buffer and they are
+ // garbage.
+ e.Truncate(e.Len() - excessReservedBytes)
+ return nil
+}
+
+func encodeIntf(e *bytes.Buffer, em *encMode, v reflect.Value) error {
+ if v.IsNil() {
+ e.Write(cborNil)
+ return nil
+ }
+ return encode(e, em, v.Elem())
+}
+
+func encodeTime(e *bytes.Buffer, em *encMode, v reflect.Value) error {
+ t := v.Interface().(time.Time)
+ if t.IsZero() {
+ e.Write(cborNil) // Even if tag is required, encode as CBOR null.
+ return nil
+ }
+ if em.timeTag == EncTagRequired {
+ tagNumber := 1
+ if em.time == TimeRFC3339 || em.time == TimeRFC3339Nano {
+ tagNumber = 0
+ }
+ encodeHead(e, byte(cborTypeTag), uint64(tagNumber))
+ }
+ switch em.time {
+ case TimeUnix:
+ secs := t.Unix()
+ return encodeInt(e, em, reflect.ValueOf(secs))
+
+ case TimeUnixMicro:
+ t = t.UTC().Round(time.Microsecond)
+ f := float64(t.UnixNano()) / 1e9
+ return encodeFloat(e, em, reflect.ValueOf(f))
+
+ case TimeUnixDynamic:
+ t = t.UTC().Round(time.Microsecond)
+ secs, nsecs := t.Unix(), uint64(t.Nanosecond())
+ if nsecs == 0 {
+ return encodeInt(e, em, reflect.ValueOf(secs))
+ }
+ f := float64(secs) + float64(nsecs)/1e9
+ return encodeFloat(e, em, reflect.ValueOf(f))
+
+ case TimeRFC3339:
+ s := t.Format(time.RFC3339)
+ return encodeString(e, em, reflect.ValueOf(s))
+
+ default: // TimeRFC3339Nano
+ s := t.Format(time.RFC3339Nano)
+ return encodeString(e, em, reflect.ValueOf(s))
+ }
+}
+
+func encodeBigInt(e *bytes.Buffer, em *encMode, v reflect.Value) error {
+ if em.bigIntConvert == BigIntConvertReject {
+ return &UnsupportedTypeError{Type: typeBigInt}
+ }
+
+ vbi := v.Interface().(big.Int)
+ sign := vbi.Sign()
+ bi := new(big.Int).SetBytes(vbi.Bytes()) // bi is absolute value of v
+ if sign < 0 {
+ // For negative number, convert to CBOR encoded number (-v-1).
+ bi.Sub(bi, big.NewInt(1))
+ }
+
+ if em.bigIntConvert == BigIntConvertShortest {
+ if bi.IsUint64() {
+ if sign >= 0 {
+ // Encode as CBOR pos int (major type 0)
+ encodeHead(e, byte(cborTypePositiveInt), bi.Uint64())
+ return nil
+ }
+ // Encode as CBOR neg int (major type 1)
+ encodeHead(e, byte(cborTypeNegativeInt), bi.Uint64())
+ return nil
+ }
+ }
+
+ tagNum := 2
+ if sign < 0 {
+ tagNum = 3
+ }
+ // Write tag number
+ encodeHead(e, byte(cborTypeTag), uint64(tagNum))
+ // Write bignum byte string
+ b := bi.Bytes()
+ encodeHead(e, byte(cborTypeByteString), uint64(len(b)))
+ e.Write(b)
+ return nil
+}
+
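+// binaryMarshalerEncoder encodes values implementing encoding.BinaryMarshaler as CBOR byte
+// strings, falling back to the alternate encode/isEmpty funcs when the encode mode does not
+// use BinaryMarshalerByteString.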
+type binaryMarshalerEncoder struct {
+ alternateEncode encodeFunc
+ alternateIsEmpty isEmptyFunc
+}
+
+func (bme binaryMarshalerEncoder) encode(e *bytes.Buffer, em *encMode, v reflect.Value) error {
+ if em.binaryMarshaler != BinaryMarshalerByteString {
+ return bme.alternateEncode(e, em, v)
+ }
+
+ vt := v.Type()
+ m, ok := v.Interface().(encoding.BinaryMarshaler)
+ if !ok {
+ pv := reflect.New(vt)
+ pv.Elem().Set(v)
+ m = pv.Interface().(encoding.BinaryMarshaler)
+ }
+ data, err := m.MarshalBinary()
+ if err != nil {
+ return err
+ }
+ if b := em.encTagBytes(vt); b != nil {
+ e.Write(b)
+ }
+ encodeHead(e, byte(cborTypeByteString), uint64(len(data)))
+ e.Write(data)
+ return nil
+}
+
+func (bme binaryMarshalerEncoder) isEmpty(em *encMode, v reflect.Value) (bool, error) {
+ if em.binaryMarshaler != BinaryMarshalerByteString {
+ return bme.alternateIsEmpty(em, v)
+ }
+
+ m, ok := v.Interface().(encoding.BinaryMarshaler)
+ if !ok {
+ pv := reflect.New(v.Type())
+ pv.Elem().Set(v)
+ m = pv.Interface().(encoding.BinaryMarshaler)
+ }
+ data, err := m.MarshalBinary()
+ if err != nil {
+ return false, err
+ }
+ return len(data) == 0, nil
+}
+
+func encodeMarshalerType(e *bytes.Buffer, em *encMode, v reflect.Value) error {
+ if em.tagsMd == TagsForbidden && v.Type() == typeRawTag {
+ return errors.New("cbor: cannot encode cbor.RawTag when TagsMd is TagsForbidden")
+ }
+ m, ok := v.Interface().(Marshaler)
+ if !ok {
+ pv := reflect.New(v.Type())
+ pv.Elem().Set(v)
+ m = pv.Interface().(Marshaler)
+ }
+ data, err := m.MarshalCBOR()
+ if err != nil {
+ return err
+ }
+
+ // Verify returned CBOR data item from MarshalCBOR() is well-formed and passes tag validity for builtin tags 0-3.
+ d := decoder{data: data, dm: getMarshalerDecMode(em.indefLength, em.tagsMd)}
+ err = d.wellformed(false, true)
+ if err != nil {
+ return &MarshalerError{typ: v.Type(), err: err}
+ }
+
+ e.Write(data)
+ return nil
+}
+
+func encodeTag(e *bytes.Buffer, em *encMode, v reflect.Value) error {
+ if em.tagsMd == TagsForbidden {
+ return errors.New("cbor: cannot encode cbor.Tag when TagsMd is TagsForbidden")
+ }
+
+ t := v.Interface().(Tag)
+
+ if t.Number == 0 && t.Content == nil {
+ // Marshal uninitialized cbor.Tag
+ e.Write(cborNil)
+ return nil
+ }
+
+ // Marshal tag number
+ encodeHead(e, byte(cborTypeTag), t.Number)
+
+ vem := *em // shallow copy
+
+ // For built-in tags, disable settings that may introduce tag validity errors when
+ // marshaling certain Content values.
+ switch t.Number {
+ case tagNumRFC3339Time:
+ vem.stringType = StringToTextString
+ vem.stringMajorType = cborTypeTextString
+ case tagNumUnsignedBignum, tagNumNegativeBignum:
+ vem.byteSliceLaterFormat = ByteSliceLaterFormatNone
+ vem.byteSliceLaterEncodingTag = 0
+ }
+
+ // Marshal tag content
+ return encode(e, &vem, reflect.ValueOf(t.Content))
+}
+
+// encodeHead writes CBOR head of specified type t and returns number of bytes written.
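+// A head consists of the initial byte (major type in the high 3 bits, additional information in
+// the low 5 bits), optionally followed by a 1-, 2-, 4-, or 8-byte big-endian argument.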
+func encodeHead(e *bytes.Buffer, t byte, n uint64) int {
+ if n <= maxAdditionalInformationWithoutArgument {
+ const headSize = 1
+ e.WriteByte(t | byte(n))
+ return headSize
+ }
+
+ if n <= math.MaxUint8 {
+ const headSize = 2
+ scratch := [headSize]byte{
+ t | byte(additionalInformationWith1ByteArgument),
+ byte(n),
+ }
+ e.Write(scratch[:])
+ return headSize
+ }
+
+ if n <= math.MaxUint16 {
+ const headSize = 3
+ var scratch [headSize]byte
+ scratch[0] = t | byte(additionalInformationWith2ByteArgument)
+ binary.BigEndian.PutUint16(scratch[1:], uint16(n))
+ e.Write(scratch[:])
+ return headSize
+ }
+
+ if n <= math.MaxUint32 {
+ const headSize = 5
+ var scratch [headSize]byte
+ scratch[0] = t | byte(additionalInformationWith4ByteArgument)
+ binary.BigEndian.PutUint32(scratch[1:], uint32(n))
+ e.Write(scratch[:])
+ return headSize
+ }
+
+ const headSize = 9
+ var scratch [headSize]byte
+ scratch[0] = t | byte(additionalInformationWith8ByteArgument)
+ binary.BigEndian.PutUint64(scratch[1:], n)
+ e.Write(scratch[:])
+ return headSize
+}
+
+var (
+ typeMarshaler = reflect.TypeOf((*Marshaler)(nil)).Elem()
+ typeBinaryMarshaler = reflect.TypeOf((*encoding.BinaryMarshaler)(nil)).Elem()
+ typeRawMessage = reflect.TypeOf(RawMessage(nil))
+ typeByteString = reflect.TypeOf(ByteString(""))
+)
+
+func getEncodeFuncInternal(t reflect.Type) (ef encodeFunc, ief isEmptyFunc) {
+ k := t.Kind()
+ if k == reflect.Ptr {
+ return getEncodeIndirectValueFunc(t), isEmptyPtr
+ }
+ switch t {
+ case typeSimpleValue:
+ return encodeMarshalerType, isEmptyUint
+
+ case typeTag:
+ return encodeTag, alwaysNotEmpty
+
+ case typeTime:
+ return encodeTime, alwaysNotEmpty
+
+ case typeBigInt:
+ return encodeBigInt, alwaysNotEmpty
+
+ case typeRawMessage:
+ return encodeMarshalerType, isEmptySlice
+
+ case typeByteString:
+ return encodeMarshalerType, isEmptyString
+ }
+ if reflect.PtrTo(t).Implements(typeMarshaler) {
+ return encodeMarshalerType, alwaysNotEmpty
+ }
+ if reflect.PtrTo(t).Implements(typeBinaryMarshaler) {
+ defer func() {
+ // capture encoding method used for modes that disable BinaryMarshaler
+ bme := binaryMarshalerEncoder{
+ alternateEncode: ef,
+ alternateIsEmpty: ief,
+ }
+ ef = bme.encode
+ ief = bme.isEmpty
+ }()
+ }
+ switch k {
+ case reflect.Bool:
+ return encodeBool, isEmptyBool
+
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return encodeInt, isEmptyInt
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ return encodeUint, isEmptyUint
+
+ case reflect.Float32, reflect.Float64:
+ return encodeFloat, isEmptyFloat
+
+ case reflect.String:
+ return encodeString, isEmptyString
+
+ case reflect.Slice:
+ if t.Elem().Kind() == reflect.Uint8 {
+ return encodeByteString, isEmptySlice
+ }
+ fallthrough
+
+ case reflect.Array:
+ f, _ := getEncodeFunc(t.Elem())
+ if f == nil {
+ return nil, nil
+ }
+ return arrayEncodeFunc{f: f}.encode, isEmptySlice
+
+ case reflect.Map:
+ f := getEncodeMapFunc(t)
+ if f == nil {
+ return nil, nil
+ }
+ return f, isEmptyMap
+
+ case reflect.Struct:
+ // Get struct's special field "_" tag options
+ if f, ok := t.FieldByName("_"); ok {
+ tag := f.Tag.Get("cbor")
+ if tag != "-" {
+ if hasToArrayOption(tag) {
+ return encodeStructToArray, isEmptyStruct
+ }
+ }
+ }
+ return encodeStruct, isEmptyStruct
+
+ case reflect.Interface:
+ return encodeIntf, isEmptyIntf
+ }
+ return nil, nil
+}
+
+func getEncodeIndirectValueFunc(t reflect.Type) encodeFunc {
+ for t.Kind() == reflect.Ptr {
+ t = t.Elem()
+ }
+ f, _ := getEncodeFunc(t)
+ if f == nil {
+ return nil
+ }
+ return func(e *bytes.Buffer, em *encMode, v reflect.Value) error {
+ for v.Kind() == reflect.Ptr && !v.IsNil() {
+ v = v.Elem()
+ }
+ if v.Kind() == reflect.Ptr && v.IsNil() {
+ e.Write(cborNil)
+ return nil
+ }
+ return f(e, em, v)
+ }
+}
+
+func alwaysNotEmpty(_ *encMode, _ reflect.Value) (empty bool, err error) {
+ return false, nil
+}
+
+func isEmptyBool(_ *encMode, v reflect.Value) (bool, error) {
+ return !v.Bool(), nil
+}
+
+func isEmptyInt(_ *encMode, v reflect.Value) (bool, error) {
+ return v.Int() == 0, nil
+}
+
+func isEmptyUint(_ *encMode, v reflect.Value) (bool, error) {
+ return v.Uint() == 0, nil
+}
+
+func isEmptyFloat(_ *encMode, v reflect.Value) (bool, error) {
+ return v.Float() == 0.0, nil
+}
+
+func isEmptyString(_ *encMode, v reflect.Value) (bool, error) {
+ return v.Len() == 0, nil
+}
+
+func isEmptySlice(_ *encMode, v reflect.Value) (bool, error) {
+ return v.Len() == 0, nil
+}
+
+func isEmptyMap(_ *encMode, v reflect.Value) (bool, error) {
+ return v.Len() == 0, nil
+}
+
+func isEmptyPtr(_ *encMode, v reflect.Value) (bool, error) {
+ return v.IsNil(), nil
+}
+
+func isEmptyIntf(_ *encMode, v reflect.Value) (bool, error) {
+ return v.IsNil(), nil
+}
+
+func isEmptyStruct(em *encMode, v reflect.Value) (bool, error) {
+ structType, err := getEncodingStructType(v.Type())
+ if err != nil {
+ return false, err
+ }
+
+ if em.omitEmpty == OmitEmptyGoValue {
+ return false, nil
+ }
+
+ if structType.toArray {
+ return len(structType.fields) == 0, nil
+ }
+
+ if len(structType.fields) > len(structType.omitEmptyFieldsIdx) {
+ return false, nil
+ }
+
+ for _, i := range structType.omitEmptyFieldsIdx {
+ f := structType.fields[i]
+
+ // Get field value
+ var fv reflect.Value
+ if len(f.idx) == 1 {
+ fv = v.Field(f.idx[0])
+ } else {
+ // Get embedded field value. No error is expected.
+ fv, _ = getFieldValue(v, f.idx, func(reflect.Value) (reflect.Value, error) {
+ // Skip null pointer to embedded struct
+ return reflect.Value{}, nil
+ })
+ if !fv.IsValid() {
+ continue
+ }
+ }
+
+ empty, err := f.ief(em, fv)
+ if err != nil {
+ return false, err
+ }
+ if !empty {
+ return false, nil
+ }
+ }
+ return true, nil
+}
+
+func cannotFitFloat32(f64 float64) bool {
+ f32 := float32(f64)
+ return float64(f32) != f64
+}
+
+// float32NaNFromReflectValue extracts float32 NaN from reflect.Value while preserving NaN's quiet bit.
+func float32NaNFromReflectValue(v reflect.Value) float32 {
+ // Keith Randall's workaround for issue https://github.com/golang/go/issues/36400
+ p := reflect.New(v.Type())
+ p.Elem().Set(v)
+ f32 := p.Convert(reflect.TypeOf((*float32)(nil))).Elem().Interface().(float32)
+ return f32
+}
diff --git a/vendor/github.com/fxamacker/cbor/v2/encode_map.go b/vendor/github.com/fxamacker/cbor/v2/encode_map.go
new file mode 100644
index 0000000000..8b4b4bbc59
--- /dev/null
+++ b/vendor/github.com/fxamacker/cbor/v2/encode_map.go
@@ -0,0 +1,94 @@
+// Copyright (c) Faye Amacker. All rights reserved.
+// Licensed under the MIT License. See LICENSE in the project root for license information.
+
+//go:build go1.20
+
+package cbor
+
+import (
+ "bytes"
+ "reflect"
+ "sync"
+)
+
+type mapKeyValueEncodeFunc struct {
+ kf, ef encodeFunc
+ kpool, vpool sync.Pool
+}
+
+func (me *mapKeyValueEncodeFunc) encodeKeyValues(e *bytes.Buffer, em *encMode, v reflect.Value, kvs []keyValue) error {
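+	// The pooled reflect.Values are reused via SetIterKey/SetIterValue so that iterating the
+	// map does not allocate a new key and value for every entry.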
+ iterk := me.kpool.Get().(*reflect.Value)
+ defer func() {
+ iterk.SetZero()
+ me.kpool.Put(iterk)
+ }()
+ iterv := me.vpool.Get().(*reflect.Value)
+ defer func() {
+ iterv.SetZero()
+ me.vpool.Put(iterv)
+ }()
+
+ if kvs == nil {
+ for i, iter := 0, v.MapRange(); iter.Next(); i++ {
+ iterk.SetIterKey(iter)
+ iterv.SetIterValue(iter)
+
+ if err := me.kf(e, em, *iterk); err != nil {
+ return err
+ }
+ if err := me.ef(e, em, *iterv); err != nil {
+ return err
+ }
+ }
+ return nil
+ }
+
+ initial := e.Len()
+ for i, iter := 0, v.MapRange(); iter.Next(); i++ {
+ iterk.SetIterKey(iter)
+ iterv.SetIterValue(iter)
+
+ offset := e.Len()
+ if err := me.kf(e, em, *iterk); err != nil {
+ return err
+ }
+ valueOffset := e.Len()
+ if err := me.ef(e, em, *iterv); err != nil {
+ return err
+ }
+ kvs[i] = keyValue{
+ offset: offset - initial,
+ valueOffset: valueOffset - initial,
+ nextOffset: e.Len() - initial,
+ }
+ }
+
+ return nil
+}
+
+func getEncodeMapFunc(t reflect.Type) encodeFunc {
+ kf, _ := getEncodeFunc(t.Key())
+ ef, _ := getEncodeFunc(t.Elem())
+ if kf == nil || ef == nil {
+ return nil
+ }
+ mkv := &mapKeyValueEncodeFunc{
+ kf: kf,
+ ef: ef,
+ kpool: sync.Pool{
+ New: func() interface{} {
+ rk := reflect.New(t.Key()).Elem()
+ return &rk
+ },
+ },
+ vpool: sync.Pool{
+ New: func() interface{} {
+ rv := reflect.New(t.Elem()).Elem()
+ return &rv
+ },
+ },
+ }
+ return mapEncodeFunc{
+ e: mkv.encodeKeyValues,
+ }.encode
+}
diff --git a/vendor/github.com/fxamacker/cbor/v2/encode_map_go117.go b/vendor/github.com/fxamacker/cbor/v2/encode_map_go117.go
new file mode 100644
index 0000000000..31c39336dd
--- /dev/null
+++ b/vendor/github.com/fxamacker/cbor/v2/encode_map_go117.go
@@ -0,0 +1,60 @@
+// Copyright (c) Faye Amacker. All rights reserved.
+// Licensed under the MIT License. See LICENSE in the project root for license information.
+
+//go:build !go1.20
+
+package cbor
+
+import (
+ "bytes"
+ "reflect"
+)
+
+type mapKeyValueEncodeFunc struct {
+ kf, ef encodeFunc
+}
+
+func (me *mapKeyValueEncodeFunc) encodeKeyValues(e *bytes.Buffer, em *encMode, v reflect.Value, kvs []keyValue) error {
+ if kvs == nil {
+ for i, iter := 0, v.MapRange(); iter.Next(); i++ {
+ if err := me.kf(e, em, iter.Key()); err != nil {
+ return err
+ }
+ if err := me.ef(e, em, iter.Value()); err != nil {
+ return err
+ }
+ }
+ return nil
+ }
+
+ initial := e.Len()
+ for i, iter := 0, v.MapRange(); iter.Next(); i++ {
+ offset := e.Len()
+ if err := me.kf(e, em, iter.Key()); err != nil {
+ return err
+ }
+ valueOffset := e.Len()
+ if err := me.ef(e, em, iter.Value()); err != nil {
+ return err
+ }
+ kvs[i] = keyValue{
+ offset: offset - initial,
+ valueOffset: valueOffset - initial,
+ nextOffset: e.Len() - initial,
+ }
+ }
+
+ return nil
+}
+
+func getEncodeMapFunc(t reflect.Type) encodeFunc {
+ kf, _ := getEncodeFunc(t.Key())
+ ef, _ := getEncodeFunc(t.Elem())
+ if kf == nil || ef == nil {
+ return nil
+ }
+ mkv := &mapKeyValueEncodeFunc{kf: kf, ef: ef}
+ return mapEncodeFunc{
+ e: mkv.encodeKeyValues,
+ }.encode
+}
diff --git a/vendor/github.com/fxamacker/cbor/v2/simplevalue.go b/vendor/github.com/fxamacker/cbor/v2/simplevalue.go
new file mode 100644
index 0000000000..de175cee4a
--- /dev/null
+++ b/vendor/github.com/fxamacker/cbor/v2/simplevalue.go
@@ -0,0 +1,69 @@
+package cbor
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+)
+
+// SimpleValue represents a CBOR simple value.
+// A CBOR simple value is:
+// - an extension point like CBOR tag.
+// - a subset of CBOR major type 7 that isn't floating-point.
+// - "identified by a number between 0 and 255, but distinct from that number itself".
+// For example, "a simple value 2 is not equivalent to an integer 2" as a CBOR map key.
+//
+// CBOR simple values identified by 20..23 are: "false", "true", "null", and "undefined".
+// Other CBOR simple values are currently unassigned/reserved by IANA.
+type SimpleValue uint8
+
+var (
+ typeSimpleValue = reflect.TypeOf(SimpleValue(0))
+)
+
+// MarshalCBOR encodes SimpleValue as CBOR simple value (major type 7).
+func (sv SimpleValue) MarshalCBOR() ([]byte, error) {
+ // RFC 8949 3.3. Floating-Point Numbers and Values with No Content says:
+ // "An encoder MUST NOT issue two-byte sequences that start with 0xf8
+ // (major type 7, additional information 24) and continue with a byte
+ // less than 0x20 (32 decimal). Such sequences are not well-formed.
+ // (This implies that an encoder cannot encode false, true, null, or
+ // undefined in two-byte sequences and that only the one-byte variants
+ // of these are well-formed; more generally speaking, each simple value
+ // only has a single representation variant)."
+
+ switch {
+ case sv <= maxSimpleValueInAdditionalInformation:
+ return []byte{byte(cborTypePrimitives) | byte(sv)}, nil
+
+ case sv >= minSimpleValueIn1ByteArgument:
+ return []byte{byte(cborTypePrimitives) | additionalInformationWith1ByteArgument, byte(sv)}, nil
+
+ default:
+ return nil, &UnsupportedValueError{msg: fmt.Sprintf("SimpleValue(%d)", sv)}
+ }
+}
+
+// UnmarshalCBOR decodes CBOR simple value (major type 7) to SimpleValue.
+func (sv *SimpleValue) UnmarshalCBOR(data []byte) error {
+ if sv == nil {
+ return errors.New("cbor.SimpleValue: UnmarshalCBOR on nil pointer")
+ }
+
+ d := decoder{data: data, dm: defaultDecMode}
+
+ typ, ai, val := d.getHead()
+
+ if typ != cborTypePrimitives {
+ return &UnmarshalTypeError{CBORType: typ.String(), GoType: "SimpleValue"}
+ }
+ if ai > additionalInformationWith1ByteArgument {
+ return &UnmarshalTypeError{CBORType: typ.String(), GoType: "SimpleValue", errorMsg: "not simple values"}
+ }
+
+ // It is safe to cast val to uint8 here because
+ // - data is already verified to be well-formed CBOR simple value and
+ // - val is <= math.MaxUint8.
+ *sv = SimpleValue(val)
+ return nil
+}
diff --git a/vendor/github.com/fxamacker/cbor/v2/stream.go b/vendor/github.com/fxamacker/cbor/v2/stream.go
new file mode 100644
index 0000000000..507ab6c184
--- /dev/null
+++ b/vendor/github.com/fxamacker/cbor/v2/stream.go
@@ -0,0 +1,277 @@
+// Copyright (c) Faye Amacker. All rights reserved.
+// Licensed under the MIT License. See LICENSE in the project root for license information.
+
+package cbor
+
+import (
+ "bytes"
+ "errors"
+ "io"
+ "reflect"
+)
+
+// Decoder reads and decodes CBOR values from io.Reader.
+type Decoder struct {
+ r io.Reader
+ d decoder
+ buf []byte
+ off int // next read offset in buf
+ bytesRead int
+}
+
+// NewDecoder returns a new decoder that reads and decodes from r using
+// the default decoding options.
+func NewDecoder(r io.Reader) *Decoder {
+ return defaultDecMode.NewDecoder(r)
+}
+
+// Decode reads CBOR value and decodes it into the value pointed to by v.
+func (dec *Decoder) Decode(v interface{}) error {
+ _, err := dec.readNext()
+ if err != nil {
+ // Return validation error or read error.
+ return err
+ }
+
+ dec.d.reset(dec.buf[dec.off:])
+ err = dec.d.value(v)
+
+ // Increment dec.off even if decoding err is not nil because
+ // dec.d.off points to the next CBOR data item if current
+ // CBOR data item is valid but failed to be decoded into v.
+ // This allows next CBOR data item to be decoded in next
+ // call to this function.
+ dec.off += dec.d.off
+ dec.bytesRead += dec.d.off
+
+ return err
+}
+
+// Skip skips to the next CBOR data item (if there is any),
+// otherwise it returns an error such as io.EOF, io.ErrUnexpectedEOF, etc.
+func (dec *Decoder) Skip() error {
+ n, err := dec.readNext()
+ if err != nil {
+ // Return validation error or read error.
+ return err
+ }
+
+ dec.off += n
+ dec.bytesRead += n
+ return nil
+}
+
+// NumBytesRead returns the number of bytes read.
+func (dec *Decoder) NumBytesRead() int {
+ return dec.bytesRead
+}
+
+// Buffered returns a reader for data remaining in Decoder's buffer.
+// Returned reader is valid until the next call to Decode or Skip.
+func (dec *Decoder) Buffered() io.Reader {
+ return bytes.NewReader(dec.buf[dec.off:])
+}
+
+// readNext() reads next CBOR data item from Reader to buffer.
+// It returns the size of next CBOR data item.
+// It also returns validation error or read error if any.
+func (dec *Decoder) readNext() (int, error) {
+ var readErr error
+ var validErr error
+
+ for {
+ // Process any unread data in dec.buf.
+ if dec.off < len(dec.buf) {
+ dec.d.reset(dec.buf[dec.off:])
+ off := dec.off // Save offset before data validation
+ validErr = dec.d.wellformed(true, false)
+ dec.off = off // Restore offset
+
+ if validErr == nil {
+ return dec.d.off, nil
+ }
+
+ if validErr != io.ErrUnexpectedEOF {
+ return 0, validErr
+ }
+
+ // Process last read error on io.ErrUnexpectedEOF.
+ if readErr != nil {
+ if readErr == io.EOF {
+ // current CBOR data item is incomplete.
+ return 0, io.ErrUnexpectedEOF
+ }
+ return 0, readErr
+ }
+ }
+
+ // More data is needed and there was no read error.
+ var n int
+ for n == 0 {
+ n, readErr = dec.read()
+ if n == 0 && readErr != nil {
+ // No more data can be read and read error is encountered.
+ // At this point, validErr is either nil or io.ErrUnexpectedEOF.
+ if readErr == io.EOF {
+ if validErr == io.ErrUnexpectedEOF {
+ // current CBOR data item is incomplete.
+ return 0, io.ErrUnexpectedEOF
+ }
+ }
+ return 0, readErr
+ }
+ }
+
+ // At this point, dec.buf contains new data from last read (n > 0).
+ }
+}
+
+// read() reads data from Reader to buffer.
+// It returns number of bytes read and any read error encountered.
+// Postconditions:
+// - dec.buf contains previously unread data and new data.
+// - dec.off is 0.
+func (dec *Decoder) read() (int, error) {
+ // Grow buf if needed.
+ const minRead = 512
+ if cap(dec.buf)-len(dec.buf)+dec.off < minRead {
+ oldUnreadBuf := dec.buf[dec.off:]
+ dec.buf = make([]byte, len(dec.buf)-dec.off, 2*cap(dec.buf)+minRead)
+ dec.overwriteBuf(oldUnreadBuf)
+ }
+
+ // Copy unread data over read data and reset off to 0.
+ if dec.off > 0 {
+ dec.overwriteBuf(dec.buf[dec.off:])
+ }
+
+ // Read from reader and reslice buf.
+ n, err := dec.r.Read(dec.buf[len(dec.buf):cap(dec.buf)])
+ dec.buf = dec.buf[0 : len(dec.buf)+n]
+ return n, err
+}
+
+func (dec *Decoder) overwriteBuf(newBuf []byte) {
+ n := copy(dec.buf, newBuf)
+ dec.buf = dec.buf[:n]
+ dec.off = 0
+}
+
+// Encoder writes CBOR values to io.Writer.
+type Encoder struct {
+ w io.Writer
+ em *encMode
+ indefTypes []cborType
+}
+
+// NewEncoder returns a new encoder that writes to w using the default encoding options.
+func NewEncoder(w io.Writer) *Encoder {
+ return defaultEncMode.NewEncoder(w)
+}
+
+// Encode writes the CBOR encoding of v.
+func (enc *Encoder) Encode(v interface{}) error {
+ if len(enc.indefTypes) > 0 && v != nil {
+ indefType := enc.indefTypes[len(enc.indefTypes)-1]
+ if indefType == cborTypeTextString {
+ k := reflect.TypeOf(v).Kind()
+ if k != reflect.String {
+ return errors.New("cbor: cannot encode item type " + k.String() + " for indefinite-length text string")
+ }
+ } else if indefType == cborTypeByteString {
+ t := reflect.TypeOf(v)
+ k := t.Kind()
+ if (k != reflect.Array && k != reflect.Slice) || t.Elem().Kind() != reflect.Uint8 {
+ return errors.New("cbor: cannot encode item type " + k.String() + " for indefinite-length byte string")
+ }
+ }
+ }
+
+ buf := getEncodeBuffer()
+
+ err := encode(buf, enc.em, reflect.ValueOf(v))
+ if err == nil {
+ _, err = enc.w.Write(buf.Bytes())
+ }
+
+ putEncodeBuffer(buf)
+ return err
+}
+
+// StartIndefiniteByteString starts byte string encoding of indefinite length.
+// Subsequent calls to (*Encoder).Encode() encode definite-length byte strings
+// ("chunks") as one contiguous string until EndIndefinite is called.
+func (enc *Encoder) StartIndefiniteByteString() error {
+ return enc.startIndefinite(cborTypeByteString)
+}
+
+// StartIndefiniteTextString starts text string encoding of indefinite length.
+// Subsequent calls to (*Encoder).Encode() encode definite-length text strings
+// ("chunks") as one contiguous string until EndIndefinite is called.
+func (enc *Encoder) StartIndefiniteTextString() error {
+ return enc.startIndefinite(cborTypeTextString)
+}
+
+// StartIndefiniteArray starts array encoding of indefinite length.
+// Subsequent calls to (*Encoder).Encode() encode elements of the array
+// until EndIndefinite is called.
+func (enc *Encoder) StartIndefiniteArray() error {
+ return enc.startIndefinite(cborTypeArray)
+}
+
+// StartIndefiniteMap starts map encoding of indefinite length.
+// Subsequent calls to (*Encoder).Encode() encode elements of the map
+// until EndIndefinite is called.
+func (enc *Encoder) StartIndefiniteMap() error {
+ return enc.startIndefinite(cborTypeMap)
+}
+
+// EndIndefinite closes last opened indefinite length value.
+func (enc *Encoder) EndIndefinite() error {
+ if len(enc.indefTypes) == 0 {
+ return errors.New("cbor: cannot encode \"break\" code outside indefinite length values")
+ }
+ _, err := enc.w.Write([]byte{cborBreakFlag})
+ if err == nil {
+ enc.indefTypes = enc.indefTypes[:len(enc.indefTypes)-1]
+ }
+ return err
+}
+
+var cborIndefHeader = map[cborType][]byte{
+ cborTypeByteString: {cborByteStringWithIndefiniteLengthHead},
+ cborTypeTextString: {cborTextStringWithIndefiniteLengthHead},
+ cborTypeArray: {cborArrayWithIndefiniteLengthHead},
+ cborTypeMap: {cborMapWithIndefiniteLengthHead},
+}
+
+func (enc *Encoder) startIndefinite(typ cborType) error {
+ if enc.em.indefLength == IndefLengthForbidden {
+ return &IndefiniteLengthError{typ}
+ }
+ _, err := enc.w.Write(cborIndefHeader[typ])
+ if err == nil {
+ enc.indefTypes = append(enc.indefTypes, typ)
+ }
+ return err
+}
+
+// RawMessage is a raw encoded CBOR value.
+type RawMessage []byte
+
+// MarshalCBOR returns m or CBOR nil if m is nil.
+func (m RawMessage) MarshalCBOR() ([]byte, error) {
+ if len(m) == 0 {
+ return cborNil, nil
+ }
+ return m, nil
+}
+
+// UnmarshalCBOR creates a copy of data and saves to *m.
+func (m *RawMessage) UnmarshalCBOR(data []byte) error {
+ if m == nil {
+ return errors.New("cbor.RawMessage: UnmarshalCBOR on nil pointer")
+ }
+ *m = append((*m)[0:0], data...)
+ return nil
+}
diff --git a/vendor/github.com/fxamacker/cbor/v2/structfields.go b/vendor/github.com/fxamacker/cbor/v2/structfields.go
new file mode 100644
index 0000000000..81228acf0f
--- /dev/null
+++ b/vendor/github.com/fxamacker/cbor/v2/structfields.go
@@ -0,0 +1,260 @@
+// Copyright (c) Faye Amacker. All rights reserved.
+// Licensed under the MIT License. See LICENSE in the project root for license information.
+
+package cbor
+
+import (
+ "reflect"
+ "sort"
+ "strings"
+)
+
+type field struct {
+ name string
+ nameAsInt int64 // used by the decoder to match field name with CBOR int
+ cborName []byte
+ cborNameByteString []byte // major type 2 name encoding iff cborName has major type 3
+ idx []int
+ typ reflect.Type
+ ef encodeFunc
+ ief isEmptyFunc
+ typInfo *typeInfo // used by the decoder to reuse type info
+ tagged bool // used to choose dominant field (at the same level tagged fields dominate untagged fields)
+ omitEmpty bool // used to skip empty field
+ keyAsInt bool // used to encode/decode field name as int
+}
+
+type fields []*field
+
+// indexFieldSorter sorts fields by field idx at each level, breaking ties with idx depth.
+type indexFieldSorter struct {
+ fields fields
+}
+
+func (x *indexFieldSorter) Len() int {
+ return len(x.fields)
+}
+
+func (x *indexFieldSorter) Swap(i, j int) {
+ x.fields[i], x.fields[j] = x.fields[j], x.fields[i]
+}
+
+func (x *indexFieldSorter) Less(i, j int) bool {
+ iIdx, jIdx := x.fields[i].idx, x.fields[j].idx
+ for k := 0; k < len(iIdx) && k < len(jIdx); k++ {
+ if iIdx[k] != jIdx[k] {
+ return iIdx[k] < jIdx[k]
+ }
+ }
+ return len(iIdx) <= len(jIdx)
+}
+
+// nameLevelAndTagFieldSorter sorts fields by field name, idx depth, and presence of tag.
+type nameLevelAndTagFieldSorter struct {
+ fields fields
+}
+
+func (x *nameLevelAndTagFieldSorter) Len() int {
+ return len(x.fields)
+}
+
+func (x *nameLevelAndTagFieldSorter) Swap(i, j int) {
+ x.fields[i], x.fields[j] = x.fields[j], x.fields[i]
+}
+
+func (x *nameLevelAndTagFieldSorter) Less(i, j int) bool {
+ fi, fj := x.fields[i], x.fields[j]
+ if fi.name != fj.name {
+ return fi.name < fj.name
+ }
+ if len(fi.idx) != len(fj.idx) {
+ return len(fi.idx) < len(fj.idx)
+ }
+ if fi.tagged != fj.tagged {
+ return fi.tagged
+ }
+ return i < j // Field i and j have the same name, depth, and tagged status. Nothing else matters.
+}
+
+// getFields returns visible fields of struct type t following visibility rules for JSON encoding.
+func getFields(t reflect.Type) (flds fields, structOptions string) {
+ // Get special field "_" tag options
+ if f, ok := t.FieldByName("_"); ok {
+ tag := f.Tag.Get("cbor")
+ if tag != "-" {
+ structOptions = tag
+ }
+ }
+
+ // nTypes contains next level anonymous fields' types and indexes
+ // (there can be multiple fields of the same type at the same level)
+ flds, nTypes := appendFields(t, nil, nil, nil)
+
+ if len(nTypes) > 0 {
+
+ var cTypes map[reflect.Type][][]int // current level anonymous fields' types and indexes
+ vTypes := map[reflect.Type]bool{t: true} // visited field types at less nested levels
+
+ for len(nTypes) > 0 {
+ cTypes, nTypes = nTypes, nil
+
+ for t, idx := range cTypes {
+ // If there are multiple anonymous fields of the same struct type at the same level, all are ignored.
+ if len(idx) > 1 {
+ continue
+ }
+
+ // Anonymous field of the same type at deeper nested level is ignored.
+ if vTypes[t] {
+ continue
+ }
+ vTypes[t] = true
+
+ flds, nTypes = appendFields(t, idx[0], flds, nTypes)
+ }
+ }
+ }
+
+ sort.Sort(&nameLevelAndTagFieldSorter{flds})
+
+ // Keep visible fields.
+ j := 0 // index of next unique field
+ for i := 0; i < len(flds); {
+ name := flds[i].name
+ if i == len(flds)-1 || // last field
+ name != flds[i+1].name || // field i has unique field name
+ len(flds[i].idx) < len(flds[i+1].idx) || // field i is at a less nested level than field i+1
+ (flds[i].tagged && !flds[i+1].tagged) { // field i is tagged while field i+1 is not
+ flds[j] = flds[i]
+ j++
+ }
+
+ // Skip fields with the same field name.
+ for i++; i < len(flds) && name == flds[i].name; i++ { //nolint:revive
+ }
+ }
+ if j != len(flds) {
+ flds = flds[:j]
+ }
+
+ // Sort fields by field index
+ sort.Sort(&indexFieldSorter{flds})
+
+ return flds, structOptions
+}
+
+// appendFields appends type t's exportable fields to flds and anonymous struct fields to nTypes.
+func appendFields(
+ t reflect.Type,
+ idx []int,
+ flds fields,
+ nTypes map[reflect.Type][][]int,
+) (
+ _flds fields,
+ _nTypes map[reflect.Type][][]int,
+) {
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+
+ ft := f.Type
+ for ft.Kind() == reflect.Ptr {
+ ft = ft.Elem()
+ }
+
+ if !isFieldExportable(f, ft.Kind()) {
+ continue
+ }
+
+ tag := f.Tag.Get("cbor")
+ if tag == "" {
+ tag = f.Tag.Get("json")
+ }
+ if tag == "-" {
+ continue
+ }
+
+ tagged := tag != ""
+
+ // Parse field tag options
+ var tagFieldName string
+ var omitempty, keyasint bool
+ for j := 0; tag != ""; j++ {
+ var token string
+ idx := strings.IndexByte(tag, ',')
+ if idx == -1 {
+ token, tag = tag, ""
+ } else {
+ token, tag = tag[:idx], tag[idx+1:]
+ }
+ if j == 0 {
+ tagFieldName = token
+ } else {
+ switch token {
+ case "omitempty":
+ omitempty = true
+ case "keyasint":
+ keyasint = true
+ }
+ }
+ }
+
+ fieldName := tagFieldName
+ if tagFieldName == "" {
+ fieldName = f.Name
+ }
+
+ fIdx := make([]int, len(idx)+1)
+ copy(fIdx, idx)
+ fIdx[len(fIdx)-1] = i
+
+ if !f.Anonymous || ft.Kind() != reflect.Struct || tagFieldName != "" {
+ flds = append(flds, &field{
+ name: fieldName,
+ idx: fIdx,
+ typ: f.Type,
+ omitEmpty: omitempty,
+ keyAsInt: keyasint,
+ tagged: tagged})
+ } else {
+ if nTypes == nil {
+ nTypes = make(map[reflect.Type][][]int)
+ }
+ nTypes[ft] = append(nTypes[ft], fIdx)
+ }
+ }
+
+ return flds, nTypes
+}
+
+// isFieldExportable returns true if f is an exportable (regular or anonymous) field or
+// a nonexportable anonymous field of struct type.
+// Nonexportable anonymous field of struct type can contain exportable fields.
+func isFieldExportable(f reflect.StructField, fk reflect.Kind) bool { //nolint:gocritic // ignore hugeParam
+ exportable := f.PkgPath == ""
+ return exportable || (f.Anonymous && fk == reflect.Struct)
+}
+
+type embeddedFieldNullPtrFunc func(reflect.Value) (reflect.Value, error)
+
+// getFieldValue returns field value of struct v by index. When encountering null pointer
+// to anonymous (embedded) struct field, f is called with the last traversed field value.
+func getFieldValue(v reflect.Value, idx []int, f embeddedFieldNullPtrFunc) (fv reflect.Value, err error) {
+ fv = v
+ for i, n := range idx {
+ fv = fv.Field(n)
+
+ if i < len(idx)-1 {
+ if fv.Kind() == reflect.Ptr && fv.Type().Elem().Kind() == reflect.Struct {
+ if fv.IsNil() {
+ // Null pointer to embedded struct field
+ fv, err = f(fv)
+ if err != nil || !fv.IsValid() {
+ return fv, err
+ }
+ }
+ fv = fv.Elem()
+ }
+ }
+ }
+ return fv, nil
+}
diff --git a/vendor/github.com/fxamacker/cbor/v2/tag.go b/vendor/github.com/fxamacker/cbor/v2/tag.go
new file mode 100644
index 0000000000..5c4d2b7a42
--- /dev/null
+++ b/vendor/github.com/fxamacker/cbor/v2/tag.go
@@ -0,0 +1,299 @@
+package cbor
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "sync"
+)
+
+// Tag represents CBOR tag data, including tag number and unmarshaled tag content. Marshaling and
+// unmarshaling of tag content is subject to any encode and decode options that would apply to
+// enclosed data item if it were to appear outside of a tag.
+type Tag struct {
+ Number uint64
+ Content interface{}
+}
+
+// RawTag represents CBOR tag data, including tag number and raw tag content.
+// RawTag implements Unmarshaler and Marshaler interfaces.
+type RawTag struct {
+ Number uint64
+ Content RawMessage
+}
+
+// UnmarshalCBOR sets *t with tag number and raw tag content copied from data.
+func (t *RawTag) UnmarshalCBOR(data []byte) error {
+ if t == nil {
+ return errors.New("cbor.RawTag: UnmarshalCBOR on nil pointer")
+ }
+
+ // Decoding CBOR null and undefined to cbor.RawTag is no-op.
+ if len(data) == 1 && (data[0] == 0xf6 || data[0] == 0xf7) {
+ return nil
+ }
+
+ d := decoder{data: data, dm: defaultDecMode}
+
+ // Unmarshal tag number.
+ typ, _, num := d.getHead()
+ if typ != cborTypeTag {
+ return &UnmarshalTypeError{CBORType: typ.String(), GoType: typeRawTag.String()}
+ }
+ t.Number = num
+
+ // Unmarshal tag content.
+ c := d.data[d.off:]
+ t.Content = make([]byte, len(c))
+ copy(t.Content, c)
+ return nil
+}
+
+// MarshalCBOR returns CBOR encoding of t.
+func (t RawTag) MarshalCBOR() ([]byte, error) {
+ if t.Number == 0 && len(t.Content) == 0 {
+ // Marshal uninitialized cbor.RawTag
+ b := make([]byte, len(cborNil))
+ copy(b, cborNil)
+ return b, nil
+ }
+
+ e := getEncodeBuffer()
+
+ encodeHead(e, byte(cborTypeTag), t.Number)
+
+ content := t.Content
+ if len(content) == 0 {
+ content = cborNil
+ }
+
+ buf := make([]byte, len(e.Bytes())+len(content))
+ n := copy(buf, e.Bytes())
+ copy(buf[n:], content)
+
+ putEncodeBuffer(e)
+ return buf, nil
+}
+
+// DecTagMode specifies how decoder handles tag number.
+type DecTagMode int
+
+const (
+ // DecTagIgnored makes decoder ignore tag number (skips if present).
+ DecTagIgnored DecTagMode = iota
+
+ // DecTagOptional makes decoder verify tag number if it's present.
+ DecTagOptional
+
+ // DecTagRequired makes decoder verify tag number and tag number must be present.
+ DecTagRequired
+
+ maxDecTagMode
+)
+
+func (dtm DecTagMode) valid() bool {
+ return dtm >= 0 && dtm < maxDecTagMode
+}
+
+// EncTagMode specifies how encoder handles tag number.
+type EncTagMode int
+
+const (
+ // EncTagNone makes encoder not encode tag number.
+ EncTagNone EncTagMode = iota
+
+ // EncTagRequired makes encoder encode tag number.
+ EncTagRequired
+
+ maxEncTagMode
+)
+
+func (etm EncTagMode) valid() bool {
+ return etm >= 0 && etm < maxEncTagMode
+}
+
+// TagOptions specifies how encoder and decoder handle tag number.
+type TagOptions struct {
+ DecTag DecTagMode
+ EncTag EncTagMode
+}
+
+// TagSet is an interface to add and remove tag info. It is used by EncMode and DecMode
+// to provide CBOR tag support.
+type TagSet interface {
+ // Add adds given tag number(s), content type, and tag options to TagSet.
+ Add(opts TagOptions, contentType reflect.Type, num uint64, nestedNum ...uint64) error
+
+ // Remove removes given tag content type from TagSet.
+ Remove(contentType reflect.Type)
+
+ tagProvider
+}
+
+type tagProvider interface {
+ getTagItemFromType(t reflect.Type) *tagItem
+ getTypeFromTagNum(num []uint64) reflect.Type
+}
+
+type tagItem struct {
+ num []uint64
+ cborTagNum []byte
+ contentType reflect.Type
+ opts TagOptions
+}
+
+func (t *tagItem) equalTagNum(num []uint64) bool {
+ // Fast path to compare 1 tag number
+ if len(t.num) == 1 && len(num) == 1 && t.num[0] == num[0] {
+ return true
+ }
+
+ if len(t.num) != len(num) {
+ return false
+ }
+
+ for i := 0; i < len(t.num); i++ {
+ if t.num[i] != num[i] {
+ return false
+ }
+ }
+
+ return true
+}
+
+type (
+ tagSet map[reflect.Type]*tagItem
+
+ syncTagSet struct {
+ sync.RWMutex
+ t tagSet
+ }
+)
+
+func (t tagSet) getTagItemFromType(typ reflect.Type) *tagItem {
+ return t[typ]
+}
+
+func (t tagSet) getTypeFromTagNum(num []uint64) reflect.Type {
+ for typ, tag := range t {
+ if tag.equalTagNum(num) {
+ return typ
+ }
+ }
+ return nil
+}
+
+// NewTagSet returns TagSet (safe for concurrency).
+func NewTagSet() TagSet {
+ return &syncTagSet{t: make(map[reflect.Type]*tagItem)}
+}
+
+// Add adds given tag number(s), content type, and tag options to TagSet.
+func (t *syncTagSet) Add(opts TagOptions, contentType reflect.Type, num uint64, nestedNum ...uint64) error {
+ if contentType == nil {
+ return errors.New("cbor: cannot add nil content type to TagSet")
+ }
+ for contentType.Kind() == reflect.Ptr {
+ contentType = contentType.Elem()
+ }
+ tag, err := newTagItem(opts, contentType, num, nestedNum...)
+ if err != nil {
+ return err
+ }
+ t.Lock()
+ defer t.Unlock()
+ for typ, ti := range t.t {
+ if typ == contentType {
+ return errors.New("cbor: content type " + contentType.String() + " already exists in TagSet")
+ }
+ if ti.equalTagNum(tag.num) {
+ return fmt.Errorf("cbor: tag number %v already exists in TagSet", tag.num)
+ }
+ }
+ t.t[contentType] = tag
+ return nil
+}
+
+// Remove removes given tag content type from TagSet.
+func (t *syncTagSet) Remove(contentType reflect.Type) {
+ for contentType.Kind() == reflect.Ptr {
+ contentType = contentType.Elem()
+ }
+ t.Lock()
+ delete(t.t, contentType)
+ t.Unlock()
+}
+
+func (t *syncTagSet) getTagItemFromType(typ reflect.Type) *tagItem {
+ t.RLock()
+ ti := t.t[typ]
+ t.RUnlock()
+ return ti
+}
+
+func (t *syncTagSet) getTypeFromTagNum(num []uint64) reflect.Type {
+ t.RLock()
+ rt := t.t.getTypeFromTagNum(num)
+ t.RUnlock()
+ return rt
+}
+
+func newTagItem(opts TagOptions, contentType reflect.Type, num uint64, nestedNum ...uint64) (*tagItem, error) {
+ if opts.DecTag == DecTagIgnored && opts.EncTag == EncTagNone {
+ return nil, errors.New("cbor: cannot add tag with DecTagIgnored and EncTagNone options to TagSet")
+ }
+ if contentType.PkgPath() == "" || contentType.Kind() == reflect.Interface {
+ return nil, errors.New("cbor: can only add named types to TagSet, got " + contentType.String())
+ }
+ if contentType == typeTime {
+ return nil, errors.New("cbor: cannot add time.Time to TagSet, use EncOptions.TimeTag and DecOptions.TimeTag instead")
+ }
+ if contentType == typeBigInt {
+ return nil, errors.New("cbor: cannot add big.Int to TagSet, it's built-in and supported automatically")
+ }
+ if contentType == typeTag {
+ return nil, errors.New("cbor: cannot add cbor.Tag to TagSet")
+ }
+ if contentType == typeRawTag {
+ return nil, errors.New("cbor: cannot add cbor.RawTag to TagSet")
+ }
+ if num == 0 || num == 1 {
+ return nil, errors.New("cbor: cannot add tag number 0 or 1 to TagSet, use EncOptions.TimeTag and DecOptions.TimeTag instead")
+ }
+ if num == 2 || num == 3 {
+ return nil, errors.New("cbor: cannot add tag number 2 or 3 to TagSet, it's built-in and supported automatically")
+ }
+ if num == tagNumSelfDescribedCBOR {
+ return nil, errors.New("cbor: cannot add tag number 55799 to TagSet, it's built-in and ignored automatically")
+ }
+
+ te := tagItem{num: []uint64{num}, opts: opts, contentType: contentType}
+ te.num = append(te.num, nestedNum...)
+
+ // Cache encoded tag numbers
+ e := getEncodeBuffer()
+ for _, n := range te.num {
+ encodeHead(e, byte(cborTypeTag), n)
+ }
+ te.cborTagNum = make([]byte, e.Len())
+ copy(te.cborTagNum, e.Bytes())
+ putEncodeBuffer(e)
+
+ return &te, nil
+}
+
+var (
+ typeTag = reflect.TypeOf(Tag{})
+ typeRawTag = reflect.TypeOf(RawTag{})
+)
+
+// WrongTagError describes mismatch between CBOR tag and registered tag.
+type WrongTagError struct {
+ RegisteredType reflect.Type
+ RegisteredTagNum []uint64
+ TagNum []uint64
+}
+
+func (e *WrongTagError) Error() string {
+ return fmt.Sprintf("cbor: wrong tag number for %s, got %v, expected %v", e.RegisteredType.String(), e.TagNum, e.RegisteredTagNum)
+}
diff --git a/vendor/github.com/fxamacker/cbor/v2/valid.go b/vendor/github.com/fxamacker/cbor/v2/valid.go
new file mode 100644
index 0000000000..b40793b95e
--- /dev/null
+++ b/vendor/github.com/fxamacker/cbor/v2/valid.go
@@ -0,0 +1,394 @@
+// Copyright (c) Faye Amacker. All rights reserved.
+// Licensed under the MIT License. See LICENSE in the project root for license information.
+
+package cbor
+
+import (
+ "encoding/binary"
+ "errors"
+ "io"
+ "math"
+ "strconv"
+
+ "github.com/x448/float16"
+)
+
+// SyntaxError is a description of a CBOR syntax error.
+type SyntaxError struct {
+ msg string
+}
+
+func (e *SyntaxError) Error() string { return e.msg }
+
+// SemanticError is a description of a CBOR semantic error.
+type SemanticError struct {
+ msg string
+}
+
+func (e *SemanticError) Error() string { return e.msg }
+
+// MaxNestedLevelError indicates exceeded max nested level of any combination of CBOR arrays/maps/tags.
+type MaxNestedLevelError struct {
+ maxNestedLevels int
+}
+
+func (e *MaxNestedLevelError) Error() string {
+ return "cbor: exceeded max nested level " + strconv.Itoa(e.maxNestedLevels)
+}
+
+// MaxArrayElementsError indicates exceeded max number of elements for CBOR arrays.
+type MaxArrayElementsError struct {
+ maxArrayElements int
+}
+
+func (e *MaxArrayElementsError) Error() string {
+ return "cbor: exceeded max number of elements " + strconv.Itoa(e.maxArrayElements) + " for CBOR array"
+}
+
+// MaxMapPairsError indicates exceeded max number of key-value pairs for CBOR maps.
+type MaxMapPairsError struct {
+ maxMapPairs int
+}
+
+func (e *MaxMapPairsError) Error() string {
+ return "cbor: exceeded max number of key-value pairs " + strconv.Itoa(e.maxMapPairs) + " for CBOR map"
+}
+
+// IndefiniteLengthError indicates that a disallowed indefinite-length item was found.
+type IndefiniteLengthError struct {
+ t cborType
+}
+
+func (e *IndefiniteLengthError) Error() string {
+ return "cbor: indefinite-length " + e.t.String() + " isn't allowed"
+}
+
+// TagsMdError indicates that a disallowed CBOR tag was found.
+type TagsMdError struct {
+}
+
+func (e *TagsMdError) Error() string {
+ return "cbor: CBOR tag isn't allowed"
+}
+
+// ExtraneousDataError indicates that extraneous data follows a well-formed CBOR data item.
+type ExtraneousDataError struct {
+ numOfBytes int // number of bytes of extraneous data
+ index int // location of extraneous data
+}
+
+func (e *ExtraneousDataError) Error() string {
+ return "cbor: " + strconv.Itoa(e.numOfBytes) + " bytes of extraneous data starting at index " + strconv.Itoa(e.index)
+}
+
+// wellformed checks whether the CBOR data item is well-formed.
+// allowExtraData indicates if extraneous data is allowed after the CBOR data item.
+// - use allowExtraData = true when using Decoder.Decode()
+// - use allowExtraData = false when using Unmarshal()
+func (d *decoder) wellformed(allowExtraData bool, checkBuiltinTags bool) error {
+ if len(d.data) == d.off {
+ return io.EOF
+ }
+ _, err := d.wellformedInternal(0, checkBuiltinTags)
+ if err == nil {
+ if !allowExtraData && d.off != len(d.data) {
+ err = &ExtraneousDataError{len(d.data) - d.off, d.off}
+ }
+ }
+ return err
+}
+
+// wellformedInternal checks data's well-formedness and returns max depth and error.
+func (d *decoder) wellformedInternal(depth int, checkBuiltinTags bool) (int, error) { //nolint:gocyclo
+ t, _, val, indefiniteLength, err := d.wellformedHeadWithIndefiniteLengthFlag()
+ if err != nil {
+ return 0, err
+ }
+
+ switch t {
+ case cborTypeByteString, cborTypeTextString:
+ if indefiniteLength {
+ if d.dm.indefLength == IndefLengthForbidden {
+ return 0, &IndefiniteLengthError{t}
+ }
+ return d.wellformedIndefiniteString(t, depth, checkBuiltinTags)
+ }
+ valInt := int(val)
+ if valInt < 0 {
+ // Detect integer overflow
+ return 0, errors.New("cbor: " + t.String() + " length " + strconv.FormatUint(val, 10) + " is too large, causing integer overflow")
+ }
+ if len(d.data)-d.off < valInt { // valInt+off may overflow integer
+ return 0, io.ErrUnexpectedEOF
+ }
+ d.off += valInt
+
+ case cborTypeArray, cborTypeMap:
+ depth++
+ if depth > d.dm.maxNestedLevels {
+ return 0, &MaxNestedLevelError{d.dm.maxNestedLevels}
+ }
+
+ if indefiniteLength {
+ if d.dm.indefLength == IndefLengthForbidden {
+ return 0, &IndefiniteLengthError{t}
+ }
+ return d.wellformedIndefiniteArrayOrMap(t, depth, checkBuiltinTags)
+ }
+
+ valInt := int(val)
+ if valInt < 0 {
+ // Detect integer overflow
+ return 0, errors.New("cbor: " + t.String() + " length " + strconv.FormatUint(val, 10) + " is too large, it would cause integer overflow")
+ }
+
+ if t == cborTypeArray {
+ if valInt > d.dm.maxArrayElements {
+ return 0, &MaxArrayElementsError{d.dm.maxArrayElements}
+ }
+ } else {
+ if valInt > d.dm.maxMapPairs {
+ return 0, &MaxMapPairsError{d.dm.maxMapPairs}
+ }
+ }
+
+ count := 1
+ if t == cborTypeMap {
+ count = 2
+ }
+ maxDepth := depth
+ for j := 0; j < count; j++ {
+ for i := 0; i < valInt; i++ {
+ var dpt int
+ if dpt, err = d.wellformedInternal(depth, checkBuiltinTags); err != nil {
+ return 0, err
+ }
+ if dpt > maxDepth {
+ maxDepth = dpt // Save max depth
+ }
+ }
+ }
+ depth = maxDepth
+
+ case cborTypeTag:
+ if d.dm.tagsMd == TagsForbidden {
+ return 0, &TagsMdError{}
+ }
+
+ tagNum := val
+
+ // Scan nested tag numbers to avoid recursion.
+ for {
+ if len(d.data) == d.off { // Tag number must be followed by tag content.
+ return 0, io.ErrUnexpectedEOF
+ }
+ if checkBuiltinTags {
+ err = validBuiltinTag(tagNum, d.data[d.off])
+ if err != nil {
+ return 0, err
+ }
+ }
+ if d.dm.bignumTag == BignumTagForbidden && (tagNum == 2 || tagNum == 3) {
+ return 0, &UnacceptableDataItemError{
+ CBORType: cborTypeTag.String(),
+ Message: "bignum",
+ }
+ }
+ if getType(d.data[d.off]) != cborTypeTag {
+ break
+ }
+ if _, _, tagNum, err = d.wellformedHead(); err != nil {
+ return 0, err
+ }
+ depth++
+ if depth > d.dm.maxNestedLevels {
+ return 0, &MaxNestedLevelError{d.dm.maxNestedLevels}
+ }
+ }
+ // Check tag content.
+ return d.wellformedInternal(depth, checkBuiltinTags)
+ }
+
+ return depth, nil
+}
+
+// wellformedIndefiniteString checks indefinite length byte/text string's well-formedness and returns max depth and error.
+func (d *decoder) wellformedIndefiniteString(t cborType, depth int, checkBuiltinTags bool) (int, error) {
+ var err error
+ for {
+ if len(d.data) == d.off {
+ return 0, io.ErrUnexpectedEOF
+ }
+ if isBreakFlag(d.data[d.off]) {
+ d.off++
+ break
+ }
+ // Peek ahead to get next type and indefinite length status.
+ nt, ai := parseInitialByte(d.data[d.off])
+ if t != nt {
+ return 0, &SyntaxError{"cbor: wrong element type " + nt.String() + " for indefinite-length " + t.String()}
+ }
+ if additionalInformation(ai).isIndefiniteLength() {
+ return 0, &SyntaxError{"cbor: indefinite-length " + t.String() + " chunk is not definite-length"}
+ }
+ if depth, err = d.wellformedInternal(depth, checkBuiltinTags); err != nil {
+ return 0, err
+ }
+ }
+ return depth, nil
+}
+
+// wellformedIndefiniteArrayOrMap checks indefinite length array/map's well-formedness and returns max depth and error.
+func (d *decoder) wellformedIndefiniteArrayOrMap(t cborType, depth int, checkBuiltinTags bool) (int, error) {
+ var err error
+ maxDepth := depth
+ i := 0
+ for {
+ if len(d.data) == d.off {
+ return 0, io.ErrUnexpectedEOF
+ }
+ if isBreakFlag(d.data[d.off]) {
+ d.off++
+ break
+ }
+ var dpt int
+ if dpt, err = d.wellformedInternal(depth, checkBuiltinTags); err != nil {
+ return 0, err
+ }
+ if dpt > maxDepth {
+ maxDepth = dpt
+ }
+ i++
+ if t == cborTypeArray {
+ if i > d.dm.maxArrayElements {
+ return 0, &MaxArrayElementsError{d.dm.maxArrayElements}
+ }
+ } else {
+ if i%2 == 0 && i/2 > d.dm.maxMapPairs {
+ return 0, &MaxMapPairsError{d.dm.maxMapPairs}
+ }
+ }
+ }
+ if t == cborTypeMap && i%2 == 1 {
+ return 0, &SyntaxError{"cbor: unexpected \"break\" code"}
+ }
+ return maxDepth, nil
+}
+
+func (d *decoder) wellformedHeadWithIndefiniteLengthFlag() (
+ t cborType,
+ ai byte,
+ val uint64,
+ indefiniteLength bool,
+ err error,
+) {
+ t, ai, val, err = d.wellformedHead()
+ if err != nil {
+ return
+ }
+ indefiniteLength = additionalInformation(ai).isIndefiniteLength()
+ return
+}
+
+func (d *decoder) wellformedHead() (t cborType, ai byte, val uint64, err error) {
+ dataLen := len(d.data) - d.off
+ if dataLen == 0 {
+ return 0, 0, 0, io.ErrUnexpectedEOF
+ }
+
+ t, ai = parseInitialByte(d.data[d.off])
+ val = uint64(ai)
+ d.off++
+ dataLen--
+
+ if ai <= maxAdditionalInformationWithoutArgument {
+ return t, ai, val, nil
+ }
+
+ if ai == additionalInformationWith1ByteArgument {
+ const argumentSize = 1
+ if dataLen < argumentSize {
+ return 0, 0, 0, io.ErrUnexpectedEOF
+ }
+ val = uint64(d.data[d.off])
+ d.off++
+ if t == cborTypePrimitives && val < 32 {
+ return 0, 0, 0, &SyntaxError{"cbor: invalid simple value " + strconv.Itoa(int(val)) + " for type " + t.String()}
+ }
+ return t, ai, val, nil
+ }
+
+ if ai == additionalInformationWith2ByteArgument {
+ const argumentSize = 2
+ if dataLen < argumentSize {
+ return 0, 0, 0, io.ErrUnexpectedEOF
+ }
+ val = uint64(binary.BigEndian.Uint16(d.data[d.off : d.off+argumentSize]))
+ d.off += argumentSize
+ if t == cborTypePrimitives {
+ if err := d.acceptableFloat(float64(float16.Frombits(uint16(val)).Float32())); err != nil {
+ return 0, 0, 0, err
+ }
+ }
+ return t, ai, val, nil
+ }
+
+ if ai == additionalInformationWith4ByteArgument {
+ const argumentSize = 4
+ if dataLen < argumentSize {
+ return 0, 0, 0, io.ErrUnexpectedEOF
+ }
+ val = uint64(binary.BigEndian.Uint32(d.data[d.off : d.off+argumentSize]))
+ d.off += argumentSize
+ if t == cborTypePrimitives {
+ if err := d.acceptableFloat(float64(math.Float32frombits(uint32(val)))); err != nil {
+ return 0, 0, 0, err
+ }
+ }
+ return t, ai, val, nil
+ }
+
+ if ai == additionalInformationWith8ByteArgument {
+ const argumentSize = 8
+ if dataLen < argumentSize {
+ return 0, 0, 0, io.ErrUnexpectedEOF
+ }
+ val = binary.BigEndian.Uint64(d.data[d.off : d.off+argumentSize])
+ d.off += argumentSize
+ if t == cborTypePrimitives {
+ if err := d.acceptableFloat(math.Float64frombits(val)); err != nil {
+ return 0, 0, 0, err
+ }
+ }
+ return t, ai, val, nil
+ }
+
+ if additionalInformation(ai).isIndefiniteLength() {
+ switch t {
+ case cborTypePositiveInt, cborTypeNegativeInt, cborTypeTag:
+ return 0, 0, 0, &SyntaxError{"cbor: invalid additional information " + strconv.Itoa(int(ai)) + " for type " + t.String()}
+ case cborTypePrimitives: // 0xff (break code) should not be outside wellformedIndefinite().
+ return 0, 0, 0, &SyntaxError{"cbor: unexpected \"break\" code"}
+ }
+ return t, ai, val, nil
+ }
+
+ // ai == 28, 29, 30
+ return 0, 0, 0, &SyntaxError{"cbor: invalid additional information " + strconv.Itoa(int(ai)) + " for type " + t.String()}
+}
+
+func (d *decoder) acceptableFloat(f float64) error {
+ switch {
+ case d.dm.nanDec == NaNDecodeForbidden && math.IsNaN(f):
+ return &UnacceptableDataItemError{
+ CBORType: cborTypePrimitives.String(),
+ Message: "floating-point NaN",
+ }
+ case d.dm.infDec == InfDecodeForbidden && math.IsInf(f, 0):
+ return &UnacceptableDataItemError{
+ CBORType: cborTypePrimitives.String(),
+ Message: "floating-point infinity",
+ }
+ }
+ return nil
+}
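The wellformedHead logic above follows the CBOR head layout from RFC 8949: the initial byte packs a 3-bit major type and 5-bit additional information, and additional information 24-27 selects a 1/2/4/8-byte big-endian argument. A minimal standalone sketch of that parsing, separate from the vendored package and with illustrative names:

```go
package main

import (
	"encoding/binary"
	"errors"
	"fmt"
)

// parseHead returns the major type (0-7), the additional information (0-31),
// the head's argument value, and the number of bytes consumed.
func parseHead(data []byte) (major, ai byte, val uint64, n int, err error) {
	if len(data) == 0 {
		return 0, 0, 0, 0, errors.New("empty input")
	}
	major, ai = data[0]>>5, data[0]&0x1f
	switch {
	case ai < 24: // the argument is the additional information itself
		return major, ai, uint64(ai), 1, nil
	case ai == 24: // 1-byte argument
		if len(data) < 2 {
			return 0, 0, 0, 0, errors.New("truncated head")
		}
		return major, ai, uint64(data[1]), 2, nil
	case ai == 25: // 2-byte big-endian argument
		if len(data) < 3 {
			return 0, 0, 0, 0, errors.New("truncated head")
		}
		return major, ai, uint64(binary.BigEndian.Uint16(data[1:3])), 3, nil
	case ai == 26: // 4-byte big-endian argument
		if len(data) < 5 {
			return 0, 0, 0, 0, errors.New("truncated head")
		}
		return major, ai, uint64(binary.BigEndian.Uint32(data[1:5])), 5, nil
	case ai == 27: // 8-byte big-endian argument
		if len(data) < 9 {
			return 0, 0, 0, 0, errors.New("truncated head")
		}
		return major, ai, binary.BigEndian.Uint64(data[1:9]), 9, nil
	default: // 28-30 are reserved, 31 marks indefinite length or the break code
		return 0, 0, 0, 0, fmt.Errorf("additional information %d needs special handling", ai)
	}
}

func main() {
	// 0x19 0x03 0xe8 encodes the unsigned integer 1000 (major type 0, 2-byte argument).
	fmt.Println(parseHead([]byte{0x19, 0x03, 0xe8})) // 0 25 1000 3 <nil>
}
```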
diff --git a/vendor/github.com/google/gofuzz/.travis.yml b/vendor/github.com/google/gofuzz/.travis.yml
deleted file mode 100644
index 061d72ae07..0000000000
--- a/vendor/github.com/google/gofuzz/.travis.yml
+++ /dev/null
@@ -1,10 +0,0 @@
-language: go
-
-go:
- - 1.11.x
- - 1.12.x
- - 1.13.x
- - master
-
-script:
- - go test -cover
diff --git a/vendor/github.com/google/gofuzz/CONTRIBUTING.md b/vendor/github.com/google/gofuzz/CONTRIBUTING.md
deleted file mode 100644
index 97c1b34fd5..0000000000
--- a/vendor/github.com/google/gofuzz/CONTRIBUTING.md
+++ /dev/null
@@ -1,67 +0,0 @@
-# How to contribute #
-
-We'd love to accept your patches and contributions to this project. There are
-just a few small guidelines you need to follow.
-
-
-## Contributor License Agreement ##
-
-Contributions to any Google project must be accompanied by a Contributor
-License Agreement. This is not a copyright **assignment**, it simply gives
-Google permission to use and redistribute your contributions as part of the
-project.
-
- * If you are an individual writing original source code and you're sure you
- own the intellectual property, then you'll need to sign an [individual
- CLA][].
-
- * If you work for a company that wants to allow you to contribute your work,
- then you'll need to sign a [corporate CLA][].
-
-You generally only need to submit a CLA once, so if you've already submitted
-one (even if it was for a different project), you probably don't need to do it
-again.
-
-[individual CLA]: https://developers.google.com/open-source/cla/individual
-[corporate CLA]: https://developers.google.com/open-source/cla/corporate
-
-
-## Submitting a patch ##
-
- 1. It's generally best to start by opening a new issue describing the bug or
- feature you're intending to fix. Even if you think it's relatively minor,
- it's helpful to know what people are working on. Mention in the initial
- issue that you are planning to work on that bug or feature so that it can
- be assigned to you.
-
- 1. Follow the normal process of [forking][] the project, and setup a new
- branch to work in. It's important that each group of changes be done in
- separate branches in order to ensure that a pull request only includes the
- commits related to that bug or feature.
-
- 1. Go makes it very simple to ensure properly formatted code, so always run
- `go fmt` on your code before committing it. You should also run
- [golint][] over your code. As noted in the [golint readme][], it's not
- strictly necessary that your code be completely "lint-free", but this will
- help you find common style issues.
-
- 1. Any significant changes should almost always be accompanied by tests. The
- project already has good test coverage, so look at some of the existing
- tests if you're unsure how to go about it. [gocov][] and [gocov-html][]
- are invaluable tools for seeing which parts of your code aren't being
- exercised by your tests.
-
- 1. Do your best to have [well-formed commit messages][] for each change.
- This provides consistency throughout the project, and ensures that commit
- messages are able to be formatted properly by various git tools.
-
- 1. Finally, push the commits to your fork and submit a [pull request][].
-
-[forking]: https://help.github.com/articles/fork-a-repo
-[golint]: https://github.com/golang/lint
-[golint readme]: https://github.com/golang/lint/blob/master/README
-[gocov]: https://github.com/axw/gocov
-[gocov-html]: https://github.com/matm/gocov-html
-[well-formed commit messages]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html
-[squash]: http://git-scm.com/book/en/Git-Tools-Rewriting-History#Squashing-Commits
-[pull request]: https://help.github.com/articles/creating-a-pull-request
diff --git a/vendor/github.com/google/gofuzz/fuzz.go b/vendor/github.com/google/gofuzz/fuzz.go
deleted file mode 100644
index 761520a8ce..0000000000
--- a/vendor/github.com/google/gofuzz/fuzz.go
+++ /dev/null
@@ -1,605 +0,0 @@
-/*
-Copyright 2014 Google Inc. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package fuzz
-
-import (
- "fmt"
- "math/rand"
- "reflect"
- "regexp"
- "time"
-
- "github.com/google/gofuzz/bytesource"
- "strings"
-)
-
-// fuzzFuncMap is a map from a type to a fuzzFunc that handles that type.
-type fuzzFuncMap map[reflect.Type]reflect.Value
-
-// Fuzzer knows how to fill any object with random fields.
-type Fuzzer struct {
- fuzzFuncs fuzzFuncMap
- defaultFuzzFuncs fuzzFuncMap
- r *rand.Rand
- nilChance float64
- minElements int
- maxElements int
- maxDepth int
- skipFieldPatterns []*regexp.Regexp
-}
-
-// New returns a new Fuzzer. Customize your Fuzzer further by calling Funcs,
-// RandSource, NilChance, or NumElements in any order.
-func New() *Fuzzer {
- return NewWithSeed(time.Now().UnixNano())
-}
-
-func NewWithSeed(seed int64) *Fuzzer {
- f := &Fuzzer{
- defaultFuzzFuncs: fuzzFuncMap{
- reflect.TypeOf(&time.Time{}): reflect.ValueOf(fuzzTime),
- },
-
- fuzzFuncs: fuzzFuncMap{},
- r: rand.New(rand.NewSource(seed)),
- nilChance: .2,
- minElements: 1,
- maxElements: 10,
- maxDepth: 100,
- }
- return f
-}
-
-// NewFromGoFuzz is a helper function that enables using gofuzz (this
-// project) with go-fuzz (https://github.com/dvyukov/go-fuzz) for continuous
-// fuzzing. Essentially, it enables translating the fuzzing bytes from
-// go-fuzz to any Go object using this library.
-//
-// This implementation promises a constant translation from a given slice of
-// bytes to the fuzzed objects. This promise will remain over future
-// versions of Go and of this library.
-//
-// Note: the returned Fuzzer should not be shared between multiple goroutines,
-// as its deterministic output will no longer be available.
-//
-// Example: use go-fuzz to test the function `MyFunc(int)` in the package
-// `mypackage`. Add the file: "mypacakge_fuzz.go" with the content:
-//
-// // +build gofuzz
-// package mypackage
-// import fuzz "github.com/google/gofuzz"
-// func Fuzz(data []byte) int {
-// var i int
-// fuzz.NewFromGoFuzz(data).Fuzz(&i)
-// MyFunc(i)
-// return 0
-// }
-func NewFromGoFuzz(data []byte) *Fuzzer {
- return New().RandSource(bytesource.New(data))
-}
-
-// Funcs adds each entry in fuzzFuncs as a custom fuzzing function.
-//
-// Each entry in fuzzFuncs must be a function taking two parameters.
-// The first parameter must be a pointer or map. It is the variable that
-// function will fill with random data. The second parameter must be a
-// fuzz.Continue, which will provide a source of randomness and a way
-// to automatically continue fuzzing smaller pieces of the first parameter.
-//
-// These functions are called sensibly, e.g., if you wanted custom string
-// fuzzing, the function `func(s *string, c fuzz.Continue)` would get
-// called and passed the address of strings. Maps and pointers will always
-// be made/new'd for you, ignoring the NilChance option. For slices, it
-// doesn't make much sense to pre-create them--Fuzzer doesn't know how
-// long you want your slice--so take a pointer to a slice, and make it
-// yourself. (If you don't want your map/pointer type pre-made, take a
-// pointer to it, and make it yourself.) See the examples for a range of
-// custom functions.
-func (f *Fuzzer) Funcs(fuzzFuncs ...interface{}) *Fuzzer {
- for i := range fuzzFuncs {
- v := reflect.ValueOf(fuzzFuncs[i])
- if v.Kind() != reflect.Func {
- panic("Need only funcs!")
- }
- t := v.Type()
- if t.NumIn() != 2 || t.NumOut() != 0 {
- panic("Need 2 in and 0 out params!")
- }
- argT := t.In(0)
- switch argT.Kind() {
- case reflect.Ptr, reflect.Map:
- default:
- panic("fuzzFunc must take pointer or map type")
- }
- if t.In(1) != reflect.TypeOf(Continue{}) {
- panic("fuzzFunc's second parameter must be type fuzz.Continue")
- }
- f.fuzzFuncs[argT] = v
- }
- return f
-}
-
-// RandSource causes f to get values from the given source of randomness.
-// Use if you want deterministic fuzzing.
-func (f *Fuzzer) RandSource(s rand.Source) *Fuzzer {
- f.r = rand.New(s)
- return f
-}
-
-// NilChance sets the probability of creating a nil pointer, map, or slice to
-// 'p'. 'p' should be between 0 (no nils) and 1 (all nils), inclusive.
-func (f *Fuzzer) NilChance(p float64) *Fuzzer {
- if p < 0 || p > 1 {
- panic("p should be between 0 and 1, inclusive.")
- }
- f.nilChance = p
- return f
-}
-
-// NumElements sets the minimum and maximum number of elements that will be
-// added to a non-nil map or slice.
-func (f *Fuzzer) NumElements(atLeast, atMost int) *Fuzzer {
- if atLeast > atMost {
- panic("atLeast must be <= atMost")
- }
- if atLeast < 0 {
- panic("atLeast must be >= 0")
- }
- f.minElements = atLeast
- f.maxElements = atMost
- return f
-}
-
-func (f *Fuzzer) genElementCount() int {
- if f.minElements == f.maxElements {
- return f.minElements
- }
- return f.minElements + f.r.Intn(f.maxElements-f.minElements+1)
-}
-
-func (f *Fuzzer) genShouldFill() bool {
- return f.r.Float64() >= f.nilChance
-}
-
-// MaxDepth sets the maximum number of recursive fuzz calls that will be made
-// before stopping. This includes struct members, pointers, and map and slice
-// elements.
-func (f *Fuzzer) MaxDepth(d int) *Fuzzer {
- f.maxDepth = d
- return f
-}
-
-// Skip fields which match the supplied pattern. Call this multiple times if needed
-// This is useful to skip XXX_ fields generated by protobuf
-func (f *Fuzzer) SkipFieldsWithPattern(pattern *regexp.Regexp) *Fuzzer {
- f.skipFieldPatterns = append(f.skipFieldPatterns, pattern)
- return f
-}
-
-// Fuzz recursively fills all of obj's fields with something random. First
-// this tries to find a custom fuzz function (see Funcs). If there is no
-// custom function this tests whether the object implements fuzz.Interface and,
-// if so, calls Fuzz on it to fuzz itself. If that fails, this will see if
-// there is a default fuzz function provided by this package. If all of that
-// fails, this will generate random values for all primitive fields and then
-// recurse for all non-primitives.
-//
-// This is safe for cyclic or tree-like structs, up to a limit. Use the
-// MaxDepth method to adjust how deep you need it to recurse.
-//
-// obj must be a pointer. Only exported (public) fields can be set (thanks,
-// golang :/ ) Intended for tests, so will panic on bad input or unimplemented
-// fields.
-func (f *Fuzzer) Fuzz(obj interface{}) {
- v := reflect.ValueOf(obj)
- if v.Kind() != reflect.Ptr {
- panic("needed ptr!")
- }
- v = v.Elem()
- f.fuzzWithContext(v, 0)
-}
-
-// FuzzNoCustom is just like Fuzz, except that any custom fuzz function for
-// obj's type will not be called and obj will not be tested for fuzz.Interface
-// conformance. This applies only to obj and not other instances of obj's
-// type.
-// Not safe for cyclic or tree-like structs!
-// obj must be a pointer. Only exported (public) fields can be set (thanks, golang :/ )
-// Intended for tests, so will panic on bad input or unimplemented fields.
-func (f *Fuzzer) FuzzNoCustom(obj interface{}) {
- v := reflect.ValueOf(obj)
- if v.Kind() != reflect.Ptr {
- panic("needed ptr!")
- }
- v = v.Elem()
- f.fuzzWithContext(v, flagNoCustomFuzz)
-}
-
-const (
- // Do not try to find a custom fuzz function. Does not apply recursively.
- flagNoCustomFuzz uint64 = 1 << iota
-)
-
-func (f *Fuzzer) fuzzWithContext(v reflect.Value, flags uint64) {
- fc := &fuzzerContext{fuzzer: f}
- fc.doFuzz(v, flags)
-}
-
-// fuzzerContext carries context about a single fuzzing run, which lets Fuzzer
-// be thread-safe.
-type fuzzerContext struct {
- fuzzer *Fuzzer
- curDepth int
-}
-
-func (fc *fuzzerContext) doFuzz(v reflect.Value, flags uint64) {
- if fc.curDepth >= fc.fuzzer.maxDepth {
- return
- }
- fc.curDepth++
- defer func() { fc.curDepth-- }()
-
- if !v.CanSet() {
- return
- }
-
- if flags&flagNoCustomFuzz == 0 {
- // Check for both pointer and non-pointer custom functions.
- if v.CanAddr() && fc.tryCustom(v.Addr()) {
- return
- }
- if fc.tryCustom(v) {
- return
- }
- }
-
- if fn, ok := fillFuncMap[v.Kind()]; ok {
- fn(v, fc.fuzzer.r)
- return
- }
-
- switch v.Kind() {
- case reflect.Map:
- if fc.fuzzer.genShouldFill() {
- v.Set(reflect.MakeMap(v.Type()))
- n := fc.fuzzer.genElementCount()
- for i := 0; i < n; i++ {
- key := reflect.New(v.Type().Key()).Elem()
- fc.doFuzz(key, 0)
- val := reflect.New(v.Type().Elem()).Elem()
- fc.doFuzz(val, 0)
- v.SetMapIndex(key, val)
- }
- return
- }
- v.Set(reflect.Zero(v.Type()))
- case reflect.Ptr:
- if fc.fuzzer.genShouldFill() {
- v.Set(reflect.New(v.Type().Elem()))
- fc.doFuzz(v.Elem(), 0)
- return
- }
- v.Set(reflect.Zero(v.Type()))
- case reflect.Slice:
- if fc.fuzzer.genShouldFill() {
- n := fc.fuzzer.genElementCount()
- v.Set(reflect.MakeSlice(v.Type(), n, n))
- for i := 0; i < n; i++ {
- fc.doFuzz(v.Index(i), 0)
- }
- return
- }
- v.Set(reflect.Zero(v.Type()))
- case reflect.Array:
- if fc.fuzzer.genShouldFill() {
- n := v.Len()
- for i := 0; i < n; i++ {
- fc.doFuzz(v.Index(i), 0)
- }
- return
- }
- v.Set(reflect.Zero(v.Type()))
- case reflect.Struct:
- for i := 0; i < v.NumField(); i++ {
- skipField := false
- fieldName := v.Type().Field(i).Name
- for _, pattern := range fc.fuzzer.skipFieldPatterns {
- if pattern.MatchString(fieldName) {
- skipField = true
- break
- }
- }
- if !skipField {
- fc.doFuzz(v.Field(i), 0)
- }
- }
- case reflect.Chan:
- fallthrough
- case reflect.Func:
- fallthrough
- case reflect.Interface:
- fallthrough
- default:
- panic(fmt.Sprintf("Can't handle %#v", v.Interface()))
- }
-}
-
-// tryCustom searches for custom handlers, and returns true iff it finds a match
-// and successfully randomizes v.
-func (fc *fuzzerContext) tryCustom(v reflect.Value) bool {
- // First: see if we have a fuzz function for it.
- doCustom, ok := fc.fuzzer.fuzzFuncs[v.Type()]
- if !ok {
- // Second: see if it can fuzz itself.
- if v.CanInterface() {
- intf := v.Interface()
- if fuzzable, ok := intf.(Interface); ok {
- fuzzable.Fuzz(Continue{fc: fc, Rand: fc.fuzzer.r})
- return true
- }
- }
- // Finally: see if there is a default fuzz function.
- doCustom, ok = fc.fuzzer.defaultFuzzFuncs[v.Type()]
- if !ok {
- return false
- }
- }
-
- switch v.Kind() {
- case reflect.Ptr:
- if v.IsNil() {
- if !v.CanSet() {
- return false
- }
- v.Set(reflect.New(v.Type().Elem()))
- }
- case reflect.Map:
- if v.IsNil() {
- if !v.CanSet() {
- return false
- }
- v.Set(reflect.MakeMap(v.Type()))
- }
- default:
- return false
- }
-
- doCustom.Call([]reflect.Value{v, reflect.ValueOf(Continue{
- fc: fc,
- Rand: fc.fuzzer.r,
- })})
- return true
-}
-
-// Interface represents an object that knows how to fuzz itself. Any time we
-// find a type that implements this interface we will delegate the act of
-// fuzzing itself.
-type Interface interface {
- Fuzz(c Continue)
-}
-
-// Continue can be passed to custom fuzzing functions to allow them to use
-// the correct source of randomness and to continue fuzzing their members.
-type Continue struct {
- fc *fuzzerContext
-
- // For convenience, Continue implements rand.Rand via embedding.
- // Use this for generating any randomness if you want your fuzzing
- // to be repeatable for a given seed.
- *rand.Rand
-}
-
-// Fuzz continues fuzzing obj. obj must be a pointer.
-func (c Continue) Fuzz(obj interface{}) {
- v := reflect.ValueOf(obj)
- if v.Kind() != reflect.Ptr {
- panic("needed ptr!")
- }
- v = v.Elem()
- c.fc.doFuzz(v, 0)
-}
-
-// FuzzNoCustom continues fuzzing obj, except that any custom fuzz function for
-// obj's type will not be called and obj will not be tested for fuzz.Interface
-// conformance. This applies only to obj and not other instances of obj's
-// type.
-func (c Continue) FuzzNoCustom(obj interface{}) {
- v := reflect.ValueOf(obj)
- if v.Kind() != reflect.Ptr {
- panic("needed ptr!")
- }
- v = v.Elem()
- c.fc.doFuzz(v, flagNoCustomFuzz)
-}
-
-// RandString makes a random string up to 20 characters long. The returned string
-// may include a variety of (valid) UTF-8 encodings.
-func (c Continue) RandString() string {
- return randString(c.Rand)
-}
-
-// RandUint64 makes random 64 bit numbers.
-// Weirdly, rand doesn't have a function that gives you 64 random bits.
-func (c Continue) RandUint64() uint64 {
- return randUint64(c.Rand)
-}
-
-// RandBool returns true or false randomly.
-func (c Continue) RandBool() bool {
- return randBool(c.Rand)
-}
-
-func fuzzInt(v reflect.Value, r *rand.Rand) {
- v.SetInt(int64(randUint64(r)))
-}
-
-func fuzzUint(v reflect.Value, r *rand.Rand) {
- v.SetUint(randUint64(r))
-}
-
-func fuzzTime(t *time.Time, c Continue) {
- var sec, nsec int64
- // Allow for about 1000 years of random time values, which keeps things
- // like JSON parsing reasonably happy.
- sec = c.Rand.Int63n(1000 * 365 * 24 * 60 * 60)
- c.Fuzz(&nsec)
- *t = time.Unix(sec, nsec)
-}
-
-var fillFuncMap = map[reflect.Kind]func(reflect.Value, *rand.Rand){
- reflect.Bool: func(v reflect.Value, r *rand.Rand) {
- v.SetBool(randBool(r))
- },
- reflect.Int: fuzzInt,
- reflect.Int8: fuzzInt,
- reflect.Int16: fuzzInt,
- reflect.Int32: fuzzInt,
- reflect.Int64: fuzzInt,
- reflect.Uint: fuzzUint,
- reflect.Uint8: fuzzUint,
- reflect.Uint16: fuzzUint,
- reflect.Uint32: fuzzUint,
- reflect.Uint64: fuzzUint,
- reflect.Uintptr: fuzzUint,
- reflect.Float32: func(v reflect.Value, r *rand.Rand) {
- v.SetFloat(float64(r.Float32()))
- },
- reflect.Float64: func(v reflect.Value, r *rand.Rand) {
- v.SetFloat(r.Float64())
- },
- reflect.Complex64: func(v reflect.Value, r *rand.Rand) {
- v.SetComplex(complex128(complex(r.Float32(), r.Float32())))
- },
- reflect.Complex128: func(v reflect.Value, r *rand.Rand) {
- v.SetComplex(complex(r.Float64(), r.Float64()))
- },
- reflect.String: func(v reflect.Value, r *rand.Rand) {
- v.SetString(randString(r))
- },
- reflect.UnsafePointer: func(v reflect.Value, r *rand.Rand) {
- panic("unimplemented")
- },
-}
-
-// randBool returns true or false randomly.
-func randBool(r *rand.Rand) bool {
- return r.Int31()&(1<<30) == 0
-}
-
-type int63nPicker interface {
- Int63n(int64) int64
-}
-
-// UnicodeRange describes a sequential range of unicode characters.
-// Last must be numerically greater than First.
-type UnicodeRange struct {
- First, Last rune
-}
-
-// UnicodeRanges describes an arbitrary number of sequential ranges of unicode characters.
-// To be useful, each range must have at least one character (First <= Last) and
-// there must be at least one range.
-type UnicodeRanges []UnicodeRange
-
-// choose returns a random unicode character from the given range, using the
-// given randomness source.
-func (ur UnicodeRange) choose(r int63nPicker) rune {
- count := int64(ur.Last - ur.First + 1)
- return ur.First + rune(r.Int63n(count))
-}
-
-// CustomStringFuzzFunc constructs a FuzzFunc which produces random strings.
-// Each character is selected from the range ur. If there are no characters
-// in the range (cr.Last < cr.First), this will panic.
-func (ur UnicodeRange) CustomStringFuzzFunc() func(s *string, c Continue) {
- ur.check()
- return func(s *string, c Continue) {
- *s = ur.randString(c.Rand)
- }
-}
-
-// check panics if the first rune of ur (UnicodeRange) is greater than the
-// last one.
-func (ur UnicodeRange) check() {
- if ur.Last < ur.First {
- panic("The last encoding must be greater than the first one.")
- }
-}
-
-// randString of UnicodeRange makes a random string up to 20 characters long.
-// Each character is selected form ur(UnicodeRange).
-func (ur UnicodeRange) randString(r *rand.Rand) string {
- n := r.Intn(20)
- sb := strings.Builder{}
- sb.Grow(n)
- for i := 0; i < n; i++ {
- sb.WriteRune(ur.choose(r))
- }
- return sb.String()
-}
-
-// defaultUnicodeRanges sets a default unicode range when the user does not set
-// CustomStringFuzzFunc() but still wants to fuzz strings.
-var defaultUnicodeRanges = UnicodeRanges{
- {' ', '~'}, // ASCII characters
- {'\u00a0', '\u02af'}, // Multi-byte encoded characters
- {'\u4e00', '\u9fff'}, // Common CJK (even longer encodings)
-}
-
-// CustomStringFuzzFunc constructs a FuzzFunc which produces random strings.
-// Each character is selected from one of the ranges of ur(UnicodeRanges).
-// Each range has an equal probability of being chosen. If there are no ranges,
-// or a selected range has no characters (.Last < .First), this will panic.
-// Do not modify any of the ranges in ur after calling this function.
-func (ur UnicodeRanges) CustomStringFuzzFunc() func(s *string, c Continue) {
- // Check unicode ranges slice is empty.
- if len(ur) == 0 {
- panic("UnicodeRanges is empty.")
- }
- // if not empty, each range should be checked.
- for i := range ur {
- ur[i].check()
- }
- return func(s *string, c Continue) {
- *s = ur.randString(c.Rand)
- }
-}
-
-// randString of UnicodeRanges makes a random string up to 20 characters long.
-// Each character is selected from one of the ranges of ur (UnicodeRanges),
-// and each range has an equal probability of being chosen.
-func (ur UnicodeRanges) randString(r *rand.Rand) string {
- n := r.Intn(20)
- sb := strings.Builder{}
- sb.Grow(n)
- for i := 0; i < n; i++ {
- sb.WriteRune(ur[r.Intn(len(ur))].choose(r))
- }
- return sb.String()
-}
-
-// randString makes a random string up to 20 characters long. The returned string
-// may include a variety of (valid) UTF-8 encodings.
-func randString(r *rand.Rand) string {
- return defaultUnicodeRanges.randString(r)
-}
-
-// randUint64 makes random 64 bit numbers.
-// Weirdly, rand doesn't have a function that gives you 64 random bits.
-func randUint64(r *rand.Rand) uint64 {
- return uint64(r.Uint32())<<32 | uint64(r.Uint32())
-}
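The file removed above documented gofuzz's main entry points (New, NilChance, NumElements, Funcs, Fuzz). For reference, a minimal sketch of how that API was typically used before this change; MyStruct and the custom string function are illustrative:

```go
package example

import fuzz "github.com/google/gofuzz"

type MyStruct struct {
	Name  string
	Count int
	Tags  []string
}

func fillRandom() MyStruct {
	f := fuzz.New().
		NilChance(0).      // never produce nil slices, maps, or pointers
		NumElements(1, 3). // slices and maps get between 1 and 3 elements
		Funcs(func(s *string, c fuzz.Continue) {
			// Custom string fuzzing, as described by the Funcs doc comment above.
			*s = "tag-" + c.RandString()
		})

	var obj MyStruct
	f.Fuzz(&obj) // obj must be passed as a pointer
	return obj
}
```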
diff --git a/vendor/github.com/munnerz/goautoneg/LICENSE b/vendor/github.com/munnerz/goautoneg/LICENSE
new file mode 100644
index 0000000000..bbc7b897cb
--- /dev/null
+++ b/vendor/github.com/munnerz/goautoneg/LICENSE
@@ -0,0 +1,31 @@
+Copyright (c) 2011, Open Knowledge Foundation Ltd.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ Neither the name of the Open Knowledge Foundation Ltd. nor the
+ names of its contributors may be used to endorse or promote
+ products derived from this software without specific prior written
+ permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/munnerz/goautoneg/Makefile b/vendor/github.com/munnerz/goautoneg/Makefile
new file mode 100644
index 0000000000..e33ee17303
--- /dev/null
+++ b/vendor/github.com/munnerz/goautoneg/Makefile
@@ -0,0 +1,13 @@
+include $(GOROOT)/src/Make.inc
+
+TARG=bitbucket.org/ww/goautoneg
+GOFILES=autoneg.go
+
+include $(GOROOT)/src/Make.pkg
+
+format:
+ gofmt -w *.go
+
+docs:
+ gomake clean
+ godoc ${TARG} > README.txt
diff --git a/vendor/github.com/munnerz/goautoneg/README.txt b/vendor/github.com/munnerz/goautoneg/README.txt
new file mode 100644
index 0000000000..7723656d58
--- /dev/null
+++ b/vendor/github.com/munnerz/goautoneg/README.txt
@@ -0,0 +1,67 @@
+PACKAGE
+
+package goautoneg
+import "bitbucket.org/ww/goautoneg"
+
+HTTP Content-Type Autonegotiation.
+
+The functions in this package implement the behaviour specified in
+http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
+
+Copyright (c) 2011, Open Knowledge Foundation Ltd.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ Neither the name of the Open Knowledge Foundation Ltd. nor the
+ names of its contributors may be used to endorse or promote
+ products derived from this software without specific prior written
+ permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+FUNCTIONS
+
+func Negotiate(header string, alternatives []string) (content_type string)
+Negotiate the most appropriate content_type given the accept header
+and a list of alternatives.
+
+func ParseAccept(header string) (accept []Accept)
+Parse an Accept Header string returning a sorted list
+of clauses
+
+
+TYPES
+
+type Accept struct {
+ Type, SubType string
+ Q float32
+ Params map[string]string
+}
+Structure to represent a clause in an HTTP Accept Header
+
+
+SUBDIRECTORIES
+
+ .hg
diff --git a/vendor/github.com/munnerz/goautoneg/autoneg.go b/vendor/github.com/munnerz/goautoneg/autoneg.go
new file mode 100644
index 0000000000..1dd1cad646
--- /dev/null
+++ b/vendor/github.com/munnerz/goautoneg/autoneg.go
@@ -0,0 +1,189 @@
+/*
+HTTP Content-Type Autonegotiation.
+
+The functions in this package implement the behaviour specified in
+http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
+
+Copyright (c) 2011, Open Knowledge Foundation Ltd.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ Neither the name of the Open Knowledge Foundation Ltd. nor the
+ names of its contributors may be used to endorse or promote
+ products derived from this software without specific prior written
+ permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+package goautoneg
+
+import (
+ "sort"
+ "strconv"
+ "strings"
+)
+
+// Structure to represent a clause in an HTTP Accept Header
+type Accept struct {
+ Type, SubType string
+ Q float64
+ Params map[string]string
+}
+
+// acceptSlice is defined to implement sort interface.
+type acceptSlice []Accept
+
+func (slice acceptSlice) Len() int {
+ return len(slice)
+}
+
+func (slice acceptSlice) Less(i, j int) bool {
+ ai, aj := slice[i], slice[j]
+ if ai.Q > aj.Q {
+ return true
+ }
+ if ai.Type != "*" && aj.Type == "*" {
+ return true
+ }
+ if ai.SubType != "*" && aj.SubType == "*" {
+ return true
+ }
+ return false
+}
+
+func (slice acceptSlice) Swap(i, j int) {
+ slice[i], slice[j] = slice[j], slice[i]
+}
+
+func stringTrimSpaceCutset(r rune) bool {
+ return r == ' '
+}
+
+func nextSplitElement(s, sep string) (item string, remaining string) {
+ if index := strings.Index(s, sep); index != -1 {
+ return s[:index], s[index+1:]
+ }
+ return s, ""
+}
+
+// Parse an Accept Header string returning a sorted list
+// of clauses
+func ParseAccept(header string) acceptSlice {
+ partsCount := 0
+ remaining := header
+ for len(remaining) > 0 {
+ partsCount++
+ _, remaining = nextSplitElement(remaining, ",")
+ }
+ accept := make(acceptSlice, 0, partsCount)
+
+ remaining = header
+ var part string
+ for len(remaining) > 0 {
+ part, remaining = nextSplitElement(remaining, ",")
+ part = strings.TrimFunc(part, stringTrimSpaceCutset)
+
+ a := Accept{
+ Q: 1.0,
+ }
+
+ sp, remainingPart := nextSplitElement(part, ";")
+
+ sp0, spRemaining := nextSplitElement(sp, "/")
+ a.Type = strings.TrimFunc(sp0, stringTrimSpaceCutset)
+
+ switch {
+ case len(spRemaining) == 0:
+ if a.Type == "*" {
+ a.SubType = "*"
+ } else {
+ continue
+ }
+ default:
+ var sp1 string
+ sp1, spRemaining = nextSplitElement(spRemaining, "/")
+ if len(spRemaining) > 0 {
+ continue
+ }
+ a.SubType = strings.TrimFunc(sp1, stringTrimSpaceCutset)
+ }
+
+ if len(remainingPart) == 0 {
+ accept = append(accept, a)
+ continue
+ }
+
+ a.Params = make(map[string]string)
+ for len(remainingPart) > 0 {
+ sp, remainingPart = nextSplitElement(remainingPart, ";")
+ sp0, spRemaining = nextSplitElement(sp, "=")
+ if len(spRemaining) == 0 {
+ continue
+ }
+ var sp1 string
+ sp1, spRemaining = nextSplitElement(spRemaining, "=")
+ if len(spRemaining) != 0 {
+ continue
+ }
+ token := strings.TrimFunc(sp0, stringTrimSpaceCutset)
+ if token == "q" {
+ a.Q, _ = strconv.ParseFloat(sp1, 32)
+ } else {
+ a.Params[token] = strings.TrimFunc(sp1, stringTrimSpaceCutset)
+ }
+ }
+
+ accept = append(accept, a)
+ }
+
+ sort.Sort(accept)
+ return accept
+}
+
+// Negotiate the most appropriate content_type given the accept header
+// and a list of alternatives.
+func Negotiate(header string, alternatives []string) (content_type string) {
+ asp := make([][]string, 0, len(alternatives))
+ for _, ctype := range alternatives {
+ asp = append(asp, strings.SplitN(ctype, "/", 2))
+ }
+ for _, clause := range ParseAccept(header) {
+ for i, ctsp := range asp {
+ if clause.Type == ctsp[0] && clause.SubType == ctsp[1] {
+ content_type = alternatives[i]
+ return
+ }
+ if clause.Type == ctsp[0] && clause.SubType == "*" {
+ content_type = alternatives[i]
+ return
+ }
+ if clause.Type == "*" && clause.SubType == "*" {
+ content_type = alternatives[i]
+ return
+ }
+ }
+ }
+ return
+}
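Negotiate walks the parsed clauses in preference order and returns the first offered type that matches, falling back through subtype and type wildcards. A small usage sketch of the vendored package; the header and alternatives are made-up examples:

```go
package main

import (
	"fmt"

	"github.com/munnerz/goautoneg"
)

func main() {
	// A made-up Accept header and the list of types the server can produce.
	header := "text/html, application/json;q=0.9, */*;q=0.1"
	offered := []string{"application/json", "text/plain"}

	// Negotiate returns the best-matching offered type for the header.
	fmt.Println(goautoneg.Negotiate(header, offered)) // application/json

	// ParseAccept exposes the individual clauses, sorted by preference.
	for _, clause := range goautoneg.ParseAccept(header) {
		fmt.Printf("%s/%s q=%.2f\n", clause.Type, clause.SubType, clause.Q)
	}
}
```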
diff --git a/vendor/github.com/rancher/wharfie/pkg/registries/endpoint.go b/vendor/github.com/rancher/wharfie/pkg/registries/endpoint.go
index 62d7118ef4..e33f51b7c9 100644
--- a/vendor/github.com/rancher/wharfie/pkg/registries/endpoint.go
+++ b/vendor/github.com/rancher/wharfie/pkg/registries/endpoint.go
@@ -13,6 +13,11 @@ import (
var _ authn.Keychain = &endpoint{}
var _ http.RoundTripper = &endpoint{}
+const (
+ defaultRegistry = "docker.io"
+ defaultRegistryHost = "index.docker.io"
+)
+
type endpoint struct {
auth authn.Authenticator
keychain authn.Keychain
@@ -69,6 +74,13 @@ func (e endpoint) RoundTrip(req *http.Request) (*http.Response, error) {
}
}
+ // set ns from original host if the request is being proxied
+ if ns := getNamespace(req.Host); isProxy(endpointURL.Host, ns) {
+ q := req.URL.Query()
+ q.Set("ns", ns)
+ req.URL.RawQuery = q.Encode()
+ }
+
// override request host and scheme
req.Host = endpointURL.Host
req.URL.Host = endpointURL.Host
@@ -80,3 +92,25 @@ func (e endpoint) RoundTrip(req *http.Request) (*http.Response, error) {
}
return e.registry.getTransport(req.URL).RoundTrip(req)
}
+
+// isDefault returns true if this endpoint is the default endpoint for the image -
+// does the registry namespace match the mirror endpoint namespace?
+func (e endpoint) isDefault() bool {
+ return getNamespace(e.ref.Context().RegistryStr()) == getNamespace(e.url.Host)
+}
+
+func getNamespace(host string) string {
+ if host == defaultRegistryHost {
+ return defaultRegistry
+ }
+ return host
+}
+
+func isProxy(host, ns string) bool {
+ if ns != "" && ns != host {
+ if ns != defaultRegistry || host != defaultRegistryHost {
+ return true
+ }
+ }
+ return false
+}
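The new helpers above decide when a request is going through a mirror rather than the registry the image actually lives in: getNamespace canonicalizes index.docker.io to docker.io, and isProxy reports true only when the upstream namespace differs from the endpoint host (excluding the Docker Hub default pair). A test-style sketch of that behaviour with hypothetical hosts, assuming it sits inside the same registries package:

```go
package registries

import "testing"

// TestIsProxy sketches the intended behaviour of the helpers above
// with hypothetical hosts (not part of the vendored change).
func TestIsProxy(t *testing.T) {
	// A third-party mirror serving content for docker.io is treated as a proxy,
	// so the ns query parameter gets set to the original namespace.
	if !isProxy("mirror.example.com", "docker.io") {
		t.Error("expected a mirror of docker.io to be detected as a proxy")
	}
	// The default Docker Hub endpoint is not a proxy for its own namespace.
	if isProxy(defaultRegistryHost, getNamespace(defaultRegistryHost)) {
		t.Error("expected index.docker.io not to be treated as a proxy for docker.io")
	}
	// An endpoint whose host matches the namespace is the registry itself.
	if isProxy("registry.example.com", "registry.example.com") {
		t.Error("expected identical host and namespace not to be a proxy")
	}
}
```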
diff --git a/vendor/github.com/rancher/wharfie/pkg/registries/registries.go b/vendor/github.com/rancher/wharfie/pkg/registries/registries.go
index a6195b070c..88a7ebacd9 100644
--- a/vendor/github.com/rancher/wharfie/pkg/registries/registries.go
+++ b/vendor/github.com/rancher/wharfie/pkg/registries/registries.go
@@ -11,6 +11,7 @@ import (
"os"
"path"
"regexp"
+ "strings"
"time"
"github.com/google/go-containerregistry/pkg/authn"
@@ -56,7 +57,6 @@ func GetPrivateRegistries(path string) (*registry, error) {
}
func (r *registry) Image(ref name.Reference, options ...remote.Option) (v1.Image, error) {
- ref = r.rewrite(ref)
endpoints, err := r.getEndpoints(ref)
if err != nil {
return nil, err
@@ -64,8 +64,13 @@ func (r *registry) Image(ref name.Reference, options ...remote.Option) (v1.Image
errs := []error{}
for _, endpoint := range endpoints {
+ epRef := ref
+ if !endpoint.isDefault() {
+ epRef = r.rewrite(ref)
+ }
+ logrus.Debugf("Trying endpoint %s", endpoint.url)
endpointOptions := append(options, remote.WithTransport(endpoint), remote.WithAuthFromKeychain(endpoint))
- remoteImage, err := remote.Image(ref, endpointOptions...)
+ remoteImage, err := remote.Image(epRef, endpointOptions...)
if err != nil {
logrus.Warnf("Failed to get image from endpoint: %v", err)
errs = append(errs, err)
@@ -159,38 +164,20 @@ func (r *registry) getEndpoints(ref name.Reference) ([]endpoint, error) {
for _, key := range keys {
if mirror, ok := r.Registry.Mirrors[key]; ok {
for _, endpointStr := range mirror.Endpoints {
- endpointURL, err := url.Parse(endpointStr)
- if err != nil {
- logrus.Warnf("Ignoring invalid endpoint URL for registry %s: %v", registry, err)
- continue
- }
- if !endpointURL.IsAbs() {
- logrus.Warnf("Ignoring relative endpoint URL for registry %s: %q", registry, endpointStr)
- continue
- }
- if endpointURL.Host == "" {
- logrus.Warnf("Ignoring endpoint URL without host for registry %s: %q", registry, endpointStr)
- continue
- }
- if endpointURL.Scheme == "" {
- logrus.Warnf("Ignoring endpoint URL without scheme for registry %s: %q", registry, endpointStr)
- continue
- }
- if endpointURL.Path == "" {
- endpointURL.Path = "/v2"
+ if endpointURL, err := normalizeEndpointAddress(endpointStr); err != nil {
+ logrus.Warnf("Ignoring invalid endpoint %s for registry %s: %v", endpointStr, registry, err)
} else {
- endpointURL.Path = path.Clean(endpointURL.Path)
+ endpoints = append(endpoints, r.makeEndpoint(endpointURL, ref))
}
- endpoints = append(endpoints, r.makeEndpoint(endpointURL, ref))
}
- // found a mirrors configuration for this registry, don't check any further entries
+ // found a mirror for this registry, don't check any further entries
// even if we didn't add any valid endpoints.
break
}
}
// always add the default endpoint
- defaultURL, err := url.Parse(fmt.Sprintf("https://%s/v2", registry))
+ defaultURL, err := normalizeEndpointAddress(registry)
if err != nil {
return nil, errors.Wrapf(err, "failed to construct default endpoint for registry %s", registry)
}
@@ -210,6 +197,41 @@ func (r *registry) makeEndpoint(endpointURL *url.URL, ref name.Reference) endpoi
}
}
+// normalizeEndpointAddress normalizes the endpoint address.
+// If successful, it returns the registry URL.
+// If unsuccessful, an error is returned.
+// Scheme and hostname logic should match containerd:
+// https://github.com/containerd/containerd/blob/v1.7.13/remotes/docker/config/hosts.go#L99-L131
+func normalizeEndpointAddress(endpoint string) (*url.URL, error) {
+ // Ensure that the endpoint address has a scheme so that the URL is parsed properly
+ if !strings.Contains(endpoint, "://") {
+ endpoint = "//" + endpoint
+ }
+ endpointURL, err := url.Parse(endpoint)
+ if err != nil {
+ return nil, err
+ }
+ if endpointURL.Host == "" {
+ return nil, fmt.Errorf("invalid URL without host: %s", endpoint)
+ }
+ if endpointURL.Scheme == "" {
+ // localhost on odd ports defaults to http
+ port := endpointURL.Port()
+ if isLocalhost(endpointURL.Host) && port != "" && port != "443" {
+ endpointURL.Scheme = "http"
+ } else {
+ endpointURL.Scheme = "https"
+ }
+ }
+ switch endpointURL.Path {
+ case "", "/", "/v2":
+ endpointURL.Path = "/v2"
+ default:
+ endpointURL.Path = path.Clean(endpointURL.Path)
+ }
+ return endpointURL, nil
+}
+
// getAuthenticatorForHost returns an Authenticator for an endpoint URL. If no
// configuration is present, Anonymous authentication is used.
func (r *registry) getAuthenticator(endpointURL *url.URL) authn.Authenticator {
@@ -218,6 +240,7 @@ func (r *registry) getAuthenticator(endpointURL *url.URL) authn.Authenticator {
if registry == name.DefaultRegistry {
keys = append(keys, "docker.io")
}
+ keys = append(keys, "*")
for _, key := range keys {
if config, ok := r.Registry.Configs[key]; ok {
@@ -229,6 +252,9 @@ func (r *registry) getAuthenticator(endpointURL *url.URL) authn.Authenticator {
IdentityToken: config.Auth.IdentityToken,
})
}
+ // found a config for this registry, don't check any further entries
+ // even if we didn't add any valid auth.
+ break
}
}
return authn.Anonymous
@@ -237,41 +263,52 @@ func (r *registry) getAuthenticator(endpointURL *url.URL) authn.Authenticator {
// getTLSConfig returns TLS configuration for an endpoint URL. This is cribbed from
// https://github.com/containerd/cri/blob/release/1.4/pkg/server/image_pull.go#L274
func (r *registry) getTLSConfig(endpointURL *url.URL) (*tls.Config, error) {
- host := endpointURL.Host
tlsConfig := &tls.Config{}
- if config, ok := r.Registry.Configs[host]; ok {
- if config.TLS != nil {
- if config.TLS.CertFile != "" && config.TLS.KeyFile == "" {
- return nil, errors.Errorf("cert file %q was specified, but no corresponding key file was specified", config.TLS.CertFile)
- }
- if config.TLS.CertFile == "" && config.TLS.KeyFile != "" {
- return nil, errors.Errorf("key file %q was specified, but no corresponding cert file was specified", config.TLS.KeyFile)
- }
- if config.TLS.CertFile != "" && config.TLS.KeyFile != "" {
- cert, err := tls.LoadX509KeyPair(config.TLS.CertFile, config.TLS.KeyFile)
- if err != nil {
- return nil, errors.Wrap(err, "failed to load cert file")
+ registry := endpointURL.Host
+ keys := []string{registry}
+ if registry == name.DefaultRegistry {
+ keys = append(keys, "docker.io")
+ }
+ keys = append(keys, "*")
+
+ for _, key := range keys {
+ if config, ok := r.Registry.Configs[key]; ok {
+ if config.TLS != nil {
+ if config.TLS.CertFile != "" && config.TLS.KeyFile == "" {
+ return nil, errors.Errorf("cert file %q was specified, but no corresponding key file was specified", config.TLS.CertFile)
}
- if len(cert.Certificate) != 0 {
- tlsConfig.Certificates = []tls.Certificate{cert}
+ if config.TLS.CertFile == "" && config.TLS.KeyFile != "" {
+ return nil, errors.Errorf("key file %q was specified, but no corresponding cert file was specified", config.TLS.KeyFile)
}
- tlsConfig.BuildNameToCertificate() // nolint:staticcheck
- }
-
- if config.TLS.CAFile != "" {
- caCertPool, err := x509.SystemCertPool()
- if err != nil {
- return nil, errors.Wrap(err, "failed to get system cert pool")
+ if config.TLS.CertFile != "" && config.TLS.KeyFile != "" {
+ cert, err := tls.LoadX509KeyPair(config.TLS.CertFile, config.TLS.KeyFile)
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to load cert file")
+ }
+ if len(cert.Certificate) != 0 {
+ tlsConfig.Certificates = []tls.Certificate{cert}
+ }
+ tlsConfig.BuildNameToCertificate() // nolint:staticcheck
}
- caCert, err := ioutil.ReadFile(config.TLS.CAFile)
- if err != nil {
- return nil, errors.Wrap(err, "failed to load CA file")
+
+ if config.TLS.CAFile != "" {
+ caCertPool, err := x509.SystemCertPool()
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to get system cert pool")
+ }
+ caCert, err := ioutil.ReadFile(config.TLS.CAFile)
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to load CA file")
+ }
+ caCertPool.AppendCertsFromPEM(caCert)
+ tlsConfig.RootCAs = caCertPool
}
- caCertPool.AppendCertsFromPEM(caCert)
- tlsConfig.RootCAs = caCertPool
- }
- tlsConfig.InsecureSkipVerify = config.TLS.InsecureSkipVerify
+ tlsConfig.InsecureSkipVerify = config.TLS.InsecureSkipVerify
+ }
+ // found a config for this registry, don't check any further entries
+ // even if we didn't add any valid tls config.
+ break
}
}
@@ -291,8 +328,24 @@ func (r *registry) getRewrites(registry string) map[string]string {
if len(mirror.Rewrites) > 0 {
return mirror.Rewrites
}
+ // found a mirror for this registry, don't check any further entries
+ // even if we didn't add any rewrites.
+ break
}
}
return nil
}
+
+func isLocalhost(host string) bool {
+ if h, _, err := net.SplitHostPort(host); err == nil {
+ host = h
+ }
+
+ if host == "localhost" {
+ return true
+ }
+
+ ip := net.ParseIP(host)
+ return ip.IsLoopback()
+}
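normalizeEndpointAddress mirrors containerd's endpoint handling: bare hosts get an https scheme, localhost (or a loopback IP) on a port other than 443 gets http, and an empty or "/v2" path is normalized to "/v2" while any other path is kept after cleaning. A sketch of those rules with made-up endpoint strings, assuming it lives inside the same registries package:

```go
package registries

import "fmt"

// demoNormalize prints how a few hypothetical endpoint strings are normalized.
func demoNormalize() {
	for _, s := range []string{
		"registry.example.com",              // bare host -> https://registry.example.com/v2
		"localhost:5000",                    // localhost on a non-443 port -> http://localhost:5000/v2
		"https://mirror.example.com/prefix", // explicit scheme and path are kept
	} {
		if u, err := normalizeEndpointAddress(s); err == nil {
			fmt.Println(u.String())
		}
	}
}
```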
diff --git a/vendor/github.com/x448/float16/.travis.yml b/vendor/github.com/x448/float16/.travis.yml
new file mode 100644
index 0000000000..8902bdaaff
--- /dev/null
+++ b/vendor/github.com/x448/float16/.travis.yml
@@ -0,0 +1,13 @@
+language: go
+
+go:
+ - 1.11.x
+
+env:
+ - GO111MODULE=on
+
+script:
+ - go test -short -coverprofile=coverage.txt -covermode=count ./...
+
+after_success:
+ - bash <(curl -s https://codecov.io/bash)
diff --git a/vendor/github.com/x448/float16/LICENSE b/vendor/github.com/x448/float16/LICENSE
new file mode 100644
index 0000000000..bf6e357854
--- /dev/null
+++ b/vendor/github.com/x448/float16/LICENSE
@@ -0,0 +1,22 @@
+MIT License
+
+Copyright (c) 2019 Montgomery Edwards⁴⁴⁸ and Faye Amacker
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
diff --git a/vendor/github.com/x448/float16/README.md b/vendor/github.com/x448/float16/README.md
new file mode 100644
index 0000000000..b524b8135d
--- /dev/null
+++ b/vendor/github.com/x448/float16/README.md
@@ -0,0 +1,133 @@
+# Float16 (Binary16) in Go/Golang
+[](https://travis-ci.org/x448/float16)
+[](https://codecov.io/gh/x448/float16)
+[](https://goreportcard.com/report/github.com/x448/float16)
+[](https://github.com/x448/float16/releases)
+[](https://raw.githubusercontent.com/x448/float16/master/LICENSE)
+
+`float16` package provides [IEEE 754 half-precision floating-point format (binary16)](https://en.wikipedia.org/wiki/Half-precision_floating-point_format) with IEEE 754 default rounding for conversions. IEEE 754-2008 refers to this 16-bit floating-point format as binary16.
+
+IEEE 754 default rounding ("Round-to-Nearest RoundTiesToEven") is considered the most accurate and statistically unbiased estimate of the true result.
+
+All possible 4+ billion floating-point conversions with this library are verified to be correct.
+
+Lowercase "float16" refers to IEEE 754 binary16. And capitalized "Float16" refers to exported Go data type provided by this library.
+
+## Features
+Current features include:
+
+* float16 to float32 conversions use lossless conversion.
+* float32 to float16 conversions use IEEE 754-2008 "Round-to-Nearest RoundTiesToEven".
+* conversions using pure Go take about 2.65 ns/op on a desktop amd64.
+* unit tests provide 100% code coverage and check all possible 4+ billion conversions.
+* other functions include: IsInf(), IsNaN(), IsNormal(), PrecisionFromfloat32(), String(), etc.
+* all functions in this library use zero allocs except String().
+
+## Status
+This library is used by [fxamacker/cbor](https://github.com/fxamacker/cbor) and is ready for production use on supported platforms. The version number < 1.0 indicates more functions and options are planned but not yet published.
+
+Current status:
+
+* core API is done and breaking API changes are unlikely.
+* 100% of unit tests pass:
+ * short mode (`go test -short`) tests around 65765 conversions in 0.005s.
+ * normal mode (`go test`) tests all possible 4+ billion conversions in about 95s.
+* 100% code coverage with both short mode and normal mode.
+* tested on amd64 but it should work on all little-endian platforms supported by Go.
+
+Roadmap:
+
+* add functions for fast batch conversions leveraging SIMD when supported by hardware.
+* speed up unit test when verifying all possible 4+ billion conversions.
+* test on additional platforms.
+
+## Float16 to Float32 Conversion
+Conversions from float16 to float32 are lossless conversions. All 65536 possible float16 to float32 conversions (in pure Go) are confirmed to be correct.
+
+Unit tests take a fraction of a second to check all 65536 expected values for float16 to float32 conversions.
+
+## Float32 to Float16 Conversion
+Conversions from float32 to float16 use IEEE 754 default rounding ("Round-to-Nearest RoundTiesToEven"). All 4294967296 possible float32 to float16 conversions (in pure Go) are confirmed to be correct.
+
+Unit tests in normal mode take about 1-2 minutes to check all 4+ billion float32 input values and results for Fromfloat32(), FromNaN32ps(), and PrecisionFromfloat32().
+
+Unit tests in short mode use a small subset (around 229 float32 inputs) and finish in under 0.01 second while still reaching 100% code coverage.
+
+## Usage
+Install with `go get github.com/x448/float16`.
+```
+// Convert float32 to float16
+pi := float32(math.Pi)
+pi16 := float16.Fromfloat32(pi)
+
+// Convert float16 to float32
+pi32 := pi16.Float32()
+
+// PrecisionFromfloat32() is faster than the overhead of calling a function.
+// This example only converts if there's no data loss and input is not a subnormal.
+if float16.PrecisionFromfloat32(pi) == float16.PrecisionExact {
+ pi16 := float16.Fromfloat32(pi)
+}
+```
+
+## Float16 Type and API
+Float16 (capitalized) is a Go type with uint16 as the underlying state. There are 6 exported functions and 9 exported methods.
+```
+package float16 // import "github.com/x448/float16"
+
+// Exported types and consts
+type Float16 uint16
+const ErrInvalidNaNValue = float16Error("float16: invalid NaN value, expected IEEE 754 NaN")
+
+// Exported functions
+Fromfloat32(f32 float32) Float16 // Float16 number converted from f32 using IEEE 754 default rounding
+ with identical results to AMD and Intel F16C hardware. NaN inputs
+ are converted with quiet bit always set on, to be like F16C.
+
+FromNaN32ps(nan float32) (Float16, error) // Float16 NaN without modifying quiet bit.
+ // The "ps" suffix means "preserve signaling".
+ // Returns sNaN and ErrInvalidNaNValue if nan isn't a NaN.
+
+Frombits(b16 uint16) Float16 // Float16 number corresponding to b16 (IEEE 754 binary16 rep.)
+NaN() Float16 // Float16 of IEEE 754 binary16 not-a-number
+Inf(sign int) Float16 // Float16 of IEEE 754 binary16 infinity according to sign
+
+PrecisionFromfloat32(f32 float32) Precision // quickly indicates exact, ..., overflow, underflow
+ // (inline and < 1 ns/op)
+// Exported methods
+(f Float16) Float32() float32 // float32 number converted from f16 using lossless conversion
+(f Float16) Bits() uint16 // the IEEE 754 binary16 representation of f
+(f Float16) IsNaN() bool // true if f is not-a-number (NaN)
+(f Float16) IsQuietNaN() bool // true if f is a quiet not-a-number (NaN)
+(f Float16) IsInf(sign int) bool // true if f is infinite based on sign (-1=NegInf, 0=any, 1=PosInf)
+(f Float16) IsFinite() bool // true if f is not infinite or NaN
+(f Float16) IsNormal() bool // true if f is not zero, infinite, subnormal, or NaN.
+(f Float16) Signbit() bool // true if f is negative or negative zero
+(f Float16) String() string // string representation of f to satisfy fmt.Stringer interface
+```
+See [API](https://godoc.org/github.com/x448/float16) at godoc.org for more info.
+
+## Benchmarks
+Conversions (in pure Go) are around 2.65 ns/op for float16 -> float32 and float32 -> float16 on amd64. Speeds can vary depending on input value.
+
+```
+All functions have zero allocations except float16.String().
+
+FromFloat32pi-2 2.59ns ± 0% // speed using Fromfloat32() to convert a float32 of math.Pi to Float16
+ToFloat32pi-2 2.69ns ± 0% // speed using Float32() to convert a float16 of math.Pi to float32
+Frombits-2 0.29ns ± 5% // speed using Frombits() to cast a uint16 to Float16
+
+PrecisionFromFloat32-2 0.29ns ± 1% // speed using PrecisionFromfloat32() to check for overflows, etc.
+```
+
+## System Requirements
+* Tested on Go 1.11, 1.12, and 1.13 but it should also work with older versions.
+* Tested on amd64 but it should also work on all little-endian platforms supported by Go.
+
+## Special Thanks
+Special thanks to Kathryn Long (starkat99) for creating [half-rs](https://github.com/starkat99/half-rs), a very nice rust implementation of float16.
+
+## License
+Copyright (c) 2019 Montgomery Edwards⁴⁴⁸ and Faye Amacker
+
+Licensed under [MIT License](LICENSE)
diff --git a/vendor/github.com/x448/float16/float16.go b/vendor/github.com/x448/float16/float16.go
new file mode 100644
index 0000000000..1a0e6dad00
--- /dev/null
+++ b/vendor/github.com/x448/float16/float16.go
@@ -0,0 +1,302 @@
+// Copyright 2019 Montgomery Edwards⁴⁴⁸ and Faye Amacker
+//
+// Special thanks to Kathryn Long for her Rust implementation
+// of float16 at github.com/starkat99/half-rs (MIT license)
+
+package float16
+
+import (
+ "math"
+ "strconv"
+)
+
+// Float16 represents IEEE 754 half-precision floating-point numbers (binary16).
+type Float16 uint16
+
+// Precision indicates whether the conversion to Float16 is
+// exact, subnormal without dropped bits, inexact, underflow, or overflow.
+type Precision int
+
+const (
+
+ // PrecisionExact is for non-subnormals that don't drop bits during conversion.
+ // All of these can round-trip. Should always convert to float16.
+ PrecisionExact Precision = iota
+
+ // PrecisionUnknown is for subnormals that don't drop bits during conversion but
+ // not all of these can round-trip so precision is unknown without more effort.
+ // Only 2046 of these can round-trip and the rest cannot round-trip.
+ PrecisionUnknown
+
+ // PrecisionInexact is for dropped significand bits and cannot round-trip.
+ // Some of these are subnormals. Cannot round-trip float32->float16->float32.
+ PrecisionInexact
+
+ // PrecisionUnderflow is for Underflows. Cannot round-trip float32->float16->float32.
+ PrecisionUnderflow
+
+ // PrecisionOverflow is for Overflows. Cannot round-trip float32->float16->float32.
+ PrecisionOverflow
+)
+
+// PrecisionFromfloat32 returns Precision without performing
+// the conversion. Conversions from both Infinity and NaN
+// values will always report PrecisionExact even if NaN payload
+// or NaN-Quiet-Bit is lost. This function is kept simple to
+// allow inlining and run < 0.5 ns/op, to serve as a fast filter.
+func PrecisionFromfloat32(f32 float32) Precision {
+ u32 := math.Float32bits(f32)
+
+ if u32 == 0 || u32 == 0x80000000 {
+ // +- zero will always be exact conversion
+ return PrecisionExact
+ }
+
+ const COEFMASK uint32 = 0x7fffff // 23 least significant bits
+ const EXPSHIFT uint32 = 23
+ const EXPBIAS uint32 = 127
+ const EXPMASK uint32 = uint32(0xff) << EXPSHIFT
+ const DROPMASK uint32 = COEFMASK >> 10
+
+ exp := int32(((u32 & EXPMASK) >> EXPSHIFT) - EXPBIAS)
+ coef := u32 & COEFMASK
+
+ if exp == 128 {
+ // +- infinity or NaN
+ // apps may want to do extra checks for NaN separately
+ return PrecisionExact
+ }
+
+ // https://en.wikipedia.org/wiki/Half-precision_floating-point_format says,
+ // "Decimals between 2^−24 (minimum positive subnormal) and 2^−14 (maximum subnormal): fixed interval 2^−24"
+ if exp < -24 {
+ return PrecisionUnderflow
+ }
+ if exp > 15 {
+ return PrecisionOverflow
+ }
+ if (coef & DROPMASK) != uint32(0) {
+ // these include subnormals and non-subnormals that dropped bits
+ return PrecisionInexact
+ }
+
+ if exp < -14 {
+ // Subnormals. Caller may want to test these further.
+ // There are 2046 subnormals that can successfully round-trip f32->f16->f32
+ // and 20 of those 2046 have 32-bit input coef == 0.
+ // RFC 7049 and 7049bis Draft 12 don't precisely define "preserves value"
+ // so some protocols and libraries will choose to handle subnormals differently
+ // when deciding to encode them to CBOR float32 vs float16.
+ return PrecisionUnknown
+ }
+
+ return PrecisionExact
+}
+
+// Frombits returns the float16 number corresponding to the IEEE 754 binary16
+// representation u16, with the sign bit of u16 and the result in the same bit
+// position. Frombits(Bits(x)) == x.
+func Frombits(u16 uint16) Float16 {
+ return Float16(u16)
+}
+
+// Fromfloat32 returns a Float16 value converted from f32. Conversion uses
+// IEEE default rounding (nearest int, with ties to even).
+func Fromfloat32(f32 float32) Float16 {
+ return Float16(f32bitsToF16bits(math.Float32bits(f32)))
+}
+
+// ErrInvalidNaNValue indicates a NaN was not received.
+const ErrInvalidNaNValue = float16Error("float16: invalid NaN value, expected IEEE 754 NaN")
+
+type float16Error string
+
+func (e float16Error) Error() string { return string(e) }
+
+// FromNaN32ps converts nan to IEEE binary16 NaN while preserving both
+// signaling and payload. Unlike Fromfloat32(), which can only return
+// qNaN because it sets quiet bit = 1, this can return both sNaN and qNaN.
+// If the result is infinity (sNaN with empty payload), then the
+// lowest bit of payload is set to make the result a NaN.
+// Returns ErrInvalidNaNValue and 0x7c01 (sNaN) if nan isn't IEEE 754 NaN.
+// This function was kept simple to be able to inline.
+func FromNaN32ps(nan float32) (Float16, error) {
+ const SNAN = Float16(uint16(0x7c01)) // signalling NaN
+
+ u32 := math.Float32bits(nan)
+ sign := u32 & 0x80000000
+ exp := u32 & 0x7f800000
+ coef := u32 & 0x007fffff
+
+ if (exp != 0x7f800000) || (coef == 0) {
+ return SNAN, ErrInvalidNaNValue
+ }
+
+ u16 := uint16((sign >> 16) | uint32(0x7c00) | (coef >> 13))
+
+ if (u16 & 0x03ff) == 0 {
+ // result became infinity, make it NaN by setting lowest bit in payload
+ u16 = u16 | 0x0001
+ }
+
+ return Float16(u16), nil
+}
+
+// NaN returns a Float16 of IEEE 754 binary16 not-a-number (NaN).
+// Returned NaN value 0x7e01 has all exponent bits = 1 with the
+// first and last bits = 1 in the significand. This is consistent
+// with Go's 64-bit math.NaN(). Canonical CBOR in RFC 7049 uses 0x7e00.
+func NaN() Float16 {
+ return Float16(0x7e01)
+}
+
+// Inf returns a Float16 with an infinity value with the specified sign.
+// A sign >= 0 returns positive infinity.
+// A sign < 0 returns negative infinity.
+func Inf(sign int) Float16 {
+ if sign >= 0 {
+ return Float16(0x7c00)
+ }
+ return Float16(0x8000 | 0x7c00)
+}
+
+// Float32 returns a float32 converted from f (Float16).
+// This is a lossless conversion.
+func (f Float16) Float32() float32 {
+ u32 := f16bitsToF32bits(uint16(f))
+ return math.Float32frombits(u32)
+}
+
+// Bits returns the IEEE 754 binary16 representation of f, with the sign bit
+// of f and the result in the same bit position. Bits(Frombits(x)) == x.
+func (f Float16) Bits() uint16 {
+ return uint16(f)
+}
+
+// IsNaN reports whether f is an IEEE 754 binary16 “not-a-number” value.
+func (f Float16) IsNaN() bool {
+ return (f&0x7c00 == 0x7c00) && (f&0x03ff != 0)
+}
+
+// IsQuietNaN reports whether f is a quiet (non-signaling) IEEE 754 binary16
+// “not-a-number” value.
+func (f Float16) IsQuietNaN() bool {
+ return (f&0x7c00 == 0x7c00) && (f&0x03ff != 0) && (f&0x0200 != 0)
+}
+
+// IsInf reports whether f is an infinity (inf).
+// A sign > 0 reports whether f is positive inf.
+// A sign < 0 reports whether f is negative inf.
+// A sign == 0 reports whether f is either inf.
+func (f Float16) IsInf(sign int) bool {
+ return ((f == 0x7c00) && sign >= 0) ||
+ (f == 0xfc00 && sign <= 0)
+}
+
+// IsFinite returns true if f is neither infinite nor NaN.
+func (f Float16) IsFinite() bool {
+ return (uint16(f) & uint16(0x7c00)) != uint16(0x7c00)
+}
+
+// IsNormal returns true if f is neither zero, infinite, subnormal, nor NaN.
+func (f Float16) IsNormal() bool {
+ exp := uint16(f) & uint16(0x7c00)
+ return (exp != uint16(0x7c00)) && (exp != 0)
+}
+
+// Signbit reports whether f is negative or negative zero.
+func (f Float16) Signbit() bool {
+ return (uint16(f) & uint16(0x8000)) != 0
+}
+
+// String satisfies the fmt.Stringer interface.
+func (f Float16) String() string {
+ return strconv.FormatFloat(float64(f.Float32()), 'f', -1, 32)
+}
+
+// f16bitsToF32bits returns uint32 (float32 bits) converted from the specified uint16.
+func f16bitsToF32bits(in uint16) uint32 {
+ // All 65536 conversions with this were confirmed to be correct
+ // by Montgomery Edwards⁴⁴⁸ (github.com/x448).
+
+ sign := uint32(in&0x8000) << 16 // sign for 32-bit
+ exp := uint32(in&0x7c00) >> 10 // exponent for 16-bit
+ coef := uint32(in&0x03ff) << 13 // significand for 32-bit
+
+ if exp == 0x1f {
+ if coef == 0 {
+ // infinity
+ return sign | 0x7f800000 | coef
+ }
+ // NaN
+ return sign | 0x7fc00000 | coef
+ }
+
+ if exp == 0 {
+ if coef == 0 {
+ // zero
+ return sign
+ }
+
+ // normalize subnormal numbers
+ exp++
+ for coef&0x7f800000 == 0 {
+ coef <<= 1
+ exp--
+ }
+ coef &= 0x007fffff
+ }
+
+ return sign | ((exp + (0x7f - 0xf)) << 23) | coef
+}
+
+// f32bitsToF16bits returns uint16 (Float16 bits) converted from the specified float32.
+// Conversion rounds to nearest integer with ties to even.
+func f32bitsToF16bits(u32 uint32) uint16 {
+ // Translated from Rust to Go by Montgomery Edwards⁴⁴⁸ (github.com/x448).
+ // All 4294967296 conversions with this were confirmed to be correct by x448.
+ // Original Rust implementation is by Kathryn Long (github.com/starkat99) with MIT license.
+
+ sign := u32 & 0x80000000
+ exp := u32 & 0x7f800000
+ coef := u32 & 0x007fffff
+
+ if exp == 0x7f800000 {
+ // NaN or Infinity
+ nanBit := uint32(0)
+ if coef != 0 {
+ nanBit = uint32(0x0200)
+ }
+ return uint16((sign >> 16) | uint32(0x7c00) | nanBit | (coef >> 13))
+ }
+
+ halfSign := sign >> 16
+
+ unbiasedExp := int32(exp>>23) - 127
+ halfExp := unbiasedExp + 15
+
+ if halfExp >= 0x1f {
+ return uint16(halfSign | uint32(0x7c00))
+ }
+
+ if halfExp <= 0 {
+ if 14-halfExp > 24 {
+ return uint16(halfSign)
+ }
+ coef := coef | uint32(0x00800000)
+ halfCoef := coef >> uint32(14-halfExp)
+ roundBit := uint32(1) << uint32(13-halfExp)
+ if (coef&roundBit) != 0 && (coef&(3*roundBit-1)) != 0 {
+ halfCoef++
+ }
+ return uint16(halfSign | halfCoef)
+ }
+
+ uHalfExp := uint32(halfExp) << 10
+ halfCoef := coef >> 13
+ roundBit := uint32(0x00001000)
+ if (coef&roundBit) != 0 && (coef&(3*roundBit-1)) != 0 {
+ return uint16((halfSign | uHalfExp | halfCoef) + 1)
+ }
+ return uint16(halfSign | uHalfExp | halfCoef)
+}
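
The rounding condition in f32bitsToF16bits (round bit set, and either a sticky bit or the kept significand's low bit set) implements round-to-nearest with ties to even. A test-style sketch below illustrates this on values exactly halfway between two binary16 neighbours; the file and test names are illustrative, not part of the vendored package.

```go
package float16_test

import (
	"testing"

	"github.com/x448/float16"
)

// TestTiesToEven: 1 + 2^-11 is exactly halfway between the binary16
// neighbours 1.0 and 1 + 2^-10, so it rounds to the even significand (1.0),
// while 1 + 3*2^-11 is halfway with an odd kept significand and rounds up
// to 1 + 2^-9.
func TestTiesToEven(t *testing.T) {
	const half = float32(1.0 / 2048.0) // 2^-11, exact in float32

	if got := float16.Fromfloat32(1 + half).Float32(); got != 1.0 {
		t.Fatalf("1+2^-11: got %v, want 1.0", got)
	}
	if got := float16.Fromfloat32(1 + 3*half).Float32(); got != 1+4*half {
		t.Fatalf("1+3*2^-11: got %v, want 1+2^-9", got)
	}
}
```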
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/README.md b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/README.md
new file mode 100644
index 0000000000..5309bb7cb1
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/README.md
@@ -0,0 +1,3 @@
+# OTLP Trace gRPC Exporter
+
+[](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc)
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go
index 86fb61a0de..2171bee3c8 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package otlptracegrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
@@ -89,11 +78,11 @@ func newClient(opts ...Option) *client {
}
// Start establishes a gRPC connection to the collector.
-func (c *client) Start(ctx context.Context) error {
+func (c *client) Start(context.Context) error {
if c.conn == nil {
// If the caller did not provide a ClientConn when the client was
// created, create one using the configuration they did provide.
- conn, err := grpc.DialContext(ctx, c.endpoint, c.dialOpts...)
+ conn, err := grpc.NewClient(c.endpoint, c.dialOpts...)
if err != nil {
return err
}
@@ -240,7 +229,12 @@ func (c *client) exportContext(parent context.Context) (context.Context, context
}
if c.metadata.Len() > 0 {
- ctx = metadata.NewOutgoingContext(ctx, c.metadata)
+ md := c.metadata
+ if outMD, ok := metadata.FromOutgoingContext(ctx); ok {
+ md = metadata.Join(md, outMD)
+ }
+
+ ctx = metadata.NewOutgoingContext(ctx, md)
}
// Unify the client stopCtx with the parent.
@@ -260,30 +254,38 @@ func (c *client) exportContext(parent context.Context) (context.Context, context
// duration to wait for if an explicit throttle time is included in err.
func retryable(err error) (bool, time.Duration) {
s := status.Convert(err)
+ return retryableGRPCStatus(s)
+}
+
+func retryableGRPCStatus(s *status.Status) (bool, time.Duration) {
switch s.Code() {
case codes.Canceled,
codes.DeadlineExceeded,
- codes.ResourceExhausted,
codes.Aborted,
codes.OutOfRange,
codes.Unavailable,
codes.DataLoss:
- return true, throttleDelay(s)
+ // Additionally handle RetryInfo.
+ _, d := throttleDelay(s)
+ return true, d
+ case codes.ResourceExhausted:
+ // Retry only if the server signals that the recovery from resource exhaustion is possible.
+ return throttleDelay(s)
}
// Not a retry-able error.
return false, 0
}
-// throttleDelay returns a duration to wait for if an explicit throttle time
-// is included in the response status.
-func throttleDelay(s *status.Status) time.Duration {
+// throttleDelay returns whether the status includes a RetryInfo detail
+// and, if so, the explicit throttle duration to wait for.
+func throttleDelay(s *status.Status) (bool, time.Duration) {
for _, detail := range s.Details() {
if t, ok := detail.(*errdetails.RetryInfo); ok {
- return t.RetryDelay.AsDuration()
+ return true, t.RetryDelay.AsDuration()
}
}
- return 0
+ return false, 0
}
// MarshalLog is the marshaling function used by the logging system to represent this Client.
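
With this change, a plain ResourceExhausted status is no longer retried; the exporter retries it only when the server attaches a RetryInfo detail. The sketch below shows how such statuses look and how the RetryInfo detail is read back, mirroring the throttleDelay loop above; the server-side construction and message text are illustrative only.

```go
package main

import (
	"fmt"
	"time"

	"google.golang.org/genproto/googleapis/rpc/errdetails"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	// A ResourceExhausted status without RetryInfo: now treated as non-retryable.
	plain := status.New(codes.ResourceExhausted, "quota exceeded")

	// The same code with a RetryInfo detail: retryable, honoring the delay.
	withDetail, err := plain.WithDetails(&errdetails.RetryInfo{
		RetryDelay: durationpb.New(3 * time.Second),
	})
	if err != nil {
		panic(err)
	}

	for _, s := range []*status.Status{plain, withDetail} {
		retryable, delay := false, time.Duration(0)
		for _, d := range s.Details() {
			if ri, ok := d.(*errdetails.RetryInfo); ok {
				retryable, delay = true, ri.RetryDelay.AsDuration()
			}
		}
		fmt.Printf("code=%v retryable=%v delay=%v\n", s.Code(), retryable, delay)
	}
}
```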
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/doc.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/doc.go
new file mode 100644
index 0000000000..b7bd429ffd
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/doc.go
@@ -0,0 +1,65 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+/*
+Package otlptracegrpc provides an OTLP span exporter using gRPC.
+By default the telemetry is sent to https://localhost:4317.
+
+Exporter should be created using [New].
+
+The environment variables described below can be used for configuration.
+
+OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT (default: "https://localhost:4317") -
+target to which the exporter sends telemetry.
+The target syntax is defined in https://github.com/grpc/grpc/blob/master/doc/naming.md.
+The value must contain a scheme ("http" or "https") and host.
+The value may additionally contain a port, and a path.
+The value should not contain a query string or fragment.
+OTEL_EXPORTER_OTLP_TRACES_ENDPOINT takes precedence over OTEL_EXPORTER_OTLP_ENDPOINT.
+The configuration can be overridden by [WithEndpoint], [WithEndpointURL], [WithInsecure], and [WithGRPCConn] options.
+
+OTEL_EXPORTER_OTLP_INSECURE, OTEL_EXPORTER_OTLP_TRACES_INSECURE (default: "false") -
+setting "true" disables client transport security for the exporter's gRPC connection.
+You can use this only when an endpoint is provided without the http or https scheme.
+The scheme defined via OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT
+takes precedence over this setting.
+OTEL_EXPORTER_OTLP_TRACES_INSECURE takes precedence over OTEL_EXPORTER_OTLP_INSECURE.
+The configuration can be overridden by [WithInsecure], [WithGRPCConn] options.
+
+OTEL_EXPORTER_OTLP_HEADERS, OTEL_EXPORTER_OTLP_TRACES_HEADERS (default: none) -
+key-value pairs used as gRPC metadata associated with gRPC requests.
+The value is expected to be represented in a format matching the [W3C Baggage HTTP Header Content Format],
+except that additional semi-colon delimited metadata is not supported.
+Example value: "key1=value1,key2=value2".
+OTEL_EXPORTER_OTLP_TRACES_HEADERS takes precedence over OTEL_EXPORTER_OTLP_HEADERS.
+The configuration can be overridden by [WithHeaders] option.
+
+OTEL_EXPORTER_OTLP_TIMEOUT, OTEL_EXPORTER_OTLP_TRACES_TIMEOUT (default: "10000") -
+maximum time in milliseconds the OTLP exporter waits for each batch export.
+OTEL_EXPORTER_OTLP_TRACES_TIMEOUT takes precedence over OTEL_EXPORTER_OTLP_TIMEOUT.
+The configuration can be overridden by [WithTimeout] option.
+
+OTEL_EXPORTER_OTLP_COMPRESSION, OTEL_EXPORTER_OTLP_TRACES_COMPRESSION (default: none) -
+the gRPC compressor the exporter uses.
+Supported value: "gzip".
+OTEL_EXPORTER_OTLP_TRACES_COMPRESSION takes precedence over OTEL_EXPORTER_OTLP_COMPRESSION.
+The configuration can be overridden by [WithCompressor], [WithGRPCConn] options.
+
+OTEL_EXPORTER_OTLP_CERTIFICATE, OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE (default: none) -
+the filepath to the trusted certificate to use when verifying a server's TLS credentials.
+OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE takes precedence over OTEL_EXPORTER_OTLP_CERTIFICATE.
+The configuration can be overridden by [WithTLSCredentials], [WithGRPCConn] options.
+
+OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE, OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE (default: none) -
+the filepath to the client certificate/chain trust for client's private key to use in mTLS communication in PEM format.
+OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE takes precedence over OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE.
+The configuration can be overridden by [WithTLSCredentials], [WithGRPCConn] options.
+
+OTEL_EXPORTER_OTLP_CLIENT_KEY, OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY (default: none) -
+the filepath to the client's private key to use in mTLS communication in PEM format.
+OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY takes precedence over OTEL_EXPORTER_OTLP_CLIENT_KEY.
+The configuration can be overridden by [WithTLSCredentials], [WithGRPCConn] option.
+
+[W3C Baggage HTTP Header Content Format]: https://www.w3.org/TR/baggage/#header-content
+*/
+package otlptracegrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
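
As a quick reference for the environment variables documented above, the sketch below configures the exporter purely through the environment; the endpoint, header, and timeout values are placeholders. The signal-specific *_TRACES_* variables take precedence over the generic ones, and explicit options would in turn override both.

```go
package main

import (
	"context"
	"log"
	"os"

	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
)

func main() {
	// Placeholder values; equivalent in effect to WithEndpointURL/WithHeaders/WithTimeout.
	os.Setenv("OTEL_EXPORTER_OTLP_TRACES_ENDPOINT", "https://collector.example.com:4317")
	os.Setenv("OTEL_EXPORTER_OTLP_TRACES_HEADERS", "api-key=secret")
	os.Setenv("OTEL_EXPORTER_OTLP_TRACES_TIMEOUT", "5000") // milliseconds

	exp, err := otlptracegrpc.New(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = exp.Shutdown(context.Background()) }()

	_ = exp // normally registered with a trace.TracerProvider as a batch exporter
}
```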
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/exporter.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/exporter.go
index 89af41002f..b826b84247 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/exporter.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/exporter.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package otlptracegrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go
index becb1f0fbb..4abf48d1f6 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go
@@ -2,18 +2,7 @@
// source: internal/shared/otlp/envconfig/envconfig.go.tmpl
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package envconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig"
@@ -26,6 +15,7 @@ import (
"strconv"
"strings"
"time"
+ "unicode"
"go.opentelemetry.io/otel/internal/global"
)
@@ -174,13 +164,17 @@ func stringToHeader(value string) map[string]string {
global.Error(errors.New("missing '="), "parse headers", "input", header)
continue
}
- name, err := url.QueryUnescape(n)
- if err != nil {
- global.Error(err, "escape header key", "key", n)
+
+ trimmedName := strings.TrimSpace(n)
+
+ // Validate the key.
+ if !isValidHeaderKey(trimmedName) {
+ global.Error(errors.New("invalid header key"), "parse headers", "key", trimmedName)
continue
}
- trimmedName := strings.TrimSpace(name)
- value, err := url.QueryUnescape(v)
+
+ // Only decode the value.
+ value, err := url.PathUnescape(v)
if err != nil {
global.Error(err, "escape header value", "value", v)
continue
@@ -200,3 +194,22 @@ func createCertPool(certBytes []byte) (*x509.CertPool, error) {
}
return cp, nil
}
+
+func isValidHeaderKey(key string) bool {
+ if key == "" {
+ return false
+ }
+ for _, c := range key {
+ if !isTokenChar(c) {
+ return false
+ }
+ }
+ return true
+}
+
+func isTokenChar(c rune) bool {
+ return c <= unicode.MaxASCII && (unicode.IsLetter(c) ||
+ unicode.IsDigit(c) ||
+ c == '!' || c == '#' || c == '$' || c == '%' || c == '&' || c == '\'' || c == '*' ||
+ c == '+' || c == '-' || c == '.' || c == '^' || c == '_' || c == '`' || c == '|' || c == '~')
+}
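
The header parsing change stops percent-decoding keys and instead accepts only ASCII token characters. The sketch below reimplements that check outside the internal package purely to illustrate which OTEL_EXPORTER_OTLP_HEADERS keys pass; isToken is a hypothetical stand-in, not the vendored helper.

```go
package main

import (
	"fmt"
	"strings"
	"unicode"
)

// isToken mirrors the vendored isValidHeaderKey/isTokenChar rule:
// keys must be non-empty and contain only ASCII token characters.
func isToken(key string) bool {
	if key == "" {
		return false
	}
	for _, c := range key {
		ok := c <= unicode.MaxASCII && (unicode.IsLetter(c) || unicode.IsDigit(c) ||
			strings.ContainsRune("!#$%&'*+-.^_`|~", c))
		if !ok {
			return false
		}
	}
	return true
}

func main() {
	// Given OTEL_EXPORTER_OTLP_HEADERS="api-key=secret,bad key=x",
	// the first key is kept and the second is rejected.
	for _, k := range []string{"api-key", "bad key", "x-tenant-id"} {
		fmt.Printf("%q valid=%v\n", k, isToken(k))
	}
}
```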
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/gen.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/gen.go
index 1fb2906189..97cd6c54f7 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/gen.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/gen.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal"
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/envconfig.go
index 32f6dddb4f..7bb189a94b 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/envconfig.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/envconfig.go
@@ -2,18 +2,7 @@
// source: internal/shared/otlp/otlptrace/otlpconfig/envconfig.go.tmpl
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig"
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go
index 19b8434d4d..0a317d9263 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go
@@ -2,24 +2,15 @@
// source: internal/shared/otlp/otlptrace/otlpconfig/options.go.tmpl
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig"
import (
"crypto/tls"
"fmt"
+ "net/http"
+ "net/url"
"path"
"strings"
"time"
@@ -32,6 +23,7 @@ import (
"go.opentelemetry.io/otel/exporters/otlp/otlptrace"
"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry"
+ "go.opentelemetry.io/otel/internal/global"
)
const (
@@ -44,6 +36,10 @@ const (
)
type (
+ // HTTPTransportProxyFunc is a function that resolves which URL to use as proxy for a given request.
+ // This type is compatible with `http.Transport.Proxy` and can be used to set a custom proxy function for the OTLP HTTP client.
+ HTTPTransportProxyFunc func(*http.Request) (*url.URL, error)
+
SignalConfig struct {
Endpoint string
Insecure bool
@@ -55,6 +51,8 @@ type (
// gRPC configurations
GRPCCredentials credentials.TransportCredentials
+
+ Proxy HTTPTransportProxyFunc
}
Config struct {
@@ -100,7 +98,7 @@ func cleanPath(urlPath string, defaultPath string) string {
return defaultPath
}
if !path.IsAbs(tmp) {
- tmp = fmt.Sprintf("/%s", tmp)
+ tmp = "/" + tmp
}
return tmp
}
@@ -127,7 +125,7 @@ func NewGRPCConfig(opts ...GRPCOption) Config {
if cfg.ServiceConfig != "" {
cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultServiceConfig(cfg.ServiceConfig))
}
- // Priroritize GRPCCredentials over Insecure (passing both is an error).
+ // Prioritize GRPCCredentials over Insecure (passing both is an error).
if cfg.Traces.GRPCCredentials != nil {
cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(cfg.Traces.GRPCCredentials))
} else if cfg.Traces.Insecure {
@@ -141,9 +139,6 @@ func NewGRPCConfig(opts ...GRPCOption) Config {
if cfg.Traces.Compression == GzipCompression {
cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultCallOptions(grpc.UseCompressor(gzip.Name)))
}
- if len(cfg.DialOptions) != 0 {
- cfg.DialOptions = append(cfg.DialOptions, cfg.DialOptions...)
- }
if cfg.ReconnectionPeriod != 0 {
p := grpc.ConnectParams{
Backoff: backoff.DefaultConfig,
@@ -261,6 +256,9 @@ func NewGRPCOption(fn func(cfg Config) Config) GRPCOption {
// Generic Options
+// WithEndpoint configures the trace host and port only; endpoint should
+// resemble "example.com" or "localhost:4317". To configure the scheme and path,
+// use WithEndpointURL.
func WithEndpoint(endpoint string) GenericOption {
return newGenericOption(func(cfg Config) Config {
cfg.Traces.Endpoint = endpoint
@@ -268,6 +266,24 @@ func WithEndpoint(endpoint string) GenericOption {
})
}
+// WithEndpointURL configures the trace scheme, host, port, and path; the
+// provided value should resemble "https://example.com:4318/v1/traces".
+func WithEndpointURL(v string) GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ u, err := url.Parse(v)
+ if err != nil {
+ global.Error(err, "otlptrace: parse endpoint url", "url", v)
+ return cfg
+ }
+
+ cfg.Traces.Endpoint = u.Host
+ cfg.Traces.URLPath = u.Path
+ cfg.Traces.Insecure = u.Scheme != "https"
+
+ return cfg
+ })
+}
+
func WithCompression(compression Compression) GenericOption {
return newGenericOption(func(cfg Config) Config {
cfg.Traces.Compression = compression
@@ -326,3 +342,10 @@ func WithTimeout(duration time.Duration) GenericOption {
return cfg
})
}
+
+func WithProxy(pf HTTPTransportProxyFunc) GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.Traces.Proxy = pf
+ return cfg
+ })
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/optiontypes.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/optiontypes.go
index d9dcdc96e7..3d4f699d47 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/optiontypes.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/optiontypes.go
@@ -2,18 +2,7 @@
// source: internal/shared/otlp/otlptrace/otlpconfig/optiontypes.go.tmpl
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig"
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/tls.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/tls.go
index 19b6d4b21f..38b97a0131 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/tls.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/tls.go
@@ -2,18 +2,7 @@
// source: internal/shared/otlp/otlptrace/otlpconfig/tls.go.tmpl
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig"
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess.go
index 076905e54b..a12ea4c48e 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess.go
@@ -2,18 +2,7 @@
// source: internal/shared/otlp/partialsuccess.go
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal"
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go
index 3ce7d6632b..1c5450ab62 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go
@@ -2,18 +2,7 @@
// source: internal/shared/otlp/retry/retry.go.tmpl
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
// Package retry provides request retry functionality that can perform
// configurable exponential backoff for transient errors and honor any
@@ -123,7 +112,7 @@ func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc {
}
if ctxErr := waitFunc(ctx, delay); ctxErr != nil {
- return fmt.Errorf("%w: %s", ctxErr, err)
+ return fmt.Errorf("%w: %w", ctxErr, err)
}
}
}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go
index 78ce9ad8f0..00ab1f20c6 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package otlptracegrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
@@ -64,14 +53,50 @@ func WithInsecure() Option {
return wrappedOption{otlpconfig.WithInsecure()}
}
-// WithEndpoint sets the target endpoint the exporter will connect to. If
-// unset, localhost:4317 will be used as a default.
+// WithEndpoint sets the target endpoint (host and port) the Exporter will
+// connect to. The provided endpoint should resemble "example.com:4317" (no
+// scheme or path).
+//
+// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_TRACES_ENDPOINT
+// environment variable is set, and this option is not passed, that variable
+// value will be used. If both environment variables are set,
+// OTEL_EXPORTER_OTLP_TRACES_ENDPOINT will take precedence. If an environment
+// variable is set, and this option is passed, this option will take precedence.
+//
+// If both this option and WithEndpointURL are used, the last used option will
+// take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, "localhost:4317" will be used.
//
// This option has no effect if WithGRPCConn is used.
func WithEndpoint(endpoint string) Option {
return wrappedOption{otlpconfig.WithEndpoint(endpoint)}
}
+// WithEndpointURL sets the target endpoint URL (scheme, host, port, path)
+// the Exporter will connect to. The provided endpoint URL should resemble
+// "https://example.com:4318/v1/traces".
+//
+// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_TRACES_ENDPOINT
+// environment variable is set, and this option is not passed, that variable
+// value will be used. If both environment variables are set,
+// OTEL_EXPORTER_OTLP_TRACES_ENDPOINT will take precedence. If an environment
+// variable is set, and this option is passed, this option will take precedence.
+//
+// If both this option and WithEndpoint are used, the last used option will
+// take precedence.
+//
+// If an invalid URL is provided, the default value will be kept.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, "https://localhost:4317/v1/traces" will be used.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithEndpointURL(u string) Option {
+ return wrappedOption{otlpconfig.WithEndpointURL(u)}
+}
+
// WithReconnectionPeriod set the minimum amount of time between connection
// attempts to the target endpoint.
//
@@ -93,13 +118,7 @@ func compressorToCompression(compressor string) otlpconfig.Compression {
}
// WithCompressor sets the compressor for the gRPC client to use when sending
-// requests. It is the responsibility of the caller to ensure that the
-// compressor set has been registered with google.golang.org/grpc/encoding.
-// This can be done by encoding.RegisterCompressor. Some compressors
-// auto-register on import, such as gzip, which can be registered by calling
-// `import _ "google.golang.org/grpc/encoding/gzip"`.
-//
-// This option has no effect if WithGRPCConn is used.
+// requests. Supported compressor values: "gzip".
func WithCompressor(compressor string) Option {
return wrappedOption{otlpconfig.WithCompression(compressorToCompression(compressor))}
}
@@ -137,6 +156,8 @@ func WithServiceConfig(serviceConfig string) Option {
// connection. The options here are appended to the internal grpc.DialOptions
// used so they will take precedence over any other internal grpc.DialOptions
// they might conflict with.
+// The [grpc.WithBlock], [grpc.WithTimeout], and [grpc.WithReturnConnectionError]
+// grpc.DialOptions are ignored.
//
// This option has no effect if WithGRPCConn is used.
func WithDialOption(opts ...grpc.DialOption) Option {
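
The expanded option docs above describe last-option-wins behavior between WithEndpoint and WithEndpointURL. A brief sketch of that precedence follows; the endpoint values are placeholders.

```go
package main

import (
	"context"
	"log"

	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
)

func main() {
	// The last endpoint option wins: WithEndpointURL overrides WithEndpoint here,
	// so the exporter targets collector.example.com:4318 with TLS enabled.
	exp, err := otlptracegrpc.New(context.Background(),
		otlptracegrpc.WithEndpoint("localhost:4317"),
		otlptracegrpc.WithEndpointURL("https://collector.example.com:4318/v1/traces"),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = exp.Shutdown(context.Background()) }()
}
```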
diff --git a/vendor/go.uber.org/atomic/.codecov.yml b/vendor/go.uber.org/atomic/.codecov.yml
deleted file mode 100644
index 571116cc39..0000000000
--- a/vendor/go.uber.org/atomic/.codecov.yml
+++ /dev/null
@@ -1,19 +0,0 @@
-coverage:
- range: 80..100
- round: down
- precision: 2
-
- status:
- project: # measuring the overall project coverage
- default: # context, you can create multiple ones with custom titles
- enabled: yes # must be yes|true to enable this status
- target: 100 # specify the target coverage for each commit status
- # option: "auto" (must increase from parent commit or pull request base)
- # option: "X%" a static target percentage to hit
- if_not_found: success # if parent is not found report status as success, error, or failure
- if_ci_failed: error # if ci fails report status as success, error, or failure
-
-# Also update COVER_IGNORE_PKGS in the Makefile.
-ignore:
- - /internal/gen-atomicint/
- - /internal/gen-valuewrapper/
diff --git a/vendor/go.uber.org/atomic/.gitignore b/vendor/go.uber.org/atomic/.gitignore
deleted file mode 100644
index 2e337a0ed5..0000000000
--- a/vendor/go.uber.org/atomic/.gitignore
+++ /dev/null
@@ -1,15 +0,0 @@
-/bin
-.DS_Store
-/vendor
-cover.html
-cover.out
-lint.log
-
-# Binaries
-*.test
-
-# Profiling output
-*.prof
-
-# Output of fossa analyzer
-/fossa
diff --git a/vendor/go.uber.org/atomic/CHANGELOG.md b/vendor/go.uber.org/atomic/CHANGELOG.md
deleted file mode 100644
index 38f564e2b3..0000000000
--- a/vendor/go.uber.org/atomic/CHANGELOG.md
+++ /dev/null
@@ -1,100 +0,0 @@
-# Changelog
-All notable changes to this project will be documented in this file.
-
-The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
-and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
-
-## [1.9.0] - 2021-07-15
-### Added
-- Add `Float64.Swap` to match int atomic operations.
-- Add `atomic.Time` type for atomic operations on `time.Time` values.
-
-[1.9.0]: https://github.com/uber-go/atomic/compare/v1.8.0...v1.9.0
-
-## [1.8.0] - 2021-06-09
-### Added
-- Add `atomic.Uintptr` type for atomic operations on `uintptr` values.
-- Add `atomic.UnsafePointer` type for atomic operations on `unsafe.Pointer` values.
-
-[1.8.0]: https://github.com/uber-go/atomic/compare/v1.7.0...v1.8.0
-
-## [1.7.0] - 2020-09-14
-### Added
-- Support JSON serialization and deserialization of primitive atomic types.
-- Support Text marshalling and unmarshalling for string atomics.
-
-### Changed
-- Disallow incorrect comparison of atomic values in a non-atomic way.
-
-### Removed
-- Remove dependency on `golang.org/x/{lint, tools}`.
-
-[1.7.0]: https://github.com/uber-go/atomic/compare/v1.6.0...v1.7.0
-
-## [1.6.0] - 2020-02-24
-### Changed
-- Drop library dependency on `golang.org/x/{lint, tools}`.
-
-[1.6.0]: https://github.com/uber-go/atomic/compare/v1.5.1...v1.6.0
-
-## [1.5.1] - 2019-11-19
-- Fix bug where `Bool.CAS` and `Bool.Toggle` do work correctly together
- causing `CAS` to fail even though the old value matches.
-
-[1.5.1]: https://github.com/uber-go/atomic/compare/v1.5.0...v1.5.1
-
-## [1.5.0] - 2019-10-29
-### Changed
-- With Go modules, only the `go.uber.org/atomic` import path is supported now.
- If you need to use the old import path, please add a `replace` directive to
- your `go.mod`.
-
-[1.5.0]: https://github.com/uber-go/atomic/compare/v1.4.0...v1.5.0
-
-## [1.4.0] - 2019-05-01
-### Added
- - Add `atomic.Error` type for atomic operations on `error` values.
-
-[1.4.0]: https://github.com/uber-go/atomic/compare/v1.3.2...v1.4.0
-
-## [1.3.2] - 2018-05-02
-### Added
-- Add `atomic.Duration` type for atomic operations on `time.Duration` values.
-
-[1.3.2]: https://github.com/uber-go/atomic/compare/v1.3.1...v1.3.2
-
-## [1.3.1] - 2017-11-14
-### Fixed
-- Revert optimization for `atomic.String.Store("")` which caused data races.
-
-[1.3.1]: https://github.com/uber-go/atomic/compare/v1.3.0...v1.3.1
-
-## [1.3.0] - 2017-11-13
-### Added
-- Add `atomic.Bool.CAS` for compare-and-swap semantics on bools.
-
-### Changed
-- Optimize `atomic.String.Store("")` by avoiding an allocation.
-
-[1.3.0]: https://github.com/uber-go/atomic/compare/v1.2.0...v1.3.0
-
-## [1.2.0] - 2017-04-12
-### Added
-- Shadow `atomic.Value` from `sync/atomic`.
-
-[1.2.0]: https://github.com/uber-go/atomic/compare/v1.1.0...v1.2.0
-
-## [1.1.0] - 2017-03-10
-### Added
-- Add atomic `Float64` type.
-
-### Changed
-- Support new `go.uber.org/atomic` import path.
-
-[1.1.0]: https://github.com/uber-go/atomic/compare/v1.0.0...v1.1.0
-
-## [1.0.0] - 2016-07-18
-
-- Initial release.
-
-[1.0.0]: https://github.com/uber-go/atomic/releases/tag/v1.0.0
diff --git a/vendor/go.uber.org/atomic/Makefile b/vendor/go.uber.org/atomic/Makefile
deleted file mode 100644
index 46c945b32b..0000000000
--- a/vendor/go.uber.org/atomic/Makefile
+++ /dev/null
@@ -1,79 +0,0 @@
-# Directory to place `go install`ed binaries into.
-export GOBIN ?= $(shell pwd)/bin
-
-GOLINT = $(GOBIN)/golint
-GEN_ATOMICINT = $(GOBIN)/gen-atomicint
-GEN_ATOMICWRAPPER = $(GOBIN)/gen-atomicwrapper
-STATICCHECK = $(GOBIN)/staticcheck
-
-GO_FILES ?= $(shell find . '(' -path .git -o -path vendor ')' -prune -o -name '*.go' -print)
-
-# Also update ignore section in .codecov.yml.
-COVER_IGNORE_PKGS = \
- go.uber.org/atomic/internal/gen-atomicint \
- go.uber.org/atomic/internal/gen-atomicwrapper
-
-.PHONY: build
-build:
- go build ./...
-
-.PHONY: test
-test:
- go test -race ./...
-
-.PHONY: gofmt
-gofmt:
- $(eval FMT_LOG := $(shell mktemp -t gofmt.XXXXX))
- gofmt -e -s -l $(GO_FILES) > $(FMT_LOG) || true
- @[ ! -s "$(FMT_LOG)" ] || (echo "gofmt failed:" && cat $(FMT_LOG) && false)
-
-$(GOLINT):
- cd tools && go install golang.org/x/lint/golint
-
-$(STATICCHECK):
- cd tools && go install honnef.co/go/tools/cmd/staticcheck
-
-$(GEN_ATOMICWRAPPER): $(wildcard ./internal/gen-atomicwrapper/*)
- go build -o $@ ./internal/gen-atomicwrapper
-
-$(GEN_ATOMICINT): $(wildcard ./internal/gen-atomicint/*)
- go build -o $@ ./internal/gen-atomicint
-
-.PHONY: golint
-golint: $(GOLINT)
- $(GOLINT) ./...
-
-.PHONY: staticcheck
-staticcheck: $(STATICCHECK)
- $(STATICCHECK) ./...
-
-.PHONY: lint
-lint: gofmt golint staticcheck generatenodirty
-
-# comma separated list of packages to consider for code coverage.
-COVER_PKG = $(shell \
- go list -find ./... | \
- grep -v $(foreach pkg,$(COVER_IGNORE_PKGS),-e "^$(pkg)$$") | \
- paste -sd, -)
-
-.PHONY: cover
-cover:
- go test -coverprofile=cover.out -coverpkg $(COVER_PKG) -v ./...
- go tool cover -html=cover.out -o cover.html
-
-.PHONY: generate
-generate: $(GEN_ATOMICINT) $(GEN_ATOMICWRAPPER)
- go generate ./...
-
-.PHONY: generatenodirty
-generatenodirty:
- @[ -z "$$(git status --porcelain)" ] || ( \
- echo "Working tree is dirty. Commit your changes first."; \
- git status; \
- exit 1 )
- @make generate
- @status=$$(git status --porcelain); \
- [ -z "$$status" ] || ( \
- echo "Working tree is dirty after `make generate`:"; \
- echo "$$status"; \
- echo "Please ensure that the generated code is up-to-date." )
diff --git a/vendor/go.uber.org/atomic/README.md b/vendor/go.uber.org/atomic/README.md
deleted file mode 100644
index 96b47a1f12..0000000000
--- a/vendor/go.uber.org/atomic/README.md
+++ /dev/null
@@ -1,63 +0,0 @@
-# atomic [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] [![Go Report Card][reportcard-img]][reportcard]
-
-Simple wrappers for primitive types to enforce atomic access.
-
-## Installation
-
-```shell
-$ go get -u go.uber.org/atomic@v1
-```
-
-### Legacy Import Path
-
-As of v1.5.0, the import path `go.uber.org/atomic` is the only supported way
-of using this package. If you are using Go modules, this package will fail to
-compile with the legacy import path path `github.com/uber-go/atomic`.
-
-We recommend migrating your code to the new import path but if you're unable
-to do so, or if your dependencies are still using the old import path, you
-will have to add a `replace` directive to your `go.mod` file downgrading the
-legacy import path to an older version.
-
-```
-replace github.com/uber-go/atomic => github.com/uber-go/atomic v1.4.0
-```
-
-You can do so automatically by running the following command.
-
-```shell
-$ go mod edit -replace github.com/uber-go/atomic=github.com/uber-go/atomic@v1.4.0
-```
-
-## Usage
-
-The standard library's `sync/atomic` is powerful, but it's easy to forget which
-variables must be accessed atomically. `go.uber.org/atomic` preserves all the
-functionality of the standard library, but wraps the primitive types to
-provide a safer, more convenient API.
-
-```go
-var atom atomic.Uint32
-atom.Store(42)
-atom.Sub(2)
-atom.CAS(40, 11)
-```
-
-See the [documentation][doc] for a complete API specification.
-
-## Development Status
-
-Stable.
-
----
-
-Released under the [MIT License](LICENSE.txt).
-
-[doc-img]: https://godoc.org/github.com/uber-go/atomic?status.svg
-[doc]: https://godoc.org/go.uber.org/atomic
-[ci-img]: https://github.com/uber-go/atomic/actions/workflows/go.yml/badge.svg
-[ci]: https://github.com/uber-go/atomic/actions/workflows/go.yml
-[cov-img]: https://codecov.io/gh/uber-go/atomic/branch/master/graph/badge.svg
-[cov]: https://codecov.io/gh/uber-go/atomic
-[reportcard-img]: https://goreportcard.com/badge/go.uber.org/atomic
-[reportcard]: https://goreportcard.com/report/go.uber.org/atomic
diff --git a/vendor/go.uber.org/atomic/bool.go b/vendor/go.uber.org/atomic/bool.go
deleted file mode 100644
index 209df7bbcd..0000000000
--- a/vendor/go.uber.org/atomic/bool.go
+++ /dev/null
@@ -1,81 +0,0 @@
-// @generated Code generated by gen-atomicwrapper.
-
-// Copyright (c) 2020-2021 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package atomic
-
-import (
- "encoding/json"
-)
-
-// Bool is an atomic type-safe wrapper for bool values.
-type Bool struct {
- _ nocmp // disallow non-atomic comparison
-
- v Uint32
-}
-
-var _zeroBool bool
-
-// NewBool creates a new Bool.
-func NewBool(val bool) *Bool {
- x := &Bool{}
- if val != _zeroBool {
- x.Store(val)
- }
- return x
-}
-
-// Load atomically loads the wrapped bool.
-func (x *Bool) Load() bool {
- return truthy(x.v.Load())
-}
-
-// Store atomically stores the passed bool.
-func (x *Bool) Store(val bool) {
- x.v.Store(boolToInt(val))
-}
-
-// CAS is an atomic compare-and-swap for bool values.
-func (x *Bool) CAS(old, new bool) (swapped bool) {
- return x.v.CAS(boolToInt(old), boolToInt(new))
-}
-
-// Swap atomically stores the given bool and returns the old
-// value.
-func (x *Bool) Swap(val bool) (old bool) {
- return truthy(x.v.Swap(boolToInt(val)))
-}
-
-// MarshalJSON encodes the wrapped bool into JSON.
-func (x *Bool) MarshalJSON() ([]byte, error) {
- return json.Marshal(x.Load())
-}
-
-// UnmarshalJSON decodes a bool from JSON.
-func (x *Bool) UnmarshalJSON(b []byte) error {
- var v bool
- if err := json.Unmarshal(b, &v); err != nil {
- return err
- }
- x.Store(v)
- return nil
-}
diff --git a/vendor/go.uber.org/atomic/bool_ext.go b/vendor/go.uber.org/atomic/bool_ext.go
deleted file mode 100644
index a2e60e9873..0000000000
--- a/vendor/go.uber.org/atomic/bool_ext.go
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright (c) 2020 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package atomic
-
-import (
- "strconv"
-)
-
-//go:generate bin/gen-atomicwrapper -name=Bool -type=bool -wrapped=Uint32 -pack=boolToInt -unpack=truthy -cas -swap -json -file=bool.go
-
-func truthy(n uint32) bool {
- return n == 1
-}
-
-func boolToInt(b bool) uint32 {
- if b {
- return 1
- }
- return 0
-}
-
-// Toggle atomically negates the Boolean and returns the previous value.
-func (b *Bool) Toggle() (old bool) {
- for {
- old := b.Load()
- if b.CAS(old, !old) {
- return old
- }
- }
-}
-
-// String encodes the wrapped value as a string.
-func (b *Bool) String() string {
- return strconv.FormatBool(b.Load())
-}
diff --git a/vendor/go.uber.org/atomic/doc.go b/vendor/go.uber.org/atomic/doc.go
deleted file mode 100644
index ae7390ee68..0000000000
--- a/vendor/go.uber.org/atomic/doc.go
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright (c) 2020 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-// Package atomic provides simple wrappers around numerics to enforce atomic
-// access.
-package atomic
diff --git a/vendor/go.uber.org/atomic/duration.go b/vendor/go.uber.org/atomic/duration.go
deleted file mode 100644
index 207594f5e8..0000000000
--- a/vendor/go.uber.org/atomic/duration.go
+++ /dev/null
@@ -1,82 +0,0 @@
-// @generated Code generated by gen-atomicwrapper.
-
-// Copyright (c) 2020-2021 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package atomic
-
-import (
- "encoding/json"
- "time"
-)
-
-// Duration is an atomic type-safe wrapper for time.Duration values.
-type Duration struct {
- _ nocmp // disallow non-atomic comparison
-
- v Int64
-}
-
-var _zeroDuration time.Duration
-
-// NewDuration creates a new Duration.
-func NewDuration(val time.Duration) *Duration {
- x := &Duration{}
- if val != _zeroDuration {
- x.Store(val)
- }
- return x
-}
-
-// Load atomically loads the wrapped time.Duration.
-func (x *Duration) Load() time.Duration {
- return time.Duration(x.v.Load())
-}
-
-// Store atomically stores the passed time.Duration.
-func (x *Duration) Store(val time.Duration) {
- x.v.Store(int64(val))
-}
-
-// CAS is an atomic compare-and-swap for time.Duration values.
-func (x *Duration) CAS(old, new time.Duration) (swapped bool) {
- return x.v.CAS(int64(old), int64(new))
-}
-
-// Swap atomically stores the given time.Duration and returns the old
-// value.
-func (x *Duration) Swap(val time.Duration) (old time.Duration) {
- return time.Duration(x.v.Swap(int64(val)))
-}
-
-// MarshalJSON encodes the wrapped time.Duration into JSON.
-func (x *Duration) MarshalJSON() ([]byte, error) {
- return json.Marshal(x.Load())
-}
-
-// UnmarshalJSON decodes a time.Duration from JSON.
-func (x *Duration) UnmarshalJSON(b []byte) error {
- var v time.Duration
- if err := json.Unmarshal(b, &v); err != nil {
- return err
- }
- x.Store(v)
- return nil
-}
diff --git a/vendor/go.uber.org/atomic/duration_ext.go b/vendor/go.uber.org/atomic/duration_ext.go
deleted file mode 100644
index 4c18b0a9ed..0000000000
--- a/vendor/go.uber.org/atomic/duration_ext.go
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright (c) 2020 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package atomic
-
-import "time"
-
-//go:generate bin/gen-atomicwrapper -name=Duration -type=time.Duration -wrapped=Int64 -pack=int64 -unpack=time.Duration -cas -swap -json -imports time -file=duration.go
-
-// Add atomically adds to the wrapped time.Duration and returns the new value.
-func (d *Duration) Add(delta time.Duration) time.Duration {
- return time.Duration(d.v.Add(int64(delta)))
-}
-
-// Sub atomically subtracts from the wrapped time.Duration and returns the new value.
-func (d *Duration) Sub(delta time.Duration) time.Duration {
- return time.Duration(d.v.Sub(int64(delta)))
-}
-
-// String encodes the wrapped value as a string.
-func (d *Duration) String() string {
- return d.Load().String()
-}
diff --git a/vendor/go.uber.org/atomic/error.go b/vendor/go.uber.org/atomic/error.go
deleted file mode 100644
index 3be19c35ee..0000000000
--- a/vendor/go.uber.org/atomic/error.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// @generated Code generated by gen-atomicwrapper.
-
-// Copyright (c) 2020-2021 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package atomic
-
-// Error is an atomic type-safe wrapper for error values.
-type Error struct {
- _ nocmp // disallow non-atomic comparison
-
- v Value
-}
-
-var _zeroError error
-
-// NewError creates a new Error.
-func NewError(val error) *Error {
- x := &Error{}
- if val != _zeroError {
- x.Store(val)
- }
- return x
-}
-
-// Load atomically loads the wrapped error.
-func (x *Error) Load() error {
- return unpackError(x.v.Load())
-}
-
-// Store atomically stores the passed error.
-func (x *Error) Store(val error) {
- x.v.Store(packError(val))
-}
diff --git a/vendor/go.uber.org/atomic/float64.go b/vendor/go.uber.org/atomic/float64.go
deleted file mode 100644
index 8a13671847..0000000000
--- a/vendor/go.uber.org/atomic/float64.go
+++ /dev/null
@@ -1,77 +0,0 @@
-// @generated Code generated by gen-atomicwrapper.
-
-// Copyright (c) 2020-2021 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package atomic
-
-import (
- "encoding/json"
- "math"
-)
-
-// Float64 is an atomic type-safe wrapper for float64 values.
-type Float64 struct {
- _ nocmp // disallow non-atomic comparison
-
- v Uint64
-}
-
-var _zeroFloat64 float64
-
-// NewFloat64 creates a new Float64.
-func NewFloat64(val float64) *Float64 {
- x := &Float64{}
- if val != _zeroFloat64 {
- x.Store(val)
- }
- return x
-}
-
-// Load atomically loads the wrapped float64.
-func (x *Float64) Load() float64 {
- return math.Float64frombits(x.v.Load())
-}
-
-// Store atomically stores the passed float64.
-func (x *Float64) Store(val float64) {
- x.v.Store(math.Float64bits(val))
-}
-
-// Swap atomically stores the given float64 and returns the old
-// value.
-func (x *Float64) Swap(val float64) (old float64) {
- return math.Float64frombits(x.v.Swap(math.Float64bits(val)))
-}
-
-// MarshalJSON encodes the wrapped float64 into JSON.
-func (x *Float64) MarshalJSON() ([]byte, error) {
- return json.Marshal(x.Load())
-}
-
-// UnmarshalJSON decodes a float64 from JSON.
-func (x *Float64) UnmarshalJSON(b []byte) error {
- var v float64
- if err := json.Unmarshal(b, &v); err != nil {
- return err
- }
- x.Store(v)
- return nil
-}
diff --git a/vendor/go.uber.org/atomic/float64_ext.go b/vendor/go.uber.org/atomic/float64_ext.go
deleted file mode 100644
index df36b0107f..0000000000
--- a/vendor/go.uber.org/atomic/float64_ext.go
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright (c) 2020 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package atomic
-
-import (
- "math"
- "strconv"
-)
-
-//go:generate bin/gen-atomicwrapper -name=Float64 -type=float64 -wrapped=Uint64 -pack=math.Float64bits -unpack=math.Float64frombits -swap -json -imports math -file=float64.go
-
-// Add atomically adds to the wrapped float64 and returns the new value.
-func (f *Float64) Add(delta float64) float64 {
- for {
- old := f.Load()
- new := old + delta
- if f.CAS(old, new) {
- return new
- }
- }
-}
-
-// Sub atomically subtracts from the wrapped float64 and returns the new value.
-func (f *Float64) Sub(delta float64) float64 {
- return f.Add(-delta)
-}
-
-// CAS is an atomic compare-and-swap for float64 values.
-//
-// Note: CAS handles NaN incorrectly. NaN != NaN using Go's inbuilt operators
-// but CAS allows a stored NaN to compare equal to a passed in NaN.
-// This avoids typical CAS loops from blocking forever, e.g.,
-//
-// for {
-// old := atom.Load()
-// new = f(old)
-// if atom.CAS(old, new) {
-// break
-// }
-// }
-//
-// If CAS did not match NaN to match, then the above would loop forever.
-func (f *Float64) CAS(old, new float64) (swapped bool) {
- return f.v.CAS(math.Float64bits(old), math.Float64bits(new))
-}
-
-// String encodes the wrapped value as a string.
-func (f *Float64) String() string {
- // 'g' is the behavior for floats with %v.
- return strconv.FormatFloat(f.Load(), 'g', -1, 64)
-}
diff --git a/vendor/go.uber.org/atomic/gen.go b/vendor/go.uber.org/atomic/gen.go
deleted file mode 100644
index 1e9ef4f879..0000000000
--- a/vendor/go.uber.org/atomic/gen.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright (c) 2020 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package atomic
-
-//go:generate bin/gen-atomicint -name=Int32 -wrapped=int32 -file=int32.go
-//go:generate bin/gen-atomicint -name=Int64 -wrapped=int64 -file=int64.go
-//go:generate bin/gen-atomicint -name=Uint32 -wrapped=uint32 -unsigned -file=uint32.go
-//go:generate bin/gen-atomicint -name=Uint64 -wrapped=uint64 -unsigned -file=uint64.go
-//go:generate bin/gen-atomicint -name=Uintptr -wrapped=uintptr -unsigned -file=uintptr.go
diff --git a/vendor/go.uber.org/atomic/int32.go b/vendor/go.uber.org/atomic/int32.go
deleted file mode 100644
index 640ea36a17..0000000000
--- a/vendor/go.uber.org/atomic/int32.go
+++ /dev/null
@@ -1,102 +0,0 @@
-// @generated Code generated by gen-atomicint.
-
-// Copyright (c) 2020-2021 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package atomic
-
-import (
- "encoding/json"
- "strconv"
- "sync/atomic"
-)
-
-// Int32 is an atomic wrapper around int32.
-type Int32 struct {
- _ nocmp // disallow non-atomic comparison
-
- v int32
-}
-
-// NewInt32 creates a new Int32.
-func NewInt32(val int32) *Int32 {
- return &Int32{v: val}
-}
-
-// Load atomically loads the wrapped value.
-func (i *Int32) Load() int32 {
- return atomic.LoadInt32(&i.v)
-}
-
-// Add atomically adds to the wrapped int32 and returns the new value.
-func (i *Int32) Add(delta int32) int32 {
- return atomic.AddInt32(&i.v, delta)
-}
-
-// Sub atomically subtracts from the wrapped int32 and returns the new value.
-func (i *Int32) Sub(delta int32) int32 {
- return atomic.AddInt32(&i.v, -delta)
-}
-
-// Inc atomically increments the wrapped int32 and returns the new value.
-func (i *Int32) Inc() int32 {
- return i.Add(1)
-}
-
-// Dec atomically decrements the wrapped int32 and returns the new value.
-func (i *Int32) Dec() int32 {
- return i.Sub(1)
-}
-
-// CAS is an atomic compare-and-swap.
-func (i *Int32) CAS(old, new int32) (swapped bool) {
- return atomic.CompareAndSwapInt32(&i.v, old, new)
-}
-
-// Store atomically stores the passed value.
-func (i *Int32) Store(val int32) {
- atomic.StoreInt32(&i.v, val)
-}
-
-// Swap atomically swaps the wrapped int32 and returns the old value.
-func (i *Int32) Swap(val int32) (old int32) {
- return atomic.SwapInt32(&i.v, val)
-}
-
-// MarshalJSON encodes the wrapped int32 into JSON.
-func (i *Int32) MarshalJSON() ([]byte, error) {
- return json.Marshal(i.Load())
-}
-
-// UnmarshalJSON decodes JSON into the wrapped int32.
-func (i *Int32) UnmarshalJSON(b []byte) error {
- var v int32
- if err := json.Unmarshal(b, &v); err != nil {
- return err
- }
- i.Store(v)
- return nil
-}
-
-// String encodes the wrapped value as a string.
-func (i *Int32) String() string {
- v := i.Load()
- return strconv.FormatInt(int64(v), 10)
-}
diff --git a/vendor/go.uber.org/atomic/int64.go b/vendor/go.uber.org/atomic/int64.go
deleted file mode 100644
index 9ab66b9809..0000000000
--- a/vendor/go.uber.org/atomic/int64.go
+++ /dev/null
@@ -1,102 +0,0 @@
-// @generated Code generated by gen-atomicint.
-
-// Copyright (c) 2020-2021 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package atomic
-
-import (
- "encoding/json"
- "strconv"
- "sync/atomic"
-)
-
-// Int64 is an atomic wrapper around int64.
-type Int64 struct {
- _ nocmp // disallow non-atomic comparison
-
- v int64
-}
-
-// NewInt64 creates a new Int64.
-func NewInt64(val int64) *Int64 {
- return &Int64{v: val}
-}
-
-// Load atomically loads the wrapped value.
-func (i *Int64) Load() int64 {
- return atomic.LoadInt64(&i.v)
-}
-
-// Add atomically adds to the wrapped int64 and returns the new value.
-func (i *Int64) Add(delta int64) int64 {
- return atomic.AddInt64(&i.v, delta)
-}
-
-// Sub atomically subtracts from the wrapped int64 and returns the new value.
-func (i *Int64) Sub(delta int64) int64 {
- return atomic.AddInt64(&i.v, -delta)
-}
-
-// Inc atomically increments the wrapped int64 and returns the new value.
-func (i *Int64) Inc() int64 {
- return i.Add(1)
-}
-
-// Dec atomically decrements the wrapped int64 and returns the new value.
-func (i *Int64) Dec() int64 {
- return i.Sub(1)
-}
-
-// CAS is an atomic compare-and-swap.
-func (i *Int64) CAS(old, new int64) (swapped bool) {
- return atomic.CompareAndSwapInt64(&i.v, old, new)
-}
-
-// Store atomically stores the passed value.
-func (i *Int64) Store(val int64) {
- atomic.StoreInt64(&i.v, val)
-}
-
-// Swap atomically swaps the wrapped int64 and returns the old value.
-func (i *Int64) Swap(val int64) (old int64) {
- return atomic.SwapInt64(&i.v, val)
-}
-
-// MarshalJSON encodes the wrapped int64 into JSON.
-func (i *Int64) MarshalJSON() ([]byte, error) {
- return json.Marshal(i.Load())
-}
-
-// UnmarshalJSON decodes JSON into the wrapped int64.
-func (i *Int64) UnmarshalJSON(b []byte) error {
- var v int64
- if err := json.Unmarshal(b, &v); err != nil {
- return err
- }
- i.Store(v)
- return nil
-}
-
-// String encodes the wrapped value as a string.
-func (i *Int64) String() string {
- v := i.Load()
- return strconv.FormatInt(int64(v), 10)
-}
diff --git a/vendor/go.uber.org/atomic/nocmp.go b/vendor/go.uber.org/atomic/nocmp.go
deleted file mode 100644
index a8201cb4a1..0000000000
--- a/vendor/go.uber.org/atomic/nocmp.go
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright (c) 2020 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package atomic
-
-// nocmp is an uncomparable struct. Embed this inside another struct to make
-// it uncomparable.
-//
-// type Foo struct {
-// nocmp
-// // ...
-// }
-//
-// This DOES NOT:
-//
-// - Disallow shallow copies of structs
-// - Disallow comparison of pointers to uncomparable structs
-type nocmp [0]func()
diff --git a/vendor/go.uber.org/atomic/string.go b/vendor/go.uber.org/atomic/string.go
deleted file mode 100644
index 80df93d094..0000000000
--- a/vendor/go.uber.org/atomic/string.go
+++ /dev/null
@@ -1,54 +0,0 @@
-// @generated Code generated by gen-atomicwrapper.
-
-// Copyright (c) 2020-2021 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package atomic
-
-// String is an atomic type-safe wrapper for string values.
-type String struct {
- _ nocmp // disallow non-atomic comparison
-
- v Value
-}
-
-var _zeroString string
-
-// NewString creates a new String.
-func NewString(val string) *String {
- x := &String{}
- if val != _zeroString {
- x.Store(val)
- }
- return x
-}
-
-// Load atomically loads the wrapped string.
-func (x *String) Load() string {
- if v := x.v.Load(); v != nil {
- return v.(string)
- }
- return _zeroString
-}
-
-// Store atomically stores the passed string.
-func (x *String) Store(val string) {
- x.v.Store(val)
-}
diff --git a/vendor/go.uber.org/atomic/string_ext.go b/vendor/go.uber.org/atomic/string_ext.go
deleted file mode 100644
index 83d92edafc..0000000000
--- a/vendor/go.uber.org/atomic/string_ext.go
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright (c) 2020 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package atomic
-
-//go:generate bin/gen-atomicwrapper -name=String -type=string -wrapped=Value -file=string.go
-// Note: No Swap as String wraps Value, which wraps the stdlib sync/atomic.Value which
-// only supports Swap as of go1.17: https://github.com/golang/go/issues/39351
-
-// String returns the wrapped value.
-func (s *String) String() string {
- return s.Load()
-}
-
-// MarshalText encodes the wrapped string into a textual form.
-//
-// This makes it encodable as JSON, YAML, XML, and more.
-func (s *String) MarshalText() ([]byte, error) {
- return []byte(s.Load()), nil
-}
-
-// UnmarshalText decodes text and replaces the wrapped string with it.
-//
-// This makes it decodable from JSON, YAML, XML, and more.
-func (s *String) UnmarshalText(b []byte) error {
- s.Store(string(b))
- return nil
-}
diff --git a/vendor/go.uber.org/atomic/time.go b/vendor/go.uber.org/atomic/time.go
deleted file mode 100644
index 33460fc37e..0000000000
--- a/vendor/go.uber.org/atomic/time.go
+++ /dev/null
@@ -1,55 +0,0 @@
-// @generated Code generated by gen-atomicwrapper.
-
-// Copyright (c) 2020-2021 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package atomic
-
-import (
- "time"
-)
-
-// Time is an atomic type-safe wrapper for time.Time values.
-type Time struct {
- _ nocmp // disallow non-atomic comparison
-
- v Value
-}
-
-var _zeroTime time.Time
-
-// NewTime creates a new Time.
-func NewTime(val time.Time) *Time {
- x := &Time{}
- if val != _zeroTime {
- x.Store(val)
- }
- return x
-}
-
-// Load atomically loads the wrapped time.Time.
-func (x *Time) Load() time.Time {
- return unpackTime(x.v.Load())
-}
-
-// Store atomically stores the passed time.Time.
-func (x *Time) Store(val time.Time) {
- x.v.Store(packTime(val))
-}
diff --git a/vendor/go.uber.org/atomic/time_ext.go b/vendor/go.uber.org/atomic/time_ext.go
deleted file mode 100644
index 1e3dc978aa..0000000000
--- a/vendor/go.uber.org/atomic/time_ext.go
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright (c) 2021 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package atomic
-
-import "time"
-
-//go:generate bin/gen-atomicwrapper -name=Time -type=time.Time -wrapped=Value -pack=packTime -unpack=unpackTime -imports time -file=time.go
-
-func packTime(t time.Time) interface{} {
- return t
-}
-
-func unpackTime(v interface{}) time.Time {
- if t, ok := v.(time.Time); ok {
- return t
- }
- return time.Time{}
-}
diff --git a/vendor/go.uber.org/atomic/uint32.go b/vendor/go.uber.org/atomic/uint32.go
deleted file mode 100644
index 7859a9cc3b..0000000000
--- a/vendor/go.uber.org/atomic/uint32.go
+++ /dev/null
@@ -1,102 +0,0 @@
-// @generated Code generated by gen-atomicint.
-
-// Copyright (c) 2020-2021 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package atomic
-
-import (
- "encoding/json"
- "strconv"
- "sync/atomic"
-)
-
-// Uint32 is an atomic wrapper around uint32.
-type Uint32 struct {
- _ nocmp // disallow non-atomic comparison
-
- v uint32
-}
-
-// NewUint32 creates a new Uint32.
-func NewUint32(val uint32) *Uint32 {
- return &Uint32{v: val}
-}
-
-// Load atomically loads the wrapped value.
-func (i *Uint32) Load() uint32 {
- return atomic.LoadUint32(&i.v)
-}
-
-// Add atomically adds to the wrapped uint32 and returns the new value.
-func (i *Uint32) Add(delta uint32) uint32 {
- return atomic.AddUint32(&i.v, delta)
-}
-
-// Sub atomically subtracts from the wrapped uint32 and returns the new value.
-func (i *Uint32) Sub(delta uint32) uint32 {
- return atomic.AddUint32(&i.v, ^(delta - 1))
-}
-
-// Inc atomically increments the wrapped uint32 and returns the new value.
-func (i *Uint32) Inc() uint32 {
- return i.Add(1)
-}
-
-// Dec atomically decrements the wrapped uint32 and returns the new value.
-func (i *Uint32) Dec() uint32 {
- return i.Sub(1)
-}
-
-// CAS is an atomic compare-and-swap.
-func (i *Uint32) CAS(old, new uint32) (swapped bool) {
- return atomic.CompareAndSwapUint32(&i.v, old, new)
-}
-
-// Store atomically stores the passed value.
-func (i *Uint32) Store(val uint32) {
- atomic.StoreUint32(&i.v, val)
-}
-
-// Swap atomically swaps the wrapped uint32 and returns the old value.
-func (i *Uint32) Swap(val uint32) (old uint32) {
- return atomic.SwapUint32(&i.v, val)
-}
-
-// MarshalJSON encodes the wrapped uint32 into JSON.
-func (i *Uint32) MarshalJSON() ([]byte, error) {
- return json.Marshal(i.Load())
-}
-
-// UnmarshalJSON decodes JSON into the wrapped uint32.
-func (i *Uint32) UnmarshalJSON(b []byte) error {
- var v uint32
- if err := json.Unmarshal(b, &v); err != nil {
- return err
- }
- i.Store(v)
- return nil
-}
-
-// String encodes the wrapped value as a string.
-func (i *Uint32) String() string {
- v := i.Load()
- return strconv.FormatUint(uint64(v), 10)
-}
diff --git a/vendor/go.uber.org/atomic/uint64.go b/vendor/go.uber.org/atomic/uint64.go
deleted file mode 100644
index 2f2a7db638..0000000000
--- a/vendor/go.uber.org/atomic/uint64.go
+++ /dev/null
@@ -1,102 +0,0 @@
-// @generated Code generated by gen-atomicint.
-
-// Copyright (c) 2020-2021 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package atomic
-
-import (
- "encoding/json"
- "strconv"
- "sync/atomic"
-)
-
-// Uint64 is an atomic wrapper around uint64.
-type Uint64 struct {
- _ nocmp // disallow non-atomic comparison
-
- v uint64
-}
-
-// NewUint64 creates a new Uint64.
-func NewUint64(val uint64) *Uint64 {
- return &Uint64{v: val}
-}
-
-// Load atomically loads the wrapped value.
-func (i *Uint64) Load() uint64 {
- return atomic.LoadUint64(&i.v)
-}
-
-// Add atomically adds to the wrapped uint64 and returns the new value.
-func (i *Uint64) Add(delta uint64) uint64 {
- return atomic.AddUint64(&i.v, delta)
-}
-
-// Sub atomically subtracts from the wrapped uint64 and returns the new value.
-func (i *Uint64) Sub(delta uint64) uint64 {
- return atomic.AddUint64(&i.v, ^(delta - 1))
-}
-
-// Inc atomically increments the wrapped uint64 and returns the new value.
-func (i *Uint64) Inc() uint64 {
- return i.Add(1)
-}
-
-// Dec atomically decrements the wrapped uint64 and returns the new value.
-func (i *Uint64) Dec() uint64 {
- return i.Sub(1)
-}
-
-// CAS is an atomic compare-and-swap.
-func (i *Uint64) CAS(old, new uint64) (swapped bool) {
- return atomic.CompareAndSwapUint64(&i.v, old, new)
-}
-
-// Store atomically stores the passed value.
-func (i *Uint64) Store(val uint64) {
- atomic.StoreUint64(&i.v, val)
-}
-
-// Swap atomically swaps the wrapped uint64 and returns the old value.
-func (i *Uint64) Swap(val uint64) (old uint64) {
- return atomic.SwapUint64(&i.v, val)
-}
-
-// MarshalJSON encodes the wrapped uint64 into JSON.
-func (i *Uint64) MarshalJSON() ([]byte, error) {
- return json.Marshal(i.Load())
-}
-
-// UnmarshalJSON decodes JSON into the wrapped uint64.
-func (i *Uint64) UnmarshalJSON(b []byte) error {
- var v uint64
- if err := json.Unmarshal(b, &v); err != nil {
- return err
- }
- i.Store(v)
- return nil
-}
-
-// String encodes the wrapped value as a string.
-func (i *Uint64) String() string {
- v := i.Load()
- return strconv.FormatUint(uint64(v), 10)
-}
diff --git a/vendor/go.uber.org/atomic/uintptr.go b/vendor/go.uber.org/atomic/uintptr.go
deleted file mode 100644
index ecf7a77273..0000000000
--- a/vendor/go.uber.org/atomic/uintptr.go
+++ /dev/null
@@ -1,102 +0,0 @@
-// @generated Code generated by gen-atomicint.
-
-// Copyright (c) 2020-2021 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package atomic
-
-import (
- "encoding/json"
- "strconv"
- "sync/atomic"
-)
-
-// Uintptr is an atomic wrapper around uintptr.
-type Uintptr struct {
- _ nocmp // disallow non-atomic comparison
-
- v uintptr
-}
-
-// NewUintptr creates a new Uintptr.
-func NewUintptr(val uintptr) *Uintptr {
- return &Uintptr{v: val}
-}
-
-// Load atomically loads the wrapped value.
-func (i *Uintptr) Load() uintptr {
- return atomic.LoadUintptr(&i.v)
-}
-
-// Add atomically adds to the wrapped uintptr and returns the new value.
-func (i *Uintptr) Add(delta uintptr) uintptr {
- return atomic.AddUintptr(&i.v, delta)
-}
-
-// Sub atomically subtracts from the wrapped uintptr and returns the new value.
-func (i *Uintptr) Sub(delta uintptr) uintptr {
- return atomic.AddUintptr(&i.v, ^(delta - 1))
-}
-
-// Inc atomically increments the wrapped uintptr and returns the new value.
-func (i *Uintptr) Inc() uintptr {
- return i.Add(1)
-}
-
-// Dec atomically decrements the wrapped uintptr and returns the new value.
-func (i *Uintptr) Dec() uintptr {
- return i.Sub(1)
-}
-
-// CAS is an atomic compare-and-swap.
-func (i *Uintptr) CAS(old, new uintptr) (swapped bool) {
- return atomic.CompareAndSwapUintptr(&i.v, old, new)
-}
-
-// Store atomically stores the passed value.
-func (i *Uintptr) Store(val uintptr) {
- atomic.StoreUintptr(&i.v, val)
-}
-
-// Swap atomically swaps the wrapped uintptr and returns the old value.
-func (i *Uintptr) Swap(val uintptr) (old uintptr) {
- return atomic.SwapUintptr(&i.v, val)
-}
-
-// MarshalJSON encodes the wrapped uintptr into JSON.
-func (i *Uintptr) MarshalJSON() ([]byte, error) {
- return json.Marshal(i.Load())
-}
-
-// UnmarshalJSON decodes JSON into the wrapped uintptr.
-func (i *Uintptr) UnmarshalJSON(b []byte) error {
- var v uintptr
- if err := json.Unmarshal(b, &v); err != nil {
- return err
- }
- i.Store(v)
- return nil
-}
-
-// String encodes the wrapped value as a string.
-func (i *Uintptr) String() string {
- v := i.Load()
- return strconv.FormatUint(uint64(v), 10)
-}
diff --git a/vendor/go.uber.org/atomic/unsafe_pointer.go b/vendor/go.uber.org/atomic/unsafe_pointer.go
deleted file mode 100644
index 169f793dcf..0000000000
--- a/vendor/go.uber.org/atomic/unsafe_pointer.go
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright (c) 2021 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package atomic
-
-import (
- "sync/atomic"
- "unsafe"
-)
-
-// UnsafePointer is an atomic wrapper around unsafe.Pointer.
-type UnsafePointer struct {
- _ nocmp // disallow non-atomic comparison
-
- v unsafe.Pointer
-}
-
-// NewUnsafePointer creates a new UnsafePointer.
-func NewUnsafePointer(val unsafe.Pointer) *UnsafePointer {
- return &UnsafePointer{v: val}
-}
-
-// Load atomically loads the wrapped value.
-func (p *UnsafePointer) Load() unsafe.Pointer {
- return atomic.LoadPointer(&p.v)
-}
-
-// Store atomically stores the passed value.
-func (p *UnsafePointer) Store(val unsafe.Pointer) {
- atomic.StorePointer(&p.v, val)
-}
-
-// Swap atomically swaps the wrapped unsafe.Pointer and returns the old value.
-func (p *UnsafePointer) Swap(val unsafe.Pointer) (old unsafe.Pointer) {
- return atomic.SwapPointer(&p.v, val)
-}
-
-// CAS is an atomic compare-and-swap.
-func (p *UnsafePointer) CAS(old, new unsafe.Pointer) (swapped bool) {
- return atomic.CompareAndSwapPointer(&p.v, old, new)
-}
diff --git a/vendor/go.uber.org/atomic/value.go b/vendor/go.uber.org/atomic/value.go
deleted file mode 100644
index 671f3a3824..0000000000
--- a/vendor/go.uber.org/atomic/value.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright (c) 2020 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package atomic
-
-import "sync/atomic"
-
-// Value shadows the type of the same name from sync/atomic
-// https://godoc.org/sync/atomic#Value
-type Value struct {
- atomic.Value
-
- _ nocmp // disallow non-atomic comparison
-}
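The whole of vendor/go.uber.org/atomic drops out here; after the multierr bump below nothing in the module graph imports it any more (multierr v1.11.0 switches to the standard library's sync/atomic, as the import change in multierr/error.go further down shows). Purely as orientation, and not part of this change: the deleted wrapper types correspond to the generic atomics the standard library has provided since Go 1.19, roughly like this:

```go
package main

import (
	"fmt"
	"math"
	"sync/atomic"
	"time"
)

func main() {
	// go.uber.org/atomic.Int64 -> sync/atomic.Int64 (Go 1.19+).
	var n atomic.Int64
	n.Add(5)
	fmt.Println(n.Load()) // 5

	// go.uber.org/atomic.Duration was an Int64 holding nanoseconds.
	var d atomic.Int64
	d.Store(int64(2 * time.Second))
	fmt.Println(time.Duration(d.Load())) // 2s

	// go.uber.org/atomic.Float64 packed the float's bits into a Uint64.
	var f atomic.Uint64
	f.Store(math.Float64bits(1.5))
	fmt.Println(math.Float64frombits(f.Load())) // 1.5
}
```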
diff --git a/vendor/go.uber.org/multierr/CHANGELOG.md b/vendor/go.uber.org/multierr/CHANGELOG.md
index d2c8aadaf0..f8177b978c 100644
--- a/vendor/go.uber.org/multierr/CHANGELOG.md
+++ b/vendor/go.uber.org/multierr/CHANGELOG.md
@@ -1,6 +1,21 @@
Releases
========
+v1.11.0 (2023-03-28)
+====================
+- `Errors` now supports any error that implements the multiple-error
+  interface.
+- Add `Every` function to allow checking if all errors in the chain
+  satisfy `errors.Is` against the target error.
+
+v1.10.0 (2023-03-08)
+====================
+
+- Comply with Go 1.20's multiple-error interface.
+- Drop Go 1.18 support.
+ Per the support policy, only Go 1.19 and 1.20 are supported now.
+- Drop all non-test external dependencies.
+
v1.9.0 (2022-12-12)
===================
diff --git a/vendor/go.uber.org/multierr/README.md b/vendor/go.uber.org/multierr/README.md
index 70aacecd71..5ab6ac40f4 100644
--- a/vendor/go.uber.org/multierr/README.md
+++ b/vendor/go.uber.org/multierr/README.md
@@ -2,9 +2,29 @@
`multierr` allows combining one or more Go `error`s together.
+## Features
+
+- **Idiomatic**:
+ multierr follows best practices in Go, and keeps your code idiomatic.
+ - It keeps the underlying error type hidden,
+ allowing you to deal in `error` values exclusively.
+ - It provides APIs to safely append into an error from a `defer` statement.
+- **Performant**:
+ multierr is optimized for performance:
+ - It avoids allocations where possible.
+ - It utilizes slice resizing semantics to optimize common cases
+ like appending into the same error object from a loop.
+- **Interoperable**:
+ multierr interoperates with the Go standard library's error APIs seamlessly:
+ - The `errors.Is` and `errors.As` functions *just work*.
+- **Lightweight**:
+ multierr comes with virtually no dependencies.
+
## Installation
- go get -u go.uber.org/multierr
+```bash
+go get -u go.uber.org/multierr@latest
+```
## Status
diff --git a/vendor/go.uber.org/multierr/error.go b/vendor/go.uber.org/multierr/error.go
index cdd91ae56d..3a828b2dff 100644
--- a/vendor/go.uber.org/multierr/error.go
+++ b/vendor/go.uber.org/multierr/error.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2017-2021 Uber Technologies, Inc.
+// Copyright (c) 2017-2023 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -147,8 +147,7 @@ import (
"io"
"strings"
"sync"
-
- "go.uber.org/atomic"
+ "sync/atomic"
)
var (
@@ -196,23 +195,7 @@ type errorGroup interface {
//
// Callers of this function are free to modify the returned slice.
func Errors(err error) []error {
- if err == nil {
- return nil
- }
-
- // Note that we're casting to multiError, not errorGroup. Our contract is
- // that returned errors MAY implement errorGroup. Errors, however, only
- // has special behavior for multierr-specific error objects.
- //
- // This behavior can be expanded in the future but I think it's prudent to
- // start with as little as possible in terms of contract and possibility
- // of misuse.
- eg, ok := err.(*multiError)
- if !ok {
- return []error{err}
- }
-
- return append(([]error)(nil), eg.Errors()...)
+ return extractErrors(err)
}
// multiError is an error that holds one or more errors.
@@ -227,8 +210,6 @@ type multiError struct {
errors []error
}
-var _ errorGroup = (*multiError)(nil)
-
// Errors returns the list of underlying errors.
//
// This slice MUST NOT be modified.
@@ -239,33 +220,6 @@ func (merr *multiError) Errors() []error {
return merr.errors
}
-// As attempts to find the first error in the error list that matches the type
-// of the value that target points to.
-//
-// This function allows errors.As to traverse the values stored on the
-// multierr error.
-func (merr *multiError) As(target interface{}) bool {
- for _, err := range merr.Errors() {
- if errors.As(err, target) {
- return true
- }
- }
- return false
-}
-
-// Is attempts to match the provided error against errors in the error list.
-//
-// This function allows errors.Is to traverse the values stored on the
-// multierr error.
-func (merr *multiError) Is(target error) bool {
- for _, err := range merr.Errors() {
- if errors.Is(err, target) {
- return true
- }
- }
- return false
-}
-
func (merr *multiError) Error() string {
if merr == nil {
return ""
@@ -281,6 +235,17 @@ func (merr *multiError) Error() string {
return result
}
+// Every compares every error in the given err against the given target error
+// using [errors.Is], and returns true only if every comparison returned true.
+func Every(err error, target error) bool {
+ for _, e := range extractErrors(err) {
+ if !errors.Is(e, target) {
+ return false
+ }
+ }
+ return true
+}
+
func (merr *multiError) Format(f fmt.State, c rune) {
if c == 'v' && f.Flag('+') {
merr.writeMultiline(f)
diff --git a/vendor/go.uber.org/atomic/error_ext.go b/vendor/go.uber.org/multierr/error_post_go120.go
similarity index 65%
rename from vendor/go.uber.org/atomic/error_ext.go
rename to vendor/go.uber.org/multierr/error_post_go120.go
index ffe0be21cb..a173f9c251 100644
--- a/vendor/go.uber.org/atomic/error_ext.go
+++ b/vendor/go.uber.org/multierr/error_post_go120.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2020 Uber Technologies, Inc.
+// Copyright (c) 2017-2023 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -18,22 +18,31 @@
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
-package atomic
+//go:build go1.20
+// +build go1.20
-// atomic.Value panics on nil inputs, or if the underlying type changes.
-// Stabilize by always storing a custom struct that we control.
+package multierr
-//go:generate bin/gen-atomicwrapper -name=Error -type=error -wrapped=Value -pack=packError -unpack=unpackError -file=error.go
-
-type packedError struct{ Value error }
+// Unwrap returns a list of errors wrapped by this multierr.
+func (merr *multiError) Unwrap() []error {
+ return merr.Errors()
+}
-func packError(v error) interface{} {
- return packedError{v}
+type multipleErrors interface {
+ Unwrap() []error
}
-func unpackError(v interface{}) error {
- if err, ok := v.(packedError); ok {
- return err.Value
+func extractErrors(err error) []error {
+ if err == nil {
+ return nil
}
- return nil
+
+ // check if the given err is an Unwrapable error that
+ // implements multipleErrors interface.
+ eg, ok := err.(multipleErrors)
+ if !ok {
+ return []error{err}
+ }
+
+ return append(([]error)(nil), eg.Unwrap()...)
}
diff --git a/vendor/go.uber.org/multierr/error_pre_go120.go b/vendor/go.uber.org/multierr/error_pre_go120.go
new file mode 100644
index 0000000000..93872a3fcd
--- /dev/null
+++ b/vendor/go.uber.org/multierr/error_pre_go120.go
@@ -0,0 +1,79 @@
+// Copyright (c) 2017-2023 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+//go:build !go1.20
+// +build !go1.20
+
+package multierr
+
+import "errors"
+
+// Versions of Go before 1.20 did not support the Unwrap() []error method.
+// This provides a similar behavior by implementing the Is(..) and As(..)
+// methods.
+// See the errors.Join proposal for details:
+// https://github.com/golang/go/issues/53435
+
+// As attempts to find the first error in the error list that matches the type
+// of the value that target points to.
+//
+// This function allows errors.As to traverse the values stored on the
+// multierr error.
+func (merr *multiError) As(target interface{}) bool {
+ for _, err := range merr.Errors() {
+ if errors.As(err, target) {
+ return true
+ }
+ }
+ return false
+}
+
+// Is attempts to match the provided error against errors in the error list.
+//
+// This function allows errors.Is to traverse the values stored on the
+// multierr error.
+func (merr *multiError) Is(target error) bool {
+ for _, err := range merr.Errors() {
+ if errors.Is(err, target) {
+ return true
+ }
+ }
+ return false
+}
+
+func extractErrors(err error) []error {
+ if err == nil {
+ return nil
+ }
+
+ // Note that we're casting to multiError, not errorGroup. Our contract is
+ // that returned errors MAY implement errorGroup. Errors, however, only
+ // has special behavior for multierr-specific error objects.
+ //
+ // This behavior can be expanded in the future but I think it's prudent to
+ // start with as little as possible in terms of contract and possibility
+ // of misuse.
+ eg, ok := err.(*multiError)
+ if !ok {
+ return []error{err}
+ }
+
+ return append(([]error)(nil), eg.Errors()...)
+}
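Taken together, the Go 1.20 file above (Unwrap() []error) and this pre-1.20 shim keep multierr errors traversable by the standard errors helpers, and the new Every function reuses the same extraction. A small sketch of the resulting surface; the sentinel error and messages are invented for illustration:

```go
package main

import (
	"errors"
	"fmt"

	"go.uber.org/multierr"
)

var errRetryable = errors.New("retryable")

func main() {
	// Combine two wrapped errors into one multierr error.
	err := multierr.Combine(
		fmt.Errorf("fetch a: %w", errRetryable),
		fmt.Errorf("fetch b: %w", errRetryable),
	)

	fmt.Println(errors.Is(err, errRetryable))      // true: at least one error matches
	fmt.Println(multierr.Every(err, errRetryable)) // true: every error matches
	fmt.Println(len(multierr.Errors(err)))         // 2
}
```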
diff --git a/vendor/go.uber.org/multierr/glide.yaml b/vendor/go.uber.org/multierr/glide.yaml
deleted file mode 100644
index 6ef084ec24..0000000000
--- a/vendor/go.uber.org/multierr/glide.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
-package: go.uber.org/multierr
-import:
-- package: go.uber.org/atomic
- version: ^1
-testImport:
-- package: github.com/stretchr/testify
- subpackages:
- - assert
diff --git a/vendor/golang.org/x/exp/LICENSE b/vendor/golang.org/x/exp/LICENSE
index 6a66aea5ea..2a7cf70da6 100644
--- a/vendor/golang.org/x/exp/LICENSE
+++ b/vendor/golang.org/x/exp/LICENSE
@@ -1,4 +1,4 @@
-Copyright (c) 2009 The Go Authors. All rights reserved.
+Copyright 2009 The Go Authors.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
@@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer.
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
- * Neither the name of Google Inc. nor the names of its
+ * Neither the name of Google LLC nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
diff --git a/vendor/golang.org/x/exp/slices/slices.go b/vendor/golang.org/x/exp/slices/slices.go
index 5e8158bba8..46ceac3439 100644
--- a/vendor/golang.org/x/exp/slices/slices.go
+++ b/vendor/golang.org/x/exp/slices/slices.go
@@ -209,25 +209,37 @@ func Insert[S ~[]E, E any](s S, i int, v ...E) S {
return s
}
+// clearSlice sets all elements up to the length of s to the zero value of E.
+// We may use the builtin clear func instead, and remove clearSlice, when upgrading
+// to Go 1.21+.
+func clearSlice[S ~[]E, E any](s S) {
+ var zero E
+ for i := range s {
+ s[i] = zero
+ }
+}
+
// Delete removes the elements s[i:j] from s, returning the modified slice.
-// Delete panics if s[i:j] is not a valid slice of s.
-// Delete is O(len(s)-j), so if many items must be deleted, it is better to
+// Delete panics if j > len(s) or s[i:j] is not a valid slice of s.
+// Delete is O(len(s)-i), so if many items must be deleted, it is better to
// make a single call deleting them all together than to delete one at a time.
-// Delete might not modify the elements s[len(s)-(j-i):len(s)]. If those
-// elements contain pointers you might consider zeroing those elements so that
-// objects they reference can be garbage collected.
+// Delete zeroes the elements s[len(s)-(j-i):len(s)].
func Delete[S ~[]E, E any](s S, i, j int) S {
- _ = s[i:j] // bounds check
+ _ = s[i:j:len(s)] // bounds check
- return append(s[:i], s[j:]...)
+ if i == j {
+ return s
+ }
+
+ oldlen := len(s)
+ s = append(s[:i], s[j:]...)
+ clearSlice(s[len(s):oldlen]) // zero/nil out the obsolete elements, for GC
+ return s
}
// DeleteFunc removes any elements from s for which del returns true,
// returning the modified slice.
-// When DeleteFunc removes m elements, it might not modify the elements
-// s[len(s)-m:len(s)]. If those elements contain pointers you might consider
-// zeroing those elements so that objects they reference can be garbage
-// collected.
+// DeleteFunc zeroes the elements between the new length and the original length.
func DeleteFunc[S ~[]E, E any](s S, del func(E) bool) S {
i := IndexFunc(s, del)
if i == -1 {
@@ -240,11 +252,13 @@ func DeleteFunc[S ~[]E, E any](s S, del func(E) bool) S {
i++
}
}
+ clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC
return s[:i]
}
// Replace replaces the elements s[i:j] by the given v, and returns the
// modified slice. Replace panics if s[i:j] is not a valid slice of s.
+// When len(v) < (j-i), Replace zeroes the elements between the new length and the original length.
func Replace[S ~[]E, E any](s S, i, j int, v ...E) S {
_ = s[i:j] // verify that i:j is a valid subslice
@@ -272,6 +286,7 @@ func Replace[S ~[]E, E any](s S, i, j int, v ...E) S {
if i+len(v) != j {
copy(r[i+len(v):], s[j:])
}
+ clearSlice(s[tot:]) // zero/nil out the obsolete elements, for GC
return r
}
@@ -345,9 +360,7 @@ func Clone[S ~[]E, E any](s S) S {
// This is like the uniq command found on Unix.
// Compact modifies the contents of the slice s and returns the modified slice,
// which may have a smaller length.
-// When Compact discards m elements in total, it might not modify the elements
-// s[len(s)-m:len(s)]. If those elements contain pointers you might consider
-// zeroing those elements so that objects they reference can be garbage collected.
+// Compact zeroes the elements between the new length and the original length.
func Compact[S ~[]E, E comparable](s S) S {
if len(s) < 2 {
return s
@@ -361,11 +374,13 @@ func Compact[S ~[]E, E comparable](s S) S {
i++
}
}
+ clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC
return s[:i]
}
// CompactFunc is like [Compact] but uses an equality function to compare elements.
// For runs of elements that compare equal, CompactFunc keeps the first one.
+// CompactFunc zeroes the elements between the new length and the original length.
func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S {
if len(s) < 2 {
return s
@@ -379,6 +394,7 @@ func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S {
i++
}
}
+ clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC
return s[:i]
}
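The practical effect of the new clearSlice calls is that Delete, DeleteFunc, Compact and CompactFunc no longer leave stale values in the obsolete tail of the backing array. A minimal sketch with a slice of pointers (the ptr helper is mine, not part of x/exp):

```go
package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

func ptr(v int) *int { return &v }

func main() {
	s := []*int{ptr(1), ptr(2), ptr(3), ptr(4)}
	keep := slices.Delete(s, 1, 3) // remove the elements at indexes 1 and 2

	fmt.Println(len(keep), *keep[0], *keep[1]) // 2 1 4

	// The obsolete tail of the original backing array is now nil, so the
	// pointers that used to sit there no longer keep their targets reachable.
	fmt.Println(s[2], s[3]) // <nil> <nil>
}
```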
diff --git a/vendor/golang.org/x/exp/slices/sort.go b/vendor/golang.org/x/exp/slices/sort.go
index b67897f76b..f58bbc7ba4 100644
--- a/vendor/golang.org/x/exp/slices/sort.go
+++ b/vendor/golang.org/x/exp/slices/sort.go
@@ -22,10 +22,12 @@ func Sort[S ~[]E, E constraints.Ordered](x S) {
// SortFunc sorts the slice x in ascending order as determined by the cmp
// function. This sort is not guaranteed to be stable.
// cmp(a, b) should return a negative number when a < b, a positive number when
-// a > b and zero when a == b.
+// a > b and zero when a == b or when a is not comparable to b in the sense
+// of the formal definition of Strict Weak Ordering.
//
// SortFunc requires that cmp is a strict weak ordering.
// See https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings.
+// To indicate 'uncomparable', return 0 from the function.
func SortFunc[S ~[]E, E any](x S, cmp func(a, b E) int) {
n := len(x)
pdqsortCmpFunc(x, 0, n, bits.Len(uint(n)), cmp)
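The reworded SortFunc comment makes the contract explicit: cmp must be a strict weak ordering, and 0 covers both "equal" and "not comparable". A short conforming comparator as a sketch:

```go
package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

func main() {
	words := []string{"pear", "fig", "banana"}
	// Negative when a < b, positive when a > b, 0 when tied (or uncomparable).
	slices.SortFunc(words, func(a, b string) int {
		return len(a) - len(b) // order by length only
	})
	fmt.Println(words) // [fig pear banana]
}
```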
diff --git a/vendor/golang.org/x/exp/slog/handler.go b/vendor/golang.org/x/exp/slog/handler.go
index 74f88738c9..bd635cb818 100644
--- a/vendor/golang.org/x/exp/slog/handler.go
+++ b/vendor/golang.org/x/exp/slog/handler.go
@@ -8,6 +8,7 @@ import (
"context"
"fmt"
"io"
+ "reflect"
"strconv"
"sync"
"time"
@@ -504,6 +505,23 @@ func (s *handleState) appendString(str string) {
}
func (s *handleState) appendValue(v Value) {
+ defer func() {
+ if r := recover(); r != nil {
+ // If it panics with a nil pointer, the most likely cases are
+ // an encoding.TextMarshaler or error fails to guard against nil,
+ // in which case "<nil>" seems to be the feasible choice.
+ //
+ // Adapted from the code in fmt/print.go.
+ if v := reflect.ValueOf(v.any); v.Kind() == reflect.Pointer && v.IsNil() {
+ s.appendString("<nil>")
+ return
+ }
+
+ // Otherwise just print the original panic message.
+ s.appendString(fmt.Sprintf("!PANIC: %v", r))
+ }
+ }()
+
var err error
if s.h.json {
err = appendJSONValue(s, v)
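This recover guard means a value whose TextMarshaler (or Error) method panics on a nil receiver is rendered as "<nil>" instead of taking the handler down. The identical guard exists in the standard library's log/slog, so this sketch uses that package (Go 1.21+) rather than guessing at x/exp/slog constructor details; the mode type is invented for illustration:

```go
package main

import (
	"log/slog"
	"os"
)

// mode implements encoding.TextMarshaler but does not guard against a nil receiver.
type mode struct{ name string }

func (m *mode) MarshalText() ([]byte, error) { return []byte(m.name), nil }

func main() {
	logger := slog.New(slog.NewTextHandler(os.Stdout, nil))

	var m *mode // typed nil pointer
	// MarshalText panics on the nil receiver; the handler recovers and
	// prints mode=<nil> instead of crashing.
	logger.Info("selected", "mode", m)
}
```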
diff --git a/vendor/golang.org/x/time/LICENSE b/vendor/golang.org/x/time/LICENSE
index 6a66aea5ea..2a7cf70da6 100644
--- a/vendor/golang.org/x/time/LICENSE
+++ b/vendor/golang.org/x/time/LICENSE
@@ -1,4 +1,4 @@
-Copyright (c) 2009 The Go Authors. All rights reserved.
+Copyright 2009 The Go Authors.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
@@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer.
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
- * Neither the name of Google Inc. nor the names of its
+ * Neither the name of Google LLC nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
diff --git a/vendor/golang.org/x/time/rate/rate.go b/vendor/golang.org/x/time/rate/rate.go
index 8f6c7f493f..93a798ab63 100644
--- a/vendor/golang.org/x/time/rate/rate.go
+++ b/vendor/golang.org/x/time/rate/rate.go
@@ -99,8 +99,9 @@ func (lim *Limiter) Tokens() float64 {
// bursts of at most b tokens.
func NewLimiter(r Limit, b int) *Limiter {
return &Limiter{
- limit: r,
- burst: b,
+ limit: r,
+ burst: b,
+ tokens: float64(b),
}
}
@@ -344,18 +345,6 @@ func (lim *Limiter) reserveN(t time.Time, n int, maxFutureReserve time.Duration)
tokens: n,
timeToAct: t,
}
- } else if lim.limit == 0 {
- var ok bool
- if lim.burst >= n {
- ok = true
- lim.burst -= n
- }
- return Reservation{
- ok: ok,
- lim: lim,
- tokens: lim.burst,
- timeToAct: t,
- }
}
t, tokens := lim.advance(t)
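
Two behavioural points in the rate.go hunks: the token bucket now starts full (tokens is initialised to the burst size), and the special case for a zero limit in reserveN is gone because the initial tokens already cover it. A brief usage sketch with illustrative values:

package main

import (
	"fmt"

	"golang.org/x/time/rate"
)

func main() {
	// The bucket starts with `burst` tokens, so the first three Allow()
	// calls succeed immediately and the fourth is throttled.
	lim := rate.NewLimiter(rate.Limit(1), 3)
	fmt.Println(lim.Allow(), lim.Allow(), lim.Allow(), lim.Allow())
	// roughly: true true true false

	// With a zero limit the bucket is never refilled; only the initial
	// burst can ever be consumed, which is what the removed special case
	// in reserveN used to implement explicitly.
	zero := rate.NewLimiter(0, 1)
	fmt.Println(zero.Allow(), zero.Allow()) // true false
}
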
diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go
index cffdfda961..737d6876d5 100644
--- a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go
+++ b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go
@@ -192,11 +192,6 @@ func (d decoder) unmarshalMessage(m protoreflect.Message, skipTypeURL bool) erro
fd = fieldDescs.ByTextName(name)
}
}
- if flags.ProtoLegacyWeak {
- if fd != nil && fd.IsWeak() && fd.Message().IsPlaceholder() {
- fd = nil // reset since the weak reference is not linked in
- }
- }
if fd == nil {
// Field is unknown.
diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go
index d972a3d98e..b53805056a 100644
--- a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go
+++ b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go
@@ -185,11 +185,6 @@ func (d decoder) unmarshalMessage(m protoreflect.Message, checkDelims bool) erro
} else if xtErr != nil && xtErr != protoregistry.NotFound {
return d.newError(tok.Pos(), "unable to resolve [%s]: %v", tok.RawString(), xtErr)
}
- if flags.ProtoLegacyWeak {
- if fd != nil && fd.IsWeak() && fd.Message().IsPlaceholder() {
- fd = nil // reset since the weak reference is not linked in
- }
- }
// Handle unknown fields.
if fd == nil {
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go
index 7e87c76044..669133d04d 100644
--- a/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go
+++ b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go
@@ -26,7 +26,7 @@ var byteType = reflect.TypeOf(byte(0))
// The type is the underlying field type (e.g., a repeated field may be
// represented by []T, but the Go type passed in is just T).
// A list of enum value descriptors must be provided for enum fields.
-// This does not populate the Enum or Message (except for weak message).
+// This does not populate the Enum or Message.
//
// This function is a best effort attempt; parsing errors are ignored.
func Unmarshal(tag string, goType reflect.Type, evs protoreflect.EnumValueDescriptors) protoreflect.FieldDescriptor {
@@ -109,9 +109,6 @@ func Unmarshal(tag string, goType reflect.Type, evs protoreflect.EnumValueDescri
}
case s == "packed":
f.L1.EditionFeatures.IsPacked = true
- case strings.HasPrefix(s, "weak="):
- f.L1.IsWeak = true
- f.L1.Message = filedesc.PlaceholderMessage(protoreflect.FullName(s[len("weak="):]))
case strings.HasPrefix(s, "def="):
// The default tag is special in that everything afterwards is the
// default regardless of the presence of commas.
@@ -183,9 +180,6 @@ func Marshal(fd protoreflect.FieldDescriptor, enumName string) string {
// the exact same semantics from the previous generator.
tag = append(tag, "json="+jsonName)
}
- if fd.IsWeak() {
- tag = append(tag, "weak="+string(fd.Message().FullName()))
- }
// The previous implementation does not tag extension fields as proto3,
// even when the field is defined in a proto3 file. Match that behavior
// for consistency.
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
index 378b826faa..688aabe434 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
@@ -19,7 +19,6 @@ import (
"google.golang.org/protobuf/internal/pragma"
"google.golang.org/protobuf/internal/strs"
"google.golang.org/protobuf/reflect/protoreflect"
- "google.golang.org/protobuf/reflect/protoregistry"
)
// Edition is an Enum for proto2.Edition
@@ -275,7 +274,6 @@ type (
Kind protoreflect.Kind
StringName stringName
IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto
- IsWeak bool // promoted from google.protobuf.FieldOptions
IsLazy bool // promoted from google.protobuf.FieldOptions
Default defaultValue
ContainingOneof protoreflect.OneofDescriptor // must be consistent with Message.Oneofs.Fields
@@ -369,7 +367,7 @@ func (fd *Field) IsPacked() bool {
return fd.L1.EditionFeatures.IsPacked
}
func (fd *Field) IsExtension() bool { return false }
-func (fd *Field) IsWeak() bool { return fd.L1.IsWeak }
+func (fd *Field) IsWeak() bool { return false }
func (fd *Field) IsLazy() bool { return fd.L1.IsLazy }
func (fd *Field) IsList() bool { return fd.Cardinality() == protoreflect.Repeated && !fd.IsMap() }
func (fd *Field) IsMap() bool { return fd.Message() != nil && fd.Message().IsMapEntry() }
@@ -396,11 +394,6 @@ func (fd *Field) Enum() protoreflect.EnumDescriptor {
return fd.L1.Enum
}
func (fd *Field) Message() protoreflect.MessageDescriptor {
- if fd.L1.IsWeak {
- if d, _ := protoregistry.GlobalFiles.FindDescriptorByName(fd.L1.Message.FullName()); d != nil {
- return d.(protoreflect.MessageDescriptor)
- }
- }
return fd.L1.Message
}
func (fd *Field) IsMapEntry() bool {
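
With IsWeak hard-wired to false and Message no longer consulting the global registry, code that branched on weak fields is effectively dead. A small sketch, using anypb only as a convenient well-known descriptor:

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/known/anypb"
)

func main() {
	md := (&anypb.Any{}).ProtoReflect().Descriptor()
	for i := 0; i < md.Fields().Len(); i++ {
		fd := md.Fields().Get(i)
		// IsWeak stays on the FieldDescriptor interface for compatibility,
		// but after this change it always reports false for regular fields,
		// so weak-field branches can be removed.
		fmt.Printf("%s IsWeak=%v\n", fd.Name(), fd.IsWeak())
	}
}
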
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go
index 67a51b327c..d4c94458bd 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go
@@ -32,11 +32,6 @@ func (file *File) resolveMessages() {
for j := range md.L2.Fields.List {
fd := &md.L2.Fields.List[j]
- // Weak fields are resolved upon actual use.
- if fd.L1.IsWeak {
- continue
- }
-
// Resolve message field dependency.
switch fd.L1.Kind {
case protoreflect.EnumKind:
@@ -150,8 +145,6 @@ func (fd *File) unmarshalFull(b []byte) {
switch num {
case genid.FileDescriptorProto_PublicDependency_field_number:
fd.L2.Imports[v].IsPublic = true
- case genid.FileDescriptorProto_WeakDependency_field_number:
- fd.L2.Imports[v].IsWeak = true
}
case protowire.BytesType:
v, m := protowire.ConsumeBytes(b)
@@ -502,8 +495,6 @@ func (fd *Field) unmarshalOptions(b []byte) {
switch num {
case genid.FieldOptions_Packed_field_number:
fd.L1.EditionFeatures.IsPacked = protowire.DecodeBool(v)
- case genid.FieldOptions_Weak_field_number:
- fd.L1.IsWeak = protowire.DecodeBool(v)
case genid.FieldOptions_Lazy_field_number:
fd.L1.IsLazy = protowire.DecodeBool(v)
case FieldOptions_EnforceUTF8:
diff --git a/vendor/google.golang.org/protobuf/internal/filetype/build.go b/vendor/google.golang.org/protobuf/internal/filetype/build.go
index ba83fea44c..e1b4130bd2 100644
--- a/vendor/google.golang.org/protobuf/internal/filetype/build.go
+++ b/vendor/google.golang.org/protobuf/internal/filetype/build.go
@@ -63,7 +63,7 @@ type Builder struct {
// message declarations in "flattened ordering".
//
// Dependencies are Go types for enums or messages referenced by
- // message fields (excluding weak fields), for parent extended messages of
+ // message fields, for parent extended messages of
// extension fields, for enums or messages referenced by extension fields,
// and for input and output messages referenced by service methods.
// Dependencies must come after declarations, but the ordering of
diff --git a/vendor/google.golang.org/protobuf/internal/flags/flags.go b/vendor/google.golang.org/protobuf/internal/flags/flags.go
index 5cb3ee70f9..a06ccabc2f 100644
--- a/vendor/google.golang.org/protobuf/internal/flags/flags.go
+++ b/vendor/google.golang.org/protobuf/internal/flags/flags.go
@@ -6,7 +6,7 @@
package flags
// ProtoLegacy specifies whether to enable support for legacy functionality
-// such as MessageSets, weak fields, and various other obscure behavior
+// such as MessageSets, and various other obscure behavior
// that is necessary to maintain backwards compatibility with proto1 or
// the pre-release variants of proto2 and proto3.
//
@@ -22,8 +22,3 @@ const ProtoLegacy = protoLegacy
// extension fields at unmarshal time, but defers creating the message
// structure until the extension is first accessed.
const LazyUnmarshalExtensions = ProtoLegacy
-
-// ProtoLegacyWeak specifies whether to enable support for weak fields.
-// This flag was split out of ProtoLegacy in preparation for removing
-// support for weak fields (independent of the other protolegacy features).
-const ProtoLegacyWeak = ProtoLegacy
diff --git a/vendor/google.golang.org/protobuf/internal/genid/goname.go b/vendor/google.golang.org/protobuf/internal/genid/goname.go
index 693d2e9e1f..99bb95bafd 100644
--- a/vendor/google.golang.org/protobuf/internal/genid/goname.go
+++ b/vendor/google.golang.org/protobuf/internal/genid/goname.go
@@ -11,15 +11,10 @@ const (
SizeCache_goname = "sizeCache"
SizeCacheA_goname = "XXX_sizecache"
- WeakFields_goname = "weakFields"
- WeakFieldsA_goname = "XXX_weak"
-
UnknownFields_goname = "unknownFields"
UnknownFieldsA_goname = "XXX_unrecognized"
ExtensionFields_goname = "extensionFields"
ExtensionFieldsA_goname = "XXX_InternalExtensions"
ExtensionFieldsB_goname = "XXX_extensions"
-
- WeakFieldPrefix_goname = "XXX_weak_"
)
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go
index 7c1f66c8c1..d14d7d93cc 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go
@@ -5,15 +5,12 @@
package impl
import (
- "fmt"
"reflect"
- "sync"
"google.golang.org/protobuf/encoding/protowire"
"google.golang.org/protobuf/internal/errors"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
- "google.golang.org/protobuf/reflect/protoregistry"
"google.golang.org/protobuf/runtime/protoiface"
)
@@ -121,78 +118,6 @@ func (mi *MessageInfo) initOneofFieldCoders(od protoreflect.OneofDescriptor, si
}
}
-func makeWeakMessageFieldCoder(fd protoreflect.FieldDescriptor) pointerCoderFuncs {
- var once sync.Once
- var messageType protoreflect.MessageType
- lazyInit := func() {
- once.Do(func() {
- messageName := fd.Message().FullName()
- messageType, _ = protoregistry.GlobalTypes.FindMessageByName(messageName)
- })
- }
-
- return pointerCoderFuncs{
- size: func(p pointer, f *coderFieldInfo, opts marshalOptions) int {
- m, ok := p.WeakFields().get(f.num)
- if !ok {
- return 0
- }
- lazyInit()
- if messageType == nil {
- panic(fmt.Sprintf("weak message %v is not linked in", fd.Message().FullName()))
- }
- return sizeMessage(m, f.tagsize, opts)
- },
- marshal: func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
- m, ok := p.WeakFields().get(f.num)
- if !ok {
- return b, nil
- }
- lazyInit()
- if messageType == nil {
- panic(fmt.Sprintf("weak message %v is not linked in", fd.Message().FullName()))
- }
- return appendMessage(b, m, f.wiretag, opts)
- },
- unmarshal: func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) {
- fs := p.WeakFields()
- m, ok := fs.get(f.num)
- if !ok {
- lazyInit()
- if messageType == nil {
- return unmarshalOutput{}, errUnknown
- }
- m = messageType.New().Interface()
- fs.set(f.num, m)
- }
- return consumeMessage(b, m, wtyp, opts)
- },
- isInit: func(p pointer, f *coderFieldInfo) error {
- m, ok := p.WeakFields().get(f.num)
- if !ok {
- return nil
- }
- return proto.CheckInitialized(m)
- },
- merge: func(dst, src pointer, f *coderFieldInfo, opts mergeOptions) {
- sm, ok := src.WeakFields().get(f.num)
- if !ok {
- return
- }
- dm, ok := dst.WeakFields().get(f.num)
- if !ok {
- lazyInit()
- if messageType == nil {
- panic(fmt.Sprintf("weak message %v is not linked in", fd.Message().FullName()))
- }
- dm = messageType.New().Interface()
- dst.WeakFields().set(f.num, dm)
- }
- opts.Merge(dm, sm)
- },
- }
-}
-
func makeMessageFieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) pointerCoderFuncs {
if mi := getMessageInfo(ft); mi != nil {
funcs := pointerCoderFuncs{
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go
index 111d95833d..f78b57b046 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go
@@ -119,9 +119,6 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) {
}
case isOneof:
fieldOffset = offsetOf(fs)
- case fd.IsWeak():
- fieldOffset = si.weakOffset
- funcs = makeWeakMessageFieldCoder(fd)
default:
fieldOffset = offsetOf(fs)
childMessage, funcs = fieldCoder(fd, ft)
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go b/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go
index f81d7d0db9..41c1f74ef8 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go
@@ -46,9 +46,6 @@ func (mi *MessageInfo) makeOpaqueCoderMethods(t reflect.Type, si opaqueStructInf
switch {
case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic():
fieldOffset = offsetOf(fs)
- case fd.IsWeak():
- fieldOffset = si.weakOffset
- funcs = makeWeakMessageFieldCoder(fd)
case fd.Message() != nil && !fd.IsMap():
fieldOffset = offsetOf(fs)
if fd.IsList() {
diff --git a/vendor/google.golang.org/protobuf/internal/impl/lazy.go b/vendor/google.golang.org/protobuf/internal/impl/lazy.go
index e8fb6c35b4..c7de31e243 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/lazy.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/lazy.go
@@ -131,7 +131,7 @@ func (mi *MessageInfo) skipField(b []byte, f *coderFieldInfo, wtyp protowire.Typ
fmi := f.validation.mi
if fmi == nil {
fd := mi.Desc.Fields().ByNumber(f.num)
- if fd == nil || !fd.IsWeak() {
+ if fd == nil {
return out, ValidationUnknown
}
messageName := fd.Message().FullName()
diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go
index bf0b6049b4..a51dffbe29 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go
@@ -310,12 +310,9 @@ func aberrantAppendField(md *filedesc.Message, goType reflect.Type, tag, tagKey,
fd.L0.Parent = md
fd.L0.Index = n
- if fd.L1.IsWeak || fd.L1.EditionFeatures.IsPacked {
+ if fd.L1.EditionFeatures.IsPacked {
fd.L1.Options = func() protoreflect.ProtoMessage {
opts := descopts.Field.ProtoReflect().New()
- if fd.L1.IsWeak {
- opts.Set(opts.Descriptor().Fields().ByName("weak"), protoreflect.ValueOfBool(true))
- }
if fd.L1.EditionFeatures.IsPacked {
opts.Set(opts.Descriptor().Fields().ByName("packed"), protoreflect.ValueOfBool(fd.L1.EditionFeatures.IsPacked))
}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/message.go b/vendor/google.golang.org/protobuf/internal/impl/message.go
index d1f79b4224..d50423dcb7 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/message.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/message.go
@@ -14,7 +14,6 @@ import (
"google.golang.org/protobuf/internal/genid"
"google.golang.org/protobuf/reflect/protoreflect"
- "google.golang.org/protobuf/reflect/protoregistry"
)
// MessageInfo provides protobuf related functionality for a given Go type
@@ -120,7 +119,6 @@ type (
var (
sizecacheType = reflect.TypeOf(SizeCache(0))
- weakFieldsType = reflect.TypeOf(WeakFields(nil))
unknownFieldsAType = reflect.TypeOf(unknownFieldsA(nil))
unknownFieldsBType = reflect.TypeOf(unknownFieldsB(nil))
extensionFieldsType = reflect.TypeOf(ExtensionFields(nil))
@@ -129,8 +127,6 @@ var (
type structInfo struct {
sizecacheOffset offset
sizecacheType reflect.Type
- weakOffset offset
- weakType reflect.Type
unknownOffset offset
unknownType reflect.Type
extensionOffset offset
@@ -148,7 +144,6 @@ type structInfo struct {
func (mi *MessageInfo) makeStructInfo(t reflect.Type) structInfo {
si := structInfo{
sizecacheOffset: invalidOffset,
- weakOffset: invalidOffset,
unknownOffset: invalidOffset,
extensionOffset: invalidOffset,
lazyOffset: invalidOffset,
@@ -168,11 +163,6 @@ fieldLoop:
si.sizecacheOffset = offsetOf(f)
si.sizecacheType = f.Type
}
- case genid.WeakFields_goname, genid.WeakFieldsA_goname:
- if f.Type == weakFieldsType {
- si.weakOffset = offsetOf(f)
- si.weakType = f.Type
- }
case genid.UnknownFields_goname, genid.UnknownFieldsA_goname:
if f.Type == unknownFieldsAType || f.Type == unknownFieldsBType {
si.unknownOffset = offsetOf(f)
@@ -256,9 +246,6 @@ func (mi *MessageInfo) Message(i int) protoreflect.MessageType {
mi.init()
fd := mi.Desc.Fields().Get(i)
switch {
- case fd.IsWeak():
- mt, _ := protoregistry.GlobalTypes.FindMessageByName(fd.Message().FullName())
- return mt
case fd.IsMap():
return mapEntryType{fd.Message(), mi.fieldTypes[fd.Number()]}
default:
diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go b/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go
index d8dcd78863..dd55e8e009 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go
@@ -56,9 +56,6 @@ func opaqueInitHook(mi *MessageInfo) bool {
usePresence, _ := usePresenceForField(si, fd)
switch {
- case fd.IsWeak():
- // Weak fields are no different for opaque.
- fi = fieldInfoForWeakMessage(fd, si.weakOffset)
case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic():
// Oneofs are no different for opaque.
fi = fieldInfoForOneof(fd, si.oneofsByName[fd.ContainingOneof().Name()], mi.Exporter, si.oneofWrappersByNumber[fd.Number()])
@@ -620,8 +617,6 @@ func usePresenceForField(si opaqueStructInfo, fd protoreflect.FieldDescriptor) (
switch {
case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic():
return false, false
- case fd.IsWeak():
- return false, false
case fd.IsMap():
return false, false
case fd.Kind() == protoreflect.MessageKind || fd.Kind() == protoreflect.GroupKind:
diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go
index 31c19b54f8..0d20132fa2 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go
@@ -72,8 +72,6 @@ func (mi *MessageInfo) makeKnownFieldsFunc(si structInfo) {
fi = fieldInfoForMap(fd, fs, mi.Exporter)
case fd.IsList():
fi = fieldInfoForList(fd, fs, mi.Exporter)
- case fd.IsWeak():
- fi = fieldInfoForWeakMessage(fd, si.weakOffset)
case fd.Message() != nil:
fi = fieldInfoForMessage(fd, fs, mi.Exporter)
default:
@@ -219,9 +217,6 @@ func (mi *MessageInfo) makeFieldTypes(si structInfo) {
}
case fd.Message() != nil:
ft = fs.Type
- if fd.IsWeak() {
- ft = nil
- }
isMessage = true
}
if isMessage && ft != nil && ft.Kind() != reflect.Ptr {
diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go
index 3cd1fbc21f..68d4ae32ec 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go
@@ -8,11 +8,8 @@ import (
"fmt"
"math"
"reflect"
- "sync"
- "google.golang.org/protobuf/internal/flags"
"google.golang.org/protobuf/reflect/protoreflect"
- "google.golang.org/protobuf/reflect/protoregistry"
)
type fieldInfo struct {
@@ -332,79 +329,6 @@ func fieldInfoForScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField,
}
}
-func fieldInfoForWeakMessage(fd protoreflect.FieldDescriptor, weakOffset offset) fieldInfo {
- if !flags.ProtoLegacyWeak {
- panic("no support for proto1 weak fields")
- }
-
- var once sync.Once
- var messageType protoreflect.MessageType
- lazyInit := func() {
- once.Do(func() {
- messageName := fd.Message().FullName()
- messageType, _ = protoregistry.GlobalTypes.FindMessageByName(messageName)
- if messageType == nil {
- panic(fmt.Sprintf("weak message %v for field %v is not linked in", messageName, fd.FullName()))
- }
- })
- }
-
- num := fd.Number()
- return fieldInfo{
- fieldDesc: fd,
- has: func(p pointer) bool {
- if p.IsNil() {
- return false
- }
- _, ok := p.Apply(weakOffset).WeakFields().get(num)
- return ok
- },
- clear: func(p pointer) {
- p.Apply(weakOffset).WeakFields().clear(num)
- },
- get: func(p pointer) protoreflect.Value {
- lazyInit()
- if p.IsNil() {
- return protoreflect.ValueOfMessage(messageType.Zero())
- }
- m, ok := p.Apply(weakOffset).WeakFields().get(num)
- if !ok {
- return protoreflect.ValueOfMessage(messageType.Zero())
- }
- return protoreflect.ValueOfMessage(m.ProtoReflect())
- },
- set: func(p pointer, v protoreflect.Value) {
- lazyInit()
- m := v.Message()
- if m.Descriptor() != messageType.Descriptor() {
- if got, want := m.Descriptor().FullName(), messageType.Descriptor().FullName(); got != want {
- panic(fmt.Sprintf("field %v has mismatching message descriptor: got %v, want %v", fd.FullName(), got, want))
- }
- panic(fmt.Sprintf("field %v has mismatching message descriptor: %v", fd.FullName(), m.Descriptor().FullName()))
- }
- p.Apply(weakOffset).WeakFields().set(num, m.Interface())
- },
- mutable: func(p pointer) protoreflect.Value {
- lazyInit()
- fs := p.Apply(weakOffset).WeakFields()
- m, ok := fs.get(num)
- if !ok {
- m = messageType.New().Interface()
- fs.set(num, m)
- }
- return protoreflect.ValueOfMessage(m.ProtoReflect())
- },
- newMessage: func() protoreflect.Message {
- lazyInit()
- return messageType.New()
- },
- newField: func() protoreflect.Value {
- lazyInit()
- return protoreflect.ValueOfMessage(messageType.New())
- },
- }
-}
-
func fieldInfoForMessage(fd protoreflect.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo {
ft := fs.Type
conv := NewConverter(ft, fd)
diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go
index 6bed45e35c..62f8bf663e 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go
@@ -111,7 +111,6 @@ func (p pointer) StringSlice() *[]string { return (*[]string)(p.p
func (p pointer) Bytes() *[]byte { return (*[]byte)(p.p) }
func (p pointer) BytesPtr() **[]byte { return (**[]byte)(p.p) }
func (p pointer) BytesSlice() *[][]byte { return (*[][]byte)(p.p) }
-func (p pointer) WeakFields() *weakFields { return (*weakFields)(p.p) }
func (p pointer) Extensions() *map[int32]ExtensionField { return (*map[int32]ExtensionField)(p.p) }
func (p pointer) LazyInfoPtr() **protolazy.XXX_lazyUnmarshalInfo {
return (**protolazy.XXX_lazyUnmarshalInfo)(p.p)
diff --git a/vendor/google.golang.org/protobuf/internal/impl/validate.go b/vendor/google.golang.org/protobuf/internal/impl/validate.go
index b534a3d6db..7b2995dde5 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/validate.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/validate.go
@@ -211,9 +211,7 @@ func newValidationInfo(fd protoreflect.FieldDescriptor, ft reflect.Type) validat
switch fd.Kind() {
case protoreflect.MessageKind:
vi.typ = validationTypeMessage
- if !fd.IsWeak() {
- vi.mi = getMessageInfo(ft)
- }
+ vi.mi = getMessageInfo(ft)
case protoreflect.GroupKind:
vi.typ = validationTypeGroup
vi.mi = getMessageInfo(ft)
@@ -320,26 +318,6 @@ State:
}
if f != nil {
vi = f.validation
- if vi.typ == validationTypeMessage && vi.mi == nil {
- // Probable weak field.
- //
- // TODO: Consider storing the results of this lookup somewhere
- // rather than recomputing it on every validation.
- fd := st.mi.Desc.Fields().ByNumber(num)
- if fd == nil || !fd.IsWeak() {
- break
- }
- messageName := fd.Message().FullName()
- messageType, err := protoregistry.GlobalTypes.FindMessageByName(messageName)
- switch err {
- case nil:
- vi.mi, _ = messageType.(*MessageInfo)
- case protoregistry.NotFound:
- vi.typ = validationTypeBytes
- default:
- return out, ValidationUnknown
- }
- }
break
}
// Possible extension field.
diff --git a/vendor/google.golang.org/protobuf/internal/impl/weak.go b/vendor/google.golang.org/protobuf/internal/impl/weak.go
deleted file mode 100644
index eb79a7ba94..0000000000
--- a/vendor/google.golang.org/protobuf/internal/impl/weak.go
+++ /dev/null
@@ -1,74 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package impl
-
-import (
- "fmt"
-
- "google.golang.org/protobuf/reflect/protoreflect"
- "google.golang.org/protobuf/reflect/protoregistry"
-)
-
-// weakFields adds methods to the exported WeakFields type for internal use.
-//
-// The exported type is an alias to an unnamed type, so methods can't be
-// defined directly on it.
-type weakFields WeakFields
-
-func (w weakFields) get(num protoreflect.FieldNumber) (protoreflect.ProtoMessage, bool) {
- m, ok := w[int32(num)]
- return m, ok
-}
-
-func (w *weakFields) set(num protoreflect.FieldNumber, m protoreflect.ProtoMessage) {
- if *w == nil {
- *w = make(weakFields)
- }
- (*w)[int32(num)] = m
-}
-
-func (w *weakFields) clear(num protoreflect.FieldNumber) {
- delete(*w, int32(num))
-}
-
-func (Export) HasWeak(w WeakFields, num protoreflect.FieldNumber) bool {
- _, ok := w[int32(num)]
- return ok
-}
-
-func (Export) ClearWeak(w *WeakFields, num protoreflect.FieldNumber) {
- delete(*w, int32(num))
-}
-
-func (Export) GetWeak(w WeakFields, num protoreflect.FieldNumber, name protoreflect.FullName) protoreflect.ProtoMessage {
- if m, ok := w[int32(num)]; ok {
- return m
- }
- mt, _ := protoregistry.GlobalTypes.FindMessageByName(name)
- if mt == nil {
- panic(fmt.Sprintf("message %v for weak field is not linked in", name))
- }
- return mt.Zero().Interface()
-}
-
-func (Export) SetWeak(w *WeakFields, num protoreflect.FieldNumber, name protoreflect.FullName, m protoreflect.ProtoMessage) {
- if m != nil {
- mt, _ := protoregistry.GlobalTypes.FindMessageByName(name)
- if mt == nil {
- panic(fmt.Sprintf("message %v for weak field is not linked in", name))
- }
- if mt != m.ProtoReflect().Type() {
- panic(fmt.Sprintf("invalid message type for weak field: got %T, want %T", m, mt.Zero().Interface()))
- }
- }
- if m == nil || !m.ProtoReflect().IsValid() {
- delete(*w, int32(num))
- return
- }
- if *w == nil {
- *w = make(weakFields)
- }
- (*w)[int32(num)] = m
-}
diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go
index f5c06280fe..01efc33030 100644
--- a/vendor/google.golang.org/protobuf/internal/version/version.go
+++ b/vendor/google.golang.org/protobuf/internal/version/version.go
@@ -52,7 +52,7 @@ import (
const (
Major = 1
Minor = 36
- Patch = 3
+ Patch = 5
PreRelease = ""
)
diff --git a/vendor/google.golang.org/protobuf/proto/decode.go b/vendor/google.golang.org/protobuf/proto/decode.go
index e28d7acb37..4cbf1aeaf7 100644
--- a/vendor/google.golang.org/protobuf/proto/decode.go
+++ b/vendor/google.golang.org/protobuf/proto/decode.go
@@ -8,7 +8,6 @@ import (
"google.golang.org/protobuf/encoding/protowire"
"google.golang.org/protobuf/internal/encoding/messageset"
"google.golang.org/protobuf/internal/errors"
- "google.golang.org/protobuf/internal/flags"
"google.golang.org/protobuf/internal/genid"
"google.golang.org/protobuf/internal/pragma"
"google.golang.org/protobuf/reflect/protoreflect"
@@ -172,10 +171,6 @@ func (o UnmarshalOptions) unmarshalMessageSlow(b []byte, m protoreflect.Message)
var err error
if fd == nil {
err = errUnknown
- } else if flags.ProtoLegacyWeak {
- if fd.IsWeak() && fd.Message().IsPlaceholder() {
- err = errUnknown // weak referent is not linked in
- }
}
// Parse the field value.
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go
index cd8fadbaf8..cd7fbc87a4 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go
@@ -68,7 +68,7 @@ type Descriptor interface {
// dependency is not resolved, in which case only name information is known.
//
// Placeholder types may only be returned by the following accessors
- // as a result of unresolved dependencies or weak imports:
+ // as a result of unresolved dependencies:
//
// ╔═══════════════════════════════════╤═════════════════════╗
// ║ Accessor │ Descriptor ║
@@ -168,11 +168,7 @@ type FileImport struct {
// The current file and the imported file must be within proto package.
IsPublic bool
- // IsWeak reports whether this is a weak import, which does not impose
- // a direct dependency on the target file.
- //
- // Weak imports are a legacy proto1 feature. Equivalent behavior is
- // achieved using proto2 extension fields or proto3 Any messages.
+ // Deprecated: support for weak fields has been removed.
IsWeak bool
}
@@ -325,9 +321,7 @@ type FieldDescriptor interface {
// specified in the source .proto file.
HasOptionalKeyword() bool
- // IsWeak reports whether this is a weak field, which does not impose a
- // direct dependency on the target type.
- // If true, then Message returns a placeholder type.
+ // Deprecated: support for weak fields has been removed.
IsWeak() bool
// IsPacked reports whether repeated primitive numeric kinds should be
diff --git a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go
index 191552cce0..497da66e91 100644
--- a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go
@@ -122,6 +122,7 @@ import (
reflect "reflect"
strings "strings"
sync "sync"
+ unsafe "unsafe"
)
// `Any` contains an arbitrary serialized protocol buffer message along with a
@@ -411,7 +412,7 @@ func (x *Any) GetValue() []byte {
var File_google_protobuf_any_proto protoreflect.FileDescriptor
-var file_google_protobuf_any_proto_rawDesc = []byte{
+var file_google_protobuf_any_proto_rawDesc = string([]byte{
0x0a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22, 0x36, 0x0a, 0x03,
@@ -427,16 +428,16 @@ var file_google_protobuf_any_proto_rawDesc = []byte{
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65,
0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x33,
-}
+})
var (
file_google_protobuf_any_proto_rawDescOnce sync.Once
- file_google_protobuf_any_proto_rawDescData = file_google_protobuf_any_proto_rawDesc
+ file_google_protobuf_any_proto_rawDescData []byte
)
func file_google_protobuf_any_proto_rawDescGZIP() []byte {
file_google_protobuf_any_proto_rawDescOnce.Do(func() {
- file_google_protobuf_any_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_any_proto_rawDescData)
+ file_google_protobuf_any_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_any_proto_rawDesc), len(file_google_protobuf_any_proto_rawDesc)))
})
return file_google_protobuf_any_proto_rawDescData
}
@@ -462,7 +463,7 @@ func file_google_protobuf_any_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_google_protobuf_any_proto_rawDesc,
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_any_proto_rawDesc), len(file_google_protobuf_any_proto_rawDesc)),
NumEnums: 0,
NumMessages: 1,
NumExtensions: 0,
@@ -473,7 +474,6 @@ func file_google_protobuf_any_proto_init() {
MessageInfos: file_google_protobuf_any_proto_msgTypes,
}.Build()
File_google_protobuf_any_proto = out.File
- file_google_protobuf_any_proto_rawDesc = nil
file_google_protobuf_any_proto_goTypes = nil
file_google_protobuf_any_proto_depIdxs = nil
}
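
The generated-code hunks above (and the matching ones for duration, field_mask, struct, timestamp and wrappers below) store the raw descriptor as an immutable string and materialise a []byte view on demand via unsafe.StringData and unsafe.Slice, which is also why the rawDesc variable is no longer nilled in the init function. A minimal sketch of that zero-copy pattern in isolation (the variable names are illustrative):

package main

import (
	"fmt"
	"unsafe"
)

// rawDesc stands in for a generated file_*_rawDesc string: string data is
// immutable and never written to at runtime, so a zero-copy []byte view of
// it is safe as long as the bytes are only read.
var rawDesc = string([]byte{0x0a, 0x19, 0x67, 0x6f})

func rawDescBytes() []byte {
	// Reinterpret the string's backing array as a []byte without copying.
	return unsafe.Slice(unsafe.StringData(rawDesc), len(rawDesc))
}

func main() {
	b := rawDescBytes()
	fmt.Println(len(b), b[0]) // 4 10
}
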
diff --git a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go
index 34d76e6cd9..193880d181 100644
--- a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go
@@ -80,6 +80,7 @@ import (
reflect "reflect"
sync "sync"
time "time"
+ unsafe "unsafe"
)
// A Duration represents a signed, fixed-length span of time represented
@@ -288,7 +289,7 @@ func (x *Duration) GetNanos() int32 {
var File_google_protobuf_duration_proto protoreflect.FileDescriptor
-var file_google_protobuf_duration_proto_rawDesc = []byte{
+var file_google_protobuf_duration_proto_rawDesc = string([]byte{
0x0a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
@@ -305,16 +306,16 @@ var file_google_protobuf_duration_proto_rawDesc = []byte{
0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74,
0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79,
0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
+})
var (
file_google_protobuf_duration_proto_rawDescOnce sync.Once
- file_google_protobuf_duration_proto_rawDescData = file_google_protobuf_duration_proto_rawDesc
+ file_google_protobuf_duration_proto_rawDescData []byte
)
func file_google_protobuf_duration_proto_rawDescGZIP() []byte {
file_google_protobuf_duration_proto_rawDescOnce.Do(func() {
- file_google_protobuf_duration_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_duration_proto_rawDescData)
+ file_google_protobuf_duration_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_duration_proto_rawDesc), len(file_google_protobuf_duration_proto_rawDesc)))
})
return file_google_protobuf_duration_proto_rawDescData
}
@@ -340,7 +341,7 @@ func file_google_protobuf_duration_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_google_protobuf_duration_proto_rawDesc,
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_duration_proto_rawDesc), len(file_google_protobuf_duration_proto_rawDesc)),
NumEnums: 0,
NumMessages: 1,
NumExtensions: 0,
@@ -351,7 +352,6 @@ func file_google_protobuf_duration_proto_init() {
MessageInfos: file_google_protobuf_duration_proto_msgTypes,
}.Build()
File_google_protobuf_duration_proto = out.File
- file_google_protobuf_duration_proto_rawDesc = nil
file_google_protobuf_duration_proto_goTypes = nil
file_google_protobuf_duration_proto_depIdxs = nil
}
diff --git a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go
index e5d7da38c2..041feb0f3e 100644
--- a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go
@@ -83,6 +83,7 @@ import (
sort "sort"
strings "strings"
sync "sync"
+ unsafe "unsafe"
)
// `FieldMask` represents a set of symbolic field paths, for example:
@@ -503,7 +504,7 @@ func (x *FieldMask) GetPaths() []string {
var File_google_protobuf_field_mask_proto protoreflect.FileDescriptor
-var file_google_protobuf_field_mask_proto_rawDesc = []byte{
+var file_google_protobuf_field_mask_proto_rawDesc = string([]byte{
0x0a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
@@ -519,16 +520,16 @@ var file_google_protobuf_field_mask_proto_rawDesc = []byte{
0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
+})
var (
file_google_protobuf_field_mask_proto_rawDescOnce sync.Once
- file_google_protobuf_field_mask_proto_rawDescData = file_google_protobuf_field_mask_proto_rawDesc
+ file_google_protobuf_field_mask_proto_rawDescData []byte
)
func file_google_protobuf_field_mask_proto_rawDescGZIP() []byte {
file_google_protobuf_field_mask_proto_rawDescOnce.Do(func() {
- file_google_protobuf_field_mask_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_field_mask_proto_rawDescData)
+ file_google_protobuf_field_mask_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_field_mask_proto_rawDesc), len(file_google_protobuf_field_mask_proto_rawDesc)))
})
return file_google_protobuf_field_mask_proto_rawDescData
}
@@ -554,7 +555,7 @@ func file_google_protobuf_field_mask_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_google_protobuf_field_mask_proto_rawDesc,
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_field_mask_proto_rawDesc), len(file_google_protobuf_field_mask_proto_rawDesc)),
NumEnums: 0,
NumMessages: 1,
NumExtensions: 0,
@@ -565,7 +566,6 @@ func file_google_protobuf_field_mask_proto_init() {
MessageInfos: file_google_protobuf_field_mask_proto_msgTypes,
}.Build()
File_google_protobuf_field_mask_proto = out.File
- file_google_protobuf_field_mask_proto_rawDesc = nil
file_google_protobuf_field_mask_proto_goTypes = nil
file_google_protobuf_field_mask_proto_depIdxs = nil
}
diff --git a/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go b/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go
index f2c53ea337..ecdd31ab53 100644
--- a/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go
@@ -128,6 +128,7 @@ import (
reflect "reflect"
sync "sync"
utf8 "unicode/utf8"
+ unsafe "unsafe"
)
// `NullValue` is a singleton enumeration to represent the null value for the
@@ -671,7 +672,7 @@ func (x *ListValue) GetValues() []*Value {
var File_google_protobuf_struct_proto protoreflect.FileDescriptor
-var file_google_protobuf_struct_proto_rawDesc = []byte{
+var file_google_protobuf_struct_proto_rawDesc = string([]byte{
0x0a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f,
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22,
@@ -719,16 +720,16 @@ var file_google_protobuf_struct_proto_rawDesc = []byte{
0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c,
0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x33,
-}
+})
var (
file_google_protobuf_struct_proto_rawDescOnce sync.Once
- file_google_protobuf_struct_proto_rawDescData = file_google_protobuf_struct_proto_rawDesc
+ file_google_protobuf_struct_proto_rawDescData []byte
)
func file_google_protobuf_struct_proto_rawDescGZIP() []byte {
file_google_protobuf_struct_proto_rawDescOnce.Do(func() {
- file_google_protobuf_struct_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_struct_proto_rawDescData)
+ file_google_protobuf_struct_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_struct_proto_rawDesc), len(file_google_protobuf_struct_proto_rawDesc)))
})
return file_google_protobuf_struct_proto_rawDescData
}
@@ -773,7 +774,7 @@ func file_google_protobuf_struct_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_google_protobuf_struct_proto_rawDesc,
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_struct_proto_rawDesc), len(file_google_protobuf_struct_proto_rawDesc)),
NumEnums: 1,
NumMessages: 4,
NumExtensions: 0,
@@ -785,7 +786,6 @@ func file_google_protobuf_struct_proto_init() {
MessageInfos: file_google_protobuf_struct_proto_msgTypes,
}.Build()
File_google_protobuf_struct_proto = out.File
- file_google_protobuf_struct_proto_rawDesc = nil
file_google_protobuf_struct_proto_goTypes = nil
file_google_protobuf_struct_proto_depIdxs = nil
}
diff --git a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
index 9550109aa3..00ac835c0b 100644
--- a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
@@ -78,6 +78,7 @@ import (
reflect "reflect"
sync "sync"
time "time"
+ unsafe "unsafe"
)
// A Timestamp represents a point in time independent of any time zone or local
@@ -297,7 +298,7 @@ func (x *Timestamp) GetNanos() int32 {
var File_google_protobuf_timestamp_proto protoreflect.FileDescriptor
-var file_google_protobuf_timestamp_proto_rawDesc = []byte{
+var file_google_protobuf_timestamp_proto_rawDesc = string([]byte{
0x0a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
@@ -314,16 +315,16 @@ var file_google_protobuf_timestamp_proto_rawDesc = []byte{
0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f,
0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
+})
var (
file_google_protobuf_timestamp_proto_rawDescOnce sync.Once
- file_google_protobuf_timestamp_proto_rawDescData = file_google_protobuf_timestamp_proto_rawDesc
+ file_google_protobuf_timestamp_proto_rawDescData []byte
)
func file_google_protobuf_timestamp_proto_rawDescGZIP() []byte {
file_google_protobuf_timestamp_proto_rawDescOnce.Do(func() {
- file_google_protobuf_timestamp_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_timestamp_proto_rawDescData)
+ file_google_protobuf_timestamp_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_timestamp_proto_rawDesc), len(file_google_protobuf_timestamp_proto_rawDesc)))
})
return file_google_protobuf_timestamp_proto_rawDescData
}
@@ -349,7 +350,7 @@ func file_google_protobuf_timestamp_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_google_protobuf_timestamp_proto_rawDesc,
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_timestamp_proto_rawDesc), len(file_google_protobuf_timestamp_proto_rawDesc)),
NumEnums: 0,
NumMessages: 1,
NumExtensions: 0,
@@ -360,7 +361,6 @@ func file_google_protobuf_timestamp_proto_init() {
MessageInfos: file_google_protobuf_timestamp_proto_msgTypes,
}.Build()
File_google_protobuf_timestamp_proto = out.File
- file_google_protobuf_timestamp_proto_rawDesc = nil
file_google_protobuf_timestamp_proto_goTypes = nil
file_google_protobuf_timestamp_proto_depIdxs = nil
}
diff --git a/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go b/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go
index 15b424ec12..5de5301063 100644
--- a/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go
@@ -48,6 +48,7 @@ import (
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
+ unsafe "unsafe"
)
// Wrapper message for `double`.
@@ -529,7 +530,7 @@ func (x *BytesValue) GetValue() []byte {
var File_google_protobuf_wrappers_proto protoreflect.FileDescriptor
-var file_google_protobuf_wrappers_proto_rawDesc = []byte{
+var file_google_protobuf_wrappers_proto_rawDesc = string([]byte{
0x0a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
@@ -563,16 +564,16 @@ var file_google_protobuf_wrappers_proto_rawDesc = []byte{
0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
+})
var (
file_google_protobuf_wrappers_proto_rawDescOnce sync.Once
- file_google_protobuf_wrappers_proto_rawDescData = file_google_protobuf_wrappers_proto_rawDesc
+ file_google_protobuf_wrappers_proto_rawDescData []byte
)
func file_google_protobuf_wrappers_proto_rawDescGZIP() []byte {
file_google_protobuf_wrappers_proto_rawDescOnce.Do(func() {
- file_google_protobuf_wrappers_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_wrappers_proto_rawDescData)
+ file_google_protobuf_wrappers_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_wrappers_proto_rawDesc), len(file_google_protobuf_wrappers_proto_rawDesc)))
})
return file_google_protobuf_wrappers_proto_rawDescData
}
@@ -606,7 +607,7 @@ func file_google_protobuf_wrappers_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_google_protobuf_wrappers_proto_rawDesc,
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_wrappers_proto_rawDesc), len(file_google_protobuf_wrappers_proto_rawDesc)),
NumEnums: 0,
NumMessages: 9,
NumExtensions: 0,
@@ -617,7 +618,6 @@ func file_google_protobuf_wrappers_proto_init() {
MessageInfos: file_google_protobuf_wrappers_proto_msgTypes,
}.Build()
File_google_protobuf_wrappers_proto = out.File
- file_google_protobuf_wrappers_proto_rawDesc = nil
file_google_protobuf_wrappers_proto_goTypes = nil
file_google_protobuf_wrappers_proto_depIdxs = nil
}
diff --git a/vendor/k8s.io/apimachinery/pkg/api/errors/doc.go b/vendor/k8s.io/apimachinery/pkg/api/errors/doc.go
index 167baf680d..58751ed0ec 100644
--- a/vendor/k8s.io/apimachinery/pkg/api/errors/doc.go
+++ b/vendor/k8s.io/apimachinery/pkg/api/errors/doc.go
@@ -15,4 +15,4 @@ limitations under the License.
*/
// Package errors provides detailed error types for api field validation.
-package errors // import "k8s.io/apimachinery/pkg/api/errors"
+package errors
diff --git a/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go b/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go
index 57e0e71f67..6a3ab8f24e 100644
--- a/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go
+++ b/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go
@@ -54,6 +54,7 @@ var knownReasons = map[metav1.StatusReason]struct{}{
metav1.StatusReasonGone: {},
metav1.StatusReasonInvalid: {},
metav1.StatusReasonServerTimeout: {},
+ metav1.StatusReasonStoreReadError: {},
metav1.StatusReasonTimeout: {},
metav1.StatusReasonTooManyRequests: {},
metav1.StatusReasonBadRequest: {},
@@ -775,6 +776,12 @@ func IsUnexpectedObjectError(err error) bool {
return err != nil && (ok || errors.As(err, &uoe))
}
+// IsStoreReadError determines if err is due to either failure to transform the
+// data from the storage, or failure to decode the object appropriately.
+func IsStoreReadError(err error) bool {
+ return ReasonForError(err) == metav1.StatusReasonStoreReadError
+}
+
// SuggestsClientDelay returns true if this error suggests a client delay as well as the
// suggested seconds to wait, or false if the error does not imply a wait. It does not
// address whether the error *should* be retried, since some errors (like a 3xx) may
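
IsStoreReadError is a new predicate alongside the existing IsNotFound-style helpers, matching errors whose reason is metav1.StatusReasonStoreReadError. A hedged sketch of how a caller might branch on it (the surrounding handler is hypothetical):

package main

import (
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
)

// handleGetError shows how a caller might react to the new predicate when a
// read from the API server fails; the error would come from any client call.
func handleGetError(err error) {
	switch {
	case err == nil:
		return
	case apierrors.IsStoreReadError(err):
		// The object exists but could not be transformed or decoded from
		// storage; retrying the same read will usually not help.
		fmt.Println("storage read error:", err)
	case apierrors.IsNotFound(err):
		fmt.Println("object not found:", err)
	default:
		fmt.Println("unexpected error:", err)
	}
}

func main() {
	handleGetError(nil)
}
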
diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS b/vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS
new file mode 100644
index 0000000000..3bd8bf535e
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS
@@ -0,0 +1,15 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+reviewers:
+ - thockin
+ - smarterclayton
+ - wojtek-t
+ - deads2k
+ - derekwaynecarr
+ - caesarxuchao
+ - mikedanese
+ - liggitt
+ - janetkuo
+ - dims
+emeritus_reviewers:
+ - ncdc
diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/conditions.go b/vendor/k8s.io/apimachinery/pkg/api/meta/conditions.go
new file mode 100644
index 0000000000..cbdf2eeb83
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/api/meta/conditions.go
@@ -0,0 +1,119 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package meta
+
+import (
+ "time"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// SetStatusCondition sets the corresponding condition in conditions to newCondition and returns true
+// if the conditions are changed by this call.
+// conditions must be non-nil.
+// 1. if the condition of the specified type already exists (all fields of the existing condition are updated to
+// newCondition, LastTransitionTime is set to now if the new status differs from the old status)
+// 2. if a condition of the specified type does not exist (LastTransitionTime is set to now() if unset, and newCondition is appended)
+func SetStatusCondition(conditions *[]metav1.Condition, newCondition metav1.Condition) (changed bool) {
+ if conditions == nil {
+ return false
+ }
+ existingCondition := FindStatusCondition(*conditions, newCondition.Type)
+ if existingCondition == nil {
+ if newCondition.LastTransitionTime.IsZero() {
+ newCondition.LastTransitionTime = metav1.NewTime(time.Now())
+ }
+ *conditions = append(*conditions, newCondition)
+ return true
+ }
+
+ if existingCondition.Status != newCondition.Status {
+ existingCondition.Status = newCondition.Status
+ if !newCondition.LastTransitionTime.IsZero() {
+ existingCondition.LastTransitionTime = newCondition.LastTransitionTime
+ } else {
+ existingCondition.LastTransitionTime = metav1.NewTime(time.Now())
+ }
+ changed = true
+ }
+
+ if existingCondition.Reason != newCondition.Reason {
+ existingCondition.Reason = newCondition.Reason
+ changed = true
+ }
+ if existingCondition.Message != newCondition.Message {
+ existingCondition.Message = newCondition.Message
+ changed = true
+ }
+ if existingCondition.ObservedGeneration != newCondition.ObservedGeneration {
+ existingCondition.ObservedGeneration = newCondition.ObservedGeneration
+ changed = true
+ }
+
+ return changed
+}
+
+// RemoveStatusCondition removes the corresponding conditionType from conditions if present. Returns
+// true if it was present and got removed.
+// conditions must be non-nil.
+func RemoveStatusCondition(conditions *[]metav1.Condition, conditionType string) (removed bool) {
+ if conditions == nil || len(*conditions) == 0 {
+ return false
+ }
+ newConditions := make([]metav1.Condition, 0, len(*conditions)-1)
+ for _, condition := range *conditions {
+ if condition.Type != conditionType {
+ newConditions = append(newConditions, condition)
+ }
+ }
+
+ removed = len(*conditions) != len(newConditions)
+ *conditions = newConditions
+
+ return removed
+}
+
+// FindStatusCondition finds the conditionType in conditions.
+func FindStatusCondition(conditions []metav1.Condition, conditionType string) *metav1.Condition {
+ for i := range conditions {
+ if conditions[i].Type == conditionType {
+ return &conditions[i]
+ }
+ }
+
+ return nil
+}
+
+// IsStatusConditionTrue returns true when the conditionType is present and set to `metav1.ConditionTrue`
+func IsStatusConditionTrue(conditions []metav1.Condition, conditionType string) bool {
+ return IsStatusConditionPresentAndEqual(conditions, conditionType, metav1.ConditionTrue)
+}
+
+// IsStatusConditionFalse returns true when the conditionType is present and set to `metav1.ConditionFalse`
+func IsStatusConditionFalse(conditions []metav1.Condition, conditionType string) bool {
+ return IsStatusConditionPresentAndEqual(conditions, conditionType, metav1.ConditionFalse)
+}
+
+// IsStatusConditionPresentAndEqual returns true when conditionType is present and equal to status.
+func IsStatusConditionPresentAndEqual(conditions []metav1.Condition, conditionType string, status metav1.ConditionStatus) bool {
+ for _, condition := range conditions {
+ if condition.Type == conditionType {
+ return condition.Status == status
+ }
+ }
+ return false
+}
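
SetStatusCondition and its companions implement the usual pattern for maintaining []metav1.Condition in a controller's status. A short sketch of how they compose; the MyStatus type is a stand-in for a custom resource status:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// MyStatus stands in for a typical status struct with a Conditions field.
type MyStatus struct {
	Conditions []metav1.Condition
}

func main() {
	status := &MyStatus{}

	// The first call appends the condition and stamps LastTransitionTime.
	changed := meta.SetStatusCondition(&status.Conditions, metav1.Condition{
		Type:   "Ready",
		Status: metav1.ConditionTrue,
		Reason: "Reconciled",
	})
	fmt.Println(changed) // true

	// Setting an identical condition again reports no change.
	changed = meta.SetStatusCondition(&status.Conditions, metav1.Condition{
		Type:   "Ready",
		Status: metav1.ConditionTrue,
		Reason: "Reconciled",
	})
	fmt.Println(changed) // false

	fmt.Println(meta.IsStatusConditionTrue(status.Conditions, "Ready")) // true
}
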
diff --git a/vendor/github.com/google/gofuzz/doc.go b/vendor/k8s.io/apimachinery/pkg/api/meta/doc.go
similarity index 76%
rename from vendor/github.com/google/gofuzz/doc.go
rename to vendor/k8s.io/apimachinery/pkg/api/meta/doc.go
index 9f9956d4a6..a3b18a5c9a 100644
--- a/vendor/github.com/google/gofuzz/doc.go
+++ b/vendor/k8s.io/apimachinery/pkg/api/meta/doc.go
@@ -1,5 +1,5 @@
/*
-Copyright 2014 Google Inc. All rights reserved.
+Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,5 +14,6 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-// Package fuzz is a library for populating go objects with random values.
-package fuzz
+// Package meta provides functions for retrieving API metadata from objects
+// belonging to the Kubernetes API
+package meta
diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/errors.go b/vendor/k8s.io/apimachinery/pkg/api/meta/errors.go
new file mode 100644
index 0000000000..f36aa4ec22
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/api/meta/errors.go
@@ -0,0 +1,132 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package meta
+
+import (
+ "errors"
+ "fmt"
+
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/apimachinery/pkg/util/sets"
+)
+
+// AmbiguousResourceError is returned if the RESTMapper finds multiple matches for a resource
+type AmbiguousResourceError struct {
+ PartialResource schema.GroupVersionResource
+
+ MatchingResources []schema.GroupVersionResource
+ MatchingKinds []schema.GroupVersionKind
+}
+
+func (e *AmbiguousResourceError) Error() string {
+ switch {
+ case len(e.MatchingKinds) > 0 && len(e.MatchingResources) > 0:
+ return fmt.Sprintf("%v matches multiple resources %v and kinds %v", e.PartialResource, e.MatchingResources, e.MatchingKinds)
+ case len(e.MatchingKinds) > 0:
+ return fmt.Sprintf("%v matches multiple kinds %v", e.PartialResource, e.MatchingKinds)
+ case len(e.MatchingResources) > 0:
+ return fmt.Sprintf("%v matches multiple resources %v", e.PartialResource, e.MatchingResources)
+ }
+ return fmt.Sprintf("%v matches multiple resources or kinds", e.PartialResource)
+}
+
+func (*AmbiguousResourceError) Is(target error) bool {
+ _, ok := target.(*AmbiguousResourceError)
+ return ok
+}
+
+// AmbiguousKindError is returned if the RESTMapper finds multiple matches for a kind
+type AmbiguousKindError struct {
+ PartialKind schema.GroupVersionKind
+
+ MatchingResources []schema.GroupVersionResource
+ MatchingKinds []schema.GroupVersionKind
+}
+
+func (e *AmbiguousKindError) Error() string {
+ switch {
+ case len(e.MatchingKinds) > 0 && len(e.MatchingResources) > 0:
+ return fmt.Sprintf("%v matches multiple resources %v and kinds %v", e.PartialKind, e.MatchingResources, e.MatchingKinds)
+ case len(e.MatchingKinds) > 0:
+ return fmt.Sprintf("%v matches multiple kinds %v", e.PartialKind, e.MatchingKinds)
+ case len(e.MatchingResources) > 0:
+ return fmt.Sprintf("%v matches multiple resources %v", e.PartialKind, e.MatchingResources)
+ }
+ return fmt.Sprintf("%v matches multiple resources or kinds", e.PartialKind)
+}
+
+func (*AmbiguousKindError) Is(target error) bool {
+ _, ok := target.(*AmbiguousKindError)
+ return ok
+}
+
+func IsAmbiguousError(err error) bool {
+ if err == nil {
+ return false
+ }
+ return errors.Is(err, &AmbiguousResourceError{}) || errors.Is(err, &AmbiguousKindError{})
+}
+
+// NoResourceMatchError is returned if the RESTMapper can't find any match for a resource
+type NoResourceMatchError struct {
+ PartialResource schema.GroupVersionResource
+}
+
+func (e *NoResourceMatchError) Error() string {
+ return fmt.Sprintf("no matches for %v", e.PartialResource)
+}
+
+func (*NoResourceMatchError) Is(target error) bool {
+ _, ok := target.(*NoResourceMatchError)
+ return ok
+}
+
+// NoKindMatchError is returned if the RESTMapper can't find any match for a kind
+type NoKindMatchError struct {
+ // GroupKind is the API group and kind that was searched
+ GroupKind schema.GroupKind
+ // SearchedVersions is the optional list of versions the search was restricted to
+ SearchedVersions []string
+}
+
+func (e *NoKindMatchError) Error() string {
+ searchedVersions := sets.NewString()
+ for _, v := range e.SearchedVersions {
+ searchedVersions.Insert(schema.GroupVersion{Group: e.GroupKind.Group, Version: v}.String())
+ }
+
+ switch len(searchedVersions) {
+ case 0:
+ return fmt.Sprintf("no matches for kind %q in group %q", e.GroupKind.Kind, e.GroupKind.Group)
+ case 1:
+ return fmt.Sprintf("no matches for kind %q in version %q", e.GroupKind.Kind, searchedVersions.List()[0])
+ default:
+ return fmt.Sprintf("no matches for kind %q in versions %q", e.GroupKind.Kind, searchedVersions.List())
+ }
+}
+
+func (*NoKindMatchError) Is(target error) bool {
+ _, ok := target.(*NoKindMatchError)
+ return ok
+}
+
+func IsNoMatchError(err error) bool {
+ if err == nil {
+ return false
+ }
+ return errors.Is(err, &NoResourceMatchError{}) || errors.Is(err, &NoKindMatchError{})
+}
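
A sketch of how these error helpers are typically consumed (illustrative, not part of the vendored files); the empty MultiRESTMapper defined elsewhere in this package is used only to keep the example self-contained:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

func classify(mapper meta.RESTMapper, gk schema.GroupKind) {
	_, err := mapper.RESTMapping(gk)
	switch {
	case err == nil:
		fmt.Println("mapping found")
	case meta.IsNoMatchError(err):
		// The type may simply not be registered or served yet; callers often retry.
		fmt.Println("no match:", err)
	case meta.IsAmbiguousError(err):
		fmt.Println("ambiguous:", err)
	default:
		fmt.Println("lookup failed:", err)
	}
}

func main() {
	// An empty mapper always reports "no match".
	classify(meta.MultiRESTMapper{}, schema.GroupKind{Group: "apps", Kind: "Deployment"})
}
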
diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/firsthit_restmapper.go b/vendor/k8s.io/apimachinery/pkg/api/meta/firsthit_restmapper.go
new file mode 100644
index 0000000000..1bc816fe3f
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/api/meta/firsthit_restmapper.go
@@ -0,0 +1,105 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package meta
+
+import (
+ "fmt"
+
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ utilerrors "k8s.io/apimachinery/pkg/util/errors"
+)
+
+var (
+ _ ResettableRESTMapper = &FirstHitRESTMapper{}
+)
+
+// FirstHitRESTMapper is a wrapper for multiple RESTMappers which returns the
+// first successful result for the singular requests
+type FirstHitRESTMapper struct {
+ MultiRESTMapper
+}
+
+func (m FirstHitRESTMapper) String() string {
+ return fmt.Sprintf("FirstHitRESTMapper{\n\t%v\n}", m.MultiRESTMapper)
+}
+
+func (m FirstHitRESTMapper) ResourceFor(resource schema.GroupVersionResource) (schema.GroupVersionResource, error) {
+ errors := []error{}
+ for _, t := range m.MultiRESTMapper {
+ ret, err := t.ResourceFor(resource)
+ if err == nil {
+ return ret, nil
+ }
+ errors = append(errors, err)
+ }
+
+ return schema.GroupVersionResource{}, collapseAggregateErrors(errors)
+}
+
+func (m FirstHitRESTMapper) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) {
+ errors := []error{}
+ for _, t := range m.MultiRESTMapper {
+ ret, err := t.KindFor(resource)
+ if err == nil {
+ return ret, nil
+ }
+ errors = append(errors, err)
+ }
+
+ return schema.GroupVersionKind{}, collapseAggregateErrors(errors)
+}
+
+// RESTMapping provides the REST mapping for the resource based on the
+// kind and version. This implementation supports multiple REST schemas and
+// returns the first match.
+func (m FirstHitRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (*RESTMapping, error) {
+ errors := []error{}
+ for _, t := range m.MultiRESTMapper {
+ ret, err := t.RESTMapping(gk, versions...)
+ if err == nil {
+ return ret, nil
+ }
+ errors = append(errors, err)
+ }
+
+ return nil, collapseAggregateErrors(errors)
+}
+
+func (m FirstHitRESTMapper) Reset() {
+ m.MultiRESTMapper.Reset()
+}
+
+// collapseAggregateErrors returns the minimal errors. it handles empty as nil, handles one item in a list
+// by returning the item, and collapses all NoMatchErrors to a single one (since they should all be the same)
+func collapseAggregateErrors(errors []error) error {
+ if len(errors) == 0 {
+ return nil
+ }
+ if len(errors) == 1 {
+ return errors[0]
+ }
+
+ allNoMatchErrors := true
+ for _, err := range errors {
+ allNoMatchErrors = allNoMatchErrors && IsNoMatchError(err)
+ }
+ if allNoMatchErrors {
+ return errors[0]
+ }
+
+ return utilerrors.NewAggregate(errors)
+}
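
Illustrative sketch of the first-hit semantics (not part of the vendored files): with real nested mappers the first successful answer wins; with the empty mappers used here every lookup fails and the NoMatch errors collapse into a single error:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	first := meta.FirstHitRESTMapper{
		MultiRESTMapper: meta.MultiRESTMapper{
			meta.MultiRESTMapper{}, // stand-ins for real mappers
			meta.MultiRESTMapper{},
		},
	}

	_, err := first.KindFor(schema.GroupVersionResource{Resource: "deployments"})
	// Both nested lookups failed with NoMatch, so the errors collapse to one.
	fmt.Println("collapsed to a single NoMatch error:", meta.IsNoMatchError(err))
}
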
diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/help.go b/vendor/k8s.io/apimachinery/pkg/api/meta/help.go
new file mode 100644
index 0000000000..468afd0e9e
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/api/meta/help.go
@@ -0,0 +1,334 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package meta
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "sync"
+
+ "k8s.io/apimachinery/pkg/conversion"
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+var (
+ // isListCache maintains a cache of types that are checked for lists
+ // which is used by IsListType.
+ // TODO: remove and replace with an interface check
+ isListCache = struct {
+ lock sync.RWMutex
+ byType map[reflect.Type]bool
+ }{
+ byType: make(map[reflect.Type]bool, 1024),
+ }
+)
+
+// IsListType returns true if the provided Object has a slice called Items.
+// TODO: Replace the code in this check with an interface comparison by
+// creating and enforcing that lists implement a list accessor.
+func IsListType(obj runtime.Object) bool {
+ switch t := obj.(type) {
+ case runtime.Unstructured:
+ return t.IsList()
+ }
+ t := reflect.TypeOf(obj)
+
+ isListCache.lock.RLock()
+ ok, exists := isListCache.byType[t]
+ isListCache.lock.RUnlock()
+
+ if !exists {
+ _, err := getItemsPtr(obj)
+ ok = err == nil
+
+ // cache only the first 1024 types
+ isListCache.lock.Lock()
+ if len(isListCache.byType) < 1024 {
+ isListCache.byType[t] = ok
+ }
+ isListCache.lock.Unlock()
+ }
+
+ return ok
+}
+
+var (
+ errExpectFieldItems = errors.New("no Items field in this object")
+ errExpectSliceItems = errors.New("Items field must be a slice of objects")
+)
+
+// GetItemsPtr returns a pointer to the list object's Items member.
+// If 'list' doesn't have an Items member, it's not really a list type
+// and an error will be returned.
+// This function will either return a pointer to a slice, or an error, but not both.
+// TODO: this will be replaced with an interface in the future
+func GetItemsPtr(list runtime.Object) (interface{}, error) {
+ obj, err := getItemsPtr(list)
+ if err != nil {
+ return nil, fmt.Errorf("%T is not a list: %v", list, err)
+ }
+ return obj, nil
+}
+
+// getItemsPtr returns a pointer to the list object's Items member or an error.
+func getItemsPtr(list runtime.Object) (interface{}, error) {
+ v, err := conversion.EnforcePtr(list)
+ if err != nil {
+ return nil, err
+ }
+
+ items := v.FieldByName("Items")
+ if !items.IsValid() {
+ return nil, errExpectFieldItems
+ }
+ switch items.Kind() {
+ case reflect.Interface, reflect.Pointer:
+ target := reflect.TypeOf(items.Interface()).Elem()
+ if target.Kind() != reflect.Slice {
+ return nil, errExpectSliceItems
+ }
+ return items.Interface(), nil
+ case reflect.Slice:
+ return items.Addr().Interface(), nil
+ default:
+ return nil, errExpectSliceItems
+ }
+}
+
+// EachListItem invokes fn on each runtime.Object in the list. Any error immediately terminates
+// the loop.
+//
+// If items passed to fn are retained for different durations, and you want to avoid
+// retaining all items in obj as long as any item is referenced, use EachListItemWithAlloc instead.
+func EachListItem(obj runtime.Object, fn func(runtime.Object) error) error {
+ return eachListItem(obj, fn, false)
+}
+
+// EachListItemWithAlloc works like EachListItem, but avoids retaining references to the items slice in obj.
+// It does this by making a shallow copy of non-pointer items in obj.
+//
+// If the items passed to fn are not retained, or are retained for the same duration, use EachListItem instead for memory efficiency.
+func EachListItemWithAlloc(obj runtime.Object, fn func(runtime.Object) error) error {
+ return eachListItem(obj, fn, true)
+}
+
+// allocNew: Whether shallow copy is required when the elements in Object.Items are struct
+func eachListItem(obj runtime.Object, fn func(runtime.Object) error, allocNew bool) error {
+ if unstructured, ok := obj.(runtime.Unstructured); ok {
+ if allocNew {
+ return unstructured.EachListItemWithAlloc(fn)
+ }
+ return unstructured.EachListItem(fn)
+ }
+ // TODO: Change to an interface call?
+ itemsPtr, err := GetItemsPtr(obj)
+ if err != nil {
+ return err
+ }
+ items, err := conversion.EnforcePtr(itemsPtr)
+ if err != nil {
+ return err
+ }
+ len := items.Len()
+ if len == 0 {
+ return nil
+ }
+ takeAddr := false
+ if elemType := items.Type().Elem(); elemType.Kind() != reflect.Pointer && elemType.Kind() != reflect.Interface {
+ if !items.Index(0).CanAddr() {
+ return fmt.Errorf("unable to take address of items in %T for EachListItem", obj)
+ }
+ takeAddr = true
+ }
+
+ for i := 0; i < len; i++ {
+ raw := items.Index(i)
+ if takeAddr {
+ if allocNew {
+ // shallow copy to avoid retaining a reference to the original list item
+ itemCopy := reflect.New(raw.Type())
+ // assign to itemCopy and type-assert
+ itemCopy.Elem().Set(raw)
+ // reflect.New will guarantee that itemCopy must be a pointer.
+ raw = itemCopy
+ } else {
+ raw = raw.Addr()
+ }
+ }
+ // raw must be a pointer or an interface
+ // allocate a pointer is cheap
+ switch item := raw.Interface().(type) {
+ case *runtime.RawExtension:
+ if err := fn(item.Object); err != nil {
+ return err
+ }
+ case runtime.Object:
+ if err := fn(item); err != nil {
+ return err
+ }
+ default:
+ obj, ok := item.(runtime.Object)
+ if !ok {
+ return fmt.Errorf("%v: item[%v]: Expected object, got %#v(%s)", obj, i, raw.Interface(), raw.Kind())
+ }
+ if err := fn(obj); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+// ExtractList returns obj's Items element as an array of runtime.Objects.
+// Returns an error if obj is not a List type (does not have an Items member).
+//
+// If items in the returned list are retained for different durations, and you want to avoid
+// retaining all items in obj as long as any item is referenced, use ExtractListWithAlloc instead.
+func ExtractList(obj runtime.Object) ([]runtime.Object, error) {
+ return extractList(obj, false)
+}
+
+// ExtractListWithAlloc works like ExtractList, but avoids retaining references to the items slice in obj.
+// It does this by making a shallow copy of non-pointer items in obj.
+//
+// If the items in the returned list are not retained, or are retained for the same duration, use ExtractList instead for memory efficiency.
+func ExtractListWithAlloc(obj runtime.Object) ([]runtime.Object, error) {
+ return extractList(obj, true)
+}
+
+// allocNew: Whether shallow copy is required when the elements in Object.Items are struct
+func extractList(obj runtime.Object, allocNew bool) ([]runtime.Object, error) {
+ itemsPtr, err := GetItemsPtr(obj)
+ if err != nil {
+ return nil, err
+ }
+ items, err := conversion.EnforcePtr(itemsPtr)
+ if err != nil {
+ return nil, err
+ }
+ if items.IsNil() {
+ return nil, nil
+ }
+ list := make([]runtime.Object, items.Len())
+ if len(list) == 0 {
+ return list, nil
+ }
+ elemType := items.Type().Elem()
+ isRawExtension := elemType == rawExtensionObjectType
+ implementsObject := elemType.Implements(objectType)
+ for i := range list {
+ raw := items.Index(i)
+ switch {
+ case isRawExtension:
+ item := raw.Interface().(runtime.RawExtension)
+ switch {
+ case item.Object != nil:
+ list[i] = item.Object
+ case item.Raw != nil:
+ // TODO: Set ContentEncoding and ContentType correctly.
+ list[i] = &runtime.Unknown{Raw: item.Raw}
+ default:
+ list[i] = nil
+ }
+ case implementsObject:
+ list[i] = raw.Interface().(runtime.Object)
+ case allocNew:
+ // shallow copy to avoid retaining a reference to the original list item
+ itemCopy := reflect.New(raw.Type())
+ // assign to itemCopy and type-assert
+ itemCopy.Elem().Set(raw)
+ var ok bool
+ // reflect.New will guarantee that itemCopy must be a pointer.
+ if list[i], ok = itemCopy.Interface().(runtime.Object); !ok {
+ return nil, fmt.Errorf("%v: item[%v]: Expected object, got %#v(%s)", obj, i, raw.Interface(), raw.Kind())
+ }
+ default:
+ var found bool
+ if list[i], found = raw.Addr().Interface().(runtime.Object); !found {
+ return nil, fmt.Errorf("%v: item[%v]: Expected object, got %#v(%s)", obj, i, raw.Interface(), raw.Kind())
+ }
+ }
+ }
+ return list, nil
+}
+
+var (
+ // objectSliceType is the type of a slice of Objects
+ objectSliceType = reflect.TypeOf([]runtime.Object{})
+ objectType = reflect.TypeOf((*runtime.Object)(nil)).Elem()
+ rawExtensionObjectType = reflect.TypeOf(runtime.RawExtension{})
+)
+
+// LenList returns the length of this list or 0 if it is not a list.
+func LenList(list runtime.Object) int {
+ itemsPtr, err := GetItemsPtr(list)
+ if err != nil {
+ return 0
+ }
+ items, err := conversion.EnforcePtr(itemsPtr)
+ if err != nil {
+ return 0
+ }
+ return items.Len()
+}
+
+// SetList sets the given list object's Items member to have the elements given in
+// objects.
+// Returns an error if list is not a List type (does not have an Items member),
+// or if any of the objects are not of the right type.
+func SetList(list runtime.Object, objects []runtime.Object) error {
+ itemsPtr, err := GetItemsPtr(list)
+ if err != nil {
+ return err
+ }
+ items, err := conversion.EnforcePtr(itemsPtr)
+ if err != nil {
+ return err
+ }
+ if items.Type() == objectSliceType {
+ items.Set(reflect.ValueOf(objects))
+ return nil
+ }
+ slice := reflect.MakeSlice(items.Type(), len(objects), len(objects))
+ for i := range objects {
+ dest := slice.Index(i)
+ if dest.Type() == rawExtensionObjectType {
+ dest = dest.FieldByName("Object")
+ }
+
+ // check to see if you're directly assignable
+ if reflect.TypeOf(objects[i]).AssignableTo(dest.Type()) {
+ dest.Set(reflect.ValueOf(objects[i]))
+ continue
+ }
+
+ src, err := conversion.EnforcePtr(objects[i])
+ if err != nil {
+ return err
+ }
+ if src.Type().AssignableTo(dest.Type()) {
+ dest.Set(src)
+ } else if src.Type().ConvertibleTo(dest.Type()) {
+ dest.Set(src.Convert(dest.Type()))
+ } else {
+ return fmt.Errorf("item[%d]: can't assign or convert %v into %v", i, src.Type(), dest.Type())
+ }
+ }
+ items.Set(slice)
+ return nil
+}
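
A usage sketch for the list helpers above (not part of the vendored files); it assumes metav1.PartialObjectMetadataList from the same apimachinery module as a convenient concrete List type:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	list := &metav1.PartialObjectMetadataList{
		Items: []metav1.PartialObjectMetadata{
			{ObjectMeta: metav1.ObjectMeta{Name: "a"}},
			{ObjectMeta: metav1.ObjectMeta{Name: "b"}},
		},
	}

	fmt.Println("is list:", meta.IsListType(list), "len:", meta.LenList(list))

	// EachListItem walks the Items slice in place, handing each item to fn.
	_ = meta.EachListItem(list, func(obj runtime.Object) error {
		acc, err := meta.Accessor(obj)
		if err != nil {
			return err
		}
		fmt.Println("item:", acc.GetName())
		return nil
	})

	// ExtractList flattens the Items into a []runtime.Object.
	objs, _ := meta.ExtractList(list)
	fmt.Println("extracted", len(objs), "objects")
}
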
diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/interfaces.go b/vendor/k8s.io/apimachinery/pkg/api/meta/interfaces.go
new file mode 100644
index 0000000000..a35ce3bd0a
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/api/meta/interfaces.go
@@ -0,0 +1,143 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package meta
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/apimachinery/pkg/types"
+)
+
+type ListMetaAccessor interface {
+ GetListMeta() List
+}
+
+// List lets you work with list metadata from any of the versioned or
+// internal API objects. Attempting to set or retrieve a field on an object that does
+// not support that field will be a no-op and return a default value.
+type List metav1.ListInterface
+
+// Type exposes the type and APIVersion of versioned or internal API objects.
+type Type metav1.Type
+
+// MetadataAccessor lets you work with object and list metadata from any of the versioned or
+// internal API objects. Attempting to set or retrieve a field on an object that does
+// not support that field (Name, UID, Namespace on lists) will be a no-op and return
+// a default value.
+//
+// MetadataAccessor exposes Interface in a way that can be used with multiple objects.
+type MetadataAccessor interface {
+ APIVersion(obj runtime.Object) (string, error)
+ SetAPIVersion(obj runtime.Object, version string) error
+
+ Kind(obj runtime.Object) (string, error)
+ SetKind(obj runtime.Object, kind string) error
+
+ Namespace(obj runtime.Object) (string, error)
+ SetNamespace(obj runtime.Object, namespace string) error
+
+ Name(obj runtime.Object) (string, error)
+ SetName(obj runtime.Object, name string) error
+
+ GenerateName(obj runtime.Object) (string, error)
+ SetGenerateName(obj runtime.Object, name string) error
+
+ UID(obj runtime.Object) (types.UID, error)
+ SetUID(obj runtime.Object, uid types.UID) error
+
+ SelfLink(obj runtime.Object) (string, error)
+ SetSelfLink(obj runtime.Object, selfLink string) error
+
+ Labels(obj runtime.Object) (map[string]string, error)
+ SetLabels(obj runtime.Object, labels map[string]string) error
+
+ Annotations(obj runtime.Object) (map[string]string, error)
+ SetAnnotations(obj runtime.Object, annotations map[string]string) error
+
+ Continue(obj runtime.Object) (string, error)
+ SetContinue(obj runtime.Object, c string) error
+
+ runtime.ResourceVersioner
+}
+
+type RESTScopeName string
+
+const (
+ RESTScopeNameNamespace RESTScopeName = "namespace"
+ RESTScopeNameRoot RESTScopeName = "root"
+)
+
+// RESTScope contains the information needed to deal with REST resources that are in a resource hierarchy
+type RESTScope interface {
+ // Name of the scope
+ Name() RESTScopeName
+}
+
+// RESTMapping contains the information needed to deal with objects of a specific
+// resource and kind in a RESTful manner.
+type RESTMapping struct {
+ // Resource is the GroupVersionResource (location) for this endpoint
+ Resource schema.GroupVersionResource
+
+ // GroupVersionKind is the GroupVersionKind (data format) to submit to this endpoint
+ GroupVersionKind schema.GroupVersionKind
+
+ // Scope contains the information needed to deal with REST Resources that are in a resource hierarchy
+ Scope RESTScope
+}
+
+// RESTMapper allows clients to map resources to kind, and map kind and version
+// to interfaces for manipulating those objects. It is primarily intended for
+// consumers of Kubernetes compatible REST APIs as defined in docs/devel/api-conventions.md.
+//
+// The Kubernetes API provides versioned resources and object kinds which are scoped
+// to API groups. In other words, kinds and resources should not be assumed to be
+// unique across groups.
+//
+// TODO: split into sub-interfaces
+type RESTMapper interface {
+ // KindFor takes a partial resource and returns the single match. Returns an error if there are multiple matches
+ KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error)
+
+ // KindsFor takes a partial resource and returns the list of potential kinds in priority order
+ KindsFor(resource schema.GroupVersionResource) ([]schema.GroupVersionKind, error)
+
+ // ResourceFor takes a partial resource and returns the single match. Returns an error if there are multiple matches
+ ResourceFor(input schema.GroupVersionResource) (schema.GroupVersionResource, error)
+
+ // ResourcesFor takes a partial resource and returns the list of potential resources in priority order
+ ResourcesFor(input schema.GroupVersionResource) ([]schema.GroupVersionResource, error)
+
+ // RESTMapping identifies a preferred resource mapping for the provided group kind.
+ RESTMapping(gk schema.GroupKind, versions ...string) (*RESTMapping, error)
+ // RESTMappings returns all resource mappings for the provided group kind if no
+ // version search is provided. Otherwise identifies a preferred resource mapping for
+ // the provided version(s).
+ RESTMappings(gk schema.GroupKind, versions ...string) ([]*RESTMapping, error)
+
+ ResourceSingularizer(resource string) (singular string, err error)
+}
+
+// ResettableRESTMapper is a RESTMapper which is capable of resetting itself
+// from discovery.
+// All rest mappers that delegate to other rest mappers must implement this interface and dynamically
+// check if the delegate mapper supports the Reset() operation.
+type ResettableRESTMapper interface {
+ RESTMapper
+ Reset()
+}
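
A sketch of how a RESTMapping result is typically consumed (not part of the vendored files); it assumes NewDefaultRESTMapper and its Add method from restmapper.go, which appears only partially further down in this diff:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	appsV1 := schema.GroupVersion{Group: "apps", Version: "v1"}
	mapper := meta.NewDefaultRESTMapper([]schema.GroupVersion{appsV1})
	mapper.Add(appsV1.WithKind("Deployment"), meta.RESTScopeNamespace)

	mapping, err := mapper.RESTMapping(schema.GroupKind{Group: "apps", Kind: "Deployment"})
	if err != nil {
		panic(err)
	}
	// The mapping carries what a client needs to build a request: the plural
	// resource, the kind to send, and whether the endpoint is namespaced.
	fmt.Println("resource:", mapping.Resource)
	fmt.Println("namespaced:", mapping.Scope.Name() == meta.RESTScopeNameNamespace)
}
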
diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/lazy.go b/vendor/k8s.io/apimachinery/pkg/api/meta/lazy.go
new file mode 100644
index 0000000000..a4298114b6
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/api/meta/lazy.go
@@ -0,0 +1,112 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package meta
+
+import (
+ "sync"
+
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// lazyObject defers loading the mapper and typer until necessary.
+type lazyObject struct {
+ loader func() (RESTMapper, error)
+
+ lock sync.Mutex
+ loaded bool
+ err error
+ mapper RESTMapper
+}
+
+// NewLazyRESTMapperLoader handles unrecoverable errors when creating a RESTMapper / ObjectTyper by
+// returning those initialization errors when the interface methods are invoked. This defers the
+// initialization and any server calls until a client actually needs to perform the action.
+func NewLazyRESTMapperLoader(fn func() (RESTMapper, error)) RESTMapper {
+ obj := &lazyObject{loader: fn}
+ return obj
+}
+
+// init lazily loads the mapper and typer, returning an error if initialization has failed.
+func (o *lazyObject) init() error {
+ o.lock.Lock()
+ defer o.lock.Unlock()
+ if o.loaded {
+ return o.err
+ }
+ o.mapper, o.err = o.loader()
+ o.loaded = true
+ return o.err
+}
+
+var _ ResettableRESTMapper = &lazyObject{}
+
+func (o *lazyObject) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) {
+ if err := o.init(); err != nil {
+ return schema.GroupVersionKind{}, err
+ }
+ return o.mapper.KindFor(resource)
+}
+
+func (o *lazyObject) KindsFor(resource schema.GroupVersionResource) ([]schema.GroupVersionKind, error) {
+ if err := o.init(); err != nil {
+ return []schema.GroupVersionKind{}, err
+ }
+ return o.mapper.KindsFor(resource)
+}
+
+func (o *lazyObject) ResourceFor(input schema.GroupVersionResource) (schema.GroupVersionResource, error) {
+ if err := o.init(); err != nil {
+ return schema.GroupVersionResource{}, err
+ }
+ return o.mapper.ResourceFor(input)
+}
+
+func (o *lazyObject) ResourcesFor(input schema.GroupVersionResource) ([]schema.GroupVersionResource, error) {
+ if err := o.init(); err != nil {
+ return []schema.GroupVersionResource{}, err
+ }
+ return o.mapper.ResourcesFor(input)
+}
+
+func (o *lazyObject) RESTMapping(gk schema.GroupKind, versions ...string) (*RESTMapping, error) {
+ if err := o.init(); err != nil {
+ return nil, err
+ }
+ return o.mapper.RESTMapping(gk, versions...)
+}
+
+func (o *lazyObject) RESTMappings(gk schema.GroupKind, versions ...string) ([]*RESTMapping, error) {
+ if err := o.init(); err != nil {
+ return nil, err
+ }
+ return o.mapper.RESTMappings(gk, versions...)
+}
+
+func (o *lazyObject) ResourceSingularizer(resource string) (singular string, err error) {
+ if err := o.init(); err != nil {
+ return "", err
+ }
+ return o.mapper.ResourceSingularizer(resource)
+}
+
+func (o *lazyObject) Reset() {
+ o.lock.Lock()
+ defer o.lock.Unlock()
+ if o.loaded && o.err == nil {
+ MaybeResetRESTMapper(o.mapper)
+ }
+}
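
A sketch of the lazy loader (not part of the vendored files); the counter and the empty MultiRESTMapper are invented to show that the loader function runs once and its result is cached:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	loads := 0
	mapper := meta.NewLazyRESTMapperLoader(func() (meta.RESTMapper, error) {
		loads++ // real callers would hit discovery or build a scheme-backed mapper here
		return meta.MultiRESTMapper{}, nil
	})

	// The loader is invoked on first use only; subsequent calls reuse the result.
	_, _ = mapper.KindFor(schema.GroupVersionResource{Resource: "pods"})
	_, _ = mapper.KindFor(schema.GroupVersionResource{Resource: "pods"})
	fmt.Println("loader invoked", loads, "time(s)") // 1
}
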
diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go b/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go
new file mode 100644
index 0000000000..2551f07f5c
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go
@@ -0,0 +1,643 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package meta
+
+import (
+ "fmt"
+ "reflect"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/conversion"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/klog/v2"
+)
+
+// errNotList is returned when an object implements the Object style interfaces but not the List style
+// interfaces.
+var errNotList = fmt.Errorf("object does not implement the List interfaces")
+
+var errNotCommon = fmt.Errorf("object does not implement the common interface for accessing the SelfLink")
+
+// CommonAccessor returns a Common interface for the provided object or an error if the object does
+// not provide List.
+func CommonAccessor(obj interface{}) (metav1.Common, error) {
+ switch t := obj.(type) {
+ case List:
+ return t, nil
+ case ListMetaAccessor:
+ if m := t.GetListMeta(); m != nil {
+ return m, nil
+ }
+ return nil, errNotCommon
+ case metav1.ListMetaAccessor:
+ if m := t.GetListMeta(); m != nil {
+ return m, nil
+ }
+ return nil, errNotCommon
+ case metav1.Object:
+ return t, nil
+ case metav1.ObjectMetaAccessor:
+ if m := t.GetObjectMeta(); m != nil {
+ return m, nil
+ }
+ return nil, errNotCommon
+ default:
+ return nil, errNotCommon
+ }
+}
+
+// ListAccessor returns a List interface for the provided object or an error if the object does
+// not provide List.
+// IMPORTANT: Objects are NOT a superset of lists. Do not use this check to determine whether an
+// object *is* a List.
+func ListAccessor(obj interface{}) (List, error) {
+ switch t := obj.(type) {
+ case List:
+ return t, nil
+ case ListMetaAccessor:
+ if m := t.GetListMeta(); m != nil {
+ return m, nil
+ }
+ return nil, errNotList
+ case metav1.ListMetaAccessor:
+ if m := t.GetListMeta(); m != nil {
+ return m, nil
+ }
+ return nil, errNotList
+ default:
+ return nil, errNotList
+ }
+}
+
+// errNotObject is returned when an object implements the List style interfaces but not the Object style
+// interfaces.
+var errNotObject = fmt.Errorf("object does not implement the Object interfaces")
+
+// Accessor takes an arbitrary object pointer and returns meta.Interface.
+// obj must be a pointer to an API type. An error is returned if the minimum
+// required fields are missing. Fields that are not required return the default
+// value and are a no-op if set.
+func Accessor(obj interface{}) (metav1.Object, error) {
+ switch t := obj.(type) {
+ case metav1.Object:
+ return t, nil
+ case metav1.ObjectMetaAccessor:
+ if m := t.GetObjectMeta(); m != nil {
+ return m, nil
+ }
+ return nil, errNotObject
+ default:
+ return nil, errNotObject
+ }
+}
+
+// AsPartialObjectMetadata takes the metav1 interface and returns a partial object.
+// TODO: consider making this solely a conversion action.
+func AsPartialObjectMetadata(m metav1.Object) *metav1.PartialObjectMetadata {
+ switch t := m.(type) {
+ case *metav1.ObjectMeta:
+ return &metav1.PartialObjectMetadata{ObjectMeta: *t}
+ default:
+ return &metav1.PartialObjectMetadata{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: m.GetName(),
+ GenerateName: m.GetGenerateName(),
+ Namespace: m.GetNamespace(),
+ SelfLink: m.GetSelfLink(),
+ UID: m.GetUID(),
+ ResourceVersion: m.GetResourceVersion(),
+ Generation: m.GetGeneration(),
+ CreationTimestamp: m.GetCreationTimestamp(),
+ DeletionTimestamp: m.GetDeletionTimestamp(),
+ DeletionGracePeriodSeconds: m.GetDeletionGracePeriodSeconds(),
+ Labels: m.GetLabels(),
+ Annotations: m.GetAnnotations(),
+ OwnerReferences: m.GetOwnerReferences(),
+ Finalizers: m.GetFinalizers(),
+ ManagedFields: m.GetManagedFields(),
+ },
+ }
+ }
+}
+
+// TypeAccessor returns an interface that allows retrieving and modifying the APIVersion
+// and Kind of an in-memory internal object.
+// TODO: this interface is used to test code that does not have ObjectMeta or ListMeta
+// in round tripping (objects which can use apiVersion/kind, but do not fit the Kube
+// api conventions).
+func TypeAccessor(obj interface{}) (Type, error) {
+ if typed, ok := obj.(runtime.Object); ok {
+ return objectAccessor{typed}, nil
+ }
+ v, err := conversion.EnforcePtr(obj)
+ if err != nil {
+ return nil, err
+ }
+ t := v.Type()
+ if v.Kind() != reflect.Struct {
+ return nil, fmt.Errorf("expected struct, but got %v: %v (%#v)", v.Kind(), t, v.Interface())
+ }
+
+ typeMeta := v.FieldByName("TypeMeta")
+ if !typeMeta.IsValid() {
+ return nil, fmt.Errorf("struct %v lacks embedded TypeMeta type", t)
+ }
+ a := &genericAccessor{}
+ if err := extractFromTypeMeta(typeMeta, a); err != nil {
+ return nil, fmt.Errorf("unable to find type fields on %#v: %v", typeMeta, err)
+ }
+ return a, nil
+}
+
+type objectAccessor struct {
+ runtime.Object
+}
+
+func (obj objectAccessor) GetKind() string {
+ return obj.GetObjectKind().GroupVersionKind().Kind
+}
+
+func (obj objectAccessor) SetKind(kind string) {
+ gvk := obj.GetObjectKind().GroupVersionKind()
+ gvk.Kind = kind
+ obj.GetObjectKind().SetGroupVersionKind(gvk)
+}
+
+func (obj objectAccessor) GetAPIVersion() string {
+ return obj.GetObjectKind().GroupVersionKind().GroupVersion().String()
+}
+
+func (obj objectAccessor) SetAPIVersion(version string) {
+ gvk := obj.GetObjectKind().GroupVersionKind()
+ gv, err := schema.ParseGroupVersion(version)
+ if err != nil {
+ gv = schema.GroupVersion{Version: version}
+ }
+ gvk.Group, gvk.Version = gv.Group, gv.Version
+ obj.GetObjectKind().SetGroupVersionKind(gvk)
+}
+
+// NewAccessor returns a MetadataAccessor that can retrieve
+// or manipulate resource version on objects derived from core API
+// metadata concepts.
+func NewAccessor() MetadataAccessor {
+ return resourceAccessor{}
+}
+
+// resourceAccessor implements ResourceVersioner and SelfLinker.
+type resourceAccessor struct{}
+
+func (resourceAccessor) Kind(obj runtime.Object) (string, error) {
+ return objectAccessor{obj}.GetKind(), nil
+}
+
+func (resourceAccessor) SetKind(obj runtime.Object, kind string) error {
+ objectAccessor{obj}.SetKind(kind)
+ return nil
+}
+
+func (resourceAccessor) APIVersion(obj runtime.Object) (string, error) {
+ return objectAccessor{obj}.GetAPIVersion(), nil
+}
+
+func (resourceAccessor) SetAPIVersion(obj runtime.Object, version string) error {
+ objectAccessor{obj}.SetAPIVersion(version)
+ return nil
+}
+
+func (resourceAccessor) Namespace(obj runtime.Object) (string, error) {
+ accessor, err := Accessor(obj)
+ if err != nil {
+ return "", err
+ }
+ return accessor.GetNamespace(), nil
+}
+
+func (resourceAccessor) SetNamespace(obj runtime.Object, namespace string) error {
+ accessor, err := Accessor(obj)
+ if err != nil {
+ return err
+ }
+ accessor.SetNamespace(namespace)
+ return nil
+}
+
+func (resourceAccessor) Name(obj runtime.Object) (string, error) {
+ accessor, err := Accessor(obj)
+ if err != nil {
+ return "", err
+ }
+ return accessor.GetName(), nil
+}
+
+func (resourceAccessor) SetName(obj runtime.Object, name string) error {
+ accessor, err := Accessor(obj)
+ if err != nil {
+ return err
+ }
+ accessor.SetName(name)
+ return nil
+}
+
+func (resourceAccessor) GenerateName(obj runtime.Object) (string, error) {
+ accessor, err := Accessor(obj)
+ if err != nil {
+ return "", err
+ }
+ return accessor.GetGenerateName(), nil
+}
+
+func (resourceAccessor) SetGenerateName(obj runtime.Object, name string) error {
+ accessor, err := Accessor(obj)
+ if err != nil {
+ return err
+ }
+ accessor.SetGenerateName(name)
+ return nil
+}
+
+func (resourceAccessor) UID(obj runtime.Object) (types.UID, error) {
+ accessor, err := Accessor(obj)
+ if err != nil {
+ return "", err
+ }
+ return accessor.GetUID(), nil
+}
+
+func (resourceAccessor) SetUID(obj runtime.Object, uid types.UID) error {
+ accessor, err := Accessor(obj)
+ if err != nil {
+ return err
+ }
+ accessor.SetUID(uid)
+ return nil
+}
+
+func (resourceAccessor) SelfLink(obj runtime.Object) (string, error) {
+ accessor, err := CommonAccessor(obj)
+ if err != nil {
+ return "", err
+ }
+ return accessor.GetSelfLink(), nil
+}
+
+func (resourceAccessor) SetSelfLink(obj runtime.Object, selfLink string) error {
+ accessor, err := CommonAccessor(obj)
+ if err != nil {
+ return err
+ }
+ accessor.SetSelfLink(selfLink)
+ return nil
+}
+
+func (resourceAccessor) Labels(obj runtime.Object) (map[string]string, error) {
+ accessor, err := Accessor(obj)
+ if err != nil {
+ return nil, err
+ }
+ return accessor.GetLabels(), nil
+}
+
+func (resourceAccessor) SetLabels(obj runtime.Object, labels map[string]string) error {
+ accessor, err := Accessor(obj)
+ if err != nil {
+ return err
+ }
+ accessor.SetLabels(labels)
+ return nil
+}
+
+func (resourceAccessor) Annotations(obj runtime.Object) (map[string]string, error) {
+ accessor, err := Accessor(obj)
+ if err != nil {
+ return nil, err
+ }
+ return accessor.GetAnnotations(), nil
+}
+
+func (resourceAccessor) SetAnnotations(obj runtime.Object, annotations map[string]string) error {
+ accessor, err := Accessor(obj)
+ if err != nil {
+ return err
+ }
+ accessor.SetAnnotations(annotations)
+ return nil
+}
+
+func (resourceAccessor) ResourceVersion(obj runtime.Object) (string, error) {
+ accessor, err := CommonAccessor(obj)
+ if err != nil {
+ return "", err
+ }
+ return accessor.GetResourceVersion(), nil
+}
+
+func (resourceAccessor) SetResourceVersion(obj runtime.Object, version string) error {
+ accessor, err := CommonAccessor(obj)
+ if err != nil {
+ return err
+ }
+ accessor.SetResourceVersion(version)
+ return nil
+}
+
+func (resourceAccessor) Continue(obj runtime.Object) (string, error) {
+ accessor, err := ListAccessor(obj)
+ if err != nil {
+ return "", err
+ }
+ return accessor.GetContinue(), nil
+}
+
+func (resourceAccessor) SetContinue(obj runtime.Object, version string) error {
+ accessor, err := ListAccessor(obj)
+ if err != nil {
+ return err
+ }
+ accessor.SetContinue(version)
+ return nil
+}
+
+// extractFromOwnerReference extracts v to o. v is the OwnerReferences field of an object.
+func extractFromOwnerReference(v reflect.Value, o *metav1.OwnerReference) error {
+ if err := runtime.Field(v, "APIVersion", &o.APIVersion); err != nil {
+ return err
+ }
+ if err := runtime.Field(v, "Kind", &o.Kind); err != nil {
+ return err
+ }
+ if err := runtime.Field(v, "Name", &o.Name); err != nil {
+ return err
+ }
+ if err := runtime.Field(v, "UID", &o.UID); err != nil {
+ return err
+ }
+ var controllerPtr *bool
+ if err := runtime.Field(v, "Controller", &controllerPtr); err != nil {
+ return err
+ }
+ if controllerPtr != nil {
+ controller := *controllerPtr
+ o.Controller = &controller
+ }
+ var blockOwnerDeletionPtr *bool
+ if err := runtime.Field(v, "BlockOwnerDeletion", &blockOwnerDeletionPtr); err != nil {
+ return err
+ }
+ if blockOwnerDeletionPtr != nil {
+ block := *blockOwnerDeletionPtr
+ o.BlockOwnerDeletion = &block
+ }
+ return nil
+}
+
+// setOwnerReference sets v to o. v is the OwnerReferences field of an object.
+func setOwnerReference(v reflect.Value, o *metav1.OwnerReference) error {
+ if err := runtime.SetField(o.APIVersion, v, "APIVersion"); err != nil {
+ return err
+ }
+ if err := runtime.SetField(o.Kind, v, "Kind"); err != nil {
+ return err
+ }
+ if err := runtime.SetField(o.Name, v, "Name"); err != nil {
+ return err
+ }
+ if err := runtime.SetField(o.UID, v, "UID"); err != nil {
+ return err
+ }
+ if o.Controller != nil {
+ controller := *(o.Controller)
+ if err := runtime.SetField(&controller, v, "Controller"); err != nil {
+ return err
+ }
+ }
+ if o.BlockOwnerDeletion != nil {
+ block := *(o.BlockOwnerDeletion)
+ if err := runtime.SetField(&block, v, "BlockOwnerDeletion"); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// genericAccessor contains pointers to strings that can modify an arbitrary
+// struct and implements the Accessor interface.
+type genericAccessor struct {
+ namespace *string
+ name *string
+ generateName *string
+ uid *types.UID
+ apiVersion *string
+ kind *string
+ resourceVersion *string
+ selfLink *string
+ creationTimestamp *metav1.Time
+ deletionTimestamp **metav1.Time
+ labels *map[string]string
+ annotations *map[string]string
+ ownerReferences reflect.Value
+ finalizers *[]string
+}
+
+func (a genericAccessor) GetNamespace() string {
+ if a.namespace == nil {
+ return ""
+ }
+ return *a.namespace
+}
+
+func (a genericAccessor) SetNamespace(namespace string) {
+ if a.namespace == nil {
+ return
+ }
+ *a.namespace = namespace
+}
+
+func (a genericAccessor) GetName() string {
+ if a.name == nil {
+ return ""
+ }
+ return *a.name
+}
+
+func (a genericAccessor) SetName(name string) {
+ if a.name == nil {
+ return
+ }
+ *a.name = name
+}
+
+func (a genericAccessor) GetGenerateName() string {
+ if a.generateName == nil {
+ return ""
+ }
+ return *a.generateName
+}
+
+func (a genericAccessor) SetGenerateName(generateName string) {
+ if a.generateName == nil {
+ return
+ }
+ *a.generateName = generateName
+}
+
+func (a genericAccessor) GetUID() types.UID {
+ if a.uid == nil {
+ return ""
+ }
+ return *a.uid
+}
+
+func (a genericAccessor) SetUID(uid types.UID) {
+ if a.uid == nil {
+ return
+ }
+ *a.uid = uid
+}
+
+func (a genericAccessor) GetAPIVersion() string {
+ return *a.apiVersion
+}
+
+func (a genericAccessor) SetAPIVersion(version string) {
+ *a.apiVersion = version
+}
+
+func (a genericAccessor) GetKind() string {
+ return *a.kind
+}
+
+func (a genericAccessor) SetKind(kind string) {
+ *a.kind = kind
+}
+
+func (a genericAccessor) GetResourceVersion() string {
+ return *a.resourceVersion
+}
+
+func (a genericAccessor) SetResourceVersion(version string) {
+ *a.resourceVersion = version
+}
+
+func (a genericAccessor) GetSelfLink() string {
+ return *a.selfLink
+}
+
+func (a genericAccessor) SetSelfLink(selfLink string) {
+ *a.selfLink = selfLink
+}
+
+func (a genericAccessor) GetCreationTimestamp() metav1.Time {
+ return *a.creationTimestamp
+}
+
+func (a genericAccessor) SetCreationTimestamp(timestamp metav1.Time) {
+ *a.creationTimestamp = timestamp
+}
+
+func (a genericAccessor) GetDeletionTimestamp() *metav1.Time {
+ return *a.deletionTimestamp
+}
+
+func (a genericAccessor) SetDeletionTimestamp(timestamp *metav1.Time) {
+ *a.deletionTimestamp = timestamp
+}
+
+func (a genericAccessor) GetLabels() map[string]string {
+ if a.labels == nil {
+ return nil
+ }
+ return *a.labels
+}
+
+func (a genericAccessor) SetLabels(labels map[string]string) {
+ *a.labels = labels
+}
+
+func (a genericAccessor) GetAnnotations() map[string]string {
+ if a.annotations == nil {
+ return nil
+ }
+ return *a.annotations
+}
+
+func (a genericAccessor) SetAnnotations(annotations map[string]string) {
+ if a.annotations == nil {
+ emptyAnnotations := make(map[string]string)
+ a.annotations = &emptyAnnotations
+ }
+ *a.annotations = annotations
+}
+
+func (a genericAccessor) GetFinalizers() []string {
+ if a.finalizers == nil {
+ return nil
+ }
+ return *a.finalizers
+}
+
+func (a genericAccessor) SetFinalizers(finalizers []string) {
+ *a.finalizers = finalizers
+}
+
+func (a genericAccessor) GetOwnerReferences() []metav1.OwnerReference {
+ var ret []metav1.OwnerReference
+ s := a.ownerReferences
+ if s.Kind() != reflect.Pointer || s.Elem().Kind() != reflect.Slice {
+ klog.Errorf("expect %v to be a pointer to slice", s)
+ return ret
+ }
+ s = s.Elem()
+ // Set the capacity to one element greater to avoid copy if the caller later append an element.
+ ret = make([]metav1.OwnerReference, s.Len(), s.Len()+1)
+ for i := 0; i < s.Len(); i++ {
+ if err := extractFromOwnerReference(s.Index(i), &ret[i]); err != nil {
+ klog.Errorf("extractFromOwnerReference failed: %v", err)
+ return ret
+ }
+ }
+ return ret
+}
+
+func (a genericAccessor) SetOwnerReferences(references []metav1.OwnerReference) {
+ s := a.ownerReferences
+ if s.Kind() != reflect.Pointer || s.Elem().Kind() != reflect.Slice {
+ klog.Errorf("expect %v to be a pointer to slice", s)
+ }
+ s = s.Elem()
+ newReferences := reflect.MakeSlice(s.Type(), len(references), len(references))
+ for i := 0; i < len(references); i++ {
+ if err := setOwnerReference(newReferences.Index(i), &references[i]); err != nil {
+ klog.Errorf("setOwnerReference failed: %v", err)
+ return
+ }
+ }
+ s.Set(newReferences)
+}
+
+// extractFromTypeMeta extracts pointers to version and kind fields from an object
+func extractFromTypeMeta(v reflect.Value, a *genericAccessor) error {
+ if err := runtime.FieldPtr(v, "APIVersion", &a.apiVersion); err != nil {
+ return err
+ }
+ if err := runtime.FieldPtr(v, "Kind", &a.kind); err != nil {
+ return err
+ }
+ return nil
+}
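
A sketch of the generic accessors (not part of the vendored files); metav1.PartialObjectMetadata stands in for any concrete API type:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	obj := &metav1.PartialObjectMetadata{
		ObjectMeta: metav1.ObjectMeta{Name: "demo", Namespace: "default"},
	}

	// Accessor exposes common object metadata without knowing the concrete type.
	acc, err := meta.Accessor(obj)
	if err != nil {
		panic(err)
	}
	acc.SetLabels(map[string]string{"app": "demo"})
	fmt.Println(acc.GetNamespace()+"/"+acc.GetName(), acc.GetLabels())

	// NewAccessor offers the same operations through a runtime.Object-based API.
	name, _ := meta.NewAccessor().Name(obj)
	fmt.Println("via MetadataAccessor:", name)
}
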
diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/multirestmapper.go b/vendor/k8s.io/apimachinery/pkg/api/meta/multirestmapper.go
new file mode 100644
index 0000000000..b7e9712505
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/api/meta/multirestmapper.go
@@ -0,0 +1,220 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package meta
+
+import (
+ "fmt"
+ "strings"
+
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ utilerrors "k8s.io/apimachinery/pkg/util/errors"
+)
+
+var (
+ _ ResettableRESTMapper = MultiRESTMapper{}
+)
+
+// MultiRESTMapper is a wrapper for multiple RESTMappers.
+type MultiRESTMapper []RESTMapper
+
+func (m MultiRESTMapper) String() string {
+ nested := make([]string, 0, len(m))
+ for _, t := range m {
+ currString := fmt.Sprintf("%v", t)
+ splitStrings := strings.Split(currString, "\n")
+ nested = append(nested, strings.Join(splitStrings, "\n\t"))
+ }
+
+ return fmt.Sprintf("MultiRESTMapper{\n\t%s\n}", strings.Join(nested, "\n\t"))
+}
+
+// ResourceSingularizer converts a REST resource name from plural to singular (e.g., from pods to pod).
+// This implementation supports multiple REST schemas and returns the first match.
+func (m MultiRESTMapper) ResourceSingularizer(resource string) (singular string, err error) {
+ for _, t := range m {
+ singular, err = t.ResourceSingularizer(resource)
+ if err == nil {
+ return
+ }
+ }
+ return
+}
+
+func (m MultiRESTMapper) ResourcesFor(resource schema.GroupVersionResource) ([]schema.GroupVersionResource, error) {
+ allGVRs := []schema.GroupVersionResource{}
+ for _, t := range m {
+ gvrs, err := t.ResourcesFor(resource)
+ // ignore "no match" errors, but any other error percolates back up
+ if IsNoMatchError(err) {
+ continue
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ // walk the existing values to de-dup
+ for _, curr := range gvrs {
+ found := false
+ for _, existing := range allGVRs {
+ if curr == existing {
+ found = true
+ break
+ }
+ }
+
+ if !found {
+ allGVRs = append(allGVRs, curr)
+ }
+ }
+ }
+
+ if len(allGVRs) == 0 {
+ return nil, &NoResourceMatchError{PartialResource: resource}
+ }
+
+ return allGVRs, nil
+}
+
+func (m MultiRESTMapper) KindsFor(resource schema.GroupVersionResource) (gvk []schema.GroupVersionKind, err error) {
+ allGVKs := []schema.GroupVersionKind{}
+ for _, t := range m {
+ gvks, err := t.KindsFor(resource)
+ // ignore "no match" errors, but any other error percolates back up
+ if IsNoMatchError(err) {
+ continue
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ // walk the existing values to de-dup
+ for _, curr := range gvks {
+ found := false
+ for _, existing := range allGVKs {
+ if curr == existing {
+ found = true
+ break
+ }
+ }
+
+ if !found {
+ allGVKs = append(allGVKs, curr)
+ }
+ }
+ }
+
+ if len(allGVKs) == 0 {
+ return nil, &NoResourceMatchError{PartialResource: resource}
+ }
+
+ return allGVKs, nil
+}
+
+func (m MultiRESTMapper) ResourceFor(resource schema.GroupVersionResource) (schema.GroupVersionResource, error) {
+ resources, err := m.ResourcesFor(resource)
+ if err != nil {
+ return schema.GroupVersionResource{}, err
+ }
+ if len(resources) == 1 {
+ return resources[0], nil
+ }
+
+ return schema.GroupVersionResource{}, &AmbiguousResourceError{PartialResource: resource, MatchingResources: resources}
+}
+
+func (m MultiRESTMapper) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) {
+ kinds, err := m.KindsFor(resource)
+ if err != nil {
+ return schema.GroupVersionKind{}, err
+ }
+ if len(kinds) == 1 {
+ return kinds[0], nil
+ }
+
+ return schema.GroupVersionKind{}, &AmbiguousResourceError{PartialResource: resource, MatchingKinds: kinds}
+}
+
+// RESTMapping provides the REST mapping for the resource based on the
+// kind and version. This implementation supports multiple REST schemas and
+// returns the first match.
+func (m MultiRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (*RESTMapping, error) {
+ allMappings := []*RESTMapping{}
+ errors := []error{}
+
+ for _, t := range m {
+ currMapping, err := t.RESTMapping(gk, versions...)
+ // ignore "no match" errors, but any other error percolates back up
+ if IsNoMatchError(err) {
+ continue
+ }
+ if err != nil {
+ errors = append(errors, err)
+ continue
+ }
+
+ allMappings = append(allMappings, currMapping)
+ }
+
+ // if we got exactly one mapping, then use it even if other requests failed
+ if len(allMappings) == 1 {
+ return allMappings[0], nil
+ }
+ if len(allMappings) > 1 {
+ var kinds []schema.GroupVersionKind
+ for _, m := range allMappings {
+ kinds = append(kinds, m.GroupVersionKind)
+ }
+ return nil, &AmbiguousKindError{PartialKind: gk.WithVersion(""), MatchingKinds: kinds}
+ }
+ if len(errors) > 0 {
+ return nil, utilerrors.NewAggregate(errors)
+ }
+ return nil, &NoKindMatchError{GroupKind: gk, SearchedVersions: versions}
+}
+
+// RESTMappings returns all possible RESTMappings for the provided group kind, or an error
+// if the type is not recognized.
+func (m MultiRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string) ([]*RESTMapping, error) {
+ var allMappings []*RESTMapping
+ var errors []error
+
+ for _, t := range m {
+ currMappings, err := t.RESTMappings(gk, versions...)
+ // ignore "no match" errors, but any other error percolates back up
+ if IsNoMatchError(err) {
+ continue
+ }
+ if err != nil {
+ errors = append(errors, err)
+ continue
+ }
+ allMappings = append(allMappings, currMappings...)
+ }
+ if len(errors) > 0 {
+ return nil, utilerrors.NewAggregate(errors)
+ }
+ if len(allMappings) == 0 {
+ return nil, &NoKindMatchError{GroupKind: gk, SearchedVersions: versions}
+ }
+ return allMappings, nil
+}
+
+func (m MultiRESTMapper) Reset() {
+ for _, t := range m {
+ MaybeResetRESTMapper(t)
+ }
+}
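
A sketch of combining mappers (not part of the vendored files); it assumes NewDefaultRESTMapper and Add from restmapper.go, which is only partially shown below:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	appsV1 := schema.GroupVersion{Group: "apps", Version: "v1"}
	coreV1 := schema.GroupVersion{Group: "", Version: "v1"}

	m1 := meta.NewDefaultRESTMapper([]schema.GroupVersion{appsV1})
	m1.Add(appsV1.WithKind("Deployment"), meta.RESTScopeNamespace)

	m2 := meta.NewDefaultRESTMapper([]schema.GroupVersion{coreV1})
	m2.Add(coreV1.WithKind("Pod"), meta.RESTScopeNamespace)

	// MultiRESTMapper consults every nested mapper, ignoring NoMatch errors
	// and de-duplicating the answers.
	multi := meta.MultiRESTMapper{m1, m2}

	gvk, err := multi.KindFor(schema.GroupVersionResource{Resource: "deployments"})
	fmt.Println(gvk, err)
}
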
diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/priority.go b/vendor/k8s.io/apimachinery/pkg/api/meta/priority.go
new file mode 100644
index 0000000000..4f097c9c90
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/api/meta/priority.go
@@ -0,0 +1,230 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package meta
+
+import (
+ "fmt"
+
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+const (
+ AnyGroup = "*"
+ AnyVersion = "*"
+ AnyResource = "*"
+ AnyKind = "*"
+)
+
+var (
+ _ ResettableRESTMapper = PriorityRESTMapper{}
+)
+
+// PriorityRESTMapper is a wrapper for automatically choosing a particular Resource or Kind
+// when multiple matches are possible
+type PriorityRESTMapper struct {
+ // Delegate is the RESTMapper to use to locate all the Kind and Resource matches
+ Delegate RESTMapper
+
+ // ResourcePriority is a list of priority patterns to apply to matching resources.
+ // The list of all matching resources is narrowed based on the patterns until only one remains.
+ // A pattern with no matches is skipped. A pattern with more than one match uses its
+ // matches as the list to continue matching against.
+ ResourcePriority []schema.GroupVersionResource
+
+ // KindPriority is a list of priority patterns to apply to matching kinds.
+ // The list of all matching kinds is narrowed based on the patterns until only one remains.
+ // A pattern with no matches is skipped. A pattern with more than one match uses its
+ // matches as the list to continue matching against.
+ KindPriority []schema.GroupVersionKind
+}
+
+func (m PriorityRESTMapper) String() string {
+ return fmt.Sprintf("PriorityRESTMapper{\n\t%v\n\t%v\n\t%v\n}", m.ResourcePriority, m.KindPriority, m.Delegate)
+}
+
+// ResourceFor finds all resources, then passes them through the ResourcePriority patterns to find a single matching hit.
+func (m PriorityRESTMapper) ResourceFor(partiallySpecifiedResource schema.GroupVersionResource) (schema.GroupVersionResource, error) {
+ originalGVRs, originalErr := m.Delegate.ResourcesFor(partiallySpecifiedResource)
+ if originalErr != nil && len(originalGVRs) == 0 {
+ return schema.GroupVersionResource{}, originalErr
+ }
+ if len(originalGVRs) == 1 {
+ return originalGVRs[0], originalErr
+ }
+
+ remainingGVRs := append([]schema.GroupVersionResource{}, originalGVRs...)
+ for _, pattern := range m.ResourcePriority {
+ matchedGVRs := []schema.GroupVersionResource{}
+ for _, gvr := range remainingGVRs {
+ if resourceMatches(pattern, gvr) {
+ matchedGVRs = append(matchedGVRs, gvr)
+ }
+ }
+
+ switch len(matchedGVRs) {
+ case 0:
+ // if you have no matches, then nothing matched this pattern just move to the next
+ continue
+ case 1:
+ // one match, return
+ return matchedGVRs[0], originalErr
+ default:
+ // more than one match, use the matched hits as the list moving to the next pattern.
+ // this way you can have a series of selection criteria
+ remainingGVRs = matchedGVRs
+ }
+ }
+
+ return schema.GroupVersionResource{}, &AmbiguousResourceError{PartialResource: partiallySpecifiedResource, MatchingResources: originalGVRs}
+}
+
+// KindFor finds all kinds, then passes them through the KindPriority patterns to find a single matching hit.
+func (m PriorityRESTMapper) KindFor(partiallySpecifiedResource schema.GroupVersionResource) (schema.GroupVersionKind, error) {
+ originalGVKs, originalErr := m.Delegate.KindsFor(partiallySpecifiedResource)
+ if originalErr != nil && len(originalGVKs) == 0 {
+ return schema.GroupVersionKind{}, originalErr
+ }
+ if len(originalGVKs) == 1 {
+ return originalGVKs[0], originalErr
+ }
+
+ remainingGVKs := append([]schema.GroupVersionKind{}, originalGVKs...)
+ for _, pattern := range m.KindPriority {
+ matchedGVKs := []schema.GroupVersionKind{}
+ for _, gvr := range remainingGVKs {
+ if kindMatches(pattern, gvr) {
+ matchedGVKs = append(matchedGVKs, gvr)
+ }
+ }
+
+ switch len(matchedGVKs) {
+ case 0:
+ // if you have no matches, then nothing matched this pattern just move to the next
+ continue
+ case 1:
+ // one match, return
+ return matchedGVKs[0], originalErr
+ default:
+ // more than one match, use the matched hits as the list moving to the next pattern.
+ // this way you can have a series of selection criteria
+ remainingGVKs = matchedGVKs
+ }
+ }
+
+ return schema.GroupVersionKind{}, &AmbiguousResourceError{PartialResource: partiallySpecifiedResource, MatchingKinds: originalGVKs}
+}
+
+func resourceMatches(pattern schema.GroupVersionResource, resource schema.GroupVersionResource) bool {
+ if pattern.Group != AnyGroup && pattern.Group != resource.Group {
+ return false
+ }
+ if pattern.Version != AnyVersion && pattern.Version != resource.Version {
+ return false
+ }
+ if pattern.Resource != AnyResource && pattern.Resource != resource.Resource {
+ return false
+ }
+
+ return true
+}
+
+func kindMatches(pattern schema.GroupVersionKind, kind schema.GroupVersionKind) bool {
+ if pattern.Group != AnyGroup && pattern.Group != kind.Group {
+ return false
+ }
+ if pattern.Version != AnyVersion && pattern.Version != kind.Version {
+ return false
+ }
+ if pattern.Kind != AnyKind && pattern.Kind != kind.Kind {
+ return false
+ }
+
+ return true
+}
+
+func (m PriorityRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (mapping *RESTMapping, err error) {
+ mappings, originalErr := m.Delegate.RESTMappings(gk, versions...)
+ if originalErr != nil && len(mappings) == 0 {
+ return nil, originalErr
+ }
+
+ // any versions the user provides take priority
+ priorities := m.KindPriority
+ if len(versions) > 0 {
+ priorities = make([]schema.GroupVersionKind, 0, len(m.KindPriority)+len(versions))
+ for _, version := range versions {
+ gv := schema.GroupVersion{
+ Version: version,
+ Group: gk.Group,
+ }
+ priorities = append(priorities, gv.WithKind(AnyKind))
+ }
+ priorities = append(priorities, m.KindPriority...)
+ }
+
+ remaining := append([]*RESTMapping{}, mappings...)
+ for _, pattern := range priorities {
+ var matching []*RESTMapping
+ for _, m := range remaining {
+ if kindMatches(pattern, m.GroupVersionKind) {
+ matching = append(matching, m)
+ }
+ }
+
+ switch len(matching) {
+ case 0:
+ // if you have no matches, then nothing matched this pattern just move to the next
+ continue
+ case 1:
+ // one match, return
+ return matching[0], originalErr
+ default:
+ // more than one match, use the matched hits as the list moving to the next pattern.
+ // this way you can have a series of selection criteria
+ remaining = matching
+ }
+ }
+ if len(remaining) == 1 {
+ return remaining[0], originalErr
+ }
+
+ var kinds []schema.GroupVersionKind
+ for _, m := range mappings {
+ kinds = append(kinds, m.GroupVersionKind)
+ }
+ return nil, &AmbiguousKindError{PartialKind: gk.WithVersion(""), MatchingKinds: kinds}
+}
+
+func (m PriorityRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string) ([]*RESTMapping, error) {
+ return m.Delegate.RESTMappings(gk, versions...)
+}
+
+func (m PriorityRESTMapper) ResourceSingularizer(resource string) (singular string, err error) {
+ return m.Delegate.ResourceSingularizer(resource)
+}
+
+func (m PriorityRESTMapper) ResourcesFor(partiallySpecifiedResource schema.GroupVersionResource) ([]schema.GroupVersionResource, error) {
+ return m.Delegate.ResourcesFor(partiallySpecifiedResource)
+}
+
+func (m PriorityRESTMapper) KindsFor(partiallySpecifiedResource schema.GroupVersionResource) (gvk []schema.GroupVersionKind, err error) {
+ return m.Delegate.KindsFor(partiallySpecifiedResource)
+}
+
+func (m PriorityRESTMapper) Reset() {
+ MaybeResetRESTMapper(m.Delegate)
+}
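
For reviewers unfamiliar with the priority mapper vendored above: the ResourcePriority/KindPriority patterns use the AnyGroup/AnyVersion/AnyResource wildcards to narrow an ambiguous delegate result one pattern at a time. A minimal sketch, using illustrative (not project-specific) group/version/kind names:

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/api/meta"
    "k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
    // Register "Deployment" under two groups so the delegate alone is ambiguous.
    base := meta.NewDefaultRESTMapper([]schema.GroupVersion{
        {Group: "apps", Version: "v1"},
        {Group: "extensions", Version: "v1beta1"},
    })
    base.Add(schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"}, meta.RESTScopeNamespace)
    base.Add(schema.GroupVersionKind{Group: "extensions", Version: "v1beta1", Kind: "Deployment"}, meta.RESTScopeNamespace)

    mapper := meta.PriorityRESTMapper{
        Delegate: base,
        // Prefer anything in the "apps" group, at any version.
        ResourcePriority: []schema.GroupVersionResource{
            {Group: "apps", Version: meta.AnyVersion, Resource: meta.AnyResource},
        },
        KindPriority: []schema.GroupVersionKind{
            {Group: "apps", Version: meta.AnyVersion, Kind: meta.AnyKind},
        },
    }

    // Both registered candidates match the partial input; the priority
    // pattern narrows them to apps/v1 deployments.
    gvr, err := mapper.ResourceFor(schema.GroupVersionResource{Resource: "deployments"})
    fmt.Println(gvr, err)
}

With no matching priority pattern, the remaining ambiguity surfaces as the AmbiguousResourceError constructed at the end of ResourceFor/KindFor above.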
diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/restmapper.go b/vendor/k8s.io/apimachinery/pkg/api/meta/restmapper.go
new file mode 100644
index 0000000000..91cb98cae4
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/api/meta/restmapper.go
@@ -0,0 +1,529 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// TODO: move everything in this file to pkg/api/rest
+package meta
+
+import (
+ "fmt"
+ "sort"
+ "strings"
+
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// Implements RESTScope interface
+type restScope struct {
+ name RESTScopeName
+}
+
+func (r *restScope) Name() RESTScopeName {
+ return r.name
+}
+
+var RESTScopeNamespace = &restScope{
+ name: RESTScopeNameNamespace,
+}
+
+var RESTScopeRoot = &restScope{
+ name: RESTScopeNameRoot,
+}
+
+// DefaultRESTMapper exposes mappings between the types defined in a
+// runtime.Scheme. It assumes that all types defined in the provided scheme
+// can be mapped with the provided MetadataAccessor and Codec interfaces.
+//
+// The resource name of a Kind is defined as the lowercase,
+// English-plural version of the Kind string.
+// When converting from resource to Kind, the singular version of the
+// resource name is also accepted for convenience.
+//
+// TODO: Only accept plural for some operations for increased control?
+// (`get pod bar` vs `get pods bar`)
+type DefaultRESTMapper struct {
+ defaultGroupVersions []schema.GroupVersion
+
+ resourceToKind map[schema.GroupVersionResource]schema.GroupVersionKind
+ kindToPluralResource map[schema.GroupVersionKind]schema.GroupVersionResource
+ kindToScope map[schema.GroupVersionKind]RESTScope
+ singularToPlural map[schema.GroupVersionResource]schema.GroupVersionResource
+ pluralToSingular map[schema.GroupVersionResource]schema.GroupVersionResource
+}
+
+func (m *DefaultRESTMapper) String() string {
+ if m == nil {
+ return ""
+ }
+ return fmt.Sprintf("DefaultRESTMapper{kindToPluralResource=%v}", m.kindToPluralResource)
+}
+
+var _ RESTMapper = &DefaultRESTMapper{}
+
+// NewDefaultRESTMapper initializes a mapping between Kind and APIVersion
+// to a resource name and back based on the objects in a runtime.Scheme
+// and the Kubernetes API conventions. It takes a priority list of the group
+// versions to search when an object has no default version (set empty to
+// return an error); this list also determines the preferred ordering of
+// ambiguous matches.
+func NewDefaultRESTMapper(defaultGroupVersions []schema.GroupVersion) *DefaultRESTMapper {
+ resourceToKind := make(map[schema.GroupVersionResource]schema.GroupVersionKind)
+ kindToPluralResource := make(map[schema.GroupVersionKind]schema.GroupVersionResource)
+ kindToScope := make(map[schema.GroupVersionKind]RESTScope)
+ singularToPlural := make(map[schema.GroupVersionResource]schema.GroupVersionResource)
+ pluralToSingular := make(map[schema.GroupVersionResource]schema.GroupVersionResource)
+ // TODO: verify name mappings work correctly when versions differ
+
+ return &DefaultRESTMapper{
+ resourceToKind: resourceToKind,
+ kindToPluralResource: kindToPluralResource,
+ kindToScope: kindToScope,
+ defaultGroupVersions: defaultGroupVersions,
+ singularToPlural: singularToPlural,
+ pluralToSingular: pluralToSingular,
+ }
+}
+
+func (m *DefaultRESTMapper) Add(kind schema.GroupVersionKind, scope RESTScope) {
+ plural, singular := UnsafeGuessKindToResource(kind)
+ m.AddSpecific(kind, plural, singular, scope)
+}
+
+func (m *DefaultRESTMapper) AddSpecific(kind schema.GroupVersionKind, plural, singular schema.GroupVersionResource, scope RESTScope) {
+ m.singularToPlural[singular] = plural
+ m.pluralToSingular[plural] = singular
+
+ m.resourceToKind[singular] = kind
+ m.resourceToKind[plural] = kind
+
+ m.kindToPluralResource[kind] = plural
+ m.kindToScope[kind] = scope
+}
+
+// unpluralizedSuffixes is a list of resource suffixes that are the same in both plural and singular form.
+// This is only necessary because some bits of code are lazy and don't actually use the RESTMapper like they should.
+// TODO eliminate this so that different callers can correctly map to resources. This probably means updating all
+// callers to use the RESTMapper they mean.
+var unpluralizedSuffixes = []string{
+ "endpoints",
+}
+
+// UnsafeGuessKindToResource converts Kind to a resource name.
+// Broken. This method only "sort of" works when used outside of this package. It assumes that Kinds and Resources match
+// and they aren't guaranteed to do so.
+func UnsafeGuessKindToResource(kind schema.GroupVersionKind) ( /*plural*/ schema.GroupVersionResource /*singular*/, schema.GroupVersionResource) {
+ kindName := kind.Kind
+ if len(kindName) == 0 {
+ return schema.GroupVersionResource{}, schema.GroupVersionResource{}
+ }
+ singularName := strings.ToLower(kindName)
+ singular := kind.GroupVersion().WithResource(singularName)
+
+ for _, skip := range unpluralizedSuffixes {
+ if strings.HasSuffix(singularName, skip) {
+ return singular, singular
+ }
+ }
+
+ switch string(singularName[len(singularName)-1]) {
+ case "s":
+ return kind.GroupVersion().WithResource(singularName + "es"), singular
+ case "y":
+ return kind.GroupVersion().WithResource(strings.TrimSuffix(singularName, "y") + "ies"), singular
+ }
+
+ return kind.GroupVersion().WithResource(singularName + "s"), singular
+}
+
+// ResourceSingularizer implements RESTMapper
+// It converts a resource name from plural to singular (e.g., from pods to pod)
+func (m *DefaultRESTMapper) ResourceSingularizer(resourceType string) (string, error) {
+ partialResource := schema.GroupVersionResource{Resource: resourceType}
+ resources, err := m.ResourcesFor(partialResource)
+ if err != nil {
+ return resourceType, err
+ }
+
+ singular := schema.GroupVersionResource{}
+ for _, curr := range resources {
+ currSingular, ok := m.pluralToSingular[curr]
+ if !ok {
+ continue
+ }
+ if singular.Empty() {
+ singular = currSingular
+ continue
+ }
+
+ if currSingular.Resource != singular.Resource {
+ return resourceType, fmt.Errorf("multiple possible singular resources (%v) found for %v", resources, resourceType)
+ }
+ }
+
+ if singular.Empty() {
+ return resourceType, fmt.Errorf("no singular of resource %v has been defined", resourceType)
+ }
+
+ return singular.Resource, nil
+}
+
+// coerceResourceForMatching makes the resource lower case and converts internal versions to unspecified (legacy behavior)
+func coerceResourceForMatching(resource schema.GroupVersionResource) schema.GroupVersionResource {
+ resource.Resource = strings.ToLower(resource.Resource)
+ if resource.Version == runtime.APIVersionInternal {
+ resource.Version = ""
+ }
+
+ return resource
+}
+
+func (m *DefaultRESTMapper) ResourcesFor(input schema.GroupVersionResource) ([]schema.GroupVersionResource, error) {
+ resource := coerceResourceForMatching(input)
+
+ hasResource := len(resource.Resource) > 0
+ hasGroup := len(resource.Group) > 0
+ hasVersion := len(resource.Version) > 0
+
+ if !hasResource {
+ return nil, fmt.Errorf("a resource must be present, got: %v", resource)
+ }
+
+ ret := []schema.GroupVersionResource{}
+ switch {
+ case hasGroup && hasVersion:
+ // fully qualified. Find the exact match
+ for plural, singular := range m.pluralToSingular {
+ if singular == resource {
+ ret = append(ret, plural)
+ break
+ }
+ if plural == resource {
+ ret = append(ret, plural)
+ break
+ }
+ }
+
+ case hasGroup:
+ // given a group, prefer an exact match. If you don't find one, resort to a prefix match on group
+ foundExactMatch := false
+ requestedGroupResource := resource.GroupResource()
+ for plural, singular := range m.pluralToSingular {
+ if singular.GroupResource() == requestedGroupResource {
+ foundExactMatch = true
+ ret = append(ret, plural)
+ }
+ if plural.GroupResource() == requestedGroupResource {
+ foundExactMatch = true
+ ret = append(ret, plural)
+ }
+ }
+
+ // if you didn't find an exact match, match on group prefixing. This allows storageclass.storage to match
+ // storageclass.storage.k8s.io
+ if !foundExactMatch {
+ for plural, singular := range m.pluralToSingular {
+ if !strings.HasPrefix(plural.Group, requestedGroupResource.Group) {
+ continue
+ }
+ if singular.Resource == requestedGroupResource.Resource {
+ ret = append(ret, plural)
+ }
+ if plural.Resource == requestedGroupResource.Resource {
+ ret = append(ret, plural)
+ }
+ }
+
+ }
+
+ case hasVersion:
+ for plural, singular := range m.pluralToSingular {
+ if singular.Version == resource.Version && singular.Resource == resource.Resource {
+ ret = append(ret, plural)
+ }
+ if plural.Version == resource.Version && plural.Resource == resource.Resource {
+ ret = append(ret, plural)
+ }
+ }
+
+ default:
+ for plural, singular := range m.pluralToSingular {
+ if singular.Resource == resource.Resource {
+ ret = append(ret, plural)
+ }
+ if plural.Resource == resource.Resource {
+ ret = append(ret, plural)
+ }
+ }
+ }
+
+ if len(ret) == 0 {
+ return nil, &NoResourceMatchError{PartialResource: resource}
+ }
+
+ sort.Sort(resourceByPreferredGroupVersion{ret, m.defaultGroupVersions})
+ return ret, nil
+}
+
+func (m *DefaultRESTMapper) ResourceFor(resource schema.GroupVersionResource) (schema.GroupVersionResource, error) {
+ resources, err := m.ResourcesFor(resource)
+ if err != nil {
+ return schema.GroupVersionResource{}, err
+ }
+ if len(resources) == 1 {
+ return resources[0], nil
+ }
+
+ return schema.GroupVersionResource{}, &AmbiguousResourceError{PartialResource: resource, MatchingResources: resources}
+}
+
+func (m *DefaultRESTMapper) KindsFor(input schema.GroupVersionResource) ([]schema.GroupVersionKind, error) {
+ resource := coerceResourceForMatching(input)
+
+ hasResource := len(resource.Resource) > 0
+ hasGroup := len(resource.Group) > 0
+ hasVersion := len(resource.Version) > 0
+
+ if !hasResource {
+ return nil, fmt.Errorf("a resource must be present, got: %v", resource)
+ }
+
+ ret := []schema.GroupVersionKind{}
+ switch {
+ // fully qualified. Find the exact match
+ case hasGroup && hasVersion:
+ kind, exists := m.resourceToKind[resource]
+ if exists {
+ ret = append(ret, kind)
+ }
+
+ case hasGroup:
+ foundExactMatch := false
+ requestedGroupResource := resource.GroupResource()
+ for currResource, currKind := range m.resourceToKind {
+ if currResource.GroupResource() == requestedGroupResource {
+ foundExactMatch = true
+ ret = append(ret, currKind)
+ }
+ }
+
+ // if you didn't find an exact match, match on group prefixing. This allows storageclass.storage to match
+ // storageclass.storage.k8s.io
+ if !foundExactMatch {
+ for currResource, currKind := range m.resourceToKind {
+ if !strings.HasPrefix(currResource.Group, requestedGroupResource.Group) {
+ continue
+ }
+ if currResource.Resource == requestedGroupResource.Resource {
+ ret = append(ret, currKind)
+ }
+ }
+
+ }
+
+ case hasVersion:
+ for currResource, currKind := range m.resourceToKind {
+ if currResource.Version == resource.Version && currResource.Resource == resource.Resource {
+ ret = append(ret, currKind)
+ }
+ }
+
+ default:
+ for currResource, currKind := range m.resourceToKind {
+ if currResource.Resource == resource.Resource {
+ ret = append(ret, currKind)
+ }
+ }
+ }
+
+ if len(ret) == 0 {
+ return nil, &NoResourceMatchError{PartialResource: input}
+ }
+
+ sort.Sort(kindByPreferredGroupVersion{ret, m.defaultGroupVersions})
+ return ret, nil
+}
+
+func (m *DefaultRESTMapper) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) {
+ kinds, err := m.KindsFor(resource)
+ if err != nil {
+ return schema.GroupVersionKind{}, err
+ }
+ if len(kinds) == 1 {
+ return kinds[0], nil
+ }
+
+ return schema.GroupVersionKind{}, &AmbiguousResourceError{PartialResource: resource, MatchingKinds: kinds}
+}
+
+type kindByPreferredGroupVersion struct {
+ list []schema.GroupVersionKind
+ sortOrder []schema.GroupVersion
+}
+
+func (o kindByPreferredGroupVersion) Len() int { return len(o.list) }
+func (o kindByPreferredGroupVersion) Swap(i, j int) { o.list[i], o.list[j] = o.list[j], o.list[i] }
+func (o kindByPreferredGroupVersion) Less(i, j int) bool {
+ lhs := o.list[i]
+ rhs := o.list[j]
+ if lhs == rhs {
+ return false
+ }
+
+ if lhs.GroupVersion() == rhs.GroupVersion() {
+ return lhs.Kind < rhs.Kind
+ }
+
+ // otherwise, the difference is in the GroupVersion, so we need to sort with respect to the preferred order
+ lhsIndex := -1
+ rhsIndex := -1
+
+ for i := range o.sortOrder {
+ if o.sortOrder[i] == lhs.GroupVersion() {
+ lhsIndex = i
+ }
+ if o.sortOrder[i] == rhs.GroupVersion() {
+ rhsIndex = i
+ }
+ }
+
+ if rhsIndex == -1 {
+ return true
+ }
+
+ return lhsIndex < rhsIndex
+}
+
+type resourceByPreferredGroupVersion struct {
+ list []schema.GroupVersionResource
+ sortOrder []schema.GroupVersion
+}
+
+func (o resourceByPreferredGroupVersion) Len() int { return len(o.list) }
+func (o resourceByPreferredGroupVersion) Swap(i, j int) { o.list[i], o.list[j] = o.list[j], o.list[i] }
+func (o resourceByPreferredGroupVersion) Less(i, j int) bool {
+ lhs := o.list[i]
+ rhs := o.list[j]
+ if lhs == rhs {
+ return false
+ }
+
+ if lhs.GroupVersion() == rhs.GroupVersion() {
+ return lhs.Resource < rhs.Resource
+ }
+
+ // otherwise, the difference is in the GroupVersion, so we need to sort with respect to the preferred order
+ lhsIndex := -1
+ rhsIndex := -1
+
+ for i := range o.sortOrder {
+ if o.sortOrder[i] == lhs.GroupVersion() {
+ lhsIndex = i
+ }
+ if o.sortOrder[i] == rhs.GroupVersion() {
+ rhsIndex = i
+ }
+ }
+
+ if rhsIndex == -1 {
+ return true
+ }
+
+ return lhsIndex < rhsIndex
+}
+
+// RESTMapping returns a struct representing the resource path and conversion interfaces a
+// RESTClient should use to operate on the provided group/kind in order of versions. If a version search
+// order is not provided, the search order provided to DefaultRESTMapper will be used to resolve which
+// version should be used to access the named group/kind.
+func (m *DefaultRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (*RESTMapping, error) {
+ mappings, err := m.RESTMappings(gk, versions...)
+ if err != nil {
+ return nil, err
+ }
+ if len(mappings) == 0 {
+ return nil, &NoKindMatchError{GroupKind: gk, SearchedVersions: versions}
+ }
+ // since we rely on RESTMappings method
+ // take the first match and return to the caller
+ // as this was the existing behavior.
+ return mappings[0], nil
+}
+
+// RESTMappings returns the RESTMappings for the provided group kind. If a version search order
+// is not provided, the search order provided to DefaultRESTMapper will be used.
+func (m *DefaultRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string) ([]*RESTMapping, error) {
+ mappings := make([]*RESTMapping, 0)
+ potentialGVK := make([]schema.GroupVersionKind, 0)
+ hadVersion := false
+
+ // Pick an appropriate version
+ for _, version := range versions {
+ if len(version) == 0 || version == runtime.APIVersionInternal {
+ continue
+ }
+ currGVK := gk.WithVersion(version)
+ hadVersion = true
+ if _, ok := m.kindToPluralResource[currGVK]; ok {
+ potentialGVK = append(potentialGVK, currGVK)
+ break
+ }
+ }
+ // Use the default preferred versions
+ if !hadVersion && len(potentialGVK) == 0 {
+ for _, gv := range m.defaultGroupVersions {
+ if gv.Group != gk.Group {
+ continue
+ }
+ potentialGVK = append(potentialGVK, gk.WithVersion(gv.Version))
+ }
+ }
+
+ if len(potentialGVK) == 0 {
+ return nil, &NoKindMatchError{GroupKind: gk, SearchedVersions: versions}
+ }
+
+ for _, gvk := range potentialGVK {
+ //Ensure we have a REST mapping
+ res, ok := m.kindToPluralResource[gvk]
+ if !ok {
+ continue
+ }
+
+ // Ensure we have a REST scope
+ scope, ok := m.kindToScope[gvk]
+ if !ok {
+ return nil, fmt.Errorf("the provided version %q and kind %q cannot be mapped to a supported scope", gvk.GroupVersion(), gvk.Kind)
+ }
+
+ mappings = append(mappings, &RESTMapping{
+ Resource: res,
+ GroupVersionKind: gvk,
+ Scope: scope,
+ })
+ }
+
+ if len(mappings) == 0 {
+ return nil, &NoResourceMatchError{PartialResource: schema.GroupVersionResource{Group: gk.Group, Resource: gk.Kind}}
+ }
+ return mappings, nil
+}
+
+// MaybeResetRESTMapper calls Reset() on the mapper if it is a ResettableRESTMapper.
+func MaybeResetRESTMapper(mapper RESTMapper) {
+ m, ok := mapper.(ResettableRESTMapper)
+ if ok {
+ m.Reset()
+ }
+}
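
A short usage sketch of the DefaultRESTMapper vendored above, exercising only APIs defined in this file; the group and kind names are illustrative:

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/api/meta"
    "k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
    gv := schema.GroupVersion{Group: "apps", Version: "v1"}
    mapper := meta.NewDefaultRESTMapper([]schema.GroupVersion{gv})

    // Add guesses the plural/singular resource names ("deployments"/"deployment")
    // via UnsafeGuessKindToResource and records the scope.
    mapper.Add(gv.WithKind("Deployment"), meta.RESTScopeNamespace)

    mapping, err := mapper.RESTMapping(schema.GroupKind{Group: "apps", Kind: "Deployment"})
    if err != nil {
        panic(err)
    }
    fmt.Println(mapping.Resource)     // apps/v1, Resource=deployments
    fmt.Println(mapping.Scope.Name()) // namespace

    singular, _ := mapper.ResourceSingularizer("deployments")
    fmt.Println(singular) // deployment
}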
diff --git a/vendor/k8s.io/apimachinery/pkg/api/operation/operation.go b/vendor/k8s.io/apimachinery/pkg/api/operation/operation.go
new file mode 100644
index 0000000000..9f5ae7a9d4
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/api/operation/operation.go
@@ -0,0 +1,56 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package operation
+
+import "k8s.io/apimachinery/pkg/util/sets"
+
+// Operation provides contextual information about a validation request and the API
+// operation being validated.
+// This type is intended for use with generated validation code and may be enhanced
+// in the future to include other information needed to validate requests.
+type Operation struct {
+ // Type is the category of operation being validated. This does not
+ // differentiate between HTTP verbs like PUT and PATCH, but rather merges
+ // those into a single "Update" category.
+ Type Type
+
+ // Options declare the options enabled for validation.
+ //
+ // Options should be set according to a resource validation strategy before validation
+ // is performed, and must be treated as read-only during validation.
+ //
+ // Options are identified by string names. Option string names may match the name of a feature
+ // gate, in which case the presence of the name in the set indicates that the feature is
+ // considered enabled for the resource being validated. Note that a resource may have a
+ // feature enabled even when the feature gate is disabled. This can happen when the feature is
+ // already in use by a resource, often because the feature gate was enabled when the
+ // resource first began using the feature.
+ //
+ // Unset options are disabled/false.
+ Options sets.Set[string]
+}
+
+// Type is the request operation to be validated.
+type Type uint32
+
+const (
+ // Create indicates the request being validated is for a resource create operation.
+ Create Type = iota
+
+ // Update indicates the request being validated is for a resource update operation.
+ Update
+)
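
A minimal sketch of how a validation strategy might populate the new Operation type before running generated validation; the option name here is hypothetical:

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/api/operation"
    "k8s.io/apimachinery/pkg/util/sets"
)

func main() {
    // A hypothetical option; real callers derive the set from feature gates and
    // from whether the stored object already uses the feature.
    op := operation.Operation{
        Type:    operation.Update,
        Options: sets.New("ExampleValidationOption"),
    }

    fmt.Println(op.Type == operation.Update)               // true
    fmt.Println(op.Options.Has("ExampleValidationOption")) // true
}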
diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go
index 69f1bc336d..d0aada9dd7 100644
--- a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go
+++ b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go
@@ -20,11 +20,13 @@ import (
"bytes"
"errors"
"fmt"
- "math"
+ math "math"
"math/big"
"strconv"
"strings"
+ cbor "k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct"
+
inf "gopkg.in/inf.v0"
)
@@ -458,9 +460,10 @@ func (q *Quantity) CanonicalizeBytes(out []byte) (result, suffix []byte) {
}
}
-// AsApproximateFloat64 returns a float64 representation of the quantity which may
-// lose precision. If the value of the quantity is outside the range of a float64
-// +Inf/-Inf will be returned.
+// AsApproximateFloat64 returns a float64 representation of the quantity which
+// may lose precision. If precision matters more than performance, see
+// AsFloat64Slow. If the value of the quantity is outside the range of a
+// float64, +Inf/-Inf will be returned.
func (q *Quantity) AsApproximateFloat64() float64 {
var base float64
var exponent int
@@ -478,6 +481,36 @@ func (q *Quantity) AsApproximateFloat64() float64 {
return base * math.Pow10(exponent)
}
+// AsFloat64Slow returns a float64 representation of the quantity. This is
+// more precise than AsApproximateFloat64 but significantly slower. If the
+// value of the quantity is outside the range of a float64, +Inf/-Inf will be
+// returned.
+func (q *Quantity) AsFloat64Slow() float64 {
+ infDec := q.AsDec()
+
+ var absScale int64
+ if infDec.Scale() < 0 {
+ absScale = int64(-infDec.Scale())
+ } else {
+ absScale = int64(infDec.Scale())
+ }
+ pow10AbsScale := big.NewInt(10)
+ pow10AbsScale = pow10AbsScale.Exp(pow10AbsScale, big.NewInt(absScale), nil)
+
+ var resultBigFloat *big.Float
+ if infDec.Scale() < 0 {
+ resultBigInt := new(big.Int).Mul(infDec.UnscaledBig(), pow10AbsScale)
+ resultBigFloat = new(big.Float).SetInt(resultBigInt)
+ } else {
+ pow10AbsScaleFloat := new(big.Float).SetInt(pow10AbsScale)
+ resultBigFloat = new(big.Float).SetInt(infDec.UnscaledBig())
+ resultBigFloat = resultBigFloat.Quo(resultBigFloat, pow10AbsScaleFloat)
+ }
+
+ result, _ := resultBigFloat.Float64()
+ return result
+}
+
// AsInt64 returns a representation of the current value as an int64 if a fast conversion
// is possible. If false is returned, callers must use the inf.Dec form of this quantity.
func (q *Quantity) AsInt64() (int64, bool) {
@@ -683,6 +716,12 @@ func (q Quantity) MarshalJSON() ([]byte, error) {
return result, nil
}
+func (q Quantity) MarshalCBOR() ([]byte, error) {
+ // The call to String() should never return the string "" because the receiver's
+ // address will never be nil.
+ return cbor.Marshal(q.String())
+}
+
// ToUnstructured implements the value.UnstructuredConverter interface.
func (q Quantity) ToUnstructured() interface{} {
return q.String()
@@ -711,6 +750,27 @@ func (q *Quantity) UnmarshalJSON(value []byte) error {
return nil
}
+func (q *Quantity) UnmarshalCBOR(value []byte) error {
+ var s *string
+ if err := cbor.Unmarshal(value, &s); err != nil {
+ return err
+ }
+
+ if s == nil {
+ q.d.Dec = nil
+ q.i = int64Amount{}
+ return nil
+ }
+
+ parsed, err := ParseQuantity(strings.TrimSpace(*s))
+ if err != nil {
+ return err
+ }
+
+ *q = parsed
+ return nil
+}
+
// NewDecimalQuantity returns a new Quantity representing the given
// value in the given format.
func NewDecimalQuantity(b inf.Dec, format Format) *Quantity {
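
To sanity-check the quantity changes above, a small sketch comparing the fast and slow float conversions and round-tripping the new CBOR hooks; the parsed value is illustrative:

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/api/resource"
)

func main() {
    q := resource.MustParse("500m") // 0.5 in decimal SI notation

    // Fast conversion: may lose precision for large or high-scale values.
    fmt.Println(q.AsApproximateFloat64())
    // New slow path: exact decimal scaling via math/big before the final rounding.
    fmt.Println(q.AsFloat64Slow())

    // The new CBOR hooks round-trip the canonical string form ("500m").
    b, err := q.MarshalCBOR()
    if err != nil {
        panic(err)
    }
    var out resource.Quantity
    if err := out.UnmarshalCBOR(b); err != nil {
        panic(err)
    }
    fmt.Println(out.String())
}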
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS
index e7e5c152d0..ec414a84b9 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS
@@ -11,6 +11,7 @@ reviewers:
- luxas
- janetkuo
- justinsb
- - ncdc
- soltysh
- dims
+emeritus_reviewers:
+ - ncdc
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref.go
index 15b45ffa84..5005beb12d 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref.go
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref.go
@@ -18,6 +18,7 @@ package v1
import (
"k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/utils/ptr"
)
// IsControlledBy checks if the object has a controllerRef set to the given owner
@@ -36,10 +37,14 @@ func GetControllerOf(controllee Object) *OwnerReference {
return nil
}
cp := *ref
+ cp.Controller = ptr.To(*ref.Controller)
+ if ref.BlockOwnerDeletion != nil {
+ cp.BlockOwnerDeletion = ptr.To(*ref.BlockOwnerDeletion)
+ }
return &cp
}
-// GetControllerOf returns a pointer to the controllerRef if controllee has a controller
+// GetControllerOfNoCopy returns a pointer to the controllerRef if controllee has a controller
func GetControllerOfNoCopy(controllee Object) *OwnerReference {
refs := controllee.GetOwnerReferences()
for i := range refs {
@@ -52,14 +57,12 @@ func GetControllerOfNoCopy(controllee Object) *OwnerReference {
// NewControllerRef creates an OwnerReference pointing to the given owner.
func NewControllerRef(owner Object, gvk schema.GroupVersionKind) *OwnerReference {
- blockOwnerDeletion := true
- isController := true
return &OwnerReference{
APIVersion: gvk.GroupVersion().String(),
Kind: gvk.Kind,
Name: owner.GetName(),
UID: owner.GetUID(),
- BlockOwnerDeletion: &blockOwnerDeletion,
- Controller: &isController,
+ BlockOwnerDeletion: ptr.To(true),
+ Controller: ptr.To(true),
}
}
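
A small sketch of the helpers touched above, showing that GetControllerOf now returns a copy whose Controller/BlockOwnerDeletion pointers can be mutated without affecting the original object; names are illustrative:

package main

import (
    "fmt"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
    owner := &metav1.PartialObjectMetadata{
        ObjectMeta: metav1.ObjectMeta{Name: "web", UID: "1234"},
    }
    gvk := schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"}

    // NewControllerRef now builds the two bool pointers with ptr.To.
    ref := metav1.NewControllerRef(owner, gvk)
    fmt.Println(*ref.Controller, *ref.BlockOwnerDeletion) // true true

    child := &metav1.PartialObjectMetadata{
        ObjectMeta: metav1.ObjectMeta{OwnerReferences: []metav1.OwnerReference{*ref}},
    }

    // The returned reference is a deep copy; mutating it leaves child untouched.
    cp := metav1.GetControllerOf(child)
    *cp.Controller = false
    fmt.Println(*child.OwnerReferences[0].Controller) // still true
}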
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go
index 7736753d66..617b9a5d30 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go
@@ -21,4 +21,4 @@ limitations under the License.
// +groupName=meta.k8s.io
-package v1 // import "k8s.io/apimachinery/pkg/apis/meta/v1"
+package v1
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go
index 75b88890f6..9ee6c05918 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go
@@ -329,10 +329,38 @@ func (m *Duration) XXX_DiscardUnknown() {
var xxx_messageInfo_Duration proto.InternalMessageInfo
+func (m *FieldSelectorRequirement) Reset() { *m = FieldSelectorRequirement{} }
+func (*FieldSelectorRequirement) ProtoMessage() {}
+func (*FieldSelectorRequirement) Descriptor() ([]byte, []int) {
+ return fileDescriptor_a8431b6e0aeeb761, []int{10}
+}
+func (m *FieldSelectorRequirement) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *FieldSelectorRequirement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *FieldSelectorRequirement) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_FieldSelectorRequirement.Merge(m, src)
+}
+func (m *FieldSelectorRequirement) XXX_Size() int {
+ return m.Size()
+}
+func (m *FieldSelectorRequirement) XXX_DiscardUnknown() {
+ xxx_messageInfo_FieldSelectorRequirement.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_FieldSelectorRequirement proto.InternalMessageInfo
+
func (m *FieldsV1) Reset() { *m = FieldsV1{} }
func (*FieldsV1) ProtoMessage() {}
func (*FieldsV1) Descriptor() ([]byte, []int) {
- return fileDescriptor_a8431b6e0aeeb761, []int{10}
+ return fileDescriptor_a8431b6e0aeeb761, []int{11}
}
func (m *FieldsV1) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -360,7 +388,7 @@ var xxx_messageInfo_FieldsV1 proto.InternalMessageInfo
func (m *GetOptions) Reset() { *m = GetOptions{} }
func (*GetOptions) ProtoMessage() {}
func (*GetOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_a8431b6e0aeeb761, []int{11}
+ return fileDescriptor_a8431b6e0aeeb761, []int{12}
}
func (m *GetOptions) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -388,7 +416,7 @@ var xxx_messageInfo_GetOptions proto.InternalMessageInfo
func (m *GroupKind) Reset() { *m = GroupKind{} }
func (*GroupKind) ProtoMessage() {}
func (*GroupKind) Descriptor() ([]byte, []int) {
- return fileDescriptor_a8431b6e0aeeb761, []int{12}
+ return fileDescriptor_a8431b6e0aeeb761, []int{13}
}
func (m *GroupKind) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -416,7 +444,7 @@ var xxx_messageInfo_GroupKind proto.InternalMessageInfo
func (m *GroupResource) Reset() { *m = GroupResource{} }
func (*GroupResource) ProtoMessage() {}
func (*GroupResource) Descriptor() ([]byte, []int) {
- return fileDescriptor_a8431b6e0aeeb761, []int{13}
+ return fileDescriptor_a8431b6e0aeeb761, []int{14}
}
func (m *GroupResource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -444,7 +472,7 @@ var xxx_messageInfo_GroupResource proto.InternalMessageInfo
func (m *GroupVersion) Reset() { *m = GroupVersion{} }
func (*GroupVersion) ProtoMessage() {}
func (*GroupVersion) Descriptor() ([]byte, []int) {
- return fileDescriptor_a8431b6e0aeeb761, []int{14}
+ return fileDescriptor_a8431b6e0aeeb761, []int{15}
}
func (m *GroupVersion) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -472,7 +500,7 @@ var xxx_messageInfo_GroupVersion proto.InternalMessageInfo
func (m *GroupVersionForDiscovery) Reset() { *m = GroupVersionForDiscovery{} }
func (*GroupVersionForDiscovery) ProtoMessage() {}
func (*GroupVersionForDiscovery) Descriptor() ([]byte, []int) {
- return fileDescriptor_a8431b6e0aeeb761, []int{15}
+ return fileDescriptor_a8431b6e0aeeb761, []int{16}
}
func (m *GroupVersionForDiscovery) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -500,7 +528,7 @@ var xxx_messageInfo_GroupVersionForDiscovery proto.InternalMessageInfo
func (m *GroupVersionKind) Reset() { *m = GroupVersionKind{} }
func (*GroupVersionKind) ProtoMessage() {}
func (*GroupVersionKind) Descriptor() ([]byte, []int) {
- return fileDescriptor_a8431b6e0aeeb761, []int{16}
+ return fileDescriptor_a8431b6e0aeeb761, []int{17}
}
func (m *GroupVersionKind) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -528,7 +556,7 @@ var xxx_messageInfo_GroupVersionKind proto.InternalMessageInfo
func (m *GroupVersionResource) Reset() { *m = GroupVersionResource{} }
func (*GroupVersionResource) ProtoMessage() {}
func (*GroupVersionResource) Descriptor() ([]byte, []int) {
- return fileDescriptor_a8431b6e0aeeb761, []int{17}
+ return fileDescriptor_a8431b6e0aeeb761, []int{18}
}
func (m *GroupVersionResource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -556,7 +584,7 @@ var xxx_messageInfo_GroupVersionResource proto.InternalMessageInfo
func (m *LabelSelector) Reset() { *m = LabelSelector{} }
func (*LabelSelector) ProtoMessage() {}
func (*LabelSelector) Descriptor() ([]byte, []int) {
- return fileDescriptor_a8431b6e0aeeb761, []int{18}
+ return fileDescriptor_a8431b6e0aeeb761, []int{19}
}
func (m *LabelSelector) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -584,7 +612,7 @@ var xxx_messageInfo_LabelSelector proto.InternalMessageInfo
func (m *LabelSelectorRequirement) Reset() { *m = LabelSelectorRequirement{} }
func (*LabelSelectorRequirement) ProtoMessage() {}
func (*LabelSelectorRequirement) Descriptor() ([]byte, []int) {
- return fileDescriptor_a8431b6e0aeeb761, []int{19}
+ return fileDescriptor_a8431b6e0aeeb761, []int{20}
}
func (m *LabelSelectorRequirement) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -612,7 +640,7 @@ var xxx_messageInfo_LabelSelectorRequirement proto.InternalMessageInfo
func (m *List) Reset() { *m = List{} }
func (*List) ProtoMessage() {}
func (*List) Descriptor() ([]byte, []int) {
- return fileDescriptor_a8431b6e0aeeb761, []int{20}
+ return fileDescriptor_a8431b6e0aeeb761, []int{21}
}
func (m *List) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -640,7 +668,7 @@ var xxx_messageInfo_List proto.InternalMessageInfo
func (m *ListMeta) Reset() { *m = ListMeta{} }
func (*ListMeta) ProtoMessage() {}
func (*ListMeta) Descriptor() ([]byte, []int) {
- return fileDescriptor_a8431b6e0aeeb761, []int{21}
+ return fileDescriptor_a8431b6e0aeeb761, []int{22}
}
func (m *ListMeta) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -668,7 +696,7 @@ var xxx_messageInfo_ListMeta proto.InternalMessageInfo
func (m *ListOptions) Reset() { *m = ListOptions{} }
func (*ListOptions) ProtoMessage() {}
func (*ListOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_a8431b6e0aeeb761, []int{22}
+ return fileDescriptor_a8431b6e0aeeb761, []int{23}
}
func (m *ListOptions) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -696,7 +724,7 @@ var xxx_messageInfo_ListOptions proto.InternalMessageInfo
func (m *ManagedFieldsEntry) Reset() { *m = ManagedFieldsEntry{} }
func (*ManagedFieldsEntry) ProtoMessage() {}
func (*ManagedFieldsEntry) Descriptor() ([]byte, []int) {
- return fileDescriptor_a8431b6e0aeeb761, []int{23}
+ return fileDescriptor_a8431b6e0aeeb761, []int{24}
}
func (m *ManagedFieldsEntry) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -724,7 +752,7 @@ var xxx_messageInfo_ManagedFieldsEntry proto.InternalMessageInfo
func (m *MicroTime) Reset() { *m = MicroTime{} }
func (*MicroTime) ProtoMessage() {}
func (*MicroTime) Descriptor() ([]byte, []int) {
- return fileDescriptor_a8431b6e0aeeb761, []int{24}
+ return fileDescriptor_a8431b6e0aeeb761, []int{25}
}
func (m *MicroTime) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_MicroTime.Unmarshal(m, b)
@@ -747,7 +775,7 @@ var xxx_messageInfo_MicroTime proto.InternalMessageInfo
func (m *ObjectMeta) Reset() { *m = ObjectMeta{} }
func (*ObjectMeta) ProtoMessage() {}
func (*ObjectMeta) Descriptor() ([]byte, []int) {
- return fileDescriptor_a8431b6e0aeeb761, []int{25}
+ return fileDescriptor_a8431b6e0aeeb761, []int{26}
}
func (m *ObjectMeta) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -775,7 +803,7 @@ var xxx_messageInfo_ObjectMeta proto.InternalMessageInfo
func (m *OwnerReference) Reset() { *m = OwnerReference{} }
func (*OwnerReference) ProtoMessage() {}
func (*OwnerReference) Descriptor() ([]byte, []int) {
- return fileDescriptor_a8431b6e0aeeb761, []int{26}
+ return fileDescriptor_a8431b6e0aeeb761, []int{27}
}
func (m *OwnerReference) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -803,7 +831,7 @@ var xxx_messageInfo_OwnerReference proto.InternalMessageInfo
func (m *PartialObjectMetadata) Reset() { *m = PartialObjectMetadata{} }
func (*PartialObjectMetadata) ProtoMessage() {}
func (*PartialObjectMetadata) Descriptor() ([]byte, []int) {
- return fileDescriptor_a8431b6e0aeeb761, []int{27}
+ return fileDescriptor_a8431b6e0aeeb761, []int{28}
}
func (m *PartialObjectMetadata) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -831,7 +859,7 @@ var xxx_messageInfo_PartialObjectMetadata proto.InternalMessageInfo
func (m *PartialObjectMetadataList) Reset() { *m = PartialObjectMetadataList{} }
func (*PartialObjectMetadataList) ProtoMessage() {}
func (*PartialObjectMetadataList) Descriptor() ([]byte, []int) {
- return fileDescriptor_a8431b6e0aeeb761, []int{28}
+ return fileDescriptor_a8431b6e0aeeb761, []int{29}
}
func (m *PartialObjectMetadataList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -859,7 +887,7 @@ var xxx_messageInfo_PartialObjectMetadataList proto.InternalMessageInfo
func (m *Patch) Reset() { *m = Patch{} }
func (*Patch) ProtoMessage() {}
func (*Patch) Descriptor() ([]byte, []int) {
- return fileDescriptor_a8431b6e0aeeb761, []int{29}
+ return fileDescriptor_a8431b6e0aeeb761, []int{30}
}
func (m *Patch) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -887,7 +915,7 @@ var xxx_messageInfo_Patch proto.InternalMessageInfo
func (m *PatchOptions) Reset() { *m = PatchOptions{} }
func (*PatchOptions) ProtoMessage() {}
func (*PatchOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_a8431b6e0aeeb761, []int{30}
+ return fileDescriptor_a8431b6e0aeeb761, []int{31}
}
func (m *PatchOptions) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -915,7 +943,7 @@ var xxx_messageInfo_PatchOptions proto.InternalMessageInfo
func (m *Preconditions) Reset() { *m = Preconditions{} }
func (*Preconditions) ProtoMessage() {}
func (*Preconditions) Descriptor() ([]byte, []int) {
- return fileDescriptor_a8431b6e0aeeb761, []int{31}
+ return fileDescriptor_a8431b6e0aeeb761, []int{32}
}
func (m *Preconditions) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -943,7 +971,7 @@ var xxx_messageInfo_Preconditions proto.InternalMessageInfo
func (m *RootPaths) Reset() { *m = RootPaths{} }
func (*RootPaths) ProtoMessage() {}
func (*RootPaths) Descriptor() ([]byte, []int) {
- return fileDescriptor_a8431b6e0aeeb761, []int{32}
+ return fileDescriptor_a8431b6e0aeeb761, []int{33}
}
func (m *RootPaths) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -971,7 +999,7 @@ var xxx_messageInfo_RootPaths proto.InternalMessageInfo
func (m *ServerAddressByClientCIDR) Reset() { *m = ServerAddressByClientCIDR{} }
func (*ServerAddressByClientCIDR) ProtoMessage() {}
func (*ServerAddressByClientCIDR) Descriptor() ([]byte, []int) {
- return fileDescriptor_a8431b6e0aeeb761, []int{33}
+ return fileDescriptor_a8431b6e0aeeb761, []int{34}
}
func (m *ServerAddressByClientCIDR) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -999,7 +1027,7 @@ var xxx_messageInfo_ServerAddressByClientCIDR proto.InternalMessageInfo
func (m *Status) Reset() { *m = Status{} }
func (*Status) ProtoMessage() {}
func (*Status) Descriptor() ([]byte, []int) {
- return fileDescriptor_a8431b6e0aeeb761, []int{34}
+ return fileDescriptor_a8431b6e0aeeb761, []int{35}
}
func (m *Status) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1027,7 +1055,7 @@ var xxx_messageInfo_Status proto.InternalMessageInfo
func (m *StatusCause) Reset() { *m = StatusCause{} }
func (*StatusCause) ProtoMessage() {}
func (*StatusCause) Descriptor() ([]byte, []int) {
- return fileDescriptor_a8431b6e0aeeb761, []int{35}
+ return fileDescriptor_a8431b6e0aeeb761, []int{36}
}
func (m *StatusCause) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1055,7 +1083,7 @@ var xxx_messageInfo_StatusCause proto.InternalMessageInfo
func (m *StatusDetails) Reset() { *m = StatusDetails{} }
func (*StatusDetails) ProtoMessage() {}
func (*StatusDetails) Descriptor() ([]byte, []int) {
- return fileDescriptor_a8431b6e0aeeb761, []int{36}
+ return fileDescriptor_a8431b6e0aeeb761, []int{37}
}
func (m *StatusDetails) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1083,7 +1111,7 @@ var xxx_messageInfo_StatusDetails proto.InternalMessageInfo
func (m *TableOptions) Reset() { *m = TableOptions{} }
func (*TableOptions) ProtoMessage() {}
func (*TableOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_a8431b6e0aeeb761, []int{37}
+ return fileDescriptor_a8431b6e0aeeb761, []int{38}
}
func (m *TableOptions) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1111,7 +1139,7 @@ var xxx_messageInfo_TableOptions proto.InternalMessageInfo
func (m *Time) Reset() { *m = Time{} }
func (*Time) ProtoMessage() {}
func (*Time) Descriptor() ([]byte, []int) {
- return fileDescriptor_a8431b6e0aeeb761, []int{38}
+ return fileDescriptor_a8431b6e0aeeb761, []int{39}
}
func (m *Time) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Time.Unmarshal(m, b)
@@ -1134,7 +1162,7 @@ var xxx_messageInfo_Time proto.InternalMessageInfo
func (m *Timestamp) Reset() { *m = Timestamp{} }
func (*Timestamp) ProtoMessage() {}
func (*Timestamp) Descriptor() ([]byte, []int) {
- return fileDescriptor_a8431b6e0aeeb761, []int{39}
+ return fileDescriptor_a8431b6e0aeeb761, []int{40}
}
func (m *Timestamp) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1162,7 +1190,7 @@ var xxx_messageInfo_Timestamp proto.InternalMessageInfo
func (m *TypeMeta) Reset() { *m = TypeMeta{} }
func (*TypeMeta) ProtoMessage() {}
func (*TypeMeta) Descriptor() ([]byte, []int) {
- return fileDescriptor_a8431b6e0aeeb761, []int{40}
+ return fileDescriptor_a8431b6e0aeeb761, []int{41}
}
func (m *TypeMeta) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1190,7 +1218,7 @@ var xxx_messageInfo_TypeMeta proto.InternalMessageInfo
func (m *UpdateOptions) Reset() { *m = UpdateOptions{} }
func (*UpdateOptions) ProtoMessage() {}
func (*UpdateOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_a8431b6e0aeeb761, []int{41}
+ return fileDescriptor_a8431b6e0aeeb761, []int{42}
}
func (m *UpdateOptions) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1218,7 +1246,7 @@ var xxx_messageInfo_UpdateOptions proto.InternalMessageInfo
func (m *Verbs) Reset() { *m = Verbs{} }
func (*Verbs) ProtoMessage() {}
func (*Verbs) Descriptor() ([]byte, []int) {
- return fileDescriptor_a8431b6e0aeeb761, []int{42}
+ return fileDescriptor_a8431b6e0aeeb761, []int{43}
}
func (m *Verbs) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1246,7 +1274,7 @@ var xxx_messageInfo_Verbs proto.InternalMessageInfo
func (m *WatchEvent) Reset() { *m = WatchEvent{} }
func (*WatchEvent) ProtoMessage() {}
func (*WatchEvent) Descriptor() ([]byte, []int) {
- return fileDescriptor_a8431b6e0aeeb761, []int{43}
+ return fileDescriptor_a8431b6e0aeeb761, []int{44}
}
func (m *WatchEvent) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1282,6 +1310,7 @@ func init() {
proto.RegisterType((*CreateOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.CreateOptions")
proto.RegisterType((*DeleteOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.DeleteOptions")
proto.RegisterType((*Duration)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Duration")
+ proto.RegisterType((*FieldSelectorRequirement)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.FieldSelectorRequirement")
proto.RegisterType((*FieldsV1)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.FieldsV1")
proto.RegisterType((*GetOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GetOptions")
proto.RegisterType((*GroupKind)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupKind")
@@ -1326,186 +1355,190 @@ func init() {
}
var fileDescriptor_a8431b6e0aeeb761 = []byte{
- // 2853 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x1a, 0x4b, 0x6f, 0x24, 0x47,
- 0xd9, 0x3d, 0x0f, 0x7b, 0xe6, 0x9b, 0x19, 0x3f, 0x6a, 0xbd, 0x30, 0x6b, 0x84, 0xc7, 0xe9, 0x44,
- 0xd1, 0x06, 0x92, 0x71, 0x76, 0x09, 0xd1, 0x66, 0x43, 0x02, 0x1e, 0xcf, 0x7a, 0xe3, 0x64, 0x1d,
- 0x5b, 0xe5, 0xdd, 0x05, 0x42, 0x84, 0xd2, 0x9e, 0x2e, 0x8f, 0x1b, 0xf7, 0x74, 0x4f, 0xaa, 0x7a,
- 0xbc, 0x19, 0x38, 0x90, 0x03, 0x08, 0x90, 0x50, 0x14, 0x6e, 0x9c, 0x50, 0x22, 0xf8, 0x01, 0x88,
- 0x13, 0x77, 0x90, 0xc8, 0x31, 0x88, 0x4b, 0x24, 0xd0, 0x28, 0x31, 0x07, 0x8e, 0x88, 0xab, 0x85,
- 0x04, 0xaa, 0x47, 0x77, 0x57, 0xcf, 0x63, 0xdd, 0x93, 0x5d, 0x22, 0x6e, 0xd3, 0xdf, 0xbb, 0xaa,
- 0xbe, 0xfa, 0xea, 0x7b, 0x0c, 0x3c, 0x73, 0x7c, 0x8d, 0xd5, 0x1d, 0x7f, 0xdd, 0xea, 0x3a, 0x1d,
- 0xab, 0x75, 0xe4, 0x78, 0x84, 0xf6, 0xd7, 0xbb, 0xc7, 0x6d, 0x0e, 0x60, 0xeb, 0x1d, 0x12, 0x58,
- 0xeb, 0x27, 0x57, 0xd6, 0xdb, 0xc4, 0x23, 0xd4, 0x0a, 0x88, 0x5d, 0xef, 0x52, 0x3f, 0xf0, 0xd1,
- 0x63, 0x92, 0xab, 0xae, 0x73, 0xd5, 0xbb, 0xc7, 0x6d, 0x0e, 0x60, 0x75, 0xce, 0x55, 0x3f, 0xb9,
- 0xb2, 0xf2, 0x54, 0xdb, 0x09, 0x8e, 0x7a, 0x07, 0xf5, 0x96, 0xdf, 0x59, 0x6f, 0xfb, 0x6d, 0x7f,
- 0x5d, 0x30, 0x1f, 0xf4, 0x0e, 0xc5, 0x97, 0xf8, 0x10, 0xbf, 0xa4, 0xd0, 0x95, 0xf5, 0x49, 0xa6,
- 0xd0, 0x9e, 0x17, 0x38, 0x1d, 0x32, 0x6c, 0xc5, 0xca, 0xb3, 0xe7, 0x31, 0xb0, 0xd6, 0x11, 0xe9,
- 0x58, 0xc3, 0x7c, 0xe6, 0x9f, 0xb2, 0x50, 0xd8, 0xd8, 0xdb, 0xbe, 0x49, 0xfd, 0x5e, 0x17, 0xad,
- 0x41, 0xce, 0xb3, 0x3a, 0xa4, 0x6a, 0xac, 0x19, 0x97, 0x8b, 0x8d, 0xf2, 0x07, 0x83, 0xda, 0xcc,
- 0xe9, 0xa0, 0x96, 0x7b, 0xd5, 0xea, 0x10, 0x2c, 0x30, 0xc8, 0x85, 0xc2, 0x09, 0xa1, 0xcc, 0xf1,
- 0x3d, 0x56, 0xcd, 0xac, 0x65, 0x2f, 0x97, 0xae, 0xbe, 0x58, 0x4f, 0xb3, 0xfe, 0xba, 0x50, 0x70,
- 0x57, 0xb2, 0x6e, 0xf9, 0xb4, 0xe9, 0xb0, 0x96, 0x7f, 0x42, 0x68, 0xbf, 0xb1, 0xa8, 0xb4, 0x14,
- 0x14, 0x92, 0xe1, 0x48, 0x03, 0xfa, 0x91, 0x01, 0x8b, 0x5d, 0x4a, 0x0e, 0x09, 0xa5, 0xc4, 0x56,
- 0xf8, 0x6a, 0x76, 0xcd, 0x78, 0x08, 0x6a, 0xab, 0x4a, 0xed, 0xe2, 0xde, 0x90, 0x7c, 0x3c, 0xa2,
- 0x11, 0xfd, 0xda, 0x80, 0x15, 0x46, 0xe8, 0x09, 0xa1, 0x1b, 0xb6, 0x4d, 0x09, 0x63, 0x8d, 0xfe,
- 0xa6, 0xeb, 0x10, 0x2f, 0xd8, 0xdc, 0x6e, 0x62, 0x56, 0xcd, 0x89, 0x7d, 0xf8, 0x7a, 0x3a, 0x83,
- 0xf6, 0x27, 0xc9, 0x69, 0x98, 0xca, 0xa2, 0x95, 0x89, 0x24, 0x0c, 0xdf, 0xc7, 0x0c, 0xf3, 0x10,
- 0xca, 0xe1, 0x41, 0xde, 0x72, 0x58, 0x80, 0xee, 0xc2, 0x6c, 0x9b, 0x7f, 0xb0, 0xaa, 0x21, 0x0c,
- 0xac, 0xa7, 0x33, 0x30, 0x94, 0xd1, 0x98, 0x57, 0xf6, 0xcc, 0x8a, 0x4f, 0x86, 0x95, 0x34, 0xf3,
- 0x67, 0x39, 0x28, 0x6d, 0xec, 0x6d, 0x63, 0xc2, 0xfc, 0x1e, 0x6d, 0x91, 0x14, 0x4e, 0x73, 0x0d,
- 0xca, 0xcc, 0xf1, 0xda, 0x3d, 0xd7, 0xa2, 0x1c, 0x5a, 0x9d, 0x15, 0x94, 0xcb, 0x8a, 0xb2, 0xbc,
- 0xaf, 0xe1, 0x70, 0x82, 0x12, 0x5d, 0x05, 0xe0, 0x12, 0x58, 0xd7, 0x6a, 0x11, 0xbb, 0x9a, 0x59,
- 0x33, 0x2e, 0x17, 0x1a, 0x48, 0xf1, 0xc1, 0xab, 0x11, 0x06, 0x6b, 0x54, 0xe8, 0x51, 0xc8, 0x0b,
- 0x4b, 0xab, 0x05, 0xa1, 0xa6, 0xa2, 0xc8, 0xf3, 0x62, 0x19, 0x58, 0xe2, 0xd0, 0x13, 0x30, 0xa7,
- 0xbc, 0xac, 0x5a, 0x14, 0x64, 0x0b, 0x8a, 0x6c, 0x2e, 0x74, 0x83, 0x10, 0xcf, 0xd7, 0x77, 0xec,
- 0x78, 0xb6, 0xf0, 0x3b, 0x6d, 0x7d, 0xaf, 0x38, 0x9e, 0x8d, 0x05, 0x06, 0xdd, 0x82, 0xfc, 0x09,
- 0xa1, 0x07, 0xdc, 0x13, 0xb8, 0x6b, 0x7e, 0x39, 0xdd, 0x46, 0xdf, 0xe5, 0x2c, 0x8d, 0x22, 0x37,
- 0x4d, 0xfc, 0xc4, 0x52, 0x08, 0xaa, 0x03, 0xb0, 0x23, 0x9f, 0x06, 0x62, 0x79, 0xd5, 0xfc, 0x5a,
- 0xf6, 0x72, 0xb1, 0x31, 0xcf, 0xd7, 0xbb, 0x1f, 0x41, 0xb1, 0x46, 0xc1, 0xe9, 0x5b, 0x56, 0x40,
- 0xda, 0x3e, 0x75, 0x08, 0xab, 0xce, 0xc5, 0xf4, 0x9b, 0x11, 0x14, 0x6b, 0x14, 0xe8, 0x65, 0x40,
- 0x2c, 0xf0, 0xa9, 0xd5, 0x26, 0x6a, 0xa9, 0x2f, 0x59, 0xec, 0xa8, 0x0a, 0x62, 0x75, 0x2b, 0x6a,
- 0x75, 0x68, 0x7f, 0x84, 0x02, 0x8f, 0xe1, 0x32, 0x7f, 0x67, 0xc0, 0x82, 0xe6, 0x0b, 0xc2, 0xef,
- 0xae, 0x41, 0xb9, 0xad, 0xdd, 0x3a, 0xe5, 0x17, 0xd1, 0x69, 0xeb, 0x37, 0x12, 0x27, 0x28, 0x11,
- 0x81, 0x22, 0x55, 0x92, 0xc2, 0xe8, 0x72, 0x25, 0xb5, 0xd3, 0x86, 0x36, 0xc4, 0x9a, 0x34, 0x20,
- 0xc3, 0xb1, 0x64, 0xf3, 0x1f, 0x86, 0x70, 0xe0, 0x30, 0xde, 0xa0, 0xcb, 0x5a, 0x4c, 0x33, 0xc4,
- 0xf6, 0x95, 0x27, 0xc4, 0xa3, 0x73, 0x02, 0x41, 0xe6, 0xff, 0x22, 0x10, 0x5c, 0x2f, 0xfc, 0xf2,
- 0xbd, 0xda, 0xcc, 0xdb, 0x7f, 0x5b, 0x9b, 0x31, 0x7f, 0x61, 0x40, 0x79, 0xa3, 0xdb, 0x75, 0xfb,
- 0xbb, 0xdd, 0x40, 0x2c, 0xc0, 0x84, 0x59, 0x9b, 0xf6, 0x71, 0xcf, 0x53, 0x0b, 0x05, 0x7e, 0xbf,
- 0x9b, 0x02, 0x82, 0x15, 0x86, 0xdf, 0x9f, 0x43, 0x9f, 0xb6, 0x88, 0xba, 0x6e, 0xd1, 0xfd, 0xd9,
- 0xe2, 0x40, 0x2c, 0x71, 0xfc, 0x90, 0x0f, 0x1d, 0xe2, 0xda, 0x3b, 0x96, 0x67, 0xb5, 0x09, 0x55,
- 0x97, 0x23, 0xda, 0xfa, 0x2d, 0x0d, 0x87, 0x13, 0x94, 0xe6, 0x7f, 0x32, 0x50, 0xdc, 0xf4, 0x3d,
- 0xdb, 0x09, 0xd4, 0xe5, 0x0a, 0xfa, 0xdd, 0x91, 0xe0, 0x71, 0xbb, 0xdf, 0x25, 0x58, 0x60, 0xd0,
- 0x73, 0x30, 0xcb, 0x02, 0x2b, 0xe8, 0x31, 0x61, 0x4f, 0xb1, 0xf1, 0x48, 0x18, 0x96, 0xf6, 0x05,
- 0xf4, 0x6c, 0x50, 0x5b, 0x88, 0xc4, 0x49, 0x10, 0x56, 0x0c, 0xdc, 0xd3, 0xfd, 0x03, 0xb1, 0x51,
- 0xf6, 0x4d, 0xf9, 0xec, 0x85, 0xef, 0x47, 0x36, 0xf6, 0xf4, 0xdd, 0x11, 0x0a, 0x3c, 0x86, 0x0b,
- 0x9d, 0x00, 0x72, 0x2d, 0x16, 0xdc, 0xa6, 0x96, 0xc7, 0x84, 0xae, 0xdb, 0x4e, 0x87, 0xa8, 0x0b,
- 0xff, 0xa5, 0x74, 0x27, 0xce, 0x39, 0x62, 0xbd, 0xb7, 0x46, 0xa4, 0xe1, 0x31, 0x1a, 0xd0, 0xe3,
- 0x30, 0x4b, 0x89, 0xc5, 0x7c, 0xaf, 0x9a, 0x17, 0xcb, 0x8f, 0xa2, 0x32, 0x16, 0x50, 0xac, 0xb0,
- 0x3c, 0xa0, 0x75, 0x08, 0x63, 0x56, 0x3b, 0x0c, 0xaf, 0x51, 0x40, 0xdb, 0x91, 0x60, 0x1c, 0xe2,
- 0xcd, 0xdf, 0x1a, 0x50, 0xd9, 0xa4, 0xc4, 0x0a, 0xc8, 0x34, 0x6e, 0xf1, 0xa9, 0x4f, 0x1c, 0x6d,
- 0xc0, 0x82, 0xf8, 0xbe, 0x6b, 0xb9, 0x8e, 0x2d, 0xcf, 0x20, 0x27, 0x98, 0x3f, 0xaf, 0x98, 0x17,
- 0xb6, 0x92, 0x68, 0x3c, 0x4c, 0x6f, 0xfe, 0x24, 0x0b, 0x95, 0x26, 0x71, 0x49, 0x6c, 0xf2, 0x16,
- 0xa0, 0x36, 0xb5, 0x5a, 0x64, 0x8f, 0x50, 0xc7, 0xb7, 0xf7, 0x49, 0xcb, 0xf7, 0x6c, 0x26, 0xdc,
- 0x28, 0xdb, 0xf8, 0x1c, 0xdf, 0xdf, 0x9b, 0x23, 0x58, 0x3c, 0x86, 0x03, 0xb9, 0x50, 0xe9, 0x52,
- 0xf1, 0x5b, 0xec, 0xb9, 0xf4, 0xb2, 0xd2, 0xd5, 0xaf, 0xa4, 0x3b, 0xd2, 0x3d, 0x9d, 0xb5, 0xb1,
- 0x74, 0x3a, 0xa8, 0x55, 0x12, 0x20, 0x9c, 0x14, 0x8e, 0xbe, 0x01, 0x8b, 0x3e, 0xed, 0x1e, 0x59,
- 0x5e, 0x93, 0x74, 0x89, 0x67, 0x13, 0x2f, 0x60, 0x62, 0x23, 0x0b, 0x8d, 0x65, 0x9e, 0x8b, 0xec,
- 0x0e, 0xe1, 0xf0, 0x08, 0x35, 0x7a, 0x0d, 0x96, 0xba, 0xd4, 0xef, 0x5a, 0x6d, 0xb1, 0x31, 0x7b,
- 0xbe, 0xeb, 0xb4, 0xfa, 0x6a, 0x3b, 0x9f, 0x3c, 0x1d, 0xd4, 0x96, 0xf6, 0x86, 0x91, 0x67, 0x83,
- 0xda, 0x05, 0xb1, 0x75, 0x1c, 0x12, 0x23, 0xf1, 0xa8, 0x18, 0xcd, 0x0d, 0xf2, 0x93, 0xdc, 0xc0,
- 0xdc, 0x86, 0x42, 0xb3, 0xa7, 0xee, 0xc4, 0x0b, 0x50, 0xb0, 0xd5, 0x6f, 0xb5, 0xf3, 0xe1, 0xe5,
- 0x8c, 0x68, 0xce, 0x06, 0xb5, 0x0a, 0x4f, 0x3f, 0xeb, 0x21, 0x00, 0x47, 0x2c, 0xe6, 0xe3, 0x50,
- 0x10, 0x07, 0xcf, 0xee, 0x5e, 0x41, 0x8b, 0x90, 0xc5, 0xd6, 0x3d, 0x21, 0xa5, 0x8c, 0xf9, 0x4f,
- 0x2d, 0x8a, 0xed, 0x02, 0xdc, 0x24, 0x41, 0x78, 0xf0, 0x1b, 0xb0, 0x10, 0x86, 0xf2, 0xe4, 0x0b,
- 0x13, 0x79, 0x13, 0x4e, 0xa2, 0xf1, 0x30, 0xbd, 0xf9, 0x3a, 0x14, 0xc5, 0x2b, 0xc4, 0x9f, 0xf0,
- 0x38, 0x5d, 0x30, 0xee, 0x93, 0x2e, 0x84, 0x39, 0x40, 0x66, 0x52, 0x0e, 0xa0, 0x99, 0xeb, 0x42,
- 0x45, 0xf2, 0x86, 0x09, 0x52, 0x2a, 0x0d, 0x4f, 0x42, 0x21, 0x34, 0x53, 0x69, 0x89, 0x12, 0xe3,
- 0x50, 0x10, 0x8e, 0x28, 0x34, 0x6d, 0x47, 0x90, 0x78, 0x51, 0xd3, 0x29, 0xd3, 0xb2, 0x9f, 0xcc,
- 0xfd, 0xb3, 0x1f, 0x4d, 0xd3, 0x0f, 0xa1, 0x3a, 0x29, 0x9b, 0x7e, 0x80, 0x37, 0x3f, 0xbd, 0x29,
- 0xe6, 0x3b, 0x06, 0x2c, 0xea, 0x92, 0xd2, 0x1f, 0x5f, 0x7a, 0x25, 0xe7, 0x67, 0x7b, 0xda, 0x8e,
- 0xfc, 0xca, 0x80, 0xe5, 0xc4, 0xd2, 0xa6, 0x3a, 0xf1, 0x29, 0x8c, 0xd2, 0x9d, 0x23, 0x3b, 0x85,
- 0x73, 0xfc, 0x25, 0x03, 0x95, 0x5b, 0xd6, 0x01, 0x71, 0xf7, 0x89, 0x4b, 0x5a, 0x81, 0x4f, 0xd1,
- 0x0f, 0xa0, 0xd4, 0xb1, 0x82, 0xd6, 0x91, 0x80, 0x86, 0x95, 0x41, 0x33, 0x5d, 0xb0, 0x4b, 0x48,
- 0xaa, 0xef, 0xc4, 0x62, 0x6e, 0x78, 0x01, 0xed, 0x37, 0x2e, 0x28, 0x93, 0x4a, 0x1a, 0x06, 0xeb,
- 0xda, 0x44, 0x39, 0x27, 0xbe, 0x6f, 0xbc, 0xd5, 0xe5, 0x69, 0xcb, 0xf4, 0x55, 0x64, 0xc2, 0x04,
- 0x4c, 0xde, 0xec, 0x39, 0x94, 0x74, 0x88, 0x17, 0xc4, 0xe5, 0xdc, 0xce, 0x90, 0x7c, 0x3c, 0xa2,
- 0x71, 0xe5, 0x45, 0x58, 0x1c, 0x36, 0x9e, 0xc7, 0x9f, 0x63, 0xd2, 0x97, 0xe7, 0x85, 0xf9, 0x4f,
- 0xb4, 0x0c, 0xf9, 0x13, 0xcb, 0xed, 0xa9, 0xdb, 0x88, 0xe5, 0xc7, 0xf5, 0xcc, 0x35, 0xc3, 0xfc,
- 0x8d, 0x01, 0xd5, 0x49, 0x86, 0xa0, 0x2f, 0x6a, 0x82, 0x1a, 0x25, 0x65, 0x55, 0xf6, 0x15, 0xd2,
- 0x97, 0x52, 0x6f, 0x40, 0xc1, 0xef, 0xf2, 0x9c, 0xc2, 0xa7, 0xea, 0xd4, 0x9f, 0x08, 0x4f, 0x72,
- 0x57, 0xc1, 0xcf, 0x06, 0xb5, 0x8b, 0x09, 0xf1, 0x21, 0x02, 0x47, 0xac, 0x3c, 0x52, 0x0b, 0x7b,
- 0xf8, 0xeb, 0x11, 0x45, 0xea, 0xbb, 0x02, 0x82, 0x15, 0xc6, 0xfc, 0xbd, 0x01, 0x39, 0x91, 0x90,
- 0xbf, 0x0e, 0x05, 0xbe, 0x7f, 0xb6, 0x15, 0x58, 0xc2, 0xae, 0xd4, 0xa5, 0x20, 0xe7, 0xde, 0x21,
- 0x81, 0x15, 0x7b, 0x5b, 0x08, 0xc1, 0x91, 0x44, 0x84, 0x21, 0xef, 0x04, 0xa4, 0x13, 0x1e, 0xe4,
- 0x53, 0x13, 0x45, 0xab, 0x46, 0x44, 0x1d, 0x5b, 0xf7, 0x6e, 0xbc, 0x15, 0x10, 0x8f, 0x1f, 0x46,
- 0x7c, 0x35, 0xb6, 0xb9, 0x0c, 0x2c, 0x45, 0x99, 0xff, 0x32, 0x20, 0x52, 0xc5, 0x9d, 0x9f, 0x11,
- 0xf7, 0xf0, 0x96, 0xe3, 0x1d, 0xab, 0x6d, 0x8d, 0xcc, 0xd9, 0x57, 0x70, 0x1c, 0x51, 0x8c, 0x7b,
- 0x1e, 0x32, 0xd3, 0x3d, 0x0f, 0x5c, 0x61, 0xcb, 0xf7, 0x02, 0xc7, 0xeb, 0x8d, 0xdc, 0xb6, 0x4d,
- 0x05, 0xc7, 0x11, 0x05, 0x4f, 0x44, 0x28, 0xe9, 0x58, 0x8e, 0xe7, 0x78, 0x6d, 0xbe, 0x88, 0x4d,
- 0xbf, 0xe7, 0x05, 0xe2, 0x45, 0x56, 0x89, 0x08, 0x1e, 0xc1, 0xe2, 0x31, 0x1c, 0xe6, 0xbf, 0x73,
- 0x50, 0xe2, 0x6b, 0x0e, 0xdf, 0xb9, 0xe7, 0xa1, 0xe2, 0xea, 0x5e, 0xa0, 0xd6, 0x7e, 0x51, 0x99,
- 0x92, 0xbc, 0xd7, 0x38, 0x49, 0xcb, 0x99, 0x45, 0x0a, 0x15, 0x31, 0x67, 0x92, 0xcc, 0x5b, 0x3a,
- 0x12, 0x27, 0x69, 0x79, 0xf4, 0xba, 0xc7, 0xef, 0x87, 0xca, 0x4c, 0xa2, 0x23, 0xfa, 0x26, 0x07,
- 0x62, 0x89, 0x43, 0x3b, 0x70, 0xc1, 0x72, 0x5d, 0xff, 0x9e, 0x00, 0x36, 0x7c, 0xff, 0xb8, 0x63,
- 0xd1, 0x63, 0x26, 0x8a, 0xe9, 0x42, 0xe3, 0x0b, 0x8a, 0xe5, 0xc2, 0xc6, 0x28, 0x09, 0x1e, 0xc7,
- 0x37, 0xee, 0xd8, 0x72, 0x53, 0x1e, 0xdb, 0x11, 0x2c, 0x0f, 0x81, 0xc4, 0x2d, 0x57, 0x95, 0xed,
- 0x33, 0x4a, 0xce, 0x32, 0x1e, 0x43, 0x73, 0x36, 0x01, 0x8e, 0xc7, 0x4a, 0x44, 0xd7, 0x61, 0x9e,
- 0x7b, 0xb2, 0xdf, 0x0b, 0xc2, 0xbc, 0x33, 0x2f, 0x8e, 0x1b, 0x9d, 0x0e, 0x6a, 0xf3, 0xb7, 0x13,
- 0x18, 0x3c, 0x44, 0xc9, 0x37, 0xd7, 0x75, 0x3a, 0x4e, 0x50, 0x9d, 0x13, 0x2c, 0xd1, 0xe6, 0xde,
- 0xe2, 0x40, 0x2c, 0x71, 0x09, 0x0f, 0x2c, 0x9c, 0xeb, 0x81, 0x9b, 0xb0, 0xc4, 0x88, 0x67, 0x6f,
- 0x7b, 0x4e, 0xe0, 0x58, 0xee, 0x8d, 0x13, 0x91, 0x55, 0x96, 0xc4, 0x41, 0x5c, 0xe4, 0x29, 0xe1,
- 0xfe, 0x30, 0x12, 0x8f, 0xd2, 0x9b, 0x7f, 0xce, 0x02, 0x92, 0x09, 0xbb, 0x2d, 0x93, 0x32, 0x19,
- 0x17, 0x79, 0x59, 0xa1, 0x12, 0x7e, 0x63, 0xa8, 0xac, 0x50, 0xb9, 0x7e, 0x88, 0x47, 0x3b, 0x50,
- 0x94, 0xf1, 0x29, 0xbe, 0x73, 0xeb, 0x8a, 0xb8, 0xb8, 0x1b, 0x22, 0xce, 0x06, 0xb5, 0x95, 0x84,
- 0x9a, 0x08, 0x23, 0x4a, 0xbe, 0x58, 0x02, 0xba, 0x0a, 0x60, 0x75, 0x1d, 0xbd, 0xe9, 0x57, 0x8c,
- 0x5b, 0x3f, 0x71, 0xf9, 0x8e, 0x35, 0x2a, 0xf4, 0x12, 0xe4, 0x82, 0x4f, 0x57, 0x96, 0x15, 0x44,
- 0xd5, 0xc9, 0x8b, 0x30, 0x21, 0x81, 0x6b, 0x17, 0x97, 0x82, 0x71, 0xb3, 0x54, 0x45, 0x15, 0x69,
- 0xdf, 0x8a, 0x30, 0x58, 0xa3, 0x42, 0xdf, 0x82, 0xc2, 0xa1, 0xca, 0x67, 0xc5, 0xe9, 0xa6, 0x8e,
- 0xb3, 0x61, 0x16, 0x2c, 0xfb, 0x0e, 0xe1, 0x17, 0x8e, 0xa4, 0xa1, 0xaf, 0x42, 0x89, 0xf5, 0x0e,
- 0xa2, 0x14, 0x40, 0xba, 0x44, 0xf4, 0xde, 0xee, 0xc7, 0x28, 0xac, 0xd3, 0x99, 0x6f, 0x42, 0x71,
- 0xc7, 0x69, 0x51, 0x5f, 0x14, 0x92, 0x4f, 0xc0, 0x1c, 0x4b, 0x54, 0x49, 0xd1, 0x49, 0x86, 0xae,
- 0x1a, 0xe2, 0xb9, 0x8f, 0x7a, 0x96, 0xe7, 0xcb, 0x5a, 0x28, 0x1f, 0xfb, 0xe8, 0xab, 0x1c, 0x88,
- 0x25, 0xee, 0xfa, 0x32, 0xcf, 0x32, 0x7e, 0xfa, 0x7e, 0x6d, 0xe6, 0xdd, 0xf7, 0x6b, 0x33, 0xef,
- 0xbd, 0xaf, 0x32, 0x8e, 0x3f, 0x00, 0xc0, 0xee, 0xc1, 0xf7, 0x48, 0x4b, 0xc6, 0xee, 0x54, 0xbd,
- 0xc1, 0xb0, 0x25, 0x2d, 0x7a, 0x83, 0x99, 0xa1, 0xcc, 0x51, 0xc3, 0xe1, 0x04, 0x25, 0x5a, 0x87,
- 0x62, 0xd4, 0xf5, 0x53, 0xfe, 0xb1, 0x14, 0xfa, 0x5b, 0xd4, 0x1a, 0xc4, 0x31, 0x4d, 0xe2, 0x21,
- 0xc9, 0x9d, 0xfb, 0x90, 0x34, 0x20, 0xdb, 0x73, 0x6c, 0x55, 0x75, 0x3f, 0x1d, 0x3e, 0xe4, 0x77,
- 0xb6, 0x9b, 0x67, 0x83, 0xda, 0x23, 0x93, 0x9a, 0xed, 0x41, 0xbf, 0x4b, 0x58, 0xfd, 0xce, 0x76,
- 0x13, 0x73, 0xe6, 0x71, 0x51, 0x6d, 0x76, 0xca, 0xa8, 0x76, 0x15, 0xa0, 0x1d, 0xf7, 0x2e, 0x64,
- 0xd0, 0x88, 0x1c, 0x51, 0xeb, 0x59, 0x68, 0x54, 0x88, 0xc1, 0x52, 0x8b, 0xd7, 0xf7, 0xaa, 0x87,
- 0xc0, 0x02, 0xab, 0x23, 0xbb, 0xa1, 0xd3, 0xdd, 0x89, 0x4b, 0x4a, 0xcd, 0xd2, 0xe6, 0xb0, 0x30,
- 0x3c, 0x2a, 0x1f, 0xf9, 0xb0, 0x64, 0xab, 0x32, 0x33, 0x56, 0x5a, 0x9c, 0x5a, 0xa9, 0x88, 0x58,
- 0xcd, 0x61, 0x41, 0x78, 0x54, 0x36, 0xfa, 0x2e, 0xac, 0x84, 0xc0, 0xd1, 0x5a, 0x5f, 0x44, 0xfd,
- 0x6c, 0x63, 0xf5, 0x74, 0x50, 0x5b, 0x69, 0x4e, 0xa4, 0xc2, 0xf7, 0x91, 0x80, 0x6c, 0x98, 0x75,
- 0x65, 0x96, 0x5c, 0x12, 0x99, 0xcd, 0xd7, 0xd2, 0xad, 0x22, 0xf6, 0xfe, 0xba, 0x9e, 0x1d, 0x47,
- 0x7d, 0x1b, 0x95, 0x18, 0x2b, 0xd9, 0xe8, 0x2d, 0x28, 0x59, 0x9e, 0xe7, 0x07, 0x96, 0xec, 0x3e,
- 0x94, 0x85, 0xaa, 0x8d, 0xa9, 0x55, 0x6d, 0xc4, 0x32, 0x86, 0xb2, 0x71, 0x0d, 0x83, 0x75, 0x55,
- 0xe8, 0x1e, 0x2c, 0xf8, 0xf7, 0x3c, 0x42, 0x31, 0x39, 0x24, 0x94, 0x78, 0x2d, 0xc2, 0xaa, 0x15,
- 0xa1, 0xfd, 0x99, 0x94, 0xda, 0x13, 0xcc, 0xb1, 0x4b, 0x27, 0xe1, 0x0c, 0x0f, 0x6b, 0x41, 0x75,
- 0x1e, 0x5b, 0x3d, 0xcb, 0x75, 0xbe, 0x4f, 0x28, 0xab, 0xce, 0xc7, 0x0d, 0xeb, 0xad, 0x08, 0x8a,
- 0x35, 0x0a, 0xd4, 0x83, 0x4a, 0x47, 0x7f, 0x32, 0xaa, 0x4b, 0xc2, 0xcc, 0x6b, 0xe9, 0xcc, 0x1c,
- 0x7d, 0xd4, 0xe2, 0x34, 0x28, 0x81, 0xc3, 0x49, 0x2d, 0x2b, 0xcf, 0x41, 0xe9, 0x53, 0x56, 0x08,
- 0xbc, 0xc2, 0x18, 0x3e, 0x90, 0xa9, 0x2a, 0x8c, 0x3f, 0x66, 0x60, 0x3e, 0xb9, 0x8d, 0x43, 0xcf,
- 0x61, 0x3e, 0xd5, 0x73, 0x18, 0xd6, 0xb2, 0xc6, 0xc4, 0xc9, 0x45, 0x18, 0x9f, 0xb3, 0x13, 0xe3,
- 0xb3, 0x0a, 0x83, 0xb9, 0x07, 0x09, 0x83, 0x75, 0x00, 0x9e, 0xac, 0x50, 0xdf, 0x75, 0x09, 0x15,
- 0x11, 0xb0, 0xa0, 0x26, 0x14, 0x11, 0x14, 0x6b, 0x14, 0x3c, 0xa5, 0x3e, 0x70, 0xfd, 0xd6, 0xb1,
- 0xd8, 0x82, 0xf0, 0xf6, 0x8a, 0xd8, 0x57, 0x90, 0x29, 0x75, 0x63, 0x04, 0x8b, 0xc7, 0x70, 0x98,
- 0x7d, 0xb8, 0xb8, 0x67, 0x51, 0x9e, 0xe4, 0xc4, 0x37, 0x45, 0xd4, 0x2c, 0x6f, 0x8c, 0x54, 0x44,
- 0x4f, 0x4f, 0x7b, 0xe3, 0xe2, 0xcd, 0x8f, 0x61, 0x71, 0x55, 0x64, 0xfe, 0xd5, 0x80, 0x4b, 0x63,
- 0x75, 0x7f, 0x06, 0x15, 0xd9, 0x1b, 0xc9, 0x8a, 0xec, 0xf9, 0x94, 0xad, 0xcc, 0x71, 0xd6, 0x4e,
- 0xa8, 0xcf, 0xe6, 0x20, 0xbf, 0xc7, 0x33, 0x61, 0xf3, 0x43, 0x03, 0xca, 0xe2, 0xd7, 0x34, 0x9d,
- 0xe4, 0x5a, 0x72, 0xc0, 0x50, 0x7c, 0x78, 0xc3, 0x85, 0x87, 0xd1, 0x6a, 0x7e, 0xc7, 0x80, 0x64,
- 0x0f, 0x17, 0xbd, 0x28, 0xaf, 0x80, 0x11, 0x35, 0x59, 0xa7, 0x74, 0xff, 0x17, 0x26, 0x95, 0xa4,
- 0x17, 0x52, 0x75, 0x2b, 0x9f, 0x84, 0x22, 0xf6, 0xfd, 0x60, 0xcf, 0x0a, 0x8e, 0x18, 0xdf, 0xbb,
- 0x2e, 0xff, 0xa1, 0xb6, 0x57, 0xec, 0x9d, 0xc0, 0x60, 0x09, 0x37, 0x7f, 0x6e, 0xc0, 0xa5, 0x89,
- 0x73, 0x23, 0x1e, 0x45, 0x5a, 0xd1, 0x97, 0x5a, 0x51, 0xe4, 0xc8, 0x31, 0x1d, 0xd6, 0xa8, 0x78,
- 0x2d, 0x99, 0x18, 0x36, 0x0d, 0xd7, 0x92, 0x09, 0x6d, 0x38, 0x49, 0x6b, 0xfe, 0x33, 0x03, 0x6a,
- 0x50, 0xf3, 0x3f, 0x76, 0xfa, 0xc7, 0x87, 0xc6, 0x44, 0xf3, 0xc9, 0x31, 0x51, 0x34, 0x13, 0xd2,
- 0xe6, 0x24, 0xd9, 0xfb, 0xcf, 0x49, 0xd0, 0xb3, 0xd1, 0xe8, 0x45, 0xfa, 0xd0, 0x6a, 0x72, 0xf4,
- 0x72, 0x36, 0xa8, 0x95, 0x95, 0xf0, 0xe4, 0x28, 0xe6, 0x35, 0x98, 0xb3, 0x49, 0x60, 0x39, 0xae,
- 0xac, 0x0b, 0x53, 0x0f, 0x13, 0xa4, 0xb0, 0xa6, 0x64, 0x6d, 0x94, 0xb8, 0x4d, 0xea, 0x03, 0x87,
- 0x02, 0x79, 0xc0, 0x6e, 0xf9, 0xb6, 0xac, 0x48, 0xf2, 0x71, 0xc0, 0xde, 0xf4, 0x6d, 0x82, 0x05,
- 0xc6, 0x7c, 0xd7, 0x80, 0x92, 0x94, 0xb4, 0x69, 0xf5, 0x18, 0x41, 0x57, 0xa2, 0x55, 0xc8, 0xe3,
- 0xbe, 0xa4, 0xcf, 0xd8, 0xce, 0x06, 0xb5, 0xa2, 0x20, 0x13, 0xc5, 0xcc, 0x98, 0x59, 0x52, 0xe6,
- 0x9c, 0x3d, 0x7a, 0x14, 0xf2, 0xe2, 0x02, 0xa9, 0xcd, 0x8c, 0x87, 0x85, 0x1c, 0x88, 0x25, 0xce,
- 0xfc, 0x38, 0x03, 0x95, 0xc4, 0xe2, 0x52, 0xd4, 0x05, 0x51, 0x0b, 0x35, 0x93, 0xa2, 0x2d, 0x3f,
- 0x79, 0x34, 0xaf, 0x9e, 0xaf, 0xd9, 0x07, 0x79, 0xbe, 0xbe, 0x0d, 0xb3, 0x2d, 0xbe, 0x47, 0xe1,
- 0x3f, 0x3d, 0xae, 0x4c, 0x73, 0x9c, 0x62, 0x77, 0x63, 0x6f, 0x14, 0x9f, 0x0c, 0x2b, 0x81, 0xe8,
- 0x26, 0x2c, 0x51, 0x12, 0xd0, 0xfe, 0xc6, 0x61, 0x40, 0xa8, 0xde, 0x4c, 0xc8, 0xc7, 0xd9, 0x37,
- 0x1e, 0x26, 0xc0, 0xa3, 0x3c, 0xe6, 0x01, 0x94, 0x6f, 0x5b, 0x07, 0x6e, 0x34, 0x1e, 0xc3, 0x50,
- 0x71, 0xbc, 0x96, 0xdb, 0xb3, 0x89, 0x0c, 0xe8, 0x61, 0xf4, 0x0a, 0x2f, 0xed, 0xb6, 0x8e, 0x3c,
- 0x1b, 0xd4, 0x2e, 0x24, 0x00, 0x72, 0x1e, 0x84, 0x93, 0x22, 0x4c, 0x17, 0x72, 0x9f, 0x61, 0x25,
- 0xf9, 0x1d, 0x28, 0xc6, 0xb9, 0xfe, 0x43, 0x56, 0x69, 0xbe, 0x01, 0x05, 0xee, 0xf1, 0x61, 0x8d,
- 0x7a, 0x4e, 0x96, 0x94, 0xcc, 0xbd, 0x32, 0x69, 0x72, 0x2f, 0x31, 0x64, 0xbd, 0xd3, 0xb5, 0x1f,
- 0x70, 0xc8, 0x9a, 0x79, 0x90, 0x97, 0x2f, 0x3b, 0xe5, 0xcb, 0x77, 0x15, 0xe4, 0x1f, 0x51, 0xf8,
- 0x23, 0x23, 0x13, 0x08, 0xed, 0x91, 0xd1, 0xdf, 0x7f, 0x6d, 0xc2, 0xf0, 0x63, 0x03, 0x40, 0xb4,
- 0xf2, 0x44, 0x1b, 0x29, 0xc5, 0x38, 0xff, 0x0e, 0xcc, 0xfa, 0xd2, 0x23, 0xe5, 0xa0, 0x75, 0xca,
- 0x7e, 0x71, 0x74, 0x91, 0xa4, 0x4f, 0x62, 0x25, 0xac, 0xf1, 0xf2, 0x07, 0x9f, 0xac, 0xce, 0x7c,
- 0xf8, 0xc9, 0xea, 0xcc, 0x47, 0x9f, 0xac, 0xce, 0xbc, 0x7d, 0xba, 0x6a, 0x7c, 0x70, 0xba, 0x6a,
- 0x7c, 0x78, 0xba, 0x6a, 0x7c, 0x74, 0xba, 0x6a, 0x7c, 0x7c, 0xba, 0x6a, 0xbc, 0xfb, 0xf7, 0xd5,
- 0x99, 0xd7, 0x1e, 0x4b, 0xf3, 0x07, 0xbf, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0xcb, 0x82, 0xff,
- 0xd4, 0x07, 0x28, 0x00, 0x00,
+ // 2928 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x3a, 0x4d, 0x6c, 0x24, 0x47,
+ 0xd5, 0xee, 0xf9, 0xb1, 0x67, 0xde, 0x78, 0xfc, 0x53, 0xeb, 0xfd, 0xbe, 0x59, 0x23, 0x3c, 0x4e,
+ 0x27, 0x8a, 0x36, 0x90, 0x8c, 0x77, 0x97, 0x25, 0xda, 0x6c, 0x48, 0xc0, 0xe3, 0x59, 0x6f, 0x9c,
+ 0xac, 0x63, 0xab, 0xbc, 0xbb, 0x81, 0x10, 0xa1, 0x94, 0xa7, 0xcb, 0xe3, 0xc6, 0x3d, 0xdd, 0x93,
+ 0xaa, 0x1e, 0x6f, 0x06, 0x0e, 0xe4, 0x00, 0x12, 0x48, 0x28, 0x0a, 0x37, 0x4e, 0x28, 0x11, 0x9c,
+ 0x38, 0x21, 0x4e, 0xdc, 0x41, 0x22, 0xc7, 0x20, 0x2e, 0x91, 0x40, 0xa3, 0xac, 0x39, 0x70, 0x44,
+ 0x5c, 0x2d, 0x24, 0x50, 0xfd, 0xf4, 0xdf, 0xfc, 0xac, 0x7b, 0x76, 0x97, 0x88, 0xdb, 0xf4, 0xfb,
+ 0xaf, 0xaa, 0xf7, 0x5e, 0xbd, 0xf7, 0x6a, 0xe0, 0xea, 0xd1, 0x35, 0x5e, 0xb3, 0xbd, 0x35, 0xd2,
+ 0xb1, 0xdb, 0xa4, 0x79, 0x68, 0xbb, 0x94, 0xf5, 0xd6, 0x3a, 0x47, 0x2d, 0x01, 0xe0, 0x6b, 0x6d,
+ 0xea, 0x93, 0xb5, 0xe3, 0xcb, 0x6b, 0x2d, 0xea, 0x52, 0x46, 0x7c, 0x6a, 0xd5, 0x3a, 0xcc, 0xf3,
+ 0x3d, 0xf4, 0x94, 0xe2, 0xaa, 0xc5, 0xb9, 0x6a, 0x9d, 0xa3, 0x96, 0x00, 0xf0, 0x9a, 0xe0, 0xaa,
+ 0x1d, 0x5f, 0x5e, 0x7e, 0xae, 0x65, 0xfb, 0x87, 0xdd, 0xfd, 0x5a, 0xd3, 0x6b, 0xaf, 0xb5, 0xbc,
+ 0x96, 0xb7, 0x26, 0x99, 0xf7, 0xbb, 0x07, 0xf2, 0x4b, 0x7e, 0xc8, 0x5f, 0x4a, 0xe8, 0xf2, 0xda,
+ 0x38, 0x53, 0x58, 0xd7, 0xf5, 0xed, 0x36, 0x1d, 0xb4, 0x62, 0xf9, 0xf9, 0xb3, 0x18, 0x78, 0xf3,
+ 0x90, 0xb6, 0xc9, 0x20, 0x9f, 0xf9, 0xc7, 0x2c, 0x14, 0xd6, 0x77, 0xb7, 0x6e, 0x32, 0xaf, 0xdb,
+ 0x41, 0xab, 0x90, 0x73, 0x49, 0x9b, 0x56, 0x8c, 0x55, 0xe3, 0x62, 0xb1, 0x3e, 0xfb, 0x71, 0xbf,
+ 0x3a, 0x75, 0xd2, 0xaf, 0xe6, 0x5e, 0x27, 0x6d, 0x8a, 0x25, 0x06, 0x39, 0x50, 0x38, 0xa6, 0x8c,
+ 0xdb, 0x9e, 0xcb, 0x2b, 0x99, 0xd5, 0xec, 0xc5, 0xd2, 0x95, 0x97, 0x6b, 0x69, 0xd6, 0x5f, 0x93,
+ 0x0a, 0xee, 0x2a, 0xd6, 0x4d, 0x8f, 0x35, 0x6c, 0xde, 0xf4, 0x8e, 0x29, 0xeb, 0xd5, 0x17, 0xb4,
+ 0x96, 0x82, 0x46, 0x72, 0x1c, 0x6a, 0x40, 0x3f, 0x34, 0x60, 0xa1, 0xc3, 0xe8, 0x01, 0x65, 0x8c,
+ 0x5a, 0x1a, 0x5f, 0xc9, 0xae, 0x1a, 0x8f, 0x41, 0x6d, 0x45, 0xab, 0x5d, 0xd8, 0x1d, 0x90, 0x8f,
+ 0x87, 0x34, 0xa2, 0x5f, 0x1a, 0xb0, 0xcc, 0x29, 0x3b, 0xa6, 0x6c, 0xdd, 0xb2, 0x18, 0xe5, 0xbc,
+ 0xde, 0xdb, 0x70, 0x6c, 0xea, 0xfa, 0x1b, 0x5b, 0x0d, 0xcc, 0x2b, 0x39, 0xb9, 0x0f, 0x5f, 0x4f,
+ 0x67, 0xd0, 0xde, 0x38, 0x39, 0x75, 0x53, 0x5b, 0xb4, 0x3c, 0x96, 0x84, 0xe3, 0x07, 0x98, 0x61,
+ 0x1e, 0xc0, 0x6c, 0x70, 0x90, 0xb7, 0x6c, 0xee, 0xa3, 0xbb, 0x30, 0xdd, 0x12, 0x1f, 0xbc, 0x62,
+ 0x48, 0x03, 0x6b, 0xe9, 0x0c, 0x0c, 0x64, 0xd4, 0xe7, 0xb4, 0x3d, 0xd3, 0xf2, 0x93, 0x63, 0x2d,
+ 0xcd, 0xfc, 0x49, 0x0e, 0x4a, 0xeb, 0xbb, 0x5b, 0x98, 0x72, 0xaf, 0xcb, 0x9a, 0x34, 0x85, 0xd3,
+ 0x5c, 0x83, 0x59, 0x6e, 0xbb, 0xad, 0xae, 0x43, 0x98, 0x80, 0x56, 0xa6, 0x25, 0xe5, 0x92, 0xa6,
+ 0x9c, 0xdd, 0x8b, 0xe1, 0x70, 0x82, 0x12, 0x5d, 0x01, 0x10, 0x12, 0x78, 0x87, 0x34, 0xa9, 0x55,
+ 0xc9, 0xac, 0x1a, 0x17, 0x0b, 0x75, 0xa4, 0xf9, 0xe0, 0xf5, 0x10, 0x83, 0x63, 0x54, 0xe8, 0x49,
+ 0xc8, 0x4b, 0x4b, 0x2b, 0x05, 0xa9, 0xa6, 0xac, 0xc9, 0xf3, 0x72, 0x19, 0x58, 0xe1, 0xd0, 0x33,
+ 0x30, 0xa3, 0xbd, 0xac, 0x52, 0x94, 0x64, 0xf3, 0x9a, 0x6c, 0x26, 0x70, 0x83, 0x00, 0x2f, 0xd6,
+ 0x77, 0x64, 0xbb, 0x96, 0xf4, 0xbb, 0xd8, 0xfa, 0x5e, 0xb3, 0x5d, 0x0b, 0x4b, 0x0c, 0xba, 0x05,
+ 0xf9, 0x63, 0xca, 0xf6, 0x85, 0x27, 0x08, 0xd7, 0xfc, 0x72, 0xba, 0x8d, 0xbe, 0x2b, 0x58, 0xea,
+ 0x45, 0x61, 0x9a, 0xfc, 0x89, 0x95, 0x10, 0x54, 0x03, 0xe0, 0x87, 0x1e, 0xf3, 0xe5, 0xf2, 0x2a,
+ 0xf9, 0xd5, 0xec, 0xc5, 0x62, 0x7d, 0x4e, 0xac, 0x77, 0x2f, 0x84, 0xe2, 0x18, 0x85, 0xa0, 0x6f,
+ 0x12, 0x9f, 0xb6, 0x3c, 0x66, 0x53, 0x5e, 0x99, 0x89, 0xe8, 0x37, 0x42, 0x28, 0x8e, 0x51, 0xa0,
+ 0x57, 0x01, 0x71, 0xdf, 0x63, 0xa4, 0x45, 0xf5, 0x52, 0x5f, 0x21, 0xfc, 0xb0, 0x02, 0x72, 0x75,
+ 0xcb, 0x7a, 0x75, 0x68, 0x6f, 0x88, 0x02, 0x8f, 0xe0, 0x32, 0x7f, 0x6b, 0xc0, 0x7c, 0xcc, 0x17,
+ 0xa4, 0xdf, 0x5d, 0x83, 0xd9, 0x56, 0x2c, 0xea, 0xb4, 0x5f, 0x84, 0xa7, 0x1d, 0x8f, 0x48, 0x9c,
+ 0xa0, 0x44, 0x14, 0x8a, 0x4c, 0x4b, 0x0a, 0xb2, 0xcb, 0xe5, 0xd4, 0x4e, 0x1b, 0xd8, 0x10, 0x69,
+ 0x8a, 0x01, 0x39, 0x8e, 0x24, 0x9b, 0x7f, 0x37, 0xa4, 0x03, 0x07, 0xf9, 0x06, 0x5d, 0x8c, 0xe5,
+ 0x34, 0x43, 0x6e, 0xdf, 0xec, 0x98, 0x7c, 0x74, 0x46, 0x22, 0xc8, 0xfc, 0x4f, 0x24, 0x82, 0xeb,
+ 0x85, 0x9f, 0x7f, 0x58, 0x9d, 0x7a, 0xef, 0xaf, 0xab, 0x53, 0xe6, 0xcf, 0x0c, 0x98, 0x5d, 0xef,
+ 0x74, 0x9c, 0xde, 0x4e, 0xc7, 0x97, 0x0b, 0x30, 0x61, 0xda, 0x62, 0x3d, 0xdc, 0x75, 0xf5, 0x42,
+ 0x41, 0xc4, 0x77, 0x43, 0x42, 0xb0, 0xc6, 0x88, 0xf8, 0x39, 0xf0, 0x58, 0x93, 0xea, 0x70, 0x0b,
+ 0xe3, 0x67, 0x53, 0x00, 0xb1, 0xc2, 0x89, 0x43, 0x3e, 0xb0, 0xa9, 0x63, 0x6d, 0x13, 0x97, 0xb4,
+ 0x28, 0xd3, 0xc1, 0x11, 0x6e, 0xfd, 0x66, 0x0c, 0x87, 0x13, 0x94, 0xe6, 0xbf, 0x33, 0x50, 0xdc,
+ 0xf0, 0x5c, 0xcb, 0xf6, 0x75, 0x70, 0xf9, 0xbd, 0xce, 0x50, 0xf2, 0xb8, 0xdd, 0xeb, 0x50, 0x2c,
+ 0x31, 0xe8, 0x05, 0x98, 0xe6, 0x3e, 0xf1, 0xbb, 0x5c, 0xda, 0x53, 0xac, 0x3f, 0x11, 0xa4, 0xa5,
+ 0x3d, 0x09, 0x3d, 0xed, 0x57, 0xe7, 0x43, 0x71, 0x0a, 0x84, 0x35, 0x83, 0xf0, 0x74, 0x6f, 0x5f,
+ 0x6e, 0x94, 0x75, 0x53, 0x5d, 0x7b, 0xc1, 0xfd, 0x91, 0x8d, 0x3c, 0x7d, 0x67, 0x88, 0x02, 0x8f,
+ 0xe0, 0x42, 0xc7, 0x80, 0x1c, 0xc2, 0xfd, 0xdb, 0x8c, 0xb8, 0x5c, 0xea, 0xba, 0x6d, 0xb7, 0xa9,
+ 0x0e, 0xf8, 0x2f, 0xa5, 0x3b, 0x71, 0xc1, 0x11, 0xe9, 0xbd, 0x35, 0x24, 0x0d, 0x8f, 0xd0, 0x80,
+ 0x9e, 0x86, 0x69, 0x46, 0x09, 0xf7, 0xdc, 0x4a, 0x5e, 0x2e, 0x3f, 0xcc, 0xca, 0x58, 0x42, 0xb1,
+ 0xc6, 0x8a, 0x84, 0xd6, 0xa6, 0x9c, 0x93, 0x56, 0x90, 0x5e, 0xc3, 0x84, 0xb6, 0xad, 0xc0, 0x38,
+ 0xc0, 0x9b, 0xbf, 0x31, 0xa0, 0xbc, 0xc1, 0x28, 0xf1, 0xe9, 0x24, 0x6e, 0xf1, 0xd0, 0x27, 0x8e,
+ 0xd6, 0x61, 0x5e, 0x7e, 0xdf, 0x25, 0x8e, 0x6d, 0xa9, 0x33, 0xc8, 0x49, 0xe6, 0xff, 0xd7, 0xcc,
+ 0xf3, 0x9b, 0x49, 0x34, 0x1e, 0xa4, 0x37, 0x7f, 0x9d, 0x83, 0x72, 0x83, 0x3a, 0x34, 0x32, 0x79,
+ 0x13, 0x50, 0x8b, 0x91, 0x26, 0xdd, 0xa5, 0xcc, 0xf6, 0xac, 0x3d, 0xda, 0xf4, 0x5c, 0x8b, 0x4b,
+ 0x37, 0xca, 0xd6, 0xff, 0x4f, 0xec, 0xef, 0xcd, 0x21, 0x2c, 0x1e, 0xc1, 0x81, 0x1c, 0x28, 0x77,
+ 0x98, 0xfc, 0x2d, 0xf7, 0x5c, 0x79, 0x59, 0xe9, 0xca, 0x57, 0xd2, 0x1d, 0xe9, 0x6e, 0x9c, 0xb5,
+ 0xbe, 0x78, 0xd2, 0xaf, 0x96, 0x13, 0x20, 0x9c, 0x14, 0x8e, 0xbe, 0x01, 0x0b, 0x1e, 0xeb, 0x1c,
+ 0x12, 0xb7, 0x41, 0x3b, 0xd4, 0xb5, 0xa8, 0xeb, 0x73, 0xb9, 0x91, 0x85, 0xfa, 0x92, 0xa8, 0x45,
+ 0x76, 0x06, 0x70, 0x78, 0x88, 0x1a, 0xbd, 0x09, 0x8b, 0x1d, 0xe6, 0x75, 0x48, 0x4b, 0x6e, 0xcc,
+ 0xae, 0xe7, 0xd8, 0xcd, 0x9e, 0xde, 0xce, 0x67, 0x4f, 0xfa, 0xd5, 0xc5, 0xdd, 0x41, 0xe4, 0x69,
+ 0xbf, 0x7a, 0x4e, 0x6e, 0x9d, 0x80, 0x44, 0x48, 0x3c, 0x2c, 0x26, 0xe6, 0x06, 0xf9, 0xb1, 0x6e,
+ 0xf0, 0xa1, 0x01, 0x97, 0xec, 0x96, 0xeb, 0x31, 0x2a, 0xae, 0x08, 0x8a, 0x29, 0xb1, 0x6e, 0x30,
+ 0xe6, 0xb1, 0x37, 0x6c, 0xff, 0x70, 0xc3, 0xe9, 0x72, 0x9f, 0xb2, 0x3a, 0xa3, 0xe4, 0xc8, 0x76,
+ 0x5b, 0xbb, 0x9e, 0x4f, 0x5d, 0xdf, 0x26, 0x8e, 0xf4, 0xc8, 0x42, 0xfd, 0xea, 0x49, 0xbf, 0x7a,
+ 0x69, 0x6b, 0x42, 0x5e, 0x3c, 0xb1, 0x36, 0x73, 0x0b, 0x0a, 0x8d, 0xae, 0x0e, 0xdb, 0x97, 0xa0,
+ 0x60, 0xe9, 0xdf, 0xda, 0x39, 0x82, 0xfc, 0x11, 0xd2, 0x9c, 0xf6, 0xab, 0x65, 0x51, 0x21, 0xd7,
+ 0x02, 0x00, 0x0e, 0x59, 0xcc, 0x5f, 0x19, 0x50, 0x91, 0xce, 0xb9, 0x47, 0x1d, 0xda, 0xf4, 0x3d,
+ 0x86, 0xe9, 0x3b, 0x5d, 0x9b, 0xd1, 0x36, 0x75, 0x7d, 0xf4, 0x45, 0xc8, 0x1e, 0xd1, 0x9e, 0x4e,
+ 0x5d, 0x25, 0x2d, 0x36, 0xfb, 0x1a, 0xed, 0x61, 0x01, 0x47, 0x37, 0xa0, 0xe0, 0x75, 0x44, 0xfa,
+ 0xf0, 0x98, 0x4e, 0x5d, 0xcf, 0x04, 0xaa, 0x77, 0x34, 0xfc, 0xb4, 0x5f, 0x3d, 0x9f, 0x10, 0x1f,
+ 0x20, 0x70, 0xc8, 0x2a, 0x0e, 0xe5, 0x98, 0x38, 0x5d, 0x2a, 0x1c, 0x25, 0x3c, 0x94, 0xbb, 0x12,
+ 0x82, 0x35, 0xc6, 0x7c, 0x1a, 0x0a, 0x52, 0x0c, 0xbf, 0x7b, 0x19, 0x2d, 0x40, 0x16, 0x93, 0x7b,
+ 0xd2, 0xaa, 0x59, 0x2c, 0x7e, 0xc6, 0xee, 0x83, 0x1d, 0x80, 0x9b, 0xd4, 0x0f, 0x42, 0x68, 0x1d,
+ 0xe6, 0x83, 0x4b, 0x31, 0x79, 0x57, 0x87, 0x71, 0x89, 0x93, 0x68, 0x3c, 0x48, 0x6f, 0xbe, 0x05,
+ 0x45, 0x79, 0x9f, 0x8b, 0x62, 0x28, 0x2a, 0xbc, 0x8c, 0x07, 0x14, 0x5e, 0x41, 0x35, 0x95, 0x19,
+ 0x57, 0x4d, 0xc5, 0xcc, 0x75, 0xa0, 0xac, 0x78, 0x83, 0x52, 0x33, 0x95, 0x86, 0x67, 0xa1, 0x10,
+ 0x98, 0xa9, 0xb5, 0x84, 0x2d, 0x46, 0x20, 0x08, 0x87, 0x14, 0x31, 0x6d, 0x87, 0x90, 0xa8, 0x4d,
+ 0xd2, 0x29, 0x8b, 0xd5, 0x91, 0x99, 0x07, 0xd7, 0x91, 0x31, 0x4d, 0x3f, 0x80, 0xca, 0xb8, 0xbe,
+ 0xe4, 0x11, 0xaa, 0xa7, 0xf4, 0xa6, 0x98, 0xef, 0x1b, 0xb0, 0x10, 0x97, 0x94, 0xfe, 0xf8, 0xd2,
+ 0x2b, 0x39, 0xbb, 0x6e, 0x8e, 0xed, 0xc8, 0x2f, 0x0c, 0x58, 0x4a, 0x2c, 0x6d, 0xa2, 0x13, 0x9f,
+ 0xc0, 0xa8, 0xb8, 0x73, 0x64, 0x27, 0x70, 0x8e, 0x3f, 0x67, 0xa0, 0x7c, 0x8b, 0xec, 0x53, 0x27,
+ 0x88, 0x54, 0xf4, 0x7d, 0x28, 0xb5, 0x89, 0xdf, 0x3c, 0x94, 0xd0, 0xa0, 0xc7, 0x6a, 0xa4, 0xbb,
+ 0x36, 0x12, 0x92, 0x6a, 0xdb, 0x91, 0x98, 0x1b, 0xae, 0xcf, 0x7a, 0xf5, 0x73, 0xda, 0xa4, 0x52,
+ 0x0c, 0x83, 0xe3, 0xda, 0x64, 0x63, 0x2c, 0xbf, 0x6f, 0xbc, 0xdb, 0x11, 0x05, 0xe0, 0xe4, 0xfd,
+ 0x78, 0xc2, 0x84, 0x58, 0x56, 0x8b, 0x1a, 0xe3, 0xed, 0x01, 0xf9, 0x78, 0x48, 0xe3, 0xf2, 0xcb,
+ 0xb0, 0x30, 0x68, 0xbc, 0xc8, 0x3f, 0x61, 0x56, 0x54, 0x89, 0x70, 0x09, 0xf2, 0x32, 0x4f, 0xa9,
+ 0xc3, 0xc1, 0xea, 0xe3, 0x7a, 0xe6, 0x9a, 0x21, 0xd3, 0xeb, 0x38, 0x43, 0x1e, 0x53, 0x7a, 0x4d,
+ 0x88, 0x7f, 0xc8, 0xf4, 0xfa, 0x3b, 0x03, 0x72, 0xb2, 0xb5, 0x79, 0x0b, 0x0a, 0x62, 0xff, 0x2c,
+ 0xe2, 0x13, 0x69, 0x57, 0xea, 0xa6, 0x5a, 0x70, 0x6f, 0x53, 0x9f, 0x44, 0xde, 0x16, 0x40, 0x70,
+ 0x28, 0x11, 0x61, 0xc8, 0xdb, 0x3e, 0x6d, 0x07, 0x07, 0xf9, 0xdc, 0x58, 0xd1, 0x7a, 0xa4, 0x53,
+ 0xc3, 0xe4, 0xde, 0x8d, 0x77, 0x7d, 0xea, 0x8a, 0xc3, 0x88, 0x42, 0x63, 0x4b, 0xc8, 0xc0, 0x4a,
+ 0x94, 0xf9, 0x4f, 0x03, 0x42, 0x55, 0xc2, 0xf9, 0x39, 0x75, 0x0e, 0x6e, 0xd9, 0xee, 0x91, 0xde,
+ 0xd6, 0xd0, 0x9c, 0x3d, 0x0d, 0xc7, 0x21, 0xc5, 0xa8, 0xeb, 0x21, 0x33, 0xd9, 0xf5, 0x20, 0x14,
+ 0x36, 0x3d, 0xd7, 0xb7, 0xdd, 0xee, 0x50, 0xb4, 0x6d, 0x68, 0x38, 0x0e, 0x29, 0x44, 0x49, 0xc7,
+ 0x68, 0x9b, 0xd8, 0xae, 0xed, 0xb6, 0xc4, 0x22, 0x36, 0xbc, 0xae, 0xeb, 0xcb, 0xda, 0x46, 0x97,
+ 0x74, 0x78, 0x08, 0x8b, 0x47, 0x70, 0x98, 0xff, 0xca, 0x41, 0x49, 0xac, 0x39, 0xb8, 0xe7, 0x5e,
+ 0x84, 0xb2, 0x13, 0xf7, 0x02, 0xbd, 0xf6, 0xf3, 0xda, 0x94, 0x64, 0x5c, 0xe3, 0x24, 0xad, 0x60,
+ 0x3e, 0x88, 0xdf, 0xd0, 0x7a, 0x0f, 0x42, 0xe6, 0x64, 0x75, 0x90, 0xa4, 0x15, 0xd9, 0xeb, 0x9e,
+ 0x88, 0x0f, 0x5d, 0xe3, 0x85, 0x47, 0xf4, 0x86, 0x00, 0x62, 0x85, 0x43, 0xdb, 0x70, 0x8e, 0x38,
+ 0x8e, 0x77, 0x4f, 0x02, 0xeb, 0x9e, 0x77, 0xd4, 0x26, 0xec, 0x88, 0xcb, 0xb1, 0x44, 0xa1, 0xfe,
+ 0x05, 0xcd, 0x72, 0x6e, 0x7d, 0x98, 0x04, 0x8f, 0xe2, 0x1b, 0x75, 0x6c, 0xb9, 0x09, 0x8f, 0xed,
+ 0x10, 0x96, 0x06, 0x40, 0x32, 0xca, 0xf5, 0x8c, 0xe0, 0xaa, 0x96, 0xb3, 0x84, 0x47, 0xd0, 0x9c,
+ 0x8e, 0x81, 0xe3, 0x91, 0x12, 0xd1, 0x75, 0x98, 0x13, 0x9e, 0xec, 0x75, 0xfd, 0xa0, 0x82, 0xcf,
+ 0xcb, 0xe3, 0x46, 0x27, 0xfd, 0xea, 0xdc, 0xed, 0x04, 0x06, 0x0f, 0x50, 0x8a, 0xcd, 0x75, 0xec,
+ 0xb6, 0xed, 0x57, 0x66, 0x24, 0x4b, 0xb8, 0xb9, 0xb7, 0x04, 0x10, 0x2b, 0x5c, 0xc2, 0x03, 0x0b,
+ 0x67, 0x7a, 0xe0, 0x06, 0x2c, 0x72, 0xea, 0x5a, 0x5b, 0xae, 0x2d, 0x0a, 0xc9, 0x1b, 0xc7, 0xb2,
+ 0x3e, 0x2f, 0xc9, 0x83, 0x38, 0x2f, 0x8a, 0xeb, 0xbd, 0x41, 0x24, 0x1e, 0xa6, 0x37, 0xff, 0x94,
+ 0x05, 0xa4, 0x5a, 0x1f, 0x4b, 0x15, 0x65, 0x2a, 0x2f, 0x8a, 0x06, 0x4d, 0xb7, 0x4e, 0xc6, 0x40,
+ 0x83, 0xa6, 0xbb, 0xa6, 0x00, 0x8f, 0xb6, 0xa1, 0xa8, 0xf2, 0x53, 0x14, 0x73, 0x6b, 0x9a, 0xb8,
+ 0xb8, 0x13, 0x20, 0x4e, 0xfb, 0xd5, 0xe5, 0x84, 0x9a, 0x10, 0x23, 0x9b, 0xe7, 0x48, 0x02, 0xba,
+ 0x02, 0x40, 0x3a, 0x76, 0x7c, 0x7c, 0x5a, 0x8c, 0x86, 0x68, 0xd1, 0x20, 0x04, 0xc7, 0xa8, 0xd0,
+ 0x2b, 0x90, 0xf3, 0x1f, 0xae, 0xc1, 0x2d, 0xc8, 0xfe, 0x5d, 0xb4, 0xb3, 0x52, 0x82, 0xd0, 0x2e,
+ 0x83, 0x82, 0x0b, 0xb3, 0x74, 0x6f, 0x1a, 0x6a, 0xdf, 0x0c, 0x31, 0x38, 0x46, 0x85, 0xbe, 0x09,
+ 0x85, 0x03, 0x5d, 0xcf, 0xca, 0xd3, 0x4d, 0x9d, 0x67, 0x83, 0x2a, 0x58, 0x4d, 0x70, 0x82, 0x2f,
+ 0x1c, 0x4a, 0x43, 0x5f, 0x85, 0x12, 0xef, 0xee, 0x87, 0x25, 0x80, 0x72, 0x89, 0xf0, 0xbe, 0xdd,
+ 0x8b, 0x50, 0x38, 0x4e, 0x67, 0xbe, 0x03, 0xc5, 0x6d, 0xbb, 0xc9, 0x3c, 0xd9, 0x92, 0x3f, 0x03,
+ 0x33, 0x3c, 0xd1, 0x6f, 0x86, 0x27, 0x19, 0xb8, 0x6a, 0x80, 0x17, 0x3e, 0xea, 0x12, 0xd7, 0x53,
+ 0x5d, 0x65, 0x3e, 0xf2, 0xd1, 0xd7, 0x05, 0x10, 0x2b, 0xdc, 0xf5, 0x25, 0x51, 0x65, 0xfc, 0xf8,
+ 0xa3, 0xea, 0xd4, 0x07, 0x1f, 0x55, 0xa7, 0x3e, 0xfc, 0x48, 0x57, 0x1c, 0xbf, 0x07, 0x80, 0x9d,
+ 0xfd, 0xef, 0xd2, 0xa6, 0xca, 0xdd, 0xa9, 0xa6, 0xac, 0xc1, 0x70, 0x5f, 0x4e, 0x59, 0x33, 0x03,
+ 0x95, 0x63, 0x0c, 0x87, 0x13, 0x94, 0x68, 0x0d, 0x8a, 0xe1, 0xfc, 0x54, 0xfb, 0xc7, 0x62, 0xe0,
+ 0x6f, 0xe1, 0x90, 0x15, 0x47, 0x34, 0x89, 0x8b, 0x24, 0x77, 0xe6, 0x45, 0x52, 0x87, 0x6c, 0xd7,
+ 0xb6, 0xf4, 0xfc, 0xe2, 0x52, 0x70, 0x91, 0xdf, 0xd9, 0x6a, 0x9c, 0xf6, 0xab, 0x4f, 0x8c, 0x7b,
+ 0xb6, 0xf0, 0x7b, 0x1d, 0xca, 0x6b, 0x77, 0xb6, 0x1a, 0x58, 0x30, 0x8f, 0xca, 0x6a, 0xd3, 0x13,
+ 0x66, 0xb5, 0x2b, 0x00, 0xad, 0x68, 0x0a, 0xa4, 0x92, 0x46, 0xe8, 0x88, 0xb1, 0xe9, 0x4f, 0x8c,
+ 0x0a, 0x71, 0x58, 0x6c, 0x32, 0x4a, 0x82, 0x69, 0x0c, 0xf7, 0x49, 0x5b, 0xcd, 0x95, 0x27, 0x8b,
+ 0x89, 0x0b, 0x5a, 0xcd, 0xe2, 0xc6, 0xa0, 0x30, 0x3c, 0x2c, 0x1f, 0x79, 0xb0, 0x68, 0xe9, 0x86,
+ 0x3d, 0x52, 0x5a, 0x9c, 0x58, 0xa9, 0xcc, 0x58, 0x8d, 0x41, 0x41, 0x78, 0x58, 0x36, 0xfa, 0x0e,
+ 0x2c, 0x07, 0xc0, 0xe1, 0xa9, 0x89, 0xcc, 0xfa, 0xd9, 0xfa, 0xca, 0x49, 0xbf, 0xba, 0xdc, 0x18,
+ 0x4b, 0x85, 0x1f, 0x20, 0x01, 0x59, 0x30, 0xed, 0xa8, 0x2a, 0xb9, 0x24, 0x2b, 0x9b, 0xaf, 0xa5,
+ 0x5b, 0x45, 0xe4, 0xfd, 0xb5, 0x78, 0x75, 0x1c, 0x4e, 0xc0, 0x74, 0x61, 0xac, 0x65, 0xa3, 0x77,
+ 0xa1, 0x44, 0x5c, 0xd7, 0xf3, 0x89, 0x9a, 0xe3, 0xcc, 0x4a, 0x55, 0xeb, 0x13, 0xab, 0x5a, 0x8f,
+ 0x64, 0x0c, 0x54, 0xe3, 0x31, 0x0c, 0x8e, 0xab, 0x42, 0xf7, 0x60, 0xde, 0xbb, 0xe7, 0x52, 0x86,
+ 0xe9, 0x01, 0x65, 0xd4, 0x6d, 0x52, 0x5e, 0x29, 0x4b, 0xed, 0x57, 0x53, 0x6a, 0x4f, 0x30, 0x47,
+ 0x2e, 0x9d, 0x84, 0x73, 0x3c, 0xa8, 0x05, 0xd5, 0x44, 0x6e, 0x75, 0x89, 0x63, 0x7f, 0x8f, 0x32,
+ 0x5e, 0x99, 0x8b, 0x46, 0xff, 0x9b, 0x21, 0x14, 0xc7, 0x28, 0x50, 0x17, 0xca, 0xed, 0xf8, 0x95,
+ 0x51, 0x59, 0x94, 0x66, 0x5e, 0x4b, 0x67, 0xe6, 0xf0, 0xa5, 0x16, 0x95, 0x41, 0x09, 0x1c, 0x4e,
+ 0x6a, 0x59, 0x7e, 0x01, 0x4a, 0x0f, 0xd9, 0x21, 0x88, 0x0e, 0x63, 0xf0, 0x40, 0x26, 0xea, 0x30,
+ 0xfe, 0x90, 0x81, 0xb9, 0xe4, 0x36, 0x0e, 0x5c, 0x87, 0xf9, 0x54, 0xd7, 0x61, 0xd0, 0xcb, 0x1a,
+ 0x63, 0xdf, 0x80, 0x82, 0xfc, 0x9c, 0x1d, 0x9b, 0x9f, 0x75, 0x1a, 0xcc, 0x3d, 0x4a, 0x1a, 0xac,
+ 0x01, 0x88, 0x62, 0x85, 0x79, 0x8e, 0x43, 0x99, 0x1e, 0xab, 0xa9, 0xb7, 0x9e, 0x10, 0x8a, 0x63,
+ 0x14, 0xa2, 0xa4, 0xde, 0x77, 0xbc, 0xe6, 0x91, 0xdc, 0x82, 0x20, 0x7a, 0x65, 0xee, 0x2b, 0xa8,
+ 0x92, 0xba, 0x3e, 0x84, 0xc5, 0x23, 0x38, 0xcc, 0x1e, 0x9c, 0xdf, 0x25, 0x4c, 0x14, 0x39, 0x51,
+ 0xa4, 0xc8, 0x9e, 0xe5, 0xed, 0xa1, 0x8e, 0xe8, 0xd2, 0xa4, 0x11, 0x17, 0x6d, 0x7e, 0x04, 0x8b,
+ 0xba, 0x22, 0xf3, 0x2f, 0x06, 0x5c, 0x18, 0xa9, 0xfb, 0x73, 0xe8, 0xc8, 0xde, 0x4e, 0x76, 0x64,
+ 0x2f, 0xa6, 0x1c, 0x0a, 0x8f, 0xb2, 0x76, 0x4c, 0x7f, 0x36, 0x03, 0xf9, 0x5d, 0x51, 0x09, 0x9b,
+ 0x9f, 0x18, 0x30, 0x2b, 0x7f, 0x4d, 0x32, 0x93, 0xaf, 0x26, 0x9f, 0x6a, 0x8a, 0x8f, 0xef, 0x99,
+ 0xe6, 0x71, 0x0c, 0xed, 0xdf, 0x37, 0x20, 0x39, 0x0d, 0x47, 0x2f, 0xab, 0x10, 0x30, 0xc2, 0x71,
+ 0xf5, 0x84, 0xee, 0xff, 0xd2, 0xb8, 0x96, 0xf4, 0x5c, 0xaa, 0x69, 0xe5, 0xb3, 0x50, 0xc4, 0x9e,
+ 0xe7, 0xef, 0x12, 0xff, 0x90, 0x8b, 0xbd, 0xeb, 0x88, 0x1f, 0x7a, 0x7b, 0xe5, 0xde, 0x49, 0x0c,
+ 0x56, 0x70, 0xf3, 0xa7, 0x06, 0x5c, 0x18, 0xfb, 0x02, 0x27, 0xb2, 0x48, 0x33, 0xfc, 0xd2, 0x2b,
+ 0x0a, 0x1d, 0x39, 0xa2, 0xc3, 0x31, 0x2a, 0xd1, 0x4b, 0x26, 0x9e, 0xed, 0x06, 0x7b, 0xc9, 0x84,
+ 0x36, 0x9c, 0xa4, 0x35, 0xff, 0x91, 0x01, 0xfd, 0xe4, 0xf5, 0x5f, 0x76, 0xfa, 0xa7, 0x07, 0x1e,
+ 0xdc, 0xe6, 0x92, 0x0f, 0x6e, 0xe1, 0xeb, 0x5a, 0xec, 0xc5, 0x29, 0xfb, 0xe0, 0x17, 0x27, 0xf4,
+ 0x7c, 0xf8, 0x88, 0xa5, 0x7c, 0x68, 0x25, 0xf9, 0x88, 0x75, 0xda, 0xaf, 0xce, 0x6a, 0xe1, 0xc9,
+ 0x47, 0xad, 0x37, 0x61, 0xc6, 0xa2, 0x3e, 0xb1, 0x1d, 0xd5, 0x17, 0xa6, 0x7e, 0x96, 0x51, 0xc2,
+ 0x1a, 0x8a, 0xb5, 0x5e, 0x12, 0x36, 0xe9, 0x0f, 0x1c, 0x08, 0x14, 0x09, 0xbb, 0xe9, 0x59, 0xaa,
+ 0x23, 0xc9, 0x47, 0x09, 0x7b, 0xc3, 0xb3, 0x28, 0x96, 0x18, 0xf3, 0x03, 0x03, 0x4a, 0x4a, 0xd2,
+ 0x06, 0xe9, 0x72, 0x8a, 0x2e, 0x87, 0xab, 0x50, 0xc7, 0x7d, 0x21, 0xfe, 0x5a, 0x79, 0xda, 0xaf,
+ 0x16, 0x25, 0x99, 0x6c, 0x66, 0x46, 0xbc, 0xca, 0x65, 0xce, 0xd8, 0xa3, 0x27, 0x21, 0x2f, 0x03,
+ 0x48, 0x6f, 0x66, 0xf4, 0xec, 0x2a, 0x80, 0x58, 0xe1, 0xcc, 0xcf, 0x32, 0x50, 0x4e, 0x2c, 0x2e,
+ 0x45, 0x5f, 0x10, 0x8e, 0x50, 0x33, 0x29, 0xc6, 0xf2, 0xe3, 0xff, 0xe4, 0xa0, 0xaf, 0xaf, 0xe9,
+ 0x47, 0xb9, 0xbe, 0xbe, 0x05, 0xd3, 0x4d, 0xb1, 0x47, 0xc1, 0x7f, 0x66, 0x2e, 0x4f, 0x72, 0x9c,
+ 0x72, 0x77, 0x23, 0x6f, 0x94, 0x9f, 0x1c, 0x6b, 0x81, 0xe8, 0x26, 0x2c, 0x32, 0xea, 0xb3, 0xde,
+ 0xfa, 0x81, 0x4f, 0x59, 0x7c, 0x98, 0x90, 0x8f, 0xaa, 0x6f, 0x3c, 0x48, 0x80, 0x87, 0x79, 0xcc,
+ 0x7d, 0x98, 0xbd, 0x4d, 0xf6, 0x9d, 0xf0, 0xa1, 0x11, 0x43, 0xd9, 0x76, 0x9b, 0x4e, 0xd7, 0xa2,
+ 0x2a, 0xa1, 0x07, 0xd9, 0x2b, 0x08, 0xda, 0xad, 0x38, 0xf2, 0xb4, 0x5f, 0x3d, 0x97, 0x00, 0xa8,
+ 0x97, 0x35, 0x9c, 0x14, 0x61, 0x3a, 0x90, 0xfb, 0x1c, 0x3b, 0xc9, 0x6f, 0x43, 0x31, 0xaa, 0xf5,
+ 0x1f, 0xb3, 0x4a, 0xf3, 0x6d, 0x28, 0x08, 0x8f, 0x0f, 0x7a, 0xd4, 0x33, 0xaa, 0xa4, 0x64, 0xed,
+ 0x95, 0x49, 0x53, 0x7b, 0xc9, 0xe7, 0xea, 0x3b, 0x1d, 0xeb, 0x11, 0x9f, 0xab, 0x33, 0x8f, 0x72,
+ 0xf3, 0x65, 0x27, 0xbc, 0xf9, 0xae, 0x80, 0xfa, 0x4b, 0x8f, 0xb8, 0x64, 0x54, 0x01, 0x11, 0xbb,
+ 0x64, 0xe2, 0xf7, 0x7f, 0xec, 0x85, 0xe1, 0x47, 0x06, 0x80, 0x1c, 0xe5, 0xc9, 0x31, 0x52, 0x8a,
+ 0x3f, 0x46, 0xdc, 0x81, 0x69, 0x4f, 0x79, 0xa4, 0x7a, 0xb2, 0x9e, 0x70, 0x5e, 0x1c, 0x06, 0x92,
+ 0xf2, 0x49, 0xac, 0x85, 0xd5, 0x5f, 0xfd, 0xf8, 0xfe, 0xca, 0xd4, 0x27, 0xf7, 0x57, 0xa6, 0x3e,
+ 0xbd, 0xbf, 0x32, 0xf5, 0xde, 0xc9, 0x8a, 0xf1, 0xf1, 0xc9, 0x8a, 0xf1, 0xc9, 0xc9, 0x8a, 0xf1,
+ 0xe9, 0xc9, 0x8a, 0xf1, 0xd9, 0xc9, 0x8a, 0xf1, 0xc1, 0xdf, 0x56, 0xa6, 0xde, 0x7c, 0x2a, 0xcd,
+ 0x5f, 0x25, 0xff, 0x13, 0x00, 0x00, 0xff, 0xff, 0xf8, 0xda, 0x63, 0x4c, 0x51, 0x29, 0x00, 0x00,
}
func (m *APIGroup) Marshal() (dAtA []byte, err error) {
@@ -1953,6 +1986,16 @@ func (m *DeleteOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
+ if m.IgnoreStoreReadErrorWithClusterBreakingPotential != nil {
+ i--
+ if *m.IgnoreStoreReadErrorWithClusterBreakingPotential {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x30
+ }
if len(m.DryRun) > 0 {
for iNdEx := len(m.DryRun) - 1; iNdEx >= 0; iNdEx-- {
i -= len(m.DryRun[iNdEx])
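Aside (not part of the vendored diff): the hard-coded `0x30` written by this hunk is the standard protobuf tag byte for the new field — `(field_number << 3) | wire_type`, so field 6 with varint wire type 0 yields 0x30, and the one-byte bool payload plus the tag is what the later `Size()` change accounts for with `n += 2`. A minimal standalone sketch of that arithmetic:

```go
// Illustrative only; reproduces the tag-byte arithmetic the generated
// marshaler hard-codes for DeleteOptions field 6 (varint-encoded bool).
package main

import "fmt"

func tagByte(fieldNumber, wireType int) byte {
	return byte(fieldNumber<<3 | wireType)
}

func main() {
	fmt.Printf("0x%x\n", tagByte(6, 0)) // 0x30, matching dAtA[i] = 0x30 above
}
```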
@@ -2025,6 +2068,48 @@ func (m *Duration) MarshalToSizedBuffer(dAtA []byte) (int, error) {
return len(dAtA) - i, nil
}
+func (m *FieldSelectorRequirement) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *FieldSelectorRequirement) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *FieldSelectorRequirement) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Values) > 0 {
+ for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Values[iNdEx])
+ copy(dAtA[i:], m.Values[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Values[iNdEx])))
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ i -= len(m.Operator)
+ copy(dAtA[i:], m.Operator)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operator)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Key)
+ copy(dAtA[i:], m.Key)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
func (m *FieldsV1) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
@@ -3701,6 +3786,9 @@ func (m *DeleteOptions) Size() (n int) {
n += 1 + l + sovGenerated(uint64(l))
}
}
+ if m.IgnoreStoreReadErrorWithClusterBreakingPotential != nil {
+ n += 2
+ }
return n
}
@@ -3714,6 +3802,25 @@ func (m *Duration) Size() (n int) {
return n
}
+func (m *FieldSelectorRequirement) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Key)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Operator)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Values) > 0 {
+ for _, s := range m.Values {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
func (m *FieldsV1) Size() (n int) {
if m == nil {
return 0
@@ -4415,6 +4522,7 @@ func (this *DeleteOptions) String() string {
`OrphanDependents:` + valueToStringGenerated(this.OrphanDependents) + `,`,
`PropagationPolicy:` + valueToStringGenerated(this.PropagationPolicy) + `,`,
`DryRun:` + fmt.Sprintf("%v", this.DryRun) + `,`,
+ `IgnoreStoreReadErrorWithClusterBreakingPotential:` + valueToStringGenerated(this.IgnoreStoreReadErrorWithClusterBreakingPotential) + `,`,
`}`,
}, "")
return s
@@ -4429,6 +4537,18 @@ func (this *Duration) String() string {
}, "")
return s
}
+func (this *FieldSelectorRequirement) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&FieldSelectorRequirement{`,
+ `Key:` + fmt.Sprintf("%v", this.Key) + `,`,
+ `Operator:` + fmt.Sprintf("%v", this.Operator) + `,`,
+ `Values:` + fmt.Sprintf("%v", this.Values) + `,`,
+ `}`,
+ }, "")
+ return s
+}
func (this *GetOptions) String() string {
if this == nil {
return "nil"
@@ -6353,6 +6473,27 @@ func (m *DeleteOptions) Unmarshal(dAtA []byte) error {
}
m.DryRun = append(m.DryRun, string(dAtA[iNdEx:postIndex]))
iNdEx = postIndex
+ case 6:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field IgnoreStoreReadErrorWithClusterBreakingPotential", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ b := bool(v != 0)
+ m.IgnoreStoreReadErrorWithClusterBreakingPotential = &b
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -6443,6 +6584,152 @@ func (m *Duration) Unmarshal(dAtA []byte) error {
}
return nil
}
+func (m *FieldSelectorRequirement) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: FieldSelectorRequirement: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: FieldSelectorRequirement: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Key = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Operator", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Operator = FieldSelectorOperator(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Values = append(m.Values, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
func (m *FieldsV1) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto
index 2b95700f72..865d3e7caa 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto
@@ -315,6 +315,21 @@ message DeleteOptions {
// +optional
// +listType=atomic
repeated string dryRun = 5;
+
+ // if set to true, it will trigger an unsafe deletion of the resource in
+ // case the normal deletion flow fails with a corrupt object error.
+ // A resource is considered corrupt if it can not be retrieved from
+ // the underlying storage successfully because of a) its data can
+ // not be transformed e.g. decryption failure, or b) it fails
+ // to decode into an object.
+ // NOTE: unsafe deletion ignores finalizer constraints, skips
+ // precondition checks, and removes the object from the storage.
+ // WARNING: This may potentially break the cluster if the workload
+ // associated with the resource being unsafe-deleted relies on normal
+ // deletion flow. Use only if you REALLY know what you are doing.
+ // The default value is false, and the user must opt in to enable it
+ // +optional
+ optional bool ignoreStoreReadErrorWithClusterBreakingPotential = 6;
}
// Duration is a wrapper around time.Duration which supports correct
@@ -324,6 +339,25 @@ message Duration {
optional int64 duration = 1;
}
+// FieldSelectorRequirement is a selector that contains values, a key, and an operator that
+// relates the key and values.
+message FieldSelectorRequirement {
+ // key is the field selector key that the requirement applies to.
+ optional string key = 1;
+
+ // operator represents a key's relationship to a set of values.
+ // Valid operators are In, NotIn, Exists, DoesNotExist.
+ // The list of operators may grow in the future.
+ optional string operator = 2;
+
+ // values is an array of string values.
+ // If the operator is In or NotIn, the values array must be non-empty.
+ // If the operator is Exists or DoesNotExist, the values array must be empty.
+ // +optional
+ // +listType=atomic
+ repeated string values = 3;
+}
+
// FieldsV1 stores a set of fields in a data structure like a Trie, in JSON format.
//
// Each key is either a '.' representing the field itself, and will always map to an empty set,
@@ -460,7 +494,7 @@ message List {
optional ListMeta metadata = 1;
// List of objects
- repeated k8s.io.apimachinery.pkg.runtime.RawExtension items = 2;
+ repeated .k8s.io.apimachinery.pkg.runtime.RawExtension items = 2;
}
// ListMeta describes metadata that synthetic resources must have, including lists and
@@ -1209,6 +1243,6 @@ message WatchEvent {
// * If Type is Deleted: the state of the object immediately before deletion.
// * If Type is Error: *Status is recommended; other types may make sense
// depending on context.
- optional k8s.io.apimachinery.pkg.runtime.RawExtension object = 2;
+ optional .k8s.io.apimachinery.pkg.runtime.RawExtension object = 2;
}
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go
index 592dcb8a74..c748071ed7 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go
@@ -24,8 +24,10 @@ import (
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
+ cbor "k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct"
"k8s.io/apimachinery/pkg/selection"
"k8s.io/apimachinery/pkg/types"
+ utiljson "k8s.io/apimachinery/pkg/util/json"
)
// LabelSelectorAsSelector converts the LabelSelector api type into a struct that implements
@@ -280,13 +282,20 @@ func (f FieldsV1) MarshalJSON() ([]byte, error) {
if f.Raw == nil {
return []byte("null"), nil
}
+ if f.getContentType() == fieldsV1InvalidOrValidCBORObject {
+ var u map[string]interface{}
+ if err := cbor.Unmarshal(f.Raw, &u); err != nil {
+ return nil, fmt.Errorf("metav1.FieldsV1 cbor invalid: %w", err)
+ }
+ return utiljson.Marshal(u)
+ }
return f.Raw, nil
}
// UnmarshalJSON implements json.Unmarshaler
func (f *FieldsV1) UnmarshalJSON(b []byte) error {
if f == nil {
- return errors.New("metav1.Fields: UnmarshalJSON on nil pointer")
+ return errors.New("metav1.FieldsV1: UnmarshalJSON on nil pointer")
}
if !bytes.Equal(b, []byte("null")) {
f.Raw = append(f.Raw[0:0], b...)
@@ -296,3 +305,75 @@ func (f *FieldsV1) UnmarshalJSON(b []byte) error {
var _ json.Marshaler = FieldsV1{}
var _ json.Unmarshaler = &FieldsV1{}
+
+func (f FieldsV1) MarshalCBOR() ([]byte, error) {
+ if f.Raw == nil {
+ return cbor.Marshal(nil)
+ }
+ if f.getContentType() == fieldsV1InvalidOrValidJSONObject {
+ var u map[string]interface{}
+ if err := utiljson.Unmarshal(f.Raw, &u); err != nil {
+ return nil, fmt.Errorf("metav1.FieldsV1 json invalid: %w", err)
+ }
+ return cbor.Marshal(u)
+ }
+ return f.Raw, nil
+}
+
+var cborNull = []byte{0xf6}
+
+func (f *FieldsV1) UnmarshalCBOR(b []byte) error {
+ if f == nil {
+ return errors.New("metav1.FieldsV1: UnmarshalCBOR on nil pointer")
+ }
+ if !bytes.Equal(b, cborNull) {
+ f.Raw = append(f.Raw[0:0], b...)
+ }
+ return nil
+}
+
+const (
+ // fieldsV1InvalidOrEmpty indicates that a FieldsV1 either contains no raw bytes or its raw
+ // bytes don't represent an allowable value in any supported encoding.
+ fieldsV1InvalidOrEmpty = iota
+
+ // fieldsV1InvalidOrValidJSONObject indicates that a FieldV1 either contains raw bytes that
+ // are a valid JSON encoding of an allowable value or don't represent an allowable value in
+ // any supported encoding.
+ fieldsV1InvalidOrValidJSONObject
+
+ // fieldsV1InvalidOrValidCBORObject indicates that a FieldV1 either contains raw bytes that
+ // are a valid CBOR encoding of an allowable value or don't represent an allowable value in
+ // any supported encoding.
+ fieldsV1InvalidOrValidCBORObject
+)
+
+// getContentType returns one of fieldsV1InvalidOrEmpty, fieldsV1InvalidOrValidJSONObject,
+// fieldsV1InvalidOrValidCBORObject based on the value of Raw.
+//
+// Raw can be encoded in JSON or CBOR and is only valid if it is empty, null, or an object (map)
+// value. It is invalid if it contains a JSON string, number, boolean, or array. If Raw is nonempty
+// and represents an allowable value, then the initial byte unambiguously distinguishes a
+// JSON-encoded value from a CBOR-encoded value.
+//
+// A valid JSON-encoded value can begin with any of the four JSON whitespace characters, the first
+// character 'n' of null, or '{' (0x09, 0x0a, 0x0d, 0x20, 0x6e, or 0x7b, respectively). A valid
+// CBOR-encoded value can begin with the null simple value, an initial byte with major type "map",
+// or, if a tag-enclosed map, an initial byte with major type "tag" (0xf6, 0xa0...0xbf, or
+// 0xc6...0xdb). The two sets of valid initial bytes don't intersect.
+func (f FieldsV1) getContentType() int {
+ if len(f.Raw) > 0 {
+ p := f.Raw[0]
+ switch p {
+ case 'n', '{', '\t', '\r', '\n', ' ':
+ return fieldsV1InvalidOrValidJSONObject
+ case 0xf6: // null
+ return fieldsV1InvalidOrValidCBORObject
+ default:
+ if p >= 0xa0 && p <= 0xbf /* map */ || p >= 0xc6 && p <= 0xdb /* tag */ {
+ return fieldsV1InvalidOrValidCBORObject
+ }
+ }
+ }
+ return fieldsV1InvalidOrEmpty
+}
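The new `FieldsV1` CBOR support hinges on `getContentType` telling the two encodings apart from the first byte of `Raw`, as documented in the comment above. A small standalone sketch (assumptions: the sample payloads below are made up; the real package keeps this logic unexported):

```go
// Reproduces the first-byte disambiguation described in getContentType:
// valid JSON objects start with whitespace, 'n' (null), or '{' (0x7b), while
// valid CBOR values start with null (0xf6), a map initial byte (0xa0..0xbf),
// or a tag initial byte (0xc6..0xdb); the two sets do not overlap.
package main

import "fmt"

func sniff(raw []byte) string {
	if len(raw) == 0 {
		return "empty or invalid"
	}
	switch p := raw[0]; {
	case p == 'n' || p == '{' || p == '\t' || p == '\r' || p == '\n' || p == ' ':
		return "JSON"
	case p == 0xf6, p >= 0xa0 && p <= 0xbf, p >= 0xc6 && p <= 0xdb:
		return "CBOR"
	default:
		return "empty or invalid"
	}
}

func main() {
	fmt.Println(sniff([]byte(`{"f:spec":{}}`))) // JSON
	fmt.Println(sniff([]byte{0xa0}))            // CBOR (empty map)
}
```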
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go
index 8eb37f4367..9f302b3f36 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go
@@ -19,6 +19,8 @@ package v1
import (
"encoding/json"
"time"
+
+ cbor "k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct"
)
const RFC3339Micro = "2006-01-02T15:04:05.000000Z07:00"
@@ -129,6 +131,25 @@ func (t *MicroTime) UnmarshalJSON(b []byte) error {
return nil
}
+func (t *MicroTime) UnmarshalCBOR(b []byte) error {
+ var s *string
+ if err := cbor.Unmarshal(b, &s); err != nil {
+ return err
+ }
+ if s == nil {
+ t.Time = time.Time{}
+ return nil
+ }
+
+ parsed, err := time.Parse(RFC3339Micro, *s)
+ if err != nil {
+ return err
+ }
+
+ t.Time = parsed.Local()
+ return nil
+}
+
// UnmarshalQueryParameter converts from a URL query parameter value to an object
func (t *MicroTime) UnmarshalQueryParameter(str string) error {
if len(str) == 0 {
@@ -160,6 +181,13 @@ func (t MicroTime) MarshalJSON() ([]byte, error) {
return json.Marshal(t.UTC().Format(RFC3339Micro))
}
+func (t MicroTime) MarshalCBOR() ([]byte, error) {
+ if t.IsZero() {
+ return cbor.Marshal(nil)
+ }
+ return cbor.Marshal(t.UTC().Format(RFC3339Micro))
+}
+
// OpenAPISchemaType is used by the kube-openapi generator when constructing
// the OpenAPI spec of this type.
//
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_fuzz.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_fuzz.go
index 3cf9d48e96..a5f437b4b3 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_fuzz.go
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_fuzz.go
@@ -20,21 +20,22 @@ limitations under the License.
package v1
import (
+ "math/rand"
"time"
- fuzz "github.com/google/gofuzz"
+ "sigs.k8s.io/randfill"
)
-// Fuzz satisfies fuzz.Interface.
-func (t *MicroTime) Fuzz(c fuzz.Continue) {
+// Fuzz satisfies randfill.SimpleSelfFiller.
+func (t *MicroTime) RandFill(r *rand.Rand) {
if t == nil {
return
}
// Allow for about 1000 years of randomness. Accurate to a tenth of
// micro second. Leave off nanoseconds because JSON doesn't
// represent them so they can't round-trip properly.
- t.Time = time.Unix(c.Rand.Int63n(1000*365*24*60*60), 1000*c.Rand.Int63n(1000000))
+ t.Time = time.Unix(r.Int63n(1000*365*24*60*60), 1000*r.Int63n(1000000))
}
-// ensure MicroTime implements fuzz.Interface
-var _ fuzz.Interface = &MicroTime{}
+// ensure MicroTime implements randfill.Interface
+var _ randfill.SimpleSelfFiller = &MicroTime{}
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go
index 421770d432..0333cfdb33 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go
@@ -19,6 +19,8 @@ package v1
import (
"encoding/json"
"time"
+
+ cbor "k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct"
)
// Time is a wrapper around time.Time which supports correct
@@ -116,6 +118,25 @@ func (t *Time) UnmarshalJSON(b []byte) error {
return nil
}
+func (t *Time) UnmarshalCBOR(b []byte) error {
+ var s *string
+ if err := cbor.Unmarshal(b, &s); err != nil {
+ return err
+ }
+ if s == nil {
+ t.Time = time.Time{}
+ return nil
+ }
+
+ parsed, err := time.Parse(time.RFC3339, *s)
+ if err != nil {
+ return err
+ }
+
+ t.Time = parsed.Local()
+ return nil
+}
+
// UnmarshalQueryParameter converts from a URL query parameter value to an object
func (t *Time) UnmarshalQueryParameter(str string) error {
if len(str) == 0 {
@@ -151,6 +172,14 @@ func (t Time) MarshalJSON() ([]byte, error) {
return buf, nil
}
+func (t Time) MarshalCBOR() ([]byte, error) {
+ if t.IsZero() {
+ return cbor.Marshal(nil)
+ }
+
+ return cbor.Marshal(t.UTC().Format(time.RFC3339))
+}
+
// ToUnstructured implements the value.UnstructuredConverter interface.
func (t Time) ToUnstructured() interface{} {
if t.IsZero() {
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_fuzz.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_fuzz.go
index bf9e21b5bd..48fb978450 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_fuzz.go
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_fuzz.go
@@ -20,21 +20,22 @@ limitations under the License.
package v1
import (
+ "math/rand"
"time"
- fuzz "github.com/google/gofuzz"
+ "sigs.k8s.io/randfill"
)
-// Fuzz satisfies fuzz.Interface.
-func (t *Time) Fuzz(c fuzz.Continue) {
+// Fuzz satisfies randfill.SimpleSelfFiller.
+func (t *Time) RandFill(r *rand.Rand) {
if t == nil {
return
}
// Allow for about 1000 years of randomness. Leave off nanoseconds
// because JSON doesn't represent them so they can't round-trip
// properly.
- t.Time = time.Unix(c.Rand.Int63n(1000*365*24*60*60), 0)
+ t.Time = time.Unix(r.Int63n(1000*365*24*60*60), 0)
}
-// ensure Time implements fuzz.Interface
-var _ fuzz.Interface = &Time{}
+// ensure Time implements randfill.SimpleSelfFiller
+var _ randfill.SimpleSelfFiller = &Time{}
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go
index 9695ba50b4..4cf3f47956 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go
@@ -439,6 +439,20 @@ const (
//
// The annotation is added to a "Bookmark" event.
InitialEventsAnnotationKey = "k8s.io/initial-events-end"
+
+ // InitialEventsListBlueprintAnnotationKey is the name of the key
+ // where an empty, versioned list is encoded in the requested format
+ // (e.g., protobuf, JSON, CBOR), then base64-encoded and stored as a string.
+ //
+ // This encoding matches the request encoding format, which may be
+ // protobuf, JSON, CBOR, or others, depending on what the client requested.
+ // This ensures that the reconstructed list can be processed through the
+ // same decoder chain that would handle a standard LIST call response.
+ //
+ // The annotation is added to a "Bookmark" event and is used by clients
+ // to guarantee the format consistency when reconstructing
+ // the list during WatchList processing.
+ InitialEventsListBlueprintAnnotationKey = "kubernetes.io/initial-events-list-blueprint"
)
// resourceVersionMatch specifies how the resourceVersion parameter is applied. resourceVersionMatch
@@ -546,6 +560,21 @@ type DeleteOptions struct {
// +optional
// +listType=atomic
DryRun []string `json:"dryRun,omitempty" protobuf:"bytes,5,rep,name=dryRun"`
+
+ // if set to true, it will trigger an unsafe deletion of the resource in
+ // case the normal deletion flow fails with a corrupt object error.
+ // A resource is considered corrupt if it can not be retrieved from
+ // the underlying storage successfully because of a) its data can
+ // not be transformed e.g. decryption failure, or b) it fails
+ // to decode into an object.
+ // NOTE: unsafe deletion ignores finalizer constraints, skips
+ // precondition checks, and removes the object from the storage.
+ // WARNING: This may potentially break the cluster if the workload
+ // associated with the resource being unsafe-deleted relies on normal
+ // deletion flow. Use only if you REALLY know what you are doing.
+ // The default value is false, and the user must opt in to enable it
+ // +optional
+ IgnoreStoreReadErrorWithClusterBreakingPotential *bool `json:"ignoreStoreReadErrorWithClusterBreakingPotential,omitempty" protobuf:"varint,6,opt,name=ignoreStoreReadErrorWithClusterBreakingPotential"`
}
const (
@@ -902,6 +931,22 @@ const (
// Status code 500
StatusReasonServerTimeout StatusReason = "ServerTimeout"
+ // StatusReasonStoreReadError means that the server encountered an error while
+ // retrieving resources from the backend object store.
+ // This may be due to backend database error, or because processing of the read
+ // resource failed.
+ // Details:
+ // "kind" string - the kind attribute of the resource being acted on.
+ // "name" string - the prefix where the reading error(s) occurred
+ // "causes" []StatusCause
+ // - (optional):
+ // - "type" CauseType - CauseTypeUnexpectedServerResponse
+ // - "message" string - the error message from the store backend
+ // - "field" string - the full path with the key of the resource that failed reading
+ //
+ // Status code 500
+ StatusReasonStoreReadError StatusReason = "StorageReadError"
+
// StatusReasonTimeout means that the request could not be completed within the given time.
// Clients can get this response only when they specified a timeout param in the request,
// or if the server cannot complete the operation within a reasonable amount of time.
@@ -1278,6 +1323,33 @@ const (
LabelSelectorOpDoesNotExist LabelSelectorOperator = "DoesNotExist"
)
+// FieldSelectorRequirement is a selector that contains values, a key, and an operator that
+// relates the key and values.
+type FieldSelectorRequirement struct {
+ // key is the field selector key that the requirement applies to.
+ Key string `json:"key" protobuf:"bytes,1,opt,name=key"`
+ // operator represents a key's relationship to a set of values.
+ // Valid operators are In, NotIn, Exists, DoesNotExist.
+ // The list of operators may grow in the future.
+ Operator FieldSelectorOperator `json:"operator" protobuf:"bytes,2,opt,name=operator,casttype=FieldSelectorOperator"`
+ // values is an array of string values.
+ // If the operator is In or NotIn, the values array must be non-empty.
+ // If the operator is Exists or DoesNotExist, the values array must be empty.
+ // +optional
+ // +listType=atomic
+ Values []string `json:"values,omitempty" protobuf:"bytes,3,rep,name=values"`
+}
+
+// A field selector operator is the set of operators that can be used in a selector requirement.
+type FieldSelectorOperator string
+
+const (
+ FieldSelectorOpIn FieldSelectorOperator = "In"
+ FieldSelectorOpNotIn FieldSelectorOperator = "NotIn"
+ FieldSelectorOpExists FieldSelectorOperator = "Exists"
+ FieldSelectorOpDoesNotExist FieldSelectorOperator = "DoesNotExist"
+)
+
// ManagedFieldsEntry is a workflow-id, a FieldSet and the group version of the resource
// that the fieldset applies to.
type ManagedFieldsEntry struct {
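For orientation, a hedged usage sketch of the two additions to types.go above — the opt-in unsafe-delete flag on `DeleteOptions` and the new `FieldSelectorRequirement` type. The type and field names come from the diff; the selector key and values are invented for the example, and how the options are ultimately sent to a server is out of scope here.

```go
// Minimal sketch, assuming the vendored apimachinery version from this diff.
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Explicit opt-in to unsafe deletion of a corrupt object; nil/false is the default.
	ignore := true
	opts := metav1.DeleteOptions{
		IgnoreStoreReadErrorWithClusterBreakingPotential: &ignore,
	}

	// A structured field selector requirement, mirroring LabelSelectorRequirement.
	req := metav1.FieldSelectorRequirement{
		Key:      "spec.nodeName", // hypothetical key for illustration
		Operator: metav1.FieldSelectorOpIn,
		Values:   []string{"node-1"},
	}

	fmt.Printf("%+v\n%+v\n", opts, req)
}
```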
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go
index b736e83712..405496d3df 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go
@@ -129,12 +129,24 @@ var map_DeleteOptions = map[string]string{
"orphanDependents": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.",
"propagationPolicy": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.",
"dryRun": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "ignoreStoreReadErrorWithClusterBreakingPotential": "if set to true, it will trigger an unsafe deletion of the resource in case the normal deletion flow fails with a corrupt object error. A resource is considered corrupt if it can not be retrieved from the underlying storage successfully because of a) its data can not be transformed e.g. decryption failure, or b) it fails to decode into an object. NOTE: unsafe deletion ignores finalizer constraints, skips precondition checks, and removes the object from the storage. WARNING: This may potentially break the cluster if the workload associated with the resource being unsafe-deleted relies on normal deletion flow. Use only if you REALLY know what you are doing. The default value is false, and the user must opt in to enable it",
}
func (DeleteOptions) SwaggerDoc() map[string]string {
return map_DeleteOptions
}
+var map_FieldSelectorRequirement = map[string]string{
+ "": "FieldSelectorRequirement is a selector that contains values, a key, and an operator that relates the key and values.",
+ "key": "key is the field selector key that the requirement applies to.",
+ "operator": "operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. The list of operators may grow in the future.",
+ "values": "values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty.",
+}
+
+func (FieldSelectorRequirement) SwaggerDoc() map[string]string {
+ return map_FieldSelectorRequirement
+}
+
var map_FieldsV1 = map[string]string{
"": "FieldsV1 stores a set of fields in a data structure like a Trie, in JSON format.\n\nEach key is either a '.' representing the field itself, and will always map to an empty set, or a string representing a sub-field or item. The string will follow one of these four formats: 'f:', where is the name of a field in a struct, or key in a map 'v:', where is the exact json formatted value of a list item 'i:', where is position of a item in a list 'k:', where is a map of a list item's key fields to their unique values If a key maps to an empty Fields value, the field that key represents is part of the set.\n\nThe exact format is defined in sigs.k8s.io/structured-merge-diff",
}
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go
index 0f58d66c09..59f43b7bff 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go
@@ -20,6 +20,7 @@ import (
gojson "encoding/json"
"fmt"
"io"
+ "math/big"
"strings"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -125,6 +126,29 @@ func NestedInt64(obj map[string]interface{}, fields ...string) (int64, bool, err
return i, true, nil
}
+// NestedNumberAsFloat64 returns the float64 value of a nested field. If the field's value is a
+// float64, it is returned. If the field's value is an int64 that can be losslessly converted to
+// float64, it will be converted and returned. Returns false if value is not found and an error if
+// not a float64 or an int64 that can be accurately represented as a float64.
+func NestedNumberAsFloat64(obj map[string]interface{}, fields ...string) (float64, bool, error) {
+ val, found, err := NestedFieldNoCopy(obj, fields...)
+ if !found || err != nil {
+ return 0, found, err
+ }
+ switch x := val.(type) {
+ case int64:
+ f, accuracy := big.NewInt(x).Float64()
+ if accuracy != big.Exact {
+ return 0, false, fmt.Errorf("%v accessor error: int64 value %v cannot be losslessly converted to float64", jsonPath(fields), x)
+ }
+ return f, true, nil
+ case float64:
+ return x, true, nil
+ default:
+ return 0, false, fmt.Errorf("%v accessor error: %v is of the type %T, expected float64 or int64", jsonPath(fields), val, val)
+ }
+}
+
// NestedStringSlice returns a copy of []string value of a nested field.
// Returns false if value is not found and an error if not a []interface{} or contains non-string items in the slice.
func NestedStringSlice(obj map[string]interface{}, fields ...string) ([]string, bool, error) {
@@ -164,7 +188,7 @@ func NestedSlice(obj map[string]interface{}, fields ...string) ([]interface{}, b
// NestedStringMap returns a copy of map[string]string value of a nested field.
// Returns false if value is not found and an error if not a map[string]interface{} or contains non-string values in the map.
func NestedStringMap(obj map[string]interface{}, fields ...string) (map[string]string, bool, error) {
- m, found, err := nestedMapNoCopy(obj, fields...)
+ m, found, err := nestedMapNoCopy(obj, false, fields...)
if !found || err != nil {
return nil, found, err
}
@@ -179,10 +203,32 @@ func NestedStringMap(obj map[string]interface{}, fields ...string) (map[string]s
return strMap, true, nil
}
+// NestedNullCoercingStringMap returns a copy of map[string]string value of a nested field.
+// Returns `nil, true, nil` if the value exists and is explicitly null.
+// Returns `nil, false, err` if the value is not a map or a null value, or is a map and contains non-string non-null values.
+// Null values in the map are coerced to "" to match json decoding behavior.
+func NestedNullCoercingStringMap(obj map[string]interface{}, fields ...string) (map[string]string, bool, error) {
+ m, found, err := nestedMapNoCopy(obj, true, fields...)
+ if !found || err != nil || m == nil {
+ return nil, found, err
+ }
+ strMap := make(map[string]string, len(m))
+ for k, v := range m {
+ if str, ok := v.(string); ok {
+ strMap[k] = str
+ } else if v == nil {
+ strMap[k] = ""
+ } else {
+ return nil, false, fmt.Errorf("%v accessor error: contains non-string value in the map under key %q: %v is of the type %T, expected string", jsonPath(fields), k, v, v)
+ }
+ }
+ return strMap, true, nil
+}
+
// NestedMap returns a deep copy of map[string]interface{} value of a nested field.
// Returns false if value is not found and an error if not a map[string]interface{}.
func NestedMap(obj map[string]interface{}, fields ...string) (map[string]interface{}, bool, error) {
- m, found, err := nestedMapNoCopy(obj, fields...)
+ m, found, err := nestedMapNoCopy(obj, false, fields...)
if !found || err != nil {
return nil, found, err
}
@@ -191,11 +237,14 @@ func NestedMap(obj map[string]interface{}, fields ...string) (map[string]interfa
// nestedMapNoCopy returns a map[string]interface{} value of a nested field.
// Returns false if value is not found and an error if not a map[string]interface{}.
-func nestedMapNoCopy(obj map[string]interface{}, fields ...string) (map[string]interface{}, bool, error) {
+func nestedMapNoCopy(obj map[string]interface{}, tolerateNil bool, fields ...string) (map[string]interface{}, bool, error) {
val, found, err := NestedFieldNoCopy(obj, fields...)
if !found || err != nil {
return nil, found, err
}
+ if val == nil && tolerateNil {
+ return nil, true, nil
+ }
m, ok := val.(map[string]interface{})
if !ok {
return nil, false, fmt.Errorf("%v accessor error: %v is of the type %T, expected map[string]interface{}", jsonPath(fields), val, val)
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go
index 40d289f375..fdb0c86297 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go
@@ -397,7 +397,7 @@ func (u *Unstructured) SetDeletionGracePeriodSeconds(deletionGracePeriodSeconds
}
func (u *Unstructured) GetLabels() map[string]string {
- m, _, _ := NestedStringMap(u.Object, "metadata", "labels")
+ m, _, _ := NestedNullCoercingStringMap(u.Object, "metadata", "labels")
return m
}
@@ -410,7 +410,7 @@ func (u *Unstructured) SetLabels(labels map[string]string) {
}
func (u *Unstructured) GetAnnotations() map[string]string {
- m, _, _ := NestedStringMap(u.Object, "metadata", "annotations")
+ m, _, _ := NestedNullCoercingStringMap(u.Object, "metadata", "annotations")
return m
}
@@ -450,10 +450,14 @@ func (u *Unstructured) SetFinalizers(finalizers []string) {
}
func (u *Unstructured) GetManagedFields() []metav1.ManagedFieldsEntry {
- items, found, err := NestedSlice(u.Object, "metadata", "managedFields")
+ v, found, err := NestedFieldNoCopy(u.Object, "metadata", "managedFields")
if !found || err != nil {
return nil
}
+ items, ok := v.([]interface{})
+ if !ok {
+ return nil
+ }
managedFields := []metav1.ManagedFieldsEntry{}
for _, item := range items {
m, ok := item.(map[string]interface{})
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.conversion.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.conversion.go
index afe01ed5a4..82e2722404 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.conversion.go
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.conversion.go
@@ -339,6 +339,13 @@ func autoConvert_url_Values_To_v1_DeleteOptions(in *url.Values, out *DeleteOptio
} else {
out.DryRun = nil
}
+ if values, ok := map[string][]string(*in)["ignoreStoreReadErrorWithClusterBreakingPotential"]; ok && len(values) > 0 {
+ if err := runtime.Convert_Slice_string_To_Pointer_bool(&values, &out.IgnoreStoreReadErrorWithClusterBreakingPotential, s); err != nil {
+ return err
+ }
+ } else {
+ out.IgnoreStoreReadErrorWithClusterBreakingPotential = nil
+ }
return nil
}
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go
index 7d29c504ab..6b0d0dfee9 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go
@@ -290,6 +290,11 @@ func (in *DeleteOptions) DeepCopyInto(out *DeleteOptions) {
*out = make([]string, len(*in))
copy(*out, *in)
}
+ if in.IgnoreStoreReadErrorWithClusterBreakingPotential != nil {
+ in, out := &in.IgnoreStoreReadErrorWithClusterBreakingPotential, &out.IgnoreStoreReadErrorWithClusterBreakingPotential
+ *out = new(bool)
+ **out = **in
+ }
return
}
@@ -327,6 +332,27 @@ func (in *Duration) DeepCopy() *Duration {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FieldSelectorRequirement) DeepCopyInto(out *FieldSelectorRequirement) {
+ *out = *in
+ if in.Values != nil {
+ in, out := &in.Values, &out.Values
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FieldSelectorRequirement.
+func (in *FieldSelectorRequirement) DeepCopy() *FieldSelectorRequirement {
+ if in == nil {
+ return nil
+ }
+ out := new(FieldSelectorRequirement)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FieldsV1) DeepCopyInto(out *FieldsV1) {
*out = *in
diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/doc.go b/vendor/k8s.io/apimachinery/pkg/conversion/doc.go
index 7415d81646..0c46ef2d16 100644
--- a/vendor/k8s.io/apimachinery/pkg/conversion/doc.go
+++ b/vendor/k8s.io/apimachinery/pkg/conversion/doc.go
@@ -21,4 +21,4 @@ limitations under the License.
// but for the fields which did not change, copying is automated. This makes it
// easy to modify the structures you use in memory without affecting the format
// you store on disk or respond to in your external API calls.
-package conversion // import "k8s.io/apimachinery/pkg/conversion"
+package conversion
diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/doc.go b/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/doc.go
index 7b763de6f0..4c1002a4c1 100644
--- a/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/doc.go
+++ b/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/doc.go
@@ -16,4 +16,4 @@ limitations under the License.
// Package queryparams provides conversion from versioned
// runtime objects to URL query values
-package queryparams // import "k8s.io/apimachinery/pkg/conversion/queryparams"
+package queryparams
diff --git a/vendor/k8s.io/apimachinery/pkg/fields/doc.go b/vendor/k8s.io/apimachinery/pkg/fields/doc.go
index c39b8039ae..49059e2635 100644
--- a/vendor/k8s.io/apimachinery/pkg/fields/doc.go
+++ b/vendor/k8s.io/apimachinery/pkg/fields/doc.go
@@ -16,4 +16,4 @@ limitations under the License.
// Package fields implements a simple field system, parsing and matching
// selectors with sets of fields.
-package fields // import "k8s.io/apimachinery/pkg/fields"
+package fields
diff --git a/vendor/k8s.io/apimachinery/pkg/labels/doc.go b/vendor/k8s.io/apimachinery/pkg/labels/doc.go
index 82de0051bd..35ba788094 100644
--- a/vendor/k8s.io/apimachinery/pkg/labels/doc.go
+++ b/vendor/k8s.io/apimachinery/pkg/labels/doc.go
@@ -16,4 +16,4 @@ limitations under the License.
// Package labels implements a simple label system, parsing and matching
// selectors with sets of labels.
-package labels // import "k8s.io/apimachinery/pkg/labels"
+package labels
diff --git a/vendor/k8s.io/apimachinery/pkg/labels/selector.go b/vendor/k8s.io/apimachinery/pkg/labels/selector.go
index 5e60142405..fafa81a3d2 100644
--- a/vendor/k8s.io/apimachinery/pkg/labels/selector.go
+++ b/vendor/k8s.io/apimachinery/pkg/labels/selector.go
@@ -18,6 +18,7 @@ package labels
import (
"fmt"
+ "slices"
"sort"
"strconv"
"strings"
@@ -27,7 +28,6 @@ import (
"k8s.io/apimachinery/pkg/util/validation"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/klog/v2"
- stringslices "k8s.io/utils/strings/slices"
)
var (
@@ -45,6 +45,19 @@ var (
// Requirements is AND of all requirements.
type Requirements []Requirement
+func (r Requirements) String() string {
+ var sb strings.Builder
+
+ for i, requirement := range r {
+ if i > 0 {
+ sb.WriteString(", ")
+ }
+ sb.WriteString(requirement.String())
+ }
+
+ return sb.String()
+}
+
// Selector represents a label selector.
type Selector interface {
// Matches returns true if this selector matches the given set of labels.
@@ -285,6 +298,13 @@ func (r *Requirement) Values() sets.String {
return ret
}
+// ValuesUnsorted returns a copy of requirement values as passed to NewRequirement without sorting.
+func (r *Requirement) ValuesUnsorted() []string {
+ ret := make([]string, 0, len(r.strValues))
+ ret = append(ret, r.strValues...)
+ return ret
+}
+
// Equal checks the equality of requirement.
func (r Requirement) Equal(x Requirement) bool {
if r.key != x.key {
@@ -293,7 +313,7 @@ func (r Requirement) Equal(x Requirement) bool {
if r.operator != x.operator {
return false
}
- return stringslices.Equal(r.strValues, x.strValues)
+ return slices.Equal(r.strValues, x.strValues)
}
// Empty returns true if the internalSelector doesn't restrict selection space
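
The new Requirements.String and Requirement.ValuesUnsorted helpers added above can be exercised directly; a small usage sketch, assuming this vendored labels package:

    package main

    import (
    	"fmt"

    	"k8s.io/apimachinery/pkg/labels"
    )

    func main() {
    	sel, err := labels.Parse("app in (web,api),tier!=dev")
    	if err != nil {
    		panic(err)
    	}
    	reqs, _ := sel.Requirements()
    	// Requirements now implements fmt.Stringer, joining the individual
    	// requirement strings with ", ".
    	fmt.Println(reqs.String())
    	for _, r := range reqs {
    		// ValuesUnsorted returns the values as passed to NewRequirement,
    		// without the sorting applied by Values().
    		fmt.Println(r.Key(), r.ValuesUnsorted())
    	}
    }
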
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/doc.go b/vendor/k8s.io/apimachinery/pkg/runtime/doc.go
index 89feb40103..b54429bd89 100644
--- a/vendor/k8s.io/apimachinery/pkg/runtime/doc.go
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/doc.go
@@ -48,4 +48,4 @@ limitations under the License.
//
// As a bonus, a few common types useful from all api objects and versions
// are provided in types.go.
-package runtime // import "k8s.io/apimachinery/pkg/runtime"
+package runtime
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/extension.go b/vendor/k8s.io/apimachinery/pkg/runtime/extension.go
index 9056397fa5..60c000bcb7 100644
--- a/vendor/k8s.io/apimachinery/pkg/runtime/extension.go
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/extension.go
@@ -18,16 +18,77 @@ package runtime
import (
"bytes"
- "encoding/json"
"errors"
+ "fmt"
+
+ cbor "k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct"
+ "k8s.io/apimachinery/pkg/util/json"
)
+// RawExtension intentionally avoids implementing value.UnstructuredConverter for now because the
+// signature of ToUnstructured does not allow returning an error value in cases where the conversion
+// is not possible (content type is unrecognized or bytes don't match content type).
+func rawToUnstructured(raw []byte, contentType string) (interface{}, error) {
+ switch contentType {
+ case ContentTypeJSON:
+ var u interface{}
+ if err := json.Unmarshal(raw, &u); err != nil {
+ return nil, fmt.Errorf("failed to parse RawExtension bytes as JSON: %w", err)
+ }
+ return u, nil
+ case ContentTypeCBOR:
+ var u interface{}
+ if err := cbor.Unmarshal(raw, &u); err != nil {
+ return nil, fmt.Errorf("failed to parse RawExtension bytes as CBOR: %w", err)
+ }
+ return u, nil
+ default:
+ return nil, fmt.Errorf("cannot convert RawExtension with unrecognized content type to unstructured")
+ }
+}
+
+func (re RawExtension) guessContentType() string {
+ switch {
+ case bytes.HasPrefix(re.Raw, cborSelfDescribed):
+ return ContentTypeCBOR
+ case len(re.Raw) > 0:
+ switch re.Raw[0] {
+ case '\t', '\r', '\n', ' ', '{', '[', 'n', 't', 'f', '"', '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+ // Prefixes for the four whitespace characters, objects, arrays, strings, numbers, true, false, and null.
+ return ContentTypeJSON
+ }
+ }
+ return ""
+}
+
func (re *RawExtension) UnmarshalJSON(in []byte) error {
if re == nil {
return errors.New("runtime.RawExtension: UnmarshalJSON on nil pointer")
}
- if !bytes.Equal(in, []byte("null")) {
- re.Raw = append(re.Raw[0:0], in...)
+ if bytes.Equal(in, []byte("null")) {
+ return nil
+ }
+ re.Raw = append(re.Raw[0:0], in...)
+ return nil
+}
+
+var (
+ cborNull = []byte{0xf6}
+ cborSelfDescribed = []byte{0xd9, 0xd9, 0xf7}
+)
+
+func (re *RawExtension) UnmarshalCBOR(in []byte) error {
+ if re == nil {
+ return errors.New("runtime.RawExtension: UnmarshalCBOR on nil pointer")
+ }
+ if !bytes.Equal(in, cborNull) {
+ if !bytes.HasPrefix(in, cborSelfDescribed) {
+ // The self-described CBOR tag doesn't change the interpretation of the data
+ // item it encloses, but it is useful as a magic number. Its encoding is
+ // also what is used to implement the CBOR RecognizingDecoder.
+ re.Raw = append(re.Raw[:0], cborSelfDescribed...)
+ }
+ re.Raw = append(re.Raw, in...)
}
return nil
}
@@ -46,6 +107,35 @@ func (re RawExtension) MarshalJSON() ([]byte, error) {
}
return []byte("null"), nil
}
- // TODO: Check whether ContentType is actually JSON before returning it.
- return re.Raw, nil
+
+ contentType := re.guessContentType()
+ if contentType == ContentTypeJSON {
+ return re.Raw, nil
+ }
+
+ u, err := rawToUnstructured(re.Raw, contentType)
+ if err != nil {
+ return nil, err
+ }
+ return json.Marshal(u)
+}
+
+func (re RawExtension) MarshalCBOR() ([]byte, error) {
+ if re.Raw == nil {
+ if re.Object != nil {
+ return cbor.Marshal(re.Object)
+ }
+ return cbor.Marshal(nil)
+ }
+
+ contentType := re.guessContentType()
+ if contentType == ContentTypeCBOR {
+ return re.Raw, nil
+ }
+
+ u, err := rawToUnstructured(re.Raw, contentType)
+ if err != nil {
+ return nil, err
+ }
+ return cbor.Marshal(u)
}
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/helper.go b/vendor/k8s.io/apimachinery/pkg/runtime/helper.go
index cc0a77bba6..395dfdbd02 100644
--- a/vendor/k8s.io/apimachinery/pkg/runtime/helper.go
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/helper.go
@@ -284,3 +284,21 @@ func (e *encoderWithAllocator) Encode(obj Object, w io.Writer) error {
func (e *encoderWithAllocator) Identifier() Identifier {
return e.encoder.Identifier()
}
+
+type nondeterministicEncoderToEncoderAdapter struct {
+ NondeterministicEncoder
+}
+
+func (e nondeterministicEncoderToEncoderAdapter) Encode(obj Object, w io.Writer) error {
+ return e.EncodeNondeterministic(obj, w)
+}
+
+// UseNondeterministicEncoding returns an Encoder that encodes objects using the provided Encoder's
+// EncodeNondeterministic method if it implements NondeterministicEncoder, otherwise it returns the
+// provided Encoder as-is.
+func UseNondeterministicEncoding(encoder Encoder) Encoder {
+ if nondeterministic, ok := encoder.(NondeterministicEncoder); ok {
+ return nondeterministicEncoderToEncoderAdapter{nondeterministic}
+ }
+ return encoder
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go b/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go
index e89ea89391..202bf4f01a 100644
--- a/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go
@@ -69,6 +69,19 @@ type Encoder interface {
Identifier() Identifier
}
+// NondeterministicEncoder is implemented by Encoders that can serialize objects more efficiently in
+// cases where the output does not need to be deterministic.
+type NondeterministicEncoder interface {
+ Encoder
+
+ // EncodeNondeterministic writes an object to the stream. Unlike the Encode method of
+ // Encoder, EncodeNondeterministic does not guarantee that any two invocations will write
+ // the same sequence of bytes to the io.Writer. Any differences will not be significant to a
+ // generic decoder. For example, map entries and struct fields might be encoded in any
+ // order.
+ EncodeNondeterministic(Object, io.Writer) error
+}
+
// MemoryAllocator is responsible for allocating memory.
// By encapsulating memory allocation into its own interface, we can reuse the memory
// across many operations in places we know it can significantly improve the performance.
@@ -246,6 +259,7 @@ type ObjectDefaulter interface {
type ObjectVersioner interface {
ConvertToVersion(in Object, gv GroupVersioner) (out Object, err error)
+ PrioritizedVersionsForGroup(group string) []schema.GroupVersion
}
// ObjectConvertor converts an object to a different version.
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go b/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go
index a5b116718d..fde87f1a13 100644
--- a/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go
@@ -17,15 +17,18 @@ limitations under the License.
package runtime
import (
+ "context"
"fmt"
"reflect"
"strings"
+ "k8s.io/apimachinery/pkg/api/operation"
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/naming"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/sets"
+ "k8s.io/apimachinery/pkg/util/validation/field"
)
// Scheme defines methods for serializing and deserializing API objects, a type
@@ -68,6 +71,12 @@ type Scheme struct {
// the provided object must be a pointer.
defaulterFuncs map[reflect.Type]func(interface{})
+ // validationFuncs is a map to funcs to be called with an object to perform validation.
+ // The provided object must be a pointer.
+ // If oldObject is non-nil, update validation is performed and may perform additional
+ // validation such as transition rules and immutability checks.
+ validationFuncs map[reflect.Type]func(ctx context.Context, op operation.Operation, object, oldObject interface{}, subresources ...string) field.ErrorList
+
// converter stores all registered conversion functions. It also has
// default converting behavior.
converter *conversion.Converter
@@ -96,6 +105,7 @@ func NewScheme() *Scheme {
unversionedKinds: map[string]reflect.Type{},
fieldLabelConversionFuncs: map[schema.GroupVersionKind]FieldLabelConversionFunc{},
defaulterFuncs: map[reflect.Type]func(interface{}){},
+ validationFuncs: map[reflect.Type]func(ctx context.Context, op operation.Operation, object, oldObject interface{}, subresource ...string) field.ErrorList{},
versionPriority: map[string][]string{},
schemeName: naming.GetNameFromCallsite(internalPackages...),
}
@@ -347,6 +357,35 @@ func (s *Scheme) Default(src Object) {
}
}
+// AddValidationFunc registers a function that can validate the object and
+// oldObject. These functions will be invoked when Validate() or ValidateUpdate()
+// is called. The function will never be called unless the validated object
+// matches srcType. If this function is invoked twice with the same srcType, the
+// fn passed to the later call will be used instead.
+func (s *Scheme) AddValidationFunc(srcType Object, fn func(ctx context.Context, op operation.Operation, object, oldObject interface{}, subresources ...string) field.ErrorList) {
+ s.validationFuncs[reflect.TypeOf(srcType)] = fn
+}
+
+// Validate validates the provided Object according to the generated declarative validation code.
+// WARNING: This does not validate all objects! The handwritten validation code in validation.go
+// is not run when this is called. Only the generated zz_generated.validations.go validation code is run.
+func (s *Scheme) Validate(ctx context.Context, options sets.Set[string], object Object, subresources ...string) field.ErrorList {
+ if fn, ok := s.validationFuncs[reflect.TypeOf(object)]; ok {
+ return fn(ctx, operation.Operation{Type: operation.Create, Options: options}, object, nil, subresources...)
+ }
+ return nil
+}
+
+// ValidateUpdate validates the provided object and oldObject according to the generated declarative validation code.
+// WARNING: This does not validate all objects! The handwritten validation code in validation.go
+// is not run when this is called. Only the generated zz_generated.validations.go validation code is run.
+func (s *Scheme) ValidateUpdate(ctx context.Context, options sets.Set[string], object, oldObject Object, subresources ...string) field.ErrorList {
+ if fn, ok := s.validationFuncs[reflect.TypeOf(object)]; ok {
+ return fn(ctx, operation.Operation{Type: operation.Update, Options: options}, object, oldObject, subresources...)
+ }
+ return nil
+}
+
// Convert will attempt to convert in into out. Both must be pointers. For easy
// testing of conversion functions. Returns an error if the conversion isn't
// possible. You can call this with types that haven't been registered (for example,
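
A sketch of how the new declarative-validation hooks are wired up. The Widget type below is hypothetical and stands in only for a registered API type; in practice the registered functions are expected to come from generated zz_generated.validations.go code:

    package main

    import (
    	"context"
    	"fmt"

    	"k8s.io/apimachinery/pkg/api/operation"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	"k8s.io/apimachinery/pkg/runtime"
    	"k8s.io/apimachinery/pkg/util/validation/field"
    )

    // Widget is a hypothetical API type used only for illustration.
    type Widget struct {
    	metav1.TypeMeta `json:",inline"`
    	Name            string `json:"name"`
    }

    func (w *Widget) DeepCopyObject() runtime.Object {
    	c := *w
    	return &c
    }

    func main() {
    	scheme := runtime.NewScheme()
    	scheme.AddValidationFunc(&Widget{}, func(ctx context.Context, op operation.Operation, obj, old interface{}, _ ...string) field.ErrorList {
    		var errs field.ErrorList
    		if obj.(*Widget).Name == "" {
    			errs = append(errs, field.Required(field.NewPath("name"), "name is required"))
    		}
    		return errs
    	})

    	// Validate dispatches on the concrete type and runs the registered func
    	// with a Create operation; ValidateUpdate passes Update and oldObject.
    	errs := scheme.Validate(context.Background(), nil, &Widget{})
    	fmt.Println(errs)
    }
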
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/cbor.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/cbor.go
new file mode 100644
index 0000000000..4d069a903a
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/cbor.go
@@ -0,0 +1,389 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cbor
+
+import (
+ "bytes"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "io"
+ "strings"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes"
+ "k8s.io/apimachinery/pkg/runtime/serializer/recognizer"
+ util "k8s.io/apimachinery/pkg/util/runtime"
+
+ "github.com/fxamacker/cbor/v2"
+)
+
+type metaFactory interface {
+ // Interpret should return the version and kind of the wire-format of the object.
+ Interpret(data []byte) (*schema.GroupVersionKind, error)
+}
+
+type defaultMetaFactory struct{}
+
+func (mf *defaultMetaFactory) Interpret(data []byte) (*schema.GroupVersionKind, error) {
+ var tm metav1.TypeMeta
+ // The input is expected to include additional map keys besides apiVersion and kind, so use
+ // lax mode for decoding into TypeMeta.
+ if err := modes.DecodeLax.Unmarshal(data, &tm); err != nil {
+ return nil, fmt.Errorf("unable to determine group/version/kind: %w", err)
+ }
+ actual := tm.GetObjectKind().GroupVersionKind()
+ return &actual, nil
+}
+
+type Serializer interface {
+ runtime.Serializer
+ runtime.NondeterministicEncoder
+ recognizer.RecognizingDecoder
+
+ // NewSerializer returns a value of this interface type rather than exporting the serializer
+ // type and returning one of those because the zero value of serializer isn't ready to
+ // use. Users aren't intended to implement cbor.Serializer themselves, and this unexported
+ // interface method is here to prevent that (https://go.dev/blog/module-compatibility).
+ private()
+}
+
+var _ Serializer = &serializer{}
+
+type options struct {
+ strict bool
+ transcode bool
+}
+
+type Option func(*options)
+
+// Strict configures a serializer to return a strict decoding error when it encounters map keys that
+// do not correspond to a field in the target object of a decode operation. This option is disabled
+// by default.
+func Strict(s bool) Option {
+ return func(opts *options) {
+ opts.strict = s
+ }
+}
+
+// Transcode configures a serializer to transcode the "raw" bytes of a decoded runtime.RawExtension
+// or metav1.FieldsV1 object to JSON. This is enabled by default to support existing programs that
+// depend on the assumption that objects of either type contain valid JSON.
+func Transcode(s bool) Option {
+ return func(opts *options) {
+ opts.transcode = s
+ }
+}
+
+type serializer struct {
+ metaFactory metaFactory
+ creater runtime.ObjectCreater
+ typer runtime.ObjectTyper
+ options options
+}
+
+func (serializer) private() {}
+
+// NewSerializer creates and returns a serializer configured with the provided options. The default
+// options are equivalent to explicitly passing Strict(false) and Transcode(true).
+func NewSerializer(creater runtime.ObjectCreater, typer runtime.ObjectTyper, options ...Option) Serializer {
+ return newSerializer(&defaultMetaFactory{}, creater, typer, options...)
+}
+
+func newSerializer(metaFactory metaFactory, creater runtime.ObjectCreater, typer runtime.ObjectTyper, options ...Option) *serializer {
+ s := &serializer{
+ metaFactory: metaFactory,
+ creater: creater,
+ typer: typer,
+ }
+ s.options.transcode = true
+ for _, o := range options {
+ o(&s.options)
+ }
+ return s
+}
+
+func (s *serializer) Identifier() runtime.Identifier {
+ return "cbor"
+}
+
+// Encode writes a CBOR representation of the given object.
+//
+// Because the CBOR data item written by a call to Encode is always enclosed in the "self-described
+// CBOR" tag, its encoded form always has the prefix 0xd9d9f7. This prefix is suitable for use as a
+// "magic number" for distinguishing encoded CBOR from other protocols.
+//
+// The default serialization behavior for any given object replicates the behavior of the JSON
+// serializer as far as it is necessary to allow the CBOR serializer to be used as a drop-in
+// replacement for the JSON serializer, with limited exceptions. For example, the distinction
+// between integers and floating-point numbers is preserved in CBOR due to its distinct
+// representations for each type.
+//
+// Objects implementing runtime.Unstructured will have their unstructured content encoded rather
+// than following the default behavior for their dynamic type.
+func (s *serializer) Encode(obj runtime.Object, w io.Writer) error {
+ return s.encode(modes.Encode, obj, w)
+}
+
+func (s *serializer) EncodeNondeterministic(obj runtime.Object, w io.Writer) error {
+ return s.encode(modes.EncodeNondeterministic, obj, w)
+}
+
+func (s *serializer) encode(mode modes.EncMode, obj runtime.Object, w io.Writer) error {
+ var v interface{} = obj
+ if u, ok := obj.(runtime.Unstructured); ok {
+ v = u.UnstructuredContent()
+ }
+
+ if err := modes.RejectCustomMarshalers(v); err != nil {
+ return err
+ }
+
+ if _, err := w.Write(selfDescribedCBOR); err != nil {
+ return err
+ }
+
+ return mode.MarshalTo(v, w)
+}
+
+// gvkWithDefaults returns group kind and version defaulting from provided default
+func gvkWithDefaults(actual, defaultGVK schema.GroupVersionKind) schema.GroupVersionKind {
+ if len(actual.Kind) == 0 {
+ actual.Kind = defaultGVK.Kind
+ }
+ if len(actual.Version) == 0 && len(actual.Group) == 0 {
+ actual.Group = defaultGVK.Group
+ actual.Version = defaultGVK.Version
+ }
+ if len(actual.Version) == 0 && actual.Group == defaultGVK.Group {
+ actual.Version = defaultGVK.Version
+ }
+ return actual
+}
+
+// diagnose returns the diagnostic encoding of a well-formed CBOR data item.
+func diagnose(data []byte) string {
+ diag, err := modes.Diagnostic.Diagnose(data)
+ if err != nil {
+ // Since the input must already be well-formed CBOR, converting it to diagnostic
+ // notation should not fail.
+ util.HandleError(err)
+
+ return hex.EncodeToString(data)
+ }
+ return diag
+}
+
+// unmarshal unmarshals CBOR data from the provided byte slice into a Go object. If the decoder is
+// configured to report strict errors, the first error return value may be a non-nil strict decoding
+// error. If the last error return value is non-nil, then the unmarshal failed entirely and the
+// state of the destination object should not be relied on.
+func (s *serializer) unmarshal(data []byte, into interface{}) (strict, lax error) {
+ if u, ok := into.(runtime.Unstructured); ok {
+ var content map[string]interface{}
+ defer func() {
+ switch u := u.(type) {
+ case *unstructured.UnstructuredList:
+ // UnstructuredList's implementation of SetUnstructuredContent
+ // produces different objects than those produced by a decode using
+ // UnstructuredJSONScheme:
+ //
+ // 1. SetUnstructuredContent retains the "items" key in the list's
+ // Object field. It is omitted from Object when decoding with
+ // UnstructuredJSONScheme.
+ // 2. SetUnstructuredContent does not populate "apiVersion" and
+ // "kind" on each entry of its Items
+ // field. UnstructuredJSONScheme does, inferring the singular
+ // Kind from the list Kind.
+ // 3. SetUnstructuredContent ignores entries of "items" that are
+ // not JSON objects or are objects without
+ // "kind". UnstructuredJSONScheme returns an error in either
+ // case.
+ //
+ // UnstructuredJSONScheme's behavior is replicated here.
+ var items []interface{}
+ if uncast, present := content["items"]; present {
+ var cast bool
+ items, cast = uncast.([]interface{})
+ if !cast {
+ strict, lax = nil, fmt.Errorf("items field of UnstructuredList must be encoded as an array or null if present")
+ return
+ }
+ }
+ apiVersion, _ := content["apiVersion"].(string)
+ kind, _ := content["kind"].(string)
+ kind = strings.TrimSuffix(kind, "List")
+ var unstructureds []unstructured.Unstructured
+ if len(items) > 0 {
+ unstructureds = make([]unstructured.Unstructured, len(items))
+ }
+ for i := range items {
+ object, cast := items[i].(map[string]interface{})
+ if !cast {
+ strict, lax = nil, fmt.Errorf("elements of the items field of UnstructuredList must be encoded as a map")
+ return
+ }
+
+ // As in UnstructuredJSONScheme, only set the heuristic
+ // singular GVK when both "apiVersion" and "kind" are either
+ // missing, non-string, or empty.
+ object["apiVersion"], _ = object["apiVersion"].(string)
+ object["kind"], _ = object["kind"].(string)
+ if object["apiVersion"] == "" && object["kind"] == "" {
+ object["apiVersion"] = apiVersion
+ object["kind"] = kind
+ }
+
+ if object["kind"] == "" {
+ strict, lax = nil, runtime.NewMissingKindErr(diagnose(data))
+ return
+ }
+ if object["apiVersion"] == "" {
+ strict, lax = nil, runtime.NewMissingVersionErr(diagnose(data))
+ return
+ }
+
+ unstructureds[i].Object = object
+ }
+ delete(content, "items")
+ u.Object = content
+ u.Items = unstructureds
+ default:
+ u.SetUnstructuredContent(content)
+ }
+ }()
+ into = &content
+ } else if err := modes.RejectCustomMarshalers(into); err != nil {
+ return nil, err
+ }
+
+ if !s.options.strict {
+ return nil, modes.DecodeLax.Unmarshal(data, into)
+ }
+
+ err := modes.Decode.Unmarshal(data, into)
+ // TODO: UnknownFieldError is ambiguous. It only provides the index of the first problematic
+ // map entry encountered and does not indicate which map the index refers to.
+ var unknownField *cbor.UnknownFieldError
+ if errors.As(err, &unknownField) {
+ // Unlike JSON, there are no strict errors in CBOR for duplicate map keys. CBOR maps
+ // with duplicate keys are considered invalid according to the spec and are rejected
+ // entirely.
+ return runtime.NewStrictDecodingError([]error{unknownField}), modes.DecodeLax.Unmarshal(data, into)
+ }
+ return nil, err
+}
+
+func (s *serializer) Decode(data []byte, gvk *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) {
+ // A preliminary pass over the input to obtain the actual GVK is redundant on a successful
+ // decode into Unstructured.
+ if _, ok := into.(runtime.Unstructured); ok {
+ if _, unmarshalErr := s.unmarshal(data, into); unmarshalErr != nil {
+ actual, interpretErr := s.metaFactory.Interpret(data)
+ if interpretErr != nil {
+ return nil, nil, interpretErr
+ }
+
+ if gvk != nil {
+ *actual = gvkWithDefaults(*actual, *gvk)
+ }
+
+ return nil, actual, unmarshalErr
+ }
+
+ actual := into.GetObjectKind().GroupVersionKind()
+ if len(actual.Kind) == 0 {
+ return nil, &actual, runtime.NewMissingKindErr(diagnose(data))
+ }
+ if len(actual.Version) == 0 {
+ return nil, &actual, runtime.NewMissingVersionErr(diagnose(data))
+ }
+
+ return into, &actual, nil
+ }
+
+ actual, err := s.metaFactory.Interpret(data)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ if gvk != nil {
+ *actual = gvkWithDefaults(*actual, *gvk)
+ }
+
+ if into != nil {
+ types, _, err := s.typer.ObjectKinds(into)
+ if err != nil {
+ return nil, actual, err
+ }
+ *actual = gvkWithDefaults(*actual, types[0])
+ }
+
+ if len(actual.Kind) == 0 {
+ return nil, actual, runtime.NewMissingKindErr(diagnose(data))
+ }
+ if len(actual.Version) == 0 {
+ return nil, actual, runtime.NewMissingVersionErr(diagnose(data))
+ }
+
+ obj, err := runtime.UseOrCreateObject(s.typer, s.creater, *actual, into)
+ if err != nil {
+ return nil, actual, err
+ }
+
+ strict, err := s.unmarshal(data, obj)
+ if err != nil {
+ return nil, actual, err
+ }
+
+ if s.options.transcode {
+ if err := transcodeRawTypes(obj); err != nil {
+ return nil, actual, err
+ }
+ }
+
+ return obj, actual, strict
+}
+
+// selfDescribedCBOR is the CBOR encoding of the head of tag number 55799. This tag, specified in
+// RFC 8949 Section 3.4.6 "Self-Described CBOR", encloses all output from the encoder, has no
+// special semantics, and is used as a magic number to recognize CBOR-encoded data items.
+//
+// See https://www.rfc-editor.org/rfc/rfc8949.html#name-self-described-cbor.
+var selfDescribedCBOR = []byte{0xd9, 0xd9, 0xf7}
+
+func (s *serializer) RecognizesData(data []byte) (ok, unknown bool, err error) {
+ return bytes.HasPrefix(data, selfDescribedCBOR), false, nil
+}
+
+// NewSerializerInfo returns a default SerializerInfo for CBOR using the given creater and typer.
+func NewSerializerInfo(creater runtime.ObjectCreater, typer runtime.ObjectTyper) runtime.SerializerInfo {
+ return runtime.SerializerInfo{
+ MediaType: "application/cbor",
+ MediaTypeType: "application",
+ MediaTypeSubType: "cbor",
+ Serializer: NewSerializer(creater, typer),
+ StrictSerializer: NewSerializer(creater, typer, Strict(true)),
+ StreamSerializer: &runtime.StreamSerializerInfo{
+ Framer: NewFramer(),
+ Serializer: NewSerializer(creater, typer, Transcode(false)),
+ },
+ }
+}
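
A round-trip sketch for the new serializer, assuming the vendored package path above; it also shows the self-described prefix doubling as the magic number consulted by RecognizesData:

    package main

    import (
    	"bytes"
    	"fmt"

    	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    	"k8s.io/apimachinery/pkg/runtime"
    	cborserializer "k8s.io/apimachinery/pkg/runtime/serializer/cbor"
    )

    func main() {
    	scheme := runtime.NewScheme()
    	s := cborserializer.NewSerializer(scheme, scheme)

    	in := &unstructured.Unstructured{Object: map[string]interface{}{
    		"apiVersion": "v1",
    		"kind":       "ConfigMap",
    		"metadata":   map[string]interface{}{"name": "demo"},
    	}}

    	var buf bytes.Buffer
    	if err := s.Encode(in, &buf); err != nil {
    		panic(err)
    	}
    	// Every encoded object starts with the 0xd9d9f7 self-described CBOR tag.
    	recognized, _, _ := s.RecognizesData(buf.Bytes())
    	fmt.Println(recognized) // true

    	out := &unstructured.Unstructured{}
    	if _, _, err := s.Decode(buf.Bytes(), nil, out); err != nil {
    		panic(err)
    	}
    	fmt.Println(out.GetKind(), out.GetName()) // ConfigMap demo
    }
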
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct/direct.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct/direct.go
new file mode 100644
index 0000000000..a71a487f9e
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct/direct.go
@@ -0,0 +1,61 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package direct provides functions for marshaling and unmarshaling between arbitrary Go values and
+// CBOR data, with behavior that is compatible with that of the CBOR serializer. In particular,
+// types that implement cbor.Marshaler and cbor.Unmarshaler should use these functions.
+package direct
+
+import (
+ "k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes"
+)
+
+// Marshal serializes a value to CBOR. If there is more than one way to encode the value, it will
+// make the same choice as the CBOR implementation of runtime.Serializer.
+//
+// Note: Support for CBOR is at an alpha stage. If the value (or, for composite types, any of its
+// nested values) implement any of the interfaces encoding.TextMarshaler, encoding.TextUnmarshaler,
+// encoding/json.Marshaler, or encoding/json.Unmarshaler, a non-nil error will be returned unless
+// the value also implements the corresponding CBOR interfaces. This limitation will ultimately be
+// removed in favor of automatic transcoding to CBOR.
+func Marshal(src interface{}) ([]byte, error) {
+ if err := modes.RejectCustomMarshalers(src); err != nil {
+ return nil, err
+ }
+ return modes.Encode.Marshal(src)
+}
+
+// Unmarshal deserializes from CBOR into an addressable value. If there is more than one way to
+// unmarshal a value, it will make the same choice as the CBOR implementation of runtime.Serializer.
+//
+// Note: Support for CBOR is at an alpha stage. If the value (or, for composite types, any of its
+// nested values) implement any of the interfaces encoding.TextMarshaler, encoding.TextUnmarshaler,
+// encoding/json.Marshaler, or encoding/json.Unmarshaler, a non-nil error will be returned unless
+// the value also implements the corresponding CBOR interfaces. This limitation will ultimately be
+// removed in favor of automatic transcoding to CBOR.
+func Unmarshal(src []byte, dst interface{}) error {
+ if err := modes.RejectCustomMarshalers(dst); err != nil {
+ return err
+ }
+ return modes.Decode.Unmarshal(src, dst)
+}
+
+// Diagnose accepts well-formed CBOR bytes and returns a string representing the same data item in
+// human-readable diagnostic notation (RFC 8949 Section 8). The diagnostic notation is not meant to
+// be parsed.
+func Diagnose(src []byte) (string, error) {
+ return modes.Diagnostic.Diagnose(src)
+}
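
The direct package is what RawExtension's new CBOR methods use under the hood; a brief sketch of the round-trip and diagnostic helpers, assuming this vendored package:

    package main

    import (
    	"fmt"

    	cbor "k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct"
    )

    func main() {
    	in := map[string]interface{}{"replicas": int64(3), "paused": false}

    	data, err := cbor.Marshal(in)
    	if err != nil {
    		panic(err)
    	}

    	var out map[string]interface{}
    	if err := cbor.Unmarshal(data, &out); err != nil {
    		panic(err)
    	}
    	fmt.Println(out["replicas"], out["paused"]) // 3 false

    	// Diagnose renders the raw bytes in RFC 8949 diagnostic notation,
    	// which is handy in error messages and tests.
    	diag, err := cbor.Diagnose(data)
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(diag)
    }
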
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/framer.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/framer.go
new file mode 100644
index 0000000000..28a733c673
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/framer.go
@@ -0,0 +1,90 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cbor
+
+import (
+ "io"
+
+ "k8s.io/apimachinery/pkg/runtime"
+
+ "github.com/fxamacker/cbor/v2"
+)
+
+// NewFramer returns a runtime.Framer based on RFC 8742 CBOR Sequences. Each frame contains exactly
+// one encoded CBOR data item.
+func NewFramer() runtime.Framer {
+ return framer{}
+}
+
+var _ runtime.Framer = framer{}
+
+type framer struct{}
+
+func (framer) NewFrameReader(rc io.ReadCloser) io.ReadCloser {
+ return &frameReader{
+ decoder: cbor.NewDecoder(rc),
+ closer: rc,
+ }
+}
+
+func (framer) NewFrameWriter(w io.Writer) io.Writer {
+ // Each data item in a CBOR sequence is self-delimiting (like JSON objects).
+ return w
+}
+
+type frameReader struct {
+ decoder *cbor.Decoder
+ closer io.Closer
+
+ overflow []byte
+}
+
+func (fr *frameReader) Read(dst []byte) (int, error) {
+ if len(fr.overflow) > 0 {
+ // We read a frame that was too large for the destination slice in a previous call
+ // to Read and have bytes left over.
+ n := copy(dst, fr.overflow)
+ if n < len(fr.overflow) {
+ fr.overflow = fr.overflow[n:]
+ return n, io.ErrShortBuffer
+ }
+ fr.overflow = nil
+ return n, nil
+ }
+
+ // The Reader contract allows implementations to use all of dst[0:len(dst)] as scratch
+ // space, even if n < len(dst), but it does not allow implementations to use
+ // dst[len(dst):cap(dst)]. Slicing it up-front allows us to append to it without worrying
+ // about overwriting dst[len(dst):cap(dst)].
+ m := cbor.RawMessage(dst[0:0:len(dst)])
+ if err := fr.decoder.Decode(&m); err != nil {
+ return 0, err
+ }
+
+ if len(m) > len(dst) {
+ // The frame was too big, m has a newly-allocated underlying array to accommodate
+ // it.
+ fr.overflow = m[len(dst):]
+ return copy(dst, m), io.ErrShortBuffer
+ }
+
+ return len(m), nil
+}
+
+func (fr *frameReader) Close() error {
+ return fr.closer.Close()
+}
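
Since each CBOR data item is self-delimiting, a CBOR Sequence (RFC 8742) is just concatenated items, and the frame reader above splits them back apart. A small sketch, assuming the vendored packages from this diff:

    package main

    import (
    	"bytes"
    	"fmt"
    	"io"

    	cborserializer "k8s.io/apimachinery/pkg/runtime/serializer/cbor"
    	cbor "k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct"
    )

    func main() {
    	// Build a CBOR Sequence: frames are simply concatenated data items.
    	var stream bytes.Buffer
    	for i := int64(1); i <= 3; i++ {
    		item, err := cbor.Marshal(map[string]interface{}{"n": i})
    		if err != nil {
    			panic(err)
    		}
    		stream.Write(item)
    	}

    	fr := cborserializer.NewFramer().NewFrameReader(io.NopCloser(&stream))
    	defer fr.Close()

    	buf := make([]byte, 1024)
    	for {
    		n, err := fr.Read(buf)
    		if err == io.EOF {
    			break
    		}
    		if err != nil {
    			panic(err)
    		}
    		fmt.Printf("frame of %d bytes\n", n)
    	}
    }
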
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/buffers.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/buffers.go
new file mode 100644
index 0000000000..f14cbd6b58
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/buffers.go
@@ -0,0 +1,65 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package modes
+
+import (
+ "bytes"
+ "sync"
+)
+
+var buffers = BufferProvider{p: new(sync.Pool)}
+
+type buffer struct {
+ bytes.Buffer
+}
+
+type pool interface {
+ Get() interface{}
+ Put(interface{})
+}
+
+type BufferProvider struct {
+ p pool
+}
+
+func (b *BufferProvider) Get() *buffer {
+ if buf, ok := b.p.Get().(*buffer); ok {
+ return buf
+ }
+ return &buffer{}
+}
+
+func (b *BufferProvider) Put(buf *buffer) {
+ if buf.Cap() > 3*1024*1024 /* Default MaxRequestBodyBytes */ {
+ // Objects in a sync.Pool are assumed to be fungible. This is not a good assumption
+ // for pools of *bytes.Buffer because a *bytes.Buffer's underlying array grows as
+ // needed to accommodate writes. In Kubernetes, apiservers tend to encode "small"
+ // objects very frequently and much larger objects (especially large lists) only
+ // occasionally. Under steady load, pooled buffers tend to be borrowed frequently
+ // enough to prevent them from being released. Over time, each buffer is used to
+ // encode a large object and its capacity increases accordingly. The result is that
+ // practically all buffers in the pool retain much more capacity than needed to
+ // encode most objects.
+
+ // As a basic mitigation for the worst case, buffers with more capacity than the
+ // default max request body size are never returned to the pool.
+ // TODO: Optimize for higher buffer utilization.
+ return
+ }
+ buf.Reset()
+ b.p.Put(buf)
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/custom.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/custom.go
new file mode 100644
index 0000000000..e550ea348d
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/custom.go
@@ -0,0 +1,422 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package modes
+
+import (
+ "encoding"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "reflect"
+ "sync"
+
+ "github.com/fxamacker/cbor/v2"
+)
+
+// Returns a non-nil error if and only if the argument's type (or one of its component types, for
+// composite types) implements json.Marshaler or encoding.TextMarshaler without also implementing
+// cbor.Marshaler and likewise for the respective Unmarshaler interfaces.
+//
+// This is a temporary, graduation-blocking restriction and will be removed in favor of automatic
+// transcoding between CBOR and JSON/text for these types. This restriction allows CBOR to be
+// exercised for in-tree and unstructured types while mitigating the risk of mangling out-of-tree
+// types in client programs.
+func RejectCustomMarshalers(v interface{}) error {
+ if v == nil {
+ return nil
+ }
+ rv := reflect.ValueOf(v)
+ if err := marshalerCache.getChecker(rv.Type()).check(rv, maxDepth); err != nil {
+ return fmt.Errorf("unable to serialize %T: %w", v, err)
+ }
+ if err := unmarshalerCache.getChecker(rv.Type()).check(rv, maxDepth); err != nil {
+ return fmt.Errorf("unable to serialize %T: %w", v, err)
+ }
+ return nil
+}
+
+// Recursion depth is limited as a basic mitigation against cyclic objects. Objects created by the
+// decoder shouldn't be able to contain cycles, but practically any object can be passed to the
+// encoder.
+var errMaxDepthExceeded = errors.New("object depth exceeds limit (possible cycle?)")
+
+// The JSON encoder begins detecting cycles after depth 1000. Use a generous limit here, knowing
+// that it might reject deeply nested acyclic objects. The limit will be removed along with the rest of
+// this mechanism.
+const maxDepth = 2048
+
+var marshalerCache = checkers{
+ cborInterface: reflect.TypeFor[cbor.Marshaler](),
+ nonCBORInterfaces: []reflect.Type{
+ reflect.TypeFor[json.Marshaler](),
+ reflect.TypeFor[encoding.TextMarshaler](),
+ },
+}
+
+var unmarshalerCache = checkers{
+ cborInterface: reflect.TypeFor[cbor.Unmarshaler](),
+ nonCBORInterfaces: []reflect.Type{
+ reflect.TypeFor[json.Unmarshaler](),
+ reflect.TypeFor[encoding.TextUnmarshaler](),
+ },
+ assumeAddressableValues: true,
+}
+
+// checker wraps a function for dynamically checking a value of a specific type for custom JSON
+// behaviors not matched by a custom CBOR behavior.
+type checker struct {
+ // check returns a non-nil error if the given value might be marshalled to or from CBOR
+ // using the default behavior for its kind, but marshalled to or from JSON using custom
+ // behavior.
+ check func(rv reflect.Value, depth int) error
+
+ // safe returns true if all values of this type are safe from mismatched custom marshalers.
+ safe func() bool
+}
+
+// TODO: stale
+// Having a single addressable checker for comparisons lets us prune and collapse parts of the
+// object traversal that are statically known to be safe. Depending on the type, it may be
+// unnecessary to inspect each value of that type. For example, no value of the built-in type bool
+// can implement json.Marshaler (a named type whose underlying type is bool could, but it is a
+// distinct type from bool).
+var noop = checker{
+ safe: func() bool {
+ return true
+ },
+ check: func(rv reflect.Value, depth int) error {
+ return nil
+ },
+}
+
+type checkers struct {
+ m sync.Map // reflect.Type => *checker
+
+ cborInterface reflect.Type
+ nonCBORInterfaces []reflect.Type
+
+ assumeAddressableValues bool
+}
+
+func (cache *checkers) getChecker(rt reflect.Type) checker {
+ if ptr, ok := cache.m.Load(rt); ok {
+ return *ptr.(*checker)
+ }
+
+ return cache.getCheckerInternal(rt, nil)
+}
+
+// linked list node representing the path from a composite type to an element type
+type path struct {
+ Type reflect.Type
+ Parent *path
+}
+
+func (p path) cyclic(rt reflect.Type) bool {
+ for ancestor := &p; ancestor != nil; ancestor = ancestor.Parent {
+ if ancestor.Type == rt {
+ return true
+ }
+ }
+ return false
+}
+
+func (cache *checkers) getCheckerInternal(rt reflect.Type, parent *path) (c checker) {
+ // Store a placeholder cache entry first to handle cyclic types.
+ var wg sync.WaitGroup
+ wg.Add(1)
+ defer wg.Done()
+ placeholder := checker{
+ safe: func() bool {
+ wg.Wait()
+ return c.safe()
+ },
+ check: func(rv reflect.Value, depth int) error {
+ wg.Wait()
+ return c.check(rv, depth)
+ },
+ }
+ if actual, loaded := cache.m.LoadOrStore(rt, &placeholder); loaded {
+ // Someone else stored an entry for this type, use it.
+ return *actual.(*checker)
+ }
+
+ // Take a nonreflective path for the unstructured container types. They're common and
+ // usually nested inside one another.
+ switch rt {
+ case reflect.TypeFor[map[string]interface{}](), reflect.TypeFor[[]interface{}]():
+ return checker{
+ safe: func() bool {
+ return false
+ },
+ check: func(rv reflect.Value, depth int) error {
+ return checkUnstructuredValue(cache, rv.Interface(), depth)
+ },
+ }
+ }
+
+ // It's possible that one of the relevant interfaces is implemented on a type with a pointer
+ // receiver, but that a particular value of that type is not addressable. For example:
+ //
+ // func (Foo) MarshalText() ([]byte, error) { ... }
+ // func (*Foo) MarshalCBOR() ([]byte, error) { ... }
+ //
+ // Both methods are in the method set of *Foo, but the method set of Foo contains only
+ // MarshalText.
+ //
+ // Both the unmarshaler and marshaler checks assume that methods implementing a JSON or text
+ // interface with a pointer receiver are always accessible. Only the unmarshaler check
+ // assumes that CBOR methods with pointer receivers are accessible.
+
+ if rt.Implements(cache.cborInterface) {
+ return noop
+ }
+ for _, unsafe := range cache.nonCBORInterfaces {
+ if rt.Implements(unsafe) {
+ err := fmt.Errorf("%v implements %v without corresponding cbor interface", rt, unsafe)
+ return checker{
+ safe: func() bool {
+ return false
+ },
+ check: func(reflect.Value, int) error {
+ return err
+ },
+ }
+ }
+ }
+
+ if cache.assumeAddressableValues && reflect.PointerTo(rt).Implements(cache.cborInterface) {
+ return noop
+ }
+ for _, unsafe := range cache.nonCBORInterfaces {
+ if reflect.PointerTo(rt).Implements(unsafe) {
+ err := fmt.Errorf("%v implements %v without corresponding cbor interface", reflect.PointerTo(rt), unsafe)
+ return checker{
+ safe: func() bool {
+ return false
+ },
+ check: func(reflect.Value, int) error {
+ return err
+ },
+ }
+ }
+ }
+
+ self := &path{Type: rt, Parent: parent}
+
+ switch rt.Kind() {
+ case reflect.Array:
+ ce := cache.getCheckerInternal(rt.Elem(), self)
+ rtlen := rt.Len()
+ if rtlen == 0 || (!self.cyclic(rt.Elem()) && ce.safe()) {
+ return noop
+ }
+ return checker{
+ safe: func() bool {
+ return false
+ },
+ check: func(rv reflect.Value, depth int) error {
+ if depth <= 0 {
+ return errMaxDepthExceeded
+ }
+ for i := 0; i < rtlen; i++ {
+ if err := ce.check(rv.Index(i), depth-1); err != nil {
+ return err
+ }
+ }
+ return nil
+ },
+ }
+
+ case reflect.Interface:
+ // All interface values have to be checked because their dynamic type might
+ // implement one of the interesting interfaces or be composed of another type that
+ // does.
+ return checker{
+ safe: func() bool {
+ return false
+ },
+ check: func(rv reflect.Value, depth int) error {
+ if rv.IsNil() {
+ return nil
+ }
+ // Unpacking interfaces must count against recursion depth,
+ // consider this cycle:
+ // > var i interface{}
+ // > var p *interface{} = &i
+ // > i = p
+ // > rv := reflect.ValueOf(i)
+ // > for {
+ // > rv = rv.Elem()
+ // > }
+ if depth <= 0 {
+ return errMaxDepthExceeded
+ }
+ rv = rv.Elem()
+ return cache.getChecker(rv.Type()).check(rv, depth-1)
+ },
+ }
+
+ case reflect.Map:
+ rtk := rt.Key()
+ ck := cache.getCheckerInternal(rtk, self)
+ rte := rt.Elem()
+ ce := cache.getCheckerInternal(rte, self)
+ if !self.cyclic(rtk) && !self.cyclic(rte) && ck.safe() && ce.safe() {
+ return noop
+ }
+ return checker{
+ safe: func() bool {
+ return false
+ },
+ check: func(rv reflect.Value, depth int) error {
+ if depth <= 0 {
+ return errMaxDepthExceeded
+ }
+ iter := rv.MapRange()
+ rvk := reflect.New(rtk).Elem()
+ rve := reflect.New(rte).Elem()
+ for iter.Next() {
+ rvk.SetIterKey(iter)
+ if err := ck.check(rvk, depth-1); err != nil {
+ return err
+ }
+ rve.SetIterValue(iter)
+ if err := ce.check(rve, depth-1); err != nil {
+ return err
+ }
+ }
+ return nil
+ },
+ }
+
+ case reflect.Pointer:
+ ce := cache.getCheckerInternal(rt.Elem(), self)
+ if !self.cyclic(rt.Elem()) && ce.safe() {
+ return noop
+ }
+ return checker{
+ safe: func() bool {
+ return false
+ },
+ check: func(rv reflect.Value, depth int) error {
+ if rv.IsNil() {
+ return nil
+ }
+ if depth <= 0 {
+ return errMaxDepthExceeded
+ }
+ return ce.check(rv.Elem(), depth-1)
+ },
+ }
+
+ case reflect.Slice:
+ ce := cache.getCheckerInternal(rt.Elem(), self)
+ if !self.cyclic(rt.Elem()) && ce.safe() {
+ return noop
+ }
+ return checker{
+ safe: func() bool {
+ return false
+ },
+ check: func(rv reflect.Value, depth int) error {
+ if depth <= 0 {
+ return errMaxDepthExceeded
+ }
+ for i := 0; i < rv.Len(); i++ {
+ if err := ce.check(rv.Index(i), depth-1); err != nil {
+ return err
+ }
+ }
+ return nil
+ },
+ }
+
+ case reflect.Struct:
+ type field struct {
+ Index int
+ Checker checker
+ }
+ var fields []field
+ for i := 0; i < rt.NumField(); i++ {
+ f := rt.Field(i)
+ cf := cache.getCheckerInternal(f.Type, self)
+ if !self.cyclic(f.Type) && cf.safe() {
+ continue
+ }
+ fields = append(fields, field{Index: i, Checker: cf})
+ }
+ if len(fields) == 0 {
+ return noop
+ }
+ return checker{
+ safe: func() bool {
+ return false
+ },
+ check: func(rv reflect.Value, depth int) error {
+ if depth <= 0 {
+ return errMaxDepthExceeded
+ }
+ for _, fi := range fields {
+ if err := fi.Checker.check(rv.Field(fi.Index), depth-1); err != nil {
+ return err
+ }
+ }
+ return nil
+ },
+ }
+
+ default:
+ // Not a serializable composite type (funcs and channels are composite types but are
+ // rejected by JSON and CBOR serialization).
+ return noop
+
+ }
+}
+
+func checkUnstructuredValue(cache *checkers, v interface{}, depth int) error {
+ switch v := v.(type) {
+ case nil, bool, int64, float64, string:
+ return nil
+ case []interface{}:
+ if depth <= 0 {
+ return errMaxDepthExceeded
+ }
+ for _, element := range v {
+ if err := checkUnstructuredValue(cache, element, depth-1); err != nil {
+ return err
+ }
+ }
+ return nil
+ case map[string]interface{}:
+ if depth <= 0 {
+ return errMaxDepthExceeded
+ }
+ for _, element := range v {
+ if err := checkUnstructuredValue(cache, element, depth-1); err != nil {
+ return err
+ }
+ }
+ return nil
+ default:
+ // Unmarshaling an unstructured doesn't use other dynamic types, but nothing
+ // prevents inserting values with arbitrary dynamic types into unstructured content,
+ // as long as they can be marshalled.
+ rv := reflect.ValueOf(v)
+ return cache.getChecker(rv.Type()).check(rv, depth)
+ }
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/decode.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/decode.go
new file mode 100644
index 0000000000..895b0deff9
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/decode.go
@@ -0,0 +1,158 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package modes
+
+import (
+ "reflect"
+
+ "github.com/fxamacker/cbor/v2"
+)
+
+var simpleValues *cbor.SimpleValueRegistry = func() *cbor.SimpleValueRegistry {
+ var opts []func(*cbor.SimpleValueRegistry) error
+ for sv := 0; sv <= 255; sv++ {
+ // Reject simple values 0-19, 23, and 32-255. The simple values 24-31 are reserved
+ // and considered ill-formed by the CBOR specification. We only accept false (20),
+ // true (21), and null (22).
+ switch sv {
+ case 20: // false
+ case 21: // true
+ case 22: // null
+ case 24, 25, 26, 27, 28, 29, 30, 31: // reserved
+ default:
+ opts = append(opts, cbor.WithRejectedSimpleValue(cbor.SimpleValue(sv)))
+ }
+ }
+ simpleValues, err := cbor.NewSimpleValueRegistryFromDefaults(opts...)
+ if err != nil {
+ panic(err)
+ }
+ return simpleValues
+}()
+
+var Decode cbor.DecMode = func() cbor.DecMode {
+ decode, err := cbor.DecOptions{
+ // Maps with duplicate keys are well-formed but invalid according to the CBOR spec
+ // and never acceptable. Unlike the JSON serializer, inputs containing duplicate map
+ // keys are rejected outright and not surfaced as a strict decoding error.
+ DupMapKey: cbor.DupMapKeyEnforcedAPF,
+
+ // For JSON parity, decoding an RFC3339 string into time.Time needs to be accepted
+ // with or without tagging. If a tag number is present, it must be valid.
+ TimeTag: cbor.DecTagOptional,
+
+ // Observed depth up to 16 in fuzzed batch/v1 CronJobList. JSON implementation limit
+ // is 10000.
+ MaxNestedLevels: 64,
+
+ MaxArrayElements: 1024,
+ MaxMapPairs: 1024,
+
+ // Indefinite-length sequences aren't produced by this serializer, but other
+ // implementations can.
+ IndefLength: cbor.IndefLengthAllowed,
+
+ // Accept inputs that contain CBOR tags.
+ TagsMd: cbor.TagsAllowed,
+
+ // Decode type 0 (unsigned integer) as int64.
+ // TODO: IntDecConvertSignedOrFail errors on overflow, JSON will try to fall back to float64.
+ IntDec: cbor.IntDecConvertSignedOrFail,
+
+ // Disable producing map[cbor.ByteString]interface{}, which is not acceptable for
+ // decodes into interface{}.
+ MapKeyByteString: cbor.MapKeyByteStringForbidden,
+
+ // Error on map keys that don't map to a field in the destination struct.
+ ExtraReturnErrors: cbor.ExtraDecErrorUnknownField,
+
+ // Decode maps into concrete type map[string]interface{} when the destination is an
+ // interface{}.
+ DefaultMapType: reflect.TypeOf(map[string]interface{}(nil)),
+
+ // A CBOR text string whose content is not a valid UTF-8 sequence is well-formed but
+ // invalid according to the CBOR spec. Reject invalid inputs. Encoders are
+ // responsible for ensuring that all text strings they produce contain valid UTF-8
+ // sequences and may use the byte string major type to encode strings that have not
+ // been validated.
+ UTF8: cbor.UTF8RejectInvalid,
+
+ // Never make a case-insensitive match between a map key and a struct field.
+ FieldNameMatching: cbor.FieldNameMatchingCaseSensitive,
+
+ // Produce string concrete values when decoding a CBOR byte string into interface{}.
+ DefaultByteStringType: reflect.TypeOf(""),
+
+ // Allow CBOR byte strings to be decoded into string destination values. If a byte
+ // string is enclosed in an "expected later encoding" tag
+ // (https://www.rfc-editor.org/rfc/rfc8949.html#section-3.4.5.2), then the text
+ // encoding indicated by that tag (e.g. base64) will be applied to the contents of
+ // the byte string.
+ ByteStringToString: cbor.ByteStringToStringAllowedWithExpectedLaterEncoding,
+
+ // Allow CBOR byte strings to match struct fields when appearing as a map key.
+ FieldNameByteString: cbor.FieldNameByteStringAllowed,
+
+ // When decoding an unrecognized tag to interface{}, return the decoded tag content
+ // instead of the default, a cbor.Tag representing a (number, content) pair.
+ UnrecognizedTagToAny: cbor.UnrecognizedTagContentToAny,
+
+ // Decode time tags to interface{} as strings containing RFC 3339 timestamps.
+ TimeTagToAny: cbor.TimeTagToRFC3339Nano,
+
+ // For parity with JSON, strings can be decoded into time.Time if they are RFC 3339
+ // timestamps.
+ ByteStringToTime: cbor.ByteStringToTimeAllowed,
+
+ // Reject NaN and infinite floating-point values since they don't have a JSON
+ // representation (RFC 8259 Section 6).
+ NaN: cbor.NaNDecodeForbidden,
+ Inf: cbor.InfDecodeForbidden,
+
+ // When unmarshaling a byte string into a []byte, assume that the byte string
+ // contains base64-encoded bytes, unless explicitly counterindicated by an "expected
+ // later encoding" tag. This is consistent with the behavior of unmarshaling a JSON
+ // text into a []byte.
+ ByteStringExpectedFormat: cbor.ByteStringExpectedBase64,
+
+ // Reject the arbitrary-precision integer tags because they can't be faithfully
+ // roundtripped through the allowable Unstructured types.
+ BignumTag: cbor.BignumTagForbidden,
+
+ // Reject anything other than the simple values true, false, and null.
+ SimpleValues: simpleValues,
+
+ // Disable default recognition of types implementing encoding.BinaryUnmarshaler,
+ // which is not recognized for JSON decoding.
+ BinaryUnmarshaler: cbor.BinaryUnmarshalerNone,
+ }.DecMode()
+ if err != nil {
+ panic(err)
+ }
+ return decode
+}()
+
+// DecodeLax is derived from Decode, but does not complain about unknown fields in the input.
+var DecodeLax cbor.DecMode = func() cbor.DecMode {
+ opts := Decode.DecOptions()
+ opts.ExtraReturnErrors &^= cbor.ExtraDecErrorUnknownField // clear bit
+ dm, err := opts.DecMode()
+ if err != nil {
+ panic(err)
+ }
+ return dm
+}()
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/diagnostic.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/diagnostic.go
new file mode 100644
index 0000000000..61f3f145f5
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/diagnostic.go
@@ -0,0 +1,36 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package modes
+
+import (
+ "github.com/fxamacker/cbor/v2"
+)
+
+var Diagnostic cbor.DiagMode = func() cbor.DiagMode {
+ opts := Decode.DecOptions()
+ diagnostic, err := cbor.DiagOptions{
+ ByteStringText: true,
+
+ MaxNestedLevels: opts.MaxNestedLevels,
+ MaxArrayElements: opts.MaxArrayElements,
+ MaxMapPairs: opts.MaxMapPairs,
+ }.DiagMode()
+ if err != nil {
+ panic(err)
+ }
+ return diagnostic
+}()
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/encode.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/encode.go
new file mode 100644
index 0000000000..5fae141518
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/encode.go
@@ -0,0 +1,155 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package modes
+
+import (
+ "io"
+
+ "github.com/fxamacker/cbor/v2"
+)
+
+var Encode = EncMode{
+ delegate: func() cbor.UserBufferEncMode {
+ encode, err := cbor.EncOptions{
+ // Map keys need to be sorted to have deterministic output, and this is the order
+ // defined in RFC 8949 4.2.1 "Core Deterministic Encoding Requirements".
+ Sort: cbor.SortBytewiseLexical,
+
+ // CBOR supports distinct types for IEEE-754 float16, float32, and float64. Store
+ // floats in the smallest width that preserves value so that equivalent float32 and
+ // float64 values encode to identical bytes, as they do in a JSON
+ // encoding. Satisfies one of the "Core Deterministic Encoding Requirements".
+ ShortestFloat: cbor.ShortestFloat16,
+
+ // Error on attempt to encode NaN and infinite values. This is what the JSON
+ // serializer does.
+ NaNConvert: cbor.NaNConvertReject,
+ InfConvert: cbor.InfConvertReject,
+
+ // Error on attempt to encode math/big.Int values, which can't be faithfully
+ // roundtripped through Unstructured in general (the dynamic numeric types allowed
+ // in Unstructured are limited to float64 and int64).
+ BigIntConvert: cbor.BigIntConvertReject,
+
+ // MarshalJSON for time.Time writes RFC3339 with nanos.
+ Time: cbor.TimeRFC3339Nano,
+
+ // The decoder must be able to accept RFC3339 strings with or without tag 0 (e.g. by
+ // the end of time.Time -> JSON -> Unstructured -> CBOR, the CBOR encoder has no
+ // reliable way of knowing that a particular string originated from serializing a
+ // time.Time), so producing tag 0 has little use.
+ TimeTag: cbor.EncTagNone,
+
+ // Indefinite-length items have multiple encodings and aren't being used anyway, so
+ // disable to avoid an opportunity for nondeterminism.
+ IndefLength: cbor.IndefLengthForbidden,
+
+ // Preserve distinction between nil and empty for slices and maps.
+ NilContainers: cbor.NilContainerAsNull,
+
+ // OK to produce tags.
+ TagsMd: cbor.TagsAllowed,
+
+ // Use the same definition of "empty" as encoding/json.
+ OmitEmpty: cbor.OmitEmptyGoValue,
+
+ // The CBOR types text string and byte string are structurally equivalent, with the
+ // semantic difference that a text string whose content is an invalid UTF-8 sequence
+ // is itself invalid. We reject all invalid text strings at decode time and do not
+ // validate or sanitize all Go strings at encode time. Encoding Go strings to the
+ // byte string type is comparable to the existing Protobuf behavior and cheaply
+ // ensures that the output is valid CBOR.
+ String: cbor.StringToByteString,
+
+ // Encode struct field names to the byte string type rather than the text string
+ // type.
+ FieldName: cbor.FieldNameToByteString,
+
+ // Marshal Go byte arrays to CBOR arrays of integers (as in JSON) instead of byte
+ // strings.
+ ByteArray: cbor.ByteArrayToArray,
+
+ // Marshal []byte to CBOR byte string enclosed in tag 22 (expected later base64
+ // encoding, https://www.rfc-editor.org/rfc/rfc8949.html#section-3.4.5.2), to
+ // interoperate with the existing JSON behavior. This indicates to the decoder that,
+ // when decoding into a string (or unstructured), the resulting value should be the
+ // base64 encoding of the original bytes. No base64 encoding or decoding needs to be
+ // performed for []byte-to-CBOR-to-[]byte roundtrips.
+ ByteSliceLaterFormat: cbor.ByteSliceLaterFormatBase64,
+
+ // Disable default recognition of types implementing encoding.BinaryMarshaler, which
+ // is not recognized for JSON encoding.
+ BinaryMarshaler: cbor.BinaryMarshalerNone,
+ }.UserBufferEncMode()
+ if err != nil {
+ panic(err)
+ }
+ return encode
+ }(),
+}
+
+var EncodeNondeterministic = EncMode{
+ delegate: func() cbor.UserBufferEncMode {
+ opts := Encode.options()
+ opts.Sort = cbor.SortFastShuffle
+ em, err := opts.UserBufferEncMode()
+ if err != nil {
+ panic(err)
+ }
+ return em
+ }(),
+}
+
+type EncMode struct {
+ delegate cbor.UserBufferEncMode
+}
+
+func (em EncMode) options() cbor.EncOptions {
+ return em.delegate.EncOptions()
+}
+
+func (em EncMode) MarshalTo(v interface{}, w io.Writer) error {
+ if buf, ok := w.(*buffer); ok {
+ return em.delegate.MarshalToBuffer(v, &buf.Buffer)
+ }
+
+ buf := buffers.Get()
+ defer buffers.Put(buf)
+ if err := em.delegate.MarshalToBuffer(v, &buf.Buffer); err != nil {
+ return err
+ }
+
+ if _, err := io.Copy(w, buf); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (em EncMode) Marshal(v interface{}) ([]byte, error) {
+ buf := buffers.Get()
+ defer buffers.Put(buf)
+
+ if err := em.MarshalTo(v, &buf.Buffer); err != nil {
+ return nil, err
+ }
+
+ clone := make([]byte, buf.Len())
+ copy(clone, buf.Bytes())
+
+ return clone, nil
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/raw.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/raw.go
new file mode 100644
index 0000000000..09d1340f93
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/raw.go
@@ -0,0 +1,236 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cbor
+
+import (
+ "fmt"
+ "reflect"
+ "sync"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+var sharedTranscoders transcoders
+
+var rawTypeTranscodeFuncs = map[reflect.Type]func(reflect.Value) error{
+ reflect.TypeFor[runtime.RawExtension](): func(rv reflect.Value) error {
+ if !rv.CanAddr() {
+ return nil
+ }
+ re := rv.Addr().Interface().(*runtime.RawExtension)
+ if re.Raw == nil {
+ // When Raw is nil it encodes to null. Don't change nil Raw values during
+ // transcoding, they would have unmarshalled from JSON as nil too.
+ return nil
+ }
+ j, err := re.MarshalJSON()
+ if err != nil {
+ return fmt.Errorf("failed to transcode RawExtension to JSON: %w", err)
+ }
+ re.Raw = j
+ return nil
+ },
+ reflect.TypeFor[metav1.FieldsV1](): func(rv reflect.Value) error {
+ if !rv.CanAddr() {
+ return nil
+ }
+ fields := rv.Addr().Interface().(*metav1.FieldsV1)
+ if fields.Raw == nil {
+ // When Raw is nil it encodes to null. Don't change nil Raw values during
+ // transcoding, they would have unmarshalled from JSON as nil too.
+ return nil
+ }
+ j, err := fields.MarshalJSON()
+ if err != nil {
+ return fmt.Errorf("failed to transcode FieldsV1 to JSON: %w", err)
+ }
+ fields.Raw = j
+ return nil
+ },
+}
+
+func transcodeRawTypes(v interface{}) error {
+ if v == nil {
+ return nil
+ }
+
+ rv := reflect.ValueOf(v)
+ return sharedTranscoders.getTranscoder(rv.Type()).fn(rv)
+}
+
+type transcoder struct {
+ fn func(rv reflect.Value) error
+}
+
+var noop = transcoder{
+ fn: func(reflect.Value) error {
+ return nil
+ },
+}
+
+type transcoders struct {
+ lock sync.RWMutex
+ m map[reflect.Type]**transcoder
+}
+
+func (ts *transcoders) getTranscoder(rt reflect.Type) transcoder {
+ ts.lock.RLock()
+ tpp, ok := ts.m[rt]
+ ts.lock.RUnlock()
+ if ok {
+ return **tpp
+ }
+
+ ts.lock.Lock()
+ defer ts.lock.Unlock()
+ tp := ts.getTranscoderLocked(rt)
+ return *tp
+}
+
+func (ts *transcoders) getTranscoderLocked(rt reflect.Type) *transcoder {
+ if tpp, ok := ts.m[rt]; ok {
+ // A transcoder for this type was cached while waiting to acquire the lock.
+ return *tpp
+ }
+
+ // Cache the transcoder now, before populating fn, so that circular references between types
+ // don't overflow the call stack.
+ t := new(transcoder)
+ if ts.m == nil {
+ ts.m = make(map[reflect.Type]**transcoder)
+ }
+ ts.m[rt] = &t
+
+ for rawType, fn := range rawTypeTranscodeFuncs {
+ if rt == rawType {
+ t = &transcoder{fn: fn}
+ return t
+ }
+ }
+
+ switch rt.Kind() {
+ case reflect.Array:
+ te := ts.getTranscoderLocked(rt.Elem())
+ rtlen := rt.Len()
+ if rtlen == 0 || te == &noop {
+ t = &noop
+ break
+ }
+ t.fn = func(rv reflect.Value) error {
+ for i := 0; i < rtlen; i++ {
+ if err := te.fn(rv.Index(i)); err != nil {
+ return err
+ }
+ }
+ return nil
+ }
+ case reflect.Interface:
+ // Any interface value might have a dynamic type involving RawExtension. It needs to
+ // be checked.
+ t.fn = func(rv reflect.Value) error {
+ if rv.IsNil() {
+ return nil
+ }
+ rv = rv.Elem()
+ // The interface element's type is dynamic so its transcoder can't be
+ // determined statically.
+ return ts.getTranscoder(rv.Type()).fn(rv)
+ }
+ case reflect.Map:
+ rtk := rt.Key()
+ tk := ts.getTranscoderLocked(rtk)
+ rte := rt.Elem()
+ te := ts.getTranscoderLocked(rte)
+ if tk == &noop && te == &noop {
+ t = &noop
+ break
+ }
+ t.fn = func(rv reflect.Value) error {
+ iter := rv.MapRange()
+ rvk := reflect.New(rtk).Elem()
+ rve := reflect.New(rte).Elem()
+ for iter.Next() {
+ rvk.SetIterKey(iter)
+ if err := tk.fn(rvk); err != nil {
+ return err
+ }
+ rve.SetIterValue(iter)
+ if err := te.fn(rve); err != nil {
+ return err
+ }
+ }
+ return nil
+ }
+ case reflect.Pointer:
+ te := ts.getTranscoderLocked(rt.Elem())
+ if te == &noop {
+ t = &noop
+ break
+ }
+ t.fn = func(rv reflect.Value) error {
+ if rv.IsNil() {
+ return nil
+ }
+ return te.fn(rv.Elem())
+ }
+ case reflect.Slice:
+ te := ts.getTranscoderLocked(rt.Elem())
+ if te == &noop {
+ t = &noop
+ break
+ }
+ t.fn = func(rv reflect.Value) error {
+ for i := 0; i < rv.Len(); i++ {
+ if err := te.fn(rv.Index(i)); err != nil {
+ return err
+ }
+ }
+ return nil
+ }
+ case reflect.Struct:
+ type fieldTranscoder struct {
+ Index int
+ Transcoder *transcoder
+ }
+ var fieldTranscoders []fieldTranscoder
+ for i := 0; i < rt.NumField(); i++ {
+ f := rt.Field(i)
+ tf := ts.getTranscoderLocked(f.Type)
+ if tf == &noop {
+ continue
+ }
+ fieldTranscoders = append(fieldTranscoders, fieldTranscoder{Index: i, Transcoder: tf})
+ }
+ if len(fieldTranscoders) == 0 {
+ t = &noop
+ break
+ }
+ t.fn = func(rv reflect.Value) error {
+ for _, ft := range fieldTranscoders {
+ if err := ft.Transcoder.fn(rv.Field(ft.Index)); err != nil {
+ return err
+ }
+ }
+ return nil
+ }
+ default:
+ t = &noop
+ }
+
+ return t
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go
index ff98208420..81286fccb4 100644
--- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go
@@ -17,9 +17,6 @@ limitations under the License.
package serializer
import (
- "mime"
- "strings"
-
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/runtime/serializer/json"
@@ -28,41 +25,26 @@ import (
"k8s.io/apimachinery/pkg/runtime/serializer/versioning"
)
-// serializerExtensions are for serializers that are conditionally compiled in
-var serializerExtensions = []func(*runtime.Scheme) (serializerType, bool){}
-
-type serializerType struct {
- AcceptContentTypes []string
- ContentType string
- FileExtensions []string
- // EncodesAsText should be true if this content type can be represented safely in UTF-8
- EncodesAsText bool
-
- Serializer runtime.Serializer
- PrettySerializer runtime.Serializer
- StrictSerializer runtime.Serializer
-
- AcceptStreamContentTypes []string
- StreamContentType string
-
- Framer runtime.Framer
- StreamSerializer runtime.Serializer
-}
-
-func newSerializersForScheme(scheme *runtime.Scheme, mf json.MetaFactory, options CodecFactoryOptions) []serializerType {
+func newSerializersForScheme(scheme *runtime.Scheme, mf json.MetaFactory, options CodecFactoryOptions) []runtime.SerializerInfo {
jsonSerializer := json.NewSerializerWithOptions(
mf, scheme, scheme,
- json.SerializerOptions{Yaml: false, Pretty: false, Strict: options.Strict},
+ json.SerializerOptions{Yaml: false, Pretty: false, Strict: options.Strict, StreamingCollectionsEncoding: options.StreamingCollectionsEncodingToJSON},
)
- jsonSerializerType := serializerType{
- AcceptContentTypes: []string{runtime.ContentTypeJSON},
- ContentType: runtime.ContentTypeJSON,
- FileExtensions: []string{"json"},
- EncodesAsText: true,
- Serializer: jsonSerializer,
-
- Framer: json.Framer,
- StreamSerializer: jsonSerializer,
+ jsonSerializerType := runtime.SerializerInfo{
+ MediaType: runtime.ContentTypeJSON,
+ MediaTypeType: "application",
+ MediaTypeSubType: "json",
+ EncodesAsText: true,
+ Serializer: jsonSerializer,
+ StrictSerializer: json.NewSerializerWithOptions(
+ mf, scheme, scheme,
+ json.SerializerOptions{Yaml: false, Pretty: false, Strict: true, StreamingCollectionsEncoding: options.StreamingCollectionsEncodingToJSON},
+ ),
+ StreamSerializer: &runtime.StreamSerializerInfo{
+ EncodesAsText: true,
+ Serializer: jsonSerializer,
+ Framer: json.Framer,
+ },
}
if options.Pretty {
jsonSerializerType.PrettySerializer = json.NewSerializerWithOptions(
@@ -71,12 +53,6 @@ func newSerializersForScheme(scheme *runtime.Scheme, mf json.MetaFactory, option
)
}
- strictJSONSerializer := json.NewSerializerWithOptions(
- mf, scheme, scheme,
- json.SerializerOptions{Yaml: false, Pretty: false, Strict: true},
- )
- jsonSerializerType.StrictSerializer = strictJSONSerializer
-
yamlSerializer := json.NewSerializerWithOptions(
mf, scheme, scheme,
json.SerializerOptions{Yaml: true, Pretty: false, Strict: options.Strict},
@@ -85,38 +61,40 @@ func newSerializersForScheme(scheme *runtime.Scheme, mf json.MetaFactory, option
mf, scheme, scheme,
json.SerializerOptions{Yaml: true, Pretty: false, Strict: true},
)
- protoSerializer := protobuf.NewSerializer(scheme, scheme)
+ protoSerializer := protobuf.NewSerializerWithOptions(scheme, scheme, protobuf.SerializerOptions{
+ StreamingCollectionsEncoding: options.StreamingCollectionsEncodingToProtobuf,
+ })
protoRawSerializer := protobuf.NewRawSerializer(scheme, scheme)
- serializers := []serializerType{
+ serializers := []runtime.SerializerInfo{
jsonSerializerType,
{
- AcceptContentTypes: []string{runtime.ContentTypeYAML},
- ContentType: runtime.ContentTypeYAML,
- FileExtensions: []string{"yaml"},
- EncodesAsText: true,
- Serializer: yamlSerializer,
- StrictSerializer: strictYAMLSerializer,
+ MediaType: runtime.ContentTypeYAML,
+ MediaTypeType: "application",
+ MediaTypeSubType: "yaml",
+ EncodesAsText: true,
+ Serializer: yamlSerializer,
+ StrictSerializer: strictYAMLSerializer,
},
{
- AcceptContentTypes: []string{runtime.ContentTypeProtobuf},
- ContentType: runtime.ContentTypeProtobuf,
- FileExtensions: []string{"pb"},
- Serializer: protoSerializer,
+ MediaType: runtime.ContentTypeProtobuf,
+ MediaTypeType: "application",
+ MediaTypeSubType: "vnd.kubernetes.protobuf",
+ Serializer: protoSerializer,
// note, strict decoding is unsupported for protobuf,
// fall back to regular serializing
StrictSerializer: protoSerializer,
-
- Framer: protobuf.LengthDelimitedFramer,
- StreamSerializer: protoRawSerializer,
+ StreamSerializer: &runtime.StreamSerializerInfo{
+ Serializer: protoRawSerializer,
+ Framer: protobuf.LengthDelimitedFramer,
+ },
},
}
- for _, fn := range serializerExtensions {
- if serializer, ok := fn(scheme); ok {
- serializers = append(serializers, serializer)
- }
+ for _, f := range options.serializers {
+ serializers = append(serializers, f(scheme, scheme))
}
+
return serializers
}
@@ -136,6 +114,11 @@ type CodecFactoryOptions struct {
Strict bool
// Pretty includes a pretty serializer along with the non-pretty one
Pretty bool
+
+ StreamingCollectionsEncodingToJSON bool
+ StreamingCollectionsEncodingToProtobuf bool
+
+ serializers []func(runtime.ObjectCreater, runtime.ObjectTyper) runtime.SerializerInfo
}
// CodecFactoryOptionsMutator takes a pointer to an options struct and then modifies it.
@@ -162,6 +145,25 @@ func DisableStrict(options *CodecFactoryOptions) {
options.Strict = false
}
+// WithSerializer configures a serializer to be supported in addition to the default serializers.
+func WithSerializer(f func(runtime.ObjectCreater, runtime.ObjectTyper) runtime.SerializerInfo) CodecFactoryOptionsMutator {
+ return func(options *CodecFactoryOptions) {
+ options.serializers = append(options.serializers, f)
+ }
+}
+
+func WithStreamingCollectionEncodingToJSON() CodecFactoryOptionsMutator {
+ return func(options *CodecFactoryOptions) {
+ options.StreamingCollectionsEncodingToJSON = true
+ }
+}
+
+func WithStreamingCollectionEncodingToProtobuf() CodecFactoryOptionsMutator {
+ return func(options *CodecFactoryOptions) {
+ options.StreamingCollectionsEncodingToProtobuf = true
+ }
+}
+
// NewCodecFactory provides methods for retrieving serializers for the supported wire formats
// and conversion wrappers to define preferred internal and external versions. In the future,
// as the internal version is used less, callers may instead use a defaulting serializer and
@@ -184,7 +186,7 @@ func NewCodecFactory(scheme *runtime.Scheme, mutators ...CodecFactoryOptionsMuta
}
// newCodecFactory is a helper for testing that allows a different metafactory to be specified.
-func newCodecFactory(scheme *runtime.Scheme, serializers []serializerType) CodecFactory {
+func newCodecFactory(scheme *runtime.Scheme, serializers []runtime.SerializerInfo) CodecFactory {
decoders := make([]runtime.Decoder, 0, len(serializers))
var accepts []runtime.SerializerInfo
alreadyAccepted := make(map[string]struct{})
@@ -192,38 +194,20 @@ func newCodecFactory(scheme *runtime.Scheme, serializers []serializerType) Codec
var legacySerializer runtime.Serializer
for _, d := range serializers {
decoders = append(decoders, d.Serializer)
- for _, mediaType := range d.AcceptContentTypes {
- if _, ok := alreadyAccepted[mediaType]; ok {
- continue
- }
- alreadyAccepted[mediaType] = struct{}{}
- info := runtime.SerializerInfo{
- MediaType: d.ContentType,
- EncodesAsText: d.EncodesAsText,
- Serializer: d.Serializer,
- PrettySerializer: d.PrettySerializer,
- StrictSerializer: d.StrictSerializer,
- }
-
- mediaType, _, err := mime.ParseMediaType(info.MediaType)
- if err != nil {
- panic(err)
- }
- parts := strings.SplitN(mediaType, "/", 2)
- info.MediaTypeType = parts[0]
- info.MediaTypeSubType = parts[1]
-
- if d.StreamSerializer != nil {
- info.StreamSerializer = &runtime.StreamSerializerInfo{
- Serializer: d.StreamSerializer,
- EncodesAsText: d.EncodesAsText,
- Framer: d.Framer,
- }
- }
- accepts = append(accepts, info)
- if mediaType == runtime.ContentTypeJSON {
- legacySerializer = d.Serializer
- }
+ if _, ok := alreadyAccepted[d.MediaType]; ok {
+ continue
+ }
+ alreadyAccepted[d.MediaType] = struct{}{}
+
+ acceptedSerializerShallowCopy := d
+ if d.StreamSerializer != nil {
+ cloned := *d.StreamSerializer
+ acceptedSerializerShallowCopy.StreamSerializer = &cloned
+ }
+ accepts = append(accepts, acceptedSerializerShallowCopy)
+
+ if d.MediaType == runtime.ContentTypeJSON {
+ legacySerializer = d.Serializer
}
}
if legacySerializer == nil {
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/collections.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/collections.go
new file mode 100644
index 0000000000..075163ddd8
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/collections.go
@@ -0,0 +1,230 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package json
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "maps"
+ "slices"
+ "sort"
+
+ "k8s.io/apimachinery/pkg/api/meta"
+ "k8s.io/apimachinery/pkg/conversion"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+func streamEncodeCollections(obj runtime.Object, w io.Writer) (bool, error) {
+ list, ok := obj.(*unstructured.UnstructuredList)
+ if ok {
+ return true, streamingEncodeUnstructuredList(w, list)
+ }
+ if _, ok := obj.(json.Marshaler); ok {
+ return false, nil
+ }
+ typeMeta, listMeta, items, err := getListMeta(obj)
+ if err == nil {
+ return true, streamingEncodeList(w, typeMeta, listMeta, items)
+ }
+ return false, nil
+}
+
+// getListMeta implements list extraction logic for json stream serialization.
+//
+// Reason for a custom logic instead of reusing accessors from meta package:
+// * Validate json tags to prevent incompatibility with json standard package.
+// * ListMetaAccessor doesn't distinguish empty from nil value.
+// * TypeAccessor reparsing "apiVersion" and serializing it with "{group}/{version}"
+func getListMeta(list runtime.Object) (metav1.TypeMeta, metav1.ListMeta, []runtime.Object, error) {
+ listValue, err := conversion.EnforcePtr(list)
+ if err != nil {
+ return metav1.TypeMeta{}, metav1.ListMeta{}, nil, err
+ }
+ listType := listValue.Type()
+ if listType.NumField() != 3 {
+ return metav1.TypeMeta{}, metav1.ListMeta{}, nil, fmt.Errorf("expected ListType to have 3 fields")
+ }
+ // TypeMeta
+ typeMeta, ok := listValue.Field(0).Interface().(metav1.TypeMeta)
+ if !ok {
+ return metav1.TypeMeta{}, metav1.ListMeta{}, nil, fmt.Errorf("expected TypeMeta field to have TypeMeta type")
+ }
+ if listType.Field(0).Tag.Get("json") != ",inline" {
+ return metav1.TypeMeta{}, metav1.ListMeta{}, nil, fmt.Errorf(`expected TypeMeta json field tag to be ",inline"`)
+ }
+ // ListMeta
+ listMeta, ok := listValue.Field(1).Interface().(metav1.ListMeta)
+ if !ok {
+ return metav1.TypeMeta{}, metav1.ListMeta{}, nil, fmt.Errorf("expected ListMeta field to have ListMeta type")
+ }
+ if listType.Field(1).Tag.Get("json") != "metadata,omitempty" {
+ return metav1.TypeMeta{}, metav1.ListMeta{}, nil, fmt.Errorf(`expected ListMeta json field tag to be "metadata,omitempty"`)
+ }
+ // Items
+ items, err := meta.ExtractList(list)
+ if err != nil {
+ return metav1.TypeMeta{}, metav1.ListMeta{}, nil, err
+ }
+ if listType.Field(2).Tag.Get("json") != "items" {
+ return metav1.TypeMeta{}, metav1.ListMeta{}, nil, fmt.Errorf(`expected Items json field tag to be "items"`)
+ }
+ return typeMeta, listMeta, items, nil
+}
+
+func streamingEncodeList(w io.Writer, typeMeta metav1.TypeMeta, listMeta metav1.ListMeta, items []runtime.Object) error {
+ // Start
+ if _, err := w.Write([]byte(`{`)); err != nil {
+ return err
+ }
+
+ // TypeMeta
+ if typeMeta.Kind != "" {
+ if err := encodeKeyValuePair(w, "kind", typeMeta.Kind, []byte(",")); err != nil {
+ return err
+ }
+ }
+ if typeMeta.APIVersion != "" {
+ if err := encodeKeyValuePair(w, "apiVersion", typeMeta.APIVersion, []byte(",")); err != nil {
+ return err
+ }
+ }
+
+ // ListMeta
+ if err := encodeKeyValuePair(w, "metadata", listMeta, []byte(",")); err != nil {
+ return err
+ }
+
+ // Items
+ if err := encodeItemsObjectSlice(w, items); err != nil {
+ return err
+ }
+
+ // End
+ _, err := w.Write([]byte("}\n"))
+ return err
+}
+
+func encodeItemsObjectSlice(w io.Writer, items []runtime.Object) (err error) {
+ if items == nil {
+ err := encodeKeyValuePair(w, "items", nil, nil)
+ return err
+ }
+ _, err = w.Write([]byte(`"items":[`))
+ if err != nil {
+ return err
+ }
+ suffix := []byte(",")
+ for i, item := range items {
+ if i == len(items)-1 {
+ suffix = nil
+ }
+ err := encodeValue(w, item, suffix)
+ if err != nil {
+ return err
+ }
+ }
+ _, err = w.Write([]byte("]"))
+ if err != nil {
+ return err
+ }
+ return err
+}
+
+func streamingEncodeUnstructuredList(w io.Writer, list *unstructured.UnstructuredList) error {
+ _, err := w.Write([]byte(`{`))
+ if err != nil {
+ return err
+ }
+ keys := slices.Collect(maps.Keys(list.Object))
+ if _, exists := list.Object["items"]; !exists {
+ keys = append(keys, "items")
+ }
+ sort.Strings(keys)
+
+ suffix := []byte(",")
+ for i, key := range keys {
+ if i == len(keys)-1 {
+ suffix = nil
+ }
+ if key == "items" {
+ err = encodeItemsUnstructuredSlice(w, list.Items, suffix)
+ } else {
+ err = encodeKeyValuePair(w, key, list.Object[key], suffix)
+ }
+ if err != nil {
+ return err
+ }
+ }
+ _, err = w.Write([]byte("}\n"))
+ return err
+}
+
+func encodeItemsUnstructuredSlice(w io.Writer, items []unstructured.Unstructured, suffix []byte) (err error) {
+ _, err = w.Write([]byte(`"items":[`))
+ if err != nil {
+ return err
+ }
+ comma := []byte(",")
+ for i, item := range items {
+ if i == len(items)-1 {
+ comma = nil
+ }
+ err := encodeValue(w, item.Object, comma)
+ if err != nil {
+ return err
+ }
+ }
+ _, err = w.Write([]byte("]"))
+ if err != nil {
+ return err
+ }
+ if len(suffix) > 0 {
+ _, err = w.Write(suffix)
+ }
+ return err
+}
+
+func encodeKeyValuePair(w io.Writer, key string, value any, suffix []byte) (err error) {
+ err = encodeValue(w, key, []byte(":"))
+ if err != nil {
+ return err
+ }
+ err = encodeValue(w, value, suffix)
+ if err != nil {
+ return err
+ }
+ return err
+}
+
+func encodeValue(w io.Writer, value any, suffix []byte) error {
+ data, err := json.Marshal(value)
+ if err != nil {
+ return err
+ }
+ _, err = w.Write(data)
+ if err != nil {
+ return err
+ }
+ if len(suffix) > 0 {
+ _, err = w.Write(suffix)
+ }
+ return err
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go
index 1ae4a32eb7..24f66a1017 100644
--- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go
@@ -36,7 +36,7 @@ import (
// is not nil, the object has the group, version, and kind fields set.
// Deprecated: use NewSerializerWithOptions instead.
func NewSerializer(meta MetaFactory, creater runtime.ObjectCreater, typer runtime.ObjectTyper, pretty bool) *Serializer {
- return NewSerializerWithOptions(meta, creater, typer, SerializerOptions{false, pretty, false})
+ return NewSerializerWithOptions(meta, creater, typer, SerializerOptions{false, pretty, false, false})
}
// NewYAMLSerializer creates a YAML serializer that handles encoding versioned objects into the proper YAML form. If typer
@@ -44,7 +44,7 @@ func NewSerializer(meta MetaFactory, creater runtime.ObjectCreater, typer runtim
// matches JSON, and will error if constructs are used that do not serialize to JSON.
// Deprecated: use NewSerializerWithOptions instead.
func NewYAMLSerializer(meta MetaFactory, creater runtime.ObjectCreater, typer runtime.ObjectTyper) *Serializer {
- return NewSerializerWithOptions(meta, creater, typer, SerializerOptions{true, false, false})
+ return NewSerializerWithOptions(meta, creater, typer, SerializerOptions{true, false, false, false})
}
// NewSerializerWithOptions creates a JSON/YAML serializer that handles encoding versioned objects into the proper JSON/YAML
@@ -93,6 +93,9 @@ type SerializerOptions struct {
// Strict: configures the Serializer to return strictDecodingError's when duplicate fields are present decoding JSON or YAML.
// Note that enabling this option is not as performant as the non-strict variant, and should not be used in fast paths.
Strict bool
+
+ // StreamingCollectionsEncoding enables encoding collections one item at a time, drastically reducing the memory needed.
+ StreamingCollectionsEncoding bool
}
// Serializer handles encoding versioned objects into the proper JSON form
@@ -242,6 +245,15 @@ func (s *Serializer) doEncode(obj runtime.Object, w io.Writer) error {
_, err = w.Write(data)
return err
}
+ if s.options.StreamingCollectionsEncoding {
+ ok, err := streamEncodeCollections(obj, w)
+ if err != nil {
+ return err
+ }
+ if ok {
+ return nil
+ }
+ }
encoder := json.NewEncoder(w)
return encoder.Encode(obj)
}
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/collections.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/collections.go
new file mode 100644
index 0000000000..754a80820b
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/collections.go
@@ -0,0 +1,174 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package protobuf
+
+import (
+ "errors"
+ "io"
+ "math/bits"
+
+ "github.com/gogo/protobuf/proto"
+
+ "k8s.io/apimachinery/pkg/api/meta"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/conversion"
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+var (
+ errFieldCount = errors.New("expected ListType to have 3 fields")
+ errTypeMetaField = errors.New("expected TypeMeta field to have TypeMeta type")
+ errTypeMetaProtobufTag = errors.New(`expected TypeMeta protobuf field tag to be ""`)
+ errListMetaField = errors.New("expected ListMeta field to have ListMeta type")
+ errListMetaProtobufTag = errors.New(`expected ListMeta protobuf field tag to be "bytes,1,opt,name=metadata"`)
+ errItemsProtobufTag = errors.New(`expected Items protobuf field tag to be "bytes,2,rep,name=items"`)
+ errItemsSizer = errors.New(`expected Items elements to implement proto.Sizer`)
+)
+
+// getStreamingListData implements list extraction logic for protobuf stream serialization.
+//
+// Reason for a custom logic instead of reusing accessors from meta package:
+// * Validate proto tags to prevent incompatibility with proto standard package.
+// * ListMetaAccessor doesn't distinguish empty from nil value.
+// * TypeAccessor reparsing "apiVersion" and serializing it with "{group}/{version}"
+func getStreamingListData(list runtime.Object) (data streamingListData, err error) {
+ listValue, err := conversion.EnforcePtr(list)
+ if err != nil {
+ return data, err
+ }
+ listType := listValue.Type()
+ if listType.NumField() != 3 {
+ return data, errFieldCount
+ }
+ // TypeMeta: validated, but not returned since it is not serialized.
+ _, ok := listValue.Field(0).Interface().(metav1.TypeMeta)
+ if !ok {
+ return data, errTypeMetaField
+ }
+ if listType.Field(0).Tag.Get("protobuf") != "" {
+ return data, errTypeMetaProtobufTag
+ }
+ // ListMeta
+ listMeta, ok := listValue.Field(1).Interface().(metav1.ListMeta)
+ if !ok {
+ return data, errListMetaField
+ }
+ // if we were ever to relax the protobuf tag check we should update the hardcoded `0xa` below when writing ListMeta.
+ if listType.Field(1).Tag.Get("protobuf") != "bytes,1,opt,name=metadata" {
+ return data, errListMetaProtobufTag
+ }
+ data.listMeta = listMeta
+ // Items; if we were ever to relax the protobuf tag check we should update the hardcoded `0x12` below when writing Items.
+ if listType.Field(2).Tag.Get("protobuf") != "bytes,2,rep,name=items" {
+ return data, errItemsProtobufTag
+ }
+ items, err := meta.ExtractList(list)
+ if err != nil {
+ return data, err
+ }
+ data.items = items
+ data.totalSize, data.listMetaSize, data.itemsSizes, err = listSize(listMeta, items)
+ return data, err
+}
+
+type streamingListData struct {
+ // totalSize is the total size of the serialized List object, including their proto headers/size bytes
+ totalSize int
+
+ // listMetaSize caches results from .Size() call to listMeta, doesn't include header bytes (field identifier, size)
+ listMetaSize int
+ listMeta metav1.ListMeta
+
+ // itemsSizes caches results from .Size() call to items, doesn't include header bytes (field identifier, size)
+ itemsSizes []int
+ items []runtime.Object
+}
+
+// listSize returns the sizes of ListMeta and items, to be used later for preallocations.
+// listMetaSize and itemSizes do not include header bytes (field identifier, size).
+func listSize(listMeta metav1.ListMeta, items []runtime.Object) (totalSize, listMetaSize int, itemSizes []int, err error) {
+ // ListMeta
+ listMetaSize = listMeta.Size()
+ totalSize += 1 + sovGenerated(uint64(listMetaSize)) + listMetaSize
+ // Items
+ itemSizes = make([]int, len(items))
+ for i, item := range items {
+ sizer, ok := item.(proto.Sizer)
+ if !ok {
+ return totalSize, listMetaSize, nil, errItemsSizer
+ }
+ n := sizer.Size()
+ itemSizes[i] = n
+ totalSize += 1 + sovGenerated(uint64(n)) + n
+ }
+ return totalSize, listMetaSize, itemSizes, nil
+}
+
+func streamingEncodeUnknownList(w io.Writer, unk runtime.Unknown, listData streamingListData, memAlloc runtime.MemoryAllocator) error {
+ _, err := w.Write(protoEncodingPrefix)
+ if err != nil {
+ return err
+ }
+ // encodeList is responsible for encoding the List into the unknown Raw.
+ encodeList := func(writer io.Writer) (int, error) {
+ return streamingEncodeList(writer, listData, memAlloc)
+ }
+ _, err = unk.MarshalToWriter(w, listData.totalSize, encodeList)
+ return err
+}
+
+func streamingEncodeList(w io.Writer, listData streamingListData, memAlloc runtime.MemoryAllocator) (size int, err error) {
+ // ListMeta; 0xa = (1 << 3) | 2; field number: 1, type: 2 (LEN). https://protobuf.dev/programming-guides/encoding/#structure
+ n, err := doEncodeWithHeader(&listData.listMeta, w, 0xa, listData.listMetaSize, memAlloc)
+ size += n
+ if err != nil {
+ return size, err
+ }
+ // Items; 0x12 = (2 << 3) | 2; field number: 2, type: 2 (LEN). https://protobuf.dev/programming-guides/encoding/#structure
+ for i, item := range listData.items {
+ n, err := doEncodeWithHeader(item, w, 0x12, listData.itemsSizes[i], memAlloc)
+ size += n
+ if err != nil {
+ return size, err
+ }
+ }
+ return size, nil
+}
+
+func writeVarintGenerated(w io.Writer, v int) (int, error) {
+ buf := make([]byte, sovGenerated(uint64(v)))
+ encodeVarintGenerated(buf, len(buf), uint64(v))
+ return w.Write(buf)
+}
+
+// sovGenerated is copied from `generated.pb.go`; it returns the size of a varint.
+func sovGenerated(v uint64) int {
+ return (bits.Len64(v|1) + 6) / 7
+}
+
+// encodeVarintGenerated is copied from `generated.pb.go`; it encodes a varint.
+func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
+ offset -= sovGenerated(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/doc.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/doc.go
index 72d0ac79b3..381748d69f 100644
--- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/doc.go
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/doc.go
@@ -15,4 +15,4 @@ limitations under the License.
*/
// Package protobuf provides a Kubernetes serializer for the protobuf format.
-package protobuf // import "k8s.io/apimachinery/pkg/runtime/serializer/protobuf"
+package protobuf
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go
index c63e6dc63f..c66c49ac4c 100644
--- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go
@@ -72,10 +72,18 @@ func IsNotMarshalable(err error) bool {
// is passed, the encoded object will have group, version, and kind fields set. If typer is nil, the objects will be written
// as-is (any type info passed with the object will be used).
func NewSerializer(creater runtime.ObjectCreater, typer runtime.ObjectTyper) *Serializer {
+ return NewSerializerWithOptions(creater, typer, SerializerOptions{})
+}
+
+// NewSerializerWithOptions creates a Protobuf serializer that handles encoding versioned objects into the proper wire form. If a typer
+// is passed, the encoded object will have group, version, and kind fields set. If typer is nil, the objects will be written
+// as-is (any type info passed with the object will be used).
+func NewSerializerWithOptions(creater runtime.ObjectCreater, typer runtime.ObjectTyper, opts SerializerOptions) *Serializer {
return &Serializer{
prefix: protoEncodingPrefix,
creater: creater,
typer: typer,
+ options: opts,
}
}
@@ -84,6 +92,14 @@ type Serializer struct {
prefix []byte
creater runtime.ObjectCreater
typer runtime.ObjectTyper
+
+ options SerializerOptions
+}
+
+// SerializerOptions holds the options which are used to configure a Proto serializer.
+type SerializerOptions struct {
+ // StreamingCollectionsEncoding enables encoding collections one item at a time, drastically reducing the memory needed.
+ StreamingCollectionsEncoding bool
}
var _ runtime.Serializer = &Serializer{}
@@ -209,6 +225,13 @@ func (s *Serializer) doEncode(obj runtime.Object, w io.Writer, memAlloc runtime.
},
}
}
+ if s.options.StreamingCollectionsEncoding {
+ listData, err := getStreamingListData(obj)
+ if err == nil {
+ // Doesn't honor custom proto marshaling methods (like json streaming), because all proto objects implement proto methods.
+ return streamingEncodeUnknownList(w, unk, listData, memAlloc)
+ }
+ }
switch t := obj.(type) {
case bufferedMarshaller:
@@ -428,6 +451,39 @@ func (s *RawSerializer) encode(obj runtime.Object, w io.Writer, memAlloc runtime
}
func (s *RawSerializer) doEncode(obj runtime.Object, w io.Writer, memAlloc runtime.MemoryAllocator) error {
+ _, err := doEncode(obj, w, nil, memAlloc)
+ return err
+}
+
+func doEncodeWithHeader(obj any, w io.Writer, field byte, precomputedSize int, memAlloc runtime.MemoryAllocator) (size int, err error) {
+ // Field identifier
+ n, err := w.Write([]byte{field})
+ size += n
+ if err != nil {
+ return size, err
+ }
+ // Size
+ n, err = writeVarintGenerated(w, precomputedSize)
+ size += n
+ if err != nil {
+ return size, err
+ }
+ // Obj
+ n, err = doEncode(obj, w, &precomputedSize, memAlloc)
+ size += n
+ if err != nil {
+ return size, err
+ }
+ if n != precomputedSize {
+ return size, fmt.Errorf("the size value was %d, but doEncode wrote %d bytes to data", precomputedSize, n)
+ }
+ return size, nil
+}
+
+// doEncode encodes the provided object into the writer, using an allocator if possible.
+// It avoids calling the object's Size method if precomputedObjSize is provided.
+// precomputedObjSize should not include header bytes (field identifier, size).
+func doEncode(obj any, w io.Writer, precomputedObjSize *int, memAlloc runtime.MemoryAllocator) (int, error) {
if memAlloc == nil {
klog.Error("a mandatory memory allocator wasn't provided, this might have a negative impact on performance, check invocations of EncodeWithAllocator method, falling back on runtime.SimpleAllocator")
memAlloc = &runtime.SimpleAllocator{}
@@ -436,40 +492,43 @@ func (s *RawSerializer) doEncode(obj runtime.Object, w io.Writer, memAlloc runti
case bufferedReverseMarshaller:
// this path performs a single allocation during write only when the Allocator wasn't provided
// it also requires the caller to implement the more efficient Size and MarshalToSizedBuffer methods
- encodedSize := uint64(t.Size())
- data := memAlloc.Allocate(encodedSize)
+ if precomputedObjSize == nil {
+ s := t.Size()
+ precomputedObjSize = &s
+ }
+ data := memAlloc.Allocate(uint64(*precomputedObjSize))
n, err := t.MarshalToSizedBuffer(data)
if err != nil {
- return err
+ return 0, err
}
- _, err = w.Write(data[:n])
- return err
+ return w.Write(data[:n])
case bufferedMarshaller:
// this path performs a single allocation during write only when the Allocator wasn't provided
// it also requires the caller to implement the more efficient Size and MarshalTo methods
- encodedSize := uint64(t.Size())
- data := memAlloc.Allocate(encodedSize)
+ if precomputedObjSize == nil {
+ s := t.Size()
+ precomputedObjSize = &s
+ }
+ data := memAlloc.Allocate(uint64(*precomputedObjSize))
n, err := t.MarshalTo(data)
if err != nil {
- return err
+ return 0, err
}
- _, err = w.Write(data[:n])
- return err
+ return w.Write(data[:n])
case proto.Marshaler:
// this path performs extra allocations
data, err := t.Marshal()
if err != nil {
- return err
+ return 0, err
}
- _, err = w.Write(data)
- return err
+ return w.Write(data)
default:
- return errNotMarshalable{reflect.TypeOf(obj)}
+ return 0, errNotMarshalable{reflect.TypeOf(obj)}
}
}
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/types.go b/vendor/k8s.io/apimachinery/pkg/runtime/types.go
index ce77c7910a..ca7b7cc2d4 100644
--- a/vendor/k8s.io/apimachinery/pkg/runtime/types.go
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/types.go
@@ -43,9 +43,11 @@ type TypeMeta struct {
}
const (
- ContentTypeJSON string = "application/json"
- ContentTypeYAML string = "application/yaml"
- ContentTypeProtobuf string = "application/vnd.kubernetes.protobuf"
+ ContentTypeJSON string = "application/json"
+ ContentTypeYAML string = "application/yaml"
+ ContentTypeProtobuf string = "application/vnd.kubernetes.protobuf"
+ ContentTypeCBOR string = "application/cbor" // RFC 8949
+ ContentTypeCBORSequence string = "application/cbor-seq" // RFC 8742
)
// RawExtension is used to hold extensions in external versions.
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/types_proto.go b/vendor/k8s.io/apimachinery/pkg/runtime/types_proto.go
index a82227b239..27a2064c41 100644
--- a/vendor/k8s.io/apimachinery/pkg/runtime/types_proto.go
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/types_proto.go
@@ -18,6 +18,7 @@ package runtime
import (
"fmt"
+ "io"
)
type ProtobufMarshaller interface {
@@ -28,6 +29,124 @@ type ProtobufReverseMarshaller interface {
MarshalToSizedBuffer(data []byte) (int, error)
}
+const (
+ typeMetaTag = 0xa
+ rawTag = 0x12
+ contentEncodingTag = 0x1a
+ contentTypeTag = 0x22
+
+ // max length of a varint for a uint64
+ maxUint64VarIntLength = 10
+)
+
+// MarshalToWriter allows a caller to provide a streaming writer for raw bytes,
+// instead of populating them inside the Unknown struct.
+// rawSize is the number of bytes writeRaw will write in a success case.
+// writeRaw is called when it is time to write the raw bytes. It must return `rawSize, nil` or an error.
+func (m *Unknown) MarshalToWriter(w io.Writer, rawSize int, writeRaw func(io.Writer) (int, error)) (int, error) {
+ size := 0
+
+ // reuse the buffer for varint marshaling
+ varintBuffer := make([]byte, maxUint64VarIntLength)
+ writeVarint := func(i int) (int, error) {
+ offset := encodeVarintGenerated(varintBuffer, len(varintBuffer), uint64(i))
+ return w.Write(varintBuffer[offset:])
+ }
+
+ // TypeMeta
+ {
+ n, err := w.Write([]byte{typeMetaTag})
+ size += n
+ if err != nil {
+ return size, err
+ }
+
+ typeMetaBytes, err := m.TypeMeta.Marshal()
+ if err != nil {
+ return size, err
+ }
+
+ n, err = writeVarint(len(typeMetaBytes))
+ size += n
+ if err != nil {
+ return size, err
+ }
+
+ n, err = w.Write(typeMetaBytes)
+ size += n
+ if err != nil {
+ return size, err
+ }
+ }
+
+ // Raw, delegating write to writeRaw()
+ {
+ n, err := w.Write([]byte{rawTag})
+ size += n
+ if err != nil {
+ return size, err
+ }
+
+ n, err = writeVarint(rawSize)
+ size += n
+ if err != nil {
+ return size, err
+ }
+
+ n, err = writeRaw(w)
+ size += n
+ if err != nil {
+ return size, err
+ }
+ if n != int(rawSize) {
+ return size, fmt.Errorf("the size value was %d, but encoding wrote %d bytes to data", rawSize, n)
+ }
+ }
+
+ // ContentEncoding
+ {
+ n, err := w.Write([]byte{contentEncodingTag})
+ size += n
+ if err != nil {
+ return size, err
+ }
+
+ n, err = writeVarint(len(m.ContentEncoding))
+ size += n
+ if err != nil {
+ return size, err
+ }
+
+ n, err = w.Write([]byte(m.ContentEncoding))
+ size += n
+ if err != nil {
+ return size, err
+ }
+ }
+
+ // ContentType
+ {
+ n, err := w.Write([]byte{contentTypeTag})
+ size += n
+ if err != nil {
+ return size, err
+ }
+
+ n, err = writeVarint(len(m.ContentType))
+ size += n
+ if err != nil {
+ return size, err
+ }
+
+ n, err = w.Write([]byte(m.ContentType))
+ size += n
+ if err != nil {
+ return size, err
+ }
+ }
+ return size, nil
+}
+
// NestedMarshalTo allows a caller to avoid extra allocations during serialization of an Unknown
// that will contain an object that implements ProtobufMarshaller or ProtobufReverseMarshaller.
func (m *Unknown) NestedMarshalTo(data []byte, b ProtobufMarshaller, size uint64) (int, error) {
@@ -43,12 +162,12 @@ func (m *Unknown) NestedMarshalTo(data []byte, b ProtobufMarshaller, size uint64
copy(data[i:], m.ContentType)
i = encodeVarintGenerated(data, i, uint64(len(m.ContentType)))
i--
- data[i] = 0x22
+ data[i] = contentTypeTag
i -= len(m.ContentEncoding)
copy(data[i:], m.ContentEncoding)
i = encodeVarintGenerated(data, i, uint64(len(m.ContentEncoding)))
i--
- data[i] = 0x1a
+ data[i] = contentEncodingTag
if b != nil {
if r, ok := b.(ProtobufReverseMarshaller); ok {
n1, err := r.MarshalToSizedBuffer(data[:i])
@@ -75,7 +194,7 @@ func (m *Unknown) NestedMarshalTo(data []byte, b ProtobufMarshaller, size uint64
}
i = encodeVarintGenerated(data, i, size)
i--
- data[i] = 0x12
+ data[i] = rawTag
}
n2, err := m.TypeMeta.MarshalToSizedBuffer(data[:i])
if err != nil {
@@ -84,6 +203,6 @@ func (m *Unknown) NestedMarshalTo(data []byte, b ProtobufMarshaller, size uint64
i -= n2
i = encodeVarintGenerated(data, i, uint64(n2))
i--
- data[i] = 0xa
+ data[i] = typeMetaTag
return msgSize - i, nil
}
diff --git a/vendor/k8s.io/apimachinery/pkg/types/doc.go b/vendor/k8s.io/apimachinery/pkg/types/doc.go
index 5667fa9921..783cbcdc8d 100644
--- a/vendor/k8s.io/apimachinery/pkg/types/doc.go
+++ b/vendor/k8s.io/apimachinery/pkg/types/doc.go
@@ -15,4 +15,4 @@ limitations under the License.
*/
// Package types implements various generic types used throughout kubernetes.
-package types // import "k8s.io/apimachinery/pkg/types"
+package types
diff --git a/vendor/k8s.io/apimachinery/pkg/types/patch.go b/vendor/k8s.io/apimachinery/pkg/types/patch.go
index fe8ecaaffa..d338cf213d 100644
--- a/vendor/k8s.io/apimachinery/pkg/types/patch.go
+++ b/vendor/k8s.io/apimachinery/pkg/types/patch.go
@@ -25,5 +25,7 @@ const (
JSONPatchType PatchType = "application/json-patch+json"
MergePatchType PatchType = "application/merge-patch+json"
StrategicMergePatchType PatchType = "application/strategic-merge-patch+json"
- ApplyPatchType PatchType = "application/apply-patch+yaml"
+ ApplyPatchType PatchType = ApplyYAMLPatchType
+ ApplyYAMLPatchType PatchType = "application/apply-patch+yaml"
+ ApplyCBORPatchType PatchType = "application/apply-patch+cbor"
)
diff --git a/vendor/k8s.io/apimachinery/pkg/util/errors/doc.go b/vendor/k8s.io/apimachinery/pkg/util/errors/doc.go
index 5d4d6250a3..b3b39bc388 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/errors/doc.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/errors/doc.go
@@ -15,4 +15,4 @@ limitations under the License.
*/
// Package errors implements various utility functions and types around errors.
-package errors // import "k8s.io/apimachinery/pkg/util/errors"
+package errors
diff --git a/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go b/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go
index 9b3c9c8d5a..f18845a417 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go
@@ -91,12 +91,12 @@ func (r *lengthDelimitedFrameReader) Read(data []byte) (int, error) {
}
n, err := io.ReadAtLeast(r.r, data[:max], int(max))
r.remaining -= n
- if err == io.ErrShortBuffer || r.remaining > 0 {
- return n, io.ErrShortBuffer
- }
if err != nil {
return n, err
}
+ if r.remaining > 0 {
+ return n, io.ErrShortBuffer
+ }
if n != expect {
return n, io.ErrUnexpectedEOF
}
@@ -147,7 +147,6 @@ func (r *jsonFrameReader) Read(data []byte) (int, error) {
// RawMessage#Unmarshal appends to data - we reset the slice down to 0 and will either see
// data written to data, or be larger than data and a different array.
- n := len(data)
m := json.RawMessage(data[:0])
if err := r.decoder.Decode(&m); err != nil {
return 0, err
@@ -156,12 +155,19 @@ func (r *jsonFrameReader) Read(data []byte) (int, error) {
// If capacity of data is less than length of the message, decoder will allocate a new slice
// and set m to it, which means we need to copy the partial result back into data and preserve
// the remaining result for subsequent reads.
- if len(m) > n {
- //nolint:staticcheck // SA4006,SA4010 underlying array of data is modified here.
- data = append(data[0:0], m[:n]...)
- r.remaining = m[n:]
- return n, io.ErrShortBuffer
+ if len(m) > cap(data) {
+ copy(data, m)
+ r.remaining = m[len(data):]
+ return len(data), io.ErrShortBuffer
+ }
+
+ if len(m) > len(data) {
+ // The bytes beyond len(data) were stored in data's underlying array, which we do
+ // not own after this function returns.
+ r.remaining = append([]byte(nil), m[len(data):]...)
+ return len(data), io.ErrShortBuffer
}
+
return len(m), nil
}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/instr_fuzz.go b/vendor/k8s.io/apimachinery/pkg/util/intstr/instr_fuzz.go
index a502b5adb6..2d6f6a0ccc 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/intstr/instr_fuzz.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/instr_fuzz.go
@@ -20,24 +20,24 @@ limitations under the License.
package intstr
import (
- fuzz "github.com/google/gofuzz"
+ "sigs.k8s.io/randfill"
)
-// Fuzz satisfies fuzz.Interface
-func (intstr *IntOrString) Fuzz(c fuzz.Continue) {
+// RandFill satisfies randfill.NativeSelfFiller
+func (intstr *IntOrString) RandFill(c randfill.Continue) {
if intstr == nil {
return
}
- if c.RandBool() {
+ if c.Bool() {
intstr.Type = Int
- c.Fuzz(&intstr.IntVal)
+ c.Fill(&intstr.IntVal)
intstr.StrVal = ""
} else {
intstr.Type = String
intstr.IntVal = 0
- c.Fuzz(&intstr.StrVal)
+ c.Fill(&intstr.StrVal)
}
}
// ensure IntOrString implements fuzz.Interface
-var _ fuzz.Interface = &IntOrString{}
+var _ randfill.NativeSelfFiller = &IntOrString{}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go b/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go
index f358c794d1..5fd2e16c84 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go
@@ -25,6 +25,7 @@ import (
"strconv"
"strings"
+ cbor "k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct"
"k8s.io/klog/v2"
)
@@ -92,6 +93,20 @@ func (intstr *IntOrString) UnmarshalJSON(value []byte) error {
return json.Unmarshal(value, &intstr.IntVal)
}
+func (intstr *IntOrString) UnmarshalCBOR(value []byte) error {
+ if err := cbor.Unmarshal(value, &intstr.StrVal); err == nil {
+ intstr.Type = String
+ return nil
+ }
+
+ if err := cbor.Unmarshal(value, &intstr.IntVal); err != nil {
+ return err
+ }
+
+ intstr.Type = Int
+ return nil
+}
+
// String returns the string value, or the Itoa of the int value.
func (intstr *IntOrString) String() string {
if intstr == nil {
@@ -126,6 +141,17 @@ func (intstr IntOrString) MarshalJSON() ([]byte, error) {
}
}
+func (intstr IntOrString) MarshalCBOR() ([]byte, error) {
+ switch intstr.Type {
+ case Int:
+ return cbor.Marshal(intstr.IntVal)
+ case String:
+ return cbor.Marshal(intstr.StrVal)
+ default:
+ return nil, fmt.Errorf("impossible IntOrString.Type")
+ }
+}
+
// OpenAPISchemaType is used by the kube-openapi generator when constructing
// the OpenAPI spec of this type.
//
diff --git a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go
index 3674914f70..de97deae03 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go
@@ -17,6 +17,7 @@ limitations under the License.
package runtime
import (
+ "context"
"fmt"
"net/http"
"runtime"
@@ -35,7 +36,12 @@ var (
)
// PanicHandlers is a list of functions which will be invoked when a panic happens.
-var PanicHandlers = []func(interface{}){logPanic}
+//
+// The code invoking these handlers prepares a contextual logger so that
+// klog.FromContext(ctx) already skips over the panic handler itself and
+// several other intermediate functions, ideally such that the log output
+// is attributed to the code which triggered the panic.
+var PanicHandlers = []func(context.Context, interface{}){logPanic}
// HandleCrash simply catches a crash and logs an error. Meant to be called via
// defer. Additional context-specific handlers can be provided, and will be
@@ -43,23 +49,74 @@ var PanicHandlers = []func(interface{}){logPanic}
// handlers and logging the panic message.
//
// E.g., you can provide one or more additional handlers for something like shutting down go routines gracefully.
+//
+// Contextual logging: HandleCrashWithContext or HandleCrashWithLogger should be used instead of HandleCrash in code which supports contextual logging.
func HandleCrash(additionalHandlers ...func(interface{})) {
if r := recover(); r != nil {
- for _, fn := range PanicHandlers {
- fn(r)
- }
- for _, fn := range additionalHandlers {
- fn(r)
- }
- if ReallyCrash {
- // Actually proceed to panic.
- panic(r)
+ additionalHandlersWithContext := make([]func(context.Context, interface{}), len(additionalHandlers))
+ for i, handler := range additionalHandlers {
+ handler := handler // capture loop variable
+ additionalHandlersWithContext[i] = func(_ context.Context, r interface{}) {
+ handler(r)
+ }
}
+
+ handleCrash(context.Background(), r, additionalHandlersWithContext...)
+ }
+}
+
+// HandleCrashWithContext simply catches a crash and logs an error. Meant to be called via
+// defer. Additional context-specific handlers can be provided, and will be
+// called in case of panic. HandleCrash actually crashes, after calling the
+// handlers and logging the panic message.
+//
+// E.g., you can provide one or more additional handlers for something like shutting down go routines gracefully.
+//
+// The context is used to determine how to log.
+func HandleCrashWithContext(ctx context.Context, additionalHandlers ...func(context.Context, interface{})) {
+ if r := recover(); r != nil {
+ handleCrash(ctx, r, additionalHandlers...)
+ }
+}
+
+// HandleCrashWithLogger simply catches a crash and logs an error. Meant to be called via
+// defer. Additional context-specific handlers can be provided, and will be
+// called in case of panic. HandleCrash actually crashes, after calling the
+// handlers and logging the panic message.
+//
+// E.g., you can provide one or more additional handlers for something like shutting down go routines gracefully.
+func HandleCrashWithLogger(logger klog.Logger, additionalHandlers ...func(context.Context, interface{})) {
+ if r := recover(); r != nil {
+ ctx := klog.NewContext(context.Background(), logger)
+ handleCrash(ctx, r, additionalHandlers...)
+ }
+}
+
+// handleCrash is the common implementation of the HandleCrash* variants.
+// Having those call a common implementation ensures that the stack depth
+ // is the same regardless of which path the handlers are invoked through.
+func handleCrash(ctx context.Context, r any, additionalHandlers ...func(context.Context, interface{})) {
+ // We don't really know how many call frames to skip because the Go
+ // panic handler is between us and the code where the panic occurred.
+ // If it's one function (as in Go 1.21), then skipping four levels
+ // gets us to the function which called the `defer HandleCrashWithContext(...)`.
+ logger := klog.FromContext(ctx).WithCallDepth(4)
+ ctx = klog.NewContext(ctx, logger)
+
+ for _, fn := range PanicHandlers {
+ fn(ctx, r)
+ }
+ for _, fn := range additionalHandlers {
+ fn(ctx, r)
+ }
+ if ReallyCrash {
+ // Actually proceed to panic.
+ panic(r)
}
}
// logPanic logs the caller tree when a panic occurs (except in the special case of http.ErrAbortHandler).
-func logPanic(r interface{}) {
+func logPanic(ctx context.Context, r interface{}) {
if r == http.ErrAbortHandler {
// honor the http.ErrAbortHandler sentinel panic value:
// ErrAbortHandler is a sentinel panic value to abort a handler.
@@ -73,10 +130,16 @@ func logPanic(r interface{}) {
const size = 64 << 10
stacktrace := make([]byte, size)
stacktrace = stacktrace[:runtime.Stack(stacktrace, false)]
+
+ logger := klog.FromContext(ctx)
+
+ // For backwards compatibility, conversion to string
+ // is handled here instead of deferring to the logging
+ // backend.
if _, ok := r.(string); ok {
- klog.Errorf("Observed a panic: %s\n%s", r, stacktrace)
+ logger.Error(nil, "Observed a panic", "panic", r, "stacktrace", string(stacktrace))
} else {
- klog.Errorf("Observed a panic: %#v (%v)\n%s", r, r, stacktrace)
+ logger.Error(nil, "Observed a panic", "panic", fmt.Sprintf("%v", r), "panicGoValue", fmt.Sprintf("%#v", r), "stacktrace", string(stacktrace))
}
}
@@ -84,35 +147,83 @@ func logPanic(r interface{}) {
// error occurs.
// TODO(lavalamp): for testability, this and the below HandleError function
// should be packaged up into a testable and reusable object.
-var ErrorHandlers = []func(error){
+var ErrorHandlers = []ErrorHandler{
logError,
- (&rudimentaryErrorBackoff{
- lastErrorTime: time.Now(),
- // 1ms was the number folks were able to stomach as a global rate limit.
- // If you need to log errors more than 1000 times a second you
- // should probably consider fixing your code instead. :)
- minPeriod: time.Millisecond,
- }).OnError,
+ func(_ context.Context, _ error, _ string, _ ...interface{}) {
+ (&rudimentaryErrorBackoff{
+ lastErrorTime: time.Now(),
+ // 1ms was the number folks were able to stomach as a global rate limit.
+ // If you need to log errors more than 1000 times a second you
+ // should probably consider fixing your code instead. :)
+ minPeriod: time.Millisecond,
+ }).OnError()
+ },
}
+type ErrorHandler func(ctx context.Context, err error, msg string, keysAndValues ...interface{})
+
// HandleError is a method to invoke when a non-user facing piece of code cannot
// return an error and needs to indicate it has been ignored. Invoking this method
// is preferable to logging the error - the default behavior is to log but the
// errors may be sent to a remote server for analysis.
+//
+// Contextual logging: HandleErrorWithContext should be used instead of HandleError in code which supports contextual logging.
func HandleError(err error) {
// this is sometimes called with a nil error. We probably shouldn't fail and should do nothing instead
if err == nil {
return
}
+ handleError(context.Background(), err, "Unhandled Error")
+}
+
+// HandleErrorWithContext is a method to invoke when a non-user facing piece of code cannot
+// return an error and needs to indicate it has been ignored. Invoking this method
+// is preferable to logging the error - the default behavior is to log but the
+// errors may be sent to a remote server for analysis. The context is used to
+// determine how to log the error.
+//
+// If contextual logging is enabled, the default log output is equivalent to
+//
+// logr.FromContext(ctx).WithName("UnhandledError").Error(err, msg, keysAndValues...)
+//
+// Without contextual logging, it is equivalent to:
+//
+// klog.ErrorS(err, msg, keysAndValues...)
+//
+// In contrast to HandleError, passing nil for the error is still going to
+// trigger a log entry. Don't construct a new error or wrap an error
+// with fmt.Errorf. Instead, add additional information via the message
+// and key/value pairs.
+//
+// This variant should be used instead of HandleError because it supports
+// structured, contextual logging. Alternatively, [HandleErrorWithLogger] can
+// be used if a logger is available instead of a context.
+func HandleErrorWithContext(ctx context.Context, err error, msg string, keysAndValues ...interface{}) {
+ handleError(ctx, err, msg, keysAndValues...)
+}
+
+// HandleErrorWithLogger is an alternative to [HandleErrorWithContext] which accepts
+// a logger for contextual logging.
+func HandleErrorWithLogger(logger klog.Logger, err error, msg string, keysAndValues ...interface{}) {
+ handleError(klog.NewContext(context.Background(), logger), err, msg, keysAndValues...)
+}
+
+// handleError is the common implementation of the HandleError* variants.
+// Using this common implementation ensures that the stack depth
+// is the same regardless of which path the handlers are invoked through.
+func handleError(ctx context.Context, err error, msg string, keysAndValues ...interface{}) {
for _, fn := range ErrorHandlers {
- fn(err)
+ fn(ctx, err, msg, keysAndValues...)
}
}
-// logError prints an error with the call stack of the location it was reported
-func logError(err error) {
- klog.ErrorDepth(2, err)
+// logError prints an error with the call stack of the location it was reported.
+// It expects to be called as -> HandleError[WithContext] -> handleError -> logError.
+func logError(ctx context.Context, err error, msg string, keysAndValues ...interface{}) {
+ logger := klog.FromContext(ctx).WithCallDepth(3)
+ logger = klog.LoggerWithName(logger, "UnhandledError")
+ logger.Error(err, msg, keysAndValues...) //nolint:logcheck // logcheck complains about unknown key/value pairs.
}
type rudimentaryErrorBackoff struct {
@@ -125,7 +236,7 @@ type rudimentaryErrorBackoff struct {
// OnError will block if it is called more often than the embedded period time.
// This will prevent overly tight hot error loops.
-func (r *rudimentaryErrorBackoff) OnError(error) {
+func (r *rudimentaryErrorBackoff) OnError() {
now := time.Now() // start the timer before acquiring the lock
r.lastErrorTimeLock.Lock()
d := now.Sub(r.lastErrorTime)
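
Taken together, the new variants let callers thread a contextual logger through both panic recovery and unhandled-error reporting. A minimal sketch of the intended call pattern, assuming the usual utilruntime alias and a klog-backed logger; the worker and doWork names are illustrative only:

    package main

    import (
        "context"
        "errors"

        utilruntime "k8s.io/apimachinery/pkg/util/runtime"
        "k8s.io/klog/v2"
    )

    // doWork stands in for real work that can fail or panic.
    func doWork(ctx context.Context) error {
        return errors.New("boom")
    }

    func worker(ctx context.Context) {
        // Panics below are logged via the contextual logger from ctx and,
        // because ReallyCrash defaults to true, re-raised after the handlers run.
        defer utilruntime.HandleCrashWithContext(ctx)

        if err := doWork(ctx); err != nil {
            // Logged under the "UnhandledError" logger name with the extra
            // key/value pairs; no need to wrap err with fmt.Errorf.
            utilruntime.HandleErrorWithContext(ctx, err, "work item failed", "item", 42)
        }
    }

    func main() {
        ctx := klog.NewContext(context.Background(), klog.Background())
        worker(ctx)
    }
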
diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/doc.go b/vendor/k8s.io/apimachinery/pkg/util/sets/doc.go
index fd281bdb88..194883390c 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/sets/doc.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/sets/doc.go
@@ -16,4 +16,4 @@ limitations under the License.
// Package sets has generic set and specified sets. Generic set will
// replace specified ones over time. And specific ones are deprecated.
-package sets // import "k8s.io/apimachinery/pkg/util/sets"
+package sets
diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/set.go b/vendor/k8s.io/apimachinery/pkg/util/sets/set.go
index b76129a1ca..cd961c8c59 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/sets/set.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/sets/set.go
@@ -68,14 +68,8 @@ func (s Set[T]) Delete(items ...T) Set[T] {
// Clear empties the set.
// It is preferable to replace the set with a newly constructed set,
// but not all callers can do that (when there are other references to the map).
-// In some cases the set *won't* be fully cleared, e.g. a Set[float32] containing NaN
-// can't be cleared because NaN can't be removed.
-// For sets containing items of a type that is reflexive for ==,
-// this is optimized to a single call to runtime.mapclear().
func (s Set[T]) Clear() Set[T] {
- for key := range s {
- delete(s, key)
- }
+ clear(s)
return s
}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/field/error_matcher.go b/vendor/k8s.io/apimachinery/pkg/util/validation/field/error_matcher.go
new file mode 100644
index 0000000000..1d15deae86
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/validation/field/error_matcher.go
@@ -0,0 +1,212 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package field
+
+import (
+ "fmt"
+ "reflect"
+ "regexp"
+ "strings"
+)
+
+// ErrorMatcher is a helper for comparing Error objects.
+type ErrorMatcher struct {
+ // TODO(thockin): consider whether type is ever NOT required, maybe just
+ // assume it.
+ matchType bool
+ // TODO(thockin): consider whether field could be assumed - if the
+ // "want" error has a nil field, don't match on field.
+ matchField bool
+ // TODO(thockin): consider whether value could be assumed - if the
+ // "want" error has a nil value, don't match on field.
+ matchValue bool
+ matchOrigin bool
+ matchDetail func(want, got string) bool
+ requireOriginWhenInvalid bool
+}
+
+// Matches returns true if the two Error objects match according to the
+// configured criteria.
+func (m ErrorMatcher) Matches(want, got *Error) bool {
+ if m.matchType && want.Type != got.Type {
+ return false
+ }
+ if m.matchField && want.Field != got.Field {
+ return false
+ }
+ if m.matchValue && !reflect.DeepEqual(want.BadValue, got.BadValue) {
+ return false
+ }
+ if m.matchOrigin {
+ if want.Origin != got.Origin {
+ return false
+ }
+ if m.requireOriginWhenInvalid && want.Type == ErrorTypeInvalid {
+ if want.Origin == "" || got.Origin == "" {
+ return false
+ }
+ }
+ }
+ if m.matchDetail != nil && !m.matchDetail(want.Detail, got.Detail) {
+ return false
+ }
+ return true
+}
+
+// Render returns a string representation of the specified Error object,
+// according to the criteria configured in the ErrorMatcher.
+func (m ErrorMatcher) Render(e *Error) string {
+ buf := strings.Builder{}
+
+ comma := func() {
+ if buf.Len() > 0 {
+ buf.WriteString(", ")
+ }
+ }
+
+ if m.matchType {
+ comma()
+ buf.WriteString(fmt.Sprintf("Type=%q", e.Type))
+ }
+ if m.matchField {
+ comma()
+ buf.WriteString(fmt.Sprintf("Field=%q", e.Field))
+ }
+ if m.matchValue {
+ comma()
+ buf.WriteString(fmt.Sprintf("Value=%v", e.BadValue))
+ }
+ if m.matchOrigin || m.requireOriginWhenInvalid && e.Type == ErrorTypeInvalid {
+ comma()
+ buf.WriteString(fmt.Sprintf("Origin=%q", e.Origin))
+ }
+ if m.matchDetail != nil {
+ comma()
+ buf.WriteString(fmt.Sprintf("Detail=%q", e.Detail))
+ }
+ return "{" + buf.String() + "}"
+}
+
+// Exactly returns a derived ErrorMatcher which matches all fields exactly.
+func (m ErrorMatcher) Exactly() ErrorMatcher {
+ return m.ByType().ByField().ByValue().ByOrigin().ByDetailExact()
+}
+
+// ByType returns a derived ErrorMatcher which also matches by type.
+func (m ErrorMatcher) ByType() ErrorMatcher {
+ m.matchType = true
+ return m
+}
+
+// ByField returns a derived ErrorMatcher which also matches by field path.
+func (m ErrorMatcher) ByField() ErrorMatcher {
+ m.matchField = true
+ return m
+}
+
+// ByValue returns a derived ErrorMatcher which also matches by the errant
+// value.
+func (m ErrorMatcher) ByValue() ErrorMatcher {
+ m.matchValue = true
+ return m
+}
+
+// ByOrigin returns a derived ErrorMatcher which also matches by the origin.
+func (m ErrorMatcher) ByOrigin() ErrorMatcher {
+ m.matchOrigin = true
+ return m
+}
+
+// RequireOriginWhenInvalid returns a derived ErrorMatcher which also requires
+// the Origin field to be set when the Type is Invalid and the matcher is
+// matching by Origin.
+func (m ErrorMatcher) RequireOriginWhenInvalid() ErrorMatcher {
+ m.requireOriginWhenInvalid = true
+ return m
+}
+
+// ByDetailExact returns a derived ErrorMatcher which also matches errors by
+// the exact detail string.
+func (m ErrorMatcher) ByDetailExact() ErrorMatcher {
+ m.matchDetail = func(want, got string) bool {
+ return got == want
+ }
+ return m
+}
+
+// ByDetailSubstring returns a derived ErrorMatcher which also matches errors
+// by a substring of the detail string.
+func (m ErrorMatcher) ByDetailSubstring() ErrorMatcher {
+ m.matchDetail = func(want, got string) bool {
+ return strings.Contains(got, want)
+ }
+ return m
+}
+
+// ByDetailRegexp returns a derived ErrorMatcher which also matches errors by a
+// regular expression of the detail string, where the "want" string is assumed
+// to be a valid regular expression.
+func (m ErrorMatcher) ByDetailRegexp() ErrorMatcher {
+ m.matchDetail = func(want, got string) bool {
+ return regexp.MustCompile(want).MatchString(got)
+ }
+ return m
+}
+
+// TestIntf lets users pass a testing.T while not coupling this package to Go's
+// testing package.
+type TestIntf interface {
+ Helper()
+ Errorf(format string, args ...any)
+ Logf(format string, args ...any)
+}
+
+// Test compares two ErrorLists by the criteria configured in this matcher, and
+// fails the test if they don't match. If a given "want" error matches multiple
+// "got" errors, they will all be consumed. This might be OK (e.g. if there are
+// multiple errors on the same field from the same origin) or it might be an
+// insufficiently specific matcher, so these will be logged.
+func (m ErrorMatcher) Test(tb TestIntf, want, got ErrorList) {
+ tb.Helper()
+
+ remaining := got
+ for _, w := range want {
+ tmp := make(ErrorList, 0, len(remaining))
+ n := 0
+ for _, g := range remaining {
+ if m.Matches(w, g) {
+ n++
+ } else {
+ tmp = append(tmp, g)
+ }
+ }
+ if n == 0 {
+ tb.Errorf("expected an error matching:\n%s", m.Render(w))
+ } else if n > 1 {
+ // This is not necessarily an error, but it's worth logging in
+ // case it's not what the test author intended.
+ tb.Logf("multiple errors matched:\n%s", m.Render(w))
+ }
+ remaining = tmp
+ }
+ if len(remaining) > 0 {
+ for _, e := range remaining {
+ exactly := m.Exactly() // makes a copy
+ tb.Errorf("unmatched error:\n%s", exactly.Render(e))
+ }
+ }
+}
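
ErrorMatcher is aimed at validation unit tests: configure which fields of field.Error matter, then compare expected and actual ErrorLists without depending on exact detail strings. A hedged sketch of how a test might use it; validateName is a hypothetical function standing in for the code under test:

    package validation_test

    import (
        "testing"

        "k8s.io/apimachinery/pkg/util/validation/field"
    )

    // validateName is a hypothetical function under test, included only to make
    // the sketch self-contained.
    func validateName(name string) field.ErrorList {
        return field.ErrorList{
            field.Invalid(field.NewPath("metadata", "name"), name,
                "must be a lowercase RFC 1123 label").WithOrigin("format=dns-label"),
        }
    }

    func TestValidateName(t *testing.T) {
        want := field.ErrorList{
            field.Invalid(field.NewPath("metadata", "name"), nil, "").WithOrigin("format=dns-label"),
        }

        // Match on type, field path, and origin; ignore the free-form detail
        // string so the test survives message rewording.
        matcher := field.ErrorMatcher{}.ByType().ByField().ByOrigin()
        matcher.Test(t, want, validateName("Bad_Name"))
    }
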
diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go b/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go
index bc387d0116..840d645ee3 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go
@@ -33,13 +33,35 @@ type Error struct {
Field string
BadValue interface{}
Detail string
+
+ // Origin uniquely identifies where this error was generated from. It is used in testing to
+ // compare expected errors against actual errors without relying on exact detail string matching.
+ // This allows tests to verify the correct validation logic triggered the error
+ // regardless of how the error message might be formatted or localized.
+ //
+ // The value should be either:
+ // - A simple camelCase identifier (e.g., "maximum", "maxItems")
+ // - A structured format using "format=" for validation errors related to specific formats
+ // (e.g., "format=dns-label", "format=qualified-name")
+ //
+ // If the Origin corresponds to an existing declarative validation tag or JSON Schema keyword,
+ // use that same name for consistency.
+ //
+ // Origin should be set in the most deeply nested validation function that
+ // can still identify the unique source of the error.
+ Origin string
+
+ // CoveredByDeclarative is true when this error is covered by declarative
+ // validation. This field is to identify errors from imperative validation
+ // that should also be caught by declarative validation.
+ CoveredByDeclarative bool
}
var _ error = &Error{}
// Error implements the error interface.
-func (v *Error) Error() string {
- return fmt.Sprintf("%s: %s", v.Field, v.ErrorBody())
+func (e *Error) Error() string {
+ return fmt.Sprintf("%s: %s", e.Field, e.ErrorBody())
}
type OmitValueType struct{}
@@ -48,21 +70,21 @@ var omitValue = OmitValueType{}
// ErrorBody returns the error message without the field name. This is useful
// for building nice-looking higher-level error reporting.
-func (v *Error) ErrorBody() string {
+func (e *Error) ErrorBody() string {
var s string
switch {
- case v.Type == ErrorTypeRequired:
- s = v.Type.String()
- case v.Type == ErrorTypeForbidden:
- s = v.Type.String()
- case v.Type == ErrorTypeTooLong:
- s = v.Type.String()
- case v.Type == ErrorTypeInternal:
- s = v.Type.String()
- case v.BadValue == omitValue:
- s = v.Type.String()
+ case e.Type == ErrorTypeRequired:
+ s = e.Type.String()
+ case e.Type == ErrorTypeForbidden:
+ s = e.Type.String()
+ case e.Type == ErrorTypeTooLong:
+ s = e.Type.String()
+ case e.Type == ErrorTypeInternal:
+ s = e.Type.String()
+ case e.BadValue == omitValue:
+ s = e.Type.String()
default:
- value := v.BadValue
+ value := e.BadValue
valueType := reflect.TypeOf(value)
if value == nil || valueType == nil {
value = "null"
@@ -76,26 +98,38 @@ func (v *Error) ErrorBody() string {
switch t := value.(type) {
case int64, int32, float64, float32, bool:
// use simple printer for simple types
- s = fmt.Sprintf("%s: %v", v.Type, value)
+ s = fmt.Sprintf("%s: %v", e.Type, value)
case string:
- s = fmt.Sprintf("%s: %q", v.Type, t)
+ s = fmt.Sprintf("%s: %q", e.Type, t)
case fmt.Stringer:
// anything that defines String() is better than raw struct
- s = fmt.Sprintf("%s: %s", v.Type, t.String())
+ s = fmt.Sprintf("%s: %s", e.Type, t.String())
default:
// fallback to raw struct
// TODO: internal types have panic guards against json.Marshalling to prevent
// accidental use of internal types in external serialized form. For now, use
// %#v, although it would be better to show a more expressive output in the future
- s = fmt.Sprintf("%s: %#v", v.Type, value)
+ s = fmt.Sprintf("%s: %#v", e.Type, value)
}
}
- if len(v.Detail) != 0 {
- s += fmt.Sprintf(": %s", v.Detail)
+ if len(e.Detail) != 0 {
+ s += fmt.Sprintf(": %s", e.Detail)
}
return s
}
+// WithOrigin adds origin information to the Error
+func (e *Error) WithOrigin(o string) *Error {
+ e.Origin = o
+ return e
+}
+
+// MarkCoveredByDeclarative marks the error as covered by declarative validation.
+func (e *Error) MarkCoveredByDeclarative() *Error {
+ e.CoveredByDeclarative = true
+ return e
+}
+
// ErrorType is a machine readable value providing more detail about why
// a field is invalid. These values are expected to match 1-1 with
// CauseType in api/types.go.
@@ -169,32 +203,32 @@ func (t ErrorType) String() string {
// TypeInvalid returns a *Error indicating "type is invalid"
func TypeInvalid(field *Path, value interface{}, detail string) *Error {
- return &Error{ErrorTypeTypeInvalid, field.String(), value, detail}
+ return &Error{ErrorTypeTypeInvalid, field.String(), value, detail, "", false}
}
// NotFound returns a *Error indicating "value not found". This is
// used to report failure to find a requested value (e.g. looking up an ID).
func NotFound(field *Path, value interface{}) *Error {
- return &Error{ErrorTypeNotFound, field.String(), value, ""}
+ return &Error{ErrorTypeNotFound, field.String(), value, "", "", false}
}
// Required returns a *Error indicating "value required". This is used
// to report required values that are not provided (e.g. empty strings, null
// values, or empty arrays).
func Required(field *Path, detail string) *Error {
- return &Error{ErrorTypeRequired, field.String(), "", detail}
+ return &Error{ErrorTypeRequired, field.String(), "", detail, "", false}
}
// Duplicate returns a *Error indicating "duplicate value". This is
// used to report collisions of values that must be unique (e.g. names or IDs).
func Duplicate(field *Path, value interface{}) *Error {
- return &Error{ErrorTypeDuplicate, field.String(), value, ""}
+ return &Error{ErrorTypeDuplicate, field.String(), value, "", "", false}
}
// Invalid returns a *Error indicating "invalid value". This is used
// to report malformed values (e.g. failed regex match, too long, out of bounds).
func Invalid(field *Path, value interface{}, detail string) *Error {
- return &Error{ErrorTypeInvalid, field.String(), value, detail}
+ return &Error{ErrorTypeInvalid, field.String(), value, detail, "", false}
}
// NotSupported returns a *Error indicating "unsupported value".
@@ -209,7 +243,7 @@ func NotSupported[T ~string](field *Path, value interface{}, validValues []T) *E
}
detail = "supported values: " + strings.Join(quotedValues, ", ")
}
- return &Error{ErrorTypeNotSupported, field.String(), value, detail}
+ return &Error{ErrorTypeNotSupported, field.String(), value, detail, "", false}
}
// Forbidden returns a *Error indicating "forbidden". This is used to
@@ -217,29 +251,27 @@ func NotSupported[T ~string](field *Path, value interface{}, validValues []T) *E
// some conditions, but which are not permitted by current conditions (e.g.
// security policy).
func Forbidden(field *Path, detail string) *Error {
- return &Error{ErrorTypeForbidden, field.String(), "", detail}
+ return &Error{ErrorTypeForbidden, field.String(), "", detail, "", false}
}
-// TooLong returns a *Error indicating "too long". This is used to
-// report that the given value is too long. This is similar to
-// Invalid, but the returned error will not include the too-long
-// value.
+// TooLong returns a *Error indicating "too long". This is used to report that
+// the given value is too long. This is similar to Invalid, but the returned
+// error will not include the too-long value. If maxLength is negative, no
+// maximum length will be included in the message. The value argument is not used.
func TooLong(field *Path, value interface{}, maxLength int) *Error {
- return &Error{ErrorTypeTooLong, field.String(), value, fmt.Sprintf("must have at most %d bytes", maxLength)}
-}
-
-// TooLongMaxLength returns a *Error indicating "too long". This is used to
-// report that the given value is too long. This is similar to
-// Invalid, but the returned error will not include the too-long
-// value. If maxLength is negative, no max length will be included in the message.
-func TooLongMaxLength(field *Path, value interface{}, maxLength int) *Error {
var msg string
if maxLength >= 0 {
- msg = fmt.Sprintf("may not be longer than %d", maxLength)
+ msg = fmt.Sprintf("may not be more than %d bytes", maxLength)
} else {
msg = "value is too long"
}
- return &Error{ErrorTypeTooLong, field.String(), value, msg}
+ return &Error{ErrorTypeTooLong, field.String(), "", msg, "", false}
+}
+
+// TooLongMaxLength returns a *Error indicating "too long".
+// Deprecated: Use TooLong instead.
+func TooLongMaxLength(field *Path, value interface{}, maxLength int) *Error {
+ return TooLong(field, "", maxLength)
}
// TooMany returns a *Error indicating "too many". This is used to
@@ -261,14 +293,14 @@ func TooMany(field *Path, actualQuantity, maxQuantity int) *Error {
actual = omitValue
}
- return &Error{ErrorTypeTooMany, field.String(), actual, msg}
+ return &Error{ErrorTypeTooMany, field.String(), actual, msg, "", false}
}
// InternalError returns a *Error indicating "internal error". This is used
// to signal that an error was found that was not directly related to user
// input. The err argument must be non-nil.
func InternalError(field *Path, err error) *Error {
- return &Error{ErrorTypeInternal, field.String(), nil, err.Error()}
+ return &Error{ErrorTypeInternal, field.String(), nil, err.Error(), "", false}
}
// ErrorList holds a set of Errors. It is plausible that we might one day have
@@ -287,6 +319,22 @@ func NewErrorTypeMatcher(t ErrorType) utilerrors.Matcher {
}
}
+// WithOrigin sets the origin for all errors in the list and returns the updated list.
+func (list ErrorList) WithOrigin(origin string) ErrorList {
+ for _, err := range list {
+ err.Origin = origin
+ }
+ return list
+}
+
+// MarkCoveredByDeclarative marks all errors in the list as covered by declarative validation.
+func (list ErrorList) MarkCoveredByDeclarative() ErrorList {
+ for _, err := range list {
+ err.CoveredByDeclarative = true
+ }
+ return list
+}
+
// ToAggregate converts the ErrorList into an errors.Aggregate.
func (list ErrorList) ToAggregate() utilerrors.Aggregate {
if len(list) == 0 {
@@ -323,3 +371,25 @@ func (list ErrorList) Filter(fns ...utilerrors.Matcher) ErrorList {
// FilterOut takes an Aggregate and returns an Aggregate
return fromAggregate(err.(utilerrors.Aggregate))
}
+
+// ExtractCoveredByDeclarative returns a new ErrorList containing only the errors that should be covered by declarative validation.
+func (list ErrorList) ExtractCoveredByDeclarative() ErrorList {
+ newList := ErrorList{}
+ for _, err := range list {
+ if err.CoveredByDeclarative {
+ newList = append(newList, err)
+ }
+ }
+ return newList
+}
+
+// RemoveCoveredByDeclarative returns a new ErrorList containing only the errors that should not be covered by declarative validation.
+func (list ErrorList) RemoveCoveredByDeclarative() ErrorList {
+ newList := ErrorList{}
+ for _, err := range list {
+ if !err.CoveredByDeclarative {
+ newList = append(newList, err)
+ }
+ }
+ return newList
+}
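
The Origin and CoveredByDeclarative additions are meant to be chained onto errors as they are built, and the ErrorList helpers then split a mixed list into the declaratively covered and imperative-only portions. A small sketch under those assumptions:

    package main

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/util/validation/field"
    )

    func main() {
        errs := field.ErrorList{
            // This check is also implemented declaratively, so mark it.
            field.Invalid(field.NewPath("spec", "replicas"), -1,
                "must be greater than or equal to 0").
                WithOrigin("minimum").
                MarkCoveredByDeclarative(),
            // This one only exists in hand-written validation.
            field.Required(field.NewPath("spec", "selector"), ""),
        }

        covered := errs.ExtractCoveredByDeclarative()
        imperativeOnly := errs.RemoveCoveredByDeclarative()
        fmt.Println(len(covered), len(imperativeOnly)) // 1 1
    }
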
diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/ip.go b/vendor/k8s.io/apimachinery/pkg/util/validation/ip.go
new file mode 100644
index 0000000000..6e947c74c9
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/validation/ip.go
@@ -0,0 +1,278 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package validation
+
+import (
+ "fmt"
+ "net"
+ "net/netip"
+ "slices"
+
+ "k8s.io/apimachinery/pkg/util/validation/field"
+ "k8s.io/klog/v2"
+ netutils "k8s.io/utils/net"
+)
+
+func parseIP(fldPath *field.Path, value string, strictValidation bool) (net.IP, field.ErrorList) {
+ var allErrors field.ErrorList
+
+ ip := netutils.ParseIPSloppy(value)
+ if ip == nil {
+ allErrors = append(allErrors, field.Invalid(fldPath, value, "must be a valid IP address, (e.g. 10.9.8.7 or 2001:db8::ffff)"))
+ return nil, allErrors
+ }
+
+ if strictValidation {
+ addr, err := netip.ParseAddr(value)
+ if err != nil {
+ // If netutils.ParseIPSloppy parsed it, but netip.ParseAddr
+ // doesn't, then it must have illegal leading 0s.
+ allErrors = append(allErrors, field.Invalid(fldPath, value, "must not have leading 0s"))
+ }
+ if addr.Is4In6() {
+ allErrors = append(allErrors, field.Invalid(fldPath, value, "must not be an IPv4-mapped IPv6 address"))
+ }
+ }
+
+ return ip, allErrors
+}
+
+// IsValidIPForLegacyField tests that the argument is a valid IP address for a "legacy"
+// API field that predates strict IP validation. In particular, this allows IPs that are
+// not in canonical form (e.g., "FE80:0:0:0:0:0:0:0abc" instead of "fe80::abc").
+//
+// If strictValidation is false, this also allows IPs in certain invalid or ambiguous
+// formats:
+//
+// 1. IPv4 IPs are allowed to have leading "0"s in octets (e.g. "010.002.003.004").
+// Historically, net.ParseIP (and later netutils.ParseIPSloppy) simply ignored leading
+// "0"s in IPv4 addresses, but most libc-based software treats 0-prefixed IPv4 octets
+// as octal, meaning different software might interpret the same string as two
+// different IPs, potentially leading to security issues. (Current net.ParseIP and
+// netip.ParseAddr simply reject inputs with leading "0"s.)
+//
+// 2. IPv4-mapped IPv6 IPs (e.g. "::ffff:1.2.3.4") are allowed. These can also lead to
+// different software interpreting the value in different ways, because they may be
+// treated as IPv4 by some software and IPv6 by other software. (net.ParseIP and
+// netip.ParseAddr both allow these, but there are no use cases for representing IPv4
+// addresses as IPv4-mapped IPv6 addresses in Kubernetes.)
+//
+// Alternatively, when validating an update to an existing field, you can pass a list of
+// IP values from the old object that should be accepted if they appear in the new object
+// even if they are not valid.
+//
+// This function should only be used to validate the existing fields that were
+// historically validated in this way, and strictValidation should be true unless the
+// StrictIPCIDRValidation feature gate is disabled. Use IsValidIP for parsing new fields.
+func IsValidIPForLegacyField(fldPath *field.Path, value string, strictValidation bool, validOldIPs []string) field.ErrorList {
+ if slices.Contains(validOldIPs, value) {
+ return nil
+ }
+ _, allErrors := parseIP(fldPath, value, strictValidation)
+ return allErrors.WithOrigin("format=ip-sloppy")
+}
+
+// IsValidIP tests that the argument is a valid IP address, according to current
+// Kubernetes standards for IP address validation.
+func IsValidIP(fldPath *field.Path, value string) field.ErrorList {
+ ip, allErrors := parseIP(fldPath, value, true)
+ if len(allErrors) != 0 {
+ return allErrors.WithOrigin("format=ip-strict")
+ }
+
+ if value != ip.String() {
+ allErrors = append(allErrors, field.Invalid(fldPath, value, fmt.Sprintf("must be in canonical form (%q)", ip.String())))
+ }
+ return allErrors.WithOrigin("format=ip-strict")
+}
+
+// GetWarningsForIP returns warnings for IP address values in non-standard forms. This
+// should only be used with fields that are validated with IsValidIPForLegacyField().
+func GetWarningsForIP(fldPath *field.Path, value string) []string {
+ ip := netutils.ParseIPSloppy(value)
+ if ip == nil {
+ klog.ErrorS(nil, "GetWarningsForIP called on value that was not validated with IsValidIPForLegacyField", "field", fldPath, "value", value)
+ return nil
+ }
+
+ addr, _ := netip.ParseAddr(value)
+ if !addr.IsValid() || addr.Is4In6() {
+ // This catches 2 cases: leading 0s (if ParseIPSloppy() accepted it but
+ // ParseAddr() doesn't) or IPv4-mapped IPv6 (.Is4In6()). Either way,
+ // re-stringifying the net.IP value will give the preferred form.
+ return []string{
+ fmt.Sprintf("%s: non-standard IP address %q will be considered invalid in a future Kubernetes release: use %q", fldPath, value, ip.String()),
+ }
+ }
+
+ // If ParseIPSloppy() and ParseAddr() both accept it then it's fully valid, though
+ // it may be non-canonical.
+ if addr.Is6() && addr.String() != value {
+ return []string{
+ fmt.Sprintf("%s: IPv6 address %q should be in RFC 5952 canonical format (%q)", fldPath, value, addr.String()),
+ }
+ }
+
+ return nil
+}
+
+func parseCIDR(fldPath *field.Path, value string, strictValidation bool) (*net.IPNet, field.ErrorList) {
+ var allErrors field.ErrorList
+
+ _, ipnet, err := netutils.ParseCIDRSloppy(value)
+ if err != nil {
+ allErrors = append(allErrors, field.Invalid(fldPath, value, "must be a valid CIDR value, (e.g. 10.9.8.0/24 or 2001:db8::/64)"))
+ return nil, allErrors
+ }
+
+ if strictValidation {
+ prefix, err := netip.ParsePrefix(value)
+ if err != nil {
+ // If netutils.ParseCIDRSloppy parsed it, but netip.ParsePrefix
+ // doesn't, then it must have illegal leading 0s (either in the
+ // IP part or the prefix).
+ allErrors = append(allErrors, field.Invalid(fldPath, value, "must not have leading 0s in IP or prefix length"))
+ } else if prefix.Addr().Is4In6() {
+ allErrors = append(allErrors, field.Invalid(fldPath, value, "must not have an IPv4-mapped IPv6 address"))
+ } else if prefix.Addr() != prefix.Masked().Addr() {
+ allErrors = append(allErrors, field.Invalid(fldPath, value, "must not have bits set beyond the prefix length"))
+ }
+ }
+
+ return ipnet, allErrors
+}
+
+// IsValidCIDRForLegacyField tests that the argument is a valid CIDR value for a "legacy"
+// API field that predates strict IP validation. In particular, this allows IPs that are
+// not in canonical form (e.g., "FE80:0abc:0:0:0:0:0:0/64" instead of "fe80:abc::/64").
+//
+// If strictValidation is false, this also allows CIDR values in certain invalid or
+// ambiguous formats:
+//
+// 1. The IP part of the CIDR value is parsed as with IsValidIPForLegacyField with
+// strictValidation=false.
+//
+// 2. The CIDR value is allowed to be either a "subnet"/"mask" (with the lower bits after
+// the prefix length all being 0), or an "interface address" as with `ip addr` (with a
+// complete IP address and associated subnet length). With strict validation, the
+// value is required to be in "subnet"/"mask" form.
+//
+// 3. The prefix length is allowed to have leading 0s.
+//
+// Alternatively, when validating an update to an existing field, you can pass a list of
+// CIDR values from the old object that should be accepted if they appear in the new
+// object even if they are not valid.
+//
+// This function should only be used to validate the existing fields that were
+// historically validated in this way, and strictValidation should be true unless the
+// StrictIPCIDRValidation feature gate is disabled. Use IsValidCIDR or
+// IsValidInterfaceAddress for parsing new fields.
+func IsValidCIDRForLegacyField(fldPath *field.Path, value string, strictValidation bool, validOldCIDRs []string) field.ErrorList {
+ if slices.Contains(validOldCIDRs, value) {
+ return nil
+ }
+
+ _, allErrors := parseCIDR(fldPath, value, strictValidation)
+ return allErrors
+}
+
+// IsValidCIDR tests that the argument is a valid CIDR value, according to current
+// Kubernetes standards for CIDR validation. This function is only for
+// "subnet"/"mask"-style CIDR values (e.g., "192.168.1.0/24", with no bits set beyond the
+// prefix length). Use IsValidInterfaceAddress for "ifaddr"-style CIDR values.
+func IsValidCIDR(fldPath *field.Path, value string) field.ErrorList {
+ ipnet, allErrors := parseCIDR(fldPath, value, true)
+ if len(allErrors) != 0 {
+ return allErrors
+ }
+
+ if value != ipnet.String() {
+ allErrors = append(allErrors, field.Invalid(fldPath, value, fmt.Sprintf("must be in canonical form (%q)", ipnet.String())))
+ }
+ return allErrors
+}
+
+// GetWarningsForCIDR returns warnings for CIDR values in non-standard forms. This should
+// only be used with fields that are validated with IsValidCIDRForLegacyField().
+func GetWarningsForCIDR(fldPath *field.Path, value string) []string {
+ ip, ipnet, err := netutils.ParseCIDRSloppy(value)
+ if err != nil {
+ klog.ErrorS(err, "GetWarningsForCIDR called on value that was not validated with IsValidCIDRForLegacyField", "field", fldPath, "value", value)
+ return nil
+ }
+
+ var warnings []string
+
+ // Check for bits set after prefix length
+ if !ip.Equal(ipnet.IP) {
+ _, addrlen := ipnet.Mask.Size()
+ singleIPCIDR := fmt.Sprintf("%s/%d", ip.String(), addrlen)
+ warnings = append(warnings,
+ fmt.Sprintf("%s: CIDR value %q is ambiguous in this context (should be %q or %q?)", fldPath, value, ipnet.String(), singleIPCIDR),
+ )
+ }
+
+ prefix, _ := netip.ParsePrefix(value)
+ addr := prefix.Addr()
+ if !prefix.IsValid() || addr.Is4In6() {
+ // This catches 2 cases: leading 0s (if ParseCIDRSloppy() accepted it but
+ // ParsePrefix() doesn't) or IPv4-mapped IPv6 (.Is4In6()). Either way,
+ // re-stringifying the net.IPNet value will give the preferred form.
+ warnings = append(warnings,
+ fmt.Sprintf("%s: non-standard CIDR value %q will be considered invalid in a future Kubernetes release: use %q", fldPath, value, ipnet.String()),
+ )
+ }
+
+ // If ParseCIDRSloppy() and ParsePrefix() both accept it then it's fully valid,
+ // though it may be non-canonical. But only check this if there are no other
+ // warnings, since either of the other warnings would also cause a round-trip
+ // failure.
+ if len(warnings) == 0 && addr.Is6() && prefix.String() != value {
+ warnings = append(warnings,
+ fmt.Sprintf("%s: IPv6 CIDR value %q should be in RFC 5952 canonical format (%q)", fldPath, value, prefix.String()),
+ )
+ }
+
+ return warnings
+}
+
+// IsValidInterfaceAddress tests that the argument is a valid "ifaddr"-style CIDR value in
+// canonical form (e.g., "192.168.1.5/24", with a complete IP address and associated
+// subnet length). Use IsValidCIDR for "subnet"/"mask"-style CIDR values (e.g.,
+// "192.168.1.0/24").
+func IsValidInterfaceAddress(fldPath *field.Path, value string) field.ErrorList {
+ var allErrors field.ErrorList
+ ip, ipnet, err := netutils.ParseCIDRSloppy(value)
+ if err != nil {
+ allErrors = append(allErrors, field.Invalid(fldPath, value, "must be a valid address in CIDR form, (e.g. 10.9.8.7/24 or 2001:db8::1/64)"))
+ return allErrors
+ }
+
+ // The canonical form of `value` is not `ipnet.String()`, because `ipnet` doesn't
+ // include the bits after the prefix. We need to construct the canonical form
+ // ourselves from `ip` and `ipnet.Mask`.
+ maskSize, _ := ipnet.Mask.Size()
+ if netutils.IsIPv4(ip) && maskSize > net.IPv4len*8 {
+ // "::ffff:192.168.0.1/120" -> "192.168.0.1/24"
+ maskSize -= (net.IPv6len - net.IPv4len) * 8
+ }
+ canonical := fmt.Sprintf("%s/%d", ip.String(), maskSize)
+ if value != canonical {
+ allErrors = append(allErrors, field.Invalid(fldPath, value, fmt.Sprintf("must be in canonical form (%q)", canonical)))
+ }
+ return allErrors
+}
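
The new ip.go centralizes strict and legacy IP/CIDR validation: IsValidIP enforces canonical form, while IsValidIPForLegacyField keeps accepting sloppy values and GetWarningsForIP points callers at the preferred form. A brief usage sketch; the field path and example values are illustrative:

    package main

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/util/validation"
        "k8s.io/apimachinery/pkg/util/validation/field"
    )

    func main() {
        fld := field.NewPath("spec", "clusterIP")

        // Strict validation: non-canonical IPv6 is rejected with a
        // "must be in canonical form" error and origin "format=ip-strict".
        fmt.Println(validation.IsValidIP(fld, "2001:DB8::1"))

        // Legacy fields keep accepting sloppy values (here, leading zeros),
        // but a warning recommends the canonical spelling.
        fmt.Println(validation.IsValidIPForLegacyField(fld, "010.1.2.3", false, nil))
        for _, w := range validation.GetWarningsForIP(fld, "010.1.2.3") {
            fmt.Println(w)
        }
    }
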
diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go b/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go
index b32644902b..b6be7af1b1 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go
@@ -24,7 +24,6 @@ import (
"unicode"
"k8s.io/apimachinery/pkg/util/validation/field"
- netutils "k8s.io/utils/net"
)
const qnameCharFmt string = "[A-Za-z0-9]"
@@ -175,6 +174,8 @@ func IsValidLabelValue(value string) []string {
}
const dns1123LabelFmt string = "[a-z0-9]([-a-z0-9]*[a-z0-9])?"
+const dns1123LabelFmtWithUnderscore string = "_?[a-z0-9]([-_a-z0-9]*[a-z0-9])?"
+
const dns1123LabelErrMsg string = "a lowercase RFC 1123 label must consist of lower case alphanumeric characters or '-', and must start and end with an alphanumeric character"
// DNS1123LabelMaxLength is a label's max length in DNS (RFC 1123)
@@ -204,10 +205,14 @@ func IsDNS1123Label(value string) []string {
const dns1123SubdomainFmt string = dns1123LabelFmt + "(\\." + dns1123LabelFmt + ")*"
const dns1123SubdomainErrorMsg string = "a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character"
+const dns1123SubdomainFmtWithUnderscore string = dns1123LabelFmtWithUnderscore + "(\\." + dns1123LabelFmtWithUnderscore + ")*"
+const dns1123SubdomainErrorMsgFG string = "a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '_', '-' or '.', and must start and end with an alphanumeric character"
+
// DNS1123SubdomainMaxLength is a subdomain's max length in DNS (RFC 1123)
const DNS1123SubdomainMaxLength int = 253
var dns1123SubdomainRegexp = regexp.MustCompile("^" + dns1123SubdomainFmt + "$")
+var dns1123SubdomainRegexpWithUnderscore = regexp.MustCompile("^" + dns1123SubdomainFmtWithUnderscore + "$")
// IsDNS1123Subdomain tests for a string that conforms to the definition of a
// subdomain in DNS (RFC 1123).
@@ -222,6 +227,19 @@ func IsDNS1123Subdomain(value string) []string {
return errs
}
+// IsDNS1123SubdomainWithUnderscore tests for a string that conforms to the definition of a
+// subdomain in DNS (RFC 1123), but allows the use of an underscore in the string
+func IsDNS1123SubdomainWithUnderscore(value string) []string {
+ var errs []string
+ if len(value) > DNS1123SubdomainMaxLength {
+ errs = append(errs, MaxLenError(DNS1123SubdomainMaxLength))
+ }
+ if !dns1123SubdomainRegexpWithUnderscore.MatchString(value) {
+ errs = append(errs, RegexError(dns1123SubdomainErrorMsgFG, dns1123SubdomainFmt, "example.com"))
+ }
+ return errs
+}
+
const dns1035LabelFmt string = "[a-z]([-a-z0-9]*[a-z0-9])?"
const dns1035LabelErrMsg string = "a DNS-1035 label must consist of lower case alphanumeric characters or '-', start with an alphabetic character, and end with an alphanumeric character"
@@ -350,45 +368,6 @@ func IsValidPortName(port string) []string {
return errs
}
-// IsValidIP tests that the argument is a valid IP address.
-func IsValidIP(fldPath *field.Path, value string) field.ErrorList {
- var allErrors field.ErrorList
- if netutils.ParseIPSloppy(value) == nil {
- allErrors = append(allErrors, field.Invalid(fldPath, value, "must be a valid IP address, (e.g. 10.9.8.7 or 2001:db8::ffff)"))
- }
- return allErrors
-}
-
-// IsValidIPv4Address tests that the argument is a valid IPv4 address.
-func IsValidIPv4Address(fldPath *field.Path, value string) field.ErrorList {
- var allErrors field.ErrorList
- ip := netutils.ParseIPSloppy(value)
- if ip == nil || ip.To4() == nil {
- allErrors = append(allErrors, field.Invalid(fldPath, value, "must be a valid IPv4 address"))
- }
- return allErrors
-}
-
-// IsValidIPv6Address tests that the argument is a valid IPv6 address.
-func IsValidIPv6Address(fldPath *field.Path, value string) field.ErrorList {
- var allErrors field.ErrorList
- ip := netutils.ParseIPSloppy(value)
- if ip == nil || ip.To4() != nil {
- allErrors = append(allErrors, field.Invalid(fldPath, value, "must be a valid IPv6 address"))
- }
- return allErrors
-}
-
-// IsValidCIDR tests that the argument is a valid CIDR value.
-func IsValidCIDR(fldPath *field.Path, value string) field.ErrorList {
- var allErrors field.ErrorList
- _, _, err := netutils.ParseCIDRSloppy(value)
- if err != nil {
- allErrors = append(allErrors, field.Invalid(fldPath, value, "must be a valid CIDR value, (e.g. 10.9.8.0/24 or 2001:db8::/64)"))
- }
- return allErrors
-}
-
const percentFmt string = "[0-9]+%"
const percentErrMsg string = "a valid percent string must be a numeric string followed by an ending '%'"
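
IsDNS1123SubdomainWithUnderscore relaxes only the character set; the length, case, and leading/trailing alphanumeric rules still apply. A quick sketch of the expected behavior, with the results in comments inferred from the regexps above:

    package main

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/util/validation"
    )

    func main() {
        // Underscores are tolerated inside labels, so this passes.
        fmt.Println(validation.IsDNS1123SubdomainWithUnderscore("prometheus_io.example.com")) // []
        // Uppercase is still rejected, as with IsDNS1123Subdomain.
        fmt.Println(validation.IsDNS1123SubdomainWithUnderscore("Foo.example.com")) // one error
    }
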
diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/backoff.go b/vendor/k8s.io/apimachinery/pkg/util/wait/backoff.go
index 4187619256..177be09a95 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/wait/backoff.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/wait/backoff.go
@@ -157,6 +157,8 @@ func (b Backoff) DelayWithReset(c clock.Clock, resetInterval time.Duration) Dela
// Until is syntactic sugar on top of JitterUntil with zero jitter factor and
// with sliding = true (which means the timer for period starts after the f
// completes).
+//
+// Contextual logging: UntilWithContext should be used instead of Until in code which supports contextual logging.
func Until(f func(), period time.Duration, stopCh <-chan struct{}) {
JitterUntil(f, period, 0.0, true, stopCh)
}
@@ -176,6 +178,8 @@ func UntilWithContext(ctx context.Context, f func(context.Context), period time.
// NonSlidingUntil is syntactic sugar on top of JitterUntil with zero jitter
// factor, with sliding = false (meaning the timer for period starts at the same
// time as the function starts).
+//
+// Contextual logging: NonSlidingUntilWithContext should be used instead of NonSlidingUntil in code which supports contextual logging.
func NonSlidingUntil(f func(), period time.Duration, stopCh <-chan struct{}) {
JitterUntil(f, period, 0.0, false, stopCh)
}
@@ -200,19 +204,44 @@ func NonSlidingUntilWithContext(ctx context.Context, f func(context.Context), pe
//
// Close stopCh to stop. f may not be invoked if stop channel is already
// closed. Pass NeverStop if you don't want it to stop.
+//
+// Contextual logging: JitterUntilWithContext should be used instead of JitterUntil in code which supports contextual logging.
func JitterUntil(f func(), period time.Duration, jitterFactor float64, sliding bool, stopCh <-chan struct{}) {
BackoffUntil(f, NewJitteredBackoffManager(period, jitterFactor, &clock.RealClock{}), sliding, stopCh)
}
+// JitterUntilWithContext loops until context is done, running f every period.
+//
+// If jitterFactor is positive, the period is jittered before every run of f.
+// If jitterFactor is not positive, the period is unchanged and not jittered.
+//
+// If sliding is true, the period is computed after f runs. If it is false then
+// period includes the runtime for f.
+//
+// Cancel context to stop. f may not be invoked if context is already done.
+func JitterUntilWithContext(ctx context.Context, f func(context.Context), period time.Duration, jitterFactor float64, sliding bool) {
+ BackoffUntilWithContext(ctx, f, NewJitteredBackoffManager(period, jitterFactor, &clock.RealClock{}), sliding)
+}
+
// BackoffUntil loops until stop channel is closed, run f every duration given by BackoffManager.
//
// If sliding is true, the period is computed after f runs. If it is false then
// period includes the runtime for f.
+//
+// Contextual logging: BackoffUntilWithContext should be used instead of BackoffUntil in code which supports contextual logging.
func BackoffUntil(f func(), backoff BackoffManager, sliding bool, stopCh <-chan struct{}) {
+ BackoffUntilWithContext(ContextForChannel(stopCh), func(context.Context) { f() }, backoff, sliding)
+}
+
+// BackoffUntilWithContext loops until context is done, run f every duration given by BackoffManager.
+//
+// If sliding is true, the period is computed after f runs. If it is false then
+// period includes the runtime for f.
+func BackoffUntilWithContext(ctx context.Context, f func(ctx context.Context), backoff BackoffManager, sliding bool) {
var t clock.Timer
for {
select {
- case <-stopCh:
+ case <-ctx.Done():
return
default:
}
@@ -222,8 +251,8 @@ func BackoffUntil(f func(), backoff BackoffManager, sliding bool, stopCh <-chan
}
func() {
- defer runtime.HandleCrash()
- f()
+ defer runtime.HandleCrashWithContext(ctx)
+ f(ctx)
}()
if sliding {
@@ -236,7 +265,7 @@ func BackoffUntil(f func(), backoff BackoffManager, sliding bool, stopCh <-chan
// In order to mitigate we re-check stopCh at the beginning
// of every loop to prevent extra executions of f().
select {
- case <-stopCh:
+ case <-ctx.Done():
if !t.Stop() {
<-t.C()
}
@@ -246,19 +275,6 @@ func BackoffUntil(f func(), backoff BackoffManager, sliding bool, stopCh <-chan
}
}
-// JitterUntilWithContext loops until context is done, running f every period.
-//
-// If jitterFactor is positive, the period is jittered before every run of f.
-// If jitterFactor is not positive, the period is unchanged and not jittered.
-//
-// If sliding is true, the period is computed after f runs. If it is false then
-// period includes the runtime for f.
-//
-// Cancel context to stop. f may not be invoked if context is already expired.
-func JitterUntilWithContext(ctx context.Context, f func(context.Context), period time.Duration, jitterFactor float64, sliding bool) {
- JitterUntil(func() { f(ctx) }, period, jitterFactor, sliding, ctx.Done())
-}
-
// backoffManager provides simple backoff behavior in a threadsafe manner to a caller.
type backoffManager struct {
backoff Backoff
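
With the context-aware wait helpers, loop shutdown is driven by context cancellation and panics inside f are routed through HandleCrashWithContext, so they are logged with the logger carried in ctx. A minimal sketch using JitterUntilWithContext; the period and jitter factor are arbitrary:

    package main

    import (
        "context"
        "fmt"
        "time"

        "k8s.io/apimachinery/pkg/util/wait"
    )

    func main() {
        ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
        defer cancel()

        // Runs f roughly every 500ms (plus up to 10% jitter) until ctx is done.
        wait.JitterUntilWithContext(ctx, func(ctx context.Context) {
            fmt.Println("tick")
        }, 500*time.Millisecond, 0.1, true)
    }
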
diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/doc.go b/vendor/k8s.io/apimachinery/pkg/util/wait/doc.go
index 3f0c968ec9..ff89dc170e 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/wait/doc.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/wait/doc.go
@@ -16,4 +16,4 @@ limitations under the License.
// Package wait provides tools for polling or listening for changes
// to a condition.
-package wait // import "k8s.io/apimachinery/pkg/util/wait"
+package wait
diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/loop.go b/vendor/k8s.io/apimachinery/pkg/util/wait/loop.go
index 107bfc132f..9f9b929ffa 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/wait/loop.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/wait/loop.go
@@ -49,7 +49,7 @@ func loopConditionUntilContext(ctx context.Context, t Timer, immediate, sliding
// if we haven't requested immediate execution, delay once
if immediate {
if ok, err := func() (bool, error) {
- defer runtime.HandleCrash()
+ defer runtime.HandleCrashWithContext(ctx)
return condition(ctx)
}(); err != nil || ok {
return err
@@ -83,7 +83,7 @@ func loopConditionUntilContext(ctx context.Context, t Timer, immediate, sliding
t.Next()
}
if ok, err := func() (bool, error) {
- defer runtime.HandleCrash()
+ defer runtime.HandleCrashWithContext(ctx)
return condition(ctx)
}(); err != nil || ok {
return err
diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go b/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go
index 6805e8cf94..7379a8d5ac 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go
@@ -80,6 +80,10 @@ func Forever(f func(), period time.Duration) {
Until(f, period, NeverStop)
}
+// jitterRand is a dedicated random source for jitter calculations.
+// It defaults to rand.Float64, but is a package variable so it can be overridden to make unit tests deterministic.
+var jitterRand = rand.Float64
+
// Jitter returns a time.Duration between duration and duration + maxFactor *
// duration.
//
@@ -89,7 +93,7 @@ func Jitter(duration time.Duration, maxFactor float64) time.Duration {
if maxFactor <= 0.0 {
maxFactor = 1.0
}
- wait := duration + time.Duration(rand.Float64()*maxFactor*float64(duration))
+ wait := duration + time.Duration(jitterRand()*maxFactor*float64(duration))
return wait
}
@@ -141,6 +145,7 @@ func (c channelContext) Value(key any) any { return nil }
//
// Deprecated: Will be removed when the legacy polling methods are removed.
func runConditionWithCrashProtection(condition ConditionFunc) (bool, error) {
+ //nolint:logcheck // Already deprecated.
defer runtime.HandleCrash()
return condition()
}
@@ -150,7 +155,7 @@ func runConditionWithCrashProtection(condition ConditionFunc) (bool, error) {
//
// Deprecated: Will be removed when the legacy polling methods are removed.
func runConditionWithCrashProtectionWithContext(ctx context.Context, condition ConditionWithContextFunc) (bool, error) {
- defer runtime.HandleCrash()
+ defer runtime.HandleCrashWithContext(ctx)
return condition(ctx)
}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go b/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go
index 9837b3df28..7342f8d1e6 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go
@@ -20,10 +20,12 @@ import (
"bufio"
"bytes"
"encoding/json"
+ "errors"
"fmt"
"io"
"strings"
"unicode"
+ "unicode/utf8"
jsonutil "k8s.io/apimachinery/pkg/util/json"
@@ -92,7 +94,7 @@ func UnmarshalStrict(data []byte, v interface{}) error {
// YAML decoding path is not used (so that error messages are
// JSON specific).
func ToJSON(data []byte) ([]byte, error) {
- if hasJSONPrefix(data) {
+ if IsJSONBuffer(data) {
return data, nil
}
return yaml.YAMLToJSON(data)
@@ -102,7 +104,8 @@ func ToJSON(data []byte) ([]byte, error) {
// separating individual documents. It first converts the YAML
// body to JSON, then unmarshals the JSON.
type YAMLToJSONDecoder struct {
- reader Reader
+ reader Reader
+ inputOffset int
}
// NewYAMLToJSONDecoder decodes YAML documents from the provided
@@ -121,7 +124,7 @@ func NewYAMLToJSONDecoder(r io.Reader) *YAMLToJSONDecoder {
// yaml.Unmarshal.
func (d *YAMLToJSONDecoder) Decode(into interface{}) error {
bytes, err := d.reader.Read()
- if err != nil && err != io.EOF {
+ if err != nil && err != io.EOF { //nolint:errorlint
return err
}
@@ -131,9 +134,14 @@ func (d *YAMLToJSONDecoder) Decode(into interface{}) error {
return YAMLSyntaxError{err}
}
}
+ d.inputOffset += len(bytes)
return err
}
+func (d *YAMLToJSONDecoder) InputOffset() int {
+ return d.inputOffset
+}
+
// YAMLDecoder reads chunks of objects and returns ErrShortBuffer if
// the data is not sufficient.
type YAMLDecoder struct {
@@ -229,18 +237,20 @@ func splitYAMLDocument(data []byte, atEOF bool) (advance int, token []byte, err
return 0, nil, nil
}
-// decoder is a convenience interface for Decode.
-type decoder interface {
- Decode(into interface{}) error
-}
-
-// YAMLOrJSONDecoder attempts to decode a stream of JSON documents or
-// YAML documents by sniffing for a leading { character.
+// YAMLOrJSONDecoder attempts to decode a stream of JSON or YAML documents.
+// While JSON is YAML, the way Go's JSON decode defines a multi-document stream
+// is a series of JSON objects (e.g. {}{}), but YAML defines a multi-document
+// stream as a series of documents separated by "---".
+//
+// This decoder will attempt to decode the stream as JSON first, and if that
+// fails, it will switch to YAML. Once it determines the stream is JSON (by
+// finding a non-YAML-delimited series of objects), it will not switch to YAML.
+// Once it switches to YAML it will not switch back to JSON.
type YAMLOrJSONDecoder struct {
- r io.Reader
- bufferSize int
-
- decoder decoder
+ json *json.Decoder
+ yaml *YAMLToJSONDecoder
+ stream *StreamReader
+ count int // how many objects have been decoded
}
type JSONSyntaxError struct {
@@ -265,31 +275,108 @@ func (e YAMLSyntaxError) Error() string {
// how far into the stream the decoder will look to figure out whether this
// is a JSON stream (has whitespace followed by an open brace).
func NewYAMLOrJSONDecoder(r io.Reader, bufferSize int) *YAMLOrJSONDecoder {
- return &YAMLOrJSONDecoder{
- r: r,
- bufferSize: bufferSize,
+ d := &YAMLOrJSONDecoder{}
+
+ reader, _, mightBeJSON := GuessJSONStream(r, bufferSize)
+ d.stream = reader
+ if mightBeJSON {
+ d.json = json.NewDecoder(reader)
+ } else {
+ d.yaml = NewYAMLToJSONDecoder(reader)
}
+ return d
}
// Decode unmarshals the next object from the underlying stream into the
// provide object, or returns an error.
func (d *YAMLOrJSONDecoder) Decode(into interface{}) error {
- if d.decoder == nil {
- buffer, _, isJSON := GuessJSONStream(d.r, d.bufferSize)
- if isJSON {
- d.decoder = json.NewDecoder(buffer)
+ // Because we don't know if this is a JSON or YAML stream, a failure from
+ // both decoders is ambiguous. When in doubt, it will return the error from
+ // the JSON decoder. Unfortunately, this means that if the first document
+ // is invalid YAML, the error won't be awesome.
+ // TODO: the errors from YAML are not great, we could improve them a lot.
+ var firstErr error
+ if d.json != nil {
+ err := d.json.Decode(into)
+ if err == nil {
+ d.stream.Consume(int(d.json.InputOffset()) - d.stream.Consumed())
+ d.count++
+ return nil
+ }
+ if err == io.EOF { //nolint:errorlint
+ return err
+ }
+ var syntax *json.SyntaxError
+ if ok := errors.As(err, &syntax); ok {
+ firstErr = JSONSyntaxError{
+ Offset: syntax.Offset,
+ Err: syntax,
+ }
} else {
- d.decoder = NewYAMLToJSONDecoder(buffer)
+ firstErr = err
+ }
+ if d.count > 1 {
+ // If we found 0 or 1 JSON object(s), this stream is still
+ // ambiguous. But if we found more than 1 JSON object, then this
+ // is an unambiguous JSON stream, and we should not switch to YAML.
+ return err
+ }
+ // If JSON decoding hits the end of one object and then fails on the
+ // next, it leaves any leading whitespace in the buffer, which can
+ // confuse the YAML decoder. We just eat any whitespace we find, up to
+ // and including the first newline.
+ d.stream.Rewind()
+ if err := d.consumeWhitespace(); err == nil {
+ d.yaml = NewYAMLToJSONDecoder(d.stream)
+ }
+ d.json = nil
+ }
+ if d.yaml != nil {
+ err := d.yaml.Decode(into)
+ if err == nil {
+ d.stream.Consume(d.yaml.InputOffset() - d.stream.Consumed())
+ d.count++
+ return nil
+ }
+ if err == io.EOF { //nolint:errorlint
+ return err
+ }
+ if firstErr == nil {
+ firstErr = err
}
}
- err := d.decoder.Decode(into)
- if syntax, ok := err.(*json.SyntaxError); ok {
- return JSONSyntaxError{
- Offset: syntax.Offset,
- Err: syntax,
+ if firstErr != nil {
+ return firstErr
+ }
+ return fmt.Errorf("decoding failed as both JSON and YAML")
+}
+
+func (d *YAMLOrJSONDecoder) consumeWhitespace() error {
+ consumed := 0
+ for {
+ buf, err := d.stream.ReadN(4)
+ if err != nil && err == io.EOF { //nolint:errorlint
+ return err
+ }
+ r, sz := utf8.DecodeRune(buf)
+ if r == utf8.RuneError || sz == 0 {
+ return fmt.Errorf("invalid utf8 rune")
+ }
+ d.stream.RewindN(len(buf) - sz)
+ if !unicode.IsSpace(r) {
+ d.stream.RewindN(sz)
+ d.stream.Consume(consumed)
+ return nil
+ }
+ if r == '\n' {
+ d.stream.Consume(consumed)
+ return nil
+ }
+ if err == io.EOF { //nolint:errorlint
+ break
}
}
- return err
+ return io.EOF
}
type Reader interface {
@@ -311,7 +398,7 @@ func (r *YAMLReader) Read() ([]byte, error) {
var buffer bytes.Buffer
for {
line, err := r.reader.Read()
- if err != nil && err != io.EOF {
+ if err != nil && err != io.EOF { //nolint:errorlint
return nil, err
}
@@ -329,11 +416,11 @@ func (r *YAMLReader) Read() ([]byte, error) {
if buffer.Len() != 0 {
return buffer.Bytes(), nil
}
- if err == io.EOF {
+ if err == io.EOF { //nolint:errorlint
return nil, err
}
}
- if err == io.EOF {
+ if err == io.EOF { //nolint:errorlint
if buffer.Len() != 0 {
// If we're at EOF, we have a final, non-terminated line. Return it.
return buffer.Bytes(), nil
@@ -369,26 +456,20 @@ func (r *LineReader) Read() ([]byte, error) {
// GuessJSONStream scans the provided reader up to size, looking
// for an open brace indicating this is JSON. It will return the
// bufio.Reader it creates for the consumer.
-func GuessJSONStream(r io.Reader, size int) (io.Reader, []byte, bool) {
- buffer := bufio.NewReaderSize(r, size)
+func GuessJSONStream(r io.Reader, size int) (*StreamReader, []byte, bool) {
+ buffer := NewStreamReader(r, size)
b, _ := buffer.Peek(size)
- return buffer, b, hasJSONPrefix(b)
+ return buffer, b, IsJSONBuffer(b)
}
// IsJSONBuffer scans the provided buffer, looking
// for an open brace indicating this is JSON.
func IsJSONBuffer(buf []byte) bool {
- return hasJSONPrefix(buf)
+ return hasPrefix(buf, jsonPrefix)
}
var jsonPrefix = []byte("{")
-// hasJSONPrefix returns true if the provided buffer appears to start with
-// a JSON open brace.
-func hasJSONPrefix(buf []byte) bool {
- return hasPrefix(buf, jsonPrefix)
-}
-
// Return true if the first non-whitespace bytes in buf is
// prefix.
func hasPrefix(buf []byte, prefix []byte) bool {
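For orientation, a minimal sketch (not part of this diff) of how a consumer drives the reworked YAMLOrJSONDecoder; the input documents below are made up:

package main

import (
	"fmt"
	"io"
	"strings"

	utilyaml "k8s.io/apimachinery/pkg/util/yaml"
)

func main() {
	// Two YAML documents; a stream starting with "{" would be decoded as JSON instead.
	stream := strings.NewReader("kind: ConfigMap\n---\nkind: Secret\n")
	dec := utilyaml.NewYAMLOrJSONDecoder(stream, 4096)
	for {
		var obj map[string]interface{}
		err := dec.Decode(&obj)
		if err == io.EOF {
			break
		}
		if err != nil {
			fmt.Println("decode error:", err)
			break
		}
		fmt.Println("decoded kind:", obj["kind"])
	}
}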
diff --git a/vendor/k8s.io/apimachinery/pkg/util/yaml/stream_reader.go b/vendor/k8s.io/apimachinery/pkg/util/yaml/stream_reader.go
new file mode 100644
index 0000000000..d06991057f
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/yaml/stream_reader.go
@@ -0,0 +1,130 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package yaml
+
+import "io"
+
+// StreamReader is a reader designed for consuming streams of variable-length
+// messages. It buffers data until it is explicitly consumed, and can be
+// rewound to re-read previous data.
+type StreamReader struct {
+ r io.Reader
+ buf []byte
+ head int // current read offset into buf
+ ttlConsumed int // number of bytes which have been consumed
+}
+
+// NewStreamReader creates a new StreamReader wrapping the provided
+// io.Reader.
+func NewStreamReader(r io.Reader, size int) *StreamReader {
+ if size == 0 {
+ size = 4096
+ }
+ return &StreamReader{
+ r: r,
+ buf: make([]byte, 0, size), // Start with a reasonable capacity
+ }
+}
+
+// Read implements io.Reader. It first returns any buffered data after the
+// current offset, and if that's exhausted, reads from the underlying reader
+// and buffers the data. The returned data is not considered consumed until the
+// Consume method is called.
+func (r *StreamReader) Read(p []byte) (n int, err error) {
+ // If we have buffered data, return it
+ if r.head < len(r.buf) {
+ n = copy(p, r.buf[r.head:])
+ r.head += n
+ return n, nil
+ }
+
+ // If we've already hit EOF, return it
+ if r.r == nil {
+ return 0, io.EOF
+ }
+
+ // Read from the underlying reader
+ n, err = r.r.Read(p)
+ if n > 0 {
+ r.buf = append(r.buf, p[:n]...)
+ r.head += n
+ }
+ if err == nil {
+ return n, nil
+ }
+ if err == io.EOF {
+ // Store that we've hit EOF by setting r to nil
+ r.r = nil
+ }
+ return n, err
+}
+
+// ReadN reads exactly n bytes from the reader, blocking until all bytes are
+// read or an error occurs. If an error occurs, the number of bytes read is
+// returned along with the error. If EOF is hit before n bytes are read, this
+// will return the bytes read so far, along with io.EOF. The returned data is
+// not considered consumed until the Consume method is called.
+func (r *StreamReader) ReadN(want int) ([]byte, error) {
+ ret := make([]byte, want)
+ off := 0
+ for off < want {
+ n, err := r.Read(ret[off:])
+ if err != nil {
+ return ret[:off+n], err
+ }
+ off += n
+ }
+ return ret, nil
+}
+
+// Peek returns the next n bytes without advancing the reader. The returned
+// bytes are valid until the next call to Consume.
+func (r *StreamReader) Peek(n int) ([]byte, error) {
+ buf, err := r.ReadN(n)
+ r.RewindN(len(buf))
+ if err != nil {
+ return buf, err
+ }
+ return buf, nil
+}
+
+// Rewind resets the reader to the beginning of the buffered data.
+func (r *StreamReader) Rewind() {
+ r.head = 0
+}
+
+// RewindN rewinds the reader by n bytes. If n is greater than the current
+// buffer, the reader is rewound to the beginning of the buffer.
+func (r *StreamReader) RewindN(n int) {
+ r.head -= min(n, r.head)
+}
+
+// Consume discards up to n bytes of previously read data from the beginning of
+// the buffer. Once consumed, that data is no longer available for rewinding.
+// If n is greater than the current buffer, the buffer is cleared. Consume
+// never consumes data from the underlying reader.
+func (r *StreamReader) Consume(n int) {
+ n = min(n, len(r.buf))
+ r.buf = r.buf[n:]
+ r.head -= n
+ r.ttlConsumed += n
+}
+
+// Consumed returns the number of bytes consumed from the input reader.
+func (r *StreamReader) Consumed() int {
+ return r.ttlConsumed
+}
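A short sketch (again not part of the diff) of the Peek/ReadN/Rewind/Consume cycle that the decoder above relies on; the input bytes are made up:

package main

import (
	"fmt"
	"strings"

	utilyaml "k8s.io/apimachinery/pkg/util/yaml"
)

func main() {
	r := utilyaml.NewStreamReader(strings.NewReader(`{"kind":"Pod"}`), 0)

	head, _ := r.Peek(1) // look at the first byte without advancing
	fmt.Println("starts with a brace:", string(head) == "{")

	buf, _ := r.ReadN(6) // read 6 bytes; they stay buffered until Consume
	fmt.Printf("read %q, consumed so far: %d\n", buf, r.Consumed())

	r.RewindN(3)           // step back 3 bytes so they can be re-read
	again, _ := r.ReadN(3) // re-reads "ind" from the buffer
	fmt.Printf("re-read %q\n", again)

	r.Consume(6) // discard the first 6 bytes for good
	fmt.Println("consumed after Consume:", r.Consumed())
}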
diff --git a/vendor/k8s.io/apimachinery/pkg/version/doc.go b/vendor/k8s.io/apimachinery/pkg/version/doc.go
index 29574fd6d5..5f446a4f4a 100644
--- a/vendor/k8s.io/apimachinery/pkg/version/doc.go
+++ b/vendor/k8s.io/apimachinery/pkg/version/doc.go
@@ -16,5 +16,5 @@ limitations under the License.
// +k8s:openapi-gen=true
-// Package version supplies the type for version information collected at build time.
-package version // import "k8s.io/apimachinery/pkg/version"
+// Package version supplies the type for version information.
+package version
diff --git a/vendor/k8s.io/apimachinery/pkg/version/types.go b/vendor/k8s.io/apimachinery/pkg/version/types.go
index 72727b503b..6a18f9e91d 100644
--- a/vendor/k8s.io/apimachinery/pkg/version/types.go
+++ b/vendor/k8s.io/apimachinery/pkg/version/types.go
@@ -20,15 +20,25 @@ package version
// TODO: Add []string of api versions supported? It's still unclear
// how we'll want to distribute that information.
type Info struct {
- Major string `json:"major"`
- Minor string `json:"minor"`
- GitVersion string `json:"gitVersion"`
- GitCommit string `json:"gitCommit"`
- GitTreeState string `json:"gitTreeState"`
- BuildDate string `json:"buildDate"`
- GoVersion string `json:"goVersion"`
- Compiler string `json:"compiler"`
- Platform string `json:"platform"`
+ // Major is the major version of the binary version
+ Major string `json:"major"`
+ // Minor is the minor version of the binary version
+ Minor string `json:"minor"`
+ // EmulationMajor is the major version of the emulation version
+ EmulationMajor string `json:"emulationMajor,omitempty"`
+ // EmulationMinor is the minor version of the emulation version
+ EmulationMinor string `json:"emulationMinor,omitempty"`
+ // MinCompatibilityMajor is the major version of the minimum compatibility version
+ MinCompatibilityMajor string `json:"minCompatibilityMajor,omitempty"`
+ // MinCompatibilityMinor is the minor version of the minimum compatibility version
+ MinCompatibilityMinor string `json:"minCompatibilityMinor,omitempty"`
+ GitVersion string `json:"gitVersion"`
+ GitCommit string `json:"gitCommit"`
+ GitTreeState string `json:"gitTreeState"`
+ BuildDate string `json:"buildDate"`
+ GoVersion string `json:"goVersion"`
+ Compiler string `json:"compiler"`
+ Platform string `json:"platform"`
}
// String returns info as a human-friendly version string.
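A small sketch of what the new fields look like on the wire: because they are omitempty, existing consumers of /version responses see no difference unless the server actually sets them. The version numbers below are made up.

package main

import (
	"encoding/json"
	"fmt"

	"k8s.io/apimachinery/pkg/version"
)

func main() {
	info := version.Info{
		Major:          "1",
		Minor:          "33",
		EmulationMajor: "1",
		EmulationMinor: "32",
		GitVersion:     "v1.33.0",
	}
	out, _ := json.MarshalIndent(info, "", "  ")
	fmt.Println(string(out))
	// minCompatibilityMajor/Minor are absent from the output because they are empty.
}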
diff --git a/vendor/k8s.io/apimachinery/pkg/watch/doc.go b/vendor/k8s.io/apimachinery/pkg/watch/doc.go
index 7e6bf3fb95..5fde5e7427 100644
--- a/vendor/k8s.io/apimachinery/pkg/watch/doc.go
+++ b/vendor/k8s.io/apimachinery/pkg/watch/doc.go
@@ -16,4 +16,4 @@ limitations under the License.
// Package watch contains a generic watchable interface, and a fake for
// testing code that uses the watch interface.
-package watch // import "k8s.io/apimachinery/pkg/watch"
+package watch
diff --git a/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go b/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go
index 42dcac2b9e..b422ca9f55 100644
--- a/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go
+++ b/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go
@@ -51,6 +51,7 @@ type Reporter interface {
// StreamWatcher turns any stream for which you can write a Decoder interface
// into a watch.Interface.
type StreamWatcher struct {
+ logger klog.Logger
sync.Mutex
source Decoder
reporter Reporter
@@ -59,8 +60,16 @@ type StreamWatcher struct {
}
// NewStreamWatcher creates a StreamWatcher from the given decoder.
+//
+// Contextual logging: NewStreamWatcherWithLogger should be used instead of NewStreamWatcher in code which supports contextual logging.
func NewStreamWatcher(d Decoder, r Reporter) *StreamWatcher {
+ return NewStreamWatcherWithLogger(klog.Background(), d, r)
+}
+
+// NewStreamWatcherWithLogger creates a StreamWatcher from the given decoder and logger.
+func NewStreamWatcherWithLogger(logger klog.Logger, d Decoder, r Reporter) *StreamWatcher {
sw := &StreamWatcher{
+ logger: logger,
source: d,
reporter: r,
// It's easy for a consumer to add buffering via an extra
@@ -98,7 +107,7 @@ func (sw *StreamWatcher) Stop() {
// receive reads result from the decoder in a loop and sends down the result channel.
func (sw *StreamWatcher) receive() {
- defer utilruntime.HandleCrash()
+ defer utilruntime.HandleCrashWithLogger(sw.logger)
defer close(sw.result)
defer sw.Stop()
for {
@@ -108,10 +117,10 @@ func (sw *StreamWatcher) receive() {
case io.EOF:
// watch closed normally
case io.ErrUnexpectedEOF:
- klog.V(1).Infof("Unexpected EOF during watch stream event decoding: %v", err)
+ sw.logger.V(1).Info("Unexpected EOF during watch stream event decoding", "err", err)
default:
if net.IsProbableEOF(err) || net.IsTimeout(err) {
- klog.V(5).Infof("Unable to decode an event from the watch stream: %v", err)
+ sw.logger.V(5).Info("Unable to decode an event from the watch stream", "err", err)
} else {
select {
case <-sw.done:
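A hedged sketch of wiring a contextual logger through the new constructor; fixedDecoder is a made-up Decoder used only for illustration, and the nil Reporter is tolerable here because it is only consulted for decode errors other than EOF:

package main

import (
	"fmt"
	"io"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/klog/v2"
)

// fixedDecoder replays canned events and then signals EOF.
type fixedDecoder struct {
	events []watch.Event
	next   int
}

func (d *fixedDecoder) Decode() (watch.EventType, runtime.Object, error) {
	if d.next >= len(d.events) {
		return "", nil, io.EOF // ends the watch normally
	}
	ev := d.events[d.next]
	d.next++
	return ev.Type, ev.Object, nil
}

func (d *fixedDecoder) Close() {}

func main() {
	logger := klog.Background()
	w := watch.NewStreamWatcherWithLogger(logger, &fixedDecoder{}, nil)
	defer w.Stop()
	for ev := range w.ResultChan() {
		fmt.Println("event:", ev.Type)
	}
}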
diff --git a/vendor/k8s.io/apimachinery/pkg/watch/watch.go b/vendor/k8s.io/apimachinery/pkg/watch/watch.go
index b6c7bbfa8f..251459834b 100644
--- a/vendor/k8s.io/apimachinery/pkg/watch/watch.go
+++ b/vendor/k8s.io/apimachinery/pkg/watch/watch.go
@@ -23,17 +23,30 @@ import (
"k8s.io/klog/v2"
"k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/utils/ptr"
)
// Interface can be implemented by anything that knows how to watch and report changes.
type Interface interface {
- // Stop stops watching. Will close the channel returned by ResultChan(). Releases
- // any resources used by the watch.
+ // Stop tells the producer that the consumer is done watching, so the
+ // producer should stop sending events and close the result channel. The
+ // consumer should keep watching for events until the result channel is
+ // closed.
+ //
+ // Because some implementations may create channels when constructed, Stop
+ // must always be called, even if the consumer has not yet called
+ // ResultChan().
+ //
+ // Only the consumer should call Stop(), not the producer. If the producer
+ // errors and needs to stop the watch prematurely, it should instead send
+ // an error event and close the result channel.
Stop()
- // ResultChan returns a chan which will receive all the events. If an error occurs
- // or Stop() is called, the implementation will close this channel and
- // release any resources used by the watch.
+ // ResultChan returns a channel which will receive events from the event
+ // producer. If an error occurs or Stop() is called, the producer must
+ // close this channel and release any resources used by the watch.
+ // Closing the result channel tells the consumer that no more events will be
+ // sent.
ResultChan() <-chan Event
}
@@ -91,29 +104,42 @@ func (w emptyWatch) ResultChan() <-chan Event {
// FakeWatcher lets you test anything that consumes a watch.Interface; threadsafe.
type FakeWatcher struct {
+ logger klog.Logger
result chan Event
stopped bool
sync.Mutex
}
+var _ Interface = &FakeWatcher{}
+
+// Contextual logging: NewFakeWithOptions and a logger in the FakeOptions should be used instead in code which supports contextual logging.
func NewFake() *FakeWatcher {
- return &FakeWatcher{
- result: make(chan Event),
- }
+ return NewFakeWithOptions(FakeOptions{})
}
+// Contextual logging: NewFakeWithOptions and a logger in the FakeOptions should be used instead in code which supports contextual logging.
func NewFakeWithChanSize(size int, blocking bool) *FakeWatcher {
+ return NewFakeWithOptions(FakeOptions{ChannelSize: size})
+}
+
+func NewFakeWithOptions(options FakeOptions) *FakeWatcher {
return &FakeWatcher{
- result: make(chan Event, size),
+ logger: ptr.Deref(options.Logger, klog.Background()),
+ result: make(chan Event, options.ChannelSize),
}
}
+type FakeOptions struct {
+ Logger *klog.Logger
+ ChannelSize int
+}
+
// Stop implements Interface.Stop().
func (f *FakeWatcher) Stop() {
f.Lock()
defer f.Unlock()
if !f.stopped {
- klog.V(4).Infof("Stopping fake watcher.")
+ f.logger.V(4).Info("Stopping fake watcher")
close(f.result)
f.stopped = true
}
@@ -164,13 +190,22 @@ func (f *FakeWatcher) Action(action EventType, obj runtime.Object) {
// RaceFreeFakeWatcher lets you test anything that consumes a watch.Interface; threadsafe.
type RaceFreeFakeWatcher struct {
+ logger klog.Logger
result chan Event
Stopped bool
sync.Mutex
}
+var _ Interface = &RaceFreeFakeWatcher{}
+
+// Contextual logging: RaceFreeFakeWatcherWithLogger should be used instead of NewRaceFreeFake in code which supports contextual logging.
func NewRaceFreeFake() *RaceFreeFakeWatcher {
+ return NewRaceFreeFakeWithLogger(klog.Background())
+}
+
+func NewRaceFreeFakeWithLogger(logger klog.Logger) *RaceFreeFakeWatcher {
return &RaceFreeFakeWatcher{
+ logger: logger,
result: make(chan Event, DefaultChanSize),
}
}
@@ -180,7 +215,7 @@ func (f *RaceFreeFakeWatcher) Stop() {
f.Lock()
defer f.Unlock()
if !f.Stopped {
- klog.V(4).Infof("Stopping fake watcher.")
+ f.logger.V(4).Info("Stopping fake watcher")
close(f.result)
f.Stopped = true
}
@@ -322,3 +357,21 @@ func (pw *ProxyWatcher) ResultChan() <-chan Event {
func (pw *ProxyWatcher) StopChan() <-chan struct{} {
return pw.stopCh
}
+
+// MockWatcher implements watch.Interface with mockable functions.
+type MockWatcher struct {
+ StopFunc func()
+ ResultChanFunc func() <-chan Event
+}
+
+var _ Interface = &MockWatcher{}
+
+// Stop calls StopFunc
+func (mw MockWatcher) Stop() {
+ mw.StopFunc()
+}
+
+// ResultChan calls ResultChanFunc
+func (mw MockWatcher) ResultChan() <-chan Event {
+ return mw.ResultChanFunc()
+}
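A sketch of the new test helpers in use; the ktesting logger and the nil object passed to Add are illustrative assumptions, not part of this diff:

package watch_test

import (
	"testing"

	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/klog/v2/ktesting"
	"k8s.io/utils/ptr"
)

func TestFakeAndMockWatchers(t *testing.T) {
	logger, _ := ktesting.NewTestContext(t)

	// FakeOptions lets the fake emit its "Stopping fake watcher" line
	// through a per-test logger instead of the global klog state.
	fake := watch.NewFakeWithOptions(watch.FakeOptions{
		Logger:      ptr.To(logger),
		ChannelSize: 10, // buffered, so Add below does not block
	})
	defer fake.Stop()
	fake.Add(nil) // a real test would send a runtime.Object here

	// MockWatcher delegates to plain functions, handy for stubbing.
	events := make(chan watch.Event)
	mock := watch.MockWatcher{
		StopFunc:       func() { close(events) },
		ResultChanFunc: func() <-chan watch.Event { return events },
	}
	mock.Stop()
	if _, ok := <-mock.ResultChan(); ok {
		t.Fatal("expected closed channel")
	}
}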
diff --git a/vendor/k8s.io/client-go/features/envvar.go b/vendor/k8s.io/client-go/features/envvar.go
new file mode 100644
index 0000000000..8c3f887dc4
--- /dev/null
+++ b/vendor/k8s.io/client-go/features/envvar.go
@@ -0,0 +1,188 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package features
+
+import (
+ "fmt"
+ "os"
+ "strconv"
+ "sync"
+ "sync/atomic"
+
+ "k8s.io/apimachinery/pkg/util/naming"
+ utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+ "k8s.io/klog/v2"
+)
+
+// internalPackages are packages that are ignored when creating a name for featureGates. These packages are in the common
+// call chains, so they'd be unhelpful as names.
+var internalPackages = []string{"k8s.io/client-go/features/envvar.go"}
+
+var _ Gates = &envVarFeatureGates{}
+
+// newEnvVarFeatureGates creates a feature gate that allows for registration
+// of features and checking if the features are enabled.
+//
+// On the first call to Enabled, the effective state of all known features is loaded from
+// environment variables. The environment variable read for a given feature is formed by
+// concatenating the prefix "KUBE_FEATURE_" with the feature's name.
+//
+// For example, if you have a feature named "MyFeature"
+// setting an environmental variable "KUBE_FEATURE_MyFeature"
+// will allow you to configure the state of that feature.
+//
+// Please note that environmental variables can only be set to a boolean value.
+// Incorrect values will be ignored and logged.
+//
+// Features can also be set directly via the Set method.
+// In that case, these features take precedence over
+// features set via environmental variables.
+func newEnvVarFeatureGates(features map[Feature]FeatureSpec) *envVarFeatureGates {
+ known := map[Feature]FeatureSpec{}
+ for name, spec := range features {
+ known[name] = spec
+ }
+
+ fg := &envVarFeatureGates{
+ callSiteName: naming.GetNameFromCallsite(internalPackages...),
+ known: known,
+ }
+ fg.enabledViaEnvVar.Store(map[Feature]bool{})
+ fg.enabledViaSetMethod = map[Feature]bool{}
+
+ return fg
+}
+
+// envVarFeatureGates implements Gates and allows for feature registration.
+type envVarFeatureGates struct {
+ // callSiteName holds the name of the file
+ // that created this instance
+ callSiteName string
+
+ // readEnvVarsOnce guards reading environmental variables
+ readEnvVarsOnce sync.Once
+
+ // known holds known feature gates
+ known map[Feature]FeatureSpec
+
+ // enabledViaEnvVar holds a map[Feature]bool
+ // with values explicitly set via env var
+ enabledViaEnvVar atomic.Value
+
+ // lockEnabledViaSetMethod protects enabledViaSetMethod
+ lockEnabledViaSetMethod sync.RWMutex
+
+ // enabledViaSetMethod holds values explicitly set
+ // via Set method, features stored in this map take
+ // precedence over features stored in enabledViaEnvVar
+ enabledViaSetMethod map[Feature]bool
+
+ // readEnvVars holds the boolean value which
+ // indicates whether readEnvVarsOnce has been called.
+ readEnvVars atomic.Bool
+}
+
+// Enabled returns true if the key is enabled. If the key is not known, this call will panic.
+func (f *envVarFeatureGates) Enabled(key Feature) bool {
+ if v, ok := f.wasFeatureEnabledViaSetMethod(key); ok {
+ // ensure that the state of all known features
+ // is loaded from environment variables
+ // on the first call to Enabled method.
+ if !f.hasAlreadyReadEnvVar() {
+ _ = f.getEnabledMapFromEnvVar()
+ }
+ return v
+ }
+ if v, ok := f.getEnabledMapFromEnvVar()[key]; ok {
+ return v
+ }
+ if v, ok := f.known[key]; ok {
+ return v.Default
+ }
+ panic(fmt.Errorf("feature %q is not registered in FeatureGates %q", key, f.callSiteName))
+}
+
+// Set sets the given feature to the given value.
+//
+// Features set via this method take precedence over
+// the features set via environment variables.
+func (f *envVarFeatureGates) Set(featureName Feature, featureValue bool) error {
+ feature, ok := f.known[featureName]
+ if !ok {
+ return fmt.Errorf("feature %q is not registered in FeatureGates %q", featureName, f.callSiteName)
+ }
+ if feature.LockToDefault && feature.Default != featureValue {
+ return fmt.Errorf("cannot set feature gate %q to %v, feature is locked to %v", featureName, featureValue, feature.Default)
+ }
+
+ f.lockEnabledViaSetMethod.Lock()
+ defer f.lockEnabledViaSetMethod.Unlock()
+ f.enabledViaSetMethod[featureName] = featureValue
+
+ return nil
+}
+
+// getEnabledMapFromEnvVar will fill the enabled map on the first call.
+// This is the only time a known feature can be set to a value
+// read from the corresponding environmental variable.
+func (f *envVarFeatureGates) getEnabledMapFromEnvVar() map[Feature]bool {
+ f.readEnvVarsOnce.Do(func() {
+ featureGatesState := map[Feature]bool{}
+ for feature, featureSpec := range f.known {
+ featureState, featureStateSet := os.LookupEnv(fmt.Sprintf("KUBE_FEATURE_%s", feature))
+ if !featureStateSet {
+ continue
+ }
+ boolVal, boolErr := strconv.ParseBool(featureState)
+ switch {
+ case boolErr != nil:
+ utilruntime.HandleError(fmt.Errorf("cannot set feature gate %q to %q, due to %v", feature, featureState, boolErr))
+ case featureSpec.LockToDefault:
+ if boolVal != featureSpec.Default {
+ utilruntime.HandleError(fmt.Errorf("cannot set feature gate %q to %q, feature is locked to %v", feature, featureState, featureSpec.Default))
+ break
+ }
+ featureGatesState[feature] = featureSpec.Default
+ default:
+ featureGatesState[feature] = boolVal
+ }
+ }
+ f.enabledViaEnvVar.Store(featureGatesState)
+ f.readEnvVars.Store(true)
+
+ for feature, featureSpec := range f.known {
+ if featureState, ok := featureGatesState[feature]; ok {
+ klog.V(1).InfoS("Feature gate updated state", "feature", feature, "enabled", featureState)
+ continue
+ }
+ klog.V(1).InfoS("Feature gate default state", "feature", feature, "enabled", featureSpec.Default)
+ }
+ })
+ return f.enabledViaEnvVar.Load().(map[Feature]bool)
+}
+
+func (f *envVarFeatureGates) wasFeatureEnabledViaSetMethod(key Feature) (bool, bool) {
+ f.lockEnabledViaSetMethod.RLock()
+ defer f.lockEnabledViaSetMethod.RUnlock()
+
+ value, found := f.enabledViaSetMethod[key]
+ return value, found
+}
+
+func (f *envVarFeatureGates) hasAlreadyReadEnvVar() bool {
+ return f.readEnvVars.Load()
+}
diff --git a/vendor/k8s.io/client-go/features/features.go b/vendor/k8s.io/client-go/features/features.go
new file mode 100644
index 0000000000..5ccdcc55f6
--- /dev/null
+++ b/vendor/k8s.io/client-go/features/features.go
@@ -0,0 +1,143 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package features
+
+import (
+ "errors"
+ "sync/atomic"
+
+ utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+)
+
+// NOTE: types Feature, FeatureSpec, prerelease (and its values)
+// were duplicated from the component-base repository
+//
+// for more information please refer to https://docs.google.com/document/d/1g9BGCRw-7ucUxO6OtCWbb3lfzUGA_uU9178wLdXAIfs
+
+const (
+ // Values for PreRelease.
+ Alpha = prerelease("ALPHA")
+ Beta = prerelease("BETA")
+ GA = prerelease("")
+
+ // Deprecated
+ Deprecated = prerelease("DEPRECATED")
+)
+
+type prerelease string
+
+type Feature string
+
+type FeatureSpec struct {
+ // Default is the default enablement state for the feature
+ Default bool
+ // LockToDefault indicates that the feature is locked to its default and cannot be changed
+ LockToDefault bool
+ // PreRelease indicates the maturity level of the feature
+ PreRelease prerelease
+}
+
+// Gates indicates whether a given feature is enabled or not.
+type Gates interface {
+ // Enabled returns true if the key is enabled.
+ Enabled(key Feature) bool
+}
+
+// Registry represents an external feature gates registry.
+type Registry interface {
+ // Add adds existing feature gates to the provided registry.
+ //
+ // As of today, this method is used by AddFeaturesToExistingFeatureGates and
+ // ReplaceFeatureGates to take control of the features exposed by this library.
+ Add(map[Feature]FeatureSpec) error
+}
+
+// FeatureGates returns the feature gates exposed by this library.
+//
+// By default, only the default feature gates will be returned.
+// The default implementation allows controlling the features
+// via environmental variables.
+// For example, if you have a feature named "MyFeature"
+// setting an environmental variable "KUBE_FEATURE_MyFeature"
+// will allow you to configure the state of that feature.
+//
+// Please note that the actual set of the feature gates
+// might be overwritten by calling ReplaceFeatureGates method.
+func FeatureGates() Gates {
+ return featureGates.Load().(*featureGatesWrapper).Gates
+}
+
+// AddFeaturesToExistingFeatureGates adds the default feature gates to the provided registry.
+// Usually this function is combined with ReplaceFeatureGates to take control of the
+// features exposed by this library.
+func AddFeaturesToExistingFeatureGates(registry Registry) error {
+ return registry.Add(defaultKubernetesFeatureGates)
+}
+
+// ReplaceFeatureGates overwrites the default implementation of the feature gates
+// used by this library.
+//
+// Useful for binaries that would like to have full control of the features
+// exposed by this library, such as allowing consumers of a binary
+// to interact with the features via a command line flag.
+//
+// For example:
+//
+// // first, register client-go's features to your registry.
+// clientgofeaturegate.AddFeaturesToExistingFeatureGates(utilfeature.DefaultMutableFeatureGate)
+// // then replace client-go's feature gates implementation with your implementation
+// clientgofeaturegate.ReplaceFeatureGates(utilfeature.DefaultMutableFeatureGate)
+func ReplaceFeatureGates(newFeatureGates Gates) {
+ if replaceFeatureGatesWithWarningIndicator(newFeatureGates) {
+ utilruntime.HandleError(errors.New("the default feature gates implementation has already been used and now it's being overwritten. This might lead to unexpected behaviour. Check your initialization order"))
+ }
+}
+
+func replaceFeatureGatesWithWarningIndicator(newFeatureGates Gates) bool {
+ shouldProduceWarning := false
+
+ if defaultFeatureGates, ok := FeatureGates().(*envVarFeatureGates); ok {
+ if defaultFeatureGates.hasAlreadyReadEnvVar() {
+ shouldProduceWarning = true
+ }
+ }
+ wrappedFeatureGates := &featureGatesWrapper{newFeatureGates}
+ featureGates.Store(wrappedFeatureGates)
+
+ return shouldProduceWarning
+}
+
+func init() {
+ envVarGates := newEnvVarFeatureGates(defaultKubernetesFeatureGates)
+
+ wrappedFeatureGates := &featureGatesWrapper{envVarGates}
+ featureGates.Store(wrappedFeatureGates)
+}
+
+// featureGatesWrapper a thin wrapper to satisfy featureGates variable (atomic.Value).
+// That is, all calls to Store for a given Value must use values of the same concrete type.
+type featureGatesWrapper struct {
+ Gates
+}
+
+var (
+ // featureGates is a shared global FeatureGates.
+ //
+ // Top-level commands/options setup that needs to modify this feature gates
+ // should use AddFeaturesToExistingFeatureGates followed by ReplaceFeatureGates.
+ featureGates = &atomic.Value{}
+)
diff --git a/vendor/k8s.io/client-go/features/known_features.go b/vendor/k8s.io/client-go/features/known_features.go
new file mode 100644
index 0000000000..344d2ebb72
--- /dev/null
+++ b/vendor/k8s.io/client-go/features/known_features.go
@@ -0,0 +1,84 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package features
+
+const (
+ // Every feature gate should add method here following this template:
+ //
+ // // owner: @username
+ // // alpha: v1.4
+ // MyFeature featuregate.Feature = "MyFeature"
+ //
+ // Feature gates should be listed in alphabetical, case-sensitive
+ // (upper before any lower case character) order. This reduces the risk
+ // of code conflicts because changes are more likely to be scattered
+ // across the file.
+
+ // owner: @benluddy
+ // kep: https://kep.k8s.io/4222
+ // alpha: 1.32
+ //
+ // If disabled, clients configured to accept "application/cbor" will instead accept
+ // "application/json" with the same relative preference, and clients configured to write
+ // "application/cbor" or "application/apply-patch+cbor" will instead write
+ // "application/json" or "application/apply-patch+yaml", respectively.
+ ClientsAllowCBOR Feature = "ClientsAllowCBOR"
+
+ // owner: @benluddy
+ // kep: https://kep.k8s.io/4222
+ // alpha: 1.32
+ //
+ // If enabled, and only if ClientsAllowCBOR is also enabled, the default request content
+ // type (if not explicitly configured) and the dynamic client's request content type both
+ // become "application/cbor" instead of "application/json". The default content type for
+ // apply patch requests becomes "application/apply-patch+cbor" instead of
+ // "application/apply-patch+yaml".
+ ClientsPreferCBOR Feature = "ClientsPreferCBOR"
+
+ // owner: @nilekhc
+ // alpha: v1.30
+ InformerResourceVersion Feature = "InformerResourceVersion"
+
+ // owner: @deads2k
+ // beta: v1.33
+ //
+ // Refactor informers to deliver watch stream events in order instead of out of order.
+ InOrderInformers Feature = "InOrderInformers"
+
+ // owner: @p0lyn0mial
+ // beta: v1.30
+ //
+ // Allow the client to get a stream of individual items instead of chunking from the server.
+ //
+ // NOTE:
+ // The feature is disabled in Beta by default because
+ // it will only be turned on for selected control plane component(s).
+ WatchListClient Feature = "WatchListClient"
+)
+
+// defaultKubernetesFeatureGates consists of all known Kubernetes-specific feature keys.
+//
+// To add a new feature, define a key for it above and add it here.
+// After registering with the binary, the features are, by default, controllable using environment variables.
+// For more details, please see envVarFeatureGates implementation.
+var defaultKubernetesFeatureGates = map[Feature]FeatureSpec{
+ ClientsAllowCBOR: {Default: false, PreRelease: Alpha},
+ ClientsPreferCBOR: {Default: false, PreRelease: Alpha},
+ InformerResourceVersion: {Default: false, PreRelease: Alpha},
+ InOrderInformers: {Default: true, PreRelease: Beta},
+ WatchListClient: {Default: false, PreRelease: Beta},
+}
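A sketch of flipping one of these gates through its environment variable; the variable has to be set before the first Enabled call, because the process environment is read once and then cached:

package main

import (
	"fmt"
	"os"

	clientfeatures "k8s.io/client-go/features"
)

func main() {
	os.Setenv("KUBE_FEATURE_WatchListClient", "true") // KUBE_FEATURE_<name>

	if clientfeatures.FeatureGates().Enabled(clientfeatures.WatchListClient) {
		fmt.Println("WatchListClient enabled via environment variable")
	}
}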
diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/doc.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/doc.go
index b99459757e..486790fa69 100644
--- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/doc.go
+++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/doc.go
@@ -17,4 +17,4 @@ limitations under the License.
// +k8s:deepcopy-gen=package
// +groupName=client.authentication.k8s.io
-package clientauthentication // import "k8s.io/client-go/pkg/apis/clientauthentication"
+package clientauthentication
diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/doc.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/doc.go
index 94ca35c2c9..e378b75cf6 100644
--- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/doc.go
+++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/doc.go
@@ -21,4 +21,4 @@ limitations under the License.
// +groupName=client.authentication.k8s.io
-package v1 // import "k8s.io/client-go/pkg/apis/clientauthentication/v1"
+package v1
diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/doc.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/doc.go
index 22d1c588bc..6eb6a98157 100644
--- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/doc.go
+++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/doc.go
@@ -21,4 +21,4 @@ limitations under the License.
// +groupName=client.authentication.k8s.io
-package v1beta1 // import "k8s.io/client-go/pkg/apis/clientauthentication/v1beta1"
+package v1beta1
diff --git a/vendor/k8s.io/client-go/pkg/version/doc.go b/vendor/k8s.io/client-go/pkg/version/doc.go
index 05e997e133..c3ace74513 100644
--- a/vendor/k8s.io/client-go/pkg/version/doc.go
+++ b/vendor/k8s.io/client-go/pkg/version/doc.go
@@ -18,4 +18,4 @@ limitations under the License.
// Package version supplies version information collected at build time to
// kubernetes components.
-package version // import "k8s.io/client-go/pkg/version"
+package version
diff --git a/vendor/k8s.io/client-go/rest/.mockery.yaml b/vendor/k8s.io/client-go/rest/.mockery.yaml
new file mode 100644
index 0000000000..e21d7b5be2
--- /dev/null
+++ b/vendor/k8s.io/client-go/rest/.mockery.yaml
@@ -0,0 +1,10 @@
+---
+dir: .
+filename: "mock_{{.InterfaceName | snakecase}}_test.go"
+boilerplate-file: ../../../../../hack/boilerplate/boilerplate.generatego.txt
+outpkg: rest
+with-expecter: true
+packages:
+ k8s.io/client-go/rest:
+ interfaces:
+ BackoffManager:
diff --git a/vendor/k8s.io/client-go/rest/client.go b/vendor/k8s.io/client-go/rest/client.go
index 60df7e568c..a085c334f9 100644
--- a/vendor/k8s.io/client-go/rest/client.go
+++ b/vendor/k8s.io/client-go/rest/client.go
@@ -17,16 +17,21 @@ limitations under the License.
package rest
import (
+ "fmt"
+ "mime"
"net/http"
"net/url"
"os"
"strconv"
"strings"
+ "sync/atomic"
"time"
+ "github.com/munnerz/goautoneg"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
+ clientfeatures "k8s.io/client-go/features"
"k8s.io/client-go/util/flowcontrol"
)
@@ -85,10 +90,10 @@ type RESTClient struct {
versionedAPIPath string
// content describes how a RESTClient encodes and decodes responses.
- content ClientContentConfig
+ content requestClientContentConfigProvider
// creates BackoffManager that is passed to requests.
- createBackoffMgr func() BackoffManager
+ createBackoffMgr func() BackoffManagerWithContext
// rateLimiter is shared among all requests created by this client unless specifically
// overridden.
@@ -96,7 +101,7 @@ type RESTClient struct {
// warningHandler is shared among all requests created by this client.
// If not set, defaultWarningHandler is used.
- warningHandler WarningHandler
+ warningHandler WarningHandlerWithContext
// Set specific behavior of the client. If not set http.DefaultClient will be used.
Client *http.Client
@@ -105,10 +110,6 @@ type RESTClient struct {
// NewRESTClient creates a new RESTClient. This client performs generic REST functions
// such as Get, Put, Post, and Delete on specified paths.
func NewRESTClient(baseURL *url.URL, versionedAPIPath string, config ClientContentConfig, rateLimiter flowcontrol.RateLimiter, client *http.Client) (*RESTClient, error) {
- if len(config.ContentType) == 0 {
- config.ContentType = "application/json"
- }
-
base := *baseURL
if !strings.HasSuffix(base.Path, "/") {
base.Path += "/"
@@ -119,14 +120,53 @@ func NewRESTClient(baseURL *url.URL, versionedAPIPath string, config ClientConte
return &RESTClient{
base: &base,
versionedAPIPath: versionedAPIPath,
- content: config,
+ content: requestClientContentConfigProvider{base: scrubCBORContentConfigIfDisabled(config)},
createBackoffMgr: readExpBackoffConfig,
rateLimiter: rateLimiter,
-
- Client: client,
+ Client: client,
}, nil
}
+func scrubCBORContentConfigIfDisabled(content ClientContentConfig) ClientContentConfig {
+ if clientfeatures.FeatureGates().Enabled(clientfeatures.ClientsAllowCBOR) {
+ content.Negotiator = clientNegotiatorWithCBORSequenceStreamDecoder{content.Negotiator}
+ return content
+ }
+
+ if mediatype, _, err := mime.ParseMediaType(content.ContentType); err == nil && mediatype == "application/cbor" {
+ content.ContentType = "application/json"
+ }
+
+ clauses := goautoneg.ParseAccept(content.AcceptContentTypes)
+ scrubbed := false
+ for i, clause := range clauses {
+ if clause.Type == "application" && clause.SubType == "cbor" {
+ scrubbed = true
+ clauses[i].SubType = "json"
+ }
+ }
+ if !scrubbed {
+ // No application/cbor in AcceptContentTypes, nothing more to do.
+ return content
+ }
+
+ parts := make([]string, 0, len(clauses))
+ for _, clause := range clauses {
+ // ParseAccept does not store the parameter "q" in Params.
+ params := clause.Params
+ if clause.Q < 1 { // omit q=1, it's the default
+ if params == nil {
+ params = make(map[string]string, 1)
+ }
+ params["q"] = strconv.FormatFloat(clause.Q, 'g', 3, 32)
+ }
+ parts = append(parts, mime.FormatMediaType(fmt.Sprintf("%s/%s", clause.Type, clause.SubType), params))
+ }
+ content.AcceptContentTypes = strings.Join(parts, ",")
+
+ return content
+}
+
// GetRateLimiter returns rate limiter for a given client, or nil if it's called on a nil client
func (c *RESTClient) GetRateLimiter() flowcontrol.RateLimiter {
if c == nil {
@@ -138,7 +178,7 @@ func (c *RESTClient) GetRateLimiter() flowcontrol.RateLimiter {
// readExpBackoffConfig handles the internal logic of determining what the
// backoff policy is. By default if no information is available, NoBackoff.
// TODO Generalize this see #17727 .
-func readExpBackoffConfig() BackoffManager {
+func readExpBackoffConfig() BackoffManagerWithContext {
backoffBase := os.Getenv(envBackoffBase)
backoffDuration := os.Getenv(envBackoffDuration)
@@ -198,5 +238,106 @@ func (c *RESTClient) Delete() *Request {
// APIVersion returns the APIVersion this RESTClient is expected to use.
func (c *RESTClient) APIVersion() schema.GroupVersion {
- return c.content.GroupVersion
+ config, _ := c.content.GetClientContentConfig()
+ return config.GroupVersion
+}
+
+// requestClientContentConfigProvider observes HTTP 415 (Unsupported Media Type) responses to detect
+// that the server does not understand CBOR. Once this has happened, future requests are forced to
+// use JSON so they can succeed. This is convenient for client users that want to prefer CBOR, but
+// also need to interoperate with older servers so requests do not permanently fail. The clients
+// will not default to using CBOR until at least all supported kube-apiservers have enable-CBOR
+// locked to true, so this path will be rarely taken. Additionally, all generated clients accessing
+// built-in kube resources are forced to protobuf, so those will not degrade to JSON.
+type requestClientContentConfigProvider struct {
+ base ClientContentConfig
+
+ // Becomes permanently true if a server responds with HTTP 415 (Unsupported Media Type) to a
+ // request with "Content-Type" header containing the CBOR media type.
+ sawUnsupportedMediaTypeForCBOR atomic.Bool
+}
+
+// GetClientContentConfig returns the ClientContentConfig that should be used for new requests by
+// this client and true if the request ContentType was selected by default.
+func (p *requestClientContentConfigProvider) GetClientContentConfig() (ClientContentConfig, bool) {
+ config := p.base
+
+ defaulted := config.ContentType == ""
+ if defaulted {
+ config.ContentType = "application/json"
+ }
+
+ if !clientfeatures.FeatureGates().Enabled(clientfeatures.ClientsAllowCBOR) {
+ return config, defaulted
+ }
+
+ if defaulted && clientfeatures.FeatureGates().Enabled(clientfeatures.ClientsPreferCBOR) {
+ config.ContentType = "application/cbor"
+ }
+
+ if sawUnsupportedMediaTypeForCBOR := p.sawUnsupportedMediaTypeForCBOR.Load(); !sawUnsupportedMediaTypeForCBOR {
+ return config, defaulted
+ }
+
+ if mediaType, _, _ := mime.ParseMediaType(config.ContentType); mediaType != runtime.ContentTypeCBOR {
+ return config, defaulted
+ }
+
+ // The effective ContentType is CBOR and the client has previously received an HTTP 415 in
+ // response to a CBOR request. Override ContentType to JSON.
+ config.ContentType = runtime.ContentTypeJSON
+ return config, defaulted
+}
+
+// UnsupportedMediaType reports that the server has responded to a request with HTTP 415 Unsupported
+// Media Type.
+func (p *requestClientContentConfigProvider) UnsupportedMediaType(requestContentType string) {
+ if !clientfeatures.FeatureGates().Enabled(clientfeatures.ClientsAllowCBOR) {
+ return
+ }
+
+ // This could be extended to consider the Content-Encoding request header, the Accept and
+ // Accept-Encoding response headers, the request method, and URI (as mentioned in
+ // https://www.rfc-editor.org/rfc/rfc9110.html#section-15.5.16). The request Content-Type
+ // header is sufficient to implement a blanket CBOR fallback mechanism.
+ requestContentType, _, _ = mime.ParseMediaType(requestContentType)
+ switch requestContentType {
+ case runtime.ContentTypeCBOR, string(types.ApplyCBORPatchType):
+ p.sawUnsupportedMediaTypeForCBOR.Store(true)
+ }
+}
+
+// clientNegotiatorWithCBORSequenceStreamDecoder is a ClientNegotiator that delegates to another
+// ClientNegotiator to select the appropriate Encoder or Decoder for a given media type. As a
+// special case, it will resolve "application/cbor-seq" (a CBOR Sequence, the concatenation of zero
+// or more CBOR data items) as an alias for "application/cbor" (exactly one CBOR data item) when
+// selecting a stream decoder.
+type clientNegotiatorWithCBORSequenceStreamDecoder struct {
+ negotiator runtime.ClientNegotiator
+}
+
+func (n clientNegotiatorWithCBORSequenceStreamDecoder) Encoder(contentType string, params map[string]string) (runtime.Encoder, error) {
+ return n.negotiator.Encoder(contentType, params)
+}
+
+func (n clientNegotiatorWithCBORSequenceStreamDecoder) Decoder(contentType string, params map[string]string) (runtime.Decoder, error) {
+ return n.negotiator.Decoder(contentType, params)
+}
+
+func (n clientNegotiatorWithCBORSequenceStreamDecoder) StreamDecoder(contentType string, params map[string]string) (runtime.Decoder, runtime.Serializer, runtime.Framer, error) {
+ if !clientfeatures.FeatureGates().Enabled(clientfeatures.ClientsAllowCBOR) {
+ return n.negotiator.StreamDecoder(contentType, params)
+ }
+
+ switch contentType {
+ case runtime.ContentTypeCBORSequence:
+ return n.negotiator.StreamDecoder(runtime.ContentTypeCBOR, params)
+ case runtime.ContentTypeCBOR:
+ // This media type is only appropriate for exactly one data item, not the zero or
+ // more events of a watch stream.
+ return nil, nil, nil, runtime.NegotiateError{ContentType: contentType, Stream: true}
+ default:
+ return n.negotiator.StreamDecoder(contentType, params)
+ }
+
}
diff --git a/vendor/k8s.io/client-go/rest/config.go b/vendor/k8s.io/client-go/rest/config.go
index f8ff7e928c..82d4f7136a 100644
--- a/vendor/k8s.io/client-go/rest/config.go
+++ b/vendor/k8s.io/client-go/rest/config.go
@@ -32,6 +32,9 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/apimachinery/pkg/runtime/serializer"
+ "k8s.io/apimachinery/pkg/runtime/serializer/cbor"
+ "k8s.io/client-go/features"
"k8s.io/client-go/pkg/version"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
"k8s.io/client-go/transport"
@@ -113,6 +116,9 @@ type Config struct {
// QPS indicates the maximum QPS to the master from this client.
// If it's zero, the created RESTClient will use DefaultQPS: 5
+ //
+ // Setting this to a negative value will disable client-side ratelimiting
+ // unless `RateLimiter` is also set.
QPS float32
// Maximum burst for throttle.
@@ -123,10 +129,23 @@ type Config struct {
RateLimiter flowcontrol.RateLimiter
// WarningHandler handles warnings in server responses.
- // If not set, the default warning handler is used.
- // See documentation for SetDefaultWarningHandler() for details.
+ // If this and WarningHandlerWithContext are not set, the
+ // default warning handler is used. If both are set,
+ // WarningHandlerWithContext is used.
+ //
+ // See documentation for [SetDefaultWarningHandler] for details.
+ //
+ //logcheck:context // WarningHandlerWithContext should be used instead of WarningHandler in code which supports contextual logging.
WarningHandler WarningHandler
+ // WarningHandlerWithContext handles warnings in server responses.
+ // If this and WarningHandler are not set, the
+ // default warning handler is used. If both are set,
+ // WarningHandlerWithContext is used.
+ //
+ // See documentation for [SetDefaultWarningHandler] for details.
+ WarningHandlerWithContext WarningHandlerWithContext
+
// The maximum length of time to wait before giving up on a server request. A value of zero means no timeout.
Timeout time.Duration
@@ -375,12 +394,27 @@ func RESTClientForConfigAndClient(config *Config, httpClient *http.Client) (*RES
}
restClient, err := NewRESTClient(baseURL, versionedAPIPath, clientContent, rateLimiter, httpClient)
- if err == nil && config.WarningHandler != nil {
- restClient.warningHandler = config.WarningHandler
- }
+ maybeSetWarningHandler(restClient, config.WarningHandler, config.WarningHandlerWithContext)
return restClient, err
}
+// maybeSetWarningHandler sets the handlerWithContext if non-nil,
+// otherwise the handler with a wrapper if non-nil,
+// and does nothing if both are nil.
+//
+// May be called for a nil client.
+func maybeSetWarningHandler(c *RESTClient, handler WarningHandler, handlerWithContext WarningHandlerWithContext) {
+ if c == nil {
+ return
+ }
+ switch {
+ case handlerWithContext != nil:
+ c.warningHandler = handlerWithContext
+ case handler != nil:
+ c.warningHandler = warningLoggerNopContext{l: handler}
+ }
+}
+
// UnversionedRESTClientFor is the same as RESTClientFor, except that it allows
// the config.Version to be empty.
func UnversionedRESTClientFor(config *Config) (*RESTClient, error) {
@@ -442,9 +476,7 @@ func UnversionedRESTClientForConfigAndClient(config *Config, httpClient *http.Cl
}
restClient, err := NewRESTClient(baseURL, versionedAPIPath, clientContent, rateLimiter, httpClient)
- if err == nil && config.WarningHandler != nil {
- restClient.warningHandler = config.WarningHandler
- }
+ maybeSetWarningHandler(restClient, config.WarningHandler, config.WarningHandlerWithContext)
return restClient, err
}
@@ -526,6 +558,7 @@ func InClusterConfig() (*Config, error) {
tlsClientConfig := TLSClientConfig{}
if _, err := certutil.NewPool(rootCAFile); err != nil {
+ //nolint:logcheck // The decision to log this instead of returning an error goes back to ~2016. It's part of the client-go API now, so not changing it just to support contextual logging.
klog.Errorf("Expected to load root CA config from %s, but got err: %v", rootCAFile, err)
} else {
tlsClientConfig.CAFile = rootCAFile
@@ -610,15 +643,16 @@ func AnonymousClientConfig(config *Config) *Config {
CAData: config.TLSClientConfig.CAData,
NextProtos: config.TLSClientConfig.NextProtos,
},
- RateLimiter: config.RateLimiter,
- WarningHandler: config.WarningHandler,
- UserAgent: config.UserAgent,
- DisableCompression: config.DisableCompression,
- QPS: config.QPS,
- Burst: config.Burst,
- Timeout: config.Timeout,
- Dial: config.Dial,
- Proxy: config.Proxy,
+ RateLimiter: config.RateLimiter,
+ WarningHandler: config.WarningHandler,
+ WarningHandlerWithContext: config.WarningHandlerWithContext,
+ UserAgent: config.UserAgent,
+ DisableCompression: config.DisableCompression,
+ QPS: config.QPS,
+ Burst: config.Burst,
+ Timeout: config.Timeout,
+ Dial: config.Dial,
+ Proxy: config.Proxy,
}
}
@@ -652,20 +686,37 @@ func CopyConfig(config *Config) *Config {
CAData: config.TLSClientConfig.CAData,
NextProtos: config.TLSClientConfig.NextProtos,
},
- UserAgent: config.UserAgent,
- DisableCompression: config.DisableCompression,
- Transport: config.Transport,
- WrapTransport: config.WrapTransport,
- QPS: config.QPS,
- Burst: config.Burst,
- RateLimiter: config.RateLimiter,
- WarningHandler: config.WarningHandler,
- Timeout: config.Timeout,
- Dial: config.Dial,
- Proxy: config.Proxy,
+ UserAgent: config.UserAgent,
+ DisableCompression: config.DisableCompression,
+ Transport: config.Transport,
+ WrapTransport: config.WrapTransport,
+ QPS: config.QPS,
+ Burst: config.Burst,
+ RateLimiter: config.RateLimiter,
+ WarningHandler: config.WarningHandler,
+ WarningHandlerWithContext: config.WarningHandlerWithContext,
+ Timeout: config.Timeout,
+ Dial: config.Dial,
+ Proxy: config.Proxy,
}
if config.ExecProvider != nil && config.ExecProvider.Config != nil {
c.ExecProvider.Config = config.ExecProvider.Config.DeepCopyObject()
}
return c
}
+
+// CodecFactoryForGeneratedClient returns the provided CodecFactory if there are no enabled client
+// feature gates affecting serialization. Otherwise, it constructs and returns a new CodecFactory
+// from the provided Scheme.
+//
+// This is supported ONLY for use by clients generated with client-gen. The caller is responsible
+// for ensuring that the CodecFactory argument was constructed using the Scheme argument.
+func CodecFactoryForGeneratedClient(scheme *runtime.Scheme, codecs serializer.CodecFactory) serializer.CodecFactory {
+ if !features.FeatureGates().Enabled(features.ClientsAllowCBOR) {
+ // NOTE: This assumes client-gen will not generate CBOR-enabled Codecs as long as
+ // the feature gate exists.
+ return codecs
+ }
+
+ return serializer.NewCodecFactory(scheme, serializer.WithSerializer(cbor.NewSerializerInfo))
+}
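A sketch of how a generated clientset might route its codecs through the new helper; the scheme/codecs variables and setConfigDefaults follow the usual client-gen layout and are assumptions here, not the exact generated code:

package example

import (
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/serializer"
	"k8s.io/client-go/rest"
)

var scheme = runtime.NewScheme()
var codecs = serializer.NewCodecFactory(scheme)

func setConfigDefaults(config *rest.Config) {
	// When ClientsAllowCBOR is enabled, this swaps in a CBOR-capable
	// CodecFactory built from the same scheme; otherwise the original
	// factory is returned unchanged.
	cf := rest.CodecFactoryForGeneratedClient(scheme, codecs)
	config.NegotiatedSerializer = cf.WithoutConversion()
}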
diff --git a/vendor/k8s.io/client-go/rest/plugin.go b/vendor/k8s.io/client-go/rest/plugin.go
index ae5cbdc2c4..f7a4e4f344 100644
--- a/vendor/k8s.io/client-go/rest/plugin.go
+++ b/vendor/k8s.io/client-go/rest/plugin.go
@@ -21,8 +21,6 @@ import (
"net/http"
"sync"
- "k8s.io/klog/v2"
-
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
)
@@ -65,7 +63,10 @@ func RegisterAuthProviderPlugin(name string, plugin Factory) error {
if _, found := plugins[name]; found {
return fmt.Errorf("auth Provider Plugin %q was registered twice", name)
}
- klog.V(4).Infof("Registered Auth Provider Plugin %q", name)
+ // RegisterAuthProviderPlugin gets called during the init phase before
+ // logging is initialized and therefore should not emit logs. If you
+ // need this message for debugging something, then uncomment it.
+ // klog.V(4).Infof("Registered Auth Provider Plugin %q", name)
plugins[name] = plugin
return nil
}
diff --git a/vendor/k8s.io/client-go/rest/request.go b/vendor/k8s.io/client-go/rest/request.go
index 850e57daeb..1eb2f9b42a 100644
--- a/vendor/k8s.io/client-go/rest/request.go
+++ b/vendor/k8s.io/client-go/rest/request.go
@@ -19,6 +19,7 @@ package rest
import (
"bytes"
"context"
+ "encoding/base64"
"encoding/hex"
"fmt"
"io"
@@ -37,12 +38,15 @@ import (
"golang.org/x/net/http2"
"k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/conversion"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/runtime/serializer/streaming"
"k8s.io/apimachinery/pkg/util/net"
"k8s.io/apimachinery/pkg/watch"
+ clientfeatures "k8s.io/client-go/features"
restclientwatch "k8s.io/client-go/rest/watch"
"k8s.io/client-go/tools/metrics"
"k8s.io/client-go/util/flowcontrol"
@@ -50,7 +54,7 @@ import (
"k8s.io/utils/clock"
)
-var (
+const (
// longThrottleLatency defines threshold for logging requests. All requests being
// throttled (via the provided rateLimiter) for more than longThrottleLatency will
// be logged.
@@ -96,10 +100,13 @@ func defaultRequestRetryFn(maxRetries int) WithRetry {
type Request struct {
c *RESTClient
- warningHandler WarningHandler
+ contentConfig ClientContentConfig
+ contentTypeNotSet bool
+
+ warningHandler WarningHandlerWithContext
rateLimiter flowcontrol.RateLimiter
- backoff BackoffManager
+ backoff BackoffManagerWithContext
timeout time.Duration
maxRetries int
@@ -120,7 +127,7 @@ type Request struct {
// output
err error
- // only one of body / bodyBytes may be set. requests using body are not retriable.
+ // only one of body / bodyBytes may be set. requests using body are not retryable.
body io.Reader
bodyBytes []byte
@@ -129,7 +136,7 @@ type Request struct {
// NewRequest creates a new request helper object for accessing runtime.Objects on a server.
func NewRequest(c *RESTClient) *Request {
- var backoff BackoffManager
+ var backoff BackoffManagerWithContext
if c.createBackoffMgr != nil {
backoff = c.createBackoffMgr()
}
@@ -149,6 +156,11 @@ func NewRequest(c *RESTClient) *Request {
timeout = c.Client.Timeout
}
+ // A request needs to know whether the content type was explicitly configured or selected by
+ // default in order to support the per-request Protobuf override used by clients generated
+ // with --prefers-protobuf.
+ contentConfig, contentTypeDefaulted := c.content.GetClientContentConfig()
+
r := &Request{
c: c,
rateLimiter: c.rateLimiter,
@@ -158,14 +170,12 @@ func NewRequest(c *RESTClient) *Request {
maxRetries: 10,
retryFn: defaultRequestRetryFn,
warningHandler: c.warningHandler,
- }
- switch {
- case len(c.content.AcceptContentTypes) > 0:
- r.SetHeader("Accept", c.content.AcceptContentTypes)
- case len(c.content.ContentType) > 0:
- r.SetHeader("Accept", c.content.ContentType+", */*")
+ contentConfig: contentConfig,
+ contentTypeNotSet: contentTypeDefaulted,
}
+
+ r.setAcceptHeader()
return r
}
@@ -174,11 +184,36 @@ func NewRequestWithClient(base *url.URL, versionedAPIPath string, content Client
return NewRequest(&RESTClient{
base: base,
versionedAPIPath: versionedAPIPath,
- content: content,
+ content: requestClientContentConfigProvider{base: content},
Client: client,
})
}
+func (r *Request) UseProtobufAsDefaultIfPreferred(prefersProtobuf bool) *Request {
+ if prefersProtobuf {
+ return r.UseProtobufAsDefault()
+ }
+ return r
+}
+
+func (r *Request) UseProtobufAsDefault() *Request {
+ if r.contentTypeNotSet && len(r.contentConfig.AcceptContentTypes) == 0 {
+ r.contentConfig.AcceptContentTypes = "application/vnd.kubernetes.protobuf,application/json"
+ r.contentConfig.ContentType = "application/vnd.kubernetes.protobuf"
+ r.setAcceptHeader()
+ }
+ return r
+}
+
+func (r *Request) setAcceptHeader() {
+ switch {
+ case len(r.contentConfig.AcceptContentTypes) > 0:
+ r.SetHeader("Accept", r.contentConfig.AcceptContentTypes)
+ case len(r.contentConfig.ContentType) > 0:
+ r.SetHeader("Accept", r.contentConfig.ContentType+", */*")
+ }
+}
+
// Verb sets the verb this request will use.
func (r *Request) Verb(verb string) *Request {
r.verb = verb
@@ -224,20 +259,47 @@ func (r *Request) Resource(resource string) *Request {
}
// BackOff sets the request's backoff manager to the one specified,
-// or defaults to the stub implementation if nil is provided
+// or defaults to the stub implementation if nil is provided.
+//
+// Deprecated: BackoffManager.Sleep ignores the caller's context. Use BackOffWithContext and BackoffManagerWithContext instead.
func (r *Request) BackOff(manager BackoffManager) *Request {
if manager == nil {
r.backoff = &NoBackoff{}
return r
}
+ r.backoff = &backoffManagerNopContext{BackoffManager: manager}
+ return r
+}
+
+// BackOffWithContext sets the request's backoff manager to the one specified,
+// or defaults to the stub implementation if nil is provided.
+func (r *Request) BackOffWithContext(manager BackoffManagerWithContext) *Request {
+ if manager == nil {
+ r.backoff = &NoBackoff{}
+ return r
+ }
+
r.backoff = manager
return r
}
// WarningHandler sets the handler this client uses when warning headers are encountered.
-// If set to nil, this client will use the default warning handler (see SetDefaultWarningHandler).
+// If set to nil, this client will use the default warning handler (see [SetDefaultWarningHandler]).
+//
+//logcheck:context // WarningHandlerWithContext should be used instead of WarningHandler in code which supports contextual logging.
func (r *Request) WarningHandler(handler WarningHandler) *Request {
+ if handler == nil {
+ r.warningHandler = nil
+ return r
+ }
+ r.warningHandler = warningLoggerNopContext{l: handler}
+ return r
+}
+
+// WarningHandlerWithContext sets the handler this client uses when warning headers are encountered.
+// If set to nil, this client will use the default warning handler (see [SetDefaultWarningHandlerWithContext]).
+func (r *Request) WarningHandlerWithContext(handler WarningHandlerWithContext) *Request {
r.warningHandler = handler
return r
}
@@ -367,7 +429,7 @@ func (r *Request) Param(paramName, s string) *Request {
// VersionedParams will not write query parameters that have omitempty set and are empty. If a
// parameter has already been set it is appended to (Params and VersionedParams are additive).
func (r *Request) VersionedParams(obj runtime.Object, codec runtime.ParameterCodec) *Request {
- return r.SpecificallyVersionedParams(obj, codec, r.c.content.GroupVersion)
+ return r.SpecificallyVersionedParams(obj, codec, r.contentConfig.GroupVersion)
}
func (r *Request) SpecificallyVersionedParams(obj runtime.Object, codec runtime.ParameterCodec, version schema.GroupVersion) *Request {
@@ -447,11 +509,9 @@ func (r *Request) Body(obj interface{}) *Request {
r.err = err
return r
}
- glogBody("Request Body", data)
r.body = nil
r.bodyBytes = data
case []byte:
- glogBody("Request Body", t)
r.body = nil
r.bodyBytes = t
case io.Reader:
@@ -462,7 +522,7 @@ func (r *Request) Body(obj interface{}) *Request {
if reflect.ValueOf(t).IsNil() {
return r
}
- encoder, err := r.c.content.Negotiator.Encoder(r.c.content.ContentType, nil)
+ encoder, err := r.contentConfig.Negotiator.Encoder(r.contentConfig.ContentType, nil)
if err != nil {
r.err = err
return r
@@ -472,10 +532,9 @@ func (r *Request) Body(obj interface{}) *Request {
r.err = err
return r
}
- glogBody("Request Body", data)
r.body = nil
r.bodyBytes = data
- r.SetHeader("Content-Type", r.c.content.ContentType)
+ r.SetHeader("Content-Type", r.contentConfig.ContentType)
default:
r.err = fmt.Errorf("unknown type used for body: %+v", obj)
}
@@ -617,21 +676,17 @@ func (r *Request) tryThrottleWithInfo(ctx context.Context, retryInfo string) err
}
latency := time.Since(now)
- var message string
- switch {
- case len(retryInfo) > 0:
- message = fmt.Sprintf("Waited for %v, %s - request: %s:%s", latency, retryInfo, r.verb, r.URL().String())
- default:
- message = fmt.Sprintf("Waited for %v due to client-side throttling, not priority and fairness, request: %s:%s", latency, r.verb, r.URL().String())
- }
-
if latency > longThrottleLatency {
- klog.V(3).Info(message)
- }
- if latency > extraLongThrottleLatency {
- // If the rate limiter latency is very high, the log message should be printed at a higher log level,
- // but we use a throttled logger to prevent spamming.
- globalThrottledLogger.Infof("%s", message)
+ if retryInfo == "" {
+ retryInfo = "client-side throttling, not priority and fairness"
+ }
+ klog.FromContext(ctx).V(3).Info("Waited before sending request", "delay", latency, "reason", retryInfo, "verb", r.verb, "URL", r.URL())
+
+ if latency > extraLongThrottleLatency {
+ // If the rate limiter latency is very high, the log message should be printed at a higher log level,
+ // but we use a throttled logger to prevent spamming.
+ globalThrottledLogger.info(klog.FromContext(ctx), "Waited before sending request", "delay", latency, "reason", retryInfo, "verb", r.verb, "URL", r.URL())
+ }
}
metrics.RateLimiterLatency.Observe(ctx, r.verb, r.finalURLTemplate(), latency)
@@ -643,7 +698,7 @@ func (r *Request) tryThrottle(ctx context.Context) error {
}
type throttleSettings struct {
- logLevel klog.Level
+ logLevel int
minLogInterval time.Duration
lastLogTime time.Time
@@ -668,9 +723,9 @@ var globalThrottledLogger = &throttledLogger{
},
}
-func (b *throttledLogger) attemptToLog() (klog.Level, bool) {
+func (b *throttledLogger) attemptToLog(logger klog.Logger) (int, bool) {
for _, setting := range b.settings {
- if bool(klog.V(setting.logLevel).Enabled()) {
+ if bool(logger.V(setting.logLevel).Enabled()) {
// Return early without write locking if possible.
if func() bool {
setting.lock.RLock()
@@ -692,19 +747,28 @@ func (b *throttledLogger) attemptToLog() (klog.Level, bool) {
// Infof will write a log message at each logLevel specified by the receiver's throttleSettings
// as long as it hasn't written a log message more recently than minLogInterval.
-func (b *throttledLogger) Infof(message string, args ...interface{}) {
- if logLevel, ok := b.attemptToLog(); ok {
- klog.V(logLevel).Infof(message, args...)
+func (b *throttledLogger) info(logger klog.Logger, message string, kv ...any) {
+ if logLevel, ok := b.attemptToLog(logger); ok {
+ logger.V(logLevel).Info(message, kv...)
}
}
// Watch attempts to begin watching the requested location.
// Returns a watch.Interface, or an error.
func (r *Request) Watch(ctx context.Context) (watch.Interface, error) {
+ w, _, e := r.watchInternal(ctx)
+ return w, e
+}
+
+func (r *Request) watchInternal(ctx context.Context) (watch.Interface, runtime.Decoder, error) {
+ if r.body == nil {
+ logBody(klog.FromContext(ctx), 2, "Request Body", r.bodyBytes)
+ }
+
// We specifically don't want to rate limit watches, so we
// don't use r.rateLimiter here.
if r.err != nil {
- return nil, r.err
+ return nil, nil, r.err
}
client := r.c.Client
@@ -724,18 +788,18 @@ func (r *Request) Watch(ctx context.Context) (watch.Interface, error) {
url := r.URL().String()
for {
if err := retry.Before(ctx, r); err != nil {
- return nil, retry.WrapPreviousError(err)
+ return nil, nil, retry.WrapPreviousError(err)
}
req, err := r.newHTTPRequest(ctx)
if err != nil {
- return nil, err
+ return nil, nil, err
}
resp, err := client.Do(req)
retry.After(ctx, r, resp, err)
if err == nil && resp.StatusCode == http.StatusOK {
- return r.newStreamWatcher(resp)
+ return r.newStreamWatcher(ctx, resp)
}
done, transformErr := func() (bool, error) {
@@ -749,47 +813,208 @@ func (r *Request) Watch(ctx context.Context) (watch.Interface, error) {
// the server must have sent us an error in 'err'
return true, nil
}
- if result := r.transformResponse(resp, req); result.err != nil {
- return true, result.err
+ result := r.transformResponse(ctx, resp, req)
+ if err := result.Error(); err != nil {
+ return true, err
}
return true, fmt.Errorf("for request %s, got status: %v", url, resp.StatusCode)
}()
if done {
if isErrRetryableFunc(req, err) {
- return watch.NewEmptyWatch(), nil
+ return watch.NewEmptyWatch(), nil, nil
}
if err == nil {
// if the server sent us an HTTP Response object,
// we need to return the error object from that.
err = transformErr
}
- return nil, retry.WrapPreviousError(err)
+ return nil, nil, retry.WrapPreviousError(err)
}
}
}
-func (r *Request) newStreamWatcher(resp *http.Response) (watch.Interface, error) {
+type WatchListResult struct {
+ // err holds any errors we might have received
+ // during streaming.
+ err error
+
+ // items hold the collected data
+ items []runtime.Object
+
+ // initialEventsEndBookmarkRV holds the resource version
+ // extracted from the bookmark event that marks
+ // the end of the stream.
+ initialEventsEndBookmarkRV string
+
+ // negotiatedObjectDecoder knows how to decode
+ // the initialEventsListBlueprint
+ negotiatedObjectDecoder runtime.Decoder
+
+ // base64EncodedInitialEventsListBlueprint contains an empty,
+ // versioned list encoded in the requested format
+ // (e.g., protobuf, JSON, CBOR) and stored as a base64-encoded string
+ base64EncodedInitialEventsListBlueprint string
+}
+
+// Into stores the result into obj. The passed obj parameter must be a pointer to a list type.
+//
+// Note:
+//
+// Special attention should be given to the type *unstructured.Unstructured,
+// which represents a list type but does not have an "Items" field.
+// Users who directly use RESTClient may store the response in such an object.
+// This particular case is not handled by the current implementation of this function,
+// but may be considered for future updates.
+func (r WatchListResult) Into(obj runtime.Object) error {
+ if r.err != nil {
+ return r.err
+ }
+
+ listItemsPtr, err := meta.GetItemsPtr(obj)
+ if err != nil {
+ return err
+ }
+ listVal, err := conversion.EnforcePtr(listItemsPtr)
+ if err != nil {
+ return err
+ }
+ if listVal.Kind() != reflect.Slice {
+ return fmt.Errorf("need a pointer to slice, got %v", listVal.Kind())
+ }
+
+ encodedInitialEventsListBlueprint, err := base64.StdEncoding.DecodeString(r.base64EncodedInitialEventsListBlueprint)
+ if err != nil {
+ return fmt.Errorf("failed to decode the received blueprint list, err %w", err)
+ }
+
+ err = runtime.DecodeInto(r.negotiatedObjectDecoder, encodedInitialEventsListBlueprint, obj)
+ if err != nil {
+ return err
+ }
+
+ if len(r.items) == 0 {
+ listVal.Set(reflect.MakeSlice(listVal.Type(), 0, 0))
+ } else {
+ listVal.Set(reflect.MakeSlice(listVal.Type(), len(r.items), len(r.items)))
+ for i, o := range r.items {
+ if listVal.Type().Elem() != reflect.TypeOf(o).Elem() {
+ return fmt.Errorf("received object type = %v at index = %d, doesn't match the list item type = %v", reflect.TypeOf(o).Elem(), i, listVal.Type().Elem())
+ }
+ listVal.Index(i).Set(reflect.ValueOf(o).Elem())
+ }
+ }
+
+ listMeta, err := meta.ListAccessor(obj)
+ if err != nil {
+ return err
+ }
+ listMeta.SetResourceVersion(r.initialEventsEndBookmarkRV)
+ return nil
+}
+
+// WatchList establishes a stream to get a consistent snapshot of data
+// from the server as described in https://github.com/kubernetes/enhancements/tree/master/keps/sig-api-machinery/3157-watch-list#proposal
+//
+// Note that the watchlist requires properly setting the ListOptions
+// otherwise it just establishes a regular watch with the server.
+// Check the documentation https://kubernetes.io/docs/reference/using-api/api-concepts/#streaming-lists
+// to see what parameters are currently required.
+func (r *Request) WatchList(ctx context.Context) WatchListResult {
+ if r.body == nil {
+ logBody(klog.FromContext(ctx), 2, "Request Body", r.bodyBytes)
+ }
+
+ if !clientfeatures.FeatureGates().Enabled(clientfeatures.WatchListClient) {
+ return WatchListResult{err: fmt.Errorf("%q feature gate is not enabled", clientfeatures.WatchListClient)}
+ }
+ // TODO(#115478): consider validating request parameters (i.e sendInitialEvents).
+ // Most users use the generated client, which handles the proper setting of parameters.
+ // We don't have validation for other methods (e.g., the Watch)
+ // thus, for symmetry, we haven't added additional checks for the WatchList method.
+ w, d, err := r.watchInternal(ctx)
+ if err != nil {
+ return WatchListResult{err: err}
+ }
+ return r.handleWatchList(ctx, w, d)
+}
+
+// handleWatchList holds the actual logic for easier unit testing.
+// Note that this function will close the passed watch.
+func (r *Request) handleWatchList(ctx context.Context, w watch.Interface, negotiatedObjectDecoder runtime.Decoder) WatchListResult {
+ defer w.Stop()
+ var lastKey string
+ var items []runtime.Object
+
+ for {
+ select {
+ case <-ctx.Done():
+ return WatchListResult{err: ctx.Err()}
+ case event, ok := <-w.ResultChan():
+ if !ok {
+ return WatchListResult{err: fmt.Errorf("unexpected watch close")}
+ }
+ if event.Type == watch.Error {
+ return WatchListResult{err: errors.FromObject(event.Object)}
+ }
+ meta, err := meta.Accessor(event.Object)
+ if err != nil {
+ return WatchListResult{err: fmt.Errorf("failed to parse watch event: %#v", event)}
+ }
+
+ switch event.Type {
+ case watch.Added:
+ // the following check ensures that the response is ordered.
+ // earlier servers had a bug that caused them to not sort the output.
+ // in such cases, return an error which can trigger fallback logic.
+ key := objectKeyFromMeta(meta)
+ if len(lastKey) > 0 && lastKey > key {
+ return WatchListResult{err: fmt.Errorf("cannot add the obj (%#v) with the key = %s, as it violates the ordering guarantees provided by the watchlist feature in beta phase, lastInsertedKey was = %s", event.Object, key, lastKey)}
+ }
+ items = append(items, event.Object)
+ lastKey = key
+ case watch.Bookmark:
+ if meta.GetAnnotations()[metav1.InitialEventsAnnotationKey] == "true" {
+ base64EncodedInitialEventsListBlueprint := meta.GetAnnotations()[metav1.InitialEventsListBlueprintAnnotationKey]
+ if len(base64EncodedInitialEventsListBlueprint) == 0 {
+ return WatchListResult{err: fmt.Errorf("%q annotation is missing content", metav1.InitialEventsListBlueprintAnnotationKey)}
+ }
+ return WatchListResult{
+ items: items,
+ initialEventsEndBookmarkRV: meta.GetResourceVersion(),
+ negotiatedObjectDecoder: negotiatedObjectDecoder,
+ base64EncodedInitialEventsListBlueprint: base64EncodedInitialEventsListBlueprint,
+ }
+ }
+ default:
+ return WatchListResult{err: fmt.Errorf("unexpected watch event %#v, expected to only receive watch.Added and watch.Bookmark events", event)}
+ }
+ }
+ }
+}
+
+func (r *Request) newStreamWatcher(ctx context.Context, resp *http.Response) (watch.Interface, runtime.Decoder, error) {
contentType := resp.Header.Get("Content-Type")
mediaType, params, err := mime.ParseMediaType(contentType)
if err != nil {
- klog.V(4).Infof("Unexpected content type from the server: %q: %v", contentType, err)
+ klog.FromContext(ctx).V(4).Info("Unexpected content type from the server", "contentType", contentType, "err", err)
}
- objectDecoder, streamingSerializer, framer, err := r.c.content.Negotiator.StreamDecoder(mediaType, params)
+ objectDecoder, streamingSerializer, framer, err := r.contentConfig.Negotiator.StreamDecoder(mediaType, params)
if err != nil {
- return nil, err
+ return nil, nil, err
}
- handleWarnings(resp.Header, r.warningHandler)
+ handleWarnings(ctx, resp.Header, r.warningHandler)
frameReader := framer.NewFrameReader(resp.Body)
watchEventDecoder := streaming.NewDecoder(frameReader, streamingSerializer)
- return watch.NewStreamWatcher(
+ return watch.NewStreamWatcherWithLogger(
+ klog.FromContext(ctx),
restclientwatch.NewDecoder(watchEventDecoder, objectDecoder),
// use 500 to indicate that the cause of the error is unknown - other error codes
// are more specific to HTTP interactions, and set a reason
errors.NewClientErrorReporter(http.StatusInternalServerError, r.verb, "ClientWatchDecoding"),
- ), nil
+ ), objectDecoder, nil
}
// updateRequestResultMetric increments the RequestResult metric counter,
@@ -829,6 +1054,10 @@ func sanitize(req *Request, resp *http.Response, err error) (string, string) {
// Any non-2xx http status code causes an error. If we get a non-2xx code, we try to convert the body into an APIStatus object.
// If we can, we return that as an error. Otherwise, we create an error that lists the http status and the content of the response.
func (r *Request) Stream(ctx context.Context) (io.ReadCloser, error) {
+ if r.body == nil {
+ logBody(klog.FromContext(ctx), 2, "Request Body", r.bodyBytes)
+ }
+
if r.err != nil {
return nil, r.err
}
@@ -862,7 +1091,7 @@ func (r *Request) Stream(ctx context.Context) (io.ReadCloser, error) {
switch {
case (resp.StatusCode >= 200) && (resp.StatusCode < 300):
- handleWarnings(resp.Header, r.warningHandler)
+ handleWarnings(ctx, resp.Header, r.warningHandler)
return resp.Body, nil
default:
@@ -872,7 +1101,7 @@ func (r *Request) Stream(ctx context.Context) (io.ReadCloser, error) {
if retry.IsNextRetry(ctx, r, req, resp, err, neverRetryError) {
return false, nil
}
- result := r.transformResponse(resp, req)
+ result := r.transformResponse(ctx, resp, req)
if err := result.Error(); err != nil {
return true, err
}
@@ -970,7 +1199,7 @@ func (r *Request) request(ctx context.Context, fn func(*http.Request, *http.Resp
}()
if r.err != nil {
- klog.V(4).Infof("Error in request: %v", r.err)
+ klog.FromContext(ctx).V(4).Info("Error in request", "err", r.err)
return r.err
}
@@ -1004,7 +1233,7 @@ func (r *Request) request(ctx context.Context, fn func(*http.Request, *http.Resp
return false
}
// For connection errors and apiserver shutdown errors retry.
- if net.IsConnectionReset(err) || net.IsProbableEOF(err) {
+ if net.IsConnectionReset(err) || net.IsProbableEOF(err) || net.IsHTTP2ConnectionLost(err) {
return true
}
return false
@@ -1026,6 +1255,9 @@ func (r *Request) request(ctx context.Context, fn func(*http.Request, *http.Resp
if req.ContentLength >= 0 && !(req.Body != nil && req.ContentLength == 0) {
metrics.RequestSize.Observe(ctx, r.verb, r.URL().Host, float64(req.ContentLength))
}
+ if resp != nil && resp.StatusCode == http.StatusUnsupportedMediaType {
+ r.c.content.UnsupportedMediaType(resp.Request.Header.Get("Content-Type"))
+ }
retry.After(ctx, r, resp, err)
done := func() bool {
@@ -1059,12 +1291,17 @@ func (r *Request) request(ctx context.Context, fn func(*http.Request, *http.Resp
// - If the server responds with a status: *errors.StatusError or *errors.UnexpectedObjectError
// - http.Client.Do errors are returned directly.
func (r *Request) Do(ctx context.Context) Result {
+ logger := klog.FromContext(ctx)
+ if r.body == nil {
+ logBody(logger, 2, "Request Body", r.bodyBytes)
+ }
+
var result Result
err := r.request(ctx, func(req *http.Request, resp *http.Response) {
- result = r.transformResponse(resp, req)
+ result = r.transformResponse(ctx, resp, req)
})
if err != nil {
- return Result{err: err}
+ return Result{err: err, logger: logger}
}
if result.err == nil || len(result.body) > 0 {
metrics.ResponseSize.Observe(ctx, r.verb, r.URL().Host, float64(len(result.body)))
@@ -1074,10 +1311,15 @@ func (r *Request) Do(ctx context.Context) Result {
// DoRaw executes the request but does not process the response body.
func (r *Request) DoRaw(ctx context.Context) ([]byte, error) {
+ logger := klog.FromContext(ctx)
+ if r.body == nil {
+ logBody(logger, 2, "Request Body", r.bodyBytes)
+ }
+
var result Result
err := r.request(ctx, func(req *http.Request, resp *http.Response) {
result.body, result.err = io.ReadAll(resp.Body)
- glogBody("Response Body", result.body)
+ logBody(logger, 2, "Response Body", result.body)
if resp.StatusCode < http.StatusOK || resp.StatusCode > http.StatusPartialContent {
result.err = r.transformUnstructuredResponseError(resp, req, result.body)
}
@@ -1092,7 +1334,8 @@ func (r *Request) DoRaw(ctx context.Context) ([]byte, error) {
}
// transformResponse converts an API response into a structured API object
-func (r *Request) transformResponse(resp *http.Response, req *http.Request) Result {
+func (r *Request) transformResponse(ctx context.Context, resp *http.Response, req *http.Request) Result {
+ logger := klog.FromContext(ctx)
var body []byte
if resp.Body != nil {
data, err := io.ReadAll(resp.Body)
@@ -1107,48 +1350,52 @@ func (r *Request) transformResponse(resp *http.Response, req *http.Request) Resu
// 2. Apiserver sends back the headers and then part of the body
// 3. Apiserver closes connection.
// 4. client-go should catch this and return an error.
- klog.V(2).Infof("Stream error %#v when reading response body, may be caused by closed connection.", err)
+ logger.V(2).Info("Stream error when reading response body, may be caused by closed connection", "err", err)
streamErr := fmt.Errorf("stream error when reading response body, may be caused by closed connection. Please retry. Original error: %w", err)
return Result{
- err: streamErr,
+ err: streamErr,
+ logger: logger,
}
default:
- klog.Errorf("Unexpected error when reading response body: %v", err)
+ logger.Error(err, "Unexpected error when reading response body")
unexpectedErr := fmt.Errorf("unexpected error when reading response body. Please retry. Original error: %w", err)
return Result{
- err: unexpectedErr,
+ err: unexpectedErr,
+ logger: logger,
}
}
}
- glogBody("Response Body", body)
+ // Call depth is tricky. This one is okay for Do and DoRaw.
+ logBody(logger, 7, "Response Body", body)
// verify the content type is accurate
var decoder runtime.Decoder
contentType := resp.Header.Get("Content-Type")
if len(contentType) == 0 {
- contentType = r.c.content.ContentType
+ contentType = r.contentConfig.ContentType
}
if len(contentType) > 0 {
var err error
mediaType, params, err := mime.ParseMediaType(contentType)
if err != nil {
- return Result{err: errors.NewInternalError(err)}
+ return Result{err: errors.NewInternalError(err), logger: logger}
}
- decoder, err = r.c.content.Negotiator.Decoder(mediaType, params)
+ decoder, err = r.contentConfig.Negotiator.Decoder(mediaType, params)
if err != nil {
// if we fail to negotiate a decoder, treat this as an unstructured error
switch {
case resp.StatusCode == http.StatusSwitchingProtocols:
// no-op, we've been upgraded
case resp.StatusCode < http.StatusOK || resp.StatusCode > http.StatusPartialContent:
- return Result{err: r.transformUnstructuredResponseError(resp, req, body)}
+ return Result{err: r.transformUnstructuredResponseError(resp, req, body), logger: logger}
}
return Result{
body: body,
contentType: contentType,
statusCode: resp.StatusCode,
- warnings: handleWarnings(resp.Header, r.warningHandler),
+ warnings: handleWarnings(ctx, resp.Header, r.warningHandler),
+ logger: logger,
}
}
}
@@ -1167,7 +1414,8 @@ func (r *Request) transformResponse(resp *http.Response, req *http.Request) Resu
statusCode: resp.StatusCode,
decoder: decoder,
err: err,
- warnings: handleWarnings(resp.Header, r.warningHandler),
+ warnings: handleWarnings(ctx, resp.Header, r.warningHandler),
+ logger: logger,
}
}
@@ -1176,19 +1424,20 @@ func (r *Request) transformResponse(resp *http.Response, req *http.Request) Resu
contentType: contentType,
statusCode: resp.StatusCode,
decoder: decoder,
- warnings: handleWarnings(resp.Header, r.warningHandler),
+ warnings: handleWarnings(ctx, resp.Header, r.warningHandler),
+ logger: logger,
}
}
// truncateBody decides if the body should be truncated, based on the glog Verbosity.
-func truncateBody(body string) string {
+func truncateBody(logger klog.Logger, body string) string {
max := 0
switch {
- case bool(klog.V(10).Enabled()):
+ case bool(logger.V(10).Enabled()):
return body
- case bool(klog.V(9).Enabled()):
+ case bool(logger.V(9).Enabled()):
max = 10240
- case bool(klog.V(8).Enabled()):
+ case bool(logger.V(8).Enabled()):
max = 1024
}
@@ -1199,17 +1448,20 @@ func truncateBody(body string) string {
return body[:max] + fmt.Sprintf(" [truncated %d chars]", len(body)-max)
}
-// glogBody logs a body output that could be either JSON or protobuf. It explicitly guards against
+// logBody logs a body output that could be either JSON or protobuf. It explicitly guards against
// allocating a new string for the body output unless necessary. Uses a simple heuristic to determine
// whether the body is printable.
-func glogBody(prefix string, body []byte) {
- if klogV := klog.V(8); klogV.Enabled() {
+//
+// It needs to be called by all functions which send or receive the data.
+func logBody(logger klog.Logger, callDepth int, prefix string, body []byte) {
+ if loggerV := logger.V(8); loggerV.Enabled() {
+ loggerV := loggerV.WithCallDepth(callDepth)
if bytes.IndexFunc(body, func(r rune) bool {
return r < 0x0a
}) != -1 {
- klogV.Infof("%s:\n%s", prefix, truncateBody(hex.Dump(body)))
+ loggerV.Info(prefix, "body", truncateBody(logger, hex.Dump(body)))
} else {
- klogV.Infof("%s: %s", prefix, truncateBody(string(body)))
+ loggerV.Info(prefix, "body", truncateBody(logger, string(body)))
}
}
}
@@ -1258,7 +1510,7 @@ func (r *Request) newUnstructuredResponseError(body []byte, isTextResponse bool,
}
var groupResource schema.GroupResource
if len(r.resource) > 0 {
- groupResource.Group = r.c.content.GroupVersion.Group
+ groupResource.Group = r.contentConfig.GroupVersion.Group
groupResource.Resource = r.resource
}
return errors.NewGenericServerResponse(
@@ -1303,6 +1555,7 @@ type Result struct {
contentType string
err error
statusCode int
+ logger klog.Logger
decoder runtime.Decoder
}
@@ -1408,7 +1661,7 @@ func (r Result) Error() error {
// to be backwards compatible with old servers that do not return a version, default to "v1"
out, _, err := r.decoder.Decode(r.body, &schema.GroupVersionKind{Version: "v1"}, nil)
if err != nil {
- klog.V(5).Infof("body was not decodable (unable to check for Status): %v", err)
+ r.logger.V(5).Info("Body was not decodable (unable to check for Status)", "err", err)
return r.err
}
switch t := out.(type) {
@@ -1470,3 +1723,10 @@ func ValidatePathSegmentName(name string, prefix bool) []string {
}
return IsValidPathSegmentName(name)
}
+
+func objectKeyFromMeta(objMeta metav1.Object) string {
+ if len(objMeta.GetNamespace()) > 0 {
+ return fmt.Sprintf("%s/%s", objMeta.GetNamespace(), objMeta.GetName())
+ }
+ return objMeta.GetName()
+}
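Note (illustrative sketch, not part of the vendored diff): the new WatchList/Into pair can be consumed roughly as follows, assuming a *rest.Request that already carries the required watch-list query parameters, an enabled WatchListClient feature gate, and an illustrative PodList target; the function name is invented for the example.

    package example

    import (
        "context"

        v1 "k8s.io/api/core/v1"
        "k8s.io/client-go/rest"
    )

    // fetchPodsViaWatchList streams Added events until the initial-events-end bookmark,
    // then decodes the collected items into a typed list via WatchListResult.Into.
    func fetchPodsViaWatchList(ctx context.Context, req *rest.Request) (*v1.PodList, error) {
        res := req.WatchList(ctx) // assumes sendInitialEvents and related options are already set on req
        pods := &v1.PodList{}
        if err := res.Into(pods); err != nil {
            return nil, err // callers typically fall back to a plain List+Watch here
        }
        return pods, nil
    }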
diff --git a/vendor/k8s.io/client-go/rest/url_utils.go b/vendor/k8s.io/client-go/rest/url_utils.go
index c4ce6e3b8f..0a0ab79173 100644
--- a/vendor/k8s.io/client-go/rest/url_utils.go
+++ b/vendor/k8s.io/client-go/rest/url_utils.go
@@ -61,7 +61,7 @@ func DefaultServerURL(host, apiPath string, groupVersion schema.GroupVersion, de
return hostURL, versionedAPIPath, nil
}
-// DefaultVersionedAPIPathFor constructs the default path for the given group version, assuming the given
+// DefaultVersionedAPIPath constructs the default path for the given group version, assuming the given
// API path, following the standard conventions of the Kubernetes API.
func DefaultVersionedAPIPath(apiPath string, groupVersion schema.GroupVersion) string {
versionedAPIPath := path.Join("/", apiPath)
diff --git a/vendor/k8s.io/client-go/rest/urlbackoff.go b/vendor/k8s.io/client-go/rest/urlbackoff.go
index 2f9962d7e5..5b7b4e216e 100644
--- a/vendor/k8s.io/client-go/rest/urlbackoff.go
+++ b/vendor/k8s.io/client-go/rest/urlbackoff.go
@@ -17,6 +17,8 @@ limitations under the License.
package rest
import (
+ "context"
+ "fmt"
"net/url"
"time"
@@ -32,12 +34,24 @@ import (
var serverIsOverloadedSet = sets.NewInt(429)
var maxResponseCode = 499
+//go:generate mockery
+
+// Deprecated: BackoffManager.Sleep ignores the caller's context. Use BackoffManagerWithContext instead.
type BackoffManager interface {
- UpdateBackoff(actualUrl *url.URL, err error, responseCode int)
- CalculateBackoff(actualUrl *url.URL) time.Duration
+ UpdateBackoff(actualURL *url.URL, err error, responseCode int)
+ CalculateBackoff(actualURL *url.URL) time.Duration
Sleep(d time.Duration)
}
+type BackoffManagerWithContext interface {
+ UpdateBackoffWithContext(ctx context.Context, actualURL *url.URL, err error, responseCode int)
+ CalculateBackoffWithContext(ctx context.Context, actualURL *url.URL) time.Duration
+ SleepWithContext(ctx context.Context, d time.Duration)
+}
+
+var _ BackoffManager = &URLBackoff{}
+var _ BackoffManagerWithContext = &URLBackoff{}
+
// URLBackoff struct implements the semantics on top of Backoff which
// we need for URL specific exponential backoff.
type URLBackoff struct {
@@ -49,11 +63,19 @@ type URLBackoff struct {
type NoBackoff struct {
}
-func (n *NoBackoff) UpdateBackoff(actualUrl *url.URL, err error, responseCode int) {
+func (n *NoBackoff) UpdateBackoff(actualURL *url.URL, err error, responseCode int) {
// do nothing.
}
-func (n *NoBackoff) CalculateBackoff(actualUrl *url.URL) time.Duration {
+func (n *NoBackoff) UpdateBackoffWithContext(ctx context.Context, actualURL *url.URL, err error, responseCode int) {
+ // do nothing.
+}
+
+func (n *NoBackoff) CalculateBackoff(actualURL *url.URL) time.Duration {
+ return 0 * time.Second
+}
+
+func (n *NoBackoff) CalculateBackoffWithContext(ctx context.Context, actualURL *url.URL) time.Duration {
return 0 * time.Second
}
@@ -61,10 +83,21 @@ func (n *NoBackoff) Sleep(d time.Duration) {
time.Sleep(d)
}
+func (n *NoBackoff) SleepWithContext(ctx context.Context, d time.Duration) {
+ if d == 0 {
+ return
+ }
+ t := time.NewTimer(d)
+ defer t.Stop()
+ select {
+ case <-ctx.Done():
+ case <-t.C:
+ }
+}
+
// Disable makes the backoff trivial, i.e., sets it to zero. This might be used
// by tests which want to run 1000s of mock requests without slowing down.
func (b *URLBackoff) Disable() {
- klog.V(4).Infof("Disabling backoff strategy")
b.Backoff = flowcontrol.NewBackOff(0*time.Second, 0*time.Second)
}
@@ -76,32 +109,74 @@ func (b *URLBackoff) baseUrlKey(rawurl *url.URL) string {
// in the future.
host, err := url.Parse(rawurl.String())
if err != nil {
- klog.V(4).Infof("Error extracting url: %v", rawurl)
- panic("bad url!")
+ panic(fmt.Sprintf("Error parsing bad URL %q: %v", rawurl, err))
}
return host.Host
}
// UpdateBackoff updates backoff metadata
-func (b *URLBackoff) UpdateBackoff(actualUrl *url.URL, err error, responseCode int) {
+func (b *URLBackoff) UpdateBackoff(actualURL *url.URL, err error, responseCode int) {
+ b.UpdateBackoffWithContext(context.Background(), actualURL, err, responseCode)
+}
+
+// UpdateBackoffWithContext updates backoff metadata
+func (b *URLBackoff) UpdateBackoffWithContext(ctx context.Context, actualURL *url.URL, err error, responseCode int) {
// range for retry counts that we store is [0,13]
if responseCode > maxResponseCode || serverIsOverloadedSet.Has(responseCode) {
- b.Backoff.Next(b.baseUrlKey(actualUrl), b.Backoff.Clock.Now())
+ b.Backoff.Next(b.baseUrlKey(actualURL), b.Backoff.Clock.Now())
return
} else if responseCode >= 300 || err != nil {
- klog.V(4).Infof("Client is returning errors: code %v, error %v", responseCode, err)
+ klog.FromContext(ctx).V(4).Info("Client is returning errors", "code", responseCode, "err", err)
}
//If we got this far, there is no backoff required for this URL anymore.
- b.Backoff.Reset(b.baseUrlKey(actualUrl))
+ b.Backoff.Reset(b.baseUrlKey(actualURL))
}
// CalculateBackoff takes a url and backs off exponentially,
// based on its knowledge of existing failures.
-func (b *URLBackoff) CalculateBackoff(actualUrl *url.URL) time.Duration {
- return b.Backoff.Get(b.baseUrlKey(actualUrl))
+func (b *URLBackoff) CalculateBackoff(actualURL *url.URL) time.Duration {
+ return b.Backoff.Get(b.baseUrlKey(actualURL))
+}
+
+// CalculateBackoffWithContext takes a url and backs off exponentially,
+// based on its knowledge of existing failures.
+func (b *URLBackoff) CalculateBackoffWithContext(ctx context.Context, actualURL *url.URL) time.Duration {
+ return b.Backoff.Get(b.baseUrlKey(actualURL))
}
func (b *URLBackoff) Sleep(d time.Duration) {
b.Backoff.Clock.Sleep(d)
}
+
+func (b *URLBackoff) SleepWithContext(ctx context.Context, d time.Duration) {
+ if d == 0 {
+ return
+ }
+ t := b.Backoff.Clock.NewTimer(d)
+ defer t.Stop()
+ select {
+ case <-ctx.Done():
+ case <-t.C():
+ }
+}
+
+// backoffManagerNopContext wraps a BackoffManager and adds the *WithContext methods.
+type backoffManagerNopContext struct {
+ BackoffManager
+}
+
+var _ BackoffManager = &backoffManagerNopContext{}
+var _ BackoffManagerWithContext = &backoffManagerNopContext{}
+
+func (b *backoffManagerNopContext) UpdateBackoffWithContext(ctx context.Context, actualURL *url.URL, err error, responseCode int) {
+ b.UpdateBackoff(actualURL, err, responseCode)
+}
+
+func (b *backoffManagerNopContext) CalculateBackoffWithContext(ctx context.Context, actualURL *url.URL) time.Duration {
+ return b.CalculateBackoff(actualURL)
+}
+
+func (b *backoffManagerNopContext) SleepWithContext(ctx context.Context, d time.Duration) {
+ b.Sleep(d)
+}
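Note (illustrative sketch, not part of the vendored diff): URLBackoff now satisfies both BackoffManager and BackoffManagerWithContext, so it can be handed to the new Request.BackOffWithContext, letting retry sleeps observe context cancellation through SleepWithContext; the helper name below is invented.

    package example

    import (
        "time"

        "k8s.io/client-go/rest"
        "k8s.io/client-go/util/flowcontrol"
    )

    // withContextAwareBackoff attaches a per-host exponential backoff whose sleeps
    // end early when the request context is cancelled.
    func withContextAwareBackoff(req *rest.Request) *rest.Request {
        b := &rest.URLBackoff{Backoff: flowcontrol.NewBackOff(100*time.Millisecond, 10*time.Second)}
        return req.BackOffWithContext(b)
    }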
diff --git a/vendor/k8s.io/client-go/rest/warnings.go b/vendor/k8s.io/client-go/rest/warnings.go
index ad493659f2..713b2d64d6 100644
--- a/vendor/k8s.io/client-go/rest/warnings.go
+++ b/vendor/k8s.io/client-go/rest/warnings.go
@@ -17,6 +17,7 @@ limitations under the License.
package rest
import (
+ "context"
"fmt"
"io"
"net/http"
@@ -33,8 +34,15 @@ type WarningHandler interface {
HandleWarningHeader(code int, agent string, text string)
}
+// WarningHandlerWithContext is an interface for handling warning headers with
+// support for contextual logging.
+type WarningHandlerWithContext interface {
+ // HandleWarningHeaderWithContext is called with the warn code, agent, and text when a warning header is encountered.
+ HandleWarningHeaderWithContext(ctx context.Context, code int, agent string, text string)
+}
+
var (
- defaultWarningHandler WarningHandler = WarningLogger{}
+ defaultWarningHandler WarningHandlerWithContext = WarningLogger{}
defaultWarningHandlerLock sync.RWMutex
)
@@ -43,33 +51,68 @@ var (
// - NoWarnings suppresses warnings.
// - WarningLogger logs warnings.
// - NewWarningWriter() outputs warnings to the provided writer.
+//
+//logcheck:context // SetDefaultWarningHandlerWithContext should be used instead of SetDefaultWarningHandler in code which supports contextual logging.
func SetDefaultWarningHandler(l WarningHandler) {
+ if l == nil {
+ SetDefaultWarningHandlerWithContext(nil)
+ return
+ }
+ SetDefaultWarningHandlerWithContext(warningLoggerNopContext{l: l})
+}
+
+// SetDefaultWarningHandlerWithContext is a variant of [SetDefaultWarningHandler] which supports contextual logging.
+func SetDefaultWarningHandlerWithContext(l WarningHandlerWithContext) {
defaultWarningHandlerLock.Lock()
defer defaultWarningHandlerLock.Unlock()
defaultWarningHandler = l
}
-func getDefaultWarningHandler() WarningHandler {
+
+func getDefaultWarningHandler() WarningHandlerWithContext {
defaultWarningHandlerLock.RLock()
defer defaultWarningHandlerLock.RUnlock()
l := defaultWarningHandler
return l
}
-// NoWarnings is an implementation of WarningHandler that suppresses warnings.
+type warningLoggerNopContext struct {
+ l WarningHandler
+}
+
+func (w warningLoggerNopContext) HandleWarningHeaderWithContext(_ context.Context, code int, agent string, message string) {
+ w.l.HandleWarningHeader(code, agent, message)
+}
+
+// NoWarnings is an implementation of [WarningHandler] and [WarningHandlerWithContext] that suppresses warnings.
type NoWarnings struct{}
func (NoWarnings) HandleWarningHeader(code int, agent string, message string) {}
+func (NoWarnings) HandleWarningHeaderWithContext(ctx context.Context, code int, agent string, message string) {
+}
+
+var _ WarningHandler = NoWarnings{}
+var _ WarningHandlerWithContext = NoWarnings{}
-// WarningLogger is an implementation of WarningHandler that logs code 299 warnings
+// WarningLogger is an implementation of [WarningHandler] and [WarningHandlerWithContext] that logs code 299 warnings
type WarningLogger struct{}
func (WarningLogger) HandleWarningHeader(code int, agent string, message string) {
if code != 299 || len(message) == 0 {
return
}
- klog.Warning(message)
+ klog.Background().Info("Warning: " + message)
}
+func (WarningLogger) HandleWarningHeaderWithContext(ctx context.Context, code int, agent string, message string) {
+ if code != 299 || len(message) == 0 {
+ return
+ }
+ klog.FromContext(ctx).Info("Warning: " + message)
+}
+
+var _ WarningHandler = WarningLogger{}
+var _ WarningHandlerWithContext = WarningLogger{}
+
type warningWriter struct {
// out is the writer to output warnings to
out io.Writer
@@ -134,14 +177,14 @@ func (w *warningWriter) WarningCount() int {
return w.writtenCount
}
-func handleWarnings(headers http.Header, handler WarningHandler) []net.WarningHeader {
+func handleWarnings(ctx context.Context, headers http.Header, handler WarningHandlerWithContext) []net.WarningHeader {
if handler == nil {
handler = getDefaultWarningHandler()
}
warnings, _ := net.ParseWarningHeaders(headers["Warning"])
for _, warning := range warnings {
- handler.HandleWarningHeader(warning.Code, warning.Agent, warning.Text)
+ handler.HandleWarningHeaderWithContext(ctx, warning.Code, warning.Agent, warning.Text)
}
return warnings
}
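Note (illustrative sketch, not part of the vendored diff): a process-wide contextual warning handler might be installed like this; the handler type is invented, and it simply routes code 299 warning headers to whatever logger the request context carries.

    package example

    import (
        "context"

        "k8s.io/client-go/rest"
        "k8s.io/klog/v2"
    )

    // contextualWarningHandler logs code 299 warning headers via the contextual logger.
    type contextualWarningHandler struct{}

    func (contextualWarningHandler) HandleWarningHeaderWithContext(ctx context.Context, code int, agent, text string) {
        if code != 299 || len(text) == 0 {
            return
        }
        klog.FromContext(ctx).Info("Warning from API server", "agent", agent, "message", text)
    }

    func init() {
        rest.SetDefaultWarningHandlerWithContext(contextualWarningHandler{})
    }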
diff --git a/vendor/k8s.io/client-go/rest/watch/decoder.go b/vendor/k8s.io/client-go/rest/watch/decoder.go
index e95c020b2e..c2b68cbcb7 100644
--- a/vendor/k8s.io/client-go/rest/watch/decoder.go
+++ b/vendor/k8s.io/client-go/rest/watch/decoder.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package versioned
+package watch
import (
"fmt"
@@ -51,7 +51,7 @@ func (d *Decoder) Decode() (watch.EventType, runtime.Object, error) {
return "", nil, err
}
if res != &got {
- return "", nil, fmt.Errorf("unable to decode to metav1.Event")
+ return "", nil, fmt.Errorf("unable to decode to metav1.WatchEvent")
}
switch got.Type {
case string(watch.Added), string(watch.Modified), string(watch.Deleted), string(watch.Error), string(watch.Bookmark):
diff --git a/vendor/k8s.io/client-go/rest/watch/encoder.go b/vendor/k8s.io/client-go/rest/watch/encoder.go
index e55aa12d9b..a95b4985c5 100644
--- a/vendor/k8s.io/client-go/rest/watch/encoder.go
+++ b/vendor/k8s.io/client-go/rest/watch/encoder.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package versioned
+package watch
import (
"encoding/json"
diff --git a/vendor/k8s.io/client-go/rest/with_retry.go b/vendor/k8s.io/client-go/rest/with_retry.go
index eaaadc6a4c..e211c39d41 100644
--- a/vendor/k8s.io/client-go/rest/with_retry.go
+++ b/vendor/k8s.io/client-go/rest/with_retry.go
@@ -209,18 +209,18 @@ func (r *withRetry) Before(ctx context.Context, request *Request) error {
// we do a backoff sleep before the first attempt is made,
// (preserving current behavior).
if request.backoff != nil {
- request.backoff.Sleep(request.backoff.CalculateBackoff(url))
+ request.backoff.SleepWithContext(ctx, request.backoff.CalculateBackoffWithContext(ctx, url))
}
return nil
}
// if we are here, we have made attempt(s) at least once before.
if request.backoff != nil {
- delay := request.backoff.CalculateBackoff(url)
+ delay := request.backoff.CalculateBackoffWithContext(ctx, url)
if r.retryAfter.Wait > delay {
delay = r.retryAfter.Wait
}
- request.backoff.Sleep(delay)
+ request.backoff.SleepWithContext(ctx, delay)
}
// We are retrying the request that we already send to
@@ -231,7 +231,7 @@ func (r *withRetry) Before(ctx context.Context, request *Request) error {
return err
}
- klog.V(4).Infof("Got a Retry-After %s response for attempt %d to %v", r.retryAfter.Wait, r.retryAfter.Attempt, request.URL().String())
+ klog.FromContext(ctx).V(4).Info("Got a Retry-After response", "delay", r.retryAfter.Wait, "attempt", r.retryAfter.Attempt, "url", request.URL())
return nil
}
@@ -258,9 +258,9 @@ func (r *withRetry) After(ctx context.Context, request *Request, resp *http.Resp
if request.c.base != nil {
if err != nil {
- request.backoff.UpdateBackoff(request.URL(), err, 0)
+ request.backoff.UpdateBackoffWithContext(ctx, request.URL(), err, 0)
} else {
- request.backoff.UpdateBackoff(request.URL(), err, resp.StatusCode)
+ request.backoff.UpdateBackoffWithContext(ctx, request.URL(), err, resp.StatusCode)
}
}
}
diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/doc.go b/vendor/k8s.io/client-go/tools/clientcmd/api/doc.go
index fd913a3083..5871575a66 100644
--- a/vendor/k8s.io/client-go/tools/clientcmd/api/doc.go
+++ b/vendor/k8s.io/client-go/tools/clientcmd/api/doc.go
@@ -16,4 +16,4 @@ limitations under the License.
// +k8s:deepcopy-gen=package
-package api // import "k8s.io/client-go/tools/clientcmd/api"
+package api
diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/helpers.go b/vendor/k8s.io/client-go/tools/clientcmd/api/helpers.go
index dd5f918067..261dcacb5b 100644
--- a/vendor/k8s.io/client-go/tools/clientcmd/api/helpers.go
+++ b/vendor/k8s.io/client-go/tools/clientcmd/api/helpers.go
@@ -21,7 +21,6 @@ import (
"errors"
"fmt"
"os"
- "path"
"path/filepath"
"reflect"
"strings"
@@ -115,7 +114,7 @@ func ShortenConfig(config *Config) {
// FlattenConfig changes the config object into a self-contained config (useful for making secrets)
func FlattenConfig(config *Config) error {
for key, authInfo := range config.AuthInfos {
- baseDir, err := MakeAbs(path.Dir(authInfo.LocationOfOrigin), "")
+ baseDir, err := MakeAbs(filepath.Dir(authInfo.LocationOfOrigin), "")
if err != nil {
return err
}
@@ -130,7 +129,7 @@ func FlattenConfig(config *Config) error {
config.AuthInfos[key] = authInfo
}
for key, cluster := range config.Clusters {
- baseDir, err := MakeAbs(path.Dir(cluster.LocationOfOrigin), "")
+ baseDir, err := MakeAbs(filepath.Dir(cluster.LocationOfOrigin), "")
if err != nil {
return err
}
diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/latest/latest.go b/vendor/k8s.io/client-go/tools/clientcmd/api/latest/latest.go
index 35bb5dde19..c575652b1a 100644
--- a/vendor/k8s.io/client-go/tools/clientcmd/api/latest/latest.go
+++ b/vendor/k8s.io/client-go/tools/clientcmd/api/latest/latest.go
@@ -50,7 +50,7 @@ func init() {
Scheme = runtime.NewScheme()
utilruntime.Must(api.AddToScheme(Scheme))
utilruntime.Must(v1.AddToScheme(Scheme))
- yamlSerializer := json.NewYAMLSerializer(json.DefaultMetaFactory, Scheme, Scheme)
+ yamlSerializer := json.NewSerializerWithOptions(json.DefaultMetaFactory, Scheme, Scheme, json.SerializerOptions{Yaml: true})
Codec = versioning.NewDefaultingCodecForScheme(
Scheme,
yamlSerializer,
diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/doc.go b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/doc.go
index 9e483e9d75..3ccdebc1c3 100644
--- a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/doc.go
+++ b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/doc.go
@@ -18,4 +18,4 @@ limitations under the License.
// +k8s:deepcopy-gen=package
// +k8s:defaulter-gen=Kind
-package v1 // import "k8s.io/client-go/tools/clientcmd/api/v1"
+package v1
diff --git a/vendor/k8s.io/client-go/tools/clientcmd/client_config.go b/vendor/k8s.io/client-go/tools/clientcmd/client_config.go
index 952f6d7eb6..cd0a8649b1 100644
--- a/vendor/k8s.io/client-go/tools/clientcmd/client_config.go
+++ b/vendor/k8s.io/client-go/tools/clientcmd/client_config.go
@@ -29,8 +29,6 @@ import (
clientauth "k8s.io/client-go/tools/auth"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
"k8s.io/klog/v2"
-
- "github.com/imdario/mergo"
)
const (
@@ -241,45 +239,37 @@ func (config *DirectClientConfig) ClientConfig() (*restclient.Config, error) {
if err != nil {
return nil, err
}
- mergo.Merge(clientConfig, userAuthPartialConfig, mergo.WithOverride)
+ if err := merge(clientConfig, userAuthPartialConfig); err != nil {
+ return nil, err
+ }
- serverAuthPartialConfig, err := getServerIdentificationPartialConfig(configAuthInfo, configClusterInfo)
- if err != nil {
+ serverAuthPartialConfig := getServerIdentificationPartialConfig(configClusterInfo)
+ if err := merge(clientConfig, serverAuthPartialConfig); err != nil {
return nil, err
}
- mergo.Merge(clientConfig, serverAuthPartialConfig, mergo.WithOverride)
}
return clientConfig, nil
}
// clientauth.Info objects contain both user identification and server identification. We want different precedence orders for
-// both, so we have to split the objects and merge them separately
-// we want this order of precedence for the server identification
-// 1. configClusterInfo (the final result of command line flags and merged .kubeconfig files)
-// 2. configAuthInfo.auth-path (this file can contain information that conflicts with #1, and we want #1 to win the priority)
-// 3. load the ~/.kubernetes_auth file as a default
-func getServerIdentificationPartialConfig(configAuthInfo clientcmdapi.AuthInfo, configClusterInfo clientcmdapi.Cluster) (*restclient.Config, error) {
- mergedConfig := &restclient.Config{}
+// both, so we have to split the objects and merge them separately.
- // configClusterInfo holds the information identify the server provided by .kubeconfig
+// getServerIdentificationPartialConfig extracts server identification information from configClusterInfo
+// (the final result of command line flags and merged .kubeconfig files).
+func getServerIdentificationPartialConfig(configClusterInfo clientcmdapi.Cluster) *restclient.Config {
configClientConfig := &restclient.Config{}
configClientConfig.CAFile = configClusterInfo.CertificateAuthority
configClientConfig.CAData = configClusterInfo.CertificateAuthorityData
configClientConfig.Insecure = configClusterInfo.InsecureSkipTLSVerify
configClientConfig.ServerName = configClusterInfo.TLSServerName
- mergo.Merge(mergedConfig, configClientConfig, mergo.WithOverride)
- return mergedConfig, nil
+ return configClientConfig
}
-// clientauth.Info object contain both user identification and server identification. We want different precedence orders for
-// both, so we have to split the objects and merge them separately
-// we want this order of precedence for user identification
-// 1. configAuthInfo minus auth-path (the final result of command line flags and merged .kubeconfig files)
-// 2. configAuthInfo.auth-path (this file can contain information that conflicts with #1, and we want #1 to win the priority)
-// 3. if there is not enough information to identify the user, load try the ~/.kubernetes_auth file
-// 4. if there is not enough information to identify the user, prompt if possible
+// getUserIdentificationPartialConfig extracts user identification information from configAuthInfo
+// (the final result of command line flags and merged .kubeconfig files);
+// if the information available there is insufficient, it prompts (if possible) for additional information.
func (config *DirectClientConfig) getUserIdentificationPartialConfig(configAuthInfo clientcmdapi.AuthInfo, fallbackReader io.Reader, persistAuthConfig restclient.AuthProviderConfigPersister, configClusterInfo clientcmdapi.Cluster) (*restclient.Config, error) {
mergedConfig := &restclient.Config{}
@@ -338,8 +328,12 @@ func (config *DirectClientConfig) getUserIdentificationPartialConfig(configAuthI
promptedConfig := makeUserIdentificationConfig(*promptedAuthInfo)
previouslyMergedConfig := mergedConfig
mergedConfig = &restclient.Config{}
- mergo.Merge(mergedConfig, promptedConfig, mergo.WithOverride)
- mergo.Merge(mergedConfig, previouslyMergedConfig, mergo.WithOverride)
+ if err := merge(mergedConfig, promptedConfig); err != nil {
+ return nil, err
+ }
+ if err := merge(mergedConfig, previouslyMergedConfig); err != nil {
+ return nil, err
+ }
config.promptedCredentials.username = mergedConfig.Username
config.promptedCredentials.password = mergedConfig.Password
}
@@ -347,7 +341,7 @@ func (config *DirectClientConfig) getUserIdentificationPartialConfig(configAuthI
return mergedConfig, nil
}
-// makeUserIdentificationFieldsConfig returns a client.Config capable of being merged using mergo for only user identification information
+// makeUserIdentificationFieldsConfig returns a client.Config capable of being merged for only user identification information
func makeUserIdentificationConfig(info clientauth.Info) *restclient.Config {
config := &restclient.Config{}
config.Username = info.User
@@ -507,12 +501,16 @@ func (config *DirectClientConfig) getContext() (clientcmdapi.Context, error) {
mergedContext := clientcmdapi.NewContext()
if configContext, exists := contexts[contextName]; exists {
- mergo.Merge(mergedContext, configContext, mergo.WithOverride)
+ if err := merge(mergedContext, configContext); err != nil {
+ return clientcmdapi.Context{}, err
+ }
} else if required {
return clientcmdapi.Context{}, fmt.Errorf("context %q does not exist", contextName)
}
if config.overrides != nil {
- mergo.Merge(mergedContext, config.overrides.Context, mergo.WithOverride)
+ if err := merge(mergedContext, &config.overrides.Context); err != nil {
+ return clientcmdapi.Context{}, err
+ }
}
return *mergedContext, nil
@@ -525,12 +523,16 @@ func (config *DirectClientConfig) getAuthInfo() (clientcmdapi.AuthInfo, error) {
mergedAuthInfo := clientcmdapi.NewAuthInfo()
if configAuthInfo, exists := authInfos[authInfoName]; exists {
- mergo.Merge(mergedAuthInfo, configAuthInfo, mergo.WithOverride)
+ if err := merge(mergedAuthInfo, configAuthInfo); err != nil {
+ return clientcmdapi.AuthInfo{}, err
+ }
} else if required {
return clientcmdapi.AuthInfo{}, fmt.Errorf("auth info %q does not exist", authInfoName)
}
if config.overrides != nil {
- mergo.Merge(mergedAuthInfo, config.overrides.AuthInfo, mergo.WithOverride)
+ if err := merge(mergedAuthInfo, &config.overrides.AuthInfo); err != nil {
+ return clientcmdapi.AuthInfo{}, err
+ }
}
return *mergedAuthInfo, nil
@@ -543,15 +545,21 @@ func (config *DirectClientConfig) getCluster() (clientcmdapi.Cluster, error) {
mergedClusterInfo := clientcmdapi.NewCluster()
if config.overrides != nil {
- mergo.Merge(mergedClusterInfo, config.overrides.ClusterDefaults, mergo.WithOverride)
+ if err := merge(mergedClusterInfo, &config.overrides.ClusterDefaults); err != nil {
+ return clientcmdapi.Cluster{}, err
+ }
}
if configClusterInfo, exists := clusterInfos[clusterInfoName]; exists {
- mergo.Merge(mergedClusterInfo, configClusterInfo, mergo.WithOverride)
+ if err := merge(mergedClusterInfo, configClusterInfo); err != nil {
+ return clientcmdapi.Cluster{}, err
+ }
} else if required {
return clientcmdapi.Cluster{}, fmt.Errorf("cluster %q does not exist", clusterInfoName)
}
if config.overrides != nil {
- mergo.Merge(mergedClusterInfo, config.overrides.ClusterInfo, mergo.WithOverride)
+ if err := merge(mergedClusterInfo, &config.overrides.ClusterInfo); err != nil {
+ return clientcmdapi.Cluster{}, err
+ }
}
// * An override of --insecure-skip-tls-verify=true and no accompanying CA/CA data should clear already-set CA/CA data
diff --git a/vendor/k8s.io/client-go/tools/clientcmd/config.go b/vendor/k8s.io/client-go/tools/clientcmd/config.go
index 31f8963160..2cd213ccb3 100644
--- a/vendor/k8s.io/client-go/tools/clientcmd/config.go
+++ b/vendor/k8s.io/client-go/tools/clientcmd/config.go
@@ -19,7 +19,6 @@ package clientcmd
import (
"errors"
"os"
- "path"
"path/filepath"
"reflect"
"sort"
@@ -148,7 +147,7 @@ func NewDefaultPathOptions() *PathOptions {
EnvVar: RecommendedConfigPathEnvVar,
ExplicitFileFlag: RecommendedConfigPathFlag,
- GlobalFileSubpath: path.Join(RecommendedHomeDir, RecommendedFileName),
+ GlobalFileSubpath: filepath.Join(RecommendedHomeDir, RecommendedFileName),
LoadingRules: NewDefaultClientConfigLoadingRules(),
}
diff --git a/vendor/k8s.io/client-go/tools/clientcmd/doc.go b/vendor/k8s.io/client-go/tools/clientcmd/doc.go
index 424311ee12..c07ace6a58 100644
--- a/vendor/k8s.io/client-go/tools/clientcmd/doc.go
+++ b/vendor/k8s.io/client-go/tools/clientcmd/doc.go
@@ -34,4 +34,4 @@ Sample usage from merged .kubeconfig files (local directory, home directory)
client, err := metav1.New(config)
// ...
*/
-package clientcmd // import "k8s.io/client-go/tools/clientcmd"
+package clientcmd
diff --git a/vendor/k8s.io/client-go/tools/clientcmd/loader.go b/vendor/k8s.io/client-go/tools/clientcmd/loader.go
index b75737f1c9..c900e5fd19 100644
--- a/vendor/k8s.io/client-go/tools/clientcmd/loader.go
+++ b/vendor/k8s.io/client-go/tools/clientcmd/loader.go
@@ -24,7 +24,6 @@ import (
goruntime "runtime"
"strings"
- "github.com/imdario/mergo"
"k8s.io/klog/v2"
"k8s.io/apimachinery/pkg/runtime"
@@ -248,7 +247,9 @@ func (rules *ClientConfigLoadingRules) Load() (*clientcmdapi.Config, error) {
mapConfig := clientcmdapi.NewConfig()
for _, kubeconfig := range kubeconfigs {
- mergo.Merge(mapConfig, kubeconfig, mergo.WithOverride)
+ if err := merge(mapConfig, kubeconfig); err != nil {
+ return nil, err
+ }
}
// merge all of the struct values in the reverse order so that priority is given correctly
@@ -256,14 +257,20 @@ func (rules *ClientConfigLoadingRules) Load() (*clientcmdapi.Config, error) {
nonMapConfig := clientcmdapi.NewConfig()
for i := len(kubeconfigs) - 1; i >= 0; i-- {
kubeconfig := kubeconfigs[i]
- mergo.Merge(nonMapConfig, kubeconfig, mergo.WithOverride)
+ if err := merge(nonMapConfig, kubeconfig); err != nil {
+ return nil, err
+ }
}
// since values are overwritten, but maps values are not, we can merge the non-map config on top of the map config and
// get the values we expect.
config := clientcmdapi.NewConfig()
- mergo.Merge(config, mapConfig, mergo.WithOverride)
- mergo.Merge(config, nonMapConfig, mergo.WithOverride)
+ if err := merge(config, mapConfig); err != nil {
+ return nil, err
+ }
+ if err := merge(config, nonMapConfig); err != nil {
+ return nil, err
+ }
if rules.ResolvePaths() {
if err := ResolveLocalPaths(config); err != nil {
diff --git a/vendor/k8s.io/client-go/tools/clientcmd/merge.go b/vendor/k8s.io/client-go/tools/clientcmd/merge.go
new file mode 100644
index 0000000000..3d74e60292
--- /dev/null
+++ b/vendor/k8s.io/client-go/tools/clientcmd/merge.go
@@ -0,0 +1,121 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package clientcmd
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+)
+
+// recursively merges src into dst:
+// - non-pointer struct fields with any exported fields are recursively merged
+// - non-pointer struct fields with only unexported fields prefer src if the field is non-zero
+// - maps are shallow merged with src keys taking priority over dst
+// - non-zero src fields encountered during recursion that are not maps or structs overwrite and recursion stops
+func merge[T any](dst, src *T) error {
+ if dst == nil {
+ return fmt.Errorf("cannot merge into nil pointer")
+ }
+ if src == nil {
+ return nil
+ }
+ return mergeValues(nil, reflect.ValueOf(dst).Elem(), reflect.ValueOf(src).Elem())
+}
+
+func mergeValues(fieldNames []string, dst, src reflect.Value) error {
+ dstType := dst.Type()
+ // no-op if we can't read the src
+ if !src.IsValid() {
+ return nil
+ }
+ // sanity check types match
+ if srcType := src.Type(); dstType != srcType {
+ return fmt.Errorf("cannot merge mismatched types (%s, %s) at %s", dstType, srcType, strings.Join(fieldNames, "."))
+ }
+
+ switch dstType.Kind() {
+ case reflect.Struct:
+ if hasExportedField(dstType) {
+ // recursively merge
+ for i, n := 0, dstType.NumField(); i < n; i++ {
+ if err := mergeValues(append(fieldNames, dstType.Field(i).Name), dst.Field(i), src.Field(i)); err != nil {
+ return err
+ }
+ }
+ } else if dst.CanSet() {
+ // If all fields are unexported, overwrite with src.
+ // Using src.IsZero() would make more sense but that's not what mergo did.
+ dst.Set(src)
+ }
+
+ case reflect.Map:
+ if dst.CanSet() && !src.IsZero() {
+ // initialize dst if needed
+ if dst.IsZero() {
+ dst.Set(reflect.MakeMap(dstType))
+ }
+ // shallow-merge overwriting dst keys with src keys
+ for _, mapKey := range src.MapKeys() {
+ dst.SetMapIndex(mapKey, src.MapIndex(mapKey))
+ }
+ }
+
+ case reflect.Slice:
+ if dst.CanSet() && src.Len() > 0 {
+ // overwrite dst with non-empty src slice
+ dst.Set(src)
+ }
+
+ case reflect.Pointer:
+ if dst.CanSet() && !src.IsZero() {
+ // overwrite dst with non-zero values for other types
+ if dstType.Elem().Kind() == reflect.Struct {
+ // use struct pointer as-is
+ dst.Set(src)
+ } else {
+ // shallow-copy non-struct pointer (interfaces, primitives, etc)
+ dst.Set(reflect.New(dstType.Elem()))
+ dst.Elem().Set(src.Elem())
+ }
+ }
+
+ default:
+ if dst.CanSet() && !src.IsZero() {
+ // overwrite dst with non-zero values for other types
+ dst.Set(src)
+ }
+ }
+
+ return nil
+}
+
+// hasExportedField returns true if the given type has any exported fields,
+// or if it has any anonymous/embedded struct fields with exported fields
+func hasExportedField(dstType reflect.Type) bool {
+ for i, n := 0, dstType.NumField(); i < n; i++ {
+ field := dstType.Field(i)
+ if field.Anonymous && field.Type.Kind() == reflect.Struct {
+ if hasExportedField(dstType.Field(i).Type) {
+ return true
+ }
+ } else if len(field.PkgPath) == 0 {
+ return true
+ }
+ }
+ return false
+}
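Note (illustrative sketch, not part of the vendored diff): the map rule above, a shallow merge in which src keys overwrite dst keys, is what preserves the previous mergo.WithOverride behavior for kubeconfig maps. A standalone, non-reflective version of just that rule looks like this:

    package example

    // mergeStringMaps shallow-merges src into dst with src keys taking priority,
    // mirroring the reflect.Map case handled by mergeValues above.
    func mergeStringMaps(dst, src map[string]string) map[string]string {
        if dst == nil && len(src) > 0 {
            dst = make(map[string]string, len(src))
        }
        for k, v := range src {
            dst[k] = v
        }
        return dst
    }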
diff --git a/vendor/k8s.io/client-go/transport/cache.go b/vendor/k8s.io/client-go/transport/cache.go
index 7c7f1b330f..b8dd866119 100644
--- a/vendor/k8s.io/client-go/transport/cache.go
+++ b/vendor/k8s.io/client-go/transport/cache.go
@@ -28,6 +28,7 @@ import (
utilnet "k8s.io/apimachinery/pkg/util/net"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/tools/metrics"
+ "k8s.io/klog/v2"
)
// TlsTransportCache caches TLS http.RoundTrippers different configurations. The
@@ -116,10 +117,13 @@ func (c *tlsTransportCache) get(config *Config) (http.RoundTripper, error) {
// If we are reloading files, we need to handle certificate rotation properly
// TODO(jackkleeman): We can also add rotation here when config.HasCertCallback() is true
if config.TLS.ReloadTLSFiles && tlsConfig != nil && tlsConfig.GetClientCertificate != nil {
- dynamicCertDialer := certRotatingDialer(tlsConfig.GetClientCertificate, dial)
+ // The TLS cache is a singleton, so sharing the same name for all of its
+ // background activity seems okay.
+ logger := klog.Background().WithName("tls-transport-cache")
+ dynamicCertDialer := certRotatingDialer(logger, tlsConfig.GetClientCertificate, dial)
tlsConfig.GetClientCertificate = dynamicCertDialer.GetClientCertificate
dial = dynamicCertDialer.connDialer.DialContext
- go dynamicCertDialer.Run(DialerStopCh)
+ go dynamicCertDialer.run(DialerStopCh)
}
proxy := http.ProxyFromEnvironment
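Note (illustrative sketch, not part of the vendored diff): the pattern used above for singleton background work is to derive a named logger once from klog.Background() and pass it to the worker, rather than logging through package-level klog calls; rotationWorker below is a hypothetical stand-in for the cert-rotating dialer.

    package example

    import "k8s.io/klog/v2"

    // rotationWorker logs only through the logger it is handed, so the caller
    // controls the logger's name and sink.
    func rotationWorker(logger klog.Logger, stopCh <-chan struct{}) {
        logger.V(3).Info("Starting worker")
        <-stopCh
        logger.V(3).Info("Shutting down worker")
    }

    func startWorker(stopCh <-chan struct{}) {
        logger := klog.Background().WithName("tls-transport-cache")
        go rotationWorker(logger, stopCh)
    }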
diff --git a/vendor/k8s.io/client-go/transport/cache_go118.go b/vendor/k8s.io/client-go/transport/cache_go118.go
index d21d5137d4..babdaf8b5a 100644
--- a/vendor/k8s.io/client-go/transport/cache_go118.go
+++ b/vendor/k8s.io/client-go/transport/cache_go118.go
@@ -18,7 +18,29 @@ limitations under the License.
package transport
+// this is just to make the "unused" linter rule happy
+var _ = isCacheKeyComparable[tlsCacheKey]
+
// assert at compile time that tlsCacheKey is comparable in a way that will never panic at runtime.
-var _ = isComparable[tlsCacheKey]
+//
+// Golang 1.20 introduced an exception to type constraints that allows comparable, but not
+// necessarily strictly comparable type arguments to satisfy the `comparable` type constraint,
+// thus allowing interfaces to fulfil the `comparable` constraint.
+// However, by definition, "A comparison of two interface values with identical
+// dynamic types causes a run-time panic if that type is not comparable".
+//
+// We want to make sure that comparing two `tlsCacheKey` elements won't cause a
+// runtime panic. In order to do that, we'll force the `tlsCacheKey` to be strictly
+// comparable, thus making it impossible for it to contain interfaces.
+// To assert strict comparability, we'll use another definition: "Type
+// parameters are comparable if they are strictly comparable".
+// Below, we first construct a type parameter from the `tlsCacheKey` type so that
+// we can then push this type parameter to a comparable check, thus checking these
+// are strictly comparable.
+//
+// Original suggestion from https://github.com/golang/go/issues/56548#issuecomment-1317673963
+func isCacheKeyComparable[K tlsCacheKey]() {
+ _ = isComparable[K]
+}
func isComparable[T comparable]() {}
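The compile-time assertion above relies on the rule that a type parameter satisfies `comparable` only when it is strictly comparable. A standalone sketch of the same trick, with hypothetical key types:

package sketch

// strictKey contains no interfaces, so it is strictly comparable.
type strictKey struct {
    host string
    port int
}

func isComparable[T comparable]() {}

// Routing the type through a type parameter forces strict comparability:
// type parameters only satisfy `comparable` when they are strictly comparable.
func isStrictKeyComparable[K strictKey]() {
    _ = isComparable[K]
}

var _ = isStrictKeyComparable[strictKey]

// A key containing an interface would still be comparable (usable as a map key),
// but the same construct would no longer compile:
//
//  type looseKey struct{ v any }
//  func isLooseKeyComparable[K looseKey]() { _ = isComparable[K] } // compile error: K does not satisfy comparable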
diff --git a/vendor/k8s.io/client-go/transport/cert_rotation.go b/vendor/k8s.io/client-go/transport/cert_rotation.go
index dc22b6ec4c..e343f42be4 100644
--- a/vendor/k8s.io/client-go/transport/cert_rotation.go
+++ b/vendor/k8s.io/client-go/transport/cert_rotation.go
@@ -19,7 +19,6 @@ package transport
import (
"bytes"
"crypto/tls"
- "fmt"
"reflect"
"sync"
"time"
@@ -40,6 +39,7 @@ var CertCallbackRefreshDuration = 5 * time.Minute
type reloadFunc func(*tls.CertificateRequestInfo) (*tls.Certificate, error)
type dynamicClientCert struct {
+ logger klog.Logger
clientCert *tls.Certificate
certMtx sync.RWMutex
@@ -47,14 +47,18 @@ type dynamicClientCert struct {
connDialer *connrotation.Dialer
// queue only ever has one item, but it has nice error handling backoff/retry semantics
- queue workqueue.RateLimitingInterface
+ queue workqueue.TypedRateLimitingInterface[string]
}
-func certRotatingDialer(reload reloadFunc, dial utilnet.DialFunc) *dynamicClientCert {
+func certRotatingDialer(logger klog.Logger, reload reloadFunc, dial utilnet.DialFunc) *dynamicClientCert {
d := &dynamicClientCert{
+ logger: logger,
reload: reload,
connDialer: connrotation.NewDialer(connrotation.DialFunc(dial)),
- queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "DynamicClientCertificate"),
+ queue: workqueue.NewTypedRateLimitingQueueWithConfig(
+ workqueue.DefaultTypedControllerRateLimiter[string](),
+ workqueue.TypedRateLimitingQueueConfig[string]{Name: "DynamicClientCertificate"},
+ ),
}
return d
@@ -85,7 +89,7 @@ func (c *dynamicClientCert) loadClientCert() (*tls.Certificate, error) {
return cert, nil
}
- klog.V(1).Infof("certificate rotation detected, shutting down client connections to start using new credentials")
+ c.logger.V(1).Info("Certificate rotation detected, shutting down client connections to start using new credentials")
c.connDialer.CloseAll()
return cert, nil
@@ -130,12 +134,12 @@ func byteMatrixEqual(left, right [][]byte) bool {
}
// run starts the controller and blocks until stopCh is closed.
-func (c *dynamicClientCert) Run(stopCh <-chan struct{}) {
- defer utilruntime.HandleCrash()
+func (c *dynamicClientCert) run(stopCh <-chan struct{}) {
+ defer utilruntime.HandleCrashWithLogger(c.logger)
defer c.queue.ShutDown()
- klog.V(3).Infof("Starting client certificate rotation controller")
- defer klog.V(3).Infof("Shutting down client certificate rotation controller")
+ c.logger.V(3).Info("Starting client certificate rotation controller")
+ defer c.logger.V(3).Info("Shutting down client certificate rotation controller")
go wait.Until(c.runWorker, time.Second, stopCh)
@@ -165,7 +169,7 @@ func (c *dynamicClientCert) processNextWorkItem() bool {
return true
}
- utilruntime.HandleError(fmt.Errorf("%v failed with : %v", dsKey, err))
+ utilruntime.HandleErrorWithLogger(c.logger, err, "Loading client cert failed", "key", dsKey)
c.queue.AddRateLimited(dsKey)
return true
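The rotation dialer now uses the typed workqueue with string keys. A rough usage sketch of that API (queue name and key are illustrative), mirroring the add/get/forget/done cycle in processNextWorkItem:

package main

import (
    "fmt"

    "k8s.io/client-go/util/workqueue"
)

func main() {
    q := workqueue.NewTypedRateLimitingQueueWithConfig(
        workqueue.DefaultTypedControllerRateLimiter[string](),
        workqueue.TypedRateLimitingQueueConfig[string]{Name: "example"},
    )
    defer q.ShutDown()

    q.AddRateLimited("cert-key") // retried with backoff if processing keeps failing

    item, shutdown := q.Get() // item is a string, no type assertion needed
    if shutdown {
        return
    }
    fmt.Println("processing", item)
    q.Forget(item) // success: reset the per-item backoff
    q.Done(item)
}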
diff --git a/vendor/k8s.io/client-go/transport/round_trippers.go b/vendor/k8s.io/client-go/transport/round_trippers.go
index e2d1dcc9a9..39fcebd941 100644
--- a/vendor/k8s.io/client-go/transport/round_trippers.go
+++ b/vendor/k8s.io/client-go/transport/round_trippers.go
@@ -21,10 +21,12 @@ import (
"fmt"
"net/http"
"net/http/httptrace"
+ "sort"
"strings"
"sync"
"time"
+ "github.com/go-logr/logr"
"golang.org/x/oauth2"
utilnet "k8s.io/apimachinery/pkg/util/net"
@@ -68,24 +70,22 @@ func HTTPWrappersForConfig(config *Config, rt http.RoundTripper) (http.RoundTrip
return rt, nil
}
-// DebugWrappers wraps a round tripper and logs based on the current log level.
+// DebugWrappers potentially wraps a round tripper with a wrapper that logs
+// based on the log level in the context of each individual request.
+//
+// At the moment, wrapping depends on the global log verbosity and is done
+// if that verbosity is >= 6. This may change in the future.
func DebugWrappers(rt http.RoundTripper) http.RoundTripper {
- switch {
- case bool(klog.V(9).Enabled()):
- rt = NewDebuggingRoundTripper(rt, DebugCurlCommand, DebugURLTiming, DebugDetailedTiming, DebugResponseHeaders)
- case bool(klog.V(8).Enabled()):
- rt = NewDebuggingRoundTripper(rt, DebugJustURL, DebugRequestHeaders, DebugResponseStatus, DebugResponseHeaders)
- case bool(klog.V(7).Enabled()):
- rt = NewDebuggingRoundTripper(rt, DebugJustURL, DebugRequestHeaders, DebugResponseStatus)
- case bool(klog.V(6).Enabled()):
- rt = NewDebuggingRoundTripper(rt, DebugURLTiming)
+ //nolint:logcheck // The actual logging is done with a different logger, so only checking here is okay.
+ if klog.V(6).Enabled() {
+ rt = NewDebuggingRoundTripper(rt, DebugByContext)
}
-
return rt
}
type authProxyRoundTripper struct {
username string
+ uid string
groups []string
extra map[string][]string
@@ -98,15 +98,17 @@ var _ utilnet.RoundTripperWrapper = &authProxyRoundTripper{}
// authentication terminating proxy cases
// assuming you pull the user from the context:
// username is the user.Info.GetName() of the user
+// uid is the user.Info.GetUID() of the user
// groups is the user.Info.GetGroups() of the user
// extra is the user.Info.GetExtra() of the user
// extra can contain any additional information that the authenticator
// thought was interesting, for example authorization scopes.
// In order to faithfully round-trip through an impersonation flow, these keys
// MUST be lowercase.
-func NewAuthProxyRoundTripper(username string, groups []string, extra map[string][]string, rt http.RoundTripper) http.RoundTripper {
+func NewAuthProxyRoundTripper(username, uid string, groups []string, extra map[string][]string, rt http.RoundTripper) http.RoundTripper {
return &authProxyRoundTripper{
username: username,
+ uid: uid,
groups: groups,
extra: extra,
rt: rt,
@@ -115,14 +117,15 @@ func NewAuthProxyRoundTripper(username string, groups []string, extra map[string
func (rt *authProxyRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
req = utilnet.CloneRequest(req)
- SetAuthProxyHeaders(req, rt.username, rt.groups, rt.extra)
+ SetAuthProxyHeaders(req, rt.username, rt.uid, rt.groups, rt.extra)
return rt.rt.RoundTrip(req)
}
// SetAuthProxyHeaders stomps the auth proxy header fields. It mutates its argument.
-func SetAuthProxyHeaders(req *http.Request, username string, groups []string, extra map[string][]string) {
+func SetAuthProxyHeaders(req *http.Request, username, uid string, groups []string, extra map[string][]string) {
req.Header.Del("X-Remote-User")
+ req.Header.Del("X-Remote-Uid")
req.Header.Del("X-Remote-Group")
for key := range req.Header {
if strings.HasPrefix(strings.ToLower(key), strings.ToLower("X-Remote-Extra-")) {
@@ -131,6 +134,9 @@ func SetAuthProxyHeaders(req *http.Request, username string, groups []string, ex
}
req.Header.Set("X-Remote-User", username)
+ if len(uid) > 0 {
+ req.Header.Set("X-Remote-Uid", uid)
+ }
for _, group := range groups {
req.Header.Add("X-Remote-Group", group)
}
@@ -373,14 +379,17 @@ func (r *requestInfo) toCurl() string {
}
}
- return fmt.Sprintf("curl -v -X%s %s '%s'", r.RequestVerb, headers, r.RequestURL)
+ // Newline at the end makes this look better in the text log output (the
+ // only usage of this method) because it becomes a multi-line string with
+ // no quoting.
+ return fmt.Sprintf("curl -v -X%s %s '%s'\n", r.RequestVerb, headers, r.RequestURL)
}
// debuggingRoundTripper will display information about the requests passing
// through it based on what is configured
type debuggingRoundTripper struct {
delegatedRoundTripper http.RoundTripper
- levels map[DebugLevel]bool
+ levels int
}
var _ utilnet.RoundTripperWrapper = &debuggingRoundTripper{}
@@ -405,6 +414,26 @@ const (
DebugResponseHeaders
// DebugDetailedTiming will add to the debug output the duration of the HTTP requests events.
DebugDetailedTiming
+ // DebugByContext will add any of the above depending on the verbosity of the per-request logger obtained from the requests context.
+ //
+ // Can be combined in NewDebuggingRoundTripper with some of the other options, in which case the
+ // debug roundtripper will always log what is requested there plus the information that gets
+ // enabled by the context's log verbosity.
+ DebugByContext
+)
+
+// Different log levels include different sets of information.
+//
+// Not exported because the exact content of log messages is not part
+// of the package API.
+const (
+ levelsV6 = (1 << DebugURLTiming)
+ // Logging *less* information for the response at level 7 compared to 6 replicates prior behavior:
+ // https://github.com/kubernetes/kubernetes/blob/2b472fe4690c83a2b343995f88050b2a3e9ff0fa/staging/src/k8s.io/client-go/transport/round_trippers.go#L79
+ // Presumably that was done because verb and URL are already in the request log entry.
+ levelsV7 = (1 << DebugJustURL) | (1 << DebugRequestHeaders) | (1 << DebugResponseStatus)
+ levelsV8 = (1 << DebugJustURL) | (1 << DebugRequestHeaders) | (1 << DebugResponseStatus) | (1 << DebugResponseHeaders)
+ levelsV9 = (1 << DebugCurlCommand) | (1 << DebugURLTiming) | (1 << DebugDetailedTiming) | (1 << DebugResponseHeaders)
)
// NewDebuggingRoundTripper allows to display in the logs output debug information
@@ -412,10 +441,9 @@ const (
func NewDebuggingRoundTripper(rt http.RoundTripper, levels ...DebugLevel) http.RoundTripper {
drt := &debuggingRoundTripper{
delegatedRoundTripper: rt,
- levels: make(map[DebugLevel]bool, len(levels)),
}
for _, v := range levels {
- drt.levels[v] = true
+ drt.levels |= 1 << v
}
return drt
}
@@ -457,27 +485,51 @@ func maskValue(key string, value string) string {
}
func (rt *debuggingRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
+ logger := klog.FromContext(req.Context())
+ levels := rt.levels
+
+ // When logging depends on the context, it uses the verbosity of the per-context logger
+ // and a hard-coded mapping of verbosity to debug details. Otherwise all messages
+ // are logged as V(0).
+ if len(kvs) > 0 {
+ logger.Info("Request", kvs...)
}
startTime := time.Now()
- if rt.levels[DebugDetailedTiming] {
+ if len(kvs) > 0 {
+ logger.Info("Response", kvs...)
+ }
+
+ return response, err
+}
- if rt.levels[DebugResponseStatus] {
- klog.Infof("Response Status: %s in %d milliseconds", reqInfo.ResponseStatus, reqInfo.Duration.Nanoseconds()/int64(time.Millisecond))
+// headersMap formats headers sorted and across multiple lines with no quoting
+// when using string output and as JSON when using zapr.
+type headersMap http.Header
+
+// newHeadersMap masks all sensitive values. This has to be done before
+// passing the map to a logger because while in practice all loggers
+// either use String or MarshalLog, that is not guaranteed.
+func newHeadersMap(header http.Header) headersMap {
+ h := make(headersMap, len(header))
+ for key, values := range header {
+ maskedValues := make([]string, 0, len(values))
+ for _, value := range values {
+ maskedValues = append(maskedValues, maskValue(key, value))
+ }
+ h[key] = maskedValues
}
- if rt.levels[DebugResponseHeaders] {
- klog.Info("Response Headers:")
- for key, values := range reqInfo.ResponseHeaders {
- for _, value := range values {
- klog.Infof(" %s: %s", key, value)
- }
+ return h
+}
+
+var _ fmt.Stringer = headersMap{}
+var _ logr.Marshaler = headersMap{}
+
+func (h headersMap) String() string {
+ // The fixed size typically avoids memory allocations when it is large enough.
+ keys := make([]string, 0, 20)
+ for key := range h {
+ keys = append(keys, key)
+ }
+ sort.Strings(keys)
+ var buffer strings.Builder
+ for _, key := range keys {
+ for _, value := range h[key] {
+ _, _ = buffer.WriteString(key)
+ _, _ = buffer.WriteString(": ")
+ _, _ = buffer.WriteString(value)
+ _, _ = buffer.WriteString("\n")
}
}
+ return buffer.String()
+}
- return response, err
+func (h headersMap) MarshalLog() any {
+ return map[string][]string(h)
}
func (rt *debuggingRoundTripper) WrappedRoundTripper() http.RoundTripper {
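With DebugByContext, how much the debugging round tripper logs follows the verbosity enabled on the logger found in each request's context. A hedged sketch of caller-side wiring (the URL and transport are placeholders):

package main

import (
    "context"
    "net/http"

    "k8s.io/client-go/transport"
    "k8s.io/klog/v2"
)

func main() {
    rt := transport.NewDebuggingRoundTripper(http.DefaultTransport, transport.DebugByContext)

    // The debug detail for this request follows the verbosity enabled on the
    // logger carried by its context (e.g. -v=8 also logs headers and status).
    ctx := klog.NewContext(context.Background(), klog.Background())
    req, err := http.NewRequestWithContext(ctx, http.MethodGet, "https://example.invalid/healthz", nil)
    if err != nil {
        panic(err)
    }
    if resp, err := rt.RoundTrip(req); err == nil {
        resp.Body.Close()
    }
}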
diff --git a/vendor/k8s.io/client-go/transport/token_source.go b/vendor/k8s.io/client-go/transport/token_source.go
index 8e312800f7..469dd81765 100644
--- a/vendor/k8s.io/client-go/transport/token_source.go
+++ b/vendor/k8s.io/client-go/transport/token_source.go
@@ -182,7 +182,10 @@ func (ts *cachingTokenSource) Token() (*oauth2.Token, error) {
if ts.tok == nil {
return nil, err
}
- klog.Errorf("Unable to rotate token: %v", err)
+ // Not using a caller-provided logger isn't ideal, but impossible to fix
+ // without new APIs that go up all the way to HTTPWrappersForConfig.
+ // This is currently deemed not worth changing (too much effort, not enough benefit).
+ klog.TODO().Error(err, "Unable to rotate token")
return ts.tok, nil
}
diff --git a/vendor/k8s.io/client-go/transport/transport.go b/vendor/k8s.io/client-go/transport/transport.go
index 4770331a0e..8fdcc5700b 100644
--- a/vendor/k8s.io/client-go/transport/transport.go
+++ b/vendor/k8s.io/client-go/transport/transport.go
@@ -353,7 +353,7 @@ func tryCancelRequest(rt http.RoundTripper, req *http.Request) {
case utilnet.RoundTripperWrapper:
tryCancelRequest(rt.WrappedRoundTripper(), req)
default:
- klog.Warningf("Unable to cancel request for %T", rt)
+ klog.FromContext(req.Context()).Info("Warning: unable to cancel request", "roundTripperType", fmt.Sprintf("%T", rt))
}
}
diff --git a/vendor/k8s.io/client-go/util/cert/cert.go b/vendor/k8s.io/client-go/util/cert/cert.go
index 91e171271a..1220461264 100644
--- a/vendor/k8s.io/client-go/util/cert/cert.go
+++ b/vendor/k8s.io/client-go/util/cert/cert.go
@@ -90,11 +90,37 @@ func NewSelfSignedCACert(cfg Config, key crypto.Signer) (*x509.Certificate, erro
return x509.ParseCertificate(certDERBytes)
}
+// SelfSignedCertKeyOptions contains configuration parameters for generating self-signed certificates.
+type SelfSignedCertKeyOptions struct {
+ // Host is required, and identifies the host of the serving certificate. Can be a DNS name or IP address.
+ Host string
+ // AlternateIPs is optional, and identifies additional IPs the serving certificate should be valid for.
+ AlternateIPs []net.IP
+ // AlternateDNS is optional, and identifies additional DNS names the serving certificate should be valid for.
+ AlternateDNS []string
+
+ // MaxAge controls the duration of the issued certificate.
+ // Defaults to 1 year if unset.
+ // Ignored if FixtureDirectory is set.
+ MaxAge time.Duration
+
+ // FixtureDirectory is intended for use in tests.
+ // If non-empty, it is a directory path which can contain pre-generated certs. The format is:
+ // <host>_<ip>-<ip>_<alternateDNS>-<alternateDNS>.crt
+ // <host>_<ip>-<ip>_<alternateDNS>-<alternateDNS>.key
+ // Certs/keys not existing in that directory are created with a duration of 100 years.
+ FixtureDirectory string
+}
+
// GenerateSelfSignedCertKey creates a self-signed certificate and key for the given host.
// Host may be an IP or a DNS name
// You may also specify additional subject alt names (either ip or dns names) for the certificate.
func GenerateSelfSignedCertKey(host string, alternateIPs []net.IP, alternateDNS []string) ([]byte, []byte, error) {
- return GenerateSelfSignedCertKeyWithFixtures(host, alternateIPs, alternateDNS, "")
+ return GenerateSelfSignedCertKeyWithOptions(SelfSignedCertKeyOptions{
+ Host: host,
+ AlternateIPs: alternateIPs,
+ AlternateDNS: alternateDNS,
+ })
}
// GenerateSelfSignedCertKeyWithFixtures creates a self-signed certificate and key for the given host.
@@ -106,8 +132,26 @@ func GenerateSelfSignedCertKey(host string, alternateIPs []net.IP, alternateDNS
// <host>_<ip>-<ip>_<alternateDNS>-<alternateDNS>.key
// Certs/keys not existing in that directory are created.
func GenerateSelfSignedCertKeyWithFixtures(host string, alternateIPs []net.IP, alternateDNS []string, fixtureDirectory string) ([]byte, []byte, error) {
+ return GenerateSelfSignedCertKeyWithOptions(SelfSignedCertKeyOptions{
+ Host: host,
+ AlternateIPs: alternateIPs,
+ AlternateDNS: alternateDNS,
+ FixtureDirectory: fixtureDirectory,
+ })
+}
+
+// GenerateSelfSignedCertKeyWithOptions generates a self-signed certificate and key based on the provided options.
+func GenerateSelfSignedCertKeyWithOptions(opts SelfSignedCertKeyOptions) ([]byte, []byte, error) {
+ host := opts.Host
+ alternateIPs := opts.AlternateIPs
+ alternateDNS := opts.AlternateDNS
+ fixtureDirectory := opts.FixtureDirectory
+ maxAge := opts.MaxAge
+ if maxAge == 0 {
+ maxAge = 365 * 24 * time.Hour
+ }
+
validFrom := time.Now().Add(-time.Hour) // valid an hour earlier to avoid flakes due to clock skew
- maxAge := time.Hour * 24 * 365 // one year self-signed certs
baseName := fmt.Sprintf("%s_%s_%s", host, strings.Join(ipsToStrings(alternateIPs), "-"), strings.Join(alternateDNS, "-"))
certFixturePath := filepath.Join(fixtureDirectory, baseName+".crt")
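GenerateSelfSignedCertKeyWithOptions lets callers bound the certificate lifetime instead of the previously fixed one year. A small usage sketch with hypothetical host and output paths:

package main

import (
    "net"
    "os"
    "time"

    "k8s.io/client-go/util/cert"
)

func main() {
    certPEM, keyPEM, err := cert.GenerateSelfSignedCertKeyWithOptions(cert.SelfSignedCertKeyOptions{
        Host:         "localhost",
        AlternateIPs: []net.IP{net.ParseIP("127.0.0.1")},
        AlternateDNS: []string{"example.local"},
        MaxAge:       90 * 24 * time.Hour, // shorter than the 1-year default
    })
    if err != nil {
        panic(err)
    }
    _ = os.WriteFile("tls.crt", certPEM, 0o600)
    _ = os.WriteFile("tls.key", keyPEM, 0o600)
}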
diff --git a/vendor/k8s.io/client-go/util/flowcontrol/backoff.go b/vendor/k8s.io/client-go/util/flowcontrol/backoff.go
index 82e4c4c408..7cb46717c2 100644
--- a/vendor/k8s.io/client-go/util/flowcontrol/backoff.go
+++ b/vendor/k8s.io/client-go/util/flowcontrol/backoff.go
@@ -22,7 +22,6 @@ import (
"time"
"k8s.io/utils/clock"
- testingclock "k8s.io/utils/clock/testing"
)
type backoffEntry struct {
@@ -32,7 +31,12 @@ type backoffEntry struct {
type Backoff struct {
sync.RWMutex
- Clock clock.Clock
+ Clock clock.Clock
+ // HasExpiredFunc controls the logic that determines whether the backoff
+ // counter should be reset, and when to GC old backoff entries. If nil, the
+ // default hasExpired function will restart the backoff factor to the
+ // beginning after observing time has passed at least equal to 2*maxDuration
+ HasExpiredFunc func(eventTime time.Time, lastUpdate time.Time, maxDuration time.Duration) bool
defaultDuration time.Duration
maxDuration time.Duration
perItemBackoff map[string]*backoffEntry
@@ -44,7 +48,7 @@ type Backoff struct {
maxJitterFactor float64
}
-func NewFakeBackOff(initial, max time.Duration, tc *testingclock.FakeClock) *Backoff {
+func NewFakeBackOff(initial, max time.Duration, tc clock.Clock) *Backoff {
return newBackoff(tc, initial, max, 0.0)
}
@@ -52,7 +56,7 @@ func NewBackOff(initial, max time.Duration) *Backoff {
return NewBackOffWithJitter(initial, max, 0.0)
}
-func NewFakeBackOffWithJitter(initial, max time.Duration, tc *testingclock.FakeClock, maxJitterFactor float64) *Backoff {
+func NewFakeBackOffWithJitter(initial, max time.Duration, tc clock.Clock, maxJitterFactor float64) *Backoff {
return newBackoff(tc, initial, max, maxJitterFactor)
}
@@ -93,7 +97,7 @@ func (p *Backoff) Next(id string, eventTime time.Time) {
p.Lock()
defer p.Unlock()
entry, ok := p.perItemBackoff[id]
- if !ok || hasExpired(eventTime, entry.lastUpdate, p.maxDuration) {
+ if !ok || p.hasExpired(eventTime, entry.lastUpdate, p.maxDuration) {
entry = p.initEntryUnsafe(id)
entry.backoff += p.jitter(entry.backoff)
} else {
@@ -119,7 +123,7 @@ func (p *Backoff) IsInBackOffSince(id string, eventTime time.Time) bool {
if !ok {
return false
}
- if hasExpired(eventTime, entry.lastUpdate, p.maxDuration) {
+ if p.hasExpired(eventTime, entry.lastUpdate, p.maxDuration) {
return false
}
return p.Clock.Since(eventTime) < entry.backoff
@@ -133,21 +137,21 @@ func (p *Backoff) IsInBackOffSinceUpdate(id string, eventTime time.Time) bool {
if !ok {
return false
}
- if hasExpired(eventTime, entry.lastUpdate, p.maxDuration) {
+ if p.hasExpired(eventTime, entry.lastUpdate, p.maxDuration) {
return false
}
return eventTime.Sub(entry.lastUpdate) < entry.backoff
}
-// Garbage collect records that have aged past maxDuration. Backoff users are expected
-// to invoke this periodically.
+// Garbage collect records that have aged past their expiration, which defaults
+// to 2*maxDuration (see hasExpired godoc). Backoff users are expected to invoke
+// this periodically.
func (p *Backoff) GC() {
p.Lock()
defer p.Unlock()
now := p.Clock.Now()
for id, entry := range p.perItemBackoff {
- if now.Sub(entry.lastUpdate) > p.maxDuration*2 {
- // GC when entry has not been updated for 2*maxDuration
+ if p.hasExpired(now, entry.lastUpdate, p.maxDuration) {
delete(p.perItemBackoff, id)
}
}
@@ -174,7 +178,10 @@ func (p *Backoff) jitter(delay time.Duration) time.Duration {
return time.Duration(p.rand.Float64() * p.maxJitterFactor * float64(delay))
}
-// After 2*maxDuration we restart the backoff factor to the beginning
-func hasExpired(eventTime time.Time, lastUpdate time.Time, maxDuration time.Duration) bool {
+// Unless an alternate function is provided, after 2*maxDuration we restart the backoff factor to the beginning
+func (p *Backoff) hasExpired(eventTime time.Time, lastUpdate time.Time, maxDuration time.Duration) bool {
+ if p.HasExpiredFunc != nil {
+ return p.HasExpiredFunc(eventTime, lastUpdate, maxDuration)
+ }
return eventTime.Sub(lastUpdate) > maxDuration*2 // consider stable if it's ok for twice the maxDuration
}
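HasExpiredFunc makes the reset and GC policy pluggable. A sketch (caller code, not part of the vendored change) that resets the per-item backoff after maxDuration rather than the default 2*maxDuration:

package main

import (
    "fmt"
    "time"

    "k8s.io/client-go/util/flowcontrol"
)

func main() {
    b := flowcontrol.NewBackOff(time.Second, 30*time.Second)

    // Reset (and GC) entries after maxDuration instead of the default 2*maxDuration.
    b.HasExpiredFunc = func(eventTime, lastUpdate time.Time, maxDuration time.Duration) bool {
        return eventTime.Sub(lastUpdate) > maxDuration
    }

    now := time.Now()
    b.Next("item", now)
    fmt.Println(b.Get("item")) // 1s: first backoff step for this item
}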
diff --git a/vendor/k8s.io/client-go/util/workqueue/default_rate_limiters.go b/vendor/k8s.io/client-go/util/workqueue/default_rate_limiters.go
index efda7c197f..1f9567881c 100644
--- a/vendor/k8s.io/client-go/util/workqueue/default_rate_limiters.go
+++ b/vendor/k8s.io/client-go/util/workqueue/default_rate_limiters.go
@@ -24,49 +24,66 @@ import (
"golang.org/x/time/rate"
)
-type RateLimiter interface {
+// Deprecated: RateLimiter is deprecated, use TypedRateLimiter instead.
+type RateLimiter TypedRateLimiter[any]
+
+type TypedRateLimiter[T comparable] interface {
// When gets an item and gets to decide how long that item should wait
- When(item interface{}) time.Duration
+ When(item T) time.Duration
// Forget indicates that an item is finished being retried. Doesn't matter whether it's for failing
// or for success, we'll stop tracking it
- Forget(item interface{})
+ Forget(item T)
// NumRequeues returns back how many failures the item has had
- NumRequeues(item interface{}) int
+ NumRequeues(item T) int
}
// DefaultControllerRateLimiter is a no-arg constructor for a default rate limiter for a workqueue. It has
// both overall and per-item rate limiting. The overall is a token bucket and the per-item is exponential
+//
+// Deprecated: Use DefaultTypedControllerRateLimiter instead.
func DefaultControllerRateLimiter() RateLimiter {
- return NewMaxOfRateLimiter(
- NewItemExponentialFailureRateLimiter(5*time.Millisecond, 1000*time.Second),
+ return DefaultTypedControllerRateLimiter[any]()
+}
+
+// DefaultTypedControllerRateLimiter is a no-arg constructor for a default rate limiter for a workqueue. It has
+// both overall and per-item rate limiting. The overall is a token bucket and the per-item is exponential
+func DefaultTypedControllerRateLimiter[T comparable]() TypedRateLimiter[T] {
+ return NewTypedMaxOfRateLimiter(
+ NewTypedItemExponentialFailureRateLimiter[T](5*time.Millisecond, 1000*time.Second),
// 10 qps, 100 bucket size. This is only for retry speed and its only the overall factor (not per item)
- &BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(10), 100)},
+ &TypedBucketRateLimiter[T]{Limiter: rate.NewLimiter(rate.Limit(10), 100)},
)
}
-// BucketRateLimiter adapts a standard bucket to the workqueue ratelimiter API
-type BucketRateLimiter struct {
+// Deprecated: BucketRateLimiter is deprecated, use TypedBucketRateLimiter instead.
+type BucketRateLimiter = TypedBucketRateLimiter[any]
+
+// TypedBucketRateLimiter adapts a standard bucket to the workqueue ratelimiter API
+type TypedBucketRateLimiter[T comparable] struct {
*rate.Limiter
}
var _ RateLimiter = &BucketRateLimiter{}
-func (r *BucketRateLimiter) When(item interface{}) time.Duration {
+func (r *TypedBucketRateLimiter[T]) When(item T) time.Duration {
return r.Limiter.Reserve().Delay()
}
-func (r *BucketRateLimiter) NumRequeues(item interface{}) int {
+func (r *TypedBucketRateLimiter[T]) NumRequeues(item T) int {
return 0
}
-func (r *BucketRateLimiter) Forget(item interface{}) {
+func (r *TypedBucketRateLimiter[T]) Forget(item T) {
}
-// ItemExponentialFailureRateLimiter does a simple baseDelay*2^<num-failures> limit
+// Deprecated: ItemExponentialFailureRateLimiter is deprecated, use TypedItemExponentialFailureRateLimiter instead.
+type ItemExponentialFailureRateLimiter = TypedItemExponentialFailureRateLimiter[any]
+
+// TypedItemExponentialFailureRateLimiter does a simple baseDelay*2^<num-failures> limit
// dealing with max failures and expiration are up to the caller
-type ItemExponentialFailureRateLimiter struct {
+type TypedItemExponentialFailureRateLimiter[T comparable] struct {
failuresLock sync.Mutex
- failures map[interface{}]int
+ failures map[T]int
baseDelay time.Duration
maxDelay time.Duration
@@ -74,19 +91,29 @@ type ItemExponentialFailureRateLimiter struct {
var _ RateLimiter = &ItemExponentialFailureRateLimiter{}
+// Deprecated: NewItemExponentialFailureRateLimiter is deprecated, use NewTypedItemExponentialFailureRateLimiter instead.
func NewItemExponentialFailureRateLimiter(baseDelay time.Duration, maxDelay time.Duration) RateLimiter {
- return &ItemExponentialFailureRateLimiter{
- failures: map[interface{}]int{},
+ return NewTypedItemExponentialFailureRateLimiter[any](baseDelay, maxDelay)
+}
+
+func NewTypedItemExponentialFailureRateLimiter[T comparable](baseDelay time.Duration, maxDelay time.Duration) TypedRateLimiter[T] {
+ return &TypedItemExponentialFailureRateLimiter[T]{
+ failures: map[T]int{},
baseDelay: baseDelay,
maxDelay: maxDelay,
}
}
+// Deprecated: DefaultItemBasedRateLimiter is deprecated, use DefaultTypedItemBasedRateLimiter instead.
func DefaultItemBasedRateLimiter() RateLimiter {
- return NewItemExponentialFailureRateLimiter(time.Millisecond, 1000*time.Second)
+ return DefaultTypedItemBasedRateLimiter[any]()
}
-func (r *ItemExponentialFailureRateLimiter) When(item interface{}) time.Duration {
+func DefaultTypedItemBasedRateLimiter[T comparable]() TypedRateLimiter[T] {
+ return NewTypedItemExponentialFailureRateLimiter[T](time.Millisecond, 1000*time.Second)
+}
+
+func (r *TypedItemExponentialFailureRateLimiter[T]) When(item T) time.Duration {
r.failuresLock.Lock()
defer r.failuresLock.Unlock()
@@ -107,14 +134,14 @@ func (r *ItemExponentialFailureRateLimiter) When(item interface{}) time.Duration
return calculated
}
-func (r *ItemExponentialFailureRateLimiter) NumRequeues(item interface{}) int {
+func (r *TypedItemExponentialFailureRateLimiter[T]) NumRequeues(item T) int {
r.failuresLock.Lock()
defer r.failuresLock.Unlock()
return r.failures[item]
}
-func (r *ItemExponentialFailureRateLimiter) Forget(item interface{}) {
+func (r *TypedItemExponentialFailureRateLimiter[T]) Forget(item T) {
r.failuresLock.Lock()
defer r.failuresLock.Unlock()
@@ -122,9 +149,13 @@ func (r *ItemExponentialFailureRateLimiter) Forget(item interface{}) {
}
// ItemFastSlowRateLimiter does a quick retry for a certain number of attempts, then a slow retry after that
-type ItemFastSlowRateLimiter struct {
+// Deprecated: Use TypedItemFastSlowRateLimiter instead.
+type ItemFastSlowRateLimiter = TypedItemFastSlowRateLimiter[any]
+
+// TypedItemFastSlowRateLimiter does a quick retry for a certain number of attempts, then a slow retry after that
+type TypedItemFastSlowRateLimiter[T comparable] struct {
failuresLock sync.Mutex
- failures map[interface{}]int
+ failures map[T]int
maxFastAttempts int
fastDelay time.Duration
@@ -133,16 +164,21 @@ type ItemFastSlowRateLimiter struct {
var _ RateLimiter = &ItemFastSlowRateLimiter{}
+// Deprecated: NewItemFastSlowRateLimiter is deprecated, use NewTypedItemFastSlowRateLimiter instead.
func NewItemFastSlowRateLimiter(fastDelay, slowDelay time.Duration, maxFastAttempts int) RateLimiter {
- return &ItemFastSlowRateLimiter{
- failures: map[interface{}]int{},
+ return NewTypedItemFastSlowRateLimiter[any](fastDelay, slowDelay, maxFastAttempts)
+}
+
+func NewTypedItemFastSlowRateLimiter[T comparable](fastDelay, slowDelay time.Duration, maxFastAttempts int) TypedRateLimiter[T] {
+ return &TypedItemFastSlowRateLimiter[T]{
+ failures: map[T]int{},
fastDelay: fastDelay,
slowDelay: slowDelay,
maxFastAttempts: maxFastAttempts,
}
}
-func (r *ItemFastSlowRateLimiter) When(item interface{}) time.Duration {
+func (r *TypedItemFastSlowRateLimiter[T]) When(item T) time.Duration {
r.failuresLock.Lock()
defer r.failuresLock.Unlock()
@@ -155,14 +191,14 @@ func (r *ItemFastSlowRateLimiter) When(item interface{}) time.Duration {
return r.slowDelay
}
-func (r *ItemFastSlowRateLimiter) NumRequeues(item interface{}) int {
+func (r *TypedItemFastSlowRateLimiter[T]) NumRequeues(item T) int {
r.failuresLock.Lock()
defer r.failuresLock.Unlock()
return r.failures[item]
}
-func (r *ItemFastSlowRateLimiter) Forget(item interface{}) {
+func (r *TypedItemFastSlowRateLimiter[T]) Forget(item T) {
r.failuresLock.Lock()
defer r.failuresLock.Unlock()
@@ -172,11 +208,18 @@ func (r *ItemFastSlowRateLimiter) Forget(item interface{}) {
// MaxOfRateLimiter calls every RateLimiter and returns the worst case response
// When used with a token bucket limiter, the burst could be apparently exceeded in cases where particular items
// were separately delayed a longer time.
-type MaxOfRateLimiter struct {
- limiters []RateLimiter
+//
+// Deprecated: Use TypedMaxOfRateLimiter instead.
+type MaxOfRateLimiter = TypedMaxOfRateLimiter[any]
+
+// TypedMaxOfRateLimiter calls every RateLimiter and returns the worst case response
+// When used with a token bucket limiter, the burst could be apparently exceeded in cases where particular items
+// were separately delayed a longer time.
+type TypedMaxOfRateLimiter[T comparable] struct {
+ limiters []TypedRateLimiter[T]
}
-func (r *MaxOfRateLimiter) When(item interface{}) time.Duration {
+func (r *TypedMaxOfRateLimiter[T]) When(item T) time.Duration {
ret := time.Duration(0)
for _, limiter := range r.limiters {
curr := limiter.When(item)
@@ -188,11 +231,16 @@ func (r *MaxOfRateLimiter) When(item interface{}) time.Duration {
return ret
}
-func NewMaxOfRateLimiter(limiters ...RateLimiter) RateLimiter {
- return &MaxOfRateLimiter{limiters: limiters}
+// Deprecated: NewMaxOfRateLimiter is deprecated, use NewTypedMaxOfRateLimiter instead.
+func NewMaxOfRateLimiter(limiters ...TypedRateLimiter[any]) RateLimiter {
+ return NewTypedMaxOfRateLimiter(limiters...)
}
-func (r *MaxOfRateLimiter) NumRequeues(item interface{}) int {
+func NewTypedMaxOfRateLimiter[T comparable](limiters ...TypedRateLimiter[T]) TypedRateLimiter[T] {
+ return &TypedMaxOfRateLimiter[T]{limiters: limiters}
+}
+
+func (r *TypedMaxOfRateLimiter[T]) NumRequeues(item T) int {
ret := 0
for _, limiter := range r.limiters {
curr := limiter.NumRequeues(item)
@@ -204,23 +252,32 @@ func (r *MaxOfRateLimiter) NumRequeues(item interface{}) int {
return ret
}
-func (r *MaxOfRateLimiter) Forget(item interface{}) {
+func (r *TypedMaxOfRateLimiter[T]) Forget(item T) {
for _, limiter := range r.limiters {
limiter.Forget(item)
}
}
// WithMaxWaitRateLimiter have maxDelay which avoids waiting too long
-type WithMaxWaitRateLimiter struct {
- limiter RateLimiter
+// Deprecated: Use TypedWithMaxWaitRateLimiter instead.
+type WithMaxWaitRateLimiter = TypedWithMaxWaitRateLimiter[any]
+
+// TypedWithMaxWaitRateLimiter has a maxDelay which avoids waiting too long
+type TypedWithMaxWaitRateLimiter[T comparable] struct {
+ limiter TypedRateLimiter[T]
maxDelay time.Duration
}
+// Deprecated: NewWithMaxWaitRateLimiter is deprecated, use NewTypedWithMaxWaitRateLimiter instead.
func NewWithMaxWaitRateLimiter(limiter RateLimiter, maxDelay time.Duration) RateLimiter {
- return &WithMaxWaitRateLimiter{limiter: limiter, maxDelay: maxDelay}
+ return NewTypedWithMaxWaitRateLimiter[any](limiter, maxDelay)
+}
+
+func NewTypedWithMaxWaitRateLimiter[T comparable](limiter TypedRateLimiter[T], maxDelay time.Duration) TypedRateLimiter[T] {
+ return &TypedWithMaxWaitRateLimiter[T]{limiter: limiter, maxDelay: maxDelay}
}
-func (w WithMaxWaitRateLimiter) When(item interface{}) time.Duration {
+func (w TypedWithMaxWaitRateLimiter[T]) When(item T) time.Duration {
delay := w.limiter.When(item)
if delay > w.maxDelay {
return w.maxDelay
@@ -229,10 +286,10 @@ func (w WithMaxWaitRateLimiter) When(item interface{}) time.Duration {
return delay
}
-func (w WithMaxWaitRateLimiter) Forget(item interface{}) {
+func (w TypedWithMaxWaitRateLimiter[T]) Forget(item T) {
w.limiter.Forget(item)
}
-func (w WithMaxWaitRateLimiter) NumRequeues(item interface{}) int {
+func (w TypedWithMaxWaitRateLimiter[T]) NumRequeues(item T) int {
return w.limiter.NumRequeues(item)
}
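The typed rate limiters take the concrete key type, so callers no longer pass interface{} values. A brief sketch of the exponential per-item limiter with string keys (the delays shown assume the stated base delay):

package main

import (
    "fmt"
    "time"

    "k8s.io/client-go/util/workqueue"
)

func main() {
    rl := workqueue.NewTypedItemExponentialFailureRateLimiter[string](5*time.Millisecond, time.Minute)

    fmt.Println(rl.When("pod-a"))        // 5ms
    fmt.Println(rl.When("pod-a"))        // 10ms
    fmt.Println(rl.NumRequeues("pod-a")) // 2
    rl.Forget("pod-a")
    fmt.Println(rl.NumRequeues("pod-a")) // 0
}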
diff --git a/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go b/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go
index c1df720302..da444f4fb3 100644
--- a/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go
+++ b/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go
@@ -22,19 +22,35 @@ import (
"time"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+ "k8s.io/klog/v2"
"k8s.io/utils/clock"
)
// DelayingInterface is an Interface that can Add an item at a later time. This makes it easier to
// requeue items after failures without ending up in a hot-loop.
-type DelayingInterface interface {
- Interface
+//
+// Deprecated: use TypedDelayingInterface instead.
+type DelayingInterface TypedDelayingInterface[any]
+
+// TypedDelayingInterface is an Interface that can Add an item at a later time. This makes it easier to
+// requeue items after failures without ending up in a hot-loop.
+type TypedDelayingInterface[T comparable] interface {
+ TypedInterface[T]
// AddAfter adds an item to the workqueue after the indicated duration has passed
- AddAfter(item interface{}, duration time.Duration)
+ AddAfter(item T, duration time.Duration)
}
// DelayingQueueConfig specifies optional configurations to customize a DelayingInterface.
-type DelayingQueueConfig struct {
+//
+// Deprecated: use TypedDelayingQueueConfig instead.
+type DelayingQueueConfig = TypedDelayingQueueConfig[any]
+
+// TypedDelayingQueueConfig specifies optional configurations to customize a DelayingInterface.
+type TypedDelayingQueueConfig[T comparable] struct {
+ // An optional logger. The name of the queue does *not* get added to it, this should
+ // be done by the caller if desired.
+ Logger *klog.Logger
+
// Name for the queue. If unnamed, the metrics will not be registered.
Name string
@@ -46,32 +62,60 @@ type DelayingQueueConfig struct {
Clock clock.WithTicker
// Queue optionally allows injecting custom queue Interface instead of the default one.
- Queue Interface
+ Queue TypedInterface[T]
}
// NewDelayingQueue constructs a new workqueue with delayed queuing ability.
// NewDelayingQueue does not emit metrics. For use with a MetricsProvider, please use
// NewDelayingQueueWithConfig instead and specify a name.
+//
+// Deprecated: use NewTypedDelayingQueue instead.
func NewDelayingQueue() DelayingInterface {
return NewDelayingQueueWithConfig(DelayingQueueConfig{})
}
+// NewTypedDelayingQueue constructs a new workqueue with delayed queuing ability.
+// NewTypedDelayingQueue does not emit metrics. For use with a MetricsProvider, please use
+// NewTypedDelayingQueueWithConfig instead and specify a name.
+func NewTypedDelayingQueue[T comparable]() TypedDelayingInterface[T] {
+ return NewTypedDelayingQueueWithConfig(TypedDelayingQueueConfig[T]{})
+}
+
// NewDelayingQueueWithConfig constructs a new workqueue with options to
// customize different properties.
+//
+// Deprecated: use NewTypedDelayingQueueWithConfig instead.
func NewDelayingQueueWithConfig(config DelayingQueueConfig) DelayingInterface {
+ return NewTypedDelayingQueueWithConfig[any](config)
+}
+
+// TypedNewDelayingQueue exists for backwards compatibility only.
+//
+// Deprecated: use NewTypedDelayingQueueWithConfig instead.
+func TypedNewDelayingQueue[T comparable]() TypedDelayingInterface[T] {
+ return NewTypedDelayingQueue[T]()
+}
+
+// NewTypedDelayingQueueWithConfig constructs a new workqueue with options to
+// customize different properties.
+func NewTypedDelayingQueueWithConfig[T comparable](config TypedDelayingQueueConfig[T]) TypedDelayingInterface[T] {
+ logger := klog.Background()
+ if config.Logger != nil {
+ logger = *config.Logger
+ }
if config.Clock == nil {
config.Clock = clock.RealClock{}
}
if config.Queue == nil {
- config.Queue = NewWithConfig(QueueConfig{
+ config.Queue = NewTypedWithConfig[T](TypedQueueConfig[T]{
Name: config.Name,
MetricsProvider: config.MetricsProvider,
Clock: config.Clock,
})
}
- return newDelayingQueue(config.Clock, config.Queue, config.Name, config.MetricsProvider)
+ return newDelayingQueue(logger, config.Clock, config.Queue, config.Name, config.MetricsProvider)
}
// NewDelayingQueueWithCustomQueue constructs a new workqueue with ability to
@@ -100,23 +144,23 @@ func NewDelayingQueueWithCustomClock(clock clock.WithTicker, name string) Delayi
})
}
-func newDelayingQueue(clock clock.WithTicker, q Interface, name string, provider MetricsProvider) *delayingType {
- ret := &delayingType{
- Interface: q,
+func newDelayingQueue[T comparable](logger klog.Logger, clock clock.WithTicker, q TypedInterface[T], name string, provider MetricsProvider) *delayingType[T] {
+ ret := &delayingType[T]{
+ TypedInterface: q,
clock: clock,
heartbeat: clock.NewTicker(maxWait),
stopCh: make(chan struct{}),
- waitingForAddCh: make(chan *waitFor, 1000),
+ waitingForAddCh: make(chan *waitFor[T], 1000),
metrics: newRetryMetrics(name, provider),
}
- go ret.waitingLoop()
+ go ret.waitingLoop(logger)
return ret
}
// delayingType wraps an Interface and provides delayed re-enquing
-type delayingType struct {
- Interface
+type delayingType[T comparable] struct {
+ TypedInterface[T]
// clock tracks time for delayed firing
clock clock.Clock
@@ -130,15 +174,15 @@ type delayingType struct {
heartbeat clock.Ticker
// waitingForAddCh is a buffered channel that feeds waitingForAdd
- waitingForAddCh chan *waitFor
+ waitingForAddCh chan *waitFor[T]
// metrics counts the number of retries
metrics retryMetrics
}
// waitFor holds the data to add and the time it should be added
-type waitFor struct {
- data t
+type waitFor[T any] struct {
+ data T
readyAt time.Time
// index in the priority queue (heap)
index int
@@ -152,15 +196,15 @@ type waitFor struct {
// it has been removed from the queue and placed at index Len()-1 by
// container/heap. Push adds an item at index Len(), and container/heap
// percolates it into the correct location.
-type waitForPriorityQueue []*waitFor
+type waitForPriorityQueue[T any] []*waitFor[T]
-func (pq waitForPriorityQueue) Len() int {
+func (pq waitForPriorityQueue[T]) Len() int {
return len(pq)
}
-func (pq waitForPriorityQueue) Less(i, j int) bool {
+func (pq waitForPriorityQueue[T]) Less(i, j int) bool {
return pq[i].readyAt.Before(pq[j].readyAt)
}
-func (pq waitForPriorityQueue) Swap(i, j int) {
+func (pq waitForPriorityQueue[T]) Swap(i, j int) {
pq[i], pq[j] = pq[j], pq[i]
pq[i].index = i
pq[j].index = j
@@ -168,16 +212,16 @@ func (pq waitForPriorityQueue) Swap(i, j int) {
// Push adds an item to the queue. Push should not be called directly; instead,
// use `heap.Push`.
-func (pq *waitForPriorityQueue) Push(x interface{}) {
+func (pq *waitForPriorityQueue[T]) Push(x interface{}) {
n := len(*pq)
- item := x.(*waitFor)
+ item := x.(*waitFor[T])
item.index = n
*pq = append(*pq, item)
}
// Pop removes an item from the queue. Pop should not be called directly;
// instead, use `heap.Pop`.
-func (pq *waitForPriorityQueue) Pop() interface{} {
+func (pq *waitForPriorityQueue[T]) Pop() interface{} {
n := len(*pq)
item := (*pq)[n-1]
item.index = -1
@@ -187,22 +231,22 @@ func (pq *waitForPriorityQueue) Pop() interface{} {
// Peek returns the item at the beginning of the queue, without removing the
// item or otherwise mutating the queue. It is safe to call directly.
-func (pq waitForPriorityQueue) Peek() interface{} {
+func (pq waitForPriorityQueue[T]) Peek() interface{} {
return pq[0]
}
// ShutDown stops the queue. After the queue drains, the returned shutdown bool
// on Get() will be true. This method may be invoked more than once.
-func (q *delayingType) ShutDown() {
+func (q *delayingType[T]) ShutDown() {
q.stopOnce.Do(func() {
- q.Interface.ShutDown()
+ q.TypedInterface.ShutDown()
close(q.stopCh)
q.heartbeat.Stop()
})
}
// AddAfter adds the given item to the work queue after the given delay
-func (q *delayingType) AddAfter(item interface{}, duration time.Duration) {
+func (q *delayingType[T]) AddAfter(item T, duration time.Duration) {
// don't add if we're already shutting down
if q.ShuttingDown() {
return
@@ -219,7 +263,7 @@ func (q *delayingType) AddAfter(item interface{}, duration time.Duration) {
select {
case <-q.stopCh:
// unblock if ShutDown() is called
- case q.waitingForAddCh <- &waitFor{data: item, readyAt: q.clock.Now().Add(duration)}:
+ case q.waitingForAddCh <- &waitFor[T]{data: item, readyAt: q.clock.Now().Add(duration)}:
}
}
@@ -229,8 +273,8 @@ func (q *delayingType) AddAfter(item interface{}, duration time.Duration) {
const maxWait = 10 * time.Second
// waitingLoop runs until the workqueue is shutdown and keeps a check on the list of items to be added.
-func (q *delayingType) waitingLoop() {
- defer utilruntime.HandleCrash()
+func (q *delayingType[T]) waitingLoop(logger klog.Logger) {
+ defer utilruntime.HandleCrashWithLogger(logger)
// Make a placeholder channel to use when there are no items in our list
never := make(<-chan time.Time)
@@ -238,13 +282,13 @@ func (q *delayingType) waitingLoop() {
// Make a timer that expires when the item at the head of the waiting queue is ready
var nextReadyAtTimer clock.Timer
- waitingForQueue := &waitForPriorityQueue{}
+ waitingForQueue := &waitForPriorityQueue[T]{}
heap.Init(waitingForQueue)
- waitingEntryByData := map[t]*waitFor{}
+ waitingEntryByData := map[T]*waitFor[T]{}
for {
- if q.Interface.ShuttingDown() {
+ if q.TypedInterface.ShuttingDown() {
return
}
@@ -252,12 +296,12 @@ func (q *delayingType) waitingLoop() {
// Add ready entries
for waitingForQueue.Len() > 0 {
- entry := waitingForQueue.Peek().(*waitFor)
+ entry := waitingForQueue.Peek().(*waitFor[T])
if entry.readyAt.After(now) {
break
}
- entry = heap.Pop(waitingForQueue).(*waitFor)
+ entry = heap.Pop(waitingForQueue).(*waitFor[T])
q.Add(entry.data)
delete(waitingEntryByData, entry.data)
}
@@ -268,7 +312,7 @@ func (q *delayingType) waitingLoop() {
if nextReadyAtTimer != nil {
nextReadyAtTimer.Stop()
}
- entry := waitingForQueue.Peek().(*waitFor)
+ entry := waitingForQueue.Peek().(*waitFor[T])
nextReadyAtTimer = q.clock.NewTimer(entry.readyAt.Sub(now))
nextReadyAt = nextReadyAtTimer.C()
}
@@ -308,7 +352,7 @@ func (q *delayingType) waitingLoop() {
}
// insert adds the entry to the priority queue, or updates the readyAt if it already exists in the queue
-func insert(q *waitForPriorityQueue, knownEntries map[t]*waitFor, entry *waitFor) {
+func insert[T comparable](q *waitForPriorityQueue[T], knownEntries map[T]*waitFor[T], entry *waitFor[T]) {
// if the entry already exists, update the time only if it would cause the item to be queued sooner
existing, exists := knownEntries[entry.data]
if exists {
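TypedDelayingQueueConfig now accepts an optional Logger, which the queue's background loop and crash handler use instead of the global klog logger. A usage sketch with an illustrative queue name:

package main

import (
    "fmt"
    "time"

    "k8s.io/client-go/util/workqueue"
    "k8s.io/klog/v2"
)

func main() {
    logger := klog.Background().WithName("retry-queue")
    q := workqueue.NewTypedDelayingQueueWithConfig(workqueue.TypedDelayingQueueConfig[string]{
        Name:   "retries",
        Logger: &logger,
    })
    defer q.ShutDown()

    q.AddAfter("object-key", 250*time.Millisecond)
    item, shutdown := q.Get() // blocks until the delay has elapsed
    if !shutdown {
        fmt.Println("got", item)
        q.Done(item)
    }
}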
diff --git a/vendor/k8s.io/client-go/util/workqueue/doc.go b/vendor/k8s.io/client-go/util/workqueue/doc.go
index 8555aa95fe..a76d830ede 100644
--- a/vendor/k8s.io/client-go/util/workqueue/doc.go
+++ b/vendor/k8s.io/client-go/util/workqueue/doc.go
@@ -23,4 +23,4 @@ limitations under the License.
// - Multiple consumers and producers. In particular, it is allowed for an
// item to be reenqueued while it is being processed.
// - Shutdown notifications.
-package workqueue // import "k8s.io/client-go/util/workqueue"
+package workqueue
diff --git a/vendor/k8s.io/client-go/util/workqueue/metrics.go b/vendor/k8s.io/client-go/util/workqueue/metrics.go
index f012ccc554..4400cb65e1 100644
--- a/vendor/k8s.io/client-go/util/workqueue/metrics.go
+++ b/vendor/k8s.io/client-go/util/workqueue/metrics.go
@@ -26,10 +26,10 @@ import (
// This file provides abstractions for setting the provider (e.g., prometheus)
// of metrics.
-type queueMetrics interface {
- add(item t)
- get(item t)
- done(item t)
+type queueMetrics[T comparable] interface {
+ add(item T)
+ get(item T)
+ done(item T)
updateUnfinishedWork()
}
@@ -70,7 +70,7 @@ func (noopMetric) Set(float64) {}
func (noopMetric) Observe(float64) {}
// defaultQueueMetrics expects the caller to lock before setting any metrics.
-type defaultQueueMetrics struct {
+type defaultQueueMetrics[T comparable] struct {
clock clock.Clock
// current depth of a workqueue
@@ -81,15 +81,15 @@ type defaultQueueMetrics struct {
latency HistogramMetric
// how long processing an item from a workqueue takes
workDuration HistogramMetric
- addTimes map[t]time.Time
- processingStartTimes map[t]time.Time
+ addTimes map[T]time.Time
+ processingStartTimes map[T]time.Time
// how long have current threads been working?
unfinishedWorkSeconds SettableGaugeMetric
longestRunningProcessor SettableGaugeMetric
}
-func (m *defaultQueueMetrics) add(item t) {
+func (m *defaultQueueMetrics[T]) add(item T) {
if m == nil {
return
}
@@ -101,7 +101,7 @@ func (m *defaultQueueMetrics) add(item t) {
}
}
-func (m *defaultQueueMetrics) get(item t) {
+func (m *defaultQueueMetrics[T]) get(item T) {
if m == nil {
return
}
@@ -114,7 +114,7 @@ func (m *defaultQueueMetrics) get(item t) {
}
}
-func (m *defaultQueueMetrics) done(item t) {
+func (m *defaultQueueMetrics[T]) done(item T) {
if m == nil {
return
}
@@ -125,7 +125,7 @@ func (m *defaultQueueMetrics) done(item t) {
}
}
-func (m *defaultQueueMetrics) updateUnfinishedWork() {
+func (m *defaultQueueMetrics[T]) updateUnfinishedWork() {
// Note that a summary metric would be better for this, but prometheus
// doesn't seem to have non-hacky ways to reset the summary metrics.
var total float64
@@ -141,15 +141,15 @@ func (m *defaultQueueMetrics) updateUnfinishedWork() {
m.longestRunningProcessor.Set(oldest)
}
-type noMetrics struct{}
+type noMetrics[T any] struct{}
-func (noMetrics) add(item t) {}
-func (noMetrics) get(item t) {}
-func (noMetrics) done(item t) {}
-func (noMetrics) updateUnfinishedWork() {}
+func (noMetrics[T]) add(item T) {}
+func (noMetrics[T]) get(item T) {}
+func (noMetrics[T]) done(item T) {}
+func (noMetrics[T]) updateUnfinishedWork() {}
// Gets the time since the specified start in seconds.
-func (m *defaultQueueMetrics) sinceInSeconds(start time.Time) float64 {
+func (m *defaultQueueMetrics[T]) sinceInSeconds(start time.Time) float64 {
return m.clock.Since(start).Seconds()
}
@@ -210,28 +210,15 @@ func (_ noopMetricsProvider) NewRetriesMetric(name string) CounterMetric {
return noopMetric{}
}
-var globalMetricsFactory = queueMetricsFactory{
- metricsProvider: noopMetricsProvider{},
-}
-
-type queueMetricsFactory struct {
- metricsProvider MetricsProvider
+var globalMetricsProvider MetricsProvider = noopMetricsProvider{}
- onlyOnce sync.Once
-}
+var setGlobalMetricsProviderOnce sync.Once
-func (f *queueMetricsFactory) setProvider(mp MetricsProvider) {
- f.onlyOnce.Do(func() {
- f.metricsProvider = mp
- })
-}
-
-func (f *queueMetricsFactory) newQueueMetrics(name string, clock clock.Clock) queueMetrics {
- mp := f.metricsProvider
+func newQueueMetrics[T comparable](mp MetricsProvider, name string, clock clock.Clock) queueMetrics[T] {
if len(name) == 0 || mp == (noopMetricsProvider{}) {
- return noMetrics{}
+ return noMetrics[T]{}
}
- return &defaultQueueMetrics{
+ return &defaultQueueMetrics[T]{
clock: clock,
depth: mp.NewDepthMetric(name),
adds: mp.NewAddsMetric(name),
@@ -239,8 +226,8 @@ func (f *queueMetricsFactory) newQueueMetrics(name string, clock clock.Clock) qu
workDuration: mp.NewWorkDurationMetric(name),
unfinishedWorkSeconds: mp.NewUnfinishedWorkSecondsMetric(name),
longestRunningProcessor: mp.NewLongestRunningProcessorSecondsMetric(name),
- addTimes: map[t]time.Time{},
- processingStartTimes: map[t]time.Time{},
+ addTimes: map[T]time.Time{},
+ processingStartTimes: map[T]time.Time{},
}
}
@@ -251,7 +238,7 @@ func newRetryMetrics(name string, provider MetricsProvider) retryMetrics {
}
if provider == nil {
- provider = globalMetricsFactory.metricsProvider
+ provider = globalMetricsProvider
}
return &defaultRetryMetrics{
@@ -262,5 +249,7 @@ func newRetryMetrics(name string, provider MetricsProvider) retryMetrics {
// SetProvider sets the metrics provider for all subsequently created work
// queues. Only the first call has an effect.
func SetProvider(metricsProvider MetricsProvider) {
- globalMetricsFactory.setProvider(metricsProvider)
+ setGlobalMetricsProviderOnce.Do(func() {
+ globalMetricsProvider = metricsProvider
+ })
}
diff --git a/vendor/k8s.io/client-go/util/workqueue/parallelizer.go b/vendor/k8s.io/client-go/util/workqueue/parallelizer.go
index 366bf20a31..9f986a25a4 100644
--- a/vendor/k8s.io/client-go/util/workqueue/parallelizer.go
+++ b/vendor/k8s.io/client-go/util/workqueue/parallelizer.go
@@ -74,7 +74,7 @@ func ParallelizeUntil(ctx context.Context, workers, pieces int, doWorkPiece DoWo
wg.Add(workers)
for i := 0; i < workers; i++ {
go func() {
- defer utilruntime.HandleCrash()
+ defer utilruntime.HandleCrashWithContext(ctx)
defer wg.Done()
for chunk := range toProcess {
start := chunk * chunkSize
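ParallelizeUntil's workers now route panics through HandleCrashWithContext, so the caller's context also scopes crash logging. A minimal usage sketch (the work function is illustrative):

package main

import (
    "context"
    "fmt"

    "k8s.io/client-go/util/workqueue"
)

func main() {
    items := []string{"a", "b", "c", "d"}
    workqueue.ParallelizeUntil(context.Background(), 2, len(items), func(i int) {
        fmt.Println("processing", items[i])
    })
}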
diff --git a/vendor/k8s.io/client-go/util/workqueue/queue.go b/vendor/k8s.io/client-go/util/workqueue/queue.go
index a363d1afb4..3cec1768a0 100644
--- a/vendor/k8s.io/client-go/util/workqueue/queue.go
+++ b/vendor/k8s.io/client-go/util/workqueue/queue.go
@@ -23,18 +23,66 @@ import (
"k8s.io/utils/clock"
)
-type Interface interface {
- Add(item interface{})
+// Deprecated: Interface is deprecated, use TypedInterface instead.
+type Interface TypedInterface[any]
+
+type TypedInterface[T comparable] interface {
+ Add(item T)
Len() int
- Get() (item interface{}, shutdown bool)
- Done(item interface{})
+ Get() (item T, shutdown bool)
+ Done(item T)
ShutDown()
ShutDownWithDrain()
ShuttingDown() bool
}
+// Queue is the underlying storage for items. The functions below are always
+// called from the same goroutine.
+type Queue[T comparable] interface {
+ // Touch can be hooked when an existing item is added again. This may be
+ // useful if the implementation allows priority change for the given item.
+ Touch(item T)
+ // Push adds a new item.
+ Push(item T)
+ // Len tells the total number of items.
+ Len() int
+ // Pop retrieves an item.
+ Pop() (item T)
+}
+
+// DefaultQueue is a slice based FIFO queue.
+func DefaultQueue[T comparable]() Queue[T] {
+ return new(queue[T])
+}
+
+// queue is a slice which implements Queue.
+type queue[T comparable] []T
+
+func (q *queue[T]) Touch(item T) {}
+
+func (q *queue[T]) Push(item T) {
+ *q = append(*q, item)
+}
+
+func (q *queue[T]) Len() int {
+ return len(*q)
+}
+
+func (q *queue[T]) Pop() (item T) {
+ item = (*q)[0]
+
+ // The underlying array still exists and reference this object, so the object will not be garbage collected.
+ (*q)[0] = *new(T)
+ *q = (*q)[1:]
+
+ return item
+}
+
// QueueConfig specifies optional configurations to customize an Interface.
-type QueueConfig struct {
+// Deprecated: use TypedQueueConfig instead.
+type QueueConfig = TypedQueueConfig[any]
+
+type TypedQueueConfig[T comparable] struct {
// Name for the queue. If unnamed, the metrics will not be registered.
Name string
@@ -44,18 +92,38 @@ type QueueConfig struct {
// Clock ability to inject real or fake clock for testing purposes.
Clock clock.WithTicker
+
+ // Queue provides the underlying queue to use. It is optional and defaults to slice based FIFO queue.
+ Queue Queue[T]
}
// New constructs a new work queue (see the package comment).
+//
+// Deprecated: use NewTyped instead.
func New() *Type {
return NewWithConfig(QueueConfig{
Name: "",
})
}
+// NewTyped constructs a new work queue (see the package comment).
+func NewTyped[T comparable]() *Typed[T] {
+ return NewTypedWithConfig(TypedQueueConfig[T]{
+ Name: "",
+ })
+}
+
// NewWithConfig constructs a new workqueue with ability to
// customize different properties.
+//
+// Deprecated: use NewTypedWithConfig instead.
func NewWithConfig(config QueueConfig) *Type {
+ return NewTypedWithConfig(config)
+}
+
+// NewTypedWithConfig constructs a new workqueue with ability to
+// customize different properties.
+func NewTypedWithConfig[T comparable](config TypedQueueConfig[T]) *Typed[T] {
return newQueueWithConfig(config, defaultUnfinishedWorkUpdatePeriod)
}
@@ -69,32 +137,34 @@ func NewNamed(name string) *Type {
// newQueueWithConfig constructs a new named workqueue
// with the ability to customize different properties for testing purposes
-func newQueueWithConfig(config QueueConfig, updatePeriod time.Duration) *Type {
- var metricsFactory *queueMetricsFactory
+func newQueueWithConfig[T comparable](config TypedQueueConfig[T], updatePeriod time.Duration) *Typed[T] {
+ metricsProvider := globalMetricsProvider
if config.MetricsProvider != nil {
- metricsFactory = &queueMetricsFactory{
- metricsProvider: config.MetricsProvider,
- }
- } else {
- metricsFactory = &globalMetricsFactory
+ metricsProvider = config.MetricsProvider
}
if config.Clock == nil {
config.Clock = clock.RealClock{}
}
+ if config.Queue == nil {
+ config.Queue = DefaultQueue[T]()
+ }
+
return newQueue(
config.Clock,
- metricsFactory.newQueueMetrics(config.Name, config.Clock),
+ config.Queue,
+ newQueueMetrics[T](metricsProvider, config.Name, config.Clock),
updatePeriod,
)
}
-func newQueue(c clock.WithTicker, metrics queueMetrics, updatePeriod time.Duration) *Type {
- t := &Type{
+func newQueue[T comparable](c clock.WithTicker, queue Queue[T], metrics queueMetrics[T], updatePeriod time.Duration) *Typed[T] {
+ t := &Typed[T]{
clock: c,
- dirty: set{},
- processing: set{},
+ queue: queue,
+ dirty: set[T]{},
+ processing: set[T]{},
cond: sync.NewCond(&sync.Mutex{}),
metrics: metrics,
unfinishedWorkUpdatePeriod: updatePeriod,
@@ -102,7 +172,7 @@ func newQueue(c clock.WithTicker, metrics queueMetrics, updatePeriod time.Durati
// Don't start the goroutine for a type of noMetrics so we don't consume
// resources unnecessarily
- if _, ok := metrics.(noMetrics); !ok {
+ if _, ok := metrics.(noMetrics[T]); !ok {
go t.updateUnfinishedWorkLoop()
}
@@ -112,61 +182,68 @@ func newQueue(c clock.WithTicker, metrics queueMetrics, updatePeriod time.Durati
const defaultUnfinishedWorkUpdatePeriod = 500 * time.Millisecond
// Type is a work queue (see the package comment).
-type Type struct {
+// Deprecated: Use Typed instead.
+type Type = Typed[any]
+
+type Typed[t comparable] struct {
// queue defines the order in which we will work on items. Every
// element of queue should be in the dirty set and not in the
// processing set.
- queue []t
+ queue Queue[t]
// dirty defines all of the items that need to be processed.
- dirty set
+ dirty set[t]
// Things that are currently being processed are in the processing set.
// These things may be simultaneously in the dirty set. When we finish
// processing something and remove it from this set, we'll check if
// it's in the dirty set, and if so, add it to the queue.
- processing set
+ processing set[t]
cond *sync.Cond
shuttingDown bool
drain bool
- metrics queueMetrics
+ metrics queueMetrics[t]
unfinishedWorkUpdatePeriod time.Duration
clock clock.WithTicker
}
type empty struct{}
-type t interface{}
-type set map[t]empty
+type set[t comparable] map[t]empty
-func (s set) has(item t) bool {
+func (s set[t]) has(item t) bool {
_, exists := s[item]
return exists
}
-func (s set) insert(item t) {
+func (s set[t]) insert(item t) {
s[item] = empty{}
}
-func (s set) delete(item t) {
+func (s set[t]) delete(item t) {
delete(s, item)
}
-func (s set) len() int {
+func (s set[t]) len() int {
return len(s)
}
// Add marks item as needing processing.
-func (q *Type) Add(item interface{}) {
+func (q *Typed[T]) Add(item T) {
q.cond.L.Lock()
defer q.cond.L.Unlock()
if q.shuttingDown {
return
}
if q.dirty.has(item) {
+ // the same item is added again before it is processed, call the Touch
+ // function if the queue cares about it (for e.g, reset its priority)
+ if !q.processing.has(item) {
+ q.queue.Touch(item)
+ }
return
}
@@ -177,37 +254,34 @@ func (q *Type) Add(item interface{}) {
return
}
- q.queue = append(q.queue, item)
+ q.queue.Push(item)
q.cond.Signal()
}
// Len returns the current queue length, for informational purposes only. You
// shouldn't e.g. gate a call to Add() or Get() on Len() being a particular
// value, that can't be synchronized properly.
-func (q *Type) Len() int {
+func (q *Typed[T]) Len() int {
q.cond.L.Lock()
defer q.cond.L.Unlock()
- return len(q.queue)
+ return q.queue.Len()
}
// Get blocks until it can return an item to be processed. If shutdown = true,
// the caller should end their goroutine. You must call Done with item when you
// have finished processing it.
-func (q *Type) Get() (item interface{}, shutdown bool) {
+func (q *Typed[T]) Get() (item T, shutdown bool) {
q.cond.L.Lock()
defer q.cond.L.Unlock()
- for len(q.queue) == 0 && !q.shuttingDown {
+ for q.queue.Len() == 0 && !q.shuttingDown {
q.cond.Wait()
}
- if len(q.queue) == 0 {
+ if q.queue.Len() == 0 {
// We must be shutting down.
- return nil, true
+ return *new(T), true
}
- item = q.queue[0]
- // The underlying array still exists and reference this object, so the object will not be garbage collected.
- q.queue[0] = nil
- q.queue = q.queue[1:]
+ item = q.queue.Pop()
q.metrics.get(item)
@@ -220,7 +294,7 @@ func (q *Type) Get() (item interface{}, shutdown bool) {
// Done marks item as done processing, and if it has been marked as dirty again
// while it was being processed, it will be re-added to the queue for
// re-processing.
-func (q *Type) Done(item interface{}) {
+func (q *Typed[T]) Done(item T) {
q.cond.L.Lock()
defer q.cond.L.Unlock()
@@ -228,7 +302,7 @@ func (q *Type) Done(item interface{}) {
q.processing.delete(item)
if q.dirty.has(item) {
- q.queue = append(q.queue, item)
+ q.queue.Push(item)
q.cond.Signal()
} else if q.processing.len() == 0 {
q.cond.Signal()
@@ -237,7 +311,7 @@ func (q *Type) Done(item interface{}) {
// ShutDown will cause q to ignore all new items added to it and
// immediately instruct the worker goroutines to exit.
-func (q *Type) ShutDown() {
+func (q *Typed[T]) ShutDown() {
q.cond.L.Lock()
defer q.cond.L.Unlock()
@@ -255,7 +329,7 @@ func (q *Type) ShutDown() {
// indefinitely. It is, however, safe to call ShutDown after having called
// ShutDownWithDrain, as to force the queue shut down to terminate immediately
// without waiting for the drainage.
-func (q *Type) ShutDownWithDrain() {
+func (q *Typed[T]) ShutDownWithDrain() {
q.cond.L.Lock()
defer q.cond.L.Unlock()
@@ -268,14 +342,14 @@ func (q *Type) ShutDownWithDrain() {
}
}
-func (q *Type) ShuttingDown() bool {
+func (q *Typed[T]) ShuttingDown() bool {
q.cond.L.Lock()
defer q.cond.L.Unlock()
return q.shuttingDown
}
-func (q *Type) updateUnfinishedWorkLoop() {
+func (q *Typed[T]) updateUnfinishedWorkLoop() {
t := q.clock.NewTicker(q.unfinishedWorkUpdatePeriod)
defer t.Stop()
for range t.C() {
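
For context, a minimal consumer of the generic Typed queue shown above (sketch only; it assumes client-go v0.33 also exports the NewTyped[T] constructor, which this hunk does not show):

package main

import (
	"fmt"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	// A queue of plain string keys; no interface{} assertions at the Get() site.
	q := workqueue.NewTyped[string]()

	q.Add("default/pod-a")
	q.Add("default/pod-a") // still dirty and not yet processing: collapsed into one entry
	q.Add("default/pod-b")

	q.ShutDown() // stop accepting new items; Get drains the backlog, then reports shutdown

	for {
		key, shutdown := q.Get()
		if shutdown {
			break
		}
		fmt.Println("processing", key)
		q.Done(key) // required; a key re-Added while processing is re-queued here
	}
}
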
diff --git a/vendor/k8s.io/client-go/util/workqueue/rate_limiting_queue.go b/vendor/k8s.io/client-go/util/workqueue/rate_limiting_queue.go
index 3e4016fb04..fe45afa5a4 100644
--- a/vendor/k8s.io/client-go/util/workqueue/rate_limiting_queue.go
+++ b/vendor/k8s.io/client-go/util/workqueue/rate_limiting_queue.go
@@ -19,24 +19,33 @@ package workqueue
import "k8s.io/utils/clock"
// RateLimitingInterface is an interface that rate limits items being added to the queue.
-type RateLimitingInterface interface {
- DelayingInterface
+//
+// Deprecated: Use TypedRateLimitingInterface instead.
+type RateLimitingInterface TypedRateLimitingInterface[any]
+
+// TypedRateLimitingInterface is an interface that rate limits items being added to the queue.
+type TypedRateLimitingInterface[T comparable] interface {
+ TypedDelayingInterface[T]
// AddRateLimited adds an item to the workqueue after the rate limiter says it's ok
- AddRateLimited(item interface{})
+ AddRateLimited(item T)
// Forget indicates that an item is finished being retried. Doesn't matter whether it's for perm failing
// or for success, we'll stop the rate limiter from tracking it. This only clears the `rateLimiter`, you
// still have to call `Done` on the queue.
- Forget(item interface{})
+ Forget(item T)
// NumRequeues returns back how many times the item was requeued
- NumRequeues(item interface{}) int
+ NumRequeues(item T) int
}
// RateLimitingQueueConfig specifies optional configurations to customize a RateLimitingInterface.
+//
+// Deprecated: Use TypedRateLimitingQueueConfig instead.
+type RateLimitingQueueConfig = TypedRateLimitingQueueConfig[any]
-type RateLimitingQueueConfig struct {
+// TypedRateLimitingQueueConfig specifies optional configurations to customize a TypedRateLimitingInterface.
+type TypedRateLimitingQueueConfig[T comparable] struct {
// Name for the queue. If unnamed, the metrics will not be registered.
Name string
@@ -48,36 +57,55 @@ type RateLimitingQueueConfig struct {
Clock clock.WithTicker
// DelayingQueue optionally allows injecting custom delaying queue DelayingInterface instead of the default one.
- DelayingQueue DelayingInterface
+ DelayingQueue TypedDelayingInterface[T]
}
// NewRateLimitingQueue constructs a new workqueue with rateLimited queuing ability
// Remember to call Forget! If you don't, you may end up tracking failures forever.
// NewRateLimitingQueue does not emit metrics. For use with a MetricsProvider, please use
// NewRateLimitingQueueWithConfig instead and specify a name.
+//
+// Deprecated: Use NewTypedRateLimitingQueue instead.
func NewRateLimitingQueue(rateLimiter RateLimiter) RateLimitingInterface {
return NewRateLimitingQueueWithConfig(rateLimiter, RateLimitingQueueConfig{})
}
+// NewTypedRateLimitingQueue constructs a new workqueue with rateLimited queuing ability
+// Remember to call Forget! If you don't, you may end up tracking failures forever.
+// NewTypedRateLimitingQueue does not emit metrics. For use with a MetricsProvider, please use
+// NewTypedRateLimitingQueueWithConfig instead and specify a name.
+func NewTypedRateLimitingQueue[T comparable](rateLimiter TypedRateLimiter[T]) TypedRateLimitingInterface[T] {
+ return NewTypedRateLimitingQueueWithConfig(rateLimiter, TypedRateLimitingQueueConfig[T]{})
+}
+
// NewRateLimitingQueueWithConfig constructs a new workqueue with rateLimited queuing ability
// with options to customize different properties.
// Remember to call Forget! If you don't, you may end up tracking failures forever.
+//
+// Deprecated: Use NewTypedRateLimitingQueueWithConfig instead.
func NewRateLimitingQueueWithConfig(rateLimiter RateLimiter, config RateLimitingQueueConfig) RateLimitingInterface {
+ return NewTypedRateLimitingQueueWithConfig(rateLimiter, config)
+}
+
+// NewTypedRateLimitingQueueWithConfig constructs a new workqueue with rateLimited queuing ability
+// with options to customize different properties.
+// Remember to call Forget! If you don't, you may end up tracking failures forever.
+func NewTypedRateLimitingQueueWithConfig[T comparable](rateLimiter TypedRateLimiter[T], config TypedRateLimitingQueueConfig[T]) TypedRateLimitingInterface[T] {
if config.Clock == nil {
config.Clock = clock.RealClock{}
}
if config.DelayingQueue == nil {
- config.DelayingQueue = NewDelayingQueueWithConfig(DelayingQueueConfig{
+ config.DelayingQueue = NewTypedDelayingQueueWithConfig(TypedDelayingQueueConfig[T]{
Name: config.Name,
MetricsProvider: config.MetricsProvider,
Clock: config.Clock,
})
}
- return &rateLimitingType{
- DelayingInterface: config.DelayingQueue,
- rateLimiter: rateLimiter,
+ return &rateLimitingType[T]{
+ TypedDelayingInterface: config.DelayingQueue,
+ rateLimiter: rateLimiter,
}
}
@@ -99,21 +127,21 @@ func NewRateLimitingQueueWithDelayingInterface(di DelayingInterface, rateLimiter
}
// rateLimitingType wraps an Interface and provides rateLimited re-enquing
-type rateLimitingType struct {
- DelayingInterface
+type rateLimitingType[T comparable] struct {
+ TypedDelayingInterface[T]
- rateLimiter RateLimiter
+ rateLimiter TypedRateLimiter[T]
}
// AddRateLimited AddAfter's the item based on the time when the rate limiter says it's ok
-func (q *rateLimitingType) AddRateLimited(item interface{}) {
- q.DelayingInterface.AddAfter(item, q.rateLimiter.When(item))
+func (q *rateLimitingType[T]) AddRateLimited(item T) {
+ q.TypedDelayingInterface.AddAfter(item, q.rateLimiter.When(item))
}
-func (q *rateLimitingType) NumRequeues(item interface{}) int {
+func (q *rateLimitingType[T]) NumRequeues(item T) int {
return q.rateLimiter.NumRequeues(item)
}
-func (q *rateLimitingType) Forget(item interface{}) {
+func (q *rateLimitingType[T]) Forget(item T) {
q.rateLimiter.Forget(item)
}
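
For context, a sketch of the typed rate-limiting surface above. The fixedDelay limiter is hypothetical and only implements the three methods this file calls on a TypedRateLimiter[T] (When, NumRequeues, Forget); real callers would normally use one of the package's stock limiters:

package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/util/workqueue"
)

// fixedDelay is a hypothetical TypedRateLimiter[string]: every retry waits the same delay.
type fixedDelay struct {
	delay    time.Duration
	requeues map[string]int
}

func (f *fixedDelay) When(item string) time.Duration { f.requeues[item]++; return f.delay }
func (f *fixedDelay) NumRequeues(item string) int    { return f.requeues[item] }
func (f *fixedDelay) Forget(item string)             { delete(f.requeues, item) }

func main() {
	limiter := &fixedDelay{delay: 10 * time.Millisecond, requeues: map[string]int{}}
	q := workqueue.NewTypedRateLimitingQueue[string](limiter)

	q.AddRateLimited("default/pod-a") // enqueued after limiter.When(...) elapses

	key, _ := q.Get()
	if q.NumRequeues(key) < 5 {
		q.AddRateLimited(key) // pretend processing failed: retry with the limiter's delay
	} else {
		q.Forget(key) // success (or giving up): stop tracking retries for this key
	}
	q.Done(key) // Done is still required in both branches
	fmt.Println("requeues so far:", q.NumRequeues(key))
}
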
diff --git a/vendor/k8s.io/klog/v2/klog.go b/vendor/k8s.io/klog/v2/klog.go
index 026be9e3b1..47ec9466a6 100644
--- a/vendor/k8s.io/klog/v2/klog.go
+++ b/vendor/k8s.io/klog/v2/klog.go
@@ -404,13 +404,6 @@ func (t *traceLocation) Set(value string) error {
return nil
}
-// flushSyncWriter is the interface satisfied by logging destinations.
-type flushSyncWriter interface {
- Flush() error
- Sync() error
- io.Writer
-}
-
var logging loggingT
var commandLine flag.FlagSet
@@ -486,7 +479,7 @@ type settings struct {
// Access to all of the following fields must be protected via a mutex.
// file holds writer for each of the log types.
- file [severity.NumSeverity]flushSyncWriter
+ file [severity.NumSeverity]io.Writer
// flushInterval is the interval for periodic flushing. If zero,
// the global default will be used.
flushInterval time.Duration
@@ -831,32 +824,12 @@ func (l *loggingT) printS(err error, s severity.Severity, depth int, msg string,
buffer.PutBuffer(b)
}
-// redirectBuffer is used to set an alternate destination for the logs
-type redirectBuffer struct {
- w io.Writer
-}
-
-func (rb *redirectBuffer) Sync() error {
- return nil
-}
-
-func (rb *redirectBuffer) Flush() error {
- return nil
-}
-
-func (rb *redirectBuffer) Write(bytes []byte) (n int, err error) {
- return rb.w.Write(bytes)
-}
-
// SetOutput sets the output destination for all severities
func SetOutput(w io.Writer) {
logging.mu.Lock()
defer logging.mu.Unlock()
for s := severity.FatalLog; s >= severity.InfoLog; s-- {
- rb := &redirectBuffer{
- w: w,
- }
- logging.file[s] = rb
+ logging.file[s] = w
}
}
@@ -868,10 +841,7 @@ func SetOutputBySeverity(name string, w io.Writer) {
if !ok {
panic(fmt.Sprintf("SetOutputBySeverity(%q): unrecognized severity name", name))
}
- rb := &redirectBuffer{
- w: w,
- }
- logging.file[sev] = rb
+ logging.file[sev] = w
}
// LogToStderr sets whether to log exclusively to stderr, bypassing outputs
@@ -1011,7 +981,8 @@ func (l *loggingT) exit(err error) {
logExitFunc(err)
return
}
- l.flushAll()
+ needToSync := l.flushAll()
+ l.syncAll(needToSync)
OsExit(2)
}
@@ -1028,10 +999,6 @@ type syncBuffer struct {
maxbytes uint64 // The max number of bytes this syncBuffer.file can hold before cleaning up.
}
-func (sb *syncBuffer) Sync() error {
- return sb.file.Sync()
-}
-
// CalculateMaxSize returns the real max size in bytes after considering the default max size and the flag options.
func CalculateMaxSize() uint64 {
if logging.logFile != "" {
@@ -1223,24 +1190,45 @@ func StartFlushDaemon(interval time.Duration) {
// lockAndFlushAll is like flushAll but locks l.mu first.
func (l *loggingT) lockAndFlushAll() {
l.mu.Lock()
- l.flushAll()
+ needToSync := l.flushAll()
l.mu.Unlock()
+ // Some environments are slow when syncing and holding the lock might cause contention.
+ l.syncAll(needToSync)
}
-// flushAll flushes all the logs and attempts to "sync" their data to disk.
+// flushAll flushes all the logs
// l.mu is held.
-func (l *loggingT) flushAll() {
+//
+// The result is the number of files which need to be synced and the pointers to them.
+func (l *loggingT) flushAll() fileArray {
+ var needToSync fileArray
+
// Flush from fatal down, in case there's trouble flushing.
for s := severity.FatalLog; s >= severity.InfoLog; s-- {
file := l.file[s]
- if file != nil {
- _ = file.Flush() // ignore error
- _ = file.Sync() // ignore error
+ if sb, ok := file.(*syncBuffer); ok && sb.file != nil {
+ _ = sb.Flush() // ignore error
+ needToSync.files[needToSync.num] = sb.file
+ needToSync.num++
}
}
if logging.loggerOptions.flush != nil {
logging.loggerOptions.flush()
}
+ return needToSync
+}
+
+type fileArray struct {
+ num int
+ files [severity.NumSeverity]*os.File
+}
+
+// syncAll attempts to "sync" their data to disk.
+func (l *loggingT) syncAll(needToSync fileArray) {
+ // Flush from fatal down, in case there's trouble flushing.
+ for i := 0; i < needToSync.num; i++ {
+ _ = needToSync.files[i].Sync() // ignore error
+ }
}
// CopyStandardLogTo arranges for messages written to the Go "log" package's
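
For context, the hunks above are internal to klog (plain io.Writer storage plus a flush/sync split), so the caller-facing surface is unchanged; a sketch of that surface, assuming default flag values apart from logtostderr:

package main

import (
	"bytes"
	"flag"
	"fmt"

	"k8s.io/klog/v2"
)

func main() {
	klog.InitFlags(nil)
	_ = flag.Set("logtostderr", "false") // otherwise stderr bypasses SetOutput
	flag.Parse()

	var buf bytes.Buffer
	klog.SetOutput(&buf) // any io.Writer; no Flush/Sync methods required of it

	klog.Info("reconcile finished")
	klog.Flush() // harmless here: after this change only real log files (*syncBuffer) get flushed and synced

	fmt.Print(buf.String())
}
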
diff --git a/vendor/k8s.io/utils/clock/testing/fake_clock.go b/vendor/k8s.io/utils/clock/testing/fake_clock.go
deleted file mode 100644
index 9503690be2..0000000000
--- a/vendor/k8s.io/utils/clock/testing/fake_clock.go
+++ /dev/null
@@ -1,374 +0,0 @@
-/*
-Copyright 2014 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package testing
-
-import (
- "sync"
- "time"
-
- "k8s.io/utils/clock"
-)
-
-var (
- _ = clock.PassiveClock(&FakePassiveClock{})
- _ = clock.WithTicker(&FakeClock{})
- _ = clock.Clock(&IntervalClock{})
-)
-
-// FakePassiveClock implements PassiveClock, but returns an arbitrary time.
-type FakePassiveClock struct {
- lock sync.RWMutex
- time time.Time
-}
-
-// FakeClock implements clock.Clock, but returns an arbitrary time.
-type FakeClock struct {
- FakePassiveClock
-
- // waiters are waiting for the fake time to pass their specified time
- waiters []*fakeClockWaiter
-}
-
-type fakeClockWaiter struct {
- targetTime time.Time
- stepInterval time.Duration
- skipIfBlocked bool
- destChan chan time.Time
- afterFunc func()
-}
-
-// NewFakePassiveClock returns a new FakePassiveClock.
-func NewFakePassiveClock(t time.Time) *FakePassiveClock {
- return &FakePassiveClock{
- time: t,
- }
-}
-
-// NewFakeClock constructs a fake clock set to the provided time.
-func NewFakeClock(t time.Time) *FakeClock {
- return &FakeClock{
- FakePassiveClock: *NewFakePassiveClock(t),
- }
-}
-
-// Now returns f's time.
-func (f *FakePassiveClock) Now() time.Time {
- f.lock.RLock()
- defer f.lock.RUnlock()
- return f.time
-}
-
-// Since returns time since the time in f.
-func (f *FakePassiveClock) Since(ts time.Time) time.Duration {
- f.lock.RLock()
- defer f.lock.RUnlock()
- return f.time.Sub(ts)
-}
-
-// SetTime sets the time on the FakePassiveClock.
-func (f *FakePassiveClock) SetTime(t time.Time) {
- f.lock.Lock()
- defer f.lock.Unlock()
- f.time = t
-}
-
-// After is the fake version of time.After(d).
-func (f *FakeClock) After(d time.Duration) <-chan time.Time {
- f.lock.Lock()
- defer f.lock.Unlock()
- stopTime := f.time.Add(d)
- ch := make(chan time.Time, 1) // Don't block!
- f.waiters = append(f.waiters, &fakeClockWaiter{
- targetTime: stopTime,
- destChan: ch,
- })
- return ch
-}
-
-// NewTimer constructs a fake timer, akin to time.NewTimer(d).
-func (f *FakeClock) NewTimer(d time.Duration) clock.Timer {
- f.lock.Lock()
- defer f.lock.Unlock()
- stopTime := f.time.Add(d)
- ch := make(chan time.Time, 1) // Don't block!
- timer := &fakeTimer{
- fakeClock: f,
- waiter: fakeClockWaiter{
- targetTime: stopTime,
- destChan: ch,
- },
- }
- f.waiters = append(f.waiters, &timer.waiter)
- return timer
-}
-
-// AfterFunc is the Fake version of time.AfterFunc(d, cb).
-func (f *FakeClock) AfterFunc(d time.Duration, cb func()) clock.Timer {
- f.lock.Lock()
- defer f.lock.Unlock()
- stopTime := f.time.Add(d)
- ch := make(chan time.Time, 1) // Don't block!
-
- timer := &fakeTimer{
- fakeClock: f,
- waiter: fakeClockWaiter{
- targetTime: stopTime,
- destChan: ch,
- afterFunc: cb,
- },
- }
- f.waiters = append(f.waiters, &timer.waiter)
- return timer
-}
-
-// Tick constructs a fake ticker, akin to time.Tick
-func (f *FakeClock) Tick(d time.Duration) <-chan time.Time {
- if d <= 0 {
- return nil
- }
- f.lock.Lock()
- defer f.lock.Unlock()
- tickTime := f.time.Add(d)
- ch := make(chan time.Time, 1) // hold one tick
- f.waiters = append(f.waiters, &fakeClockWaiter{
- targetTime: tickTime,
- stepInterval: d,
- skipIfBlocked: true,
- destChan: ch,
- })
-
- return ch
-}
-
-// NewTicker returns a new Ticker.
-func (f *FakeClock) NewTicker(d time.Duration) clock.Ticker {
- f.lock.Lock()
- defer f.lock.Unlock()
- tickTime := f.time.Add(d)
- ch := make(chan time.Time, 1) // hold one tick
- f.waiters = append(f.waiters, &fakeClockWaiter{
- targetTime: tickTime,
- stepInterval: d,
- skipIfBlocked: true,
- destChan: ch,
- })
-
- return &fakeTicker{
- c: ch,
- }
-}
-
-// Step moves the clock by Duration and notifies anyone that's called After,
-// Tick, or NewTimer.
-func (f *FakeClock) Step(d time.Duration) {
- f.lock.Lock()
- defer f.lock.Unlock()
- f.setTimeLocked(f.time.Add(d))
-}
-
-// SetTime sets the time.
-func (f *FakeClock) SetTime(t time.Time) {
- f.lock.Lock()
- defer f.lock.Unlock()
- f.setTimeLocked(t)
-}
-
-// Actually changes the time and checks any waiters. f must be write-locked.
-func (f *FakeClock) setTimeLocked(t time.Time) {
- f.time = t
- newWaiters := make([]*fakeClockWaiter, 0, len(f.waiters))
- for i := range f.waiters {
- w := f.waiters[i]
- if !w.targetTime.After(t) {
- if w.skipIfBlocked {
- select {
- case w.destChan <- t:
- default:
- }
- } else {
- w.destChan <- t
- }
-
- if w.afterFunc != nil {
- w.afterFunc()
- }
-
- if w.stepInterval > 0 {
- for !w.targetTime.After(t) {
- w.targetTime = w.targetTime.Add(w.stepInterval)
- }
- newWaiters = append(newWaiters, w)
- }
-
- } else {
- newWaiters = append(newWaiters, f.waiters[i])
- }
- }
- f.waiters = newWaiters
-}
-
-// HasWaiters returns true if Waiters() returns non-0 (so you can write race-free tests).
-func (f *FakeClock) HasWaiters() bool {
- f.lock.RLock()
- defer f.lock.RUnlock()
- return len(f.waiters) > 0
-}
-
-// Waiters returns the number of "waiters" on the clock (so you can write race-free
-// tests). A waiter exists for:
-// - every call to After that has not yet signaled its channel.
-// - every call to AfterFunc that has not yet called its callback.
-// - every timer created with NewTimer which is currently ticking.
-// - every ticker created with NewTicker which is currently ticking.
-// - every ticker created with Tick.
-func (f *FakeClock) Waiters() int {
- f.lock.RLock()
- defer f.lock.RUnlock()
- return len(f.waiters)
-}
-
-// Sleep is akin to time.Sleep
-func (f *FakeClock) Sleep(d time.Duration) {
- f.Step(d)
-}
-
-// IntervalClock implements clock.PassiveClock, but each invocation of Now steps the clock forward the specified duration.
-// IntervalClock technically implements the other methods of clock.Clock, but each implementation is just a panic.
-//
-// Deprecated: See SimpleIntervalClock for an alternative that only has the methods of PassiveClock.
-type IntervalClock struct {
- Time time.Time
- Duration time.Duration
-}
-
-// Now returns i's time.
-func (i *IntervalClock) Now() time.Time {
- i.Time = i.Time.Add(i.Duration)
- return i.Time
-}
-
-// Since returns time since the time in i.
-func (i *IntervalClock) Since(ts time.Time) time.Duration {
- return i.Time.Sub(ts)
-}
-
-// After is unimplemented, will panic.
-// TODO: make interval clock use FakeClock so this can be implemented.
-func (*IntervalClock) After(d time.Duration) <-chan time.Time {
- panic("IntervalClock doesn't implement After")
-}
-
-// NewTimer is unimplemented, will panic.
-// TODO: make interval clock use FakeClock so this can be implemented.
-func (*IntervalClock) NewTimer(d time.Duration) clock.Timer {
- panic("IntervalClock doesn't implement NewTimer")
-}
-
-// AfterFunc is unimplemented, will panic.
-// TODO: make interval clock use FakeClock so this can be implemented.
-func (*IntervalClock) AfterFunc(d time.Duration, f func()) clock.Timer {
- panic("IntervalClock doesn't implement AfterFunc")
-}
-
-// Tick is unimplemented, will panic.
-// TODO: make interval clock use FakeClock so this can be implemented.
-func (*IntervalClock) Tick(d time.Duration) <-chan time.Time {
- panic("IntervalClock doesn't implement Tick")
-}
-
-// NewTicker has no implementation yet and is omitted.
-// TODO: make interval clock use FakeClock so this can be implemented.
-func (*IntervalClock) NewTicker(d time.Duration) clock.Ticker {
- panic("IntervalClock doesn't implement NewTicker")
-}
-
-// Sleep is unimplemented, will panic.
-func (*IntervalClock) Sleep(d time.Duration) {
- panic("IntervalClock doesn't implement Sleep")
-}
-
-var _ = clock.Timer(&fakeTimer{})
-
-// fakeTimer implements clock.Timer based on a FakeClock.
-type fakeTimer struct {
- fakeClock *FakeClock
- waiter fakeClockWaiter
-}
-
-// C returns the channel that notifies when this timer has fired.
-func (f *fakeTimer) C() <-chan time.Time {
- return f.waiter.destChan
-}
-
-// Stop prevents the Timer from firing. It returns true if the call stops the
-// timer, false if the timer has already expired or been stopped.
-func (f *fakeTimer) Stop() bool {
- f.fakeClock.lock.Lock()
- defer f.fakeClock.lock.Unlock()
-
- active := false
- newWaiters := make([]*fakeClockWaiter, 0, len(f.fakeClock.waiters))
- for i := range f.fakeClock.waiters {
- w := f.fakeClock.waiters[i]
- if w != &f.waiter {
- newWaiters = append(newWaiters, w)
- continue
- }
- // If timer is found, it has not been fired yet.
- active = true
- }
-
- f.fakeClock.waiters = newWaiters
-
- return active
-}
-
-// Reset changes the timer to expire after duration d. It returns true if the
-// timer had been active, false if the timer had expired or been stopped.
-func (f *fakeTimer) Reset(d time.Duration) bool {
- f.fakeClock.lock.Lock()
- defer f.fakeClock.lock.Unlock()
-
- active := false
-
- f.waiter.targetTime = f.fakeClock.time.Add(d)
-
- for i := range f.fakeClock.waiters {
- w := f.fakeClock.waiters[i]
- if w == &f.waiter {
- // If timer is found, it has not been fired yet.
- active = true
- break
- }
- }
- if !active {
- f.fakeClock.waiters = append(f.fakeClock.waiters, &f.waiter)
- }
-
- return active
-}
-
-type fakeTicker struct {
- c <-chan time.Time
-}
-
-func (t *fakeTicker) C() <-chan time.Time {
- return t.c
-}
-
-func (t *fakeTicker) Stop() {
-}
diff --git a/vendor/k8s.io/utils/clock/testing/simple_interval_clock.go b/vendor/k8s.io/utils/clock/testing/simple_interval_clock.go
deleted file mode 100644
index 951ca4d170..0000000000
--- a/vendor/k8s.io/utils/clock/testing/simple_interval_clock.go
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
-Copyright 2021 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package testing
-
-import (
- "time"
-
- "k8s.io/utils/clock"
-)
-
-var (
- _ = clock.PassiveClock(&SimpleIntervalClock{})
-)
-
-// SimpleIntervalClock implements clock.PassiveClock, but each invocation of Now steps the clock forward the specified duration
-type SimpleIntervalClock struct {
- Time time.Time
- Duration time.Duration
-}
-
-// Now returns i's time.
-func (i *SimpleIntervalClock) Now() time.Time {
- i.Time = i.Time.Add(i.Duration)
- return i.Time
-}
-
-// Since returns time since the time in i.
-func (i *SimpleIntervalClock) Since(ts time.Time) time.Duration {
- return i.Time.Sub(ts)
-}
diff --git a/vendor/k8s.io/utils/ptr/OWNERS b/vendor/k8s.io/utils/ptr/OWNERS
new file mode 100644
index 0000000000..0d6392752a
--- /dev/null
+++ b/vendor/k8s.io/utils/ptr/OWNERS
@@ -0,0 +1,10 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+approvers:
+- apelisse
+- stewart-yu
+- thockin
+reviewers:
+- apelisse
+- stewart-yu
+- thockin
diff --git a/vendor/k8s.io/utils/ptr/README.md b/vendor/k8s.io/utils/ptr/README.md
new file mode 100644
index 0000000000..2ca8073dc7
--- /dev/null
+++ b/vendor/k8s.io/utils/ptr/README.md
@@ -0,0 +1,3 @@
+# Pointer
+
+This package provides some functions for pointer-based operations.
diff --git a/vendor/k8s.io/utils/ptr/ptr.go b/vendor/k8s.io/utils/ptr/ptr.go
new file mode 100644
index 0000000000..659ed3b9e2
--- /dev/null
+++ b/vendor/k8s.io/utils/ptr/ptr.go
@@ -0,0 +1,73 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package ptr
+
+import (
+ "fmt"
+ "reflect"
+)
+
+// AllPtrFieldsNil tests whether all pointer fields in a struct are nil. This is useful when,
+// for example, an API struct is handled by plugins which need to distinguish
+// "no plugin accepted this spec" from "this spec is empty".
+//
+// This function is only valid for structs and pointers to structs. Any other
+// type will cause a panic. Passing a typed nil pointer will return true.
+func AllPtrFieldsNil(obj interface{}) bool {
+ v := reflect.ValueOf(obj)
+ if !v.IsValid() {
+ panic(fmt.Sprintf("reflect.ValueOf() produced a non-valid Value for %#v", obj))
+ }
+ if v.Kind() == reflect.Ptr {
+ if v.IsNil() {
+ return true
+ }
+ v = v.Elem()
+ }
+ for i := 0; i < v.NumField(); i++ {
+ if v.Field(i).Kind() == reflect.Ptr && !v.Field(i).IsNil() {
+ return false
+ }
+ }
+ return true
+}
+
+// To returns a pointer to the given value.
+func To[T any](v T) *T {
+ return &v
+}
+
+// Deref dereferences ptr and returns the value it points to if no nil, or else
+// returns def.
+func Deref[T any](ptr *T, def T) T {
+ if ptr != nil {
+ return *ptr
+ }
+ return def
+}
+
+// Equal returns true if both arguments are nil or both arguments
+// dereference to the same value.
+func Equal[T comparable](a, b *T) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == nil {
+ return true
+ }
+ return *a == *b
+}
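
For context, a sketch of the three helpers the newly vendored ptr package exports, using a hypothetical spec struct whose nil pointer means "unset":

package main

import (
	"fmt"

	"k8s.io/utils/ptr"
)

// spec mimics an API object where a nil pointer means the field was not set.
type spec struct {
	Replicas *int32
}

func main() {
	s := spec{Replicas: ptr.To[int32](3)} // pointer to a literal in one expression

	fmt.Println(ptr.Deref(s.Replicas, 1))                // 3; the default 1 is only used when nil
	fmt.Println(ptr.Equal(s.Replicas, ptr.To[int32](3))) // true: both non-nil and equal
	fmt.Println(ptr.Equal[int32](nil, s.Replicas))       // false: exactly one side is nil
}
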
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 3f35fa8075..394f2c6ae4 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -162,6 +162,9 @@ github.com/fsnotify/fsnotify
# github.com/fvbommel/sortorder v1.1.0
## explicit; go 1.13
github.com/fvbommel/sortorder
+# github.com/fxamacker/cbor/v2 v2.7.0
+## explicit; go 1.17
+github.com/fxamacker/cbor/v2
# github.com/go-logr/logr v1.2.3
## explicit; go 1.18
# github.com/go-logr/logr v1.4.2
@@ -232,17 +235,13 @@ github.com/google/go-containerregistry/pkg/v1/tarball
github.com/google/go-containerregistry/pkg/v1/types
# github.com/google/gofuzz v1.2.0
## explicit; go 1.12
-github.com/google/gofuzz
-github.com/google/gofuzz/bytesource
# github.com/google/uuid v1.6.0
## explicit
github.com/google/uuid
# github.com/gorilla/mux v1.8.0
## explicit; go 1.12
-# github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0
-## explicit; go 1.22.7
# github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0
-## go 1.22.7
+## explicit; go 1.22.7
github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule
github.com/grpc-ecosystem/grpc-gateway/v2/runtime
github.com/grpc-ecosystem/grpc-gateway/v2/utilities
@@ -356,6 +355,9 @@ github.com/modern-go/reflect2
# github.com/morikuni/aec v1.0.0
## explicit
github.com/morikuni/aec
+# github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822
+## explicit
+github.com/munnerz/goautoneg
# github.com/opencontainers/go-digest v1.0.0
## explicit; go 1.13
github.com/opencontainers/go-digest
@@ -387,21 +389,21 @@ github.com/pkg/errors
## explicit
github.com/pmezard/go-difflib/difflib
# github.com/prometheus/client_golang v1.11.1
-## explicit; go 1.17
-# github.com/prometheus/client_golang v1.14.0
-## explicit; go 1.17
+## explicit; go 1.22
+# github.com/prometheus/client_golang v1.22.0
+## explicit; go 1.22
# github.com/prometheus/client_model v0.2.0
## explicit; go 1.19
# github.com/prometheus/common v0.30.0
-## explicit; go 1.17
-# github.com/prometheus/common v0.39.0
-## explicit; go 1.17
+## explicit; go 1.21
+# github.com/prometheus/common v0.62.0
+## explicit; go 1.21
# github.com/prometheus/procfs v0.7.3
-## explicit; go 1.18
-# github.com/prometheus/procfs v0.9.0
-## explicit; go 1.18
-# github.com/rancher/wharfie v0.6.2
-## explicit; go 1.19
+## explicit; go 1.20
+# github.com/prometheus/procfs v0.15.1
+## explicit; go 1.20
+# github.com/rancher/wharfie v0.7.0
+## explicit; go 1.24.2
github.com/rancher/wharfie/pkg/registries
# github.com/rivo/uniseg v0.2.0
## explicit; go 1.12
@@ -486,6 +488,9 @@ github.com/urfave/cli
# github.com/vbatts/tar-split v0.12.1
## explicit; go 1.17
github.com/vbatts/tar-split/archive/tar
+# github.com/x448/float16 v0.8.4
+## explicit; go 1.11
+github.com/x448/float16
# github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb
## explicit
github.com/xeipuuv/gojsonpointer
@@ -531,14 +536,12 @@ go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envco
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform
-# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0
-## explicit; go 1.22.7
# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0
-## go 1.22.7
+## explicit; go 1.22.7
go.opentelemetry.io/otel/exporters/otlp/otlptrace
go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform
-# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0
-## explicit; go 1.20
+# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0
+## explicit; go 1.22.7
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig
@@ -582,7 +585,7 @@ go.opentelemetry.io/otel/trace/noop
# go.opentelemetry.io/proto/otlp v1.3.1
## explicit; go 1.22.7
# go.opentelemetry.io/proto/otlp v1.4.0
-## go 1.22.7
+## explicit; go 1.22.7
go.opentelemetry.io/proto/otlp/collector/metrics/v1
go.opentelemetry.io/proto/otlp/collector/trace/v1
go.opentelemetry.io/proto/otlp/common/v1
@@ -591,8 +594,7 @@ go.opentelemetry.io/proto/otlp/resource/v1
go.opentelemetry.io/proto/otlp/trace/v1
# go.uber.org/atomic v1.9.0
## explicit; go 1.13
-go.uber.org/atomic
-# go.uber.org/multierr v1.9.0
+# go.uber.org/multierr v1.11.0
## explicit; go 1.19
go.uber.org/multierr
# go4.org/intern v0.0.0-20211027215823-ae77deb06f29
@@ -604,7 +606,7 @@ go4.org/netipx
## explicit; go 1.11
# golang.org/x/crypto v0.22.0
## explicit; go 1.23.0
-# golang.org/x/exp v0.0.0-20230905200255-921286631fa9
+# golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56
## explicit; go 1.20
golang.org/x/exp/constraints
golang.org/x/exp/slices
@@ -618,7 +620,7 @@ golang.org/x/mod/semver
## explicit; go 1.23.0
# golang.org/x/net v0.26.0
## explicit; go 1.23.0
-# golang.org/x/net v0.28.0
+# golang.org/x/net v0.38.0
## explicit; go 1.23.0
# golang.org/x/net v0.41.0
## go 1.23.0
@@ -652,14 +654,14 @@ golang.org/x/sys/windows
golang.org/x/sys/windows/registry
# golang.org/x/term v0.19.0
## explicit; go 1.23.0
-# golang.org/x/term v0.23.0
+# golang.org/x/term v0.30.0
## explicit; go 1.23.0
# golang.org/x/term v0.32.0
## go 1.23.0
golang.org/x/term
# golang.org/x/text v0.14.0
## explicit; go 1.23.0
-# golang.org/x/text v0.17.0
+# golang.org/x/text v0.23.0
## explicit; go 1.23.0
# golang.org/x/text v0.26.0
## go 1.23.0
@@ -677,28 +679,24 @@ golang.org/x/text/unicode/norm
## explicit; go 1.18
# golang.org/x/time v0.5.0
## explicit; go 1.18
+# golang.org/x/time v0.9.0
+## explicit; go 1.18
golang.org/x/time/rate
# google.golang.org/appengine v1.6.7
## explicit; go 1.11
# google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094
## explicit; go 1.21
-# google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142
-## explicit; go 1.21
# google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576
-## go 1.21
+## explicit; go 1.21
google.golang.org/genproto/googleapis/api/httpbody
# google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094
## explicit; go 1.21
-# google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1
-## explicit; go 1.21
# google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576
-## go 1.21
+## explicit; go 1.21
google.golang.org/genproto/googleapis/rpc/errdetails
google.golang.org/genproto/googleapis/rpc/status
-# google.golang.org/grpc v1.67.0
-## explicit; go 1.22
# google.golang.org/grpc v1.68.1
-## go 1.22
+## explicit; go 1.22
google.golang.org/grpc
google.golang.org/grpc/attributes
google.golang.org/grpc/backoff
@@ -762,7 +760,7 @@ google.golang.org/grpc/tap
## explicit; go 1.21
# google.golang.org/protobuf v1.34.2
## explicit; go 1.21
-# google.golang.org/protobuf v1.36.3
+# google.golang.org/protobuf v1.36.5
## explicit; go 1.21
google.golang.org/protobuf/encoding/protojson
google.golang.org/protobuf/encoding/prototext
@@ -830,10 +828,12 @@ gotest.tools/internal/source
# inet.af/netaddr v0.0.0-20211027220019-c74959edd3b6
## explicit; go 1.12
# k8s.io/apimachinery v0.25.0
-## explicit; go 1.22.0
-# k8s.io/apimachinery v0.30.2
-## explicit; go 1.22.0
+## explicit; go 1.24.0
+# k8s.io/apimachinery v0.33.0
+## explicit; go 1.24.0
k8s.io/apimachinery/pkg/api/errors
+k8s.io/apimachinery/pkg/api/meta
+k8s.io/apimachinery/pkg/api/operation
k8s.io/apimachinery/pkg/api/resource
k8s.io/apimachinery/pkg/apis/meta/v1
k8s.io/apimachinery/pkg/apis/meta/v1/unstructured
@@ -844,6 +844,9 @@ k8s.io/apimachinery/pkg/labels
k8s.io/apimachinery/pkg/runtime
k8s.io/apimachinery/pkg/runtime/schema
k8s.io/apimachinery/pkg/runtime/serializer
+k8s.io/apimachinery/pkg/runtime/serializer/cbor
+k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct
+k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes
k8s.io/apimachinery/pkg/runtime/serializer/json
k8s.io/apimachinery/pkg/runtime/serializer/protobuf
k8s.io/apimachinery/pkg/runtime/serializer/recognizer
@@ -868,9 +871,10 @@ k8s.io/apimachinery/pkg/version
k8s.io/apimachinery/pkg/watch
k8s.io/apimachinery/third_party/forked/golang/reflect
# k8s.io/client-go v0.25.0
-## explicit; go 1.22.0
-# k8s.io/client-go v0.30.2
-## explicit; go 1.22.0
+## explicit; go 1.24.0
+# k8s.io/client-go v0.33.0
+## explicit; go 1.24.0
+k8s.io/client-go/features
k8s.io/client-go/pkg/apis/clientauthentication
k8s.io/client-go/pkg/apis/clientauthentication/install
k8s.io/client-go/pkg/apis/clientauthentication/v1
@@ -894,7 +898,7 @@ k8s.io/client-go/util/keyutil
k8s.io/client-go/util/workqueue
# k8s.io/klog/v2 v2.70.1
## explicit; go 1.18
-# k8s.io/klog/v2 v2.120.1
+# k8s.io/klog/v2 v2.130.1
## explicit; go 1.18
k8s.io/klog/v2
k8s.io/klog/v2/internal/buffer
@@ -908,19 +912,23 @@ k8s.io/klog/v2/internal/sloghandler
# k8s.io/utils v0.0.0-20250604170112-4c0f3b243397
## explicit; go 1.18
k8s.io/utils/clock
-k8s.io/utils/clock/testing
k8s.io/utils/internal/third_party/forked/golang/net
k8s.io/utils/net
+k8s.io/utils/ptr
k8s.io/utils/strings/slices
# sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2
-## explicit; go 1.18
-# sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd
-## explicit; go 1.18
+## explicit; go 1.21
+# sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3
+## explicit; go 1.21
sigs.k8s.io/json
sigs.k8s.io/json/internal/golang/encoding/json
+# sigs.k8s.io/randfill v1.0.0
+## explicit; go 1.18
+sigs.k8s.io/randfill
+sigs.k8s.io/randfill/bytesource
# sigs.k8s.io/structured-merge-diff/v4 v4.2.3
## explicit; go 1.13
-# sigs.k8s.io/structured-merge-diff/v4 v4.4.1
+# sigs.k8s.io/structured-merge-diff/v4 v4.6.0
## explicit; go 1.13
sigs.k8s.io/structured-merge-diff/v4/value
# sigs.k8s.io/yaml v1.3.0
diff --git a/vendor/sigs.k8s.io/json/Makefile b/vendor/sigs.k8s.io/json/Makefile
index 07b8bfa857..fb6cf040f5 100644
--- a/vendor/sigs.k8s.io/json/Makefile
+++ b/vendor/sigs.k8s.io/json/Makefile
@@ -19,7 +19,7 @@ vet:
go vet sigs.k8s.io/json
@echo "checking for external dependencies"
- @deps=$$(go mod graph); \
+ @deps=$$(go list -f '{{ if not (or .Standard .Module.Main) }}{{.ImportPath}}{{ end }}' -deps sigs.k8s.io/json/... || true); \
if [ -n "$${deps}" ]; then \
echo "only stdlib dependencies allowed, found:"; \
echo "$${deps}"; \
diff --git a/vendor/sigs.k8s.io/json/OWNERS b/vendor/sigs.k8s.io/json/OWNERS
index 0fadafbddb..a08a434e61 100644
--- a/vendor/sigs.k8s.io/json/OWNERS
+++ b/vendor/sigs.k8s.io/json/OWNERS
@@ -2,5 +2,5 @@
approvers:
- deads2k
- - lavalamp
+ - jpbetz
- liggitt
diff --git a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/decode.go b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/decode.go
index 6a13cf2df0..d538ac119b 100644
--- a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/decode.go
+++ b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/decode.go
@@ -21,10 +21,10 @@ import (
// Unmarshal parses the JSON-encoded data and stores the result
// in the value pointed to by v. If v is nil or not a pointer,
-// Unmarshal returns an InvalidUnmarshalError.
+// Unmarshal returns an [InvalidUnmarshalError].
//
// Unmarshal uses the inverse of the encodings that
-// Marshal uses, allocating maps, slices, and pointers as necessary,
+// [Marshal] uses, allocating maps, slices, and pointers as necessary,
// with the following additional rules:
//
// To unmarshal JSON into a pointer, Unmarshal first handles the case of
@@ -33,28 +33,28 @@ import (
// the value pointed at by the pointer. If the pointer is nil, Unmarshal
// allocates a new value for it to point to.
//
-// To unmarshal JSON into a value implementing the Unmarshaler interface,
-// Unmarshal calls that value's UnmarshalJSON method, including
+// To unmarshal JSON into a value implementing [Unmarshaler],
+// Unmarshal calls that value's [Unmarshaler.UnmarshalJSON] method, including
// when the input is a JSON null.
-// Otherwise, if the value implements encoding.TextUnmarshaler
-// and the input is a JSON quoted string, Unmarshal calls that value's
-// UnmarshalText method with the unquoted form of the string.
+// Otherwise, if the value implements [encoding.TextUnmarshaler]
+// and the input is a JSON quoted string, Unmarshal calls
+// [encoding.TextUnmarshaler.UnmarshalText] with the unquoted form of the string.
//
// To unmarshal JSON into a struct, Unmarshal matches incoming object
-// keys to the keys used by Marshal (either the struct field name or its tag),
+// keys to the keys used by [Marshal] (either the struct field name or its tag),
// preferring an exact match but also accepting a case-insensitive match. By
// default, object keys which don't have a corresponding struct field are
-// ignored (see Decoder.DisallowUnknownFields for an alternative).
+// ignored (see [Decoder.DisallowUnknownFields] for an alternative).
//
// To unmarshal JSON into an interface value,
// Unmarshal stores one of these in the interface value:
//
-// bool, for JSON booleans
-// float64, for JSON numbers
-// string, for JSON strings
-// []interface{}, for JSON arrays
-// map[string]interface{}, for JSON objects
-// nil for JSON null
+// - bool, for JSON booleans
+// - float64, for JSON numbers
+// - string, for JSON strings
+// - []interface{}, for JSON arrays
+// - map[string]interface{}, for JSON objects
+// - nil for JSON null
//
// To unmarshal a JSON array into a slice, Unmarshal resets the slice length
// to zero and then appends each element to the slice.
@@ -72,16 +72,15 @@ import (
// use. If the map is nil, Unmarshal allocates a new map. Otherwise Unmarshal
// reuses the existing map, keeping existing entries. Unmarshal then stores
// key-value pairs from the JSON object into the map. The map's key type must
-// either be any string type, an integer, implement json.Unmarshaler, or
-// implement encoding.TextUnmarshaler.
+// either be any string type, an integer, or implement [encoding.TextUnmarshaler].
//
-// If the JSON-encoded data contain a syntax error, Unmarshal returns a SyntaxError.
+// If the JSON-encoded data contain a syntax error, Unmarshal returns a [SyntaxError].
//
// If a JSON value is not appropriate for a given target type,
// or if a JSON number overflows the target type, Unmarshal
// skips that field and completes the unmarshaling as best it can.
// If no more serious errors are encountered, Unmarshal returns
-// an UnmarshalTypeError describing the earliest such error. In any
+// an [UnmarshalTypeError] describing the earliest such error. In any
// case, it's not guaranteed that all the remaining fields following
// the problematic one will be unmarshaled into the target object.
//
@@ -119,7 +118,7 @@ func Unmarshal(data []byte, v any, opts ...UnmarshalOpt) error {
// a JSON value. UnmarshalJSON must copy the JSON data
// if it wishes to retain the data after returning.
//
-// By convention, to approximate the behavior of Unmarshal itself,
+// By convention, to approximate the behavior of [Unmarshal] itself,
// Unmarshalers implement UnmarshalJSON([]byte("null")) as a no-op.
type Unmarshaler interface {
UnmarshalJSON([]byte) error
@@ -157,8 +156,8 @@ func (e *UnmarshalFieldError) Error() string {
return "json: cannot unmarshal object key " + strconv.Quote(e.Key) + " into unexported field " + e.Field.Name + " of type " + e.Type.String()
}
-// An InvalidUnmarshalError describes an invalid argument passed to Unmarshal.
-// (The argument to Unmarshal must be a non-nil pointer.)
+// An InvalidUnmarshalError describes an invalid argument passed to [Unmarshal].
+// (The argument to [Unmarshal] must be a non-nil pointer.)
type InvalidUnmarshalError struct {
Type reflect.Type
}
@@ -573,17 +572,10 @@ func (d *decodeState) array(v reflect.Value) error {
break
}
- // Get element of array, growing if necessary.
+ // Expand slice length, growing the slice if necessary.
if v.Kind() == reflect.Slice {
- // Grow slice if necessary
if i >= v.Cap() {
- newcap := v.Cap() + v.Cap()/2
- if newcap < 4 {
- newcap = 4
- }
- newv := reflect.MakeSlice(v.Type(), v.Len(), newcap)
- reflect.Copy(newv, v)
- v.Set(newv)
+ v.Grow(1)
}
if i >= v.Len() {
v.SetLen(i + 1)
@@ -620,13 +612,11 @@ func (d *decodeState) array(v reflect.Value) error {
if i < v.Len() {
if v.Kind() == reflect.Array {
- // Array. Zero the rest.
- z := reflect.Zero(v.Type().Elem())
for ; i < v.Len(); i++ {
- v.Index(i).Set(z)
+ v.Index(i).SetZero() // zero remainder of array
}
} else {
- v.SetLen(i)
+ v.SetLen(i) // truncate the slice
}
}
if i == 0 && v.Kind() == reflect.Slice {
@@ -636,7 +626,7 @@ func (d *decodeState) array(v reflect.Value) error {
}
var nullLiteral = []byte("null")
-var textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
+var textUnmarshalerType = reflect.TypeFor[encoding.TextUnmarshaler]()
// object consumes an object from d.data[d.off-1:], decoding into v.
// The first byte ('{') of the object has been read already.
@@ -776,7 +766,7 @@ func (d *decodeState) object(v reflect.Value) error {
if !mapElem.IsValid() {
mapElem = reflect.New(elemType).Elem()
} else {
- mapElem.Set(reflect.Zero(elemType))
+ mapElem.SetZero()
}
subv = mapElem
if checkDuplicateField != nil {
@@ -784,28 +774,14 @@ func (d *decodeState) object(v reflect.Value) error {
}
d.appendStrictFieldStackKey(string(key))
} else {
- var f *field
- if i, ok := fields.nameIndex[string(key)]; ok {
- // Found an exact name match.
- f = &fields.list[i]
- if checkDuplicateField != nil {
- checkDuplicateField(i, f.name)
- }
- } else if !d.caseSensitive {
- // Fall back to the expensive case-insensitive
- // linear search.
- for i := range fields.list {
- ff := &fields.list[i]
- if ff.equalFold(ff.nameBytes, key) {
- f = ff
- if checkDuplicateField != nil {
- checkDuplicateField(i, f.name)
- }
- break
- }
- }
+ f := fields.byExactName[string(key)]
+ if f == nil && !d.caseSensitive {
+ f = fields.byFoldedName[string(foldName(key))]
}
if f != nil {
+ if checkDuplicateField != nil {
+ checkDuplicateField(f.listIndex, f.name)
+ }
subv = v
destring = f.quoted
for _, i := range f.index {
@@ -874,33 +850,35 @@ func (d *decodeState) object(v reflect.Value) error {
if v.Kind() == reflect.Map {
kt := t.Key()
var kv reflect.Value
- switch {
- case reflect.PointerTo(kt).Implements(textUnmarshalerType):
+ if reflect.PointerTo(kt).Implements(textUnmarshalerType) {
kv = reflect.New(kt)
if err := d.literalStore(item, kv, true); err != nil {
return err
}
kv = kv.Elem()
- case kt.Kind() == reflect.String:
- kv = reflect.ValueOf(key).Convert(kt)
- default:
+ } else {
switch kt.Kind() {
+ case reflect.String:
+ kv = reflect.New(kt).Elem()
+ kv.SetString(string(key))
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
s := string(key)
n, err := strconv.ParseInt(s, 10, 64)
- if err != nil || reflect.Zero(kt).OverflowInt(n) {
+ if err != nil || kt.OverflowInt(n) {
d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: kt, Offset: int64(start + 1)})
break
}
- kv = reflect.ValueOf(n).Convert(kt)
+ kv = reflect.New(kt).Elem()
+ kv.SetInt(n)
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
s := string(key)
n, err := strconv.ParseUint(s, 10, 64)
- if err != nil || reflect.Zero(kt).OverflowUint(n) {
+ if err != nil || kt.OverflowUint(n) {
d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: kt, Offset: int64(start + 1)})
break
}
- kv = reflect.ValueOf(n).Convert(kt)
+ kv = reflect.New(kt).Elem()
+ kv.SetUint(n)
default:
panic("json: Unexpected key type") // should never occur
}
@@ -950,12 +928,12 @@ func (d *decodeState) convertNumber(s string) (any, error) {
f, err := strconv.ParseFloat(s, 64)
if err != nil {
- return nil, &UnmarshalTypeError{Value: "number " + s, Type: reflect.TypeOf(0.0), Offset: int64(d.off)}
+ return nil, &UnmarshalTypeError{Value: "number " + s, Type: reflect.TypeFor[float64](), Offset: int64(d.off)}
}
return f, nil
}
-var numberType = reflect.TypeOf(Number(""))
+var numberType = reflect.TypeFor[Number]()
// literalStore decodes a literal stored in item into v.
//
@@ -965,7 +943,7 @@ var numberType = reflect.TypeOf(Number(""))
func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool) error {
// Check for unmarshaler.
if len(item) == 0 {
- //Empty string given
+ // Empty string given.
d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
return nil
}
@@ -1012,7 +990,7 @@ func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool
}
switch v.Kind() {
case reflect.Interface, reflect.Pointer, reflect.Map, reflect.Slice:
- v.Set(reflect.Zero(v.Type()))
+ v.SetZero()
// otherwise, ignore null for primitives/string
}
case 't', 'f': // true, false
@@ -1064,10 +1042,11 @@ func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool
}
v.SetBytes(b[:n])
case reflect.String:
- if v.Type() == numberType && !isValidNumber(string(s)) {
+ t := string(s)
+ if v.Type() == numberType && !isValidNumber(t) {
return fmt.Errorf("json: invalid number literal, trying to unmarshal %q into Number", item)
}
- v.SetString(string(s))
+ v.SetString(t)
case reflect.Interface:
if v.NumMethod() == 0 {
v.Set(reflect.ValueOf(string(s)))
@@ -1083,13 +1062,12 @@ func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool
}
panic(phasePanicMsg)
}
- s := string(item)
switch v.Kind() {
default:
if v.Kind() == reflect.String && v.Type() == numberType {
// s must be a valid number, because it's
// already been tokenized.
- v.SetString(s)
+ v.SetString(string(item))
break
}
if fromQuoted {
@@ -1097,7 +1075,7 @@ func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool
}
d.saveError(&UnmarshalTypeError{Value: "number", Type: v.Type(), Offset: int64(d.readIndex())})
case reflect.Interface:
- n, err := d.convertNumber(s)
+ n, err := d.convertNumber(string(item))
if err != nil {
d.saveError(err)
break
@@ -1109,25 +1087,25 @@ func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool
v.Set(reflect.ValueOf(n))
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- n, err := strconv.ParseInt(s, 10, 64)
+ n, err := strconv.ParseInt(string(item), 10, 64)
if err != nil || v.OverflowInt(n) {
- d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: v.Type(), Offset: int64(d.readIndex())})
+ d.saveError(&UnmarshalTypeError{Value: "number " + string(item), Type: v.Type(), Offset: int64(d.readIndex())})
break
}
v.SetInt(n)
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- n, err := strconv.ParseUint(s, 10, 64)
+ n, err := strconv.ParseUint(string(item), 10, 64)
if err != nil || v.OverflowUint(n) {
- d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: v.Type(), Offset: int64(d.readIndex())})
+ d.saveError(&UnmarshalTypeError{Value: "number " + string(item), Type: v.Type(), Offset: int64(d.readIndex())})
break
}
v.SetUint(n)
case reflect.Float32, reflect.Float64:
- n, err := strconv.ParseFloat(s, v.Type().Bits())
+ n, err := strconv.ParseFloat(string(item), v.Type().Bits())
if err != nil || v.OverflowFloat(n) {
- d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: v.Type(), Offset: int64(d.readIndex())})
+ d.saveError(&UnmarshalTypeError{Value: "number " + string(item), Type: v.Type(), Offset: int64(d.readIndex())})
break
}
v.SetFloat(n)
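
For context, the doc-comment list above (bool, float64, string, []interface{}, map[string]interface{}, nil) mirrors the standard library that this package forks; a quick illustration with plain encoding/json:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	var v interface{}
	data := []byte(`{"n": 1, "ok": true, "tags": ["a"], "note": null}`)
	if err := json.Unmarshal(data, &v); err != nil {
		panic(err)
	}

	m := v.(map[string]interface{})
	fmt.Printf("%T %T %T %v\n", m["n"], m["ok"], m["tags"], m["note"])
	// Output: float64 bool []interface {} <nil>
}
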
diff --git a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/encode.go b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/encode.go
index 5b67251fbb..eb73bff58b 100644
--- a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/encode.go
+++ b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/encode.go
@@ -12,12 +12,13 @@ package json
import (
"bytes"
+ "cmp"
"encoding"
"encoding/base64"
"fmt"
"math"
"reflect"
- "sort"
+ "slices"
"strconv"
"strings"
"sync"
@@ -28,29 +29,30 @@ import (
// Marshal returns the JSON encoding of v.
//
// Marshal traverses the value v recursively.
-// If an encountered value implements the Marshaler interface
-// and is not a nil pointer, Marshal calls its MarshalJSON method
-// to produce JSON. If no MarshalJSON method is present but the
-// value implements encoding.TextMarshaler instead, Marshal calls
-// its MarshalText method and encodes the result as a JSON string.
+// If an encountered value implements [Marshaler]
+// and is not a nil pointer, Marshal calls [Marshaler.MarshalJSON]
+// to produce JSON. If no [Marshaler.MarshalJSON] method is present but the
+// value implements [encoding.TextMarshaler] instead, Marshal calls
+// [encoding.TextMarshaler.MarshalText] and encodes the result as a JSON string.
// The nil pointer exception is not strictly necessary
// but mimics a similar, necessary exception in the behavior of
-// UnmarshalJSON.
+// [Unmarshaler.UnmarshalJSON].
//
// Otherwise, Marshal uses the following type-dependent default encodings:
//
// Boolean values encode as JSON booleans.
//
-// Floating point, integer, and Number values encode as JSON numbers.
+// Floating point, integer, and [Number] values encode as JSON numbers.
+// NaN and +/-Inf values will return an [UnsupportedValueError].
//
// String values encode as JSON strings coerced to valid UTF-8,
// replacing invalid bytes with the Unicode replacement rune.
// So that the JSON will be safe to embed inside HTML