diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 0000000..ae9c81f --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,7 @@ +version: 2 + +updates: + - package-ecosystem: "gomod" + directory: "libs-go" + schedule: + interval: "daily" diff --git a/.github/workflows/build-libs-go.yml b/.github/workflows/build-libs-go.yml new file mode 100644 index 0000000..4c547d0 --- /dev/null +++ b/.github/workflows/build-libs-go.yml @@ -0,0 +1,24 @@ +name: build-libs-go + +on: + push: + branches: main + +jobs: + build: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v2 + - name: setup go environment + uses: actions/setup-go@v1 + with: + go-version: '1.15.7' + - name: run unit tests + run: cd libs-go/ && make test + - name: upload coverage report + uses: actions/upload-artifact@master + with: + name: artifacts-libs-go-coverage-report-${{ github.sha }} + path: libs-go/.cover/ + if: always() diff --git a/.github/workflows/build-pr-libs-go.yml b/.github/workflows/build-pr-libs-go.yml new file mode 100644 index 0000000..36b3866 --- /dev/null +++ b/.github/workflows/build-pr-libs-go.yml @@ -0,0 +1,25 @@ +name: build-pr-libs-go + +on: + pull_request: + branches: + - main + +jobs: + build: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v2 + - name: setup go environment + uses: actions/setup-go@v1 + with: + go-version: '1.15.7' + - name: run unit tests + run: cd libs-go/ && make test + - name: upload coverage report + uses: actions/upload-artifact@master + with: + name: artifacts-libs-go-coverage-report-${{ github.sha }} + path: libs-go/.cover/ + if: always() diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..66f8fb5 --- /dev/null +++ b/.gitignore @@ -0,0 +1,2 @@ +.idea/ +.vscode/ diff --git a/libs-go/.gitignore b/libs-go/.gitignore new file mode 100644 index 0000000..cab1406 --- /dev/null +++ b/libs-go/.gitignore @@ -0,0 +1,26 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# VS Code +.vscode +debug + +# Jetbrains +.idea + +# Custom +.cover/ +.test/ +hello.txt +*.tar.gz +vendor/ diff --git a/libs-go/Makefile b/libs-go/Makefile new file mode 100644 index 0000000..294ebfd --- /dev/null +++ b/libs-go/Makefile @@ -0,0 +1,25 @@ +.PHONY: test +test: vendor check-encoding + ./scripts/test.sh + +.PHONY: covhtml +covhtml: + open .cover/coverage.html + +.PHONY: clean +clean: + git status --ignored --short | grep '^!! ' | sed 's/!! //' | xargs rm -rf + +.PHONY: check-encoding +check-encoding: + ! find pkg examples -name "*.go" -type f -exec file "{}" ";" | grep CRLF + ! 
find scripts -name "*.sh" -type f -exec file "{}" ";" | grep CRLF + +.PHONY: fix-encoding +fix-encoding: + find pkg examples -type f -name "*.go" -exec sed -i -e "s/\r//g" {} + + find scripts -type f -name "*.sh" -exec sed -i -e "s/\r//g" {} + + +.PHONY: vendor +vendor: + GO111MODULE=on go mod vendor diff --git a/libs-go/README.md b/libs-go/README.md new file mode 100644 index 0000000..57dde4d --- /dev/null +++ b/libs-go/README.md @@ -0,0 +1,57 @@ +# OCI Artifacts Go Libraries + +[![GitHub Actions status](https://github.com/bloodorangeio/artifacts/workflows/build-libs-go/badge.svg)](https://github.com/bloodorangeio/artifacts/actions?query=workflow%3Abuild-libs-go) + +## Example Usage + +[Source](examples/simple_push_pull.go) + +```go +package main + +import ( + "context" + "fmt" + + "github.com/opencontainers/artifacts/libs-go/pkg/artifacts" + "github.com/opencontainers/artifacts/libs-go/pkg/content" + + "github.com/containerd/containerd/remotes/docker" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +func check(e error) { + if e != nil { + panic(e) + } +} + +func main() { + ref := "localhost:5000/artifacts:test" + fileName := "hello.txt" + fileContent := []byte("Hello World!\n") + customMediaType := "my.custom.media.type" + + ctx := context.Background() + resolver := docker.NewResolver(docker.ResolverOptions{}) + + // Push file(s) with custom media type to registry + memoryStore := content.NewMemoryStore() + desc := memoryStore.Add(fileName, customMediaType, fileContent) + pushContents := []ocispec.Descriptor{desc} + fmt.Printf("Pushing %s to %s...\n", fileName, ref) + desc, err := artifacts.Push(ctx, resolver, ref, memoryStore, pushContents) + check(err) + fmt.Printf("Pushed to %s with digest %s\n", ref, desc.Digest) + + // Pull file(s) from registry and save to disk + fmt.Printf("Pulling from %s and saving to %s...\n", ref, fileName) + fileStore := content.NewFileStore("") + defer fileStore.Close() + allowedMediaTypes := []string{customMediaType} + desc, _, err = artifacts.Pull(ctx, resolver, ref, fileStore, artifacts.WithAllowedMediaTypes(allowedMediaTypes)) + check(err) + fmt.Printf("Pulled from %s with digest %s\n", ref, desc.Digest) + fmt.Printf("Try running 'cat %s'\n", fileName) +} +``` diff --git a/libs-go/examples/simple_push_pull.go b/libs-go/examples/simple_push_pull.go new file mode 100644 index 0000000..e60aafc --- /dev/null +++ b/libs-go/examples/simple_push_pull.go @@ -0,0 +1,47 @@ +package main + +import ( + "context" + "fmt" + + "github.com/opencontainers/artifacts/libs-go/pkg/artifacts" + "github.com/opencontainers/artifacts/libs-go/pkg/content" + + "github.com/containerd/containerd/remotes/docker" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +func check(e error) { + if e != nil { + panic(e) + } +} + +func main() { + ref := "localhost:5000/artifacts:test" + fileName := "hello.txt" + fileContent := []byte("Hello World!\n") + customMediaType := "my.custom.media.type" + + ctx := context.Background() + resolver := docker.NewResolver(docker.ResolverOptions{}) + + // Push file(s) with custom media type to registry + memoryStore := content.NewMemoryStore() + desc := memoryStore.Add(fileName, customMediaType, fileContent) + pushContents := []ocispec.Descriptor{desc} + fmt.Printf("Pushing %s to %s...\n", fileName, ref) + desc, err := artifacts.Push(ctx, resolver, ref, memoryStore, pushContents) + check(err) + fmt.Printf("Pushed to %s with digest %s\n", ref, desc.Digest) + + // Pull file(s) from registry and save to disk + fmt.Printf("Pulling from 
%s and saving to %s...\n", ref, fileName) + fileStore := content.NewFileStore("") + defer fileStore.Close() + allowedMediaTypes := []string{customMediaType} + desc, _, err = artifacts.Pull(ctx, resolver, ref, fileStore, artifacts.WithAllowedMediaTypes(allowedMediaTypes)) + check(err) + fmt.Printf("Pulled from %s with digest %s\n", ref, desc.Digest) + fmt.Printf("Try running 'cat %s'\n", fileName) +} \ No newline at end of file diff --git a/libs-go/go.mod b/libs-go/go.mod new file mode 100644 index 0000000..42bd8f1 --- /dev/null +++ b/libs-go/go.mod @@ -0,0 +1,38 @@ +module github.com/opencontainers/artifacts/libs-go + +go 1.15 + +replace ( + // WARNING! Do NOT replace these without also replacing their lines in the `require` stanza below. + // These `replace` stanzas are IGNORED when this is imported as a library + github.com/docker/distribution => github.com/docker/distribution v0.0.0-20191216044856-a8371794149d + github.com/docker/docker => github.com/moby/moby v17.12.0-ce-rc1.0.20200618181300-9dc6525e6118+incompatible +) + +require ( + github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 // indirect + github.com/Microsoft/go-winio v0.4.16 // indirect + github.com/Microsoft/hcsshim v0.8.14 // indirect + github.com/containerd/containerd v1.4.3 + github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e // indirect + github.com/docker/cli v20.10.3+incompatible + github.com/docker/distribution v0.0.0-20191216044856-a8371794149d + github.com/docker/docker v17.12.0-ce-rc1.0.20200618181300-9dc6525e6118+incompatible + github.com/docker/docker-credential-helpers v0.6.3 // indirect + github.com/docker/go-connections v0.4.0 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/google/go-cmp v0.5.4 // indirect + github.com/morikuni/aec v1.0.0 // indirect + github.com/opencontainers/go-digest v1.0.0 + github.com/opencontainers/image-spec v1.0.0 + github.com/opencontainers/runc v0.1.1 // indirect + github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2 + github.com/pkg/errors v0.9.1 + github.com/sirupsen/logrus v1.7.0 + github.com/stretchr/testify v1.5.1 + golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 + golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 + golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c // indirect + google.golang.org/grpc v1.35.0 // indirect + gotest.tools/v3 v3.0.3 // indirect +) diff --git a/libs-go/go.sum b/libs-go/go.sum new file mode 100644 index 0000000..bfc4ea5 --- /dev/null +++ b/libs-go/go.sum @@ -0,0 +1,319 @@ +bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8= +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8= +github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/Microsoft/go-winio v0.4.16-0.20201130162521-d1ffc52c7331/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= +github.com/Microsoft/go-winio v0.4.16 h1:FtSW/jqD+l4ba5iPBj9CODVtgfYAD8w2wS923g/cFDk= 
+github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= +github.com/Microsoft/hcsshim v0.8.14 h1:lbPVK25c1cu5xTLITwpUcxoA9vKrKErASPYygvouJns= +github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg= +github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d h1:UrqY+r/OJnIp5u0s1SbQ8dVfLCZJsnvazdBP5hS4iRs= +github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ= +github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= +github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a h1:BtpsbiV638WQZwhA98cEZw2BsbnQJrbd0BI7tsy0W1c= +github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/bitly/go-simplejson v0.5.0 h1:6IH+V8/tVMab511d5bn4M7EwGXZf9Hj6i2xSwkNEM+Y= +github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= +github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY= +github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= +github.com/bshuster-repo/logrus-logstash-hook v0.4.1 h1:pgAtgj+A31JBVtEHu2uHuEx0n+2ukqUJnS2vVe5pQNA= +github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= +github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd h1:rFt+Y/IK1aEZkEHchZRSq9OQbsSzIT/OrI8YFFmRIng= +github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= +github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b h1:otBG+dV+YK+Soembjv71DPz3uX/V/6MMlSyD9JBQ6kQ= +github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= +github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0 h1:nvj0OLI3YqYXer/kZD8Ri1aaunCxIEsOst1BVJswV0o= +github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59 h1:qWj4qVYZ95vLWwqyNJCQg7rDsG5wPdze0UaPolH7DUk= +github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM= +github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= +github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.3 h1:ijQT13JedHSHrQGWFcGEwzcNKrAGIiZ+jSD5QQG07SY= +github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e h1:6JKvHHt396/qabvMhnhUZvWaHZzfVfldxE60TK8YLhg= +github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod 
h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ= +github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= +github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= +github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= +github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= +github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0= +github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= +github.com/docker/cli v20.10.3+incompatible h1:WVEgoV/GpsTK5hruhHdYi79blQ+nmcm+7Ru/ZuiF+7E= +github.com/docker/cli v20.10.3+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/distribution v0.0.0-20191216044856-a8371794149d h1:jC8tT/S0OGx2cswpeUTn4gOIea8P08lD3VFQT0cOZ50= +github.com/docker/distribution v0.0.0-20191216044856-a8371794149d/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY= +github.com/docker/docker-credential-helpers v0.6.3 h1:zI2p9+1NQYdnG6sMU26EX4aVGlqbInSQxQXLvzJ4RPQ= +github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= +github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916 h1:yWHOI+vFjEsAakUTSrtqc/SAHrhSkmn48pqjidZX3QA= +github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI= +github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= +github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1 h1:ZClxb8laGDf5arXfYcAtECDFgAgHklGI8CxgjHnXKJ4= +github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7 
h1:LofdAjjjqCSXMwLGgOgnE+rdPuvX9DxCqaHwKy7i/ko= +github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= +github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= +github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33 h1:893HsJqtxp9z1SF76gg6hY70hRY1wVlTSnC/h1yUDCo= +github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= +github.com/gorilla/mux v1.7.2 h1:zoNxOV7WjqXptQOVngLmcSQgXmgk4NMz1HibBchjl/I= +github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= 
+github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho= +github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f h1:2+myh5ml7lgEU/51gbeLHfKGNfgEQQIWrlbdaOsidbQ= +github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A= +github.com/moby/moby v17.12.0-ce-rc1.0.20200618181300-9dc6525e6118+incompatible h1:NT0cwArZg/wGdvY8pzej4tPr+9WGmDdkF8Suj+mkz2g= +github.com/moby/moby v17.12.0-ce-rc1.0.20200618181300-9dc6525e6118+incompatible/go.mod h1:fDXVQ6+S340veQPv35CzDahGBmHsiclFwfEygB/TWMc= +github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= +github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420 h1:Yu3681ykYHDfLoI6XVjL4JWmkE+3TX9yfIWwRCh1kFM= +github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.0.0 h1:jcw3cCH887bLKETGYpv8afogdYchbShR0eH6oD9d5PQ= +github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v0.1.1 h1:GlxAyO6x8rfZYN9Tt0Kti5a/cP41iuiO2yYT0IJGY8Y= +github.com/opencontainers/runc v0.1.1/go.mod 
h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2 h1:JhzVVoYvbOACxoUmOs6V/G4D5nPVUW73rKvXxP4XUJc= +github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06 h1:BqJUZe1wY8984P2XGsGIGieuao8wucwOwaTS10L9Lj8= +github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5 h1:cLL6NowurKLMfCeQy4tIeph12XNQWgANCNvdyrOYKV4= +github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.0.0-20180110214958-89604d197083 h1:BVsJT8+ZbyuL3hypz/HmEiM8h2P6hBQGig4el9/MdjA= +github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7 h1:hhvfGDVThBnd4kYisSFmYuHYeUhglxcwag7FhVPH9zM= +github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= +github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v0.0.3 h1:ZlrZ4XsMRm04Fr5pSFxBgfND2EBVa1nLpiy1stUsX/8= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= 
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43 h1:+lm10QQTNSBd8DVTNGHx7o/IKu9HYDvLMffDhbyLccI= +github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= +github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50 h1:hlE8//ciYMztlGpl/VA+Zm1AcTPHYkHJPbHqE6WJUXE= +github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= +github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f h1:ERexzlUfuTvpE74urLSbIQW0Z/6hF9t8U4NsJLaioAY= +github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= +go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974 h1:IX6qOQeG5uLjB/hjjwjedwfjND0hgjPMMyO1RoIXQNI= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 h1:SQFwaSi55rU7vdNs9Yr0Z324VNlrF+0wMqRXT4St8ck= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed h1:uPxWBzB3+mlnjy9W58qY1j/cjyFjutgw/Vhan2zLy/A= +golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3 h1:kzM6+9dur93BcC2kVlYl34cHU+TYZLanmpSJHVMmL64= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c h1:VwygUrnw9jn88c4u8GD3rZQbqrP/tgas88tPUbBxQrk= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY= +google.golang.org/genproto 
v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a h1:zo0EaRwJM6T5UQ+QEt2dDSgEmbFJ4pZr/Rzsjpu7zgI= +google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.35.0 h1:TwIQcH3es+MojMVojxxfQ3l3OF2KzlRxML2xZq0kRo8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789 h1:NMiUjDZiD6qDVeBOzpImftxXzQHCp2Y2QLdmaqU9MRk= +gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= +gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= +gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/libs-go/pkg/artifacts/artifacts_test.go b/libs-go/pkg/artifacts/artifacts_test.go new file mode 100644 index 
0000000..da75a9f --- /dev/null +++ b/libs-go/pkg/artifacts/artifacts_test.go @@ -0,0 +1,447 @@ +package artifacts + +import ( + "archive/tar" + "bytes" + "compress/gzip" + "context" + _ "crypto/sha256" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "testing" + "time" + + artifactscontent "github.com/opencontainers/artifacts/libs-go/pkg/content" + + "github.com/containerd/containerd/images" + "github.com/containerd/containerd/remotes" + "github.com/containerd/containerd/remotes/docker" + "github.com/docker/distribution/configuration" + "github.com/docker/distribution/registry" + _ "github.com/docker/distribution/registry/storage/driver/inmemory" + digest "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/phayes/freeport" + "github.com/stretchr/testify/suite" +) + +var ( + testTarball = "../../testdata/charts/chartmuseum-1.8.2.tgz" + testDir = "../../testdata/charts/chartmuseum" + testDirFiles = []string{ + "Chart.yaml", + "values.yaml", + "README.md", + "templates/_helpers.tpl", + "templates/NOTES.txt", + "templates/service.yaml", + ".helmignore", + } +) + +type ArtifactsTestSuite struct { + suite.Suite + DockerRegistryHost string +} + +func newContext() context.Context { + return context.Background() +} + +func newResolver() remotes.Resolver { + return docker.NewResolver(docker.ResolverOptions{}) +} + +// Start Docker registry +func (suite *ArtifactsTestSuite) SetupSuite() { + config := &configuration.Configuration{} + port, err := freeport.GetFreePort() + if err != nil { + suite.Nil(err, "no error finding free port for test registry") + } + suite.DockerRegistryHost = fmt.Sprintf("localhost:%d", port) + config.HTTP.Addr = fmt.Sprintf(":%d", port) + config.HTTP.DrainTimeout = time.Duration(10) * time.Second + config.Storage = map[string]configuration.Parameters{"inmemory": map[string]interface{}{}} + dockerRegistry, err := registry.NewRegistry(context.Background(), config) + suite.Nil(err, "no error creating test registry") + + go dockerRegistry.ListenAndServe() +} + +// Push files to docker registry +func (suite *ArtifactsTestSuite) Test_0_Push() { + var ( + err error + ref string + desc ocispec.Descriptor + descriptors []ocispec.Descriptor + store *artifactscontent.FileStore + ) + + _, err = Push(newContext(), nil, ref, nil, descriptors) + suite.NotNil(err, "error pushing with empty resolver") + + _, err = Push(newContext(), newResolver(), ref, nil, descriptors) + suite.NotNil(err, "error pushing when context missing hostname") + + ref = fmt.Sprintf("%s/empty:test", suite.DockerRegistryHost) + _, err = Push(newContext(), newResolver(), ref, nil, descriptors) + suite.Nil(err, "no error pushing with empty descriptors") + + // Load descriptors with test chart tgz (as single layer) + store = artifactscontent.NewFileStore("") + basename := filepath.Base(testTarball) + desc, err = store.Add(basename, "", testTarball) + suite.Nil(err, "no error loading test chart") + descriptors = []ocispec.Descriptor{desc} + + ref = fmt.Sprintf("%s/chart-tgz:test", suite.DockerRegistryHost) + _, err = Push(newContext(), newResolver(), ref, store, descriptors) + suite.Nil(err, "no error pushing test chart tgz (as single layer)") + + // Load descriptors with test chart dir (each file as layer) + testDirAbs, err := filepath.Abs(testDir) + suite.Nil(err, "no error parsing test directory") + store = artifactscontent.NewFileStore(testDirAbs) + descriptors = []ocispec.Descriptor{} + var ff = func(pathX string, infoX os.FileInfo, errX 
error) error { + if !infoX.IsDir() { + filename := filepath.Join(filepath.Dir(pathX), infoX.Name()) + name := filepath.ToSlash(filename) + desc, err = store.Add(name, "", filename) + if err != nil { + return err + } + descriptors = append(descriptors, desc) + } + return nil + } + + cwd, _ := os.Getwd() + os.Chdir(testDir) + filepath.Walk(".", ff) + os.Chdir(cwd) + + ref = fmt.Sprintf("%s/chart-dir:test", suite.DockerRegistryHost) + _, err = Push(newContext(), newResolver(), ref, store, descriptors) + suite.Nil(err, "no error pushing test chart dir (each file as layer)") +} + +// Pull files and verify descriptors +func (suite *ArtifactsTestSuite) Test_1_Pull() { + var ( + err error + ref string + descriptors []ocispec.Descriptor + store *artifactscontent.Memorystore + ) + + _, descriptors, err = Pull(newContext(), nil, ref, nil) + suite.NotNil(err, "error pulling with empty resolver") + suite.Nil(descriptors, "descriptors nil pulling with empty resolver") + + // Pull non-existent ref + store = artifactscontent.NewMemoryStore() + ref = fmt.Sprintf("%s/nonexistent:test", suite.DockerRegistryHost) + _, descriptors, err = Pull(newContext(), newResolver(), ref, store) + suite.NotNil(err, "error pulling non-existent ref") + suite.Nil(descriptors, "descriptors empty with error") + + // Pull chart-tgz + store = artifactscontent.NewMemoryStore() + ref = fmt.Sprintf("%s/chart-tgz:test", suite.DockerRegistryHost) + _, descriptors, err = Pull(newContext(), newResolver(), ref, store) + suite.Nil(err, "no error pulling chart-tgz ref") + + // Verify the descriptors, single layer/file + content, err := ioutil.ReadFile(testTarball) + suite.Nil(err, "no error loading test chart") + name := filepath.Base(testTarball) + _, actualContent, ok := store.GetByName(name) + suite.True(ok, "find in memory") + suite.Equal(content, actualContent, ".tgz content matches on pull") + + // Pull chart-dir + store = artifactscontent.NewMemoryStore() + ref = fmt.Sprintf("%s/chart-dir:test", suite.DockerRegistryHost) + _, descriptors, err = Pull(newContext(), newResolver(), ref, store) + suite.Nil(err, "no error pulling chart-dir ref") + + // Verify the descriptors, multiple layers/files + cwd, _ := os.Getwd() + os.Chdir(testDir) + for _, filename := range testDirFiles { + content, err = ioutil.ReadFile(filename) + suite.Nil(err, fmt.Sprintf("no error loading %s", filename)) + _, actualContent, ok := store.GetByName(filename) + suite.True(ok, "find in memory") + suite.Equal(content, actualContent, fmt.Sprintf("%s content matches on pull", filename)) + } + os.Chdir(cwd) +} + +// Push and pull with customized media types +func (suite *ArtifactsTestSuite) Test_2_MediaType() { + var ( + testData = [][]string{ + {"hi.txt", "application/vnd.me.hi", "hi"}, + {"bye.txt", "application/vnd.me.bye", "bye"}, + } + err error + ref string + descriptors []ocispec.Descriptor + store *artifactscontent.Memorystore + ) + + // Push content with customized media types + store = artifactscontent.NewMemoryStore() + descriptors = nil + for _, data := range testData { + desc := store.Add(data[0], data[1], []byte(data[2])) + descriptors = append(descriptors, desc) + } + ref = fmt.Sprintf("%s/media-type:test", suite.DockerRegistryHost) + _, err = Push(newContext(), newResolver(), ref, store, descriptors) + suite.Nil(err, "no error pushing test data with customized media type") + + // Pull with all media types + store = artifactscontent.NewMemoryStore() + ref = fmt.Sprintf("%s/media-type:test", suite.DockerRegistryHost) + _, descriptors, err = 
Pull(newContext(), newResolver(), ref, store) + suite.Nil(err, "no error pulling media-type ref") + suite.Equal(2, len(descriptors), "number of contents matches on pull") + for _, data := range testData { + _, actualContent, ok := store.GetByName(data[0]) + suite.True(ok, "find in memory") + content := []byte(data[2]) + suite.Equal(content, actualContent, "test content matches on pull") + } + + // Pull with specified media type + store = artifactscontent.NewMemoryStore() + ref = fmt.Sprintf("%s/media-type:test", suite.DockerRegistryHost) + _, descriptors, err = Pull(newContext(), newResolver(), ref, store, WithAllowedMediaType(testData[0][1])) + suite.Nil(err, "no error pulling media-type ref") + suite.Equal(1, len(descriptors), "number of contents matches on pull") + for _, data := range testData[:1] { + _, actualContent, ok := store.GetByName(data[0]) + suite.True(ok, "find in memory") + content := []byte(data[2]) + suite.Equal(content, actualContent, "test content matches on pull") + } + + // Pull with non-existing media type + store = artifactscontent.NewMemoryStore() + ref = fmt.Sprintf("%s/media-type:test", suite.DockerRegistryHost) + _, descriptors, err = Pull(newContext(), newResolver(), ref, store, WithAllowedMediaType("non.existing.media.type")) + suite.Nil(err, "no error pulling media-type ref") + suite.Equal(0, len(descriptors), "number of contents matches on pull") +} + +// Pull with condition +func (suite *ArtifactsTestSuite) Test_3_Conditional_Pull() { + var ( + testData = [][]string{ + {"version.txt", "edge"}, + {"content.txt", "hello world"}, + } + err error + ref string + descriptors []ocispec.Descriptor + store *artifactscontent.Memorystore + stop bool + ) + + // Push test content + store = artifactscontent.NewMemoryStore() + descriptors = nil + for _, data := range testData { + desc := store.Add(data[0], "", []byte(data[1])) + descriptors = append(descriptors, desc) + } + ref = fmt.Sprintf("%s/conditional-pull:test", suite.DockerRegistryHost) + _, err = Push(newContext(), newResolver(), ref, store, descriptors) + suite.Nil(err, "no error pushing test data") + + // Pull all contents in sequence + store = artifactscontent.NewMemoryStore() + ref = fmt.Sprintf("%s/conditional-pull:test", suite.DockerRegistryHost) + _, descriptors, err = Pull(newContext(), newResolver(), ref, store, WithPullByBFS) + suite.Nil(err, "no error pulling ref") + suite.Equal(2, len(descriptors), "number of contents matches on pull") + for i, data := range testData { + _, actualContent, ok := store.GetByName(data[0]) + suite.True(ok, "find in memory") + content := []byte(data[1]) + suite.Equal(content, actualContent, "test content matches on pull") + name, _ := artifactscontent.ResolveName(descriptors[i]) + suite.Equal(data[0], name, "content sequence matches on pull") + } + + // Selective pull contents: stop at the very beginning + store = artifactscontent.NewMemoryStore() + ref = fmt.Sprintf("%s/conditional-pull:test", suite.DockerRegistryHost) + _, descriptors, err = Pull(newContext(), newResolver(), ref, store, WithPullByBFS, + WithPullBaseHandler(images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + if name, ok := artifactscontent.ResolveName(desc); ok && name == testData[0][0] { + return nil, ErrStopProcessing + } + return nil, nil + }))) + suite.Nil(err, "no error pulling ref") + suite.Equal(0, len(descriptors), "number of contents matches on pull") + + // Selective pull contents: stop in the middle + store = artifactscontent.NewMemoryStore() 
+ ref = fmt.Sprintf("%s/conditional-pull:test", suite.DockerRegistryHost) + stop = false + _, descriptors, err = Pull(newContext(), newResolver(), ref, store, WithPullByBFS, + WithPullBaseHandler(images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + if stop { + return nil, ErrStopProcessing + } + if name, ok := artifactscontent.ResolveName(desc); ok && name == testData[0][0] { + stop = true + } + return nil, nil + }))) + suite.Nil(err, "no error pulling ref") + suite.Equal(1, len(descriptors), "number of contents matches on pull") + for _, data := range testData[:1] { + _, actualContent, ok := store.GetByName(data[0]) + suite.True(ok, "find in memory") + content := []byte(data[1]) + suite.Equal(content, actualContent, "test content matches on pull") + } +} + +// Test for vulnerability GHSA-g5v4-5x39-vwhx +func (suite *ArtifactsTestSuite) Test_4_GHSA_g5v4_5x39_vwhx() { + var testVulnerability = func(headers []tar.Header, tag string, expectedError string) { + // Step 1: build malicious tar+gzip + buf := bytes.NewBuffer(nil) + digester := digest.Canonical.Digester() + zw := gzip.NewWriter(io.MultiWriter(buf, digester.Hash())) + tarDigester := digest.Canonical.Digester() + tw := tar.NewWriter(io.MultiWriter(zw, tarDigester.Hash())) + for _, header := range headers { + err := tw.WriteHeader(&header) + suite.Nil(err, "error writing header") + } + err := tw.Close() + suite.Nil(err, "error closing tar") + err = zw.Close() + suite.Nil(err, "error closing gzip") + + // Step 2: construct malicious descriptor + evilDesc := ocispec.Descriptor{ + MediaType: ocispec.MediaTypeImageLayerGzip, + Digest: digester.Digest(), + Size: int64(buf.Len()), + Annotations: map[string]string{ + artifactscontent.AnnotationDigest: tarDigester.Digest().String(), + artifactscontent.AnnotationUnpack: "true", + ocispec.AnnotationTitle: "foo", + }, + } + + // Step 3: upload malicious artifact to registry + memoryStore := artifactscontent.NewMemoryStore() + memoryStore.Set(evilDesc, buf.Bytes()) + ref := fmt.Sprintf("%s/evil:%s", suite.DockerRegistryHost, tag) + _, err = Push(newContext(), newResolver(), ref, memoryStore, []ocispec.Descriptor{evilDesc}) + suite.Nil(err, "no error pushing test data") + + // Step 4: pull malicious tar with filestore and ensure error + tempDir, err := ioutil.TempDir("", "artifacts_test") + if err != nil { + suite.FailNow("error creating temp directory", err) + } + defer os.RemoveAll(tempDir) + store := artifactscontent.NewFileStore(tempDir) + defer store.Close() + ref = fmt.Sprintf("%s/evil:%s", suite.DockerRegistryHost, tag) + _, _, err = Pull(newContext(), newResolver(), ref, store) + suite.NotNil(err, "error expected pulling malicious tar") + suite.Contains(err.Error(), + expectedError, + "did not get correct error message", + ) + } + + tests := []struct { + name string + headers []tar.Header + tag string + expectedError string + }{ + { + name: "Test symbolic link path traversal", + headers: []tar.Header{ + { + Typeflag: tar.TypeDir, + Name: "foo/subdir/", + Mode: 0755, + }, + { // Symbolic link to `foo` + Typeflag: tar.TypeSymlink, + Name: "foo/subdir/parent", + Linkname: "..", + Mode: 0755, + }, + { // Symbolic link to `../etc/passwd` + Typeflag: tar.TypeSymlink, + Name: "foo/subdir/parent/passwd", + Linkname: "../../etc/passwd", + Mode: 0644, + }, + { // Symbolic link to `../etc` + Typeflag: tar.TypeSymlink, + Name: "foo/subdir/parent/etc", + Linkname: "../../etc", + Mode: 0644, + }, + }, + tag: "symlink_path", + expectedError: "no symbolic 
link allowed", + }, + { + name: "Test symbolic link pointing to outside", + headers: []tar.Header{ + { // Symbolic link to `/etc/passwd` + Typeflag: tar.TypeSymlink, + Name: "foo/passwd", + Linkname: "../../../etc/passwd", + Mode: 0644, + }, + }, + tag: "symlink", + expectedError: "is outside of", + }, + { + name: "Test hard link pointing to outside", + headers: []tar.Header{ + { // Hard link to `/etc/passwd` + Typeflag: tar.TypeLink, + Name: "foo/passwd", + Linkname: "../../../etc/passwd", + Mode: 0644, + }, + }, + tag: "hardlink", + expectedError: "is outside of", + }, + } + for _, test := range tests { + suite.T().Log(test.name) + testVulnerability(test.headers, test.tag, test.expectedError) + } +} + +func TestArtifactsTestSuite(t *testing.T) { + suite.Run(t, new(ArtifactsTestSuite)) +} diff --git a/libs-go/pkg/artifacts/errors.go b/libs-go/pkg/artifacts/errors.go new file mode 100644 index 0000000..5c8335f --- /dev/null +++ b/libs-go/pkg/artifacts/errors.go @@ -0,0 +1,23 @@ +package artifacts + +import ( + "errors" + "fmt" +) + +// Common errors +var ( + ErrResolverUndefined = errors.New("resolver undefined") +) + +// Path validation related errors +var ( + ErrDirtyPath = errors.New("dirty path") + ErrPathNotSlashSeparated = errors.New("path not slash separated") + ErrAbsolutePathDisallowed = errors.New("absolute path disallowed") + ErrPathTraversalDisallowed = errors.New("path traversal disallowed") +) + +// ErrStopProcessing is used to stop processing an artifact operation. +// This error only makes sense in sequential pulling operation. +var ErrStopProcessing = fmt.Errorf("stop processing") diff --git a/libs-go/pkg/artifacts/pull.go b/libs-go/pkg/artifacts/pull.go new file mode 100644 index 0000000..c7d2d78 --- /dev/null +++ b/libs-go/pkg/artifacts/pull.go @@ -0,0 +1,135 @@ +package artifacts + +import ( + "context" + "sync" + + artifactscontent "github.com/opencontainers/artifacts/libs-go/pkg/content" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/images" + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/remotes" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "golang.org/x/sync/semaphore" +) + +// Pull pull files from the remote +func Pull(ctx context.Context, resolver remotes.Resolver, ref string, ingester content.Ingester, opts ...PullOpt) (ocispec.Descriptor, []ocispec.Descriptor, error) { + if resolver == nil { + return ocispec.Descriptor{}, nil, ErrResolverUndefined + } + opt := pullOptsDefaults() + for _, o := range opts { + if err := o(opt); err != nil { + return ocispec.Descriptor{}, nil, err + } + } + + _, desc, err := resolver.Resolve(ctx, ref) + if err != nil { + return ocispec.Descriptor{}, nil, err + } + + fetcher, err := resolver.Fetcher(ctx, ref) + if err != nil { + return ocispec.Descriptor{}, nil, err + } + + layers, err := fetchContent(ctx, fetcher, desc, ingester, opt) + if err != nil { + return ocispec.Descriptor{}, nil, err + } + return desc, layers, nil +} + +func fetchContent(ctx context.Context, fetcher remotes.Fetcher, desc ocispec.Descriptor, ingester content.Ingester, opts *pullOpts) ([]ocispec.Descriptor, error) { + var descriptors []ocispec.Descriptor + lock := &sync.Mutex{} + picker := images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + if isAllowedMediaType(desc.MediaType, opts.allowedMediaTypes...) 
{
+			if opts.filterName(desc) {
+				lock.Lock()
+				defer lock.Unlock()
+				descriptors = append(descriptors, desc)
+			}
+			return nil, nil
+		}
+		return nil, nil
+	})
+
+	store := opts.contentProvideIngester
+	if store == nil {
+		store = newHybridStoreFromIngester(ingester)
+	}
+	handlers := []images.Handler{
+		filterHandler(opts, opts.allowedMediaTypes...),
+	}
+	handlers = append(handlers, opts.baseHandlers...)
+	handlers = append(handlers,
+		remotes.FetchHandler(store, fetcher),
+		picker,
+		images.ChildrenHandler(store),
+	)
+	handlers = append(handlers, opts.callbackHandlers...)
+
+	if err := opts.dispatch(ctx, images.Handlers(handlers...), nil, desc); err != nil {
+		return nil, err
+	}
+
+	return descriptors, nil
+}
+
+func filterHandler(opts *pullOpts, allowedMediaTypes ...string) images.HandlerFunc {
+	return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
+		switch {
+		case isAllowedMediaType(desc.MediaType, ocispec.MediaTypeImageManifest, ocispec.MediaTypeImageIndex):
+			return nil, nil
+		case isAllowedMediaType(desc.MediaType, allowedMediaTypes...):
+			if opts.filterName(desc) {
+				return nil, nil
+			}
+			log.G(ctx).Warnf("blob has no name: %v", desc.Digest)
+		default:
+			log.G(ctx).Warnf("unknown type: %v", desc.MediaType)
+		}
+		return nil, images.ErrStopHandler
+	}
+}
+
+func isAllowedMediaType(mediaType string, allowedMediaTypes ...string) bool {
+	if len(allowedMediaTypes) == 0 {
+		return true
+	}
+	for _, allowedMediaType := range allowedMediaTypes {
+		if mediaType == allowedMediaType {
+			return true
+		}
+	}
+	return false
+}
+
+// dispatchBFS behaves the same as images.Dispatch() but in sequence with breadth-first search.
+func dispatchBFS(ctx context.Context, handler images.Handler, weighted *semaphore.Weighted, descs ...ocispec.Descriptor) error {
+	for i := 0; i < len(descs); i++ {
+		desc := descs[i]
+		children, err := handler.Handle(ctx, desc)
+		if err != nil {
+			switch err := errors.Cause(err); err {
+			case images.ErrSkipDesc:
+				continue // don't traverse the children.
+			case ErrStopProcessing:
+				return nil
+			}
+			return err
+		}
+		descs = append(descs, children...)
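+		// children are appended to descs, so the slice doubles as a FIFO
+		// queue and descriptors are visited in breadth-first order.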
+	}
+	return nil
+}
+
+func filterName(desc ocispec.Descriptor) bool {
+	name, ok := artifactscontent.ResolveName(desc)
+	return ok && len(name) > 0
+}
diff --git a/libs-go/pkg/artifacts/pull_opts.go b/libs-go/pkg/artifacts/pull_opts.go
new file mode 100644
index 0000000..57352be
--- /dev/null
+++ b/libs-go/pkg/artifacts/pull_opts.go
@@ -0,0 +1,116 @@
+package artifacts
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"sync"
+
+	artifactscontent "github.com/opencontainers/artifacts/libs-go/pkg/content"
+
+	"github.com/containerd/containerd/images"
+	"github.com/opencontainers/go-digest"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+	"golang.org/x/sync/semaphore"
+)
+
+type pullOpts struct {
+	allowedMediaTypes      []string
+	dispatch               func(context.Context, images.Handler, *semaphore.Weighted, ...ocispec.Descriptor) error
+	baseHandlers           []images.Handler
+	callbackHandlers       []images.Handler
+	contentProvideIngester artifactscontent.ProvideIngester
+	filterName             func(ocispec.Descriptor) bool
+}
+
+// PullOpt allows callers to set options on the artifact pull
+type PullOpt func(o *pullOpts) error
+
+func pullOptsDefaults() *pullOpts {
+	return &pullOpts{
+		dispatch:   images.Dispatch,
+		filterName: filterName,
+	}
+}
+
+// WithAllowedMediaType sets the allowed media types
+func WithAllowedMediaType(allowedMediaTypes ...string) PullOpt {
+	return func(o *pullOpts) error {
+		o.allowedMediaTypes = append(o.allowedMediaTypes, allowedMediaTypes...)
+		return nil
+	}
+}
+
+// WithAllowedMediaTypes sets the allowed media types
+func WithAllowedMediaTypes(allowedMediaTypes []string) PullOpt {
+	return func(o *pullOpts) error {
+		o.allowedMediaTypes = append(o.allowedMediaTypes, allowedMediaTypes...)
+		return nil
+	}
+}
+
+// WithPullByBFS opts to pull in sequence with breadth-first search
+func WithPullByBFS(o *pullOpts) error {
+	o.dispatch = dispatchBFS
+	return nil
+}
+
+// WithPullBaseHandler provides base handlers, which will be called before
+// any pull specific handlers.
+func WithPullBaseHandler(handlers ...images.Handler) PullOpt {
+	return func(o *pullOpts) error {
+		o.baseHandlers = append(o.baseHandlers, handlers...)
+		return nil
+	}
+}
+
+// WithPullCallbackHandler provides callback handlers, which will be called after
+// any pull specific handlers.
+func WithPullCallbackHandler(handlers ...images.Handler) PullOpt {
+	return func(o *pullOpts) error {
+		o.callbackHandlers = append(o.callbackHandlers, handlers...)
+		return nil
+	}
+}
+
+// WithContentProvideIngester opts to use the provided Provider and Ingester
+// for file system I/O, including caches.
+func WithContentProvideIngester(store artifactscontent.ProvideIngester) PullOpt {
+	return func(o *pullOpts) error {
+		o.contentProvideIngester = store
+		return nil
+	}
+}
+
+// WithPullEmptyNameAllowed allows pulling blobs with empty name.
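+//
+// A minimal usage sketch (ctx, resolver and ref are assumed to exist as in
+// the package example; without this option, unnamed blobs are skipped):
+//
+//   store := content.NewMemoryStore()
+//   _, layers, err := artifacts.Pull(ctx, resolver, ref, store, artifacts.WithPullEmptyNameAllowed())
+//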
+func WithPullEmptyNameAllowed() PullOpt { + return func(o *pullOpts) error { + o.filterName = func(ocispec.Descriptor) bool { + return true + } + return nil + } +} + +// WithPullStatusTrack report results to stdout +func WithPullStatusTrack(writer io.Writer) PullOpt { + return WithPullCallbackHandler(pullStatusTrack(writer)) +} + +func pullStatusTrack(writer io.Writer) images.Handler { + var printLock sync.Mutex + return images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + if name, ok := artifactscontent.ResolveName(desc); ok { + digestString := desc.Digest.String() + if err := desc.Digest.Validate(); err == nil { + if algo := desc.Digest.Algorithm(); algo == digest.SHA256 { + digestString = desc.Digest.Encoded()[:12] + } + } + printLock.Lock() + defer printLock.Unlock() + fmt.Fprintln(writer, "Downloaded", digestString, name) + } + return nil, nil + }) +} diff --git a/libs-go/pkg/artifacts/push.go b/libs-go/pkg/artifacts/push.go new file mode 100644 index 0000000..5557a90 --- /dev/null +++ b/libs-go/pkg/artifacts/push.go @@ -0,0 +1,111 @@ +package artifacts + +import ( + "context" + "encoding/json" + + "github.com/opencontainers/artifacts/libs-go/pkg/constants" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/images" + "github.com/containerd/containerd/remotes" + digest "github.com/opencontainers/go-digest" + specs "github.com/opencontainers/image-spec/specs-go" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// Push pushes files to the remote +func Push(ctx context.Context, resolver remotes.Resolver, ref string, provider content.Provider, descriptors []ocispec.Descriptor, opts ...PushOpt) (ocispec.Descriptor, error) { + if resolver == nil { + return ocispec.Descriptor{}, ErrResolverUndefined + } + opt := pushOptsDefaults() + for _, o := range opts { + if err := o(opt); err != nil { + return ocispec.Descriptor{}, err + } + } + if opt.validateName != nil { + for _, desc := range descriptors { + if err := opt.validateName(desc); err != nil { + return ocispec.Descriptor{}, err + } + } + } + + pusher, err := resolver.Pusher(ctx, ref) + if err != nil { + return ocispec.Descriptor{}, err + } + + desc, store, err := pack(provider, descriptors, opt) + if err != nil { + return ocispec.Descriptor{}, err + } + + var wrapper func(images.Handler) images.Handler + if len(opt.baseHandlers) > 0 { + wrapper = func(h images.Handler) images.Handler { + return images.Handlers(append(opt.baseHandlers, h)...) 
+ } + } + + if err := remotes.PushContent(ctx, pusher, desc, store, nil, wrapper); err != nil { + return ocispec.Descriptor{}, err + } + return desc, nil +} + +//func pack(store *hybridStore, descriptors []ocispec.Descriptor, opts *pushOpts) (ocispec.Descriptor, error) { +func pack(provider content.Provider, descriptors []ocispec.Descriptor, opts *pushOpts) (ocispec.Descriptor, content.Store, error) { + store := newHybridStoreFromProvider(provider) + + // Config + var config ocispec.Descriptor + if opts.config == nil { + configBytes := []byte("{}") + config = ocispec.Descriptor{ + MediaType: constants.UnknownConfigMediaType, + Digest: digest.FromBytes(configBytes), + Size: int64(len(configBytes)), + } + store.Set(config, configBytes) + } else { + config = *opts.config + } + if opts.configAnnotations != nil { + config.Annotations = opts.configAnnotations + } + if opts.configMediaType != "" { + config.MediaType = opts.configMediaType + } + + // Manifest + if opts.manifest != nil { + return *opts.manifest, store, nil + } + + if descriptors == nil { + descriptors = []ocispec.Descriptor{} // make it an empty array to prevent potential server-side bugs + } + manifest := ocispec.Manifest{ + Versioned: specs.Versioned{ + SchemaVersion: 2, // historical value. does not pertain to OCI or docker version + }, + Config: config, + Layers: descriptors, + Annotations: opts.manifestAnnotations, + } + manifestBytes, err := json.Marshal(manifest) + if err != nil { + return ocispec.Descriptor{}, nil, err + } + manifestDescriptor := ocispec.Descriptor{ + MediaType: ocispec.MediaTypeImageManifest, + Digest: digest.FromBytes(manifestBytes), + Size: int64(len(manifestBytes)), + } + store.Set(manifestDescriptor, manifestBytes) + + return manifestDescriptor, store, nil +} diff --git a/libs-go/pkg/artifacts/push_opts.go b/libs-go/pkg/artifacts/push_opts.go new file mode 100644 index 0000000..34b67ab --- /dev/null +++ b/libs-go/pkg/artifacts/push_opts.go @@ -0,0 +1,150 @@ +package artifacts + +import ( + "context" + "fmt" + "io" + "path/filepath" + "strings" + "sync" + + artifactscontent "github.com/opencontainers/artifacts/libs-go/pkg/content" + + "github.com/containerd/containerd/images" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +type pushOpts struct { + config *ocispec.Descriptor + configMediaType string + configAnnotations map[string]string + manifest *ocispec.Descriptor + manifestAnnotations map[string]string + validateName func(desc ocispec.Descriptor) error + baseHandlers []images.Handler +} + +func pushOptsDefaults() *pushOpts { + return &pushOpts{ + validateName: ValidateNameAsPath, + } +} + +// PushOpt allows callers to set options on the artifact push +type PushOpt func(o *pushOpts) error + +// WithConfig overrides the config - setting this will ignore WithConfigMediaType and WithConfigAnnotations +func WithConfig(config ocispec.Descriptor) PushOpt { + return func(o *pushOpts) error { + o.config = &config + return nil + } +} + +// WithConfigMediaType overrides the config media type +func WithConfigMediaType(mediaType string) PushOpt { + return func(o *pushOpts) error { + o.configMediaType = mediaType + return nil + } +} + +// WithConfigAnnotations overrides the config annotations +func WithConfigAnnotations(annotations map[string]string) PushOpt { + return func(o *pushOpts) error { + o.configAnnotations = annotations + return nil + } +} + +// WithManifest overrides the manifest - setting this will ignore WithManifestConfigAnnotations +func 
WithManifest(manifest ocispec.Descriptor) PushOpt { + return func(o *pushOpts) error { + o.manifest = &manifest + return nil + } +} + +// WithManifestAnnotations overrides the manifest annotations +func WithManifestAnnotations(annotations map[string]string) PushOpt { + return func(o *pushOpts) error { + o.manifestAnnotations = annotations + return nil + } +} + +// WithNameValidation validates the image title in the descriptor. +// Pass nil to disable name validation. +func WithNameValidation(validate func(desc ocispec.Descriptor) error) PushOpt { + return func(o *pushOpts) error { + o.validateName = validate + return nil + } +} + +// ValidateNameAsPath validates name in the descriptor as file path in order +// to generate good packages intended to be pulled using the FileStore or +// a CLI. +// For cross-platform considerations, only unix paths are accepted. +func ValidateNameAsPath(desc ocispec.Descriptor) error { + // no empty name + path, ok := artifactscontent.ResolveName(desc) + if !ok || path == "" { + return artifactscontent.ErrNoName + } + + // path should be clean + if target := filepath.ToSlash(filepath.Clean(path)); target != path { + return errors.Wrap(ErrDirtyPath, path) + } + + // path should be slash-separated + if strings.Contains(path, "\\") { + return errors.Wrap(ErrPathNotSlashSeparated, path) + } + + // disallow absolute path: covers unix and windows format + if strings.HasPrefix(path, "/") { + return errors.Wrap(ErrAbsolutePathDisallowed, path) + } + if len(path) > 2 { + c := path[0] + if path[1] == ':' && path[2] == '/' && ('a' <= c && c <= 'z' || 'A' <= c && c <= 'Z') { + return errors.Wrap(ErrAbsolutePathDisallowed, path) + } + } + + // disallow path traversal + if strings.HasPrefix(path, "../") || path == ".." { + return errors.Wrap(ErrPathTraversalDisallowed, path) + } + + return nil +} + +// WithPushBaseHandler provides base handlers, which will be called before +// any push specific handlers. +func WithPushBaseHandler(handlers ...images.Handler) PushOpt { + return func(o *pushOpts) error { + o.baseHandlers = append(o.baseHandlers, handlers...) 
+ return nil + } +} + +// WithPushStatusTrack report results to a provided writer +func WithPushStatusTrack(writer io.Writer) PushOpt { + return WithPushBaseHandler(pushStatusTrack(writer)) +} + +func pushStatusTrack(writer io.Writer) images.Handler { + var printLock sync.Mutex + return images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + if name, ok := artifactscontent.ResolveName(desc); ok { + printLock.Lock() + defer printLock.Unlock() + fmt.Fprintln(writer, "Uploading", desc.Digest.Encoded()[:12], name) + } + return nil, nil + }) +} diff --git a/libs-go/pkg/artifacts/push_opts_test.go b/libs-go/pkg/artifacts/push_opts_test.go new file mode 100644 index 0000000..3963c3c --- /dev/null +++ b/libs-go/pkg/artifacts/push_opts_test.go @@ -0,0 +1,64 @@ +package artifacts + +import ( + "testing" + + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/stretchr/testify/suite" +) + +type PushOptsSuite struct { + suite.Suite +} + +func (suite *PushOptsSuite) TestValidateNameAsPath() { + var err error + + // valid path + err = ValidateNameAsPath(descFromName("hello.txt")) + suite.NoError(err, "valid path") + err = ValidateNameAsPath(descFromName("foo/bar")) + suite.NoError(err, "valid path with multiple sub-directories") + + // no empty name + err = ValidateNameAsPath(descFromName("")) + suite.Error(err, "empty path") + + // path should be clean + err = ValidateNameAsPath(descFromName("./hello.txt")) + suite.Error(err, "dirty path") + err = ValidateNameAsPath(descFromName("foo/../bar")) + suite.Error(err, "dirty path") + + // path should be slash-separated + err = ValidateNameAsPath(descFromName("foo\\bar")) + suite.Error(err, "path not slash separated") + + // disallow absolute path + err = ValidateNameAsPath(descFromName("/foo/bar")) + suite.Error(err, "unix: absolute path disallowed") + err = ValidateNameAsPath(descFromName("C:\\foo\\bar")) + suite.Error(err, "windows: absolute path disallowed") + err = ValidateNameAsPath(descFromName("C:/foo/bar")) + suite.Error(err, "windows: absolute path disallowed") + + // disallow path traversal + err = ValidateNameAsPath(descFromName("..")) + suite.Error(err, "path traversal disallowed") + err = ValidateNameAsPath(descFromName("../bar")) + suite.Error(err, "path traversal disallowed") + err = ValidateNameAsPath(descFromName("foo/../../bar")) + suite.Error(err, "path traversal disallowed") +} + +func TestPushOptsSuite(t *testing.T) { + suite.Run(t, new(PushOptsSuite)) +} + +func descFromName(name string) ocispec.Descriptor { + return ocispec.Descriptor{ + Annotations: map[string]string{ + ocispec.AnnotationTitle: name, + }, + } +} diff --git a/libs-go/pkg/artifacts/store.go b/libs-go/pkg/artifacts/store.go new file mode 100644 index 0000000..a0d169a --- /dev/null +++ b/libs-go/pkg/artifacts/store.go @@ -0,0 +1,119 @@ +package artifacts + +import ( + "context" + "errors" + + artifactscontent "github.com/opencontainers/artifacts/libs-go/pkg/content" + + "github.com/containerd/containerd/content" + "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// ensure interface +var ( + _ content.Store = &hybridStore{} +) + +type hybridStore struct { + cache *artifactscontent.Memorystore + provider content.Provider + ingester content.Ingester +} + +func newHybridStoreFromProvider(provider content.Provider) *hybridStore { + return &hybridStore{ + cache: artifactscontent.NewMemoryStore(), + provider: provider, + } +} + +func 
newHybridStoreFromIngester(ingester content.Ingester) *hybridStore {
+	return &hybridStore{
+		cache:    artifactscontent.NewMemoryStore(),
+		ingester: ingester,
+	}
+}
+
+func (s *hybridStore) Set(desc ocispec.Descriptor, content []byte) {
+	s.cache.Set(desc, content)
+}
+
+// ReaderAt provides contents
+func (s *hybridStore) ReaderAt(ctx context.Context, desc ocispec.Descriptor) (content.ReaderAt, error) {
+	readerAt, err := s.cache.ReaderAt(ctx, desc)
+	if err == nil {
+		return readerAt, nil
+	}
+	if s.provider != nil {
+		return s.provider.ReaderAt(ctx, desc)
+	}
+	return nil, err
+}
+
+// Writer begins or resumes the active writer identified by desc
+func (s *hybridStore) Writer(ctx context.Context, opts ...content.WriterOpt) (content.Writer, error) {
+	var wOpts content.WriterOpts
+	for _, opt := range opts {
+		if err := opt(&wOpts); err != nil {
+			return nil, err
+		}
+	}
+
+	if isAllowedMediaType(wOpts.Desc.MediaType, ocispec.MediaTypeImageManifest, ocispec.MediaTypeImageIndex) || s.ingester == nil {
+		return s.cache.Writer(ctx, opts...)
+	}
+	return s.ingester.Writer(ctx, opts...)
+}
+
+// TODO: implement (needed to create a content.Store)
+// TODO: do not return empty content.Info
+// Info returns the content.Info for the requested digest.
+func (s *hybridStore) Info(ctx context.Context, dgst digest.Digest) (content.Info, error) {
+	return content.Info{}, nil
+}
+
+// TODO: implement (needed to create a content.Store)
+// Update updates mutable information related to content.
+// If one or more fieldpaths are provided, only those
+// fields will be updated.
+// Mutable fields:
+//   labels.*
+func (s *hybridStore) Update(ctx context.Context, info content.Info, fieldpaths ...string) (content.Info, error) {
+	return content.Info{}, errors.New("not yet implemented: Update (content.Store interface)")
+}
+
+// TODO: implement (needed to create a content.Store)
+// Walk will call fn for each item in the content store which
+// match the provided filters. If no filters are given all
+// items will be walked.
+func (s *hybridStore) Walk(ctx context.Context, fn content.WalkFunc, filters ...string) error {
+	return errors.New("not yet implemented: Walk (content.Store interface)")
+}
+
+// TODO: implement (needed to create a content.Store)
+// Delete removes the content from the store.
+func (s *hybridStore) Delete(ctx context.Context, dgst digest.Digest) error {
+	return errors.New("not yet implemented: Delete (content.Store interface)")
+}
+
+// TODO: implement (needed to create a content.Store)
+// Status returns the status of the provided ref.
+func (s *hybridStore) Status(ctx context.Context, ref string) (content.Status, error) {
+	return content.Status{}, errors.New("not yet implemented: Status (content.Store interface)")
+}
+
+// TODO: implement (needed to create a content.Store)
+// ListStatuses returns the status of any active ingestions whose ref match the
+// provided regular expression. If empty, all active ingestions will be
+// returned.
+func (s *hybridStore) ListStatuses(ctx context.Context, filters ...string) ([]content.Status, error) {
+	return []content.Status{}, errors.New("not yet implemented: ListStatuses (content.Store interface)")
+}
+
+// TODO: implement (needed to create a content.Store)
+// Abort completely cancels the ingest operation targeted by ref.
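+// It currently returns an error unconditionally, as the hybrid store does
+// not yet track any in-flight ingest state that could be cancelled.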
+func (s *hybridStore) Abort(ctx context.Context, ref string) error {
+	return errors.New("not yet implemented: Abort (content.Store interface)")
+}
diff --git a/libs-go/pkg/auth/client.go b/libs-go/pkg/auth/client.go
new file mode 100644
index 0000000..8fa1d67
--- /dev/null
+++ b/libs-go/pkg/auth/client.go
@@ -0,0 +1,24 @@
+package auth
+
+import (
+	"context"
+	"errors"
+	"net/http"
+
+	"github.com/containerd/containerd/remotes"
+)
+
+// Common errors
+var (
+	ErrNotLoggedIn = errors.New("not logged in")
+)
+
+// Client provides authentication operations for remotes.
+type Client interface {
+	// Login logs in to a remote server identified by the hostname.
+	Login(ctx context.Context, hostname, username, secret string, insecure bool) error
+	// Logout logs out from a remote server identified by the hostname.
+	Logout(ctx context.Context, hostname string) error
+	// Resolver returns a new authenticated resolver.
+	Resolver(ctx context.Context, client *http.Client, plainHTTP bool) (remotes.Resolver, error)
+}
diff --git a/libs-go/pkg/auth/docker/client.go b/libs-go/pkg/auth/docker/client.go
new file mode 100644
index 0000000..bd93c42
--- /dev/null
+++ b/libs-go/pkg/auth/docker/client.go
@@ -0,0 +1,75 @@
+package docker
+
+import (
+	"os"
+
+	"github.com/opencontainers/artifacts/libs-go/pkg/auth"
+
+	"github.com/docker/cli/cli/config"
+	"github.com/docker/cli/cli/config/configfile"
+	"github.com/docker/cli/cli/config/credentials"
+	"github.com/pkg/errors"
+)
+
+// Client provides authentication operations for docker registries.
+type Client struct {
+	configs []*configfile.ConfigFile
+}
+
+// NewClient creates a new auth client based on provided config paths.
+// If no config path is provided, the default path is used.
+// Credentials are read from the first config and fall back to the next on a miss.
+// All changes will only be written to the first config file.
+func NewClient(configPaths ...string) (auth.Client, error) {
+	if len(configPaths) == 0 {
+		cfg, err := config.Load(config.Dir())
+		if err != nil {
+			return nil, err
+		}
+		if !cfg.ContainsAuth() {
+			cfg.CredentialsStore = credentials.DetectDefaultStore(cfg.CredentialsStore)
+		}
+
+		return &Client{
+			configs: []*configfile.ConfigFile{cfg},
+		}, nil
+	}
+
+	var configs []*configfile.ConfigFile
+	for _, path := range configPaths {
+		cfg, err := loadConfigFile(path)
+		if err != nil {
+			return nil, errors.Wrap(err, path)
+		}
+		configs = append(configs, cfg)
+	}
+
+	return &Client{
+		configs: configs,
+	}, nil
+}
+
+func (c *Client) primaryCredentialsStore(hostname string) credentials.Store {
+	return c.configs[0].GetCredentialsStore(hostname)
+}
+
+// loadConfigFile reads the configuration file from the given path.
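+// A missing file is not treated as an error: an empty config is returned so
+// that the path can still be written to on a later login.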
+func loadConfigFile(path string) (*configfile.ConfigFile, error) { + cfg := configfile.New(path) + if _, err := os.Stat(path); err == nil { + file, err := os.Open(path) + if err != nil { + return nil, err + } + defer file.Close() + if err := cfg.LoadFromReader(file); err != nil { + return nil, err + } + } else if !os.IsNotExist(err) { + return nil, err + } + if !cfg.ContainsAuth() { + cfg.CredentialsStore = credentials.DetectDefaultStore(cfg.CredentialsStore) + } + return cfg, nil +} diff --git a/libs-go/pkg/auth/docker/client_test.go b/libs-go/pkg/auth/docker/client_test.go new file mode 100644 index 0000000..c01f483 --- /dev/null +++ b/libs-go/pkg/auth/docker/client_test.go @@ -0,0 +1,128 @@ +package docker + +import ( + "context" + "fmt" + "io/ioutil" + "net/http" + "os" + "path/filepath" + "testing" + "time" + + "github.com/docker/distribution/configuration" + "github.com/docker/distribution/registry" + _ "github.com/docker/distribution/registry/auth/htpasswd" + _ "github.com/docker/distribution/registry/storage/driver/inmemory" + "github.com/phayes/freeport" + "github.com/stretchr/testify/suite" + "golang.org/x/crypto/bcrypt" +) + +var ( + testConfig = "test.config" + testHtpasswd = "test.htpasswd" + testUsername = "alice" + testPassword = "wonderland" +) + +type DockerClientTestSuite struct { + suite.Suite + DockerRegistryHost string + Client *Client + TempTestDir string +} + +func newContext() context.Context { + return context.Background() +} + +func (suite *DockerClientTestSuite) SetupSuite() { + tempDir, err := ioutil.TempDir("", "oci_artifacts_libs_go_auth_docker_test") + suite.Nil(err, "no error creating temp directory for test") + suite.TempTestDir = tempDir + + // Create client + client, err := NewClient(filepath.Join(suite.TempTestDir, testConfig)) + suite.Nil(err, "no error creating client") + var ok bool + suite.Client, ok = client.(*Client) + suite.True(ok, "NewClient returns a *docker.Client inside") + + // Create htpasswd file with bcrypt + secret, err := bcrypt.GenerateFromPassword([]byte(testPassword), bcrypt.DefaultCost) + suite.Nil(err, "no error generating bcrypt password for test htpasswd file") + authRecord := fmt.Sprintf("%s:%s\n", testUsername, string(secret)) + htpasswdPath := filepath.Join(suite.TempTestDir, testHtpasswd) + err = ioutil.WriteFile(htpasswdPath, []byte(authRecord), 0644) + suite.Nil(err, "no error creating test htpasswd file") + + // Registry config + config := &configuration.Configuration{} + port, err := freeport.GetFreePort() + suite.Nil(err, "no error finding free port for test registry") + suite.DockerRegistryHost = fmt.Sprintf("localhost:%d", port) + config.HTTP.Addr = fmt.Sprintf(":%d", port) + config.HTTP.DrainTimeout = time.Duration(10) * time.Second + config.Storage = map[string]configuration.Parameters{"inmemory": map[string]interface{}{}} + config.Auth = configuration.Auth{ + "htpasswd": configuration.Parameters{ + "realm": "localhost", + "path": htpasswdPath, + }, + } + dockerRegistry, err := registry.NewRegistry(context.Background(), config) + suite.Nil(err, "no error finding free port for test registry") + + // Start Docker registry + go dockerRegistry.ListenAndServe() + ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) + defer cancel() + for { + select { + case <-ctx.Done(): + suite.FailNow("docker registry timed out") + default: + } + req, err := http.NewRequestWithContext( + ctx, + http.MethodGet, + fmt.Sprintf("http://%s/v2/", suite.DockerRegistryHost), + nil, + ) + suite.Nil(err, "no error in generate 
a /v2/ request") + resp, err := http.DefaultClient.Do(req) + if err == nil { + resp.Body.Close() + break + } + time.Sleep(time.Second) + } +} + +func (suite *DockerClientTestSuite) TearDownSuite() { + os.RemoveAll(suite.TempTestDir) +} + +func (suite *DockerClientTestSuite) Test_0_Login() { + var err error + + err = suite.Client.Login(newContext(), suite.DockerRegistryHost, "oscar", "opponent", false) + suite.NotNil(err, "error logging into registry with invalid credentials") + + err = suite.Client.Login(newContext(), suite.DockerRegistryHost, testUsername, testPassword, false) + suite.Nil(err, "no error logging into registry with valid credentials") +} +func (suite *DockerClientTestSuite) Test_2_Logout() { + var err error + + err = suite.Client.Logout(newContext(), "non-existing-host:42") + suite.NotNil(err, "error logging out of registry that has no entry") + + err = suite.Client.Logout(newContext(), suite.DockerRegistryHost) + suite.Nil(err, "no error logging out of registry") +} + +func TestDockerClientTestSuite(t *testing.T) { + suite.Run(t, new(DockerClientTestSuite)) +} diff --git a/libs-go/pkg/auth/docker/login.go b/libs-go/pkg/auth/docker/login.go new file mode 100644 index 0000000..29e1e08 --- /dev/null +++ b/libs-go/pkg/auth/docker/login.go @@ -0,0 +1,47 @@ +package docker + +import ( + "context" + + "github.com/opencontainers/artifacts/libs-go/pkg/constants" + + ctypes "github.com/docker/cli/cli/config/types" + "github.com/docker/docker/api/types" + "github.com/docker/docker/registry" +) + +// Login logs in to a docker registry identified by the hostname. +func (c *Client) Login(ctx context.Context, hostname, username, secret string, insecure bool) error { + hostname = resolveHostname(hostname) + cred := types.AuthConfig{ + Username: username, + ServerAddress: hostname, + } + if username == "" { + cred.IdentityToken = secret + } else { + cred.Password = secret + } + + opts := registry.ServiceOptions{} + + if insecure { + opts.InsecureRegistries = []string{hostname} + } + + // Login to ensure valid credential + remote, err := registry.NewService(opts) + if err != nil { + return err + } + if _, token, err := remote.Auth(ctx, &cred, constants.RemoteAuthUserAgent); err != nil { + return err + } else if token != "" { + cred.Username = "" + cred.Password = "" + cred.IdentityToken = token + } + + // Store credential + return c.primaryCredentialsStore(hostname).Store(ctypes.AuthConfig(cred)) +} diff --git a/libs-go/pkg/auth/docker/logout.go b/libs-go/pkg/auth/docker/logout.go new file mode 100644 index 0000000..2ea409e --- /dev/null +++ b/libs-go/pkg/auth/docker/logout.go @@ -0,0 +1,27 @@ +package docker + +import ( + "context" + + "github.com/opencontainers/artifacts/libs-go/pkg/auth" + + "github.com/docker/cli/cli/config/configfile" +) + +// Logout logs out from a docker registry identified by the hostname. +func (c *Client) Logout(_ context.Context, hostname string) error { + hostname = resolveHostname(hostname) + + var configs []*configfile.ConfigFile + for _, config := range c.configs { + if _, ok := config.AuthConfigs[hostname]; ok { + configs = append(configs, config) + } + } + if len(configs) == 0 { + return auth.ErrNotLoggedIn + } + + // Log out form the primary config only as backups are read-only. 
+ return c.primaryCredentialsStore(hostname).Erase(hostname) +} diff --git a/libs-go/pkg/auth/docker/resolver.go b/libs-go/pkg/auth/docker/resolver.go new file mode 100644 index 0000000..51358be --- /dev/null +++ b/libs-go/pkg/auth/docker/resolver.go @@ -0,0 +1,54 @@ +package docker + +import ( + "context" + "net/http" + + "github.com/containerd/containerd/remotes" + "github.com/containerd/containerd/remotes/docker" + ctypes "github.com/docker/cli/cli/config/types" + "github.com/docker/docker/registry" +) + +// Resolver returns a new authenticated resolver. +func (c *Client) Resolver(_ context.Context, client *http.Client, plainHTTP bool) (remotes.Resolver, error) { + return docker.NewResolver(docker.ResolverOptions{ + Credentials: c.Credential, + Client: client, + PlainHTTP: plainHTTP, + }), nil +} + +// Credential returns the login credential of the request host. +func (c *Client) Credential(hostname string) (string, string, error) { + hostname = resolveHostname(hostname) + var ( + auth ctypes.AuthConfig + err error + ) + for _, cfg := range c.configs { + auth, err = cfg.GetAuthConfig(hostname) + if err != nil { + // fall back to next config + continue + } + if auth.IdentityToken != "" { + return "", auth.IdentityToken, nil + } + if auth.Username == "" && auth.Password == "" { + // fall back to next config + continue + } + return auth.Username, auth.Password, nil + } + return "", "", err +} + +// resolveHostname resolves Docker specific hostnames +func resolveHostname(hostname string) string { + switch hostname { + case registry.IndexHostname, registry.IndexName, registry.DefaultV2Registry.Host: + return registry.IndexServer + } + return hostname +} diff --git a/libs-go/pkg/constants/consts.go b/libs-go/pkg/constants/consts.go new file mode 100644 index 0000000..09a5304 --- /dev/null +++ b/libs-go/pkg/constants/consts.go @@ -0,0 +1,11 @@ +package constants + +const ( + // UnknownConfigMediaType is the default mediaType used when no + // config media type is specified. 
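+	// The config blob pushed alongside this media type is typically the
+	// empty JSON object "{}", signalling that it carries no config schema.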
+ UnknownConfigMediaType = "application/vnd.unknown.config.v1+json" + + // RemoteAuthUserAgent is the value used for User-Agent header when + // performing registry login + RemoteAuthUserAgent = "oci-artifacts-libs-go" +) diff --git a/libs-go/pkg/content/consts.go b/libs-go/pkg/content/consts.go new file mode 100644 index 0000000..5240ff9 --- /dev/null +++ b/libs-go/pkg/content/consts.go @@ -0,0 +1,42 @@ +package content + +import ( + "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +const ( + // DefaultBlobMediaType specifies the default blob media type + DefaultBlobMediaType = ocispec.MediaTypeImageLayer + // DefaultBlobDirMediaType specifies the default blob directory media type + DefaultBlobDirMediaType = ocispec.MediaTypeImageLayerGzip +) + +const ( + // TempFilePattern specifies the pattern to create temporary files + TempFilePattern = "oci-artifacts" +) + +const ( + // AnnotationDigest is the annotation key for the digest of the uncompressed content + AnnotationDigest = "org.opencontainers.artifacts.content.digest" + // AnnotationUnpack is the annotation key for indication of unpacking + AnnotationUnpack = "org.opencontainers.artifacts.content.unpack" +) + +const ( + // OCIImageIndexFile is the file name of the index from the OCI Image Layout Specification + // Reference: https://github.com/opencontainers/image-spec/blob/master/image-layout.md#indexjson-file + OCIImageIndexFile = "index.json" +) + +const ( + // DefaultBlocksize default size of each slice of bytes read in each write through in gunzipand untar. + // Simply uses the same size as io.Copy() + DefaultBlocksize = 32768 +) + +const ( + // what you get for a blank digest + BlankHash = digest.Digest("sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855") +) diff --git a/libs-go/pkg/content/content_test.go b/libs-go/pkg/content/content_test.go new file mode 100644 index 0000000..7f50639 --- /dev/null +++ b/libs-go/pkg/content/content_test.go @@ -0,0 +1,172 @@ +package content + +import ( + "context" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "testing" + + "github.com/containerd/containerd/content" + digest "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/stretchr/testify/suite" +) + +type ContentTestSuite struct { + suite.Suite + TestMemoryStore *Memorystore + TestFileStore *FileStore +} + +var ( + testDirRoot, _ = filepath.Abs("../../.test") + testFileName = filepath.Join(testDirRoot, "testfile") + testRef = "abc123" + testContent = []byte("Hello World!") + testDescriptor = ocispec.Descriptor{ + MediaType: ocispec.MediaTypeImageConfig, + Digest: digest.FromBytes(testContent), + Size: int64(len(testContent)), + Annotations: map[string]string{ + ocispec.AnnotationTitle: testRef, + }, + } + testBadContent = []byte("doesnotexist") + testBadDescriptor = ocispec.Descriptor{ + MediaType: ocispec.MediaTypeImageConfig, + Digest: digest.FromBytes(testBadContent), + Size: int64(len(testBadContent)), + } +) + +func (suite *ContentTestSuite) SetupSuite() { + testMemoryStore := NewMemoryStore() + testMemoryStore.Add(testRef, "", testContent) + suite.TestMemoryStore = testMemoryStore + + os.Remove(testFileName) + err := ioutil.WriteFile(testFileName, testContent, 0644) + suite.Nil(err, "no error creating test file on disk") + testFileStore := NewFileStore(testDirRoot) + _, err = testFileStore.Add(testRef, "", testFileName) + suite.Nil(err, "no error adding item to file store") + suite.TestFileStore 
= testFileStore +} + +// Tests all Writers (Ingesters) +func (suite *ContentTestSuite) Test_0_Ingesters() { + ingesters := map[string]content.Ingester{ + "memory": suite.TestMemoryStore, + "file": suite.TestFileStore, + } + + for key, ingester := range ingesters { + + // Bad ref + ctx := context.Background() + refOpt := content.WithDescriptor(testBadDescriptor) + writer, err := ingester.Writer(ctx, refOpt) + if key == "file" { + suite.NotNil(err, fmt.Sprintf("no error getting writer w bad ref for %s store", key)) + } + + // Good ref + ctx = context.Background() + refOpt = content.WithDescriptor(testDescriptor) + writer, err = ingester.Writer(ctx, refOpt) + suite.Nil(err, fmt.Sprintf("no error getting writer w good ref for %s store", key)) + _, err = writer.Write(testContent) + suite.Nil(err, fmt.Sprintf("no error using writer.Write w good ref for %s store", key)) + err = writer.Commit(ctx, testDescriptor.Size, testDescriptor.Digest) + suite.Nil(err, fmt.Sprintf("no error using writer.Commit w good ref for %s store", key)) + + digest := writer.Digest() + suite.Equal(testDescriptor.Digest, digest, fmt.Sprintf("correct digest for %s store", key)) + status, err := writer.Status() + suite.Nil(err, fmt.Sprintf("no error retrieving writer status for %s store", key)) + suite.Equal(testRef, status.Ref, fmt.Sprintf("correct status for %s store", key)) + + // close writer + err = writer.Close() + suite.Nil(err, fmt.Sprintf("no error closing writer w bad ref for %s store", key)) + err = writer.Commit(ctx, testDescriptor.Size, testDescriptor.Digest) + suite.NotNil(err, fmt.Sprintf("error using writer.Commit when closed w good ref for %s store", key)) + + // re-init writer after closing + writer, _ = ingester.Writer(ctx, refOpt) + writer.Write(testContent) + + // invalid truncate size + err = writer.Truncate(123456789) + suite.NotNil(err, fmt.Sprintf("error using writer.Truncate w invalid size, good ref for %s store", key)) + + // valid truncate size + err = writer.Truncate(0) + suite.Nil(err, fmt.Sprintf("no error using writer.Truncate w valid size, good ref for %s store", key)) + + writer.Commit(ctx, testDescriptor.Size, testDescriptor.Digest) + + // bad size + err = writer.Commit(ctx, 1, testDescriptor.Digest) + fmt.Println(err) + suite.NotNil(err, fmt.Sprintf("error using writer.Commit w bad size, good ref for %s store", key)) + + // bad digest + writer, _ = ingester.Writer(ctx, refOpt) + err = writer.Commit(ctx, 0, testBadDescriptor.Digest) + suite.NotNil(err, fmt.Sprintf("error using writer.Commit w bad digest, good ref for %s store", key)) + } +} + +// Tests all Readers (Providers) +func (suite *ContentTestSuite) Test_1_Providers() { + providers := map[string]content.Provider{ + "memory": suite.TestMemoryStore, + "file": suite.TestFileStore, + } + + // Readers (Providers) + for key, provider := range providers { + + // Bad ref + ctx := context.Background() + _, err := provider.ReaderAt(ctx, testBadDescriptor) + suite.NotNil(err, fmt.Sprintf("error with bad ref for %s store", key)) + + // Good ref + ctx = context.Background() + readerAt, err := provider.ReaderAt(ctx, testDescriptor) + suite.Nil(err, fmt.Sprintf("no error with good ref for %s store", key)) + + // readerat Size() + suite.Equal(testDescriptor.Size, readerAt.Size(), fmt.Sprintf("readerat size matches for %s store", key)) + + // readerat Close() + err = readerAt.Close() + suite.Nil(err, fmt.Sprintf("no error closing readerat for %s store", key)) + + // file missing + if key == "file" { + os.Remove(testFileName) + ctx := 
context.Background()
+			_, err := provider.ReaderAt(ctx, testDescriptor)
+			suite.NotNil(err, fmt.Sprintf("error with good ref for %s store (file missing)", key))
+		}
+	}
+}
+
+func (suite *ContentTestSuite) Test_2_GetByName() {
+	// NotFound
+	_, _, ok := suite.TestMemoryStore.GetByName("doesnotexist")
+	suite.False(ok, "unable to find non-existent ref by name for memory store")
+
+	// Found
+	_, _, ok = suite.TestMemoryStore.GetByName(testRef)
+	suite.True(ok, "able to find existing ref by name for memory store")
+}
+
+func TestContentTestSuite(t *testing.T) {
+	suite.Run(t, new(ContentTestSuite))
+}
diff --git a/libs-go/pkg/content/decompressstore.go b/libs-go/pkg/content/decompressstore.go
new file mode 100644
index 0000000..42d832a
--- /dev/null
+++ b/libs-go/pkg/content/decompressstore.go
@@ -0,0 +1,137 @@
+package content
+
+import (
+	"context"
+	"errors"
+	"strings"
+
+	ctrcontent "github.com/containerd/containerd/content"
+)
+
+// DecompressStore wraps another store, decompressing content and extracting
+// from tar if needed. By default, a FileStore will simply take each artifact and write it to
+// a file, as a MemoryStore will do into memory. If the artifact is gzipped or tarred,
+// you might want to store the actual object inside the tar or gzip. Wrap your Store
+// with DecompressStore, and it will check the media-type and, if relevant,
+// gunzip and/or untar.
+//
+// For example:
+//
+//   fileStore := NewFileStore(rootPath)
+//   decompressStore := store.NewDecompressStore(fileStore, WithBlocksize(blocksize))
+//
+// The above example works if there is no tar, i.e. each artifact is just a single file, perhaps gzipped,
+// or if there is only one file in each tar archive. In other words, when each content.Writer has only one target output stream.
+// However, if you have multiple files in each tar archive, each archive of which is an artifact layer, then
+// you need a way to select how to handle each file in the tar archive. In other words, when each content.Writer has more than one
+// target output stream. 
In that case, use the following example: +// +// multiStore := NewMultiStore(rootPath) // some store that can handle different filenames +// decompressStore := store.NewDecompressStore(multiStore, WithBlocksize(blocksize), WithMultiWriterIngester()) +// +type DecompressStore struct { + ingester ctrcontent.Ingester + blocksize int + multiWriterIngester bool +} + +func NewDecompressStore(ingester ctrcontent.Ingester, opts ...WriterOpt) DecompressStore { + // we have to reprocess the opts to find the blocksize + var wOpts WriterOpts + for _, opt := range opts { + if err := opt(&wOpts); err != nil { + // TODO: we probably should handle errors here + continue + } + } + + return DecompressStore{ingester, wOpts.Blocksize, wOpts.MultiWriterIngester} +} + +// Writer get a writer +func (d DecompressStore) Writer(ctx context.Context, opts ...ctrcontent.WriterOpt) (ctrcontent.Writer, error) { + // the logic is straightforward: + // - if there is a desc in the opts, and the mediatype is tar or tar+gzip, then pass the correct decompress writer + // - else, pass the regular writer + var ( + writer ctrcontent.Writer + err error + multiIngester MultiWriterIngester + ok bool + ) + + // check to see if we are supposed to use a MultiWriterIngester + if d.multiWriterIngester { + multiIngester, ok = d.ingester.(MultiWriterIngester) + if !ok { + return nil, errors.New("configured to use multiwriter ingester, but ingester does not implement multiwriter") + } + } + + // we have to reprocess the opts to find the desc + var wOpts ctrcontent.WriterOpts + for _, opt := range opts { + if err := opt(&wOpts); err != nil { + return nil, err + } + } + // figure out if compression and/or archive exists + desc := wOpts.Desc + // before we pass it down, we need to strip anything we are removing here + // and possibly update the digest, since the store indexes things by digest + hasGzip, hasTar, modifiedMediaType := checkCompression(desc.MediaType) + wOpts.Desc.MediaType = modifiedMediaType + opts = append(opts, ctrcontent.WithDescriptor(wOpts.Desc)) + // determine if we pass it blocksize, only if positive + writerOpts := []WriterOpt{} + if d.blocksize > 0 { + writerOpts = append(writerOpts, WithBlocksize(d.blocksize)) + } + + writer, err = d.ingester.Writer(ctx, opts...) + if err != nil { + return nil, err + } + + // do we need to wrap with an untar writer? + if hasTar { + // if not multiingester, get a regular writer + if multiIngester == nil { + writer = NewUntarWriter(writer, writerOpts...) + } else { + writers, err := multiIngester.Writers(ctx, opts...) + if err != nil { + return nil, err + } + writer = NewUntarWriterByName(writers, writerOpts...) + } + } + if hasGzip { + if writer == nil { + writer, err = d.ingester.Writer(ctx, opts...) + if err != nil { + return nil, err + } + } + writer = NewGunzipWriter(writer, writerOpts...) + } + return writer, nil +} + +// checkCompression check if the mediatype uses gzip compression or tar. +// Returns if it has gzip and/or tar, as well as the base media type without +// those suffixes. 
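+//
+// For example, a sketch of what the suffix stripping below yields (media
+// types are illustrative):
+//
+//   checkCompression("application/vnd.oci.image.layer.v1.tar+gzip")
+//   // -> gzip=true, tar=true, mt="application/vnd.oci.image.layer.v1"
+//   checkCompression("application/vnd.oci.image.config.v1+json")
+//   // -> gzip=false, tar=false, mt unchanged
+//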
+func checkCompression(mediaType string) (gzip, tar bool, mt string) { + mt = mediaType + gzipSuffix := "+gzip" + tarSuffix := ".tar" + if strings.HasSuffix(mt, gzipSuffix) { + mt = mt[:len(mt)-len(gzipSuffix)] + gzip = true + } + if strings.HasSuffix(mt, tarSuffix) { + mt = mt[:len(mt)-len(tarSuffix)] + tar = true + } + return +} diff --git a/libs-go/pkg/content/decompressstore_test.go b/libs-go/pkg/content/decompressstore_test.go new file mode 100644 index 0000000..e71564a --- /dev/null +++ b/libs-go/pkg/content/decompressstore_test.go @@ -0,0 +1,61 @@ +package content_test + +import ( + "bytes" + "compress/gzip" + "context" + "fmt" + "testing" + + "github.com/opencontainers/artifacts/libs-go/pkg/content" + + ctrcontent "github.com/containerd/containerd/content" + digest "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +func TestDecompressStore(t *testing.T) { + rawContent := []byte("Hello World!") + var buf bytes.Buffer + gw := gzip.NewWriter(&buf) + if _, err := gw.Write(rawContent); err != nil { + t.Fatalf("unable to create gzip content for testing: %v", err) + } + if err := gw.Close(); err != nil { + t.Fatalf("unable to close gzip writer creating content for testing: %v", err) + } + gzipContent := buf.Bytes() + gzipContentHash := digest.FromBytes(gzipContent) + gzipDescriptor := ocispec.Descriptor{ + MediaType: fmt.Sprintf("%s+gzip", ocispec.MediaTypeImageConfig), + Digest: gzipContentHash, + Size: int64(len(gzipContent)), + } + + memStore := content.NewMemoryStore() + decompressStore := content.NewDecompressStore(memStore, content.WithBlocksize(0)) + ctx := context.Background() + decompressWriter, err := decompressStore.Writer(ctx, ctrcontent.WithDescriptor(gzipDescriptor)) + if err != nil { + t.Fatalf("unable to get a decompress writer: %v", err) + } + n, err := decompressWriter.Write(gzipContent) + if err != nil { + t.Fatalf("failed to write to decompress writer: %v", err) + } + if n != len(gzipContent) { + t.Fatalf("wrote %d instead of expected %d bytes", n, len(gzipContent)) + } + if err := decompressWriter.Commit(ctx, int64(len(gzipContent)), gzipContentHash); err != nil { + t.Fatalf("unexpected error committing decompress writer: %v", err) + } + + // and now we should be able to get the decompressed data from the memory store + _, b, found := memStore.Get(gzipDescriptor) + if !found { + t.Fatalf("failed to get data from underlying memory store: %v", err) + } + if string(b) != string(rawContent) { + t.Errorf("mismatched data in underlying memory store, actual '%s', expected '%s'", b, rawContent) + } +} diff --git a/libs-go/pkg/content/errors.go b/libs-go/pkg/content/errors.go new file mode 100644 index 0000000..e4a6cbf --- /dev/null +++ b/libs-go/pkg/content/errors.go @@ -0,0 +1,17 @@ +package content + +import "errors" + +// Common errors +var ( + ErrNotFound = errors.New("not_found") + ErrNoName = errors.New("no_name") + ErrUnsupportedSize = errors.New("unsupported_size") + ErrUnsupportedVersion = errors.New("unsupported_version") +) + +// FileStore errors +var ( + ErrPathTraversalDisallowed = errors.New("path_traversal_disallowed") + ErrOverwriteDisallowed = errors.New("overwrite_disallowed") +) diff --git a/libs-go/pkg/content/file.go b/libs-go/pkg/content/file.go new file mode 100644 index 0000000..77b1333 --- /dev/null +++ b/libs-go/pkg/content/file.go @@ -0,0 +1,411 @@ +package content + +import ( + "compress/gzip" + "context" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + "sync" + "time" + + 
"github.com/containerd/containerd/content" + "github.com/containerd/containerd/errdefs" + digest "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +// ensure interface +var ( + _ ProvideIngester = &FileStore{} +) + +// FileStore provides content from the file system +type FileStore struct { + DisableOverwrite bool + AllowPathTraversalOnWrite bool + + // Reproducible enables stripping times from added files + Reproducible bool + + root string + descriptor *sync.Map // map[digest.Digest]ocispec.Descriptor + pathMap *sync.Map + tmpFiles *sync.Map +} + +// NewFileStore creats a new file store +func NewFileStore(rootPath string) *FileStore { + return &FileStore{ + root: rootPath, + descriptor: &sync.Map{}, + pathMap: &sync.Map{}, + tmpFiles: &sync.Map{}, + } +} + +// Add adds a file reference +func (s *FileStore) Add(name, mediaType, path string) (ocispec.Descriptor, error) { + if path == "" { + path = name + } + path = s.MapPath(name, path) + + fileInfo, err := os.Stat(path) + if err != nil { + return ocispec.Descriptor{}, err + } + + var desc ocispec.Descriptor + if fileInfo.IsDir() { + desc, err = s.descFromDir(name, mediaType, path) + } else { + desc, err = s.descFromFile(fileInfo, mediaType, path) + } + if err != nil { + return ocispec.Descriptor{}, err + } + if desc.Annotations == nil { + desc.Annotations = make(map[string]string) + } + desc.Annotations[ocispec.AnnotationTitle] = name + + s.set(desc) + return desc, nil +} + +func (s *FileStore) descFromFile(info os.FileInfo, mediaType, path string) (ocispec.Descriptor, error) { + file, err := os.Open(path) + if err != nil { + return ocispec.Descriptor{}, err + } + defer file.Close() + digest, err := digest.FromReader(file) + if err != nil { + return ocispec.Descriptor{}, err + } + + if mediaType == "" { + mediaType = DefaultBlobMediaType + } + return ocispec.Descriptor{ + MediaType: mediaType, + Digest: digest, + Size: info.Size(), + }, nil +} + +func (s *FileStore) descFromDir(name, mediaType, root string) (ocispec.Descriptor, error) { + // generate temp file + file, err := s.tempFile() + if err != nil { + return ocispec.Descriptor{}, err + } + defer file.Close() + s.MapPath(name, file.Name()) + + // compress directory + digester := digest.Canonical.Digester() + zw := gzip.NewWriter(io.MultiWriter(file, digester.Hash())) + defer zw.Close() + tarDigester := digest.Canonical.Digester() + if err := tarDirectory(root, name, io.MultiWriter(zw, tarDigester.Hash()), s.Reproducible); err != nil { + return ocispec.Descriptor{}, err + } + + // flush all + if err := zw.Close(); err != nil { + return ocispec.Descriptor{}, err + } + if err := file.Sync(); err != nil { + return ocispec.Descriptor{}, err + } + + // generate descriptor + if mediaType == "" { + mediaType = DefaultBlobDirMediaType + } + info, err := file.Stat() + if err != nil { + return ocispec.Descriptor{}, err + } + return ocispec.Descriptor{ + MediaType: mediaType, + Digest: digester.Digest(), + Size: info.Size(), + Annotations: map[string]string{ + AnnotationDigest: tarDigester.Digest().String(), + AnnotationUnpack: "true", + }, + }, nil +} + +func (s *FileStore) tempFile() (*os.File, error) { + file, err := ioutil.TempFile("", TempFilePattern) + if err != nil { + return nil, err + } + s.tmpFiles.Store(file.Name(), file) + return file, nil +} + +// Close frees up resources used by the file store +func (s *FileStore) Close() error { + var errs []string + s.tmpFiles.Range(func(name, _ interface{}) bool { + 
if err := os.Remove(name.(string)); err != nil { + errs = append(errs, err.Error()) + } + return true + }) + return errors.New(strings.Join(errs, "; ")) +} + +// ReaderAt provides contents +func (s *FileStore) ReaderAt(ctx context.Context, desc ocispec.Descriptor) (content.ReaderAt, error) { + desc, ok := s.get(desc) + if !ok { + return nil, ErrNotFound + } + name, ok := ResolveName(desc) + if !ok { + return nil, ErrNoName + } + path := s.ResolvePath(name) + file, err := os.Open(path) + if err != nil { + return nil, err + } + + return sizeReaderAt{ + readAtCloser: file, + size: desc.Size, + }, nil +} + +// Writer begins or resumes the active writer identified by desc +func (s *FileStore) Writer(ctx context.Context, opts ...content.WriterOpt) (content.Writer, error) { + var wOpts content.WriterOpts + for _, opt := range opts { + if err := opt(&wOpts); err != nil { + return nil, err + } + } + desc := wOpts.Desc + + name, ok := ResolveName(desc) + if !ok { + return nil, ErrNoName + } + path, err := s.resolveWritePath(name) + if err != nil { + return nil, err + } + file, afterCommit, err := s.createWritePath(path, desc, name) + if err != nil { + return nil, err + } + + now := time.Now() + return &fileWriter{ + store: s, + file: file, + desc: desc, + digester: digest.Canonical.Digester(), + status: content.Status{ + Ref: name, + Total: desc.Size, + StartedAt: now, + UpdatedAt: now, + }, + afterCommit: afterCommit, + }, nil +} + +func (s *FileStore) resolveWritePath(name string) (string, error) { + path := s.ResolvePath(name) + if !s.AllowPathTraversalOnWrite { + base, err := filepath.Abs(s.root) + if err != nil { + return "", err + } + target, err := filepath.Abs(path) + if err != nil { + return "", err + } + rel, err := filepath.Rel(base, target) + if err != nil { + return "", ErrPathTraversalDisallowed + } + rel = filepath.ToSlash(rel) + if strings.HasPrefix(rel, "../") || rel == ".." 
{ + return "", ErrPathTraversalDisallowed + } + } + if s.DisableOverwrite { + if _, err := os.Stat(path); err == nil { + return "", ErrOverwriteDisallowed + } else if !os.IsNotExist(err) { + return "", err + } + } + return path, nil +} + +func (s *FileStore) createWritePath(path string, desc ocispec.Descriptor, prefix string) (*os.File, func() error, error) { + if value, ok := desc.Annotations[AnnotationUnpack]; !ok || value != "true" { + if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { + return nil, nil, err + } + file, err := os.Create(path) + return file, nil, err + } + + if err := os.MkdirAll(path, 0755); err != nil { + return nil, nil, err + } + file, err := s.tempFile() + checksum := desc.Annotations[AnnotationDigest] + afterCommit := func() error { + return extractTarGzip(path, prefix, file.Name(), checksum) + } + return file, afterCommit, err +} + +// MapPath maps name to path +func (s *FileStore) MapPath(name, path string) string { + path = s.resolvePath(path) + s.pathMap.Store(name, path) + return path +} + +// ResolvePath returns the path by name +func (s *FileStore) ResolvePath(name string) string { + if value, ok := s.pathMap.Load(name); ok { + if path, ok := value.(string); ok { + return path + } + } + + // using the name as a fallback solution + return s.resolvePath(name) +} + +func (s *FileStore) resolvePath(path string) string { + if filepath.IsAbs(path) { + return path + } + return filepath.Join(s.root, path) +} + +func (s *FileStore) set(desc ocispec.Descriptor) { + s.descriptor.Store(desc.Digest, desc) +} + +func (s *FileStore) get(desc ocispec.Descriptor) (ocispec.Descriptor, bool) { + value, ok := s.descriptor.Load(desc.Digest) + if !ok { + return ocispec.Descriptor{}, false + } + desc, ok = value.(ocispec.Descriptor) + return desc, ok +} + +type fileWriter struct { + store *FileStore + file *os.File + desc ocispec.Descriptor + digester digest.Digester + status content.Status + afterCommit func() error +} + +func (w *fileWriter) Status() (content.Status, error) { + return w.status, nil +} + +// Digest returns the current digest of the content, up to the current write. +// +// Cannot be called concurrently with `Write`. +func (w *fileWriter) Digest() digest.Digest { + return w.digester.Digest() +} + +// Write p to the transaction. 
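+// Bytes are added to the running digest as they are accepted by the
+// underlying file, so Digest reflects exactly what has been written so far.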
+func (w *fileWriter) Write(p []byte) (n int, err error) { + n, err = w.file.Write(p) + w.digester.Hash().Write(p[:n]) + w.status.Offset += int64(len(p)) + w.status.UpdatedAt = time.Now() + return n, err +} + +func (w *fileWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error { + var base content.Info + for _, opt := range opts { + if err := opt(&base); err != nil { + return err + } + } + + if w.file == nil { + return errors.Wrap(errdefs.ErrFailedPrecondition, "cannot commit on closed writer") + } + file := w.file + w.file = nil + + if err := file.Sync(); err != nil { + file.Close() + return errors.Wrap(err, "sync failed") + } + + fileInfo, err := file.Stat() + if err != nil { + file.Close() + return errors.Wrap(err, "stat failed") + } + if err := file.Close(); err != nil { + return errors.Wrap(err, "failed to close file") + } + + if size > 0 && size != fileInfo.Size() { + return errors.Wrapf(errdefs.ErrFailedPrecondition, "unexpected commit size %d, expected %d", fileInfo.Size(), size) + } + if dgst := w.digester.Digest(); expected != "" && expected != dgst { + return errors.Wrapf(errdefs.ErrFailedPrecondition, "unexpected commit digest %s, expected %s", dgst, expected) + } + + w.store.set(w.desc) + if w.afterCommit != nil { + return w.afterCommit() + } + return nil +} + +// Close the writer, flushing any unwritten data and leaving the progress in +// tact. +func (w *fileWriter) Close() error { + if w.file == nil { + return nil + } + + w.file.Sync() + err := w.file.Close() + w.file = nil + return err +} + +func (w *fileWriter) Truncate(size int64) error { + if size != 0 { + return ErrUnsupportedSize + } + w.status.Offset = 0 + w.digester.Hash().Reset() + if _, err := w.file.Seek(0, io.SeekStart); err != nil { + return err + } + return w.file.Truncate(0) +} diff --git a/libs-go/pkg/content/gunzip.go b/libs-go/pkg/content/gunzip.go new file mode 100644 index 0000000..73526b2 --- /dev/null +++ b/libs-go/pkg/content/gunzip.go @@ -0,0 +1,57 @@ +package content + +import ( + "compress/gzip" + "fmt" + "io" + + "github.com/containerd/containerd/content" +) + +// NewGunzipWriter wrap a writer with a gunzip, so that the stream is gunzipped +// +// By default, it calculates the hash when writing. If the option `skipHash` is true, +// it will skip doing the hash. Skipping the hash is intended to be used only +// if you are confident about the validity of the data being passed to the writer, +// and wish to save on the hashing time. +func NewGunzipWriter(writer content.Writer, opts ...WriterOpt) content.Writer { + // process opts for default + wOpts := DefaultWriterOpts() + for _, opt := range opts { + if err := opt(&wOpts); err != nil { + return nil + } + } + return NewPassthroughWriter(writer, func(r io.Reader, w io.Writer, done chan<- error) { + gr, err := gzip.NewReader(r) + if err != nil { + done <- fmt.Errorf("error creating gzip reader: %v", err) + return + } + // write out the uncompressed data + b := make([]byte, wOpts.Blocksize, wOpts.Blocksize) + for { + var n int + n, err = gr.Read(b) + if err != nil && err != io.EOF { + err = fmt.Errorf("GunzipWriter data read error: %v\n", err) + break + } + l := n + if n > len(b) { + l = len(b) + } + if _, err2 := w.Write(b[:l]); err2 != nil { + err = fmt.Errorf("GunzipWriter: error writing to underlying writer: %v", err2) + break + } + if err == io.EOF { + // clear the error + err = nil + break + } + } + gr.Close() + done <- err + }, opts...) 
diff --git a/libs-go/pkg/content/interface.go b/libs-go/pkg/content/interface.go
new file mode 100644
index 0000000..85caa3f
--- /dev/null
+++ b/libs-go/pkg/content/interface.go
@@ -0,0 +1,9 @@
+package content
+
+import "github.com/containerd/containerd/content"
+
+// ProvideIngester is the interface that groups content.Provider and content.Ingester.
+type ProvideIngester interface {
+	content.Provider
+	content.Ingester
+}
diff --git a/libs-go/pkg/content/iowriter.go b/libs-go/pkg/content/iowriter.go
new file mode 100644
index 0000000..4ad04ac
--- /dev/null
+++ b/libs-go/pkg/content/iowriter.go
@@ -0,0 +1,97 @@
+package content
+
+import (
+	"context"
+	"io"
+	"io/ioutil"
+
+	"github.com/containerd/containerd/content"
+	"github.com/opencontainers/go-digest"
+)
+
+// IoContentWriter is a content.Writer that wraps an io.Writer, so the results can
+// be streamed to an open io.Writer. For example, it can be used to pull a layer
+// and write it to a file, or to a device.
+type IoContentWriter struct {
+	writer   io.Writer
+	digester digest.Digester
+	size     int64
+	hash     *digest.Digest
+}
+
+// NewIoContentWriter creates a new IoContentWriter.
+//
+// By default, it calculates the digest of the stream while writing. If the
+// WithOutputHash option is provided, it skips calculating the digest. Skipping
+// the digest is intended to be used only if you are confident about the
+// validity of the data being passed to the writer, and wish to save on the
+// hashing time.
+func NewIoContentWriter(writer io.Writer, opts ...WriterOpt) content.Writer {
+	w := writer
+	if w == nil {
+		w = ioutil.Discard
+	}
+	// process opts for default
+	wOpts := DefaultWriterOpts()
+	for _, opt := range opts {
+		if err := opt(&wOpts); err != nil {
+			return nil
+		}
+	}
+	ioc := &IoContentWriter{
+		writer:   w,
+		digester: digest.Canonical.Digester(),
+		// we take the OutputHash, since the InputHash goes to the passthrough writer,
+		// which then passes the processed output to us
+		hash: wOpts.OutputHash,
+	}
+	return NewPassthroughWriter(ioc, func(r io.Reader, w io.Writer, done chan<- error) {
+		// write out the data to the io writer
+		var (
+			err error
+		)
+		// io.Copy would allocate its own buffer; io.CopyBuffer lets us honor the
+		// configured blocksize without exposing that choice to the user
+		b := make([]byte, wOpts.Blocksize)
+		_, err = io.CopyBuffer(w, r, b)
+		done <- err
+	}, opts...)
+}
+
+func (w *IoContentWriter) Write(p []byte) (n int, err error) {
+	n, err = w.writer.Write(p)
+	if err != nil {
+		return 0, err
+	}
+	w.size += int64(n)
+	if w.hash == nil {
+		w.digester.Hash().Write(p[:n])
+	}
+	return
+}
+
+func (w *IoContentWriter) Close() error {
+	return nil
+}
+
+// Digest may return an empty digest, or panic, until committed.
+func (w *IoContentWriter) Digest() digest.Digest {
+	return w.digester.Digest()
+}
+
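+// Example (editor's sketch, not part of the original change): stream a blob to
+// any io.Writer, here an in-memory buffer.
+//
+//	var buf bytes.Buffer
+//	w := NewIoContentWriter(&buf, WithBlocksize(64*1024))
+//	if _, err := w.Write([]byte("hello")); err != nil {
+//		log.Fatal(err)
+//	}
+//	if err := w.Commit(context.Background(), 0, ""); err != nil {
+//		log.Fatal(err)
+//	}
+//	// buf now holds the bytes that were written
+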
+// Commit commits the blob (but no roll-back is guaranteed on an error).
+// size and expected can be zero-value when unknown.
+// Commit always closes the writer, even on error.
+// ErrAlreadyExists aborts the writer.
+func (w *IoContentWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error {
+	return nil
+}
+
+// Status returns the current state of the write
+func (w *IoContentWriter) Status() (content.Status, error) {
+	return content.Status{}, nil
+}
+
+// Truncate updates the size of the target blob
+func (w *IoContentWriter) Truncate(size int64) error {
+	return nil
+}
diff --git a/libs-go/pkg/content/memory.go b/libs-go/pkg/content/memory.go
new file mode 100644
index 0000000..5c76553
--- /dev/null
+++ b/libs-go/pkg/content/memory.go
@@ -0,0 +1,210 @@
+package content
+
+import (
+	"bytes"
+	"context"
+	"sync"
+	"time"
+
+	"github.com/containerd/containerd/content"
+	"github.com/containerd/containerd/errdefs"
+	digest "github.com/opencontainers/go-digest"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+)
+
+// ensure interface
+var (
+	_ content.Provider = &Memorystore{}
+	_ content.Ingester = &Memorystore{}
+)
+
+// Memorystore provides content from memory
+type Memorystore struct {
+	descriptor map[digest.Digest]ocispec.Descriptor
+	content    map[digest.Digest][]byte
+	nameMap    map[string]ocispec.Descriptor
+	lock       *sync.Mutex
+}
+
+// NewMemoryStore creates a new memory store
+func NewMemoryStore() *Memorystore {
+	return &Memorystore{
+		descriptor: make(map[digest.Digest]ocispec.Descriptor),
+		content:    make(map[digest.Digest][]byte),
+		nameMap:    make(map[string]ocispec.Descriptor),
+		lock:       &sync.Mutex{},
+	}
+}
+
+// Add adds content
+func (s *Memorystore) Add(name, mediaType string, content []byte) ocispec.Descriptor {
+	var annotations map[string]string
+	if name != "" {
+		annotations = map[string]string{
+			ocispec.AnnotationTitle: name,
+		}
+	}
+
+	if mediaType == "" {
+		mediaType = DefaultBlobMediaType
+	}
+
+	desc := ocispec.Descriptor{
+		MediaType:   mediaType,
+		Digest:      digest.FromBytes(content),
+		Size:        int64(len(content)),
+		Annotations: annotations,
+	}
+
+	s.Set(desc, content)
+	return desc
+}
+
+// ReaderAt provides contents
+func (s *Memorystore) ReaderAt(ctx context.Context, desc ocispec.Descriptor) (content.ReaderAt, error) {
+	desc, content, ok := s.Get(desc)
+	if !ok {
+		return nil, ErrNotFound
+	}
+
+	return sizeReaderAt{
+		readAtCloser: nopCloser{
+			ReaderAt: bytes.NewReader(content),
+		},
+		size: desc.Size,
+	}, nil
+}
+
+// Writer begins or resumes the active writer identified by desc
+func (s *Memorystore) Writer(ctx context.Context, opts ...content.WriterOpt) (content.Writer, error) {
+	var wOpts content.WriterOpts
+	for _, opt := range opts {
+		if err := opt(&wOpts); err != nil {
+			return nil, err
+		}
+	}
+	desc := wOpts.Desc
+
+	name, _ := ResolveName(desc)
+	now := time.Now()
+	return &memoryWriter{
+		store:    s,
+		buffer:   bytes.NewBuffer(nil),
+		desc:     desc,
+		digester: digest.Canonical.Digester(),
+		status: content.Status{
+			Ref:       name,
+			Total:     desc.Size,
+			StartedAt: now,
+			UpdatedAt: now,
+		},
+	}, nil
+}
+
+// Set adds the content to the store
+func (s *Memorystore) Set(desc ocispec.Descriptor, content []byte) {
+	s.lock.Lock()
+	defer s.lock.Unlock()
+
+	s.descriptor[desc.Digest] = desc
+	s.content[desc.Digest] = content
+
+	if name, ok := ResolveName(desc); ok && name != "" {
+		s.nameMap[name] = desc
+	}
+}
+
+// Get finds the content from the store
+func (s *Memorystore) Get(desc ocispec.Descriptor) (ocispec.Descriptor, []byte, bool) {
+	s.lock.Lock()
+	defer s.lock.Unlock()
+
+	desc, ok := s.descriptor[desc.Digest]
+	if !ok {
+		return ocispec.Descriptor{}, nil, false
+	}
+	content, ok := s.content[desc.Digest]
+	return desc, content, ok
+}
+
+// GetByName finds the content from the store by name (i.e. AnnotationTitle)
+func (s *Memorystore) GetByName(name string) (ocispec.Descriptor, []byte, bool) {
+	s.lock.Lock()
+	defer s.lock.Unlock()
+
+	desc, ok := s.nameMap[name]
+	if !ok {
+		return ocispec.Descriptor{}, nil, false
+	}
+	content, ok := s.content[desc.Digest]
+	return desc, content, ok
+}
+
+type memoryWriter struct {
+	store    *Memorystore
+	buffer   *bytes.Buffer
+	desc     ocispec.Descriptor
+	digester digest.Digester
+	status   content.Status
+}
+
+func (w *memoryWriter) Status() (content.Status, error) {
+	return w.status, nil
+}
+
+// Digest returns the current digest of the content, up to the current write.
+//
+// Cannot be called concurrently with `Write`.
+func (w *memoryWriter) Digest() digest.Digest {
+	return w.digester.Digest()
+}
+
+// Write p to the transaction.
+func (w *memoryWriter) Write(p []byte) (n int, err error) {
+	n, err = w.buffer.Write(p)
+	w.digester.Hash().Write(p[:n])
+	w.status.Offset += int64(len(p))
+	w.status.UpdatedAt = time.Now()
+	return n, err
+}
+
+func (w *memoryWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error {
+	var base content.Info
+	for _, opt := range opts {
+		if err := opt(&base); err != nil {
+			return err
+		}
+	}
+
+	if w.buffer == nil {
+		return errors.Wrap(errdefs.ErrFailedPrecondition, "cannot commit on closed writer")
+	}
+	content := w.buffer.Bytes()
+	w.buffer = nil
+
+	if size > 0 && size != int64(len(content)) {
+		return errors.Wrapf(errdefs.ErrFailedPrecondition, "unexpected commit size %d, expected %d", len(content), size)
+	}
+	if dgst := w.digester.Digest(); expected != "" && expected != dgst {
+		return errors.Wrapf(errdefs.ErrFailedPrecondition, "unexpected commit digest %s, expected %s", dgst, expected)
+	}
+
+	w.store.Set(w.desc, content)
+	return nil
+}
+
+func (w *memoryWriter) Close() error {
+	w.buffer = nil
+	return nil
+}
+
+func (w *memoryWriter) Truncate(size int64) error {
+	if size != 0 {
+		return ErrUnsupportedSize
+	}
+	w.status.Offset = 0
+	w.digester.Hash().Reset()
+	w.buffer.Truncate(0)
+	return nil
+}
diff --git a/libs-go/pkg/content/multireader.go b/libs-go/pkg/content/multireader.go
new file mode 100644
index 0000000..fb9cf8b
--- /dev/null
+++ b/libs-go/pkg/content/multireader.go
@@ -0,0 +1,40 @@
+package content
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/containerd/containerd/content"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// MultiReader is a store that reads content from multiple underlying stores. It finds
+// the content by asking each underlying store in turn, matching on the digest.
+//
+// Example:
+//	fileStore := NewFileStore(rootPath)
+//	memoryStore := NewMemoryStore()
+//	// load up content in fileStore and memoryStore
+//	multiStore := MultiReader{}
+//	multiStore.AddStore(fileStore, memoryStore)
+//
+// You can now use multiStore anywhere that content.Provider is accepted
+type MultiReader struct {
+	stores []content.Provider
+}
+
+// AddStore adds one or more stores to read from
+func (m *MultiReader) AddStore(store ...content.Provider) {
+	m.stores = append(m.stores, store...)
+}
+
+// ReaderAt gets a reader for the descriptor from the first store that can provide it
+func (m MultiReader) ReaderAt(ctx context.Context, desc ocispec.Descriptor) (content.ReaderAt, error) {
+	for _, store := range m.stores {
+		r, err := store.ReaderAt(ctx, desc)
+		if r != nil && err == nil {
+			return r, nil
+		}
+	}
+	// we did not find any
+	return nil, fmt.Errorf("content not found: %s", desc.Digest)
+}
diff --git a/libs-go/pkg/content/multireader_test.go b/libs-go/pkg/content/multireader_test.go
new file mode 100644
index 0000000..8f5bc5f
--- /dev/null
+++ b/libs-go/pkg/content/multireader_test.go
@@ -0,0 +1,63 @@
+package content_test
+
+import (
+	"context"
+	"io/ioutil"
+	"testing"
+
+	"github.com/opencontainers/artifacts/libs-go/pkg/content"
+
+	ctrcontent "github.com/containerd/containerd/content"
+	digest "github.com/opencontainers/go-digest"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+var (
+	testContentA     = []byte("Hello World!")
+	testContentHashA = digest.FromBytes(testContentA)
+	testContentB     = []byte("So long and thanks for all the fish!")
+	testContentHashB = digest.FromBytes(testContentB)
+	testDescriptorA  = ocispec.Descriptor{
+		MediaType: ocispec.MediaTypeImageConfig,
+		Digest:    testContentHashA,
+		Size:      int64(len(testContentA)),
+	}
+	testDescriptorB = ocispec.Descriptor{
+		MediaType: ocispec.MediaTypeImageConfig,
+		Digest:    testContentHashB,
+		Size:      int64(len(testContentB)),
+	}
+)
+
+func TestMultiReader(t *testing.T) {
+	mem1, mem2 := content.NewMemoryStore(), content.NewMemoryStore()
+	mem1.Add("a", ocispec.MediaTypeImageConfig, testContentA)
+	mem2.Add("b", ocispec.MediaTypeImageConfig, testContentB)
+	multiReader := content.MultiReader{}
+	multiReader.AddStore(mem1, mem2)
+
+	ctx := context.Background()
+	contentA, err := multiReader.ReaderAt(ctx, testDescriptorA)
+	if err != nil {
+		t.Fatalf("failed to get a reader for descriptor A: %v", err)
+	}
+	outputA, err := ioutil.ReadAll(ctrcontent.NewReader(contentA))
+	if err != nil {
+		t.Fatalf("failed to read content for descriptor A: %v", err)
+	}
+	if string(outputA) != string(testContentA) {
+		t.Errorf("mismatched content for A, actual '%s', expected '%s'", outputA, testContentA)
+	}
+
+	contentB, err := multiReader.ReaderAt(ctx, testDescriptorB)
+	if err != nil {
+		t.Fatalf("failed to get a reader for descriptor B: %v", err)
+	}
+	outputB, err := ioutil.ReadAll(ctrcontent.NewReader(contentB))
+	if err != nil {
+		t.Fatalf("failed to read content for descriptor B: %v", err)
+	}
+	if string(outputB) != string(testContentB) {
+		t.Errorf("mismatched content for B, actual '%s', expected '%s'", outputB, testContentB)
+	}
+}
diff --git a/libs-go/pkg/content/multiwriter.go b/libs-go/pkg/content/multiwriter.go
new file mode 100644
index 0000000..87df2d6
--- /dev/null
+++ b/libs-go/pkg/content/multiwriter.go
@@ -0,0 +1,16 @@
+package content
+
+import (
+	"context"
+
+	ctrcontent "github.com/containerd/containerd/content"
+)
+
+// MultiWriterIngester is an ingester that can provide a single writer or multiple writers for a single
+// descriptor. Useful when the target of a descriptor can have multiple items within it, e.g. a layer
+// that is a tar file with multiple files, each of which should go to a different stream, some of which
+// should not be handled at all.
+type MultiWriterIngester interface {
+	ctrcontent.Ingester
+	Writers(ctx context.Context, opts ...ctrcontent.WriterOpt) (map[string]ctrcontent.Writer, error)
+}
diff --git a/libs-go/pkg/content/oci.go b/libs-go/pkg/content/oci.go
new file mode 100644
index 0000000..6268023
--- /dev/null
+++ b/libs-go/pkg/content/oci.go
@@ -0,0 +1,172 @@
+package content
+
+import (
+	"encoding/json"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+
+	"github.com/containerd/containerd/content"
+	"github.com/containerd/containerd/content/local"
+	specs "github.com/opencontainers/image-spec/specs-go"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// OCIStore provides content from the file system with the OCI image layout.
+// Reference: https://github.com/opencontainers/image-spec/blob/master/image-layout.md
+type OCIStore struct {
+	content.Store
+
+	root    string
+	index   *ocispec.Index
+	nameMap map[string]ocispec.Descriptor
+}
+
+// NewOCIStore creates a new OCI store
+func NewOCIStore(rootPath string) (*OCIStore, error) {
+	fileStore, err := local.NewStore(rootPath)
+	if err != nil {
+		return nil, err
+	}
+
+	store := &OCIStore{
+		Store: fileStore,
+		root:  rootPath,
+	}
+	if err := store.validateOCILayoutFile(); err != nil {
+		return nil, err
+	}
+	if err := store.LoadIndex(); err != nil {
+		return nil, err
+	}
+
+	return store, nil
+}
+
+// LoadIndex reads the index.json from the file system
+func (s *OCIStore) LoadIndex() error {
+	path := filepath.Join(s.root, OCIImageIndexFile)
+	indexFile, err := os.Open(path)
+	if err != nil {
+		if !os.IsNotExist(err) {
+			return err
+		}
+		s.index = &ocispec.Index{
+			Versioned: specs.Versioned{
+				SchemaVersion: 2, // historical value
+			},
+		}
+		s.nameMap = make(map[string]ocispec.Descriptor)
+
+		return nil
+	}
+	defer indexFile.Close()
+
+	if err := json.NewDecoder(indexFile).Decode(&s.index); err != nil {
+		return err
+	}
+
+	s.nameMap = make(map[string]ocispec.Descriptor)
+	for _, desc := range s.index.Manifests {
+		if name := desc.Annotations[ocispec.AnnotationRefName]; name != "" {
+			s.nameMap[name] = desc
+		}
+	}
+
+	return nil
+}
+
+// SaveIndex writes the index.json to the file system
+func (s *OCIStore) SaveIndex() error {
+	indexJSON, err := json.Marshal(s.index)
+	if err != nil {
+		return err
+	}
+
+	path := filepath.Join(s.root, OCIImageIndexFile)
+	return ioutil.WriteFile(path, indexJSON, 0644)
+}
+
+// AddReference adds or updates a reference to the index.
+func (s *OCIStore) AddReference(name string, desc ocispec.Descriptor) {
+	if desc.Annotations == nil {
+		desc.Annotations = map[string]string{
+			ocispec.AnnotationRefName: name,
+		}
+	} else {
+		desc.Annotations[ocispec.AnnotationRefName] = name
+	}
+
+	if _, ok := s.nameMap[name]; ok {
+		s.nameMap[name] = desc
+
+		for i, ref := range s.index.Manifests {
+			if name == ref.Annotations[ocispec.AnnotationRefName] {
+				s.index.Manifests[i] = desc
+				return
+			}
+		}
+
+		// Process should not reach here.
+		// Fallthrough to `Add` scenario and recover.
+		s.index.Manifests = append(s.index.Manifests, desc)
+		return
+	}
+
+	s.index.Manifests = append(s.index.Manifests, desc)
+	s.nameMap[name] = desc
+}
+
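+// Example (editor's sketch, not part of the original change): open an OCI
+// layout directory, tag a manifest, and persist index.json. Assumes `desc`
+// is the descriptor of a manifest already written to the store.
+//
+//	store, err := NewOCIStore("/tmp/oci-layout")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	store.AddReference("v1.0", desc)
+//	if err := store.SaveIndex(); err != nil {
+//		log.Fatal(err)
+//	}
+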
+// DeleteReference deletes a reference from the index.
+func (s *OCIStore) DeleteReference(name string) {
+	if _, ok := s.nameMap[name]; !ok {
+		return
+	}
+
+	delete(s.nameMap, name)
+	for i, desc := range s.index.Manifests {
+		if name == desc.Annotations[ocispec.AnnotationRefName] {
+			s.index.Manifests[i] = s.index.Manifests[len(s.index.Manifests)-1]
+			s.index.Manifests = s.index.Manifests[:len(s.index.Manifests)-1]
+			return
+		}
+	}
+}
+
+// ListReferences lists all references in the index.
+func (s *OCIStore) ListReferences() map[string]ocispec.Descriptor {
+	return s.nameMap
+}
+
+// validateOCILayoutFile ensures the `oci-layout` file exists and is supported,
+// creating it if it does not exist.
+func (s *OCIStore) validateOCILayoutFile() error {
+	layoutFilePath := filepath.Join(s.root, ocispec.ImageLayoutFile)
+	layoutFile, err := os.Open(layoutFilePath)
+	if err != nil {
+		if !os.IsNotExist(err) {
+			return err
+		}
+
+		layout := ocispec.ImageLayout{
+			Version: ocispec.ImageLayoutVersion,
+		}
+		layoutJSON, err := json.Marshal(layout)
+		if err != nil {
+			return err
+		}
+
+		return ioutil.WriteFile(layoutFilePath, layoutJSON, 0644)
+	}
+	defer layoutFile.Close()
+
+	var layout *ocispec.ImageLayout
+	err = json.NewDecoder(layoutFile).Decode(&layout)
+	if err != nil {
+		return err
+	}
+	if layout.Version != ocispec.ImageLayoutVersion {
+		return ErrUnsupportedVersion
+	}
+
+	return nil
+}
diff --git a/libs-go/pkg/content/opts.go b/libs-go/pkg/content/opts.go
new file mode 100644
index 0000000..56d4969
--- /dev/null
+++ b/libs-go/pkg/content/opts.go
@@ -0,0 +1,73 @@
+package content
+
+import (
+	"errors"
+
+	"github.com/opencontainers/go-digest"
+)
+
+// WriterOpts bundles the options accepted by the writers in this package.
+type WriterOpts struct {
+	InputHash           *digest.Digest
+	OutputHash          *digest.Digest
+	Blocksize           int
+	MultiWriterIngester bool
+}
+
+// WriterOpt applies a single option to a WriterOpts.
+type WriterOpt func(*WriterOpts) error
+
+// DefaultWriterOpts returns the default writer options.
+func DefaultWriterOpts() WriterOpts {
+	return WriterOpts{
+		InputHash:  nil,
+		OutputHash: nil,
+		Blocksize:  DefaultBlocksize,
+	}
+}
+
+// WithInputHash provides the expected input hash to a writer. Writers
+// may suppress their own calculation of a hash on the stream, taking this
+// hash instead. If the writer processes the data before passing it on to another
+// writer layer, this is the hash of the *input* stream.
+//
+// To have a blank hash, use WithInputHash(BlankHash).
+func WithInputHash(hash digest.Digest) WriterOpt {
+	return func(w *WriterOpts) error {
+		w.InputHash = &hash
+		return nil
+	}
+}
+
+// WithOutputHash provides the expected output hash to a writer. Writers
+// may suppress their own calculation of a hash on the stream, taking this
+// hash instead. If the writer processes the data before passing it on to another
+// writer layer, this is the hash of the *output* stream.
+//
+// To have a blank hash, use WithOutputHash(BlankHash).
+func WithOutputHash(hash digest.Digest) WriterOpt {
+	return func(w *WriterOpts) error {
+		w.OutputHash = &hash
+		return nil
+	}
+}
+
+// WithBlocksize sets the blocksize used by the processor of data.
+// The default is DefaultBlocksize, which is the same as that used by io.Copy.
+// Includes a safety check to ensure the caller doesn't actively set it to <= 0.
+func WithBlocksize(blocksize int) WriterOpt {
+	return func(w *WriterOpts) error {
+		if blocksize <= 0 {
+			return errors.New("blocksize must be greater than 0")
+		}
+		w.Blocksize = blocksize
+		return nil
+	}
+}
+
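+// Example (editor's sketch, not part of the original change): combining
+// options when the digests are already known, so the writer can skip hashing.
+// Assumes `compressed` and `expanded` hold the raw and gunzipped bytes, and
+// `underlying` is an existing content.Writer.
+//
+//	in := digest.FromBytes(compressed)  // digest of the stream written in
+//	out := digest.FromBytes(expanded)   // digest of the processed output
+//	w := NewGunzipWriter(underlying,
+//		WithInputHash(in), WithOutputHash(out), WithBlocksize(128*1024))
+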
+// WithMultiWriterIngester indicates that the passed ingester also implements
+// MultiWriterIngester and should be used as such. If this is set to true, but
+// the ingester does not implement MultiWriterIngester, calling Writer should
+// return an error.
+func WithMultiWriterIngester() WriterOpt {
+	return func(w *WriterOpts) error {
+		w.MultiWriterIngester = true
+		return nil
+	}
+}
diff --git a/libs-go/pkg/content/passthrough.go b/libs-go/pkg/content/passthrough.go
new file mode 100644
index 0000000..b9a891d
--- /dev/null
+++ b/libs-go/pkg/content/passthrough.go
@@ -0,0 +1,262 @@
+package content
+
+import (
+	"context"
+	"errors"
+	"io"
+	"time"
+
+	"github.com/containerd/containerd/content"
+	"github.com/opencontainers/go-digest"
+)
+
+// PassthroughWriter takes an input stream and passes it through to an underlying writer,
+// while providing the ability to manipulate the stream before it gets passed through.
+type PassthroughWriter struct {
+	writer           content.Writer
+	pipew            *io.PipeWriter
+	digester         digest.Digester
+	size             int64
+	underlyingWriter *underlyingWriter
+	reader           *io.PipeReader
+	hash             *digest.Digest
+	done             chan error
+}
+
+// NewPassthroughWriter creates a pass-through writer that allows for processing
+// the content via an arbitrary function. The function should do whatever processing it
+// wants, reading from the Reader and writing to the Writer. When done, it must
+// indicate completion by sending an error (or nil) on the done channel.
+func NewPassthroughWriter(writer content.Writer, f func(r io.Reader, w io.Writer, done chan<- error), opts ...WriterOpt) content.Writer {
+	// process opts for default
+	wOpts := DefaultWriterOpts()
+	for _, opt := range opts {
+		if err := opt(&wOpts); err != nil {
+			return nil
+		}
+	}
+
+	r, w := io.Pipe()
+	pw := &PassthroughWriter{
+		writer:   writer,
+		pipew:    w,
+		digester: digest.Canonical.Digester(),
+		underlyingWriter: &underlyingWriter{
+			writer:   writer,
+			digester: digest.Canonical.Digester(),
+			hash:     wOpts.OutputHash,
+		},
+		reader: r,
+		hash:   wOpts.InputHash,
+		done:   make(chan error, 1),
+	}
+	go f(r, pw.underlyingWriter, pw.done)
+	return pw
+}
+
+func (pw *PassthroughWriter) Write(p []byte) (n int, err error) {
+	n, err = pw.pipew.Write(p)
+	if pw.hash == nil {
+		pw.digester.Hash().Write(p[:n])
+	}
+	pw.size += int64(n)
+	return
+}
+
+func (pw *PassthroughWriter) Close() error {
+	pw.pipew.Close()
+	pw.writer.Close()
+	return nil
+}
+
+// Digest may return an empty digest, or panic, until committed.
+func (pw *PassthroughWriter) Digest() digest.Digest {
+	if pw.hash != nil {
+		return *pw.hash
+	}
+	return pw.digester.Digest()
+}
+
+// Commit commits the blob (but no roll-back is guaranteed on an error).
+// size and expected can be zero-value when unknown.
+// Commit always closes the writer, even on error.
+// ErrAlreadyExists aborts the writer.
+func (pw *PassthroughWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error {
+	pw.pipew.Close()
+	err := <-pw.done
+	pw.reader.Close()
+	if err != nil && err != io.EOF {
+		return err
+	}
+
+	// Some underlying writers will validate an expected digest, so we need the option to pass it
+	// that digest. That is why we calculate the digest of the underlying writer throughout the write process.
+	return pw.writer.Commit(ctx, pw.underlyingWriter.size, pw.underlyingWriter.Digest(), opts...)
+}
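+
+// Example (editor's sketch, not part of the original change): a passthrough
+// that upper-cases ASCII content before it reaches the underlying writer.
+// Assumes `underlying` is an existing content.Writer.
+//
+//	upper := NewPassthroughWriter(underlying, func(r io.Reader, w io.Writer, done chan<- error) {
+//		var err error
+//		b := make([]byte, 4096)
+//		for {
+//			var n int
+//			n, err = r.Read(b)
+//			if n > 0 {
+//				if _, werr := w.Write(bytes.ToUpper(b[:n])); werr != nil {
+//					err = werr
+//					break
+//				}
+//			}
+//			if err != nil {
+//				break
+//			}
+//		}
+//		if err == io.EOF {
+//			err = nil
+//		}
+//		done <- err
+//	})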
+
+// Status returns the current state of write
+func (pw *PassthroughWriter) Status() (content.Status, error) {
+	return pw.writer.Status()
+}
+
+// Truncate updates the size of the target blob
+func (pw *PassthroughWriter) Truncate(size int64) error {
+	return pw.writer.Truncate(size)
+}
+
+// underlyingWriter is an io.Writer implementation that writes through to the
+// wrapped content.Writer
+type underlyingWriter struct {
+	writer   content.Writer
+	digester digest.Digester
+	size     int64
+	hash     *digest.Digest
+}
+
+// Write writes to the underlying writer
+func (u *underlyingWriter) Write(p []byte) (int, error) {
+	n, err := u.writer.Write(p)
+	if err != nil {
+		return 0, err
+	}
+
+	if u.hash == nil {
+		u.digester.Hash().Write(p)
+	}
+	u.size += int64(len(p))
+	return n, nil
+}
+
+// Size returns the total size written
+func (u *underlyingWriter) Size() int64 {
+	return u.size
+}
+
+// Digest may return an empty digest, or panic, until committed.
+func (u *underlyingWriter) Digest() digest.Digest {
+	if u.hash != nil {
+		return *u.hash
+	}
+	return u.digester.Digest()
+}
+
+// PassthroughMultiWriter is a single writer that passes through to multiple writers,
+// allowing the passthrough function to select which writer to use.
+type PassthroughMultiWriter struct {
+	writers   []*PassthroughWriter
+	pipew     *io.PipeWriter
+	digester  digest.Digester
+	size      int64
+	reader    *io.PipeReader
+	hash      *digest.Digest
+	done      chan error
+	startedAt time.Time
+	updatedAt time.Time
+	ref       string
+}
+
+func NewPassthroughMultiWriter(writers []content.Writer, f func(r io.Reader, w []io.Writer, done chan<- error), opts ...WriterOpt) content.Writer {
+	// process opts for default
+	wOpts := DefaultWriterOpts()
+	for _, opt := range opts {
+		if err := opt(&wOpts); err != nil {
+			return nil
+		}
+	}
+
+	var pws []*PassthroughWriter
+	r, w := io.Pipe()
+	for _, writer := range writers {
+		pws = append(pws, &PassthroughWriter{
+			writer:   writer,
+			pipew:    w,
+			digester: digest.Canonical.Digester(),
+			underlyingWriter: &underlyingWriter{
+				writer:   writer,
+				digester: digest.Canonical.Digester(),
+				hash:     wOpts.OutputHash,
+			},
+			reader: r,
+			hash:   wOpts.InputHash,
+			done:   make(chan error, 1),
+		})
+	}
+
+	pmw := &PassthroughMultiWriter{
+		writers: pws,
+		// wire the shared pipe and input-side digest to the multiwriter itself,
+		// since Write, Digest and Commit operate on them
+		pipew:     w,
+		reader:    r,
+		digester:  digest.Canonical.Digester(),
+		hash:      wOpts.InputHash,
+		startedAt: time.Now(),
+		updatedAt: time.Now(),
+		done:      make(chan error, 1),
+	}
+	// get our output writers
+	var uws []io.Writer
+	for _, uw := range pws {
+		uws = append(uws, uw.underlyingWriter)
+	}
+	go f(r, uws, pmw.done)
+	return pmw
+}
+
+func (pmw *PassthroughMultiWriter) Write(p []byte) (n int, err error) {
+	n, err = pmw.pipew.Write(p)
+	if pmw.hash == nil {
+		pmw.digester.Hash().Write(p[:n])
+	}
+	pmw.size += int64(n)
+	pmw.updatedAt = time.Now()
+	return
+}
+
+func (pmw *PassthroughMultiWriter) Close() error {
+	pmw.pipew.Close()
+	for _, w := range pmw.writers {
+		w.Close()
+	}
+	return nil
+}
+
+// Digest may return an empty digest, or panic, until committed.
+func (pmw *PassthroughMultiWriter) Digest() digest.Digest {
+	if pmw.hash != nil {
+		return *pmw.hash
+	}
+	return pmw.digester.Digest()
+}
+
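+// Example (editor's sketch, not part of the original change): tee one input
+// stream to two underlying writers. Assumes `w1` and `w2` are existing
+// content.Writers.
+//
+//	mw := NewPassthroughMultiWriter([]content.Writer{w1, w2},
+//		func(r io.Reader, ws []io.Writer, done chan<- error) {
+//			_, err := io.Copy(io.MultiWriter(ws...), r)
+//			done <- err
+//		})
+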
+// Commit commits the blob (but no roll-back is guaranteed on an error).
+// size and expected can be zero-value when unknown.
+// Commit always closes the writer, even on error.
+// ErrAlreadyExists aborts the writer.
+func (pmw *PassthroughMultiWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error {
+	pmw.pipew.Close()
+	err := <-pmw.done
+	pmw.reader.Close()
+	if err != nil && err != io.EOF {
+		return err
+	}
+
+	// Some underlying writers will validate an expected digest, so we need the option to pass it
+	// that digest. That is why we calculate the digest of the underlying writer throughout the write process.
+	for _, w := range pmw.writers {
+		// maybe this should be Commit(ctx, pw.underlyingWriter.size, pw.underlyingWriter.Digest(), opts...)
+		w.done <- err
+		if err := w.Commit(ctx, size, expected, opts...); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Status returns the current state of write
+func (pmw *PassthroughMultiWriter) Status() (content.Status, error) {
+	return content.Status{
+		StartedAt: pmw.startedAt,
+		UpdatedAt: pmw.updatedAt,
+		Total:     pmw.size,
+	}, nil
+}
+
+// Truncate updates the size of the target blob, but cannot do anything with a multiwriter
+func (pmw *PassthroughMultiWriter) Truncate(size int64) error {
+	return errors.New("truncate unavailable on multiwriter")
+}
diff --git a/libs-go/pkg/content/passthrough_test.go b/libs-go/pkg/content/passthrough_test.go
new file mode 100644
index 0000000..f8448f5
--- /dev/null
+++ b/libs-go/pkg/content/passthrough_test.go
@@ -0,0 +1,118 @@
+package content_test
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"testing"
+
+	"github.com/opencontainers/artifacts/libs-go/pkg/content"
+
+	ctrcontent "github.com/containerd/containerd/content"
+	digest "github.com/opencontainers/go-digest"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+var (
+	testRef             = "abc123"
+	testContent         = []byte("Hello World!")
+	testContentHash     = digest.FromBytes(testContent)
+	appendText          = "1"
+	modifiedContent     = fmt.Sprintf("%s%s", testContent, appendText)
+	modifiedContentHash = digest.FromBytes([]byte(modifiedContent))
+	testDescriptor      = ocispec.Descriptor{
+		MediaType: ocispec.MediaTypeImageConfig,
+		Digest:    testContentHash,
+		Size:      int64(len(testContent)),
+		Annotations: map[string]string{
+			ocispec.AnnotationTitle: testRef,
+		},
+	}
+	modifiedDescriptor = ocispec.Descriptor{
+		MediaType: ocispec.MediaTypeImageConfig,
+		Digest:    modifiedContentHash,
+		Size:      int64(len(modifiedContent)),
+		Annotations: map[string]string{
+			ocispec.AnnotationTitle: testRef,
+		},
+	}
+)
+
+func TestPassthroughWriter(t *testing.T) {
+	// simple pass through function that modifies the data just slightly
+	f := func(r io.Reader, w io.Writer, done chan<- error) {
+		var (
+			err error
+			n   int
+		)
+		for {
+			b := make([]byte, 1024)
+			n, err = r.Read(b)
+			if err != nil && err != io.EOF {
+				// t.Errorf is safe from this goroutine; t.Fatalf is not
+				t.Errorf("data read error: %v", err)
+				break
+			}
+			// we change it just slightly; a Reader never returns n > len(b)
+			b = b[:n]
+			if n > 0 {
+				b = append(b, []byte(appendText)...)
+			}
+			if _, err := w.Write(b); err != nil {
+				t.Errorf("error writing to underlying writer: %v", err)
+				break
+			}
+			if err == io.EOF {
+				break
+			}
+		}
+		done <- err
+	}
+
+	tests := []struct {
+		opts []content.WriterOpt
+		hash digest.Digest
+	}{
+		{nil, testContentHash},
+		{[]content.WriterOpt{content.WithInputHash(testContentHash), content.WithOutputHash(modifiedContentHash)}, testContentHash},
+	}
+
+	for _, tt := range tests {
+		ctx := context.Background()
+		mem := content.NewMemoryStore()
+		memw, err := mem.Writer(ctx, ctrcontent.WithDescriptor(modifiedDescriptor))
+		if err != nil {
+			t.Fatalf("unexpected error getting the memory store writer: %v", err)
+		}
+		writer := content.NewPassthroughWriter(memw, f, tt.opts...)
+		n, err := writer.Write(testContent)
+		if err != nil {
+			t.Fatalf("unexpected error on Write: %v", err)
+		}
+		if n != len(testContent) {
+			t.Fatalf("wrote %d bytes instead of %d", n, len(testContent))
+		}
+		if err := writer.Commit(ctx, testDescriptor.Size, tt.hash); err != nil {
+			t.Errorf("unexpected error on Commit: %v", err)
+		}
+		if dgst := writer.Digest(); dgst != tt.hash {
+			t.Errorf("mismatched digest: actual %v, expected %v", dgst, tt.hash)
+		}
+
+		// make sure the data is what we expected
+		_, b, found := mem.Get(modifiedDescriptor)
+		if !found {
+			t.Fatalf("target descriptor not found in underlying memory store")
+		}
+		if len(b) != len(modifiedContent) {
+			t.Errorf("unexpectedly got %d bytes instead of expected %d", len(b), len(modifiedContent))
+		}
+		if string(b) != modifiedContent {
+			t.Errorf("mismatched content, expected '%s', got '%s'", modifiedContent, string(b))
+		}
+	}
+}
diff --git a/libs-go/pkg/content/readerat.go b/libs-go/pkg/content/readerat.go
new file mode 100644
index 0000000..6c1533a
--- /dev/null
+++ b/libs-go/pkg/content/readerat.go
@@ -0,0 +1,34 @@
+package content
+
+import (
+	"io"
+
+	"github.com/containerd/containerd/content"
+)
+
+// ensure interface
+var (
+	_ content.ReaderAt = sizeReaderAt{}
+)
+
+type readAtCloser interface {
+	io.ReaderAt
+	io.Closer
+}
+
+type sizeReaderAt struct {
+	readAtCloser
+	size int64
+}
+
+func (ra sizeReaderAt) Size() int64 {
+	return ra.size
+}
+
+type nopCloser struct {
+	io.ReaderAt
+}
+
+func (nopCloser) Close() error {
+	return nil
+}
diff --git a/libs-go/pkg/content/untar.go b/libs-go/pkg/content/untar.go
new file mode 100644
index 0000000..729a318
--- /dev/null
+++ b/libs-go/pkg/content/untar.go
@@ -0,0 +1,152 @@
+package content
+
+import (
+	"archive/tar"
+	"fmt"
+	"io"
+
+	"github.com/containerd/containerd/content"
+)
+
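+// Example (editor's sketch, not part of the original change): untar a
+// single-file tar stream into a plain file as it is written.
+//
+//	f, err := os.Create("config.json")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer f.Close()
+//	w := NewUntarWriter(NewIoContentWriter(f))
+//	// bytes written to w are untarred; the file contents land in f
+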
+// NewUntarWriter wraps a writer with an untar layer, so that the stream is
+// untarred as it is written.
+//
+// By default, it calculates the digest of the stream while writing. If the
+// WithInputHash or WithOutputHash options are provided, it skips calculating
+// the corresponding digest. Skipping the digest is intended to be used only
+// if you are confident about the validity of the data being passed to the writer,
+// and wish to save on the hashing time.
+func NewUntarWriter(writer content.Writer, opts ...WriterOpt) content.Writer {
+	// process opts for default
+	wOpts := DefaultWriterOpts()
+	for _, opt := range opts {
+		if err := opt(&wOpts); err != nil {
+			return nil
+		}
+	}
+
+	return NewPassthroughWriter(writer, func(r io.Reader, w io.Writer, done chan<- error) {
+		tr := tar.NewReader(r)
+		var err error
+		for {
+			// assign rather than declare, so errors are not shadowed and
+			// actually reach the done channel below
+			_, err = tr.Next()
+			if err == io.EOF {
+				// clear the error, since we do not pass an io.EOF
+				err = nil
+				break // End of archive
+			}
+			if err != nil {
+				// pass the error on
+				err = fmt.Errorf("UntarWriter tar file header read error: %v", err)
+				break
+			}
+			// write out the untarred data
+			// we can handle io.EOF, just go to the next file
+			// any other errors should stop and get reported
+			b := make([]byte, wOpts.Blocksize)
+			for {
+				var n int
+				n, err = tr.Read(b)
+				if err != nil && err != io.EOF {
+					err = fmt.Errorf("UntarWriter file data read error: %v", err)
+					break
+				}
+				// a Reader never returns n > len(b), so write exactly what was read
+				if _, err2 := w.Write(b[:n]); err2 != nil {
+					err = fmt.Errorf("UntarWriter error writing to underlying writer: %v", err2)
+					break
+				}
+				if err == io.EOF {
+					// go to the next file
+					break
+				}
+			}
+			// did we break with a non-nil and non-EOF error?
+			if err != nil && err != io.EOF {
+				break
+			}
+		}
+		done <- err
+	}, opts...)
+}
+
+// NewUntarWriterByName wraps multiple writers with an untar layer, so that the stream is untarred and passed
+// to the appropriate writer, based on the filename. If a filename is not found, it will not pass it
+// to any writer. The filename "" will handle any stream that does not have a specific filename; use
+// it for the default of a single file in a tar stream.
+func NewUntarWriterByName(writers map[string]content.Writer, opts ...WriterOpt) content.Writer {
+	// process opts for default
+	wOpts := DefaultWriterOpts()
+	for _, opt := range opts {
+		if err := opt(&wOpts); err != nil {
+			return nil
+		}
+	}
+
+	// construct an array of content.Writer
+	nameToIndex := map[string]int{}
+	var writerSlice []content.Writer
+	for name, writer := range writers {
+		writerSlice = append(writerSlice, writer)
+		nameToIndex[name] = len(writerSlice) - 1
+	}
+	// need a PassthroughMultiWriter here
+	return NewPassthroughMultiWriter(writerSlice, func(r io.Reader, ws []io.Writer, done chan<- error) {
+		tr := tar.NewReader(r)
+		var err error
+		for {
+			// assign rather than declare, so errors are not shadowed and
+			// actually reach the done channel below
+			var header *tar.Header
+			header, err = tr.Next()
+			if err == io.EOF {
+				// clear the error, since we do not pass an io.EOF
+				err = nil
+				break // End of archive
+			}
+			if err != nil {
+				// pass the error on
+				err = fmt.Errorf("UntarWriter tar file header read error: %v", err)
+				break
+			}
+			// get the filename
+			filename := header.Name
+			index, ok := nameToIndex[filename]
+			if !ok {
+				index, ok = nameToIndex[""]
+				if !ok {
+					// we did not find this file or the wildcard, so do not process this file
+					continue
+				}
+			}
+
+			// write out the untarred data
+			// we can handle io.EOF, just go to the next file
+			// any other errors should stop and get reported
+			b := make([]byte, wOpts.Blocksize)
+			for {
+				var n int
+				n, err = tr.Read(b)
+				if err != nil && err != io.EOF {
+					err = fmt.Errorf("UntarWriter file data read error: %v", err)
+					break
+				}
+				// a Reader never returns n > len(b), so write exactly what was read
+				if _, err2 := ws[index].Write(b[:n]); err2 != nil {
+					err = fmt.Errorf("UntarWriter error writing to underlying writer at index %d for name '%s': %v", index, filename, err2)
+					break
+				}
+				if err == io.EOF {
+					// go to the next file
+					break
+				}
+			}
+			// did we break with a non-nil and non-EOF error?
+			if err != nil && err != io.EOF {
+				break
+			}
+		}
+		done <- err
+	}, opts...)
+}
diff --git a/libs-go/pkg/content/utils.go b/libs-go/pkg/content/utils.go
new file mode 100644
index 0000000..656dee2
--- /dev/null
+++ b/libs-go/pkg/content/utils.go
@@ -0,0 +1,208 @@
+package content
+
+import (
+	"archive/tar"
+	"compress/gzip"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+	"strings"
+	"time"
+
+	digest "github.com/opencontainers/go-digest"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+)
+
+// ResolveName resolves name from descriptor
+func ResolveName(desc ocispec.Descriptor) (string, bool) {
+	name, ok := desc.Annotations[ocispec.AnnotationTitle]
+	return name, ok
+}
+
+// tarDirectory walks the directory specified by path, and tars those files with a new
+// path prefix.
+func tarDirectory(root, prefix string, w io.Writer, stripTimes bool) error {
+	tw := tar.NewWriter(w)
+	defer tw.Close()
+	if err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+
+		// Rename path
+		name, err := filepath.Rel(root, path)
+		if err != nil {
+			return err
+		}
+		name = filepath.Join(prefix, name)
+		name = filepath.ToSlash(name)
+
+		// Generate header
+		var link string
+		mode := info.Mode()
+		if mode&os.ModeSymlink != 0 {
+			if link, err = os.Readlink(path); err != nil {
+				return err
+			}
+		}
+		header, err := tar.FileInfoHeader(info, link)
+		if err != nil {
+			return errors.Wrap(err, path)
+		}
+		header.Name = name
+		header.Uid = 0
+		header.Gid = 0
+		header.Uname = ""
+		header.Gname = ""
+
+		if stripTimes {
+			header.ModTime = time.Time{}
+			header.AccessTime = time.Time{}
+			header.ChangeTime = time.Time{}
+		}
+
+		// Write file
+		if err := tw.WriteHeader(header); err != nil {
+			return errors.Wrap(err, "tar")
+		}
+		if mode.IsRegular() {
+			file, err := os.Open(path)
+			if err != nil {
+				return err
+			}
+			defer file.Close()
+			if _, err := io.Copy(tw, file); err != nil {
+				return errors.Wrap(err, path)
+			}
+		}
+
+		return nil
+	}); err != nil {
+		return err
+	}
+	return nil
+}
+
+// extractTarDirectory extracts a tar file into the directory specified by the `root`
+// parameter. Each entry's name must begin with the string specified by the
+// `prefix` parameter, which is trimmed from the extracted path.
+func extractTarDirectory(root, prefix string, r io.Reader) error { + tr := tar.NewReader(r) + for { + header, err := tr.Next() + if err != nil { + if err == io.EOF { + return nil + } + return err + } + + // Name check + name := header.Name + path, err := ensureBasePath(root, prefix, name) + if err != nil { + return err + } + path = filepath.Join(root, path) + + // Link check + switch header.Typeflag { + case tar.TypeLink, tar.TypeSymlink: + link := header.Linkname + if !filepath.IsAbs(link) { + link = filepath.Join(filepath.Dir(name), link) + } + if _, err := ensureBasePath(root, prefix, link); err != nil { + return err + } + } + + // Create content + switch header.Typeflag { + case tar.TypeReg: + err = writeFile(path, tr, header.FileInfo().Mode()) + case tar.TypeDir: + err = os.MkdirAll(path, header.FileInfo().Mode()) + case tar.TypeLink: + err = os.Link(header.Linkname, path) + case tar.TypeSymlink: + err = os.Symlink(header.Linkname, path) + default: + continue // Non-regular files are skipped + } + if err != nil { + return err + } + + // Change access time and modification time if possible (error ignored) + os.Chtimes(path, header.AccessTime, header.ModTime) + } +} + +// ensureBasePath ensures the target path is in the base path, +// returning its relative path to the base path. +func ensureBasePath(root, base, target string) (string, error) { + path, err := filepath.Rel(base, target) + if err != nil { + return "", err + } + cleanPath := filepath.ToSlash(filepath.Clean(path)) + if cleanPath == ".." || strings.HasPrefix(cleanPath, "../") { + return "", fmt.Errorf("%q is outside of %q", target, base) + } + + // No symbolic link allowed in the relative path + dir := filepath.Dir(path) + for dir != "." { + if info, err := os.Lstat(filepath.Join(root, dir)); err != nil { + if !os.IsNotExist(err) { + return "", err + } + } else if info.Mode()&os.ModeSymlink != 0 { + return "", fmt.Errorf("no symbolic link allowed between %q and %q", base, target) + } + dir = filepath.Dir(dir) + } + + return path, nil +} + +func writeFile(path string, r io.Reader, perm os.FileMode) error { + file, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) + if err != nil { + return err + } + defer file.Close() + _, err = io.Copy(file, r) + return err +} + +func extractTarGzip(root, prefix, filename, checksum string) error { + file, err := os.Open(filename) + if err != nil { + return err + } + defer file.Close() + zr, err := gzip.NewReader(file) + if err != nil { + return err + } + defer zr.Close() + var r io.Reader = zr + var verifier digest.Verifier + if checksum != "" { + if digest, err := digest.Parse(checksum); err == nil { + verifier = digest.Verifier() + r = io.TeeReader(r, verifier) + } + } + if err := extractTarDirectory(root, prefix, r); err != nil { + return err + } + if verifier != nil && !verifier.Verified() { + return errors.New("content digest mismatch") + } + return nil +} diff --git a/libs-go/pkg/context/context.go b/libs-go/pkg/context/context.go new file mode 100644 index 0000000..f070040 --- /dev/null +++ b/libs-go/pkg/context/context.go @@ -0,0 +1,9 @@ +package context + +import "context" + +// Background returns a default context with logger discarded. 
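+//
+// Example (editor's sketch, not part of the original change):
+//
+//	ctx := Background()                // logging is discarded
+//	GetLogger(ctx).Info("not printed") // goes to ioutil.Discard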
+func Background() context.Context {
+	ctx := context.Background()
+	return WithLoggerDiscarded(ctx)
+}
diff --git a/libs-go/pkg/context/logger.go b/libs-go/pkg/context/logger.go
new file mode 100644
index 0000000..b83f277
--- /dev/null
+++ b/libs-go/pkg/context/logger.go
@@ -0,0 +1,35 @@
+package context
+
+import (
+	"context"
+	"io"
+	"io/ioutil"
+
+	"github.com/containerd/containerd/log"
+	"github.com/sirupsen/logrus"
+)
+
+// WithLogger returns a new context with the provided logger.
+// This method wraps github.com/containerd/containerd/log.WithLogger()
+func WithLogger(ctx context.Context, logger *logrus.Entry) context.Context {
+	return log.WithLogger(ctx, logger)
+}
+
+// WithLoggerFromWriter returns a new context with a logger writing to the provided writer.
+func WithLoggerFromWriter(ctx context.Context, writer io.Writer) context.Context {
+	logger := logrus.New()
+	logger.Out = writer
+	entry := logrus.NewEntry(logger)
+	return WithLogger(ctx, entry)
+}
+
+// WithLoggerDiscarded returns a new context with a logger that discards all output.
+func WithLoggerDiscarded(ctx context.Context) context.Context {
+	return WithLoggerFromWriter(ctx, ioutil.Discard)
+}
+
+// GetLogger retrieves the current logger from the context.
+// This method wraps github.com/containerd/containerd/log.GetLogger()
+func GetLogger(ctx context.Context) *logrus.Entry {
+	return log.GetLogger(ctx)
+}
diff --git a/libs-go/scripts/test.sh b/libs-go/scripts/test.sh
new file mode 100755
index 0000000..1d3e6e1
--- /dev/null
+++ b/libs-go/scripts/test.sh
@@ -0,0 +1,19 @@
+#!/bin/bash -ex
+
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+cd $DIR/../
+
+rm -rf .cover/ .test/
+mkdir .cover/ .test/
+trap "rm -rf .test/" EXIT
+
+export CGO_ENABLED=0
+for pkg in `go list ./pkg/... | grep -v /vendor/`; do
+  go test -v -covermode=atomic \
+    -coverprofile=".cover/$(echo $pkg | sed 's/\//_/g').cover.out" $pkg
+done
+
+echo "mode: set" > .cover/cover.out && cat .cover/*.cover.out | grep -v mode: | sort -r | \
+  awk '{if($1 != last) {print $0;last=$1}}' >> .cover/cover.out
+
+go tool cover -html=.cover/cover.out -o=.cover/coverage.html
diff --git a/libs-go/testdata/charts/chartmuseum-1.8.2.tgz b/libs-go/testdata/charts/chartmuseum-1.8.2.tgz
new file mode 100644
index 0000000..d1a6780
Binary files /dev/null and b/libs-go/testdata/charts/chartmuseum-1.8.2.tgz differ
diff --git a/libs-go/testdata/charts/chartmuseum/.helmignore b/libs-go/testdata/charts/chartmuseum/.helmignore
new file mode 100755
index 0000000..46fd899
--- /dev/null
+++ b/libs-go/testdata/charts/chartmuseum/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +# OWNERS file for Kubernetes +OWNERS diff --git a/libs-go/testdata/charts/chartmuseum/Chart.yaml b/libs-go/testdata/charts/chartmuseum/Chart.yaml new file mode 100755 index 0000000..1e7e44c --- /dev/null +++ b/libs-go/testdata/charts/chartmuseum/Chart.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +appVersion: 0.8.0 +description: Host your own Helm Chart Repository +home: https://github.com/helm/chartmuseum +icon: https://raw.githubusercontent.com/helm/chartmuseum/master/logo2.png +keywords: +- chartmuseum +- helm +- charts repo +maintainers: +- email: opensource@codefresh.io + name: codefresh-io +- email: hello@cloudposse.com + name: cloudposse +- email: chartmuseum@gmail.com + name: chartmuseum +name: chartmuseum +version: 1.8.2 diff --git a/libs-go/testdata/charts/chartmuseum/README.md b/libs-go/testdata/charts/chartmuseum/README.md new file mode 100755 index 0000000..7383ecb --- /dev/null +++ b/libs-go/testdata/charts/chartmuseum/README.md @@ -0,0 +1,477 @@ +# ChartMuseum Helm Chart + +Deploy your own private ChartMuseum. + +Please also see https://github.com/kubernetes-helm/chartmuseum + +## Table of Content + + + + + +- [Prerequisites](#prerequisites) +- [Configuration](#configuration) +- [Installation](#installation) + - [Using with Amazon S3](#using-with-amazon-s3) + - [permissions grant with access keys](#permissions-grant-with-access-keys) + - [permissions grant with IAM instance profile](#permissions-grant-with-iam-instance-profile) + - [permissions grant with IAM assumed role](#permissions-grant-with-iam-assumed-role) + - [Using with Google Cloud Storage](#using-with-google-cloud-storage) + - [Using with Microsoft Azure Blob Storage](#using-with-microsoft-azure-blob-storage) + - [Using with Alibaba Cloud OSS Storage](#using-with-alibaba-cloud-oss-storage) + - [Using with local filesystem storage](#using-with-local-filesystem-storage) + - [Example storage class](#example-storage-class) +- [Uninstall](#uninstall) + + + + +## Prerequisites + +* Kubernetes with extensions/v1beta1 available +* [If enabled] A persistent storage resource and RW access to it +* [If enabled] Kubernetes StorageClass for dynamic provisioning + +## Configuration + +By default this chart will not have persistent storage, and the API service +will be *DISABLED*. This protects against unauthorized access to the API +with default configuration values. + +In addition, by default, pod `securityContext.fsGroup` is set to `1000`. This +is the user/group that the ChartMuseum container runs as, and is used to +enable local persitant storage. If your cluster has DenySecurityContext enabled, +you can set `securityContext` to `{}` and still use this chart with one of +the cloud storage options. + +For a more robust solution supply helm install with a custom values.yaml +You are also required to create the StorageClass resource ahead of time: +``` +kubectl create -f /path/to/storage_class.yaml +``` + +The following table lists common configurable parameters of the chart and +their default values. See values.yaml for all available options. 
+ +| Parameter | Description | Default | +|----------------------------------------|---------------------------------------------|-----------------------------------------------------| +| `image.pullPolicy` | Container pull policy | `IfNotPresent` | +| `image.repository` | Container image to use | `chartmuseum/chartmuseum` | +| `image.tag` | Container image tag to deploy | `v0.8.0` | +| `persistence.accessMode` | Access mode to use for PVC | `ReadWriteOnce` | +| `persistence.enabled` | Whether to use a PVC for persistent storage | `false` | +| `persistence.size` | Amount of space to claim for PVC | `8Gi` | +| `persistence.labels` | Additional labels for PVC | `{}` | +| `persistence.storageClass` | Storage Class to use for PVC | `-` | +| `persistence.volumeName` | Volume to use for PVC | `` | +| `persistence.pv.enabled` | Whether to use a PV for persistent storage | `false` | +| `persistence.pv.capacity.storage` | Storage size to use for PV | `8Gi` | +| `persistence.pv.accessMode` | Access mode to use for PV | `ReadWriteOnce` | +| `persistence.pv.nfs.server` | NFS server for PV | `` | +| `persistence.pv.nfs.path` | Storage Path | `` | +| `persistence.pv.pvname` | Custom name for private volume | `` | +| `replicaCount` | k8s replicas | `1` | +| `resources.limits.cpu` | Container maximum CPU | `100m` | +| `resources.limits.memory` | Container maximum memory | `128Mi` | +| `resources.requests.cpu` | Container requested CPU | `80m` | +| `resources.requests.memory` | Container requested memory | `64Mi` | +| `serviceAccount.create` | If true, create the service account | `false` | +| `serviceAccount.name` | Name of the serviceAccount to create or use | `{{ chartmuseum.fullname }}` | +| `securityContext` | Map of securityContext for the pod | `{ fsGroup: 1000 }` | +| `nodeSelector` | Map of node labels for pod assignment | `{}` | +| `tolerations` | List of node taints to tolerate | `[]` | +| `affinity` | Map of node/pod affinities | `{}` | +| `env.open.STORAGE` | Storage Backend to use | `local` | +| `env.open.ALIBABA_BUCKET` | Bucket to store charts in for Alibaba | `` | +| `env.open.ALIBABA_PREFIX` | Prefix to store charts under for Alibaba | `` | +| `env.open.ALIBABA_ENDPOINT` | Alternative Alibaba endpoint | `` | +| `env.open.ALIBABA_SSE` | Server side encryption algorithm to use | `` | +| `env.open.AMAZON_BUCKET` | Bucket to store charts in for AWS | `` | +| `env.open.AMAZON_ENDPOINT` | Alternative AWS endpoint | `` | +| `env.open.AMAZON_PREFIX` | Prefix to store charts under for AWS | `` | +| `env.open.AMAZON_REGION` | Region to use for bucket access for AWS | `` | +| `env.open.AMAZON_SSE` | Server side encryption algorithm to use | `` | +| `env.open.GOOGLE_BUCKET` | Bucket to store charts in for GCP | `` | +| `env.open.GOOGLE_PREFIX` | Prefix to store charts under for GCP | `` | +| `env.open.STORAGE_MICROSOFT_CONTAINER` | Container to store charts under for MS | `` | +| `env.open.STORAGE_MICROSOFT_PREFIX` | Prefix to store charts under for MS | `` | +| `env.open.STORAGE_OPENSTACK_CONTAINER` | Container to store charts for openstack | `` | +| `env.open.STORAGE_OPENSTACK_PREFIX` | Prefix to store charts for openstack | `` | +| `env.open.STORAGE_OPENSTACK_REGION` | Region of openstack container | `` | +| `env.open.STORAGE_OPENSTACK_CACERT` | Path to a CA cert bundle for openstack | `` | +| `env.open.CHART_POST_FORM_FIELD_NAME` | Form field to query for chart file content | `` | +| `env.open.PROV_POST_FORM_FIELD_NAME` | Form field to query for chart provenance | `` | +| `env.open.DEPTH` | 
levels of nested repos for multitenancy. | `0` | +| `env.open.DEBUG` | Show debug messages | `false` | +| `env.open.LOG_JSON` | Output structured logs in JSON | `true` | +| `env.open.DISABLE_STATEFILES` | Disable use of index-cache.yaml | `false` | +| `env.open.DISABLE_METRICS` | Disable Prometheus metrics | `true` | +| `env.open.DISABLE_API` | Disable all routes prefixed with /api | `true` | +| `env.open.ALLOW_OVERWRITE` | Allow chart versions to be re-uploaded | `false` | +| `env.open.CHART_URL` | Absolute url for .tgzs in index.yaml | `` | +| `env.open.AUTH_ANONYMOUS_GET` | Allow anon GET operations when auth is used | `false` | +| `env.open.CONTEXT_PATH` | Set the base context path | `` | +| `env.open.INDEX_LIMIT` | Parallel scan limit for the repo indexer | `` | +| `env.open.CACHE` | Cache store, can be one of: redis | `` | +| `env.open.CACHE_REDIS_ADDR` | Address of Redis service (host:port) | `` | +| `env.open.CACHE_REDIS_DB` | Redis database to be selected after connect | `0` | +| `env.field` | Expose pod information to containers through environment variables | `` | +| `env.secret.BASIC_AUTH_USER` | Username for basic HTTP authentication | `` | +| `env.secret.BASIC_AUTH_PASS` | Password for basic HTTP authentication | `` | +| `env.secret.CACHE_REDIS_PASSWORD` | Redis requirepass server configuration | `` | +| `gcp.secret.enabled` | Flag for the GCP service account | `false` | +| `gcp.secret.name` | Secret name for the GCP json file | `` | +| `gcp.secret.key` | Secret key for te GCP json file | `credentials.json` | +| `service.type` | Kubernetes Service type | `ClusterIP` | +| `service.clusterIP` | Static clusterIP or None for headless services| `nil` | +| `service.servicename` | Custom name for service | `` | +| `service.labels` | Additional labels for service | `{}` | +| `deployment.labels` | Additional labels for deployment | `{}` | +| `deployment.matchlabes` | Match labels for deployment selector | `{}` | + +Specify each parameter using the `--set key=value[,key=value]` argument to +`helm install`. + +## Installation + +```shell +helm install --name my-chartmuseum -f custom.yaml stable/chartmuseum +``` + +### Using with Amazon S3 +Make sure your environment is properly setup to access `my-s3-bucket` + +You need at least the following permissions inside your IAM Policy +```yaml +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "AllowListObjects", + "Effect": "Allow", + "Action": [ + "s3:ListBucket" + ], + "Resource": "arn:aws:s3:::my-s3-bucket" + }, + { + "Sid": "AllowObjectsCRUD", + "Effect": "Allow", + "Action": [ + "s3:DeleteObject", + "s3:GetObject", + "s3:PutObject" + ], + "Resource": "arn:aws:s3:::my-s3-bucket/*" + } + ] +} +``` + +You can grant it to `chartmuseum` by several ways: + +#### permissions grant with access keys + +Grant permissions to `special user` and us it's access keys for auth on aws + +Specify `custom.yaml` with such values + +```yaml +env: + open: + STORAGE: amazon + STORAGE_AMAZON_BUCKET: my-s3-bucket + STORAGE_AMAZON_PREFIX: + STORAGE_AMAZON_REGION: us-east-1 + secret: + AWS_ACCESS_KEY_ID: "********" ## aws access key id value + AWS_SECRET_ACCESS_KEY: "********" ## aws access key secret value +``` + +Run command to install + +```shell +helm install --name my-chartmuseum -f custom.yaml stable/chartmuseum +``` + +#### permissions grant with IAM instance profile + +You can grant permissions to k8s node IAM instance profile. 
+For more information read this [article](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_switch-role-ec2.html) + +Specify `custom.yaml` with such values + +```yaml +env: + open: + STORAGE: amazon + STORAGE_AMAZON_BUCKET: my-s3-bucket + STORAGE_AMAZON_PREFIX: + STORAGE_AMAZON_REGION: us-east-1 +``` + +Run command to install + +```shell +helm install --name my-chartmuseum -f custom.yaml stable/chartmuseum +``` + +#### permissions grant with IAM assumed role + +To provide access with assumed role you need to install [kube2iam](https://github.com/kubernetes/charts/tree/master/stable/kube2iam) +and create role with granded permissions. + +Specify `custom.yaml` with such values + +```yaml +env: + open: + STORAGE: amazon + STORAGE_AMAZON_BUCKET: my-s3-bucket + STORAGE_AMAZON_PREFIX: + STORAGE_AMAZON_REGION: us-east-1 +replica: + annotations: + iam.amazonaws.com/role: "{assumed role name}" +``` + +Run command to install + +```shell +helm install --name my-chartmuseum -f custom.yaml stable/chartmuseum +``` + +### Using with Google Cloud Storage +Make sure your environment is properly setup to access `my-gcs-bucket` + +Specify `custom.yaml` with such values + +```yaml +env: + open: + STORAGE: google + STORAGE_GOOGLE_BUCKET: my-gcs-bucket + STORAGE_GOOGLE_PREFIX: +``` + +### Using with Google Cloud Storage and a Google Service Account + +A Google service account credentials are stored in a json file. There are two approaches here. Ideally you don't want to send your secrets to tiller. In that case, before installing this chart, you should create a secret with those credentials: + +```shell +kubectl create secret generic chartmuseum-secret --from-file=credentials.json="my-project-45e35d85a593.json" +``` + +Then you can either use a `VALUES` yaml with your values or set those values in the command line: + +```shell +helm install stable/chartmuseum --debug --set gcp.secret.enabled=true,env.open.STORAGE=google,env.open.DISABLE_API=false,env.open.STORAGE_GOOGLE_BUCKET=my-gcp-chartmuseum,gcp.secret.name=chartmuseum-secret +``` + +If you prefer to use a yaml file: + +```yaml +env: + open: + STORAGE: google + STORAGE_GOOGLE_BUCKET: my-gcs-bucket + STORAGE_GOOGLE_PREFIX: + +gcp: + secret: + enabled: true + name: chartmuseum-secret + key: credentials.json +``` + +Run command to install + +```shell +helm install --name my-chartmuseum -f custom.yaml stable/chartmuseum +``` + +In case that you don't mind adding your secret to tiller (you shouldn't do it), this are the commands + +```yaml +env: + open: + STORAGE: google + STORAGE_GOOGLE_BUCKET: my-gcs-bucket + STORAGE_GOOGLE_PREFIX: + secret: + GOOGLE_CREDENTIALS_JSON: my-json-file-base64-encoded +gcp: + secret: + enabled: true + +``` + +Run command to install + +```shell +helm install --name my-chartmuseum -f custom.yaml stable/chartmuseum +``` + +To set the values directly in the command line, use the follosing command. Note that we have to base64 encode the json file because we cannot pass a multi-line text as a value. + +```shell +export JSONKEY=$(cat my-project-77e35d85a593.json | base64) +helm install stable/chartmuseum --debug --set gcp.secret.enabled=true,env.secret.GOOGLE_CREDENTIALS_JSON=${JSONKEY},env.open.STORAGE=google,env.open.DISABLE_API=false,env.open.STORAGE_GOOGLE_BUCKET=my-gcp-chartmuseum +``` + +### Using with Microsoft Azure Blob Storage + +Make sure your environment is properly setup to access `mycontainer`. 
+
+### Using with Microsoft Azure Blob Storage
+
+Make sure your environment is properly set up to access `mycontainer`.
+
+To do so, you must set the following env vars:
+- `AZURE_STORAGE_ACCOUNT`
+- `AZURE_STORAGE_ACCESS_KEY`
+
+Specify `custom.yaml` with values such as:
+
+```yaml
+env:
+  open:
+    STORAGE: microsoft
+    STORAGE_MICROSOFT_CONTAINER: mycontainer
+    # prefix to store charts for microsoft storage backend
+    STORAGE_MICROSOFT_PREFIX:
+  secret:
+    AZURE_STORAGE_ACCOUNT: "********" ## azure storage account
+    AZURE_STORAGE_ACCESS_KEY: "********" ## azure storage account access key
+```
+
+Run this command to install:
+
+```shell
+helm install --name my-chartmuseum -f custom.yaml stable/chartmuseum
+```
+
+### Using with Alibaba Cloud OSS Storage
+
+Make sure your environment is properly set up to access `my-oss-bucket`.
+
+To do so, you must set the following env vars:
+- `ALIBABA_CLOUD_ACCESS_KEY_ID`
+- `ALIBABA_CLOUD_ACCESS_KEY_SECRET`
+
+Specify `custom.yaml` with values such as:
+
+```yaml
+env:
+  open:
+    STORAGE: alibaba
+    STORAGE_ALIBABA_BUCKET: my-oss-bucket
+    STORAGE_ALIBABA_PREFIX:
+    STORAGE_ALIBABA_ENDPOINT: oss-cn-beijing.aliyuncs.com
+  secret:
+    ALIBABA_CLOUD_ACCESS_KEY_ID: "********" ## alibaba OSS access key id
+    ALIBABA_CLOUD_ACCESS_KEY_SECRET: "********" ## alibaba OSS access key secret
+```
+
+Run this command to install:
+
+```shell
+helm install --name my-chartmuseum -f custom.yaml stable/chartmuseum
+```
+
+### Using with OpenStack Object Storage
+
+Make sure your environment is properly set up to access `mycontainer`.
+
+To do so, you must set the following env vars (depending on your OpenStack version):
+- `OS_AUTH_URL`
+- either `OS_PROJECT_NAME` or `OS_TENANT_NAME` or `OS_PROJECT_ID` or `OS_TENANT_ID`
+- either `OS_DOMAIN_NAME` or `OS_DOMAIN_ID`
+- either `OS_USERNAME` or `OS_USERID`
+- `OS_PASSWORD`
+
+Specify `custom.yaml` with values such as:
+
+```yaml
+env:
+  open:
+    STORAGE: openstack
+    STORAGE_OPENSTACK_CONTAINER: mycontainer
+    STORAGE_OPENSTACK_PREFIX:
+    STORAGE_OPENSTACK_REGION: YOURREGION
+  secret:
+    OS_AUTH_URL: https://myauth.url.com/v2.0/
+    OS_TENANT_ID: yourtenantid
+    OS_USERNAME: yourusername
+    OS_PASSWORD: yourpassword
+```
+
+Run this command to install:
+
+```shell
+helm install --name my-chartmuseum -f custom.yaml stable/chartmuseum
+```
+
+### Using with local filesystem storage
+By default, chartmuseum uses local filesystem storage, but on pod recreation
+it will lose all charts. To prevent that, enable persistent storage.
+
+```yaml
+env:
+  open:
+    STORAGE: local
+persistence:
+  enabled: true
+  accessMode: ReadWriteOnce
+  size: 8Gi
+  ## A manually managed Persistent Volume and Claim
+  ## Requires persistence.enabled: true
+  ## If defined, PVC must be created manually before volume will be bound
+  # existingClaim:
+
+  ## Chartmuseum data Persistent Volume Storage Class
+  ## If defined, storageClassName: <storageClass>
+  ## If set to "-", storageClassName: "", which disables dynamic provisioning
+  ## If undefined (the default) or set to null, no storageClassName spec is
+  ## set, choosing the default provisioner. (gp2 on AWS, standard on
+  ## GKE, AWS & OpenStack)
+  ##
+  # storageClass: "-"
+```
+
+Run this command to install:
+
+```shell
+helm install --name my-chartmuseum -f custom.yaml stable/chartmuseum
+```
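+
+If you would rather bind to a manually created claim (see `existingClaim`
+above), a minimal sketch follows; `my-chartmuseum-data` is a placeholder for
+a PVC that already exists in the release namespace:
+
+```yaml
+env:
+  open:
+    STORAGE: local
+persistence:
+  enabled: true
+  # name of a PVC created beforehand; the chart will not create one
+  existingClaim: my-chartmuseum-data
+```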
+
+#### Example storage class
+
+An example storage-class.yaml is provided here for use with a Ceph cluster.
+
+```yaml
+kind: StorageClass
+apiVersion: storage.k8s.io/v1
+metadata:
+  name: storage-volume
+provisioner: kubernetes.io/rbd
+parameters:
+  monitors: "10.11.12.13:4567,10.11.12.14:4567"
+  adminId: admin
+  adminSecretName: thesecret
+  adminSecretNamespace: default
+  pool: chartstore
+  userId: user
+  userSecretName: thesecret
+```
+
+## Uninstall
+
+By default, a deliberate uninstall will result in the persistent volume
+claim being deleted.
+
+```shell
+helm delete my-chartmuseum
+```
+
+To delete the deployment and its history:
+```shell
+helm delete --purge my-chartmuseum
+```
diff --git a/libs-go/testdata/charts/chartmuseum/templates/NOTES.txt b/libs-go/testdata/charts/chartmuseum/templates/NOTES.txt
new file mode 100755
index 0000000..5efa6be
--- /dev/null
+++ b/libs-go/testdata/charts/chartmuseum/templates/NOTES.txt
@@ -0,0 +1,30 @@
+** Please be patient while the chart is being deployed **
+
+Get the ChartMuseum URL by running:
+
+{{- if contains "NodePort" .Values.service.type }}
+
+  export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "chartmuseum.fullname" . }})
+  export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
+  echo http://$NODE_IP:$NODE_PORT{{ .Values.env.open.CONTEXT_PATH }}/
+
+{{- else if contains "LoadBalancer" .Values.service.type }}
+
+** Please ensure an external IP is associated with the {{ template "chartmuseum.fullname" . }} service before proceeding **
+** Watch the status using: kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "chartmuseum.fullname" . }} **
+
+  export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "chartmuseum.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
+  echo http://$SERVICE_IP:{{ .Values.service.externalPort }}{{ .Values.env.open.CONTEXT_PATH }}/
+
+OR
+
+  export SERVICE_HOST=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "chartmuseum.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].hostname}')
+  echo http://$SERVICE_HOST:{{ .Values.service.externalPort }}{{ .Values.env.open.CONTEXT_PATH }}/
+
+{{- else if contains "ClusterIP" .Values.service.type }}
+
+  export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "chartmuseum.name" . }}" -l "release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
+  echo http://127.0.0.1:8080{{ .Values.env.open.CONTEXT_PATH }}/
+  kubectl port-forward $POD_NAME 8080:8080 --namespace {{ .Release.Namespace }}
+
+{{- end }}
diff --git a/libs-go/testdata/charts/chartmuseum/templates/_helpers.tpl b/libs-go/testdata/charts/chartmuseum/templates/_helpers.tpl
new file mode 100755
index 0000000..bd998c8
--- /dev/null
+++ b/libs-go/testdata/charts/chartmuseum/templates/_helpers.tpl
@@ -0,0 +1,84 @@
+{{- /*
+name defines a template for the name of the chartmuseum chart.
+
+The prevailing wisdom is that names should only contain a-z, 0-9 plus dot (.) and dash (-), and should
+not exceed 63 characters.
+
+Parameters:
+
+- .Values.nameOverride: Replaces the computed name with this given name
+- .Values.namePrefix: Prefix
+- .Values.global.namePrefix: Global prefix
+- .Values.nameSuffix: Suffix
+- .Values.global.nameSuffix: Global suffix
+
+The applied order is: "global prefix + prefix + name + suffix + global suffix"
+
+Usage: 'name: "{{- template "chartmuseum.name" . 
-}}"' +*/ -}} +{{- define "chartmuseum.name"}} +{{- $global := default (dict) .Values.global -}} +{{- $base := default .Chart.Name .Values.nameOverride -}} +{{- $gpre := default "" $global.namePrefix -}} +{{- $pre := default "" .Values.namePrefix -}} +{{- $suf := default "" .Values.nameSuffix -}} +{{- $gsuf := default "" $global.nameSuffix -}} +{{- $name := print $gpre $pre $base $suf $gsuf -}} +{{- $name | lower | trunc 54 | trimSuffix "-" -}} +{{- end -}} + +{{- /* +fullname defines a suitably unique name for a resource by combining +the release name and the chartmuseum chart name. + +The prevailing wisdom is that names should only contain a-z, 0-9 plus dot (.) and dash (-), and should +not exceed 63 characters. + +Parameters: + +- .Values.fullnameOverride: Replaces the computed name with this given name +- .Values.fullnamePrefix: Prefix +- .Values.global.fullnamePrefix: Global prefix +- .Values.fullnameSuffix: Suffix +- .Values.global.fullnameSuffix: Global suffix + +The applied order is: "global prefix + prefix + name + suffix + global suffix" + +Usage: 'name: "{{- template "chartmuseum.fullname" . -}}"' +*/ -}} +{{- define "chartmuseum.fullname"}} +{{- $global := default (dict) .Values.global -}} +{{- $base := default (printf "%s-%s" .Release.Name .Chart.Name) .Values.fullnameOverride -}} +{{- $gpre := default "" $global.fullnamePrefix -}} +{{- $pre := default "" .Values.fullnamePrefix -}} +{{- $suf := default "" .Values.fullnameSuffix -}} +{{- $gsuf := default "" $global.fullnameSuffix -}} +{{- $name := print $gpre $pre $base $suf $gsuf -}} +{{- $name | lower | trunc 54 | trimSuffix "-" -}} +{{- end -}} + + +{{- /* +chartmuseum.labels.standard prints the standard chartmuseum Helm labels. + +The standard labels are frequently used in metadata. +*/ -}} +{{- define "chartmuseum.labels.standard" -}} +app: {{ template "chartmuseum.name" . }} +chart: {{ template "chartmuseum.chartref" . }} +heritage: {{ .Release.Service | quote }} +release: {{ .Release.Name | quote }} +{{- end -}} + +{{- /* +chartmuseum.chartref prints a chart name and version. + +It does minimal escaping for use in Kubernetes labels. + +Example output: + +chartmuseum-0.4.5 +*/ -}} +{{- define "chartmuseum.chartref" -}} +{{- replace "+" "_" .Chart.Version | printf "%s-%s" .Chart.Name -}} +{{- end -}} diff --git a/libs-go/testdata/charts/chartmuseum/templates/deployment.yaml b/libs-go/testdata/charts/chartmuseum/templates/deployment.yaml new file mode 100755 index 0000000..f593336 --- /dev/null +++ b/libs-go/testdata/charts/chartmuseum/templates/deployment.yaml @@ -0,0 +1,142 @@ +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: {{ include "chartmuseum.fullname" . }} + annotations: +{{ toYaml .Values.deployment.annotations | indent 4 }} + labels: +{{ include "chartmuseum.labels.standard" . | indent 4 }} +{{- if .Values.deployment.labels }} +{{ toYaml .Values.deployment.labels | indent 4 }} +{{- end }} +spec: + replicas: {{ .Values.replicaCount }} + strategy: +{{ toYaml .Values.strategy | indent 4 }} + revisionHistoryLimit: 10 +{{- if .Values.deployment.matchlabes }} + selector: + matchLabels: +{{ toYaml .Values.deployment.matchlabes | indent 6 }} +{{- end }} + template: + metadata: + name: {{ include "chartmuseum.fullname" . }} + annotations: +{{ toYaml .Values.replica.annotations | indent 8 }} + labels: + app: {{ template "chartmuseum.name" . 
}} + release: {{ .Release.Name | quote }} +{{- if .Values.deployment.labels }} +{{ toYaml .Values.deployment.labels | indent 8 }} +{{- end }} + spec: + containers: + - name: {{ .Chart.Name }} + image: {{ .Values.image.repository }}:{{ .Values.image.tag }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + env: +{{- range $name, $value := .Values.env.open }} +{{- if not (empty $value) }} + - name: {{ $name | quote }} + value: {{ $value | quote }} +{{- end }} +{{- end }} +{{- range $name, $value := .Values.env.field }} +{{- if not ( empty $value) }} + - name: {{ $name | quote }} + valueFrom: + fieldRef: + fieldPath: {{ $value | quote }} +{{- end }} +{{- end }} +{{- if .Values.gcp.secret.enabled }} + - name: GOOGLE_APPLICATION_CREDENTIALS + value: "/etc/secrets/google/credentials.json" +{{- end }} +{{- $secret_name := include "chartmuseum.fullname" . }} +{{- range $name, $value := .Values.env.secret }} +{{- if not ( empty $value) }} + - name: {{ $name | quote }} + valueFrom: + secretKeyRef: + name: {{ $secret_name }} + key: {{ $name | quote }} +{{- end }} +{{- end }} + args: + - --port=8080 +{{- if eq .Values.env.open.STORAGE "local" }} + - --storage-local-rootdir=/storage +{{- end }} + ports: + - name: http + containerPort: 8080 + livenessProbe: + httpGet: + path: {{ .Values.env.open.CONTEXT_PATH }}/health + port: http +{{ toYaml .Values.probes.liveness | indent 10 }} + readinessProbe: + httpGet: + path: {{ .Values.env.open.CONTEXT_PATH }}/health + port: http +{{ toYaml .Values.probes.readiness | indent 10 }} +{{- if eq .Values.env.open.STORAGE "local" }} + volumeMounts: + - mountPath: /storage + name: storage-volume +{{- end }} +{{- if .Values.gcp.secret.enabled }} + volumeMounts: + - mountPath: /etc/secrets/google + name: {{ include "chartmuseum.fullname" . }}-gcp +{{- end }} + {{- with .Values.resources }} + resources: +{{ toYaml . | indent 10 }} + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} + {{- end }} + {{- if .Values.serviceAccount.create }} + serviceAccountName: {{ include "chartmuseum.fullname" . }} + {{- else if .Values.serviceAccount.name }} + serviceAccountName: {{ .Values.serviceAccount.name }} + {{- end }} + {{- with .Values.securityContext }} + securityContext: +{{ toYaml . | indent 8 }} + {{- end }} + volumes: + - name: storage-volume + {{- if .Values.persistence.enabled }} + persistentVolumeClaim: + claimName: {{ .Values.persistence.existingClaim | default (include "chartmuseum.fullname" .) }} + {{- else }} + emptyDir: {} + {{- end -}} + {{ if .Values.gcp.secret.enabled }} + - name: {{ include "chartmuseum.fullname" . }}-gcp + secret: + {{ if .Values.env.secret.GOOGLE_CREDENTIALS_JSON }} + secretName: {{ include "chartmuseum.fullname" . }} + items: + - key: GOOGLE_CREDENTIALS_JSON + path: credentials.json + {{ else }} + secretName: {{ .Values.gcp.secret.name }} + items: + - key: {{ .Values.gcp.secret.key }} + path: credentials.json + {{ end }} + {{ end }} diff --git a/libs-go/testdata/charts/chartmuseum/templates/ingress.yaml b/libs-go/testdata/charts/chartmuseum/templates/ingress.yaml new file mode 100755 index 0000000..0ae70e2 --- /dev/null +++ b/libs-go/testdata/charts/chartmuseum/templates/ingress.yaml @@ -0,0 +1,33 @@ +{{- $servicePort := .Values.service.externalPort -}} +{{- $serviceName := include "chartmuseum.fullname" . 
-}} +{{- if .Values.ingress.enabled }} +--- +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: {{ include "chartmuseum.fullname" . }} + annotations: +{{ toYaml .Values.ingress.annotations | indent 4 }} + labels: +{{- if .Values.ingress.labels }} +{{ toYaml .Values.ingress.labels | indent 4 }} +{{- end }} +{{ include "chartmuseum.labels.standard" . | indent 4 }} +spec: + rules: + {{- range $host, $paths := .Values.ingress.hosts }} + - host: {{ $host }} + http: + paths: + {{- range $paths }} + - path: {{ . }} + backend: + serviceName: {{ $serviceName }} + servicePort: {{ $servicePort }} + {{- end -}} + {{- end -}} + {{- if .Values.ingress.tls }} + tls: +{{ toYaml .Values.ingress.tls | indent 4 }} + {{- end -}} +{{- end -}} diff --git a/libs-go/testdata/charts/chartmuseum/templates/pv.yaml b/libs-go/testdata/charts/chartmuseum/templates/pv.yaml new file mode 100755 index 0000000..1aaff0f --- /dev/null +++ b/libs-go/testdata/charts/chartmuseum/templates/pv.yaml @@ -0,0 +1,21 @@ +{{- if .Values.persistence.pv.enabled -}} +apiVersion: v1 +kind: PersistentVolume +metadata: +{{- if .Values.persistence.pv.pvname }} + name: {{ .Values.persistence.pv.pvname }} +{{- else }} + name: {{ include "chartmuseum.fullname" . }} +{{- end }} + labels: + app: {{ include "chartmuseum.fullname" . }} + release: {{ .Release.Name | quote }} +spec: + capacity: + storage: {{ .Values.persistence.pv.capacity.storage }} + accessModes: + - {{ .Values.persistence.pv.accessMode | quote }} + nfs: + server: {{ .Values.persistence.pv.nfs.server }} + path: {{ .Values.persistence.pv.nfs.path | quote }} +{{- end }} \ No newline at end of file diff --git a/libs-go/testdata/charts/chartmuseum/templates/pvc.yaml b/libs-go/testdata/charts/chartmuseum/templates/pvc.yaml new file mode 100755 index 0000000..aaedace --- /dev/null +++ b/libs-go/testdata/charts/chartmuseum/templates/pvc.yaml @@ -0,0 +1,27 @@ +{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) -}} +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: {{ include "chartmuseum.fullname" . }} + labels: + app: {{ include "chartmuseum.fullname" . }} + release: {{ .Release.Name | quote }} +{{- if .Values.persistence.labels }} +{{ toYaml .Values.persistence.labels | indent 4 }} +{{- end }} +spec: + accessModes: + - {{ .Values.persistence.accessMode | quote }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} +{{- if .Values.persistence.storageClass }} +{{- if (eq "-" .Values.persistence.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.persistence.storageClass }}" +{{- end }} +{{- else if and .Values.persistence.volumeName (.Values.persistence.pv.enabled) }} + volumeName: "{{ .Values.persistence.volumeName }}" +{{- end }} +{{- end }} diff --git a/libs-go/testdata/charts/chartmuseum/templates/secret.yaml b/libs-go/testdata/charts/chartmuseum/templates/secret.yaml new file mode 100755 index 0000000..c46fd88 --- /dev/null +++ b/libs-go/testdata/charts/chartmuseum/templates/secret.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "chartmuseum.fullname" . }} + labels: +{{ include "chartmuseum.labels.standard" . 
| indent 4 }} +type: Opaque +data: +{{- range $name, $value := .Values.env.secret }} +{{- if not (empty $value) }} +{{- if eq $name "GOOGLE_CREDENTIALS_JSON" }} + {{ $name }}: {{ $value }} + {{- else }} + {{ $name }}: {{ $value | b64enc }} +{{- end }} +{{- end }} +{{- end }} diff --git a/libs-go/testdata/charts/chartmuseum/templates/service.yaml b/libs-go/testdata/charts/chartmuseum/templates/service.yaml new file mode 100755 index 0000000..65ce7a2 --- /dev/null +++ b/libs-go/testdata/charts/chartmuseum/templates/service.yaml @@ -0,0 +1,36 @@ +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.service.servicename }} + name: {{ .Values.service.servicename }} +{{- else }} + name: {{ include "chartmuseum.fullname" . }} +{{- end }} + labels: +{{ include "chartmuseum.labels.standard" . | indent 4 }} +{{- if .Values.service.labels }} +{{ toYaml .Values.service.labels | indent 4 }} +{{- end }} +{{- if .Values.service.annotations }} + annotations: +{{ toYaml .Values.service.annotations | indent 4 }} +{{- end }} +spec: + type: {{ .Values.service.type }} + {{- if eq .Values.service.type "ClusterIP" }} + {{- if .Values.service.clusterIP }} + clusterIP: {{ .Values.service.clusterIP }} + {{- end }} + {{- end }} + ports: + - port: {{ .Values.service.externalPort }} +{{- if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.nodePort))) }} + nodePort: {{.Values.service.nodePort}} +{{- else }} + targetPort: http +{{- end }} + protocol: TCP + name: http + selector: + app: {{ template "chartmuseum.name" . }} + release: {{ .Release.Name | quote }} diff --git a/libs-go/testdata/charts/chartmuseum/templates/serviceaccount.yaml b/libs-go/testdata/charts/chartmuseum/templates/serviceaccount.yaml new file mode 100755 index 0000000..a4c4f44 --- /dev/null +++ b/libs-go/testdata/charts/chartmuseum/templates/serviceaccount.yaml @@ -0,0 +1,9 @@ +{{- if .Values.serviceAccount.create -}} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "chartmuseum.fullname" . }} + labels: +{{ include "chartmuseum.labels.standard" . 
| indent 4 }} +{{- end -}} diff --git a/libs-go/testdata/charts/chartmuseum/values.yaml b/libs-go/testdata/charts/chartmuseum/values.yaml new file mode 100755 index 0000000..50cd65f --- /dev/null +++ b/libs-go/testdata/charts/chartmuseum/values.yaml @@ -0,0 +1,221 @@ +replicaCount: 1 +strategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 0 +image: + repository: chartmuseum/chartmuseum + tag: v0.8.0 + pullPolicy: IfNotPresent +env: + open: + # storage backend, can be one of: local, alibaba, amazon, google, microsoft + STORAGE: local + # oss bucket to store charts for alibaba storage backend + STORAGE_ALIBABA_BUCKET: + # prefix to store charts for alibaba storage backend + STORAGE_ALIBABA_PREFIX: + # oss endpoint to store charts for alibaba storage backend + STORAGE_ALIBABA_ENDPOINT: + # server side encryption algorithm for alibaba storage backend, can be one + # of: AES256 or KMS + STORAGE_ALIBABA_SSE: + # s3 bucket to store charts for amazon storage backend + STORAGE_AMAZON_BUCKET: + # prefix to store charts for amazon storage backend + STORAGE_AMAZON_PREFIX: + # region of s3 bucket to store charts + STORAGE_AMAZON_REGION: + # alternative s3 endpoint + STORAGE_AMAZON_ENDPOINT: + # server side encryption algorithm + STORAGE_AMAZON_SSE: + # gcs bucket to store charts for google storage backend + STORAGE_GOOGLE_BUCKET: + # prefix to store charts for google storage backend + STORAGE_GOOGLE_PREFIX: + # container to store charts for microsoft storage backend + STORAGE_MICROSOFT_CONTAINER: + # prefix to store charts for microsoft storage backend + STORAGE_MICROSOFT_PREFIX: + # container to store charts for openstack storage backend + STORAGE_OPENSTACK_CONTAINER: + # prefix to store charts for openstack storage backend + STORAGE_OPENSTACK_PREFIX: + # region of openstack container + STORAGE_OPENSTACK_REGION: + # path to a CA cert bundle for your openstack endpoint + STORAGE_OPENSTACK_CACERT: + # form field which will be queried for the chart file content + CHART_POST_FORM_FIELD_NAME: chart + # form field which will be queried for the provenance file content + PROV_POST_FORM_FIELD_NAME: prov + # levels of nested repos for multitenancy. 
The default depth is 0 (single-tenant server)
+    DEPTH: 0
+    # show debug messages
+    DEBUG: false
+    # output structured logs as json
+    LOG_JSON: true
+    # disable use of index-cache.yaml
+    DISABLE_STATEFILES: false
+    # disable Prometheus metrics
+    DISABLE_METRICS: true
+    # disable all routes prefixed with /api
+    DISABLE_API: true
+    # allow chart versions to be re-uploaded
+    ALLOW_OVERWRITE: false
+    # absolute url for .tgzs in index.yaml
+    CHART_URL:
+    # allow anonymous GET operations when auth is used
+    AUTH_ANONYMOUS_GET: false
+    # sets the base context path
+    CONTEXT_PATH:
+    # parallel scan limit for the repo indexer
+    INDEX_LIMIT: 0
+    # cache store, can be one of: redis (leave blank for inmemory cache)
+    CACHE:
+    # address of Redis service (host:port)
+    CACHE_REDIS_ADDR:
+    # Redis database to be selected after connect
+    CACHE_REDIS_DB: 0
+  field:
+    # POD_IP: status.podIP
+  secret:
+    # username for basic http authentication
+    BASIC_AUTH_USER:
+    # password for basic http authentication
+    BASIC_AUTH_PASS:
+    # GCP service account json file
+    GOOGLE_CREDENTIALS_JSON:
+    # Redis requirepass server configuration
+    CACHE_REDIS_PASSWORD:
+deployment:
+  ## Chartmuseum Deployment annotations
+  annotations: {}
+  # name: value
+  labels: {}
+  # name: value
+  matchlabes: {}
+  # name: value
+replica:
+  ## Chartmuseum Replicas annotations
+  annotations: {}
+  ## Read more about kube2iam to provide access to s3 https://github.com/jtblin/kube2iam
+  # iam.amazonaws.com/role: role-arn
+service:
+  servicename:
+  type: ClusterIP
+  # clusterIP: None
+  externalPort: 8080
+  nodePort:
+  annotations: {}
+  labels: {}
+
+resources: {}
+#  limits:
+#    cpu: 100m
+#    memory: 128Mi
+#  requests:
+#    cpu: 80m
+#    memory: 64Mi
+
+probes:
+  liveness:
+    initialDelaySeconds: 5
+    periodSeconds: 10
+    timeoutSeconds: 1
+    successThreshold: 1
+    failureThreshold: 3
+  readiness:
+    initialDelaySeconds: 5
+    periodSeconds: 10
+    timeoutSeconds: 1
+    successThreshold: 1
+    failureThreshold: 3
+
+serviceAccount:
+  create: false
+  # name:
+
+# UID/GID 1000 is the default user "chartmuseum" used in
+# the container image starting in v0.8.0 and above. This
+# is required for local persistent storage. If your cluster
+# does not allow this, try setting securityContext: {}
+securityContext:
+  fsGroup: 1000
+
+nodeSelector: {}
+
+tolerations: []
+
+affinity: {}
+
+persistence:
+  enabled: false
+  accessMode: ReadWriteOnce
+  size: 8Gi
+  labels: {}
+  # name: value
+  ## A manually managed Persistent Volume and Claim
+  ## Requires persistence.enabled: true
+  ## If defined, PVC must be created manually before volume will be bound
+  # existingClaim:
+
+  ## Chartmuseum data Persistent Volume Storage Class
+  ## If defined, storageClassName: <storageClass>
+  ## If set to "-", storageClassName: "", which disables dynamic provisioning
+  ## If undefined (the default) or set to null, no storageClassName spec is
+  ## set, choosing the default provisioner. (gp2 on AWS, standard on
+  ## GKE, AWS & OpenStack)
+  ##
+  # storageClass: "-"
+  # volumeName:
+  pv:
+    enabled: false
+    pvname:
+    capacity:
+      storage: 8Gi
+    accessMode: ReadWriteOnce
+    nfs:
+      server:
+      path:
+
+## Ingress for load balancer
+ingress:
+  enabled: false
+## Chartmuseum Ingress labels
+##
+# labels:
+#   dns: "route53"
+
+## Chartmuseum Ingress annotations
+##
+# annotations:
+#   kubernetes.io/ingress.class: nginx
+#   kubernetes.io/tls-acme: "true"
+
+## Chartmuseum Ingress hostnames
+## Must be provided if Ingress is enabled
+##
+# hosts:
+#   chartmuseum.domain.com:
+#     - /charts
+#     - /index.yaml
+
+## Chartmuseum Ingress TLS configuration
+## Secrets must be manually created in the namespace
+##
+# tls:
+#   - secretName: chartmuseum-server-tls
+#     hosts:
+#       - chartmuseum.domain.com
+
+# Adding secrets to Tiller is not a great option, so if you want to use an existing
+# secret that contains the JSON file, you can use the following entries
+gcp:
+  secret:
+    enabled: false
+    # Name of the secret that contains the encoded JSON
+    name:
+    # Secret key that holds the JSON value.
+    key: credentials.json