diff --git a/.github/workflows/e2e.yaml b/.github/workflows/e2e.yaml
index 67cd11f1..08edf5bc 100644
--- a/.github/workflows/e2e.yaml
+++ b/.github/workflows/e2e.yaml
@@ -42,9 +42,16 @@ jobs:
echo "> Setting run permissions to OPCT:"
chmod u+x ${OPCT}
- echo "> Running OPCT report:"
+ echo "> Running OPCT report (simple):"
${OPCT} report /tmp/result.tar.gz
+ echo "> Running OPCT report (advanced):"
+ ${OPCT} report /tmp/result.tar.gz \
+ --log-level=debug \
+ --save-to=/tmp/results-data \
+ --skip-server=true \
+ --skip-baseline-api=true
+
e2e-cmd_adm-parse-etcd-logs:
name: "e2e-cmd_adm-parse-etcd-logs"
runs-on: ubuntu-latest
@@ -146,3 +153,36 @@ jobs:
${CUSTOM_BUILD_PATH} adm parse-metrics \
--input ${LOCAL_TEST_DATA} --output /tmp/metrics
tree /tmp/metrics
+
+ e2e-cmd_adm-baseline:
+ name: "e2e-cmd_adm-baseline"
+ runs-on: ubuntu-latest
+ steps:
+ - name: Download artifacts
+ uses: actions/download-artifact@v4
+ with:
+ name: opct-linux-amd64
+ path: /tmp/build/
+
+ - name: Preparing testdata
+ env:
+ OPCT: /tmp/build/opct-linux-amd64
+ run: |
+ echo "> Setting exec permissions to OPCT:"
+ chmod u+x ${OPCT}
+
+ - name: "e2e adm baseline: opct adm baseline (list|get)"
+ env:
+ OPCT: /tmp/build/opct-linux-amd64
+ run: |
+ echo -e "\n\t#>> List latest baseline results"
+ ${OPCT} adm baseline list
+
+ echo -e "\n\t#>> List all baseline results"
+ ${OPCT} adm baseline list --all
+
+ echo -e "\n\t#>> Retrieve a baseline result by name"
+ ${OPCT} adm baseline get --name 4.16_None_latest --dump
+
+ echo -e "\n\t#>> Retrieve a baseline result by release and platform"
+ ${OPCT} adm baseline get --release 4.15 --platform None
diff --git a/.github/workflows/go.yaml b/.github/workflows/go.yaml
index ff154120..54acbf9a 100644
--- a/.github/workflows/go.yaml
+++ b/.github/workflows/go.yaml
@@ -22,9 +22,15 @@ jobs:
name: linters
uses: ./.github/workflows/pre_linters.yaml
+ reviewer:
+ name: reviewer
+ uses: ./.github/workflows/pre_reviewer.yaml
+
go-test:
runs-on: ubuntu-latest
- needs: linters
+ needs:
+ - linters
+ - reviewer
steps:
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
diff --git a/.github/workflows/pre_reviewer.yaml b/.github/workflows/pre_reviewer.yaml
new file mode 100644
index 00000000..414cc3e0
--- /dev/null
+++ b/.github/workflows/pre_reviewer.yaml
@@ -0,0 +1,51 @@
+---
+name: reviewer
+
+on:
+ workflow_call: {}
+
+# The reviewdog actions below require these permissions to annotate issues in the PR.
+permissions:
+ contents: read
+ checks: write
+ issues: read
+ pull-requests: write
+
+env:
+ GO_VERSION: 1.22
+ GOLANGCI_LINT_VERSION: v1.59
+
+jobs:
+ # reviewdog / misspell: https://github.com/reviewdog/action-misspell
+ misspell:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+ - uses: reviewdog/action-misspell@v1
+ with:
+ github_token: ${{ secrets.github_token }}
+ reporter: github-pr-review
+ # level: warning
+ locale: "US"
+
+ # reviewdog / suggester: https://github.com/reviewdog/action-suggester
+ go_fmt:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+ - run: gofmt -w -s .
+ - uses: reviewdog/action-suggester@v1
+ with:
+ tool_name: gofmt
+
+ # https://github.com/reviewdog/action-hadolint
+ # containerfile:
+ # name: runner / hadolint
+ # runs-on: ubuntu-latest
+ # steps:
+ # - name: Check out code
+ # uses: actions/checkout@v4
+ # - name: hadolint
+ # uses: reviewdog/action-hadolint@v1
+ # with:
+ # reporter: github-pr-review
diff --git a/.github/workflows/static-website.yml b/.github/workflows/static-website.yml
index e593d7a0..c83f69a5 100644
--- a/.github/workflows/static-website.yml
+++ b/.github/workflows/static-website.yml
@@ -4,8 +4,12 @@ name: Documentation
on:
# Static pages are build only targeting the main branch
push:
- branches: ["main"]
- paths: ['mkdocs.yml', 'docs/**', 'hack/docs-requirements.txt']
+ branches:
+ - "main"
+ paths:
+ - 'mkdocs.yml'
+ - 'docs/**'
+ - 'hack/docs-requirements.txt'
workflow_dispatch:
diff --git a/.gitignore b/.gitignore
index 77455776..58cf33c4 100644
--- a/.gitignore
+++ b/.gitignore
@@ -5,6 +5,7 @@ kubeconfig
# build files
dist/
+build/
# changelog is generated automaticaly by hack/generate-changelog.sh
# available only in the rendered webpage (built by mkdocs).
diff --git a/Makefile b/Makefile
index 449bf8fd..ba530052 100644
--- a/Makefile
+++ b/Makefile
@@ -5,7 +5,7 @@ export GO111MODULE=on
export CGO_ENABLED=0
BUILD_DIR ?= $(PWD)/build
-IMG ?= quay.io/ocp-cert/opct
+IMG ?= quay.io/opct/opct
VERSION=$(shell git rev-parse --short HEAD)
RELEASE_TAG ?= 0.0.0
BIN_NAME ?= opct
@@ -57,15 +57,30 @@ build-darwin-arm64: build
linux-amd64-container: build-linux-amd64
podman build -t $(IMG):latest -f hack/Containerfile --build-arg=RELEASE_TAG=$(RELEASE_TAG) .
-.PHONY: image-mirror-sonobuoy
-image-mirror-sonobuoy:
- ./hack/image-mirror-sonobuoy/mirror.sh
+# Publish devel binaries (non-official). Must be used only for troubleshooting in development/support.
+.PHONY: publish-amd64-devel
+publish-amd64-devel: build-linux-amd64
+ aws s3 cp $(BUILD_DIR)/opct-linux-amd64 s3://openshift-provider-certification/bin/opct-linux-amd64-devel
-# Utils dev
-.PHONY: update-go
-update-go:
- go get -u
- go mod tidy
+.PHONY: publish-darwin-arm64-devel
+publish-darwin-arm64-devel: build-darwin-arm64
+ aws s3 cp $(BUILD_DIR)/opct-darwin-arm64 s3://openshift-provider-certification/bin/opct-darwin-arm64-devel
+
+.PHONY: publish-devel
+publish-devel: publish-amd64-devel
+publish-devel: publish-darwin-arm64-devel
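+
+# NOTE: the publish-*-devel targets assume the AWS CLI is installed and that
+# credentials with write access to the target S3 bucket are available in the
+# environment (e.g. AWS_PROFILE or AWS_ACCESS_KEY_ID/AWS_SECRET_ACCESS_KEY).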
+
+#
+# Test
+#
+
+.PHONY: test-lint
+test-lint:
+ @echo "Running linting tools"
+ # Download https://github.com/golangci/golangci-lint/releases/tag/v1.59.1
+ golangci-lint run --timeout=10m
+ # yamllint: pip install yamllint
+ yamllint .github/workflows/*.yaml
.PHONY: test
test:
@@ -90,3 +105,13 @@ build-changelog:
.PHONY: build-docs
build-docs: build-changelog
mkdocs build --site-dir ./site
+
+.PHONY: image-mirror-sonobuoy
+image-mirror-sonobuoy:
+ ./hack/image-mirror-sonobuoy/mirror.sh
+
+# Utils dev
+.PHONY: update-go
+update-go:
+ go get -u
+ go mod tidy
diff --git a/cmd/root.go b/cmd/opct/root.go
similarity index 83%
rename from cmd/root.go
rename to cmd/opct/root.go
index 285cc1b5..a235ed8a 100644
--- a/cmd/root.go
+++ b/cmd/opct/root.go
@@ -5,6 +5,7 @@ import (
"os"
log "github.com/sirupsen/logrus"
+ logwriter "github.com/sirupsen/logrus/hooks/writer"
"github.com/spf13/cobra"
"github.com/spf13/viper"
@@ -12,14 +13,16 @@ import (
"github.com/redhat-openshift-ecosystem/provider-certification-tool/pkg/cmd/adm"
"github.com/redhat-openshift-ecosystem/provider-certification-tool/pkg/cmd/get"
+ "github.com/redhat-openshift-ecosystem/provider-certification-tool/pkg/cmd/report"
"github.com/redhat-openshift-ecosystem/provider-certification-tool/pkg/destroy"
- "github.com/redhat-openshift-ecosystem/provider-certification-tool/pkg/report"
"github.com/redhat-openshift-ecosystem/provider-certification-tool/pkg/retrieve"
"github.com/redhat-openshift-ecosystem/provider-certification-tool/pkg/run"
"github.com/redhat-openshift-ecosystem/provider-certification-tool/pkg/status"
"github.com/redhat-openshift-ecosystem/provider-certification-tool/pkg/version"
)
+const logFile = "opct.log"
+
// rootCmd represents the base command when called without any subcommands
var rootCmd = &cobra.Command{
Use: "opct",
@@ -40,6 +43,24 @@ var rootCmd = &cobra.Command{
log.SetFormatter(&log.TextFormatter{
FullTimestamp: true,
})
+
+ log.SetOutput(os.Stdout)
+ fdLog, err := os.OpenFile(logFile, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644)
+ if err != nil {
+ log.Errorf("error opening file %s: %v", logFile, err)
+ } else {
+ log.AddHook(&logwriter.Hook{ // Duplicate all log levels to the local log file (opct.log)
+ Writer: fdLog,
+ LogLevels: []log.Level{
+ log.PanicLevel,
+ log.FatalLevel,
+ log.ErrorLevel,
+ log.WarnLevel,
+ log.InfoLevel,
+ log.DebugLevel,
+ },
+ })
+ }
},
}
diff --git a/data/templates/report/filter.html b/data/templates/report/filter.html
new file mode 100644
index 00000000..68d330c0
--- /dev/null
+++ b/data/templates/report/filter.html
@@ -0,0 +1,445 @@
+
+
+
+
+
+
+ OPCT Filters
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ OPCT Baseline
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ -- none --
+
+
+
+
+ Asc
+ Desc
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Clear
+
+
+
+
+
+
+
+
+ ID
+ Name
+ Status
+ State
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Info
+
+
+ {{ row.detailsShowing ? 'Hide' : 'Details' }}
+
+
+
+
+
+
+ {{ key }}: {{ value }}
+
+
+
+
+
+
+
+ {{ infoModal.content }}
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/data/templates/report/report.css b/data/templates/report/report.css
new file mode 100644
index 00000000..52a4b2f8
--- /dev/null
+++ b/data/templates/report/report.css
@@ -0,0 +1,47 @@
+/* styles: Tab */
+/* Style the tab */
+.tab { overflow: hidden; border: 1px solid #ccc; background-color: #f1f1f1; }
+
+/* Style the buttons that are used to open the tab content */
+.tab button { background-color: inherit; float: left; border: none; outline: none;
+ cursor: pointer; padding: 14px 16px; transition: 0.3s; }
+
+/* Change background color of buttons on hover */
+.tab button:hover { background-color: #ddd; }
+
+/* Create an active/current tablink class */
+.tab button.active { background-color: #ccc; }
+
+/* Style the tab content */
+.tabcontent { display: none; padding: 6px 12px; border: 1px solid #ccc; border-top: none; }
+
+/* styles: OPCT */
+div#nav-col ul { list-style: none; }
+
+data { display: none; }
+#nav-col { max-width: 200px; }
+span.float-right { float: right; }
+table { font-size: 8pt; }
+
+/* Banner */
+.alert {
+ padding: 20px;
+ background-color: #ddd;
+ color: black;
+ text-align: center;
+}
+
+.closebtn {
+ margin-left: 15px;
+ color: white;
+ font-weight: bold;
+ float: right;
+ font-size: 22px;
+ line-height: 20px;
+ cursor: pointer;
+ transition: 0.3s;
+}
+
+.closebtn:hover {
+ color: black;
+}
\ No newline at end of file
diff --git a/data/templates/report/report.html b/data/templates/report/report.html
new file mode 100644
index 00000000..e4948adf
--- /dev/null
+++ b/data/templates/report/report.html
@@ -0,0 +1,1166 @@
+
+
+
+
+
+ OPCT Report
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ OPCT
+ CAMGI
+ Tests
+ Events
+
+
+ Metrics
+
+
+
+
+
+
×
+
Warning! This report is under developer preview.
+ If you encounter any issues or bugs, please report on
Github Project or in
+
Jira . Thanks!
+
+
+
+
+
+
+
+
+
Loading... ({{ loadingMessage }})
+
+
+
+
+
+
+
+
+
+
+
+[[ if .Summary.Features.HasCAMGI ]]
+
+[[ else ]]
+
CAMGI , Cluster Autoscaler Must Gather Investigator, is a tool for examining OKD/OpenShift must-gather
+ records to investigate cluster autoscaler behavior and configuration.
+
Steps to use with OPCT:
+
+ 1) download the tool
+ 2) extract must-gather from artifacts
+
+
+mkdir results && \
+tar xfz artifacts.tar.gz -C results
+
+mkdir results/must-gather && \
+tar xfJ results/plugins/99-openshift-artifacts-collector/results/global/artifacts_must-gather.tar.xz -C results/must-gather
+
+
+
./camgi results/must-gather > results/camgi.html
+
+
4) Open the file results/camgi.html in your browser.
+
+ TODO: collect the camgi.html in the artifacts plugin.
+
+[[ end ]]
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/docs/dev.md b/docs/dev.md
index c4ea41aa..324d65c8 100644
--- a/docs/dev.md
+++ b/docs/dev.md
@@ -146,7 +146,7 @@ make image-mirror-sonobuoy
```
- Check the image in [quay.io/opct/sonobuoy](https://quay.io/repository/opct/sonobuoy?tab=tags)
-**Running the mirror targetting custom repository**
+**Running the mirror targeting a custom repository**
```bash
SONOBUOY_VERSION=v0.56.11 MIRROR_REPO=quay.io/mrbraga/sonobuoy make image-mirror-sonobuoy
diff --git a/docs/devel/report.md b/docs/devel/report.md
new file mode 100644
index 00000000..fd6673ef
--- /dev/null
+++ b/docs/devel/report.md
@@ -0,0 +1,14 @@
+# Report HTML app
+
+The report is built upon the Vue framework, loaded natively in the browser (no bundler/build step).
+
+The pages are reactive, using opct-report.json as the data source.
+
+The opct-report.json file is generated by the `report` command when
+processing the results.
+
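+A minimal usage sketch (hypothetical example; flags taken from this
+repository's e2e workflow, paths are illustrative):
+
+```bash
+# process an execution archive and save the report assets, including
+# opct-report.json, to a local directory without starting the report server
+./opct report ./artifacts.tar.gz --save-to=./results-data --skip-server=true
+```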
+
+References:
+
+- https://vuejs.org/guide/extras/ways-of-using-vue.html
+- https://markus.oberlehner.net/blog/goodbye-webpack-building-vue-applications-without-webpack/
\ No newline at end of file
diff --git a/go.mod b/go.mod
index 3fff86f3..43fbc75e 100644
--- a/go.mod
+++ b/go.mod
@@ -13,13 +13,14 @@ require (
github.com/spf13/viper v1.18.2
github.com/stretchr/testify v1.9.0
github.com/vmware-tanzu/sonobuoy v0.57.1
- github.com/xuri/excelize/v2 v2.8.1
golang.org/x/sync v0.6.0
k8s.io/api v0.30.1
k8s.io/apimachinery v0.30.1
k8s.io/client-go v0.30.1
)
+require github.com/aws/aws-sdk-go v1.55.3
+
require (
github.com/go-echarts/go-echarts/v2 v2.3.3
github.com/montanaflynn/stats v0.7.1
@@ -28,6 +29,11 @@ require (
k8s.io/utils v0.0.0-20240102154912-e7106e64919e
)
+require (
+ github.com/hashicorp/go-retryablehttp v0.7.7
+ github.com/jedib0t/go-pretty/v6 v6.5.9
+)
+
require (
github.com/briandowns/spinner v1.23.0 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect
@@ -48,28 +54,29 @@ require (
github.com/google/uuid v1.6.0 // indirect
github.com/gorilla/mux v1.8.1 // indirect
github.com/gorilla/websocket v1.5.1 // indirect
+ github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
github.com/hashicorp/go-version v1.6.0 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
- github.com/imdario/mergo v0.3.16 // indirect
+ github.com/imdario/mergo v0.3.13 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
+ github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/magiconair/properties v1.8.7 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
+ github.com/mattn/go-runewidth v0.0.15 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/moby/spdystream v0.2.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
- github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
github.com/pelletier/go-toml/v2 v2.1.1 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
- github.com/richardlehane/mscfb v1.0.4 // indirect
- github.com/richardlehane/msoleps v1.0.3 // indirect
github.com/rifflock/lfshook v0.0.0-20180920164130-b9218ef580f5 // indirect
+ github.com/rivo/uniseg v0.2.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/sagikazarmark/locafero v0.4.0 // indirect
github.com/sagikazarmark/slog-shim v0.1.0 // indirect
@@ -80,14 +87,11 @@ require (
github.com/spf13/cast v1.6.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/subosito/gotenv v1.6.0 // indirect
- github.com/xuri/efp v0.0.0-20231025114914-d1ff6096ae53 // indirect
- github.com/xuri/nfp v0.0.0-20230919160717-d98342af3f05 // indirect
go.uber.org/multierr v1.11.0 // indirect
- golang.org/x/crypto v0.21.0 // indirect
golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 // indirect
golang.org/x/net v0.23.0 // indirect
golang.org/x/oauth2 v0.17.0 // indirect
- golang.org/x/sys v0.18.0 // indirect
+ golang.org/x/sys v0.20.0 // indirect
golang.org/x/term v0.18.0 // indirect
golang.org/x/text v0.14.0 // indirect
golang.org/x/time v0.5.0 // indirect
@@ -101,5 +105,5 @@ require (
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
- sigs.k8s.io/yaml v1.4.0 // indirect
+ sigs.k8s.io/yaml v1.3.0 // indirect
)
diff --git a/go.sum b/go.sum
index e1e0d7aa..98a02443 100644
--- a/go.sum
+++ b/go.sum
@@ -1,5 +1,7 @@
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
+github.com/aws/aws-sdk-go v1.55.3 h1:0B5hOX+mIx7I5XPOrjrHlKSDQV/+ypFZpIHOx5LOk3E=
+github.com/aws/aws-sdk-go v1.55.3/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
github.com/briandowns/spinner v1.23.0 h1:alDF2guRWqa/FOZZYWjlMIx2L6H0wyewPxo/CH4Pt2A=
github.com/briandowns/spinner v1.23.0/go.mod h1:rPG4gmXeN3wQV/TsAY4w8lPdIM6RX3yqeBQJSrbXjuE=
github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM=
@@ -46,8 +48,8 @@ github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec=
-github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20211214055906-6f57359322fd h1:1FjCyPC+syAzJ5/2S8fqdZK1R22vvA0J7JZKcuOIQ7Y=
+github.com/google/pprof v0.0.0-20211214055906-6f57359322fd/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
@@ -55,14 +57,26 @@ github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWS
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY=
github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY=
+github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
+github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
+github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k=
+github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
+github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU=
+github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk=
github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek=
github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
-github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
-github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
+github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk=
+github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
+github.com/jedib0t/go-pretty/v6 v6.5.9 h1:ACteMBRrrmm1gMsXe9PSTOClQ63IXDUt03H5U+UV8OU=
+github.com/jedib0t/go-pretty/v6 v6.5.9/go.mod h1:zbn98qrYlh95FIhwwsbIip0LYpwSG8SUOScs+v9/t0E=
+github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
+github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
+github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
+github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
@@ -84,6 +98,8 @@ github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovk
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
+github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U=
+github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8=
@@ -93,8 +109,6 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
-github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw=
-github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8=
github.com/montanaflynn/stats v0.7.1 h1:etflOAAHORrCC44V+aR6Ftzort912ZU+YLiSTuV8eaE=
github.com/montanaflynn/stats v0.7.1/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
@@ -116,13 +130,10 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/richardlehane/mscfb v1.0.4 h1:WULscsljNPConisD5hR0+OyZjwK46Pfyr6mPu5ZawpM=
-github.com/richardlehane/mscfb v1.0.4/go.mod h1:YzVpcZg9czvAuhk9T+a3avCpcFPMUWm7gK3DypaEsUk=
-github.com/richardlehane/msoleps v1.0.1/go.mod h1:BWev5JBpU9Ko2WAgmZEuiz4/u3ZYTKbjLycmwiWUfWg=
-github.com/richardlehane/msoleps v1.0.3 h1:aznSZzrwYRl3rLKRT3gUk9am7T/mLNSnJINvN0AQoVM=
-github.com/richardlehane/msoleps v1.0.3/go.mod h1:BWev5JBpU9Ko2WAgmZEuiz4/u3ZYTKbjLycmwiWUfWg=
github.com/rifflock/lfshook v0.0.0-20180920164130-b9218ef580f5 h1:mZHayPoR0lNmnHyvtYjDeq0zlVHn9K/ZXoy17ylucdo=
github.com/rifflock/lfshook v0.0.0-20180920164130-b9218ef580f5/go.mod h1:GEXHk5HgEKCvEIIrSpFI3ozzG5xOKA2DVlEX/gGnewM=
+github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
+github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
@@ -165,12 +176,6 @@ github.com/ulikunitz/xz v0.5.11 h1:kpFauv27b6ynzBNT/Xy+1k+fK4WswhN/6PN5WhFAGw8=
github.com/ulikunitz/xz v0.5.11/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
github.com/vmware-tanzu/sonobuoy v0.57.1 h1:3P2/P5WOJhyspsCkx58eDr0hsBmy8AJS1KYzjo5Epic=
github.com/vmware-tanzu/sonobuoy v0.57.1/go.mod h1:TevlYITSKi7JvgRPhShECtQiJpDErY+8FHehlkgen9c=
-github.com/xuri/efp v0.0.0-20231025114914-d1ff6096ae53 h1:Chd9DkqERQQuHpXjR/HSV1jLZA6uaoiwwH3vSuF3IW0=
-github.com/xuri/efp v0.0.0-20231025114914-d1ff6096ae53/go.mod h1:ybY/Jr0T0GTCnYjKqmdwxyxn2BQf2RcQIIvex5QldPI=
-github.com/xuri/excelize/v2 v2.8.1 h1:pZLMEwK8ep+CLIUWpWmvW8IWE/yxqG0I1xcN6cVMGuQ=
-github.com/xuri/excelize/v2 v2.8.1/go.mod h1:oli1E4C3Pa5RXg1TBXn4ENCXDV5JUMlBluUhG7c+CEE=
-github.com/xuri/nfp v0.0.0-20230919160717-d98342af3f05 h1:qhbILQo1K3mphbwKh1vNm4oGezE1eF9fQWmNiIpSfI4=
-github.com/xuri/nfp v0.0.0-20230919160717-d98342af3f05/go.mod h1:WwHg+CVyzlv/TX9xqBFXEZAuxOPxn2k1GNHwG41IIUQ=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
@@ -180,12 +185,8 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA=
-golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 h1:LfspQV/FYTatPTr/3HzIcmiUFH7PGP+OQ6mgDYo3yuQ=
golang.org/x/exp v0.0.0-20240222234643-814bf88cf225/go.mod h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc=
-golang.org/x/image v0.14.0 h1:tNgSxAFe3jC4uYqvZdTr84SZoM1KfwdC9SKIFrLjFn4=
-golang.org/x/image v0.14.0/go.mod h1:HUYqC05R2ZcZ3ejNQsIHQDQiwWM4JBqmm6MKANTp4LE=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
@@ -215,8 +216,8 @@ golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4=
-golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y=
+golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8=
@@ -257,6 +258,7 @@ gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
k8s.io/api v0.30.1 h1:kCm/6mADMdbAxmIh0LBjS54nQBE+U4KmbCfIkF5CpJY=
@@ -277,5 +279,5 @@ sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMm
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
-sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
-sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
+sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo=
+sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=
diff --git a/hack/Containerfile b/hack/Containerfile
index 1177414e..f2aab3aa 100644
--- a/hack/Containerfile
+++ b/hack/Containerfile
@@ -6,7 +6,7 @@ WORKDIR /go/src/github.com/redhat-openshift-ecosystem/provider-certification-too
COPY . .
RUN make build-linux-amd64 RELEASE_TAG=${RELEASE_TAG}
-FROM registry.access.redhat.com/ubi8/ubi-minimal:8.8-860
+FROM quay.io/fedora/fedora-minimal:40
LABEL io.k8s.display-name="OPCT" \
io.k8s.description="OpenShift/OKD Conformance Tool is designed to run conformance suites to validate custom installations." \
io.opct.tags="opct,conformance,openshift,tests,e2e" \
@@ -14,6 +14,6 @@ LABEL io.k8s.display-name="OPCT" \
COPY --from=builder \
/go/src/github.com/redhat-openshift-ecosystem/provider-certification-tool/build/opct-linux-amd64 \
- /usr/bin/
+ /usr/bin/opct
-CMD ["/usr/bin/opct-linux-amd64"]
+CMD ["/usr/bin/opct"]
diff --git a/hack/Containerfile.ci b/hack/Containerfile.ci
deleted file mode 100644
index f0531e6f..00000000
--- a/hack/Containerfile.ci
+++ /dev/null
@@ -1,9 +0,0 @@
-FROM registry.access.redhat.com/ubi8/ubi-minimal:8.8-860
-LABEL io.k8s.display-name="OPCT" \
- io.k8s.description="OpenShift/OKD Conformance Tool is designed to run conformance suites to validate custom installations." \
- io.opct.tags="opct,conformance,openshift,tests,e2e" \
- io.opct.os="linux" io.opct.arch="amd64"
-
-COPY ./openshift-provider-cert-linux-amd64 /usr/bin/
-
-CMD ["/usr/bin/openshift-provider-cert-linux-amd64"]
\ No newline at end of file
diff --git a/hack/verify-codegen.sh b/hack/verify-codegen.sh
deleted file mode 100755
index bafc9041..00000000
--- a/hack/verify-codegen.sh
+++ /dev/null
@@ -1,16 +0,0 @@
-#!/bin/sh
-
-if [ "$IS_CONTAINER" != "" ]; then
- go install github.com/go-bindata/go-bindata/go-bindata@latest
- set -xe
- ./hack/update-generated-bindata.sh
- set +ex
- git diff --exit-code
-else
- podman run --rm \
- --env IS_CONTAINER=TRUE \
- --volume "${PWD}:/go/src/github.com/redhat-openshift-ecosystem/provider-certification-tool:z" \
- --workdir /go/src/github.com/redhat-openshift-ecosystem/provider-certification-tool \
- docker.io/golang:1.19 \
- ./hack/verify-codegen.sh "${@}"
-fi
diff --git a/internal/extractor/extractor.go b/internal/extractor/extractor.go
new file mode 100644
index 00000000..2f00c7e1
--- /dev/null
+++ b/internal/extractor/extractor.go
@@ -0,0 +1,4 @@
+package extractor
+
+// TODO: Provide interface to extract data from different
+// sources and formats.
diff --git a/internal/opct/archive/errorcounter.go b/internal/opct/archive/errorcounter.go
index 3ebbff7b..69921156 100644
--- a/internal/opct/archive/errorcounter.go
+++ b/internal/opct/archive/errorcounter.go
@@ -2,10 +2,30 @@ package archive
import (
"regexp"
-
- "github.com/redhat-openshift-ecosystem/provider-certification-tool/internal/openshift/ci"
)
+// CommonErrorPatterns is a list of common error patterns to be used to
+// discover/calculate the error counter within logs in archives (must-gather,
+// conformance execution) by OPCT.
+// Source: https://github.com/openshift/release/blob/master/core-services/prow/02_config/_config.yaml#L84
+var CommonErrorPatterns = []string{
+ // `error:`,
+ `Failed to push image`,
+ `Failed`,
+ `timed out`,
+ `'ERROR:'`,
+ `ERRO\[`,
+ `^error:`,
+ `(^FAIL|FAIL: |Failure \[)\b`,
+ `panic(\.go)?:`,
+ `"level":"error"`,
+ `level=error`,
+ `level":"fatal"`,
+ `level=fatal`,
+ `│ Error:`,
+ `client connection lost`,
+}
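+
+// Example (hypothetical usage): count occurrences of the common patterns in a
+// log buffer; only matched patterns are stored in the resulting counter.
+//
+//	buf := "level=error failed to sync\nFailed to push image registry.local/img"
+//	counters := NewErrorCounter(&buf, CommonErrorPatterns)
+//	// counters["level=error"] == 1, counters["Failed to push image"] == 1, ...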
+
// ErrorCounter is a map to handle a generic error counter, indexed by error pattern.
type ErrorCounter map[string]int
@@ -40,7 +60,7 @@ func NewErrorCounter(buf *string, pattern []string) ErrorCounter {
// in a single containging all keys from both maps, and values accumulated
// by key.
func MergeErrorCounters(ec1, ec2 *ErrorCounter) *ErrorCounter {
- new := make(ErrorCounter, len(ci.CommonErrorPatterns))
+ new := make(ErrorCounter, len(CommonErrorPatterns))
if ec1 == nil {
if ec2 == nil {
return &new
diff --git a/internal/opct/archive/errorcounter_test.go b/internal/opct/archive/errorcounter_test.go
index 4971c979..76991ae4 100644
--- a/internal/opct/archive/errorcounter_test.go
+++ b/internal/opct/archive/errorcounter_test.go
@@ -4,7 +4,6 @@ import (
"reflect"
"testing"
- "github.com/redhat-openshift-ecosystem/provider-certification-tool/internal/openshift/ci"
"k8s.io/utils/ptr"
)
@@ -84,7 +83,7 @@ func TestNewErrorCounter(t *testing.T) {
buf: ptr.To(`this buffer has one error,
and another 'ERROR:', also crashs with 'panic.go:12:'.
Some messages of Failed to push image`),
- pattern: ci.CommonErrorPatterns,
+ pattern: CommonErrorPatterns,
},
want: ErrorCounter{
`'ERROR:'`: 1, `Failed`: 1, `Failed to push image`: 1,
@@ -95,7 +94,7 @@ func TestNewErrorCounter(t *testing.T) {
name: "no counters",
args: args{
buf: ptr.To(`this buffer has nothing to parse`),
- pattern: ci.CommonErrorPatterns,
+ pattern: CommonErrorPatterns,
},
want: nil,
},
diff --git a/internal/opct/archive/metaconfig.go b/internal/opct/archive/metaconfig.go
index c9984d35..30aa73d7 100644
--- a/internal/opct/archive/metaconfig.go
+++ b/internal/opct/archive/metaconfig.go
@@ -7,6 +7,7 @@ import (
sbconfig "github.com/vmware-tanzu/sonobuoy/pkg/config"
)
+// MetaConfigSonobuoy is the sonobuoy configuration type.
type MetaConfigSonobuoy = sbconfig.Config
// ParseMetaConfig extract relevant attributes to export to data keeper.
diff --git a/internal/opct/archive/metalog.go b/internal/opct/archive/metalog.go
index cc371bb3..1a20928a 100644
--- a/internal/opct/archive/metalog.go
+++ b/internal/opct/archive/metalog.go
@@ -12,6 +12,16 @@ import (
log "github.com/sirupsen/logrus"
)
+// pluginNameXX are used to calculate the time spent in each plugin.
+const (
+ pluginName05 = "05-openshift-cluster-upgrade"
+ pluginName10 = "10-openshift-kube-conformance"
+ pluginName20 = "20-openshift-conformance-validated"
+ pluginName80 = "80-openshift-tests-replay"
+ pluginName99 = "99-openshift-artifacts-collector"
+)
+
+// MetaLogItem is the struct that holds the items from aggregator's meta log file.
type MetaLogItem struct {
Level string `json:"level,omitempty"`
Message string `json:"msg,omitempty"`
@@ -88,14 +98,16 @@ func ParseMetaLogs(logs []string) []*RuntimeInfoItem {
pluginFinishedAt[logitem.PluginName] = logitem.Time
var delta string
switch logitem.PluginName {
- case "05-openshift-cluster-upgrade":
+ case pluginName05:
delta = diffDate(pluginStartedAt[logitem.PluginName], logitem.Time)
- case "10-openshift-kube-conformance":
- delta = diffDate(pluginFinishedAt["05-openshift-cluster-upgrade"], logitem.Time)
- case "20-openshift-conformance-validated":
- delta = diffDate(pluginFinishedAt["10-openshift-kube-conformance"], logitem.Time)
- case "99-openshift-artifacts-collector":
- delta = diffDate(pluginFinishedAt["20-openshift-conformance-validated"], logitem.Time)
+ case pluginName10:
+ delta = diffDate(pluginFinishedAt[pluginName05], logitem.Time)
+ case pluginName20:
+ delta = diffDate(pluginFinishedAt[pluginName10], logitem.Time)
+ case pluginName80:
+ delta = diffDate(pluginFinishedAt[pluginName20], logitem.Time)
+ case pluginName99:
+ delta = diffDate(pluginFinishedAt[pluginName80], logitem.Time)
}
runtimeLogs = append(runtimeLogs, &RuntimeInfoItem{
Name: fmt.Sprintf("plugin finished %s", logitem.PluginName),
diff --git a/internal/opct/archive/metalog_test.go b/internal/opct/archive/metalog_test.go
index 03d12979..7d4df465 100644
--- a/internal/opct/archive/metalog_test.go
+++ b/internal/opct/archive/metalog_test.go
@@ -9,6 +9,7 @@ import (
"strings"
"testing"
+ "github.com/google/go-cmp/cmp"
opcttests "github.com/redhat-openshift-ecosystem/provider-certification-tool/test"
)
@@ -70,7 +71,9 @@ func TestParseMetaLogs(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := ParseMetaLogs(tt.args.logs); !reflect.DeepEqual(got, tt.want) {
- t.Errorf("ParseMetaLogs() = %v, want %v", got, tt.want)
+ if !cmp.Equal(got, tt.want) {
+ t.Errorf("ParseMetaLogs() mismatch (-want +got):\n%s", cmp.Diff(tt.want, got))
+ }
}
})
}
diff --git a/internal/opct/archive/runtime.go b/internal/opct/archive/runtime.go
index a18e26d4..2514fde0 100644
--- a/internal/opct/archive/runtime.go
+++ b/internal/opct/archive/runtime.go
@@ -1,5 +1,7 @@
package archive
+// RuntimeInfoItem is a generic struct to hold runtime information,
+// it is used to export relevant information to the data keeper.
type RuntimeInfoItem struct {
// Name holds the name of the item/attribute.
Name string `json:"name"`
diff --git a/internal/opct/metrics/timers.go b/internal/opct/metrics/timers.go
new file mode 100644
index 00000000..7f75f76c
--- /dev/null
+++ b/internal/opct/metrics/timers.go
@@ -0,0 +1,51 @@
+package metrics
+
+import "time"
+
+// Timer is a struct used internally to handle execution markers,
+// used to calculate the total execution time for some parsers/checkpoints,
+// over the report flow.
+type Timer struct {
+ start time.Time
+
+ // Total is the elapsed time, in seconds, from the start timestamp.
+ Total float64 `json:"seconds"`
+}
+
+// Timers is a struct used internally to handle execution markers,
+// used to check the total execution time for some parsers.
+type Timers struct {
+ Timers map[string]*Timer `json:"Timers,omitempty"`
+ last string
+}
+
+func NewTimers() *Timers {
+ ts := Timers{Timers: make(map[string]*Timer)}
+ return &ts
+}
+
+// set persists a timer, updating it if it already exists.
+// The current timestamp is used when a new item is created.
+func (ts *Timers) set(k string) {
+ if _, ok := ts.Timers[k]; !ok {
+ ts.Timers[k] = &Timer{start: time.Now()}
+ } else {
+ stop := time.Now()
+ ts.Timers[k].Total = stop.Sub(ts.Timers[k].start).Seconds()
+ }
+}
+
+// Set is the external interface to create/update a timer: it stops the
+// previously started timer (lap) and starts a new one for the given key.
+func (ts *Timers) Set(k string) {
+ if ts.last != "" {
+ ts.set(ts.last)
+ }
+ ts.set(k)
+ ts.last = k
+}
+
+// Add method creates a new timer metric.
+func (ts *Timers) Add(k string) {
+ ts.set(k)
+}
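+
+// Example (hypothetical usage; key names are illustrative):
+//
+//	ts := NewTimers()
+//	ts.Add("total")            // first call with a key starts the timer
+//	ts.Set("parse-provider")   // starts the "parse-provider" lap
+//	ts.Set("parse-baseline")   // stops "parse-provider" and starts "parse-baseline"
+//	ts.Set("apply-filters")    // stops "parse-baseline" and starts "apply-filters"
+//	ts.Add("total")            // second call with the same key stops it and records Total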
diff --git a/internal/opct/plugin/plugin.go b/internal/opct/plugin/plugin.go
new file mode 100644
index 00000000..5471f647
--- /dev/null
+++ b/internal/opct/plugin/plugin.go
@@ -0,0 +1,199 @@
+package plugin
+
+import (
+ "github.com/redhat-openshift-ecosystem/provider-certification-tool/internal/opct/archive"
+)
+
+const (
+ PluginNameOpenShiftUpgrade = "05-openshift-cluster-upgrade"
+ PluginNameKubernetesConformance = "10-openshift-kube-conformance"
+ PluginNameOpenShiftConformance = "20-openshift-conformance-validated"
+ PluginNameConformanceReplay = "80-openshift-tests-replay"
+ PluginNameArtifactsCollector = "99-openshift-artifacts-collector"
+
+ // Old plugin names (prior to v0.2), kept for compatibility.
+ PluginOldNameKubernetesConformance = "openshift-kube-conformance"
+ PluginOldNameOpenShiftConformance = "openshift-conformance-validated"
+)
+
+type PluginDefinition struct {
+ PluginImage string `json:"pluginImage"`
+ SonobuoyImage string `json:"sonobuoyImage"`
+ Name string `json:"name"`
+}
+
+// OPCTPluginSummary handle plugin details
+type OPCTPluginSummary struct {
+ Name string
+ NameAlias string
+ Status string
+ Total int64
+ Passed int64
+ Failed int64
+ Timeout int64
+ Skipped int64
+
+ // DocumentationReference
+ Documentation *TestDocumentation
+
+ // Definition
+ Definition *PluginDefinition
+
+ // ErrorCounters is the map with details for each failure by regex expression.
+ ErrorCounters archive.ErrorCounter `json:"errorCounters,omitempty"`
+
+ // FailedItems is the map with details for each failure
+ Tests Tests
+
+ // FailedList is the list of tests failures on the original execution
+ FailedList []string
+
+ // FailedFiltered is the list of failures **after** the filter pipeline.
+ // Those tests must raise attention and alerts.
+ FailedFiltered []string
+
+ // Filter SuiteOnly:
+ // FailedFilter1 is the list of failures (A) included only in the original suite (B): A INTERSECTION B
+ // FailedFilterSuite []string
+ FailedFilter1 []string
+ FailedExcludedFilter1 []string
+
+ // Filter Baseline:
+ // FailedFilter2 is the list of failures (A) excluding the baseline(B): A EXCLUDE B
+ // FailedFilterBaseline []string
+ FailedFilter2 []string
+ FailedExcludedFilter2 []string
+
+ // Filter FlakeAPI:
+ // FailedFilter3 is the priority list of failures - those not reported as flaky in OpenShift CI.
+ // FailedFilterPrio []string
+ FailedFilter3 []string
+ FailedExcludedFilter3 []string
+
+ // Filter BaselineAPI:
+ // FailedFilter4 is the list after excluding known failures from OPCT CI.
+ // This filter is similar BaseLine, but it's a list of failures collected from
+ // processed data (another OPCT execution) on OPCT CI after being processed by OPCT report,
+ // exposed through the OPCT API. This list is used to exclude known failures,
+ // to prevent false positives on the review pipeline.
+ // TODO(mtulio): deprecate Filter2 when Filter4 is accurate. Baseline results should
+ // not use Filter2.
+ FailedFilter4 []string
+ FailedExcludedFilter4 []string
+
+ // Filter KnownFailures:
+ // FailedFilter5 is the list of failures that are explicitly removed from pipeline.
+ // It should not be used to exclude failures of e2e tests included in the suite from the report,
+ // but to remove known flakes/failures that are not relevant to the pipeline.
+ // Example: '[sig-arch] External binary usage'
+ // Filter5KnownFailures []string
+ FailedFilter5 []string
+ FailedExcludedFilter5 []string
+
+ // Filter Replay:
+ // FailedFilter6 is the list of failures which also failed in the second shot: replay plugin/step.
+ FailedFilter6 []string
+ FailedExcludedFilter6 []string
+}
+
+func (ps *OPCTPluginSummary) calculateErrorCounter() *archive.ErrorCounter {
+ if ps.ErrorCounters == nil {
+ ps.ErrorCounters = make(archive.ErrorCounter, len(archive.CommonErrorPatterns))
+ }
+ for _, test := range ps.Tests {
+ if test.ErrorCounters == nil {
+ continue
+ }
+ for kerr, errName := range test.ErrorCounters {
+ if _, ok := ps.ErrorCounters[kerr]; !ok {
+ ps.ErrorCounters[kerr] = errName
+ } else {
+ ps.ErrorCounters[kerr] += errName
+ }
+ }
+ }
+ return &ps.ErrorCounters
+}
+
+func (ps *OPCTPluginSummary) GetErrorCounters() *archive.ErrorCounter {
+ return ps.calculateErrorCounter()
+}
+
+const (
+ // FilterNameSuiteOnly is the filter to remove failures of tests not included in the suite.
+ FilterNameSuiteOnly = "suite-only"
+
+ // FilterNameKF is the filter to exclude known failures from the OPCT CI.
+ FilterNameKF = "known-failures"
+
+ // FilterNameBaseline is the filter to exclude failures from the baseline archive (CLI arg).
+ FilterNameBaseline = "baseline"
+
+ // FilterNameFlaky is the filter to exclude flaky tests from the report based on the Sippy API.
+ FilterNameFlaky = "flaky"
+
+ // FilterNameReplay is the filter to exclude failures which are passing the replay step.
+ FilterNameReplay = "replay"
+
+ // FilterNameFinalCopy is the last step in the filter pipeline to copy the final list of failures
+ // to be used to compose the final report/data.
+ FilterNameFinalCopy = "copy"
+)
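+
+// NOTE: the filter names above are applied as a pipeline. Inferred from
+// GetPreviousFailuresByFilterID below, the order is roughly:
+// suite-only -> known-failures -> replay -> baseline -> baseline API -> copy,
+// where each step consumes the failures that survived the previous one.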
+
+// GetFailuresByFilterID returns the lists of failures (kept and excluded) handled by the given filter ID.
+func (ps *OPCTPluginSummary) GetFailuresByFilterID(filterID string) ([]string, []string) {
+ switch filterID {
+ case FilterNameSuiteOnly:
+ return ps.FailedFilter1, ps.FailedExcludedFilter1
+ case FilterNameBaseline:
+ return ps.FailedFilter2, ps.FailedExcludedFilter2
+ case FilterNameKF:
+ return ps.FailedFilter5, ps.FailedExcludedFilter5
+ case FilterNameReplay:
+ return ps.FailedFilter6, ps.FailedExcludedFilter6
+ }
+ return nil, nil
+}
+
+// SetFailuresByFilterID sets the lists of failures (kept and excluded) handled by the given filter ID.
+func (ps *OPCTPluginSummary) SetFailuresByFilterID(filterID string, failures []string, excluded []string) {
+ switch filterID {
+ case FilterNameSuiteOnly:
+ ps.FailedFilter1 = failures
+ ps.FailedExcludedFilter1 = excluded
+ return
+ case FilterNameBaseline:
+ ps.FailedFilter2 = failures
+ ps.FailedExcludedFilter2 = excluded
+ return
+ case FilterNameKF:
+ ps.FailedFilter5 = failures
+ ps.FailedExcludedFilter5 = excluded
+ return
+ case FilterNameReplay:
+ ps.FailedFilter6 = failures
+ ps.FailedExcludedFilter6 = excluded
+ return
+ }
+}
+
+// GetPreviousFailuresByFilterID returns the list of failures from the previous plugin
+// in the pipeline, by providing the current filter ID.
+// TODO: move the filter logic to a dedicated structure using linked stack/list,
+// allowing each plugin having a dynamic list of filters, instead of forcing the same
+// pipeline across all plugins.
+func (ps *OPCTPluginSummary) GetPreviousFailuresByFilterID(filterID string) []string {
+ switch filterID {
+ case FilterNameSuiteOnly:
+ return nil
+ case FilterNameKF:
+ return ps.FailedFilter1 // SuiteOnly
+ case FilterNameReplay:
+ return ps.FailedFilter5 // KnownFailures
+ case FilterNameBaseline:
+ return ps.FailedFilter6 // Replay
+ case FilterNameFinalCopy:
+ return ps.FailedFilter4 // BaselineAPI
+ }
+ return nil
+}
diff --git a/internal/opct/plugin/sortedDict.go b/internal/opct/plugin/sortedDict.go
new file mode 100644
index 00000000..00a53a25
--- /dev/null
+++ b/internal/opct/plugin/sortedDict.go
@@ -0,0 +1,15 @@
+package plugin
+
+// SortedDict stores and sorts the key/value map to be ranked by value.
+type SortedDict struct {
+ Key string
+ Value int
+}
+
+// SortedList stores the list of key/value map, implementing interfaces
+// to sort/rank a map strings with integers as values.
+type SortedList []SortedDict
+
+func (p SortedList) Len() int { return len(p) }
+func (p SortedList) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+func (p SortedList) Less(i, j int) bool { return p[i].Value < p[j].Value }
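+
+// Example (hypothetical usage): rank a tag counter map by value, highest first.
+//
+//	rank := SortedList{{Key: "sig-storage", Value: 3}, {Key: "sig-network", Value: 7}}
+//	sort.Sort(sort.Reverse(rank))
+//	// rank[0].Key == "sig-network"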
diff --git a/internal/opct/plugin/tags.go b/internal/opct/plugin/tags.go
new file mode 100644
index 00000000..96c40fc9
--- /dev/null
+++ b/internal/opct/plugin/tags.go
@@ -0,0 +1,83 @@
+package plugin
+
+import (
+ "fmt"
+ "regexp"
+ "sort"
+)
+
+// tagRegex is the regex to extract the tag from a test name.
+// The 'tag' is the first bracket content from a test name.
+const tagRegex = `^\[([a-zA-Z0-9-]*)\]`
+
+// TestTags stores the test tags map with its counters.
+// The test tag is the word extracted from the first bracket of a test name.
+// Example test name: '[sig-provider] test name', where 'sig-provider' is the tag.
+type TestTags map[string]int
+
+// NewTestTagsEmpty creates the TestTags with a specific size, to be populated later.
+func NewTestTagsEmpty(size int) TestTags {
+ tt := make(TestTags, size)
+ tt["total"] = 0
+ return tt
+}
+
+// NewTestTags creates the TestTags populating the tag values and counters.
+func NewTestTags(tests []*string) TestTags {
+ tt := make(TestTags, len(tests))
+ tt["total"] = 0
+ tt.addBatch(tests)
+ return tt
+}
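+
+// Example (hypothetical usage; test names are illustrative):
+//
+//	t1 := "[sig-storage] volume should mount"
+//	t2 := "[sig-network] service should answer"
+//	tt := NewTestTags([]*string{&t1, &t2})
+//	// tt["sig-storage"] == 1, tt["sig-network"] == 1, tt["total"] == 2
+//	fmt.Println(tt.ShowSorted())
+//	// e.g.: [total=2] [sig-network=1 (50.00%)] [sig-storage=1 (50.00%)]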
+
+// Add extracts the tag from the test name, stores it, and increments the counters.
+func (tt TestTags) Add(test *string) {
+ reT := regexp.MustCompile(tagRegex)
+ match := reT.FindStringSubmatch(*test)
+ if len(match) > 0 {
+ if _, ok := tt[match[1]]; !ok {
+ tt[match[1]] = 1
+ } else {
+ tt[match[1]] += 1
+ }
+ }
+ tt["total"] += 1
+}
+
+// addBatch receives a list of test names (string pointer slice) and stores them.
+func (tt TestTags) addBatch(kn []*string) {
+ for _, test := range kn {
+ tt.Add(test)
+ }
+}
+
+// sortRev creates a rank of tags sorted by counter in descending order.
+func (tt TestTags) sortRev() []SortedDict {
+ tags := make(SortedList, len(tt))
+ i := 0
+ for k, v := range tt {
+ tags[i] = SortedDict{k, v}
+ i++
+ }
+ sort.Sort(sort.Reverse(tags))
+ return tags
+}
+
+// ShowSorted returns a string with the rank of tags.
+func (tt TestTags) ShowSorted() string {
+ tags := tt.sortRev()
+ msg := ""
+ for _, k := range tags {
+ if k.Key == "total" {
+ msg = fmt.Sprintf("[%v=%v]", k.Key, k.Value)
+ continue
+ }
+ msg = fmt.Sprintf("%s [%v=%s]", msg, k.Key, UtilsCalcPercStr(int64(k.Value), int64(tt["total"])))
+ }
+ return msg
+}
+
+// UtilsCalcPercStr receives the numerator and denominator and returns the numerator and percentage as a string.
+func UtilsCalcPercStr(num, den int64) string {
+ return fmt.Sprintf("%d (%.2f%%)", num, (float64(num)/float64(den))*100)
+}
diff --git a/internal/opct/plugin/tags_test.go b/internal/opct/plugin/tags_test.go
new file mode 100644
index 00000000..02ff4363
--- /dev/null
+++ b/internal/opct/plugin/tags_test.go
@@ -0,0 +1,46 @@
+package plugin
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func validTests(testDesc *string) []*string {
+ tests := []*string{}
+ prefix := "tag"
+ max := 5
+
+ for i := 1; i <= max; i++ {
+ for x := (max - i); x >= 0; x-- {
+ test := fmt.Sprintf("[%s-%d] %s ID %d", prefix, i, *testDesc, i)
+ tests = append(tests, &test)
+ }
+ }
+ return tests
+}
+
+func TestShowSorted(t *testing.T) {
+ desc := "TestShowSorted"
+ cases := []struct {
+ name string
+ tests []*string
+ want string
+ }{
+ {
+ name: "empty",
+ tests: validTests(&desc),
+ want: "[total=15] [tag-1=5 (33.33%)] [tag-2=4 (26.67%)] [tag-3=3 (20.00%)] [tag-4=2 (13.33%)] [tag-5=1 (6.67%)]",
+ },
+ }
+
+ for _, tc := range cases {
+ t.Run(tc.name, func(t *testing.T) {
+ // fmt.Printf("%v\n", tc.tests)
+ testTags := NewTestTags(tc.tests)
+ msg := testTags.ShowSorted()
+ assert.Equal(t, tc.want, msg, "unexpected message")
+ })
+ }
+}
diff --git a/internal/opct/plugin/test.go b/internal/opct/plugin/test.go
new file mode 100644
index 00000000..4840b212
--- /dev/null
+++ b/internal/opct/plugin/test.go
@@ -0,0 +1,100 @@
+package plugin
+
+import (
+ "fmt"
+ "regexp"
+ "strings"
+
+ "github.com/redhat-openshift-ecosystem/provider-certification-tool/internal/opct/archive"
+ "github.com/redhat-openshift-ecosystem/provider-certification-tool/internal/openshift/ci/sippy"
+)
+
+// TestItem represents a single test unit holding attributes for the processor
+// pipeline.
+type TestItem struct {
+ // Name is the name of the e2e test. It is hidden from JSON as Tests is a map, and
+ // the key can be used.
+ Name string `json:"-"`
+
+ // ID is the unique identifier of the test within the execution.
+ ID string `json:"id"`
+
+ // Status store the test result. Valid values: passed, skipped, failed.
+ Status string `json:"status"`
+
+ // State represents the state of the test. It can be any status value or filter name.
+ State string `json:"state,omitempty"`
+
+ // Failure contains the failure reason extracted from JUnit field 'item.details.failure'.
+ Failure string `json:"-"`
+
+ // SystemOut contains the entire test stdout extracted from JUnit field 'item.details.system-out'.
+ SystemOut string `json:"-"`
+
+ // Offset is the offset of failure from the plugin result file.
+ Offset int `json:"-"`
+
+ // Flaky contains the flake information from OpenShift CI - scraped from Sippy API.
+ Flake *sippy.SippyTestsResponse `json:"flake,omitempty"`
+
+ // ErrorCounters errors indexed by common error key.
+ ErrorCounters archive.ErrorCounter `json:"errorCounters,omitempty"`
+
+ // Reference for documentation.
+ Documentation string `json:"documentation"`
+}
+
+type Tests map[string]*TestItem
+
+// UpdateErrorCounter reads the failures and stdout looking for error patterns from
+// a specific test, accumulating the ErrorCounters structure.
+func (pi *TestItem) UpdateErrorCounter() {
+ total := 0
+ counters := make(archive.ErrorCounter, len(archive.CommonErrorPatterns)+1)
+
+ incError := func(err string, cnt int) {
+ if _, ok := counters[err]; !ok {
+ counters[err] = 0
+ }
+ counters[err] += cnt
+ total += cnt
+ }
+
+ for _, errName := range archive.CommonErrorPatterns {
+ reErr := regexp.MustCompile(errName)
+ // Check occurrences in Failure
+ if matches := reErr.FindAllStringIndex(pi.Failure, -1); len(matches) != 0 {
+ incError(errName, len(matches))
+ }
+ // Check occurrences in SystemOut
+ if matches := reErr.FindAllStringIndex(pi.SystemOut, -1); len(matches) != 0 {
+ incError(errName, len(matches))
+ }
+ }
+
+ if total == 0 {
+ return
+ }
+ pi.ErrorCounters = counters
+ pi.ErrorCounters["total"] = total
+}
+
+// LookupDocumentation extracts the indexed part of the test name (truncating anything after '[Conformance]')
+// to link to the Documentation URL referenced by the Kubernetes Conformance markdown available
+// at https://github.com/cncf/k8s-conformance/blob/master/docs/KubeConformance-.md .
+// The test documentation (TestDocumentation) must be indexed prior to calling LookupDocumentation.
+func (pi *TestItem) LookupDocumentation(d *TestDocumentation) {
+
+ // origin/openshift-tests appends 'labels' after '[Conformance]' in the
+ // test name in the kubernetes/conformance, transforming it from the original name from upstream.
+ // nameIndex will try to recover the original name to lookup in the source docs.
+ nameIndex := fmt.Sprintf("%s[Conformance]", strings.Split(pi.Name, "[Conformance]")[0])
+
+ // check if the test name is indexed in the conformance documentation.
+ if _, ok := d.Tests[nameIndex]; ok {
+ pi.Documentation = d.Tests[nameIndex].URLFragment
+ return
+ }
+ // When the test is not indexed, fall back to the user-facing documentation base URL.
+ pi.Documentation = *d.UserBaseURL
+}
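+
+// Example (hypothetical): for a test named
+// "[sig-api-machinery] Watchers should observe add [Conformance] [Suite:k8s]"
+// the lookup key becomes
+// "[sig-api-machinery] Watchers should observe add [Conformance]",
+// and when that key is not indexed the Documentation field falls back to the
+// documentation base URL (UserBaseURL).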
diff --git a/internal/opct/plugin/testdoc.go b/internal/opct/plugin/testdoc.go
new file mode 100644
index 00000000..00614aeb
--- /dev/null
+++ b/internal/opct/plugin/testdoc.go
@@ -0,0 +1,115 @@
+package plugin
+
+import (
+ "fmt"
+ "io"
+ "net/http"
+ "regexp"
+ "strings"
+
+ "github.com/pkg/errors"
+ log "github.com/sirupsen/logrus"
+)
+
+// TestDocumentation is the struct that holds the test documentation.
+// The struct is used to store the documentation URL, the raw data, and the
+// tests indexed by name.
+// The test documentation is discovered by name, and the URL fragment is used
+// to mount the URL for the test documentation.
+type TestDocumentation struct {
+ // UserBaseURL is the user-facing base URL for the documentation.
+ UserBaseURL *string
+
+ // SourceBaseURL is the raw URL to be indexed.
+ SourceBaseURL *string
+
+ // Raw stores the data extracted from SourceBaseURL.
+ Raw *string
+
+ // Tests is the map indexed by test name, with URL fragment (page references) as a value.
+ // Example: for the e2e test '[sig-machinery] run instance', the following map will be created:
+ // map['[sig-machinery] run instance']='#sig-machinery--run-instance'
+ Tests map[string]*TestDocumentationItem
+}
+
+// TestDocumentationItem holds a single test entry discovered in the documentation page.
+type TestDocumentationItem struct {
+ Title string
+ Name string
+ // URLFragment stores the discovered fragment parsed by the Documentation page,
+ // indexed by test name, used to mount the Documentation URL for failed tests.
+ URLFragment string
+}
+
+func NewTestDocumentation(user, source string) *TestDocumentation {
+ return &TestDocumentation{
+ UserBaseURL: &user,
+ SourceBaseURL: &source,
+ }
+}
+
+// Load documentation from Suite and save it to further query
+func (d *TestDocumentation) Load() error {
+ app := "Test Documentation"
+ req, err := http.NewRequest(http.MethodGet, *d.SourceBaseURL, nil)
+ if err != nil {
+ return errors.Wrapf(err, "failed to create request to get %s", app)
+ }
+ res, err := http.DefaultClient.Do(req)
+ if err != nil {
+ return errors.Wrapf(err, "failed to make request to %s", app)
+ }
+ defer res.Body.Close()
+
+ if res.StatusCode != http.StatusOK {
+ return errors.New(fmt.Sprintf("unexpected HTTP status code to %s", app))
+ }
+
+ resBody, err := io.ReadAll(res.Body)
+ if err != nil {
+ return errors.Wrapf(err, "failed to read response body for %s", app)
+ }
+ str := string(resBody)
+ d.Raw = &str
+ return nil
+}
+
+// BuildIndex reads the raw document, discovering the test names and the URL
+// fragments. The parser is based on the Kubernetes Conformance documentation:
+// https://github.com/cncf/k8s-conformance/blob/master/docs/KubeConformance-1.27.md
+func (d *TestDocumentation) BuildIndex() error {
+ lines := strings.Split(*d.Raw, "\n")
+ d.Tests = make(map[string]*TestDocumentationItem, len(lines))
+ for number, line := range lines {
+
+ // Build index for Kubernetes Conformance tests, parsing the page for version:
+ // https://github.com/cncf/k8s-conformance/blob/master/docs/KubeConformance-1.27.md
+ if strings.HasPrefix(line, "- Defined in code as: ") {
+ testArr := strings.Split(line, "Defined in code as: ")
+ if len(testArr) < 2 {
+ log.Debugf("Error BuildIndex(): unable to build documentation index for line: %s", line)
+ }
+ testName := testArr[1]
+ d.Tests[testName] = &TestDocumentationItem{
+ Name: testName,
+ // The test reference/section is defined three lines before the name definition.
+ Title: lines[number-3],
+ }
+
+ // create url fragment for each test section
+ reDoc := regexp.MustCompile(`^## \[(.*)\]`)
+ match := reDoc.FindStringSubmatch(lines[number-3])
+ if len(match) == 2 {
+ fragment := match[1]
+ // mount the fragment removing undesired symbols.
+ for _, c := range []string{":", "-", ".", ",", "="} {
+ fragment = strings.Replace(fragment, c, "", -1)
+ }
+ fragment = strings.Replace(fragment, " ", "-", -1)
+ fragment = strings.ToLower(fragment)
+ d.Tests[testName].URLFragment = fmt.Sprintf("%s#%s", *d.UserBaseURL, fragment)
+ }
+ }
+ }
+ return nil
+}
diff --git a/internal/opct/summary/consolidated.go b/internal/opct/summary/consolidated.go
new file mode 100644
index 00000000..1e355a9a
--- /dev/null
+++ b/internal/opct/summary/consolidated.go
@@ -0,0 +1,1017 @@
+// Package summary provides the entrypoint to process the results of the provider and baseline
+// validations, applying filters and transformations to the data.
+package summary
+
+import (
+ "bufio"
+ "fmt"
+ "os"
+ "regexp"
+ "sort"
+ "strings"
+
+ log "github.com/sirupsen/logrus"
+
+ "github.com/pkg/errors"
+
+ "github.com/redhat-openshift-ecosystem/provider-certification-tool/internal/opct/metrics"
+ "github.com/redhat-openshift-ecosystem/provider-certification-tool/internal/opct/plugin"
+ "github.com/redhat-openshift-ecosystem/provider-certification-tool/internal/openshift/ci/sippy"
+ "github.com/redhat-openshift-ecosystem/provider-certification-tool/internal/report/baseline"
+)
+
+// ConsolidatedSummary aggregates the results of the provider and baseline archives.
+type ConsolidatedSummary struct {
+ Verbose bool
+ Timers *metrics.Timers
+ Provider *ResultSummary
+ Baseline *ResultSummary
+ BaselineAPI *baseline.BaselineConfig
+}
+
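+// ConsolidatedSummaryInput holds the options used to create a ConsolidatedSummary,
+// including the provider and baseline archive paths.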
+type ConsolidatedSummaryInput struct {
+ Archive string
+ ArchiveBase string
+ SaveTo string
+ Verbose bool
+ Timers *metrics.Timers
+}
+
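+// NewConsolidatedSummary creates a ConsolidatedSummary with empty provider and
+// baseline result summaries built from the input options.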
+func NewConsolidatedSummary(in *ConsolidatedSummaryInput) *ConsolidatedSummary {
+ return &ConsolidatedSummary{
+ Verbose: in.Verbose,
+ Timers: in.Timers,
+ Provider: &ResultSummary{
+ Name: ResultSourceNameProvider,
+ Archive: in.Archive,
+ OpenShift: &OpenShiftSummary{},
+ Sonobuoy: NewSonobuoySummary(),
+ Suites: &OpenshiftTestsSuites{
+ OpenshiftConformance: &OpenshiftTestsSuite{Name: "openshiftConformance"},
+ KubernetesConformance: &OpenshiftTestsSuite{Name: "kubernetesConformance"},
+ },
+ SavePath: in.SaveTo,
+ },
+ Baseline: &ResultSummary{
+ Name: ResultSourceNameBaseline,
+ Archive: in.ArchiveBase,
+ OpenShift: &OpenShiftSummary{},
+ Sonobuoy: NewSonobuoySummary(),
+ Suites: &OpenshiftTestsSuites{
+ OpenshiftConformance: &OpenshiftTestsSuite{Name: "openshiftConformance"},
+ KubernetesConformance: &OpenshiftTestsSuite{Name: "kubernetesConformance"},
+ },
+ },
+ BaselineAPI: &baseline.BaselineConfig{},
+ }
+}
+
+// Process is the entrypoint to read and fill all summaries for each archive, plugin and suite,
+// applying any transformations needed through filters.
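+// The filters are applied in the following order: Suite, Known Failures, Replay,
+// Baseline archive, Flaky (Sippy), Baseline API, and the final copy.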
+func (cs *ConsolidatedSummary) Process() error {
+ cs.Timers.Add("cs-process")
+
+ // Load Result Summary from Archives
+ log.Debug("Processing results/Populating Provider")
+ cs.Timers.Set("cs-process/populate-provider")
+ if err := cs.Provider.Populate(); err != nil {
+ return fmt.Errorf("processing provider results: %w", err)
+ }
+
+ log.Debug("Processing results/Populating Baseline")
+ cs.Timers.Set("cs-process/populate-baseline")
+ if err := cs.Baseline.Populate(); err != nil {
+ return fmt.Errorf("processing baseline results: %w", err)
+ }
+
+ // Filters pipeline (order matters)
+ log.Debug("Processing results/Applying filters/Suite")
+ cs.Timers.Set("cs-process/filter1-suite")
+ if err := cs.applyFilterSuite(); err != nil {
+ return err
+ }
+
+ log.Debug("Processing results/Applying filters/Known Failures")
+ cs.Timers.Set("cs-process/filter5-known-failures")
+ if err := cs.applyFilterKnownFailures(plugin.FilterNameKF); err != nil {
+ return err
+ }
+
+ log.Debug("Processing results/Applying filters/6/Replay")
+ cs.Timers.Set("cs-process/filter-replay")
+ if err := cs.applyFilterReplay(plugin.FilterNameReplay); err != nil {
+ return err
+ }
+
+ log.Debug("Processing results/Applying filters/2/Baseline")
+ cs.Timers.Set("cs-process/filter2-baseline")
+ if err := cs.applyFilterBaseline(plugin.FilterNameBaseline); err != nil {
+ return err
+ }
+
+ log.Debug("Processing results/Applying filters/3/Flake")
+ cs.Timers.Set("cs-process/filter3-flake")
+ if err := cs.applyFilterFlaky(plugin.FilterNameFlaky); err != nil {
+ return err
+ }
+
+ log.Debug("Processing results/Applying filters/4/Baseline API")
+ cs.Timers.Set("cs-process/filter4-baseline-api")
+ if err := cs.applyFilterBaselineAPI(); err != nil {
+ return err
+ }
+
+ log.Debug("Processing results/Applying filters/Saving final filter")
+ cs.Timers.Set("cs-process/filter-finish")
+ if err := cs.applyFilterCopyPipeline(plugin.FilterNameFinalCopy); err != nil {
+ return err
+ }
+
+ // Build documentation for failures.
+ log.Debug("Processing results/Building tests documentation")
+ cs.Timers.Set("cs-process/build-docs")
+ if err := cs.buildDocumentation(); err != nil {
+ return err
+ }
+
+ cs.Timers.Add("cs-process")
+ return nil
+}
+
+// GetProvider gets the provider results.
+func (cs *ConsolidatedSummary) GetProvider() *ResultSummary {
+ return cs.Provider
+}
+
+// GetBaseline gets the baseline results.
+func (cs *ConsolidatedSummary) GetBaseline() *ResultSummary {
+ return cs.Baseline
+}
+
+// HasBaselineResults checks if the baseline results were set (--diff)
+// and have valid data.
+func (cs *ConsolidatedSummary) HasBaselineResults() bool {
+ if cs.Baseline == nil {
+ return false
+ }
+ return cs.Baseline.HasValidResults()
+}
+
+// Filter1: Suite
+// applyFilterSuite processes the FailedList for each plugin, keeping only the failures
+// that **intersect** with the respective suite.
+func (cs *ConsolidatedSummary) applyFilterSuite() error {
+ for _, pluginName := range []string{
+ plugin.PluginNameOpenShiftUpgrade,
+ plugin.PluginNameKubernetesConformance,
+ plugin.PluginNameOpenShiftConformance,
+ plugin.PluginNameConformanceReplay,
+ } {
+ if err := cs.applyFilterSuiteForPlugin(pluginName); err != nil {
+ return fmt.Errorf("error while processing filter1 (SuiteOnly): %w", err)
+ }
+ }
+ return nil
+}
+
+// applyFilterSuiteForPlugin calculates the intersection of Provider Failed AND suite
+func (cs *ConsolidatedSummary) applyFilterSuiteForPlugin(pluginName string) error {
+ var ps *plugin.OPCTPluginSummary
+ var pluginSuite *OpenshiftTestsSuite
+
+ switch pluginName {
+ case plugin.PluginNameKubernetesConformance:
+ ps = cs.GetProvider().GetOpenShift().GetResultK8SValidated()
+ pluginSuite = cs.GetProvider().GetSuites().KubernetesConformance
+ case plugin.PluginNameOpenShiftConformance:
+ ps = cs.GetProvider().GetOpenShift().GetResultOCPValidated()
+ pluginSuite = cs.GetProvider().GetSuites().OpenshiftConformance
+
+ case plugin.PluginNameOpenShiftUpgrade:
+ ps = cs.GetProvider().GetOpenShift().GetResultConformanceUpgrade()
+ pluginSuite = &OpenshiftTestsSuite{}
+
+ case plugin.PluginNameConformanceReplay:
+ ps = cs.GetProvider().GetOpenShift().GetResultConformanceReplay()
+ pluginSuite = &OpenshiftTestsSuite{}
+ }
+
+ e2eFailures := ps.FailedList
+ e2eSuite := pluginSuite.Tests
+ emptySuite := len(pluginSuite.Tests) == 0
+ hashSuite := make(map[string]struct{}, len(e2eSuite))
+
+ for _, v := range e2eSuite {
+ hashSuite[v] = struct{}{}
+ }
+
+ for _, v := range e2eFailures {
+ // mark the test as processed by filter1 (SuiteOnly).
+ ps.Tests[v].State = "filter1SuiteOnly"
+
+ // Keep the failure in the pipeline when the suite has no tests (for example, when the counter could not be collected).
+ if emptySuite {
+ ps.FailedFilter1 = append(ps.FailedFilter1, v)
+ continue
+ }
+ // keep failures present in the suite; track the excluded ones separately.
+ if _, ok := hashSuite[v]; ok {
+ ps.FailedFilter1 = append(ps.FailedFilter1, v)
+ continue
+ }
+ ps.FailedExcludedFilter1 = append(ps.FailedExcludedFilter1, v)
+ }
+ sort.Strings(ps.FailedFilter1)
+
+ log.Debugf("Filter (SuiteOnly) results: plugin=%s in=failures(%d) in=suite(%d) out=filter(%d) filterExcluded(%d)",
+ pluginName, len(e2eFailures), len(e2eSuite),
+ len(ps.FailedFilter1), len(ps.FailedExcludedFilter1))
+ return nil
+}
+
+// Filter2: Baseline archive
+// applyFilterBaseline processes the failures from the previous filter for each plugin, **excluding**
+// failures also present in the baseline archive.
+func (cs *ConsolidatedSummary) applyFilterBaseline(filterID string) error {
+ for _, pluginName := range []string{
+ plugin.PluginNameOpenShiftUpgrade,
+ plugin.PluginNameKubernetesConformance,
+ plugin.PluginNameOpenShiftConformance,
+ plugin.PluginNameConformanceReplay,
+ } {
+ if err := cs.applyFilterBaselineForPlugin(pluginName, filterID); err != nil {
+ return fmt.Errorf("error while processing filter2 (baseline archive): %w", err)
+ }
+ }
+ return nil
+}
+
+// applyFilterBaselineForPlugin excludes from the provider failures (already filtered by suite)
+// the tests that also failed in the baseline archive.
+func (cs *ConsolidatedSummary) applyFilterBaselineForPlugin(pluginName string, filterID string) error {
+ var ps *plugin.OPCTPluginSummary
+ var e2eFailuresBaseline []string
+
+ // TODO: replace the baseline from discovered data from API (s3). The flag
+ // OPCT_DISABLE_EXP_BASELINE_API can be set to use the local file.
+ // Default method is to use the API to get the baseline.
+
+ switch pluginName {
+ case plugin.PluginNameKubernetesConformance:
+ ps = cs.GetProvider().GetOpenShift().GetResultK8SValidated()
+ if cs.GetBaseline().HasValidResults() {
+ e2eFailuresBaseline = cs.GetBaseline().GetOpenShift().GetResultK8SValidated().FailedList
+ }
+ case plugin.PluginNameOpenShiftConformance:
+ ps = cs.GetProvider().GetOpenShift().GetResultOCPValidated()
+ if cs.GetBaseline().HasValidResults() {
+ e2eFailuresBaseline = cs.GetBaseline().GetOpenShift().GetResultOCPValidated().FailedList
+ }
+
+ case plugin.PluginNameOpenShiftUpgrade:
+ ps = cs.GetProvider().GetOpenShift().GetResultConformanceUpgrade()
+
+ case plugin.PluginNameConformanceReplay:
+ ps = cs.GetProvider().GetOpenShift().GetResultConformanceReplay()
+
+ default:
+ return errors.New("Suite not found to apply filter: Flaky")
+ }
+
+ filterFailures, filterFailuresExcluded := ps.GetFailuresByFilterID(filterID)
+ e2eFailuresProvider := ps.GetPreviousFailuresByFilterID(filterID)
+ hashBaseline := make(map[string]struct{}, len(e2eFailuresBaseline))
+
+ for _, v := range e2eFailuresBaseline {
+ hashBaseline[v] = struct{}{}
+ }
+
+ // DEPRECATION warning when used:
+ if len(e2eFailuresBaseline) > 0 {
+ log.Warnf("Filter baseline (--diff|--baseline) is deprecated and will be removed soon, the filter BaselineAPI is replacing and automatically applied to the failure pipeline.")
+ }
+ for _, v := range e2eFailuresProvider {
+ ps.Tests[v].State = "filter2Baseline"
+ if _, ok := hashBaseline[v]; !ok {
+ filterFailures = append(filterFailures, v)
+ continue
+ }
+ filterFailuresExcluded = append(filterFailuresExcluded, v)
+ }
+ sort.Strings(filterFailures)
+ ps.SetFailuresByFilterID(filterID, filterFailures, filterFailuresExcluded)
+
+ log.Debugf("Filter (Baseline) results: plugin=%s in=filter(%d) out=filter(%d) filterExcluded(%d)",
+ pluginName, len(e2eFailuresProvider),
+ len(filterFailures), len(filterFailuresExcluded))
+ return nil
+}
+
+// Filter3: Flaky
+// applyFilterFlaky processes the failures from the previous filter for each plugin, **excluding**
+// tests reported as flaky by the Sippy API.
+func (cs *ConsolidatedSummary) applyFilterFlaky(filterID string) error {
+ if err := cs.applyFilterFlakeForPlugin(plugin.PluginNameKubernetesConformance, filterID); err != nil {
+ return err
+ }
+ if err := cs.applyFilterFlakeForPlugin(plugin.PluginNameOpenShiftConformance, filterID); err != nil {
+ return err
+ }
+ return nil
+}
+
+// applyFilterFlakeForPlugin queries the Sippy API for each failed test
+// on each plugin/suite, saving the result in the ResultSummary.
+func (cs *ConsolidatedSummary) applyFilterFlakeForPlugin(pluginName string, filterID string) error {
+ var ps *plugin.OPCTPluginSummary
+
+ switch pluginName {
+ case plugin.PluginNameKubernetesConformance:
+ ps = cs.GetProvider().GetOpenShift().GetResultK8SValidated()
+
+ case plugin.PluginNameOpenShiftConformance:
+ ps = cs.GetProvider().GetOpenShift().GetResultOCPValidated()
+
+ case plugin.PluginNameOpenShiftUpgrade:
+ ps = cs.GetProvider().GetOpenShift().GetResultConformanceUpgrade()
+
+ case plugin.PluginNameConformanceReplay:
+ ps = cs.GetProvider().GetOpenShift().GetResultConformanceReplay()
+
+ default:
+ return errors.New("Suite not found to apply filter: Flaky")
+ }
+
+ // TODO: define if we will check for flakes for all failures or only filtered
+ // Query the flake API only for the already filtered failures to avoid many external queries.
+ ver, err := cs.GetProvider().GetOpenShift().GetClusterVersionXY()
+ if err != nil {
+ return errors.Errorf("Error getting cluster version: %v", err)
+ }
+
+ api := sippy.NewSippyAPI(ver)
+ for _, name := range ps.FailedFilter2 {
+ ps.Tests[name].State = "filter3FlakeCheck"
+ resp, err := api.QueryTests(&sippy.SippyTestsRequestInput{TestName: name})
+ if err != nil {
+ log.Errorf("#> Error querying to Sippy API: %v", err)
+ ps.FailedFilter3 = append(ps.FailedFilter3, name)
+ continue
+ }
+ if resp == nil {
+ log.Errorf("Error filter flakeAPI: invalid response: %v", resp)
+ ps.FailedFilter3 = append(ps.FailedFilter3, name)
+ continue
+ }
+ for _, r := range *resp {
+ if _, ok := ps.Tests[name]; ok {
+ ps.Tests[name].Flake = &r
+ } else {
+ ps.Tests[name] = &plugin.TestItem{
+ Name: name,
+ Flake: &r,
+ }
+ }
+ // Apply the flake filter by moving only non-flaky tests forward in the pipeline.
+ // Tests reporting a CurrentFlakePerc of 5% or lower in Sippy are treated as non-flaky.
+ // TODO: Review flake severity
+ if ps.Tests[name].Flake.CurrentFlakePerc <= 5.0 {
+ ps.Tests[name].State = "filter3Priority"
+ ps.FailedFilter3 = append(ps.FailedFilter3, name)
+ continue
+ }
+ ps.Tests[name].State = "filter3Flake"
+ ps.FailedExcludedFilter3 = append(ps.FailedExcludedFilter3, name)
+ }
+ }
+ sort.Strings(ps.FailedFilter3)
+
+ log.Debugf("Filter (FlakeAPI) results: plugin=%s in=filter(%d) out=filter(%d) filterExcluded(%d)",
+ pluginName, len(ps.FailedFilter2),
+ len(ps.FailedFilter3), len(ps.FailedExcludedFilter3))
+ return nil
+}
+
+// Filter4: Baseline API
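+// applyFilterBaselineAPI loads the baseline summary from the OPCT API and excludes,
+// for each plugin, failures also reported by the baseline.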
+func (cs *ConsolidatedSummary) applyFilterBaselineAPI() error {
+ // Load baseline results from API
+ if err := cs.loadBaselineFromAPI(); err != nil {
+ return fmt.Errorf("loading baseline results from API: %w", err)
+ }
+ for _, pluginName := range []string{
+ plugin.PluginNameOpenShiftUpgrade,
+ plugin.PluginNameKubernetesConformance,
+ plugin.PluginNameOpenShiftConformance,
+ plugin.PluginNameConformanceReplay,
+ } {
+ if err := cs.applyFilterBaselineAPIForPlugin(pluginName); err != nil {
+ return fmt.Errorf("error while processing filter4 (baseline API): %w", err)
+ }
+ }
+ return nil
+}
+
+// loadBaselineFromAPI queries the OPCT "backend" looking for the baseline results.
+func (cs *ConsolidatedSummary) loadBaselineFromAPI() error {
+ if os.Getenv("OPCT_DISABLE_FILTER_BASELINE") == "1" {
+ log.Warnf("Filter pipeline: Basline API is explicitly disabled by OPCT_DISABLE_FILTER_BASELINE, skipping the discoverying baseline results from API")
+ return nil
+ }
+ // Path to S3 Object /api/v0/result/summary/{ocpVersion}/{platformType}
+ // The object is served through CloudFront, which reduces the cost of accessing S3 and can be
+ // proxied/redirected to other backends without replacing the URL.
+ // The original bucket[1] must be migrated to another account, and the CloudFront URL
+ // is part of that goal without disrupting the current process.
+ // [1] "https://openshift-provider-certification.s3.us-west-2.amazonaws.com"
+ // baseURL := "https://d23912a6309zf7.cloudfront.net/api/v0"
+
+ // Resolve the cluster release (x.y), falling back to parsing the desired version before returning a failure.
+ ocpRelease, err := cs.Provider.OpenShift.GetClusterVersionXY()
+ if err != nil {
+ cv, err := cs.Provider.OpenShift.GetClusterVersion()
+ if err != nil {
+ return errors.Errorf("Error getting cluster version: %v", err)
+ }
+ ocpRelease = fmt.Sprintf("%s.%s", strings.Split(cv.Desired, ".")[0], strings.Split(cv.Desired, ".")[1])
+ }
+ platformType := cs.Provider.OpenShift.GetInfrastructurePlatformType()
+
+ cs.BaselineAPI = baseline.NewBaselineReportSummary()
+ if err := cs.BaselineAPI.GetLatestRawSummaryFromPlatformWithFallback(ocpRelease, platformType); err != nil {
+ return errors.Wrap(err, "failed to get baseline from API")
+ }
+ return nil
+}
+
+// applyFilterBaselineAPIForPlugin checks the baseline API summary for each failed test
+// on each plugin/suite, saving the list in the ResultSummary.
+// The filter must populate FailedFilter4 and FailedExcludedFilter4.
+func (cs *ConsolidatedSummary) applyFilterBaselineAPIForPlugin(pluginName string) error {
+ // log.Warnf("TODO: implement applyFilterBaselineAPIForPlugin: %s", pluginName)
+ var ps *plugin.OPCTPluginSummary
+ var e2eFailuresBaseline []string
+ var err error
+
+ // TODO: replace the baseline from discovered data from API (s3). The flag
+ // OPCT_DISABLE_EXP_BASELINE_API can be set to use the local file.
+ // Default method is to use the API to get the baseline.
+
+ skipFilter := false
+ if os.Getenv("OPCT_DISABLE_FILTER_BASELINE") == "1" {
+ skipFilter = true
+ }
+
+ doneFilter := func() {
+ log.Debugf("Filter (BaselineAPI) results: plugin=%s in=filter(%d) inApi=(%d) out=filter(%d) excluded(%d)",
+ pluginName, len(ps.FailedFilter3), len(e2eFailuresBaseline),
+ len(ps.FailedFilter4), len(ps.FailedExcludedFilter4))
+ }
+
+ switch pluginName {
+ case plugin.PluginNameKubernetesConformance:
+ ps = cs.GetProvider().GetOpenShift().GetResultK8SValidated()
+
+ case plugin.PluginNameOpenShiftConformance:
+ ps = cs.GetProvider().GetOpenShift().GetResultOCPValidated()
+
+ case plugin.PluginNameOpenShiftUpgrade:
+ ps = cs.GetProvider().GetOpenShift().GetResultConformanceUpgrade()
+
+ case plugin.PluginNameConformanceReplay:
+ ps = cs.GetProvider().GetOpenShift().GetResultConformanceReplay()
+ ps.FailedFilter4 = ps.FailedFilter3
+ doneFilter()
+ return nil
+
+ default:
+ return fmt.Errorf("plugin not found")
+ }
+
+ b := cs.BaselineAPI.GetBuffer()
+ if b != nil {
+ e2eFailuresBaseline, err = b.GetPriorityFailuresFromPlugin(pluginName)
+ if err != nil {
+ log.Errorf("failed to get priority failures from plugin: %v", err)
+ }
+ }
+
+ e2eFailuresPipeline := ps.FailedFilter3
+ hashBaseline := make(map[string]struct{}, len(e2eFailuresBaseline))
+
+ for _, v := range e2eFailuresBaseline {
+ hashBaseline[v] = struct{}{}
+ }
+
+ for _, v := range e2eFailuresPipeline {
+ ps.Tests[v].State = "filter4BaselineAPI"
+ if _, ok := hashBaseline[v]; !ok {
+ ps.FailedFilter4 = append(ps.FailedFilter4, v)
+ continue
+ }
+ ps.FailedExcludedFilter4 = append(ps.FailedExcludedFilter4, v)
+ }
+
+ // feed the pipeline with the same tests when the filter is disabled.
+ if skipFilter {
+ log.Warn("Filter pipeline: Basline API is explicitly disabled by OPCT_DISABLE_FILTER_BASELINE, using Filter3 to keep processing failures")
+ ps.FailedFilter4 = ps.FailedFilter3
+ }
+ sort.Strings(ps.FailedFilter4)
+ doneFilter()
+ return nil
+}
+
+// Filter5: Known Failures
+// applyFilterKnownFailures skips well-known failures that are not relevant to the validation process.
+func (cs *ConsolidatedSummary) applyFilterKnownFailures(filterID string) error {
+ // Reason to skip the test:
+ // "[sig-arch] External binary usage" :
+ // - The test is not relevant to the validation process, and it's not a real failure
+ // since the k8s/conformance suite is executed correctly.
+ // "[sig-mco] Machine config pools complete upgrade" :
+ // - The test is not relevant to the validation process; the custom MCP is used
+ // in the OPCT topology to execute the in-cluster validation. If the MCP were not used,
+ // the test environment would be evicted when the dedicated node is drained.
+ cs.Provider.TestSuiteKnownFailures = []string{
+ "[sig-arch] External binary usage",
+ "[sig-mco] Machine config pools complete upgrade",
+ }
+
+ for _, pluginName := range []string{
+ plugin.PluginNameOpenShiftUpgrade,
+ plugin.PluginNameKubernetesConformance,
+ plugin.PluginNameOpenShiftConformance,
+ plugin.PluginNameConformanceReplay,
+ } {
+ if err := cs.applyFilterKnownFailuresForPlugin(pluginName, filterID); err != nil {
+ return fmt.Errorf("error while processing filter5 (baseline API): %w", err)
+ }
+ }
+ return nil
+}
+
+// Filter5 by plugin
+func (cs *ConsolidatedSummary) applyFilterKnownFailuresForPlugin(pluginName string, filterID string) error {
+ var ps *plugin.OPCTPluginSummary
+
+ // Get the list of the last filter in the pipeline
+ switch pluginName {
+ case plugin.PluginNameKubernetesConformance:
+ ps = cs.GetProvider().GetOpenShift().GetResultK8SValidated()
+
+ case plugin.PluginNameOpenShiftConformance:
+ ps = cs.GetProvider().GetOpenShift().GetResultOCPValidated()
+
+ case plugin.PluginNameOpenShiftUpgrade:
+ ps = cs.GetProvider().GetOpenShift().GetResultConformanceUpgrade()
+
+ case plugin.PluginNameConformanceReplay:
+ ps = cs.GetProvider().GetOpenShift().GetResultConformanceReplay()
+
+ default:
+ return fmt.Errorf("error while processing filter5 (know failures), plugin not found: %s", pluginName)
+ }
+
+ // read the failures from pipeline
+ filterFailures, filterFailuresExcluded := ps.GetFailuresByFilterID(filterID)
+ e2eFailuresPipeline := ps.GetPreviousFailuresByFilterID(filterID)
+ hashExclusion := make(map[string]struct{}, len(cs.Provider.TestSuiteKnownFailures))
+
+ for _, v := range cs.Provider.TestSuiteKnownFailures {
+ hashExclusion[v] = struct{}{}
+ }
+
+ for _, v := range e2eFailuresPipeline {
+ ps.Tests[v].State = "filter5KnownFailures"
+ if _, ok := hashExclusion[v]; !ok {
+ filterFailures = append(filterFailures, v)
+ continue
+ }
+ filterFailuresExcluded = append(filterFailuresExcluded, v)
+ }
+ sort.Strings(filterFailures)
+ ps.SetFailuresByFilterID(filterID, filterFailures, filterFailuresExcluded)
+
+ log.Debugf("Filter (KF) results: plugin=%s in=filter(%d) out=filter(%d) filterExcluded(%d)",
+ pluginName, len(e2eFailuresPipeline), len(filterFailures), len(filterFailuresExcluded))
+ return nil
+}
+
+// Filter6: Replay
+// applyFilterReplay skips failures that pass in the replay step, which are
+// candidates for flaky or false-positive failures.
+// The replay step re-runs the failed tests from the conformance suites in serial mode
+// to check whether each test passes on a second run.
+func (cs *ConsolidatedSummary) applyFilterReplay(filterID string) error {
+ for _, pluginName := range []string{
+ plugin.PluginNameKubernetesConformance,
+ plugin.PluginNameOpenShiftConformance,
+ } {
+ if err := cs.applyFilterReplayForPlugin(pluginName, filterID); err != nil {
+ return fmt.Errorf("error while processing filter5 (Replay): %w", err)
+ }
+ }
+ return nil
+}
+
+// Filter6 by plugin
+// applyFilterReplayForPlugin extracts the tests that passed in the replay step and checks
+// whether they intersect with the conformance plugin failures; a test that passes
+// in the second run is excluded from the failures.
+func (cs *ConsolidatedSummary) applyFilterReplayForPlugin(pluginName string, filterID string) error {
+ var ps *plugin.OPCTPluginSummary
+ switch pluginName {
+ case plugin.PluginNameKubernetesConformance:
+ ps = cs.GetProvider().GetOpenShift().GetResultK8SValidated()
+
+ case plugin.PluginNameOpenShiftConformance:
+ ps = cs.GetProvider().GetOpenShift().GetResultOCPValidated()
+
+ case plugin.PluginNameOpenShiftUpgrade:
+ ps = cs.GetProvider().GetOpenShift().GetResultConformanceUpgrade()
+
+ default:
+ return fmt.Errorf("plugin not found: %s", pluginName)
+ }
+
+ // read the failures from pipeline
+ filterFailures, filterFailuresExcluded := ps.GetFailuresByFilterID(filterID)
+ e2eFailuresPipeline := ps.GetPreviousFailuresByFilterID(filterID)
+
+ replayPlugin := cs.GetProvider().GetOpenShift().GetResultConformanceReplay()
+ if replayPlugin == nil {
+ ps.SetFailuresByFilterID(filterID, filterFailures, filterFailuresExcluded)
+ log.Debugf("Filter (Replay) results: plugin=%s in=filter(%d) out=filter(%d) filterExcluded(%d)",
+ pluginName, len(e2eFailuresPipeline),
+ len(filterFailures), len(filterFailuresExcluded))
+ log.Debugf("skipping filter (Replay) for plugin: %s, no replay results", pluginName)
+ return nil
+ }
+
+ passedReplay := make(map[string]struct{}, len(replayPlugin.Tests))
+ failedReplay := make(map[string]struct{}, len(replayPlugin.Tests))
+ for _, test := range replayPlugin.Tests {
+ name := test.Name
+ if test.Status == "passed" {
+ passedReplay[name] = struct{}{}
+ continue
+ }
+ failedReplay[name] = struct{}{}
+ }
+
+ for _, v := range e2eFailuresPipeline {
+ ps.Tests[v].State = "filter6Replay"
+ if _, ok := passedReplay[v]; !ok {
+ filterFailures = append(filterFailures, v)
+ continue
+ }
+ filterFailuresExcluded = append(filterFailuresExcluded, v)
+ }
+ sort.Strings(filterFailures)
+ ps.SetFailuresByFilterID(filterID, filterFailures, filterFailuresExcluded)
+
+ log.Debugf("Filter (Replay) results: plugin=%s in=filter(%d) replay=pass(%d) fail(%d) out=filter(%d) filterExcluded(%d)",
+ pluginName, len(e2eFailuresPipeline), len(passedReplay), len(failedReplay),
+ len(filterFailures), len(filterFailuresExcluded))
+ return nil
+}
+
+// Filter Final:
+// applyFilterCopyPipeline builds the final failures after filters for each plugin.
+func (cs *ConsolidatedSummary) applyFilterCopyPipeline(filterID string) error {
+ for _, pluginName := range []string{
+ plugin.PluginNameOpenShiftUpgrade,
+ plugin.PluginNameKubernetesConformance,
+ plugin.PluginNameOpenShiftConformance,
+ plugin.PluginNameConformanceReplay,
+ } {
+ if err := cs.applyFilterCopyPipelineForPlugin(pluginName, filterID); err != nil {
+ return fmt.Errorf("error while building filtered failures: %w", err)
+ }
+ }
+ return nil
+}
+
+// applyFilterCopyPipelineForPlugin copies the last filter in the pipeline to the final failure list.
+func (cs *ConsolidatedSummary) applyFilterCopyPipelineForPlugin(pluginName string, filterID string) error {
+ var ps *plugin.OPCTPluginSummary
+
+ // Get the list of the last filter in the pipeline
+ switch pluginName {
+ case plugin.PluginNameKubernetesConformance:
+ ps = cs.GetProvider().GetOpenShift().GetResultK8SValidated()
+ // Should point to the last filter in the pipeline.
+ ps.FailedFiltered = ps.GetPreviousFailuresByFilterID(filterID)
+
+ case plugin.PluginNameOpenShiftConformance:
+ ps = cs.GetProvider().GetOpenShift().GetResultOCPValidated()
+ // Should point to the last filter in the pipeline.
+ ps.FailedFiltered = ps.GetPreviousFailuresByFilterID(filterID)
+
+ case plugin.PluginNameOpenShiftUpgrade:
+ ps = cs.GetProvider().GetOpenShift().GetResultConformanceUpgrade()
+ // Should point to the last filter in the pipeline.
+ ps.FailedFiltered = ps.GetPreviousFailuresByFilterID(filterID)
+
+ case plugin.PluginNameConformanceReplay:
+ ps = cs.GetProvider().GetOpenShift().GetResultConformanceReplay()
+ // Should point to the last filter in the pipeline.
+ ps.FailedFiltered = ps.FailedList
+
+ default:
+ return fmt.Errorf("invalid plugin: %s", pluginName)
+ }
+
+ log.Debugf("Filter results (Final): plugin=%s filtered failures(%d)", pluginName, len(ps.FailedFiltered))
+ return nil
+}
+
+// saveResultsPlugin saves the results of the plugin to disk to be used
+// in the review process.
+func (cs *ConsolidatedSummary) saveResultsPlugin(path, pluginName string) error {
+ var resultsProvider *plugin.OPCTPluginSummary
+ var resultsBaseline *plugin.OPCTPluginSummary
+ var suite *OpenshiftTestsSuite
+ var prefix = "tests"
+ bProcessed := cs.GetBaseline().HasValidResults()
+
+ switch pluginName {
+ case plugin.PluginNameKubernetesConformance:
+ resultsProvider = cs.GetProvider().GetOpenShift().GetResultK8SValidated()
+ if bProcessed {
+ resultsBaseline = cs.GetBaseline().GetOpenShift().GetResultK8SValidated()
+ }
+ suite = cs.GetProvider().GetSuites().KubernetesConformance
+ case plugin.PluginNameOpenShiftConformance:
+ resultsProvider = cs.GetProvider().GetOpenShift().GetResultOCPValidated()
+ if bProcessed {
+ resultsBaseline = cs.GetBaseline().GetOpenShift().GetResultOCPValidated()
+ }
+ suite = cs.GetProvider().GetSuites().OpenshiftConformance
+ }
+
+ if cs.Verbose {
+ // Save Provider failures
+ filename := fmt.Sprintf("%s/%s_%s_provider_failures-1-ini.txt", path, prefix, pluginName)
+ if err := writeFileTestList(filename, resultsProvider.FailedList); err != nil {
+ return err
+ }
+
+ // Save Provider failures with filter: Suite (only)
+ filename = fmt.Sprintf("%s/%s_%s_provider_failures-2-filter1_suite.txt", path, prefix, pluginName)
+ if err := writeFileTestList(filename, resultsProvider.FailedFilter1); err != nil {
+ return err
+ }
+
+ // Save Provider failures with filter: Baseline exclusion
+ filename = fmt.Sprintf("%s/%s_%s_provider_failures-3-filter2_baseline.txt", path, prefix, pluginName)
+ if err := writeFileTestList(filename, resultsProvider.FailedFilter2); err != nil {
+ return err
+ }
+
+ // Save Provider failures with filter: Flaky
+ filename = fmt.Sprintf("%s/%s_%s_provider_failures-4-filter3_without_flakes.txt", path, prefix, pluginName)
+ if err := writeFileTestList(filename, resultsProvider.FailedFilter3); err != nil {
+ return err
+ }
+
+ // Save Provider failures with filter: Baseline API
+ filename = fmt.Sprintf("%s/%s_%s_provider_failures-5-filter4_api.txt", path, prefix, pluginName)
+ if err := writeFileTestList(filename, resultsProvider.FailedFilter4); err != nil {
+ return err
+ }
+
+ // Save Provider failures with filter: Known Failures
+ filename = fmt.Sprintf("%s/%s_%s_provider_failures-5-filter5_knownfailures.txt", path, prefix, pluginName)
+ if err := writeFileTestList(filename, resultsProvider.FailedFilter5); err != nil {
+ return err
+ }
+
+ // Save the Providers failures for the latest filter to review (focus on this)
+ filename = fmt.Sprintf("%s/%s_%s_provider_failures.txt", path, prefix, pluginName)
+ if err := writeFileTestList(filename, resultsProvider.FailedFilter3); err != nil {
+ return err
+ }
+
+ // Save baseline failures
+ if bProcessed {
+ filename = fmt.Sprintf("%s/%s_%s_baseline_failures.txt", path, prefix, pluginName)
+ if err := writeFileTestList(filename, resultsBaseline.FailedList); err != nil {
+ return err
+ }
+ }
+
+ // Save the openshift-tests suite use by this plugin:
+ filename = fmt.Sprintf("%s/%s_%s_suite_full.txt", path, prefix, pluginName)
+ if err := writeFileTestList(filename, suite.Tests); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
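+// extractFailuresDetailsByPlugin dumps the failure and stdout details of the tests
+// collected for the given plugin into a failures-<plugin> subdirectory under path.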
+func (cs *ConsolidatedSummary) extractFailuresDetailsByPlugin(path, pluginName string) error {
+ var resultsProvider *plugin.OPCTPluginSummary
+ ignoreExistingDir := true
+
+ switch pluginName {
+ case plugin.PluginNameKubernetesConformance:
+ resultsProvider = cs.GetProvider().GetOpenShift().GetResultK8SValidated()
+ case plugin.PluginNameOpenShiftConformance:
+ resultsProvider = cs.GetProvider().GetOpenShift().GetResultOCPValidated()
+ }
+
+ // extract all failed by plugins
+ currentDirectory := fmt.Sprintf("failures-%s", pluginName)
+ subdir := fmt.Sprintf("%s/%s/", path, currentDirectory)
+ if err := createDir(subdir, ignoreExistingDir); err != nil {
+ return err
+ }
+ errFailures := make([]string, 0, len(resultsProvider.Tests))
+ for k := range resultsProvider.Tests {
+ errFailures = append(errFailures, k)
+ }
+ if err := extractSaveTestErrors(subdir, resultsProvider.Tests, errFailures); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// SaveResults dumps all the processed results to disk to be used
+// in the review process.
+func (cs *ConsolidatedSummary) SaveResults(path string) error {
+
+ cs.Timers.Add("cs-save/results")
+ if err := createDir(path, true); err != nil {
+ return err
+ }
+
+ // Save the list of failures into individual files by Plugin
+ if err := cs.saveResultsPlugin(path, plugin.PluginNameKubernetesConformance); err != nil {
+ return err
+ }
+ if err := cs.saveResultsPlugin(path, plugin.PluginNameOpenShiftConformance); err != nil {
+ return err
+ }
+
+ // Extract errors details to sub directories
+ if err := cs.extractFailuresDetailsByPlugin(path, plugin.PluginNameKubernetesConformance); err != nil {
+ return err
+ }
+ if err := cs.extractFailuresDetailsByPlugin(path, plugin.PluginNameOpenShiftConformance); err != nil {
+ return err
+ }
+
+ log.Infof("#> Data Saved to directory %q", path)
+ cs.Timers.Add("cs-save/results")
+ return nil
+}
+
+// writeFileTestList saves the list of test names to a new text file
+func writeFileTestList(filename string, data []string) error {
+ fd, err := os.OpenFile(filename, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
+ if err != nil {
+ log.Fatalf("failed creating file: %s", err)
+ }
+ defer fd.Close()
+
+ writer := bufio.NewWriter(fd)
+ defer writer.Flush()
+
+ for _, line := range data {
+ _, err = writer.WriteString(line + "\n")
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// extractSaveTestErrors dumps each test's failure summary and stdout, saving them
+// to individual files.
+func extractSaveTestErrors(prefix string, items plugin.Tests, failures []string) error {
+
+ for _, line := range failures {
+ if _, ok := items[line]; ok {
+ file := fmt.Sprintf("%s%s-failure.txt", prefix, items[line].ID)
+ err := writeErrorToFile(file, items[line].Failure)
+ if err != nil {
+ log.Errorf("Error writing Failure for test: %s\n", line)
+ }
+
+ file = fmt.Sprintf("%s%s-systemOut.txt", prefix, items[line].ID)
+ err = writeErrorToFile(file, items[line].SystemOut)
+ if err != nil {
+ log.Errorf("Error writing SystemOut for test: %s\n", line)
+ }
+ }
+ }
+ return nil
+}
+
+// writeErrorToFile saves the entire buffer to an individual file.
+func writeErrorToFile(file, data string) error {
+ fd, err := os.OpenFile(file, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
+ if err != nil {
+ log.Fatalf("failed creating file: %s", err)
+ }
+ defer fd.Close()
+
+ writer := bufio.NewWriter(fd)
+ defer writer.Flush()
+
+ _, err = writer.WriteString(data)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// createDir creates the directory when it does not exist; when it already exists, it returns an error unless ignoreexisting is set.
+func createDir(path string, ignoreexisting bool) error {
+ // Saved directory must be created by must-gather extractor.
+ // TODO check cases not covered by that flow.
+ if _, err := os.Stat(path); !os.IsNotExist(err) {
+ if ignoreexisting {
+ return nil
+ }
+ return errors.New(fmt.Sprintf("directory already exists: %s", path))
+ }
+
+ if err := os.Mkdir(path, os.ModePerm); err != nil {
+ log.Errorf("ERROR: Unable to create directory [%s]: %v", path, err)
+ return err
+ }
+ return nil
+}
+
+// buildDocumentation builds the test documentation index for each conformance plugin.
+func (cs *ConsolidatedSummary) buildDocumentation() error {
+ err := cs.buildDocumentationForPlugin(plugin.PluginNameKubernetesConformance)
+ if err != nil {
+ return err
+ }
+
+ err = cs.buildDocumentationForPlugin(plugin.PluginNameOpenShiftConformance)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// buildDocumentationForPlugin builds the documentation for the test failure for each plugin.
+func (cs *ConsolidatedSummary) buildDocumentationForPlugin(pluginName string) error {
+ var (
+ ps *plugin.OPCTPluginSummary
+ version string
+ docUserBaseURL string
+ docSourceBaseURL string
+ )
+
+ switch pluginName {
+ case plugin.PluginNameKubernetesConformance:
+ ps = cs.GetProvider().GetOpenShift().GetResultK8SValidated()
+ versionFull := cs.GetProvider().GetSonobuoyCluster().APIVersion
+ reVersion := regexp.MustCompile(`^v(\d+\.\d+)`)
+ matches := reVersion.FindStringSubmatch(versionFull)
+ if len(matches) != 2 {
+ log.Warnf("Unable to extract kubernetes version to build documentation: %v [%v]", versionFull, matches)
+ return nil
+ }
+ version = matches[1]
+ docUserBaseURL = fmt.Sprintf("https://github.com/cncf/k8s-conformance/blob/master/docs/KubeConformance-%s.md", version)
+ docSourceBaseURL = fmt.Sprintf("https://raw.githubusercontent.com/cncf/k8s-conformance/master/docs/KubeConformance-%s.md", version)
+ case plugin.PluginNameOpenShiftConformance:
+ ps = cs.GetProvider().GetOpenShift().GetResultOCPValidated()
+ // OCP tests do not have documentation (TODO: check what can be used)
+ // https://docs.openshift.com/container-platform/4.13/welcome/index.html
+ // https://access.redhat.com/search/
+ docUserBaseURL = "https://github.com/openshift/origin/blob/master/test/extended/README.md"
+ docSourceBaseURL = docUserBaseURL
+ default:
+ return errors.New("Plugin not found to apply filter: Flaky")
+ }
+
+ if ps.Documentation == nil {
+ ps.Documentation = plugin.NewTestDocumentation(docUserBaseURL, docSourceBaseURL)
+ err := ps.Documentation.Load()
+ if err != nil {
+ return err
+ }
+ err = ps.Documentation.BuildIndex()
+ if err != nil {
+ return err
+ }
+ }
+
+ for _, test := range ps.Tests {
+ test.LookupDocumentation(ps.Documentation)
+ }
+
+ return nil
+}
diff --git a/internal/opct/summary/openshift.go b/internal/opct/summary/openshift.go
new file mode 100644
index 00000000..0124fd1b
--- /dev/null
+++ b/internal/opct/summary/openshift.go
@@ -0,0 +1,347 @@
+package summary
+
+import (
+ "fmt"
+ "regexp"
+ "strconv"
+ "strings"
+
+ configv1 "github.com/openshift/api/config/v1"
+ "github.com/pkg/errors"
+ "github.com/redhat-openshift-ecosystem/provider-certification-tool/internal/opct/plugin"
+ v1 "k8s.io/api/core/v1"
+)
+
+// OpenShiftSummary holds the data collected from artifacts related to OpenShift objects.
+type OpenShiftSummary struct {
+ Infrastructure *configv1.Infrastructure
+ ClusterVersion *configv1.ClusterVersion
+ ClusterOperators *configv1.ClusterOperatorList
+ ClusterNetwork *configv1.Network
+ Nodes []*Node
+
+ // Plugin Results
+ PluginResultK8sConformance *plugin.OPCTPluginSummary
+ PluginResultOCPValidated *plugin.OPCTPluginSummary
+ PluginResultConformanceUpgrade *plugin.OPCTPluginSummary
+ PluginResultArtifactsCollector *plugin.OPCTPluginSummary
+ PluginResultConformanceReplay *plugin.OPCTPluginSummary
+
+ // get from Sonobuoy metadata
+ VersionK8S string
+}
+
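+// SummaryClusterVersionOutput is the summarized ClusterVersion exposed in the report,
+// including the desired release and the relevant operator conditions.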
+type SummaryClusterVersionOutput struct {
+ Desired string `json:"desired"`
+ Previous string `json:"previous"`
+ Channel string `json:"channel"`
+ ClusterID string `json:"clusterID"`
+ OverallStatus string `json:"overallStatus"`
+ OverallStatusReason string `json:"overallStatusReason,omitempty"`
+ OverallStatusMessage string `json:"overallStatusMessage,omitempty"`
+ CondAvailable string `json:"conditionAvailable,omitempty"`
+ CondFailing string `json:"conditionFailing,omitempty"`
+ CondProgressing string `json:"conditionProgressing,omitempty"`
+ CondProgressingMessage string `json:"conditionProgressingMessage,omitempty"`
+ CondRetrievedUpdates string `json:"conditionUpdates,omitempty"`
+ CondImplicitlyEnabledCapabilities string `json:"conditionImplicitlyEnabledCapabilities,omitempty"`
+ CondReleaseAccepted string `json:"conditionReleaseAccepted,omitempty"`
+}
+
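+// SummaryClusterOperatorOutput aggregates ClusterOperator conditions, counting
+// operators reporting Available, Progressing, and Degraded.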
+type SummaryClusterOperatorOutput struct {
+ CountAvailable uint64
+ CountProgressing uint64
+ CountDegraded uint64
+}
+
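+// Aliases for the OpenShift config/v1 types consumed by the report.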
+type SummaryOpenShiftInfrastructureV1 = configv1.Infrastructure
+type SummaryOpenShiftClusterNetworkV1 = configv1.Network
+type SummaryOpenShiftNetworkV1 = configv1.Network
+
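+// Node is a cleaned representation of a Kubernetes node used in the report.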
+type Node struct {
+ Hostname string `json:"hostname,omitempty"`
+ Architecture string `json:"architecture,omitempty"`
+ OperatingSystem string `json:"os,omitempty"`
+ OperatingSystemId string `json:"osId,omitempty"`
+ CreationDate string `json:"creationDate,omitempty"`
+ NodeRoles string `json:"nodeRoles,omitempty"`
+ TaintsNodeRole string `json:"taints,omitempty"`
+ CapacityCPU string `json:"capacityCpu,omitempty"`
+ CapacityStorageGB string `json:"capacityStorageGB,omitempty"`
+ CapacityMemGB string `json:"capacityMemGB,omitempty"`
+ Labels map[string]string `json:"labels,omitempty"`
+ ControlPlane bool `json:"controlPlane,omitempty"`
+}
+
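+// NewOpenShiftSummary creates an empty OpenShiftSummary.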
+func NewOpenShiftSummary() *OpenShiftSummary {
+ return &OpenShiftSummary{}
+}
+
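+// SetInfrastructure stores the first Infrastructure object found in the result list.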
+func (os *OpenShiftSummary) SetInfrastructure(cr *configv1.InfrastructureList) error {
+ if len(cr.Items) == 0 {
+ return errors.New("Unable to find result Items to set Infrastructures")
+ }
+ os.Infrastructure = &cr.Items[0]
+ return nil
+}
+
+func (os *OpenShiftSummary) GetInfrastructure() (*SummaryOpenShiftInfrastructureV1, error) {
+ if os.Infrastructure == nil {
+ return &SummaryOpenShiftInfrastructureV1{}, nil
+ }
+ return os.Infrastructure, nil
+}
+
+func (os *OpenShiftSummary) GetInfrastructurePlatformType() string {
+ if os.Infrastructure == nil || os.Infrastructure.Status.PlatformStatus == nil {
+ return "None"
+ }
+ return string(os.Infrastructure.Status.PlatformStatus.Type)
+}
+
+func (os *OpenShiftSummary) GetClusterNetwork() (*SummaryOpenShiftClusterNetworkV1, error) {
+ if os.ClusterNetwork == nil {
+ return &SummaryOpenShiftClusterNetworkV1{}, nil
+ }
+ return os.ClusterNetwork, nil
+}
+
+func (os *OpenShiftSummary) SetClusterVersion(cr *configv1.ClusterVersionList) error {
+ if len(cr.Items) == 0 {
+ return errors.New("Unable to find result Items to set Infrastructures")
+ }
+ os.ClusterVersion = &cr.Items[0]
+ return nil
+}
+
+func (os *OpenShiftSummary) GetClusterVersion() (*SummaryClusterVersionOutput, error) {
+ if os.ClusterVersion == nil {
+ return &SummaryClusterVersionOutput{}, nil
+ }
+ resp := SummaryClusterVersionOutput{
+ Desired: os.ClusterVersion.Status.Desired.Version,
+ Channel: os.ClusterVersion.Spec.Channel,
+ ClusterID: string(os.ClusterVersion.Spec.ClusterID),
+ }
+ for _, condition := range os.ClusterVersion.Status.Conditions {
+ if condition.Type == configv1.OperatorProgressing {
+ resp.CondProgressing = string(condition.Status)
+ resp.CondProgressingMessage = condition.Message
+ if string(condition.Status) == "True" {
+ resp.OverallStatusReason = fmt.Sprintf("%sProgressing ", resp.OverallStatusReason)
+ }
+ continue
+ }
+ if string(condition.Type) == "ImplicitlyEnabledCapabilities" {
+ resp.CondImplicitlyEnabledCapabilities = string(condition.Status)
+ continue
+ }
+ if string(condition.Type) == "ReleaseAccepted" {
+ resp.CondReleaseAccepted = string(condition.Status)
+ continue
+ }
+ if string(condition.Type) == "Available" {
+ resp.CondAvailable = string(condition.Status)
+ if string(condition.Status) == "False" {
+ resp.OverallStatus = "Unavailable"
+ resp.OverallStatusReason = fmt.Sprintf("%sAvailable ", resp.OverallStatusReason)
+ resp.OverallStatusMessage = condition.Message
+ } else {
+ resp.OverallStatus = string(condition.Type)
+ }
+ continue
+ }
+ if string(condition.Type) == "Failing" {
+ resp.CondFailing = string(condition.Status)
+ if string(condition.Status) == "True" {
+ resp.OverallStatus = string(condition.Type)
+ resp.OverallStatusReason = fmt.Sprintf("%sFailing ", resp.OverallStatusReason)
+ resp.OverallStatusMessage = condition.Message
+ }
+ continue
+ }
+ if string(condition.Type) == "RetrievedUpdates" {
+ resp.CondRetrievedUpdates = string(condition.Status)
+ continue
+ }
+ }
+ // TODO navigate through history and fill Previous
+ resp.Previous = "TODO"
+ return &resp, nil
+}
+
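+// GetClusterVersionXY returns the desired cluster version in the X.Y (major.minor) format.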
+func (os *OpenShiftSummary) GetClusterVersionXY() (string, error) {
+ out, err := os.GetClusterVersion()
+ if err != nil {
+ return "", err
+ }
+ re := regexp.MustCompile(`^(\d+\.\d+)`)
+ match := re.FindStringSubmatch(out.Desired)
+ if len(match) != 2 {
+ return "", fmt.Errorf("unable to extract the X.Y version from %q", out.Desired)
+ }
+ return match[1], nil
+}
+
+func (os *OpenShiftSummary) SetClusterOperators(cr *configv1.ClusterOperatorList) error {
+ if len(cr.Items) == 0 {
+ return errors.New("Unable to find result Items to set ClusterOperators")
+ }
+ os.ClusterOperators = cr
+ return nil
+}
+
+func (os *OpenShiftSummary) GetClusterOperator() (*SummaryClusterOperatorOutput, error) {
+ out := SummaryClusterOperatorOutput{}
+ if os.ClusterOperators == nil {
+ return &out, nil
+ }
+ for _, co := range os.ClusterOperators.Items {
+ for _, condition := range co.Status.Conditions {
+ switch condition.Type {
+ case configv1.OperatorAvailable:
+ if condition.Status == configv1.ConditionTrue {
+ out.CountAvailable += 1
+ }
+ case configv1.OperatorProgressing:
+ if condition.Status == configv1.ConditionTrue {
+ out.CountProgressing += 1
+ }
+ case configv1.OperatorDegraded:
+ if condition.Status == configv1.ConditionTrue {
+ out.CountDegraded += 1
+ }
+ }
+ }
+ }
+ return &out, nil
+}
+
+func (os *OpenShiftSummary) SetClusterNetwork(cn *configv1.NetworkList) error {
+ if len(cn.Items) == 0 {
+ return errors.New("Unable to find result Items to set ClusterNetwork")
+ }
+ os.ClusterNetwork = &cn.Items[0]
+ return nil
+}
+
+func (os *OpenShiftSummary) GetNodes() []*Node {
+ return os.Nodes
+}
+
+func (os *OpenShiftSummary) SetNodes(nodes *v1.NodeList) error {
+ if len(nodes.Items) == 0 {
+ return errors.New("Unable to find result Items to set Nodes")
+ }
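+ // sizeToHuman converts a Ki-suffixed capacity value to GiB with two decimal places.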
+ sizeToHuman := func(size string) string {
+ sizeNumber := strings.Split(size, "Ki")[0]
+ sizeInteger, err := strconv.Atoi(sizeNumber)
+ if err != nil {
+ return size
+ }
+ return fmt.Sprintf("%.2f", float64((sizeInteger/1024)/1024))
+ }
+ for _, node := range nodes.Items {
+ // transforming complex k8s type to cleaned structure.
+ customNode := Node{
+ // Hostname: node.Status.Addresses,
+ CapacityCPU: node.Status.Capacity.Cpu().String(),
+ CapacityStorageGB: sizeToHuman(node.Status.Capacity.StorageEphemeral().String()),
+ CapacityMemGB: sizeToHuman(node.Status.Capacity.Memory().String()),
+ CreationDate: node.GetObjectMeta().GetCreationTimestamp().String(),
+ Labels: make(map[string]string),
+ }
+ // parse labels
+ for label, value := range node.GetObjectMeta().GetLabels() {
+ switch label {
+ case "kubernetes.io/os":
+ customNode.OperatingSystem = value
+ continue
+ case "kubernetes.io/hostname":
+ customNode.Hostname = value
+ continue
+ case "kubernetes.io/arch":
+ customNode.Architecture = value
+ continue
+ case "node.openshift.io/os_id":
+ customNode.OperatingSystemId = value
+ continue
+ case "topology.kubernetes.io/zone":
+ customNode.Labels["topology.kubernetes.io/zone"] = value
+ continue
+ }
+ if strings.HasPrefix(label, "node-role.kubernetes.io") {
+ if roleArr := strings.Split(label, "node-role.kubernetes.io/"); len(roleArr) == 2 {
+ if roleArr[1] == "master" || roleArr[1] == "control-plane" {
+ customNode.ControlPlane = true
+ }
+ customNode.NodeRoles += fmt.Sprintf("%s ", roleArr[1])
+ continue
+ }
+ }
+ }
+ // parse taints
+ for _, taint := range node.Spec.Taints {
+ if strings.HasPrefix(taint.Key, "node-role") {
+ customNode.TaintsNodeRole += fmt.Sprintf("%s:%s ", taint.Key, taint.Effect)
+ }
+ }
+ os.Nodes = append(os.Nodes, &customNode)
+ }
+ return nil
+}
+
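+// SetPluginResult maps the parsed plugin summary to the corresponding OpenShiftSummary
+// field, translating legacy plugin names to the current ones.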
+func (os *OpenShiftSummary) SetPluginResult(in *plugin.OPCTPluginSummary) error {
+ switch in.Name {
+ // Kubernetes Conformance plugin
+ case plugin.PluginNameKubernetesConformance:
+ os.PluginResultK8sConformance = in
+ case plugin.PluginOldNameKubernetesConformance:
+ in.NameAlias = in.Name
+ in.Name = plugin.PluginNameKubernetesConformance
+ os.PluginResultK8sConformance = in
+
+ // OpenShift Conformance plugin
+ case plugin.PluginNameOpenShiftConformance:
+ os.PluginResultOCPValidated = in
+ case plugin.PluginOldNameOpenShiftConformance:
+ in.NameAlias = in.Name
+ in.Name = plugin.PluginNameOpenShiftConformance
+ os.PluginResultOCPValidated = in
+
+ // Other plugins
+ case plugin.PluginNameOpenShiftUpgrade:
+ os.PluginResultConformanceUpgrade = in
+ case plugin.PluginNameArtifactsCollector:
+ os.PluginResultArtifactsCollector = in
+ case plugin.PluginNameConformanceReplay:
+ os.PluginResultConformanceReplay = in
+ default:
+ // return fmt.Errorf("unable to Set Plugin results: Plugin not found: %s", in.Name)
+ return nil
+ }
+ return nil
+}
+
+func (os *OpenShiftSummary) GetResultOCPValidated() *plugin.OPCTPluginSummary {
+ return os.PluginResultOCPValidated
+}
+
+func (os *OpenShiftSummary) GetResultK8SValidated() *plugin.OPCTPluginSummary {
+ return os.PluginResultK8sConformance
+}
+
+func (os *OpenShiftSummary) GetResultConformanceUpgrade() *plugin.OPCTPluginSummary {
+ if os.PluginResultConformanceUpgrade == nil {
+ return &plugin.OPCTPluginSummary{}
+ }
+ return os.PluginResultConformanceUpgrade
+}
+
+func (os *OpenShiftSummary) GetResultArtifactsCollector() *plugin.OPCTPluginSummary {
+ if os.PluginResultArtifactsCollector == nil {
+ return &plugin.OPCTPluginSummary{}
+ }
+ return os.PluginResultArtifactsCollector
+}
+
+func (os *OpenShiftSummary) GetResultConformanceReplay() *plugin.OPCTPluginSummary {
+ if os.PluginResultConformanceReplay == nil {
+ return &plugin.OPCTPluginSummary{}
+ }
+ return os.PluginResultConformanceReplay
+}
diff --git a/internal/opct/summary/result.go b/internal/opct/summary/result.go
new file mode 100644
index 00000000..ddcb5459
--- /dev/null
+++ b/internal/opct/summary/result.go
@@ -0,0 +1,553 @@
+package summary
+
+import (
+ "bytes"
+ "compress/gzip"
+ "fmt"
+ "os"
+ "path/filepath"
+ "regexp"
+ "strings"
+
+ "github.com/pkg/errors"
+ "github.com/redhat-openshift-ecosystem/provider-certification-tool/internal/opct/archive"
+ "github.com/redhat-openshift-ecosystem/provider-certification-tool/internal/opct/plugin"
+ "github.com/redhat-openshift-ecosystem/provider-certification-tool/internal/openshift/mustgather"
+ "github.com/redhat-openshift-ecosystem/provider-certification-tool/internal/openshift/mustgathermetrics"
+ log "github.com/sirupsen/logrus"
+ v1 "k8s.io/api/core/v1"
+
+ configv1 "github.com/openshift/api/config/v1"
+ "github.com/vmware-tanzu/sonobuoy/pkg/client/results"
+ "github.com/vmware-tanzu/sonobuoy/pkg/discovery"
+)
+
+const (
+ ResultSourceNameProvider = "provider"
+ ResultSourceNameBaseline = "baseline"
+)
+
+// ResultSummary holds the reference to the results archive and the data parsed from it.
+type ResultSummary struct {
+ Name string
+ Archive string
+ Sonobuoy *SonobuoySummary
+ OpenShift *OpenShiftSummary
+ Suites *OpenshiftTestsSuites
+
+ // isConformance indicates if it is a conformance plugin when true.
+ isConformance bool
+
+ // reader is the results reader for the archive tarball.
+ reader *results.Reader
+
+ // SavePath is the target path to save the extracted report.
+ SavePath string
+
+ // MustGather stores the extracted items from must-gather.
+ MustGather *mustgather.MustGather
+
+ HasCAMGI bool
+ HasMetrics bool
+ HasInstallConfig bool
+
+ // Metrics stores the extracted items from must-gather metrics.
+ Metrics *mustgathermetrics.MustGatherMetrics
+
+ // Plugin known failures
+ TestSuiteKnownFailures []string
+
+ // BaselineAPI holds the data fetched from the baseline API.
+ BaselineAPI string
+}
+
+// HasValidResults checks if the result instance has a valid archive to be processed,
+// returning true if it's valid.
+// Results are invalid when the baseline archive was not set in the CLI arguments,
+// making the 'process' command ignore the related comparisons and filters.
+func (rs *ResultSummary) HasValidResults() bool {
+ if rs.Archive == "" && rs.Name == ResultSourceNameBaseline {
+ return false
+ }
+ return true
+}
+
+// Populate opens the archive and processes the files to populate the summary structures.
+func (rs *ResultSummary) Populate() error {
+ if !rs.HasValidResults() {
+ // log.Warnf("Ignoring to populate source '%s'. Missing or invalid baseline artifact (-b): %s", rs.Name, rs.Archive)
+ return nil
+ }
+
+ cleanup, err := rs.openReader()
+ defer cleanup()
+ if err != nil {
+ return errors.Wrapf(err, "unable to open reader for file '%s'", rs.Archive)
+ }
+
+ // Report on all plugins or the specified one.
+ plugins, err := rs.getPluginList()
+ if err != nil {
+ return errors.Wrapf(err, "unable to determine plugins to report on")
+ }
+ if len(plugins) == 0 {
+ return fmt.Errorf("no plugins specified by either the --plugin flag or tarball metadata")
+ }
+
+ var lastErr error
+ for _, pluginName := range plugins {
+ log.Infof("Processing Plugin %s...", pluginName)
+ switch pluginName {
+ case plugin.PluginNameKubernetesConformance, plugin.PluginNameOpenShiftConformance:
+ rs.isConformance = true
+ }
+
+ log.Debugf("Processing results/Populating/Processing Plugin/%s", pluginName)
+ if err := rs.processPlugin(pluginName); err != nil {
+ log.Errorf("Processing results/Populating/Processing Plugin/%s: %v", pluginName, err)
+ lastErr = err
+ }
+ }
+
+ log.Info("Processing results...")
+ cleanup, err = rs.openReader()
+ defer cleanup()
+ if err != nil {
+ return err
+ }
+
+ log.Debugf("Processing results/Populating/Populating Summary")
+ err = rs.extractAndLoadData()
+ if err != nil {
+ lastErr = err
+ }
+
+ return lastErr
+}
+
+// GetOpenShift returns the OpenShift objects parsed from results
+func (rs *ResultSummary) GetOpenShift() *OpenShiftSummary {
+ if !rs.HasValidResults() {
+ return &OpenShiftSummary{}
+ }
+ return rs.OpenShift
+}
+
+// GetSonobuoy returns the Sonobuoy objects parsed from results
+func (rs *ResultSummary) GetSonobuoy() *SonobuoySummary {
+ if !rs.HasValidResults() {
+ return &SonobuoySummary{}
+ }
+ return rs.Sonobuoy
+}
+
+// GetSonobuoyCluster returns the SonobuoyCluster object parsed from results
+func (rs *ResultSummary) GetSonobuoyCluster() *discovery.ClusterSummary {
+ if !rs.HasValidResults() {
+ return &discovery.ClusterSummary{}
+ }
+ return rs.Sonobuoy.Cluster
+}
+
+// GetSuites returns the Conformance suites collected from results
+func (rs *ResultSummary) GetSuites() *OpenshiftTestsSuites {
+ return rs.Suites
+}
+
+// getPluginList extracts the plugin list from the archive reader.
+func (rs *ResultSummary) getPluginList() ([]string, error) {
+ runInfo := discovery.RunInfo{}
+ err := rs.reader.WalkFiles(func(path string, info os.FileInfo, err error) error {
+ return results.ExtractFileIntoStruct(rs.reader.RunInfoFile(), path, info, &runInfo)
+ })
+
+ return runInfo.LoadedPlugins, errors.Wrap(err, "finding plugin list")
+}
+
+// openReader opens the archive and sets rs.reader, returning a cleanup function to close the
+// underlying readers. The cleanup function is guaranteed to never be nil.
+func (rs *ResultSummary) openReader() (func(), error) {
+ filepath := rs.Archive
+ fi, err := os.Stat(filepath)
+ if err != nil {
+ rs.reader = nil
+ return func() {}, err
+ }
+ // When results is a directory
+ if fi.IsDir() {
+ rs.reader = results.NewReaderFromDir(filepath)
+ return func() {}, nil
+ }
+ f, err := os.Open(filepath)
+ if err != nil {
+ rs.reader = nil
+ return func() {}, errors.Wrapf(err, "could not open sonobuoy archive: %v", filepath)
+ }
+
+ gzr, err := gzip.NewReader(f)
+ if err != nil {
+ rs.reader = nil
+ return func() { f.Close() }, errors.Wrap(err, "could not make a gzip reader")
+ }
+
+ rs.reader = results.NewReaderWithVersion(gzr, results.VersionTen)
+ return func() { gzr.Close(); f.Close() }, nil
+}
+
+// processPlugin receives the plugin name and loads the result file to be processed.
+func (rs *ResultSummary) processPlugin(pluginName string) error {
+
+ // TODO: review the fd usage for tarball and file
+ cleanup, err := rs.openReader()
+ defer cleanup()
+ if err != nil {
+ return err
+ }
+
+ obj, err := rs.reader.PluginResultsItem(pluginName)
+ if err != nil {
+ return err
+ }
+
+ if err = rs.processPluginResult(obj); err != nil {
+ return err
+ }
+ return nil
+}
+
+// processPluginResult receives the plugin results object and parses it into the summary.
+func (rs *ResultSummary) processPluginResult(obj *results.Item) error {
+ statusCounts := map[string]int{}
+ var tests []results.Item
+ var failures []string
+
+ statusCounts, tests = walkForSummary(obj, statusCounts, tests)
+
+ total := 0
+ for _, v := range statusCounts {
+ total += v
+ }
+
+ testItems := make(map[string]*plugin.TestItem, len(tests))
+ for idx, item := range tests {
+ testItems[item.Name] = &plugin.TestItem{
+ Name: item.Name,
+ ID: fmt.Sprintf("%s-%d", obj.Name, idx),
+ State: "processed",
+ }
+ if item.Status != "" {
+ testItems[item.Name].Status = item.Status
+ }
+ switch item.Status {
+ case results.StatusFailed, results.StatusTimeout:
+ if _, ok := item.Details["failure"]; ok {
+ testItems[item.Name].Failure = item.Details["failure"].(string)
+ }
+ if _, ok := item.Details["system-out"]; ok {
+ testItems[item.Name].SystemOut = item.Details["system-out"].(string)
+ }
+ if _, ok := item.Details["offset"]; ok {
+ testItems[item.Name].Offset = item.Details["offset"].(int)
+ }
+ failures = append(failures, item.Name)
+ testItems[item.Name].UpdateErrorCounter()
+ }
+ }
+
+ if err := rs.GetOpenShift().SetPluginResult(&plugin.OPCTPluginSummary{
+ Name: obj.Name,
+ Status: obj.Status,
+ Total: int64(total),
+ Passed: int64(statusCounts[results.StatusPassed]),
+ Failed: int64(statusCounts[results.StatusFailed] + statusCounts[results.StatusTimeout]),
+ Timeout: int64(statusCounts[results.StatusTimeout]),
+ Skipped: int64(statusCounts[results.StatusSkipped]),
+ FailedList: failures,
+ Tests: testItems,
+ }); err != nil {
+ return err
+ }
+
+ delete(statusCounts, results.StatusPassed)
+ delete(statusCounts, results.StatusFailed)
+ delete(statusCounts, results.StatusTimeout)
+ delete(statusCounts, results.StatusSkipped)
+
+ return nil
+}
+
+// extractAndLoadData loads all files from the archive reader and extracts the desired
+// information into the ResultSummary.
+func (rs *ResultSummary) extractAndLoadData() error {
+	// Paths to files inside the Sonobuoy tarball
+ const (
+ // OpenShift objects files in archive collected by aggregator server
+ pathResourceInfrastructures = "resources/cluster/config.openshift.io_v1_infrastructures.json"
+ pathResourceClusterVersions = "resources/cluster/config.openshift.io_v1_clusterversions.json"
+ pathResourceClusterOperators = "resources/cluster/config.openshift.io_v1_clusteroperators.json"
+ pathResourceClusterNetwork = "resources/cluster/config.openshift.io_v1_networks.json"
+
+	// Kubernetes resource locations in the archive file
+ pathResourceNodes = "resources/cluster/core_v1_nodes.json"
+
+ // Sonobuoy files in archive
+ // Sonobuoy metadata files
+ pathMetaRun = "meta/run.log"
+ pathMetaConfig = "meta/config.json"
+
+ // Sonobuoy plugin files
+ pathPluginDefinition10 = "plugins/10-openshift-kube-conformance/definition.json"
+ pathPluginDefinition20 = "plugins/20-openshift-conformance-validated/definition.json"
+
+ pathResourceNSOpctConfigMap = "resources/ns/openshift-provider-certification/core_v1_configmaps.json"
+ pathResourceNsKubeConfigMap = "resources/ns/kube-system/core_v1_configmaps.json"
+
+ // artifacts collector locations on archive file
+ pathPluginArtifactTestsK8S = "plugins/99-openshift-artifacts-collector/results/global/artifacts_e2e-tests_openshift-kube-conformance.txt"
+ pathPluginArtifactTestsOCP = "plugins/99-openshift-artifacts-collector/results/global/artifacts_e2e-tests_openshift-conformance-validated.txt"
+ pathPluginArtifactTestsUpgrade = "plugins/99-openshift-artifacts-collector/results/global/artifacts_e2e-tests_openshift-cluster-upgrade.txt"
+ pathPluginArtifactTestsReplay = "plugins/99-openshift-artifacts-collector/results/global/artifacts_e2e-tests_openshift-tests-replay.txt"
+ pathCAMIG = "plugins/99-openshift-artifacts-collector/results/global/artifacts_must-gather_camgi.html"
+ pathMetrics = "plugins/99-openshift-artifacts-collector/results/global/artifacts_must-gather-metrics.tar.xz"
+
+ // TODO: the following file is used to keep compatibility with versions older than v0.3
+ pathPluginArtifactTestsOCP2 = "plugins/99-openshift-artifacts-collector/results/global/artifacts_e2e-openshift-conformance.txt"
+ pathMustGather = "plugins/99-openshift-artifacts-collector/results/global/artifacts_must-gather.tar.xz"
+ )
+
+ // Data bindings
+ mustGather := bytes.Buffer{}
+ saveToFlagEnabled := rs.SavePath != ""
+ testsSuiteK8S := bytes.Buffer{}
+ testsSuiteOCP := bytes.Buffer{}
+
+ CAMGI := bytes.Buffer{}
+ MetricsData := bytes.Buffer{}
+
+ metaRunLogs := bytes.Buffer{}
+ metaConfig := archive.MetaConfigSonobuoy{}
+
+ sbCluster := discovery.ClusterSummary{}
+ ocpInfra := configv1.InfrastructureList{}
+ ocpCV := configv1.ClusterVersionList{}
+ ocpCO := configv1.ClusterOperatorList{}
+ ocpCN := configv1.NetworkList{}
+ opctConfigMapList := v1.ConfigMapList{}
+ kubeSystemConfigMapList := v1.ConfigMapList{}
+ nodes := v1.NodeList{}
+
+ pluginDef10 := SonobuoyPluginDefinition{}
+ pluginDef20 := SonobuoyPluginDefinition{}
+
+ if rs.SavePath != "" {
+ log.Debugf("Creating output directory %s...", rs.SavePath)
+ if err := os.MkdirAll(rs.SavePath, os.ModePerm); err != nil {
+ log.Errorf("Unable to create directory %s: %v", rs.SavePath, err)
+ }
+ }
+
+ patternPluginLogs := `^podlogs\/.*\/sonobuoy-.*-job-.*\/logs\/plugin.txt`
+ rePluginLogs := regexp.MustCompile(patternPluginLogs)
+
+ // Iterate over the archive to get the items as an object to build the Summary report.
+ log.Debugf("Processing results/Populating/Populating Summary/Extracting")
+ err := rs.reader.WalkFiles(func(path string, info os.FileInfo, e error) error {
+		// Extract and unmarshal the files into the structures
+ if err := results.ExtractFileIntoStruct(results.ClusterHealthFilePath(), path, info, &sbCluster); err != nil {
+ return errors.Wrap(err, fmt.Sprintf("extracting file '%s': %v", path, err))
+ }
+ if err := results.ExtractFileIntoStruct(pathResourceInfrastructures, path, info, &ocpInfra); err != nil {
+ return errors.Wrap(err, fmt.Sprintf("extracting file '%s': %v", path, err))
+ }
+ if err := results.ExtractFileIntoStruct(pathResourceClusterVersions, path, info, &ocpCV); err != nil {
+ return errors.Wrap(err, fmt.Sprintf("extracting file '%s': %v", path, err))
+ }
+ if err := results.ExtractFileIntoStruct(pathResourceClusterOperators, path, info, &ocpCO); err != nil {
+ return errors.Wrap(err, fmt.Sprintf("extracting file '%s': %v", path, err))
+ }
+ if err := results.ExtractFileIntoStruct(pathResourceClusterNetwork, path, info, &ocpCN); err != nil {
+ return errors.Wrap(err, fmt.Sprintf("extracting file '%s': %v", path, err))
+ }
+ if err := results.ExtractFileIntoStruct(pathPluginDefinition10, path, info, &pluginDef10); err != nil {
+ return errors.Wrap(err, fmt.Sprintf("extracting file '%s': %v", path, err))
+ }
+ if err := results.ExtractFileIntoStruct(pathPluginDefinition20, path, info, &pluginDef20); err != nil {
+ return errors.Wrap(err, fmt.Sprintf("extracting file '%s': %v", path, err))
+ }
+ if err := results.ExtractFileIntoStruct(pathMetaConfig, path, info, &metaConfig); err != nil {
+ return errors.Wrap(err, fmt.Sprintf("extracting file '%s': %v", path, err))
+ }
+ if err := results.ExtractFileIntoStruct(pathResourceNSOpctConfigMap, path, info, &opctConfigMapList); err != nil {
+ return errors.Wrap(err, fmt.Sprintf("extracting file '%s': %v", path, err))
+ }
+ if err := results.ExtractFileIntoStruct(pathResourceNodes, path, info, &nodes); err != nil {
+ return errors.Wrap(err, fmt.Sprintf("extracting file '%s': %v", path, err))
+ }
+ if err := results.ExtractFileIntoStruct(pathResourceNsKubeConfigMap, path, info, &kubeSystemConfigMapList); err != nil {
+ return errors.Wrap(err, fmt.Sprintf("extracting file '%s': %v", path, err))
+ }
+ // Extract raw files
+ if warn := results.ExtractBytes(pathPluginArtifactTestsK8S, path, info, &testsSuiteK8S); warn != nil {
+ log.Warnf("Unable to load file %s: %v\n", pathPluginArtifactTestsK8S, warn)
+ return errors.Wrap(warn, fmt.Sprintf("extracting file '%s': %v", path, warn))
+ }
+ if warn := results.ExtractBytes(pathPluginArtifactTestsOCP, path, info, &testsSuiteOCP); warn != nil {
+ log.Warnf("Unable to load file %s: %v\n", pathPluginArtifactTestsOCP, warn)
+ return errors.Wrap(warn, fmt.Sprintf("extracting file '%s': %v", path, warn))
+ }
+ if warn := results.ExtractBytes(pathPluginArtifactTestsOCP2, path, info, &testsSuiteOCP); warn != nil {
+ log.Warnf("Unable to load file %s: %v\n", pathPluginArtifactTestsOCP2, warn)
+ return errors.Wrap(warn, fmt.Sprintf("extracting file '%s': %v", path, warn))
+ }
+ if warn := results.ExtractBytes(pathMetaRun, path, info, &metaRunLogs); warn != nil {
+ log.Warnf("Unable to load file %s: %v\n", pathMetaRun, warn)
+ return errors.Wrap(warn, fmt.Sprintf("extracting file '%s': %v", path, warn))
+ }
+ if warn := results.ExtractBytes(pathMustGather, path, info, &mustGather); warn != nil {
+ log.Warnf("Unable to load file %s: %v\n", pathMustGather, warn)
+ return errors.Wrap(warn, fmt.Sprintf("extracting file '%s': %v", path, warn))
+ }
+ if saveToFlagEnabled {
+ if warn := results.ExtractBytes(pathCAMIG, path, info, &CAMGI); warn != nil {
+ log.Warnf("Unable to load file %s: %v\n", pathCAMIG, warn)
+ return errors.Wrap(warn, fmt.Sprintf("extracting file '%s': %v", path, warn))
+ }
+ if warn := results.ExtractBytes(pathMetrics, path, info, &MetricsData); warn != nil {
+ log.Warnf("Unable to load file %s: %v\n", pathCAMIG, warn)
+ return errors.Wrap(warn, fmt.Sprintf("extracting file '%s': %v", path, warn))
+ }
+ // extract podLogs, container plugin
+ if rePluginLogs.MatchString(path) {
+ var raw bytes.Buffer
+ if warn := results.ExtractBytes(path, path, info, &raw); warn != nil {
+ log.Warnf("Unable to load plugin log %s: %v\n", path, warn)
+ return errors.Wrap(warn, fmt.Sprintf("extracting file '%s': %v", path, warn))
+ }
+ prefix := strings.Split(path, "-job-")
+ if len(prefix) != 2 {
+ log.Warnf("Unable to read podLog prefix for path: %s\n", path)
+ return nil
+ }
+ filepath := strings.Split(prefix[0], "/")
+ if len(filepath) <= 0 {
+ log.Warnf("Unable to read podLog file for path: %s\n", path)
+ return nil
+ }
+ dest := fmt.Sprintf("%s/log-%s-plugin.txt", rs.SavePath, filepath[len(filepath)-1])
+ err := os.WriteFile(dest, raw.Bytes(), 0644)
+ if err != nil {
+ log.Errorf("Processing results/Populating/Populating Summary/Extracting/podLogs/plugins: %v", err)
+ return nil
+ }
+ }
+ }
+ return e
+ })
+ if err != nil {
+ log.Warnf("Processing results/Populating/Populating Summary/Extracting/result: %v", err)
+ }
+
+ log.Debugf("Processing results/Populating/Populating Summary/Processing")
+ if err := rs.GetSonobuoy().SetCluster(&sbCluster); err != nil {
+ log.Warnf("Processing results/Populating/Populating Summary/Processing/Sonobuoy: %v", err)
+ }
+ if err := rs.GetOpenShift().SetInfrastructure(&ocpInfra); err != nil {
+ log.Warnf("Processing results/Populating/Populating Summary/Processing/Object/Infrastructure: %v", err)
+ }
+ if err := rs.GetOpenShift().SetClusterVersion(&ocpCV); err != nil {
+ log.Warnf("Processing results/Populating/Populating Summary/Processing/Object/Version: %v", err)
+ }
+ if err := rs.GetOpenShift().SetClusterOperators(&ocpCO); err != nil {
+ log.Warnf("Processing results/Populating/Populating Summary/Processing/Object/Operators: %v", err)
+ }
+ if err := rs.GetOpenShift().SetClusterNetwork(&ocpCN); err != nil {
+ log.Warnf("Processing results/Populating/Populating Summary/Processing/Object/Network: %v", err)
+ }
+ if err := rs.GetOpenShift().SetNodes(&nodes); err != nil {
+ log.Warnf("Processing results/Populating/Populating Summary/Processing/Object/Nodes: %v", err)
+ }
+ if err := rs.Suites.KubernetesConformance.Load(pathPluginArtifactTestsK8S, &testsSuiteK8S); err != nil {
+ log.Warnf("Processing results/Populating/Populating Summary/Processing/Plugin/kube: %v", err)
+ }
+ if err := rs.Suites.OpenshiftConformance.Load(pathPluginArtifactTestsOCP, &testsSuiteOCP); err != nil {
+ log.Warnf("Processing results/Populating/Populating Summary/Processing/Plugin/openshift: %v", err)
+ }
+ rs.GetSonobuoy().SetPluginDefinition(plugin.PluginNameKubernetesConformance, &pluginDef10)
+ rs.GetSonobuoy().SetPluginDefinition(plugin.PluginNameOpenShiftConformance, &pluginDef20)
+
+ rs.GetSonobuoy().ParseMetaRunlogs(&metaRunLogs)
+ rs.GetSonobuoy().ParseMetaConfig(&metaConfig)
+ rs.GetSonobuoy().ParseOpctConfigMap(&opctConfigMapList)
+
+	// TODO: the must-gather parser is consuming more resources than expected; it needs
+	// to be reviewed, and the parsers and queue handlers refactored.
+ log.Debugf("Processing results/Populating/Populating Summary/Processing/MustGather")
+ rs.MustGather = mustgather.NewMustGather(fmt.Sprintf("%s/must-gather", rs.SavePath), saveToFlagEnabled)
+ if err := rs.MustGather.Process(&mustGather); err != nil {
+ log.Errorf("Processing results/Populating/Populating Summary/Processing/MustGather: %v", err)
+ } else {
+ log.Debugf("Processing results/Populating/Populating Summary/Processing/MustGather/CalculatingErrors")
+ rs.MustGather.AggregateCounters()
+ }
+
+ if saveToFlagEnabled {
+ if len(CAMGI.Bytes()) > 0 {
+ err = os.WriteFile(fmt.Sprintf("%s/%s", rs.SavePath, filepath.Base(pathCAMIG)), CAMGI.Bytes(), 0644)
+ if err != nil {
+ log.Errorf("Processing results/Populating/Populating Summary/Processing/CAMGI: %v", err)
+ } else {
+ rs.HasCAMGI = true
+ }
+ } else {
+ log.Error("Processing results/Populating/Populating Summary/Processing/CAMGI: Not Found")
+ }
+ if len(MetricsData.Bytes()) > 0 {
+ rs.Metrics, err = mustgathermetrics.NewMustGatherMetrics(rs.SavePath+"/metrics", pathMetrics, "/metrics", &MetricsData)
+ if err != nil {
+ log.Errorf("Processing results/Populating/Populating Summary/Processing/MetricsData: %v", err)
+ } else {
+ err := rs.Metrics.Process()
+ if err != nil {
+ log.Errorf("Processing MetricsData: %v", err)
+ }
+ rs.HasMetrics = true
+ }
+ } else {
+ log.Error("Processing results/Populating/Populating Summary/Processing/MetricsData: Not Found")
+ }
+ // extract install-config
+		if len(kubeSystemConfigMapList.Items) > 0 {
+ for _, config := range kubeSystemConfigMapList.Items {
+ if config.ObjectMeta.Name == "cluster-config-v1" {
+ dest := fmt.Sprintf("%s/install-config.txt", rs.SavePath)
+ err := os.WriteFile(dest, []byte(config.Data["install-config"]), 0644)
+ if err != nil {
+ log.Errorf("Processing results/Populating/Populating Summary/Extracting/install-config: %v", err)
+ }
+ rs.HasInstallConfig = true
+ }
+ }
+ }
+ }
+ return nil
+}
+
+// walkForSummary recursively walks through the result items, accumulating the
+// status counters and collecting the leaf test items.
+func walkForSummary(result *results.Item, statusCounts map[string]int, failList []results.Item) (map[string]int, []results.Item) {
+ if len(result.Items) > 0 {
+ for _, item := range result.Items {
+ statusCounts, failList = walkForSummary(&item, statusCounts, failList)
+ }
+ return statusCounts, failList
+ }
+
+ statusCounts[result.Status]++
+
+ if result.Status == results.StatusFailed || result.Status == results.StatusTimeout {
+ result.Details["offset"] = statusCounts[result.Status]
+ }
+
+ failList = append(failList, *result)
+ return statusCounts, failList
+}
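+
+// Illustrative example: for a result tree whose leaves have statuses
+// {passed, passed, failed}, walkForSummary returns statusCounts = {"passed": 2,
+// "failed": 1} and appends all three leaf items to the returned list; failed or
+// timed-out leaves also receive an "offset" detail with their ordinal position.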
diff --git a/internal/opct/summary/sonobuoy.go b/internal/opct/summary/sonobuoy.go
new file mode 100644
index 00000000..b193b1ef
--- /dev/null
+++ b/internal/opct/summary/sonobuoy.go
@@ -0,0 +1,60 @@
+package summary
+
+import (
+ "bytes"
+ "strings"
+
+ "github.com/redhat-openshift-ecosystem/provider-certification-tool/internal/opct/archive"
+ "github.com/vmware-tanzu/sonobuoy/pkg/discovery"
+ "github.com/vmware-tanzu/sonobuoy/pkg/plugin/manifest"
+ v1 "k8s.io/api/core/v1"
+)
+
+type SonobuoyPluginDefinitionManifest = manifest.Manifest
+
+// SonobuoyPluginDefinition is the Sonobuoy plugin definition.
+type SonobuoyPluginDefinition struct {
+ Definition *SonobuoyPluginDefinitionManifest `json:"Definition"`
+ SonobuoyImage string `json:"SonobuoyImage"`
+}
+
+type SonobuoySummary struct {
+ Cluster *discovery.ClusterSummary
+ MetaRuntime []*archive.RuntimeInfoItem
+ MetaConfig []*archive.RuntimeInfoItem
+ OpctConfig []*archive.RuntimeInfoItem
+ PluginsDefinition map[string]*SonobuoyPluginDefinition
+}
+
+func NewSonobuoySummary() *SonobuoySummary {
+ return &SonobuoySummary{
+ PluginsDefinition: make(map[string]*SonobuoyPluginDefinition, 5),
+ }
+}
+
+func (s *SonobuoySummary) SetCluster(c *discovery.ClusterSummary) error {
+ s.Cluster = c
+ return nil
+}
+
+func (s *SonobuoySummary) SetPluginsDefinition(p map[string]*SonobuoyPluginDefinition) error {
+	s.PluginsDefinition = p
+ return nil
+}
+
+func (s *SonobuoySummary) SetPluginDefinition(name string, def *SonobuoyPluginDefinition) {
+ s.PluginsDefinition[name] = def
+}
+
+func (s *SonobuoySummary) ParseMetaRunlogs(logLines *bytes.Buffer) {
+ s.MetaRuntime = archive.ParseMetaLogs(strings.Split(logLines.String(), "\n"))
+}
+
+func (s *SonobuoySummary) ParseMetaConfig(metaConfig *archive.MetaConfigSonobuoy) {
+ s.MetaConfig = archive.ParseMetaConfig(metaConfig)
+}
+
+func (s *SonobuoySummary) ParseOpctConfigMap(cm *v1.ConfigMapList) {
+ s.OpctConfig = archive.ParseOpctConfig(cm)
+}
diff --git a/internal/pkg/summary/suite.go b/internal/opct/summary/suite.go
similarity index 96%
rename from internal/pkg/summary/suite.go
rename to internal/opct/summary/suite.go
index 2f1d221c..82a73e52 100644
--- a/internal/pkg/summary/suite.go
+++ b/internal/opct/summary/suite.go
@@ -27,7 +27,7 @@ type OpenshiftTestsSuite struct {
InputFile string
Name string
Count int
- Tests []string
+ Tests []string `json:"-"`
}
func (s *OpenshiftTestsSuite) Load(ifile string, buf *bytes.Buffer) error {
diff --git a/internal/pkg/sippy/sippy.go b/internal/openshift/ci/sippy/sippy.go
similarity index 89%
rename from internal/pkg/sippy/sippy.go
rename to internal/openshift/ci/sippy/sippy.go
index 75d94243..e81276f7 100644
--- a/internal/pkg/sippy/sippy.go
+++ b/internal/openshift/ci/sippy/sippy.go
@@ -55,17 +55,19 @@ type SippyTestsRequestOutput []SippyTestsResponse
// SippyAPI is the Sippy API structure holding the API client
type SippyAPI struct {
- client *http.Client
+ client *http.Client
+ ocpVersion string
}
// NewSippyAPI creates a new API setting the http attributes to improve the connection reuse.
-func NewSippyAPI() *SippyAPI {
+func NewSippyAPI(ocpVersion string) *SippyAPI {
t := http.DefaultTransport.(*http.Transport).Clone()
t.MaxIdleConns = defaultMaxIdleConns
t.MaxConnsPerHost = defaultMaxConnsPerHost
t.MaxIdleConnsPerHost = defaultMaxIddleConnsPerHost
return &SippyAPI{
+ ocpVersion: ocpVersion,
client: &http.Client{
Timeout: defaultConnTimeoutSec * time.Second,
Transport: t,
@@ -75,14 +77,14 @@ func NewSippyAPI() *SippyAPI {
// QueryTests receive a input with attributes to query the results of a single test
// by name on the CI, returning the list with result items.
-func (a *SippyAPI) QueryTests(r *SippyTestsRequestInput) (*SippyTestsRequestOutput, error) {
+func (a *SippyAPI) QueryTests(in *SippyTestsRequestInput) (*SippyTestsRequestOutput, error) {
filter := SippyTestsRequestFilter{
Items: []SippyTestsRequestFilterItems{
{
ColumnField: "name",
OperatorValue: "equals",
- Value: r.TestName,
+ Value: in.TestName,
},
},
}
@@ -98,7 +100,7 @@ func (a *SippyAPI) QueryTests(r *SippyTestsRequestInput) (*SippyTestsRequestOutp
}
params := url.Values{}
- params.Add("release", "4.11")
+ params.Add("release", a.ocpVersion)
params.Add("filter", string(b))
baseUrl.RawQuery = params.Encode()
@@ -121,6 +123,10 @@ func (a *SippyAPI) QueryTests(r *SippyTestsRequestInput) (*SippyTestsRequestOutp
}
+ if res.StatusCode < 200 || res.StatusCode > 299 {
+ return nil, fmt.Errorf("invalid status code: %d", res.StatusCode)
+ }
+
sippyResponse := SippyTestsRequestOutput{}
if err := json.Unmarshal([]byte(body), &sippyResponse); err != nil {
return nil, fmt.Errorf("couldn't unmarshal response body: %+v \nBody: %s", string(body), err)
diff --git a/internal/openshift/ci/types.go b/internal/openshift/ci/types.go
deleted file mode 100644
index 59a8fbf5..00000000
--- a/internal/openshift/ci/types.go
+++ /dev/null
@@ -1,20 +0,0 @@
-package ci
-
-// Source: https://github.com/openshift/release/blob/master/core-services/prow/02_config/_config.yaml#L84
-var CommonErrorPatterns = []string{
- `error:`,
- `Failed to push image`,
- `Failed`,
- `timed out`,
- `'ERROR:'`,
- `ERRO\[`,
- `^error:`,
- `(^FAIL|FAIL: |Failure \[)\b`,
- `panic(\.go)?:`,
- `"level":"error"`,
- `level=error`,
- `level":"fatal"`,
- `level=fatal`,
- `│ Error:`,
- `client connection lost`,
-}
diff --git a/internal/openshift/mustgather/etcd.go b/internal/openshift/mustgather/etcd.go
index 10044cc6..2d06c042 100644
--- a/internal/openshift/mustgather/etcd.go
+++ b/internal/openshift/mustgather/etcd.go
@@ -14,8 +14,12 @@ import (
)
const (
- parserETCDLogsReqTTLMaxPastHour = 6
+ // parserETCDLogsReqTTLMaxPastHour is the maximum number of past hours to extract from must-gather.
+	// This is used to calculate the slow request timers from etcd pod logs.
+ parserETCDLogsReqTTLMaxPastHour = 8
+	// BucketRangeName* are groups/buckets of time, in milliseconds, used to aggregate
+	// values extracted from pod logs.
BucketRangeName200Ms string = "200-300"
BucketRangeName300Ms string = "300-400"
BucketRangeName400Ms string = "400-500"
@@ -29,7 +33,8 @@ const (
BucketRangeNameAll string = "all"
)
-// ErrorEtcdLogs handle errors extracted/parsed from etcd pod logs.
+// ErrorEtcdLogs handles errors extracted/parsed from etcd pod logs, grouping by
+// bucket.
type ErrorEtcdLogs struct {
ErrorCounters archive.ErrorCounter
FilterRequestSlowAll map[string]*BucketFilterStat
@@ -37,7 +42,7 @@ type ErrorEtcdLogs struct {
Buffer []*string `json:"-"`
}
-// common errors to create counters
+// EtcdLogErrorPatterns are common error patterns found in etcd logs.
var EtcdLogErrorPatterns = []string{
`rejected connection`,
`waiting for ReadIndex response took too long, retrying`,
@@ -82,7 +87,7 @@ func NewErrorEtcdLogs(buf *string) *ErrorEtcdLogs {
return etcdLogs
}
-// LogPayloadETCD parses the etcd log file to extract insights
+// logPayloadETCD holds the fields parsed from an etcd log entry, for example:
// {"level":"warn","ts":"2023-03-01T15:14:22.192Z",
// "caller":"etcdserver/util.go:166",
// "msg":"apply request took too long",
@@ -90,12 +95,12 @@ func NewErrorEtcdLogs(buf *string) *ErrorEtcdLogs {
// "prefix":"read-only range ",
// "request":"key:\"/kubernetes.io/configmaps/kube-system/kube-controller-manager\" ",
// "response":"range_response_count:1 size:608"}
-type LogPayloadETCD struct {
+type logPayloadETCD struct {
Took string `json:"took"`
Timestamp string `json:"ts"`
}
-type BucketGroup struct {
+type bucketGroup struct {
Bukets1s Buckets
Bukets500ms Buckets
}
@@ -103,7 +108,7 @@ type BucketGroup struct {
type FilterApplyTookTooLong struct {
Name string
GroupBy string
- Group map[string]*BucketGroup
+ Group map[string]*bucketGroup
// filter config
lineFilter string
@@ -120,7 +125,7 @@ func NewFilterApplyTookTooLong(aggregator string) *FilterApplyTookTooLong {
filter.Name = "ApplyTookTooLong"
filter.GroupBy = aggregator
- filter.Group = make(map[string]*BucketGroup)
+ filter.Group = make(map[string]*bucketGroup)
filter.lineFilter = "apply request took too long"
filter.reLineSplitter, _ = regexp.Compile(`^\d+-\d+-\d+T\d+:\d+:\d+.\d+Z `)
@@ -147,7 +152,7 @@ func (f *FilterApplyTookTooLong) ProcessLine(line string) *string {
}
// parse json
- lineParsed := LogPayloadETCD{}
+ lineParsed := logPayloadETCD{}
if err := json.Unmarshal([]byte(split[1]), &lineParsed); err != nil {
log.Errorf("couldn't parse json: %v", err)
}
@@ -175,7 +180,7 @@ func (f *FilterApplyTookTooLong) ProcessLine(line string) *string {
}
func (f *FilterApplyTookTooLong) insertBucket(v float64, ts string) {
- var group *BucketGroup
+ var group *bucketGroup
var aggrKey string
if f.GroupBy == "hour" {
@@ -204,7 +209,7 @@ func (f *FilterApplyTookTooLong) insertBucket(v float64, ts string) {
}
if _, ok := f.Group[aggrKey]; !ok {
- f.Group[aggrKey] = &BucketGroup{}
+ f.Group[aggrKey] = &bucketGroup{}
group = f.Group[aggrKey]
group.Bukets1s = NewBuckets(buckets1s())
group.Bukets500ms = NewBuckets(buckets500ms())
diff --git a/internal/openshift/mustgather/log.go b/internal/openshift/mustgather/log.go
new file mode 100644
index 00000000..96cde68c
--- /dev/null
+++ b/internal/openshift/mustgather/log.go
@@ -0,0 +1,180 @@
+package mustgather
+
+import (
+ "bytes"
+ "os"
+ "strconv"
+ "sync"
+ "time"
+
+ "github.com/redhat-openshift-ecosystem/provider-certification-tool/internal/opct/archive"
+ log "github.com/sirupsen/logrus"
+)
+
+var (
+	// defaultBufferLeakyBucket is the max number of items to process in parallel.
+	defaultBufferLeakyBucket = 50
+	// defaultSizeLeakyBucket is the max number of items queued in the bucket/memory
+	// before being unblocked by the rate limiter.
+	defaultSizeLeakyBucket = 100
+	// defaultRateLimitIntervalLeakyBucket: lower values increase the processing rate,
+	// but also the risk of exhausting resources.
+	defaultRateLimitIntervalLeakyBucket = 10 * time.Millisecond
+)
+
+func init() {
+	// allow overriding the rate limit to control the processing speed
+	// and consume fewer resources.
+ overrideRateLimit := os.Getenv("OPCT_MUSTGATHER_RATELIMIT")
+ if overrideRateLimit == "" {
+ return
+ }
+ rate, err := strconv.Atoi(overrideRateLimit)
+ if err != nil {
+ log.Errorf("error parsing rate limit environment var OPCT_MUSTGATHER_RATELIMIT: %v", err)
+ return
+ }
+ if rate <= 0 || rate > 100 {
+ log.Errorf("invalid rate limit value, must be between 1 and 100: %d", rate)
+ return
+ }
+ defaultRateLimitIntervalLeakyBucket = time.Duration(rate) * time.Millisecond
+}
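+
+// Usage sketch (hypothetical invocation; only the environment variable parsed
+// above is defined by this package): setting OPCT_MUSTGATHER_RATELIMIT=100
+// increases the interval to 100ms, lowering CPU usage at the cost of slower
+// must-gather processing, e.g.:
+//
+//	OPCT_MUSTGATHER_RATELIMIT=100 ./opct report <archive.tar.gz>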
+
+// MustGatherLog holds the must-gather findings in logs.
+type MustGatherLog struct {
+ Path string
+ PathAlias string
+ Namespace string
+ Pod string
+ Container string
+ ErrorCounters archive.ErrorCounter `json:"ErrorCounters,omitempty"`
+ ErrorEtcdLogs *ErrorEtcdLogs `json:"ErrorEtcdLogs,omitempty"`
+ buffer *bytes.Buffer `json:"-"`
+}
+
+// Processed checks whether any items were processed; skipping unprocessed logs
+// saves storage by preventing entries without relevant information.
+func (mgl *MustGatherLog) Processed() bool {
+ if len(mgl.ErrorCounters) > 0 {
+ return true
+ }
+ if mgl.ErrorEtcdLogs != nil {
+ return true
+ }
+ return false
+}
+
+// leakyBucket is a leaky bucket implementation (queue limit) to process must-gather
+// items in parallel without exhausting resources. The leakRate interval controls the
+// pace: the default of 10 (ms) is a good trade-off; to decrease CPU usage while
+// processing the must-gather logs, increase the interval to 100 (ms) by setting
+// the environment variable OPCT_MUSTGATHER_RATELIMIT.
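+//
+// Minimal usage sketch (mirroring how the must-gather extractor in mustgather.go drives it):
+//
+//	lb := newLeakyBucket(defaultSizeLeakyBucket, defaultRateLimitIntervalLeakyBucket, mg.processNamespaceErrors)
+//	lb.Increment()                         // account for one queued item
+//	go lb.AppendQueue(&MustGatherLog{...}) // blocks on the rate limiter before queueing
+//	lb.waiter.Wait()                       // wait until every queued item is processed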
+type leakyBucket struct {
+ // bucketSize is the maximum number of items that can be stored in the bucket.
+ bucketSize int
+ // leakRate is the number of items that are removed from the bucket every second.
+ leakRate time.Duration
+ // bucket is the current number of items in the bucket.
+ bucket int
+
+ queue chan *MustGatherLog
+ queueCount int
+ rateLimiter chan struct{}
+ semaphore chan struct{}
+ waiter sync.WaitGroup
+ locker sync.Mutex
+
+ // activeReading is a flag to indicate if the bucket is being read.
+ activeReading bool
+
+ // processor function to be called when the bucket is full.
+ processor func(*MustGatherLog)
+}
+
+func newLeakyBucket(bucketSize int, leakRate time.Duration, fn func(*MustGatherLog)) *leakyBucket {
+ lb := &leakyBucket{
+ bucketSize: bucketSize,
+ leakRate: leakRate,
+ bucket: 0,
+ queue: make(chan *MustGatherLog, bucketSize),
+ queueCount: 0,
+ rateLimiter: make(chan struct{}, defaultBufferLeakyBucket),
+ semaphore: make(chan struct{}, defaultBufferLeakyBucket),
+ processor: fn,
+ activeReading: true,
+ }
+
+ for i := 0; i < cap(lb.rateLimiter); i++ {
+ lb.rateLimiter <- struct{}{}
+ }
+
+	// leaky bucket ticker pacing the processing every leakRate interval.
+ go func() {
+ log.Debug("Leaky bucket ticker - starting")
+ ticker := time.NewTicker(lb.leakRate)
+ defer ticker.Stop()
+ for range ticker.C {
+ _, ok := <-lb.rateLimiter
+ // if this isn't going to run indefinitely, signal
+ // this to return by closing the rate channel.
+ if !ok {
+ print("Leaky bucket rate limiter - closing")
+ return
+ }
+ }
+ }()
+
+ // consume the queued pod logs to be processed/extracted information.
+ go func() {
+ log.Debug("Leaky bucket processor - starting")
+ for data := range lb.queue {
+ lb.processor(data)
+ lb.decrement()
+ }
+ }()
+
+ // monitor the queue size
+ go func() {
+ log.Debug("Leaky bucket monitor - starting")
+ for lb.activeReading {
+ log.Debugf("Must-gather processor - queue size monitor: %d", lb.queueCount)
+ time.Sleep(10 * time.Second)
+ }
+ }()
+
+ return lb
+}
+
+// decrement decrements the number of items in the queue.
+func (lb *leakyBucket) decrement() {
+ lb.waiter.Done()
+ lb.locker.Lock()
+ lb.queueCount -= 1
+ lb.locker.Unlock()
+}
+
+// Increment increments the number of items in the queue.
+func (lb *leakyBucket) Increment() {
+ lb.waiter.Add(1)
+ lb.locker.Lock()
+ lb.queueCount += 1
+ lb.locker.Unlock()
+}
+
+// AppendQueue checks the rate limiter and semaphore, then
+// adds a new item to the queue.
+func (lb *leakyBucket) AppendQueue(mgl *MustGatherLog) {
+ // wait for the rate limiter
+ lb.rateLimiter <- struct{}{}
+
+ // check the concurrency semaphore
+ lb.semaphore <- struct{}{}
+ defer func() {
+ <-lb.semaphore
+ }()
+
+ // Sending the item to the queue
+ lb.queue <- mgl
+}
diff --git a/internal/openshift/mustgather/mustgather.go b/internal/openshift/mustgather/mustgather.go
index d21894f8..ed3415f9 100644
--- a/internal/openshift/mustgather/mustgather.go
+++ b/internal/openshift/mustgather/mustgather.go
@@ -3,25 +3,22 @@ package mustgather
import (
"archive/tar"
"bytes"
+ "fmt"
"io"
"os"
"path/filepath"
- "regexp"
"strings"
"sync"
- "time"
"github.com/pkg/errors"
"github.com/redhat-openshift-ecosystem/provider-certification-tool/internal/opct/archive"
- "github.com/redhat-openshift-ecosystem/provider-certification-tool/internal/openshift/ci"
log "github.com/sirupsen/logrus"
- "github.com/ulikunitz/xz"
"gopkg.in/yaml.v2"
"k8s.io/utils/ptr"
)
-/* MustGatehr raw files */
-type MustGatherFile struct {
+// rawFile holds the raw data from must-gather.
+type rawFile struct {
Path string
PathAlias string `json:"PathAlias,omitempty"`
Data string `json:"Data,omitempty"`
@@ -30,6 +27,7 @@ type MustGatherFile struct {
type MustGather struct {
// path to the directory must-gather will be saved.
path string
+ save bool
// ErrorEtcdLogs summary of etcd errors parsed from must-gather.
ErrorEtcdLogs *ErrorEtcdLogs `json:"ErrorEtcdLogs,omitempty"`
@@ -43,43 +41,43 @@ type MustGather struct {
namespaceCtrl sync.Mutex
// FileData hold raw data from files must-gather.
- RawFiles []*MustGatherFile `json:"RawFiles,omitempty"`
+ RawFiles []*rawFile `json:"RawFiles,omitempty"`
rawFilesCtrl sync.Mutex
PodNetworkChecks MustGatherPodNetworkChecks
}
-func NewMustGather(file string) *MustGather {
+func NewMustGather(file string, save bool) *MustGather {
return &MustGather{
path: file,
+ save: save,
}
}
-// InsertNamespaceErrors append the log data in safe way.
-func (mg *MustGather) InsertNamespaceErrors(log *MustGatherLog) error {
- mg.namespaceCtrl.Lock()
- mg.NamespaceErrors = append(mg.NamespaceErrors, log)
- mg.namespaceCtrl.Unlock()
- return nil
-}
+// Process reads and processes the must-gather tarball file in memory.
+func (mg *MustGather) Process(buf *bytes.Buffer) error {
+ log.Debugf("Processing results/Populating/Populating Summary/Processing/MustGather/Reading")
+ tar, err := getTarFromXZBuffer(buf)
+ if err != nil {
+ return err
+ }
-// InsertRawFiles append the file data in safe way.
-func (mg *MustGather) InsertRawFiles(file *MustGatherFile) error {
- mg.rawFilesCtrl.Lock()
- mg.RawFiles = append(mg.RawFiles, file)
- mg.rawFilesCtrl.Unlock()
+ log.Debugf("Processing results/Populating/Populating Summary/Processing/MustGather/Processing")
+ if err := mg.extract(tar); err != nil {
+ return err
+ }
return nil
}
func (mg *MustGather) AggregateCounters() {
if mg.ErrorCounters == nil {
- mg.ErrorCounters = make(archive.ErrorCounter, len(ci.CommonErrorPatterns))
+ mg.ErrorCounters = make(archive.ErrorCounter, len(archive.CommonErrorPatterns))
}
if mg.ErrorEtcdLogs == nil {
mg.ErrorEtcdLogs = &ErrorEtcdLogs{}
}
+	// calculate error findings across all namespaces.
for nsi := range mg.NamespaceErrors {
- // calculate
hasErrorCounters := false
hasEtcdCounters := false
if mg.NamespaceErrors[nsi].ErrorCounters != nil {
@@ -129,12 +127,29 @@ func (mg *MustGather) AggregateCounters() {
}
}
}
+
log.Debugf("Processing results/Populating/Populating Summary/Processing/MustGather/CalculatingErrors/CalculatingEtcdErrors")
- mg.CalculateCountersEtcd()
+ mg.calculateCountersEtcd()
}
-// CalculateCountersEtcd creates the aggregators, generating counters for each one.
-func (mg *MustGather) CalculateCountersEtcd() {
+// insertNamespaceErrors appends the extracted information to the namespaced-resource.
+func (mg *MustGather) insertNamespaceErrors(log *MustGatherLog) error {
+ mg.namespaceCtrl.Lock()
+ mg.NamespaceErrors = append(mg.NamespaceErrors, log)
+ mg.namespaceCtrl.Unlock()
+ return nil
+}
+
+// insertRawFiles appends the file data in a safe way.
+func (mg *MustGather) insertRawFiles(file *rawFile) error {
+ mg.rawFilesCtrl.Lock()
+ mg.RawFiles = append(mg.RawFiles, file)
+ mg.rawFilesCtrl.Unlock()
+ return nil
+}
+
+// calculateCountersEtcd creates the aggregators, generating counters for each one.
+func (mg *MustGather) calculateCountersEtcd() {
// filter Slow Requests (aggregate by hour)
filterATTL1 := NewFilterApplyTookTooLong("hour")
@@ -151,146 +166,36 @@ func (mg *MustGather) CalculateCountersEtcd() {
mg.ErrorEtcdLogs.FilterRequestSlowAll = filterATTL2.GetStat(1)
}
-// Process read the must-gather tarball.
-func (mg *MustGather) Process(buf *bytes.Buffer) error {
- log.Debugf("Processing results/Populating/Populating Summary/Processing/MustGather/Reading")
- tar, err := mg.read(buf)
- if err != nil {
- return err
- }
- log.Debugf("Processing results/Populating/Populating Summary/Processing/MustGather/Processing")
- err = mg.extract(tar)
- if err != nil {
- return err
- }
- return nil
-}
-
-func (mg *MustGather) read(buf *bytes.Buffer) (*tar.Reader, error) {
- file, err := xz.NewReader(buf)
- if err != nil {
- return nil, err
- }
- return tar.NewReader(file), nil
-}
-
-// matchToExtract define patterns to continue the must-gather processor.
-// the pattern must be defined if the must be extracted. It will return
-// a boolean with match and the file group (pattern type).
-func (mg *MustGather) matchToExtract(path string) (bool, string) {
- patterns := make(map[string]string, 4)
- patterns["logs"] = `(\/namespaces\/.*\/pods\/.*.log)`
- patterns["events"] = `(\/event-filter.html)`
- patterns["rawFile"] = `(\/etcd_info\/.*.json)`
- patterns["podNetCheck"] = `(\/pod_network_connectivity_check\/podnetworkconnectivitychecks.yaml)`
- // TODO /host_service_logs/.*.log
- for typ, pattern := range patterns {
- re := regexp.MustCompile(pattern)
- if re.MatchString(path) {
- return true, typ
- }
- }
- return false, ""
-}
-
-// extractRelativePath removes the prefix of must-gather path/image to save the
-// relative file path when extracting the file or mapping in the counters.
-// OPCT collects must-gather automatically saving in the directory must-gather-opct.
-func (mg *MustGather) extractRelativePath(file string) string {
- re := regexp.MustCompile(`must-gather-opct/([A-Za-z0-9]+(-[A-Za-z0-9]+)+\/)`)
-
- split := re.Split(file, -1)
- if len(split) != 2 {
- return file
- }
- return split[1]
-}
-
-// extract dispatch to process must-gather items.
+// extract reads and processes the tarball, extracting the required information.
func (mg *MustGather) extract(tarball *tar.Reader) error {
-
- // Create must-gather directory
- if _, err := os.Stat(mg.path); err != nil {
- if err := os.MkdirAll(mg.path, 0755); err != nil {
- return err
+	// Create the must-gather directory under the result path.
+	// The directory is created only when needed.
+ if mg.save {
+ if _, err := os.Stat(mg.path); err != nil {
+ if err := os.MkdirAll(mg.path, 0755); err != nil {
+ return fmt.Errorf("error creating must-gather directory: %v", err)
+ }
}
}
- // TODO()#1: create a queue package with a instance of MustGatherLog.
- // TODO()#2: increase the parallelism targetting to decrease the total proc time.
- // Leaky bucket implementation (queue limit) to parallel process must-gather items
- // without exhausting resources.
- // Benckmark info: this parallel processing decreased 3 times the total processing time.
- // Samples: Serial=~100s, rate(100)=~30s, rate(150)=~25s.
- keepReading := true
- procQueueSize := 0
- var procQueueLocker sync.Mutex
- // Creating queue monitor as Waiter group does not provide interface to check the
- // queue size.
- procQueueInc := func() {
- procQueueLocker.Lock()
- procQueueSize += 1
- procQueueLocker.Unlock()
- }
- procQueueDec := func() {
- procQueueLocker.Lock()
- procQueueSize -= 1
- procQueueLocker.Unlock()
- }
- go func() {
- for keepReading {
- log.Debugf("Must-gather processor - queue size monitor: %d", procQueueSize)
- time.Sleep(10 * time.Second)
- }
- }()
-
- waiterProcNS := &sync.WaitGroup{}
- chProcNSErrors := make(chan *MustGatherLog, 50)
- semaphore := make(chan struct{}, 50)
- // have a max rate of N/sec
- rate := make(chan struct{}, 20)
- for i := 0; i < cap(rate); i++ {
- rate <- struct{}{}
- }
- // leaky bucket
- go func() {
- ticker := time.NewTicker(100 * time.Millisecond)
- defer ticker.Stop()
- for range ticker.C {
- _, ok := <-rate
- // if this isn't going to run indefinitely, signal
- // this to return by closing the rate channel.
- if !ok {
- return
- }
- }
- }()
- // consumer
- go func() {
- for mgLog := range chProcNSErrors {
- mg.processNamespaceErrors(mgLog)
- waiterProcNS.Done()
- procQueueDec()
- }
- }()
+ processorBucket := newLeakyBucket(defaultSizeLeakyBucket, defaultRateLimitIntervalLeakyBucket, mg.processNamespaceErrors)
// Walk through files in must-gather tarball file.
- for keepReading {
+ for processorBucket.activeReading {
header, err := tarball.Next()
switch {
// no more files
case err == io.EOF:
- log.Debugf("Must-gather processor queued, queue size: %d", procQueueSize)
- waiterProcNS.Wait()
- keepReading = false
- log.Debugf("Must-gather processor finished, queue size: %d", procQueueSize)
+ log.Debugf("Must-gather processor queued, queue size: %d", processorBucket.queueCount)
+ processorBucket.waiter.Wait()
+ processorBucket.activeReading = false
+ log.Debugf("Must-gather processor finished, queue size: %d", processorBucket.queueCount)
return nil
// return on error
case err != nil:
return errors.Wrapf(err, "error reading tarball")
- // return err
// skip it when the headr isn't set (not sure how this happens)
case header == nil:
@@ -299,22 +204,25 @@ func (mg *MustGather) extract(tarball *tar.Reader) error {
// the target location where the dir/file should be created.
target := filepath.Join(mg.path, header.Name)
- ok, typ := mg.matchToExtract(target)
+
+ // check if the file should be processed.
+ ok, itemType := getFileTypeToProcess(target)
if !ok {
continue
}
- targetAlias := mg.extractRelativePath(target)
+ targetAlias := normalizeRelativePath(target)
// the following switch could also be done using fi.Mode(), not sure if there
// a benefit of using one vs. the other.
// fi := header.FileInfo()
switch header.Typeflag {
+
// directories in tarball.
case tar.TypeDir:
-
// creating subdirectories structures will be ignored and need
// sub-directories under mg.path must be created previously if needed.
+			// Enable it only if there is a use case to extract more data to disk preserving source dirs.
/*
targetDir := filepath.Join(mg.path, targetAlias)
if _, err := os.Stat(targetDir); err != nil {
@@ -327,34 +235,30 @@ func (mg *MustGather) extract(tarball *tar.Reader) error {
// files in tarball. Process only files classified by 'typ'.
case tar.TypeReg:
- // Save/Process only files matching now types, it will prevent processing && saving
- // all the files in must-gather, extracting only information needed by OPCT.
- switch typ {
- case "logs":
- // parallel processing the logs
+			// Save/process only files matching known types; this prevents processing and saving
+			// all the files in must-gather, extracting only the information required by OPCT.
+ switch itemType {
+ case patternNamePodLogs:
+ // logs are processed in parallel, the buffer is released when processed.
buf := bytes.Buffer{}
if _, err := io.Copy(&buf, tarball); err != nil {
- return err
+ log.Errorf("must-gather processor/podLogs: error copying buffer for %s: %v", targetAlias, err)
+ continue
}
- waiterProcNS.Add(1)
- procQueueInc()
+				processorBucket.Increment()
go func(filename string, buffer *bytes.Buffer) {
- // wait for the rate limiter
- rate <- struct{}{}
-
- // check the concurrency semaphore
- semaphore <- struct{}{}
- defer func() {
- <-semaphore
- }()
- // log.Debugf("Producing log processor for file: %s", mgLog.Path)
- chProcNSErrors <- &MustGatherLog{
+ processorBucket.AppendQueue(&MustGatherLog{
Path: filename,
buffer: buffer,
- }
+ })
}(targetAlias, &buf)
- case "events":
+ case patternNameEvents:
+				// skip extracting when the save directory is not set (in-memory processing only).
+ if !mg.save {
+ log.Debugf("skipping file %s", targetAlias)
+ continue
+ }
// forcing file name for event filter
targetLocal := filepath.Join(mg.path, "event-filter.html")
f, err := os.OpenFile(targetLocal, os.O_CREATE|os.O_RDWR, os.FileMode(header.Mode))
@@ -366,9 +270,9 @@ func (mg *MustGather) extract(tarball *tar.Reader) error {
}
f.Close()
- case "rawFile":
+ case patternNameRawFile:
log.Debugf("Must-gather extracting file %s", targetAlias)
- raw := &MustGatherFile{}
+ raw := &rawFile{}
raw.Path = targetAlias
buf := bytes.Buffer{}
if _, err := io.Copy(&buf, tarball); err != nil {
@@ -376,14 +280,14 @@ func (mg *MustGather) extract(tarball *tar.Reader) error {
break
}
raw.Data = buf.String()
- err := mg.InsertRawFiles(raw)
+ err := mg.insertRawFiles(raw)
if err != nil {
log.Errorf("error inserting rawfile: %v", err)
}
- case "podNetCheck":
+ case patternNamePodNetCheck:
log.Debugf("Must-gather extracting file %s", targetAlias)
- raw := &MustGatherFile{}
+ raw := &rawFile{}
raw.Path = targetAlias
buf := bytes.Buffer{}
if _, err := io.Copy(&buf, tarball); err != nil {
@@ -416,9 +320,11 @@ func (mg *MustGather) processNamespaceErrors(mgLog *MustGatherLog) {
mgLog.Namespace = mgItems[0]
mgLog.Pod = mgItems[2]
mgLog.Container = mgItems[3]
- // TODO: log errors
- mgLog.ErrorCounters = archive.NewErrorCounter(ptr.To(mgLog.buffer.String()), ci.CommonErrorPatterns)
- // additional parsers
+
+ // parse errors from logs
+ mgLog.ErrorCounters = archive.NewErrorCounter(ptr.To(mgLog.buffer.String()), archive.CommonErrorPatterns)
+
+ // additional parsers: etcd error counter extractor
if mgLog.Namespace == "openshift-etcd" &&
mgLog.Container == "etcd" &&
strings.HasSuffix(mgLog.Path, "current.log") {
@@ -430,33 +336,11 @@ func (mg *MustGather) processNamespaceErrors(mgLog *MustGatherLog) {
// Insert only if there are logs parsed
if mgLog.Processed() {
- if err := mg.InsertNamespaceErrors(mgLog); err != nil {
+ if err := mg.insertNamespaceErrors(mgLog); err != nil {
log.Errorf("one or more errors found when inserting errors: %v", err)
}
}
-}
-/* MustGatehr log items */
-
-type MustGatherLog struct {
- Path string
- PathAlias string
- Namespace string
- Pod string
- Container string
- ErrorCounters archive.ErrorCounter `json:"ErrorCounters,omitempty"`
- ErrorEtcdLogs *ErrorEtcdLogs `json:"ErrorEtcdLogs,omitempty"`
- buffer *bytes.Buffer `json:"-"`
-}
-
-// Processed check if there are items processed, otherwise will save
-// storage preventing items without relevant information.
-func (mge *MustGatherLog) Processed() bool {
- if len(mge.ErrorCounters) > 0 {
- return true
- }
- if mge.ErrorEtcdLogs != nil {
- return true
- }
- return false
+ // release buffer
+ mgLog.buffer.Reset()
}
diff --git a/internal/openshift/mustgather/podnetconcheck.go b/internal/openshift/mustgather/podnetconcheck.go
index 9afce70b..c374eabd 100644
--- a/internal/openshift/mustgather/podnetconcheck.go
+++ b/internal/openshift/mustgather/podnetconcheck.go
@@ -4,7 +4,22 @@ import log "github.com/sirupsen/logrus"
/* MustGather PodNetworkChecks handle connectivity monitor */
-type MustGatherPodNetworkCheck struct {
+type networkOutage struct {
+ Start string
+ End string
+ Name string
+ Message string
+}
+
+type networkCheckFailure struct {
+ Time string
+ Reason string
+ Latency string
+ Name string
+ Message string
+}
+
+type podNetworkCheck struct {
Name string
SpecSource string
SpecTarget string
@@ -17,15 +32,15 @@ type MustGatherPodNetworkChecks struct {
TotalFailures int64
TotalOutages int64
TotalSuccess int64
- Checks []*MustGatherPodNetworkCheck
- Outages []*NetworkOutage
- Failures []*NetworkCheckFailure
+ Checks []*podNetworkCheck
+ Outages []*networkOutage
+ Failures []*networkCheckFailure
}
func (p *MustGatherPodNetworkChecks) InsertCheck(
- check *MustGatherPodNetworkCheck,
- failures []*NetworkCheckFailure,
- outages []*NetworkOutage,
+ check *podNetworkCheck,
+ failures []*networkCheckFailure,
+ outages []*networkOutage,
) {
p.Checks = append(p.Checks, check)
p.Outages = append(p.Outages, outages...)
@@ -39,29 +54,30 @@ func (p *MustGatherPodNetworkChecks) Parse(data map[string]interface{}) {
// TODO#1 use CRD PodNetworkConnectivityCheck and api controlplane.operator.openshift.io/v1alpha1 to parse
// TODO#2 use reflection to read data
+ prefixErr := "must-gather extracting file pod_network_connectivity_check"
for _, d := range data["items"].([]interface{}) {
item := d.(map[interface{}]interface{})
if item["metadata"] == nil {
- log.Errorf("unable to retrieve pod network check metadata: %v", item["metadata"])
+ log.Debugf("%s/invalid metadata: %v", prefixErr, item["metadata"])
continue
}
metadata := item["metadata"].(map[interface{}]interface{})
if item["spec"] == nil {
- log.Errorf("unable to retrieve pod network check spec: %v", item["spec"])
+ log.Debugf("%s/invalid spec: %v", prefixErr, item["spec"])
continue
}
spec := item["spec"].(map[interface{}]interface{})
if item["status"] == nil {
- log.Errorf("unable to retrieve pod network check status: %v", item["status"])
+ log.Debugf("%s/invalid itme/status: %v", prefixErr, item)
continue
}
status := item["status"].(map[interface{}]interface{})
name := metadata["name"].(string)
- check := &MustGatherPodNetworkCheck{
+ check := &podNetworkCheck{
Name: name,
SpecSource: spec["sourcePod"].(string),
SpecTarget: spec["targetEndpoint"].(string),
@@ -70,7 +86,7 @@ func (p *MustGatherPodNetworkChecks) Parse(data map[string]interface{}) {
check.TotalSuccess = int64(len(status["successes"].([]interface{})))
}
- netFailures := []*NetworkCheckFailure{}
+ netFailures := []*networkCheckFailure{}
if status["failures"] != nil {
failures := status["failures"].([]interface{})
check.TotalFailures = int64(len(failures))
@@ -78,7 +94,7 @@ func (p *MustGatherPodNetworkChecks) Parse(data map[string]interface{}) {
if f.(map[interface{}]interface{})["time"] == nil {
continue
}
- nf := &NetworkCheckFailure{
+ nf := &networkCheckFailure{
Name: name,
Time: f.(map[interface{}]interface{})["time"].(string),
}
@@ -95,12 +111,12 @@ func (p *MustGatherPodNetworkChecks) Parse(data map[string]interface{}) {
}
}
- netOutages := []*NetworkOutage{}
+ netOutages := []*networkOutage{}
if status["outages"] != nil {
outages := status["outages"].([]interface{})
check.TotalOutages = int64(len(outages))
for _, o := range outages {
- no := &NetworkOutage{Name: name}
+ no := &networkOutage{Name: name}
if o.(map[interface{}]interface{})["start"] == nil {
continue
}
@@ -116,20 +132,4 @@ func (p *MustGatherPodNetworkChecks) Parse(data map[string]interface{}) {
}
p.InsertCheck(check, netFailures, netOutages)
}
-
-}
-
-type NetworkOutage struct {
- Start string
- End string
- Name string
- Message string
-}
-
-type NetworkCheckFailure struct {
- Time string
- Reason string
- Latency string
- Name string
- Message string
}
diff --git a/internal/openshift/mustgather/utils.go b/internal/openshift/mustgather/utils.go
new file mode 100644
index 00000000..52d56c16
--- /dev/null
+++ b/internal/openshift/mustgather/utils.go
@@ -0,0 +1,71 @@
+package mustgather
+
+import (
+ "archive/tar"
+ "bytes"
+ "regexp"
+
+ "github.com/ulikunitz/xz"
+)
+
+const (
+ // patterns to match files in must-gather to be collected/processed.
+ // patternNamePodLogs represents the pattern to match pod logs.
+ patternNamePodLogs string = "logs"
+ patternFilePodLogs string = `(\/namespaces\/.*\/pods\/.*.log)`
+
+ // patternNameEvents represents the pattern to match the event filter file.
+ patternNameEvents string = "events"
+ patternFileEvents string = `(\/event-filter.html)`
+
+ // patternNameRawFile represents the pattern to match raw files (any desired to collect).
+ patternNameRawFile string = "rawFile"
+ patternFileRawFile string = `(\/etcd_info\/.*.json)`
+
+ // patternNamePodNetCheck represents the pattern to match pod network check files.
+ patternNamePodNetCheck string = "podNetCheck"
+ patternFilePodNetCheck string = `(\/pod_network_connectivity_check\/podnetworkconnectivitychecks.yaml)`
+)
+
+var (
+ mustGatherFilePatterns = map[string]string{
+		patternNamePodLogs:     patternFilePodLogs,
+		patternNameEvents:      patternFileEvents,
+		patternNameRawFile:     patternFileRawFile,
+		patternNamePodNetCheck: patternFilePodNetCheck,
+ }
+)
+
+// getFileTypeToProcess checks whether a path matches one of the patterns the
+// must-gather processor should extract. It returns a boolean indicating a match
+// and the file group (pattern type).
+func getFileTypeToProcess(path string) (bool, string) {
+ for typ, pattern := range mustGatherFilePatterns {
+ re := regexp.MustCompile(pattern)
+ if re.MatchString(path) {
+ return true, typ
+ }
+ }
+ return false, ""
+}
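+
+// For example, a tarball entry such as
+//	.../namespaces/openshift-etcd/pods/.../etcd/etcd/logs/current.log
+// matches patternNamePodLogs, returning (true, "logs"); unknown paths return (false, "").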
+
+// normalizeRelativePath removes the prefix of must-gather path/image to save the
+// relative file path when extracting the file or mapping in the counters.
+// OPCT collects must-gather automatically saving in the directory must-gather-opct.
+func normalizeRelativePath(file string) string {
+ re := regexp.MustCompile(`must-gather-opct/([A-Za-z0-9]+(-[A-Za-z0-9]+)+\/)`)
+
+ split := re.Split(file, -1)
+ if len(split) != 2 {
+ return file
+ }
+ return split[1]
+}
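+
+// For example (hypothetical image directory name), a target such as
+//	.../must-gather-opct/quay-io-opct-must-gather-sha256-abc/namespaces/openshift-etcd/...
+// is normalized to "namespaces/openshift-etcd/...", while paths without the
+// must-gather-opct prefix are returned unchanged.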
+
+func getTarFromXZBuffer(buf *bytes.Buffer) (*tar.Reader, error) {
+ file, err := xz.NewReader(buf)
+ if err != nil {
+ return nil, err
+ }
+ return tar.NewReader(file), nil
+}
diff --git a/internal/opct/chart/charts.go b/internal/openshift/mustgathermetrics/charts.go
similarity index 54%
rename from internal/opct/chart/charts.go
rename to internal/openshift/mustgathermetrics/charts.go
index fbb9c652..791b6859 100644
--- a/internal/opct/chart/charts.go
+++ b/internal/openshift/mustgathermetrics/charts.go
@@ -1,4 +1,4 @@
-package chart
+package mustgathermetrics
import (
"encoding/json"
@@ -38,111 +38,8 @@ type readMetricInput struct {
subtitle string
}
-// type LineExamples struct{}
-
-type MustGatherMetric struct {
- Path string
- OriginalQuery string
- PlotLabel string
- PlotTitle string
- PlotSubTitle string
- CreateChart func() *charts.Line
- CollectorAvailable bool
- MetricData *PrometheusResponse
- DivId string
-}
-
-var ChartsAvailable map[string]*MustGatherMetric
-
-func init() {
- ChartsAvailable = make(map[string]*MustGatherMetric, 0)
- ChartsAvailable["query_range-etcd-disk-fsync-db-duration-p99.json.gz"] = &MustGatherMetric{
- Path: "query_range-etcd-disk-fsync-db-duration-p99.json.gz",
- OriginalQuery: "",
- PlotLabel: "instance",
- PlotTitle: "etcd fsync DB p99",
- PlotSubTitle: "",
- CollectorAvailable: true,
- DivId: "id1",
- }
- ChartsAvailable["query_range-api-kas-request-duration-p99.json.gz"] = &MustGatherMetric{
- Path: "query_range-api-kas-request-duration-p99.json.gz",
- OriginalQuery: "",
- PlotLabel: "verb",
- PlotTitle: "Kube API request p99",
- PlotSubTitle: "",
- CollectorAvailable: true,
- DivId: "id2",
- }
- ChartsAvailable["query_range-etcd-disk-fsync-wal-duration-p99.json.gz"] = &MustGatherMetric{
- Path: "query_range-etcd-disk-fsync-wal-duration-p99.json.gz",
- OriginalQuery: "",
- PlotLabel: "instance",
- PlotTitle: "etcd fsync WAL p99",
- PlotSubTitle: "",
- CollectorAvailable: true,
- DivId: "id0",
- }
- ChartsAvailable["query_range-etcd-peer-round-trip-time.json.gz"] = &MustGatherMetric{
- Path: "query_range-etcd-peer-round-trip-time.json.gz",
- OriginalQuery: "",
- PlotLabel: "instance",
- PlotTitle: "etcd peer round trip",
- PlotSubTitle: "",
- CollectorAvailable: true,
- DivId: "id3",
- }
-
- ChartsAvailable["query_range-etcd-total-leader-elections-day.json.gz"] = &MustGatherMetric{
- Path: "query_range-etcd-total-leader-elections-day.json.gz",
- OriginalQuery: "",
- PlotLabel: "instance",
- PlotTitle: "etcd peer total leader election",
- PlotSubTitle: "",
- CollectorAvailable: true,
- DivId: "id4",
- }
- ChartsAvailable["query_range-etcd-request-duration-p99.json.gz"] = &MustGatherMetric{
- Path: "query_range-etcd-request-duration-p99.json.gz",
- OriginalQuery: "",
- PlotLabel: "operation",
- PlotTitle: "etcd req duration p99",
- PlotSubTitle: "",
- CollectorAvailable: true,
- DivId: "id5",
- }
-
- ChartsAvailable["query_range-cluster-storage-iops.json.gz"] = &MustGatherMetric{
- Path: "query_range-cluster-storage-iops.json.gz",
- OriginalQuery: "",
- PlotLabel: "namespace",
- PlotTitle: "Cluster storage IOPS",
- PlotSubTitle: "",
- CollectorAvailable: false,
- DivId: "id6",
- }
- ChartsAvailable["query_range-cluster-storage-throughput.json.gz"] = &MustGatherMetric{
- Path: "query_range-cluster-storage-throughput.json.gz",
- OriginalQuery: "",
- PlotLabel: "namespace",
- PlotTitle: "Cluster storage throughput",
- PlotSubTitle: "",
- CollectorAvailable: false,
- DivId: "id7",
- }
- ChartsAvailable["query_range-cluster-cpu-usage.json.gz"] = &MustGatherMetric{
- Path: "query_range-cluster-cpu-usage.json.gz",
- OriginalQuery: "",
- PlotLabel: "namespace",
- PlotTitle: "Cluster CPU",
- PlotSubTitle: "",
- CollectorAvailable: false,
- DivId: "id8",
- }
-}
-
-// NewMetricsPage create the page object to genera the metric report.
-func NewMetricsPage() *components.Page {
+// newMetricsPage creates the page object to generate the metric report.
+func newMetricsPage() *components.Page {
page := components.NewPage()
page.PageTitle = "OPCT Report Metrics"
return page
@@ -161,7 +58,7 @@ func SaveMetricsPageReport(page *components.Page, path string) error {
return nil
}
-func (mmm *MustGatherMetric) NewChart() *charts.Line {
+func (mmm *MustGatherChart) NewChart() *charts.Line {
return mmm.processMetric(&readMetricInput{
filename: mmm.Path,
label: mmm.PlotLabel,
@@ -170,7 +67,7 @@ func (mmm *MustGatherMetric) NewChart() *charts.Line {
})
}
-func (mmm *MustGatherMetric) NewCharts() []*charts.Line {
+func (mmm *MustGatherChart) NewCharts() []*charts.Line {
in := &readMetricInput{
filename: mmm.Path,
label: mmm.PlotLabel,
@@ -181,7 +78,7 @@ func (mmm *MustGatherMetric) NewCharts() []*charts.Line {
}
// LoadData generates the metric widget (plot graph from data series).
-func (mmm *MustGatherMetric) LoadData(payload []byte) error {
+func (mmm *MustGatherChart) LoadData(payload []byte) error {
mmm.MetricData = &PrometheusResponse{}
err := json.Unmarshal(payload, &mmm.MetricData)
@@ -194,7 +91,7 @@ func (mmm *MustGatherMetric) LoadData(payload []byte) error {
}
// processMetric generates the metric widget (plot graph from data series).
-func (mmm *MustGatherMetric) processMetric(in *readMetricInput) *charts.Line {
+func (mmm *MustGatherChart) processMetric(in *readMetricInput) *charts.Line {
line := charts.NewLine()
line.SetGlobalOptions(
@@ -252,7 +149,7 @@ func (mmm *MustGatherMetric) processMetric(in *readMetricInput) *charts.Line {
}
// processMetric generates the metric widget (plot graph from data series).
-func (mmm *MustGatherMetric) processMetrics(in *readMetricInput) []*charts.Line {
+func (mmm *MustGatherChart) processMetrics(in *readMetricInput) []*charts.Line {
var lines []*charts.Line
idx := 0
@@ -293,7 +190,6 @@ func (mmm *MustGatherMetric) processMetrics(in *readMetricInput) []*charts.Line
}
// sort.Strings(allTimestamps)
-
// line.SetSeriesOptions(charts.WithLineChartOpts(
// opts.LineChart{Smooth: false, ShowSymbol: true, SymbolSize: 15, Symbol: "diamond"},
// ))
diff --git a/internal/openshift/mustgathermetrics/main.go b/internal/openshift/mustgathermetrics/main.go
index da0c2158..3a1d0269 100644
--- a/internal/openshift/mustgathermetrics/main.go
+++ b/internal/openshift/mustgathermetrics/main.go
@@ -9,27 +9,127 @@ import (
"strings"
"github.com/pkg/errors"
- "github.com/redhat-openshift-ecosystem/provider-certification-tool/internal/opct/chart"
log "github.com/sirupsen/logrus"
"github.com/ulikunitz/xz"
)
+type MustGatherChart struct {
+ Path string
+ OriginalQuery string
+ PlotLabel string
+ PlotTitle string
+ PlotSubTitle string
+ CollectorAvailable bool
+ MetricData *PrometheusResponse
+ DivId string
+}
+
+type MustGatherCharts map[string]*MustGatherChart
+
type MustGatherMetrics struct {
fileName string
data *bytes.Buffer
ReportPath string
ReportChartFile string
ServePath string
+ charts MustGatherCharts
+ page *ChartPagePlotly
}
func NewMustGatherMetrics(report, file, uri string, data *bytes.Buffer) (*MustGatherMetrics, error) {
- return &MustGatherMetrics{
+ mgm := &MustGatherMetrics{
fileName: filepath.Base(file),
data: data,
ReportPath: report,
- ReportChartFile: "/metrics.html",
ServePath: uri,
- }, nil
+ ReportChartFile: "/metrics.html",
+ }
+
+ mgm.charts = make(map[string]*MustGatherChart, 0)
+ mgm.charts["query_range-etcd-disk-fsync-db-duration-p99.json.gz"] = &MustGatherChart{
+ Path: "query_range-etcd-disk-fsync-db-duration-p99.json.gz",
+ OriginalQuery: "",
+ PlotLabel: "instance",
+ PlotTitle: "etcd fsync DB p99",
+ PlotSubTitle: "",
+ CollectorAvailable: true,
+ DivId: "id1",
+ }
+ mgm.charts["query_range-api-kas-request-duration-p99.json.gz"] = &MustGatherChart{
+ Path: "query_range-api-kas-request-duration-p99.json.gz",
+ OriginalQuery: "",
+ PlotLabel: "verb",
+ PlotTitle: "Kube API request p99",
+ PlotSubTitle: "",
+ CollectorAvailable: true,
+ DivId: "id2",
+ }
+ mgm.charts["query_range-etcd-disk-fsync-wal-duration-p99.json.gz"] = &MustGatherChart{
+ Path: "query_range-etcd-disk-fsync-wal-duration-p99.json.gz",
+ OriginalQuery: "",
+ PlotLabel: "instance",
+ PlotTitle: "etcd fsync WAL p99",
+ PlotSubTitle: "",
+ CollectorAvailable: true,
+ DivId: "id0",
+ }
+ mgm.charts["query_range-etcd-peer-round-trip-time.json.gz"] = &MustGatherChart{
+ Path: "query_range-etcd-peer-round-trip-time.json.gz",
+ OriginalQuery: "",
+ PlotLabel: "instance",
+ PlotTitle: "etcd peer round trip",
+ PlotSubTitle: "",
+ CollectorAvailable: true,
+ DivId: "id3",
+ }
+
+ mgm.charts["query_range-etcd-total-leader-elections-day.json.gz"] = &MustGatherChart{
+ Path: "query_range-etcd-total-leader-elections-day.json.gz",
+ OriginalQuery: "",
+ PlotLabel: "instance",
+ PlotTitle: "etcd peer total leader election",
+ PlotSubTitle: "",
+ CollectorAvailable: true,
+ DivId: "id4",
+ }
+ mgm.charts["query_range-etcd-request-duration-p99.json.gz"] = &MustGatherChart{
+ Path: "query_range-etcd-request-duration-p99.json.gz",
+ OriginalQuery: "",
+ PlotLabel: "operation",
+ PlotTitle: "etcd req duration p99",
+ PlotSubTitle: "",
+ CollectorAvailable: true,
+ DivId: "id5",
+ }
+ mgm.charts["query_range-cluster-storage-iops.json.gz"] = &MustGatherChart{
+ Path: "query_range-cluster-storage-iops.json.gz",
+ OriginalQuery: "",
+ PlotLabel: "namespace",
+ PlotTitle: "Cluster storage IOPS",
+ PlotSubTitle: "",
+ CollectorAvailable: false,
+ DivId: "id6",
+ }
+ mgm.charts["query_range-cluster-storage-throughput.json.gz"] = &MustGatherChart{
+ Path: "query_range-cluster-storage-throughput.json.gz",
+ OriginalQuery: "",
+ PlotLabel: "namespace",
+ PlotTitle: "Cluster storage throughput",
+ PlotSubTitle: "",
+ CollectorAvailable: false,
+ DivId: "id7",
+ }
+ mgm.charts["query_range-cluster-cpu-usage.json.gz"] = &MustGatherChart{
+ Path: "query_range-cluster-cpu-usage.json.gz",
+ OriginalQuery: "",
+ PlotLabel: "namespace",
+ PlotTitle: "Cluster CPU",
+ PlotSubTitle: "",
+ CollectorAvailable: false,
+ DivId: "id8",
+ }
+ mgm.page = newMetricsPageWithPlotly(report, uri, mgm.charts)
+ return mgm, nil
}
func (mg *MustGatherMetrics) Process() error {
@@ -58,11 +158,10 @@ func (mg *MustGatherMetrics) read(buf *bytes.Buffer) (*tar.Reader, error) {
func (mg *MustGatherMetrics) extract(tarball *tar.Reader) error {
keepReading := true
- metricsPage := chart.NewMetricsPage()
+ metricsPage := newMetricsPage()
reportPath := mg.ReportPath + mg.ReportChartFile
- page := chart.NewMetricsPageWithPlotly(mg.ReportPath, mg.ServePath)
- // Walk through files in tarball file.
+ // Walk through files in tarball.
for keepReading {
header, err := tarball.Next()
@@ -71,14 +170,14 @@ func (mg *MustGatherMetrics) extract(tarball *tar.Reader) error {
// no more files
case err == io.EOF:
- err := chart.SaveMetricsPageReport(metricsPage, reportPath)
+ err := SaveMetricsPageReport(metricsPage, reportPath)
if err != nil {
log.Errorf("error saving metrics to: %s\n", reportPath)
return err
}
			// Plotly Page
log.Debugf("Generating Charts with Plotly\n")
- err = page.RenderPage()
+ err = mg.page.RenderPage()
if err != nil {
log.Errorf("error rendering page: %v\n", err)
return err
@@ -103,7 +202,7 @@ func (mg *MustGatherMetrics) extract(tarball *tar.Reader) error {
metricFileName := filepath.Base(header.Name)
- chart, ok := chart.ChartsAvailable[metricFileName]
+ chart, ok := mg.charts[metricFileName]
if !ok {
log.Debugf("Metrics/Extractor/Unsupported metric, ignoring metric data %s\n", header.Name)
continue
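
Taken together, the constructor and Process form the public surface of this package; the following is a usage sketch only (paths and file names are illustrative, and the import compiles only inside this repository because the package is internal):

    package main

    import (
        "bytes"
        "os"

        "github.com/redhat-openshift-ecosystem/provider-certification-tool/internal/openshift/mustgathermetrics"
        log "github.com/sirupsen/logrus"
    )

    func main() {
        // Hypothetical input: the must-gather metrics archive collected by the artifacts plugin.
        raw, err := os.ReadFile("/tmp/results-data/must-gather-metrics.tar.xz")
        if err != nil {
            log.Fatalf("unable to read archive: %v", err)
        }
        // NewMustGatherMetrics registers the known query_range-*.json.gz charts and the Plotly page.
        mgm, err := mustgathermetrics.NewMustGatherMetrics("/tmp/results-data", "must-gather-metrics.tar.xz", "/metrics", bytes.NewBuffer(raw))
        if err != nil {
            log.Fatalf("unable to create metrics processor: %v", err)
        }
        // Process walks the tarball, loads matching metric payloads into each chart,
        // and renders metrics.html plus the Plotly page under the report path.
        if err := mgm.Process(); err != nil {
            log.Fatalf("unable to process metrics: %v", err)
        }
    }
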
diff --git a/internal/opct/chart/plotly.go b/internal/openshift/mustgathermetrics/plotly.go
similarity index 97%
rename from internal/opct/chart/plotly.go
rename to internal/openshift/mustgathermetrics/plotly.go
index 31f71436..02dfa80c 100644
--- a/internal/opct/chart/plotly.go
+++ b/internal/openshift/mustgathermetrics/plotly.go
@@ -1,4 +1,4 @@
-package chart
+package mustgathermetrics
import (
"bytes"
@@ -16,7 +16,7 @@ import (
type ChartPagePlotly struct {
PageTitle string
- Charts map[string]*MustGatherMetric
+ Charts MustGatherCharts
RootPath string
UriPath string
}
@@ -52,11 +52,11 @@ async function updateCharts() {
}
}`
-func NewMetricsPageWithPlotly(path, uri string) *ChartPagePlotly {
+func newMetricsPageWithPlotly(path, uri string, charts MustGatherCharts) *ChartPagePlotly {
page := &ChartPagePlotly{
PageTitle: "OPCT Report Metrics",
- Charts: ChartsAvailable,
+ Charts: charts,
RootPath: path,
UriPath: uri,
}
diff --git a/internal/pkg/summary/consolidated.go b/internal/pkg/summary/consolidated.go
deleted file mode 100644
index 5dd93326..00000000
--- a/internal/pkg/summary/consolidated.go
+++ /dev/null
@@ -1,572 +0,0 @@
-package summary
-
-import (
- "bufio"
- "fmt"
- "os"
- "sort"
-
- log "github.com/sirupsen/logrus"
-
- "github.com/pkg/errors"
-
- "github.com/redhat-openshift-ecosystem/provider-certification-tool/internal/pkg/sippy"
- "github.com/xuri/excelize/v2"
-)
-
-// ConsolidatedSummary Aggregate the results of provider and baseline
-type ConsolidatedSummary struct {
- Provider *ResultSummary
- Baseline *ResultSummary
-}
-
-// Process entrypoint to read and fill all summaries for each archive, plugin and suites
-// applying any transformation it needs through filters.
-func (cs *ConsolidatedSummary) Process() error {
-
- // Load Result Summary from Archives
- if err := cs.Provider.Populate(); err != nil {
- fmt.Println("ERROR processing provider results...")
- return err
- }
-
- if err := cs.Baseline.Populate(); err != nil {
- fmt.Println("ERROR processing baseline results...")
- return err
- }
-
- // Filters
- if err := cs.applyFilterSuite(); err != nil {
- return err
- }
-
- if err := cs.applyFilterBaseline(); err != nil {
- return err
- }
-
- if err := cs.applyFilterFlaky(); err != nil {
- return err
- }
-
- return nil
-}
-
-func (cs *ConsolidatedSummary) GetProvider() *ResultSummary {
- return cs.Provider
-}
-
-func (cs *ConsolidatedSummary) GetBaseline() *ResultSummary {
- return cs.Baseline
-}
-
-// applyFilterSuite process the FailedList for each plugin, getting **intersection** tests
-// for respective suite.
-func (cs *ConsolidatedSummary) applyFilterSuite() error {
- err := cs.applyFilterSuiteForPlugin(PluginNameKubernetesConformance)
- if err != nil {
- return err
- }
-
- err = cs.applyFilterSuiteForPlugin(PluginNameOpenShiftConformance)
- if err != nil {
- return err
- }
-
- return nil
-}
-
-// applyFilterSuiteForPlugin calculates the intersection of Provider Failed AND suite
-func (cs *ConsolidatedSummary) applyFilterSuiteForPlugin(plugin string) error {
-
- var resultsProvider *OPCTPluginSummary
- var pluginSuite *OpenshiftTestsSuite
-
- switch plugin {
- case PluginNameKubernetesConformance:
- resultsProvider = cs.GetProvider().GetOpenShift().GetResultK8SValidated()
- pluginSuite = cs.GetProvider().GetSuites().KubernetesConformance
- case PluginNameOpenShiftConformance:
- resultsProvider = cs.GetProvider().GetOpenShift().GetResultOCPValidated()
- pluginSuite = cs.GetProvider().GetSuites().OpenshiftConformance
- }
-
- e2eFailures := resultsProvider.FailedList
- e2eSuite := pluginSuite.Tests
- hashSuite := make(map[string]struct{}, len(e2eSuite))
-
- for _, v := range e2eSuite {
- hashSuite[v] = struct{}{}
- }
-
- for _, v := range e2eFailures {
- if _, ok := hashSuite[v]; ok {
- resultsProvider.FailedFilterSuite = append(resultsProvider.FailedFilterSuite, v)
- }
- }
- sort.Strings(resultsProvider.FailedFilterSuite)
- return nil
-}
-
-// applyFilterBaseline process the FailedFilterSuite for each plugin, **excluding** failures from
-// baseline test.
-func (cs *ConsolidatedSummary) applyFilterBaseline() error {
- err := cs.applyFilterBaselineForPlugin(PluginNameKubernetesConformance)
- if err != nil {
- return err
- }
-
- err = cs.applyFilterBaselineForPlugin(PluginNameOpenShiftConformance)
- if err != nil {
- return err
- }
-
- return nil
-}
-
-// applyFilterBaselineForPlugin calculates the **exclusion** tests of
-// Provider Failed included on suite and Baseline failed tests.
-func (cs *ConsolidatedSummary) applyFilterBaselineForPlugin(plugin string) error {
-
- var providerSummary *OPCTPluginSummary
- var e2eFailuresBaseline []string
-
- switch plugin {
- case PluginNameKubernetesConformance:
- providerSummary = cs.GetProvider().GetOpenShift().GetResultK8SValidated()
- if cs.GetBaseline().HasValidResults() {
- e2eFailuresBaseline = cs.GetBaseline().GetOpenShift().GetResultK8SValidated().FailedList
- }
- case PluginNameOpenShiftConformance:
- providerSummary = cs.GetProvider().GetOpenShift().GetResultOCPValidated()
- if cs.GetBaseline().HasValidResults() {
- e2eFailuresBaseline = cs.GetBaseline().GetOpenShift().GetResultOCPValidated().FailedList
- }
- default:
- return errors.New("Suite not found to apply filter: Flaky")
- }
-
- e2eFailuresProvider := providerSummary.FailedFilterSuite
- hashBaseline := make(map[string]struct{}, len(e2eFailuresBaseline))
-
- for _, v := range e2eFailuresBaseline {
- hashBaseline[v] = struct{}{}
- }
-
- for _, v := range e2eFailuresProvider {
- if _, ok := hashBaseline[v]; !ok {
- providerSummary.FailedFilterBaseline = append(providerSummary.FailedFilterBaseline, v)
- }
- }
- sort.Strings(providerSummary.FailedFilterBaseline)
- return nil
-}
-
-// applyFilterFlaky process the FailedFilterSuite for each plugin, **excluding** failures from
-// baseline test.
-func (cs *ConsolidatedSummary) applyFilterFlaky() error {
- err := cs.applyFilterFlakyForPlugin(PluginNameKubernetesConformance)
- if err != nil {
- return err
- }
-
- err = cs.applyFilterFlakyForPlugin(PluginNameOpenShiftConformance)
- if err != nil {
- return err
- }
-
- return nil
-}
-
-// applyFilterFlakyForPlugin query the Sippy API looking for each failed test
-// on each plugin/suite, saving the list on the ResultSummary.
-func (cs *ConsolidatedSummary) applyFilterFlakyForPlugin(plugin string) error {
-
- var ps *OPCTPluginSummary
-
- switch plugin {
- case PluginNameKubernetesConformance:
- ps = cs.GetProvider().GetOpenShift().GetResultK8SValidated()
- case PluginNameOpenShiftConformance:
- ps = cs.GetProvider().GetOpenShift().GetResultOCPValidated()
- default:
- return errors.New("Suite not found to apply filter: Flaky")
- }
-
- // TODO: define if we will check for flakes for all failures or only filtered
- // Query Flaky only the FilteredBaseline to avoid many external queries.
- api := sippy.NewSippyAPI()
- for _, name := range ps.FailedFilterBaseline {
-
- resp, err := api.QueryTests(&sippy.SippyTestsRequestInput{TestName: name})
- if err != nil {
- log.Errorf("#> Error querying to Sippy API: %v", err)
- continue
- }
- for _, r := range *resp {
- if _, ok := ps.FailedItems[name]; ok {
- ps.FailedItems[name].Flaky = &r
- } else {
- ps.FailedItems[name] = &PluginFailedItem{
- Name: name,
- Flaky: &r,
- }
- }
-
- // Remove all flakes, regardless the percentage.
- // TODO: Review checking flaky severity
- if ps.FailedItems[name].Flaky.CurrentFlakes == 0 {
- ps.FailedFilterFlaky = append(ps.FailedFilterFlaky, name)
- }
- }
- }
-
- sort.Strings(ps.FailedFilterFlaky)
- return nil
-}
-
-func (cs *ConsolidatedSummary) saveResultsPlugin(path, plugin string) error {
-
- var resultsProvider *OPCTPluginSummary
- var resultsBaseline *OPCTPluginSummary
- var suite *OpenshiftTestsSuite
- var prefix = "tests"
- bProcessed := cs.GetBaseline().HasValidResults()
-
- switch plugin {
- case PluginNameKubernetesConformance:
- resultsProvider = cs.GetProvider().GetOpenShift().GetResultK8SValidated()
- if bProcessed {
- resultsBaseline = cs.GetBaseline().GetOpenShift().GetResultK8SValidated()
- }
- suite = cs.GetProvider().GetSuites().KubernetesConformance
- case PluginNameOpenShiftConformance:
- resultsProvider = cs.GetProvider().GetOpenShift().GetResultOCPValidated()
- if bProcessed {
- resultsBaseline = cs.GetBaseline().GetOpenShift().GetResultOCPValidated()
- }
- suite = cs.GetProvider().GetSuites().OpenshiftConformance
- }
-
- // Save Provider failures
- filename := fmt.Sprintf("%s/%s_%s_provider_failures-1-ini.txt", path, prefix, plugin)
- if err := writeFileTestList(filename, resultsProvider.FailedList); err != nil {
- return err
- }
-
- // Save Provider failures with filter: Suite (only)
- filename = fmt.Sprintf("%s/%s_%s_provider_failures-2-filter1_suite.txt", path, prefix, plugin)
- if err := writeFileTestList(filename, resultsProvider.FailedFilterSuite); err != nil {
- return err
- }
-
- // Save Provider failures with filter: Baseline exclusion
- filename = fmt.Sprintf("%s/%s_%s_provider_failures-3-filter2_baseline.txt", path, prefix, plugin)
- if err := writeFileTestList(filename, resultsProvider.FailedFilterBaseline); err != nil {
- return err
- }
-
- // Save Provider failures with filter: Flaky
- filename = fmt.Sprintf("%s/%s_%s_provider_failures-4-filter3_without_flakes.txt", path, prefix, plugin)
- if err := writeFileTestList(filename, resultsProvider.FailedFilterFlaky); err != nil {
- return err
- }
-
- // Save the Providers failures for the latest filter to review (focus on this)
- filename = fmt.Sprintf("%s/%s_%s_provider_failures.txt", path, prefix, plugin)
- if err := writeFileTestList(filename, resultsProvider.FailedFilterBaseline); err != nil {
- return err
- }
-
- // Save baseline failures
- if bProcessed {
- filename = fmt.Sprintf("%s/%s_%s_baseline_failures.txt", path, prefix, plugin)
- if err := writeFileTestList(filename, resultsBaseline.FailedList); err != nil {
- return err
- }
- }
-
- // Save the openshift-tests suite use by this plugin:
- filename = fmt.Sprintf("%s/%s_%s_suite_full.txt", path, prefix, plugin)
- if err := writeFileTestList(filename, suite.Tests); err != nil {
- return err
- }
-
- return nil
-}
-
-func (cs *ConsolidatedSummary) extractFailuresDetailsByPlugin(path, plugin string) error {
-
- var resultsProvider *OPCTPluginSummary
- var resultsBaseline *OPCTPluginSummary
- bProcessed := cs.GetBaseline().HasValidResults()
- ignoreExistingDir := true
-
- switch plugin {
- case PluginNameKubernetesConformance:
- resultsProvider = cs.GetProvider().GetOpenShift().GetResultK8SValidated()
- if bProcessed {
- resultsBaseline = cs.GetBaseline().GetOpenShift().GetResultK8SValidated()
- }
- case PluginNameOpenShiftConformance:
- resultsProvider = cs.GetProvider().GetOpenShift().GetResultOCPValidated()
- if bProcessed {
- resultsBaseline = cs.GetBaseline().GetOpenShift().GetResultOCPValidated()
- }
- }
-
- currentDirectory := "failures-provider-filtered"
- subdir := fmt.Sprintf("%s/%s", path, currentDirectory)
- if err := createDir(subdir, ignoreExistingDir); err != nil {
- return err
- }
-
- subPrefix := fmt.Sprintf("%s/%s", subdir, plugin)
- errItems := resultsProvider.FailedItems
- errList := resultsProvider.FailedFilterBaseline
- if err := extractTestErrors(subPrefix, errItems, errList); err != nil {
- return err
- }
-
- currentDirectory = "failures-provider"
- subdir = fmt.Sprintf("%s/%s", path, currentDirectory)
- if err := createDir(subdir, ignoreExistingDir); err != nil {
- return err
- }
-
- subPrefix = fmt.Sprintf("%s/%s", subdir, plugin)
- errItems = resultsProvider.FailedItems
- errList = resultsProvider.FailedList
- if err := extractTestErrors(subPrefix, errItems, errList); err != nil {
- return err
- }
-
- currentDirectory = "failures-baseline"
- subdir = fmt.Sprintf("%s/%s", path, currentDirectory)
- if err := createDir(subdir, ignoreExistingDir); err != nil {
- return err
- }
-
- if bProcessed {
- subPrefix = fmt.Sprintf("%s/%s", subdir, plugin)
- errItems = resultsBaseline.FailedItems
- errList = resultsBaseline.FailedList
- if err := extractTestErrors(subPrefix, errItems, errList); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (cs *ConsolidatedSummary) saveFailuresIndexToSheet(path string) error {
-
- var rowN int64
- var errList []string
- bProcessed := cs.GetBaseline().HasValidResults()
- sheet := excelize.NewFile()
- sheetFile := fmt.Sprintf("%s/failures-index.xlsx", path)
- defer saveSheet(sheet, sheetFile)
-
- sheetName := "failures-provider-filtered"
- sh, err := sheet.NewSheet(sheetName)
- if err == nil {
- sheet.SetActiveSheet(sh)
- if err := createSheet(sheet, sheetName); err != nil {
- log.Error(err)
- } else {
- errList = cs.GetProvider().GetOpenShift().GetResultK8SValidated().FailedFilterBaseline
- rowN = 2
- populateSheet(sheet, sheetName, PluginNameKubernetesConformance, errList, &rowN)
-
- errList = cs.GetProvider().GetOpenShift().GetResultOCPValidated().FailedFilterBaseline
- populateSheet(sheet, sheetName, PluginNameOpenShiftConformance, errList, &rowN)
- }
- } else {
- log.Errorf("skipping spreadsheet %s creation due errors: %s", sheetName, err)
- }
-
- sheetName = "failures-provider"
- sh, err = sheet.NewSheet(sheetName)
- if err == nil {
- sheet.SetActiveSheet(sh)
- if err := createSheet(sheet, sheetName); err != nil {
- log.Error(err)
- } else {
- errList = cs.GetProvider().GetOpenShift().GetResultK8SValidated().FailedList
- rowN = 2
- populateSheet(sheet, sheetName, PluginNameKubernetesConformance, errList, &rowN)
-
- errList = cs.GetProvider().GetOpenShift().GetResultOCPValidated().FailedList
- populateSheet(sheet, sheetName, PluginNameOpenShiftConformance, errList, &rowN)
- }
- } else {
- log.Errorf("skipping spreadsheet %s creation due errors: %s", sheetName, err)
- }
-
- if bProcessed {
- sheetName = "failures-baseline"
- sh, err = sheet.NewSheet(sheetName)
- if err != nil {
- log.Errorf("skipping spreadsheet %s creation due errors: %s", sheetName, err)
- return nil
- }
- sheet.SetActiveSheet(sh)
- if err := createSheet(sheet, sheetName); err != nil {
- log.Error(err)
- } else {
- errList = cs.GetBaseline().GetOpenShift().GetResultK8SValidated().FailedList
- rowN = 2
- populateSheet(sheet, sheetName, PluginNameKubernetesConformance, errList, &rowN)
-
- errList = cs.GetBaseline().GetOpenShift().GetResultOCPValidated().FailedList
- populateSheet(sheet, sheetName, PluginNameOpenShiftConformance, errList, &rowN)
- }
- }
-
- return nil
-}
-
-// SaveResults dump all the results and processed to the disk to be used
-// on the review process.
-func (cs *ConsolidatedSummary) SaveResults(path string) error {
-
- if err := createDir(path, false); err != nil {
- return err
- }
-
- // Save the list of failures into individual files by Plugin
- if err := cs.saveResultsPlugin(path, PluginNameKubernetesConformance); err != nil {
- return err
- }
- if err := cs.saveResultsPlugin(path, PluginNameOpenShiftConformance); err != nil {
- return err
- }
-
- // Extract errors details to sub directories
- if err := cs.extractFailuresDetailsByPlugin(path, PluginNameKubernetesConformance); err != nil {
- return err
- }
- if err := cs.extractFailuresDetailsByPlugin(path, PluginNameOpenShiftConformance); err != nil {
- return err
- }
-
- // Save one Sheet file with Failures to be used on the review process
- if err := cs.saveFailuresIndexToSheet(path); err != nil {
- return err
- }
-
- fmt.Printf("\n Data Saved to directory '%s/'\n", path)
- return nil
-}
-
-// writeFileTestList saves the list of test names to a new text file
-func writeFileTestList(filename string, data []string) error {
- fd, err := os.OpenFile(filename, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
- if err != nil {
- log.Fatalf("failed creating file: %s", err)
- }
- defer fd.Close()
-
- writer := bufio.NewWriter(fd)
- defer writer.Flush()
-
- for _, line := range data {
- _, err = writer.WriteString(line + "\n")
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// extractTestErrors dumps the test error, summary and stdout, to be saved
-// to individual files.
-func extractTestErrors(prefix string, items map[string]*PluginFailedItem, failures []string) error {
- for idx, line := range failures {
- if _, ok := items[line]; ok {
- file := fmt.Sprintf("%s_%d-failure.txt", prefix, idx+1)
- err := writeErrorToFile(file, items[line].Failure)
- if err != nil {
- log.Errorf("Error writing Failure for test: %s\n", line)
- }
-
- file = fmt.Sprintf("%s_%d-systemOut.txt", prefix, idx+1)
- err = writeErrorToFile(file, items[line].SystemOut)
- if err != nil {
- log.Errorf("Error writing SystemOut for test: %s\n", line)
- }
- }
- }
- return nil
-}
-
-// writeErrorToFile save the entire buffer to individual file.
-func writeErrorToFile(file, data string) error {
- fd, err := os.OpenFile(file, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
- if err != nil {
- log.Fatalf("failed creating file: %s", err)
- }
- defer fd.Close()
-
- writer := bufio.NewWriter(fd)
- defer writer.Flush()
-
- _, err = writer.WriteString(data)
- if err != nil {
- return err
- }
-
- return nil
-}
-
-// createDir checks if the directory exists, if not creates it, otherwise log and return error
-func createDir(path string, ignoreexisting bool) error {
- if _, err := os.Stat(path); !os.IsNotExist(err) {
- if ignoreexisting {
- return nil
- }
- log.Errorf("ERROR: Directory already exists [%s]: %v", path, err)
- return err
- }
-
- if err := os.Mkdir(path, os.ModePerm); err != nil {
- log.Errorf("ERROR: Unable to create directory [%s]: %v", path, err)
- return err
- }
- return nil
-}
-
-// createSheet creates the excel spreadsheet headers
-func createSheet(sheet *excelize.File, sheeName string) error {
- header := map[string]string{
- "A1": "Plugin", "B1": "Index", "C1": "Error_Directory",
- "D1": "Test_Name", "E1": "Notes_Review", "F1": "References"}
-
- // create header
- for k, v := range header {
- _ = sheet.SetCellValue(sheeName, k, v)
- }
-
- return nil
-}
-
-// populateGsheet fill each row per error item.
-func populateSheet(sheet *excelize.File, sheeName, suite string, list []string, rowN *int64) {
- for idx, v := range list {
- _ = sheet.SetCellValue(sheeName, fmt.Sprintf("A%d", *rowN), suite)
- _ = sheet.SetCellValue(sheeName, fmt.Sprintf("B%d", *rowN), idx+1)
- _ = sheet.SetCellValue(sheeName, fmt.Sprintf("C%d", *rowN), sheeName)
- _ = sheet.SetCellValue(sheeName, fmt.Sprintf("D%d", *rowN), v)
- _ = sheet.SetCellValue(sheeName, fmt.Sprintf("E%d", *rowN), "TODO Review")
- _ = sheet.SetCellValue(sheeName, fmt.Sprintf("F%d", *rowN), "")
- *(rowN) += 1
- }
-}
-
-// save the excel sheet to the disk.
-func saveSheet(sheet *excelize.File, sheetFileName string) {
- if err := sheet.SaveAs(sheetFileName); err != nil {
- log.Error(err)
- }
-}
diff --git a/internal/pkg/summary/opct.go b/internal/pkg/summary/opct.go
deleted file mode 100644
index edc45016..00000000
--- a/internal/pkg/summary/opct.go
+++ /dev/null
@@ -1,52 +0,0 @@
-package summary
-
-import (
- "github.com/redhat-openshift-ecosystem/provider-certification-tool/internal/pkg/sippy"
-)
-
-const (
- PluginNameOpenShiftUpgrade = "05-openshift-cluster-upgrade"
- PluginNameKubernetesConformance = "10-openshift-kube-conformance"
- PluginNameOpenShiftConformance = "20-openshift-conformance-validated"
- PluginNameArtifactsCollector = "99-openshift-artifacts-collector"
-
- // Old Plugin names (prior v0.2). It's used to keep compatibility
- PluginOldNameKubernetesConformance = "openshift-kube-conformance"
- PluginOldNameOpenShiftConformance = "openshift-conformance-validated"
-)
-
-// OPCT
-type OPCTPluginSummary struct {
- Name string
- NameAlias string
- Status string
- Total int64
- Passed int64
- Failed int64
- Timeout int64
- Skipped int64
-
- // FailedItems is the map with details for each failure
- FailedItems map[string]*PluginFailedItem
- // FailedList is the list of tests failures on the original execution
- FailedList []string
- // FailedFilterSuite is the list of failures (A) included only in the original suite (B): A INTERSECTION B
- FailedFilterSuite []string
- // FailedFilterBaseline is the list of failures (A) excluding the baseline(B): A EXCLUDE B
- FailedFilterBaseline []string
- // FailedFilteFlaky is the list of failures with no Flakes on OpenShift CI
- FailedFilterFlaky []string
-}
-
-type PluginFailedItem struct {
- // Name is the name of the e2e test
- Name string
- // Failure contains the failure reason extracted from JUnit field 'item.detials.failure'
- Failure string
- // SystemOut contains the entire test stdout extracted from JUnit field 'item.detials.system-out'
- SystemOut string
- // Offset is the offset of failure from the plugin result file
- Offset int
- // Flaky contains the flaky information from OpenShift CI - scraped from Sippy API
- Flaky *sippy.SippyTestsResponse
-}
diff --git a/internal/pkg/summary/openshift.go b/internal/pkg/summary/openshift.go
deleted file mode 100644
index 29bdf071..00000000
--- a/internal/pkg/summary/openshift.go
+++ /dev/null
@@ -1,138 +0,0 @@
-package summary
-
-import (
- "fmt"
-
- configv1 "github.com/openshift/api/config/v1"
- "github.com/pkg/errors"
-)
-
-type OpenShiftSummary struct {
- Infrastructure *configv1.Infrastructure
- ClusterVersion *configv1.ClusterVersion
- ClusterOperators *configv1.ClusterOperatorList
-
- // Plugin Results
- PluginResultK8sConformance *OPCTPluginSummary
- PluginResultOCPValidated *OPCTPluginSummary
-
- // get from Sonobuoy metadata
- VersionK8S string
-}
-
-type SummaryClusterVersionOutput struct {
- DesiredVersion string
- Progressing string
- ProgressingMessage string
-}
-
-type SummaryClusterOperatorOutput struct {
- CountAvailable uint64
- CountProgressing uint64
- CountDegraded uint64
-}
-
-type SummaryOpenShiftInfrastructureV1 = configv1.Infrastructure
-
-func NewOpenShiftSummary() *OpenShiftSummary {
- return &OpenShiftSummary{}
-}
-
-func (os *OpenShiftSummary) SetInfrastructure(cr *configv1.InfrastructureList) error {
- if len(cr.Items) == 0 {
- return errors.New("Unable to find result Items to set Infrastructures")
- }
- os.Infrastructure = &cr.Items[0]
- return nil
-}
-
-func (os *OpenShiftSummary) GetInfrastructure() (*SummaryOpenShiftInfrastructureV1, error) {
- if os.Infrastructure == nil {
- return &SummaryOpenShiftInfrastructureV1{}, nil
- }
- return os.Infrastructure, nil
-}
-
-func (os *OpenShiftSummary) SetClusterVersion(cr *configv1.ClusterVersionList) error {
- if len(cr.Items) == 0 {
- return errors.New("Unable to find result Items to set Infrastructures")
- }
- os.ClusterVersion = &cr.Items[0]
- return nil
-}
-
-func (os *OpenShiftSummary) GetClusterVersion() (*SummaryClusterVersionOutput, error) {
- if os.ClusterVersion == nil {
- return &SummaryClusterVersionOutput{}, nil
- }
- resp := SummaryClusterVersionOutput{
- DesiredVersion: os.ClusterVersion.Status.Desired.Version,
- }
- for _, condition := range os.ClusterVersion.Status.Conditions {
- if condition.Type == configv1.OperatorProgressing {
- resp.Progressing = string(condition.Status)
- resp.ProgressingMessage = condition.Message
- }
- }
- return &resp, nil
-}
-
-func (os *OpenShiftSummary) SetClusterOperators(cr *configv1.ClusterOperatorList) error {
- if len(cr.Items) == 0 {
- return errors.New("Unable to find result Items to set ClusterOperators")
- }
- os.ClusterOperators = cr
- return nil
-}
-
-func (os *OpenShiftSummary) GetClusterOperator() (*SummaryClusterOperatorOutput, error) {
- out := SummaryClusterOperatorOutput{}
- for _, co := range os.ClusterOperators.Items {
- for _, condition := range co.Status.Conditions {
- switch condition.Type {
- case configv1.OperatorAvailable:
- if condition.Status == configv1.ConditionTrue {
- out.CountAvailable += 1
- }
- case configv1.OperatorProgressing:
- if condition.Status == configv1.ConditionTrue {
- out.CountProgressing += 1
- }
- case configv1.OperatorDegraded:
- if condition.Status == configv1.ConditionTrue {
- out.CountDegraded += 1
- }
- }
- }
- }
- return &out, nil
-}
-
-func (os *OpenShiftSummary) SetPluginResult(in *OPCTPluginSummary) error {
- switch in.Name {
- case PluginNameKubernetesConformance:
- os.PluginResultK8sConformance = in
- case PluginOldNameKubernetesConformance:
- in.NameAlias = in.Name
- in.Name = PluginNameKubernetesConformance
- os.PluginResultK8sConformance = in
-
- case PluginNameOpenShiftConformance:
- os.PluginResultOCPValidated = in
- case PluginOldNameOpenShiftConformance:
- in.NameAlias = in.Name
- in.Name = PluginOldNameOpenShiftConformance
- os.PluginResultOCPValidated = in
- default:
- return fmt.Errorf("unable to Set Plugin results: Plugin not found: %s", in.Name)
- }
- return nil
-}
-
-func (os *OpenShiftSummary) GetResultOCPValidated() *OPCTPluginSummary {
- return os.PluginResultOCPValidated
-}
-
-func (os *OpenShiftSummary) GetResultK8SValidated() *OPCTPluginSummary {
- return os.PluginResultK8sConformance
-}
diff --git a/internal/pkg/summary/result.go b/internal/pkg/summary/result.go
deleted file mode 100644
index 2cd04a8f..00000000
--- a/internal/pkg/summary/result.go
+++ /dev/null
@@ -1,331 +0,0 @@
-package summary
-
-import (
- "bytes"
- "compress/gzip"
- "fmt"
- "os"
-
- "github.com/pkg/errors"
- log "github.com/sirupsen/logrus"
-
- configv1 "github.com/openshift/api/config/v1"
- "github.com/vmware-tanzu/sonobuoy/pkg/client/results"
- "github.com/vmware-tanzu/sonobuoy/pkg/discovery"
-)
-
-const (
- ResultSourceNameProvider = "provider"
- ResultSourceNameBaseline = "baseline"
-
- // OpenShift Custom Resources locations on archive file
- pathResourceInfrastructures = "resources/cluster/config.openshift.io_v1_infrastructures.json"
- pathResourceClusterVersions = "resources/cluster/config.openshift.io_v1_clusterversions.json"
- pathResourceClusterOperators = "resources/cluster/config.openshift.io_v1_clusteroperators.json"
- pathPluginArtifactTestsK8S = "plugins/99-openshift-artifacts-collector/results/global/artifacts_e2e-tests_kubernetes-conformance.txt"
- pathPluginArtifactTestsOCP = "plugins/99-openshift-artifacts-collector/results/global/artifacts_e2e-tests_openshift-conformance.txt"
- // TODO: the following file is used to keep compatibility with versions older than v0.3
- pathPluginArtifactTestsOCP2 = "plugins/99-openshift-artifacts-collector/results/global/artifacts_e2e-openshift-conformance.txt"
-)
-
-// ResultSummary persists the reference of resulta archive
-type ResultSummary struct {
- Name string
- Archive string
- Sonobuoy *SonobuoySummary
- OpenShift *OpenShiftSummary
- Suites *OpenshiftTestsSuites
- reader *results.Reader
-}
-
-// HasValidResults checks if the result instance has valid archive to be processed,
-// returning true if it's valid.
-// Invalid results happens when the baseline archive was not set on the CLI arguments,
-// making the 'process' command to ignore the comparisons and filters related.
-func (rs *ResultSummary) HasValidResults() bool {
- if rs.Archive == "" && rs.Name == ResultSourceNameBaseline {
- return false
- }
- return true
-}
-
-// Populate open the archive and process the files to populate the summary structures.
-func (rs *ResultSummary) Populate() error {
-
- if !rs.HasValidResults() {
- log.Warnf("Ignoring to populate source '%s'. Missing or invalid baseline artifact (-b): %s", rs.Name, rs.Archive)
- return nil
- }
-
- cleanup, err := rs.openReader()
- defer cleanup()
- if err != nil {
- return errors.Wrapf(err, "unable to open reader for file '%s'", rs.Archive)
- }
-
- // Report on all plugins or the specified one.
- plugins, err := rs.getPluginList()
- if err != nil {
- return errors.Wrapf(err, "unable to determine plugins to report on")
- }
- if len(plugins) == 0 {
- return fmt.Errorf("no plugins specified by either the --plugin flag or tarball metadata")
- }
-
- var lastErr error
- for _, plugin := range plugins {
- log.Infof("Processing Plugin %s...\n", plugin)
- switch plugin {
- case PluginNameOpenShiftUpgrade, PluginNameArtifactsCollector:
- log.Infof("Ignoring Plugin %s", plugin)
- continue
- }
- err := rs.processPlugin(plugin)
- if err != nil {
- lastErr = err
- }
- }
-
- // TODO: review the fd usage for tarbal and file
- cleanup, err = rs.openReader()
- defer cleanup()
- if err != nil {
- return err
- }
-
- err = rs.populateSummary()
- if err != nil {
- lastErr = err
- }
-
- return lastErr
-}
-
-// GetOpenShift returns the OpenShift objects parsed from results
-func (rs *ResultSummary) GetOpenShift() *OpenShiftSummary {
- if !rs.HasValidResults() {
- return &OpenShiftSummary{}
- }
- return rs.OpenShift
-}
-
-// GetSonobuoy returns the Sonobuoy objects parsed from results
-func (rs *ResultSummary) GetSonobuoy() *SonobuoySummary {
- if !rs.HasValidResults() {
- return &SonobuoySummary{}
- }
- return rs.Sonobuoy
-}
-
-// GetSonobuoyCluster returns the SonobuoyCluster object parsed from results
-func (rs *ResultSummary) GetSonobuoyCluster() *discovery.ClusterSummary {
- if !rs.HasValidResults() {
- return &discovery.ClusterSummary{}
- }
- return rs.Sonobuoy.Cluster
-}
-
-// GetSuites returns the Conformance suites collected from results
-func (rs *ResultSummary) GetSuites() *OpenshiftTestsSuites {
- return rs.Suites
-}
-
-// getPluginList extract the plugin list from the archive reader.
-func (rs *ResultSummary) getPluginList() ([]string, error) {
- runInfo := discovery.RunInfo{}
- err := rs.reader.WalkFiles(func(path string, info os.FileInfo, err error) error {
- return results.ExtractFileIntoStruct(rs.reader.RunInfoFile(), path, info, &runInfo)
- })
-
- return runInfo.LoadedPlugins, errors.Wrap(err, "finding plugin list")
-}
-
-// openReader returns a *results.Reader along with a cleanup function to close the
-// underlying readers. The cleanup function is guaranteed to never be nil.
-func (rs *ResultSummary) openReader() (func(), error) {
-
- filepath := rs.Archive
- fi, err := os.Stat(filepath)
- if err != nil {
- rs.reader = nil
- return func() {}, err
- }
- // When results is a directory
- if fi.IsDir() {
- rs.reader = results.NewReaderFromDir(filepath)
- return func() {}, nil
- }
- f, err := os.Open(filepath)
- if err != nil {
- rs.reader = nil
- return func() {}, errors.Wrapf(err, "could not open sonobuoy archive: %v", filepath)
- }
-
- gzr, err := gzip.NewReader(f)
- if err != nil {
- rs.reader = nil
- return func() { f.Close() }, errors.Wrap(err, "could not make a gzip reader")
- }
-
- rs.reader = results.NewReaderWithVersion(gzr, results.VersionTen)
- return func() { gzr.Close(); f.Close() }, nil
-}
-
-// processPlugin receives the plugin name and load the result file to be processed.
-func (rs *ResultSummary) processPlugin(plugin string) error {
-
- // TODO: review the fd usage for tarbal and file
- cleanup, err := rs.openReader()
- defer cleanup()
- if err != nil {
- return err
- }
-
- obj, err := rs.reader.PluginResultsItem(plugin)
- if err != nil {
- return err
- }
-
- err = rs.processPluginResult(obj)
- if err != nil {
- return err
- }
- return nil
-}
-
-// processPluginResult receives the plugin results object and parse it to the summary.
-func (rs *ResultSummary) processPluginResult(obj *results.Item) error {
- statusCounts := map[string]int{}
- var failures []results.Item
- var failedList []string
-
- statusCounts, failures = walkForSummary(obj, statusCounts, failures)
-
- total := 0
- for _, v := range statusCounts {
- total += v
- }
-
- failedItems := make(map[string]*PluginFailedItem, len(failures))
- for _, item := range failures {
- failedItems[item.Name] = &PluginFailedItem{
- Name: item.Name,
- }
- if _, ok := item.Details["failure"]; ok {
- failedItems[item.Name].Failure = item.Details["failure"].(string)
- }
- if _, ok := item.Details["system-out"]; ok {
- failedItems[item.Name].SystemOut = item.Details["system-out"].(string)
- }
- if _, ok := item.Details["offset"]; ok {
- failedItems[item.Name].Offset = item.Details["offset"].(int)
- }
- failedList = append(failedList, item.Name)
- }
-
- if err := rs.GetOpenShift().SetPluginResult(&OPCTPluginSummary{
- Name: obj.Name,
- Status: obj.Status,
- Total: int64(total),
- Passed: int64(statusCounts[results.StatusPassed]),
- Failed: int64(statusCounts[results.StatusFailed] + statusCounts[results.StatusTimeout]),
- Timeout: int64(statusCounts[results.StatusTimeout]),
- Skipped: int64(statusCounts[results.StatusSkipped]),
- FailedList: failedList,
- FailedItems: failedItems,
- }); err != nil {
- return err
- }
-
- delete(statusCounts, results.StatusPassed)
- delete(statusCounts, results.StatusFailed)
- delete(statusCounts, results.StatusTimeout)
- delete(statusCounts, results.StatusSkipped)
-
- return nil
-}
-
-// populateSummary load all files from archive reader and extract desired
-// information to the ResultSummary.
-func (rs *ResultSummary) populateSummary() error {
-
- var bugSuiteK8S bytes.Buffer
- var bugSuiteOCP bytes.Buffer
- sbCluster := discovery.ClusterSummary{}
- ocpInfra := configv1.InfrastructureList{}
- ocpCV := configv1.ClusterVersionList{}
- ocpCO := configv1.ClusterOperatorList{}
-
- // Iterate over the archive to get the items as an object to build the Summary report.
- err := rs.reader.WalkFiles(func(path string, info os.FileInfo, e error) error {
- if err := results.ExtractFileIntoStruct(results.ClusterHealthFilePath(), path, info, &sbCluster); err != nil {
- return errors.Wrap(err, fmt.Sprintf("extracting file '%s': %v", path, err))
- }
- if err := results.ExtractFileIntoStruct(pathResourceInfrastructures, path, info, &ocpInfra); err != nil {
- return errors.Wrap(err, fmt.Sprintf("extracting file '%s': %v", path, err))
- }
- if err := results.ExtractFileIntoStruct(pathResourceClusterVersions, path, info, &ocpCV); err != nil {
- return errors.Wrap(err, fmt.Sprintf("extracting file '%s': %v", path, err))
- }
- if err := results.ExtractFileIntoStruct(pathResourceClusterOperators, path, info, &ocpCO); err != nil {
- return errors.Wrap(err, fmt.Sprintf("extracting file '%s': %v", path, err))
- }
- if warn := results.ExtractBytes(pathPluginArtifactTestsK8S, path, info, &bugSuiteK8S); warn != nil {
- log.Warnf("Unable to load file %s: %v\n", pathPluginArtifactTestsK8S, warn)
- return errors.Wrap(warn, fmt.Sprintf("extracting file '%s': %v", path, warn))
- }
- if warn := results.ExtractBytes(pathPluginArtifactTestsOCP, path, info, &bugSuiteOCP); warn != nil {
- log.Warnf("Unable to load file %s: %v\n", pathPluginArtifactTestsOCP, warn)
- return errors.Wrap(warn, fmt.Sprintf("extracting file '%s': %v", path, warn))
- }
- if warn := results.ExtractBytes(pathPluginArtifactTestsOCP2, path, info, &bugSuiteOCP); warn != nil {
- log.Warnf("Unable to load file %s: %v\n", pathPluginArtifactTestsOCP2, warn)
- return errors.Wrap(warn, fmt.Sprintf("extracting file '%s': %v", path, warn))
- }
- return e
- })
- if err != nil {
- return err
- }
-
- if err := rs.GetSonobuoy().SetCluster(&sbCluster); err != nil {
- return err
- }
- if err := rs.GetOpenShift().SetInfrastructure(&ocpInfra); err != nil {
- return err
- }
- if err := rs.GetOpenShift().SetClusterVersion(&ocpCV); err != nil {
- return err
- }
- if err := rs.GetOpenShift().SetClusterOperators(&ocpCO); err != nil {
- return err
- }
- if err := rs.Suites.KubernetesConformance.Load(pathPluginArtifactTestsK8S, &bugSuiteK8S); err != nil {
- return err
- }
- if err := rs.Suites.OpenshiftConformance.Load(pathPluginArtifactTestsOCP, &bugSuiteOCP); err != nil {
- return err
- }
-
- return nil
-}
-
-// walkForSummary recursively walk through the result YAML file extracting the counters
-// and failures.
-func walkForSummary(result *results.Item, statusCounts map[string]int, failList []results.Item) (map[string]int, []results.Item) {
- if len(result.Items) > 0 {
- for _, item := range result.Items {
- statusCounts, failList = walkForSummary(&item, statusCounts, failList)
- }
- return statusCounts, failList
- }
-
- statusCounts[result.Status]++
-
- if result.Status == results.StatusFailed || result.Status == results.StatusTimeout {
- result.Details["offset"] = statusCounts[result.Status]
- failList = append(failList, *result)
- }
-
- return statusCounts, failList
-}
diff --git a/internal/pkg/summary/sonobuoy.go b/internal/pkg/summary/sonobuoy.go
deleted file mode 100644
index 669c61d9..00000000
--- a/internal/pkg/summary/sonobuoy.go
+++ /dev/null
@@ -1,14 +0,0 @@
-package summary
-
-import (
- "github.com/vmware-tanzu/sonobuoy/pkg/discovery"
-)
-
-type SonobuoySummary struct {
- Cluster *discovery.ClusterSummary
-}
-
-func (s *SonobuoySummary) SetCluster(c *discovery.ClusterSummary) error {
- s.Cluster = c
- return nil
-}
diff --git a/internal/report/baseline/aws.go b/internal/report/baseline/aws.go
new file mode 100644
index 00000000..b49607a1
--- /dev/null
+++ b/internal/report/baseline/aws.go
@@ -0,0 +1,56 @@
+package baseline
+
+import (
+ "fmt"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/session"
+ "github.com/aws/aws-sdk-go/service/cloudfront"
+ "github.com/aws/aws-sdk-go/service/s3"
+ "github.com/aws/aws-sdk-go/service/s3/s3manager"
+)
+
+// createS3Client creates an S3 client with the specified region
+func createS3Client(region string) (*s3.S3, *s3manager.Uploader, error) {
+ sess, err := session.NewSession(&aws.Config{
+ Region: aws.String(region),
+ })
+ if err != nil {
+ return nil, nil, err
+ }
+
+ svc := s3.New(sess)
+
+ // upload managers https://docs.aws.amazon.com/sdk-for-go/api/service/s3/
+ // Create an uploader with the session and default options
+ uploader := s3manager.NewUploader(sess)
+
+ return svc, uploader, nil
+}
+
+// createCloudFrontClient creates a CloudFront client with the specified region
+func createCloudFrontClient(region string) (*cloudfront.CloudFront, error) {
+ sess, err := session.NewSessionWithOptions(session.Options{
+ Profile: "opct",
+ Config: aws.Config{
+ Region: aws.String(region),
+ },
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ svc := cloudfront.New(sess)
+ return svc, nil
+}
+
+// checkBucketExists checks if the bucket exists in the S3 storage.
+func checkBucketExists(svc *s3.S3, bucket string) (bool, error) {
+ _, err := svc.HeadBucket(&s3.HeadBucketInput{
+ Bucket: aws.String(bucket),
+ })
+ if err != nil {
+ return false, fmt.Errorf("failed to check if bucket exists: %v", err)
+ }
+ return true, nil
+}
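
A sketch of how these package-internal helpers could be combined; it assumes AWS credentials are available in the environment, and the bucket name, object key, and wrapper function are illustrative only:

    package baseline

    import (
        "bytes"
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/s3/s3manager"
    )

    // exampleUpload is a hypothetical helper showing how the unexported clients above fit together.
    func exampleUpload(region, bucket string, payload []byte) error {
        svc, uploader, err := createS3Client(region)
        if err != nil {
            return fmt.Errorf("unable to create S3 client: %w", err)
        }
        if ok, err := checkBucketExists(svc, bucket); err != nil || !ok {
            return fmt.Errorf("bucket %q is not reachable: %v", bucket, err)
        }
        // The uploader streams the payload to the bucket under the given key.
        _, err = uploader.Upload(&s3manager.UploadInput{
            Bucket: aws.String(bucket),
            Key:    aws.String("uploads/example.tar.gz"),
            Body:   bytes.NewReader(payload),
        })
        return err
    }
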
diff --git a/internal/report/baseline/baseline.go b/internal/report/baseline/baseline.go
new file mode 100644
index 00000000..168b5dad
--- /dev/null
+++ b/internal/report/baseline/baseline.go
@@ -0,0 +1,216 @@
+// Package baseline holds the baseline report summary data and the functions to
+// interact with the results service, backed by CloudFront and S3 storage bucket,
+// serving summarized results from CI.
+// "Baseline" results are valid/accepted CI executions. The results are processed
+// and consumed by the OPCT CLI 'report' command to compare the results of the
+// validation tests. These are CI results from reference installations, used as a
+// comparison against custom executions to infer persistent failures,
+// helping to isolate:
+// - Flaky tests
+// - Permanent failures
+// - Test environment issues
+package baseline
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+ "os"
+
+ "github.com/aws/aws-sdk-go/service/s3"
+ "github.com/aws/aws-sdk-go/service/s3/s3manager"
+ "github.com/hashicorp/go-retryablehttp"
+ log "github.com/sirupsen/logrus"
+)
+
+const (
+ bucketNameBaselineReportSummary = "opct-archive"
+ indexObjectKey = "api/v0/result/summary/index.json"
+ objectPathBaselineReportSummaryPath = "/result/summary/index.json"
+
+ // Path to S3 Object /api/v0/result/summary/{ocpVersion}/{platformType}
+ // The object is served by CloudFront, which reduces the cost of accessing S3 and can be
+ // proxied/redirected to other backends without replacing the URL.
+ // The original bucket[1] must be migrated to another account; the CloudFront URL
+ // is part of that goal and avoids disrupting the current process.
+ // [1] "https://openshift-provider-certification.s3.us-west-2.amazonaws.com"
+ reportBaseURL = "https://d23912a6309zf7.cloudfront.net"
+ cloudfrontDistributionID = "E3MJR7MT6EHHJC"
+
+ // To override those values use environment variables OPCT_EXP_BUCKET_NAME and OPCT_EXP_BUCKET_REGION
+ opctStorageBucketName = "opct-archive"
+ opctStorageBucketRegion = "us-east-1"
+)
+
+// BaselineReport is the struct that holds the baseline report data
+// pre-processed and saved in the bucket.
+type BaselineConfig struct {
+ bucketName string
+ bucketRegion string
+ cloudfrontDistributionID string
+
+ buffer *BaselineData
+}
+
+// NewBaselineReportSummary creates a new BaselineConfig struct with the default
+// configuration, allowing customization to change the S3 storage used in the
+// management tasks.
+// TODO: deprecate the environment variables when the backend is fully migrated to a
+// dedicated AWS account.
+func NewBaselineReportSummary() *BaselineConfig {
+ bucketName := opctStorageBucketName
+ bucketRegion := opctStorageBucketRegion
+ if os.Getenv("OPCT_EXP_BUCKET_NAME") != "" {
+ log.Warnf("using custom bucket name: %s", os.Getenv("OPCT_EXP_BUCKET_NAME"))
+ bucketName = os.Getenv("OPCT_EXP_BUCKET_NAME")
+ }
+ if os.Getenv("OPCT_EXP_BUCKET_REGION") != "" {
+ log.Warnf("using custom bucket region: %s", os.Getenv("OPCT_EXP_BUCKET_REGION"))
+ bucketRegion = os.Getenv("OPCT_EXP_BUCKET_REGION")
+ }
+ return &BaselineConfig{
+ bucketName: bucketName,
+ bucketRegion: bucketRegion,
+ cloudfrontDistributionID: cloudfrontDistributionID,
+ }
+}
+
+// createS3Clients creates the S3 client and uploader to interact with the S3 storage, checking that
+// the bucket exists.
+func (brs *BaselineConfig) createS3Clients() (*s3.S3, *s3manager.Uploader, error) {
+ if !brs.checkRequiredParams() {
+ return nil, nil, fmt.Errorf("missing required parameters or dependencies to enable this feature")
+ }
+
+ // create s3 client
+ svcS3, uploader, err := createS3Client(brs.bucketRegion)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // Check if the bucket exists
+ bucketExists, err := checkBucketExists(svcS3, brs.bucketName)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ if !bucketExists {
+ return nil, nil, fmt.Errorf("the OPCT storage bucket does not exist")
+ }
+
+ return svcS3, uploader, nil
+}
+
+// ReadReportSummaryIndexFromAPI reads the summary report index from the OPCT report URL.
+func (brs *BaselineConfig) ReadReportSummaryIndexFromAPI() (*baselineIndex, error) {
+ resp, err := brs.ReadReportSummaryFromAPI(objectPathBaselineReportSummaryPath)
+ if err != nil {
+ log.WithError(err).Error("error reading baseline report summary from API")
+ return nil, err
+ }
+ index := &baselineIndex{}
+ err = json.Unmarshal(resp, index)
+ if err != nil {
+ log.WithError(err).Error("error unmarshalling baseline report summary")
+ return nil, err
+ }
+ return index, nil
+}
+
+// ReadReportSummaryFromAPI reads the summary report from the external URL.
+func (brs *BaselineConfig) ReadReportSummaryFromAPI(path string) ([]byte, error) {
+ retryClient := retryablehttp.NewClient()
+ retryClient.RetryMax = 5
+ retryLogger := log.New()
+ retryLogger.SetLevel(log.WarnLevel)
+ retryClient.Logger = retryLogger
+
+ url := fmt.Sprintf("%s%s", reportBaseURL, path)
+ req, err := http.NewRequest("GET", url, nil)
+ if err != nil {
+ return nil, fmt.Errorf("error creating request: %v", err)
+ }
+ req.Header.Set("X-Custom-Header", "opct")
+ req.Header.Set("Content-Type", "application/json")
+
+ client := retryClient.StandardClient()
+ resp, err := client.Do(req)
+ if err != nil {
+ return nil, fmt.Errorf("error sending request: %v", err)
+ }
+ defer resp.Body.Close()
+
+ log.Debug("Summary Report API response code: ", resp.Status)
+ if resp.StatusCode < 200 || resp.StatusCode >= 300 {
+ return nil, fmt.Errorf("error baseline API request: %s", resp.Status)
+ }
+
+ rawResp, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return nil, fmt.Errorf("error reading response body: %v", err)
+ }
+
+ return rawResp, nil
+}
+
+// GetLatestRawSummaryFromPlatformWithFallback reads the latest summary report from the OPCT report
+// service, trying to get the latest summary for the specified platform and falling back to "None"
+// and "AWS" when available.
+func (brs *BaselineConfig) GetLatestRawSummaryFromPlatformWithFallback(ocpRelease, platformType string) error {
+ errCount := 0
+ evaluatePaths := []string{
+ fmt.Sprintf("/result/summary/%s_%s_latest.json", ocpRelease, platformType),
+ fmt.Sprintf("/result/summary/%s_%s_latest.json", ocpRelease, "None"),
+ fmt.Sprintf("/result/summary/%s_%s_latest.json", ocpRelease, "AWS"),
+ }
+ for _, path := range evaluatePaths {
+ // do not tolerate many errors
+ if errCount > (len(evaluatePaths) * 2) {
+ log.Errorf("Too many errors, stopping the process")
+ break
+ }
+ body, err := brs.ReadReportSummaryFromAPI(path)
+ if err != nil {
+ log.WithError(err).Error("error reading baseline report summary from API")
+ errCount++
+ continue
+ }
+ brs.buffer = &BaselineData{}
+ brs.buffer.SetRawData(body)
+ return nil
+ }
+ return nil
+}
+
+// GetLatestSummaryByPlatform reads the latest summary report from the OPCT report service, trying to
+// retrieve from release and platform.
+// ocpRelease is the OpenShift major version, like "4.7", "4.8", etc.
+func (brs *BaselineConfig) GetLatestSummaryByPlatform(ocpRelease, platformType string) error {
+ path := fmt.Sprintf("/result/summary/%s_%s_latest.json", ocpRelease, platformType)
+ buf, err := brs.ReadReportSummaryFromAPI(path)
+ if err != nil {
+ return fmt.Errorf("unable to get latest summary by platform: %w", err)
+ }
+ brs.buffer = &BaselineData{}
+ brs.buffer.SetRawData(buf)
+ return nil
+}
+
+func (brs *BaselineConfig) GetSummaryByName(name string) ([]byte, error) {
+ return brs.ReadReportSummaryFromAPI(fmt.Sprintf("/result/summary/%s.json", name))
+}
+
+// checkRequiredParams checks whether the environment variable required to enable this
+// experimental feature is set.
+func (brs *BaselineConfig) checkRequiredParams() bool {
+ log.Debugf("OPCT_ENABLE_ADM_BASELINE=%s", os.Getenv("OPCT_ENABLE_ADM_BASELINE"))
+ return os.Getenv("OPCT_ENABLE_ADM_BASELINE") == "1"
+}
+
+func (brs *BaselineConfig) GetBuffer() *BaselineData {
+ if brs.buffer == nil {
+ return nil
+ }
+ return brs.buffer
+}
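
A sketch of the read path exposed above: fetch the latest baseline summary for a release/platform pair and keep the raw payload buffered for later comparison; the release and platform values and the wrapper function are illustrative:

    package baseline

    import "fmt"

    // exampleFetchLatest is a hypothetical illustration of the API consumption flow.
    func exampleFetchLatest() error {
        brs := NewBaselineReportSummary()
        // Reads /result/summary/4.15_None_latest.json from the CloudFront endpoint,
        // retrying transient failures via retryablehttp.
        if err := brs.GetLatestSummaryByPlatform("4.15", "None"); err != nil {
            return fmt.Errorf("unable to fetch baseline summary: %w", err)
        }
        bd := brs.GetBuffer()
        if bd == nil {
            return fmt.Errorf("no baseline data buffered")
        }
        fmt.Printf("fetched %d bytes of baseline summary\n", len(bd.GetRawData()))
        return nil
    }
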
diff --git a/internal/report/baseline/data.go b/internal/report/baseline/data.go
new file mode 100644
index 00000000..1d04d85d
--- /dev/null
+++ b/internal/report/baseline/data.go
@@ -0,0 +1,74 @@
+package baseline
+
+import (
+ "encoding/json"
+ "fmt"
+
+ log "github.com/sirupsen/logrus"
+)
+
+// BaselineData is the struct that holds the baseline data. This struct exists
+// to parse the ReportSummary retrieved from S3. The data is the same structure
+// as internal/report/data.go.ReportData, although it isn't possible to unmarshal it
+// directly until the cyclic dependency between the following packages is resolved:
+// - internal/report
+// - internal/opct/summary
+type BaselineData struct {
+ raw []byte
+}
+
+func (bd *BaselineData) SetRawData(data []byte) {
+ bd.raw = data
+}
+
+func (bd *BaselineData) GetRawData() []byte {
+ return bd.raw
+}
+
+// GetPriorityFailuresFromPlugin returns the priority failures from a specific plugin.
+// The priority failures are the failures that are marked as priority in the baseline
+// report. It should be a temporary function while unmarshaling the data from the API
+// isn't possible.
+func (bd *BaselineData) GetPriorityFailuresFromPlugin(pluginName string) ([]string, error) {
+ failureStr := []string{}
+ var baselineData map[string]interface{}
+ err := json.Unmarshal(bd.raw, &baselineData)
+ if err != nil {
+ return nil, fmt.Errorf("failed to unmarshal baseline data: %w", err)
+ }
+
+ // cast the data extracting the plugin failures.
+ for p := range baselineData["provider"].(map[string]interface{})["plugins"].(map[string]interface{}) {
+ pluginBaseline := baselineData["provider"].(map[string]interface{})["plugins"].(map[string]interface{})[p]
+ pluginID := pluginBaseline.(map[string]interface{})["id"]
+ if pluginID != pluginName {
+ continue
+ }
+ if _, ok := pluginBaseline.(map[string]interface{})["failedFiltered"]; !ok {
+ log.Debugf("BaselineData/GetPriorityFailuresFromPlugin() plugin %q does not have filtered failures, skipping...", pluginName)
+ return failureStr, nil
+ }
+ if pluginBaseline.(map[string]interface{})["failedFiltered"] == nil {
+ log.Debugf("BaselineData/GetPriorityFailuresFromPlugin() plugin %q does not have filtered failures, skipping...", pluginName)
+ return failureStr, nil
+ }
+ for _, f := range pluginBaseline.(map[string]interface{})["failedFiltered"].([]interface{}) {
+ failureStr = append(failureStr, f.(map[string]interface{})["name"].(string))
+ }
+ }
+ return failureStr, nil
+}
+
+func (bd *BaselineData) GetSetupTags() (map[string]interface{}, error) {
+ var tags map[string]interface{}
+ var obj map[string]interface{}
+ err := json.Unmarshal(bd.raw, &obj)
+ if err != nil {
+ return nil, fmt.Errorf("failed to unmarshal baseline data: %w", err)
+ }
+ fmt.Println(obj["setup"].(map[string]interface{}))
+ tags = obj["setup"].(map[string]interface{})["api"].(map[string]interface{})
+ // tags = obj["setup"].(map[string]interface{})["api"].(map[string]string)
+ // fmt.Println(s)
+ return tags, nil
+}
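
As an illustration, a consumer such as the report filter pipeline could use BaselineData to pull the prioritized failures for a single plugin; the plugin name matches the existing conformance plugin identifier, while the wrapper itself is hypothetical:

    package baseline

    import log "github.com/sirupsen/logrus"

    // examplePriorityFailures is a hypothetical helper showing the intended consumption
    // of GetPriorityFailuresFromPlugin until proper unmarshaling is available.
    func examplePriorityFailures(bd *BaselineData) []string {
        failures, err := bd.GetPriorityFailuresFromPlugin("20-openshift-conformance-validated")
        if err != nil {
            log.WithError(err).Error("unable to extract baseline failures")
            return nil
        }
        log.Infof("baseline reports %d prioritized failures", len(failures))
        return failures
    }
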
diff --git a/internal/report/baseline/indexer.go b/internal/report/baseline/indexer.go
new file mode 100644
index 00000000..8f8c6cef
--- /dev/null
+++ b/internal/report/baseline/indexer.go
@@ -0,0 +1,208 @@
+package baseline
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/service/cloudfront"
+ "github.com/aws/aws-sdk-go/service/s3"
+ log "github.com/sirupsen/logrus"
+)
+
+type baselineIndexItem struct {
+ Date string `json:"date"`
+ Name string `json:"name"`
+ Path string `json:"path"`
+ OpenShiftRelease string `json:"openshift_version"`
+ Provider string `json:"provider"`
+ PlatformType string `json:"platform_type"`
+ Status string `json:"status"`
+ Size string `json:"size"`
+ IsLatest bool `json:"is_latest"`
+ Tags map[string]interface{} `json:"tags"`
+}
+type baselineIndex struct {
+ LastUpdate string `json:"date"`
+ Status string `json:"status"`
+ Results []*baselineIndexItem `json:"results"`
+ Latest map[string]*baselineIndexItem `json:"latest"`
+}
+
+// CreateBaselineIndex lists all objects in the S3 bucket, extracts their metadata,
+// and calculates the latest result by release and platform type, creating an
+// index.json object.
+func (brs *BaselineConfig) CreateBaselineIndex() error {
+ svcS3, _, err := brs.createS3Clients()
+ if err != nil {
+ return fmt.Errorf("failed to create S3 client and validate bucket: %w", err)
+ }
+
+ // List all the objects in the bucket and create index.
+ objects, err := ListObjects(svcS3, brs.bucketRegion, brs.bucketName, "api/v0/result/summary/")
+ if err != nil {
+ return err
+ }
+
+ index := baselineIndex{
+ LastUpdate: time.Now().Format(time.RFC3339),
+ Latest: make(map[string]*baselineIndexItem),
+ }
+ // calculate the index for each object (summary)
+ for _, obj := range objects {
+ // Keys must have the following format: {ocpVersion}_{platformType}_{timestamp}.json
+ objectKey := *obj.Key
+
+ name := objectKey[strings.LastIndex(objectKey, "/")+1:]
+ if name == "index.json" {
+ continue
+ }
+
+ // read the object to extract metadata/tags from 'setup.api'
+ objReader, err := svcS3.GetObject(&s3.GetObjectInput{
+ Bucket: aws.String(brs.bucketName),
+ Key: aws.String(objectKey),
+ })
+ if err != nil {
+ log.Errorf("failed to get object %s: %v", objectKey, err)
+ continue
+ }
+
+ defer objReader.Body.Close()
+ bd := &BaselineData{}
+ body, err := io.ReadAll(objReader.Body)
+ if err != nil {
+ log.Errorf("failed to read object data %s: %v", objectKey, err)
+ continue
+ }
+
+ bd.SetRawData(body)
+ tags, err := bd.GetSetupTags()
+ if err != nil {
+ log.Errorf("failed to deserialize tags/metadata from summary data: %v", err)
+ }
+
+ log.Infof("Processing summary object: %s", name)
+ log.Infof("Processing metadata: %v", tags)
+ openShiftRelease := strings.Split(name, "_")[0]
+ if _, ok := tags["openshiftRelease"]; ok {
+ openShiftRelease = tags["openshiftRelease"].(string)
+ } else {
+ log.Warnf("missing openshiftRelease tag in metadata, extracting from name: %v", openShiftRelease)
+ }
+
+ platformType := strings.Split(name, "_")[1]
+ if _, ok := tags["platformType"]; ok {
+ platformType = tags["platformType"].(string)
+ } else {
+ log.Warnf("missing platformType tag in metadata, extracting from name: %v", platformType)
+ }
+
+ executionDate := strings.Split(name, "_")[2]
+ if _, ok := tags["executionDate"]; ok {
+ executionDate = tags["executionDate"].(string)
+ } else {
+ log.Warnf("missing executionDate tag in metadata, extracting from name: %v", executionDate)
+ }
+
+ // Creating summary item for baseline result
+ res := &baselineIndexItem{
+ Date: executionDate,
+ Name: strings.Split(name, ".json")[0],
+ Path: objectKey,
+ Size: fmt.Sprintf("%d", *obj.Size),
+ OpenShiftRelease: openShiftRelease,
+ PlatformType: platformType,
+ Tags: tags,
+ }
+ // spew.Dump(res)
+ index.Results = append(index.Results, res)
+ latestIndexKey := fmt.Sprintf("%s_%s", openShiftRelease, platformType)
+ existing, ok := index.Latest[latestIndexKey]
+ if !ok {
+ res.IsLatest = true
+ index.Latest[latestIndexKey] = res
+ } else {
+ if existing.Date < res.Date {
+ existing.IsLatest = false
+ res.IsLatest = true
+ index.Latest[latestIndexKey] = res
+ }
+ }
+ }
+
+ // Copy each latest summary to its respective {release}_{platform}_latest.json object
+ for kLatest, latest := range index.Latest {
+ latestObjectKey := fmt.Sprintf("api/v0/result/summary/%s_latest.json", kLatest)
+ log.Infof("Creating latest object for %q to %q", kLatest, latestObjectKey)
+ _, err := svcS3.CopyObject(&s3.CopyObjectInput{
+ Bucket: aws.String(brs.bucketName),
+ CopySource: aws.String(fmt.Sprintf("%v/%v", brs.bucketName, latest.Path)),
+ Key: aws.String(latestObjectKey),
+ })
+ if err != nil {
+ log.Errorf("Couldn't create latest object %s: %v", kLatest, err)
+ }
+ }
+
+ // Serialize the new index.
+ indexJSON, err := json.Marshal(index)
+ if err != nil {
+ return fmt.Errorf("unable to marshal index to JSON: %w", err)
+ }
+
+ // Save the index to the bucket
+ _, err = svcS3.PutObject(&s3.PutObjectInput{
+ Bucket: aws.String(brs.bucketName),
+ Key: aws.String(indexObjectKey),
+ Body: strings.NewReader(string(indexJSON)),
+ })
+ if err != nil {
+ return fmt.Errorf("failed to upload index to bucket: %w", err)
+ }
+
+ // Expire the index cache in the CloudFront distribution.
+ svcCloudfront, err := createCloudFrontClient(brs.bucketRegion)
+ if err != nil {
+ return fmt.Errorf("failed to create cloudfront client: %w", err)
+ }
+ invalidationPathURI := "/result/summary/index.json"
+ log.Infof("Creating cache invalidation for %s", invalidationPathURI)
+ _, err = svcCloudfront.CreateInvalidation(&cloudfront.CreateInvalidationInput{
+ DistributionId: aws.String(brs.cloudfrontDistributionID),
+ InvalidationBatch: &cloudfront.InvalidationBatch{
+ CallerReference: aws.String(time.Now().Format(time.RFC3339)),
+ Paths: &cloudfront.Paths{
+ Quantity: aws.Int64(1),
+ Items: []*string{
+ aws.String(invalidationPathURI),
+ },
+ },
+ },
+ })
+ if err != nil {
+ log.Warnf("failed to create cache invalidation: %v", err)
+ fmt.Printf(`Index updated. Run the following command to invalidate the index cache:
+aws cloudfront create-invalidation \
+ --distribution-id %s \
+ --paths %s`, brs.cloudfrontDistributionID, invalidationPathURI)
+ fmt.Println()
+ }
+ return nil
+}
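+
+// Illustrative example (hypothetical values): a summary uploaded as
+// "api/v0/result/summary/4.16_None_20240101000000.json" is indexed under the
+// key "4.16_None"; when it is the newest entry for that pair it is flagged
+// with IsLatest and copied to "api/v0/result/summary/4.16_None_latest.json".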
+
+// ListObjects lists all objects in the bucket under the given path prefix.
+func ListObjects(svc *s3.S3, bucketRegion, bucketName, path string) ([]*s3.Object, error) {
+ input := &s3.ListObjectsInput{
+ Bucket: aws.String(bucketName),
+ Prefix: aws.String(path),
+ }
+ resp, err := svc.ListObjects(input)
+ if err != nil {
+ return nil, err
+ }
+ return resp.Contents, nil
+}
diff --git a/internal/report/baseline/uploader.go b/internal/report/baseline/uploader.go
new file mode 100644
index 00000000..c0e2f641
--- /dev/null
+++ b/internal/report/baseline/uploader.go
@@ -0,0 +1,85 @@
+package baseline
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/service/s3"
+ "github.com/aws/aws-sdk-go/service/s3/s3manager"
+ log "github.com/sirupsen/logrus"
+)
+
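+// UploadBaseline uploads the result archive and its report summary to the
+// baseline bucket, attaching the provided metadata to both objects. When
+// dryRun is set, the uploads are skipped and only logged.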
+func (brs *BaselineConfig) UploadBaseline(filePath, resPath string, meta map[string]string, dryRun bool) error {
+ svcS3, uploader, err := brs.createS3Clients()
+ if err != nil {
+ return fmt.Errorf("failed to create S3 client and validate bucket: %w", err)
+ }
+
+ // Upload the archive to the bucket
+ log.Debugf("UploadBaseline(): opening file %s", filePath)
+ fdArchive, err := os.Open(filePath)
+ if err != nil {
+ return fmt.Errorf("failed to open file %s: %w", filePath, err)
+ }
+ defer fdArchive.Close()
+
+ // Object names and paths
+ filenameArtifact := filepath.Base(filePath)
+ objectKeyArtifact := fmt.Sprintf("uploads/%s", filenameArtifact)
+ filenameSummary := resPath + "/opct-report-summary.json"
+ objectKeySummary := "api/v0/result/summary/" + meta["dataPath"]
+
+ meta["objectArtifact"] = objectKeyArtifact
+ meta["objectSummary"] = objectKeySummary
+
+ // Upload the artifact to the bucket, attaching the provided metadata to the object.
+ log.Debugf("UploadBaseline(): uploading to object %s", objectKeyArtifact)
+ s3ObjectURI := "s3://" + brs.bucketName + "/" + objectKeyArtifact
+ if !dryRun {
+ _, err := uploader.Upload(&s3manager.UploadInput{
+ Bucket: aws.String(brs.bucketName),
+ Key: aws.String(objectKeyArtifact),
+ Metadata: aws.StringMap(meta),
+ Body: fdArchive,
+ })
+ if err != nil {
+ return fmt.Errorf("failed to upload file %s to bucket %s: %w", filenameArtifact, brs.bucketName, err)
+ }
+ log.Info("Results published successfully to ", s3ObjectURI)
+ // log.Debugf("UploadBaseline(): putObjectOutput: %v", putOutArchive)
+ } else {
+ log.Warnf("DRY-RUN mode: skipping upload to %s", s3ObjectURI)
+ }
+
+ // Saving summary to the bucket
+
+ log.Debugf("UploadBaseline(): opening file %q", filenameSummary)
+ fdSummary, err := os.Open(filenameSummary)
+ if err != nil {
+ return fmt.Errorf("failed to open file %s: %w", filenameSummary, err)
+ }
+ defer fdSummary.Close()
+
+ log.Debugf("UploadBaseline(): uploading baseline summary to %q", objectKeySummary)
+ s3ObjectURI = "s3://" + brs.bucketName + "/" + objectKeySummary
+ if !dryRun {
+ _, err = svcS3.PutObject(&s3.PutObjectInput{
+ Bucket: aws.String(brs.bucketName),
+ Key: aws.String(objectKeySummary),
+ Body: fdSummary,
+ Metadata: aws.StringMap(meta),
+ })
+ if err != nil {
+ return fmt.Errorf("failed to upload file %s to bucket %s: %w", filenameSummary, brs.bucketName, err)
+ }
+ log.Info("Results published successfully to s3://", brs.bucketName, "/", objectKeySummary)
+
+ } else {
+ log.Warnf("DRY-RUN mode: skipping upload to %s", s3ObjectURI)
+ }
+
+ return nil
+}
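+
+// exampleUploadBaseline is an illustrative sketch (not wired to the CLI) showing
+// how UploadBaseline could be called; the paths and metadata values below are
+// hypothetical, and dryRun is enabled so nothing is actually uploaded.
+func exampleUploadBaseline(cfg *BaselineConfig) error {
+	meta := map[string]string{
+		"openshiftRelease": "4.16",
+		"platformType":     "None",
+		"executionDate":    "2024-01-01T00:00:00Z",
+		"dataPath":         "4.16_None_20240101000000.json",
+	}
+	return cfg.UploadBaseline("/tmp/result.tar.gz", "/tmp/results-data", meta, true)
+}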
diff --git a/internal/report/data.go b/internal/report/data.go
new file mode 100644
index 00000000..9188147e
--- /dev/null
+++ b/internal/report/data.go
@@ -0,0 +1,898 @@
+// Package report implements the data layer used to extract the information
+// required to build the report data (JSON and views).
+// It uses the data from the summary package to create the report data.
+
+package report
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "html/template"
+ "os"
+ "sort"
+ "strings"
+
+ vfs "github.com/redhat-openshift-ecosystem/provider-certification-tool/internal/assets"
+ "github.com/redhat-openshift-ecosystem/provider-certification-tool/internal/opct/archive"
+ "github.com/redhat-openshift-ecosystem/provider-certification-tool/internal/opct/metrics"
+ "github.com/redhat-openshift-ecosystem/provider-certification-tool/internal/opct/plugin"
+ "github.com/redhat-openshift-ecosystem/provider-certification-tool/internal/opct/summary"
+ "github.com/redhat-openshift-ecosystem/provider-certification-tool/internal/openshift/mustgather"
+ log "github.com/sirupsen/logrus"
+ "github.com/vmware-tanzu/sonobuoy/pkg/discovery"
+)
+
+const (
+ ReportFileNameIndexJSON = "/opct-report.json"
+ // ReportFileNameSummaryJSON is the summarized report consumed by the API to apply diffs and filters.
+ ReportFileNameSummaryJSON = "/opct-report-summary.json"
+ ReportTemplateBasePath = "data/templates/report"
+)
+
+type ReportData struct {
+ Summary *ReportSummary `json:"summary"`
+ Raw string `json:"-"`
+ Provider *ReportResult `json:"provider"`
+ Baseline *ReportResult `json:"baseline,omitempty"`
+ Checks *ReportChecks `json:"checks,omitempty"`
+ Setup *ReportSetup `json:"setup,omitempty"`
+}
+
+type ReportChecks struct {
+ BaseURL string `json:"baseURL"`
+ EmptyValue string `json:"emptyValue"`
+ Fail []*SLOOutput `json:"failures"`
+ Pass []*SLOOutput `json:"successes"`
+ Warn []*SLOOutput `json:"warnings"`
+ Skip []*SLOOutput `json:"skips"`
+}
+
+type ReportResult struct {
+ Version *ReportVersion `json:"version"`
+ Infra *ReportInfra `json:"infra"`
+ ClusterOperators *ReportClusterOperators `json:"clusterOperators"`
+ ClusterHealth *ReportClusterHealth `json:"clusterHealth"`
+ Plugins map[string]*ReportPlugin `json:"plugins"`
+ HasValidBaseline bool `json:"hasValidBaseline"`
+ MustGatherInfo *mustgather.MustGather `json:"mustGatherInfo,omitempty"`
+ ErrorCounters *archive.ErrorCounter `json:"errorCounters,omitempty"`
+ Runtime *ReportRuntime `json:"runtime,omitempty"`
+ Nodes []*summary.Node `json:"nodes,omitempty"`
+}
+
+func (rt *ReportResult) GetPlugins() []string {
+ plugins := []string{}
+ for pluginName, p := range rt.Plugins {
+ if len(p.Name) == 0 {
+ log.Debugf("show/terminal: skipping plugin %s", pluginName)
+ continue
+ }
+ plugins = append(plugins, pluginName)
+ }
+ return plugins
+}
+
+type ReportSummary struct {
+ Tests *ReportSummaryTests `json:"tests"`
+ Alerts *ReportSummaryAlerts `json:"alerts"`
+ Runtime *ReportSummaryRuntime `json:"runtime,omitempty"`
+ Headline string `json:"headline"`
+ Features ReportSummaryFeatures `json:"features,omitempty"`
+}
+
+type ReportSummaryFeatures struct {
+ HasCAMGI bool `json:"hasCAMGI,omitempty"`
+ HasMetricsData bool `json:"hasMetricsData,omitempty"`
+ HasInstallConfig bool `json:"hasInstallConfig,omitempty"`
+}
+
+type ReportSummaryRuntime struct {
+ Timers *metrics.Timers `json:"timers,omitempty"`
+ Plugins map[string]string `json:"plugins,omitempty"`
+ ExecutionTime string `json:"executionTime,omitempty"`
+}
+
+type ReportSummaryTests struct {
+ Archive string `json:"archive"`
+ ArchiveDiff string `json:"archiveDiff,omitempty"`
+}
+
+type ReportSummaryAlerts struct {
+ PluginK8S string `json:"pluginK8S,omitempty"`
+ PluginK8SMessage string `json:"pluginK8SMessage,omitempty"`
+ PluginOCP string `json:"pluginOCP,omitempty"`
+ PluginOCPMessage string `json:"pluginOCPMessage,omitempty"`
+ SuiteErrors string `json:"suiteErrors,omitempty"`
+ SuiteErrorsMessage string `json:"suiteErrorsMessage,omitempty"`
+ WorkloadErrors string `json:"workloadErrors,omitempty"`
+ WorkloadErrorsMessage string `json:"workloadErrorsMessage,omitempty"`
+ Checks string `json:"checks,omitempty"`
+ ChecksMessage string `json:"checksMessage,omitempty"`
+}
+
+type ReportVersion struct {
+ // OpenShift versions
+ OpenShift *summary.SummaryClusterVersionOutput `json:"openshift"`
+
+ // Kubernetes Version
+ Kubernetes string `json:"kubernetes"`
+
+ // OPCT Version
+ OPCTServer string `json:"opctServer,omitempty"`
+ OPCTClient string `json:"opctClient,omitempty"`
+}
+
+type ReportInfra struct {
+ Name string `json:"name"`
+ PlatformType string `json:"platformType"`
+ PlatformName string `json:"platformName"`
+ Topology string `json:"topology,omitempty"`
+ ControlPlaneTopology string `json:"controlPlaneTopology,omitempty"`
+ APIServerURL string `json:"apiServerURL,omitempty"`
+ APIServerInternalURL string `json:"apiServerInternalURL,omitempty"`
+ NetworkType string `json:"networkType,omitempty"`
+}
+
+type ReportClusterOperators struct {
+ CountAvailable uint64 `json:"countAvailable,omitempty"`
+ CountProgressing uint64 `json:"countProgressing,omitempty"`
+ CountDegraded uint64 `json:"countDegraded,omitempty"`
+}
+
+type ReportClusterHealth struct {
+ NodeHealthTotal int `json:"nodeHealthTotal,omitempty"`
+ NodeHealthy int `json:"nodeHealthy,omitempty"`
+ NodeHealthPerc float64 `json:"nodeHealthPerc,omitempty"`
+ PodHealthTotal int `json:"podHealthTotal,omitempty"`
+ PodHealthy int `json:"podHealthy,omitempty"`
+ PodHealthPerc float64 `json:"podHealthPerc,omitempty"`
+ PodHealthDetails []discovery.HealthInfoDetails `json:"podHealthDetails,omitempty"`
+}
+
+type ReportPlugin struct {
+ ID string `json:"id"`
+ Title string `json:"title"`
+ Name string `json:"name"`
+ Definition *plugin.PluginDefinition `json:"definition,omitempty"`
+ Stat *ReportPluginStat `json:"stat"`
+ ErrorCounters *archive.ErrorCounter `json:"errorCounters,omitempty"`
+ Suite *summary.OpenshiftTestsSuite `json:"suite"`
+
+ Tests map[string]*plugin.TestItem `json:"tests,omitempty"`
+
+ // Filters
+ TagsFailedPrio string `json:"tagsFailuresPriority"`
+ TestsFailedPrio []*ReportTestFailure `json:"testsFailuresPriority"`
+ TagsFlakeCI string `json:"tagsFlakeCI"`
+ TestsFlakeCI []*ReportTestFailure `json:"testsFlakeCI"`
+
+ // SuiteOnly
+ FailedFilter1 []*ReportTestFailure `json:"failedTestsFilter1"`
+ TagsFilter1 string `json:"tagsFailuresFilter1"`
+
+ // Baseline
+ FailedFilter2 []*ReportTestFailure `json:"failedTestsFilter2"`
+ TagsFilter2 string `json:"tagsFailuresFilter2"`
+
+ // FlakeAPI
+ FailedFilter3 []*ReportTestFailure `json:"failedTestsFilter3"`
+ TagsFilter3 string `json:"tagsFailuresFilter3"`
+
+ FailedFilter4 []*ReportTestFailure `json:"failedTestsFilter4"`
+ TagsFilter4 string `json:"tagsFailuresFilter4"`
+
+ FailedFilter5 []*ReportTestFailure `json:"failedTestsFilter5"`
+ TagsFilter5 string `json:"tagsFailuresFilter5"`
+
+ FailedFilter6 []*ReportTestFailure `json:"failedTestsFilter6"`
+ TagsFilter6 string `json:"tagsFailuresFilter6"`
+
+ FailedFiltered []*ReportTestFailure `json:"failedFiltered"`
+ TagsFiltered string `json:"tagsFailuresFiltered"`
+}
+
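+// BuildFailedData converts the given list of failed test names into report
+// failure items (with flake and error counters) for one filter step. Supported
+// filterID values: "F1" (SuiteOnly), "F3" (FlakeAPI), "F4" (BaselineAPI),
+// "F6" (Replay), and "final" (failures remaining after all filters); unknown
+// IDs are ignored.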
+func (rp *ReportPlugin) BuildFailedData(filterID string, dataFailures []string) {
+ failures := []*ReportTestFailure{}
+ tags := plugin.NewTestTagsEmpty(len(dataFailures))
+ for _, f := range dataFailures {
+ if _, ok := rp.Tests[f]; !ok {
+ log.Warnf("BuildFailedData: test %s not found in the plugin", f)
+ continue
+ }
+ // Create a new ReportTestFailure
+ rtf := &ReportTestFailure{
+ ID: rp.Tests[f].ID,
+ Name: rp.Tests[f].Name,
+ Documentation: rp.Tests[f].Documentation,
+ }
+ if rp.Tests[f].Flake != nil {
+ rtf.FlakeCount = rp.Tests[f].Flake.CurrentFlakes
+ rtf.FlakePerc = rp.Tests[f].Flake.CurrentFlakePerc
+ }
+ if _, ok := rp.Tests[f].ErrorCounters["total"]; ok {
+ rtf.ErrorsCount = int64(rp.Tests[f].ErrorCounters["total"])
+ }
+ tags.Add(&f)
+ failures = append(failures, rtf)
+ }
+ switch filterID {
+ case "final":
+ rp.FailedFiltered = failures
+ rp.TagsFiltered = tags.ShowSorted()
+ case "F1":
+ rp.FailedFilter1 = failures
+ rp.TagsFilter1 = tags.ShowSorted()
+ case "F3":
+ rp.FailedFilter3 = failures
+ rp.TagsFilter3 = tags.ShowSorted()
+ case "F4":
+ rp.FailedFilter4 = failures
+ rp.TagsFilter4 = tags.ShowSorted()
+ case "F6":
+ rp.FailedFilter6 = failures
+ rp.TagsFilter6 = tags.ShowSorted()
+ }
+}
+
+type ReportPluginStat struct {
+ Completed string `json:"execution"`
+ Result string `json:"result"`
+ Status string `json:"status"`
+ Total int64 `json:"total"`
+ Passed int64 `json:"passed"`
+ Failed int64 `json:"failed"`
+ Timeout int64 `json:"timeout"`
+ Skipped int64 `json:"skipped"`
+
+ // Filters
+ // Filter: SuiteOnly
+ FilterSuite int64 `json:"filter1Suite"`
+ Filter1Excluded int64 `json:"filter1Excluded"`
+
+ // Filter: Baseline (deprecated soon)
+ FilterBaseline int64 `json:"filter2Baseline"`
+ Filter2Excluded int64 `json:"filter2Excluded"`
+
+ // Filter: FlakeCI
+ FilterFailedPrio int64 `json:"filter3FailedPriority"`
+ Filter3Excluded int64 `json:"filter3Excluded"`
+
+ // Filter: BaselineAPI
+ FilterFailedAPI int64 `json:"filter4FailedAPI"`
+ Filter4Excluded int64 `json:"filter4Excluded"`
+
+ // Filter: KnownFailures
+ Filter5Failures int64 `json:"filter5Failures"`
+ Filter5Excluded int64 `json:"filter5Excluded"`
+
+ // Filter: Replay
+ Filter6Failures int64 `json:"filter6Failures"`
+ Filter6Excluded int64 `json:"filter6Excluded"`
+
+ FilterFailures int64 `json:"filterFailures"`
+}
+
+type ReportTestFailure struct {
+ ID string `json:"id"`
+ Name string `json:"name"`
+ Documentation string `json:"documentation"`
+ FlakePerc float64 `json:"flakePerc"`
+ FlakeCount int64 `json:"flakeCount"`
+ ErrorsCount int64 `json:"errorsTotal"`
+}
+
+type ReportSetup struct {
+ Frontend *ReportSetupFrontend `json:"frontend,omitempty"`
+ API *ReportSetupAPI `json:"api,omitempty"`
+}
+type ReportSetupFrontend struct {
+ EmbedData bool
+}
+
+type ReportSetupAPI struct {
+ SummaryName string `json:"dataPath,omitempty"`
+ SummaryArchive string `json:"summaryArchive,omitempty"`
+ UUID string `json:"uuid,omitempty"`
+ ExecutionDate string `json:"executionDate,omitempty"`
+ OpenShiftVersion string `json:"openshiftVersion,omitempty"`
+ OpenShiftRelease string `json:"openshiftRelease,omitempty"`
+ PlatformType string `json:"platformType,omitempty"`
+ ProviderName string `json:"providerName,omitempty"`
+ InfraTopology string `json:"infraTopology,omitempty"`
+ Workflow string `json:"workflow,omitempty"`
+}
+
+type ReportRuntime struct {
+ ServerLogs []*archive.RuntimeInfoItem `json:"serverLogs,omitempty"`
+ ServerConfig []*archive.RuntimeInfoItem `json:"serverConfig,omitempty"`
+ OpctConfig []*archive.RuntimeInfoItem `json:"opctConfig,omitempty"`
+}
+
+func NewReportData(embedFrontend bool) *ReportData {
+ return &ReportData{
+ Provider: &ReportResult{},
+ Setup: &ReportSetup{
+ Frontend: &ReportSetupFrontend{
+ EmbedData: embedFrontend,
+ },
+ API: &ReportSetupAPI{},
+ },
+ }
+}
+
+// Populate is the entrypoint that initializes, triggers the data source processors,
+// and finalizes the report data structure used by the frontends (HTML or CLI).
+func (re *ReportData) Populate(cs *summary.ConsolidatedSummary) error {
+ cs.Timers.Add("report-populate")
+ re.Summary = &ReportSummary{
+ Tests: &ReportSummaryTests{
+ Archive: cs.GetProvider().Archive,
+ },
+ Runtime: &ReportSummaryRuntime{
+ Plugins: make(map[string]string, 4),
+ },
+ Alerts: &ReportSummaryAlerts{},
+ }
+ if err := re.populateSource(cs.GetProvider()); err != nil {
+ return err
+ }
+ re.Provider.HasValidBaseline = cs.GetBaseline().HasValidResults()
+ if re.Provider.HasValidBaseline {
+ if err := re.populateSource(cs.GetBaseline()); err != nil {
+ return err
+ }
+ re.Summary.Tests.ArchiveDiff = cs.GetBaseline().Archive
+ re.Summary.Headline = fmt.Sprintf("%s (diff %s) | OCP %s | K8S %s",
+ re.Summary.Tests.Archive,
+ re.Summary.Tests.ArchiveDiff,
+ re.Provider.Version.OpenShift.Desired,
+ re.Provider.Version.Kubernetes,
+ )
+ }
+
+ re.Summary.Features = ReportSummaryFeatures{
+ HasCAMGI: cs.Provider.HasCAMGI,
+ HasMetricsData: cs.Provider.HasMetrics,
+ HasInstallConfig: cs.Provider.HasInstallConfig,
+ }
+
+ // Checks must run after the report is populated so they can evaluate the
+ // complete data set.
+ checks := NewCheckSummary(re)
+ err := checks.Run()
+ if err != nil {
+ log.Debugf("one or more errors found when running checks: %v", err)
+ }
+ pass, fail, warn, skip := checks.GetCheckResults()
+ re.Checks = &ReportChecks{
+ BaseURL: checks.GetBaseURL(),
+ EmptyValue: CheckIdEmptyValue,
+ Pass: pass,
+ Fail: fail,
+ Warn: warn,
+ Skip: skip,
+ }
+ if len(re.Checks.Fail) > 0 {
+ re.Summary.Alerts.Checks = "danger"
+ re.Summary.Alerts.ChecksMessage = fmt.Sprintf("%d", len(re.Checks.Fail))
+ }
+
+ cs.Timers.Add("report-populate")
+ re.Summary.Runtime.Timers = cs.Timers
+ return nil
+}
+
+// populateSource reads the loaded data and creates the report data for each
+// result source (provider and/or baseline).
+func (re *ReportData) populateSource(rs *summary.ResultSummary) error {
+ var reResult *ReportResult
+ if rs.Name == summary.ResultSourceNameBaseline {
+ re.Baseline = &ReportResult{}
+ reResult = re.Baseline
+ } else {
+ re.Provider = &ReportResult{}
+ reResult = re.Provider
+ reResult.MustGatherInfo = rs.MustGather
+ }
+ // Version
+ v, err := rs.GetOpenShift().GetClusterVersion()
+ if err != nil {
+ return err
+ }
+ reResult.Version = &ReportVersion{
+ OpenShift: v,
+ Kubernetes: rs.GetSonobuoyCluster().APIVersion,
+ }
+
+ // Infrastructure
+ infra, err := rs.GetOpenShift().GetInfrastructure()
+ if err != nil {
+ return err
+ }
+ platformName := ""
+ if string(infra.Status.PlatformStatus.Type) == "External" {
+ platformName = infra.Spec.PlatformSpec.External.PlatformName
+ }
+ sdn, err := rs.GetOpenShift().GetClusterNetwork()
+ if err != nil {
+ log.Errorf("unable to get clusterNetwork object: %v", err)
+ return err
+ }
+ reResult.Infra = &ReportInfra{
+ PlatformType: string(infra.Status.PlatformStatus.Type),
+ PlatformName: platformName,
+ Name: string(infra.Status.InfrastructureName),
+ Topology: string(infra.Status.InfrastructureTopology),
+ ControlPlaneTopology: string(infra.Status.ControlPlaneTopology),
+ APIServerURL: string(infra.Status.APIServerURL),
+ APIServerInternalURL: string(infra.Status.APIServerInternalURL),
+ NetworkType: string(sdn.Spec.NetworkType),
+ }
+
+ // Cluster Operators
+ co, err := rs.GetOpenShift().GetClusterOperator()
+ if err != nil {
+ return err
+ }
+ reResult.ClusterOperators = &ReportClusterOperators{
+ CountAvailable: co.CountAvailable,
+ CountProgressing: co.CountProgressing,
+ CountDegraded: co.CountDegraded,
+ }
+
+ // Node
+ reResult.Nodes = rs.GetOpenShift().GetNodes()
+
+ // Node and Pod Status
+ sbCluster := rs.GetSonobuoyCluster()
+ reResult.ClusterHealth = &ReportClusterHealth{
+ NodeHealthTotal: sbCluster.NodeHealth.Total,
+ NodeHealthy: sbCluster.NodeHealth.Healthy,
+ NodeHealthPerc: 100 * float64(sbCluster.NodeHealth.Healthy) / float64(sbCluster.NodeHealth.Total),
+ PodHealthTotal: sbCluster.PodHealth.Total,
+ PodHealthy: sbCluster.PodHealth.Healthy,
+ PodHealthPerc: 100 * float64(sbCluster.PodHealth.Healthy) / float64(sbCluster.PodHealth.Total),
+ }
+ for _, dt := range sbCluster.PodHealth.Details {
+ if !dt.Healthy {
+ reResult.ClusterHealth.PodHealthDetails = append(reResult.ClusterHealth.PodHealthDetails, dt)
+ }
+ }
+
+ // Populate plugins. New plugins must be added here.
+ availablePlugins := []string{
+ plugin.PluginNameOpenShiftUpgrade,
+ plugin.PluginNameKubernetesConformance,
+ plugin.PluginNameOpenShiftConformance,
+ plugin.PluginNameConformanceReplay,
+ plugin.PluginNameArtifactsCollector,
+ }
+ reResult.Plugins = make(map[string]*ReportPlugin, len(availablePlugins))
+ for _, pluginID := range availablePlugins {
+ if err := re.populatePluginConformance(rs, reResult, pluginID); err != nil {
+ return err
+ }
+ }
+
+ // Aggregate Plugin errors
+ reResult.ErrorCounters = archive.MergeErrorCounters(
+ reResult.Plugins[plugin.PluginNameKubernetesConformance].ErrorCounters,
+ reResult.Plugins[plugin.PluginNameOpenShiftConformance].ErrorCounters,
+ )
+
+ // Runtime
+ if reResult.Runtime == nil {
+ reResult.Runtime = &ReportRuntime{}
+ }
+ var serverFinishedTime string
+ if rs.Sonobuoy != nil && rs.Sonobuoy.MetaRuntime != nil {
+ reResult.Runtime.ServerLogs = rs.Sonobuoy.MetaRuntime
+ for _, e := range rs.Sonobuoy.MetaRuntime {
+ if strings.HasPrefix(e.Name, "plugin finished") {
+ arr := strings.Split(e.Name, "plugin finished ")
+ re.Summary.Runtime.Plugins[arr[len(arr)-1]] = e.Delta
+ }
+ if strings.HasPrefix(e.Name, "server finished") {
+ re.Summary.Runtime.ExecutionTime = e.Total
+ serverFinishedTime = e.Time
+ }
+ }
+ }
+ if rs.Sonobuoy != nil && rs.Sonobuoy.MetaConfig != nil {
+ reResult.Runtime.ServerConfig = rs.Sonobuoy.MetaConfig
+ }
+ if rs.Sonobuoy != nil && rs.Sonobuoy.OpctConfig != nil {
+ reResult.Runtime.OpctConfig = rs.Sonobuoy.OpctConfig
+ }
+
+ // Setup/API data: copy relevant data to be used as the archive metadata
+ // in the API.
+ if re.Setup == nil {
+ re.Setup = &ReportSetup{}
+ }
+ if re.Setup.API == nil {
+ re.Setup.API = &ReportSetupAPI{}
+ }
+ re.Setup.API.InfraTopology = reResult.Infra.Topology
+ re.Setup.API.PlatformType = string(infra.Status.PlatformStatus.Type)
+ re.Setup.API.ProviderName = string(infra.Status.PlatformStatus.Type)
+ if platformName != "" {
+ re.Setup.API.ProviderName = platformName
+ }
+ // Setup/API data: OpenShift version
+ ocpVersion := reResult.Version.OpenShift.Desired
+ re.Setup.API.OpenShiftVersion = ocpVersion
+ re.Setup.API.OpenShiftRelease = fmt.Sprintf("%s.%s", strings.Split(ocpVersion, ".")[0], strings.Split(ocpVersion, ".")[1])
+
+ // Discover execution time
+ re.Setup.API.ExecutionDate = serverFinishedTime
+ if serverFinishedTime != "" {
+ ts := strings.Replace(serverFinishedTime, "-", "", -1)
+ ts = strings.Replace(ts, ":", "", -1)
+ ts = strings.Replace(ts, "T", "", -1)
+ ts = strings.Replace(ts, "Z", "", -1)
+ re.Setup.API.SummaryName = fmt.Sprintf("%s_%s_%s.json", re.Setup.API.OpenShiftRelease, re.Setup.API.PlatformType, ts)
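+ // e.g. "4.16_External_20240101070000.json" (illustrative value), following the
+ // {release}_{platform}_{timestamp}.json convention used by the baseline index.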
+ }
+ for i := range reResult.Runtime.ServerConfig {
+ if reResult.Runtime.ServerConfig[i].Name == "UUID" {
+ re.Setup.API.UUID = reResult.Runtime.ServerConfig[i].Value
+ }
+ }
+ for i := range reResult.Runtime.OpctConfig {
+ if reResult.Runtime.OpctConfig[i].Name == "run-mode" {
+ re.Setup.API.Workflow = reResult.Runtime.OpctConfig[i].Value
+ }
+ }
+ return nil
+}
+
+// populatePluginConformance reads the plugin data, processing and creating the report data.
+func (re *ReportData) populatePluginConformance(rs *summary.ResultSummary, reResult *ReportResult, pluginID string) error {
+ var pluginSum *plugin.OPCTPluginSummary
+ var suite *summary.OpenshiftTestsSuite
+ var pluginTitle string
+ var pluginAlert string
+ var pluginAlertMessage string
+
+ switch pluginID {
+ case plugin.PluginNameKubernetesConformance:
+ pluginSum = rs.GetOpenShift().GetResultK8SValidated()
+ pluginTitle = "Results for Kubernetes Conformance Suite"
+ suite = rs.GetSuites().KubernetesConformance
+ case plugin.PluginNameOpenShiftConformance:
+ pluginSum = rs.GetOpenShift().GetResultOCPValidated()
+ pluginTitle = "Results for OpenShift Conformance Suite"
+ suite = rs.GetSuites().OpenshiftConformance
+ case plugin.PluginNameOpenShiftUpgrade:
+ pluginSum = rs.GetOpenShift().GetResultConformanceUpgrade()
+ pluginTitle = "Results for OpenShift Conformance Upgrade Suite"
+ case plugin.PluginNameConformanceReplay:
+ pluginSum = rs.GetOpenShift().GetResultConformanceReplay()
+ pluginTitle = "Results for Replay test suite"
+ case plugin.PluginNameArtifactsCollector:
+ pluginSum = rs.GetOpenShift().GetResultArtifactsCollector()
+ pluginTitle = "Results for Plugin Collector"
+ }
+
+ pluginRes := pluginSum.Status
+ reResult.Plugins[pluginID] = &ReportPlugin{
+ ID: pluginID,
+ Title: pluginTitle,
+ Name: pluginSum.Name,
+ Stat: &ReportPluginStat{
+ Completed: "TODO",
+ Status: pluginSum.Status,
+ Result: pluginRes,
+ Total: pluginSum.Total,
+ Passed: pluginSum.Passed,
+ Failed: pluginSum.Failed,
+ Timeout: pluginSum.Timeout,
+ Skipped: pluginSum.Skipped,
+ },
+ Suite: suite,
+ Tests: pluginSum.Tests,
+ }
+
+ // No additional fields are needed for non-conformance plugins.
+ switch pluginID {
+ case plugin.PluginNameOpenShiftUpgrade, plugin.PluginNameArtifactsCollector:
+ return nil
+ }
+
+ // Set counters for each filter in the pipeline.
+ // Filter SuiteOnly
+ reResult.Plugins[pluginID].Stat.FilterSuite = int64(len(pluginSum.FailedFilter1))
+ reResult.Plugins[pluginID].Stat.Filter1Excluded = int64(len(pluginSum.FailedExcludedFilter1))
+
+ // Filter Baseline
+ reResult.Plugins[pluginID].Stat.FilterBaseline = int64(len(pluginSum.FailedFilter2))
+ reResult.Plugins[pluginID].Stat.Filter2Excluded = int64(len(pluginSum.FailedExcludedFilter2))
+
+ // Filter FlakeAPI
+ reResult.Plugins[pluginID].Stat.FilterFailedPrio = int64(len(pluginSum.FailedFilter3))
+ reResult.Plugins[pluginID].Stat.Filter3Excluded = int64(len(pluginSum.FailedExcludedFilter3))
+
+ // Filter BaselineAPI
+ reResult.Plugins[pluginID].Stat.FilterFailedAPI = int64(len(pluginSum.FailedFilter4))
+ reResult.Plugins[pluginID].Stat.Filter4Excluded = int64(len(pluginSum.FailedExcludedFilter4))
+
+ // Filter KnownFailures
+ reResult.Plugins[pluginID].Stat.Filter5Failures = int64(len(pluginSum.FailedFilter5))
+ reResult.Plugins[pluginID].Stat.Filter5Excluded = int64(len(pluginSum.FailedExcludedFilter5))
+
+ // Filter Replay
+ reResult.Plugins[pluginID].Stat.Filter6Failures = int64(len(pluginSum.FailedFilter6))
+ reResult.Plugins[pluginID].Stat.Filter6Excluded = int64(len(pluginSum.FailedExcludedFilter6))
+
+ // Filter Failures (result)
+ reResult.Plugins[pluginID].Stat.FilterFailures = int64(len(pluginSum.FailedFiltered))
+ reResult.Plugins[pluginID].ErrorCounters = pluginSum.GetErrorCounters()
+
+ // Consider the plugin passed when all conformance tests have passed after filtering (removing monitor).
+ hasRuntimeError := (reResult.Plugins[pluginID].Stat.Total == 1) && (reResult.Plugins[pluginID].Stat.Failed == 1)
+ if !hasRuntimeError {
+ if reResult.Plugins[pluginID].Stat.FilterFailures == 0 {
+ reResult.Plugins[pluginID].Stat.Result = "passed"
+ }
+ // Replay is a special case: it can report failures in filter6 because it re-runs
+ // the failures from the original suite, which can include permanent failures or bugs.
+ // Replay helps with debugging and increases confidence in the results.
+ if pluginID == plugin.PluginNameConformanceReplay && reResult.Plugins[pluginID].Stat.Filter6Failures != 0 {
+ reResult.Plugins[pluginID].Stat.Result = "---"
+ }
+ }
+
+ if reResult.Plugins[pluginID].Stat.FilterFailures != 0 {
+ pluginAlert = "danger"
+ pluginAlertMessage = fmt.Sprintf("%d", int64(len(pluginSum.FailedFiltered)))
+ } else if reResult.Plugins[pluginID].Stat.FilterSuite != 0 {
+ pluginAlert = "warning"
+ pluginAlertMessage = fmt.Sprintf("%d", int64(len(pluginSum.FailedFilter1)))
+ }
+
+ if _, ok := rs.GetSonobuoy().PluginsDefinition[pluginID]; ok {
+ def := rs.GetSonobuoy().PluginsDefinition[pluginID]
+ reResult.Plugins[pluginID].Definition = &plugin.PluginDefinition{
+ PluginImage: def.Definition.Spec.Image,
+ SonobuoyImage: def.SonobuoyImage,
+ Name: def.Definition.SonobuoyConfig.PluginName,
+ }
+ }
+
+ // TODO move this filter to a dedicated function
+ noFlakes := make(map[string]struct{})
+ testTagsFailedPrio := plugin.NewTestTagsEmpty(len(pluginSum.FailedFiltered))
+ for _, test := range pluginSum.FailedFiltered {
+ noFlakes[test] = struct{}{}
+ testTagsFailedPrio.Add(&test)
+ testData := &ReportTestFailure{
+ Name: test,
+ ID: pluginSum.Tests[test].ID,
+ Documentation: pluginSum.Tests[test].Documentation,
+ }
+ if _, ok := pluginSum.Tests[test].ErrorCounters["total"]; ok {
+ testData.ErrorsCount = int64(pluginSum.Tests[test].ErrorCounters["total"])
+ }
+ reResult.Plugins[pluginID].FailedFiltered = append(reResult.Plugins[pluginID].FailedFiltered, testData)
+ }
+ reResult.Plugins[pluginID].TagsFailedPrio = testTagsFailedPrio.ShowSorted()
+ reResult.Plugins[pluginID].FailedFiltered = sortReportTestFailure(reResult.Plugins[pluginID].FailedFiltered)
+
+ flakes := reResult.Plugins[pluginID].TestsFlakeCI
+ testTagsFlakeCI := plugin.NewTestTagsEmpty(len(pluginSum.FailedFilter2))
+ for _, test := range pluginSum.FailedFilter2 {
+ if _, ok := noFlakes[test]; ok {
+ continue
+ }
+ testData := &ReportTestFailure{Name: test, ID: pluginSum.Tests[test].ID}
+ if pluginSum.Tests[test].Flake != nil {
+ testData.FlakeCount = pluginSum.Tests[test].Flake.CurrentFlakes
+ testData.FlakePerc = pluginSum.Tests[test].Flake.CurrentFlakePerc
+ }
+ testTagsFlakeCI.Add(&test)
+ if _, ok := pluginSum.Tests[test].ErrorCounters["total"]; ok {
+ testData.ErrorsCount = int64(pluginSum.Tests[test].ErrorCounters["total"])
+ }
+ flakes = append(flakes, testData)
+ }
+ reResult.Plugins[pluginID].TestsFlakeCI = sortReportTestFailure(flakes)
+ reResult.Plugins[pluginID].TagsFlakeCI = testTagsFlakeCI.ShowSorted()
+
+ // Final failures
+ // testTagsFilteredFailures := plugin.NewTestTagsEmpty(len(pluginSum.FailedFiltered))
+ // Final filters (results/priority)
+ reResult.Plugins[pluginID].BuildFailedData("final", pluginSum.FailedFiltered)
+
+ // Filter flakeAPI
+ reResult.Plugins[pluginID].BuildFailedData("F3", pluginSum.FailedExcludedFilter3)
+
+ // Filter BaselineAPI
+ reResult.Plugins[pluginID].BuildFailedData("F4", pluginSum.FailedExcludedFilter4)
+
+ // Filter Replay
+ reResult.Plugins[pluginID].BuildFailedData("F6", pluginSum.FailedExcludedFilter6)
+
+ // Filter SuiteOnly
+ reResult.Plugins[pluginID].BuildFailedData("F1", pluginSum.FailedExcludedFilter1)
+
+ // update alerts
+ if rs.Name == summary.ResultSourceNameProvider && pluginAlert != "" {
+ switch pluginID {
+ case plugin.PluginNameKubernetesConformance:
+ re.Summary.Alerts.PluginK8S = pluginAlert
+ re.Summary.Alerts.PluginK8SMessage = pluginAlertMessage
+ case plugin.PluginNameOpenShiftConformance:
+ re.Summary.Alerts.PluginOCP = pluginAlert
+ re.Summary.Alerts.PluginOCPMessage = pluginAlertMessage
+ }
+ }
+
+ return nil
+}
+
+// SaveResults persists the processed data to the result directory.
+func (re *ReportData) SaveResults(path string) error {
+ re.Summary.Runtime.Timers.Add("report-save/results")
+
+ // opct-report.json (data source)
+ reportData, err := json.MarshalIndent(re, "", " ")
+ if err != nil {
+ return fmt.Errorf("unable to process report data/report.json: %v", err)
+ }
+ // used when not using http file server
+ if re.Setup.Frontend.EmbedData {
+ re.Raw = string(reportData)
+ }
+
+ // save the report data to the result directory
+ err = os.WriteFile(fmt.Sprintf("%s/%s", path, ReportFileNameIndexJSON), reportData, 0644)
+ if err != nil {
+ return fmt.Errorf("unable to save report data/report.json: %v", err)
+ }
+
+ // create a summarized JSON to be used as baseline.
+ // reSummary, err := re.CopySummary()
+ var reSummary ReportData
+ skipSummary := false
+ if err := re.DeepCopyInto(&reSummary); err != nil {
+ log.Errorf("unable to copy report summary: %v", err)
+ skipSummary = true
+ }
+ // clean up the report data for summary artifact.
+ if !skipSummary {
+ if err := reSummary.SummaryBuilder(); err != nil {
+ log.Errorf("unable to build report summary: %v", err)
+ skipSummary = true
+ }
+ }
+ // Serialize the report summary data to JSON.
+ if !skipSummary {
+ reSummaryData, err := json.MarshalIndent(reSummary, "", " ")
+ if err != nil {
+ log.Errorf("unable to marshal report summary data: %v", err)
+ } else {
+ // save the report summary data to the result directory
+ err = os.WriteFile(fmt.Sprintf("%s/%s", path, ReportFileNameSummaryJSON), reSummaryData, 0644)
+ if err != nil {
+ log.Errorf("unable to marshal report summary data: %v", err)
+ }
+ }
+ }
+
+ // render the template files from frontend report pages.
+ for _, file := range []string{"report.html", "report.css", "filter.html"} {
+ log.Debugf("Processing file %s\n", file)
+ srcTemplate := fmt.Sprintf("%s/%s", ReportTemplateBasePath, file)
+ destFile := fmt.Sprintf("%s/opct-%s", path, file)
+ if file == "report.html" {
+ destFile = fmt.Sprintf("%s/index.html", path)
+ }
+
+ datS, err := vfs.GetData().ReadFile(srcTemplate)
+ if err != nil {
+ return fmt.Errorf("unable to read file %q from VFS: %v", srcTemplate, err)
+ }
+
+ // Change the Go template delimiters to '[[ ]]' to prevent conflicts with the
+ // JavaScript '{{ }}' delimiters in the frontend.
+ tmplS, err := template.New("report").Delims("[[", "]]").Parse(string(datS))
+ if err != nil {
+ return fmt.Errorf("unable to create template for %q: %v", srcTemplate, err)
+ }
+
+ var fileBufferS bytes.Buffer
+ err = tmplS.Execute(&fileBufferS, re)
+ if err != nil {
+ return fmt.Errorf("unable to process template for %q: %v", srcTemplate, err)
+ }
+
+ err = os.WriteFile(destFile, fileBufferS.Bytes(), 0644)
+ if err != nil {
+ return fmt.Errorf("unable to save %q: %v", srcTemplate, err)
+ }
+ }
+
+ re.Summary.Runtime.Timers.Add("report-save/results")
+ return nil
+}
+
+// ShowJSON returns the report serialized as indented JSON, to be printed to stdout.
+func (re *ReportData) ShowJSON() (string, error) {
+ val, err := json.MarshalIndent(re, "", " ")
+ if err != nil {
+ return "", err
+ }
+ return string(val), nil
+}
+
+// DeepCopyInto creates a deep copy of the report data into newRe.
+// It uses json.Marshal and json.Unmarshal to create a new copy of the data
+// without any reference to the original data.
+func (re *ReportData) DeepCopyInto(newRe *ReportData) error {
+ // var newReport ReportData
+ newReportData, err := json.Marshal(re)
+ if err != nil {
+ return err
+ }
+ err = json.Unmarshal(newReportData, newRe)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
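+// SummaryBuilder strips heavyweight fields (per-test results, verbose etcd
+// slow-request statistics, namespace errors and pod network checks) from the
+// copied report so it can be published as a compact summary/baseline artifact.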
+func (re *ReportData) SummaryBuilder() error {
+ // Clean up success tests for each plugin.
+ for p := range re.Provider.Plugins {
+ re.Provider.Plugins[p].Tests = nil
+ }
+ // Clean up unneeded data from the etcd logs parser.
+ if re.Provider != nil &&
+ re.Provider.MustGatherInfo != nil {
+ if re.Provider.MustGatherInfo.ErrorEtcdLogs != nil {
+
+ for k := range re.Provider.MustGatherInfo.ErrorEtcdLogs.FilterRequestSlowAll {
+ re.Provider.MustGatherInfo.ErrorEtcdLogs.FilterRequestSlowAll[k].StatOutliers = ""
+ }
+ re.Provider.MustGatherInfo.ErrorEtcdLogs.FilterRequestSlowHour = nil
+ }
+ re.Provider.MustGatherInfo.NamespaceErrors = nil
+ re.Provider.MustGatherInfo.PodNetworkChecks.Checks = nil
+ }
+ // What else to clean up?
+ return nil
+}
+
+//
+// Sorting functions
+//
+
+// SortedTestFailure stores a key/value pair used to rank test failures by error count.
+type SortedTestFailure struct {
+ Key *ReportTestFailure
+ Value int
+}
+
+func sortReportTestFailure(items []*ReportTestFailure) []*ReportTestFailure {
+ rank := make(SortedListTestFailure, len(items))
+ i := 0
+ for _, v := range items {
+ rank[i] = SortedTestFailure{v, int(v.ErrorsCount)}
+ i++
+ }
+ sort.Sort(sort.Reverse(rank))
+ newItems := make([]*ReportTestFailure, len(items))
+ for i, data := range rank {
+ newItems[i] = data.Key
+ }
+ return newItems
+}
+
+// SortedListTestFailure stores a list of key/value pairs, implementing the sort
+// interface to rank test failures by their integer values.
+type SortedListTestFailure []SortedTestFailure
+
+func (p SortedListTestFailure) Len() int { return len(p) }
+func (p SortedListTestFailure) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+func (p SortedListTestFailure) Less(i, j int) bool { return p[i].Value < p[j].Value }
diff --git a/internal/report/report.go b/internal/report/report.go
new file mode 100644
index 00000000..9a977efc
--- /dev/null
+++ b/internal/report/report.go
@@ -0,0 +1,17 @@
+package report
+
+// TODO(mtulio):
+// - create single interface to create report
+// - move ConsolidatedSummary actions to here
+// - report must extract the data from the extractor (consolidated summary)
+// - report must validate the data from the extractor (consolidated summary)
+// - report must create the report from the data from the extractor (consolidated summary)
+// - report must save the report to the filesystem
+// - report must serve the report to the user
+// - report must have a way to be tested
+//
+// The report must be able to output in different formats (html, json, cli, etc)
+// ETL strategy:
+// - Extract: read test results from artifacts and save them in memory
+// - Transform: apply rules to summarize to create the data layer
+// - Load: process the data collected to outputs: (json, cli, html, etc)
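+//
+// A minimal sketch of the single interface described above (hypothetical names,
+// not implemented yet):
+//
+//	type Builder interface {
+//		Extract(archivePath string) error // read results from artifacts into memory
+//		Transform() (*ReportData, error)  // summarize the data into the report layer
+//		Load(outputDir string) error      // render the outputs (json, cli, html)
+//	}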
diff --git a/internal/report/slo.go b/internal/report/slo.go
new file mode 100644
index 00000000..803bbba4
--- /dev/null
+++ b/internal/report/slo.go
@@ -0,0 +1,955 @@
+// Description: This file contains the implementation of the SLO interface,
+// translated to "checks" in the OPCT report package. The SLO interface is defined
+// in the report package, and the package implements SLIs to ensure the acceptance
+// criteria are met in the data collected from artifacts.
+// Reference: https://github.com/kubernetes/community/blob/master/sig-scalability/slos/slos.md
+package report
+
+import (
+ "fmt"
+ "os"
+ "strconv"
+ "strings"
+
+ "github.com/redhat-openshift-ecosystem/provider-certification-tool/internal/opct/plugin"
+ log "github.com/sirupsen/logrus"
+)
+
+const (
+ docsRulesPath = "/review/rules"
+ defaultBaseURL = "https://redhat-openshift-ecosystem.github.io/provider-certification-tool"
+
+ CheckResultNamePass CheckResultName = "pass"
+ CheckResultNameFail CheckResultName = "fail"
+ CheckResultNameWarn CheckResultName = "warn"
+ CheckResultNameSkip CheckResultName = "skip"
+
+ CheckIdEmptyValue string = "--"
+
+ // SLOs
+ CheckID001 string = "OPCT-001"
+ CheckID004 string = "OPCT-004"
+ CheckID005 string = "OPCT-005"
+ CheckID022 string = "OPCT-022"
+ CheckID023A string = "OPCT-023A"
+ CheckID023B string = "OPCT-023B"
+)
+
+type CheckResultName string
+
+type CheckResult struct {
+ Name CheckResultName `json:"result"`
+ Message string `json:"message"`
+ Target string `json:"want"`
+ Actual string `json:"got"`
+}
+
+func (cr *CheckResult) String() string {
+ return string(cr.Name)
+}
+
+type SLOOutput struct {
+ ID string `json:"id"`
+ SLO string `json:"slo"`
+
+ // SLOResult is the result of the check evaluation (e.g. pass or fail).
+ SLOResult string `json:"sloResult"`
+
+ // SLITarget is the target value
+ SLITarget string `json:"sliTarget"`
+
+ // SLIActual is the current measured value of the indicator.
+ SLIActual string `json:"sliCurrent"`
+
+ Message string `json:"message"`
+
+ Documentation string `json:"documentation"`
+}
+
+type Check struct {
+ // ID is the unique identifier for the check. It is used
+ // to build the documentation link for each check.
+ ID string `json:"id"`
+
+ // Name is the unique name for the check to be reported.
+ // It must have short and descriptive name identifying the
+ // failure item.
+ Name string `json:"name"`
+
+ // Description describes shortly the check.
+ Description string `json:"description"`
+
+ // Documentation must point to documentation URL to review the
+ // item.
+ Documentation string `json:"documentation"`
+
+ // Result reports the result of the check; when it passes, the check is
+ // accepted by the tool, otherwise it fails and must be reviewed.
+ Result CheckResult `json:"result"`
+
+ // ResultMessage string `json:"resultMessage"`
+
+ Test func() CheckResult `json:"-"`
+
+ // Priority is the execution priority of the check.
+ // 0 is the highest.
+ Priority uint64
+}
+
+func ExampleAcceptanceCheckPass() CheckResultName {
+ return CheckResultNamePass
+}
+
+func AcceptanceCheckFail() CheckResultName {
+ return CheckResultNameFail
+}
+
+// func CheckRespCustomFail(custom string) CheckResult {
+// resp := CheckResult(fmt.Sprintf("%s [%s]", CheckResultNameFail, custom))
+// return resp
+// }
+
+// CheckSummary aggregates the checks.
+type CheckSummary struct {
+ baseURL string
+ Checks []*Check `json:"checks"`
+}
+
+func NewCheckSummary(re *ReportData) *CheckSummary {
+ baseURL := defaultBaseURL
+ msgDefaultNotMatch := "default value does not match the acceptance criteria"
+ // Developer environment:
+ // $ mkdocs serve
+ // $ export OPCT_DEV_BASE_URL_DOC="http://127.0.0.1:8000/provider-certification-tool"
+ localDevBaseURL := os.Getenv("OPCT_DEV_BASE_URL_DOC")
+ if localDevBaseURL != "" {
+ baseURL = localDevBaseURL
+ }
+ checkSum := &CheckSummary{
+ Checks: []*Check{},
+ baseURL: fmt.Sprintf("%s%s", baseURL, docsRulesPath),
+ }
+ // Cluster Checks
+ checkSum.Checks = append(checkSum.Checks, &Check{
+ ID: "OPCT-020",
+ Name: "All nodes must be healthy",
+ Test: func() CheckResult {
+ res := CheckResult{Name: CheckResultNameFail, Target: "100%"}
+ if re.Provider == nil || re.Provider.ClusterHealth == nil {
+ log.Debugf("Check Failed: OPCT-008: unavailable results")
+ return res
+ }
+ res.Actual = fmt.Sprintf("%.3f%%", re.Provider.ClusterHealth.NodeHealthPerc)
+ if re.Provider.ClusterHealth.NodeHealthPerc != 100 {
+ log.Debugf("Check Failed: OPCT-008: want[!=100] got[%f]", re.Provider.ClusterHealth.NodeHealthPerc)
+ return res
+ }
+ res.Name = CheckResultNamePass
+ return res
+ },
+ })
+ checkSum.Checks = append(checkSum.Checks, &Check{
+ ID: "OPCT-021",
+ Name: "Pods Healthy must report higher than 98%",
+ Test: func() CheckResult {
+ res := CheckResult{Name: CheckResultNameFail, Target: ">=98%"}
+ if re.Provider == nil || re.Provider.ClusterHealth == nil {
+ return res
+ }
+ res.Actual = fmt.Sprintf("%.3f", re.Provider.ClusterHealth.PodHealthPerc)
+ if re.Provider.ClusterHealth.PodHealthPerc < 98.0 {
+ return res
+ }
+ res.Name = CheckResultNamePass
+ return res
+ },
+ })
+ // Plugins Checks
+ checkSum.Checks = append(checkSum.Checks, &Check{
+ ID: CheckID001,
+ Name: "Kubernetes Conformance [10-openshift-kube-conformance] must pass 100%",
+ Test: func() CheckResult {
+ res := CheckResult{Name: CheckResultNameFail, Target: "Priority==0|Total!=Failed"}
+ prefix := "Check Failed - " + CheckID001
+ if _, ok := re.Provider.Plugins[plugin.PluginNameKubernetesConformance]; !ok {
+ log.Debugf("%s Runtime: processed plugin data not found: %v", prefix, re.Provider.Plugins[plugin.PluginNameKubernetesConformance])
+ return res
+ }
+ p := re.Provider.Plugins[plugin.PluginNameKubernetesConformance]
+ if p.Stat.Total == p.Stat.Failed {
+ res.Message = "Potential Runtime Failure. Check the Plugin logs."
+ res.Actual = "Total==Failed"
+ log.Debugf("%s Runtime: Total and Failed counters are equals indicating execution failure", prefix)
+ return res
+ }
+ res.Actual = fmt.Sprintf("Priority==%d", len(p.TestsFailedPrio))
+ if len(p.TestsFailedPrio) > 0 {
+ log.Debugf("%s Acceptance criteria: TestsFailedPrio counter are greater than 0: %v", prefix, len(p.TestsFailedPrio))
+ return res
+ }
+ res.Name = CheckResultNamePass
+ return res
+ },
+ })
+ checkSum.Checks = append(checkSum.Checks, &Check{
+ ID: CheckID004,
+ Name: "OpenShift Conformance [20-openshift-conformance-validated]: Pass ratio must be >=98.5%",
+ Test: func() CheckResult {
+ prefix := "Check Failed - " + CheckID004
+ res := CheckResult{
+ Name: CheckResultNameFail,
+ Target: "Pass>=98.5%(Fail>1.5%)",
+ }
+ if _, ok := re.Provider.Plugins[plugin.PluginNameOpenShiftConformance]; !ok {
+ return res
+ }
+ // "Acceptance" are relative, the baselines is observed to set
+ // an "accepted" value considering a healthy cluster in known provider/installation.
+ p := re.Provider.Plugins[plugin.PluginNameOpenShiftConformance]
+ if p.Stat == nil {
+ log.Debugf("%s Runtime: Stat not found", prefix)
+ return res
+ }
+ if p.Stat.Total == p.Stat.Failed {
+ res.Message = "Potential Runtime Failure. Check the Plugin logs."
+ res.Actual = "Total==Failed"
+ log.Debugf("%s Runtime: Total and Failed counters are equals indicating execution failure", prefix)
+ return res
+ }
+ perc := (float64(p.Stat.Failed) / float64(p.Stat.Total)) * 100
+ res.Actual = fmt.Sprintf("Fail==%.2f%%(%d)", perc, p.Stat.Failed)
+ if perc > 1.5 {
+ return res
+ }
+ res.Name = CheckResultNamePass
+ return res
+ },
+ })
+ checkSum.Checks = append(checkSum.Checks, &Check{
+ ID: CheckID005,
+ Name: "OpenShift Conformance Validation [20]: Filter Priority Requirement >= 99.5%",
+ Test: func() CheckResult {
+ prefix := "Check Failed - " + CheckID005
+ target := 0.5
+ res := CheckResult{
+ Name: CheckResultNameFail,
+ Target: fmt.Sprintf("W<=%.2f%%,F>%.2f%%", target, target),
+ Actual: "N/A",
+ }
+ if _, ok := re.Provider.Plugins[plugin.PluginNameOpenShiftConformance]; !ok {
+ return res
+ }
+ // "Acceptance" are relative, the baselines is observed to set
+ // an "accepted" value considering a healthy cluster in known provider/installation.
+ // plugin := re.Provider.Plugins[plugin.PluginNameOpenShiftConformance]
+ p := re.Provider.Plugins[plugin.PluginNameOpenShiftConformance]
+ if p.Stat.Total == p.Stat.Failed {
+ res.Message = "Potential Runtime Failure. Check the Plugin logs."
+ res.Actual = "Total==Failed"
+ log.Debugf("%s Runtime: Total and Failed counters are equals indicating execution failure", prefix)
+ return res
+ }
+ perc := (float64(p.Stat.FilterFailedPrio) / float64(p.Stat.Total)) * 100
+ res.Actual = fmt.Sprintf("Fail==%.2f%%(%d)", perc, p.Stat.FilterFailedPrio)
+ if perc > target {
+ res.Name = CheckResultNameFail
+ return res
+ }
+ // if perc > 0 && perc <= target {
+ // res.Name = CheckResultNameWarn
+ // return res
+ // }
+ res.Name = CheckResultNamePass
+ return res
+ },
+ })
+ checkSum.Checks = append(checkSum.Checks, &Check{
+ ID: "OPCT-005B",
+ Name: "OpenShift Conformance Validation [20]: Required to Pass After Filtering",
+ Test: func() CheckResult {
+ prefix := "Check OPCT-005B Failed"
+ target := 0.50
+ res := CheckResult{
+ Name: CheckResultNameFail,
+ Target: fmt.Sprintf("Pass==100%%(W<=%.2f%%,F>%.2f%%)", target, target),
+ Actual: "N/A",
+ }
+ if _, ok := re.Provider.Plugins[plugin.PluginNameOpenShiftConformance]; !ok {
+ return res
+ }
+ // "Acceptance" are relative, the baselines is observed to set
+ // an "accepted" value considering a healthy cluster in known provider/installation.
+ // plugin := re.Provider.Plugins[plugin.PluginNameOpenShiftConformance]
+ p := re.Provider.Plugins[plugin.PluginNameOpenShiftConformance]
+ if p.Stat.Total == p.Stat.Failed {
+ res.Message = "Potential Runtime Failure. Check the Plugin logs."
+ res.Actual = "Total==Failed"
+ log.Debugf("%s Runtime: Total and Failed counters are equals indicating execution failure", prefix)
+ return res
+ }
+ perc := (float64(p.Stat.FilterFailures) / float64(p.Stat.Total)) * 100
+ res.Actual = fmt.Sprintf("Fail==%.2f%%(%d)", perc, p.Stat.FilterFailures)
+ if perc > target {
+ res.Name = CheckResultNameFail
+ return res
+ }
+ if perc > 0 && perc <= target {
+ res.Name = CheckResultNameWarn
+ return res
+ }
+ res.Name = CheckResultNamePass
+ return res
+ },
+ })
+ // TODO: validate if this test is duplicated with OPCT-005
+ // checkSum.Checks = append(checkSum.Checks, &Check{
+ // ID: "OPCT-TBD",
+ // Name: "OpenShift Conformance [20-openshift-conformance-validated]: Pass 100% with Baseline",
+ // Test: func() CheckResult {
+ // prefix := "Check OPCT-TBD Failed"
+ // res := CheckResult{
+ // Name: CheckResultNameFail,
+ // Target: "Pass==100%",
+ // Actual: "N/A",
+ // }
+ // if _, ok := re.Provider.Plugins[plugin.PluginNameOpenShiftConformance]; !ok {
+ // return res
+ // }
+ // if re.Baseline == nil {
+ // res.Name = CheckResultNameSkip
+ // return res
+ // }
+ // if _, ok := re.Baseline.Plugins[plugin.PluginNameOpenShiftConformance]; !ok {
+ // res.Name = CheckResultNameSkip
+ // return res
+ // }
+ // // "Acceptance" are relative, the baselines is observed to set
+ // // an "accepted" value considering a healthy cluster in known provider/installation.
+ // p := re.Provider.Plugins[plugin.PluginNameOpenShiftConformance]
+ // if p.Stat.Total == p.Stat.Failed {
+ // res.Message = "Potential Runtime Failure. Check the Plugin logs."
+ // res.Actual = "Total==Failed"
+ // log.Debugf("%s Runtime: Total and Failed counters are equals indicating execution failure", prefix)
+ // return res
+ // }
+ // perc := (float64(p.Stat.FilterFailedPrio) / float64(p.Stat.Total)) * 100
+ // res.Actual = fmt.Sprintf("FailedPrio==%.2f%%", perc)
+ // if perc > 0 {
+ // res.Name = CheckResultNameFail
+ // return res
+ // }
+ // res.Name = CheckResultNamePass
+ // return res
+ // },
+ // })
+
+ checkSum.Checks = append(checkSum.Checks, &Check{
+ ID: "OPCT-011",
+ Name: "The test suite should generate fewer error reports in the logs",
+ Test: func() CheckResult {
+ // threshold for warn and fail
+ thWarn := 150
+ thFail := 300
+ res := CheckResult{
+ Name: CheckResultNameWarn,
+ Target: fmt.Sprintf("Pass<=%d(W>%d,F>%d)", thWarn, thWarn, thFail),
+ Actual: "N/A",
+ }
+ if re.Provider.ErrorCounters == nil {
+ res.Name = CheckResultNameWarn
+ res.Actual = "No counters"
+ return res
+ }
+ cnt := *re.Provider.ErrorCounters
+ if _, ok := cnt["total"]; !ok {
+ res.Message = "Unable to load Total Counter"
+ res.Name = CheckResultNameFail
+ res.Actual = "ERR !total"
+ return res
+ }
+ // "Acceptance" are relative, the baselines is observed to set
+ // an "accepted" value considering a healthy cluster in known provider/installation.
+ total := cnt["total"]
+ res.Actual = fmt.Sprintf("%d", total)
+ // Error
+ if total > thFail {
+ res.Name = CheckResultNameFail
+ return res
+ }
+ // Warn
+ if total > thWarn {
+ return res
+ }
+ // 0? really? something went wrong!
+ if total == 0 {
+ res.Name = CheckResultNameFail
+ res.Actual = "WARN missing counters"
+ return res
+ }
+ res.Name = CheckResultNamePass
+ return res
+ },
+ })
+ checkSum.Checks = append(checkSum.Checks, &Check{
+ ID: "OPCT-010",
+ Name: "The cluster logs should generate fewer error reports in the logs",
+ Test: func() CheckResult {
+ passLimit := 30000
+ failLimit := 100000
+ res := CheckResult{
+ Name: CheckResultNameFail,
+ Target: "W:<=30k,F:>100k",
+ Actual: "N/A",
+ }
+ prefix := "Check OPCT-007 Failed"
+ if re.Provider.MustGatherInfo == nil {
+ log.Debugf("%s: MustGatherInfo is not defined", prefix)
+ res.Name = CheckResultNameFail
+ res.Actual = "ERR !must-gather"
+ return res
+ }
+ if _, ok := re.Provider.MustGatherInfo.ErrorCounters["total"]; !ok {
+ log.Debugf("%s: OPCT-007: ErrorCounters[\"total\"]", prefix)
+ res.Name = CheckResultNameFail
+ res.Actual = "ERR !counters"
+ return res
+ }
+ // "Acceptance" are relative, the baselines is observed to set
+ // an "accepted" value considering a healthy cluster in known provider/installation.
+ total := re.Provider.MustGatherInfo.ErrorCounters["total"]
+ res.Actual = fmt.Sprintf("%d", total)
+ if total > passLimit && total < failLimit {
+ res.Name = CheckResultNameWarn
+ log.Debugf("%s WARN acceptance criteria: want[<=%d] got[%d]", prefix, passLimit, total)
+ return res
+ }
+ if total >= failLimit {
+ res.Name = CheckResultNameFail
+ log.Debugf("%s FAIL acceptance criteria: want[<=%d] got[%d]", prefix, passLimit, total)
+ return res
+ }
+ // 0? really? something went wrong!
+ if total == 0 {
+ log.Debugf("%s FAIL acceptance criteria: want[!=0] got[%d]", prefix, total)
+ res.Name = CheckResultNameFail
+ res.Actual = "ERR total==0"
+ return res
+ }
+ res.Name = CheckResultNamePass
+ return res
+ },
+ })
+ checkSum.Checks = append(checkSum.Checks, &Check{
+ ID: "OPCT-003",
+ Name: "Plugin Collector [99-openshift-artifacts-collector] must pass",
+ Test: func() CheckResult {
+ prefix := "Check OPCT-003 Failed"
+ res := CheckResult{Name: CheckResultNameFail, Target: "passed", Actual: "N/A"}
+ if _, ok := re.Provider.Plugins[plugin.PluginNameArtifactsCollector]; !ok {
+ return res
+ }
+ p := re.Provider.Plugins[plugin.PluginNameArtifactsCollector]
+ if p.Stat.Total == p.Stat.Failed {
+ log.Debugf("%s Runtime: Total and Failed counters are equals indicating execution failure", prefix)
+ return res
+ }
+ // Acceptance check
+ res.Actual = re.Provider.Plugins[plugin.PluginNameArtifactsCollector].Stat.Status
+ if res.Actual == "passed" {
+ res.Name = CheckResultNamePass
+ return res
+ }
+ log.Debugf("%s: %s", prefix, msgDefaultNotMatch)
+ return res
+ },
+ })
+ checkSum.Checks = append(checkSum.Checks, &Check{
+ ID: "OPCT-002",
+ Name: "Plugin Conformance Upgrade [05-openshift-cluster-upgrade] must pass",
+ Test: func() CheckResult {
+ prefix := "Check OPCT-002 Failed"
+ res := CheckResult{Name: CheckResultNameFail, Target: "passed"}
+ if _, ok := re.Provider.Plugins[plugin.PluginNameOpenShiftUpgrade]; !ok {
+ return res
+ }
+ res.Actual = re.Provider.Plugins[plugin.PluginNameOpenShiftUpgrade].Stat.Status
+ if res.Actual == "passed" {
+ res.Name = CheckResultNamePass
+ return res
+ }
+ log.Debugf("%s: %s", prefix, msgDefaultNotMatch)
+ return res
+ },
+ })
+ // TODO(etcd)
+ /*
+ checkSum.Checks = append(checkSum.Checks, &Check{
+ Name: "[TODO] etcd fio must accept the tests (TODO)",
+ Test: AcceptanceCheckFail,
+ })
+ */
+ checkSum.Checks = append(checkSum.Checks, &Check{
+ ID: "OPCT-010A",
+ Name: "etcd logs: slow requests: average should be under 500ms",
+ Test: func() CheckResult {
+ prefix := "Check OPCT-010A Failed"
+ wantLimit := 500.0
+ res := CheckResult{
+ Name: CheckResultNameFail,
+ Target: fmt.Sprintf("<=%.2f ms", wantLimit),
+ Actual: "N/A",
+ }
+ if re.Provider == nil {
+ log.Debugf("%s: unable to read provider information.", prefix)
+ return res
+ }
+ if re.Provider.MustGatherInfo == nil {
+ res.Actual = "ERR !must-gather"
+ log.Debugf("%s: unable to read must-gather information.", prefix)
+ return res
+ }
+ if re.Provider.MustGatherInfo.ErrorEtcdLogs == nil {
+ res.Actual = "ERR !logs"
+ log.Debugf("%s: unable to etcd stat from must-gather.", prefix)
+ return res
+ }
+ if re.Provider.MustGatherInfo.ErrorEtcdLogs.FilterRequestSlowAll["all"] == nil {
+ res.Actual = "ERR !counters"
+ log.Debugf("%s: unable to read statistics from parsed etcd logs.", prefix)
+ return res
+ }
+ if re.Provider.MustGatherInfo.ErrorEtcdLogs.FilterRequestSlowAll["all"].StatMean == "" {
+ res.Actual = "ERR !p50"
+ log.Debugf("%s: unable to get p50/mean statistics from parsed data: %v", prefix, re.Provider.MustGatherInfo.ErrorEtcdLogs.FilterRequestSlowAll["all"])
+ return res
+ }
+ values := strings.Split(re.Provider.MustGatherInfo.ErrorEtcdLogs.FilterRequestSlowAll["all"].StatMean, " ")
+ if values[0] == "" {
+ log.Debugf("%s: unable to get parse p50/mean: %v", prefix, values)
+ return res
+ }
+ value, err := strconv.ParseFloat(values[0], 64)
+ if err != nil {
+ log.Debugf("%s: unable to convert p50/mean to float: %v", prefix, err)
+ return res
+ }
+ res.Actual = fmt.Sprintf("%.3f", value)
+ if value >= wantLimit {
+ log.Debugf("%s acceptance criteria: want=[<%.0f] got=[%v]", prefix, wantLimit, value)
+ return res
+ }
+ res.Name = CheckResultNamePass
+ return res
+ },
+ })
+ checkSum.Checks = append(checkSum.Checks, &Check{
+ ID: "OPCT-010B",
+ Name: "etcd logs: slow requests: maximum should be under 1000ms",
+ Test: func() CheckResult {
+ prefix := "Check OPCT-010B Failed"
+ wantLimit := 1000.0
+ res := CheckResult{
+ Name: CheckResultNameFail,
+ Target: fmt.Sprintf("<=%.2f ms", wantLimit),
+ Actual: "N/A",
+ }
+ if re.Provider.MustGatherInfo == nil {
+ res.Actual = "ERR !must-gather"
+ log.Debugf("%s: unable to read must-gather information.", prefix)
+ return res
+ }
+ if re.Provider.MustGatherInfo.ErrorEtcdLogs == nil {
+ res.Actual = "ERR !logs"
+ log.Debugf("%s: unable to etcd stat from must-gather.", prefix)
+ return res
+ }
+ if re.Provider.MustGatherInfo.ErrorEtcdLogs.FilterRequestSlowAll["all"] == nil {
+ res.Actual = "ERR !counters"
+ log.Debugf("%s: unable to read statistics from parsed etcd logs.", prefix)
+ return res
+ }
+ if re.Provider.MustGatherInfo.ErrorEtcdLogs.FilterRequestSlowAll["all"].StatMax == "" {
+ res.Actual = "ERR !max"
+ log.Debugf("%s: unable to get max statistics from parsed data: %v", prefix, re.Provider.MustGatherInfo.ErrorEtcdLogs.FilterRequestSlowAll["all"])
+ return res
+ }
+ values := strings.Split(re.Provider.MustGatherInfo.ErrorEtcdLogs.FilterRequestSlowAll["all"].StatMax, " ")
+ if values[0] == "" {
+ res.Actual = "ERR !max"
+ log.Debugf("%s: unable to get parse max: %v", prefix, values)
+ return res
+ }
+ value, err := strconv.ParseFloat(values[0], 64)
+ if err != nil {
+ res.Actual = "ERR !max"
+ log.Debugf("%s: unable to convert max to float: %v", prefix, err)
+ return res
+ }
+ res.Actual = fmt.Sprintf("%.3f", value)
+ if value >= wantLimit {
+ log.Debugf("%s acceptance criteria: want=[<%.0f] got=[%v]", prefix, wantLimit, value)
+ return res
+ }
+ res.Name = CheckResultNamePass
+ return res
+ },
+ })
+ checkSum.Checks = append(checkSum.Checks, &Check{
+ ID: CheckID022,
+ Name: "Detected one or more plugin(s) with potential invalid result",
+ Test: func() CheckResult {
+ prefix := "Check Failed - " + CheckID022
+
+ res := CheckResult{Name: CheckResultNameFail, Target: "passed", Actual: "N/A"}
+ checkPlugins := []string{
+ plugin.PluginNameKubernetesConformance,
+ plugin.PluginNameOpenShiftConformance,
+ plugin.PluginNameArtifactsCollector,
+ }
+ invalidPluginIds := []string{}
+ for _, pluginName := range checkPlugins {
+ if _, ok := re.Provider.Plugins[pluginName]; !ok {
+ return res
+ }
+ p := re.Provider.Plugins[pluginName]
+ if p.Stat.Total == p.Stat.Failed {
+ log.Debugf("%s Runtime: Total and Failed counters are equal, indicating an execution failure", prefix)
+ invalidPluginIds = append(invalidPluginIds, strings.Split(pluginName, "-")[0])
+ }
+ }
+
+ if len(invalidPluginIds) > 0 {
+ res.Actual = fmt.Sprintf("Failed%v", invalidPluginIds)
+ return res
+ }
+
+ res.Name = CheckResultNamePass
+ res.Actual = "passed"
+ log.Debugf("%s: %s", prefix, msgDefaultNotMatch)
+ return res
+ },
+ })
+ checkSum.Checks = append(checkSum.Checks, &Check{
+ ID: CheckID023A,
+ // Total should be greater than 300.
+ Name: "Sanity [10-openshift-kube-conformance]: potential missing tests in suite",
+ Test: func() CheckResult {
+ prefix := "Check Failed - " + CheckID023A
+ res := CheckResult{
+ Name: CheckResultNameFail,
+ Target: "F:<300",
+ Actual: "N/A",
+ }
+ if _, ok := re.Provider.Plugins[plugin.PluginNameKubernetesConformance]; !ok {
+ res.Actual = "ERR !plugin"
+ return res
+ }
+ p := re.Provider.Plugins[plugin.PluginNameKubernetesConformance]
+ res.Actual = fmt.Sprintf("Total==%d", p.Stat.Total)
+ if p.Stat.Total <= 300 {
+ log.Debugf("%s: found less than expected tests count=%d. Are you running in devel mode?", prefix, p.Stat.Total)
+ return res
+ }
+ res.Name = CheckResultNamePass
+ return res
+ },
+ })
+ checkSum.Checks = append(checkSum.Checks, &Check{
+ ID: CheckID023B,
+ // Total should be greater than 3000.
+ Name: "Sanity [20-openshift-conformance-validated]: potential missing tests in suite",
+ Test: func() CheckResult {
+ prefix := "Check Failed - " + CheckID023B
+ res := CheckResult{
+ Name: CheckResultNameFail,
+ Target: "F:<3000",
+ Actual: "N/A",
+ }
+ if _, ok := re.Provider.Plugins[plugin.PluginNameOpenShiftConformance]; !ok {
+ res.Actual = "ERR !plugin"
+ return res
+ }
+ p := re.Provider.Plugins[plugin.PluginNameOpenShiftConformance]
+ res.Actual = fmt.Sprintf("Total==%d", p.Stat.Total)
+ if p.Stat.Total <= 3000 {
+ log.Debugf("%s: found less than expected tests count=%d. Is it running in devel mode?!", prefix, p.Stat.Total)
+ return res
+ }
+ res.Name = CheckResultNamePass
+ return res
+ },
+ })
+ checkSum.Checks = append(checkSum.Checks, &Check{
+ ID: "OPCT-030",
+ Name: "Node Topology: ControlPlaneTopology HighlyAvailable must use multi-zone",
+ Test: func() CheckResult {
+ prefix := "Check OPCT-030 Failed"
+ res := CheckResult{
+ Name: CheckResultNameFail,
+ Target: "W:>1,P:>2",
+ Actual: "N/A",
+ }
+ if re.Provider.Infra == nil {
+ log.Debugf("%s: missing Infrastructure object to discover ControlPlaneTopology", prefix)
+ res.Actual = "ERR !infra"
+ return res
+ }
+ if re.Provider.Infra.ControlPlaneTopology != "HighlyAvailable" {
+ res.Name = CheckResultNameSkip
+ res.Actual = fmt.Sprintf("Topology==%s", re.Provider.Infra.ControlPlaneTopology)
+ return res
+ }
+ // Skip when zone topology isn't available (no cloud-provider information).
+ provider := re.Provider.Infra.PlatformType
+ if re.Provider.Infra.PlatformType == "None" {
+ res.Name = CheckResultNameSkip
+ res.Actual = fmt.Sprintf("Type==%s", provider)
+ return res
+ }
+ // A HighlyAvailable control plane is expected to have at least three nodes.
+ if len(re.Provider.Nodes) < 3 {
+ log.Debugf("%s: fewer than three nodes reported", prefix)
+ res.Actual = fmt.Sprintf("Nodes==%d", len(re.Provider.Nodes))
+ return res
+ }
+ controlPlaneZones := map[string]struct{}{}
+ for _, node := range re.Provider.Nodes {
+ if !node.ControlPlane {
+ continue
+ }
+ if zone, ok := node.Labels["topology.kubernetes.io/zone"]; ok {
+ controlPlaneZones[zone] = struct{}{}
+ }
+ }
+ if len(controlPlaneZones) < 2 {
+ log.Debugf("%s: found one zone: %v", prefix, controlPlaneZones)
+ res.Actual = fmt.Sprintf("Zones==%d", len(controlPlaneZones))
+ return res
+ }
+ res.Name = CheckResultNamePass
+ res.Actual = fmt.Sprintf("Zones==%d", len(controlPlaneZones))
+ return res
+ },
+ })
+ // OpenShift / Infrastructure Object Check
+ checkSum.Checks = append(checkSum.Checks, &Check{
+ ID: CheckIdEmptyValue,
+ Name: "Platform Type must be supported by OPCT",
+ Test: func() CheckResult {
+ prefix := "Check OPCT-TBD Failed"
+ res := CheckResult{Name: CheckResultNameFail, Target: "None|External|AWS|Azure"}
+ if re.Provider == nil || re.Provider.Infra == nil {
+ res.Message = fmt.Sprintf("%s: unable to read the infrastructure object", prefix)
+ log.Debug(res.Message)
+ return res
+ }
+ // Acceptance Criteria
+ res.Actual = re.Provider.Infra.PlatformType
+ switch res.Actual {
+ case "None", "External", "AWS", "Azure":
+ res.Name = CheckResultNamePass
+ return res
+ }
+ log.Debugf("%s (Platform Type): %s: got=[%s]", prefix, msgDefaultNotMatch, re.Provider.Infra.PlatformType)
+ return res
+ },
+ })
+ checkSum.Checks = append(checkSum.Checks, &Check{
+ ID: CheckIdEmptyValue,
+ Name: "Cluster Version Operator must be Available",
+ Test: func() CheckResult {
+ res := CheckResult{Name: CheckResultNameFail, Target: "True"}
+ prefix := "Check Failed"
+ if re.Provider == nil || re.Provider.Version == nil || re.Provider.Version.OpenShift == nil {
+ res.Message = fmt.Sprintf("%s: unable to read provider version", prefix)
+ return res
+ }
+ res.Actual = re.Provider.Version.OpenShift.CondAvailable
+ if res.Actual != "True" {
+ return res
+ }
+ res.Name = CheckResultNamePass
+ return res
+ },
+ })
+ checkSum.Checks = append(checkSum.Checks, &Check{
+ ID: CheckIdEmptyValue,
+ Name: "Cluster condition Failing must be False",
+ Test: func() CheckResult {
+ res := CheckResult{Name: CheckResultNameFail, Target: "False"}
+ prefix := "Check Failed"
+ if re.Provider == nil || re.Provider.Version == nil || re.Provider.Version.OpenShift == nil {
+ res.Message = fmt.Sprintf("%s: unable to read provider version", prefix)
+ return res
+ }
+ res.Actual = re.Provider.Version.OpenShift.CondFailing
+ if res.Actual != "False" {
+ return res
+ }
+ res.Name = CheckResultNamePass
+ return res
+ },
+ })
+ checkSum.Checks = append(checkSum.Checks, &Check{
+ ID: CheckIdEmptyValue,
+ Name: "Cluster upgrade must not be Progressing",
+ Test: func() CheckResult {
+ res := CheckResult{Name: CheckResultNameFail, Target: "False"}
+ if re.Provider == nil || re.Provider.Version == nil || re.Provider.Version.OpenShift == nil {
+ return res
+ }
+ res.Actual = re.Provider.Version.OpenShift.CondProgressing
+ if res.Actual != "False" {
+ return res
+ }
+ res.Name = CheckResultNamePass
+ return res
+ },
+ })
+ checkSum.Checks = append(checkSum.Checks, &Check{
+ ID: CheckIdEmptyValue,
+ Name: "Cluster ReleaseAccepted must be True",
+ Test: func() CheckResult {
+ res := CheckResult{Name: CheckResultNameFail, Target: "True"}
+ if re.Provider == nil || re.Provider.Version == nil || re.Provider.Version.OpenShift == nil {
+ return res
+ }
+ res.Actual = re.Provider.Version.OpenShift.CondReleaseAccepted
+ if res.Actual != "True" {
+ return res
+ }
+ res.Name = CheckResultNamePass
+ return res
+ },
+ })
+ checkSum.Checks = append(checkSum.Checks, &Check{
+ ID: CheckIdEmptyValue,
+ Name: "Infrastructure status must have Topology=HighlyAvailable",
+ Test: func() CheckResult {
+ res := CheckResult{Name: CheckResultNameFail, Target: "HighlyAvailable"}
+ if re.Provider == nil || re.Provider.Infra == nil {
+ return res
+ }
+ res.Actual = re.Provider.Infra.Topology
+ if res.Actual != "HighlyAvailable" {
+ return res
+ }
+ res.Name = CheckResultNamePass
+ return res
+ },
+ })
+ checkSum.Checks = append(checkSum.Checks, &Check{
+ ID: CheckIdEmptyValue,
+ Name: "Infrastructure status must have ControlPlaneTopology=HighlyAvailable",
+ Test: func() CheckResult {
+ res := CheckResult{Name: CheckResultNameFail, Target: "HighlyAvailable"}
+ if re.Provider == nil || re.Provider.Infra == nil {
+ return res
+ }
+ res.Actual = re.Provider.Infra.ControlPlaneTopology
+ if re.Provider.Infra.ControlPlaneTopology != "HighlyAvailable" {
+ return res
+ }
+ res.Name = CheckResultNamePass
+ return res
+ },
+ })
+ // TODO(network): podConnectivityChecks must not have outages
+
+ // TODO:
+ // Question#1: Do we need this test considering there is a check of passing=100% on kube conformance?
+ // Question#2: is that check really need considering the final filters target 0 failures?
+ // checkSum.Checks = append(checkSum.Checks, &Check{
+ // ID: "OPCT-TBD",
+ // Name: "Kubernetes Conformance [10-openshift-kube-conformance]: replay failures must-pass",
+ // Description: "Tests that failed in the previous run must pass in the replay step (re-run)",
+ // Test: func() CheckResult {
+ // return CheckResult{Name: CheckResultNameSkip, Target: "TBD", Actual: "TODO"}
+ // },
+ // })
+ // checkSum.Checks = append(checkSum.Checks, &Check{
+ // ID: "OPCT-TBD",
+ // Name: "OpenShift Conformance [20-openshift-conformance-validated]: replay failures must-pass",
+ // Description: "Tests that failed in the previous run must pass in the replay step (re-run)",
+ // Test: func() CheckResult {
+ // // for each failed test in the Filter5, check if it passed in the replay.
+ // // return CheckResult{Name: CheckResultNameSkip, Target: "TBD", Actual: "TODO"}
+ // res := CheckResult{
+ // Name: CheckResultNameFail,
+ // Target: "F:<300",
+ // Actual: "N/A",
+ // }
+ // },
+ // })
+
+ // Create docs reference when ID is set
+ for c := range checkSum.Checks {
+ if checkSum.Checks[c].ID != CheckIdEmptyValue {
+ checkSum.Checks[c].Documentation = fmt.Sprintf("%s/#%s", checkSum.baseURL, checkSum.Checks[c].ID)
+ }
+ }
+ return checkSum
+}
+
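+// GetBaseURL returns the base documentation URL used to build per-check references.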
+func (csum *CheckSummary) GetBaseURL() string {
+ return csum.baseURL
+}
+
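+// GetCheckResults classifies the executed checks into pass, fail, warn, and skip buckets,
+// returning one SLOOutput slice per bucket.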
+func (csum *CheckSummary) GetCheckResults() ([]*SLOOutput, []*SLOOutput, []*SLOOutput, []*SLOOutput) {
+ passes := []*SLOOutput{}
+ failures := []*SLOOutput{}
+ warnings := []*SLOOutput{}
+ skips := []*SLOOutput{}
+ for _, check := range csum.Checks {
+ slo := &SLOOutput{
+ ID: check.ID,
+ SLO: check.Name,
+ SLOResult: check.Result.String(),
+ SLITarget: check.Result.Target,
+ SLIActual: check.Result.Actual,
+ Message: check.Result.Message,
+ Documentation: check.Documentation,
+ }
+ switch check.Result.String() {
+ case string(CheckResultNameFail):
+ failures = append(failures, slo)
+ case string(CheckResultNameWarn):
+ warnings = append(warnings, slo)
+ case string(CheckResultNameSkip):
+ skips = append(skips, slo)
+ default:
+ passes = append(passes, slo)
+ }
+ }
+ return passes, failures, warnings, skips
+}
+
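+// Run executes every registered check and stores the result on the check; it currently always returns nil.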
+func (csum *CheckSummary) Run() error {
+ for _, check := range csum.Checks {
+ check.Result = check.Test()
+ }
+ return nil
+}
diff --git a/internal/report/slo_test.go b/internal/report/slo_test.go
new file mode 100644
index 00000000..290da3fc
--- /dev/null
+++ b/internal/report/slo_test.go
@@ -0,0 +1,24 @@
+package report
+
+// TODO(mtulio): create unit tests:
+// - name should not have more than X size
+// - ID must be in the format OPCT-NNN
+// - DOC reference must exists in docs/review/rules.md
+// - returns should be pass or fail
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestNewCheckSummary(t *testing.T) {
+ checks := NewCheckSummary(&ReportData{})
+ assert.NotNil(t, checks)
+
+ // Check IDs and Names must stay within the limits used by the report layout.
+ for _, check := range checks.Checks {
+ assert.Equal(t, true, len(check.ID) <= 9, "Check ID must not be longer than 9 characters: %s", check.ID)
+ assert.Equal(t, true, len(check.Name) <= 88, "Check Name must not be longer than 88 characters: %s", check.Name)
+ }
+}
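+
+// TestCheckIDFormat is a minimal sketch covering the "ID must be in the format
+// OPCT-NNN" item from the TODO list above. The expected pattern (three digits,
+// optionally followed by a single letter, e.g. OPCT-010A) is an assumption and
+// should be adjusted to the project's real conventions.
+func TestCheckIDFormat(t *testing.T) {
+ checks := NewCheckSummary(&ReportData{})
+ for _, check := range checks.Checks {
+ // Checks without an assigned ID are skipped.
+ if check.ID == CheckIdEmptyValue {
+ continue
+ }
+ assert.Regexp(t, `^OPCT-\d{3}[A-Z]?$`, check.ID, "Check ID must be in the format OPCT-NNN: %s", check.ID)
+ }
+}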
diff --git a/internal/report/view/cli.go b/internal/report/view/cli.go
new file mode 100644
index 00000000..3bd8e902
--- /dev/null
+++ b/internal/report/view/cli.go
@@ -0,0 +1,4 @@
+package view
+
+// TODO: implement the CLI view for report.
+// Views should not have any logic.
diff --git a/internal/report/view/html.go b/internal/report/view/html.go
new file mode 100644
index 00000000..607bec86
--- /dev/null
+++ b/internal/report/view/html.go
@@ -0,0 +1,3 @@
+package view
+
+// TODO: implement the HTML view for report.
diff --git a/main.go b/main.go
index 8ce65897..d092ac2a 100644
--- a/main.go
+++ b/main.go
@@ -3,7 +3,7 @@ package main
import (
"embed"
- cmd "github.com/redhat-openshift-ecosystem/provider-certification-tool/cmd"
+ cmd "github.com/redhat-openshift-ecosystem/provider-certification-tool/cmd/opct"
"github.com/redhat-openshift-ecosystem/provider-certification-tool/internal/assets"
)
diff --git a/pkg/cmd/adm/baseline/get.go b/pkg/cmd/adm/baseline/get.go
new file mode 100644
index 00000000..85528b50
--- /dev/null
+++ b/pkg/cmd/adm/baseline/get.go
@@ -0,0 +1,108 @@
+package baseline
+
+import (
+ "encoding/json"
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "github.com/redhat-openshift-ecosystem/provider-certification-tool/internal/report"
+ reb "github.com/redhat-openshift-ecosystem/provider-certification-tool/internal/report/baseline"
+ log "github.com/sirupsen/logrus"
+ "github.com/spf13/cobra"
+)
+
+type baselineGetInput struct {
+ platform string
+ release string
+ name string
+ dump bool
+ output string
+}
+
+var baselineGetArgs baselineGetInput
+var baselineGetCmd = &cobra.Command{
+ Use: "get",
+ Example: "opct adm baseline get --release 4.15 --platform None",
+ Short: "Get a baseline result to be used in the review process.",
+ Long: `Get a baseline result to be used in the review process.
+ Baseline results are used to compare the results of the validation tests.
+ Fetching a baseline manually is useful when 'opct report' will run without internet access;
+ when internet access is available, 'opct report' gathers the correct baseline automatically and this command is not needed.`,
+ Run: baselineGetCmdRun,
+}
+
+func init() {
+ baselineGetCmd.Flags().StringVar(&baselineGetArgs.platform, "platform", "", "Platform type of the baseline. Used together with --release. Example: External")
+ baselineGetCmd.Flags().StringVar(&baselineGetArgs.release, "release", "", "OpenShift release of the baseline, retrieving the latest summary. Used together with --platform. Example: 4.15")
+ baselineGetCmd.Flags().StringVarP(&baselineGetArgs.name, "name", "n", "", "Retrieve a baseline result by name. See 'opct adm baseline list'")
+ baselineGetCmd.Flags().BoolVar(&baselineGetArgs.dump, "dump", false, "Enable dump the raw data to stdout.")
+ baselineGetCmd.Flags().StringVarP(&baselineGetArgs.output, "output", "o", "", "Save the baseline to output file.")
+}
+
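+// baselineGetCmdRun retrieves a baseline summary either by name or by the latest
+// release/platform pair, optionally dumping it to stdout or saving it to a file.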
+func baselineGetCmdRun(cmd *cobra.Command, args []string) {
+ if (baselineGetArgs.platform == "" && baselineGetArgs.release == "") && baselineGetArgs.name == "" {
+ if baselineGetArgs.platform == "" && baselineGetArgs.release == "" {
+ log.Error("argument --platform or --release must be set when --name is not used")
+ return
+ }
+ log.Error("argument --name must be set. Check available baseline with 'opct adm baseline list'")
+ return
+ }
+
+ var err error
+ var data []byte
+ rb := reb.NewBaselineReportSummary()
+ if baselineGetArgs.name != "" {
+ log.Infof("Getting baseline result by name: %s", baselineGetArgs.name)
+ data, err = rb.GetSummaryByName(baselineGetArgs.name)
+ } else {
+ log.Infof("Getting latest baseline result by release and platform: %s/%s", baselineGetArgs.release, baselineGetArgs.platform)
+ if err := rb.GetLatestSummaryByPlatform(baselineGetArgs.release, baselineGetArgs.platform); err != nil {
+ log.Errorf("error getting latest summary by platform: %v", err)
+ return
+ }
+ data = rb.GetBuffer().GetRawData()
+ }
+
+ if err != nil {
+ log.Fatalf("Failed to read result: %v", err)
+ }
+
+ // deserialize the data to report.ReportData
+ re := &report.ReportData{}
+ err = json.Unmarshal(data, &re)
+ if err != nil {
+ log.Errorf("failed to unmarshal baseline data: %v", err)
+ return
+ }
+ log.Infof("Baseline result processed from archive: %v", filepath.Base(re.Summary.Tests.Archive))
+
+ if baselineGetArgs.dump {
+ prettyJSON, err := json.MarshalIndent(re, "", " ")
+ if err != nil {
+ log.Errorf("Failed to encode data to pretty JSON: %v", err)
+ }
+ if err == nil && baselineGetArgs.output != "" {
+ err = os.WriteFile(baselineGetArgs.output, prettyJSON, 0644)
+ if err != nil {
+ log.Errorf("Failed to write pretty JSON to output file: %v", err)
+ } else {
+ log.Infof("Pretty JSON saved to %s\n", baselineGetArgs.output)
+ }
+ } else {
+ fmt.Println(string(prettyJSON))
+ }
+ }
+
+ // Temporary: extract plugin failures from the baseline to demonstrate the API.
+ bd := reb.BaselineData{}
+ bd.SetRawData(data)
+ pluginName := "20-openshift-conformance-validated"
+ failures, _ := bd.GetPriorityFailuresFromPlugin(pluginName)
+
+ fmt.Println(">> Example serializing and extracting plugin failures for ", pluginName)
+ for f := range failures {
+ fmt.Printf("[%d]: %s\n", f, failures[f])
+ }
+}
diff --git a/pkg/cmd/adm/baseline/indexer.go b/pkg/cmd/adm/baseline/indexer.go
new file mode 100644
index 00000000..9254f642
--- /dev/null
+++ b/pkg/cmd/adm/baseline/indexer.go
@@ -0,0 +1,40 @@
+package baseline
+
+import (
+ "os"
+
+ log "github.com/sirupsen/logrus"
+
+ reb "github.com/redhat-openshift-ecosystem/provider-certification-tool/internal/report/baseline"
+ "github.com/spf13/cobra"
+)
+
+type baselineIndexerInput struct {
+ force bool
+}
+
+var baselineIndexerArgs baselineIndexerInput
+var baselineIndexerCmd = &cobra.Command{
+ Use: "indexer",
+ Example: "opct adm baseline indexer",
+ Short: "(Administrative usage) Rebuild the indexer for baseline in the backend.",
+ Run: baselineIndexerCmdRun,
+}
+
+func init() {
+ baselineIndexerCmd.Flags().BoolVar(&baselineIndexerArgs.force, "force", false, "Force rebuilding the baseline index.")
+}
+
+func baselineIndexerCmdRun(cmd *cobra.Command, args []string) {
+ // Simple 'check' for non-authorized users; the command will fail later as the
+ // user does not have the required AWS permissions. Flags are parsed only at
+ // execution time, so the gate must live here rather than in init().
+ if baselineIndexerArgs.force && os.Getenv("OPCT_ENABLE_ADM_BASELINE") != "1" {
+ log.Fatal("You are not allowed to execute this command.")
+ }
+ rb := reb.NewBaselineReportSummary()
+ err := rb.CreateBaselineIndex()
+ if err != nil {
+ log.Fatalf("Failed to read index from bucket: %v", err)
+ }
+ log.Info("Indexer has been updated.")
+}
diff --git a/pkg/cmd/adm/baseline/list.go b/pkg/cmd/adm/baseline/list.go
new file mode 100644
index 00000000..36677890
--- /dev/null
+++ b/pkg/cmd/adm/baseline/list.go
@@ -0,0 +1,92 @@
+package baseline
+
+import (
+ "log"
+ "os"
+ "sort"
+
+ reb "github.com/redhat-openshift-ecosystem/provider-certification-tool/internal/report/baseline"
+
+ table "github.com/jedib0t/go-pretty/v6/table"
+ "github.com/spf13/cobra"
+)
+
+type baselineListInput struct {
+ all bool
+}
+
+var baselineListArgs baselineListInput
+var baselineListCmd = &cobra.Command{
+ Use: "list",
+ Example: "opct adm baseline list",
+ Short: "List all available baseline results by OpenShift version, provider and platform type.",
+ Run: baselineListCmdRun,
+}
+
+func init() {
+ baselineListCmd.Flags().BoolVar(&baselineListArgs.all, "all", false, "List all results, instead of latest.")
+}
+
+func baselineListCmdRun(cmd *cobra.Command, args []string) {
+ // Flags are parsed only at execution time, so gate the --all option here
+ // rather than in init(); the command will also fail later when the user
+ // does not have the required permissions.
+ if baselineListArgs.all && os.Getenv("OPCT_ENABLE_ADM_BASELINE") != "1" {
+ log.Fatal("You are not allowed to execute this command.")
+ }
+ rb := reb.NewBaselineReportSummary()
+ index, err := rb.ReadReportSummaryIndexFromAPI()
+ if err != nil {
+ log.Fatalf("Failed to read index from bucket: %v", err)
+ }
+
+ tb := table.NewWriter()
+ tb.SetOutputMirror(os.Stdout)
+ // tbProv.SetStyle(table.StyleLight)
+ // tbProv.SetTitle(title)
+ if !baselineListArgs.all {
+ tb.AppendHeader(table.Row{"ID", "Type", "Release", "PlatformType", "Name"})
+ latestKeys := make([]string, 0, len(index.Latest))
+ for lts := range index.Latest {
+ latestKeys = append(latestKeys, lts)
+ }
+ sort.Strings(latestKeys)
+ for _, latest := range latestKeys {
+ tb.AppendRow(
+ table.Row{
+ latest,
+ "latest",
+ index.Latest[latest].OpenShiftRelease,
+ index.Latest[latest].PlatformType,
+ index.Latest[latest].Name,
+ })
+ }
+ tb.Render()
+ return
+ }
+
+ tb.AppendHeader(table.Row{"Latest", "Release", "Platform", "Provider", "Name", "Version"})
+ for i := range index.Results {
+ res := index.Results[i]
+ latest := ""
+ if res.IsLatest {
+ latest = "*"
+ }
+ provider := ""
+ if p, ok := res.Tags["providerName"]; ok {
+ provider = p.(string)
+ }
+ version := ""
+ if p, ok := res.Tags["openshiftVersion"]; ok {
+ version = p.(string)
+ }
+ tb.AppendRow(
+ table.Row{
+ latest,
+ res.OpenShiftRelease,
+ res.PlatformType,
+ provider,
+ res.Name,
+ version,
+ })
+ }
+ tb.Render()
+}
diff --git a/pkg/cmd/adm/baseline/publish.go b/pkg/cmd/adm/baseline/publish.go
new file mode 100644
index 00000000..3181b834
--- /dev/null
+++ b/pkg/cmd/adm/baseline/publish.go
@@ -0,0 +1,165 @@
+package baseline
+
+// TODO move/migrate 'opct exp publish' to this command
+
+import (
+ "encoding/json"
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "github.com/redhat-openshift-ecosystem/provider-certification-tool/internal/opct/metrics"
+ "github.com/redhat-openshift-ecosystem/provider-certification-tool/internal/opct/summary"
+ "github.com/redhat-openshift-ecosystem/provider-certification-tool/internal/report"
+ "github.com/redhat-openshift-ecosystem/provider-certification-tool/internal/report/baseline"
+ log "github.com/sirupsen/logrus"
+ "github.com/spf13/cobra"
+)
+
+type baselinePublishInput struct {
+ forceLatest bool
+ verbose bool
+ dryRun bool
+}
+
+var baselinePublishArgs baselinePublishInput
+var baselinePublishCmd = &cobra.Command{
+ Use: "publish",
+ Example: "opct adm baseline publish <archive.tar.gz>",
+ Short: "Publish a baseline result to be used in the review process.",
+ Long: `Publish a baseline result to be used in the review process.
+ Baseline results are used to compare the results of the validation tests.
+ Publishing a baseline result is useful when you want to share the baseline with other users.`,
+ Run: baselinePublishCmdRun,
+}
+
+func init() {
+ baselinePublishCmd.Flags().BoolVarP(
+ &baselinePublishArgs.forceLatest, "force-latest", "f", false,
+ "Name of the baseline to be published.",
+ )
+ baselinePublishCmd.Flags().BoolVarP(
+ &baselinePublishArgs.verbose, "verbose", "v", false,
+ "Show test details of test failures",
+ )
+ baselinePublishCmd.Flags().BoolVar(
+ &baselinePublishArgs.dryRun, "dry-run", false,
+ "Process the data and skip publishing the baseline.",
+ )
+}
+
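+// baselinePublishCmdRun processes a result archive like 'opct report', rejects the
+// baseline when any of the mandatory checks fail, and uploads the archive and its
+// summary metadata to the baseline storage.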
+func baselinePublishCmdRun(cmd *cobra.Command, args []string) {
+ if baselinePublishArgs.forceLatest {
+ log.Warn("argument --force-latest must be set. Check available baseline with 'opct adm baseline list'")
+ }
+ // TODOs
+ // - check if the baseline exists
+ // - read and process as regular 'report' command
+ // - check sanity: counters should be within acceptable ranges, etc.
+ // - extract the data to be published, building the name of the file and attributes.
+ if len(args) == 0 {
+ log.Fatalf("result archive not found: %v", args)
+ }
+ archive := args[0]
+ if _, err := os.Stat(archive); os.IsNotExist(err) {
+ log.Fatalf("archive not found: %v", archive)
+ }
+
+ fmt.Println()
+ log.Infof("Processing baseline result for %s", filepath.Base(archive))
+
+ timers := metrics.NewTimers()
+ timers.Add("report-total")
+
+ saveDirectory := "/tmp/opct-tmp-results-" + filepath.Base(archive)
+ err := os.Setenv("OPCT_DISABLE_FILTER_BASELINE", "1")
+ if err != nil {
+ log.Fatalf("error setting variable OPCT_DISABLE_FILTER_BASELINE to skip baseline in the filter pipeline: %v", err)
+ }
+ cs := summary.NewConsolidatedSummary(&summary.ConsolidatedSummaryInput{
+ Verbose: baselinePublishArgs.verbose,
+ Timers: timers,
+ Archive: archive,
+ SaveTo: saveDirectory,
+ })
+
+ log.Debug("Processing results")
+ if err := cs.Process(); err != nil {
+ log.Errorf("error processing results: %v", err)
+ }
+
+ re := report.NewReportData(false)
+ log.Debug("Processing report")
+ if err := re.Populate(cs); err != nil {
+ log.Errorf("error populating report: %v", err)
+ }
+
+ // TODO: ConsolidatedSummary should be migrated to SaveResults
+ if err := cs.SaveResults(saveDirectory); err != nil {
+ log.Errorf("error saving consolidated summary results: %v", err)
+ }
+ timers.Add("report-total")
+ if err := re.SaveResults(saveDirectory); err != nil {
+ log.Errorf("error saving report results: %v", err)
+ }
+
+ // TODO: move to config, or allow to add skips.
+ // Reject publish when those checks are failing:
+ // OPCT-001 : kube conformance failing
+ // OPCT-004 : too many tests failed on openshift conformance
+ // OPCT-003 : collector must be able to collect the results
+ // OPCT-007 (ERR missing must-gather): must-gather is missing
+ // OPCT-022: potential runtime failure
+ // TODO/Validate if need:
+ // OPCT-023*: Test sanity. Enable it when CI pipeline (periodic) is validated
+ // - etcd very slow
+ rejected := false
+ for _, check := range re.Checks.Fail {
+ if check.ID == report.CheckID001 ||
+ check.ID == report.CheckID004 ||
+ check.ID == report.CheckID005 ||
+ check.ID == report.CheckID022 {
+ errMessage := fmt.Sprintf("%q: want=%q, got=%q", check.SLO, check.SLITarget, check.SLIActual)
+ if check.Message != "" {
+ errMessage = fmt.Sprintf("%s: message=%q", errMessage, check.Message)
+ }
+ log.Errorf("rejecting the baseline, check id %s is in failed state: %s", check.ID, errMessage)
+ rejected = true
+ continue
+ }
+ }
+ if rejected {
+ log.Fatal("baseline rejected, see the logs for more details.")
+ return
+ }
+
+ checksStatus := fmt.Sprintf("pass(%d), fail(%d), warn(%d), skip(%d)", len(re.Checks.Pass), len(re.Checks.Fail), len(re.Checks.Warn), len(re.Checks.Skip))
+ log.Infof("Baseline checks are OK, proceeding to publish the baseline: %s", checksStatus)
+
+ // Prepare the baseline to publish:
+ // - build the metadata from the original report (setup.api)
+ // - upload the artifact to /uploads
+ // - upload the summary to /api/v0/result/summary
+ brs := baseline.NewBaselineReportSummary()
+ metaBytes, err := json.Marshal(re.Setup.API)
+ if err != nil {
+ log.Errorf("error marshaling metadata: %v", err)
+ }
+
+ var meta map[string]string
+ err = json.Unmarshal(metaBytes, &meta)
+ if err != nil {
+ log.Errorf("error unmarshalling metadata: %v", err)
+ }
+ log.Infof("Baseline metadata: %v", meta)
+ log.Infof("Uploading baseline to storage")
+ // TODO: check if the baseline already exists. It should check the unique
+ // id other than the bucket name. The UUID is a good candidate.
+ err = brs.UploadBaseline(archive, saveDirectory, meta, baselinePublishArgs.dryRun)
+ if err != nil {
+ log.Fatalf("error uploading baseline: %v", err)
+ }
+
+ log.Infof("Success! Baseline result processed from archive: %v", filepath.Base(re.Summary.Tests.Archive))
+ log.Infof("You must re-index the storage to serve in the result API. See 'opct adm baseline (indexer|list)'")
+}
diff --git a/pkg/cmd/adm/baseline/root.go b/pkg/cmd/adm/baseline/root.go
new file mode 100644
index 00000000..5c20ab0b
--- /dev/null
+++ b/pkg/cmd/adm/baseline/root.go
@@ -0,0 +1,38 @@
+package baseline
+
+import (
+ log "github.com/sirupsen/logrus"
+ "github.com/spf13/cobra"
+)
+
+var baselineCmd = &cobra.Command{
+ Use: "baseline",
+ Short: "Administrative commands to manipulate baseline results.",
+ Long: `Administrative commands to manipulate baseline results.
+ Baseline results are used to compare the results of the validation tests.
+ Those are CI results from reference installations, compared against the results
+ of custom executions to infer persistent failures, helping to isolate:
+ - Flaky tests
+ - Permanent failures
+ - Test environment issues`,
+ Run: func(cmd *cobra.Command, args []string) {
+ if len(args) == 0 {
+ if err := cmd.Help(); err != nil {
+ log.Errorf("error loading help(): %v", err)
+ }
+ }
+ },
+ Args: cobra.ExactArgs(1),
+}
+
+func init() {
+ baselineCmd.AddCommand(baselineListCmd)
+ baselineCmd.AddCommand(baselineGetCmd)
+ baselineCmd.AddCommand(baselineIndexerCmd)
+ baselineCmd.AddCommand(baselinePublishCmd)
+}
+
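+// NewCmdBaseline returns the 'adm baseline' command tree (list, get, indexer, publish).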
+func NewCmdBaseline() *cobra.Command {
+ return baselineCmd
+}
diff --git a/pkg/cmd/adm/root.go b/pkg/cmd/adm/root.go
index 07cf804d..537049b4 100644
--- a/pkg/cmd/adm/root.go
+++ b/pkg/cmd/adm/root.go
@@ -1,7 +1,7 @@
package adm
import (
- "os"
+ "github.com/redhat-openshift-ecosystem/provider-certification-tool/pkg/cmd/adm/baseline"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
@@ -12,11 +12,9 @@ var admCmd = &cobra.Command{
Short: "Administrative commands.",
Run: func(cmd *cobra.Command, args []string) {
if len(args) == 0 {
- err := cmd.Help()
- if err != nil {
+ if err := cmd.Help(); err != nil {
log.Errorf("error loading help(): %v", err)
}
- os.Exit(0)
}
},
}
@@ -24,6 +22,8 @@ var admCmd = &cobra.Command{
func init() {
admCmd.AddCommand(parseMetricsCmd)
admCmd.AddCommand(parseEtcdLogsCmd)
+ admCmd.AddCommand(baseline.NewCmdBaseline())
+ admCmd.AddCommand(setupNodeCmd)
}
func NewCmdAdm() *cobra.Command {
diff --git a/pkg/cmd/adm/setupNode.go b/pkg/cmd/adm/setupNode.go
new file mode 100644
index 00000000..57ce8d69
--- /dev/null
+++ b/pkg/cmd/adm/setupNode.go
@@ -0,0 +1,114 @@
+package adm
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/redhat-openshift-ecosystem/provider-certification-tool/pkg/client"
+ log "github.com/sirupsen/logrus"
+ "github.com/spf13/cobra"
+ v1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/client-go/kubernetes"
+)
+
+type setupNodeInput struct {
+ nodeName string
+ yes bool
+}
+
+var setupNodeArgs setupNodeInput
+var setupNodeCmd = &cobra.Command{
+ Use: "setup-node",
+ Example: "opct adm setup-node",
+ Short: "Set up the node for the validation process.",
+ Run: setupNodeRun,
+}
+
+func init() {
+ setupNodeCmd.Flags().BoolVarP(&setupNodeArgs.yes, "yes", "y", false, "Assume yes; do not prompt before applying changes to the node")
+ setupNodeCmd.Flags().StringVar(&setupNodeArgs.nodeName, "node", "", "Node name to receive the required label and taints")
+}
+
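+// discoverNode selects a worker node that is not running Prometheus to host the
+// dedicated test environment, falling back to the first worker node when every
+// worker runs Prometheus.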
+func discoverNode(clientset kubernetes.Interface) (string, error) {
+ // list all pods with label prometheus=k8s in namespace openshift-monitoring
+ pods, err := clientset.CoreV1().Pods("openshift-monitoring").List(context.TODO(), metav1.ListOptions{
+ LabelSelector: "prometheus=k8s",
+ })
+ if err != nil {
+ log.Fatalf("Failed to list Prometheus pods on namespace openshift-monitoring: %v", err)
+ }
+
+ // get the node running on those pods
+ if len(pods.Items) < 1 {
+ log.Fatalf("Expected at least 1 Prometheus pod, got %d. Use --name to manually set the node.", len(pods.Items))
+ }
+ nodesRunningPrometheus := map[string]struct{}{}
+ for _, pod := range pods.Items {
+ log.Infof("Prometheus pod %s is running on node %s, adding to skip list...", pod.Name, pod.Spec.NodeName)
+ nodesRunningPrometheus[pod.Spec.NodeName] = struct{}{}
+ }
+
+ // list all nodes with label node-role.kubernetes.io/worker=''
+ nodes, err := clientset.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{
+ LabelSelector: "node-role.kubernetes.io/worker=",
+ })
+ if err != nil {
+ log.Fatalf("Failed to list nodes: %v", err)
+ }
+ if len(nodes.Items) == 0 {
+ log.Fatal("No worker nodes found. Use --node to manually set the node.")
+ }
+ for _, node := range nodes.Items {
+ if _, ok := nodesRunningPrometheus[node.Name]; !ok {
+ return node.Name, nil
+ }
+ }
+ forceNode := nodes.Items[0].Name
+ log.Warnf("No node without Prometheus available to run the validation process, using %s", forceNode)
+ return forceNode, nil
+}
+
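+// setupNodeRun applies the node-role.kubernetes.io/tests label and NoSchedule taint
+// to the selected node (discovering one when --node is not set), prompting for
+// confirmation unless --yes is set.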
+func setupNodeRun(cmd *cobra.Command, args []string) {
+ kclient, _, err := client.CreateClients()
+ if err != nil {
+ log.Fatalf("Failed to create Kubernetes client: %v", err)
+ }
+
+ if setupNodeArgs.nodeName == "" {
+ setupNodeArgs.nodeName, err = discoverNode(kclient)
+ if err != nil {
+ log.Fatalf("Failed to discover node: %v", err)
+ }
+ }
+ log.Infof("Setting up node %s...", setupNodeArgs.nodeName)
+
+ node, err := kclient.CoreV1().Nodes().Get(context.TODO(), setupNodeArgs.nodeName, metav1.GetOptions{})
+ if err != nil {
+ log.Fatalf("Failed to get node %s: %v", setupNodeArgs.nodeName, err)
+ }
+
+ // Ask if the user wants to proceed with applying changes to the node
+ if !setupNodeArgs.yes {
+ fmt.Printf("Are you sure you want to apply changes to node %s? (y/n): ", setupNodeArgs.nodeName)
+ var response string
+ _, err := fmt.Scanln(&response)
+ if err != nil {
+ log.Fatalf("Failed to read user response: %v", err)
+ }
+ if response != "y" && response != "Y" {
+ fmt.Println("Aborted.")
+ return
+ }
+ }
+
+ // Add the label dedicated to the validation environment.
+ node.ObjectMeta.Labels["node-role.kubernetes.io/tests"] = ""
+ node.Spec.Taints = append(node.Spec.Taints, v1.Taint{
+ Key: "node-role.kubernetes.io/tests",
+ Value: "",
+ Effect: v1.TaintEffectNoSchedule,
+ })
+ // Update the node with the new label and taint.
+ _, err = kclient.CoreV1().Nodes().Update(context.TODO(), node, metav1.UpdateOptions{})
+ if err != nil {
+ log.Fatalf("Failed to update node labels: %v", err)
+ }
+}
diff --git a/pkg/cmd/report/report.go b/pkg/cmd/report/report.go
new file mode 100644
index 00000000..67294330
--- /dev/null
+++ b/pkg/cmd/report/report.go
@@ -0,0 +1,832 @@
+package report
+
+import (
+ "fmt"
+ "net/http"
+ "os"
+ "path/filepath"
+ "sort"
+
+ "github.com/pkg/errors"
+ "github.com/spf13/cobra"
+
+ "text/tabwriter"
+
+ table "github.com/jedib0t/go-pretty/v6/table"
+ tabletext "github.com/jedib0t/go-pretty/v6/text"
+ "github.com/redhat-openshift-ecosystem/provider-certification-tool/internal/opct/metrics"
+ "github.com/redhat-openshift-ecosystem/provider-certification-tool/internal/opct/plugin"
+ "github.com/redhat-openshift-ecosystem/provider-certification-tool/internal/opct/summary"
+ "github.com/redhat-openshift-ecosystem/provider-certification-tool/internal/report"
+ log "github.com/sirupsen/logrus"
+ "github.com/vmware-tanzu/sonobuoy/pkg/errlog"
+)
+
+type Input struct {
+ archive string
+ archiveBase string
+ saveTo string
+ serverAddress string
+ serverSkip bool
+ embedData bool
+ saveOnly bool
+ verbose bool
+ json bool
+ skipBaselineAPI bool
+ force bool
+}
+
+var iconsCollor = map[string]string{
+ "pass": "✅",
+ "passed": "✅",
+ "fail": "❌",
+ "failed": "❌",
+ "warn": "⚠️", // there is a bug, the emoji is rendered breaking the table
+ "alert": "🚨",
+}
+
+var iconsBW = map[string]string{
+ "pass": "✔",
+ "passed": "✔",
+ "fail": "✖",
+ "failed": "✖",
+ "warn": "⚠",
+ "alert": "⚠",
+}
+
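+// NewCmdReport builds the 'report' command, which processes a result archive, renders
+// the report in the CLI, and optionally saves the data and serves the HTML report over HTTP.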
+func NewCmdReport() *cobra.Command {
+ data := Input{}
+ cmd := &cobra.Command{
+ Use: "report archive.tar.gz",
+ Short: "Create a report from results.",
+ Run: func(cmd *cobra.Command, args []string) {
+ data.archive = args[0]
+ checkFlags(&data)
+ if err := processResult(&data); err != nil {
+ errlog.LogError(errors.Wrapf(err, "could not process archive: %v", args[0]))
+ os.Exit(1)
+ }
+ },
+ Args: cobra.ExactArgs(1),
+ }
+
+ // TODO: Baseline/Diff from CLI must be removed in v0.6+ once the
+ // report API, introduced in v0.5, is fully validated.
+ // The report API is a serverless service storing CI results in S3 and serving
+ // summarized information through an HTTP endpoint (CloudFront). It is consumed
+ // by the filter pipeline while processing the report, so the user does not need
+ // an additional step to download a specific baseline archive.
+ cmd.Flags().StringVarP(
+ &data.archiveBase, "baseline", "b", "",
+ "[DEPRECATED] Baseline result archive file. Example: -b file.tar.gz",
+ )
+ cmd.Flags().StringVarP(
+ &data.archiveBase, "diff", "d", "",
+ "[DEPRECATED] Diff results from a baseline archive file. Example: --diff file.tar.gz",
+ )
+
+ cmd.Flags().StringVarP(
+ &data.saveTo, "save-to", "s", "",
+ "Extract and Save Results to disk. Example: -s ./results",
+ )
+ cmd.Flags().StringVar(
+ &data.serverAddress, "server-address", "0.0.0.0:9090",
+ "HTTP server address to serve files when --save-to is used. Example: --server-address 0.0.0.0:9090",
+ )
+ cmd.Flags().BoolVar(
+ &data.serverSkip, "skip-server", false,
+ "HTTP server address to serve files when --save-to is used. Example: --server-address 0.0.0.0:9090",
+ )
+ cmd.Flags().BoolVar(
+ &data.embedData, "embed-data", false,
+ "Force to embed the data into HTML report, allwoing the use of file protocol/CORS in the browser.",
+ )
+ cmd.Flags().BoolVar(
+ &data.saveOnly, "save-only", false,
+ "Save data and exit. Requires --save-to. Example: -s ./results --save-only",
+ )
+ cmd.Flags().BoolVarP(
+ &data.verbose, "verbose", "v", false,
+ "Show test details of test failures",
+ )
+ cmd.Flags().BoolVar(
+ &data.json, "json", false,
+ "Show report in json format",
+ )
+ cmd.Flags().BoolVar(
+ &data.skipBaselineAPI, "skip-baseline-api", false,
+ "Set to disable the BsaelineAPI call to get the baseline results injected in the failure filter pipeline.",
+ )
+ cmd.Flags().BoolVarP(
+ &data.force, "force", "f", false,
+ "Force to continue the execution, skipping deprecation warnings.",
+ )
+ return cmd
+}
+
+// checkFlags checks the flags and set the default values.
+func checkFlags(input *Input) {
+ if input.embedData {
+ log.Warnf("--embed-data is set to true, forcing --server-skip to true.")
+ input.serverSkip = true
+ }
+}
+
+// processResult reads the artifacts and show it as an report format.
+func processResult(input *Input) error {
+ log.Println("Creating report...")
+ timers := metrics.NewTimers()
+ timers.Add("report-total")
+
+ if input.skipBaselineAPI {
+ log.Warnf("THIS IS NOT RECOMMENDED: detected flag --skip-baseline-api, setting OPCT_DISABLE_FILTER_BASELINE=1 to skip the failure filter in the pipeline")
+ os.Setenv("OPCT_DISABLE_FILTER_BASELINE", "1")
+ }
+
+ // Show deprecation warnings when using --baseline.
+ if input.archiveBase != "" {
+ log.Warnf(`DEPRECATED: --baseline/--diff flag should not be used and will be removed soon.
+Baseline are now discovered and applied to the filter pipeline automatically.
+Please remove the --baseline/--diff flags from the command.
+Additionally, if you want to skip the BaselineAPI filter, use --skip-baseline-api=true.`)
+ if !input.force {
+ log.Warnf("Aborting execution: --force flag is not set, set it if you want continue with warnings.")
+ os.Exit(1)
+ }
+ }
+
+ cs := summary.NewConsolidatedSummary(&summary.ConsolidatedSummaryInput{
+ Verbose: input.verbose,
+ Timers: timers,
+ Archive: input.archive,
+ ArchiveBase: input.archiveBase,
+ SaveTo: input.saveTo,
+ })
+
+ log.Debug("Processing results")
+ if err := cs.Process(); err != nil {
+ return fmt.Errorf("error processing results: %v", err)
+ }
+
+ re := report.NewReportData(input.embedData)
+ log.Debug("Processing report")
+ if err := re.Populate(cs); err != nil {
+ return fmt.Errorf("error populating report: %v", err)
+ }
+
+ // show report in CLI
+ if err := showReportCLI(re, input.verbose); err != nil {
+ return fmt.Errorf("error showing aggregated summary: %v", err)
+ }
+
+ if input.saveTo != "" {
+ // TODO: ConsolidatedSummary should be migrated to SaveResults
+ if err := cs.SaveResults(input.saveTo); err != nil {
+ return fmt.Errorf("error saving consolidated summary results: %v", err)
+ }
+ timers.Add("report-total")
+ if err := re.SaveResults(input.saveTo); err != nil {
+ return fmt.Errorf("error saving report results: %v", err)
+ }
+ if input.saveOnly {
+ os.Exit(0)
+ }
+ }
+
+ // start http server to serve static report
+ if input.saveTo != "" && !input.serverSkip {
+ fs := http.FileServer(http.Dir(input.saveTo))
+ // TODO: redirect home to the opct-report.html (or rename to index.html) without
+ // affecting the fileserver.
+ http.Handle("/", fs)
+
+ log.Infof("The report web UI can be accessed at http://%s", input.serverAddress)
+ if err := http.ListenAndServe(input.serverAddress, nil); err != nil {
+ log.Fatalf("Unable to start the report server at address %s: %v", input.serverAddress, err)
+ }
+ }
+ if input.saveTo != "" && input.serverSkip {
+ log.Infof("The report server is not enabled (--server-skip=true)., you'll need to navigate it locallly")
+ log.Infof("To read the report open your browser and navigate to the path file://%s", input.saveTo)
+ log.Infof("To get started open the report file://%s/index.html.", input.saveTo)
+ }
+
+ return nil
+}
+
+func showReportCLI(report *report.ReportData, verbose bool) error {
+ if err := showReportAggregatedSummary(report); err != nil {
+ return fmt.Errorf("error showing aggregated summary: %v", err)
+ }
+ if err := showProcessedSummary(report); err != nil {
+ return fmt.Errorf("error showing processed summary: %v", err)
+ }
+ if err := showErrorDetails(report, verbose); err != nil {
+ return fmt.Errorf("error showing error details: %v", err)
+ }
+ if err := showChecks(report); err != nil {
+ return fmt.Errorf("error showing checks: %v", err)
+ }
+ return nil
+}
+
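+// showReportAggregatedSummary prints the cluster, plugin, and health summary tables,
+// adding a baseline column when a baseline archive was processed.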
+func showReportAggregatedSummary(re *report.ReportData) error {
+ baselineProcessed := re.Baseline != nil
+
+ // Using go-table
+ archive := filepath.Base(re.Summary.Tests.Archive)
+ if re.Baseline != nil {
+ archive = fmt.Sprintf("%s\n >> Diff from: %s", archive, filepath.Base(re.Summary.Tests.ArchiveDiff))
+ }
+ title := "OPCT Summary\n > Archive: " + archive
+
+ // standalone results (provider)
+ tbProv := table.NewWriter()
+ tbProv.SetOutputMirror(os.Stdout)
+ tbProv.SetStyle(table.StyleLight)
+ tbProv.SetTitle(title)
+ tbProv.AppendHeader(table.Row{"", "Provider"})
+
+ // baseline results (provider+baseline)
+ tbPBas := table.NewWriter()
+ tbPBas.SetOutputMirror(os.Stdout)
+ tbPBas.SetStyle(table.StyleLight)
+ tbPBas.SetTitle(title)
+ tbPBas.AppendHeader(table.Row{"", "Provider", "Baseline"})
+ rowsPBas := []table.Row{}
+
+ // Section: Cluster configuration
+ joinPlatformType := func(infra *report.ReportInfra) string {
+ tp := infra.PlatformType
+ if tp == "External" {
+ tp = fmt.Sprintf("%s (%s)", tp, infra.PlatformName)
+ }
+ return tp
+ }
+ rowsProv := []table.Row{{"Infrastructure:", ""}}
+ rowsProv = append(rowsProv, table.Row{" PlatformType", joinPlatformType(re.Provider.Infra)})
+ rowsProv = append(rowsProv, table.Row{" Name", re.Provider.Infra.Name})
+ rowsProv = append(rowsProv, table.Row{" ClusterID", re.Provider.Version.OpenShift.ClusterID})
+ rowsProv = append(rowsProv, table.Row{" Topology", re.Provider.Infra.Topology})
+ rowsProv = append(rowsProv, table.Row{" ControlPlaneTopology", re.Provider.Infra.ControlPlaneTopology})
+ rowsProv = append(rowsProv, table.Row{" API Server URL", re.Provider.Infra.APIServerURL})
+ rowsProv = append(rowsProv, table.Row{" API Server URL (internal)", re.Provider.Infra.APIServerInternalURL})
+ rowsProv = append(rowsProv, table.Row{" NetworkType", re.Provider.Infra.NetworkType})
+ tbProv.AppendRows(rowsProv)
+ tbProv.AppendSeparator()
+ if baselineProcessed {
+ rowsPBas = []table.Row{{"Infrastructure:", "", ""}}
+ rowsPBas = append(rowsPBas, table.Row{" PlatformType", joinPlatformType(re.Provider.Infra), joinPlatformType(re.Baseline.Infra)})
+ rowsPBas = append(rowsPBas, table.Row{" Name", re.Provider.Infra.Name, re.Baseline.Infra.Name})
+ rowsPBas = append(rowsPBas, table.Row{" Topology", re.Provider.Infra.Topology, re.Baseline.Infra.Topology})
+ rowsPBas = append(rowsPBas, table.Row{" ControlPlaneTopology", re.Provider.Infra.ControlPlaneTopology, re.Baseline.Infra.ControlPlaneTopology})
+ rowsPBas = append(rowsPBas, table.Row{" API Server URL", re.Provider.Infra.APIServerURL, re.Baseline.Infra.APIServerURL})
+ rowsPBas = append(rowsPBas, table.Row{" API Server URL (internal)", re.Provider.Infra.APIServerInternalURL, re.Baseline.Infra.APIServerInternalURL})
+ rowsPBas = append(rowsPBas, table.Row{" NetworkType", re.Baseline.Infra.NetworkType})
+ tbPBas.AppendRows(rowsPBas)
+ tbPBas.AppendSeparator()
+ }
+
+ // Section: Cluster state
+ rowsProv = []table.Row{{"Cluster Version:", ""}}
+ rowsProv = append(rowsProv, table.Row{" Kubernetes", re.Provider.Version.Kubernetes})
+ rowsProv = append(rowsProv, table.Row{" OpenShift", re.Provider.Version.OpenShift.Desired})
+ rowsProv = append(rowsProv, table.Row{" Channel", re.Provider.Version.OpenShift.Channel})
+ tbProv.AppendRows(rowsProv)
+ tbProv.AppendSeparator()
+ rowsProv = []table.Row{{"Cluster Status: ", re.Provider.Version.OpenShift.OverallStatus}}
+ if re.Provider.Version.OpenShift.OverallStatus != "Available" {
+ rowsProv = append(rowsProv, table.Row{" Reason", re.Provider.Version.OpenShift.OverallStatusReason})
+ rowsProv = append(rowsProv, table.Row{" Message", re.Provider.Version.OpenShift.OverallStatusMessage})
+ }
+ rowsProv = append(rowsProv, table.Row{"Cluster Status/Conditions:", ""})
+ rowsProv = append(rowsProv, table.Row{" Available", re.Provider.Version.OpenShift.CondAvailable})
+ rowsProv = append(rowsProv, table.Row{" Failing", re.Provider.Version.OpenShift.CondFailing})
+ rowsProv = append(rowsProv, table.Row{" Progressing (Update)", re.Provider.Version.OpenShift.CondProgressing})
+ rowsProv = append(rowsProv, table.Row{" RetrievedUpdates", re.Provider.Version.OpenShift.CondRetrievedUpdates})
+ rowsProv = append(rowsProv, table.Row{" EnabledCapabilities", re.Provider.Version.OpenShift.CondImplicitlyEnabledCapabilities})
+ rowsProv = append(rowsProv, table.Row{" ReleaseAccepted", re.Provider.Version.OpenShift.CondReleaseAccepted})
+ tbProv.AppendRows(rowsProv)
+ tbProv.AppendSeparator()
+
+ if baselineProcessed {
+ rowsPBas = []table.Row{{"Cluster Version:", "", ""}}
+ rowsPBas = append(rowsPBas, table.Row{" Kubernetes", re.Provider.Version.Kubernetes, re.Baseline.Version.Kubernetes})
+ rowsPBas = append(rowsPBas, table.Row{" OpenShift", re.Provider.Version.OpenShift.Desired, re.Baseline.Version.OpenShift.Desired})
+ rowsPBas = append(rowsPBas, table.Row{" Channel", re.Provider.Version.OpenShift.Channel, re.Baseline.Version.OpenShift.Channel})
+ tbPBas.AppendRows(rowsPBas)
+ tbPBas.AppendSeparator()
+ rowsPBas = []table.Row{{"Cluster Status: ", re.Provider.Version.OpenShift.OverallStatus, re.Baseline.Version.OpenShift.OverallStatus}}
+ if re.Provider.Version.OpenShift.OverallStatus != "Available" {
+ rowsPBas = append(rowsPBas, table.Row{" Reason", re.Provider.Version.OpenShift.OverallStatusReason, re.Baseline.Version.OpenShift.OverallStatusReason})
+ rowsPBas = append(rowsPBas, table.Row{" Message", re.Provider.Version.OpenShift.OverallStatusMessage, re.Baseline.Version.OpenShift.OverallStatusMessage})
+ }
+ rowsPBas = append(rowsPBas, table.Row{"Cluster Status/Conditions:", "", ""})
+ rowsPBas = append(rowsPBas, table.Row{" Available", re.Provider.Version.OpenShift.CondAvailable, re.Baseline.Version.OpenShift.CondAvailable})
+ rowsPBas = append(rowsPBas, table.Row{" Failing", re.Provider.Version.OpenShift.CondFailing, re.Baseline.Version.OpenShift.CondFailing})
+ rowsPBas = append(rowsPBas, table.Row{" Progressing (Update)", re.Provider.Version.OpenShift.CondProgressing, re.Baseline.Version.OpenShift.CondProgressing})
+ rowsPBas = append(rowsPBas, table.Row{" RetrievedUpdates", re.Provider.Version.OpenShift.CondRetrievedUpdates, re.Baseline.Version.OpenShift.CondRetrievedUpdates})
+ rowsPBas = append(rowsPBas, table.Row{" EnabledCapabilities", re.Provider.Version.OpenShift.CondImplicitlyEnabledCapabilities, re.Baseline.Version.OpenShift.CondImplicitlyEnabledCapabilities})
+ rowsPBas = append(rowsPBas, table.Row{" ReleaseAccepted", re.Provider.Version.OpenShift.CondReleaseAccepted, re.Baseline.Version.OpenShift.CondReleaseAccepted})
+ tbPBas.AppendRows(rowsPBas)
+ tbPBas.AppendSeparator()
+ }
+
+ // Section: Environment state
+ rowsProv = []table.Row{{"Plugin summary:", "Status [Total/Passed/Failed/Skipped] (timeout)"}}
+ if baselineProcessed {
+ rowsPBas = []table.Row{{"Plugin summary:", "Status [Total/Passed/Failed/Skipped] (timeout)", ""}}
+ }
+
+ showPluginSummary := func(pluginName string) {
+ if _, ok := re.Provider.Plugins[pluginName]; !ok {
+ errlog.LogError(errors.Errorf("Unable to load plugin %s", pluginName))
+ }
+ plK8S := re.Provider.Plugins[pluginName]
+ name := fmt.Sprintf(" %s", plK8S.Name)
+ stat := plK8S.Stat
+ pOCPPluginRes := fmt.Sprintf("%s [%d/%d/%d/%d] (%d)", stat.Status, stat.Total, stat.Passed, stat.Failed, stat.Skipped, stat.Timeout)
+ rowsProv = append(rowsProv, table.Row{name, pOCPPluginRes})
+ if baselineProcessed {
+ plK8S = re.Baseline.Plugins[pluginName]
+ stat := plK8S.Stat
+ bOCPPluginRes := fmt.Sprintf("%s [%d/%d/%d/%d] (%d)", stat.Status, stat.Total, stat.Passed, stat.Failed, stat.Skipped, stat.Timeout)
+ // fmt.Fprintf(tbWriter, " - %s\t: %s\t: %s\n", name, pOCPPluginRes, bOCPPluginRes)
+ rowsPBas = append(rowsPBas, table.Row{name, pOCPPluginRes, bOCPPluginRes})
+ }
+ }
+
+ showPluginSummary(plugin.PluginNameKubernetesConformance)
+ showPluginSummary(plugin.PluginNameOpenShiftConformance)
+ showPluginSummary(plugin.PluginNameOpenShiftUpgrade)
+
+ tbProv.AppendRows(rowsProv)
+ tbProv.AppendSeparator()
+ rowsProv = []table.Row{{"Env health summary:", "[A=True/P=True/D=True]"}}
+ if baselineProcessed {
+ tbPBas.AppendRows(rowsPBas)
+ tbPBas.AppendSeparator()
+ rowsPBas = []table.Row{{"Env health summary:", "[A=True/P=True/D=True]", ""}}
+ }
+
+ pOCPCO := re.Provider.ClusterOperators
+ rowsProv = append(rowsProv, table.Row{
+ " Cluster Operators",
+ fmt.Sprintf("[%d/%d/%d]", pOCPCO.CountAvailable, pOCPCO.CountProgressing, pOCPCO.CountDegraded),
+ })
+ if baselineProcessed {
+ bOCPCO := re.Baseline.ClusterOperators
+ rowsPBas = append(rowsPBas, table.Row{
+ " Cluster Operators",
+ fmt.Sprintf("[%d/%d/%d]", pOCPCO.CountAvailable, pOCPCO.CountProgressing, pOCPCO.CountDegraded),
+ fmt.Sprintf("[%d/%d/%d]", bOCPCO.CountAvailable, bOCPCO.CountProgressing, bOCPCO.CountDegraded),
+ })
+ }
+
+ // Show Nodes Health info collected by Sonobuoy
+ pNhMessage := fmt.Sprintf("%d/%d %s", re.Provider.ClusterHealth.NodeHealthy, re.Provider.ClusterHealth.NodeHealthTotal, "")
+ if re.Provider.ClusterHealth.NodeHealthTotal != 0 {
+ pNhMessage = fmt.Sprintf("%s (%.2f%%)", pNhMessage, re.Provider.ClusterHealth.NodeHealthPerc)
+ }
+
+ rowsProv = append(rowsProv, table.Row{" Node health", pNhMessage})
+ if baselineProcessed {
+ bNhMessage := fmt.Sprintf("%d/%d %s", re.Baseline.ClusterHealth.NodeHealthy, re.Baseline.ClusterHealth.NodeHealthTotal, "")
+ if re.Baseline.ClusterHealth.NodeHealthTotal != 0 {
+ bNhMessage = fmt.Sprintf("%s (%.2f%%)", bNhMessage, re.Baseline.ClusterHealth.NodeHealthPerc)
+ }
+ rowsPBas = append(rowsPBas, table.Row{" Node health", pNhMessage, bNhMessage})
+ }
+
+ // Show Pods Health info collected by Sonobuoy
+ pPodsHealthMsg := ""
+ bPodsHealthMsg := ""
+ phTotal := ""
+
+ if re.Provider.ClusterHealth.PodHealthTotal != 0 {
+ phTotal = fmt.Sprintf(" (%.2f%%)", re.Provider.ClusterHealth.PodHealthPerc)
+ }
+ pPodsHealthMsg = fmt.Sprintf("%d/%d %s", re.Provider.ClusterHealth.PodHealthy, re.Provider.ClusterHealth.PodHealthTotal, phTotal)
+ rowsProv = append(rowsProv, table.Row{" Pods health", pPodsHealthMsg})
+ if baselineProcessed {
+ phTotal := ""
+ if re.Baseline.ClusterHealth.PodHealthTotal != 0 {
+ phTotal = fmt.Sprintf(" (%.2f%%)", re.Baseline.ClusterHealth.PodHealthPerc)
+ }
+ bPodsHealthMsg = fmt.Sprintf("%d/%d %s", re.Baseline.ClusterHealth.PodHealthy, re.Baseline.ClusterHealth.PodHealthTotal, phTotal)
+ rowsPBas = append(rowsPBas, table.Row{" Pods health", pPodsHealthMsg, bPodsHealthMsg})
+ }
+
+ // Section: Test count by suite
+ tbProv.AppendRows(rowsProv)
+ tbProv.AppendSeparator()
+ rowsProv = []table.Row{{"Test count by suite:", ""}}
+ if baselineProcessed {
+ tbPBas.AppendRows(rowsPBas)
+ tbPBas.AppendSeparator()
+ rowsPBas = []table.Row{{"Test count by suite:", "", ""}}
+ }
+
+ checkEmpty := func(counter int) string {
+ if counter == 0 {
+ return "(FAIL)"
+ }
+ return ""
+ }
+ rowsProv = append(rowsProv, table.Row{
+ summary.SuiteNameKubernetesConformance,
+ fmt.Sprintf("%d %s",
+ re.Provider.Plugins[plugin.PluginNameKubernetesConformance].Suite.Count,
+ checkEmpty(re.Provider.Plugins[plugin.PluginNameKubernetesConformance].Suite.Count),
+ ),
+ })
+ rowsProv = append(rowsProv, table.Row{
+ summary.SuiteNameOpenshiftConformance,
+ fmt.Sprintf("%d %s",
+ re.Provider.Plugins[plugin.PluginNameOpenShiftConformance].Suite.Count,
+ checkEmpty(re.Provider.Plugins[plugin.PluginNameOpenShiftConformance].Suite.Count),
+ ),
+ })
+ if baselineProcessed {
+ p := re.Baseline.Plugins[plugin.PluginNameKubernetesConformance]
+ if p != nil && p.Suite != nil {
+ rowsPBas = append(rowsPBas, table.Row{
+ summary.SuiteNameKubernetesConformance,
+ fmt.Sprintf("%d %s",
+ re.Provider.Plugins[plugin.PluginNameKubernetesConformance].Suite.Count,
+ checkEmpty(re.Provider.Plugins[plugin.PluginNameKubernetesConformance].Suite.Count),
+ ),
+ fmt.Sprintf("%d %s", p.Suite.Count, checkEmpty(p.Suite.Count)),
+ })
+ }
+ p = re.Baseline.Plugins[plugin.PluginNameOpenShiftConformance]
+ if p != nil && p.Suite != nil {
+ rowsPBas = append(rowsPBas, table.Row{
+ summary.SuiteNameOpenshiftConformance,
+ fmt.Sprintf("%d %s",
+ re.Provider.Plugins[plugin.PluginNameOpenShiftConformance].Suite.Count,
+ checkEmpty(re.Provider.Plugins[plugin.PluginNameOpenShiftConformance].Suite.Count),
+ ),
+ fmt.Sprintf("%d %s", p.Suite.Count, checkEmpty(p.Suite.Count)),
+ })
+ }
+ }
+
+ // Decide which table to show.
+ if baselineProcessed {
+ // Table done (provider + baseline)
+ tbPBas.AppendRows(rowsPBas)
+ tbPBas.Render()
+ } else {
+ // Table done (provider)
+ tbProv.AppendRows(rowsProv)
+ tbProv.Render()
+ }
+
+ // Section: Failed pods counter (using old table version [tabwriter])
+ newLineWithTab := "\t\t\n"
+ tbWriter := tabwriter.NewWriter(os.Stdout, 0, 8, 1, '\t', tabwriter.AlignRight)
+ fmt.Fprint(tbWriter, newLineWithTab)
+ if len(re.Provider.ClusterHealth.PodHealthDetails) > 0 {
+ fmt.Fprintf(tbWriter, " Failed pods:\n")
+ fmt.Fprintf(tbWriter, " %s/%s\t%s\t%s\t%s\t%s\n", "Namespace", "PodName", "Healthy", "Ready", "Reason", "Message")
+ for _, podDetails := range re.Provider.ClusterHealth.PodHealthDetails {
+ fmt.Fprintf(tbWriter, " %s/%s\t%t\t%s\t%s\t%s\n", podDetails.Namespace, podDetails.Name, podDetails.Healthy, podDetails.Ready, podDetails.Reason, podDetails.Message)
+ }
+ }
+ tbWriter.Flush()
+
+ return nil
+}
+
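+// showProcessedSummary prints a per-plugin result table for every processed plugin, sorted by plugin name.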
+func showProcessedSummary(re *report.ReportData) error {
+ fmt.Printf("\n=> Processed Summary <=\n")
+ fmt.Printf("==> Result Summary by test suite:\n")
+ bProcessed := re.Provider.HasValidBaseline
+ plugins := re.Provider.GetPlugins()
+ sort.Strings(plugins)
+ for _, pluginName := range plugins {
+ showSummaryPlugin(re.Provider, pluginName, bProcessed)
+ }
+ return nil
+}
+
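+// showSummaryPlugin renders one plugin's counters and filter results as a table, rewriting the
+// processed status to 'passed' when all filters pass and no runtime failure is detected.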
+func showSummaryPlugin(re *report.ReportResult, pluginName string, bProcessed bool) {
+ if re.Plugins[pluginName] == nil {
+ log.Errorf("unable to get plugin %s", pluginName)
+ return
+ }
+ p := re.Plugins[pluginName]
+ if p.Stat == nil {
+ log.Errorf("unable to get stat for plugin %s", pluginName)
+ return
+ }
+
+ tb := table.NewWriter()
+ tb.SetOutputMirror(os.Stdout)
+ tb.SetStyle(table.StyleLight)
+ title := fmt.Sprintf("%s:", p.Name)
+ titleIcon := ""
+ tb.SetColumnConfigs([]table.ColumnConfig{
+ {Number: 1, WidthMin: 25, WidthMax: 25},
+ {Number: 2, WidthMin: 13, WidthMax: 13},
+ })
+ rows := []table.Row{}
+
+ renderTable := func() {
+ title = fmt.Sprintf("%s %s", title, titleIcon)
+ tb.SetTitle(title)
+ tb.Render()
+ }
+
+ stat := p.Stat
+ rows = append(rows, table.Row{"Total tests", stat.Total})
+ rows = append(rows, table.Row{"Passed", stat.Passed})
+ rows = append(rows, table.Row{"Failed", stat.Failed})
+ rows = append(rows, table.Row{"Timeout", stat.Timeout})
+ rows = append(rows, table.Row{"Skipped", stat.Skipped})
+ titleIcon = iconsCollor[stat.Status]
+
+ if p.Name == plugin.PluginNameOpenShiftUpgrade || p.Name == plugin.PluginNameArtifactsCollector {
+ rows = append(rows, table.Row{"Result Job", stat.Status})
+ tb.AppendRows(rows)
+ renderTable()
+ return
+ }
+ rows = append(rows, table.Row{"Filter Failed Suite", plugin.UtilsCalcPercStr(stat.FilterSuite, stat.Total)})
+ rows = append(rows, table.Row{"Filter Failed KF", plugin.UtilsCalcPercStr(stat.Filter5Failures, stat.Total)})
+ rows = append(rows, table.Row{"Filter Replay", plugin.UtilsCalcPercStr(stat.Filter6Failures, stat.Total)})
+ rows = append(rows, table.Row{"Filter Failed Baseline", plugin.UtilsCalcPercStr(stat.FilterBaseline, stat.Total)})
+ rows = append(rows, table.Row{"Filter Failed Priority", plugin.UtilsCalcPercStr(stat.FilterFailedPrio, stat.Total)})
+ rows = append(rows, table.Row{"Filter Failed API", plugin.UtilsCalcPercStr(stat.FilterFailedAPI, stat.Total)})
+ rows = append(rows, table.Row{"Failures (Priority)", plugin.UtilsCalcPercStr(stat.FilterFailures, stat.Total)})
+
+ // TODO(mtulio): review whether suites provide a better signal.
+ // The final result (pass|fail) for non-kubernetes conformance will stay hidden for a while for these reasons:
+ // - OPCT was created to provide feedback on conformance results, not a binary pass/fail value. The numbers should be interpreted individually.
+ // - Conformance results can contain flakes or runtime failures which need to be investigated by the executor.
+ // - It forces the user/executor to review the results, not only the summary.
+ // That behavior is aligned with BU: we expect kubernetes conformance to pass on all providers; the reviewer
+ // must set this as a target in the review process.
+ // UPDATED(mtulio): OPCT now provides signals for conformance suites. The openshift-validated/conformance suite
+ // passing after filters means the baseline has common failures, which need to be investigated in the future
+ // for non-providers - because there is a good chance they are related to the environment or a platform-wide issue/bug.
+ // Leaving it commented and providing a 'processed' result for openshift-conformance too.
+ // if p.Name != plugin.PluginNameKubernetesConformance {
+ // rows = append(rows, table.Row{"Result Job", stat.Status})
+ // tb.AppendRows(rows)
+ // renderTable()
+ // return
+ // }
+
+ // checking for runtime failures
+ runtimeFailed := false
+ if stat.Total == stat.Failed {
+ runtimeFailed = true
+ }
+
+ // rewrite the original status when pass on all filters and not failed on runtime
+ status := stat.Status
+ if (stat.FilterFailures == 0) && !runtimeFailed {
+ status = "passed"
+ }
+
+ rows = append(rows, table.Row{"Result - Job", stat.Status})
+ rows = append(rows, table.Row{"Result - Processed", status})
+ tb.AppendRows(rows)
+ titleIcon = iconsCollor[status]
+ if p.Name == plugin.PluginNameConformanceReplay && status != "passed" {
+ titleIcon = iconsBW["warn"]
+ }
+ renderTable()
+}
+
+// showErrorDetails shows details of failures for each conformance plugin.
+func showErrorDetails(re *report.ReportData, verbose bool) error {
+ fmt.Printf("\n==> Result details by conformance plugins: \n")
+
+ bProcessed := re.Provider.HasValidBaseline
+ showErrorDetailPlugin(re.Provider.Plugins[plugin.PluginNameKubernetesConformance], verbose, bProcessed)
+ showErrorDetailPlugin(re.Provider.Plugins[plugin.PluginNameOpenShiftConformance], verbose, bProcessed)
+
+ return nil
+}
+
+// showErrorDetailPlugin shows failed e2e tests by filter; when verbose, each filter is listed.
+func showErrorDetailPlugin(p *report.ReportPlugin, verbose bool, bProcessed bool) {
+ flakeCount := p.Stat.FilterBaseline - p.Stat.FilterFailedPrio
+
+ // TODO(mtulio): migrate to new table format (go-table)
+ if verbose {
+ fmt.Printf("\n\n => %s: (%d failures, %d failures filtered, %d flakes)\n", p.Name, p.Stat.Failed, p.Stat.FilterBaseline, flakeCount)
+ fmt.Printf("\n --> [verbose] Failed tests detected on archive (without filters):\n")
+ if p.Stat.Failed == 0 {
+ fmt.Println("")
+ }
+ for _, test := range p.Tests {
+ if test.State == "failed" {
+ fmt.Println(test.Name)
+ }
+ }
+
+ fmt.Printf("\n --> [verbose] Failed tests detected on suite (Filter SuiteOnly):\n")
+ if p.Stat.FilterSuite == 0 {
+ fmt.Println("")
+ }
+ for _, test := range p.Tests {
+ if test.State == "filterSuiteOnly" {
+ fmt.Println(test.Name)
+ }
+ }
+ if bProcessed {
+ fmt.Printf("\n --> [verbose] Failed tests removing baseline (Filter Baseline):\n")
+ if p.Stat.FilterBaseline == 0 {
+ fmt.Println("")
+ }
+ for _, test := range p.Tests {
+ if test.State == "filterBaseline" {
+ fmt.Println(test.Name)
+ }
+ }
+ }
+ } else {
+ if p.Stat.FilterFailures == 0 && flakeCount == 0 {
+ log.Infof("No failures detected on %s", p.Name)
+ return
+ }
+ fmt.Printf("\n\n => %s: (%d failures, %d flakes)\n", p.Name, p.Stat.FilterFailures, flakeCount)
+ }
+
+ // tables with go-table
+ rowsFail := []table.Row{}
+ tbFailTags := ""
+ tbFailSkip := false
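+ // noFlakes tracks failed tests already listed so they are not duplicated in the flake table below.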
+ noFlakes := make(map[string]struct{})
+ if p.Stat.FilterBaseline == flakeCount {
+ tbFailSkip = true
+ } else {
+ testTags := plugin.NewTestTagsEmpty(int(p.Stat.FilterFailures))
+ for _, test := range p.FailedFiltered {
+ noFlakes[test.Name] = struct{}{}
+ testTags.Add(&test.Name)
+ errCount := 0
+ if _, ok := p.Tests[test.Name].ErrorCounters["total"]; ok {
+ errCount = p.Tests[test.Name].ErrorCounters["total"]
+ }
+ // testsWErrCnt = append(testsWErrCnt, fmt.Sprintf("%d\t%s", errCount, test.Name))
+ rowsFail = append(rowsFail, table.Row{errCount, test.Name})
+ }
+ // Failed tests grouped by tag (first value between '[]')
+ tbFailTags = testTags.ShowSorted()
+ }
+
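+ // Build rows for failed tests that also have flake occurrences reported by OpenShift CI (Sippy).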
+ rowsFlake := []table.Row{}
+ tbFlakeTags := ""
+ tbFlakeSkip := false
+ if p.Stat.FilterBaseline == 0 {
+ tbFlakeSkip = true
+ } else {
+ testTags := plugin.NewTestTagsEmpty(int(p.Stat.FilterBaseline))
+ for _, test := range p.TestsFlakeCI {
+ // prevent duplication when the flake test was already listed as a failure.
+ if _, ok := noFlakes[test.Name]; ok {
+ continue
+ }
+ // TODO: fix issues when retrieving flakes from the Sippy API.
+ // Fall back to '--' when flake data is unavailable.
+ if p.Tests[test.Name].Flake == nil {
+ rowsFlake = append(rowsFlake, table.Row{"--", "--", "--", test.Name})
+ } else if p.Tests[test.Name].Flake.CurrentFlakes != 0 {
+ errCount := 0
+ if _, ok := p.Tests[test.Name].ErrorCounters["total"]; ok {
+ errCount = p.Tests[test.Name].ErrorCounters["total"]
+ }
+ rowsFlake = append(rowsFlake, table.Row{
+ p.Tests[test.Name].Flake.CurrentFlakes,
+ fmt.Sprintf("%.3f %%", p.Tests[test.Name].Flake.CurrentFlakePerc),
+ errCount, test.Name})
+ }
+ testTags.Add(&test.Name)
+ }
+ tbFlakeTags = testTags.ShowSorted()
+ }
+
+ // Table style
+ st := table.StyleLight
+ st.Options.SeparateRows = true
+
+ // Table for failed tests to review
+ tbFail := table.NewWriter()
+ tbFail.SetOutputMirror(os.Stdout)
+ tbFail.SetStyle(st)
+ tbFail.SetTitle("==> %s \n%s ACTION REQUIRED: Failed tests to review", p.Name, iconsCollor["alert"])
+ tbFail.AppendHeader(table.Row{"Err Log", "Test Name"})
+ tbFail.AppendRows(rowsFail)
+ tbFail.AppendFooter(table.Row{"", tbFailTags})
+ tbFail.SetColumnConfigs([]table.ColumnConfig{
+ {Number: 2, AlignHeader: tabletext.AlignCenter, WidthMax: 150},
+ })
+ if !tbFailSkip {
+ tbFail.Render()
+ }
+
+ // Table for Flakes
+ tbFlake := table.NewWriter()
+ tbFlake.SetOutputMirror(os.Stdout)
+ tbFlake.SetStyle(st)
+ tbFlake.SetTitle("==> %s \nFailed tests with flake occurrences (on OpenShift CI)", p.Name)
+ tbFlake.AppendHeader(table.Row{"Flake #", "%", "Err Log", "Test Name"})
+ tbFlake.AppendRows(rowsFlake)
+ tbFlake.AppendFooter(table.Row{"", "", "", tbFlakeTags})
+ tbFlake.SetColumnConfigs([]table.ColumnConfig{
+ {Number: 4, AlignHeader: tabletext.AlignCenter, WidthMax: 129},
+ })
+ if !tbFlakeSkip {
+ tbFlake.Render()
+ }
+}
+
+// showChecks show the checks results / final report.
+func showChecks(re *report.ReportData) error {
+ rowsFailures := []table.Row{}
+ rowsWarns := []table.Row{}
+ rowsPass := []table.Row{}
+ rowSkip := []table.Row{}
+
+ fmt.Printf("\n\n")
+ tb := table.NewWriter()
+ tb.SetOutputMirror(os.Stdout)
+ tb.SetStyle(table.StyleLight)
+ tb.AppendHeader(table.Row{"ID", "#", "Result", "Check name", "Target", "Current"})
+ tb.SetColumnConfigs([]table.ColumnConfig{
+ {Number: 1, AlignHeader: tabletext.AlignCenter},
+ {Number: 2, AlignHeader: tabletext.AlignCenter, Align: tabletext.AlignCenter},
+ {Number: 3, AlignHeader: tabletext.AlignCenter, Align: tabletext.AlignCenter},
+ {Number: 4, AlignHeader: tabletext.AlignCenter, AlignFooter: tabletext.AlignCenter},
+ {Number: 5, AlignHeader: tabletext.AlignCenter},
+ {Number: 6, AlignHeader: tabletext.AlignCenter},
+ })
+
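+ // Aggregate all checks only to compute the totals displayed in the table footer.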
+ allChecks := append([]*report.SLOOutput{}, re.Checks.Fail...)
+ allChecks = append(allChecks, re.Checks.Warn...)
+ allChecks = append(allChecks, re.Checks.Pass...)
+ allChecks = append(allChecks, re.Checks.Skip...)
+ for _, check := range re.Checks.Fail {
+ rowsFailures = append(rowsFailures, table.Row{
+ check.ID, iconsCollor[check.SLOResult], check.SLOResult, check.SLO, check.SLITarget, check.SLIActual,
+ })
+ }
+ for _, check := range re.Checks.Warn {
+ rowsWarns = append(rowsWarns, table.Row{
+ check.ID, iconsBW[check.SLOResult], check.SLOResult, check.SLO, check.SLITarget, check.SLIActual,
+ })
+ }
+ for _, check := range re.Checks.Pass {
+ rowsPass = append(rowsPass, table.Row{
+ check.ID, iconsBW[check.SLOResult], check.SLOResult, check.SLO, check.SLITarget, check.SLIActual,
+ })
+ }
+ for _, check := range re.Checks.Skip {
+ rowSkip = append(rowSkip, table.Row{
+ check.ID, iconsBW["pass"], check.SLOResult, check.SLO, check.SLITarget, check.SLIActual,
+ })
+ }
+
+ if len(rowsFailures) > 0 {
+ tb.AppendRows(rowsFailures)
+ tb.AppendSeparator()
+ }
+ if len(rowsWarns) > 0 {
+ tb.AppendRows(rowsWarns)
+ tb.AppendSeparator()
+ }
+ if len(rowsPass) > 0 {
+ tb.AppendRows(rowsPass)
+ tb.AppendSeparator()
+ }
+ if len(rowSkip) > 0 {
+ tb.AppendRows(rowSkip)
+ }
+
+ total := len(allChecks)
+ summary := fmt.Sprintf("Total: %d, Failed: %d (%.2f%%), Warn: %d (%.2f%%), Pass: %d (%.2f%%), Skip: %d (%.2f%%)", total,
+ len(re.Checks.Fail), (float64(len(re.Checks.Fail))/float64(total))*100,
+ len(re.Checks.Warn), (float64(len(re.Checks.Warn))/float64(total))*100,
+ len(re.Checks.Pass), (float64(len(re.Checks.Pass))/float64(total))*100,
+ len(re.Checks.Skip), (float64(len(re.Checks.Skip))/float64(total))*100,
+ )
+ tb.AppendFooter(table.Row{"", "", "", summary, "", ""})
+
+ title := "Validation checks / Results"
+ // Create an alert message when there are check failures.
+ if len(rowsFailures) > 0 {
+ alert := fmt.Sprintf(
+ "\t %s %s IMMEDIATE ACTION: %d Check(s) failed. Review it individually, fix and collect new results %s %s",
+ iconsCollor["alert"], iconsCollor["alert"], len(re.Checks.Fail), iconsCollor["alert"], iconsCollor["alert"])
+ title = fmt.Sprintf("%s\n%s", title, alert)
+ }
+ tb.SetTitle(title)
+ tb.Render()
+
+ return nil
+}
diff --git a/pkg/report/cmd.go b/pkg/report/cmd.go
deleted file mode 100644
index d7d99a14..00000000
--- a/pkg/report/cmd.go
+++ /dev/null
@@ -1,372 +0,0 @@
-package report
-
-import (
- "fmt"
- "os"
-
- "github.com/pkg/errors"
- "github.com/spf13/cobra"
-
- "text/tabwriter"
-
- "github.com/redhat-openshift-ecosystem/provider-certification-tool/internal/pkg/summary"
- "github.com/vmware-tanzu/sonobuoy/pkg/errlog"
-)
-
-type Input struct {
- archive string
- archiveBase string
- saveTo string
- verbose bool
-}
-
-func NewCmdReport() *cobra.Command {
- data := Input{}
- cmd := &cobra.Command{
- Use: "report archive.tar.gz",
- Short: "Create a report from results.",
- Run: func(cmd *cobra.Command, args []string) {
- data.archive = args[0]
- if err := processResult(&data); err != nil {
- errlog.LogError(errors.Wrapf(err, "could not process archive: %v", args[0]))
- os.Exit(1)
- }
- },
- Args: cobra.ExactArgs(1),
- }
-
- cmd.Flags().StringVarP(
- &data.archiveBase, "baseline", "b", "",
- "Baseline result archive file. Example: -b file.tar.gz",
- )
- _ = cmd.MarkFlagRequired("base")
-
- cmd.Flags().StringVarP(
- &data.saveTo, "save-to", "s", "",
- "Extract and Save Results to disk. Example: -s ./results",
- )
- cmd.Flags().BoolVarP(
- &data.verbose, "verbose", "v", false,
- "Show test details of test failures",
- )
- return cmd
-}
-
-func processResult(input *Input) error {
-
- cs := summary.ConsolidatedSummary{
- Provider: &summary.ResultSummary{
- Name: summary.ResultSourceNameProvider,
- Archive: input.archive,
- OpenShift: &summary.OpenShiftSummary{},
- Sonobuoy: &summary.SonobuoySummary{},
- Suites: &summary.OpenshiftTestsSuites{
- OpenshiftConformance: &summary.OpenshiftTestsSuite{Name: "openshiftConformance"},
- KubernetesConformance: &summary.OpenshiftTestsSuite{Name: "kubernetesConformance"},
- },
- },
- Baseline: &summary.ResultSummary{
- Name: summary.ResultSourceNameBaseline,
- Archive: input.archiveBase,
- OpenShift: &summary.OpenShiftSummary{},
- Sonobuoy: &summary.SonobuoySummary{},
- Suites: &summary.OpenshiftTestsSuites{
- OpenshiftConformance: &summary.OpenshiftTestsSuite{Name: "openshiftConformance"},
- KubernetesConformance: &summary.OpenshiftTestsSuite{Name: "kubernetesConformance"},
- },
- },
- }
-
- if err := cs.Process(); err != nil {
- return err
- }
-
- if err := showAggregatedSummary(&cs); err != nil {
- return err
- }
-
- if err := showProcessedSummary(&cs); err != nil {
- return err
- }
-
- if err := showErrorDetails(&cs, input.verbose); err != nil {
- return err
- }
-
- if input.saveTo != "" {
- if err := cs.SaveResults(input.saveTo); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func showAggregatedSummary(cs *summary.ConsolidatedSummary) error {
- fmt.Printf("\n> OPCT Summary <\n\n")
-
- // vars starting with p* represents the 'partner' artifact
- // vars starting with b* represents 'baseline' artifact
- pOCP := cs.GetProvider().GetOpenShift()
- pOCPCV, _ := pOCP.GetClusterVersion()
- pOCPInfra, _ := pOCP.GetInfrastructure()
-
- var bOCP *summary.OpenShiftSummary
- var bOCPCV *summary.SummaryClusterVersionOutput
- var bOCPInfra *summary.SummaryOpenShiftInfrastructureV1
- baselineProcessed := cs.GetBaseline().HasValidResults()
- if baselineProcessed {
- bOCP = cs.GetBaseline().GetOpenShift()
- bOCPCV, _ = bOCP.GetClusterVersion()
- bOCPInfra, _ = bOCP.GetInfrastructure()
- }
-
- // Provider and Baseline Cluster (archive)
- pCL := cs.GetProvider().GetSonobuoyCluster()
- bCL := cs.GetBaseline().GetSonobuoyCluster()
-
- newLineWithTab := "\t\t\n"
- tbWriter := tabwriter.NewWriter(os.Stdout, 0, 8, 1, '\t', tabwriter.AlignRight)
-
- if baselineProcessed {
- fmt.Fprintf(tbWriter, " Kubernetes API Server version\t: %s\t: %s\n", pCL.APIVersion, bCL.APIVersion)
- fmt.Fprintf(tbWriter, " OpenShift Container Platform version\t: %s\t: %s\n", pOCPCV.DesiredVersion, bOCPCV.DesiredVersion)
- fmt.Fprintf(tbWriter, " - Cluster Update Progressing\t: %s\t: %s\n", pOCPCV.Progressing, bOCPCV.Progressing)
- fmt.Fprintf(tbWriter, " - Cluster Target Version\t: %s\t: %s\n", pOCPCV.ProgressingMessage, bOCPCV.ProgressingMessage)
- } else {
- fmt.Fprintf(tbWriter, " Kubernetes API Server version\t: %s\n", pCL.APIVersion)
- fmt.Fprintf(tbWriter, " OpenShift Container Platform version\t: %s\n", pOCPCV.DesiredVersion)
- fmt.Fprintf(tbWriter, " - Cluster Update Progressing\t: %s\n", pOCPCV.Progressing)
- fmt.Fprintf(tbWriter, " - Cluster Target Version\t: %s\n", pOCPCV.ProgressingMessage)
- }
-
- fmt.Fprint(tbWriter, newLineWithTab)
- partnerPlatformName := string(pOCPInfra.Status.PlatformStatus.Type)
- if pOCPInfra.Status.PlatformStatus.Type == "External" {
- partnerPlatformName = fmt.Sprintf("%s (%s)", partnerPlatformName, pOCPInfra.Spec.PlatformSpec.External.PlatformName)
- }
- if baselineProcessed {
- baselinePlatformName := string(bOCPInfra.Status.PlatformStatus.Type)
- if bOCPInfra.Status.PlatformStatus.Type == "External" {
- baselinePlatformName = fmt.Sprintf("%s (%s)", baselinePlatformName, bOCPInfra.Spec.PlatformSpec.External.PlatformName)
- }
- fmt.Fprintf(tbWriter, " OCP Infrastructure:\t\t\n")
- fmt.Fprintf(tbWriter, " - PlatformType\t: %s\t: %s\n", partnerPlatformName, baselinePlatformName)
- fmt.Fprintf(tbWriter, " - Name\t: %s\t: %s\n", pOCPInfra.Status.InfrastructureName, bOCPInfra.Status.InfrastructureName)
- fmt.Fprintf(tbWriter, " - Topology\t: %s\t: %s\n", pOCPInfra.Status.InfrastructureTopology, bOCPInfra.Status.InfrastructureTopology)
- fmt.Fprintf(tbWriter, " - ControlPlaneTopology\t: %s\t: %s\n", pOCPInfra.Status.ControlPlaneTopology, bOCPInfra.Status.ControlPlaneTopology)
- fmt.Fprintf(tbWriter, " - API Server URL\t: %s\t: %s\n", pOCPInfra.Status.APIServerURL, bOCPInfra.Status.APIServerURL)
- fmt.Fprintf(tbWriter, " - API Server URL (internal)\t: %s\t: %s\n", pOCPInfra.Status.APIServerInternalURL, bOCPInfra.Status.APIServerInternalURL)
- } else {
- fmt.Fprintf(tbWriter, " OCP Infrastructure:\t\n")
- fmt.Fprintf(tbWriter, " - PlatformType\t: %s\n", partnerPlatformName)
- fmt.Fprintf(tbWriter, " - Name\t: %s\n", pOCPInfra.Status.InfrastructureName)
- fmt.Fprintf(tbWriter, " - Topology\t: %s\n", pOCPInfra.Status.InfrastructureTopology)
- fmt.Fprintf(tbWriter, " - ControlPlaneTopology\t: %s\n", pOCPInfra.Status.ControlPlaneTopology)
- fmt.Fprintf(tbWriter, " - API Server URL\t: %s\n", pOCPInfra.Status.APIServerURL)
- fmt.Fprintf(tbWriter, " - API Server URL (internal)\t: %s\n", pOCPInfra.Status.APIServerInternalURL)
- }
-
- fmt.Fprint(tbWriter, newLineWithTab)
- fmt.Fprintf(tbWriter, " Plugins summary by name:\t Status [Total/Passed/Failed/Skipped] (timeout)\n")
-
- plK8S := pOCP.GetResultK8SValidated()
- name := plK8S.Name
- pOCPPluginRes := fmt.Sprintf("%s [%d/%d/%d/%d] (%d)", plK8S.Status, plK8S.Total, plK8S.Passed, plK8S.Failed, plK8S.Skipped, plK8S.Timeout)
- if baselineProcessed {
- plK8S = bOCP.GetResultK8SValidated()
- bOCPPluginRes := fmt.Sprintf("%s [%d/%d/%d/%d] (%d)", plK8S.Status, plK8S.Total, plK8S.Passed, plK8S.Failed, plK8S.Skipped, plK8S.Timeout)
- fmt.Fprintf(tbWriter, " - %s\t: %s\t: %s\n", name, pOCPPluginRes, bOCPPluginRes)
- } else {
- fmt.Fprintf(tbWriter, " - %s\t: %s\n", name, pOCPPluginRes)
- }
-
- plOCP := pOCP.GetResultOCPValidated()
- name = plOCP.Name
- pOCPPluginRes = fmt.Sprintf("%s [%d/%d/%d/%d] (%d)", plOCP.Status, plOCP.Total, plOCP.Passed, plOCP.Failed, plOCP.Skipped, plOCP.Timeout)
-
- if baselineProcessed {
- plOCP = bOCP.GetResultOCPValidated()
- bOCPPluginRes := fmt.Sprintf("%s [%d/%d/%d/%d] (%d)", plOCP.Status, plOCP.Total, plOCP.Passed, plOCP.Failed, plOCP.Skipped, plOCP.Timeout)
- fmt.Fprintf(tbWriter, " - %s\t: %s\t: %s\n", name, pOCPPluginRes, bOCPPluginRes)
- } else {
- fmt.Fprintf(tbWriter, " - %s\t: %s\n", name, pOCPPluginRes)
- }
-
- fmt.Fprint(tbWriter, newLineWithTab)
- fmt.Fprintf(tbWriter, " Health summary:\t [A=True/P=True/D=True]\t\n")
- pOCPCO, _ := pOCP.GetClusterOperator()
-
- if baselineProcessed {
- bOCPCO, _ := bOCP.GetClusterOperator()
- fmt.Fprintf(tbWriter, " - Cluster Operators\t: [%d/%d/%d]\t: [%d/%d/%d]\n",
- pOCPCO.CountAvailable, pOCPCO.CountProgressing, pOCPCO.CountDegraded,
- bOCPCO.CountAvailable, bOCPCO.CountProgressing, bOCPCO.CountDegraded,
- )
- } else {
- fmt.Fprintf(tbWriter, " - Cluster Operators\t: [%d/%d/%d]\n",
- pOCPCO.CountAvailable, pOCPCO.CountProgressing, pOCPCO.CountDegraded,
- )
- }
-
- pNhMessage := fmt.Sprintf("%d/%d %s", pCL.NodeHealth.Total, pCL.NodeHealth.Total, "")
- if pCL.NodeHealth.Total != 0 {
- pNhMessage = fmt.Sprintf("%s (%d%%)", pNhMessage, 100*pCL.NodeHealth.Healthy/pCL.NodeHealth.Total)
- }
-
- bNhMessage := fmt.Sprintf("%d/%d %s", bCL.NodeHealth.Total, bCL.NodeHealth.Total, "")
- if bCL.NodeHealth.Total != 0 {
- bNhMessage = fmt.Sprintf("%s (%d%%)", bNhMessage, 100*bCL.NodeHealth.Healthy/bCL.NodeHealth.Total)
- }
- if baselineProcessed {
- fmt.Fprintf(tbWriter, " - Node health\t: %s\t: %s\n", pNhMessage, bNhMessage)
- } else {
- fmt.Fprintf(tbWriter, " - Node health\t: %s\n", pNhMessage)
- }
-
- pPodsHealthMsg := ""
- bPodsHealthMsg := ""
- if len(pCL.PodHealth.Details) > 0 {
- phTotal := ""
- if pCL.PodHealth.Total != 0 {
- phTotal = fmt.Sprintf(" (%d%%)", 100*pCL.PodHealth.Healthy/pCL.PodHealth.Total)
- }
- pPodsHealthMsg = fmt.Sprintf("%d/%d %s", pCL.PodHealth.Healthy, pCL.PodHealth.Total, phTotal)
- }
- if baselineProcessed {
- if len(bCL.PodHealth.Details) > 0 {
- phTotal := ""
- if bCL.PodHealth.Total != 0 {
- phTotal = fmt.Sprintf(" (%d%%)", 100*bCL.PodHealth.Healthy/bCL.PodHealth.Total)
- }
- bPodsHealthMsg = fmt.Sprintf("%d/%d %s", bCL.PodHealth.Healthy, bCL.PodHealth.Total, phTotal)
- }
- fmt.Fprintf(tbWriter, " - Pods health\t: %s\t: %s\n", pPodsHealthMsg, bPodsHealthMsg)
- } else {
- fmt.Fprintf(tbWriter, " - Pods health\t: %s\n", pPodsHealthMsg)
- }
-
- tbWriter.Flush()
- return nil
-}
-
-func showProcessedSummary(cs *summary.ConsolidatedSummary) error {
-
- fmt.Printf("\n> Processed Summary <\n")
-
- fmt.Printf("\n Total tests by conformance suites:\n")
- fmt.Printf(" - %s: %d \n", summary.SuiteNameKubernetesConformance, cs.GetProvider().GetSuites().GetTotalK8S())
- fmt.Printf(" - %s: %d \n", summary.SuiteNameOpenshiftConformance, cs.GetProvider().GetSuites().GetTotalOCP())
-
- fmt.Printf("\n Result Summary by conformance plugins:\n")
- bProcessed := cs.GetBaseline().HasValidResults()
- showSummaryPlugin(cs.GetProvider().GetOpenShift().GetResultK8SValidated(), bProcessed)
- showSummaryPlugin(cs.GetProvider().GetOpenShift().GetResultOCPValidated(), bProcessed)
-
- return nil
-}
-
-func showSummaryPlugin(p *summary.OPCTPluginSummary, bProcessed bool) {
- fmt.Printf(" - %s:\n", p.Name)
- fmt.Printf(" - Status: %s\n", p.Status)
- fmt.Printf(" - Total: %d\n", p.Total)
- fmt.Printf(" - Passed: %d\n", p.Passed)
- fmt.Printf(" - Failed: %d\n", p.Failed)
- fmt.Printf(" - Timeout: %d\n", p.Timeout)
- fmt.Printf(" - Skipped: %d\n", p.Skipped)
- fmt.Printf(" - Failed (without filters) : %d\n", len(p.FailedList))
- fmt.Printf(" - Failed (Filter SuiteOnly): %d\n", len(p.FailedFilterSuite))
- if bProcessed {
- fmt.Printf(" - Failed (Filter Baseline) : %d\n", len(p.FailedFilterBaseline))
- }
- fmt.Printf(" - Failed (Filter CI Flakes): %d\n", len(p.FailedFilterFlaky))
-
- // checking for runtime failure
- runtimeFailed := false
- if p.Total == p.Failed {
- runtimeFailed = true
- }
-
- // rewrite the original status when pass on all filters and not failed on runtime
- status := p.Status
- if (len(p.FailedFilterFlaky) == 0) && !runtimeFailed {
- status = "pass"
- }
-
- fmt.Printf(" - Status After Filters : %s\n", status)
-}
-
-// showErrorDetails show details of failres for each plugin.
-func showErrorDetails(cs *summary.ConsolidatedSummary, verbose bool) error {
-
- fmt.Printf("\n Result details by conformance plugins: \n")
- bProcessed := cs.GetBaseline().HasValidResults()
- showErrorDetailPlugin(cs.GetProvider().GetOpenShift().GetResultK8SValidated(), verbose, bProcessed)
- showErrorDetailPlugin(cs.GetProvider().GetOpenShift().GetResultOCPValidated(), verbose, bProcessed)
-
- return nil
-}
-
-// showErrorDetailPlugin Show failed e2e tests by filter, when verbose each filter will be shown.
-func showErrorDetailPlugin(p *summary.OPCTPluginSummary, verbose bool, bProcessed bool) {
-
- flakeCount := len(p.FailedFilterBaseline) - len(p.FailedFilterFlaky)
-
- if verbose {
- fmt.Printf("\n\n => %s: (%d failures, %d failures filtered, %d flakes)\n", p.Name, len(p.FailedList), len(p.FailedFilterBaseline), flakeCount)
-
- fmt.Printf("\n --> [verbose] Failed tests detected on archive (without filters):\n")
- if len(p.FailedList) == 0 {
- fmt.Println("")
- }
- for _, test := range p.FailedList {
- fmt.Println(test)
- }
-
- fmt.Printf("\n --> [verbose] Failed tests detected on suite (Filter SuiteOnly):\n")
- if len(p.FailedFilterSuite) == 0 {
- fmt.Println("")
- }
- for _, test := range p.FailedFilterSuite {
- fmt.Println(test)
- }
- if bProcessed {
- fmt.Printf("\n --> [verbose] Failed tests removing baseline (Filter Baseline):\n")
- if len(p.FailedFilterBaseline) == 0 {
- fmt.Println("")
- }
- for _, test := range p.FailedFilterBaseline {
- fmt.Println(test)
- }
- }
- } else {
- fmt.Printf("\n\n => %s: (%d failures, %d flakes)\n", p.Name, len(p.FailedFilterBaseline), flakeCount)
- }
-
- fmt.Printf("\n --> Failed tests to Review (without flakes) - Immediate action:\n")
- if len(p.FailedFilterBaseline) == flakeCount {
- fmt.Println("")
- }
- for _, test := range p.FailedFilterFlaky {
- fmt.Println(test)
- }
-
- fmt.Printf("\n --> Failed flake tests - Statistic from OpenShift CI\n")
- tbWriter := tabwriter.NewWriter(os.Stdout, 0, 8, 1, '\t', tabwriter.AlignRight)
-
- if len(p.FailedFilterBaseline) == 0 {
- fmt.Fprintf(tbWriter, "\n")
- } else {
- fmt.Fprintf(tbWriter, "Flakes\tPerc\t TestName\n")
- for _, test := range p.FailedFilterBaseline {
- // When the was issues to create the flaky item (network connectivity with Sippy API),
- // fallback to '--' values.
- if p.FailedItems[test].Flaky == nil {
- fmt.Fprintf(tbWriter, "--\t--\t%s\n", test)
- } else if p.FailedItems[test].Flaky.CurrentFlakes != 0 {
- fmt.Fprintf(tbWriter, "%d\t%.3f%%\t%s\n", p.FailedItems[test].Flaky.CurrentFlakes, p.FailedItems[test].Flaky.CurrentFlakePerc, test)
- }
- }
- }
- tbWriter.Flush()
-}
diff --git a/pkg/retrieve/retrieve.go b/pkg/retrieve/retrieve.go
index a267d70d..ad81be7c 100644
--- a/pkg/retrieve/retrieve.go
+++ b/pkg/retrieve/retrieve.go
@@ -5,6 +5,8 @@ import (
"io"
"os"
"time"
+ "strings"
+ "path/filepath"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
@@ -103,7 +105,13 @@ func retrieveResults(sclient sonobuoyclient.Interface, destinationDirectory stri
// Log the new files to stdout
for _, result := range results {
- log.Infof("Results saved to %s", result)
+ // Rename the file, replacing the 'sonobuoy_' prefix with 'opct_' in the base name.
+ newFile := fmt.Sprintf("%s/opct_%s", filepath.Dir(result), strings.Replace(filepath.Base(result), "sonobuoy_", "", 1))
+ log.Debugf("Renaming %s to %s", result, newFile)
+ if err := os.Rename(result, newFile); err != nil {
+ return fmt.Errorf("error renaming %s to %s: %w", result, newFile, err)
+ }
+ log.Infof("Results saved to %s", newFile)
}
return nil
diff --git a/pkg/run/manifests.go b/pkg/run/manifests.go
index 34bf6b4b..6162051e 100644
--- a/pkg/run/manifests.go
+++ b/pkg/run/manifests.go
@@ -36,18 +36,20 @@ func loadPluginManifests(r *RunOptions) ([]*manifest.Manifest, error) {
return nil, err
}
for _, m := range pluginManifests {
- log.Debugf("Loading certification plugin: %s", m)
+ log.Debugf("Loading plugin: %s", m)
pluginManifestTpl, err := efs.GetData().ReadFile(m)
if err != nil {
- log.Errorf("Unable to read plugin manifest %s", m)
+ log.Errorf("error reading config for plugin %s: %v", m, err)
return nil, err
}
pluginManifest, err := ProcessManifestTemplates(r, pluginManifestTpl)
if err != nil {
+ log.Errorf("error processing configuration for plugin %s: %v", m, err)
return nil, err
}
asset, err := loader.LoadDefinition(pluginManifest)
if err != nil {
+ log.Errorf("error loading configuration for plugin %s: %v", m, err)
return nil, err
}
manifests = append(manifests, &asset)
diff --git a/pkg/run/run.go b/pkg/run/run.go
index 662974ee..15cd61f8 100644
--- a/pkg/run/run.go
+++ b/pkg/run/run.go
@@ -22,21 +22,21 @@ import (
"github.com/vmware-tanzu/sonobuoy/pkg/plugin/loader"
"github.com/vmware-tanzu/sonobuoy/pkg/plugin/manifest"
v1 "k8s.io/api/core/v1"
- rbacv1 "k8s.io/api/rbac/v1"
- kerrors "k8s.io/apimachinery/pkg/api/errors"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime/schema"
- "k8s.io/client-go/kubernetes"
"github.com/redhat-openshift-ecosystem/provider-certification-tool/pkg"
"github.com/redhat-openshift-ecosystem/provider-certification-tool/pkg/client"
"github.com/redhat-openshift-ecosystem/provider-certification-tool/pkg/status"
"github.com/redhat-openshift-ecosystem/provider-certification-tool/pkg/wait"
+ rbacv1 "k8s.io/api/rbac/v1"
+ kerrors "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/client-go/kubernetes"
)
type RunOptions struct {
- plugins *[]string
- dedicated bool
+ plugins *[]string
+
sonobuoyImage string
imageRepository string
@@ -50,12 +50,15 @@ type RunOptions struct {
timeout int
watch bool
- watchInterval int
mode string
upgradeImage string
// devel flags
- devCount string
+ devCount string
+ devSkipChecks bool
+
+ // Dedicated node
+ dedicated bool
}
const (
@@ -93,56 +96,51 @@ func NewCmdRun() *cobra.Command {
// Client setup
kclient, sclient, err = client.CreateClients()
if err != nil {
- return fmt.Errorf("run finished with errors: %v", err)
+ log.WithError(err).Error("pre-run failed when creating clients")
+ return err
}
// Pre-checks and setup
if err = o.PreRunCheck(kclient); err != nil {
- return fmt.Errorf("run finished with errors: %v", err)
+ log.WithError(err).Error("pre-run failed when checking dependencies")
+ return err
}
if err = o.PreRunSetup(kclient); err != nil {
- return fmt.Errorf("run finished with errors: %v", err)
+ log.WithError(err).Error("pre-run failed when initializing the environment")
+ return err
}
return nil
},
RunE: func(cmd *cobra.Command, args []string) error {
log.Info("Running OPCT...")
-
- // Fire off sonobuoy
- err := o.Run(kclient, sclient)
- if err != nil {
- log.WithError(err).Errorf("Error running the tool. Please check the errors and try again.")
+ if err := o.Run(kclient, sclient); err != nil {
+ log.WithError(err).Error("execution finished with errors")
return err
}
log.Info("Jobs scheduled! Waiting for resources be created...")
-
- // Wait for Sonobuoy to create
- err = wait.WaitForRequiredResources(kclient)
- if err != nil {
- log.WithError(err).Errorf("error waiting for sonobuoy pods to become ready")
+ if err := wait.WaitForRequiredResources(kclient); err != nil {
+ log.WithError(err).Errorf("error waiting for required pods to become ready")
return err
}
// Sleep to give status time to appear
- s := status.NewStatusOptions(&status.StatusInput{Watch: o.watch, IntervalSeconds: o.watchInterval})
- time.Sleep(s.GetIntervalSeconds())
+ time.Sleep(status.StatusInterval)
- err = s.WaitForStatusReport(cmd.Context(), sclient)
- if err != nil {
+ // Retrieve the first status and print it, finishing when --watch is not set.
+ s := status.NewStatusOptions(&status.StatusInput{Watch: o.watch})
+ if err := s.WaitForStatusReport(cmd.Context(), sclient); err != nil {
log.WithError(err).Error("error retrieving aggregator status")
return err
}
- err = s.Update(sclient)
- if err != nil {
+ if err := s.Update(sclient); err != nil {
log.WithError(err).Error("error retrieving update")
return err
}
- err = s.Print(cmd, sclient)
- if err != nil {
+ if err := s.Print(cmd, sclient); err != nil {
log.WithError(err).Error("error showing status")
return err
}
@@ -157,18 +155,12 @@ func NewCmdRun() *cobra.Command {
cmd.Flags().StringVar(&o.mode, "mode", defaultRunMode, "Run mode: Availble: regular, upgrade")
cmd.Flags().StringVar(&o.upgradeImage, "upgrade-to-image", defaultUpgradeImage, "Target OpenShift Release Image. Example: oc adm release info 4.11.18 -o jsonpath={.image}")
cmd.Flags().StringVar(&o.imageRepository, "image-repository", "", "Image repository containing required images test environment. Example: openshift-provider-cert-tool --mirror-repository mirror.repository.net/ocp-cert")
+
cmd.Flags().IntVar(&o.timeout, "timeout", defaultRunTimeoutSeconds, "Execution timeout in seconds")
cmd.Flags().BoolVarP(&o.watch, "watch", "w", defaultRunWatchFlag, "Keep watch status after running")
- cmd.Flags().IntVarP(&o.watchInterval, "watch-interval", "", status.DefaultStatusIntervalSeconds, "Interval to watch the status and print in the stdout")
-
- // Flags use for maitainance / development / CI. Those are intentionally hidden.
- cmd.Flags().StringArrayVar(o.plugins, "plugin", nil, "Override default conformance plugins to use. Can be used multiple times. (default plugins can be reviewed with assets subcommand)")
- cmd.Flags().BoolVar(&o.dedicated, "dedicated", defaultDedicatedFlag, "Setup plugins to run in dedicated test environment.")
- cmd.Flags().StringVar(&o.devCount, "dev-count", "0", "Developer Mode only: run small random set of tests. Default: 0 (disabled)")
- hideOptionalFlags(cmd, "plugin")
- hideOptionalFlags(cmd, "dedicated")
- hideOptionalFlags(cmd, "dev-count")
+ cmd.Flags().StringVar(&o.devCount, "devel-limit-tests", "0", "Developer Mode only: run small random set of tests. Default: 0 (disabled)")
+ cmd.Flags().BoolVar(&o.devSkipChecks, "devel-skip-checks", false, "Developer Mode only: skip checks")
// Override build-int images use by plugins/steps in the standard workflow.
cmd.Flags().StringVar(&o.sonobuoyImage, "sonobuoy-image", pkg.GetSonobuoyImage(), "Image override for the Sonobuoy worker and aggregator")
@@ -177,9 +169,19 @@ func NewCmdRun() *cobra.Command {
cmd.Flags().StringVar(&o.MustGatherMonitoringImage, "must-gather-monitoring-image", pkg.GetMustGatherMonitoring(), "Image containing the must-gather monitoring plugin.")
// devel can be override by quay.io/opct/openshift-tests:devel
- // opct run --devel-skip-checks=true --plugins-image=plugin-openshift-tests:v0.0.0-devel-8ff93d9 --openshift-tests-image=quay.io/opct/openshift-tests:devel
+ // opct run --devel-skip-checks=true --plugins-image=plugin-openshift-tests:v0.0.0-devel-8ff93d9 --openshift-tests-image=quay.io/opct/openshift-tests:devel
cmd.Flags().StringVar(&o.OpenshiftTestsImage, "openshift-tests-image", pkg.OpenShiftTestsImage, "Developer Mode only: openshift-tests image override")
+ // Flags used for maintenance / development / CI. Those are intentionally hidden.
+ cmd.Flags().StringArrayVar(o.plugins, "plugin", nil, "Override default conformance plugins to use. Can be used multiple times. (default plugins can be reviewed with assets subcommand)")
+ cmd.Flags().BoolVar(&o.dedicated, "dedicated", defaultDedicatedFlag, "Setup plugins to run in dedicated test environment.")
+ cmd.Flags().StringVar(&o.devCount, "dev-count", "0", "Developer Mode only: run small random set of tests. Default: 0 (disabled)")
+
+ hideOptionalFlags(cmd, "plugin")
+ hideOptionalFlags(cmd, "dedicated")
+ // hideOptionalFlags(cmd, "devel-limit-tests")
+ // hideOptionalFlags(cmd, "devel-skip-checks")
+
hideOptionalFlags(cmd, "sonobuoy-image")
hideOptionalFlags(cmd, "plugins-image")
hideOptionalFlags(cmd, "collector-image")
@@ -198,18 +200,22 @@ func (r *RunOptions) PreRunCheck(kclient kubernetes.Interface) error {
if err != nil {
return err
}
- configClient, err := coclient.NewForConfig(restConfig)
+ oc, err := coclient.NewForConfig(restConfig)
if err != nil {
return err
}
// Check if Cluster Operators are stable
- errs := checkClusterOperators(configClient)
- if errs != nil {
+ if errs := checkClusterOperators(oc); errs != nil {
+ errorMessages := []string{}
for _, err := range errs {
- log.Warn(err)
+ errorMessages = append(errorMessages, err.Error())
+ }
+ log.Errorf("Preflight checks failed: operators are not in a ready state, check the status with 'oc get clusteroperator': %v", errorMessages)
+ if !r.devSkipChecks {
+ return errors.New("All Cluster Operators must be available, not progressing, and not degraded before validation can run")
}
- return errors.New("All Cluster Operators must be available, not progressing, and not degraded before validation can run")
+ log.Warnf("DEVEL MODE, THIS IS NOT SUPPORTED: Skipping Cluster Operator checks: %v", errs)
}
// Get ConfigV1 client for Cluster Operators
@@ -221,10 +227,16 @@ func (r *RunOptions) PreRunCheck(kclient kubernetes.Interface) error {
// Check if Registry is in managed state or exit
managed, err := checkRegistry(irClient)
if err != nil {
- return err
+ if !r.devSkipChecks {
+ return err
+ }
+ log.Warnf("DEVEL MODE, THIS IS NOT SUPPORTED: Skipping Image registry check: %v", err)
}
if !managed {
- return errors.New("OpenShift Image Registry must deployed before validation can run")
+ if !r.devSkipChecks {
+ return errors.New("OpenShift Image Registry must be deployed before validation can run")
+ }
+ log.Warn("DEVEL MODE, THIS IS NOT SUPPORTED: Skipping unmanaged image registry check")
}
if r.dedicated {
@@ -236,7 +248,10 @@ func (r *RunOptions) PreRunCheck(kclient kubernetes.Interface) error {
return errors.Wrap(err, "error getting the Node list")
}
if len(nodes.Items) == 0 {
- return fmt.Errorf("missing dedicated node. Set the label 'node-role.kubernetes.io/tests=\"\"' to a node and try again")
+ errMsg := fmt.Sprintf("missing dedicated node. Set the label %q to a node and try again", pkg.DedicatedNodeRoleLabelSelector)
+ errMsg = fmt.Sprintf("%s\nCheck the documentation[1] or run 'opct adm setup-node' to set the label and taints", errMsg)
+ errMsg = fmt.Sprintf("%s\n[1] https://redhat-openshift-ecosystem.github.io/provider-certification-tool/user/#standard-env-setup-node", errMsg)
+ return errors.New(errMsg)
}
if len(nodes.Items) > 2 {
return fmt.Errorf("too many nodes with label %q. Set the label to only one node and try again", pkg.DedicatedNodeRoleLabelSelector)
@@ -289,17 +304,17 @@ func (r *RunOptions) PreRunCheck(kclient kubernetes.Interface) error {
apiVersion: machineconfiguration.openshift.io/v1
kind: MachineConfigPool
metadata:
-name: opct
+ name: opct
spec:
-machineConfigSelector:
-matchExpressions:
- - key: machineconfiguration.openshift.io/role,
- operator: In,
- values: [worker,opct]
-nodeSelector:
-matchLabels:
- node-role.kubernetes.io/tests: ""
-paused: true
+ machineConfigSelector:
+ matchExpressions:
+ - key: machineconfiguration.openshift.io/role,
+ operator: In,
+ values: [worker,opct]
+ nodeSelector:
+ matchLabels:
+ node-role.kubernetes.io/tests: ""
+ paused: true
EOF`)
}
if len(poolList.Items) == 0 {
@@ -335,8 +350,9 @@ func (r *RunOptions) PreRunSetup(kclient kubernetes.Interface) error {
namespace := &v1.Namespace{
ObjectMeta: metav1.ObjectMeta{
- Name: pkg.CertificationNamespace,
- Labels: pkg.SonobuoyDefaultLabels,
+ Name: pkg.CertificationNamespace,
+ Labels: pkg.SonobuoyDefaultLabels,
+ Annotations: make(map[string]string),
},
}
@@ -352,8 +368,8 @@ func (r *RunOptions) PreRunSetup(kclient kubernetes.Interface) error {
}
namespace.Annotations = map[string]string{
- "scheduler.alpha.kubernetes.io/defaultTolerations": string(tolerations),
"openshift.io/node-selector": pkg.DedicatedNodeRoleLabelSelector,
+ "scheduler.alpha.kubernetes.io/defaultTolerations": string(tolerations),
}
}
@@ -500,7 +516,10 @@ func (r *RunOptions) Run(kclient kubernetes.Interface, sclient sonobuoyclient.In
for _, err := range errs {
log.Error(err)
}
- return errors.New("preflight checks failed")
+ if !r.devSkipChecks {
+ return errors.New("preflight checks failed")
+ }
+ log.Warn("DEVEL MODE, THIS IS NOT SUPPORTED: Skipping preflight checks")
}
// Create version information ConfigMap
@@ -540,12 +559,11 @@ func (r *RunOptions) Run(kclient kubernetes.Interface, sclient sonobuoyclient.In
}
if r.plugins == nil || len(*r.plugins) == 0 {
- // Use default built-in plugins
log.Debugf("Loading default plugins")
var err error
manifests, err = loadPluginManifests(r)
if err != nil {
- return nil
+ return err
}
} else {
// User provided their own plugins at command line
diff --git a/pkg/status/printer.go b/pkg/status/printer.go
index b3cc832c..e72b9762 100644
--- a/pkg/status/printer.go
+++ b/pkg/status/printer.go
@@ -13,6 +13,7 @@ import (
type PrintableStatus struct {
GlobalStatus string
CurrentTime string
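+ // ElapsedTime is the time elapsed since the status watcher started.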
+ ElapsedTime string
PluginStatuses []PrintablePluginStatus
}
@@ -24,13 +25,13 @@ type PrintablePluginStatus struct {
Message string
}
-var runningStatusTemplate = `{{.CurrentTime}}> Global Status: {{.GlobalStatus}}
+var runningStatusTemplate = `{{.CurrentTime}}|{{.ElapsedTime}}> Global Status: {{.GlobalStatus}}
{{printf "%-34s | %-10s | %-10s | %-25s | %-50s" "JOB_NAME" "STATUS" "RESULTS" "PROGRESS" "MESSAGE"}}{{range $index, $pl := .PluginStatuses}}
{{printf "%-34s | %-10s | %-10s | %-25s | %-50s" $pl.Name $pl.Status $pl.Result $pl.Progress $pl.Message}}{{end}}
`
-func PrintRunningStatus(s *aggregation.Status) error {
- ps := getPrintableRunningStatus(s)
+func PrintRunningStatus(s *aggregation.Status, start time.Time) error {
+ ps := getPrintableRunningStatus(s, start)
statusTemplate, err := template.New("statusTemplate").Parse(runningStatusTemplate)
if err != nil {
return err
@@ -40,10 +41,12 @@ func PrintRunningStatus(s *aggregation.Status) error {
return err
}
-func getPrintableRunningStatus(s *aggregation.Status) PrintableStatus {
+func getPrintableRunningStatus(s *aggregation.Status, start time.Time) PrintableStatus {
+ now := time.Now()
ps := PrintableStatus{
GlobalStatus: s.Status,
- CurrentTime: time.Now().Format(time.RFC1123),
+ CurrentTime: now.Format(time.RFC1123),
+ ElapsedTime: now.Sub(start).String(),
}
for _, pl := range s.Plugins {
@@ -60,6 +63,9 @@ func getPrintableRunningStatus(s *aggregation.Status) PrintableStatus {
}
} else if pl.ResultStatus == "" {
message = "waiting for post-processor..."
+ if pl.Status != "" {
+ message = pl.Status
+ }
} else {
passCount := pl.ResultStatusCounts["passed"]
failedCount := pl.ResultStatusCounts["failed"]
diff --git a/pkg/status/printer_test.go b/pkg/status/printer_test.go
index 4053fa49..0a30a12f 100644
--- a/pkg/status/printer_test.go
+++ b/pkg/status/printer_test.go
@@ -5,6 +5,7 @@ import (
"html/template"
"os"
"testing"
+ "time"
"github.com/vmware-tanzu/sonobuoy/pkg/plugin"
"github.com/vmware-tanzu/sonobuoy/pkg/plugin/aggregation"
@@ -29,8 +30,8 @@ func Test_PrintStatus(t *testing.T) {
},
Status: "running",
}
-
- ps := getPrintableRunningStatus(a)
+ now := time.Now()
+ ps := getPrintableRunningStatus(a, now)
tmpl, err := template.New("test").Parse(runningStatusTemplate)
if err != nil {
diff --git a/pkg/status/status.go b/pkg/status/status.go
index 181b9071..95e3cbcf 100644
--- a/pkg/status/status.go
+++ b/pkg/status/status.go
@@ -21,12 +21,14 @@ import (
const (
DefaultStatusIntervalSeconds = 10
+ StatusInterval = time.Second * 10
StatusRetryLimit = 10
)
// StatusOptions is the interface to store input options to
// interface with Status command.
type StatusOptions struct {
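+ // StartTime records when the status options were created; used to report elapsed time.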
+ StartTime time.Time
Latest *aggregation.Status
watch bool
shownPostProcessMsg bool
@@ -34,8 +36,6 @@ type StatusOptions struct {
waitInterval time.Duration
}
-// StatusInput is the interface to input options when
-// creating status object.
type StatusInput struct {
Watch bool
IntervalSeconds int
@@ -45,6 +45,7 @@ func NewStatusOptions(in *StatusInput) *StatusOptions {
s := &StatusOptions{
watch: in.Watch,
waitInterval: time.Second * DefaultStatusIntervalSeconds,
+ StartTime: time.Now(),
}
if in.IntervalSeconds != 0 {
s.waitInterval = time.Duration(in.IntervalSeconds) * time.Second
@@ -52,10 +53,6 @@ func NewStatusOptions(in *StatusInput) *StatusOptions {
return s
}
-func (s *StatusOptions) GetIntervalSeconds() time.Duration {
- return s.waitInterval
-}
-
func NewCmdStatus() *cobra.Command {
o := NewStatusOptions(&StatusInput{Watch: false})
@@ -178,7 +175,7 @@ func (s *StatusOptions) WaitForStatusReport(ctx context.Context, sclient sonobuo
}
tries++
- log.Warnf("waiting %ds to retry", int(s.waitInterval.Seconds()))
+ log.Warnf("waiting %ds to retry", int(StatusInterval.Seconds()))
return false, nil
})
return err
@@ -210,18 +207,18 @@ func (s *StatusOptions) Print(cmd *cobra.Command, sclient sonobuoyclient.Interfa
func (s *StatusOptions) doPrint() (complete bool, err error) {
switch s.GetStatus() {
case aggregation.RunningStatus:
- err := PrintRunningStatus(s.Latest)
+ err := PrintRunningStatus(s.Latest, s.StartTime)
if err != nil {
return false, err
}
case aggregation.PostProcessingStatus:
if !s.watch {
- err := PrintRunningStatus(s.Latest)
+ err := PrintRunningStatus(s.Latest, s.StartTime)
if err != nil {
return false, err
}
} else if !s.shownPostProcessMsg {
- err := PrintRunningStatus(s.Latest)
+ err := PrintRunningStatus(s.Latest, s.StartTime)
if err != nil {
return false, err
}
@@ -229,7 +226,7 @@ func (s *StatusOptions) doPrint() (complete bool, err error) {
s.shownPostProcessMsg = true
}
case aggregation.CompleteStatus:
- err := PrintRunningStatus(s.Latest)
+ err := PrintRunningStatus(s.Latest, s.StartTime)
if err != nil {
return true, err
}
diff --git a/pkg/types.go b/pkg/types.go
index 16cd9d8e..7502b163 100644
--- a/pkg/types.go
+++ b/pkg/types.go
@@ -9,8 +9,8 @@ import (
const (
PrivilegedClusterRole = "opct-scc-privileged"
PrivilegedClusterRoleBinding = "opct-scc-privileged"
- CertificationNamespace = "openshift-provider-certification"
- VersionInfoConfigMapName = "openshift-provider-certification-version"
+ CertificationNamespace = "opct"
+ VersionInfoConfigMapName = "opct-version"
PluginsVarsConfigMapName = "plugins-config"
DedicatedNodeRoleLabel = "node-role.kubernetes.io/tests"
DedicatedNodeRoleLabelSelector = "node-role.kubernetes.io/tests="
diff --git a/test/testdata/plugins/sample-v0-ok.yaml b/test/testdata/plugins/sample-v0-ok.yaml
index 94c7ad24..d3569e61 100644
--- a/test/testdata/plugins/sample-v0-ok.yaml
+++ b/test/testdata/plugins/sample-v0-ok.yaml
@@ -1,6 +1,7 @@
podSpec:
restartPolicy: Never
serviceAccountName: sonobuoy-serviceaccount
+ priorityClassName: system-node-critical
volumes:
- name: shared
emptyDir: {}
diff --git a/test/testdata/plugins/sample-v1-fail.yaml b/test/testdata/plugins/sample-v1-fail.yaml
index c1ba30de..7d85590d 100644
--- a/test/testdata/plugins/sample-v1-fail.yaml
+++ b/test/testdata/plugins/sample-v1-fail.yaml
@@ -1,6 +1,7 @@
podSpec:
restartPolicy: Never
serviceAccountName: sonobuoy-serviceaccount
+ priorityClassName: system-node-critical
volumes:
- name: shared
emptyDir: {}
diff --git a/test/testdata/templates/plugins/sample.yaml b/test/testdata/templates/plugins/sample.yaml
index 303cb8dc..893b2a50 100644
--- a/test/testdata/templates/plugins/sample.yaml
+++ b/test/testdata/templates/plugins/sample.yaml
@@ -1,6 +1,7 @@
podSpec:
restartPolicy: Never
serviceAccountName: sonobuoy-serviceaccount
+ priorityClassName: system-node-critical
volumes:
- name: shared
emptyDir: {}