diff --git a/test/integration/go.mod b/test/integration/go.mod index d3590e2c6..7e16712d7 100644 --- a/test/integration/go.mod +++ b/test/integration/go.mod @@ -8,29 +8,94 @@ require ( github.com/onsi/ginkgo/v2 v2.25.1 github.com/onsi/gomega v1.38.2 github.com/openshift-eng/openshift-tests-extension v0.0.0-20250916161632-d81c09058835 + github.com/openshift/lvm-operator/v4 v4.20.0 github.com/spf13/cobra v1.9.1 + k8s.io/api v0.34.1 + k8s.io/apimachinery v0.34.1 + k8s.io/client-go v0.34.1 + k8s.io/kubernetes v1.34.1 + sigs.k8s.io/controller-runtime v0.22.4 ) -replace github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 +replace ( + github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 + k8s.io/kubernetes => github.com/openshift/kubernetes v1.30.1-0.20251017123720-96593f323733 + k8s.io/pod-security-admission => github.com/openshift/kubernetes/staging/src/k8s.io/pod-security-admission v0.0.0-20251017123720-96593f323733 +) require ( - github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df // indirect + cel.dev/expr v0.24.0 // indirect + github.com/antlr4-go/antlr/v4 v4.13.1 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/blang/semver/v4 v4.0.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/emicklei/go-restful/v3 v3.12.2 // indirect + github.com/evanphx/json-patch/v5 v5.9.11 // indirect + github.com/fsnotify/fsnotify v1.9.0 // indirect + github.com/fxamacker/cbor/v2 v2.9.0 // indirect github.com/go-logr/logr v1.4.3 // indirect + github.com/go-openapi/jsonpointer v0.21.1 // indirect + github.com/go-openapi/jsonreference v0.21.0 // indirect + github.com/go-openapi/swag v0.23.1 // indirect github.com/go-task/slim-sprig/v3 v3.0.0 // indirect - github.com/google/cel-go v0.17.8 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/google/btree v1.1.3 // indirect + github.com/google/cel-go v0.26.0 // indirect + github.com/google/gnostic-models v0.7.0 // indirect github.com/google/go-cmp v0.7.0 // indirect github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/mailru/easyjson v0.9.0 // indirect + github.com/moby/spdystream v0.5.0 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect + github.com/openshift/api v0.0.0-20251015095338-264e80a2b6e7 // indirect + github.com/openshift/library-go v0.0.0-20251015151611-6fc7a74b67c5 // indirect github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/prometheus/client_golang v1.22.0 // indirect + github.com/prometheus/client_model v0.6.2 // indirect + github.com/prometheus/common v0.64.0 // indirect + github.com/prometheus/procfs v0.16.1 // indirect github.com/spf13/pflag v1.0.7 // indirect - github.com/stoewer/go-strcase v1.2.0 // indirect + 
github.com/stoewer/go-strcase v1.3.0 // indirect + github.com/x448/float16 v0.8.4 // indirect + go.opentelemetry.io/otel v1.35.0 // indirect + go.opentelemetry.io/otel/trace v1.35.0 // indirect + go.yaml.in/yaml/v2 v2.4.2 // indirect go.yaml.in/yaml/v3 v3.0.4 // indirect - golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc // indirect + golang.org/x/exp v0.0.0-20250305212735-054e65f0b394 // indirect golang.org/x/net v0.43.0 // indirect + golang.org/x/oauth2 v0.30.0 // indirect + golang.org/x/sync v0.17.0 // indirect golang.org/x/sys v0.36.0 // indirect + golang.org/x/term v0.35.0 // indirect golang.org/x/text v0.29.0 // indirect + golang.org/x/time v0.11.0 // indirect golang.org/x/tools v0.36.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240924160255-9d4c2d233b61 // indirect + gomodules.xyz/jsonpatch/v2 v2.5.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20250428153025-10db94c68c34 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250428153025-10db94c68c34 // indirect google.golang.org/protobuf v1.36.7 // indirect + gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + k8s.io/apiextensions-apiserver v0.34.1 // indirect + k8s.io/apiserver v0.34.1 // indirect + k8s.io/component-base v0.34.1 // indirect + k8s.io/klog/v2 v2.130.1 // indirect + k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b // indirect + k8s.io/pod-security-admission v0.0.0 // indirect + k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 // indirect + sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect + sigs.k8s.io/randfill v1.0.0 // indirect + sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect + sigs.k8s.io/yaml v1.6.0 // indirect ) diff --git a/test/integration/go.sum b/test/integration/go.sum index 997005ed4..e666e22a2 100644 --- a/test/integration/go.sum +++ b/test/integration/go.sum @@ -1,25 +1,105 @@ -github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df h1:7RFfzj4SSt6nnvCPbCqijJi1nWCd+TqAT3bYCStRC18= -github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df/go.mod h1:pSwJ0fSY5KhvocuWSx4fz3BA8OrA1bQn+K1Eli3BRwM= +cel.dev/expr v0.24.0 h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY= +cel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw= +github.com/antlr4-go/antlr/v4 v4.13.1 h1:SqQKkuVZ+zWkMMNkjy5FZe5mr5WURWnlpmOuzYWrPrQ= +github.com/antlr4-go/antlr/v4 v4.13.1/go.mod h1:GKmUxMtwp6ZgGwZSva4eWPC5mS6vUAmOABFgjdkM7Nw= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= +github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= github.com/davecgh/go-spew v1.1.0/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU= +github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/evanphx/json-patch v0.5.2 h1:xVCHIVMUu1wtM/VkR9jVZ45N3FhZfYMMYGorLCR8P3k= +github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= +github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU= +github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM= +github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= +github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM= +github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= +github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= +github.com/go-openapi/jsonpointer v0.21.1 h1:whnzv/pNXtK2FbX/W9yJfRmE2gsmkfahjMKB0fZvcic= +github.com/go-openapi/jsonpointer v0.21.1/go.mod h1:50I1STOfbY1ycR8jGz8DaMeLCdXiI6aDteEdRNNzpdk= +github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= +github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4= +github.com/go-openapi/swag v0.23.1 h1:lpsStH0n2ittzTnbaSloVZLuB5+fvSY/+hnagBjSNZU= +github.com/go-openapi/swag v0.23.1/go.mod h1:STZs8TbRvEQQKUA+JZNAm3EWlgaOBGpyFDqQnDHMef0= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= -github.com/google/cel-go v0.17.8 h1:j9m730pMZt1Fc4oKhCLUHfjj6527LuhYcYw0Rl8gqto= -github.com/google/cel-go v0.17.8/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= +github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/google/cel-go v0.26.0 h1:DPGjXackMpJWH680oGY4lZhYjIameYmR+/6RBdDGmaI= +github.com/google/cel-go v0.26.0/go.mod h1:A9O8OU9rdvrK5MQyrqfIxo1a0u4g3sF8KB6PUIaryMM= +github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo= +github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= 
+github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 h1:BHT72Gu3keYf3ZEu2J0b1vyeLSOYI8bm5wbJM/8yDe8= github.com/google/pprof v0.0.0-20250403155104-27863c87afa6/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo= +github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4= +github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU= +github.com/moby/spdystream v0.5.0 h1:7r0J1Si3QO/kjRitvSLVVFUjxMEb/YLj6S9FF62JBCU= +github.com/moby/spdystream v0.5.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= +github.com/mxk/go-flowrate 
v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A= github.com/onsi/gomega v1.38.2/go.mod h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k= github.com/openshift-eng/openshift-tests-extension v0.0.0-20250916161632-d81c09058835 h1:rkqIIfdYYkasXbF2XKVgh/3f1mhjSQK9By8WtVMgYo8= github.com/openshift-eng/openshift-tests-extension v0.0.0-20250916161632-d81c09058835/go.mod h1:6gkP5f2HL0meusT0Aim8icAspcD1cG055xxBZ9yC68M= +github.com/openshift/api v0.0.0-20251015095338-264e80a2b6e7 h1:Ot2fbEEPmF3WlPQkyEW/bUCV38GMugH/UmZvxpWceNc= +github.com/openshift/api v0.0.0-20251015095338-264e80a2b6e7/go.mod h1:d5uzF0YN2nQQFA0jIEWzzOZ+edmo6wzlGLvx5Fhz4uY= +github.com/openshift/kubernetes v1.30.1-0.20251017123720-96593f323733 h1:Mpab1CmJPLVWGB0CNGoWnup/NScvv55MVPe94c8JgUk= +github.com/openshift/kubernetes v1.30.1-0.20251017123720-96593f323733/go.mod h1:w3+IfrXNp5RosdDXg3LB55yijJqR/FwouvVntYHQf0o= +github.com/openshift/kubernetes/staging/src/k8s.io/pod-security-admission v0.0.0-20251017123720-96593f323733 h1:2vQPmqKwQU+jpqm7Iv3EU3k8DYYNqZwN/A1AdydMYpc= +github.com/openshift/kubernetes/staging/src/k8s.io/pod-security-admission v0.0.0-20251017123720-96593f323733/go.mod h1:yuCdx9wLndqpNhmsYZh48wtbgrqc8ql1191ke9zIOfg= +github.com/openshift/library-go v0.0.0-20251015151611-6fc7a74b67c5 h1:bANtDc8SgetSK4nQehf59x3+H9FqVJCprgjs49/OTg0= +github.com/openshift/library-go v0.0.0-20251015151611-6fc7a74b67c5/go.mod h1:OlFFws1AO51uzfc48MsStGE4SFMWlMZD0+f5a/zCtKI= +github.com/openshift/lvm-operator/v4 v4.20.0 h1:rENUiQPyXnQahFkAwsecpfEGqEDKSSRCkXosX3nou7w= +github.com/openshift/lvm-operator/v4 v4.20.0/go.mod h1:BBa41rrG7hNmJm4qiBFppfArIX6J18aS8k1l/qfqAeM= github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 h1:AKx/w1qpS8We43bsRgf8Nll3CGlDHpr/WAXvuedTNZI= github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -27,38 +107,139 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= +github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= +github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= +github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= +github.com/prometheus/common v0.64.0 h1:pdZeA+g617P7oGv1CzdTzyeShxAGrTBsolKNOLQPGO4= +github.com/prometheus/common v0.64.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8= +github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg= +github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= 
github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/pflag v1.0.7 h1:vN6T9TfwStFPFM5XzjsvmzZkLuaLX+HS+0SeFLRgU6M= github.com/spf13/pflag v1.0.7/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU= -github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= +github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs= +github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= -github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ= +go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y= +go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs= +go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI= +go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU= go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= -golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc 
h1:mCRnTeVUjcrhlRmO0VK8a6k6Rrf6TF9htwo2pJVSjIU= -golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/exp v0.0.0-20250305212735-054e65f0b394 h1:nDVHiLt8aIbd/VzvPWN6kSOPE7+F/fNFDSXLVYkE/Iw= +golang.org/x/exp v0.0.0-20250305212735-054e65f0b394/go.mod h1:sIifuuw/Yco/y6yb6+bDNfyeQ/MdPUy/hKEMYQV17cM= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE= golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg= +golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= +golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug= +golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k= golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/term v0.35.0 h1:bZBVKBudEyhRcajGcNc3jIfWPqV4y/Kt2XcoigOWtDQ= +golang.org/x/term v0.35.0/go.mod h1:TPGtkTLesOwf2DE8CgVYiZinHAOuy5AYUYT1lENIZnA= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.29.0 h1:1neNs90w9YzJ9BocxfsQNHKuAT4pkghyXc4nhZ6sJvk= golang.org/x/text v0.29.0/go.mod h1:7MhJOA9CD2qZyOKYazxdYMF85OwPdEr9jTtBpO7ydH4= +golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0= +golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools 
v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.36.0 h1:kWS0uv/zsvHEle1LbV5LE8QujrxB3wfQyxHfhOk0Qkg= golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s= -google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157 h1:7whR9kGa5LUwFtpLm2ArCEejtnxlGeLbAyjFY8sGNFw= -google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157/go.mod h1:99sLkeliLXfdj2J75X3Ho+rrVCaJze0uwN7zDDkjPVU= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240924160255-9d4c2d233b61 h1:N9BgCIAUvn/M+p4NJccWPWb3BWh88+zyL0ll9HgbEeM= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240924160255-9d4c2d233b61/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gomodules.xyz/jsonpatch/v2 v2.5.0 h1:JELs8RLM12qJGXU4u/TO3V25KW8GreMKl9pdkk14RM0= +gomodules.xyz/jsonpatch/v2 v2.5.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= +google.golang.org/genproto/googleapis/api v0.0.0-20250428153025-10db94c68c34 h1:0PeQib/pH3nB/5pEmFeVQJotzGohV0dq4Vcp09H5yhE= +google.golang.org/genproto/googleapis/api v0.0.0-20250428153025-10db94c68c34/go.mod h1:0awUlEkap+Pb1UMeJwJQQAdJQrt3moU7J2moTy69irI= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250428153025-10db94c68c34 h1:h6p3mQqrmT1XkHVTfzLdNz1u7IhINeZkz67/xTbOuWs= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250428153025-10db94c68c34/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= google.golang.org/protobuf v1.36.7 h1:IgrO7UwFQGJdRNXH/sQux4R1Dj1WAKcLElzeeRaXV2A= google.golang.org/protobuf v1.36.7/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= +gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +k8s.io/api v0.34.1 h1:jC+153630BMdlFukegoEL8E/yT7aLyQkIVuwhmwDgJM= +k8s.io/api v0.34.1/go.mod h1:SB80FxFtXn5/gwzCoN6QCtPD7Vbu5w2n1S0J5gFfTYk= +k8s.io/apiextensions-apiserver v0.34.1 h1:NNPBva8FNAPt1iSVwIE0FsdrVriRXMsaWFMqJbII2CI= +k8s.io/apiextensions-apiserver v0.34.1/go.mod h1:hP9Rld3zF5Ay2Of3BeEpLAToP+l4s5UlxiHfqRaRcMc= +k8s.io/apimachinery v0.34.1 
h1:dTlxFls/eikpJxmAC7MVE8oOeP1zryV7iRyIjB0gky4= +k8s.io/apimachinery v0.34.1/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw= +k8s.io/apiserver v0.34.1 h1:U3JBGdgANK3dfFcyknWde1G6X1F4bg7PXuvlqt8lITA= +k8s.io/apiserver v0.34.1/go.mod h1:eOOc9nrVqlBI1AFCvVzsob0OxtPZUCPiUJL45JOTBG0= +k8s.io/client-go v0.34.1 h1:ZUPJKgXsnKwVwmKKdPfw4tB58+7/Ik3CrjOEhsiZ7mY= +k8s.io/client-go v0.34.1/go.mod h1:kA8v0FP+tk6sZA0yKLRG67LWjqufAoSHA2xVGKw9Of8= +k8s.io/component-base v0.34.1 h1:v7xFgG+ONhytZNFpIz5/kecwD+sUhVE6HU7qQUiRM4A= +k8s.io/component-base v0.34.1/go.mod h1:mknCpLlTSKHzAQJJnnHVKqjxR7gBeHRv0rPXA7gdtQ0= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b h1:MloQ9/bdJyIu9lb1PzujOPolHyvO06MXG5TUIj2mNAA= +k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b/go.mod h1:UZ2yyWbFTpuhSbFhv24aGNOdoRdJZgsIObGBUaYVsts= +k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y= +k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/controller-runtime v0.22.4 h1:GEjV7KV3TY8e+tJ2LCTxUTanW4z/FmNB7l327UfMq9A= +sigs.k8s.io/controller-runtime v0.22.4/go.mod h1:+QX1XUpTXN4mLoblf4tqr5CQcyHPAki2HLXqQMY6vh8= +sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE= +sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= +sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= +sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco= +sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE= +sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= +sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4= diff --git a/test/integration/qe-tests/lvms.go b/test/integration/qe-tests/lvms.go new file mode 100644 index 000000000..3e6524aa8 --- /dev/null +++ b/test/integration/qe-tests/lvms.go @@ -0,0 +1,1445 @@ +// NOTE: This test suite currently only support SNO env & rely on some pre-defined steps in CI pipeline which includes, +// 1. Installing LVMS operator +// 2. Adding blank disk/device to worker node to be consumed by LVMCluster +// 3. Create resources like OperatorGroup, Subscription, etc. to configure LVMS operator +// 4. 
Create LVMCluster resource with a single volumeGroup named 'vg1'; multiple VGs could be added in the future + Also, these tests use the preset lvms storageClass="lvms-vg1", volumeSnapshotClassName="lvms-vg1" + +package tests + +import ( + "context" + "fmt" + "os/exec" + "strings" + "time" + + g "github.com/onsi/ginkgo/v2" + o "github.com/onsi/gomega" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + storagev1 "k8s.io/api/storage/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + e2e "k8s.io/kubernetes/test/e2e/framework" +) + +var ( + tc = NewTestClient("lvms") + testNamespace string + storageClass = "lvms-vg1" + volumeGroup = "vg1" + lvmsNamespace = "openshift-lvm-storage" +) + +var _ = g.BeforeSuite(func() { + // Verify LVMS operator is installed + checkLvmsOperatorInstalled(tc) +}) + +var _ = g.Describe("[sig-storage] STORAGE", func() { + g.BeforeEach(func() { + + // Create a unique test namespace for each test using timestamp for uniqueness + testNamespace = fmt.Sprintf("lvms-test-%d", time.Now().UnixNano()) + ns := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: testNamespace, + Labels: map[string]string{ + "pod-security.kubernetes.io/enforce": "privileged", + "pod-security.kubernetes.io/audit": "privileged", + "pod-security.kubernetes.io/warn": "privileged", + }, + }, + } + _, err := tc.Clientset.CoreV1().Namespaces().Create(context.TODO(), ns, metav1.CreateOptions{}) + o.Expect(err).NotTo(o.HaveOccurred()) + }) + + g.AfterEach(func() { + // Clean up test namespace + if testNamespace != "" { + err := tc.Clientset.CoreV1().Namespaces().Delete(context.TODO(), testNamespace, metav1.DeleteOptions{}) + if err != nil { + e2e.Logf("Warning: failed to delete namespace %s: %v\n", testNamespace, err) + } + } + }) + + // original author: rdeore@redhat.com; Ported by Claude Code + // OCP-61585-[LVMS] [Filesystem] [Clone] a pvc with the same capacity should be successful + g.It("Author:rdeore-LEVEL0-Critical-61585-[LVMS] [Filesystem] [Clone] a pvc with the same capacity should be successful", g.Label("SNO"), func() { + g.By("Create a PVC with the lvms csi storageclass") + pvcOri := &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pvc-original", + Namespace: testNamespace, + }, + Spec: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("1Gi"), + }, + }, + StorageClassName: &storageClass, + }, + } + _, err := tc.Clientset.CoreV1().PersistentVolumeClaims(testNamespace).Create(context.TODO(), pvcOri, metav1.CreateOptions{}) + o.Expect(err).NotTo(o.HaveOccurred()) + + g.By("Create pod with the created pvc (required for WaitForFirstConsumer binding mode)") + podOri := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod-original", + Namespace: testNamespace, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "test-container", + Image: "registry.redhat.io/rhel8/support-tools:latest", + Command: []string{"/bin/sh", "-c", "sleep 3600"}, + VolumeMounts: []corev1.VolumeMount{ + { + Name: "test-volume", + MountPath: "/mnt/test", + }, + }, + }, + }, + Volumes: []corev1.Volume{ + { + Name: "test-volume", + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: "test-pvc-original", + }, + }, + }, + }, + }, + } + _, err = 
tc.Clientset.CoreV1().Pods(testNamespace).Create(context.TODO(), podOri, metav1.CreateOptions{}) + o.Expect(err).NotTo(o.HaveOccurred()) + + g.By("Wait for PVC to be bound (happens after pod is scheduled)") + o.Eventually(func() corev1.PersistentVolumeClaimPhase { + pvc, err := tc.Clientset.CoreV1().PersistentVolumeClaims(testNamespace).Get(context.TODO(), "test-pvc-original", metav1.GetOptions{}) + if err != nil { + return corev1.ClaimPending + } + return pvc.Status.Phase + }, 3*time.Minute, 5*time.Second).Should(o.Equal(corev1.ClaimBound)) + + g.By("Wait for pod to be running") + o.Eventually(func() corev1.PodPhase { + pod, err := tc.Clientset.CoreV1().Pods(testNamespace).Get(context.TODO(), "test-pod-original", metav1.GetOptions{}) + if err != nil { + return corev1.PodPending + } + return pod.Status.Phase + }, 3*time.Minute, 5*time.Second).Should(o.Equal(corev1.PodRunning)) + + g.By("Write file to volume") + + g.By("Create a clone pvc with the lvms storageclass") + pvcOriObj, err := tc.Clientset.CoreV1().PersistentVolumeClaims(testNamespace).Get(context.TODO(), "test-pvc-original", metav1.GetOptions{}) + o.Expect(err).NotTo(o.HaveOccurred()) + + dataSource := &corev1.TypedLocalObjectReference{ + Kind: "PersistentVolumeClaim", + Name: "test-pvc-original", + } + + pvcClone := &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pvc-clone", + Namespace: testNamespace, + }, + Spec: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: pvcOriObj.Spec.Resources.Requests[corev1.ResourceStorage], + }, + }, + StorageClassName: &storageClass, + DataSource: dataSource, + }, + } + _, err = tc.Clientset.CoreV1().PersistentVolumeClaims(testNamespace).Create(context.TODO(), pvcClone, metav1.CreateOptions{}) + o.Expect(err).NotTo(o.HaveOccurred()) + + g.By("Create pod with the cloned pvc (required for WaitForFirstConsumer binding mode)") + podClone := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod-clone", + Namespace: testNamespace, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "test-container", + Image: "registry.redhat.io/rhel8/support-tools:latest", + Command: []string{"/bin/sh", "-c", "sleep 3600"}, + VolumeMounts: []corev1.VolumeMount{ + { + Name: "test-volume", + MountPath: "/mnt/test", + }, + }, + }, + }, + Volumes: []corev1.Volume{ + { + Name: "test-volume", + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: "test-pvc-clone", + }, + }, + }, + }, + }, + } + _, err = tc.Clientset.CoreV1().Pods(testNamespace).Create(context.TODO(), podClone, metav1.CreateOptions{}) + o.Expect(err).NotTo(o.HaveOccurred()) + + g.By("Wait for cloned PVC to be bound (happens after pod is scheduled)") + o.Eventually(func() corev1.PersistentVolumeClaimPhase { + pvc, err := tc.Clientset.CoreV1().PersistentVolumeClaims(testNamespace).Get(context.TODO(), "test-pvc-clone", metav1.GetOptions{}) + if err != nil { + return corev1.ClaimPending + } + return pvc.Status.Phase + }, 3*time.Minute, 5*time.Second).Should(o.Equal(corev1.ClaimBound)) + + g.By("Wait for cloned pod to be running") + o.Eventually(func() corev1.PodPhase { + pod, err := tc.Clientset.CoreV1().Pods(testNamespace).Get(context.TODO(), "test-pod-clone", metav1.GetOptions{}) + if err != nil { + return corev1.PodPending + } + return pod.Status.Phase + }, 3*time.Minute, 
5*time.Second).Should(o.Equal(corev1.PodRunning)) + + g.By("Delete original pvc will not impact the cloned one") + err = tc.Clientset.CoreV1().Pods(testNamespace).Delete(context.TODO(), "test-pod-original", metav1.DeleteOptions{}) + o.Expect(err).NotTo(o.HaveOccurred()) + + o.Eventually(func() bool { + _, err := tc.Clientset.CoreV1().Pods(testNamespace).Get(context.TODO(), "test-pod-original", metav1.GetOptions{}) + return err != nil + }, 2*time.Minute, 5*time.Second).Should(o.BeTrue()) + + err = tc.Clientset.CoreV1().PersistentVolumeClaims(testNamespace).Delete(context.TODO(), "test-pvc-original", metav1.DeleteOptions{}) + o.Expect(err).NotTo(o.HaveOccurred()) + + g.By("Check the cloned pod is still running") + pod, err := tc.Clientset.CoreV1().Pods(testNamespace).Get(context.TODO(), "test-pod-clone", metav1.GetOptions{}) + o.Expect(err).NotTo(o.HaveOccurred()) + o.Expect(pod.Status.Phase).To(o.Equal(corev1.PodRunning)) + }) + + // original author: rdeore@redhat.com; Ported by Claude Code + // OCP-61433-[LVMS] [Block] [WaitForFirstConsumer] PVC resize on LVM cluster beyond thinpool size, but within over-provisioning limit + g.It("Author:rdeore-Critical-61433-[LVMS] [Block] [WaitForFirstConsumer] PVC resize on LVM cluster beyond thinpool size, but within over-provisioning limit", g.Label("SNO"), func() { + g.By("Get thin pool size and over provision limit") + thinPoolSize := getThinPoolSizeByVolumeGroup(tc, volumeGroup, "thin-pool-1") + + g.By("Create a PVC with Block volumeMode") + initialCapacity := "2Gi" + volumeMode := corev1.PersistentVolumeBlock + pvc := &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pvc-block-resize", + Namespace: testNamespace, + }, + Spec: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse(initialCapacity), + }, + }, + StorageClassName: &storageClass, + VolumeMode: &volumeMode, + }, + } + _, err := tc.Clientset.CoreV1().PersistentVolumeClaims(testNamespace).Create(context.TODO(), pvc, metav1.CreateOptions{}) + o.Expect(err).NotTo(o.HaveOccurred()) + + g.By("Create deployment with block volume device (WaitForFirstConsumer requires pod to exist)") + deployment := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-dep-block", + Namespace: testNamespace, + }, + Spec: appsv1.DeploymentSpec{ + Replicas: int32Ptr(1), + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "test-dep-block", + }, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "app": "test-dep-block", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "test-container", + Image: "registry.redhat.io/rhel8/support-tools:latest", + Command: []string{"/bin/sh", "-c", "sleep 3600"}, + VolumeDevices: []corev1.VolumeDevice{ + { + Name: "test-volume", + DevicePath: "/dev/dblock", + }, + }, + }, + }, + Volumes: []corev1.Volume{ + { + Name: "test-volume", + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: "test-pvc-block-resize", + }, + }, + }, + }, + }, + }, + }, + } + _, err = tc.Clientset.AppsV1().Deployments(testNamespace).Create(context.TODO(), deployment, metav1.CreateOptions{}) + o.Expect(err).NotTo(o.HaveOccurred()) + + g.By("Wait for PVC to be bound (happens after pod is scheduled)") + o.Eventually(func() 
corev1.PersistentVolumeClaimPhase { + pvc, err := tc.Clientset.CoreV1().PersistentVolumeClaims(testNamespace).Get(context.TODO(), "test-pvc-block-resize", metav1.GetOptions{}) + if err != nil { + return corev1.ClaimPending + } + return pvc.Status.Phase + }, 3*time.Minute, 5*time.Second).Should(o.Equal(corev1.ClaimBound)) + + g.By("Wait for deployment to be ready") + o.Eventually(func() bool { + dep, err := tc.Clientset.AppsV1().Deployments(testNamespace).Get(context.TODO(), "test-dep-block", metav1.GetOptions{}) + if err != nil { + return false + } + return dep.Status.ReadyReplicas == 1 + }, 3*time.Minute, 5*time.Second).Should(o.BeTrue()) + + g.By("Check PVC can re-size beyond thinpool size, but within overprovisioning rate") + targetCapacityInt64 := getRandomNum(int64(thinPoolSize+1), int64(thinPoolSize+10)) + targetCapacity := fmt.Sprintf("%dGi", targetCapacityInt64) + + g.By(fmt.Sprintf("Resize PVC from %s to %s", initialCapacity, targetCapacity)) + pvcObj, err := tc.Clientset.CoreV1().PersistentVolumeClaims(testNamespace).Get(context.TODO(), "test-pvc-block-resize", metav1.GetOptions{}) + o.Expect(err).NotTo(o.HaveOccurred()) + + pvcObj.Spec.Resources.Requests[corev1.ResourceStorage] = resource.MustParse(targetCapacity) + _, err = tc.Clientset.CoreV1().PersistentVolumeClaims(testNamespace).Update(context.TODO(), pvcObj, metav1.UpdateOptions{}) + o.Expect(err).NotTo(o.HaveOccurred()) + + g.By("Wait for PVC resize to complete") + o.Eventually(func() string { + pvc, err := tc.Clientset.CoreV1().PersistentVolumeClaims(testNamespace).Get(context.TODO(), "test-pvc-block-resize", metav1.GetOptions{}) + if err != nil { + return "" + } + if capacity, ok := pvc.Status.Capacity[corev1.ResourceStorage]; ok { + return capacity.String() + } + return "" + }, 3*time.Minute, 5*time.Second).Should(o.Equal(targetCapacity)) + + g.By("Verify deployment is still healthy after resize") + dep, err := tc.Clientset.AppsV1().Deployments(testNamespace).Get(context.TODO(), "test-dep-block", metav1.GetOptions{}) + o.Expect(err).NotTo(o.HaveOccurred()) + o.Expect(dep.Status.ReadyReplicas).To(o.Equal(int32(1))) + }) + + // original author: rdeore@redhat.com; Ported by Claude Code + // OCP-66320-[LVMS] Pre-defined CSI Storageclass should get re-created automatically after deleting + g.It("Author:rdeore-LEVEL0-High-66320-[LVMS] Pre-defined CSI Storageclass should get re-created automatically after deleting [Disruptive]", g.Label("SNO"), func() { + g.By("Check lvms storageclass exists on cluster") + _, err := tc.Clientset.StorageV1().StorageClasses().Get(context.TODO(), storageClass, metav1.GetOptions{}) + if err != nil { + g.Skip(fmt.Sprintf("Skipped: the cluster does not have storage-class: %s", storageClass)) + } + + g.By("Save the original storage class for restoration") + originalSC, err := tc.Clientset.StorageV1().StorageClasses().Get(context.TODO(), storageClass, metav1.GetOptions{}) + o.Expect(err).NotTo(o.HaveOccurred()) + + g.By("Delete existing lvms storageClass") + err = tc.Clientset.StorageV1().StorageClasses().Delete(context.TODO(), storageClass, metav1.DeleteOptions{}) + o.Expect(err).NotTo(o.HaveOccurred()) + + defer func() { + // Restore storage class if it doesn't exist + _, err := tc.Clientset.StorageV1().StorageClasses().Get(context.TODO(), storageClass, metav1.GetOptions{}) + if err != nil { + scCopy := &storagev1.StorageClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: originalSC.Name, + Labels: originalSC.Labels, + }, + Provisioner: originalSC.Provisioner, + Parameters: originalSC.Parameters, + 
ReclaimPolicy: originalSC.ReclaimPolicy, + AllowVolumeExpansion: originalSC.AllowVolumeExpansion, + VolumeBindingMode: originalSC.VolumeBindingMode, + } + _, err = tc.Clientset.StorageV1().StorageClasses().Create(context.TODO(), scCopy, metav1.CreateOptions{}) + if err != nil { + e2e.Logf("Warning: failed to restore storage class: %v\n", err) + } + } + }() + + g.By("Check deleted lvms storageClass is re-created automatically") + o.Eventually(func() error { + _, err := tc.Clientset.StorageV1().StorageClasses().Get(context.TODO(), storageClass, metav1.GetOptions{}) + return err + }, 30*time.Second, 5*time.Second).Should(o.Succeed()) + }) + + // original author: mmakwana@redhat.com; Ported by Claude Code + // OCP-71012-[LVMS] Verify the wiping of local volumes in LVMS + g.It("Author:mmakwana-High-71012-[LVMS] Verify the wiping of local volumes in LVMS [Disruptive] [Serial]", g.Label("SNO"), func() { + g.By("#. Get list of available block devices/disks attached to all worker nodes") + freeDiskNameCountMap, err := getListOfFreeDisksFromWorkerNodes(tc) + o.Expect(err).NotTo(o.HaveOccurred()) + + if len(freeDiskNameCountMap) < 1 { + g.Skip("Skipped: Cluster's Worker nodes does not have minimum required free block devices/disks attached") + } + + workerNodes, err := getWorkersList() + o.Expect(err).NotTo(o.HaveOccurred()) + workerNodeCount := len(workerNodes) + + var diskName string + isDiskFound := false + for disk, count := range freeDiskNameCountMap { + if count == int64(workerNodeCount) { + diskName = disk + isDiskFound = true + delete(freeDiskNameCountMap, diskName) + break + } + } + if !isDiskFound { + g.Skip("Skipped: All Worker nodes does not have a free block device/disk with same name attached") + } + + g.By("#. Copy and save existing LVMCluster configuration in JSON format") + originLVMClusterName, err := getLVMClusterName(lvmsNamespace) + o.Expect(err).NotTo(o.HaveOccurred()) + e2e.Logf("Found LVMCluster: %s\n", originLVMClusterName) + + originLVMJSON, err := getLVMClusterJSON(originLVMClusterName, lvmsNamespace) + o.Expect(err).NotTo(o.HaveOccurred()) + e2e.Logf("Original LVMCluster saved\n") + + g.By("#. Delete existing LVMCluster resource") + err = deleteLVMClusterSafely(originLVMClusterName, lvmsNamespace, "vg1") + o.Expect(err).NotTo(o.HaveOccurred()) + + defer func() { + g.By("Restoring original LVMCluster") + exists, err := resourceExists("lvmcluster", originLVMClusterName, lvmsNamespace) + o.Expect(err).NotTo(o.HaveOccurred()) + if !exists { + err := createLVMClusterFromJSON(originLVMJSON) + o.Expect(err).NotTo(o.HaveOccurred()) + } + err = waitForLVMClusterReady(originLVMClusterName, lvmsNamespace, 4*time.Minute) + o.Expect(err).NotTo(o.HaveOccurred()) + }() + + g.By("#. Create logical volume on backend disk/device") + workerName := workerNodes[0] + vgName := "vg-71012" + lvName := "lv-71012" + err = createLogicalVolumeOnDisk(tc, workerName, diskName, vgName, lvName) + o.Expect(err).NotTo(o.HaveOccurred()) + + defer func() { + g.By("Cleaning up logical volume from disk") + removeLogicalVolumeOnDisk(tc, workerName, diskName, vgName, lvName) + }() + + g.By("#. 
Create a LVMCluster resource with the disk explicitly with forceWipeDevicesAndDestroyAllData") + newLVMClusterName := "test-lvmcluster-71012" + deviceClassName := "vg1" + diskPath := "/dev/" + diskName + err = createLVMClusterWithForceWipe(newLVMClusterName, lvmsNamespace, deviceClassName, diskPath) + o.Expect(err).NotTo(o.HaveOccurred()) + + defer func() { + g.By("Cleaning up test LVMCluster") + deleteLVMClusterSafely(newLVMClusterName, lvmsNamespace, deviceClassName) + }() + + g.By("#. Wait for new LVMCluster to be Ready") + err = waitForLVMClusterReady(newLVMClusterName, lvmsNamespace, 4*time.Minute) + o.Expect(err).NotTo(o.HaveOccurred()) + + storageClassName := "lvms-" + deviceClassName + pvcTest := &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pvc-71012", + Namespace: testNamespace, + }, + Spec: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("1Gi"), + }, + }, + StorageClassName: &storageClassName, + }, + } + + g.By("#. Create a pvc") + _, err = tc.Clientset.CoreV1().PersistentVolumeClaims(testNamespace).Create(context.TODO(), pvcTest, metav1.CreateOptions{}) + o.Expect(err).NotTo(o.HaveOccurred()) + + g.By("#. Create a deployment") + deployment := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-dep-71012", + Namespace: testNamespace, + }, + Spec: appsv1.DeploymentSpec{ + Replicas: int32Ptr(1), + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "test-dep-71012", + }, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "app": "test-dep-71012", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "test-container", + Image: "registry.redhat.io/rhel8/support-tools:latest", + Command: []string{"/bin/sh", "-c", "sleep 3600"}, + VolumeMounts: []corev1.VolumeMount{ + { + Name: "test-volume", + MountPath: "/mnt/test", + }, + }, + }, + }, + Volumes: []corev1.Volume{ + { + Name: "test-volume", + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: "test-pvc-71012", + }, + }, + }, + }, + }, + }, + }, + } + _, err = tc.Clientset.AppsV1().Deployments(testNamespace).Create(context.TODO(), deployment, metav1.CreateOptions{}) + o.Expect(err).NotTo(o.HaveOccurred()) + + g.By("#. Wait for the deployment to be in ready state") + o.Eventually(func() bool { + dep, err := tc.Clientset.AppsV1().Deployments(testNamespace).Get(context.TODO(), "test-dep-71012", metav1.GetOptions{}) + if err != nil { + return false + } + return dep.Status.ReadyReplicas == 1 + }, 3*time.Minute, 5*time.Second).Should(o.BeTrue()) + + g.By("#. Write data in deployment pod") + pods, err := tc.Clientset.CoreV1().Pods(testNamespace).List(context.TODO(), metav1.ListOptions{ + LabelSelector: "app=test-dep-71012", + }) + o.Expect(err).NotTo(o.HaveOccurred()) + o.Expect(len(pods.Items)).To(o.BeNumerically(">", 0)) + podName := pods.Items[0].Name + + writeCmd := "echo 'test data for OCP-71012' > /mnt/test/testfile.txt && cat /mnt/test/testfile.txt" + cmdOutput := execCommandInPod(tc, testNamespace, podName, "test-container", writeCmd) + o.Expect(cmdOutput).To(o.ContainSubstring("test data for OCP-71012")) + + g.By("#. 
Delete Deployment and PVC resources") + err = tc.Clientset.AppsV1().Deployments(testNamespace).Delete(context.TODO(), "test-dep-71012", metav1.DeleteOptions{}) + o.Expect(err).NotTo(o.HaveOccurred()) + + err = tc.Clientset.CoreV1().PersistentVolumeClaims(testNamespace).Delete(context.TODO(), "test-pvc-71012", metav1.DeleteOptions{}) + o.Expect(err).NotTo(o.HaveOccurred()) + + g.By("#. Delete newly created LVMCluster resource") + err = deleteLVMClusterSafely(newLVMClusterName, lvmsNamespace, deviceClassName) + o.Expect(err).NotTo(o.HaveOccurred()) + + g.By("#. Create original LVMCluster resource") + err = createLVMClusterFromJSON(originLVMJSON) + o.Expect(err).NotTo(o.HaveOccurred()) + + err = waitForLVMClusterReady(originLVMClusterName, lvmsNamespace, 4*time.Minute) + o.Expect(err).NotTo(o.HaveOccurred()) + }) + + // original author: mmakwana@redhat.com; Ported by Claude Code + // OCP-66241-[LVMS] Check workload management annotations are present in LVMS resources + g.It("Author:mmakwana-High-66241-[LVMS] Check workload management annotations are present in LVMS resources [Disruptive]", g.Label("SNO"), func() { + g.By("#. Get list of available block devices/disks attached to all worker nodes") + freeDiskNameCountMap, err := getListOfFreeDisksFromWorkerNodes(tc) + o.Expect(err).NotTo(o.HaveOccurred()) + + if len(freeDiskNameCountMap) < 1 { + g.Skip("Skipped: Cluster's Worker nodes does not have minimum required free block devices/disks attached") + } + + workerNodes, err := getWorkersList() + o.Expect(err).NotTo(o.HaveOccurred()) + workerNodeCount := len(workerNodes) + + var diskName string + isDiskFound := false + for disk, count := range freeDiskNameCountMap { + if count == int64(workerNodeCount) { + diskName = disk + isDiskFound = true + delete(freeDiskNameCountMap, diskName) + break + } + } + if !isDiskFound { + g.Skip("Skipped: All Worker nodes does not have a free block device/disk with same name attached") + } + + g.By("#. Copy and save existing LVMCluster configuration in JSON format") + originLVMClusterName, err := getLVMClusterName(lvmsNamespace) + o.Expect(err).NotTo(o.HaveOccurred()) + e2e.Logf("Found LVMCluster: %s\n", originLVMClusterName) + + originLVMJSON, err := getLVMClusterJSON(originLVMClusterName, lvmsNamespace) + o.Expect(err).NotTo(o.HaveOccurred()) + e2e.Logf("Original LVMCluster saved\n") + + g.By("#. Delete existing LVMCluster resource") + err = deleteLVMClusterSafely(originLVMClusterName, lvmsNamespace, "vg1") + o.Expect(err).NotTo(o.HaveOccurred()) + + defer func() { + g.By("Restoring original LVMCluster") + exists, err := resourceExists("lvmcluster", originLVMClusterName, lvmsNamespace) + o.Expect(err).NotTo(o.HaveOccurred()) + if !exists { + err := createLVMClusterFromJSON(originLVMJSON) + o.Expect(err).NotTo(o.HaveOccurred()) + } + err = waitForLVMClusterReady(originLVMClusterName, lvmsNamespace, 4*time.Minute) + o.Expect(err).NotTo(o.HaveOccurred()) + }() + + g.By("#. Create a new LVMCluster resource") + newLVMClusterName := "test-lvmcluster-66241" + deviceClassName := "vg1" + diskPath := "/dev/" + diskName + err = createLVMClusterWithPaths(newLVMClusterName, lvmsNamespace, deviceClassName, diskPath) + o.Expect(err).NotTo(o.HaveOccurred()) + + defer func() { + g.By("Cleaning up test LVMCluster") + deleteLVMClusterSafely(newLVMClusterName, lvmsNamespace, deviceClassName) + }() + + g.By("#. Wait for new LVMCluster to be Ready") + err = waitForLVMClusterReady(newLVMClusterName, lvmsNamespace, 4*time.Minute) + o.Expect(err).NotTo(o.HaveOccurred()) + + g.By("#. 
Check workload management annotations are present in LVMS resources") + expectedSubstring := `{"effect": "PreferredDuringScheduling"}` + + // Check lvms-operator deployment annotation + deployment, err := tc.Clientset.AppsV1().Deployments(lvmsNamespace).Get(context.TODO(), "lvms-operator", metav1.GetOptions{}) + o.Expect(err).NotTo(o.HaveOccurred()) + annotation1 := deployment.Spec.Template.Annotations["target.workload.openshift.io/management"] + e2e.Logf("LVM Operator Annotations: %s\n", annotation1) + o.Expect(annotation1).To(o.ContainSubstring(expectedSubstring)) + + // Check vg-manager daemonset annotation + daemonset, err := tc.Clientset.AppsV1().DaemonSets(lvmsNamespace).Get(context.TODO(), "vg-manager", metav1.GetOptions{}) + o.Expect(err).NotTo(o.HaveOccurred()) + annotation2 := daemonset.Spec.Template.Annotations["target.workload.openshift.io/management"] + e2e.Logf("VG Manager Annotations: %s\n", annotation2) + o.Expect(annotation2).To(o.ContainSubstring(expectedSubstring)) + + g.By("#. Delete newly created LVMCluster resource") + err = deleteLVMClusterSafely(newLVMClusterName, lvmsNamespace, deviceClassName) + o.Expect(err).NotTo(o.HaveOccurred()) + + g.By("#. Create original LVMCluster resource") + err = createLVMClusterFromJSON(originLVMJSON) + o.Expect(err).NotTo(o.HaveOccurred()) + + err = waitForLVMClusterReady(originLVMClusterName, lvmsNamespace, 4*time.Minute) + o.Expect(err).NotTo(o.HaveOccurred()) + }) + + // original author: mmakwana@redhat.com; Ported by Claude Code + // OCP-71378-[LVMS] Recover LVMS cluster from on-disk metadata + g.It("Author:mmakwana-High-71378-[LVMS] Recover LVMS cluster from on-disk metadata [Disruptive]", g.Label("SNO"), func() { + volumeGroup := "vg1" + storageClassName := "lvms-" + volumeGroup + + g.By("#. Get list of available block devices/disks attached to all worker nodes") + freeDiskNameCountMap, err := getListOfFreeDisksFromWorkerNodes(tc) + o.Expect(err).NotTo(o.HaveOccurred()) + + if len(freeDiskNameCountMap) < 1 { + g.Skip("Skipped: Cluster's Worker nodes does not have minimum required free block devices/disks attached") + } + + workerNodes, err := getWorkersList() + o.Expect(err).NotTo(o.HaveOccurred()) + workerNodeCount := len(workerNodes) + + var diskName string + isDiskFound := false + for disk, count := range freeDiskNameCountMap { + if count == int64(workerNodeCount) { + diskName = disk + isDiskFound = true + delete(freeDiskNameCountMap, diskName) + break + } + } + if !isDiskFound { + g.Skip("Skipped: All Worker nodes does not have a free block device/disk with same name attached") + } + + g.By("#. Copy and save existing LVMCluster configuration in JSON format") + originLVMClusterName, err := getLVMClusterName(lvmsNamespace) + o.Expect(err).NotTo(o.HaveOccurred()) + e2e.Logf("Found LVMCluster: %s\n", originLVMClusterName) + + originLVMJSON, err := getLVMClusterJSON(originLVMClusterName, lvmsNamespace) + o.Expect(err).NotTo(o.HaveOccurred()) + e2e.Logf("Original LVMCluster saved\n") + + g.By("#. 
Delete existing LVMCluster resource") + err = deleteLVMClusterSafely(originLVMClusterName, lvmsNamespace, "vg1") + o.Expect(err).NotTo(o.HaveOccurred()) + + defer func() { + g.By("Restoring original LVMCluster") + exists, err := resourceExists("lvmcluster", originLVMClusterName, lvmsNamespace) + o.Expect(err).NotTo(o.HaveOccurred()) + if !exists { + err := createLVMClusterFromJSON(originLVMJSON) + o.Expect(err).NotTo(o.HaveOccurred()) + } + err = waitForLVMClusterReady(originLVMClusterName, lvmsNamespace, 4*time.Minute) + o.Expect(err).NotTo(o.HaveOccurred()) + }() + + g.By("#. Create LVMCluster resource with single devicePath") + newLVMClusterName := "test-lvmcluster-71378" + deviceClassName := "vg1" + diskPath := "/dev/" + diskName + err = createLVMClusterWithPaths(newLVMClusterName, lvmsNamespace, deviceClassName, diskPath) + o.Expect(err).NotTo(o.HaveOccurred()) + + defer func() { + g.By("Cleaning up test LVMCluster") + deleteLVMClusterSafely(newLVMClusterName, lvmsNamespace, deviceClassName) + }() + + g.By("#. Wait for LVMCluster to be Ready") + err = waitForLVMClusterReady(newLVMClusterName, lvmsNamespace, 4*time.Minute) + o.Expect(err).NotTo(o.HaveOccurred()) + + g.By("#. Create a PVC (pvc1)") + pvc1 := &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pvc-71378-1", + Namespace: testNamespace, + }, + Spec: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("1Gi"), + }, + }, + StorageClassName: &storageClassName, + }, + } + _, err = tc.Clientset.CoreV1().PersistentVolumeClaims(testNamespace).Create(context.TODO(), pvc1, metav1.CreateOptions{}) + o.Expect(err).NotTo(o.HaveOccurred()) + + g.By("#. Create a deployment (dep1)") + deployment1 := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-dep-71378-1", + Namespace: testNamespace, + }, + Spec: appsv1.DeploymentSpec{ + Replicas: int32Ptr(1), + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "test-dep-71378-1", + }, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "app": "test-dep-71378-1", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "test-container", + Image: "registry.redhat.io/rhel8/support-tools:latest", + Command: []string{"/bin/sh", "-c", "sleep 3600"}, + VolumeMounts: []corev1.VolumeMount{ + { + Name: "test-volume", + MountPath: "/mnt/test", + }, + }, + }, + }, + Volumes: []corev1.Volume{ + { + Name: "test-volume", + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: "test-pvc-71378-1", + }, + }, + }, + }, + }, + }, + }, + } + _, err = tc.Clientset.AppsV1().Deployments(testNamespace).Create(context.TODO(), deployment1, metav1.CreateOptions{}) + o.Expect(err).NotTo(o.HaveOccurred()) + + g.By("#. Wait for the deployment to be in ready state") + o.Eventually(func() bool { + dep, err := tc.Clientset.AppsV1().Deployments(testNamespace).Get(context.TODO(), "test-dep-71378-1", metav1.GetOptions{}) + if err != nil { + return false + } + return dep.Status.ReadyReplicas == 1 + }, 3*time.Minute, 5*time.Second).Should(o.BeTrue()) + + g.By("#. 
Fetch disk path from current LVMCluster") + diskPaths, err := getLvmClusterPath(lvmsNamespace) + o.Expect(err).NotTo(o.HaveOccurred()) + selectedDisk := strings.Fields(diskPaths)[0] + e2e.Logf("Selected Disk Path: %s\n", selectedDisk) + + g.By("#. Remove finalizers from LVMCluster and LVMVolumeGroup and delete LVMCluster") + err = deleteLVMClusterForRecovery(newLVMClusterName, lvmsNamespace, deviceClassName) + o.Expect(err).NotTo(o.HaveOccurred()) + + g.By("#. Create a new LVMCluster resource with same disk path (testing recovery)") + newLVMClusterName2 := "test-lvmcluster-71378-recovered" + err = createLVMClusterWithPaths(newLVMClusterName2, lvmsNamespace, deviceClassName, selectedDisk) + o.Expect(err).NotTo(o.HaveOccurred()) + + defer func() { + g.By("Cleaning up recovered test LVMCluster") + deleteLVMClusterSafely(newLVMClusterName2, lvmsNamespace, deviceClassName) + }() + + g.By("#. Wait for recovered LVMCluster to be Ready") + err = waitForLVMClusterReady(newLVMClusterName2, lvmsNamespace, 4*time.Minute) + o.Expect(err).NotTo(o.HaveOccurred()) + + g.By("#. Create a PVC (pvc2)") + pvc2 := &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pvc-71378-2", + Namespace: testNamespace, + }, + Spec: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("1Gi"), + }, + }, + StorageClassName: &storageClassName, + }, + } + _, err = tc.Clientset.CoreV1().PersistentVolumeClaims(testNamespace).Create(context.TODO(), pvc2, metav1.CreateOptions{}) + o.Expect(err).NotTo(o.HaveOccurred()) + + g.By("#. Create a deployment (dep2)") + deployment2 := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-dep-71378-2", + Namespace: testNamespace, + }, + Spec: appsv1.DeploymentSpec{ + Replicas: int32Ptr(1), + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "test-dep-71378-2", + }, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "app": "test-dep-71378-2", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "test-container", + Image: "registry.redhat.io/rhel8/support-tools:latest", + Command: []string{"/bin/sh", "-c", "sleep 3600"}, + VolumeMounts: []corev1.VolumeMount{ + { + Name: "test-volume", + MountPath: "/mnt/test", + }, + }, + }, + }, + Volumes: []corev1.Volume{ + { + Name: "test-volume", + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: "test-pvc-71378-2", + }, + }, + }, + }, + }, + }, + }, + } + _, err = tc.Clientset.AppsV1().Deployments(testNamespace).Create(context.TODO(), deployment2, metav1.CreateOptions{}) + o.Expect(err).NotTo(o.HaveOccurred()) + + g.By("#. Wait for the deployment2 to be in ready state") + o.Eventually(func() bool { + dep, err := tc.Clientset.AppsV1().Deployments(testNamespace).Get(context.TODO(), "test-dep-71378-2", metav1.GetOptions{}) + if err != nil { + return false + } + return dep.Status.ReadyReplicas == 1 + }, 3*time.Minute, 5*time.Second).Should(o.BeTrue()) + + g.By("#. 
Write data in deployment2 pod") + pods, err := tc.Clientset.CoreV1().Pods(testNamespace).List(context.TODO(), metav1.ListOptions{ + LabelSelector: "app=test-dep-71378-2", + }) + o.Expect(err).NotTo(o.HaveOccurred()) + o.Expect(len(pods.Items)).To(o.BeNumerically(">", 0)) + pod2Name := pods.Items[0].Name + + writeCmd := "echo 'test data for OCP-71378' > /mnt/test/testfile.txt && cat /mnt/test/testfile.txt" + cmdOutput := execCommandInPod(tc, testNamespace, pod2Name, "test-container", writeCmd) + o.Expect(cmdOutput).To(o.ContainSubstring("test data for OCP-71378")) + + g.By("#. Check dep1 is still running (verifying recovery)") + dep1, err := tc.Clientset.AppsV1().Deployments(testNamespace).Get(context.TODO(), "test-dep-71378-1", metav1.GetOptions{}) + o.Expect(err).NotTo(o.HaveOccurred()) + o.Expect(dep1.Status.ReadyReplicas).To(o.Equal(int32(1))) + e2e.Logf("The deployment %s in namespace %s is in healthy state after recovery\n", dep1.Name, dep1.Namespace) + + g.By("#. Delete Deployment and PVC resources") + err = tc.Clientset.AppsV1().Deployments(testNamespace).Delete(context.TODO(), "test-dep-71378-2", metav1.DeleteOptions{}) + o.Expect(err).NotTo(o.HaveOccurred()) + + err = tc.Clientset.CoreV1().PersistentVolumeClaims(testNamespace).Delete(context.TODO(), "test-pvc-71378-2", metav1.DeleteOptions{}) + o.Expect(err).NotTo(o.HaveOccurred()) + + err = tc.Clientset.AppsV1().Deployments(testNamespace).Delete(context.TODO(), "test-dep-71378-1", metav1.DeleteOptions{}) + o.Expect(err).NotTo(o.HaveOccurred()) + + err = tc.Clientset.CoreV1().PersistentVolumeClaims(testNamespace).Delete(context.TODO(), "test-pvc-71378-1", metav1.DeleteOptions{}) + o.Expect(err).NotTo(o.HaveOccurred()) + + g.By("#. Delete newly created LVMCluster resource") + err = deleteLVMClusterSafely(newLVMClusterName2, lvmsNamespace, deviceClassName) + o.Expect(err).NotTo(o.HaveOccurred()) + + g.By("#. Create original LVMCluster resource") + err = createLVMClusterFromJSON(originLVMJSON) + o.Expect(err).NotTo(o.HaveOccurred()) + + err = waitForLVMClusterReady(originLVMClusterName, lvmsNamespace, 4*time.Minute) + o.Expect(err).NotTo(o.HaveOccurred()) + }) + + // original author: mmakwana@redhat.com + // OCP-77069-[LVMS] Make thin pool metadata size configurable in LVMS + g.It("Author:mmakwana-High-77069-[LVMS] Make thin pool metadata size configurable in LVMS [Disruptive]", g.Label("SNO"), func() { + g.By("#. Get list of available block devices/disks attached to all worker nodes") + freeDiskNameCountMap, err := getListOfFreeDisksFromWorkerNodes(tc) + o.Expect(err).NotTo(o.HaveOccurred()) + if len(freeDiskNameCountMap) < 1 { + g.Skip("Skipped: Cluster's Worker nodes does not have minimum required free block devices/disks attached") + } + + workerNodes, err := getWorkersList() + o.Expect(err).NotTo(o.HaveOccurred()) + workerNodeCount := len(workerNodes) + + var diskName string + isDiskFound := false + for disk, count := range freeDiskNameCountMap { + if count == int64(workerNodeCount) { + diskName = disk + isDiskFound = true + delete(freeDiskNameCountMap, diskName) + break + } + } + if !isDiskFound { + g.Skip("Skipped: All Worker nodes does not have a free block device/disk with same name attached") + } + + g.By("#. 
Copy and save existing LVMCluster configuration in JSON format") + originLVMClusterName, err := getLVMClusterName(lvmsNamespace) + o.Expect(err).NotTo(o.HaveOccurred()) + originLVMJSON, err := getLVMClusterJSON(originLVMClusterName, lvmsNamespace) + e2e.Logf("Original LVMCluster: %s\n", originLVMJSON) + o.Expect(err).NotTo(o.HaveOccurred()) + + g.By("#. Delete existing LVMCluster resource") + deviceClassNameOrig := "vg1" + err = deleteLVMClusterSafely(originLVMClusterName, lvmsNamespace, deviceClassNameOrig) + o.Expect(err).NotTo(o.HaveOccurred()) + + defer func() { + g.By("Restoring original LVMCluster in defer block") + exists, _ := resourceExists("lvmcluster", originLVMClusterName, lvmsNamespace) + if !exists { + err := createLVMClusterFromJSON(originLVMJSON) + o.Expect(err).NotTo(o.HaveOccurred()) + } + err := waitForLVMClusterReady(originLVMClusterName, lvmsNamespace, 4*time.Minute) + o.Expect(err).NotTo(o.HaveOccurred()) + }() + + g.By("#. Create LVMCluster resource and then patch MetadataSizeCalculationPolicy set to 'Static'") + newLVMClusterName := "test-lvmcluster-77069" + deviceClassName := "vg1" + diskPath := "/dev/" + diskName + metadataSize := "100Mi" + + err = createLVMClusterWithPaths(newLVMClusterName, lvmsNamespace, deviceClassName, diskPath) + o.Expect(err).NotTo(o.HaveOccurred()) + + defer func() { + g.By("Cleaning up test LVMCluster in defer block") + deleteLVMClusterSafely(newLVMClusterName, lvmsNamespace, deviceClassName) + }() + + err = patchMetadataSizeCalculationPolicyToStatic(newLVMClusterName, lvmsNamespace, metadataSize) + o.Expect(err).NotTo(o.HaveOccurred()) + + g.By("#. Wait for LVMCluster to be Ready") + err = waitForLVMClusterReady(newLVMClusterName, lvmsNamespace, 4*time.Minute) + o.Expect(err).NotTo(o.HaveOccurred()) + + g.By("#. Create a PVC") + storageClassName := "lvms-" + deviceClassName + pvc := &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pvc-77069", + Namespace: testNamespace, + }, + Spec: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("1Gi"), + }, + }, + StorageClassName: &storageClassName, + }, + } + _, err = tc.Clientset.CoreV1().PersistentVolumeClaims(testNamespace).Create(context.TODO(), pvc, metav1.CreateOptions{}) + o.Expect(err).NotTo(o.HaveOccurred()) + + defer func() { + g.By("Cleaning up PVC in defer block") + tc.Clientset.CoreV1().PersistentVolumeClaims(testNamespace).Delete(context.TODO(), "test-pvc-77069", metav1.DeleteOptions{}) + }() + + g.By("#. 
Create a deployment") + deployment := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-dep-77069", + Namespace: testNamespace, + }, + Spec: appsv1.DeploymentSpec{ + Replicas: int32Ptr(1), + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "test-dep-77069", + }, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "app": "test-dep-77069", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "test-container", + Image: "registry.access.redhat.com/ubi8/ubi-minimal:latest", + Command: []string{ + "/bin/sh", + "-c", + "sleep infinity", + }, + VolumeMounts: []corev1.VolumeMount{ + { + Name: "test-volume", + MountPath: "/mnt/test", + }, + }, + }, + }, + Volumes: []corev1.Volume{ + { + Name: "test-volume", + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: "test-pvc-77069", + }, + }, + }, + }, + }, + }, + }, + } + _, err = tc.Clientset.AppsV1().Deployments(testNamespace).Create(context.TODO(), deployment, metav1.CreateOptions{}) + o.Expect(err).NotTo(o.HaveOccurred()) + + defer func() { + g.By("Cleaning up deployment in defer block") + tc.Clientset.AppsV1().Deployments(testNamespace).Delete(context.TODO(), "test-dep-77069", metav1.DeleteOptions{}) + }() + + g.By("#. Wait for the deployment to be in ready state") + o.Eventually(func() bool { + dep, err := tc.Clientset.AppsV1().Deployments(testNamespace).Get(context.TODO(), "test-dep-77069", metav1.GetOptions{}) + if err != nil { + return false + } + return dep.Status.ReadyReplicas == 1 + }, 3*time.Minute, 5*time.Second).Should(o.BeTrue()) + + g.By("#. Write data in deployment pod") + // Get pod name + pods, err := tc.Clientset.CoreV1().Pods(testNamespace).List(context.TODO(), metav1.ListOptions{ + LabelSelector: "app=test-dep-77069", + }) + o.Expect(err).NotTo(o.HaveOccurred()) + o.Expect(len(pods.Items)).To(o.BeNumerically(">", 0)) + podName := pods.Items[0].Name + + // Write test data + writeCmd := "echo 'test data' > /mnt/test/testfile.txt" + output := execCommandInPod(tc, testNamespace, podName, "test-container", writeCmd) + o.Expect(output).NotTo(o.ContainSubstring("error")) + + // Verify data was written + readCmd := "cat /mnt/test/testfile.txt" + output = execCommandInPod(tc, testNamespace, podName, "test-container", readCmd) + o.Expect(output).To(o.ContainSubstring("test data")) + + g.By("#. Debug into the node and check the size of the metadata for logical volumes") + nodeName, err := getLogicalVolumeSelectedNode(testNamespace, "test-pvc-77069") + o.Expect(err).NotTo(o.HaveOccurred()) + + lvsCmd := "lvs --noheadings -o lv_name,lv_metadata_size" + lvsOutput := execCommandInNode(tc, nodeName, lvsCmd) + e2e.Logf("Logical volume metadata size: %s\n", lvsOutput) + + expectedLvsOutput := "100.00m" + o.Expect(lvsOutput).To(o.ContainSubstring(expectedLvsOutput)) + + g.By("#. Delete Deployment and PVC resources") + err = tc.Clientset.AppsV1().Deployments(testNamespace).Delete(context.TODO(), "test-dep-77069", metav1.DeleteOptions{}) + o.Expect(err).NotTo(o.HaveOccurred()) + err = tc.Clientset.CoreV1().PersistentVolumeClaims(testNamespace).Delete(context.TODO(), "test-pvc-77069", metav1.DeleteOptions{}) + o.Expect(err).NotTo(o.HaveOccurred()) + + g.By("#. Delete newly created LVMCluster resource") + err = deleteLVMClusterSafely(newLVMClusterName, lvmsNamespace, deviceClassName) + o.Expect(err).NotTo(o.HaveOccurred()) + + g.By("#. 
Create original LVMCluster resource") + err = createLVMClusterFromJSON(originLVMJSON) + o.Expect(err).NotTo(o.HaveOccurred()) + + err = waitForLVMClusterReady(originLVMClusterName, lvmsNamespace, 4*time.Minute) + o.Expect(err).NotTo(o.HaveOccurred()) + }) + +// original author: rdeore@redhat.com +g.It("Author:rdeore-Critical-61998-[LVMS] [Block] [Snapshot] should restore volume larger than disk size with snapshot dataSource successfully and the volume could be read and written [Serial]", g.Label("SNO"), func() { + volumeGroup := "vg1" + thinPoolName := "thin-pool-1" + storageClassName := "lvms-" + volumeGroup + volumeSnapshotClassName := "lvms-" + volumeGroup + + g.By("#. Get thin pool size") + thinPoolSize := getThinPoolSizeByVolumeGroup(tc, volumeGroup, thinPoolName) + + g.By("#. Create a PVC with Block volumeMode and capacity bigger than disk size") + pvcCapacity := fmt.Sprintf("%dGi", int64(thinPoolSize)+getRandomNum(2, 10)) + volumeMode := corev1.PersistentVolumeBlock + pvcOri := &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pvc-ori-61998", + Namespace: testNamespace, + }, + Spec: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse(pvcCapacity), + }, + }, + StorageClassName: &storageClassName, + VolumeMode: &volumeMode, + }, + } + _, err := tc.Clientset.CoreV1().PersistentVolumeClaims(testNamespace).Create(context.TODO(), pvcOri, metav1.CreateOptions{}) + o.Expect(err).NotTo(o.HaveOccurred()) + + g.By("#. Create pod with the created pvc (using volumeDevices for block mode)") + podOri := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod-ori-61998", + Namespace: testNamespace, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "test-container", + Image: "registry.redhat.io/rhel8/support-tools:latest", + Command: []string{"/bin/sh", "-c", "sleep 3600"}, + VolumeDevices: []corev1.VolumeDevice{ + { + Name: "test-volume", + DevicePath: "/dev/dblock", + }, + }, + }, + }, + Volumes: []corev1.Volume{ + { + Name: "test-volume", + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: "test-pvc-ori-61998", + }, + }, + }, + }, + }, + } + _, err = tc.Clientset.CoreV1().Pods(testNamespace).Create(context.TODO(), podOri, metav1.CreateOptions{}) + o.Expect(err).NotTo(o.HaveOccurred()) + + g.By("#. Wait for PVC to be bound") + o.Eventually(func() corev1.PersistentVolumeClaimPhase { + pvc, err := tc.Clientset.CoreV1().PersistentVolumeClaims(testNamespace).Get(context.TODO(), "test-pvc-ori-61998", metav1.GetOptions{}) + if err != nil { + return corev1.ClaimPending + } + return pvc.Status.Phase + }, 3*time.Minute, 5*time.Second).Should(o.Equal(corev1.ClaimBound)) + + g.By("#. Wait for pod to be running") + o.Eventually(func() corev1.PodPhase { + pod, err := tc.Clientset.CoreV1().Pods(testNamespace).Get(context.TODO(), "test-pod-ori-61998", metav1.GetOptions{}) + if err != nil { + return corev1.PodPending + } + return pod.Status.Phase + }, 3*time.Minute, 5*time.Second).Should(o.Equal(corev1.PodRunning)) + + g.By("#. Check volume size is bigger than disk size") + checkVolumeBiggerThanDisk(tc, "test-pvc-ori-61998", testNamespace, thinPoolSize) + + g.By("#. Sync data to disk") + syncCmd := "sync" + execCommandInPod(tc, testNamespace, "test-pod-ori-61998", "test-container", syncCmd) + + g.By("#. 
Create volumesnapshot using oc") + snapshotName := "test-snapshot-61998" + snapshotYAML := fmt.Sprintf(`apiVersion: snapshot.storage.k8s.io/v1 +kind: VolumeSnapshot +metadata: + name: %s + namespace: %s +spec: + volumeSnapshotClassName: %s + source: + persistentVolumeClaimName: test-pvc-ori-61998 +`, snapshotName, testNamespace, volumeSnapshotClassName) + + cmd := exec.Command("oc", "apply", "-f", "-") + cmd.Stdin = strings.NewReader(snapshotYAML) + output, err := cmd.CombinedOutput() + o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("Failed to create snapshot: %s", string(output))) + + g.By("#. Wait for volumesnapshot to be ready") + o.Eventually(func() string { + cmd := exec.Command("oc", "get", "volumesnapshot", snapshotName, "-n", testNamespace, "-o=jsonpath={.status.readyToUse}") + output, _ := cmd.CombinedOutput() + return strings.TrimSpace(string(output)) + }, 3*time.Minute, 5*time.Second).Should(o.Equal("true")) + + g.By("#. Create a restored pvc with snapshot dataSource") + pvcRestore := &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pvc-restore-61998", + Namespace: testNamespace, + }, + Spec: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse(pvcCapacity), + }, + }, + StorageClassName: &storageClassName, + VolumeMode: &volumeMode, + DataSource: &corev1.TypedLocalObjectReference{ + APIGroup: &[]string{"snapshot.storage.k8s.io"}[0], + Kind: "VolumeSnapshot", + Name: snapshotName, + }, + }, + } + _, err = tc.Clientset.CoreV1().PersistentVolumeClaims(testNamespace).Create(context.TODO(), pvcRestore, metav1.CreateOptions{}) + o.Expect(err).NotTo(o.HaveOccurred()) + + g.By("#. Create pod with the restored pvc") + podRestore := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod-restore-61998", + Namespace: testNamespace, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "test-container", + Image: "registry.redhat.io/rhel8/support-tools:latest", + Command: []string{"/bin/sh", "-c", "sleep 3600"}, + VolumeDevices: []corev1.VolumeDevice{ + { + Name: "test-volume", + DevicePath: "/dev/dblock", + }, + }, + }, + }, + Volumes: []corev1.Volume{ + { + Name: "test-volume", + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: "test-pvc-restore-61998", + }, + }, + }, + }, + }, + } + _, err = tc.Clientset.CoreV1().Pods(testNamespace).Create(context.TODO(), podRestore, metav1.CreateOptions{}) + o.Expect(err).NotTo(o.HaveOccurred()) + + g.By("#. Wait for restored PVC to be bound") + o.Eventually(func() corev1.PersistentVolumeClaimPhase { + pvc, err := tc.Clientset.CoreV1().PersistentVolumeClaims(testNamespace).Get(context.TODO(), "test-pvc-restore-61998", metav1.GetOptions{}) + if err != nil { + return corev1.ClaimPending + } + return pvc.Status.Phase + }, 3*time.Minute, 5*time.Second).Should(o.Equal(corev1.ClaimBound)) + + g.By("#. Wait for restored pod to be running") + o.Eventually(func() corev1.PodPhase { + pod, err := tc.Clientset.CoreV1().Pods(testNamespace).Get(context.TODO(), "test-pod-restore-61998", metav1.GetOptions{}) + if err != nil { + return corev1.PodPending + } + return pod.Status.Phase + }, 3*time.Minute, 5*time.Second).Should(o.Equal(corev1.PodRunning)) + + g.By("#. 
Check restored volume size is bigger than disk size") + checkVolumeBiggerThanDisk(tc, "test-pvc-restore-61998", testNamespace, thinPoolSize) +}) +}) + +// checkLvmsOperatorInstalled verifies that LVMS operator is installed on the cluster +func checkLvmsOperatorInstalled(tc *TestClient) { + g.By("Checking if LVMS operator is installed") + + // Check if CSI driver exists + csiDrivers, err := tc.Clientset.StorageV1().CSIDrivers().List(context.TODO(), metav1.ListOptions{}) + o.Expect(err).NotTo(o.HaveOccurred()) + + csiDriverFound := false + for _, driver := range csiDrivers.Items { + if driver.Name == "topolvm.io" { + csiDriverFound = true + break + } + } + + if !csiDriverFound { + g.Skip("LVMS Operator is not installed on the running OCP cluster") + } + + // Verify LVMCluster exists and is Ready + _, err = tc.Clientset.CoreV1().Namespaces().Get(context.TODO(), lvmsNamespace, metav1.GetOptions{}) + if err != nil { + g.Skip(fmt.Sprintf("LVMS namespace %s not found", lvmsNamespace)) + } + + e2e.Logf("LVMS operator is installed and ready\n") +} diff --git a/test/integration/qe-tests/lvms_utils.go b/test/integration/qe-tests/lvms_utils.go new file mode 100644 index 000000000..2d9c0f1b0 --- /dev/null +++ b/test/integration/qe-tests/lvms_utils.go @@ -0,0 +1,789 @@ +package tests + +import ( + "bytes" + "context" + "fmt" + "math/rand" + "os" + "os/exec" + "regexp" + "strconv" + "strings" + "time" + + o "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/tools/remotecommand" + e2e "k8s.io/kubernetes/test/e2e/framework" +) + +// int32Ptr returns a pointer to an int32 +func int32Ptr(i int32) *int32 { + return &i +} + +// boolPtr returns a pointer to a bool +func boolPtr(b bool) *bool { + return &b +} + +// getRandomNum returns a random number between m and n (inclusive) +func getRandomNum(m int64, n int64) int64 { + rand.Seed(time.Now().UnixNano()) + return rand.Int63n(n-m+1) + m +} + +// getThinPoolSizeByVolumeGroup gets the total thin pool size for a given volume group from all worker nodes +func getThinPoolSizeByVolumeGroup(tc *TestClient, volumeGroup string, thinPoolName string) int { + // Use lvs with specific VG/LV selection to avoid complex shell piping + cmd := fmt.Sprintf("lvs --units g --noheadings -o lv_size %s/%s 2>/dev/null || echo 0", volumeGroup, thinPoolName) + + // Get list of worker nodes + nodes, err := tc.Clientset.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{ + LabelSelector: "node-role.kubernetes.io/worker", + }) + o.Expect(err).NotTo(o.HaveOccurred()) + + var totalThinPoolSize int = 0 + + for _, node := range nodes.Items { + output := execCommandInNode(tc, node.Name, cmd) + if output == "" { + continue + } + + regexForNumbersOnly := regexp.MustCompile("[0-9.]+") + matches := regexForNumbersOnly.FindAllString(output, -1) + if len(matches) == 0 { + continue + } + + sizeVal := matches[0] + sizeNum := strings.Split(sizeVal, ".") + if len(sizeNum) == 0 { + continue + } + + thinPoolSize, err := strconv.Atoi(sizeNum[0]) + if err != nil { + continue + } + totalThinPoolSize = totalThinPoolSize + thinPoolSize + } + + e2e.Logf("Total thin pool size in Gi from backend nodes: %d\n", totalThinPoolSize) + return totalThinPoolSize +} + +// execCommandInNode executes a command in a specific node using debug pod +func execCommandInNode(tc *TestClient, nodeName string, command string) string { + // Create a debug pod on the specific node + debugPodName := 
fmt.Sprintf("debug-%s-%d", nodeName, time.Now().Unix()) + debugPod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: debugPodName, + Namespace: "default", + }, + Spec: corev1.PodSpec{ + NodeName: nodeName, + HostNetwork: true, + HostPID: true, + HostIPC: true, + RestartPolicy: corev1.RestartPolicyNever, + Containers: []corev1.Container{ + { + Name: "debug", + Image: "registry.redhat.io/rhel8/support-tools:latest", + Command: []string{"/bin/sh", "-c", "sleep 3600"}, + SecurityContext: &corev1.SecurityContext{ + Privileged: boolPtr(true), + }, + VolumeMounts: []corev1.VolumeMount{ + { + Name: "host", + MountPath: "/host", + }, + }, + }, + }, + Volumes: []corev1.Volume{ + { + Name: "host", + VolumeSource: corev1.VolumeSource{ + HostPath: &corev1.HostPathVolumeSource{ + Path: "/", + }, + }, + }, + }, + Tolerations: []corev1.Toleration{ + { + Operator: corev1.TolerationOpExists, + }, + }, + }, + } + + _, err := tc.Clientset.CoreV1().Pods("default").Create(context.TODO(), debugPod, metav1.CreateOptions{}) + if err != nil { + e2e.Logf("Failed to create debug pod: %v\n", err) + return "" + } + + defer func() { + _ = tc.Clientset.CoreV1().Pods("default").Delete(context.TODO(), debugPodName, metav1.DeleteOptions{}) + }() + + // Wait for pod to be running + o.Eventually(func() bool { + pod, err := tc.Clientset.CoreV1().Pods("default").Get(context.TODO(), debugPodName, metav1.GetOptions{}) + if err != nil { + return false + } + return pod.Status.Phase == corev1.PodRunning + }, 2*time.Minute, 5*time.Second).Should(o.BeTrue()) + + // Execute command in the pod using nsenter to access host namespaces + // nsenter properly handles stdin for commands like cryptsetup + escapedCmd := strings.ReplaceAll(command, "'", "'\\''") + wrappedCmd := fmt.Sprintf("nsenter --target 1 --mount --uts --ipc --net --pid -- /bin/bash -c '%s'", escapedCmd) + output := execCommandInPod(tc, "default", debugPodName, "debug", wrappedCmd) + + return output +} + +// execCommandInPod executes a command in a pod +func execCommandInPod(tc *TestClient, namespace, podName, containerName, command string) string { + // Create a copy of config to avoid modifying the global config + config := *tc.Config + // Skip TLS verification to avoid certificate issues + config.Insecure = true + config.TLSClientConfig.Insecure = true + config.TLSClientConfig.CAData = nil + config.TLSClientConfig.CAFile = "" + + req := tc.Clientset.CoreV1().RESTClient().Post(). + Resource("pods"). + Name(podName). + Namespace(namespace). + SubResource("exec"). 
+ VersionedParams(&corev1.PodExecOptions{ + Container: containerName, + Command: []string{"/bin/sh", "-c", command}, + Stdin: false, + Stdout: true, + Stderr: true, + TTY: false, + }, scheme.ParameterCodec) + + exec, err := remotecommand.NewSPDYExecutor(&config, "POST", req.URL()) + if err != nil { + e2e.Logf("Failed to create executor: %v\n", err) + return "" + } + + var stdout, stderr bytes.Buffer + err = exec.Stream(remotecommand.StreamOptions{ + Stdout: &stdout, + Stderr: &stderr, + }) + + if err != nil { + e2e.Logf("Failed to execute command: %v, stderr: %s\n", err, stderr.String()) + return "" + } + + return strings.TrimSpace(stdout.String()) +} + +// getWorkersList returns the list of worker node names +func getWorkersList() ([]string, error) { + cmd := exec.Command("kubectl", "get", "nodes", "-l", "node-role.kubernetes.io/worker", "-o=jsonpath={.items[*].metadata.name}") + output, err := cmd.CombinedOutput() + if err != nil { + return nil, fmt.Errorf("failed to get worker nodes: %w, output: %s", err, string(output)) + } + workers := strings.Fields(string(output)) + return workers, nil +} + +// getListOfFreeDisksFromWorkerNodes gets the list of unused free block devices/disks along with their total count from all the worker nodes +func getListOfFreeDisksFromWorkerNodes(tc *TestClient) (map[string]int64, error) { + // Check for mock mode (for CI environments without real disks) + if mockDisk := os.Getenv("LVMS_MOCK_FREE_DISK"); mockDisk != "" { + e2e.Logf("⚙ MOCK MODE: Using disk %s from LVMS_MOCK_FREE_DISK env variable\n", mockDisk) + workerNodes, err := getWorkersList() + if err != nil { + return nil, err + } + e2e.Logf(" Simulating disk %s available on all %d worker node(s)\n", mockDisk, len(workerNodes)) + return map[string]int64{mockDisk: int64(len(workerNodes))}, nil + } + + freeDiskNamesCount := make(map[string]int64) + workerNodes, err := getWorkersList() + if err != nil { + return nil, err + } + + e2e.Logf("\n========== DISK DISCOVERY STARTED ==========\n") + e2e.Logf("Scanning for free disks on %d worker node(s)...\n", len(workerNodes)) + + for _, workerName := range workerNodes { + isDiskFound := false + e2e.Logf("\n[Node: %s]\n", workerName) + e2e.Logf(" Running: lsblk | grep disk | awk \"{print $1}\"\n") + + output := execCommandInNode(tc, workerName, `lsblk | grep disk | awk "{print \$1}"`) + if output == "" { + e2e.Logf(" ⚠ WARNING: No disks found or lsblk command failed (empty output)\n") + continue + } + + e2e.Logf(" Raw disk list:\n") + diskList := strings.Split(output, "\n") + for _, diskLine := range diskList { + diskLine = strings.TrimSpace(diskLine) + if diskLine != "" { + e2e.Logf(" - %s\n", diskLine) + } + } + + e2e.Logf(" Checking disk availability with blkid:\n") + for _, diskName := range diskList { + diskName = strings.TrimSpace(diskName) + if diskName == "" { + continue + } + + blkidCmd := "blkid /dev/" + diskName + e2e.Logf(" Running: %s\n", blkidCmd) + output := execCommandInNode(tc, workerName, blkidCmd) + + // disks that are used by existing LVMCluster have TYPE='LVM' OR Unused free disk does not return any output + if strings.Contains(output, "LVM") || len(strings.TrimSpace(output)) == 0 { + freeDiskNamesCount[diskName] = freeDiskNamesCount[diskName] + 1 + isDiskFound = true // at least 1 required free disk found + if output == "" { + e2e.Logf(" ✓ /dev/%s is FREE (no filesystem signature)\n", diskName) + } else { + e2e.Logf(" ✓ /dev/%s is available (LVM-managed): %s\n", diskName, output) + } + } else { + e2e.Logf(" ✗ /dev/%s is IN USE: %s\n", 
diskName, output) + } + } + + if !isDiskFound { + e2e.Logf(" ⚠ WARNING: Worker node %s does not have mandatory unused free block device/disk attached\n", workerName) + } + } + + e2e.Logf("\n========== DISK DISCOVERY SUMMARY ==========\n") + if len(freeDiskNamesCount) == 0 { + e2e.Logf(" ✗ NO FREE DISKS FOUND on any worker node\n") + } else { + e2e.Logf(" Free disks found across nodes:\n") + for disk, count := range freeDiskNamesCount { + e2e.Logf(" - /dev/%s: available on %d/%d nodes\n", disk, count, len(workerNodes)) + } + } + e2e.Logf("===========================================\n\n") + + return freeDiskNamesCount, nil +} + +// createLogicalVolumeOnDisk makes a disk partition and creates a logical volume on new volume group +func createLogicalVolumeOnDisk(tc *TestClient, nodeHostName string, disk string, vgName string, lvName string) error { + diskName := "/dev/" + disk + + // Create LVM disk partition + createPartitionCmd := "echo -e 'n\\np\\n1\\n\\n\\nw' | fdisk " + diskName + _, err := execCommandInNodeWithError(tc, nodeHostName, createPartitionCmd) + if err != nil { + return fmt.Errorf("failed to create partition: %w", err) + } + + partitionName := diskName + "p1" + // Unmount the partition if it's mounted + unmountCmd := "umount " + partitionName + " || true" + execCommandInNode(tc, nodeHostName, unmountCmd) + + // Create Physical Volume + createPV := "pvcreate " + partitionName + _, err = execCommandInNodeWithError(tc, nodeHostName, createPV) + if err != nil { + return fmt.Errorf("failed to create PV: %w", err) + } + + // Create Volume Group + createVG := "vgcreate " + vgName + " " + partitionName + _, err = execCommandInNodeWithError(tc, nodeHostName, createVG) + if err != nil { + return fmt.Errorf("failed to create VG: %w", err) + } + + // Create Logical Volume + createLV := "lvcreate -n " + lvName + " -l 100%FREE " + vgName + _, err = execCommandInNodeWithError(tc, nodeHostName, createLV) + if err != nil { + return fmt.Errorf("failed to create LV: %w", err) + } + + return nil +} + +// removeLogicalVolumeOnDisk removes logical volume on volume group from backend disk +func removeLogicalVolumeOnDisk(tc *TestClient, nodeHostName string, disk string, vgName string, lvName string) error { + diskName := "/dev/" + disk + partitionName := disk + "p1" + pvName := diskName + "p1" + + existsLV := `lvdisplay /dev/` + vgName + `/` + lvName + ` && echo "true" || echo "false"` + outputLV := execCommandInNode(tc, nodeHostName, existsLV) + lvExists := strings.Contains(outputLV, "true") + + // If VG exists, proceed to check LV and remove accordingly + existsVG := `vgdisplay | grep -q '` + vgName + `' && echo "true" || echo "false"` + outputVG := execCommandInNode(tc, nodeHostName, existsVG) + if strings.Contains(outputVG, "true") { + if lvExists { + // Remove Logical Volume (LV) + removeLV := "lvremove -f /dev/" + vgName + "/" + lvName + execCommandInNode(tc, nodeHostName, removeLV) + } + // Remove Volume Group (VG) + removeVG := "vgremove -f " + vgName + execCommandInNode(tc, nodeHostName, removeVG) + } + + existsPV := `pvdisplay | grep -q '` + pvName + `' && echo "true" || echo "false"` + outputPV := execCommandInNode(tc, nodeHostName, existsPV) + if strings.Contains(outputPV, "true") { + //Remove Physical Volume (PV) + removePV := "pvremove -f " + pvName + execCommandInNode(tc, nodeHostName, removePV) + } + + existsPartition := `lsblk | grep -q '` + partitionName + `' && echo "true" || echo "false"` + outputPartition := execCommandInNode(tc, nodeHostName, existsPartition) + if 
strings.Contains(outputPartition, "true") { + // Remove LVM disk partition + removePartitionCmd := "echo -e 'd\\nw' | fdisk " + diskName + execCommandInNode(tc, nodeHostName, removePartitionCmd) + } + + // Wipe all filesystem signatures from disk + wipeDiskCmd := "wipefs -a " + diskName + execCommandInNode(tc, nodeHostName, wipeDiskCmd) + + return nil +} + +// execCommandInNodeWithError executes a command in a node and returns output and error +func execCommandInNodeWithError(tc *TestClient, nodeName string, command string) (string, error) { + output := execCommandInNode(tc, nodeName, command) + if strings.Contains(strings.ToLower(output), "error") || strings.Contains(strings.ToLower(output), "failed") { + return output, fmt.Errorf("command failed: %s", output) + } + return output, nil +} + +// getLVMClusterJSON retrieves the LVMCluster resource as JSON +func getLVMClusterJSON(name string, namespace string) (string, error) { + cmd := exec.Command("kubectl", "get", "lvmcluster", name, "-n", namespace, "-o", "json") + output, err := cmd.CombinedOutput() + if err != nil { + return "", fmt.Errorf("failed to get LVMCluster: %w, output: %s", err, string(output)) + } + return string(output), nil +} + +// deleteLVMCluster deletes an LVMCluster resource +func deleteLVMCluster(name string, namespace string) error { + cmd := exec.Command("kubectl", "delete", "lvmcluster", name, "-n", namespace, "--ignore-not-found", "--wait=false") + output, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("failed to delete LVMCluster: %w, output: %s", err, string(output)) + } + return nil +} + +// createLVMClusterFromJSON creates an LVMCluster from JSON content +func createLVMClusterFromJSON(jsonContent string) error { + cmd := exec.Command("kubectl", "apply", "-f", "-") + cmd.Stdin = strings.NewReader(jsonContent) + output, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("failed to create LVMCluster: %w, output: %s", err, string(output)) + } + return nil +} + +// createLVMClusterWithForceWipe creates an LVMCluster with forceWipeDevicesAndDestroyAllData set to true +func createLVMClusterWithForceWipe(name string, namespace string, deviceClass string, diskPath string) error { + lvmClusterYAML := fmt.Sprintf(`apiVersion: lvm.topolvm.io/v1alpha1 +kind: LVMCluster +metadata: + name: %s + namespace: %s +spec: + storage: + deviceClasses: + - name: %s + thinPoolConfig: + name: thin-pool-1 + sizePercent: 90 + overprovisionRatio: 10 + deviceSelector: + paths: + - %s + forceWipeDevicesAndDestroyAllData: true +`, name, namespace, deviceClass, diskPath) + + return createLVMClusterFromJSON(lvmClusterYAML) +} + +// createLVMClusterWithPaths creates an LVMCluster with specified device paths +func createLVMClusterWithPaths(name string, namespace string, deviceClass string, diskPath string) error { + lvmClusterYAML := fmt.Sprintf(`apiVersion: lvm.topolvm.io/v1alpha1 +kind: LVMCluster +metadata: + name: %s + namespace: %s +spec: + storage: + deviceClasses: + - name: %s + thinPoolConfig: + name: thin-pool-1 + sizePercent: 90 + overprovisionRatio: 10 + deviceSelector: + paths: + - %s +`, name, namespace, deviceClass, diskPath) + + return createLVMClusterFromJSON(lvmClusterYAML) +} + +// waitForLVMClusterReady waits for the LVMCluster to become Ready +func waitForLVMClusterReady(name string, namespace string, timeout time.Duration) error { + deadline := time.Now().Add(timeout) + for time.Now().Before(deadline) { + cmd := exec.Command("kubectl", "get", "lvmcluster", name, "-n", namespace, 
"-o=jsonpath={.status.state}") + output, err := cmd.CombinedOutput() + if err == nil && strings.TrimSpace(string(output)) == "Ready" { + e2e.Logf("LVMCluster %s is Ready\n", name) + return nil + } + e2e.Logf("LVMCluster %s state: %s, waiting...\n", name, string(output)) + time.Sleep(5 * time.Second) + } + return fmt.Errorf("timeout waiting for LVMCluster %s to become Ready", name) +} + +// removeLVMClusterFinalizers removes finalizers from LVMCluster to allow deletion +func removeLVMClusterFinalizers(name string, namespace string) error { + patch := `{"metadata":{"finalizers":[]}}` + cmd := exec.Command("kubectl", "patch", "lvmcluster", name, "-n", namespace, "--type=merge", "-p", patch) + output, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("failed to remove LVMCluster finalizers: %w, output: %s", err, string(output)) + } + return nil +} + +// removeLVMVolumeGroupFinalizers removes finalizers from LVMVolumeGroup +func removeLVMVolumeGroupFinalizers(deviceClassName string, namespace string) error { + // Check if resource exists first + checkCmd := exec.Command("kubectl", "get", "lvmvolumegroup", deviceClassName, "-n", namespace, "--ignore-not-found", "-o=name") + checkOutput, _ := checkCmd.CombinedOutput() + if len(strings.TrimSpace(string(checkOutput))) == 0 { + // Resource doesn't exist, skip patching + return nil + } + + patch := `{"metadata":{"finalizers":[]}}` + cmd := exec.Command("kubectl", "patch", "lvmvolumegroup", deviceClassName, "-n", namespace, "--type=merge", "-p", patch) + output, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("failed to remove LVMVolumeGroup finalizers: %w, output: %s", err, string(output)) + } + return nil +} + +// removeLVMVolumeGroupNodeStatusFinalizers removes finalizers from all LVMVolumeGroupNodeStatus resources +func removeLVMVolumeGroupNodeStatusFinalizers(namespace string) error { + workerNodes, err := getWorkersList() + if err != nil { + return err + } + + for _, workerName := range workerNodes { + // Check if resource exists first + checkCmd := exec.Command("kubectl", "get", "lvmvolumegroupnodestatus", workerName, "-n", namespace, "--ignore-not-found", "-o=name") + checkOutput, _ := checkCmd.CombinedOutput() + if len(strings.TrimSpace(string(checkOutput))) == 0 { + // Resource doesn't exist, skip patching + continue + } + + patch := `{"metadata":{"finalizers":[]}}` + cmd := exec.Command("kubectl", "patch", "lvmvolumegroupnodestatus", workerName, "-n", namespace, "--type=merge", "-p", patch) + output, err := cmd.CombinedOutput() + if err != nil { + e2e.Logf("Warning: failed to remove finalizers from LVMVolumeGroupNodeStatus %s: %v, output: %s\n", workerName, err, string(output)) + } + } + return nil +} + +// deleteLVMClusterSafely deletes an LVMCluster by removing finalizers after deletion +func deleteLVMClusterSafely(name string, namespace string, deviceClassName string) error { + // Delete the LVMCluster first with --wait=false + err := deleteLVMCluster(name, namespace) + if err != nil { + return err + } + + // Then remove finalizers from all related resources + removeLVMVolumeGroupNodeStatusFinalizers(namespace) + removeLVMVolumeGroupFinalizers(deviceClassName, namespace) + removeLVMClusterFinalizers(name, namespace) + + // Wait for LVMCluster to be fully deleted + deadline := time.Now().Add(2 * time.Minute) + for time.Now().Before(deadline) { + exists, err := resourceExists("lvmcluster", name, namespace) + if err != nil { + return fmt.Errorf("failed to check if LVMCluster exists: %w", err) + } + if 
!exists { + e2e.Logf("LVMCluster %s fully deleted\n", name) + return nil + } + e2e.Logf("Waiting for LVMCluster %s to be deleted...\n", name) + time.Sleep(5 * time.Second) + } + + return fmt.Errorf("timeout waiting for LVMCluster %s to be deleted", name) +} + +// deleteLVMClusterForRecovery deletes an LVMCluster but does NOT wait for backend cleanup +// This allows the backend VG to remain on disk for recovery testing +func deleteLVMClusterForRecovery(name string, namespace string, deviceClassName string) error { + // Step 1: Initiate delete WITHOUT waiting (matches original test behavior) + e2e.Logf("Initiating delete of LVMCluster %s (without waiting)...\n", name) + cmd := exec.Command("kubectl", "delete", "lvmcluster", name, "-n", namespace, "--wait=false") + output, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("failed to initiate LVMCluster deletion: %w, output: %s", err, string(output)) + } + + // Step 2: Immediately remove finalizers (this prevents backend cleanup) + e2e.Logf("Removing finalizers to prevent backend VG cleanup for %s...\n", name) + time.Sleep(2 * time.Second) // Small delay to let deletion start + + removeLVMClusterFinalizers(name, namespace) + removeLVMVolumeGroupFinalizers(deviceClassName, namespace) + removeLVMVolumeGroupNodeStatusFinalizers(namespace) + + // Step 3: Wait for LVMCluster to be fully deleted from Kubernetes + e2e.Logf("Waiting for LVMCluster %s to be deleted from Kubernetes...\n", name) + deadline := time.Now().Add(2 * time.Minute) + for time.Now().Before(deadline) { + exists, err := resourceExists("lvmcluster", name, namespace) + if err != nil { + return fmt.Errorf("failed to check if LVMCluster exists: %w", err) + } + if !exists { + e2e.Logf("LVMCluster %s deleted from Kubernetes (backend VG will remain for recovery)\n", name) + return nil + } + time.Sleep(5 * time.Second) + } + + return fmt.Errorf("timeout waiting for LVMCluster %s to be deleted", name) +} + +// getLVMClusterName retrieves the first LVMCluster name from a given namespace +func getLVMClusterName(namespace string) (string, error) { + cmd := exec.Command("kubectl", "get", "lvmcluster", "-n", namespace, "-o=jsonpath={.items[0].metadata.name}") + output, err := cmd.CombinedOutput() + if err != nil { + return "", fmt.Errorf("failed to get LVMCluster name: %w, output: %s", err, string(output)) + } + name := strings.TrimSpace(string(output)) + if name == "" { + return "", fmt.Errorf("no LVMCluster found in namespace %s", namespace) + } + return name, nil +} + +// resourceExists checks if a Kubernetes resource exists +func resourceExists(resourceType string, name string, namespace string) (bool, error) { + cmd := exec.Command("kubectl", "get", resourceType, name, "-n", namespace, "--ignore-not-found", "-o=jsonpath={.metadata.name}") + output, err := cmd.CombinedOutput() + if err != nil { + return false, fmt.Errorf("failed to check resource existence: %w, output: %s", err, string(output)) + } + return strings.TrimSpace(string(output)) != "", nil +} + +// getLvmClusterPath gets the current LVM cluster device path +func getLvmClusterPath(namespace string) (string, error) { + currentLVMClusterName, err := getLVMClusterName(namespace) + if err != nil { + return "", err + } + cmd := exec.Command("kubectl", "get", "lvmcluster", "-n", namespace, currentLVMClusterName, "-o=jsonpath={.status.deviceClassStatuses[*].nodeStatus[*].devices[*]}") + output, err := cmd.CombinedOutput() + if err != nil { + return "", fmt.Errorf("failed to get LVMCluster path: %w, output: %s", err, 
string(output)) + } + selectedDisk := strings.TrimSpace(string(output)) + e2e.Logf("Current LVM cluster path: %s\n", selectedDisk) + return selectedDisk, nil +} + +// patchMetadataSizeCalculationPolicyToStatic patches the LVMCluster to set metadataSizeCalculationPolicy to Static with the given metadataSize +func patchMetadataSizeCalculationPolicyToStatic(name string, namespace string, metadataSize string) error { + patch := fmt.Sprintf(`[ + {"op": "replace", "path": "/spec/storage/deviceClasses/0/thinPoolConfig/metadataSizeCalculationPolicy", "value": "Static"}, + {"op": "replace", "path": "/spec/storage/deviceClasses/0/thinPoolConfig/metadataSize", "value": "%s"} + ]`, metadataSize) + + cmd := exec.Command("kubectl", "patch", "lvmcluster", name, "-n", namespace, "--type=json", "-p", patch) + output, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("failed to patch LVMCluster metadataSize: %w, output: %s", err, string(output)) + } + e2e.Logf("Patched LVMCluster %s with metadataSizeCalculationPolicy set to 'Static' and metadataSize to %s\n", name, metadataSize) + return nil +} + +// getLogicalVolumeSelectedNode gets the node where the LVMS provisioned volume is located +func getLogicalVolumeSelectedNode(namespace string, pvcName string) (string, error) { + cmd := exec.Command("kubectl", "get", "pvc", pvcName, "-n", namespace, "-o=jsonpath={.metadata.annotations.volume\\.kubernetes\\.io/selected-node}") + output, err := cmd.CombinedOutput() + if err != nil { + return "", fmt.Errorf("failed to get selected node for PVC %s: %w, output: %s", pvcName, err, string(output)) + } + nodeName := strings.TrimSpace(string(output)) + e2e.Logf("The nodename in namespace %s for pvc %s is %s\n", namespace, pvcName, nodeName) + return nodeName, nil +} + +// patchOverprovisionRatio patches the LVMCluster to set overprovisionRatio +func patchOverprovisionRatio(name string, namespace string, overprovisionRatio string) error { + patch := fmt.Sprintf(`[ + {"op": "replace", "path": "/spec/storage/deviceClasses/0/thinPoolConfig/overprovisionRatio", "value": %s} + ]`, overprovisionRatio) + + cmd := exec.Command("kubectl", "patch", "lvmcluster", name, "-n", namespace, "--type=json", "-p", patch) + output, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("failed to patch LVMCluster overprovisionRatio: %w, output: %s", err, string(output)) + } + e2e.Logf("Patched LVMCluster %s with overprovisionRatio=%s\n", name, overprovisionRatio) + return nil +} + +// waitForVGManagerPodRunning waits for vg-manager pods to be in Running state +func waitForVGManagerPodRunning(tc *TestClient, namespace string, timeout time.Duration) error { + deadline := time.Now().Add(timeout) + for time.Now().Before(deadline) { + pods, err := tc.Clientset.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{ + LabelSelector: "app.kubernetes.io/component=vg-manager", + }) + if err != nil { + e2e.Logf("Failed to list vg-manager pods: %v\n", err) + time.Sleep(5 * time.Second) + continue + } + + if len(pods.Items) == 0 { + e2e.Logf("No vg-manager pods found, waiting...\n") + time.Sleep(5 * time.Second) + continue + } + + allRunning := true + for _, pod := range pods.Items { + if pod.Status.Phase != corev1.PodRunning { + allRunning = false + e2e.Logf("vg-manager pod %s is in %s phase, waiting...\n", pod.Name, pod.Status.Phase) + break + } + } + + if allRunning { + e2e.Logf("All vg-manager pods are Running\n") + return nil + } + + time.Sleep(5 * time.Second) + } + return fmt.Errorf("timeout waiting for 
vg-manager pods to be Running") +} + +// getVolSizeFromPvc gets the volume size from PVC status +func getVolSizeFromPvc(tc *TestClient, pvcName string, namespace string) (string, error) { + pvc, err := tc.Clientset.CoreV1().PersistentVolumeClaims(namespace).Get(context.TODO(), pvcName, metav1.GetOptions{}) + if err != nil { + return "", fmt.Errorf("failed to get PVC %s in namespace %s: %w", pvcName, namespace, err) + } + + if pvc.Status.Capacity == nil { + return "", fmt.Errorf("PVC %s has no capacity in status", pvcName) + } + + volumeSize := pvc.Status.Capacity[corev1.ResourceStorage] + volumeSizeStr := volumeSize.String() + e2e.Logf("The PVC %s volumesize is %s\n", pvcName, volumeSizeStr) + return volumeSizeStr, nil +} + +// checkVolumeBiggerThanDisk verifies that the PV size is bigger than the thin pool size +func checkVolumeBiggerThanDisk(tc *TestClient, pvcName string, pvcNamespace string, thinPoolSize int) { + pvSize, err := getVolSizeFromPvc(tc, pvcName, pvcNamespace) + o.Expect(err).NotTo(o.HaveOccurred()) + + // Extract numeric value from size string (e.g., "10Gi" -> "10") + regexForNumbersOnly := regexp.MustCompile("[0-9]+") + pvSizeVal := regexForNumbersOnly.FindAllString(pvSize, -1)[0] + pvSizeNum, err := strconv.Atoi(pvSizeVal) + o.Expect(err).NotTo(o.HaveOccurred()) + + e2e.Logf("Persistent volume Size in Gi: %d\n", pvSizeNum) + o.Expect(pvSizeNum > thinPoolSize).Should(o.BeTrue()) +} + +// writePodData writes test data to the mounted volume in a pod +func writePodData(tc *TestClient, namespace string, podName string, containerName string, mountPath string) { + writeCmd := fmt.Sprintf("echo 'storage test' > %s/testfile", mountPath) + output := execCommandInPod(tc, namespace, podName, containerName, writeCmd) + e2e.Logf("Write command output: %s\n", output) + + syncCmd := fmt.Sprintf("sync -f %s/testfile", mountPath) + output = execCommandInPod(tc, namespace, podName, containerName, syncCmd) + e2e.Logf("Sync command output: %s\n", output) +} + +// checkPodDataExists verifies that test data exists in the mounted volume +func checkPodDataExists(tc *TestClient, namespace string, podName string, containerName string, mountPath string, shouldExist bool) { + readCmd := fmt.Sprintf("cat %s/testfile", mountPath) + output := execCommandInPod(tc, namespace, podName, containerName, readCmd) + + if shouldExist { + o.Expect(output).To(o.ContainSubstring("storage test")) + e2e.Logf("Data exists and verified in pod %s\n", podName) + } else { + o.Expect(output).To(o.Or(o.ContainSubstring("No such file or directory"), o.BeEmpty())) + e2e.Logf("Data does not exist as expected in pod %s\n", podName) + } +}