From 5b3a55c19cacb04597d58db05d3058bd1cd8e260 Mon Sep 17 00:00:00 2001 From: Marco Braga Date: Wed, 14 Aug 2024 11:05:08 -0300 Subject: [PATCH] OPCT-257: fix status cmd must not required clients to init command (#119) The command initialization must have minimal to no external dependencies to create the command. Currently, OPCT commands carry some dependencies when initializing the `Run` command, which isn't the best practice. #118 introduced a new external dependency requiring a live cluster when cobra is creating and initializing the command (without triggering it), which is neither expected nor desired, as the OPCT tool provides many capabilities to operate without access to the OpenShift/Kubernetes API, such as the `report` command, which is affected and raises an error after #118: ~~~ $ opct-devel report --save-to res opct_202408131745_9dd0adf1-df49-44fd-ae32-9c2ace0946c9.tar.gz --log-level=debug ERRO[0000] error creating clients: error creating sonobuoy rest helper: could not get api group resources: Get "https://api.opct-base-05.devcluster.openshift.com:6443/api?timeout=32s": dial tcp: lookup api.opct-base-05.devcluster.openshift.com on 192.168.15.1:53: no such host error="error creating sonobuoy rest helper: could not get api group resources: Get \"https://api.opct-base-05.devcluster.openshift.com:6443/api?timeout=32s\": dial tcp: lookup api.opct-base-05.devcluster.openshift.com on 192.168.15.1:53: no such host" ~~~ The changes here declare the Status cmd as a var, with `Run` declared as an external function. 
--- pkg/retrieve/retrieve.go | 2 +- pkg/run/run.go | 5 +-- pkg/status/cmd.go | 61 +++++++++++++++++++++++++++++ pkg/status/printer.go | 66 +------------------------------ pkg/status/printer_test.go | 4 +- pkg/status/status.go | 80 ++++++++------------------------------ pkg/status/utils.go | 67 +++++++++++++++++++++++++++++++ pkg/status/utils_test.go | 54 +++++++++++++++++++++++++ 8 files changed, 204 insertions(+), 135 deletions(-) create mode 100644 pkg/status/cmd.go create mode 100644 pkg/status/utils.go create mode 100644 pkg/status/utils_test.go diff --git a/pkg/retrieve/retrieve.go b/pkg/retrieve/retrieve.go index ef99a3a..58ffcfd 100644 --- a/pkg/retrieve/retrieve.go +++ b/pkg/retrieve/retrieve.go @@ -41,7 +41,7 @@ func NewCmdRetrieve() *cobra.Command { } } - s := status.NewStatusOptions(&status.StatusInput{Watch: false}) + s := status.NewStatus(&status.StatusInput{Watch: false}) if err := s.PreRunCheck(); err != nil { return fmt.Errorf("retrieve finished with errors: %v", err) } diff --git a/pkg/run/run.go b/pkg/run/run.go index ff466ef..ca7c146 100644 --- a/pkg/run/run.go +++ b/pkg/run/run.go @@ -124,11 +124,8 @@ func NewCmdRun() *cobra.Command { return err } - // Sleep to give status time to appear - // time.Sleep(status.StatusInterval) - // Retrieve the first status and print it, finishing when --watch is not set. - s := status.NewStatusOptions(&status.StatusInput{ + s := status.NewStatus(&status.StatusInput{ Watch: o.watch, KClient: kclient, SClient: sclient, diff --git a/pkg/status/cmd.go b/pkg/status/cmd.go new file mode 100644 index 0000000..7e0f5f4 --- /dev/null +++ b/pkg/status/cmd.go @@ -0,0 +1,61 @@ +package status + +import ( + "github.com/redhat-openshift-ecosystem/provider-certification-tool/pkg/wait" + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" +) + +// cmdStatusArgs is the struct to store the input arguments for the status command. 
+type cmdInputStatus struct { + Watch bool + IntervalSeconds int +} + +var cmdArgsStatus cmdInputStatus +var cmdStatus = &cobra.Command{ + Use: "status", + Example: "opct status --watch", + Short: "Show the current status of the validation tool", + Long: ``, + RunE: cmdStatusRun, +} + +func init() { + cmdStatus.PersistentFlags().BoolVarP(&cmdArgsStatus.Watch, "watch", "w", false, "Keep watch status after running") + cmdStatus.Flags().IntVarP(&cmdArgsStatus.IntervalSeconds, "watch-interval", "", DefaultStatusIntervalSeconds, "Interval to watch the status and print in the stdout") +} + +func NewCmdStatus() *cobra.Command { + return cmdStatus +} + +func cmdStatusRun(cmd *cobra.Command, args []string) error { + o := NewStatus(&StatusInput{ + Watch: cmdArgsStatus.Watch, + IntervalSeconds: cmdArgsStatus.IntervalSeconds, + }) + // Pre-checks and setup + if err := o.PreRunCheck(); err != nil { + log.WithError(err).Error("error running pre-checks") + return err + } + + // Wait for Sonobuoy to create + if err := wait.WaitForRequiredResources(o.kclient); err != nil { + log.WithError(err).Error("error waiting for sonobuoy pods to become ready") + return err + } + + // Wait for Sononbuoy to start reporting status + if err := o.WaitForStatusReport(cmd.Context()); err != nil { + log.WithError(err).Error("error retrieving current aggregator status") + return err + } + + if err := o.Print(cmd); err != nil { + log.WithError(err).Error("error printing status") + return err + } + return nil +} diff --git a/pkg/status/printer.go b/pkg/status/printer.go index b1fcf58..b397101 100644 --- a/pkg/status/printer.go +++ b/pkg/status/printer.go @@ -1,7 +1,6 @@ package status import ( - "context" "fmt" "html/template" "os" @@ -9,13 +8,7 @@ import ( "time" "github.com/redhat-openshift-ecosystem/provider-certification-tool/pkg" - log "github.com/sirupsen/logrus" "github.com/vmware-tanzu/sonobuoy/pkg/plugin/aggregation" - - kcorev1 "k8s.io/api/core/v1" - kmmetav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" - klabels "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/kubernetes" ) type PrintableStatus struct { @@ -38,7 +31,7 @@ var runningStatusTemplate = `{{.CurrentTime}}|{{.ElapsedTime}}> Global Status: { {{printf "%-34s | %-10s | %-10s | %-25s | %-50s" $pl.Name $pl.Status $pl.Result $pl.Progress $pl.Message}}{{end}} ` -func (s *StatusOptions) printRunningStatus() error { +func (s *Status) printRunningStatus() error { statusTemplate, err := template.New("statusTemplate").Parse(runningStatusTemplate) if err != nil { return err @@ -46,7 +39,7 @@ func (s *StatusOptions) printRunningStatus() error { return statusTemplate.Execute(os.Stdout, s.getPrintableRunningStatus()) } -func (s *StatusOptions) getPrintableRunningStatus() PrintableStatus { +func (s *Status) getPrintableRunningStatus() PrintableStatus { now := time.Now() ps := PrintableStatus{ GlobalStatus: s.Latest.Status, @@ -109,58 +102,3 @@ func (s *StatusOptions) getPrintableRunningStatus() PrintableStatus { return ps } - -// GetPluginPod get the plugin pod spec. -func getPluginPod(kclient kubernetes.Interface, namespace string, pluginPodName string) (*kcorev1.Pod, error) { - labelSelector := kmmetav1.LabelSelector{MatchLabels: map[string]string{"component": "sonobuoy", "sonobuoy-plugin": pluginPodName}} - log.Debugf("Getting pod with labels: %v\n", labelSelector) - listOptions := kmmetav1.ListOptions{ - LabelSelector: klabels.Set(labelSelector.MatchLabels).String(), - } - - podList, err := kclient.CoreV1().Pods(namespace).List(context.TODO(), listOptions) - if err != nil { - return nil, fmt.Errorf("unable to list pods with label %q", labelSelector) - } - - switch { - case len(podList.Items) == 0: - log.Warnf("no pods found with label %q in namespace %s", labelSelector, namespace) - return nil, fmt.Errorf(fmt.Sprintf("no pods found with label %q in namespace %s", labelSelector, namespace)) - - case len(podList.Items) > 1: - log.Warnf("Found more than one pod with label %q. 
Using pod with name %q", labelSelector, podList.Items[0].GetName()) - return &podList.Items[0], nil - default: - return &podList.Items[0], nil - } -} - -// getPodStatusString get the pod status string. -func getPodStatusString(pod *kcorev1.Pod) string { - if pod == nil { - return "TBD(pod)" - } - - for _, cond := range pod.Status.Conditions { - // Pod Running - if cond.Type == kcorev1.PodReady && - cond.Status == kcorev1.ConditionTrue && - pod.Status.Phase == kcorev1.PodRunning { - return "Running" - } - // Pod Completed - if cond.Type == kcorev1.PodReady && - cond.Status == "False" && - cond.Reason == "PodCompleted" { - return "Completed" - } - // Pod NotReady (Container) - if cond.Type == kcorev1.PodReady && - cond.Status == "False" && - cond.Reason == "ContainersNotReady" { - return "NotReady" - } - } - return string(pod.Status.Phase) -} diff --git a/pkg/status/printer_test.go b/pkg/status/printer_test.go index f394ac0..db5d173 100644 --- a/pkg/status/printer_test.go +++ b/pkg/status/printer_test.go @@ -11,8 +11,8 @@ import ( "github.com/vmware-tanzu/sonobuoy/pkg/plugin/aggregation" ) -func Test_PrintStatus(t *testing.T) { - status := &StatusOptions{ +func Test_printStatus(t *testing.T) { + status := &Status{ StartTime: time.Now(), Latest: &aggregation.Status{ Plugins: []aggregation.PluginStatus{ diff --git a/pkg/status/status.go b/pkg/status/status.go index 022ed03..a91e76f 100644 --- a/pkg/status/status.go +++ b/pkg/status/status.go @@ -16,7 +16,6 @@ import ( "github.com/redhat-openshift-ecosystem/provider-certification-tool/pkg" "github.com/redhat-openshift-ecosystem/provider-certification-tool/pkg/client" - "github.com/redhat-openshift-ecosystem/provider-certification-tool/pkg/wait" ) const ( @@ -25,14 +24,13 @@ const ( StatusRetryLimit = 10 ) -// StatusOptions is the interface to store input options to +// Status is the interface to store input options to // interface with Status command. 
-type StatusOptions struct { +type Status struct { StartTime time.Time Latest *aggregation.Status watch bool shownPostProcessMsg bool - watchInterval int waitInterval time.Duration // clients @@ -49,10 +47,10 @@ type StatusInput struct { SClient sonobuoyclient.Interface } -func NewStatusOptions(in *StatusInput) *StatusOptions { - s := &StatusOptions{ +func NewStatus(in *StatusInput) *Status { + s := &Status{ watch: in.Watch, - waitInterval: time.Second * DefaultStatusIntervalSeconds, + waitInterval: DefaultStatusIntervalSeconds * time.Second, StartTime: time.Now(), } if in.IntervalSeconds != 0 { @@ -74,51 +72,8 @@ func NewStatusOptions(in *StatusInput) *StatusOptions { return s } -func NewCmdStatus() *cobra.Command { - o := NewStatusOptions(&StatusInput{Watch: false}) - - cmd := &cobra.Command{ - Use: "status", - Short: "Show the current status of the validation tool", - Long: ``, - RunE: func(cmd *cobra.Command, args []string) error { - // Pre-checks and setup - if err := o.PreRunCheck(); err != nil { - log.WithError(err).Error("error running pre-checks") - return err - } - - // Wait for Sonobuoy to create - if err := wait.WaitForRequiredResources(o.kclient); err != nil { - log.WithError(err).Error("error waiting for sonobuoy pods to become ready") - return err - } - - // Wait for Sononbuoy to start reporting status - if err := o.WaitForStatusReport(cmd.Context()); err != nil { - log.WithError(err).Error("error retrieving current aggregator status") - return err - } - - if err := o.Print(cmd); err != nil { - log.WithError(err).Error("error printing status") - return err - } - return nil - }, - } - - cmd.PersistentFlags().BoolVarP(&o.watch, "watch", "w", false, "Keep watch status after running") - cmd.Flags().IntVarP(&o.watchInterval, "watch-interval", "", DefaultStatusIntervalSeconds, "Interval to watch the status and print in the stdout") - if o.watchInterval != DefaultStatusIntervalSeconds { - o.waitInterval = time.Duration(o.watchInterval) * time.Second - } - - 
return cmd -} - -func (s *StatusOptions) PreRunCheck() error { - // Check if sonobuoy namespac already exists +// PreRunCheck will check if the validation environment is running. +func (s *Status) PreRunCheck() error { _, err := s.kclient.CoreV1().Namespaces().Get(context.TODO(), pkg.CertificationNamespace, metav1.GetOptions{}) if err != nil { // If error is due to namespace not being found, return guidance. @@ -126,14 +81,11 @@ func (s *StatusOptions) PreRunCheck() error { return errors.New("looks like there is no validation environment running. use run command to start the validation process") } } - - // Sonobuoy namespace exists so no error return nil } -// Update the Sonobuoy state saved in StatusOptions -func (s *StatusOptions) Update() error { - // TODO Is a retry in here needed? +// Update the Sonobuoy state saved in Status +func (s *Status) Update() error { sstatus, err := s.sclient.GetStatus(&sonobuoyclient.StatusConfig{Namespace: pkg.CertificationNamespace}) if err != nil { return err @@ -143,8 +95,8 @@ func (s *StatusOptions) Update() error { return nil } -// GetStatusForPlugin will get a plugin's status from the state saved in StatusOptions -func (s *StatusOptions) GetStatusForPlugin(name string) *aggregation.PluginStatus { +// GetStatusForPlugin will get a plugin's status from the state saved in Status. +func (s *Status) GetStatusForPlugin(name string) *aggregation.PluginStatus { if s.Latest == nil { return nil } @@ -159,7 +111,7 @@ func (s *StatusOptions) GetStatusForPlugin(name string) *aggregation.PluginStatu } // GetStatus returns the latest aggregator status if there is one, otherwise empty string. -func (s *StatusOptions) GetStatus() string { +func (s *Status) GetStatus() string { if s.Latest != nil { return s.Latest.Status } @@ -169,7 +121,7 @@ func (s *StatusOptions) GetStatus() string { // WaitForStatusReport will block until either context is canceled, status is reported, or retry limit reach. 
// An error will not result in immediate failure and will be retried. -func (s *StatusOptions) WaitForStatusReport(ctx context.Context) error { +func (s *Status) WaitForStatusReport(ctx context.Context) error { tries := 1 err := wait2.PollUntilContextCancel(ctx, s.waitInterval, true, func(ctx context.Context) (done bool, err error) { if tries == StatusRetryLimit { @@ -189,7 +141,7 @@ func (s *StatusOptions) WaitForStatusReport(ctx context.Context) error { return err } -func (s *StatusOptions) Print(cmd *cobra.Command) error { +func (s *Status) Print(cmd *cobra.Command) error { if !s.watch { _, err := s.doPrint() return err @@ -212,7 +164,7 @@ func (s *StatusOptions) Print(cmd *cobra.Command) error { }) } -func (s *StatusOptions) doPrint() (complete bool, err error) { +func (s *Status) doPrint() (complete bool, err error) { switch s.GetStatus() { case aggregation.RunningStatus: if err := s.printRunningStatus(); err != nil { @@ -243,6 +195,6 @@ func (s *StatusOptions) doPrint() (complete bool, err error) { return false, nil } -func (s *StatusOptions) GetSonobuoyClient() sonobuoyclient.Interface { +func (s *Status) GetSonobuoyClient() sonobuoyclient.Interface { return s.sclient } diff --git a/pkg/status/utils.go b/pkg/status/utils.go new file mode 100644 index 0000000..1005540 --- /dev/null +++ b/pkg/status/utils.go @@ -0,0 +1,67 @@ +package status + +import ( + "context" + "fmt" + + log "github.com/sirupsen/logrus" + kcorev1 "k8s.io/api/core/v1" + kmmetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + klabels "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/kubernetes" +) + +// GetPluginPod get the plugin pod spec. 
+func getPluginPod(kclient kubernetes.Interface, namespace string, pluginPodName string) (*kcorev1.Pod, error) { + labelSelector := kmmetav1.LabelSelector{MatchLabels: map[string]string{"component": "sonobuoy", "sonobuoy-plugin": pluginPodName}} + log.Debugf("Getting pod with labels: %v\n", labelSelector) + listOptions := kmmetav1.ListOptions{ + LabelSelector: klabels.Set(labelSelector.MatchLabels).String(), + } + + podList, err := kclient.CoreV1().Pods(namespace).List(context.TODO(), listOptions) + if err != nil { + return nil, fmt.Errorf("unable to list pods with label %q", labelSelector) + } + + switch { + case len(podList.Items) == 0: + log.Warnf("no pods found with label %q in namespace %s", labelSelector, namespace) + return nil, fmt.Errorf(fmt.Sprintf("no pods found with label %q in namespace %s", labelSelector, namespace)) + + case len(podList.Items) > 1: + log.Warnf("Found more than one pod with label %q. Using pod with name %q", labelSelector, podList.Items[0].GetName()) + return &podList.Items[0], nil + default: + return &podList.Items[0], nil + } +} + +// getPodStatusString get the pod status string. 
+func getPodStatusString(pod *kcorev1.Pod) string { + if pod == nil { + return "TBD(pod)" + } + + for _, cond := range pod.Status.Conditions { + // Pod Running + if cond.Type == kcorev1.PodReady && + cond.Status == kcorev1.ConditionTrue && + pod.Status.Phase == kcorev1.PodRunning { + return "Running" + } + // Pod Completed + if cond.Type == kcorev1.PodReady && + cond.Status == "False" && + cond.Reason == "PodCompleted" { + return "Completed" + } + // Pod NotReady (Container) + if cond.Type == kcorev1.PodReady && + cond.Status == "False" && + cond.Reason == "ContainersNotReady" { + return "NotReady" + } + } + return string(pod.Status.Phase) +} diff --git a/pkg/status/utils_test.go b/pkg/status/utils_test.go new file mode 100644 index 0000000..81842d9 --- /dev/null +++ b/pkg/status/utils_test.go @@ -0,0 +1,54 @@ +package status + +import ( + "testing" + + kcorev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes/fake" +) + +func TestGetPluginPod(t *testing.T) { + kclient := fake.NewSimpleClientset(&kcorev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod", + Namespace: "test-namespace", + Labels: map[string]string{ + "component": "sonobuoy", + "sonobuoy-plugin": "test-plugin", + }, + }, + }) + + namespace := "test-namespace" + pluginPodName := "test-plugin" + + pod, err := getPluginPod(kclient, namespace, pluginPodName) + if err != nil { + t.Errorf("getPluginPod() returned an error: %v", err) + } + + expectedPodName := "test-pod" + if pod.Name != expectedPodName { + t.Errorf("getPluginPod() returned the wrong pod. 
Expected: %s, Got: %s", expectedPodName, pod.Name) + } +} +func TestGetPodStatusString(t *testing.T) { + pod := &kcorev1.Pod{ + Status: kcorev1.PodStatus{ + Phase: kcorev1.PodRunning, + Conditions: []kcorev1.PodCondition{ + { + Type: kcorev1.PodReady, + Status: kcorev1.ConditionTrue, + }, + }, + }, + } + + expectedStatus := "Running" + status := getPodStatusString(pod) + if status != expectedStatus { + t.Errorf("getPodStatusString() returned the wrong status. Expected: %s, Got: %s", expectedStatus, status) + } +}