diff --git a/.buildkite/pipeline.trigger.integration.tests.sh b/.buildkite/pipeline.trigger.integration.tests.sh index dc1e4d2ad..3e99ddf42 100755 --- a/.buildkite/pipeline.trigger.integration.tests.sh +++ b/.buildkite/pipeline.trigger.integration.tests.sh @@ -96,7 +96,6 @@ for package in $(find . -maxdepth 1 -mindepth 1 -type d) ; do label_suffix=" (independent agent)" fi package_name=$(basename "${package}") - if [[ "$independent_agent" == "false" && "$package_name" == "oracle" ]]; then echoerr "Package \"${package_name}\" skipped: not supported with Elastic Agent running in the stack (missing required software)." continue diff --git a/cmd/testrunner.go b/cmd/testrunner.go index 246c11d97..379f39d99 100644 --- a/cmd/testrunner.go +++ b/cmd/testrunner.go @@ -337,25 +337,31 @@ func testTypeCommandActionFactory(runner testrunner.TestRunner) cobraext.Command fmt.Printf("Running tests per stages (technical preview)\n") } + runnerOptions := testrunner.TestOptions{ + Profile: profile, + PackageRootPath: packageRootPath, + GenerateTestResult: generateTestResult, + API: esAPI, + KibanaClient: kibanaClient, + DeferCleanup: deferCleanup, + ServiceVariant: variantFlag, + WithCoverage: testCoverage, + CoverageType: testCoverageFormat, + ConfigFilePath: configFileFlag, + RunSetup: runSetup, + RunTearDown: runTearDown, + RunTestsOnly: runTestsOnly, + RunIndependentElasticAgent: false, + } + err = runner.SetupRunner(ctx, runnerOptions) + if err != nil { + return fmt.Errorf("failed to run setup runner process: %w", err) + } + var results []testrunner.TestResult for _, folder := range testFolders { - r, err := testrunner.Run(ctx, testType, testrunner.TestOptions{ - Profile: profile, - TestFolder: folder, - PackageRootPath: packageRootPath, - GenerateTestResult: generateTestResult, - API: esAPI, - KibanaClient: kibanaClient, - DeferCleanup: deferCleanup, - ServiceVariant: variantFlag, - WithCoverage: testCoverage, - CoverageType: testCoverageFormat, - ConfigFilePath: 
configFileFlag, - RunSetup: runSetup, - RunTearDown: runTearDown, - RunTestsOnly: runTestsOnly, - RunIndependentElasticAgent: false, - }) + runnerOptions.TestFolder = folder + r, err := testrunner.Run(ctx, runner, runnerOptions) // Results must be appended even if there is an error, since there could be // tests (e.g. system tests) that return both error and results. @@ -367,6 +373,11 @@ func testTypeCommandActionFactory(runner testrunner.TestRunner) cobraext.Command } + err = runner.TearDownRunner(ctx) + if err != nil { + return fmt.Errorf("failed to run tear down runner process: %w", err) + } + format := testrunner.TestReportFormat(reportFormat) report, err := testrunner.FormatReport(format, results) if err != nil { @@ -390,6 +401,7 @@ func testTypeCommandActionFactory(runner testrunner.TestRunner) cobraext.Command return errors.New("one or more test cases failed") } } + return nil } } diff --git a/internal/agentdeployer/agent.go b/internal/agentdeployer/agent.go index 246348255..ce1ac8534 100644 --- a/internal/agentdeployer/agent.go +++ b/internal/agentdeployer/agent.go @@ -391,12 +391,12 @@ func (s *dockerComposeDeployedAgent) TearDown(ctx context.Context) error { defer func() { // Remove the service logs dir for this agent if err := os.RemoveAll(s.agentInfo.Logs.Folder.Local); err != nil { - logger.Errorf("could not remove the agent logs (path: %s): %w", s.agentInfo.Logs.Folder.Local, err) + logger.Errorf("could not remove the agent logs (path: %s): %v", s.agentInfo.Logs.Folder.Local, err) } // Remove the configuration dir for this agent (e.g. 
compose scenario files) if err := os.RemoveAll(s.agentInfo.ConfigDir); err != nil { - logger.Errorf("could not remove the agent configuration directory (path: %s) %w", s.agentInfo.ConfigDir, err) + logger.Errorf("could not remove the agent configuration directory (path: %s) %v", s.agentInfo.ConfigDir, err) } }() diff --git a/internal/servicedeployer/compose.go b/internal/servicedeployer/compose.go index 9dc274b36..989e15d81 100644 --- a/internal/servicedeployer/compose.go +++ b/internal/servicedeployer/compose.go @@ -98,7 +98,7 @@ func (d *DockerComposeServiceDeployer) SetUp(ctx context.Context, svcInfo Servic // service logs folder must no be deleted to avoid breaking log files written // by the service. If this is required, those files should be rotated or truncated // so the service can still write to them. - logger.Debug("Skipping removing service logs folder folder %s", svcInfo.Logs.Folder.Local) + logger.Debugf("Skipping removing service logs folder %s", svcInfo.Logs.Folder.Local) } else { err = files.RemoveContent(svcInfo.Logs.Folder.Local) if err != nil { @@ -242,7 +242,7 @@ func (s *dockerComposeDeployedService) TearDown(ctx context.Context) error { } // Remove the outputs generated by the service container if err = os.RemoveAll(s.svcInfo.OutputDir); err != nil { - logger.Errorf("could not remove the temporary output files %w", err) + logger.Errorf("could not remove the temporary output files %s", err) } }() diff --git a/internal/servicedeployer/custom_agent.go b/internal/servicedeployer/custom_agent.go index 36cc111e8..2c8e01ab8 100644 --- a/internal/servicedeployer/custom_agent.go +++ b/internal/servicedeployer/custom_agent.go @@ -129,7 +129,7 @@ func (d *CustomAgentDeployer) SetUp(ctx context.Context, svcInfo ServiceInfo) (D // service logs folder must no be deleted to avoid breaking log files written // by the service. If this is required, those files should be rotated or truncated // so the service can still write to them. 
- logger.Debug("Skipping removing service logs folder folder %s", svcInfo.Logs.Folder.Local) + logger.Debugf("Skipping removing service logs folder %s", svcInfo.Logs.Folder.Local) } else { err = files.RemoveContent(svcInfo.Logs.Folder.Local) if err != nil { diff --git a/internal/testrunner/runners/asset/runner.go b/internal/testrunner/runners/asset/runner.go index 11451dec9..39f50758e 100644 --- a/internal/testrunner/runners/asset/runner.go +++ b/internal/testrunner/runners/asset/runner.go @@ -46,6 +46,24 @@ func (r runner) String() string { return "asset loading" } +// SetupRunner prepares global resources required by the test runner. +func (r *runner) SetupRunner(ctx context.Context, options testrunner.TestOptions) error { + r.packageRootPath = options.PackageRootPath + r.kibanaClient = options.KibanaClient + + manager := resources.NewManager() + manager.RegisterProvider(resources.DefaultKibanaProviderName, &resources.KibanaProvider{Client: r.kibanaClient}) + + r.resourcesManager = manager + return nil +} + +// TearDownRunner cleans up any global test runner resources. It must be called +// after the test runner has finished executing all its tests. +func (r *runner) TearDownRunner(ctx context.Context) error { + return nil +} + // CanRunPerDataStream returns whether this test runner can run on individual // data streams within the package. 
func (r runner) CanRunPerDataStream() bool { @@ -61,13 +79,6 @@ func (r *runner) CanRunSetupTeardownIndependent() bool { // Run runs the asset loading tests func (r *runner) Run(ctx context.Context, options testrunner.TestOptions) ([]testrunner.TestResult, error) { r.testFolder = options.TestFolder - r.packageRootPath = options.PackageRootPath - r.kibanaClient = options.KibanaClient - - manager := resources.NewManager() - manager.RegisterProvider(resources.DefaultKibanaProviderName, &resources.KibanaProvider{Client: r.kibanaClient}) - r.resourcesManager = manager - return r.run(ctx) } diff --git a/internal/testrunner/runners/pipeline/runner.go b/internal/testrunner/runners/pipeline/runner.go index 14fa843e0..a63ef8659 100644 --- a/internal/testrunner/runners/pipeline/runner.go +++ b/internal/testrunner/runners/pipeline/runner.go @@ -71,9 +71,21 @@ func (r *runner) String() string { return "pipeline" } +// SetupRunner prepares global resources required by the test runner. +func (r *runner) SetupRunner(ctx context.Context, options testrunner.TestOptions) error { + r.options = options + return nil +} + +// TearDownRunner cleans up any global test runner resources. It must be called +// after the test runner has finished executing all its tests. 
+func (r *runner) TearDownRunner(ctx context.Context) error { + return nil +} + // Run runs the pipeline tests defined under the given folder func (r *runner) Run(ctx context.Context, options testrunner.TestOptions) ([]testrunner.TestResult, error) { - r.options = options + r.options.TestFolder = options.TestFolder stackConfig, err := stack.LoadConfig(r.options.Profile) if err != nil { diff --git a/internal/testrunner/runners/static/runner.go b/internal/testrunner/runners/static/runner.go index b6580d864..7ff15ec09 100644 --- a/internal/testrunner/runners/static/runner.go +++ b/internal/testrunner/runners/static/runner.go @@ -45,8 +45,20 @@ func (r runner) String() string { return "static files" } -func (r runner) Run(ctx context.Context, options testrunner.TestOptions) ([]testrunner.TestResult, error) { +// SetupRunner prepares global resources required by the test runner. +func (r *runner) SetupRunner(ctx context.Context, options testrunner.TestOptions) error { r.options = options + return nil +} + +// TearDownRunner cleans up any global test runner resources. It must be called +// after the test runner has finished executing all its tests. +func (r *runner) TearDownRunner(ctx context.Context) error { + return nil +} + +func (r runner) Run(ctx context.Context, options testrunner.TestOptions) ([]testrunner.TestResult, error) { + r.options.TestFolder = options.TestFolder return r.run(ctx) } diff --git a/internal/testrunner/runners/system/runner.go b/internal/testrunner/runners/system/runner.go index acd1236d1..5c6927a5d 100644 --- a/internal/testrunner/runners/system/runner.go +++ b/internal/testrunner/runners/system/runner.go @@ -150,6 +150,7 @@ type runner struct { // Execution order of following handlers is defined in runner.TearDown() method. 
removeAgentHandler func(context.Context) error deleteTestPolicyHandler func(context.Context) error + cleanTestScenarioHandler func(context.Context) error resetAgentPolicyHandler func(context.Context) error resetAgentLogLevelHandler func(context.Context) error shutdownServiceHandler func(context.Context) error @@ -170,6 +171,47 @@ func (r *runner) String() string { return "system" } +// SetupRunner prepares global resources required by the test runner. +func (r *runner) SetupRunner(ctx context.Context, options testrunner.TestOptions) error { + r.options = options + + r.resourcesManager = resources.NewManager() + r.resourcesManager.RegisterProvider(resources.DefaultKibanaProviderName, &resources.KibanaProvider{Client: r.options.KibanaClient}) + + if r.options.RunTearDown { + logger.Debug("Skip installing package") + } else { + // Install the package before creating the policy, so we control exactly what is being + // installed. + logger.Debug("Installing package...") + resourcesOptions := resourcesOptions{ + // Install it unless we are running the tear down only. + installedPackage: !r.options.RunTearDown, + } + _, err := r.resourcesManager.ApplyCtx(ctx, r.resources(resourcesOptions)) + if err != nil { + return fmt.Errorf("can't install the package: %w", err) + } + } + + return nil +} + +// TearDownRunner cleans up any global test runner resources. It must be called +// after the test runner has finished executing all its tests. +func (r *runner) TearDownRunner(ctx context.Context) error { + logger.Debugf("Uninstalling package...") + resourcesOptions := resourcesOptions{ + // Keep it installed only if we were running setup, or tests only. + installedPackage: r.options.RunSetup || r.options.RunTestsOnly, + } + _, err := r.resourcesManager.ApplyCtx(ctx, r.resources(resourcesOptions)) + if err != nil { + return err + } + return nil +} + // CanRunPerDataStream returns whether this test runner can run on individual // data streams within the package. 
func (r *runner) CanRunPerDataStream() bool { @@ -188,7 +230,7 @@ func (r *runner) CanRunSetupTeardownIndependent() bool { // Run runs the system tests defined under the given folder func (r *runner) Run(ctx context.Context, options testrunner.TestOptions) ([]testrunner.TestResult, error) { - r.options = options + r.options.TestFolder = options.TestFolder if !r.options.RunSetup && !r.options.RunTearDown && !r.options.RunTestsOnly { return r.run(ctx) } @@ -278,7 +320,13 @@ func (r *runner) Run(ctx context.Context, options testrunner.TestOptions) ([]tes if err != nil { return result.WithError(fmt.Errorf("failed to prepare scenario: %w", err)) } - return r.validateTestScenario(ctx, result, scenario, testConfig) + results, err := r.validateTestScenario(ctx, result, scenario, testConfig) + tdErr := r.tearDownTest(ctx) + if tdErr != nil { + logger.Errorf("failed to tear down runner: %s", tdErr.Error()) + } + return results, err + } if r.options.RunTearDown { @@ -421,6 +469,9 @@ func (r *runner) tearDownTest(ctx context.Context) error { // Avoid cancellations during cleanup. cleanupCtx := context.WithoutCancel(ctx) + // This handler should be run before shutting down Elastic Agents (agent deployer) + // or services that could run agents like Custom Agents (service deployer) + // or Kind deployer. if r.resetAgentPolicyHandler != nil { if err := r.resetAgentPolicyHandler(cleanupCtx); err != nil { return err @@ -428,6 +479,23 @@ func (r *runner) tearDownTest(ctx context.Context) error { r.resetAgentPolicyHandler = nil } + // Shutting down the service should be run one of the first actions + // to ensure that resources created by terraform are deleted even if other + // errors fail. 
+ if r.shutdownServiceHandler != nil { + if err := r.shutdownServiceHandler(cleanupCtx); err != nil { + return err + } + r.shutdownServiceHandler = nil + } + + if r.cleanTestScenarioHandler != nil { + if err := r.cleanTestScenarioHandler(cleanupCtx); err != nil { + return err + } + r.cleanTestScenarioHandler = nil + } + if r.resetAgentLogLevelHandler != nil { if err := r.resetAgentLogLevelHandler(cleanupCtx); err != nil { return err @@ -449,22 +517,6 @@ func (r *runner) tearDownTest(ctx context.Context) error { r.deleteTestPolicyHandler = nil } - resourcesOptions := resourcesOptions{ - // Keep it installed only if we were running setup, or tests only. - installedPackage: r.options.RunSetup || r.options.RunTestsOnly, - } - _, err := r.resourcesManager.ApplyCtx(cleanupCtx, r.resources(resourcesOptions)) - if err != nil { - return err - } - - if r.shutdownServiceHandler != nil { - if err := r.shutdownServiceHandler(cleanupCtx); err != nil { - return err - } - r.shutdownServiceHandler = nil - } - if r.shutdownAgentHandler != nil { if err := r.shutdownAgentHandler(cleanupCtx); err != nil { return err @@ -499,9 +551,6 @@ func (r *runner) initRun() error { return fmt.Errorf("reading service logs directory failed: %w", err) } - r.resourcesManager = resources.NewManager() - r.resourcesManager.RegisterProvider(resources.DefaultKibanaProviderName, &resources.KibanaProvider{Client: r.options.KibanaClient}) - r.serviceStateFilePath = filepath.Join(testrunner.StateFolderPath(r.options.Profile.ProfilePath), testrunner.ServiceStateFileName) r.dataStreamPath, found, err = packages.FindDataStreamRootForPath(r.options.TestFolder.Path) @@ -821,6 +870,36 @@ type scenarioTest struct { startTestTime time.Time } +func (r *runner) shouldCreateNewAgentPolicyForTest() bool { + if r.options.RunTestsOnly { + // always that --no-provision is set, it should create new Agent Policies. 
+ return true + } + if !r.options.RunIndependentElasticAgent { + // keep same behaviour as previously when Elastic Agent of the stack is used. + return false + } + // No need to create new Agent Policies for these stages + if r.options.RunSetup || r.options.RunTearDown { + return false + } + return true +} + +func (r *runner) deleteDataStream(ctx context.Context, dataStream string) error { + resp, err := r.options.API.Indices.DeleteDataStream([]string{dataStream}, + r.options.API.Indices.DeleteDataStream.WithContext(ctx), + ) + if err != nil { + return fmt.Errorf("failed to delete data stream %s: %w", dataStream, err) + } + defer resp.Body.Close() + if resp.IsError() { + return fmt.Errorf("could not delete data stream %s: %s", dataStream, resp.String()) + } + return nil +} + func (r *runner) prepareScenario(ctx context.Context, config *testConfig, svcInfo servicedeployer.ServiceInfo) (*scenarioTest, error) { serviceOptions := r.createServiceOptions(config.ServiceVariantName) @@ -917,6 +996,9 @@ func (r *runner) prepareScenario(ctx context.Context, config *testConfig, svcInf } } r.deleteTestPolicyHandler = func(ctx context.Context) error { + if r.options.RunTestsOnly { + return nil + } logger.Debug("deleting test policies...") if err := r.options.KibanaClient.DeletePolicy(ctx, policyToTest.ID); err != nil { return fmt.Errorf("error cleaning up test policy: %w", err) @@ -955,29 +1037,27 @@ func (r *runner) prepareScenario(ctx context.Context, config *testConfig, svcInf return nil, fmt.Errorf("unable to reload system test case configuration: %w", err) } - if r.options.RunTearDown { - logger.Debug("Skip installing package") - } else { - // Install the package before creating the policy, so we control exactly what is being - // installed. - logger.Debug("Installing package...") - resourcesOptions := resourcesOptions{ - // Install it unless we are running the tear down only. 
- installedPackage: !r.options.RunTearDown, - } - _, err = r.resourcesManager.ApplyCtx(ctx, r.resources(resourcesOptions)) - if err != nil { - return nil, fmt.Errorf("can't install the package: %w", err) - } - } - // store the time just before adding the Test Policy, this time will be used to check // the agent logs from that time onwards to avoid possible previous errors present in logs scenario.startTestTime = time.Now() + suffixDatastream := svcInfo.Test.RunID logger.Debug("adding package data stream to test policy...") - ds := createPackageDatastream(*policyToTest, *scenario.pkgManifest, policyTemplate, *scenario.dataStreamManifest, *config, svcInfo.Test.RunID) - if r.options.RunTearDown || r.options.RunTestsOnly { + policyTesting := kibana.Policy{ + Name: fmt.Sprintf("ep-one-test-system-%s-%s-%s", r.options.TestFolder.Package, r.options.TestFolder.DataStream, scenario.startTestTime.Format("20060102T15:04:05Z")), + Description: fmt.Sprintf("test policy created by elastic-package test system for data stream %s/%s", r.options.TestFolder.Package, r.options.TestFolder.DataStream), + Namespace: createTestRunID(), + } + policyToAssignDatastreamTests := policyToTest + if r.shouldCreateNewAgentPolicyForTest() { + policyToAssignDatastreamTests, err = r.options.KibanaClient.CreatePolicy(ctx, policyTesting) + if err != nil { + return nil, fmt.Errorf("could not create test policy: %w", err) + } + suffixDatastream = policyTesting.Namespace + } + ds := createPackageDatastream(*policyToAssignDatastreamTests, *scenario.pkgManifest, policyTemplate, *scenario.dataStreamManifest, *config, suffixDatastream) + if r.options.RunTearDown { logger.Debug("Skip adding data stream config to policy") } else { if err := r.options.KibanaClient.AddPackageDataStreamToPolicy(ctx, ds); err != nil { @@ -1010,6 +1090,9 @@ func (r *runner) prepareScenario(ctx context.Context, config *testConfig, svcInf ) r.wipeDataStreamHandler = func(ctx context.Context) error { + if r.options.RunTestsOnly { + 
return nil + } logger.Debugf("deleting data in data stream...") if err := deleteDataStreamDocs(ctx, r.options.API, scenario.dataStream); err != nil { return fmt.Errorf("error deleting data in data stream: %w", err) @@ -1017,18 +1100,28 @@ func (r *runner) prepareScenario(ctx context.Context, config *testConfig, svcInf return nil } - switch { - case r.options.RunTearDown: - logger.Debugf("Skipped deleting old data in data stream %q", scenario.dataStream) - case r.options.RunTestsOnly: - // In this mode, service is still running and the agent is sending documents, so sometimes - // cannot be guaranteed to be zero documents - err := r.deleteOldDocumentsDataStreamAndWait(ctx, scenario.dataStream, false) + r.cleanTestScenarioHandler = func(ctx context.Context) error { + if !r.shouldCreateNewAgentPolicyForTest() { + return nil + } + + logger.Debug("Deleting test policy...") + err = r.options.KibanaClient.DeletePolicy(ctx, policyToAssignDatastreamTests.ID) if err != nil { - return nil, err + return fmt.Errorf("failed to delete policy %s: %w", policyToAssignDatastreamTests.Name, err) } - default: - err := r.deleteOldDocumentsDataStreamAndWait(ctx, scenario.dataStream, true) + logger.Debug("Deleting data stream for testing") + err = r.deleteDataStream(ctx, scenario.dataStream) + if err != nil { + return fmt.Errorf("failed to delete data stream %s: %w", scenario.dataStream, err) + } + return nil + } + + if r.options.RunTearDown { + logger.Debugf("Skipped deleting old data in data stream %q", scenario.dataStream) + } else { + err := r.deleteOldDocumentsDataStreamAndWait(ctx, scenario.dataStream) if err != nil { return nil, err } @@ -1044,6 +1137,9 @@ func (r *runner) prepareScenario(ctx context.Context, config *testConfig, svcInf logger.Debugf("Selected enrolled agent %q", agent.ID) r.removeAgentHandler = func(ctx context.Context) error { + if r.options.RunTestsOnly { + return nil + } // When not using independent agents, service deployers like kubernetes or custom agents create
new Elastic Agent if !r.options.RunIndependentElasticAgent && !svcInfo.Agent.Independent { return nil @@ -1056,7 +1152,7 @@ func (r *runner) prepareScenario(ctx context.Context, config *testConfig, svcInf return nil } - if r.options.RunTearDown || r.options.RunTestsOnly { + if r.options.RunTearDown { origPolicy = serviceStateData.OrigPolicy logger.Debugf("Got orig policy from file: %q - %q", origPolicy.Name, origPolicy.ID) } else { @@ -1066,14 +1162,15 @@ func (r *runner) prepareScenario(ctx context.Context, config *testConfig, svcInf Revision: agent.PolicyRevision, } } - // Assign policy to agent + r.resetAgentPolicyHandler = func(ctx context.Context) error { - if r.options.RunIndependentElasticAgent { - return nil - } - logger.Debug("reassigning original policy back to agent...") - if err := r.options.KibanaClient.AssignPolicyToAgent(ctx, agent, origPolicy); err != nil { - return fmt.Errorf("error reassigning original policy to agent: %w", err) + if !r.options.RunSetup { + // it should be kept the same policy just when system tests are + // triggered with the flags for running setup stage (--setup) + logger.Debug("reassigning original policy back to agent...") + if err := r.options.KibanaClient.AssignPolicyToAgent(ctx, agent, origPolicy); err != nil { + return fmt.Errorf("error reassigning original policy to agent: %w", err) + } } return nil } @@ -1092,6 +1189,9 @@ func (r *runner) prepareScenario(ctx context.Context, config *testConfig, svcInf } } r.resetAgentLogLevelHandler = func(ctx context.Context) error { + if r.options.RunTestsOnly { + return nil + } logger.Debugf("reassigning original log level %q back to agent...", origLogLevel) if err := r.options.KibanaClient.SetAgentLogLevel(ctx, agent.ID, origLogLevel); err != nil { @@ -1100,10 +1200,10 @@ func (r *runner) prepareScenario(ctx context.Context, config *testConfig, svcInf return nil } - if r.options.RunTearDown || r.options.RunTestsOnly { + if r.options.RunTearDown { logger.Debug("Skip assiging package 
data stream to agent") } else { - policyWithDataStream, err := r.options.KibanaClient.GetPolicy(ctx, policyToTest.ID) + policyWithDataStream, err := r.options.KibanaClient.GetPolicy(ctx, policyToAssignDatastreamTests.ID) if err != nil { return nil, fmt.Errorf("could not read the policy with data stream: %w", err) } @@ -1244,6 +1344,9 @@ func (r *runner) setupService(ctx context.Context, config *testConfig, serviceOp } r.shutdownServiceHandler = func(ctx context.Context) error { + if r.options.RunTestsOnly { + return nil + } logger.Debug("tearing down service...") if err := service.TearDown(ctx); err != nil { return fmt.Errorf("error tearing down service: %w", err) @@ -1283,6 +1386,9 @@ func (r *runner) setupAgent(ctx context.Context, config *testConfig, state Servi return nil, agentInfo, fmt.Errorf("could not setup agent: %w", err) } r.shutdownAgentHandler = func(ctx context.Context) error { + if r.options.RunTestsOnly { + return nil + } if agentDeployer == nil { return nil } @@ -1373,29 +1479,17 @@ func (r *runner) writeScenarioState(opts scenarioStateOpts) error { return nil } -func (r *runner) deleteOldDocumentsDataStreamAndWait(ctx context.Context, dataStream string, mustBeZero bool) error { +func (r *runner) deleteOldDocumentsDataStreamAndWait(ctx context.Context, dataStream string) error { logger.Debugf("Delete previous documents in data stream %q", dataStream) if err := deleteDataStreamDocs(ctx, r.options.API, dataStream); err != nil { return fmt.Errorf("error deleting old data in data stream: %s: %w", dataStream, err) } - startHits, err := r.getDocs(ctx, dataStream) - if err != nil { - return err - } - // First call already reports zero documents - if startHits.size() == 0 { - return nil - } cleared, err := wait.UntilTrue(ctx, func(ctx context.Context) (bool, error) { hits, err := r.getDocs(ctx, dataStream) if err != nil { return false, err } - - if mustBeZero { - return hits.size() == 0, nil - } - return startHits.size() > hits.size(), nil + return 
hits.size() == 0, nil }, 1*time.Second, 2*time.Minute) if err != nil || !cleared { if err == nil { @@ -1579,7 +1673,7 @@ func createIntegrationPackageDatastream( ) kibana.PackageDataStream { r := kibana.PackageDataStream{ Name: fmt.Sprintf("%s-%s-%s", pkg.Name, ds.Name, suffix), - Namespace: "ep", + Namespace: kibanaPolicy.Namespace, PolicyID: kibanaPolicy.ID, Enabled: true, Inputs: []kibana.Input{ @@ -1633,7 +1727,7 @@ func createInputPackageDatastream( ) kibana.PackageDataStream { r := kibana.PackageDataStream{ Name: fmt.Sprintf("%s-%s-%s", pkg.Name, policyTemplate.Name, suffix), - Namespace: "ep", + Namespace: kibanaPolicy.Namespace, PolicyID: kibanaPolicy.ID, Enabled: true, } @@ -1942,6 +2036,7 @@ func deleteDataStreamDocs(ctx context.Context, api *elasticsearch.API, dataStrea if resp.StatusCode == http.StatusNotFound { // Unavailable index is ok, this means that data is already not there. + logger.Debugf("Failed but ignored with status not found %s: %s", dataStream, resp.String()) return nil } if resp.IsError() { diff --git a/internal/testrunner/testrunner.go b/internal/testrunner/testrunner.go index 8a87ec231..4d0261824 100644 --- a/internal/testrunner/testrunner.go +++ b/internal/testrunner/testrunner.go @@ -70,6 +70,13 @@ type TestRunner interface { TestFolderRequired() bool CanRunSetupTeardownIndependent() bool + + // SetupRunner prepares global resources required by the test runner. + SetupRunner(context.Context, TestOptions) error + + // TearDownRunner cleans up any global test runner resources. It must be called + // after the test runner has finished executing all its tests. + TearDownRunner(context.Context) error } var runners = map[TestType]TestRunner{} @@ -285,12 +292,7 @@ func RegisterRunner(runner TestRunner) { } // Run method delegates execution to the registered test runner, based on the test type. 
-func Run(ctx context.Context, testType TestType, options TestOptions) ([]TestResult, error) { - runner, defined := runners[testType] - if !defined { - return nil, fmt.Errorf("unregistered runner test: %s", testType) - } - +func Run(ctx context.Context, runner TestRunner, options TestOptions) ([]TestResult, error) { results, err := runner.Run(ctx, options) tdErr := runner.TearDown(ctx) if err != nil {