diff --git a/.errcheck-exclude b/.errcheck-exclude deleted file mode 100644 index b8e1dc928b7..00000000000 --- a/.errcheck-exclude +++ /dev/null @@ -1,6 +0,0 @@ -io/ioutil.WriteFile -io/ioutil.ReadFile -(github.com/go-kit/log.Logger).Log -io.Copy -(github.com/opentracing/opentracing-go.Tracer).Inject -(*github.com/cortexproject/cortex/pkg/util/spanlogger.SpanLogger).Error diff --git a/.github/workflows/test-build-deploy.yml b/.github/workflows/test-build-deploy.yml index 2edfa29a2fb..f08c2446d4f 100644 --- a/.github/workflows/test-build-deploy.yml +++ b/.github/workflows/test-build-deploy.yml @@ -17,7 +17,7 @@ jobs: lint: runs-on: ubuntu-24.04 container: - image: quay.io/cortexproject/build-image:master-281284ac1 + image: quay.io/cortexproject/build-image:master-7ce1d1b12 steps: - name: Checkout Repo uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 @@ -46,7 +46,7 @@ jobs: test: runs-on: ubuntu-24.04 container: - image: quay.io/cortexproject/build-image:master-281284ac1 + image: quay.io/cortexproject/build-image:master-7ce1d1b12 steps: - name: Checkout Repo uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 @@ -64,7 +64,7 @@ jobs: test-no-race: runs-on: ubuntu-24.04 container: - image: quay.io/cortexproject/build-image:master-281284ac1 + image: quay.io/cortexproject/build-image:master-7ce1d1b12 steps: - name: Checkout Repo uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 @@ -107,7 +107,7 @@ jobs: build: runs-on: ubuntu-24.04 container: - image: quay.io/cortexproject/build-image:master-281284ac1 + image: quay.io/cortexproject/build-image:master-7ce1d1b12 steps: - name: Checkout Repo uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 @@ -247,14 +247,14 @@ jobs: run: | touch build-image/.uptodate MIGRATIONS_DIR=$(pwd)/cmd/cortex/migrations - make BUILD_IMAGE=quay.io/cortexproject/build-image:master-281284ac1 TTY='' configs-integration-test + make BUILD_IMAGE=quay.io/cortexproject/build-image:master-7ce1d1b12 TTY='' configs-integration-test deploy_website: needs: [build, test] if: (github.ref == 'refs/heads/master' || startsWith(github.ref, 'refs/tags/')) && github.repository == 'cortexproject/cortex' runs-on: ubuntu-24.04 container: - image: quay.io/cortexproject/build-image:master-281284ac1 + image: quay.io/cortexproject/build-image:master-7ce1d1b12 steps: - name: Checkout Repo uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 @@ -296,7 +296,7 @@ jobs: if: (github.ref == 'refs/heads/master' || startsWith(github.ref, 'refs/tags/')) && github.repository == 'cortexproject/cortex' runs-on: ubuntu-24.04 container: - image: quay.io/cortexproject/build-image:master-281284ac1 + image: quay.io/cortexproject/build-image:master-7ce1d1b12 steps: - name: Checkout Repo uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 diff --git a/.golangci.yml b/.golangci.yml index dd0398764be..2812394d35b 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,45 +1,6 @@ -output: - format: line-number - -linters: - enable: - - goimports - - revive - - gofmt - - misspell - - depguard - - sloglint - - unused - -linters-settings: - staticcheck: - checks: - - all - errcheck: - # path to a file containing a list of functions to exclude from checking - # see https://github.com/kisielk/errcheck#excluding-functions for details - exclude: ./.errcheck-exclude - goimports: - local-prefixes: "github.com/cortexproject/cortex" - revive: - severity: error # We only want critical issues. 
- govet: - disable: - - printf - - depguard: - rules: - main: - list-mode: lax - files: - - $all - deny: - - pkg: "github.com/go-kit/kit/log" - desc: Use github.com/go-kit/log instead of github.com/go-kit/kit/log" - +version: "2" run: timeout: 5m - # List of build tags, all linters use it. build-tags: - netgo @@ -51,3 +12,51 @@ run: - integration_querier - integration_ruler - integration_query_fuzz +output: + formats: + text: + path: stdout + colors: false +linters: + enable: + - depguard + - misspell + - revive + - sloglint + settings: + depguard: + rules: + main: + list-mode: lax + files: + - $all + deny: + - pkg: github.com/go-kit/kit/log + desc: Use github.com/go-kit/log instead of github.com/go-kit/kit/log" + errcheck: + exclude-functions: + - io/ioutil.WriteFile + - io/ioutil.ReadFile + - io.Copy + - (github.com/go-kit/log.Logger).Log + - (*github.com/cortexproject/cortex/pkg/util/spanlogger.SpanLogger).Error + - (github.com/opentracing/opentracing-go.Tracer).Inject + govet: + disable: + - printf + revive: + severity: error + exclusions: + presets: + - comments + - common-false-positives + - legacy + - std-error-handling +formatters: + enable: + - gofmt + - goimports + settings: + goimports: + local-prefixes: + - github.com/cortexproject/cortex diff --git a/Makefile b/Makefile index bfca58e73f1..705e005dac1 100644 --- a/Makefile +++ b/Makefile @@ -115,7 +115,7 @@ build-image/$(UPTODATE): build-image/* SUDO := $(shell docker info >/dev/null 2>&1 || echo "sudo -E") BUILD_IN_CONTAINER := true BUILD_IMAGE ?= $(IMAGE_PREFIX)build-image -LATEST_BUILD_IMAGE_TAG ?= master-281284ac1 +LATEST_BUILD_IMAGE_TAG ?= master-7ce1d1b12 # TTY is parameterized to allow Google Cloud Builder to run builds, # as it currently disallows TTY devices. This value needs to be overridden diff --git a/integration/e2e/service.go b/integration/e2e/service.go index 0d021e19a39..bc99429e1b1 100644 --- a/integration/e2e/service.go +++ b/integration/e2e/service.go @@ -427,9 +427,10 @@ func NewHTTPReadinessProbe(port int, path string, expectedStatusRangeStart, expe func (p *HTTPReadinessProbe) Ready(service *ConcreteService) (err error) { endpoint := service.Endpoint(p.port) - if endpoint == "" { + switch endpoint { + case "": return fmt.Errorf("cannot get service endpoint for port %d", p.port) - } else if endpoint == "stopped" { + case "stopped": return errors.New("service has stopped") } @@ -467,9 +468,10 @@ func NewTCPReadinessProbe(port int) *TCPReadinessProbe { func (p *TCPReadinessProbe) Ready(service *ConcreteService) (err error) { endpoint := service.Endpoint(p.port) - if endpoint == "" { + switch endpoint { + case "": return fmt.Errorf("cannot get service endpoint for port %d", p.port) - } else if endpoint == "stopped" { + case "stopped": return errors.New("service has stopped") } diff --git a/integration/e2ecortex/services.go b/integration/e2ecortex/services.go index a33b0ac6ae5..727c60dd11a 100644 --- a/integration/e2ecortex/services.go +++ b/integration/e2ecortex/services.go @@ -42,9 +42,10 @@ func NewDistributorWithConfigFile(name string, store RingStore, address, configF // Configure the ingesters ring backend flags["-ring.store"] = string(store) - if store == RingStoreConsul { + switch store { + case RingStoreConsul: flags["-consul.hostname"] = address - } else if store == RingStoreEtcd { + case RingStoreEtcd: flags["-etcd.endpoints"] = address } @@ -82,10 +83,11 @@ func NewQuerierWithConfigFile(name string, store RingStore, address, configFile "-store-gateway.sharding-ring.store": string(store), } - if 
store == RingStoreConsul { + switch store { + case RingStoreConsul: ringBackendFlags["-consul.hostname"] = address ringBackendFlags["-store-gateway.sharding-ring.consul.hostname"] = address - } else if store == RingStoreEtcd { + case RingStoreEtcd: ringBackendFlags["-etcd.endpoints"] = address ringBackendFlags["-store-gateway.sharding-ring.etcd.endpoints"] = address } @@ -130,10 +132,11 @@ func NewStoreGatewayWithConfigFile(name string, store RingStore, address string, flags["-config.file"] = filepath.Join(e2e.ContainerSharedDir, configFile) } - if store == RingStoreConsul { + switch store { + case RingStoreConsul: flags["-consul.hostname"] = address flags["-store-gateway.sharding-ring.consul.hostname"] = address - } else if store == RingStoreEtcd { + case RingStoreEtcd: flags["-etcd.endpoints"] = address flags["-store-gateway.sharding-ring.etcd.endpoints"] = address } @@ -173,9 +176,10 @@ func NewIngesterWithConfigFile(name string, store RingStore, address, configFile // Configure the ingesters ring backend flags["-ring.store"] = string(store) - if store == RingStoreConsul { + switch store { + case RingStoreConsul: flags["-consul.hostname"] = address - } else if store == RingStoreEtcd { + case RingStoreEtcd: flags["-etcd.endpoints"] = address } diff --git a/integration/querier_tenant_federation_test.go b/integration/querier_tenant_federation_test.go index 11c4e545fc4..c2c2ec10a62 100644 --- a/integration/querier_tenant_federation_test.go +++ b/integration/querier_tenant_federation_test.go @@ -171,7 +171,7 @@ func mergeResults(tenantIDs []string, resultsPerTenant []model.Vector) model.Vec var v model.Vector for pos, tenantID := range tenantIDs { for _, r := range resultsPerTenant[pos] { - var s model.Sample = *r + var s = *r s.Metric = r.Metric.Clone() s.Metric[model.LabelName("__tenant_id__")] = model.LabelValue(tenantID) v = append(v, &s) diff --git a/integration/querier_test.go b/integration/querier_test.go index 67a4cedcc20..6305b4433c5 100644 --- a/integration/querier_test.go +++ b/integration/querier_test.go @@ -433,9 +433,10 @@ func TestQuerierWithBlocksStorageRunningInSingleBinaryMode(t *testing.T) { require.NoError(t, writeFileToSharedDir(s, "alertmanager_configs/user-1.yaml", []byte(cortexAlertmanagerUserConfigYaml))) // Add the cache address to the flags. - if testCfg.indexCacheBackend == tsdb.IndexCacheBackendMemcached { + switch testCfg.indexCacheBackend { + case tsdb.IndexCacheBackendMemcached: flags["-blocks-storage.bucket-store.index-cache.memcached.addresses"] = "dns+" + memcached.NetworkEndpoint(e2ecache.MemcachedPort) - } else if testCfg.indexCacheBackend == tsdb.IndexCacheBackendRedis { + case tsdb.IndexCacheBackendRedis: flags["-blocks-storage.bucket-store.index-cache.redis.addresses"] = redis.NetworkEndpoint(e2ecache.RedisPort) } diff --git a/integration/query_fuzz_test.go b/integration/query_fuzz_test.go index e92ac9fbdb6..135b0b7aa14 100644 --- a/integration/query_fuzz_test.go +++ b/integration/query_fuzz_test.go @@ -758,11 +758,12 @@ func TestVerticalShardingFuzz(t *testing.T) { // Generate another set of series for testing binary expression and vector matching. 
for i := numSeries; i < 2*numSeries; i++ { prompbLabels := []prompb.Label{{Name: "job", Value: "test"}, {Name: "series", Value: strconv.Itoa(i)}} - if i%3 == 0 { + switch i % 3 { + case 0: prompbLabels = append(prompbLabels, prompb.Label{Name: "status_code", Value: "200"}) - } else if i%3 == 1 { + case 1: prompbLabels = append(prompbLabels, prompb.Label{Name: "status_code", Value: "400"}) - } else { + default: prompbLabels = append(prompbLabels, prompb.Label{Name: "status_code", Value: "500"}) } series := e2e.GenerateSeriesWithSamples("test_series_b", start, scrapeInterval, i*numSamples, numSamples, prompbLabels...) @@ -874,11 +875,12 @@ func TestProtobufCodecFuzz(t *testing.T) { // Generate another set of series for testing binary expression and vector matching. for i := numSeries; i < 2*numSeries; i++ { prompbLabels := []prompb.Label{{Name: "job", Value: "test"}, {Name: "series", Value: strconv.Itoa(i)}} - if i%3 == 0 { + switch i % 3 { + case 0: prompbLabels = append(prompbLabels, prompb.Label{Name: "status_code", Value: "200"}) - } else if i%3 == 1 { + case 1: prompbLabels = append(prompbLabels, prompb.Label{Name: "status_code", Value: "400"}) - } else { + default: prompbLabels = append(prompbLabels, prompb.Label{Name: "status_code", Value: "500"}) } series := e2e.GenerateSeriesWithSamples("test_series_b", start, scrapeInterval, i*numSamples, numSamples, prompbLabels...) @@ -1532,11 +1534,12 @@ func TestBackwardCompatibilityQueryFuzz(t *testing.T) { // Generate another set of series for testing binary expression and vector matching. for i := numSeries; i < 2*numSeries; i++ { prompbLabels := []prompb.Label{{Name: "job", Value: "test"}, {Name: "series", Value: strconv.Itoa(i)}} - if i%3 == 0 { + switch i % 3 { + case 0: prompbLabels = append(prompbLabels, prompb.Label{Name: "status_code", Value: "200"}) - } else if i%3 == 1 { + case 1: prompbLabels = append(prompbLabels, prompb.Label{Name: "status_code", Value: "400"}) - } else { + default: prompbLabels = append(prompbLabels, prompb.Label{Name: "status_code", Value: "500"}) } series := e2e.GenerateSeriesWithSamples("test_series_b", start, scrapeInterval, i*numSamples, numSamples, prompbLabels...) diff --git a/pkg/alertmanager/api.go b/pkg/alertmanager/api.go index a4a73d43014..f546bbd4cea 100644 --- a/pkg/alertmanager/api.go +++ b/pkg/alertmanager/api.go @@ -76,10 +76,10 @@ func (am *MultitenantAlertmanager) GetUserConfig(w http.ResponseWriter, r *http. cfg, err := am.store.GetAlertConfig(r.Context(), userID) if err != nil { - switch { - case err == alertspb.ErrNotFound: + switch err { + case alertspb.ErrNotFound: http.Error(w, err.Error(), http.StatusNotFound) - case err == alertspb.ErrAccessDenied: + case alertspb.ErrAccessDenied: http.Error(w, err.Error(), http.StatusForbidden) default: http.Error(w, err.Error(), http.StatusInternalServerError) diff --git a/pkg/alertmanager/multitenant_test.go b/pkg/alertmanager/multitenant_test.go index 4fc55df02bb..3800aeaefa2 100644 --- a/pkg/alertmanager/multitenant_test.go +++ b/pkg/alertmanager/multitenant_test.go @@ -610,10 +610,8 @@ receivers: // Ensure the server endpoint has not been called if firewall is enabled. Since the alert is delivered // asynchronously, we should pool it for a short period. 
deadline := time.Now().Add(3 * time.Second) - for { - if time.Now().After(deadline) || serverInvoked.Load() { - break - } + for !time.Now().After(deadline) && !serverInvoked.Load() { + time.Sleep(100 * time.Millisecond) } diff --git a/pkg/alertmanager/rate_limited_notifier_test.go b/pkg/alertmanager/rate_limited_notifier_test.go index a8d498de5ad..1d35c9d99a7 100644 --- a/pkg/alertmanager/rate_limited_notifier_test.go +++ b/pkg/alertmanager/rate_limited_notifier_test.go @@ -47,12 +47,13 @@ func runNotifications(t *testing.T, rateLimitedNotifier *rateLimitedNotifier, co for i := 0; i < count; i++ { retry, err := rateLimitedNotifier.Notify(context.Background(), &types.Alert{}) - if err == nil { + switch err { + case nil: success++ - } else if err == errRateLimited { + case errRateLimited: rateLimited++ assert.False(t, retry) - } else { + default: assert.NotNil(t, err) } } diff --git a/pkg/alertmanager/state_persister_test.go b/pkg/alertmanager/state_persister_test.go index 38208ea4ca2..0eeaa9bba1a 100644 --- a/pkg/alertmanager/state_persister_test.go +++ b/pkg/alertmanager/state_persister_test.go @@ -105,7 +105,7 @@ func TestStatePersister_Position0ShouldWrite(t *testing.T) { { time.Sleep(5 * time.Second) - assert.Equal(t, services.Starting, s.Service.State()) + assert.Equal(t, services.Starting, s.State()) assert.Equal(t, 0, len(store.getWrites())) } @@ -139,13 +139,13 @@ func TestStatePersister_Position1ShouldNotWrite(t *testing.T) { // Start the persister. { - require.Equal(t, services.Starting, s.Service.State()) + require.Equal(t, services.Starting, s.State()) state.getResult = makeTestFullState() close(state.readyc) require.NoError(t, s.AwaitRunning(context.Background())) - require.Equal(t, services.Running, s.Service.State()) + require.Equal(t, services.Running, s.State()) } // Should not have stored anything, having passed the interval multiple times. diff --git a/pkg/alertmanager/state_replication.go b/pkg/alertmanager/state_replication.go index 04919f0b136..1021f6fefb1 100644 --- a/pkg/alertmanager/state_replication.go +++ b/pkg/alertmanager/state_replication.go @@ -254,11 +254,11 @@ func (s *state) starting(ctx context.Context) error { // WaitReady is needed for the pipeline builder to know whenever we've settled and the state is up to date. func (s *state) WaitReady(ctx context.Context) error { - return s.Service.AwaitRunning(ctx) + return s.AwaitRunning(ctx) } func (s *state) Ready() bool { - return s.Service.State() == services.Running + return s.State() == services.Running } // mergeFullStates attempts to merge all full states received from peers during settling. 
diff --git a/pkg/chunk/cache/memcached_client.go b/pkg/chunk/cache/memcached_client.go index 39a60f27af7..d1b167e26bd 100644 --- a/pkg/chunk/cache/memcached_client.go +++ b/pkg/chunk/cache/memcached_client.go @@ -141,7 +141,7 @@ func NewMemcachedClient(cfg MemcachedClientConfig, name string, r prometheus.Reg }), } if cfg.CBFailures > 0 { - newClient.Client.DialContext = newClient.dialViaCircuitBreaker + newClient.DialContext = newClient.dialViaCircuitBreaker } if len(cfg.Addresses) > 0 { diff --git a/pkg/compactor/blocks_cleaner_test.go b/pkg/compactor/blocks_cleaner_test.go index 82f477cea65..893657d0462 100644 --- a/pkg/compactor/blocks_cleaner_test.go +++ b/pkg/compactor/blocks_cleaner_test.go @@ -13,7 +13,6 @@ import ( "github.com/oklog/ulid" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" - "github.com/prometheus/client_golang/prometheus/testutil" prom_testutil "github.com/prometheus/client_golang/prometheus/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -258,11 +257,11 @@ func testBlocksCleanerWithOptions(t *testing.T, options testBlocksCleanerOptions assert.Equal(t, tc.expectedExists, exists, tc.user) } - assert.Equal(t, float64(1), testutil.ToFloat64(cleaner.runsStarted.WithLabelValues(activeStatus))) - assert.Equal(t, float64(1), testutil.ToFloat64(cleaner.runsCompleted.WithLabelValues(activeStatus))) - assert.Equal(t, float64(0), testutil.ToFloat64(cleaner.runsFailed.WithLabelValues(activeStatus))) - assert.Equal(t, float64(7), testutil.ToFloat64(cleaner.blocksCleanedTotal)) - assert.Equal(t, float64(0), testutil.ToFloat64(cleaner.blocksFailedTotal)) + assert.Equal(t, float64(1), prom_testutil.ToFloat64(cleaner.runsStarted.WithLabelValues(activeStatus))) + assert.Equal(t, float64(1), prom_testutil.ToFloat64(cleaner.runsCompleted.WithLabelValues(activeStatus))) + assert.Equal(t, float64(0), prom_testutil.ToFloat64(cleaner.runsFailed.WithLabelValues(activeStatus))) + assert.Equal(t, float64(7), prom_testutil.ToFloat64(cleaner.blocksCleanedTotal)) + assert.Equal(t, float64(0), prom_testutil.ToFloat64(cleaner.blocksFailedTotal)) // Check the updated bucket index. for _, tc := range []struct { @@ -390,11 +389,11 @@ func TestBlocksCleaner_ShouldContinueOnBlockDeletionFailure(t *testing.T) { assert.Equal(t, tc.expectedExists, exists, tc.path) } - assert.Equal(t, float64(1), testutil.ToFloat64(cleaner.runsStarted.WithLabelValues(activeStatus))) - assert.Equal(t, float64(1), testutil.ToFloat64(cleaner.runsCompleted.WithLabelValues(activeStatus))) - assert.Equal(t, float64(0), testutil.ToFloat64(cleaner.runsFailed.WithLabelValues(activeStatus))) - assert.Equal(t, float64(2), testutil.ToFloat64(cleaner.blocksCleanedTotal)) - assert.Equal(t, float64(1), testutil.ToFloat64(cleaner.blocksFailedTotal)) + assert.Equal(t, float64(1), prom_testutil.ToFloat64(cleaner.runsStarted.WithLabelValues(activeStatus))) + assert.Equal(t, float64(1), prom_testutil.ToFloat64(cleaner.runsCompleted.WithLabelValues(activeStatus))) + assert.Equal(t, float64(0), prom_testutil.ToFloat64(cleaner.runsFailed.WithLabelValues(activeStatus))) + assert.Equal(t, float64(2), prom_testutil.ToFloat64(cleaner.blocksCleanedTotal)) + assert.Equal(t, float64(1), prom_testutil.ToFloat64(cleaner.blocksFailedTotal)) // Check the updated bucket index. 
idx, err := bucketindex.ReadIndex(ctx, bucketClient, userID, nil, logger) @@ -454,11 +453,11 @@ func TestBlocksCleaner_ShouldRebuildBucketIndexOnCorruptedOne(t *testing.T) { assert.Equal(t, tc.expectedExists, exists, tc.path) } - assert.Equal(t, float64(1), testutil.ToFloat64(cleaner.runsStarted.WithLabelValues(activeStatus))) - assert.Equal(t, float64(1), testutil.ToFloat64(cleaner.runsCompleted.WithLabelValues(activeStatus))) - assert.Equal(t, float64(0), testutil.ToFloat64(cleaner.runsFailed.WithLabelValues(activeStatus))) - assert.Equal(t, float64(1), testutil.ToFloat64(cleaner.blocksCleanedTotal)) - assert.Equal(t, float64(0), testutil.ToFloat64(cleaner.blocksFailedTotal)) + assert.Equal(t, float64(1), prom_testutil.ToFloat64(cleaner.runsStarted.WithLabelValues(activeStatus))) + assert.Equal(t, float64(1), prom_testutil.ToFloat64(cleaner.runsCompleted.WithLabelValues(activeStatus))) + assert.Equal(t, float64(0), prom_testutil.ToFloat64(cleaner.runsFailed.WithLabelValues(activeStatus))) + assert.Equal(t, float64(1), prom_testutil.ToFloat64(cleaner.blocksCleanedTotal)) + assert.Equal(t, float64(0), prom_testutil.ToFloat64(cleaner.blocksFailedTotal)) // Check the updated bucket index. idx, err := bucketindex.ReadIndex(ctx, bucketClient, userID, nil, logger) diff --git a/pkg/compactor/cleaner_visit_marker.go b/pkg/compactor/cleaner_visit_marker.go index b31e8810666..c5ad4577793 100644 --- a/pkg/compactor/cleaner_visit_marker.go +++ b/pkg/compactor/cleaner_visit_marker.go @@ -36,7 +36,7 @@ func (b *CleanerVisitMarker) IsExpired(cleanerVisitMarkerTimeout time.Duration) } func (b *CleanerVisitMarker) IsVisited(cleanerVisitMarkerTimeout time.Duration) bool { - return !(b.GetStatus() == Completed) && !(b.GetStatus() == Failed) && !b.IsExpired(cleanerVisitMarkerTimeout) + return !(b.GetStatus() == Completed) && !(b.GetStatus() == Failed) && !b.IsExpired(cleanerVisitMarkerTimeout) //nolint:staticcheck } func (b *CleanerVisitMarker) GetStatus() VisitStatus { diff --git a/pkg/compactor/partition_compaction_grouper.go b/pkg/compactor/partition_compaction_grouper.go index 53f7762df87..c0a46f3ce5b 100644 --- a/pkg/compactor/partition_compaction_grouper.go +++ b/pkg/compactor/partition_compaction_grouper.go @@ -818,7 +818,7 @@ func NewCompletenessChecker(blocks map[ulid.ULID]*metadata.Meta, groups []blocks } } } - status.canTakeCompaction = !(previousTrBlocks == 0 || (previousTrBlocks == 1 && status.numActiveBlocks == 0)) + status.canTakeCompaction = !(previousTrBlocks == 0 || (previousTrBlocks == 1 && status.numActiveBlocks == 0)) //nolint:staticcheck } previousTimeRanges = append(previousTimeRanges, tr) } diff --git a/pkg/compactor/shuffle_sharding_grouper.go b/pkg/compactor/shuffle_sharding_grouper.go index f6328b8fb5b..c051aaef59f 100644 --- a/pkg/compactor/shuffle_sharding_grouper.go +++ b/pkg/compactor/shuffle_sharding_grouper.go @@ -506,6 +506,6 @@ func getRangeStart(m *metadata.Meta, tr int64) int64 { func sortMetasByMinTime(metas []*metadata.Meta) { sort.Slice(metas, func(i, j int) bool { - return metas[i].BlockMeta.MinTime < metas[j].BlockMeta.MinTime + return metas[i].MinTime < metas[j].MinTime }) } diff --git a/pkg/configs/client/client.go b/pkg/configs/client/client.go index 1903098d6b9..263d59ca508 100644 --- a/pkg/configs/client/client.go +++ b/pkg/configs/client/client.go @@ -150,7 +150,7 @@ func doRequest(endpoint string, timeout time.Duration, tlsConfig *tls.Config, si defer resp.Body.Close() if resp.StatusCode != http.StatusOK { - return nil, fmt.Errorf("Invalid response from 
configs server: %v", resp.StatusCode) + return nil, fmt.Errorf("invalid response from configs server: %v", resp.StatusCode) } var config ConfigsResponse diff --git a/pkg/configs/db/dbtest/integration.go b/pkg/configs/db/dbtest/integration.go index d9a445a2fbe..8c30c41bb5c 100644 --- a/pkg/configs/db/dbtest/integration.go +++ b/pkg/configs/db/dbtest/integration.go @@ -19,7 +19,7 @@ var ( done chan error dbAddr string migrationsDir string - errRollback = fmt.Errorf("Rolling back test data") + errRollback = fmt.Errorf("rolling back test data") ) func init() { diff --git a/pkg/configs/db/postgres/postgres.go b/pkg/configs/db/postgres/postgres.go index 50523795b4a..7ebd464bf8c 100644 --- a/pkg/configs/db/postgres/postgres.go +++ b/pkg/configs/db/postgres/postgres.go @@ -215,7 +215,7 @@ func (d DB) SetRulesConfig(ctx context.Context, userID string, oldConfig, newCon // The supplied oldConfig must match the current config. If no config // exists, then oldConfig must be nil. Otherwise, it must exactly // equal the existing config. - if !((err == sql.ErrNoRows && oldConfig.Files == nil) || oldConfig.Equal(current.Config.RulesConfig)) { + if !((err == sql.ErrNoRows && oldConfig.Files == nil) || oldConfig.Equal(current.Config.RulesConfig)) { //nolint:staticcheck return nil } new := userconfig.Config{ diff --git a/pkg/distributor/distributor_test.go b/pkg/distributor/distributor_test.go index 3618f285fee..12ad89c4f81 100644 --- a/pkg/distributor/distributor_test.go +++ b/pkg/distributor/distributor_test.go @@ -3338,12 +3338,12 @@ func (i *mockIngester) Push(ctx context.Context, req *cortexpb.WriteRequest, opt if !ok { // Make a copy because the request Timeseries are reused item := cortexpb.TimeSeries{ - Labels: make([]cortexpb.LabelAdapter, len(series.TimeSeries.Labels)), - Samples: make([]cortexpb.Sample, len(series.TimeSeries.Samples)), + Labels: make([]cortexpb.LabelAdapter, len(series.Labels)), + Samples: make([]cortexpb.Sample, len(series.Samples)), } - copy(item.Labels, series.TimeSeries.Labels) - copy(item.Samples, series.TimeSeries.Samples) + copy(item.Labels, series.Labels) + copy(item.Samples, series.Samples) i.timeseries[hash] = &cortexpb.PreallocTimeseries{TimeSeries: &item} } else { diff --git a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go index af618488efd..625a434d5c3 100644 --- a/pkg/ingester/ingester.go +++ b/pkg/ingester/ingester.go @@ -3145,7 +3145,7 @@ func (i *Ingester) flushHandler(w http.ResponseWriter, r *http.Request) { allowedUsers := util.NewAllowedTenants(tenants, nil) run := func() { - ingCtx := i.BasicService.ServiceContext() + ingCtx := i.ServiceContext() if ingCtx == nil || ingCtx.Err() != nil { level.Info(logutil.WithContext(r.Context(), i.logger)).Log("msg", "flushing TSDB blocks: ingester not running, ignoring flush request") return diff --git a/pkg/querier/blocks_store_queryable.go b/pkg/querier/blocks_store_queryable.go index a5647e55451..c71d4e49922 100644 --- a/pkg/querier/blocks_store_queryable.go +++ b/pkg/querier/blocks_store_queryable.go @@ -348,7 +348,7 @@ func (q *blocksStoreQuerier) LabelNames(ctx context.Context, hints *storage.Labe } spanLog, spanCtx := spanlogger.New(ctx, "blocksStoreQuerier.LabelNames") - defer spanLog.Span.Finish() + defer spanLog.Finish() minT, maxT, limit := q.minT, q.maxT, int64(0) @@ -391,7 +391,7 @@ func (q *blocksStoreQuerier) LabelValues(ctx context.Context, name string, hints } spanLog, spanCtx := spanlogger.New(ctx, "blocksStoreQuerier.LabelValues") - defer spanLog.Span.Finish() + defer spanLog.Finish() minT, maxT, 
limit := q.minT, q.maxT, int64(0) @@ -438,7 +438,7 @@ func (q *blocksStoreQuerier) selectSorted(ctx context.Context, sp *storage.Selec } spanLog, spanCtx := spanlogger.New(ctx, "blocksStoreQuerier.selectSorted") - defer spanLog.Span.Finish() + defer spanLog.Finish() minT, maxT, limit := q.minT, q.maxT, int64(0) if sp != nil { diff --git a/pkg/querier/blocks_store_queryable_test.go b/pkg/querier/blocks_store_queryable_test.go index 3ca5c03fee2..5b5ab23c230 100644 --- a/pkg/querier/blocks_store_queryable_test.go +++ b/pkg/querier/blocks_store_queryable_test.go @@ -2442,9 +2442,10 @@ func TestBlocksStoreQuerier_PromQLExecution(t *testing.T) { // Check sample timestamp is expected. require.Equal(t, h.T, int64(from)+int64(i)*15000) expectedH := tsdbutil.GenerateTestGaugeFloatHistogram(h.T) - if enc == encoding.PrometheusHistogramChunk { + switch enc { + case encoding.PrometheusHistogramChunk: require.Equal(t, expectedH, h.H) - } else if enc == encoding.PrometheusFloatHistogramChunk { + case encoding.PrometheusFloatHistogramChunk: require.Equal(t, expectedH, h.H) } } diff --git a/pkg/querier/distributor_queryable.go b/pkg/querier/distributor_queryable.go index 62ed4489c7e..ea8173f2025 100644 --- a/pkg/querier/distributor_queryable.go +++ b/pkg/querier/distributor_queryable.go @@ -90,7 +90,7 @@ type distributorQuerier struct { // The bool passed is ignored because the series is always sorted. func (q *distributorQuerier) Select(ctx context.Context, sortSeries bool, sp *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet { log, ctx := spanlogger.New(ctx, "distributorQuerier.Select") - defer log.Span.Finish() + defer log.Finish() minT, maxT := q.mint, q.maxt if sp != nil { @@ -222,7 +222,7 @@ func (q *distributorQuerier) LabelNames(ctx context.Context, hints *storage.Labe } log, ctx := spanlogger.New(ctx, "distributorQuerier.LabelNames") - defer log.Span.Finish() + defer log.Finish() var ( ln []string @@ -246,7 +246,7 @@ func (q *distributorQuerier) LabelNames(ctx context.Context, hints *storage.Labe // labelNamesWithMatchers performs the LabelNames call by calling ingester's MetricsForLabelMatchers method func (q *distributorQuerier) labelNamesWithMatchers(ctx context.Context, hints *storage.LabelHints, partialDataEnabled bool, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { log, ctx := spanlogger.New(ctx, "distributorQuerier.labelNamesWithMatchers") - defer log.Span.Finish() + defer log.Finish() var ( ms []labels.Labels diff --git a/pkg/querier/querier.go b/pkg/querier/querier.go index f13121caf98..88eb968b128 100644 --- a/pkg/querier/querier.go +++ b/pkg/querier/querier.go @@ -352,7 +352,7 @@ func (q querier) Select(ctx context.Context, sortSeries bool, sp *storage.Select }() log, ctx := spanlogger.New(ctx, "querier.Select") - defer log.Span.Finish() + defer log.Finish() if sp != nil { level.Debug(log).Log("start", util.TimeFromMillis(sp.Start).UTC().String(), "end", util.TimeFromMillis(sp.End).UTC().String(), "step", sp.Step, "matchers", matchers) diff --git a/pkg/querier/tenantfederation/exemplar_merge_queryable.go b/pkg/querier/tenantfederation/exemplar_merge_queryable.go index 45e519af758..a5f40ca59dc 100644 --- a/pkg/querier/tenantfederation/exemplar_merge_queryable.go +++ b/pkg/querier/tenantfederation/exemplar_merge_queryable.go @@ -140,7 +140,7 @@ type exemplarSelectJob struct { // Select returns aggregated exemplars within given time range for multiple tenants. 
func (m mergeExemplarQuerier) Select(start, end int64, matchers ...[]*labels.Matcher) ([]exemplar.QueryResult, error) { log, ctx := spanlogger.New(m.ctx, "mergeExemplarQuerier.Select") - defer log.Span.Finish() + defer log.Finish() // filter out tenants to query and unrelated matchers allMatchedTenantIds, allUnrelatedMatchers := filterAllTenantsAndMatchers(m.idLabelName, m.tenantIds, matchers) diff --git a/pkg/querier/tenantfederation/merge_queryable.go b/pkg/querier/tenantfederation/merge_queryable.go index 8c339cd784e..71bf0e2531e 100644 --- a/pkg/querier/tenantfederation/merge_queryable.go +++ b/pkg/querier/tenantfederation/merge_queryable.go @@ -153,7 +153,7 @@ func (m *mergeQuerier) LabelValues(ctx context.Context, name string, hints *stor return queriers[0].LabelValues(ctx, name, hints, matchers...) } log, _ := spanlogger.New(ctx, "mergeQuerier.LabelValues") - defer log.Span.Finish() + defer log.Finish() matchedTenants, filteredMatchers := filterValuesByMatchers(m.idLabelName, ids, matchers...) @@ -194,7 +194,7 @@ func (m *mergeQuerier) LabelNames(ctx context.Context, hints *storage.LabelHints return queriers[0].LabelNames(ctx, hints, matchers...) } log, _ := spanlogger.New(ctx, "mergeQuerier.LabelNames") - defer log.Span.Finish() + defer log.Finish() matchedTenants, filteredMatchers := filterValuesByMatchers(m.idLabelName, ids, matchers...) @@ -337,7 +337,7 @@ func (m *mergeQuerier) Select(ctx context.Context, sortSeries bool, hints *stora } log, ctx := spanlogger.New(ctx, "mergeQuerier.Select") - defer log.Span.Finish() + defer log.Finish() matchedValues, filteredMatchers := filterValuesByMatchers(m.idLabelName, ids, matchers...) var jobs = make([]interface{}, len(matchedValues)) var seriesSets = make([]storage.SeriesSet, len(matchedValues)) diff --git a/pkg/querier/tenantfederation/merge_queryable_test.go b/pkg/querier/tenantfederation/merge_queryable_test.go index 595ef572fae..4d76071b024 100644 --- a/pkg/querier/tenantfederation/merge_queryable_test.go +++ b/pkg/querier/tenantfederation/merge_queryable_test.go @@ -169,7 +169,7 @@ func (m mockTenantQuerier) Select(ctx context.Context, _ bool, sp *storage.Selec } log, _ := spanlogger.New(ctx, "mockTenantQuerier.select") - defer log.Span.Finish() + defer log.Finish() var matrix model.Matrix for _, s := range m.matrix(tenantIDs[0]) { @@ -835,14 +835,14 @@ func TestMergeQueryable_LabelNames(t *testing.T) { t.Run(scenario.labelNamesTestCase.name, func(t *testing.T) { t.Parallel() - labelNames, warnings, err := querier.LabelNames(ctx, nil, scenario.labelNamesTestCase.matchers...) - if scenario.labelNamesTestCase.expectedQueryErr != nil { - require.EqualError(t, err, scenario.labelNamesTestCase.expectedQueryErr.Error()) + labelNames, warnings, err := querier.LabelNames(ctx, nil, scenario.matchers...) 
+ if scenario.expectedQueryErr != nil { + require.EqualError(t, err, scenario.expectedQueryErr.Error()) } else { require.NoError(t, err) assert.NoError(t, testutil.GatherAndCompare(reg, strings.NewReader(scenario.expectedMetrics), "cortex_querier_federated_tenants_per_query")) - assert.Equal(t, scenario.labelNamesTestCase.expectedLabelNames, labelNames) - assertEqualWarnings(t, scenario.labelNamesTestCase.expectedWarnings, warnings) + assert.Equal(t, scenario.expectedLabelNames, labelNames) + assertEqualWarnings(t, scenario.expectedWarnings, warnings) } }) }) diff --git a/pkg/querier/tenantfederation/metadata_merge_querier.go b/pkg/querier/tenantfederation/metadata_merge_querier.go index 611bfbe1f55..7f796c2b39d 100644 --- a/pkg/querier/tenantfederation/metadata_merge_querier.go +++ b/pkg/querier/tenantfederation/metadata_merge_querier.go @@ -48,7 +48,7 @@ type metadataSelectJob struct { // MetricsMetadata returns aggregated metadata for multiple tenants func (m *mergeMetadataQuerier) MetricsMetadata(ctx context.Context, req *client.MetricsMetadataRequest) ([]scrape.MetricMetadata, error) { log, ctx := spanlogger.New(ctx, "mergeMetadataQuerier.MetricsMetadata") - defer log.Span.Finish() + defer log.Finish() tenantIds, err := tenant.TenantIDs(ctx) if err != nil { diff --git a/pkg/querier/tripperware/query_attribute_matcher.go b/pkg/querier/tripperware/query_attribute_matcher.go index 10c0319ab85..7edd9f0b098 100644 --- a/pkg/querier/tripperware/query_attribute_matcher.go +++ b/pkg/querier/tripperware/query_attribute_matcher.go @@ -17,7 +17,7 @@ import ( const QueryRejectErrorMessage = "This query does not perform well and has been rejected by the service operator." func rejectQueryOrSetPriority(r *http.Request, now time.Time, lookbackDelta time.Duration, limits Limits, userStr string, rejectedQueriesPerTenant *prometheus.CounterVec) error { - if limits == nil || !(limits.QueryPriority(userStr).Enabled || limits.QueryRejection(userStr).Enabled) { + if limits == nil || (!limits.QueryPriority(userStr).Enabled && !limits.QueryRejection(userStr).Enabled) { return nil } op := getOperation(r) diff --git a/pkg/querier/tripperware/queryrange/query_range_middlewares_test.go b/pkg/querier/tripperware/queryrange/query_range_middlewares_test.go index dbf013adc51..ba519125040 100644 --- a/pkg/querier/tripperware/queryrange/query_range_middlewares_test.go +++ b/pkg/querier/tripperware/queryrange/query_range_middlewares_test.go @@ -30,13 +30,14 @@ func TestRoundTrip(t *testing.T) { middleware.AuthenticateUser.Wrap( http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { var err error - if r.RequestURI == query { + switch r.RequestURI { + case query: _, err = w.Write([]byte(responseBody)) - } else if r.RequestURI == queryWithWarnings { + case queryWithWarnings: _, err = w.Write([]byte(responseBodyWithWarnings)) - } else if r.RequestURI == queryWithInfos { + case queryWithInfos: _, err = w.Write([]byte(responseBodyWithInfos)) - } else { + default: _, err = w.Write([]byte("bar")) } if err != nil { diff --git a/pkg/querier/tripperware/queryrange/results_cache.go b/pkg/querier/tripperware/queryrange/results_cache.go index 3f581586ce2..598c0250625 100644 --- a/pkg/querier/tripperware/queryrange/results_cache.go +++ b/pkg/querier/tripperware/queryrange/results_cache.go @@ -544,10 +544,10 @@ func merge(extents []tripperware.Extent, acc *accumulator) ([]tripperware.Extent return nil, err } return append(extents, tripperware.Extent{ - Start: acc.Extent.Start, - End: acc.Extent.End, + Start: acc.Start, + End: 
acc.End, Response: any, - TraceId: acc.Extent.TraceId, + TraceId: acc.TraceId, }), nil } diff --git a/pkg/ring/kv/consul/client.go b/pkg/ring/kv/consul/client.go index a4a065416ef..ca7af54e9f6 100644 --- a/pkg/ring/kv/consul/client.go +++ b/pkg/ring/kv/consul/client.go @@ -31,7 +31,7 @@ var ( writeOptions = &consul.WriteOptions{} // ErrNotFound is returned by ConsulClient.Get. - ErrNotFound = fmt.Errorf("Not found") + ErrNotFound = fmt.Errorf("not found") backoffConfig = backoff.Config{ MinBackoff: 1 * time.Second, diff --git a/pkg/ring/kv/memberlist/memberlist_client_test.go b/pkg/ring/kv/memberlist/memberlist_client_test.go index 6c33b85441a..fbca1924c45 100644 --- a/pkg/ring/kv/memberlist/memberlist_client_test.go +++ b/pkg/ring/kv/memberlist/memberlist_client_test.go @@ -1324,10 +1324,8 @@ func poll(t testing.TB, d time.Duration, want interface{}, have func() interface t.Helper() deadline := time.Now().Add(d) - for { - if time.Now().After(deadline) { - break - } + for !time.Now().After(deadline) { + if reflect.DeepEqual(want, have()) { return } diff --git a/pkg/ring/kv/memberlist/memberlist_logger.go b/pkg/ring/kv/memberlist/memberlist_logger.go index 6ccb469b67c..30a28d06856 100644 --- a/pkg/ring/kv/memberlist/memberlist_logger.go +++ b/pkg/ring/kv/memberlist/memberlist_logger.go @@ -71,7 +71,7 @@ func (a loggerAdapter) Write(p []byte) (int, error) { if msg, ok := result["msg"]; ok { keyvals = append(keyvals, "msg", msg) } - if err := a.Logger.Log(keyvals...); err != nil { + if err := a.Log(keyvals...); err != nil { return 0, err } return len(p), nil diff --git a/pkg/ring/lifecycler.go b/pkg/ring/lifecycler.go index deaaf45837a..1a6812a9411 100644 --- a/pkg/ring/lifecycler.go +++ b/pkg/ring/lifecycler.go @@ -392,7 +392,7 @@ func (i *Lifecycler) setPreviousState(state InstanceState) { i.stateMtx.Lock() defer i.stateMtx.Unlock() - if !(state == ACTIVE || state == READONLY) { + if !(state == ACTIVE || state == READONLY) { //nolint:staticcheck level.Error(i.logger).Log("msg", "cannot store unsupported state to disk", "new_state", state, "old_state", i.tokenFile.PreviousState) return } @@ -449,7 +449,7 @@ func (i *Lifecycler) ClaimTokensFor(ctx context.Context, ingesterID string) erro claimTokens := func(in interface{}) (out interface{}, retry bool, err error) { ringDesc, ok := in.(*Desc) if !ok || ringDesc == nil { - return nil, false, fmt.Errorf("Cannot claim tokens in an empty ring") + return nil, false, fmt.Errorf("cannot claim tokens in an empty ring") } tokens = ringDesc.ClaimTokens(ingesterID, i.ID) @@ -1025,6 +1025,7 @@ func (i *Lifecycler) updateConsul(ctx context.Context) error { func (i *Lifecycler) changeState(ctx context.Context, state InstanceState) error { currState := i.GetState() // Only the following state transitions can be triggered externally + //nolint:staticcheck if !((currState == PENDING && state == JOINING) || (currState == JOINING && state == PENDING) || (currState == JOINING && state == ACTIVE) || @@ -1035,7 +1036,7 @@ func (i *Lifecycler) changeState(ctx context.Context, state InstanceState) error (currState == ACTIVE && state == READONLY) || // triggered by ingester mode (currState == READONLY && state == ACTIVE) || // triggered by ingester mode (currState == READONLY && state == LEAVING)) { // triggered by shutdown - return fmt.Errorf("Changing instance state from %v -> %v is disallowed", currState, state) + return fmt.Errorf("changing instance state from %v -> %v is disallowed", currState, state) } level.Info(i.logger).Log("msg", "changing instance state 
from", "old_state", currState, "new_state", state, "ring", i.RingName) diff --git a/pkg/ring/util.go b/pkg/ring/util.go index e05fc32a048..66a176c0543 100644 --- a/pkg/ring/util.go +++ b/pkg/ring/util.go @@ -177,7 +177,7 @@ func getFirstAddressOf(names []string, logger log.Logger) (string, error) { return ipAddr, nil } if ipAddr == "" { - return "", fmt.Errorf("No address found for %s", names) + return "", fmt.Errorf("no address found for %s", names) } if strings.HasPrefix(ipAddr, `169.254.`) { level.Warn(logger).Log("msg", "using automatic private ip", "address", ipAddr) diff --git a/pkg/ruler/frontend_client.go b/pkg/ruler/frontend_client.go index dff0fca11c5..5136db4e616 100644 --- a/pkg/ruler/frontend_client.go +++ b/pkg/ruler/frontend_client.go @@ -89,7 +89,7 @@ func (p *FrontendClient) makeRequest(ctx context.Context, qs string, ts time.Tim func (p *FrontendClient) InstantQuery(ctx context.Context, qs string, t time.Time) (promql.Vector, error) { log, ctx := spanlogger.New(ctx, "FrontendClient.InstantQuery") - defer log.Span.Finish() + defer log.Finish() req, err := p.makeRequest(ctx, qs, t) if err != nil { diff --git a/pkg/ruler/ruler_test.go b/pkg/ruler/ruler_test.go index ec7eb287c30..f6c1c61b096 100644 --- a/pkg/ruler/ruler_test.go +++ b/pkg/ruler/ruler_test.go @@ -1786,8 +1786,8 @@ func getRulesHATest(replicationFactor int) func(t *testing.T) { // Wait a bit to make sure ruler's ring is updated. time.Sleep(100 * time.Millisecond) - rulerAddrMap["ruler1"].Service.StopAsync() - if err := rulerAddrMap["ruler1"].Service.AwaitTerminated(context.Background()); err != nil { + rulerAddrMap["ruler1"].StopAsync() + if err := rulerAddrMap["ruler1"].AwaitTerminated(context.Background()); err != nil { t.Errorf("ruler %s was not terminated with error %s", "ruler1", err.Error()) } diff --git a/pkg/scheduler/scheduler.go b/pkg/scheduler/scheduler.go index 647fb379b8b..7c7ef4b7b3e 100644 --- a/pkg/scheduler/scheduler.go +++ b/pkg/scheduler/scheduler.go @@ -213,10 +213,10 @@ func (s *Scheduler) FrontendLoop(frontend schedulerpb.SchedulerForFrontend_Front switch msg.GetType() { case schedulerpb.ENQUEUE: err = s.enqueueRequest(frontendCtx, frontendAddress, msg) - switch { - case err == nil: + switch err { + case nil: resp = &schedulerpb.SchedulerToFrontend{Status: schedulerpb.OK} - case err == queue.ErrTooManyRequests: + case queue.ErrTooManyRequests: resp = &schedulerpb.SchedulerToFrontend{Status: schedulerpb.TOO_MANY_REQUESTS_PER_TENANT} default: resp = &schedulerpb.SchedulerToFrontend{Status: schedulerpb.ERROR, Error: err.Error()} diff --git a/pkg/storage/tsdb/bucketindex/storage_test.go b/pkg/storage/tsdb/bucketindex/storage_test.go index ec6866cd982..e10d910e088 100644 --- a/pkg/storage/tsdb/bucketindex/storage_test.go +++ b/pkg/storage/tsdb/bucketindex/storage_test.go @@ -15,7 +15,6 @@ import ( "github.com/cortexproject/cortex/pkg/storage/bucket" "github.com/cortexproject/cortex/pkg/storage/bucket/s3" - "github.com/cortexproject/cortex/pkg/storage/tsdb/testutil" cortex_testutil "github.com/cortexproject/cortex/pkg/storage/tsdb/testutil" ) @@ -64,9 +63,9 @@ func TestReadIndex_ShouldReturnTheParsedIndexOnSuccess(t *testing.T) { // Mock some blocks in the storage. 
bkt = BucketWithGlobalMarkers(bkt) - testutil.MockStorageBlock(t, bkt, userID, 10, 20) - testutil.MockStorageBlock(t, bkt, userID, 20, 30) - testutil.MockStorageDeletionMark(t, bkt, userID, testutil.MockStorageBlock(t, bkt, userID, 30, 40)) + cortex_testutil.MockStorageBlock(t, bkt, userID, 10, 20) + cortex_testutil.MockStorageBlock(t, bkt, userID, 20, 30) + cortex_testutil.MockStorageDeletionMark(t, bkt, userID, cortex_testutil.MockStorageBlock(t, bkt, userID, 30, 40)) // Write the index. u := NewUpdater(bkt, userID, nil, logger) @@ -120,10 +119,10 @@ func BenchmarkReadIndex(b *testing.B) { minT := int64(i * 10) maxT := int64((i + 1) * 10) - block := testutil.MockStorageBlock(b, bkt, userID, minT, maxT) + block := cortex_testutil.MockStorageBlock(b, bkt, userID, minT, maxT) if i < numBlockDeletionMarks { - testutil.MockStorageDeletionMark(b, bkt, userID, block) + cortex_testutil.MockStorageDeletionMark(b, bkt, userID, block) } } diff --git a/pkg/storage/tsdb/caching_bucket.go b/pkg/storage/tsdb/caching_bucket.go index 79a4f96463b..d99ae0a49f0 100644 --- a/pkg/storage/tsdb/caching_bucket.go +++ b/pkg/storage/tsdb/caching_bucket.go @@ -134,7 +134,7 @@ func (cfg *ChunksCacheConfig) RegisterFlagsWithPrefix(f *flag.FlagSet, prefix st f.DurationVar(&cfg.SubrangeTTL, prefix+"subrange-ttl", 24*time.Hour, "TTL for caching individual chunks subranges.") // In the multi level chunk cache, backfill TTL follows subrange TTL - cfg.ChunkCacheBackend.MultiLevel.BackFillTTL = cfg.SubrangeTTL + cfg.MultiLevel.BackFillTTL = cfg.SubrangeTTL } func (cfg *ChunksCacheConfig) Validate() error { diff --git a/pkg/storage/tsdb/index_cache.go b/pkg/storage/tsdb/index_cache.go index ae30154d9f7..20200a76ec6 100644 --- a/pkg/storage/tsdb/index_cache.go +++ b/pkg/storage/tsdb/index_cache.go @@ -92,15 +92,16 @@ func (cfg *IndexCacheConfig) Validate() error { return errors.WithMessagef(errDuplicatedIndexCacheBackend, "duplicated backend: %v", backend) } - if backend == IndexCacheBackendMemcached { + switch backend { + case IndexCacheBackendMemcached: if err := cfg.Memcached.Validate(); err != nil { return err } - } else if backend == IndexCacheBackendRedis { + case IndexCacheBackendRedis: if err := cfg.Redis.Validate(); err != nil { return err } - } else { + default: if err := cfg.InMemory.Validate(); err != nil { return err } diff --git a/pkg/storegateway/bucket_stores.go b/pkg/storegateway/bucket_stores.go index fe69645c57c..14b2393dbf1 100644 --- a/pkg/storegateway/bucket_stores.go +++ b/pkg/storegateway/bucket_stores.go @@ -326,7 +326,7 @@ func (u *BucketStores) syncUsersBlocks(ctx context.Context, f func(context.Conte // Series makes a series request to the underlying user bucket store. func (u *BucketStores) Series(req *storepb.SeriesRequest, srv storepb.Store_SeriesServer) error { spanLog, spanCtx := spanlogger.New(srv.Context(), "BucketStores.Series") - defer spanLog.Span.Finish() + defer spanLog.Finish() userID := getUserIDFromGRPCContext(spanCtx) if userID == "" { @@ -387,7 +387,7 @@ func (u *BucketStores) decrementInflightRequestCnt() { // LabelNames implements the Storegateway proto service. 
func (u *BucketStores) LabelNames(ctx context.Context, req *storepb.LabelNamesRequest) (*storepb.LabelNamesResponse, error) { spanLog, spanCtx := spanlogger.New(ctx, "BucketStores.LabelNames") - defer spanLog.Span.Finish() + defer spanLog.Finish() userID := getUserIDFromGRPCContext(spanCtx) if userID == "" { @@ -417,7 +417,7 @@ func (u *BucketStores) LabelNames(ctx context.Context, req *storepb.LabelNamesRe // LabelValues implements the Storegateway proto service. func (u *BucketStores) LabelValues(ctx context.Context, req *storepb.LabelValuesRequest) (*storepb.LabelValuesResponse, error) { spanLog, spanCtx := spanlogger.New(ctx, "BucketStores.LabelValues") - defer spanLog.Span.Finish() + defer spanLog.Finish() userID := getUserIDFromGRPCContext(spanCtx) if userID == "" { diff --git a/pkg/testexporter/correctness/time_flag.go b/pkg/testexporter/correctness/time_flag.go index 88d0bcb1a62..b473e3a816e 100644 --- a/pkg/testexporter/correctness/time_flag.go +++ b/pkg/testexporter/correctness/time_flag.go @@ -20,7 +20,7 @@ func NewTimeValue(t time.Time) TimeValue { // String implements flag.Value func (v TimeValue) String() string { - return v.Time.Format(time.RFC3339) + return v.Format(time.RFC3339) } // Set implements flag.Value diff --git a/pkg/util/extract/extract.go b/pkg/util/extract/extract.go index 4e9eb8ec297..e76e1a9cb75 100644 --- a/pkg/util/extract/extract.go +++ b/pkg/util/extract/extract.go @@ -10,7 +10,7 @@ import ( ) var ( - errNoMetricNameLabel = fmt.Errorf("No metric name label") + errNoMetricNameLabel = fmt.Errorf("no metric name label") ) // MetricNameFromLabelAdapters extracts the metric name from a list of LabelPairs. diff --git a/pkg/util/net.go b/pkg/util/net.go index 852f6bf6fca..8db916ca35b 100644 --- a/pkg/util/net.go +++ b/pkg/util/net.go @@ -37,7 +37,7 @@ func GetFirstAddressOf(names []string) (string, error) { return ipAddr, nil } if ipAddr == "" { - return "", fmt.Errorf("No address found for %s", names) + return "", fmt.Errorf("no address found for %s", names) } if strings.HasPrefix(ipAddr, `169.254.`) { level.Warn(util_log.Logger).Log("msg", "using automatic private ip", "address", ipAddr) diff --git a/pkg/util/priority_queue.go b/pkg/util/priority_queue.go index 9937c231c93..8d11c550883 100644 --- a/pkg/util/priority_queue.go +++ b/pkg/util/priority_queue.go @@ -103,7 +103,7 @@ func (pq *PriorityQueue) Dequeue() PriorityOp { pq.lock.Lock() defer pq.lock.Unlock() - for len(pq.queue) == 0 && !(pq.closing || pq.closed) { + for len(pq.queue) == 0 && (!pq.closing && !pq.closed) { pq.cond.Wait() } diff --git a/pkg/util/spanlogger/spanlogger.go b/pkg/util/spanlogger/spanlogger.go index c0b1184f0a0..a96f95726f8 100644 --- a/pkg/util/spanlogger/spanlogger.go +++ b/pkg/util/spanlogger/spanlogger.go @@ -89,7 +89,7 @@ func (s *SpanLogger) Log(kvps ...interface{}) error { if err != nil { return err } - s.Span.LogFields(fields...) + s.LogFields(fields...) 
return nil } @@ -99,6 +99,6 @@ func (s *SpanLogger) Error(err error) error { return nil } ext.Error.Set(s.Span, true) - s.Span.LogFields(otlog.Error(err)) + s.LogFields(otlog.Error(err)) return err } diff --git a/pkg/util/test/poll.go b/pkg/util/test/poll.go index fdd33264c94..b88e073a86a 100644 --- a/pkg/util/test/poll.go +++ b/pkg/util/test/poll.go @@ -10,10 +10,8 @@ import ( func Poll(t testing.TB, d time.Duration, want interface{}, have func() interface{}) { t.Helper() deadline := time.Now().Add(d) - for { - if time.Now().After(deadline) { - break - } + for !time.Now().After(deadline) { + if reflect.DeepEqual(want, have()) { return } diff --git a/pkg/util/validation/limits.go b/pkg/util/validation/limits.go index a40d2f9d378..53c7244662a 100644 --- a/pkg/util/validation/limits.go +++ b/pkg/util/validation/limits.go @@ -23,7 +23,7 @@ import ( util_log "github.com/cortexproject/cortex/pkg/util/log" ) -var errMaxGlobalSeriesPerUserValidation = errors.New("The ingester.max-global-series-per-user limit is unsupported if distributor.shard-by-all-labels is disabled") +var errMaxGlobalSeriesPerUserValidation = errors.New("the ingester.max-global-series-per-user limit is unsupported if distributor.shard-by-all-labels is disabled") var errDuplicateQueryPriorities = errors.New("duplicate entry of priorities found. Make sure they are all unique, including the default priority") var errCompilingQueryPriorityRegex = errors.New("error compiling query priority regex") var errDuplicatePerLabelSetLimit = errors.New("duplicate per labelSet limits found. Make sure they are all unique") diff --git a/tools/doc-generator/writer.go b/tools/doc-generator/writer.go index d6cea1d6118..0b8d6b64bcc 100644 --- a/tools/doc-generator/writer.go +++ b/tools/doc-generator/writer.go @@ -61,9 +61,10 @@ func (w *specWriter) writeConfigEntry(e *configEntry, indent int) { // Specification fieldDefault := e.fieldDefault - if e.fieldType == "string" { + switch e.fieldType { + case "string": fieldDefault = strconv.Quote(fieldDefault) - } else if e.fieldType == "duration" { + case "duration": fieldDefault = cleanupDuration(fieldDefault) }