diff --git a/Makefile b/Makefile
index dc8dc5a..1408681 100644
--- a/Makefile
+++ b/Makefile
@@ -52,7 +52,7 @@ fmt: ## Run go fmt against code.
 
 .PHONY: test
 test: ## Run go test against code.
-    $(GINKGO) --label-filter='!perf'
+    $(GINKGO) --label-filter='!perf' ./pkg/... ./
 
 .PHONY: test-perf
 test-perf: ## Run performance tests
@@ -61,6 +61,7 @@ test-perf: ## Run performance tests
         --name namespace-lister-perf-test $(PERF_CLUSTER_PROVIDER_FLAGS)
     KUBECONFIG=$(PERF_CLUSTER_KUBECONFIG) $(GINKGO) --label-filter='perf' \
         --keep-going --procs=1 --flake-attempts 2 --output-dir=$(PERF_OUT_DIR)
+    # -$(PERF_CLUSTER_PROVIDER) delete cluster --name namespace-lister-perf-test
 
 .PHONY: image-build
 image-build:
diff --git a/acceptance/config/patches/with_cache_resyncperiod.yaml b/acceptance/config/patches/with_cache_resyncperiod.yaml
new file mode 100644
index 0000000..eaa2399
--- /dev/null
+++ b/acceptance/config/patches/with_cache_resyncperiod.yaml
@@ -0,0 +1,5 @@
+- op: add
+  path: /spec/template/spec/containers/0/env/-
+  value:
+    name: CACHE_RESYNC_PERIOD
+    value: '20s'
diff --git a/acceptance/pkg/suite/steps.go b/acceptance/pkg/suite/steps.go
index a1353ad..cbf0d59 100644
--- a/acceptance/pkg/suite/steps.go
+++ b/acceptance/pkg/suite/steps.go
@@ -3,7 +3,9 @@ package suite
 import (
     "context"
     "fmt"
+    "log"
     "slices"
+    "time"
 
     "github.com/cucumber/godog"
@@ -11,13 +13,14 @@ import (
     rbacv1 "k8s.io/api/rbac/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/types"
+    "k8s.io/apimachinery/pkg/util/wait"
 
     tcontext "github.com/konflux-ci/namespace-lister/acceptance/pkg/context"
     arest "github.com/konflux-ci/namespace-lister/acceptance/pkg/rest"
 )
 
 func InjectSteps(ctx *godog.ScenarioContext) {
-    //read
+    // read
     ctx.Step(`^ServiceAccount has access to a namespace$`,
         func(ctx context.Context) (context.Context, error) { return UserInfoHasAccessToNNamespaces(ctx, 1) })
     ctx.Step(`^User has access to a namespace$`,
@@ -92,49 +95,56 @@ func TheUserCanRetrieveOnlyTheNamespacesTheyHaveAccessTo(ctx context.Context) (c
         return ctx, err
     }
 
-    ann := corev1.NamespaceList{}
-    if err := cli.List(ctx, &ann); err != nil {
-        return ctx, err
-    }
-
-    enn := tcontext.Namespaces(ctx)
-    if expected, actual := len(enn), len(ann.Items); expected != actual {
-        return ctx, fmt.Errorf("expected %d namespaces, actual %d", expected, actual)
-    }
+    return ctx, wait.PollUntilContextTimeout(ctx, 2*time.Second, 1*time.Minute, true, func(ctx context.Context) (done bool, err error) {
+        ann := corev1.NamespaceList{}
+        if err := cli.List(ctx, &ann); err != nil {
+            log.Printf("error listing namespaces: %v", err)
+            return false, nil
+        }
 
-    for _, en := range enn {
-        if !slices.ContainsFunc(ann.Items, func(an corev1.Namespace) bool {
-            return en.Name == an.Name
-        }) {
-            return ctx, fmt.Errorf("expected namespace %s not found in actual namespace list: %v", en.Name, ann.Items)
+        enn := tcontext.Namespaces(ctx)
+        if expected, actual := len(enn), len(ann.Items); expected != actual {
+            log.Printf("expected %d namespaces, actual %d", expected, actual)
+            return false, nil
         }
-    }
 
-    return ctx, nil
+        for _, en := range enn {
+            if !slices.ContainsFunc(ann.Items, func(an corev1.Namespace) bool {
+                return en.Name == an.Name
+            }) {
+                log.Printf("expected namespace %s not found in actual namespace list: %v", en.Name, ann.Items)
+                return false, nil
+            }
+        }
+        return true, nil
+    })
 }
 
 func TheUserCanRetrieveTheNamespace(ctx context.Context) (context.Context, error) {
     run := tcontext.RunId(ctx)
-
     cli, err := tcontext.InvokeBuildUserClientFunc(ctx)
     if err != nil {
         return ctx, err
     }
 
-    n := corev1.Namespace{}
-    k := types.NamespacedName{Name: fmt.Sprintf("run-%s-0", run)}
-    if err := cli.Get(ctx, k, &n); err != nil {
-        return ctx, err
-    }
-
-    enn := tcontext.Namespaces(ctx)
-    if expected, actual := 1, len(enn); expected != actual {
-        return ctx, fmt.Errorf("expected %d namespaces, actual %d: %v", expected, actual, enn)
-    }
+    return ctx, wait.PollUntilContextTimeout(ctx, 2*time.Second, 1*time.Minute, true, func(ctx context.Context) (done bool, err error) {
+        n := corev1.Namespace{}
+        k := types.NamespacedName{Name: fmt.Sprintf("run-%s-0", run)}
+        if err := cli.Get(ctx, k, &n); err != nil {
+            log.Printf("error getting namespace %v: %v", k, err)
+            return false, nil
+        }
 
-    if expected, actual := n.Name, enn[0].Name; actual != expected {
-        return ctx, fmt.Errorf("expected namespace %s, actual %s", expected, actual)
-    }
+        enn := tcontext.Namespaces(ctx)
+        if expected, actual := 1, len(enn); expected != actual {
+            log.Printf("expected %d namespaces, actual %d: %v", expected, actual, enn)
+            return false, nil
+        }
 
-    return ctx, nil
+        if expected, actual := n.Name, enn[0].Name; actual != expected {
+            log.Printf("expected namespace %s, actual %s", expected, actual)
+            return false, nil
+        }
+        return true, nil
+    })
 }
diff --git a/acceptance/test/dumb-proxy/Makefile b/acceptance/test/dumb-proxy/Makefile
index d115db5..22e00e0 100644
--- a/acceptance/test/dumb-proxy/Makefile
+++ b/acceptance/test/dumb-proxy/Makefile
@@ -35,6 +35,20 @@ deploy-namespace-lister: $(OUT_DIR)
             --kind "Deployment" \
             --name "namespace-lister" \
             --version "v1" && \
+        cp "$(ROOT_DIR)/../config/patches/with_log_debug.yaml" . && \
+        kustomize edit add patch \
+            --path "with_log_debug.yaml" \
+            --group "apps" \
+            --kind "Deployment" \
+            --name "namespace-lister" \
+            --version "v1" && \
+        cp "$(ROOT_DIR)/config/patches/with_cache_resyncperiod.yaml" . && \
+        kustomize edit add patch \
+            --path "with_cache_resyncperiod.yaml" \
+            --group "apps" \
+            --kind "Deployment" \
+            --name "namespace-lister" \
+            --version "v1" && \
         kustomize build | $(KUBECTL) apply -f - ; \
     )
diff --git a/acceptance/test/smart-proxy/Makefile b/acceptance/test/smart-proxy/Makefile
index d8b1f35..4d4b69d 100644
--- a/acceptance/test/smart-proxy/Makefile
+++ b/acceptance/test/smart-proxy/Makefile
@@ -26,6 +26,8 @@ deploy-namespace-lister: $(OUT_DIR)
     cd $(OUT_DIR)/config && \
         kustomize init && \
         kustomize edit add base "../../../../../config" && \
+        kustomize edit set namespace namespace-lister && \
+        kustomize edit set image "namespace-lister:latest=$(IMG)" && \
         cp "$(ROOT_DIR)/../config/patches/with_header_auth.yaml" . && \
         kustomize edit add patch \
             --path "with_header_auth.yaml" \
             --group "apps" \
             --kind "Deployment" \
             --name "namespace-lister" \
             --version "v1" && \
@@ -40,8 +42,20 @@ deploy-namespace-lister: $(OUT_DIR)
             --kind "Deployment" \
             --name "namespace-lister" \
             --version "v1" && \
-        kustomize edit set namespace namespace-lister && \
-        kustomize edit set image "namespace-lister:latest=$(IMG)" && \
+        cp "$(ROOT_DIR)/../config/patches/with_log_debug.yaml" . && \
+        kustomize edit add patch \
+            --path "with_log_debug.yaml" \
+            --group "apps" \
+            --kind "Deployment" \
+            --name "namespace-lister" \
+            --version "v1" && \
+        cp "$(ROOT_DIR)/config/patches/with_cache_resyncperiod.yaml" . && \
+        kustomize edit add patch \
+            --path "with_cache_resyncperiod.yaml" \
+            --group "apps" \
+            --kind "Deployment" \
+            --name "namespace-lister" \
+            --version "v1" && \
         kustomize build | $(KUBECTL) apply -f - ; \
     )
diff --git a/access_cache_startup.go b/access_cache_startup.go
new file mode 100644
index 0000000..e26712e
--- /dev/null
+++ b/access_cache_startup.go
@@ -0,0 +1,72 @@
+package main
+
+import (
+    "context"
+    "os"
+    "time"
+
+    authcache "github.com/konflux-ci/namespace-lister/pkg/auth/cache"
+
+    corev1 "k8s.io/api/core/v1"
+    rbacv1 "k8s.io/api/rbac/v1"
+    toolscache "k8s.io/client-go/tools/cache"
+    "k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac"
+    crcache "sigs.k8s.io/controller-runtime/pkg/cache"
+    "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+func buildAndStartAccessCache(ctx context.Context, resourceCache crcache.Cache) (*authcache.SynchronizedAccessCache, error) {
+    aur := &CRAuthRetriever{resourceCache, ctx, getLoggerFromContext(ctx)}
+    sae := rbac.NewSubjectAccessEvaluator(aur, aur, aur, aur, "")
+    synchCache := authcache.NewSynchronizedAccessCache(
+        sae,
+        resourceCache, authcache.CacheSynchronizerOptions{
+            Logger:       getLoggerFromContext(ctx),
+            ResyncPeriod: getResyncPeriodFromEnvOrZero(ctx),
+        },
+    )
+
+    // register event handlers on resource cache
+    oo := []client.Object{
+        &corev1.Namespace{},
+        &rbacv1.RoleBinding{},
+        &rbacv1.ClusterRole{},
+        &rbacv1.ClusterRoleBinding{},
+        &rbacv1.Role{},
+    }
+    for _, o := range oo {
+        i, err := resourceCache.GetInformer(ctx, o)
+        if err != nil {
+            return nil, err
+        }
+
+        if _, err := i.AddEventHandler(
+            toolscache.ResourceEventHandlerFuncs{
+                AddFunc:    func(obj interface{}) { synchCache.Request() },
+                UpdateFunc: func(oldObj, newObj interface{}) { synchCache.Request() },
+                DeleteFunc: func(obj interface{}) { synchCache.Request() },
+            }); err != nil {
+            return nil, err
+        }
+    }
+    synchCache.Start(ctx)
+
+    if err := synchCache.Synch(ctx); err != nil {
+        return nil, err
+    }
+    return synchCache, nil
+}
+
+func getResyncPeriodFromEnvOrZero(ctx context.Context) time.Duration {
+    var zero time.Duration
+    rps, ok := os.LookupEnv(EnvCacheResyncPeriod)
+    if !ok {
+        return zero
+    }
+    rp, err := time.ParseDuration(rps)
+    if err != nil {
+        getLoggerFromContext(ctx).Warn("cannot parse duration from environment variable", "error", err)
+        return zero
+    }
+    return rp
+}
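The resync period wired in above is resolved in two steps: getResyncPeriodFromEnvOrZero returns the zero duration when CACHE_RESYNC_PERIOD is unset or unparsable, and CacheSynchronizerOptions.Apply (added later in this diff) swaps a zero value for the 10-minute default via cmp.Or. Below is a minimal, self-contained sketch of that fallback chain using only the standard library; the helper name resyncPeriodFromEnv is illustrative and not part of this PR.

    package main

    import (
        "cmp"
        "fmt"
        "os"
        "time"
    )

    // resyncPeriodFromEnv collapses the two steps into one helper (illustrative only):
    // unset or unparsable input yields the zero duration, and cmp.Or then replaces the
    // zero value with the fallback, exactly as Apply does with the 10-minute default.
    func resyncPeriodFromEnv(key string, fallback time.Duration) time.Duration {
        var zero time.Duration
        rps, ok := os.LookupEnv(key)
        if !ok {
            return cmp.Or(zero, fallback) // unset -> zero -> fallback
        }
        rp, err := time.ParseDuration(rps)
        if err != nil {
            return cmp.Or(zero, fallback) // unparsable -> zero -> fallback
        }
        return cmp.Or(rp, fallback) // an explicit "0" would also fall back
    }

    func main() {
        os.Setenv("CACHE_RESYNC_PERIOD", "20s")
        fmt.Println(resyncPeriodFromEnv("CACHE_RESYNC_PERIOD", 10*time.Minute)) // 20s
        os.Setenv("CACHE_RESYNC_PERIOD", "not-a-duration")
        fmt.Println(resyncPeriodFromEnv("CACHE_RESYNC_PERIOD", 10*time.Minute)) // 10m0s
    }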
diff --git a/config/patches/with_cache_resyncperiod.yaml b/config/patches/with_cache_resyncperiod.yaml
new file mode 100644
index 0000000..cc837d2
--- /dev/null
+++ b/config/patches/with_cache_resyncperiod.yaml
@@ -0,0 +1,5 @@
+- op: add
+  path: /spec/template/spec/containers/0/env/-
+  value:
+    name: CACHE_RESYNC_PERIOD
+    value: '10m'
diff --git a/config/patches/with_log_debug.yaml b/config/patches/with_log_debug.yaml
new file mode 100644
index 0000000..4407e5b
--- /dev/null
+++ b/config/patches/with_log_debug.yaml
@@ -0,0 +1,5 @@
+- op: replace
+  path: /spec/template/spec/containers/0/env/0
+  value:
+    name: LOG_LEVEL
+    value: "-4"
diff --git a/const.go b/const.go
index 61b41e0..bc42e02 100644
--- a/const.go
+++ b/const.go
@@ -1,9 +1,10 @@
 package main
 
 const (
-    EnvLogLevel       string = "LOG_LEVEL"
-    EnvUsernameHeader string = "AUTH_USERNAME_HEADER"
-    EnvAddress        string = "ADDRESS"
+    EnvLogLevel          string = "LOG_LEVEL"
+    EnvUsernameHeader    string = "AUTH_USERNAME_HEADER"
+    EnvAddress           string = "ADDRESS"
+    EnvCacheResyncPeriod string = "CACHE_RESYNC_PERIOD"
 
     DefaultAddr           string = ":8080"
     DefaultHeaderUsername string = "X-Email"
diff --git a/interfaces_test.go b/interfaces_test.go
index 65200b7..9196499 100644
--- a/interfaces_test.go
+++ b/interfaces_test.go
@@ -2,9 +2,16 @@ package main_test
 
 import (
     "k8s.io/client-go/rest"
+
+    namespacelister "github.com/konflux-ci/namespace-lister"
 )
 
 //go:generate mockgen -source=interfaces_test.go -destination=mocks/rest_interface.go -package=mocks
+
 type FakeInterface interface {
     rest.Interface
 }
+
+type FakeSubjectNamespacesLister interface {
+    namespacelister.SubjectNamespacesLister
+}
diff --git a/main.go b/main.go
index dd765be..6231e97 100644
--- a/main.go
+++ b/main.go
@@ -11,7 +11,6 @@ import (
     "syscall"
 
     "github.com/go-logr/logr"
-    ctrl "sigs.k8s.io/controller-runtime"
     "sigs.k8s.io/controller-runtime/pkg/log"
 )
 
@@ -69,20 +68,26 @@ func run(l *slog.Logger) error {
 
     ctx = setLoggerIntoContext(ctx, l)
 
-    // create cache
-    l.Info("creating cache")
-    cacheCfg, err := NewCacheConfigFromEnv(cfg)
+    // create resource cache
+    l.Info("creating resource cache")
+    cacheCfg, err := NewResourceCacheConfigFromEnv(cfg)
     if err != nil {
         return err
     }
-    cache, err := BuildAndStartCache(ctx, cacheCfg)
+    resourceCache, err := BuildAndStartResourceCache(ctx, cacheCfg)
+    if err != nil {
+        return err
+    }
+
+    // create access cache
+    l.Info("creating access cache")
+    accessCache, err := buildAndStartAccessCache(ctx, resourceCache)
     if err != nil {
         return err
     }
 
-    // create the authorizer and the namespace lister
-    auth := NewAuthorizer(ctx, cache)
-    nsl := NewNamespaceLister(cache, auth)
+    // create the namespace lister
+    nsl := NewNamespaceListerForSubject(accessCache)
 
     // build http server
     l.Info("building server")
diff --git a/mocks/rest_interface.go b/mocks/rest_interface.go
index 8fb9583..978660f 100644
--- a/mocks/rest_interface.go
+++ b/mocks/rest_interface.go
@@ -13,6 +13,8 @@ import (
     reflect "reflect"
 
     gomock "go.uber.org/mock/gomock"
+    v1 "k8s.io/api/core/v1"
+    v10 "k8s.io/api/rbac/v1"
     schema "k8s.io/apimachinery/pkg/runtime/schema"
     types "k8s.io/apimachinery/pkg/types"
     rest "k8s.io/client-go/rest"
@@ -154,3 +156,41 @@ func (mr *MockFakeInterfaceMockRecorder) Verb(verb any) *gomock.Call {
     mr.mock.ctrl.T.Helper()
     return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Verb", reflect.TypeOf((*MockFakeInterface)(nil).Verb), verb)
 }
+
+// MockFakeSubjectNamespacesLister is a mock of FakeSubjectNamespacesLister interface.
+type MockFakeSubjectNamespacesLister struct {
+    ctrl     *gomock.Controller
+    recorder *MockFakeSubjectNamespacesListerMockRecorder
+    isgomock struct{}
+}
+
+// MockFakeSubjectNamespacesListerMockRecorder is the mock recorder for MockFakeSubjectNamespacesLister.
+type MockFakeSubjectNamespacesListerMockRecorder struct {
+    mock *MockFakeSubjectNamespacesLister
+}
+
+// NewMockFakeSubjectNamespacesLister creates a new mock instance.
+func NewMockFakeSubjectNamespacesLister(ctrl *gomock.Controller) *MockFakeSubjectNamespacesLister {
+    mock := &MockFakeSubjectNamespacesLister{ctrl: ctrl}
+    mock.recorder = &MockFakeSubjectNamespacesListerMockRecorder{mock}
+    return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use.
+func (m *MockFakeSubjectNamespacesLister) EXPECT() *MockFakeSubjectNamespacesListerMockRecorder {
+    return m.recorder
+}
+
+// List mocks base method.
+func (m *MockFakeSubjectNamespacesLister) List(subject v10.Subject) []v1.Namespace {
+    m.ctrl.T.Helper()
+    ret := m.ctrl.Call(m, "List", subject)
+    ret0, _ := ret[0].([]v1.Namespace)
+    return ret0
+}
+
+// List indicates an expected call of List.
+func (mr *MockFakeSubjectNamespacesListerMockRecorder) List(subject any) *gomock.Call {
+    mr.mock.ctrl.T.Helper()
+    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "List", reflect.TypeOf((*MockFakeSubjectNamespacesLister)(nil).List), subject)
+}
diff --git a/namespace_lister_suite_test.go b/namespace_lister_suite_test.go
index 234ad63..0d029f3 100644
--- a/namespace_lister_suite_test.go
+++ b/namespace_lister_suite_test.go
@@ -7,8 +7,9 @@ import (
     . "github.com/onsi/gomega"
 )
 
-//nolint:paralleltest
 func TestNamespaceLister(t *testing.T) {
+    t.Parallel()
+
     RegisterFailHandler(Fail)
     RunSpecs(t, "NamespaceLister Suite")
 }
diff --git a/namespacelister_for_subject.go b/namespacelister_for_subject.go
new file mode 100644
index 0000000..2098749
--- /dev/null
+++ b/namespacelister_for_subject.go
@@ -0,0 +1,60 @@
+package main
+
+import (
+    "context"
+    "strings"
+
+    corev1 "k8s.io/api/core/v1"
+    rbacv1 "k8s.io/api/rbac/v1"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+var _ NamespaceLister = &subjectNamespaceLister{}
+
+type SubjectNamespacesLister interface {
+    List(subject rbacv1.Subject) []corev1.Namespace
+}
+
+type subjectNamespaceLister struct {
+    subjectNamespacesLister SubjectNamespacesLister
+}
+
+func NewNamespaceListerForSubject(subjectNamespacesLister SubjectNamespacesLister) NamespaceLister {
+    return &subjectNamespaceLister{
+        subjectNamespacesLister: subjectNamespacesLister,
+    }
+}
+
+func (c *subjectNamespaceLister) ListNamespaces(ctx context.Context, username string) (*corev1.NamespaceList, error) {
+    sub := c.parseUsername(username)
+    nn := c.subjectNamespacesLister.List(sub)
+
+    // wrap the subject's namespaces in a NamespaceList
+    return &corev1.NamespaceList{
+        TypeMeta: metav1.TypeMeta{
+            // even though `kubectl get namespaces -o yaml` is showing `kind: List`
+            // the plain response from the APIServer is using `kind: NamespaceList`.
+            // Use `kubectl get namespaces -v9` to inspect the APIServer plain response.
+            Kind:       "NamespaceList",
+            APIVersion: corev1.SchemeGroupVersion.Version,
+        },
+        Items: nn,
+    }, nil
+}
+
+func (c *subjectNamespaceLister) parseUsername(username string) rbacv1.Subject {
+    if strings.HasPrefix(username, "system:serviceaccount:") {
+        ss := strings.Split(username, ":")
+        return rbacv1.Subject{
+            Kind:      "ServiceAccount",
+            Name:      ss[3],
+            Namespace: ss[2],
+        }
+    }
+
+    return rbacv1.Subject{
+        APIGroup: rbacv1.GroupName,
+        Kind:     "User",
+        Name:     username,
+    }
+}
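A lookup against the access cache only hits when the Subject built by parseUsername matches, field for field, the Subject keys the subject locator produced during a resync: service accounts are keyed by Kind/Namespace/Name with an empty APIGroup, users by Kind "User" in the rbac.authorization.k8s.io group. The sketch below mirrors parseUsername in a standalone program to show the two shapes; like the original, it assumes a well-formed system:serviceaccount:<namespace>:<name> username and is illustrative rather than the project's code.

    package main

    import (
        "fmt"
        "strings"

        rbacv1 "k8s.io/api/rbac/v1"
    )

    // parseUsername mirrors the function added in this diff: service-account usernames
    // become ServiceAccount subjects, everything else becomes a User subject in the
    // rbac.authorization.k8s.io group.
    func parseUsername(username string) rbacv1.Subject {
        if strings.HasPrefix(username, "system:serviceaccount:") {
            ss := strings.Split(username, ":")
            return rbacv1.Subject{Kind: "ServiceAccount", Name: ss[3], Namespace: ss[2]}
        }
        return rbacv1.Subject{APIGroup: rbacv1.GroupName, Kind: "User", Name: username}
    }

    func main() {
        fmt.Printf("%+v\n", parseUsername("system:serviceaccount:mynamespace:myserviceaccount"))
        // Kind:ServiceAccount Name:myserviceaccount Namespace:mynamespace (APIGroup empty)
        fmt.Printf("%+v\n", parseUsername("myuser"))
        // Kind:User Name:myuser APIGroup:rbac.authorization.k8s.io
    }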
"github.com/onsi/gomega" + + namespacelister "github.com/konflux-ci/namespace-lister" + "github.com/konflux-ci/namespace-lister/mocks" +) + +var _ = Describe("Subjectnamespaceslister", func() { + var subjectNamespacesLister *mocks.MockFakeSubjectNamespacesLister + enn := []corev1.Namespace{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "myns", + Labels: map[string]string{"key": "value"}, + Annotations: map[string]string{"key": "value"}, + }, + }, + } + + BeforeEach(func() { + ctrl := gomock.NewController(GinkgoT()) + defer ctrl.Finish() + + subjectNamespacesLister = mocks.NewMockFakeSubjectNamespacesLister(ctrl) + }) + + It("parses service account", func(ctx context.Context) { + // set expectation + subjectNamespacesLister.EXPECT(). + List( + rbacv1.Subject{ + Kind: "ServiceAccount", + Name: "myserviceaccount", + Namespace: "mynamespace", + }, + ). + Return(enn). + Times(1) + + // given + nl := namespacelister.NewNamespaceListerForSubject(subjectNamespacesLister) + + // when + Expect(nl.ListNamespaces(ctx, "system:serviceaccount:mynamespace:myserviceaccount")). + // then + To(BeEquivalentTo(&corev1.NamespaceList{ + TypeMeta: metav1.TypeMeta{ + Kind: "NamespaceList", + APIVersion: "v1", + }, + Items: enn, + })) + }) + + It("parses user", func(ctx context.Context) { + // set expectation + subjectNamespacesLister.EXPECT(). + List( + rbacv1.Subject{ + APIGroup: rbacv1.GroupName, + Kind: "User", + Name: "myuser", + }, + ). + Return(enn). + Times(1) + + // given + nl := namespacelister.NewNamespaceListerForSubject(subjectNamespacesLister) + + // when + Expect(nl.ListNamespaces(ctx, "myuser")). + // then + To(BeEquivalentTo(&corev1.NamespaceList{ + TypeMeta: metav1.TypeMeta{ + Kind: "NamespaceList", + APIVersion: "v1", + }, + Items: enn, + })) + }) +}) diff --git a/namespacelister.go b/namespacelister_with_authorizer.go similarity index 94% rename from namespacelister.go rename to namespacelister_with_authorizer.go index 0f2863a..fcc3c0c 100644 --- a/namespacelister.go +++ b/namespacelister_with_authorizer.go @@ -23,7 +23,7 @@ type namespaceLister struct { authorizer *rbac.RBACAuthorizer } -func NewNamespaceLister(reader client.Reader, authorizer *rbac.RBACAuthorizer) NamespaceLister { +func NewNamespaceListerWithAuthorizer(reader client.Reader, authorizer *rbac.RBACAuthorizer) NamespaceLister { return &namespaceLister{ Reader: reader, authorizer: authorizer, diff --git a/namespacelister_test.go b/namespacelister_with_authorizer_test.go similarity index 98% rename from namespacelister_test.go rename to namespacelister_with_authorizer_test.go index 4a8ae63..8e48d6e 100644 --- a/namespacelister_test.go +++ b/namespacelister_with_authorizer_test.go @@ -16,7 +16,6 @@ import ( ) var _ = Describe("Namespacelister", func() { - var ctx context.Context BeforeEach(func(tctx context.Context) { @@ -34,7 +33,7 @@ var _ = Describe("Namespacelister", func() { // given reader := fake.NewClientBuilder().WithLists(&nn, &cr, &crb, &r, &rb).Build() authorizer := namespacelister.NewAuthorizer(ctx, reader) - nsl := namespacelister.NewNamespaceLister(reader, authorizer) + nsl := namespacelister.NewNamespaceListerWithAuthorizer(reader, authorizer) // when ann, err := nsl.ListNamespaces(ctx, "user") diff --git a/performance_test.go b/performance_test.go index 5271a8c..d0b6228 100644 --- a/performance_test.go +++ b/performance_test.go @@ -22,7 +22,6 @@ import ( "k8s.io/apiserver/pkg/authorization/authorizer" "k8s.io/client-go/rest" ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/cache" 
"sigs.k8s.io/controller-runtime/pkg/client" ) @@ -37,7 +36,7 @@ var _ = Describe("Authorizing requests", Serial, Ordered, func() { var c client.Client var ans []client.Object var uns []client.Object - var cache cache.Cache + var cacheCfg *cacheConfig BeforeAll(func(ctx context.Context) { var err error @@ -59,13 +58,13 @@ var _ = Describe("Authorizing requests", Serial, Ordered, func() { // create resources err, ans, uns = createResources(ctx, c, username, 300, 800, 1200) utilruntime.Must(err) + }) + BeforeEach(func(ctx context.Context) { // create cache ls, err := labels.Parse(fmt.Sprintf("%s=%s", NamespaceTypeLabelKey, NamespaceTypeUserLabelValue)) utilruntime.Must(err) - cacheConfig := cacheConfig{restConfig: restConfig, namespacesLabelSector: ls} - cache, err = BuildAndStartCache(ctx, &cacheConfig) - utilruntime.Must(err) + cacheCfg = &cacheConfig{restConfig: restConfig, namespacesLabelSector: ls} }) It("efficiently authorize on a huge environment", Serial, Label("perf"), func(ctx context.Context) { @@ -76,9 +75,11 @@ var _ = Describe("Authorizing requests", Serial, Ordered, func() { // to print out the experiment's report and to include the experiment in any generated reports AddReportEntry(experiment.Name, experiment) - // create authorizer, namespacelister, and handler + // create cache, authorizer, namespacelister, and handler + cache, err := BuildAndStartResourceCache(ctx, cacheCfg) + utilruntime.Must(err) authzr := NewAuthorizer(ctx, cache) - nl := NewNamespaceLister(cache, authzr) + nl := NewNamespaceListerWithAuthorizer(cache, authzr) lnh := NewListNamespacesHandler(nl) // we sample a function repeatedly to get a statistically significant set of measurements @@ -190,6 +191,61 @@ var _ = Describe("Authorizing requests", Serial, Ordered, func() { // and assert that it hasn't changed much from ~100ms Expect(medianDuration).To(BeNumerically("~", 100*time.Millisecond, 70*time.Millisecond)) }) + + It("efficiently authorize on a huge environment with cached accesses", Serial, Label("perf"), func(ctx context.Context) { + // new gomega experiment + experiment := gmeasure.NewExperiment("Authorizing Request") + + // Register the experiment as a ReportEntry - this will cause Ginkgo's reporter infrastructure + // to print out the experiment's report and to include the experiment in any generated reports + AddReportEntry(experiment.Name, experiment) + + // create cache, namespacelister, and handler + cache, err := BuildAndStartResourceCache(ctx, cacheCfg) + utilruntime.Must(err) + c, err := buildAndStartAccessCache(ctx, cache) + utilruntime.Must(err) + + nl := NewNamespaceListerForSubject(c) + lnh := NewListNamespacesHandler(nl) + + // we sample a function repeatedly to get a statistically significant set of measurements + experiment.Sample(func(idx int) { + var err error + var nn *corev1.NamespaceList + + // measure ListNamespaces + experiment.MeasureDuration("internal listing", func() { + nn, err = nl.ListNamespaces(ctx, username) + }) + + // check results + if err != nil { + panic(err) + } + if lnn := len(nn.Items); lnn != len(ans) { + panic(fmt.Errorf("expecting %d namespaces, received %d", len(ans), lnn)) + } + }, gmeasure.SamplingConfig{N: 30, Duration: 2 * time.Minute}) + // we'll sample the function up to 30 times or up to 2 minutes, whichever comes first. 
+
+        // we sample a function repeatedly to get a statistically significant set of measurements
+        experiment.Sample(func(idx int) {
+            rctx := context.WithValue(context.Background(), ContextKeyUserDetails, &authenticator.Response{
+                User: &user.DefaultInfo{
+                    Name: username,
+                },
+            })
+            r := httptest.NewRequest(http.MethodGet, "/", nil).WithContext(rctx)
+            w := httptest.NewRecorder()
+
+            // measure http Handler
+            experiment.MeasureDuration("http listing", func() {
+                lnh.ServeHTTP(w, r)
+            })
+        }, gmeasure.SamplingConfig{N: 30, Duration: 2 * time.Minute})
+        // we'll sample the function up to 30 times or up to 2 minutes, whichever comes first.
+    })
 })
 
 func createResources(ctx context.Context, cli client.Client, user string, numAllowedNamespaces, numUnallowedNamespaces, numNonMatchingClusterRoles int) (error, []client.Object, []client.Object) {
diff --git a/pkg/auth/cache/access_cache.go b/pkg/auth/cache/access_cache.go
new file mode 100644
index 0000000..44faaef
--- /dev/null
+++ b/pkg/auth/cache/access_cache.go
@@ -0,0 +1,31 @@
+package cache
+
+import (
+    "sync/atomic"
+
+    corev1 "k8s.io/api/core/v1"
+    rbacv1 "k8s.io/api/rbac/v1"
+)
+
+// AccessCache stores the subject-to-namespaces mapping behind an atomic pointer.
+type AccessCache struct {
+    data atomic.Pointer[map[rbacv1.Subject][]corev1.Namespace]
+}
+
+func NewAccessCache() *AccessCache {
+    return &AccessCache{
+        data: atomic.Pointer[map[rbacv1.Subject][]corev1.Namespace]{},
+    }
+}
+
+func (c *AccessCache) List(subject rbacv1.Subject) []corev1.Namespace {
+    m := c.data.Load()
+    if m == nil {
+        return nil
+    }
+    return (*m)[subject]
+}
+
+func (c *AccessCache) Restock(data *map[rbacv1.Subject][]corev1.Namespace) {
+    c.data.Store(data)
+}
diff --git a/pkg/auth/cache/access_cache_test.go b/pkg/auth/cache/access_cache_test.go
new file mode 100644
index 0000000..d0fa384
--- /dev/null
+++ b/pkg/auth/cache/access_cache_test.go
@@ -0,0 +1,48 @@
+package cache_test
+
+import (
+    . "github.com/onsi/ginkgo/v2"
+    . "github.com/onsi/gomega"
+
+    corev1 "k8s.io/api/core/v1"
+    rbacv1 "k8s.io/api/rbac/v1"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+    "github.com/konflux-ci/namespace-lister/pkg/auth/cache"
+)
+
+var _ = Describe("AuthCache", func() {
+    enn := []corev1.Namespace{
+        {
+            ObjectMeta: metav1.ObjectMeta{
+                Name:        "myns",
+                Labels:      map[string]string{"key": "value"},
+                Annotations: map[string]string{"key": "value"},
+            },
+        },
+    }
+
+    It("returns an empty result if it is empty", func() {
+        // given
+        emptyCache := cache.NewAccessCache()
+
+        // when
+        nn := emptyCache.List(rbacv1.Subject{})
+
+        // then
+        Expect(nn).To(BeEmpty())
+    })
+
+    It("matches subjects", func() {
+        // given
+        sub := rbacv1.Subject{Kind: "User", Name: "myuser"}
+        c := cache.NewAccessCache()
+        c.Restock(&map[rbacv1.Subject][]corev1.Namespace{sub: enn})
+
+        // when
+        nn := c.List(sub)
+
+        // then
+        Expect(nn).To(BeEquivalentTo(enn))
+    })
+})
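AccessCache is a copy-on-write structure: List reads whatever map the last Restock published through the atomic pointer, and Restock swaps in a freshly built map, so readers never race with the synchronizer and no mutex is needed. The standalone sketch below demonstrates the same pattern with plain strings in place of rbacv1.Subject and corev1.Namespace; it is an illustration under those simplifying assumptions, not the project's implementation.

    package main

    import (
        "fmt"
        "sync"
        "sync/atomic"
    )

    // accessMap plays the role of AccessCache's data field: readers Load the current map
    // pointer and never block, while a writer publishes a completely new map with Store.
    // Because a published map is never mutated afterwards, no locking is required.
    type accessMap struct {
        data atomic.Pointer[map[string][]string]
    }

    func (c *accessMap) list(subject string) []string {
        m := c.data.Load()
        if m == nil {
            return nil
        }
        return (*m)[subject]
    }

    func (c *accessMap) restock(data *map[string][]string) { c.data.Store(data) }

    func main() {
        c := &accessMap{}
        var wg sync.WaitGroup
        // concurrent readers observe either the old snapshot or the new one, never a torn map
        for i := 0; i < 4; i++ {
            wg.Add(1)
            go func() {
                defer wg.Done()
                for j := 0; j < 1000; j++ {
                    _ = c.list("myuser")
                }
            }()
        }
        c.restock(&map[string][]string{"myuser": {"myns"}})
        wg.Wait()
        fmt.Println(c.list("myuser")) // [myns]
    }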
"github.com/onsi/gomega" +) + +func TestCache(t *testing.T) { + t.Parallel() + + RegisterFailHandler(Fail) + RunSpecs(t, "Cache Suite") +} diff --git a/pkg/auth/cache/interfaces_test.go b/pkg/auth/cache/interfaces_test.go new file mode 100644 index 0000000..fe41059 --- /dev/null +++ b/pkg/auth/cache/interfaces_test.go @@ -0,0 +1,16 @@ +package cache_test + +import ( + "k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +//go:generate mockgen -source=interfaces_test.go -destination=mocks/mocks.go -package=mocks + +type SubjectLocator interface { + rbac.SubjectLocator +} + +type ClientReader interface { + client.Reader +} diff --git a/pkg/auth/cache/mocks/mocks.go b/pkg/auth/cache/mocks/mocks.go new file mode 100644 index 0000000..cb1e076 --- /dev/null +++ b/pkg/auth/cache/mocks/mocks.go @@ -0,0 +1,121 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: interfaces_test.go +// +// Generated by this command: +// +// mockgen -source=interfaces_test.go -destination=mocks/mocks.go -package=mocks +// + +// Package mocks is a generated GoMock package. +package mocks + +import ( + context "context" + reflect "reflect" + + gomock "go.uber.org/mock/gomock" + v1 "k8s.io/api/rbac/v1" + authorizer "k8s.io/apiserver/pkg/authorization/authorizer" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +// MockSubjectLocator is a mock of SubjectLocator interface. +type MockSubjectLocator struct { + ctrl *gomock.Controller + recorder *MockSubjectLocatorMockRecorder + isgomock struct{} +} + +// MockSubjectLocatorMockRecorder is the mock recorder for MockSubjectLocator. +type MockSubjectLocatorMockRecorder struct { + mock *MockSubjectLocator +} + +// NewMockSubjectLocator creates a new mock instance. +func NewMockSubjectLocator(ctrl *gomock.Controller) *MockSubjectLocator { + mock := &MockSubjectLocator{ctrl: ctrl} + mock.recorder = &MockSubjectLocatorMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockSubjectLocator) EXPECT() *MockSubjectLocatorMockRecorder { + return m.recorder +} + +// AllowedSubjects mocks base method. +func (m *MockSubjectLocator) AllowedSubjects(attributes authorizer.Attributes) ([]v1.Subject, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AllowedSubjects", attributes) + ret0, _ := ret[0].([]v1.Subject) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// AllowedSubjects indicates an expected call of AllowedSubjects. +func (mr *MockSubjectLocatorMockRecorder) AllowedSubjects(attributes any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AllowedSubjects", reflect.TypeOf((*MockSubjectLocator)(nil).AllowedSubjects), attributes) +} + +// MockClientReader is a mock of ClientReader interface. +type MockClientReader struct { + ctrl *gomock.Controller + recorder *MockClientReaderMockRecorder + isgomock struct{} +} + +// MockClientReaderMockRecorder is the mock recorder for MockClientReader. +type MockClientReaderMockRecorder struct { + mock *MockClientReader +} + +// NewMockClientReader creates a new mock instance. +func NewMockClientReader(ctrl *gomock.Controller) *MockClientReader { + mock := &MockClientReader{ctrl: ctrl} + mock.recorder = &MockClientReaderMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockClientReader) EXPECT() *MockClientReaderMockRecorder { + return m.recorder +} + +// Get mocks base method. 
+func (m *MockClientReader) Get(ctx context.Context, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error {
+    m.ctrl.T.Helper()
+    varargs := []any{ctx, key, obj}
+    for _, a := range opts {
+        varargs = append(varargs, a)
+    }
+    ret := m.ctrl.Call(m, "Get", varargs...)
+    ret0, _ := ret[0].(error)
+    return ret0
+}
+
+// Get indicates an expected call of Get.
+func (mr *MockClientReaderMockRecorder) Get(ctx, key, obj any, opts ...any) *gomock.Call {
+    mr.mock.ctrl.T.Helper()
+    varargs := append([]any{ctx, key, obj}, opts...)
+    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockClientReader)(nil).Get), varargs...)
+}
+
+// List mocks base method.
+func (m *MockClientReader) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error {
+    m.ctrl.T.Helper()
+    varargs := []any{ctx, list}
+    for _, a := range opts {
+        varargs = append(varargs, a)
+    }
+    ret := m.ctrl.Call(m, "List", varargs...)
+    ret0, _ := ret[0].(error)
+    return ret0
+}
+
+// List indicates an expected call of List.
+func (mr *MockClientReaderMockRecorder) List(ctx, list any, opts ...any) *gomock.Call {
+    mr.mock.ctrl.T.Helper()
+    varargs := append([]any{ctx, list}, opts...)
+    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "List", reflect.TypeOf((*MockClientReader)(nil).List), varargs...)
+}
diff --git a/pkg/auth/cache/synchronized_access_cache.go b/pkg/auth/cache/synchronized_access_cache.go
new file mode 100644
index 0000000..8d0ec20
--- /dev/null
+++ b/pkg/auth/cache/synchronized_access_cache.go
@@ -0,0 +1,142 @@
+package cache
+
+import (
+    "context"
+    "errors"
+    "log/slog"
+    "sync"
+    "sync/atomic"
+    "time"
+
+    corev1 "k8s.io/api/core/v1"
+    rbacv1 "k8s.io/api/rbac/v1"
+    "k8s.io/apiserver/pkg/authorization/authorizer"
+    "k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac"
+    "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+var SynchAlreadyRunningErr error = errors.New("Synch operation already running")
+
+// SynchronizedAccessCache rebuilds the AccessCache asynchronously,
+// on request or on a timed resync.
+type SynchronizedAccessCache struct {
+    *AccessCache
+    request       chan struct{}
+    synchronizing atomic.Bool
+    once          sync.Once
+
+    subjectLocator  rbac.SubjectLocator
+    namespaceLister client.Reader
+
+    logger           *slog.Logger
+    syncErrorHandler func(context.Context, error, *SynchronizedAccessCache)
+    resyncPeriod     time.Duration
+}
+
+func NewSynchronizedAccessCache(
+    subjectLocator rbac.SubjectLocator,
+    namespaceLister client.Reader,
+    opts CacheSynchronizerOptions,
+) *SynchronizedAccessCache {
+    return opts.Apply(&SynchronizedAccessCache{
+        AccessCache: NewAccessCache(),
+        request:     make(chan struct{}, 1),
+
+        subjectLocator:  subjectLocator,
+        namespaceLister: namespaceLister,
+    })
+}
+
+func (s *SynchronizedAccessCache) Synch(ctx context.Context) error {
+    if !s.synchronizing.CompareAndSwap(false, true) {
+        // already running a synch operation
+        return SynchAlreadyRunningErr
+    }
+    defer s.synchronizing.Store(false)
+
+    s.logger.Debug("start synchronization")
+    nn := corev1.NamespaceList{}
+    if err := s.namespaceLister.List(ctx, &nn); err != nil {
+        return err
+    }
+
+    c := map[rbacv1.Subject][]corev1.Namespace{}
+
+    // get subjects for each namespace
+    for _, ns := range nn.Items {
+        ar := authorizer.AttributesRecord{
+            Verb:            "get",
+            Resource:        "namespaces",
+            APIGroup:        corev1.GroupName,
+            APIVersion:      corev1.SchemeGroupVersion.Version,
+            Name:            ns.GetName(),
+            Namespace:       ns.GetName(),
+            ResourceRequest: true,
+        }
+
+        ss, err := s.subjectLocator.AllowedSubjects(ar)
+        if err != nil {
+            // do not forward the error as it is likely due
+            // to (cluster)roles evicted from the cache
+            s.logger.Debug("cache restocking: error calculating allowed subjects", "error", err)
+        }
+
+        for _, s := range ss {
+            c[s] = append(c[s], ns)
+        }
+    }
+
+    // restock the cache
+    s.AccessCache.Restock(&c)
+
+    s.logger.Debug("cache restocked")
+    return nil
+}
+
+func (s *SynchronizedAccessCache) Request() bool {
+    select {
+    case s.request <- struct{}{}:
+        // requested correctly
+        return true
+    default:
+        // a request is already present
+        return false
+    }
+}
+
+func (s *SynchronizedAccessCache) Start(ctx context.Context) {
+    s.once.Do(func() {
+        // run time based resync
+        go func() {
+            for {
+                select {
+                case <-ctx.Done():
+                    // termination
+                    s.logger.Info("terminating time-based cache synchronization: context done")
+                    return
+                case <-time.After(s.resyncPeriod):
+                    ok := s.Request()
+                    s.logger.Debug("time-based cache synchronization request made", "queued", ok)
+                }
+            }
+        }()
+
+        // schedule requested synch
+        go func() {
+            for {
+                select {
+                case <-ctx.Done():
+                    // termination
+                    s.logger.Info("terminating cache synchronization goroutine: context done")
+                    return
+
+                case <-s.request:
+                    // a new request is present
+                    s.logger.Debug("start requested cache synchronization")
+                    if err := s.Synch(ctx); err != nil {
+                        s.syncErrorHandler(ctx, err, s)
+                    }
+                }
+            }
+        }()
+    })
+}
diff --git a/pkg/auth/cache/synchronized_access_cache_opts.go b/pkg/auth/cache/synchronized_access_cache_opts.go
new file mode 100644
index 0000000..6a9a37e
--- /dev/null
+++ b/pkg/auth/cache/synchronized_access_cache_opts.go
@@ -0,0 +1,34 @@
+package cache
+
+import (
+    "cmp"
+    "context"
+    "log/slog"
+    "time"
+)
+
+type CacheSynchronizerOptions struct {
+    Logger           *slog.Logger
+    ResyncPeriod     time.Duration
+    SyncErrorHandler func(context.Context, error, *SynchronizedAccessCache)
+}
+
+var defaultCacheSynchronizerOptions = CacheSynchronizerOptions{
+    Logger:       slog.Default(),
+    ResyncPeriod: 10 * time.Minute,
+    SyncErrorHandler: func(ctx context.Context, err error, s *SynchronizedAccessCache) {
+        s.logger.Error("error synchronizing cache", "error", err)
+    },
+}
+
+func (opts *CacheSynchronizerOptions) Apply(s *SynchronizedAccessCache) *SynchronizedAccessCache {
+    s.resyncPeriod = cmp.Or(opts.ResyncPeriod, defaultCacheSynchronizerOptions.ResyncPeriod)
+
+    s.syncErrorHandler = opts.SyncErrorHandler
+    if s.syncErrorHandler == nil {
+        s.syncErrorHandler = defaultCacheSynchronizerOptions.SyncErrorHandler
+    }
+
+    s.logger = cmp.Or(opts.Logger, defaultCacheSynchronizerOptions.Logger)
+    return s
+}
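The synchronizer above leans on two small concurrency guards: Request pushes into a one-slot buffered channel, so a burst of informer events collapses into a single pending resync, and Synch takes a CompareAndSwap flag, so an overlapping run returns SynchAlreadyRunningErr instead of racing the one in flight. Below is a self-contained sketch of just those two guards; the names are illustrative, and the real type also rebuilds the subject-to-namespaces map inside Synch.

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    // resyncTrigger mirrors the two guards used by SynchronizedAccessCache: a one-slot
    // buffered channel coalesces bursts of requests into a single pending resync, and a
    // CompareAndSwap flag rejects a synch that would overlap with one already running.
    type resyncTrigger struct {
        request chan struct{}
        running atomic.Bool
    }

    func newResyncTrigger() *resyncTrigger {
        return &resyncTrigger{request: make(chan struct{}, 1)}
    }

    // requestResync is non-blocking: the first call queues a resync, later calls are
    // dropped until the pending one is consumed by the worker.
    func (t *resyncTrigger) requestResync() bool {
        select {
        case t.request <- struct{}{}:
            return true
        default:
            return false
        }
    }

    func (t *resyncTrigger) synch() error {
        if !t.running.CompareAndSwap(false, true) {
            return fmt.Errorf("synch already running")
        }
        defer t.running.Store(false)
        // ... rebuild the subject -> namespaces map here ...
        return nil
    }

    func main() {
        t := newResyncTrigger()
        fmt.Println(t.requestResync()) // true: queued
        fmt.Println(t.requestResync()) // false: coalesced with the pending request
        <-t.request                    // the worker goroutine would receive here and call synch
        fmt.Println(t.synch())         // <nil>
    }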
"github.com/onsi/gomega" + "go.uber.org/mock/gomock" + + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + "github.com/konflux-ci/namespace-lister/pkg/auth/cache" + "github.com/konflux-ci/namespace-lister/pkg/auth/cache/mocks" +) + +var _ = Describe("SynchronizedAccessCache", func() { + var ctrl *gomock.Controller + var subjectLocator *mocks.MockSubjectLocator + var namespaceListerBuilder fake.ClientBuilder + + userSubject := rbacv1.Subject{ + Kind: "User", + APIGroup: rbacv1.SchemeGroupVersion.Group, + Name: "myuser", + } + + serviceAccountSubject := rbacv1.Subject{ + Kind: "ServiceAccount", + Name: "myserviceaccount", + Namespace: "mynamespace", + } + + namespaces := []corev1.Namespace{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "myns", + Labels: map[string]string{"key": "value"}, + Annotations: map[string]string{"key": "value"}, + }, + }, + } + + BeforeEach(func() { + ctrl = gomock.NewController(GinkgoT()) + subjectLocator = mocks.NewMockSubjectLocator(ctrl) + s := runtime.NewScheme() + utilruntime.Must(corev1.AddToScheme(s)) + namespaceListerBuilder.WithScheme(s) + }) + + It("can not run synch twice", func(ctx context.Context) { + // given + namespaceLister := mocks.NewMockClientReader(ctrl) + namespaceLister.EXPECT(). + List(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, nn *corev1.NamespaceList, opts ...client.ListOption) error { + time.Sleep(5 * time.Second) + return nil + }). + Times(1) + nsc := cache.NewSynchronizedAccessCache(subjectLocator, namespaceLister, cache.CacheSynchronizerOptions{}) + + // when + go func() { _ = nsc.Synch(ctx) }() + time.Sleep(1 * time.Second) + + // then + Expect(nsc.Synch(ctx)).To(MatchError(cache.SynchAlreadyRunningErr)) + }) + + It("restocks cache with empty list", func(ctx context.Context) { + namespaceLister := mocks.NewMockClientReader(ctrl) + namespaceLister.EXPECT(). + List(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, nn *corev1.NamespaceList, opts ...client.ListOption) error { + (&corev1.NamespaceList{Items: namespaces}).DeepCopyInto(nn) + return nil + }). + Times(1) + subjectLocator.EXPECT(). + AllowedSubjects(gomock.Any()). + Return([]rbacv1.Subject{}, nil). + Times(1) + + nsc := cache.NewSynchronizedAccessCache(subjectLocator, namespaceLister, cache.CacheSynchronizerOptions{}) + + Expect(nsc.Synch(ctx)).ToNot(HaveOccurred()) + Expect(nsc.AccessCache.List(userSubject)).To(BeEmpty()) + }) + + It("matches user after synch", func(ctx context.Context) { + namespaceLister := mocks.NewMockClientReader(ctrl) + namespaceLister.EXPECT(). + List(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, nn *corev1.NamespaceList, opts ...client.ListOption) error { + (&corev1.NamespaceList{Items: namespaces}).DeepCopyInto(nn) + return nil + }). + Times(1) + subjectLocator.EXPECT(). + AllowedSubjects(gomock.Any()). + Return([]rbacv1.Subject{userSubject}, nil). 
+            Times(1)
+
+        nsc := cache.NewSynchronizedAccessCache(subjectLocator, namespaceLister, cache.CacheSynchronizerOptions{})
+
+        Expect(nsc.Synch(ctx)).ToNot(HaveOccurred())
+        Expect(nsc.AccessCache.List(userSubject)).To(BeEquivalentTo(namespaces))
+    })
+
+    It("matches ServiceAccount after synch", func(ctx context.Context) {
+        namespaceLister := mocks.NewMockClientReader(ctrl)
+        namespaceLister.EXPECT().
+            List(gomock.Any(), gomock.Any(), gomock.Any()).
+            DoAndReturn(func(ctx context.Context, nn *corev1.NamespaceList, opts ...client.ListOption) error {
+                (&corev1.NamespaceList{Items: namespaces}).DeepCopyInto(nn)
+                return nil
+            }).
+            Times(1)
+        subjectLocator.EXPECT().
+            AllowedSubjects(gomock.Any()).
+            Return([]rbacv1.Subject{serviceAccountSubject}, nil).
+            Times(1)
+
+        nsc := cache.NewSynchronizedAccessCache(subjectLocator, namespaceLister, cache.CacheSynchronizerOptions{})
+
+        Expect(nsc.Synch(ctx)).ToNot(HaveOccurred())
+        Expect(nsc.AccessCache.List(serviceAccountSubject)).To(BeEquivalentTo(namespaces))
+    })
+})
diff --git a/cache.go b/resource_cache.go
similarity index 97%
rename from cache.go
rename to resource_cache.go
index d80661b..0cc326b 100644
--- a/cache.go
+++ b/resource_cache.go
@@ -100,7 +100,7 @@ type cacheConfig struct {
     namespacesLabelSector labels.Selector
 }
 
-func BuildAndStartCache(ctx context.Context, cfg *cacheConfig) (cache.Cache, error) {
+func BuildAndStartResourceCache(ctx context.Context, cfg *cacheConfig) (cache.Cache, error) {
     // build scheme
     s := runtime.NewScheme()
     if err := corev1.AddToScheme(s); err != nil {
diff --git a/cache_config.go b/resource_cache_config.go
similarity index 69%
rename from cache_config.go
rename to resource_cache_config.go
index f4d4533..2f64e47 100644
--- a/cache_config.go
+++ b/resource_cache_config.go
@@ -11,9 +11,9 @@ import (
 
 const CacheNamespaceLabelSelectorEnv string = "CACHE_NAMESPACE_LABELSELECTOR"
 
-var ConfigErr error = errors.New("error building cache configuration")
+var ResourceCacheConfigErr error = errors.New("error building resource cache configuration")
 
-func NewCacheConfigFromEnv(cfg *rest.Config) (*cacheConfig, error) {
+func NewResourceCacheConfigFromEnv(cfg *rest.Config) (*cacheConfig, error) {
     // get namespaces labelSelector
     cacheCfg := &cacheConfig{restConfig: cfg}
     if err := getNamespacesLabelSelectors(cacheCfg); err != nil {
@@ -26,7 +26,7 @@ func getNamespacesLabelSelectors(cfg *cacheConfig) error {
     ls, err := labels.Parse(os.Getenv(CacheNamespaceLabelSelectorEnv))
     if err != nil {
-        return fmt.Errorf("%w for namespaces: %w", ConfigErr, err)
+        return fmt.Errorf("%w for namespaces: %w", ResourceCacheConfigErr, err)
     }
     cfg.namespacesLabelSector = ls