diff --git a/Makefile b/Makefile index 26b142f26..89e0a4052 100644 --- a/Makefile +++ b/Makefile @@ -96,6 +96,9 @@ $(OUTPUT_DIR)/.clientset: $(GENERATED_PROTO_FILES) $(SOURCES) .PHONY: generated-code generated-code: $(OUTPUT_DIR)/.generated-code update-licenses +.PHONY: generate-all +generate-all: generated-code + SUBDIRS:=pkg test $(OUTPUT_DIR)/.generated-code: mkdir -p ${OUTPUT_DIR} diff --git a/README.md b/README.md index df29233fe..f6eae39d3 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,10 @@ # solo-kit A collection of code generation and libraries to for API development. +### Testing +To generate tests set `SkipGeneratedTests` to false in the `generate.go` file. This will use the test templates +to generate the code for the tests. This will allow generation of templates such as `pkg/code-generator/codegen/templates/snapshot_emitter_test_template.go` + ### Description: - Define your declarative API in `.proto` files - APIs are defined by top-level protobuf messages in `.proto` files diff --git a/changelog/v0.41.0/namespace-selectors.yaml b/changelog/v0.41.0/namespace-selectors.yaml new file mode 100644 index 000000000..2bf84cbd9 --- /dev/null +++ b/changelog/v0.41.0/namespace-selectors.yaml @@ -0,0 +1,10 @@ +changelog: + - type: BREAKING_CHANGE + issueLink: https://github.com/solo-io/gloo/issues/5868 + resolvesIssue: false + description: | + Added the ability to watch namespaces given by Expression Selectors in the Watch Opts. + Watched Namespaces work as normally. When Expression Selectors are set the snapshot + emitter will watch, in addition to the watched namespaces, namespaces that + are labeled and meet the criteria of the Expression Selector. All resource + clients will watch these namespaces, if set, via the snapshot emitters. \ No newline at end of file diff --git a/pkg/api/external/kubernetes/customresourcedefinition/resource_client.go b/pkg/api/external/kubernetes/customresourcedefinition/resource_client.go index 3182cdad2..b9f9d86ff 100644 --- a/pkg/api/external/kubernetes/customresourcedefinition/resource_client.go +++ b/pkg/api/external/kubernetes/customresourcedefinition/resource_client.go @@ -67,6 +67,10 @@ func ToKubeCustomResourceDefinition(resource resources.Resource) (*v1.CustomReso var _ clients.ResourceClient = &customResourceDefinitionResourceClient{} +func (rc *customResourceDefinitionResourceClient) RegisterNamespace(namespace string) error { + return nil +} + func (rc *customResourceDefinitionResourceClient) Read(namespace, name string, opts clients.ReadOpts) (resources.Resource, error) { if err := resources.ValidateName(name); err != nil { return nil, errors.Wrapf(err, "validation error") diff --git a/pkg/api/external/kubernetes/deployment/resource_client.go b/pkg/api/external/kubernetes/deployment/resource_client.go index 2a4e4cf68..934467533 100644 --- a/pkg/api/external/kubernetes/deployment/resource_client.go +++ b/pkg/api/external/kubernetes/deployment/resource_client.go @@ -66,6 +66,10 @@ func ToKubeDeployment(resource resources.Resource) (*appsv1.Deployment, error) { var _ clients.ResourceClient = &deploymentResourceClient{} +func (rc *deploymentResourceClient) RegisterNamespace(namespace string) error { + return nil +} + func (rc *deploymentResourceClient) Read(namespace, name string, opts clients.ReadOpts) (resources.Resource, error) { if err := resources.ValidateName(name); err != nil { return nil, errors.Wrapf(err, "validation error") diff --git a/pkg/api/external/kubernetes/job/resource_client.go 
b/pkg/api/external/kubernetes/job/resource_client.go index 2a129a9e0..e5a785d23 100644 --- a/pkg/api/external/kubernetes/job/resource_client.go +++ b/pkg/api/external/kubernetes/job/resource_client.go @@ -66,6 +66,10 @@ func ToKubeJob(resource resources.Resource) (*batchv1.Job, error) { var _ clients.ResourceClient = &jobResourceClient{} +func (rc *jobResourceClient) RegisterNamespace(namespace string) error { + return nil +} + func (rc *jobResourceClient) Read(namespace, name string, opts clients.ReadOpts) (resources.Resource, error) { if err := resources.ValidateName(name); err != nil { return nil, errors.Wrapf(err, "validation error") diff --git a/pkg/api/external/kubernetes/namespace/resource_client.go b/pkg/api/external/kubernetes/namespace/resource_client.go index 7bef56223..6ecb37400 100644 --- a/pkg/api/external/kubernetes/namespace/resource_client.go +++ b/pkg/api/external/kubernetes/namespace/resource_client.go @@ -17,7 +17,6 @@ import ( kubev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/kubernetes" ) @@ -65,6 +64,10 @@ func ToKubeNamespace(resource resources.Resource) (*kubev1.Namespace, error) { return &namespace, nil } +func (rc *namespaceResourceClient) RegisterNamespace(namespace string) error { + return rc.cache.RegisterNewNamespaceCache(namespace) +} + func (rc *namespaceResourceClient) Read(namespace, name string, opts clients.ReadOpts) (resources.Resource, error) { if err := resources.ValidateName(name); err != nil { return nil, eris.Wrapf(err, "validation error") @@ -173,7 +176,11 @@ func (rc *namespaceResourceClient) List(namespace string, opts clients.ListOpts) return nil, eris.New("to list namespaces you must watch all namespaces") } - namespaceObjList, err := rc.cache.NamespaceLister().List(labels.SelectorFromSet(opts.Selector)) + listOptions, err := clients.GetLabelSelector(opts) + if err != nil { + return nil, err + } + namespaceObjList, err := rc.cache.NamespaceLister().List(listOptions) if err != nil { return nil, eris.Wrapf(err, "listing namespaces level") } diff --git a/pkg/api/external/kubernetes/namespace/resource_namespace.go b/pkg/api/external/kubernetes/namespace/resource_namespace.go new file mode 100644 index 000000000..f5ffd78c4 --- /dev/null +++ b/pkg/api/external/kubernetes/namespace/resource_namespace.go @@ -0,0 +1,185 @@ +package namespace + +import ( + "bytes" + + "github.com/pkg/errors" + "github.com/solo-io/solo-kit/pkg/api/v1/clients" + "github.com/solo-io/solo-kit/pkg/api/v1/clients/kube/cache" + "github.com/solo-io/solo-kit/pkg/api/v1/resources" + skkube "github.com/solo-io/solo-kit/pkg/api/v1/resources/common/kubernetes" + kubev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + kubewatch "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/kubernetes" +) + +var _ resources.ResourceNamespaceLister = &kubeResourceNamespaceLister{} +var _ resources.ResourceNamespaceLister = &kubeClientResourceNamespaceLister{} + +// NewKubeClientCacheResourceNamespaceLister will create a new resource namespace lister that requires the kubernestes +// client and cache. 
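A hedged usage sketch for the client-backed lister defined in this file: discovering labeled namespaces while excluding namespaces that are already watched. The kubeconfig loading, label expression, and namespace names below are illustrative assumptions, not part of this change.

package main

import (
	"context"
	"fmt"

	"github.com/solo-io/solo-kit/pkg/api/external/kubernetes/namespace"
	"github.com/solo-io/solo-kit/pkg/api/v1/resources"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	lister := namespace.NewKubeClientResourceNamespaceLister(kubernetes.NewForConfigOrDie(cfg))

	// Namespaces that are already watched are excluded via a metadata.name!= field selector.
	alreadyWatched := resources.ResourceNamespaceList{{Name: "gloo-system"}}

	nsList, err := lister.GetResourceNamespaceList(resources.ResourceNamespaceListOptions{
		Ctx:                context.Background(),
		ExpressionSelector: "environment in (dev, staging)", // set-based label selector
	}, alreadyWatched)
	if err != nil {
		panic(err)
	}
	for _, ns := range nsList {
		fmt.Println("selected namespace:", ns.Name)
	}
}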
+func NewKubeClientCacheResourceNamespaceLister(kube kubernetes.Interface, cache cache.KubeCoreCache) resources.ResourceNamespaceLister { + return &kubeResourceNamespaceLister{ + client: NewNamespaceClient(kube, cache), + } +} + +// NewKubeClientResourceNamespaceLister will create a new resource namespace lister that requires the kubernetes client +// interface. +func NewKubeClientResourceNamespaceLister(kube kubernetes.Interface) resources.ResourceNamespaceLister { + return &kubeClientResourceNamespaceLister{ + kube: kube, + } +} + +type kubeResourceNamespaceLister struct { + client skkube.KubeNamespaceClient +} + +// GetResourceNamespaceList is the kubernetes implementation that returns the list of namespaces +func (kns *kubeResourceNamespaceLister) GetResourceNamespaceList(opts resources.ResourceNamespaceListOptions, filtered resources.ResourceNamespaceList) (resources.ResourceNamespaceList, error) { + namespaces, err := kns.client.List(clients.TranslateResourceNamespaceListToListOptions(opts)) + if err != nil { + return nil, err + } + converted := convertNamespaceListToResourceNamespace(namespaces) + return kns.filter(converted, filtered), nil +} + +// GetResourceNamespaceWatch returns a watch for events that occur on kube namespaces returning a list of all the namespaces +func (kns *kubeResourceNamespaceLister) GetResourceNamespaceWatch(opts resources.ResourceNamespaceWatchOptions, filtered resources.ResourceNamespaceList) (chan resources.ResourceNamespaceList, <-chan error, error) { + ctx := opts.Ctx + wopts := clients.TranslateResourceNamespaceListToWatchOptions(opts) + namespaceChan, errorChan, err := kns.client.Watch(wopts) + if err != nil { + return nil, nil, err + } + + resourceNamespaceChan := make(chan resources.ResourceNamespaceList) + go func() { + defer close(resourceNamespaceChan) + for { + select { + case namespaceList := <-namespaceChan: + select { + case resourceNamespaceChan <- kns.filter(convertNamespaceListToResourceNamespace(namespaceList), filtered): + case <-ctx.Done(): + return + } + case <-ctx.Done(): + return + } + } + }() + return resourceNamespaceChan, errorChan, nil +} + +func (kns *kubeResourceNamespaceLister) filter(namespaces resources.ResourceNamespaceList, filter resources.ResourceNamespaceList) resources.ResourceNamespaceList { + filteredList := resources.ResourceNamespaceList{} + for _, ns := range namespaces { + add := true + for _, wns := range filter { + if ns.Name == wns.Name { + add = false + break + } + } + if add { + filteredList = append(filteredList, ns) + } + } + return filteredList +} + +func convertNamespaceListToResourceNamespace(namespaces skkube.KubeNamespaceList) resources.ResourceNamespaceList { + l := make(resources.ResourceNamespaceList, len(namespaces)) + for i, ns := range namespaces { + l[i] = resources.ResourceNamespace{Name: ns.ObjectMeta.Name} + } + return l +} + +type kubeClientResourceNamespaceLister struct { + kube kubernetes.Interface +} + +// GetResourceNamespaceList is the kubernetes implementation that returns the list of namespaces +func (client *kubeClientResourceNamespaceLister) GetResourceNamespaceList(opts resources.ResourceNamespaceListOptions, filtered resources.ResourceNamespaceList) (resources.ResourceNamespaceList, error) { + excludeNamespaces := client.getExcludeFieldSelector(filtered) + namespaceList, err := client.kube.CoreV1().Namespaces().List(opts.Ctx, metav1.ListOptions{FieldSelector: excludeNamespaces, LabelSelector: opts.ExpressionSelector}) + if err != nil { + return nil, err + } + return 
convertNamespaceListToResourceNamespaceList(namespaceList), nil +} + +// GetResourceNamespaceWatch returns a watch for events that occur on kube namespaces returning a list of all the namespaces +func (client *kubeClientResourceNamespaceLister) GetResourceNamespaceWatch(opts resources.ResourceNamespaceWatchOptions, filtered resources.ResourceNamespaceList) (chan resources.ResourceNamespaceList, <-chan error, error) { + excludeNamespaces := client.getExcludeFieldSelector(filtered) + namespaceWatcher, err := client.kube.CoreV1().Namespaces().Watch(opts.Ctx, metav1.ListOptions{FieldSelector: excludeNamespaces, LabelSelector: opts.ExpressionSelector}) + if err != nil { + return nil, nil, err + } + namespaceChan := namespaceWatcher.ResultChan() + resourceNamespaceChan := make(chan resources.ResourceNamespaceList) + errorChannel := make(chan error) + go func() { + defer func() { + close(resourceNamespaceChan) + close(errorChannel) + }() + for { + select { + case <-opts.Ctx.Done(): + return + case event, ok := <-namespaceChan: + if !ok { + return + } + switch event.Type { + case kubewatch.Error: + errorChannel <- errors.Errorf("error with the event from watching namespaces: %v", event) + return + default: + resourceNamespaceList, err := client.GetResourceNamespaceList(resources.ResourceNamespaceListOptions{ + Ctx: opts.Ctx, + ExpressionSelector: opts.ExpressionSelector, + }, filtered) + if err != nil { + errorChannel <- errors.Wrap(err, "error getting the list of resource namespaces while watching") + return + } + resourceNamespaceChan <- resourceNamespaceList + } + } + } + }() + return resourceNamespaceChan, errorChannel, nil +} + +func (client *kubeClientResourceNamespaceLister) getExcludeFieldSelector(filtered resources.ResourceNamespaceList) string { + // you can filter the namespaces by using metadata.name for more information on field selectors + // https://kubernetes.io/docs/concepts/overview/working-with-objects/field-selectors/ + var buffer bytes.Buffer + for i, rns := range filtered { + ns := rns.Name + if ns != "" { + buffer.WriteString("metadata.name!=") + buffer.WriteString(ns) + if i < len(filtered)-1 { + buffer.WriteByte(',') + } + } + } + return buffer.String() +} + +func convertNamespaceListToResourceNamespaceList(namespaceList *kubev1.NamespaceList) resources.ResourceNamespaceList { + resourceNamespaces := make(resources.ResourceNamespaceList, len(namespaceList.Items)) + for i, item := range namespaceList.Items { + ns := item.Name + resourceNamespaces[i] = resources.ResourceNamespace{Name: ns} + } + return resourceNamespaces +} diff --git a/pkg/api/external/kubernetes/pod/resource_client.go b/pkg/api/external/kubernetes/pod/resource_client.go index e7235f4ae..664b9ece8 100644 --- a/pkg/api/external/kubernetes/pod/resource_client.go +++ b/pkg/api/external/kubernetes/pod/resource_client.go @@ -66,6 +66,10 @@ func ToKubePod(resource resources.Resource) (*kubev1.Pod, error) { var _ clients.ResourceClient = &podResourceClient{} +func (rc *podResourceClient) RegisterNamespace(namespace string) error { + return rc.cache.RegisterNewNamespaceCache(namespace) +} + func (rc *podResourceClient) Read(namespace, name string, opts clients.ReadOpts) (resources.Resource, error) { if err := resources.ValidateName(name); err != nil { return nil, errors.Wrapf(err, "validation error") diff --git a/pkg/api/external/kubernetes/service/resource_client.go b/pkg/api/external/kubernetes/service/resource_client.go index 1f453b914..e93b5bc62 100644 --- 
a/pkg/api/external/kubernetes/service/resource_client.go +++ b/pkg/api/external/kubernetes/service/resource_client.go @@ -66,6 +66,10 @@ func ToKubeService(resource resources.Resource) (*kubev1.Service, error) { var _ clients.ResourceClient = &serviceResourceClient{} +func (rc *serviceResourceClient) RegisterNamespace(namespace string) error { + return rc.cache.RegisterNewNamespaceCache(namespace) +} + func (rc *serviceResourceClient) Read(namespace, name string, opts clients.ReadOpts) (resources.Resource, error) { if err := resources.ValidateName(name); err != nil { return nil, errors.Wrapf(err, "validation error") diff --git a/pkg/api/v1/clients/apiclient/resource_client.go b/pkg/api/v1/clients/apiclient/resource_client.go index c73a20ea5..2ab606c1c 100644 --- a/pkg/api/v1/clients/apiclient/resource_client.go +++ b/pkg/api/v1/clients/apiclient/resource_client.go @@ -47,6 +47,10 @@ func (rc *ResourceClient) Register() error { return nil } +func (rc *ResourceClient) RegisterNamespace(namespace string) error { + return nil +} + func (rc *ResourceClient) Read(namespace, name string, opts clients.ReadOpts) (resources.Resource, error) { if err := resources.ValidateName(name); err != nil { return nil, errors.Wrapf(err, "validation error") diff --git a/pkg/api/v1/clients/client_interface.go b/pkg/api/v1/clients/client_interface.go index 6dbe5e19e..c1c8fb399 100644 --- a/pkg/api/v1/clients/client_interface.go +++ b/pkg/api/v1/clients/client_interface.go @@ -32,6 +32,7 @@ type ResourceClient interface { NewResource() resources.Resource // Deprecated: implemented only by the kubernetes resource client. Will be removed from the interface. Register() error + RegisterNamespace(namespace string) error Read(namespace, name string, opts ReadOpts) (resources.Resource, error) Write(resource resources.Resource, opts WriteOpts) (resources.Resource, error) Delete(namespace, name string, opts DeleteOpts) error diff --git a/pkg/api/v1/clients/common/common.go b/pkg/api/v1/clients/common/common.go index ab4eab816..50f07f4f6 100644 --- a/pkg/api/v1/clients/common/common.go +++ b/pkg/api/v1/clients/common/common.go @@ -37,10 +37,8 @@ func KubeResourceWatch(cache cache.Cache, listFunc ResourceListFunc, namespace s // prevent flooding the channel with duplicates var previous *resources.ResourceList updateResourceList := func() { - list, err := listFunc(namespace, clients.ListOpts{ - Ctx: opts.Ctx, - Selector: opts.Selector, - }) + lopts := clients.TranslateWatchOptsIntoListOpts(opts) + list, err := listFunc(namespace, lopts) if err != nil { errs <- err return diff --git a/pkg/api/v1/clients/configmap/resource_client.go b/pkg/api/v1/clients/configmap/resource_client.go index c97cc235e..9a04d6e8f 100644 --- a/pkg/api/v1/clients/configmap/resource_client.go +++ b/pkg/api/v1/clients/configmap/resource_client.go @@ -54,6 +54,10 @@ func NewResourceClientWithConverter(kube kubernetes.Interface, resourceType reso var _ clients.ResourceClient = &ResourceClient{} +func (rc *ResourceClient) RegisterNamespace(namespace string) error { + return rc.cache.RegisterNewNamespaceCache(namespace) +} + func (rc *ResourceClient) Read(namespace, name string, opts clients.ReadOpts) (resources.Resource, error) { if err := resources.ValidateName(name); err != nil { return nil, errors.Wrapf(err, "validation error") diff --git a/pkg/api/v1/clients/consul/resource_client.go b/pkg/api/v1/clients/consul/resource_client.go index c442a4873..5671d051f 100644 --- a/pkg/api/v1/clients/consul/resource_client.go +++ 
b/pkg/api/v1/clients/consul/resource_client.go @@ -51,6 +51,10 @@ func (rc *ResourceClient) Register() error { return nil } +func (rc *ResourceClient) RegisterNamespace(ns string) error { + return nil +} + func (rc *ResourceClient) Read(namespace, name string, opts clients.ReadOpts) (resources.Resource, error) { if err := resources.ValidateName(name); err != nil { return nil, errors.Wrapf(err, "validation error") diff --git a/pkg/api/v1/clients/file/resource_client.go b/pkg/api/v1/clients/file/resource_client.go index 1b4adc745..22f06e594 100644 --- a/pkg/api/v1/clients/file/resource_client.go +++ b/pkg/api/v1/clients/file/resource_client.go @@ -53,6 +53,10 @@ func (rc *ResourceClient) Register() error { return nil } +func (rc *ResourceClient) RegisterNamespace(namespace string) error { + return nil +} + func (rc *ResourceClient) Read(namespace, name string, opts clients.ReadOpts) (resources.Resource, error) { if err := resources.ValidateName(name); err != nil { return nil, errors.Wrapf(err, "validation error") diff --git a/pkg/api/v1/clients/kube/cache/cache.go b/pkg/api/v1/clients/kube/cache/cache.go index 8c90eb3e1..1c682dbc6 100644 --- a/pkg/api/v1/clients/kube/cache/cache.go +++ b/pkg/api/v1/clients/kube/cache/cache.go @@ -23,6 +23,12 @@ import ( "k8s.io/client-go/tools/cache" ) +// onceAndSent is used to capture errs made by once functions +type onceAndSent struct { + Err error + Once *sync.Once +} + type ServiceLister interface { // List lists all Services in the indexer. List(selector labels.Selector) (ret []*v1.Service, err error) @@ -52,6 +58,9 @@ type KubeCoreCache interface { Cache clustercache.ClusterCache + // RegisterNewNamespaceCache will register the namespace so that the resources + // are available in the cache listers. + RegisterNewNamespaceCache(ns string) error // Deprecated: Use NamespacedPodLister instead PodLister() kubelisters.PodLister // Deprecated: Use NamespacedServiceLister instead @@ -75,7 +84,20 @@ type kubeCoreCaches struct { configMapListers map[string]kubelisters.ConfigMapLister secretListers map[string]kubelisters.SecretLister namespaceLister kubelisters.NamespaceLister - + // ctx is the context of the cache + ctx context.Context + // client kubernetes client + client kubernetes.Interface + // kubeController is the controller used to start the informers, and is used to + // watch events that occur on the informers. This is used to send information back to the + // [resource]listers. + kubeController *controller.Controller + // resyncDuration is the time + resyncDuration time.Duration + // informers are the kube resources that provide events + informers []cache.SharedIndexInformer + // registerNamespaceLock is a map string(namespace) -> sync.Once. Is used to register namespaces only once. + registerNamespaceLock sync.Map cacheUpdatedWatchers []chan struct{} cacheUpdatedWatchersMutex sync.Mutex } @@ -96,13 +118,14 @@ func NewCoreCacheForConfig(ctx context.Context, cluster string, restConfig *rest var _ clustercache.NewClusterCacheForConfig = NewCoreCacheForConfig +// NewFromConfigWithOptions creates a new Cluster Cahce For Config function. The function will create the namespace lister by default. 
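A minimal sketch, assuming a kubeconfig on disk, of constructing the core cache directly with the new createNamespaceLister flag used by the options constructors below; the resync period and namespace names are placeholders.

package main

import (
	"context"
	"time"

	kubecache "github.com/solo-io/solo-kit/pkg/api/v1/clients/kube/cache"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	kube := kubernetes.NewForConfigOrDie(cfg)

	// Watch two namespaces up front; passing true also builds the namespace lister.
	cc, err := kubecache.NewKubeCoreCacheWithOptions(context.Background(), kube, 12*time.Hour, []string{"gloo-system", "default"}, true)
	if err != nil {
		panic(err)
	}
	_ = cc // handed to resource clients / snapshot emitters elsewhere
}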
func NewFromConfigWithOptions(resyncDuration time.Duration, namesapcesToWatch []string) clustercache.NewClusterCacheForConfig { return func(ctx context.Context, cluster string, restConfig *rest.Config) clustercache.ClusterCache { kubeClient, err := kubernetes.NewForConfig(restConfig) if err != nil { return nil } - c, err := NewKubeCoreCacheWithOptions(ctx, kubeClient, resyncDuration, namesapcesToWatch) + c, err := NewKubeCoreCacheWithOptions(ctx, kubeClient, resyncDuration, namesapcesToWatch, true) if err != nil { return nil } @@ -110,14 +133,17 @@ func NewFromConfigWithOptions(resyncDuration time.Duration, namesapcesToWatch [] } } +// NewKubeCoreCache will create a new kube Core Caches. The namespace lister is created from this function. // This context should live as long as the cache is desired. i.e. if the cache is shared // across clients, it should get a context that has a longer lifetime than the clients themselves func NewKubeCoreCache(ctx context.Context, client kubernetes.Interface) (*kubeCoreCaches, error) { resyncDuration := 12 * time.Hour - return NewKubeCoreCacheWithOptions(ctx, client, resyncDuration, []string{metav1.NamespaceAll}) + return NewKubeCoreCacheWithOptions(ctx, client, resyncDuration, []string{metav1.NamespaceAll}, true) } -func NewKubeCoreCacheWithOptions(ctx context.Context, client kubernetes.Interface, resyncDuration time.Duration, namesapcesToWatch []string) (*kubeCoreCaches, error) { +// NewKubeCoreCacheWithOptions will create a new kube Core Cache. By setting the +// createNamespaceLister the namespace lister will be created. +func NewKubeCoreCacheWithOptions(ctx context.Context, client kubernetes.Interface, resyncDuration time.Duration, namesapcesToWatch []string, createNamespaceLister bool) (*kubeCoreCaches, error) { if len(namesapcesToWatch) == 0 { namesapcesToWatch = []string{metav1.NamespaceAll} @@ -136,107 +162,31 @@ func NewKubeCoreCacheWithOptions(ctx context.Context, client kubernetes.Interfac configMaps := map[string]kubelisters.ConfigMapLister{} secrets := map[string]kubelisters.SecretLister{} - for _, nsToWatch := range namesapcesToWatch { - nsToWatch := nsToWatch - nsCtx := ctx - if ctxWithTags, err := tag.New(nsCtx, tag.Insert(skkube.KeyNamespaceKind, skkube.NotEmptyValue(nsToWatch))); err == nil { - nsCtx = ctxWithTags - } - - { - var typeCtx = nsCtx - if ctxWithTags, err := tag.New(nsCtx, tag.Insert(skkube.KeyKind, "Pods")); err == nil { - typeCtx = ctxWithTags - } - // Pods - watch := client.CoreV1().Pods(nsToWatch).Watch - list := func(options metav1.ListOptions) (runtime.Object, error) { - return client.CoreV1().Pods(nsToWatch).List(ctx, options) - } - informer := skkube.NewSharedInformer(typeCtx, resyncDuration, &v1.Pod{}, list, watch) - informers = append(informers, informer) - lister := kubelisters.NewPodLister(informer.GetIndexer()) - pods[nsToWatch] = lister - } - { - var typeCtx = nsCtx - if ctxWithTags, err := tag.New(nsCtx, tag.Insert(skkube.KeyKind, "Services")); err == nil { - typeCtx = ctxWithTags - } - // Services - watch := client.CoreV1().Services(nsToWatch).Watch - list := func(options metav1.ListOptions) (runtime.Object, error) { - return client.CoreV1().Services(nsToWatch).List(ctx, options) - } - informer := skkube.NewSharedInformer(typeCtx, resyncDuration, &v1.Service{}, list, watch) - informers = append(informers, informer) - lister := kubelisters.NewServiceLister(informer.GetIndexer()) - services[nsToWatch] = lister - } - { - var typeCtx = nsCtx - if ctxWithTags, err := tag.New(nsCtx, tag.Insert(skkube.KeyKind, 
"ConfigMap")); err == nil { - typeCtx = ctxWithTags - } - // ConfigMap - watch := client.CoreV1().ConfigMaps(nsToWatch).Watch - list := func(options metav1.ListOptions) (runtime.Object, error) { - return client.CoreV1().ConfigMaps(nsToWatch).List(ctx, options) - } - informer := skkube.NewSharedInformer(typeCtx, resyncDuration, &v1.ConfigMap{}, list, watch) - informers = append(informers, informer) - lister := kubelisters.NewConfigMapLister(informer.GetIndexer()) - configMaps[nsToWatch] = lister - } - { - var typeCtx = nsCtx - if ctxWithTags, err := tag.New(nsCtx, tag.Insert(skkube.KeyKind, "Secrets")); err == nil { - typeCtx = ctxWithTags - } - // Secrets - watch := client.CoreV1().Secrets(nsToWatch).Watch - list := func(options metav1.ListOptions) (runtime.Object, error) { - return client.CoreV1().Secrets(nsToWatch).List(ctx, options) - } - informer := skkube.NewSharedInformer(typeCtx, resyncDuration, &v1.Secret{}, list, watch) - informers = append(informers, informer) - lister := kubelisters.NewSecretLister(informer.GetIndexer()) - secrets[nsToWatch] = lister - } - - } - - var namespaceLister kubelisters.NamespaceLister - if len(namesapcesToWatch) == 1 && namesapcesToWatch[0] == metav1.NamespaceAll { - - // Pods - watch := client.CoreV1().Namespaces().Watch - list := func(options metav1.ListOptions) (runtime.Object, error) { - return client.CoreV1().Namespaces().List(ctx, options) - } - nsCtx := ctx - if ctxWithTags, err := tag.New(nsCtx, tag.Insert(skkube.KeyNamespaceKind, skkube.NotEmptyValue(metav1.NamespaceAll)), tag.Insert(skkube.KeyKind, "Namespaces")); err == nil { - nsCtx = ctxWithTags - } - informer := skkube.NewSharedInformer(nsCtx, resyncDuration, &v1.Namespace{}, list, watch) - informers = append(informers, informer) - namespaceLister = kubelisters.NewNamespaceLister(informer.GetIndexer()) - } - k := &kubeCoreCaches{ podListers: pods, serviceListers: services, configMapListers: configMaps, secretListers: secrets, - namespaceLister: namespaceLister, + client: client, + ctx: ctx, + resyncDuration: resyncDuration, + informers: informers, + } + + for _, nsToWatch := range namesapcesToWatch { + k.addNewNamespace(nsToWatch) + } + + if createNamespaceLister { + k.addNamespaceLister() } - kubeController := controller.NewController("kube-plugin-controller", - controller.NewLockingSyncHandler(k.updatedOccured), informers..., + k.kubeController = controller.NewController("kube-plugin-controller", + controller.NewLockingSyncHandler(k.updatedOccured), k.informers..., ) stop := ctx.Done() - err := kubeController.Run(2, stop) + err := k.kubeController.Run(2, stop) if err != nil { return nil, err } @@ -244,6 +194,106 @@ func NewKubeCoreCacheWithOptions(ctx context.Context, client kubernetes.Interfac return k, nil } +func (c *kubeCoreCaches) addPod(namespace string, typeCtx context.Context) cache.SharedIndexInformer { + if ctxWithTags, err := tag.New(typeCtx, tag.Insert(skkube.KeyKind, "Pods")); err == nil { + typeCtx = ctxWithTags + } + watch := c.client.CoreV1().Pods(namespace).Watch + list := func(options metav1.ListOptions) (runtime.Object, error) { + return c.client.CoreV1().Pods(namespace).List(c.ctx, options) + } + informer := skkube.NewSharedInformer(typeCtx, c.resyncDuration, &v1.Pod{}, list, watch) + c.informers = append(c.informers, informer) + lister := kubelisters.NewPodLister(informer.GetIndexer()) + c.podListers[namespace] = lister + return informer +} + +func (c *kubeCoreCaches) addService(namespace string, typeCtx context.Context) cache.SharedIndexInformer { + if ctxWithTags, 
err := tag.New(typeCtx, tag.Insert(skkube.KeyKind, "Services")); err == nil { + typeCtx = ctxWithTags + } + watch := c.client.CoreV1().Services(namespace).Watch + list := func(options metav1.ListOptions) (runtime.Object, error) { + return c.client.CoreV1().Services(namespace).List(c.ctx, options) + } + informer := skkube.NewSharedInformer(typeCtx, c.resyncDuration, &v1.Service{}, list, watch) + c.informers = append(c.informers, informer) + lister := kubelisters.NewServiceLister(informer.GetIndexer()) + c.serviceListers[namespace] = lister + return informer +} + +func (c *kubeCoreCaches) addConfigMap(namespace string, typeCtx context.Context) cache.SharedIndexInformer { + if ctxWithTags, err := tag.New(typeCtx, tag.Insert(skkube.KeyKind, "ConfigMap")); err == nil { + typeCtx = ctxWithTags + } + watch := c.client.CoreV1().ConfigMaps(namespace).Watch + list := func(options metav1.ListOptions) (runtime.Object, error) { + return c.client.CoreV1().ConfigMaps(namespace).List(c.ctx, options) + } + informer := skkube.NewSharedInformer(typeCtx, c.resyncDuration, &v1.ConfigMap{}, list, watch) + c.informers = append(c.informers, informer) + lister := kubelisters.NewConfigMapLister(informer.GetIndexer()) + c.configMapListers[namespace] = lister + return informer +} + +func (c *kubeCoreCaches) addSecret(namespace string, typeCtx context.Context) cache.SharedIndexInformer { + if ctxWithTags, err := tag.New(typeCtx, tag.Insert(skkube.KeyKind, "Secrets")); err == nil { + typeCtx = ctxWithTags + } + watch := c.client.CoreV1().Secrets(namespace).Watch + list := func(options metav1.ListOptions) (runtime.Object, error) { + return c.client.CoreV1().Secrets(namespace).List(c.ctx, options) + } + informer := skkube.NewSharedInformer(typeCtx, c.resyncDuration, &v1.Secret{}, list, watch) + c.informers = append(c.informers, informer) + lister := kubelisters.NewSecretLister(informer.GetIndexer()) + c.secretListers[namespace] = lister + return informer +} + +func (c *kubeCoreCaches) addNamespaceLister() { + watch := c.client.CoreV1().Namespaces().Watch + list := func(options metav1.ListOptions) (runtime.Object, error) { + return c.client.CoreV1().Namespaces().List(c.ctx, options) + } + nsCtx := c.ctx + if ctxWithTags, err := tag.New(nsCtx, tag.Insert(skkube.KeyNamespaceKind, skkube.NotEmptyValue(metav1.NamespaceAll)), tag.Insert(skkube.KeyKind, "Namespaces")); err == nil { + nsCtx = ctxWithTags + } + informer := skkube.NewSharedInformer(nsCtx, c.resyncDuration, &v1.Namespace{}, list, watch) + c.informers = append(c.informers, informer) + c.namespaceLister = kubelisters.NewNamespaceLister(informer.GetIndexer()) +} + +func (k *kubeCoreCaches) addNewNamespace(namespace string) []cache.SharedIndexInformer { + nsCtx := k.ctx + if ctxWithTags, err := tag.New(k.ctx, tag.Insert(skkube.KeyNamespaceKind, skkube.NotEmptyValue(namespace))); err == nil { + nsCtx = ctxWithTags + } + podInformer := k.addPod(namespace, nsCtx) + serviceInformer := k.addService(namespace, nsCtx) + configMapInformer := k.addConfigMap(namespace, nsCtx) + secretInformer := k.addSecret(namespace, nsCtx) + return []cache.SharedIndexInformer{podInformer, serviceInformer, configMapInformer, secretInformer} +} + +// RegisterNewNamespaceCache will create the cache informers for each resource type +// this will add the informer to the kube controller so that events can be watched. 
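A hedged sketch of the registration flow documented above, assuming an already-running KubeCoreCache; the namespace name is a placeholder. Registration is guarded by a per-namespace sync.Once, so repeated calls are no-ops that return the first error, if any.

package example

import (
	"fmt"

	kubecache "github.com/solo-io/solo-kit/pkg/api/v1/clients/kube/cache"
	"k8s.io/apimachinery/pkg/labels"
)

// watchNewTeamNamespace registers a namespace discovered at runtime and reads
// pods back from the freshly created namespaced lister.
func watchNewTeamNamespace(cc kubecache.KubeCoreCache) error {
	if err := cc.RegisterNewNamespaceCache("team-a"); err != nil {
		return err
	}
	pods, err := cc.NamespacedPodLister("team-a").List(labels.Everything())
	if err != nil {
		return err
	}
	fmt.Println("pods cached for team-a:", len(pods))
	return nil
}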
+func (k *kubeCoreCaches) RegisterNewNamespaceCache(namespace string) error { + once, _ := k.registerNamespaceLock.LoadOrStore(namespace, &onceAndSent{Once: &sync.Once{}}) + onceFunc := once.(*onceAndSent) + onceFunc.Once.Do(func() { + informers := k.addNewNamespace(namespace) + if err := k.kubeController.AddNewOfInformers(informers...); err != nil { + onceFunc.Err = errors.Wrapf(err, "failed to add new list of informers to kube controller") + } + }) + return onceFunc.Err +} + // Deprecated: Use NamespacedPodLister instead func (k *kubeCoreCaches) PodLister() kubelisters.PodLister { return k.podListers[metav1.NamespaceAll] diff --git a/pkg/api/v1/clients/kube/cache/cache_test.go b/pkg/api/v1/clients/kube/cache/cache_test.go index a0cf5eeb0..fc9401d95 100644 --- a/pkg/api/v1/clients/kube/cache/cache_test.go +++ b/pkg/api/v1/clients/kube/cache/cache_test.go @@ -35,6 +35,25 @@ var _ = Describe("kube core cache tests", func() { selectors = labels.SelectorFromSet(make(map[string]string)) ) + createNamespaceAndResource := func(namespace string) { + _, err := client.CoreV1().Namespaces().Create(ctx, &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + _, err = client.CoreV1().ConfigMaps(namespace).Create(ctx, &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "cfg"}}, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + } + + validateNamespaceResource := func(namespace string) { + _, err := cache.NamespacedPodLister(namespace).List(selectors) + Expect(err).NotTo(HaveOccurred()) + cfgMap, err := cache.NamespacedConfigMapLister(namespace).List(selectors) + Expect(err).NotTo(HaveOccurred()) + cfgMap = cleanConfigMaps(cfgMap) + _, err = cache.NamespacedSecretLister(namespace).List(selectors) + Expect(err).NotTo(HaveOccurred()) + Expect(cfgMap).To(HaveLen(1)) + Expect(cfgMap[0].Namespace).To(Equal(namespace)) + } + BeforeEach(func() { var err error cfg, err = kubeutils.GetConfig("", "") @@ -85,12 +104,12 @@ var _ = Describe("kube core cache tests", func() { BeforeEach(func() { var err error - cache, err = NewKubeCoreCacheWithOptions(ctx, client, time.Hour, []string{"default"}) + cache, err = NewKubeCoreCacheWithOptions(ctx, client, time.Hour, []string{"default"}, true) Expect(err).NotTo(HaveOccurred()) }) It("can list resources for all listers", func() { - Expect(cache.NamespaceLister()).To(BeNil()) + Expect(cache.NamespaceLister()).ToNot(BeNil()) _, err := cache.NamespacedPodLister("default").List(selectors) Expect(err).NotTo(HaveOccurred()) _, err = cache.NamespacedConfigMapLister("default").List(selectors) @@ -111,14 +130,10 @@ var _ = Describe("kube core cache tests", func() { testns = fmt.Sprintf("test-%d", randomvalue) testns2 = fmt.Sprintf("test2-%d", randomvalue) for _, ns := range []string{testns, testns2} { - _, err := client.CoreV1().Namespaces().Create(ctx, &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns}}, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) - _, err = client.CoreV1().ConfigMaps(ns).Create(ctx, &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "cfg"}}, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) - + createNamespaceAndResource(ns) } var err error - cache, err = NewKubeCoreCacheWithOptions(ctx, client, time.Hour, []string{testns, testns2}) + cache, err = NewKubeCoreCacheWithOptions(ctx, client, time.Hour, []string{testns, testns2}, false) Expect(err).NotTo(HaveOccurred()) }) @@ -130,37 +145,72 @@ var _ = Describe("kube core cache tests", func() { It("can list 
resources for all listers", func() { Expect(cache.NamespaceLister()).To(BeNil()) - _, err := cache.NamespacedPodLister(testns).List(selectors) - Expect(err).NotTo(HaveOccurred()) - cfgMaps, err := cache.NamespacedConfigMapLister(testns).List(selectors) - Expect(err).NotTo(HaveOccurred()) - cfgMaps = cleanConfigMaps(cfgMaps) - _, err = cache.NamespacedSecretLister(testns).List(selectors) - Expect(err).NotTo(HaveOccurred()) - - Expect(cache.NamespaceLister()).To(BeNil()) - _, err = cache.NamespacedPodLister(testns2).List(selectors) - Expect(err).NotTo(HaveOccurred()) - cfgMaps2, err := cache.NamespacedConfigMapLister(testns2).List(selectors) - Expect(err).NotTo(HaveOccurred()) - cfgMaps2 = cleanConfigMaps(cfgMaps2) - _, err = cache.NamespacedSecretLister(testns2).List(selectors) - Expect(err).NotTo(HaveOccurred()) - - Expect(cfgMaps).To(HaveLen(1)) - Expect(cfgMaps2).To(HaveLen(1)) - Expect(cfgMaps[0].Namespace).To(Equal(testns)) - Expect(cfgMaps2[0].Namespace).To(Equal(testns2)) + validateNamespaceResource(testns) + validateNamespaceResource(testns2) }) }) Context("Invalid namespaces", func() { It("should error with invalid namespace config", func() { var err error - _, err = NewKubeCoreCacheWithOptions(ctx, client, time.Hour, []string{"default", ""}) + _, err = NewKubeCoreCacheWithOptions(ctx, client, time.Hour, []string{"default", ""}, true) Expect(err).To(HaveOccurred()) }) }) + Context("Register a new namespace", func() { + var ( + initialNs string + registeredNs string + ) + + BeforeEach(func() { + randomvalue := rand.Int31() + initialNs = fmt.Sprintf("initial-%d", randomvalue) + registeredNs = fmt.Sprintf("registered-%d", randomvalue) + + createNamespaceAndResource(initialNs) + + var err error + cache, err = NewKubeCoreCacheWithOptions(ctx, client, time.Hour, []string{initialNs}, true) + Expect(err).NotTo(HaveOccurred()) + }) + + AfterEach(func() { + client.CoreV1().Namespaces().Delete(ctx, initialNs, metav1.DeleteOptions{}) + client.CoreV1().Namespaces().Delete(ctx, registeredNs, metav1.DeleteOptions{}) + }) + + It("should be able to register a new namespace", func() { + createNamespaceAndResource(registeredNs) + + err := cache.RegisterNewNamespaceCache(registeredNs) + Expect(err).NotTo(HaveOccurred()) + + validateNamespaceResource(initialNs) + validateNamespaceResource(registeredNs) + }) + + It("should be able to register a new namespace after the namespace was previously registered then deleted", func() { + createNamespaceAndResource(registeredNs) + + err := cache.RegisterNewNamespaceCache(registeredNs) + Expect(err).NotTo(HaveOccurred()) + + validateNamespaceResource(initialNs) + validateNamespaceResource(registeredNs) + + client.CoreV1().Namespaces().Delete(ctx, registeredNs, metav1.DeleteOptions{}) + // let the namespace be deleted + Eventually(func() bool { + _, err := client.CoreV1().Namespaces().Get(ctx, registeredNs, metav1.GetOptions{}) + return err != nil + }, 10*time.Second, time.Second).Should(BeTrue()) + createNamespaceAndResource(registeredNs) + // have to ensure that the configmap is created in the new namespace + time.Sleep(50 * time.Millisecond) + validateNamespaceResource(registeredNs) + }) + }) }) }) }) diff --git a/pkg/api/v1/clients/kube/controller/controller.go b/pkg/api/v1/clients/kube/controller/controller.go index 43551f3ef..3a4ef8da6 100644 --- a/pkg/api/v1/clients/kube/controller/controller.go +++ b/pkg/api/v1/clients/kube/controller/controller.go @@ -16,6 +16,7 @@ import ( type Controller struct { name string + // informers are the caching indexes used to 
retrieve events of add,update, and deletes from. informers []cache.SharedIndexInformer // WorkQueue is a rate limited work queue. This is used to queue work to be @@ -27,6 +28,13 @@ type Controller struct { // handler to call handler cache.ResourceEventHandler + // suncFunctions of the informers used to ensure that the informers + // are set up and ready to transmit information to the controller. + syncFunctions []cache.InformerSynced + // stopCh is used to stop all the go routines of the controller. + stopCh <-chan struct{} + // isRunning this flag is used to identify when the controller is running, or not. + isRunning bool } // Returns a new kubernetes controller without starting it. @@ -55,31 +63,25 @@ func NewController( func (c *Controller) Run(parallelism int, stopCh <-chan struct{}) error { defer runtime.HandleCrash() + c.stopCh = stopCh + log.Debugf("Starting %v controller", c.name) // For each informer - var syncFunctions []cache.InformerSynced for _, informer := range c.informers { - - // 1. Get the function to tell if it has synced - syncFunctions = append(syncFunctions, informer.HasSynced) - - // 2. Register the event handler with the informer - informer.AddEventHandler(c.eventHandlerFunctions()) - - // 3. Run the informer - go informer.Run(stopCh) + c.setupInformer(informer) } - // Wait for all the informer caches to be synced before starting workers - log.Debugf("Waiting for informer caches to sync") - if ok := cache.WaitForCacheSync(stopCh, []cache.InformerSynced(syncFunctions)...); !ok { - return fmt.Errorf("error while waiting for caches to sync") + if err := c.waitForCacheToSync(); err != nil { + return err } // Start workers in goroutine so we can defer the queue shutdown go func() { - defer c.workQueue.ShutDown() + defer func() { + c.workQueue.ShutDown() + c.isRunning = false + }() log.Debugf("Starting workers") // Launch parallel workers to process resources @@ -93,7 +95,42 @@ func (c *Controller) Run(parallelism int, stopCh <-chan struct{}) error { <-stopCh log.Debugf("Stopping workers") }() + c.isRunning = true + return nil +} +// Wait for all the informer caches to be synced before starting workers +func (c *Controller) waitForCacheToSync() error { + log.Debugf("Waiting for informer caches to sync") + if ok := cache.WaitForCacheSync(c.stopCh, []cache.InformerSynced(c.syncFunctions)...); !ok { + return fmt.Errorf("error while waiting for caches to sync") + } + return nil +} + +// setupInformer +// 1. Get the function to tell if it has synced +// 2. Register the event handler with the informer +// 3. Run the informer +func (c *Controller) setupInformer(informer cache.SharedIndexInformer) { + c.syncFunctions = append(c.syncFunctions, informer.HasSynced) + informer.AddEventHandler(c.eventHandlerFunctions()) + go informer.Run(c.stopCh) +} + +// AddNewOfInformers will add a list of new informers to the already running controller. +// If the controller is not running, it will just append the informers to the controllers +// list of informers +func (c *Controller) AddNewOfInformers(newInformers ...cache.SharedIndexInformer) error { + c.informers = append(c.informers, newInformers...) 
+ if c.isRunning { + for _, in := range newInformers { + c.setupInformer(in) + } + if err := c.waitForCacheToSync(); err != nil { + return err + } + } return nil } diff --git a/pkg/api/v1/clients/kube/controller/controller_test.go b/pkg/api/v1/clients/kube/controller/controller_test.go index 24a381245..8f23c5cc8 100644 --- a/pkg/api/v1/clients/kube/controller/controller_test.go +++ b/pkg/api/v1/clients/kube/controller/controller_test.go @@ -2,6 +2,7 @@ package controller_test import ( "context" + "fmt" "time" . "github.com/onsi/ginkgo" @@ -43,6 +44,29 @@ var _ = Describe("Test KubeController", func() { err error ) + const ( + name1 = "res-1" + value1 = "test" + name2 = "res-2" + value2 = "secondNamespaceValue" + ) + + getResultFromkMockResource := func(namespace, name, value string) { + select { + case res := <-resultChan: + Expect(res.Namespace).To(BeEquivalentTo(namespace)) + Expect(res.Name).To(BeEquivalentTo(name)) + Expect(res.Kind).To(BeEquivalentTo("MockResource")) + Expect(res.Spec).To(Not(BeNil())) + + fieldValue, ok := (*res.Spec)["someDumbField"] + Expect(ok).To(BeTrue()) + Expect(fieldValue).To(BeEquivalentTo(value)) + case <-time.After(50 * time.Millisecond): + Fail("timed out waiting for watch event") + } + } + BeforeEach(func() { clientset = fake.NewSimpleClientset(mocksv1.MockResourceCrd) resyncPeriod = time.Duration(0) @@ -76,23 +100,7 @@ var _ = Describe("Test KubeController", func() { err = util.CreateMockResource(ctx, clientset, namespace1, "res-1", "test") Expect(err).NotTo(HaveOccurred()) - for { - select { - case res := <-resultChan: - Expect(res.Namespace).To(BeEquivalentTo(namespace1)) - Expect(res.Name).To(BeEquivalentTo("res-1")) - Expect(res.Kind).To(BeEquivalentTo("MockResource")) - Expect(res.Spec).To(Not(BeNil())) - - fieldValue, ok := (*res.Spec)["someDumbField"] - Expect(ok).To(BeTrue()) - Expect(fieldValue).To(BeEquivalentTo("test")) - return - case <-time.After(50 * time.Millisecond): - Fail("timed out waiting for watch event") - return - } - } + getResultFromkMockResource(namespace1, name1, value1) }) It("does not react to events in a non relevant namespace", func() { @@ -107,6 +115,37 @@ var _ = Describe("Test KubeController", func() { Succeed() } }) + + It("can add new informers so that events can be received on the new informer", func() { + err = util.CreateMockResource(ctx, clientset, namespace1, name1, value1) + Expect(err).NotTo(HaveOccurred()) + + newInformer := cache.NewSharedIndexInformer( + listWatchForClientAndNamespace(ctx, clientset, namespace2), + &solov1.Resource{}, + resyncPeriod, + cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, + ) + + getResultFromkMockResource(namespace1, name1, value1) + + // create the second value that we want to look at, but do not set up the informer quit + // yet, we still want to ensure that the controller does not learn about the new resource + // until the new informer has been added to the kube controller + err = util.CreateMockResource(ctx, clientset, namespace2, name2, value2) + Expect(err).NotTo(HaveOccurred()) + + select { + case res := <-resultChan: + Fail(fmt.Sprintf("Should not have received the resource %s from Namespace %s as the informer has not yet been added to the KubeController yet", res.Name, res.Namespace)) + case <-time.After(100 * time.Millisecond): + } + + err = kubeController.AddNewOfInformers(newInformer) + Expect(err).NotTo(HaveOccurred()) + + getResultFromkMockResource(namespace2, name2, value2) + }) }) Context("controller is configured with a resync period", func() 
{ diff --git a/pkg/api/v1/clients/kube/resource_client.go b/pkg/api/v1/clients/kube/resource_client.go index d6409e89d..7e34e6b61 100644 --- a/pkg/api/v1/clients/kube/resource_client.go +++ b/pkg/api/v1/clients/kube/resource_client.go @@ -5,6 +5,7 @@ import ( "reflect" "sort" "strings" + "sync" "time" "github.com/solo-io/solo-kit/pkg/utils/specutils" @@ -25,7 +26,6 @@ import ( "go.opencensus.io/tag" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" ) var ( @@ -110,6 +110,7 @@ type ResourceClient struct { namespaceWhitelist []string // Will contain at least metaV1.NamespaceAll ("") resyncPeriod time.Duration resourceStatusUnmarshaler resources.StatusUnmarshaler + namespaceLock sync.Mutex } func NewResourceClient( @@ -154,6 +155,17 @@ func (rc *ResourceClient) Register() error { return rc.sharedCache.Register(rc) } +func (rc *ResourceClient) RegisterNamespace(namespace string) error { + err := rc.sharedCache.RegisterNewNamespace(namespace, rc) + if err != nil { + return err + } + rc.namespaceLock.Lock() + rc.namespaceWhitelist = append(rc.namespaceWhitelist, namespace) + rc.namespaceLock.Unlock() + return nil +} + func (rc *ResourceClient) Read(namespace, name string, opts clients.ReadOpts) (resources.Resource, error) { if err := resources.ValidateName(name); err != nil { return nil, errors.Wrapf(err, "validation error") @@ -298,7 +310,7 @@ func (rc *ResourceClient) List(namespace string, opts clients.ListOpts) (resourc return nil, err } - labelSelector, err := rc.getLabelSelector(opts) + labelSelector, err := clients.GetLabelSelector(opts) if err != nil { return nil, errors.Wrapf(err, "parsing label selector") } @@ -381,7 +393,6 @@ func (rc *ResourceClient) ApplyStatus(statusClient resources.StatusClient, input } func (rc *ResourceClient) Watch(namespace string, opts clients.WatchOpts) (<-chan resources.ResourceList, <-chan error, error) { - if err := rc.validateNamespace(namespace); err != nil { return nil, nil, err } @@ -458,16 +469,6 @@ func (rc *ResourceClient) Watch(namespace string, opts clients.WatchOpts) (<-cha return resourcesChan, errs, nil } -func (rc *ResourceClient) getLabelSelector(listOpts clients.ListOpts) (labels.Selector, error) { - // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#set-based-requirement - if listOpts.ExpressionSelector != "" { - return labels.Parse(listOpts.ExpressionSelector) - } - - // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#equality-based-requirement - return labels.SelectorFromSet(listOpts.Selector), nil -} - // Checks whether the group version kind of the given resource matches that of the client's underlying CRD: func (rc *ResourceClient) matchesClientGVK(resource v1.Resource) bool { return resource.GroupVersionKind().String() == rc.crd.GroupVersionKind().String() @@ -519,6 +520,8 @@ func (rc *ResourceClient) convertCrdToResource(resourceCrd *v1.Resource) (resour // Check whether the given namespace is in the whitelist or we allow all namespaces func (rc *ResourceClient) validateNamespace(namespace string) error { + rc.namespaceLock.Lock() + defer rc.namespaceLock.Unlock() if !stringutils.ContainsAny([]string{namespace, metav1.NamespaceAll}, rc.namespaceWhitelist) { return errors.Errorf("this client was not configured to access resources in the [%v] namespace. 
"+ "Allowed namespaces are %v", namespace, rc.namespaceWhitelist) diff --git a/pkg/api/v1/clients/kube/resource_client_factory.go b/pkg/api/v1/clients/kube/resource_client_factory.go index 9ba767173..468290d7f 100644 --- a/pkg/api/v1/clients/kube/resource_client_factory.go +++ b/pkg/api/v1/clients/kube/resource_client_factory.go @@ -55,6 +55,11 @@ var ( } ) +type onceAndSent struct { + Err error + Once *sync.Once +} + func init() { view.Register(ListCountView, WatchCountView) } @@ -64,6 +69,8 @@ type SharedCache interface { // Registers the client with the shared cache Register(rc *ResourceClient) error + // RegisterNewNamespace will register the client with a new namespace + RegisterNewNamespace(namespace string, rc *ResourceClient) error // Starts all informers in the factory's registry. Must be idempotent. Start() // Returns a lister for resources of the given type in the given namespace. @@ -97,6 +104,9 @@ func NewKubeSharedCacheForConfig(ctx context.Context, cluster string, restConfig // and, when started, creates a kubernetes controller that distributes notifications for changes to the watches that // have been added to the factory. // All direct operations on the ResourceClientSharedInformerFactory are synchronized. +// Note it might be best to use this as a Singleton instead of a Structure +// if a Singleton suggestion is proposed, then Register will no long be needed, +// and we can just use Register New Namespace. Also this would clear up some mutexes, that are being used. type ResourceClientSharedInformerFactory struct { // Contains all the informers managed by this factory registry *informerRegistry @@ -120,8 +130,14 @@ type ResourceClientSharedInformerFactory struct { // Determines how long the controller will wait for a watch channel to accept an event before aborting the delivery watchTimeout time.Duration - // Mutexes - lock sync.Mutex + // kubeController is the controller used to watch for events on the informers. It can be used to add new informers too. + kubeController *controller.Controller + // registerNamespaceLock is a map of string(namespace) -> Type -> sync.Once. It is used to register a new namespace Type. + registerNamespaceLock sync.Map + // registryLock is used when adding or getting information from the registry + registryLock sync.Mutex + // startingLock is used when checking isRunning or to lock starting of the SharedInformer + startingLock sync.Mutex cacheUpdatedWatchersMutex sync.Mutex } @@ -132,6 +148,7 @@ func NotEmptyValue(ns string) string { return ns } +// NewSharedInformer creates a new Shared Index Informer with the list and watch template functions. 
func NewSharedInformer(ctx context.Context, resyncPeriod time.Duration, objType runtime.Object, listFunc func(options metav1.ListOptions) (runtime.Object, error), watchFunc func(context.Context, metav1.ListOptions) (kubewatch.Interface, error)) cache.SharedIndexInformer { @@ -144,7 +161,11 @@ func NewSharedInformer(ctx context.Context, resyncPeriod time.Duration, objType } stats.Record(listCtx, MLists.M(1), MInFlight.M(1)) defer stats.Record(listCtx, MInFlight.M(-1)) - return listFunc(options) + listOfResources, err := listFunc(options) + if err != nil { + contextutils.LoggerFrom(ctx).Error(errors.Wrapf(err, "listing crs from the resource client factory")) + } + return listOfResources, err }, WatchFunc: func(options metav1.ListOptions) (kubewatch.Interface, error) { watchCtx := ctx @@ -154,7 +175,11 @@ func NewSharedInformer(ctx context.Context, resyncPeriod time.Duration, objType stats.Record(watchCtx, MWatches.M(1), MInFlight.M(1)) defer stats.Record(watchCtx, MInFlight.M(-1)) - return watchFunc(ctx, options) + watches, err := watchFunc(ctx, options) + if err != nil { + contextutils.LoggerFrom(ctx).Error(errors.Wrapf(err, "watching crs from the resource client factory")) + } + return watches, err }, }, objType, @@ -164,55 +189,69 @@ func NewSharedInformer(ctx context.Context, resyncPeriod time.Duration, objType } // Creates a new SharedIndexInformer and adds it to the factory's informer registry. +// This method is meant to be called once per rc namespace set, please call Register New Namespace when adding new namespaces. // NOTE: Currently we cannot share informers between resource clients, because the listWatch functions are configured -// with the client's specific token. Hence, we must enforce a one-to-one relationship between informers and clients. +// with the client's specific token(resource client type). Hence, we must enforce a one-to-one relationship between informers and clients. func (f *ResourceClientSharedInformerFactory) Register(rc *ResourceClient) error { - f.lock.Lock() - defer f.lock.Unlock() - + // because we do not know that we have started yet or not, we need to make a lock + f.startingLock.Lock() ctx := f.ctx if f.started { - contextutils.LoggerFrom(ctx).Panic("can't register informer after factory has started. This may change in the future.") - } + f.startingLock.Unlock() + for _, ns := range rc.namespaceWhitelist { + if err := f.RegisterNewNamespace(ns, rc); err != nil { + return err + } + } + } else { + defer f.startingLock.Unlock() + if ctxWithTags, err := tag.New(ctx, tag.Insert(KeyKind, rc.resourceName)); err == nil { + ctx = ctxWithTags + } - if ctxWithTags, err := tag.New(ctx, tag.Insert(KeyKind, rc.resourceName)); err == nil { - ctx = ctxWithTags + // Create a shared informer for each of the given namespaces. + // NOTE: We do not distinguish between the value "" (all namespaces) and a regular namespace here. 
+ for _, ns := range rc.namespaceWhitelist { + // we want to make sure that we have registered this namespace + f.registerNamespaceLock.LoadOrStore(ns, &sync.Once{}) + if _, err := f.addNewNamespaceToRegistry(ctx, ns, rc); err != nil { + return err + } + } } + return nil +} + +// addNewNamespaceToRegistry will create a watch for the resource client type and namespace +func (f *ResourceClientSharedInformerFactory) addNewNamespaceToRegistry(ctx context.Context, ns string, rc *ResourceClient) (cache.SharedIndexInformer, error) { + f.registryLock.Lock() + defer f.registryLock.Unlock() + nsCtx := ctx resourceType := reflect.TypeOf(rc.crd.Version.Type) - namespaces := rc.namespaceWhitelist // will always contain at least one element + // get the resync value resyncPeriod := f.defaultResync if rc.resyncPeriod != 0 { resyncPeriod = rc.resyncPeriod } - // Create a shared informer for each of the given namespaces. - // NOTE: We do not distinguish between the value "" (all namespaces) and a regular namespace here. - for _, ns := range namespaces { - // copy to variable, so we can send it to closures - ns := ns - // To nip configuration errors in the bud, error if the registry already contains an informer for the given resource/namespace. - if f.registry.get(resourceType, ns) != nil { - return errors.Errorf("Shared cache already contains informer for resource [%v] and namespace [%v]", resourceType, ns) - - } - - nsCtx := ctx - if ctxWithTags, err := tag.New(nsCtx, tag.Insert(KeyNamespaceKind, NotEmptyValue(ns))); err == nil { - nsCtx = ctxWithTags - } - - resourceList := rc.crdClientset.ResourcesV1().Resources(ns).List - list := func(options metav1.ListOptions) (runtime.Object, error) { - return resourceList(ctx, options) - } - watch := rc.crdClientset.ResourcesV1().Resources(ns).Watch - sharedInformer := NewSharedInformer(nsCtx, resyncPeriod, &v1.Resource{}, list, watch) - f.registry.add(resourceType, ns, sharedInformer) + // To nip configuration errors in the bud, error if the registry already contains an informer for the given resource/namespace. + if f.registry.get(resourceType, ns) != nil { + return nil, errors.Errorf("Shared cache already contains informer for resource [%v] and namespace [%v]", resourceType, ns) + } + if ctxWithTags, err := tag.New(ctx, tag.Insert(KeyNamespaceKind, NotEmptyValue(ns))); err == nil { + nsCtx = ctxWithTags } - return nil + listResourceFunc := rc.crdClientset.ResourcesV1().Resources(ns).List + list := func(options metav1.ListOptions) (runtime.Object, error) { + return listResourceFunc(ctx, options) + } + watch := rc.crdClientset.ResourcesV1().Resources(ns).Watch + sharedInformer := NewSharedInformer(nsCtx, resyncPeriod, &v1.Resource{}, list, watch) + f.registry.add(resourceType, ns, sharedInformer) + return sharedInformer, nil } func (f *ResourceClientSharedInformerFactory) IsClusterCache() {} @@ -232,16 +271,22 @@ var cacheSyncTimeout = func() time.Duration { // Starts all informers in the factory's registry (if they have not yet been started) and configures the factory to call // the updateCallback function whenever any of the resources associated with the informers changes. func (f *ResourceClientSharedInformerFactory) Start() { + // if starting and managing the locks becomes to much a problem, we can start the factory + // when the factory is instantiated. That way we will not have multiple processes trying to Start() this structure + // which should be a singleton. 
// Guarantees that the factory will be started at most once f.factoryStarter.Do(func() { + f.startingLock.Lock() + defer f.startingLock.Unlock() ctx := f.ctx // Collect all registered informers + sharedInformers := f.registry.list() // Initialize a new kubernetes controller - kubeController := controller.NewController("solo-resource-controller", + f.kubeController = controller.NewController("solo-resource-controller", controller.NewLockingCallbackHandler(f.updatedOccurred), sharedInformers...) // Start the controller @@ -249,7 +294,7 @@ func (f *ResourceClientSharedInformerFactory) Start() { go func() { // If there is a problem with the ListWatch, the Run method might wait indefinitely for the informer caches // to sync, so we start it in a goroutine to be able to timeout. - runResult <- kubeController.Run(2, ctx.Done()) + runResult <- f.kubeController.Run(2, ctx.Done()) }() // Fail if the caches have not synchronized after 10 seconds. This prevents the controller from hanging forever. @@ -272,9 +317,42 @@ func (f *ResourceClientSharedInformerFactory) Start() { }) } +// RegisterNewNamespace is used while the factory is already running. This will add a new namespace to the +// kube controller so that events can be received. +func (f *ResourceClientSharedInformerFactory) RegisterNewNamespace(namespace string, rc *ResourceClient) error { + // because this is an exported function, RegisterNewNamespace could be called at any time. + // In that event, we want to make sure that the cache has started, because we have to + // initialize the default namespaces as well as this new namespace + f.Start() + + // we should only register a (namespace, type) once and only once + mapToTypes, _ := f.registerNamespaceLock.LoadOrStore(namespace, &sync.Map{}) + once, loaded := mapToTypes.(*sync.Map).LoadOrStore(reflect.TypeOf(rc.crd.Version.Type), &onceAndSent{Once: &sync.Once{}}) + onceSent := once.(*onceAndSent) + if loaded { + return onceSent.Err + } + onceSent.Once.Do(func() { + ctx := f.ctx + if ctxWithTags, err := tag.New(ctx, tag.Insert(KeyKind, rc.resourceName)); err == nil { + ctx = ctxWithTags + } + informer, err := f.addNewNamespaceToRegistry(ctx, namespace, rc) + if err != nil { + onceSent.Err = errors.Wrapf(err, "failed to add new namespace to registry") + return + } + if err := f.kubeController.AddNewOfInformers(informer); err != nil { + onceSent.Err = errors.Wrapf(err, "failed to add new informer to kube controller") + return + } + }) + return onceSent.Err +} + func (f *ResourceClientSharedInformerFactory) GetLister(namespace string, obj runtime.Object) (ResourceLister, error) { - f.lock.Lock() - defer f.lock.Unlock() + f.registryLock.Lock() + defer f.registryLock.Unlock() // If the factory (and hence the informers) have not been started, the list operations will be meaningless. // Will not happen in our current use of this, but since this method is public it's worth having this check. diff --git a/pkg/api/v1/clients/kube/resource_client_factory_test.go b/pkg/api/v1/clients/kube/resource_client_factory_test.go index 448dae3c7..daaeae20c 100644 --- a/pkg/api/v1/clients/kube/resource_client_factory_test.go +++ b/pkg/api/v1/clients/kube/resource_client_factory_test.go @@ -8,6 +8,7 @@ import ( . "github.com/onsi/ginkgo" .
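The (namespace, resource type) bookkeeping above is essentially a "run it once per key and remember the error" pattern. A minimal, de-templated sketch of that pattern for reference (onceWithErr and registerOnce are illustrative names, not identifiers from this patch):

package example

import "sync"

// onceWithErr remembers both that the work ran and what it returned.
type onceWithErr struct {
	once sync.Once
	err  error
}

// registerOnce runs register exactly once per key, even with concurrent callers,
// and hands the recorded error to everyone who asks for that key afterwards.
func registerOnce(registry *sync.Map, key string, register func() error) error {
	v, _ := registry.LoadOrStore(key, &onceWithErr{})
	entry := v.(*onceWithErr)
	entry.once.Do(func() {
		entry.err = register()
	})
	return entry.err
}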
"github.com/onsi/gomega" + "github.com/solo-io/solo-kit/pkg/api/v1/clients" "github.com/solo-io/solo-kit/pkg/api/v1/clients/kube" "github.com/solo-io/solo-kit/pkg/api/v1/clients/kube/crd/client/clientset/versioned/fake" solov1 "github.com/solo-io/solo-kit/pkg/api/v1/clients/kube/crd/solo.io/v1" @@ -75,12 +76,19 @@ var _ = Describe("Test ResourceClientSharedInformerFactory", func() { Expect(len(kubeCache.Informers())).To(BeEquivalentTo(1)) }) - It("panics when attempting of register a client with a running factory", func() { + It("should not panic when attempting of register a client with a running factory", func() { // Start without registering clients, just to set the "started" flag kubeCache.Start() Expect(kubeCache.IsRunning()).To(BeTrue()) - Expect(func() { _ = kubeCache.Register(client1) }).To(Panic()) + Expect(func() { _ = kubeCache.Register(client1) }).ToNot(Panic()) + }) + It("can register a new namespace even when the factory is running", func() { + kubeCache.Start() + Expect(kubeCache.IsRunning()).To(BeTrue()) + + err := kubeCache.RegisterNewNamespace("newNamespace", client2) + Expect(err).NotTo(HaveOccurred()) }) }) @@ -111,13 +119,19 @@ var _ = Describe("Test ResourceClientSharedInformerFactory", func() { var ( clientset *fake.Clientset + clientset2 *fake.Clientset preStartGoroutines int + client *kube.ResourceClient + client2 *kube.ResourceClient ) BeforeEach(func() { clientset = fake.NewSimpleClientset(mocksv1.MockResourceCrd) - // We need the resourceClient so that we can register its resourceType/namespaces with the cache - client := util.ClientForClientsetAndResource(clientset, kubeCache, mocksv1.MockResourceCrd, &mocksv1.MockResource{}, []string{namespace1}) + clientset2 = fake.NewSimpleClientset(mocksv1.AnotherMockResourceCrd) + + client = util.ClientForClientsetAndResource(clientset, kubeCache, mocksv1.MockResourceCrd, &mocksv1.MockResource{}, []string{namespace1}) + client2 = util.ClientForClientsetAndResource(clientset2, kubeCache, mocksv1.AnotherMockResourceCrd, &mocksv1.AnotherMockResource{}, []string{namespace1}) + err := kubeCache.Register(client) Expect(err).NotTo(HaveOccurred()) @@ -130,6 +144,9 @@ var _ = Describe("Test ResourceClientSharedInformerFactory", func() { var watch <-chan solov1.Resource BeforeEach(func() { + // there is a race condition when a go routine uses the watch + // before the entire test finishes + time.Sleep(50 * time.Millisecond) watch = kubeCache.AddWatch(10) }) @@ -191,6 +208,44 @@ var _ = Describe("Test ResourceClientSharedInformerFactory", func() { Expect(len(watchResults)).To(BeEquivalentTo(3)) Expect(watchResults).To(ConsistOf("mock-res-1", "mock-res-3", "mock-res-1")) }) + It("should be able to register a new namespace", func() { + err := kubeCache.RegisterNewNamespace(namespace2, client) + Expect(err).NotTo(HaveOccurred()) + + var watchResults []string + + ctx, _ := context.WithDeadline(context.Background(), time.Now().Add(time.Millisecond*100)) + + go func() { + for { + select { + case <-ctx.Done(): + return + case res := <-watch: + watchResults = append(watchResults, res.ObjectMeta.Name) + } + } + }() + + go Expect(util.CreateMockResource(ctx, clientset, namespace2, "mock-res-2", "test")).To(BeNil()) + + <-ctx.Done() + + Expect(len(watchResults)).To(BeEquivalentTo(1)) + Expect(watchResults).To(ConsistOf("mock-res-2")) + }) + + It("should be able to register two or more clients in a new namespace", func() { + err := client.RegisterNamespace(namespace2) + Expect(err).ToNot(HaveOccurred()) + err = 
client2.RegisterNamespace(namespace2) + Expect(err).ToNot(HaveOccurred()) + + // it should fail here, saying that the client has not registered the informer for the namespace and the client + resources, err := client2.List(namespace2, clients.ListOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + Expect(len(resources)).To(BeEquivalentTo(0)) + }) }) Context("multiple watches", func() { diff --git a/pkg/api/v1/clients/kubesecret/resource_client.go b/pkg/api/v1/clients/kubesecret/resource_client.go index c35517f63..df236f93e 100644 --- a/pkg/api/v1/clients/kubesecret/resource_client.go +++ b/pkg/api/v1/clients/kubesecret/resource_client.go @@ -120,6 +120,10 @@ func NewResourceClientWithSecretConverter(kube kubernetes.Interface, resourceTyp var _ clients.ResourceClient = &ResourceClient{} +func (rc *ResourceClient) RegisterNamespace(namespace string) error { + return rc.cache.RegisterNewNamespaceCache(namespace) +} + func (rc *ResourceClient) Read(namespace, name string, opts clients.ReadOpts) (resources.Resource, error) { if err := resources.ValidateName(name); err != nil { return nil, errors.Wrapf(err, "validation error") diff --git a/pkg/api/v1/clients/memory/resource_client.go b/pkg/api/v1/clients/memory/resource_client.go index 2f2f01861..c05feb767 100644 --- a/pkg/api/v1/clients/memory/resource_client.go +++ b/pkg/api/v1/clients/memory/resource_client.go @@ -127,6 +127,10 @@ func (rc *ResourceClient) Register() error { return nil } +func (rc *ResourceClient) RegisterNamespace(namespace string) error { + return nil +} + func (rc *ResourceClient) Read(namespace, name string, opts clients.ReadOpts) (resources.Resource, error) { if err := resources.ValidateName(name); err != nil { return nil, errors.Wrapf(err, "validation error") diff --git a/pkg/api/v1/clients/mocks/client_interface.go b/pkg/api/v1/clients/mocks/client_interface.go index e0f023d74..c8aa3663c 100644 --- a/pkg/api/v1/clients/mocks/client_interface.go +++ b/pkg/api/v1/clients/mocks/client_interface.go @@ -116,6 +116,20 @@ func (mr *MockResourceClientMockRecorder) Register() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Register", reflect.TypeOf((*MockResourceClient)(nil).Register)) } +// RegisterNamespace mocks base method +func (m *MockResourceClient) RegisterNamespace(namespace string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RegisterNamespace", namespace) + ret0, _ := ret[0].(error) + return ret0 +} + +// RegisterNamespace indicates an expected call of RegisterNamespace +func (mr *MockResourceClientMockRecorder) RegisterNamespace(namespace interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterNamespace", reflect.TypeOf((*MockResourceClient)(nil).RegisterNamespace), namespace) +} + // Read mocks base method func (m *MockResourceClient) Read(namespace, name string, opts clients.ReadOpts) (resources.Resource, error) { m.ctrl.T.Helper() diff --git a/pkg/api/v1/clients/options.go b/pkg/api/v1/clients/options.go new file mode 100644 index 000000000..c05f6982c --- /dev/null +++ b/pkg/api/v1/clients/options.go @@ -0,0 +1,35 @@ +package clients + +import ( + "github.com/solo-io/solo-kit/pkg/api/v1/resources" + "k8s.io/apimachinery/pkg/labels" +) + +// GetLabelSelector will parse ExpresionSelector if present, else it selects Selector. 
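A hedged usage sketch for this helper (only the ListOpts fields shown in this patch are assumed; the label keys and values are made up):

package example

import (
	"fmt"

	"github.com/solo-io/solo-kit/pkg/api/v1/clients"
)

func selectorExamples() error {
	// Set-based requirement: a non-empty ExpressionSelector wins and is parsed as written.
	exprSel, err := clients.GetLabelSelector(clients.ListOpts{ExpressionSelector: "env in (test)"})
	if err != nil {
		return err
	}
	fmt.Println(exprSel.String()) // env in (test)

	// Equality-based requirement: otherwise the Selector map becomes key=value terms.
	eqSel, err := clients.GetLabelSelector(clients.ListOpts{Selector: map[string]string{"product": "edge"}})
	if err != nil {
		return err
	}
	fmt.Println(eqSel.String()) // product=edge
	return nil
}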
+func GetLabelSelector(listOpts ListOpts) (labels.Selector, error) { + // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#set-based-requirement + if listOpts.ExpressionSelector != "" { + return labels.Parse(listOpts.ExpressionSelector) + } + + // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#equality-based-requirement + return labels.SelectorFromSet(listOpts.Selector), nil +} + +// TranslateWatchOptsIntoListOpts translates the watch options into list options +func TranslateWatchOptsIntoListOpts(wopts WatchOpts) ListOpts { + clopts := ListOpts{Ctx: wopts.Ctx, ExpressionSelector: wopts.ExpressionSelector, Selector: wopts.Selector} + return clopts +} + +// TranslateResourceNamespaceListToListOptions translates the resource namespace list options to List Options +func TranslateResourceNamespaceListToListOptions(lopts resources.ResourceNamespaceListOptions) ListOpts { + clopts := ListOpts{Ctx: lopts.Ctx, ExpressionSelector: lopts.ExpressionSelector} + return clopts +} + +// TranslateResourceNamespaceListToWatchOptions translates the resource namespace watch options to Watch Options +func TranslateResourceNamespaceListToWatchOptions(wopts resources.ResourceNamespaceWatchOptions) WatchOpts { + clopts := WatchOpts{Ctx: wopts.Ctx, ExpressionSelector: wopts.ExpressionSelector} + return clopts +} diff --git a/pkg/api/v1/clients/vault/resource_client.go b/pkg/api/v1/clients/vault/resource_client.go index 2b164f0b1..163a2401b 100644 --- a/pkg/api/v1/clients/vault/resource_client.go +++ b/pkg/api/v1/clients/vault/resource_client.go @@ -105,6 +105,10 @@ func (rc *ResourceClient) Register() error { return nil } +func (rc *ResourceClient) RegisterNamespace(ns string) error { + return nil +} + func (rc *ResourceClient) Read(namespace, name string, opts clients.ReadOpts) (resources.Resource, error) { if err := resources.ValidateName(name); err != nil { return nil, errors.Wrapf(err, "validation error") diff --git a/pkg/api/v1/reconcile/reconciler_test.go b/pkg/api/v1/reconcile/reconciler_test.go index b5b76cb03..621e82fee 100644 --- a/pkg/api/v1/reconcile/reconciler_test.go +++ b/pkg/api/v1/reconcile/reconciler_test.go @@ -202,6 +202,8 @@ var _ = Describe("Reconciler", func() { }) }) +var _ clients.ResourceClient = &testResourceClient{} + type testResourceClient struct { errorOnRead bool errorOnWrite bool @@ -221,6 +223,10 @@ func (c *testResourceClient) Register() error { panic("implement me") } +func (c *testResourceClient) RegisterNamespace(namespace string) error { + panic("implement me") +} + func (c *testResourceClient) Read(namespace, name string, opts clients.ReadOpts) (resources.Resource, error) { if c.errorOnRead { return nil, errors.Errorf("read should not have been called") diff --git a/pkg/api/v1/resources/common/kubernetes/config_map_client.sk.go b/pkg/api/v1/resources/common/kubernetes/config_map_client.sk.go index b70d81a02..b79724b49 100644 --- a/pkg/api/v1/resources/common/kubernetes/config_map_client.sk.go +++ b/pkg/api/v1/resources/common/kubernetes/config_map_client.sk.go @@ -19,6 +19,7 @@ type ConfigMapWatcher interface { type ConfigMapClient interface { BaseClient() clients.ResourceClient Register() error + RegisterNamespace(namespace string) error Read(namespace, name string, opts clients.ReadOpts) (*ConfigMap, error) Write(resource *ConfigMap, opts clients.WriteOpts) (*ConfigMap, error) Delete(namespace, name string, opts clients.DeleteOpts) error @@ -59,6 +60,10 @@ func (client *configMapClient) Register() error { return 
client.rc.Register() } +func (client *configMapClient) RegisterNamespace(namespace string) error { + return client.rc.RegisterNamespace(namespace) +} + func (client *configMapClient) Read(namespace, name string, opts clients.ReadOpts) (*ConfigMap, error) { opts = opts.WithDefaults() diff --git a/pkg/api/v1/resources/common/kubernetes/custom_resource_definition_client.sk.go b/pkg/api/v1/resources/common/kubernetes/custom_resource_definition_client.sk.go index 0051b2d02..b8af5d1c1 100644 --- a/pkg/api/v1/resources/common/kubernetes/custom_resource_definition_client.sk.go +++ b/pkg/api/v1/resources/common/kubernetes/custom_resource_definition_client.sk.go @@ -19,6 +19,7 @@ type CustomResourceDefinitionWatcher interface { type CustomResourceDefinitionClient interface { BaseClient() clients.ResourceClient Register() error + RegisterNamespace(namespace string) error Read(name string, opts clients.ReadOpts) (*CustomResourceDefinition, error) Write(resource *CustomResourceDefinition, opts clients.WriteOpts) (*CustomResourceDefinition, error) Delete(name string, opts clients.DeleteOpts) error @@ -59,6 +60,10 @@ func (client *customResourceDefinitionClient) Register() error { return client.rc.Register() } +func (client *customResourceDefinitionClient) RegisterNamespace(namespace string) error { + return client.rc.RegisterNamespace(namespace) +} + func (client *customResourceDefinitionClient) Read(name string, opts clients.ReadOpts) (*CustomResourceDefinition, error) { opts = opts.WithDefaults() diff --git a/pkg/api/v1/resources/common/kubernetes/deployment_client.sk.go b/pkg/api/v1/resources/common/kubernetes/deployment_client.sk.go index 300c8da3c..ab218b93c 100644 --- a/pkg/api/v1/resources/common/kubernetes/deployment_client.sk.go +++ b/pkg/api/v1/resources/common/kubernetes/deployment_client.sk.go @@ -19,6 +19,7 @@ type DeploymentWatcher interface { type DeploymentClient interface { BaseClient() clients.ResourceClient Register() error + RegisterNamespace(namespace string) error Read(namespace, name string, opts clients.ReadOpts) (*Deployment, error) Write(resource *Deployment, opts clients.WriteOpts) (*Deployment, error) Delete(namespace, name string, opts clients.DeleteOpts) error @@ -59,6 +60,10 @@ func (client *deploymentClient) Register() error { return client.rc.Register() } +func (client *deploymentClient) RegisterNamespace(namespace string) error { + return client.rc.RegisterNamespace(namespace) +} + func (client *deploymentClient) Read(namespace, name string, opts clients.ReadOpts) (*Deployment, error) { opts = opts.WithDefaults() diff --git a/pkg/api/v1/resources/common/kubernetes/job_client.sk.go b/pkg/api/v1/resources/common/kubernetes/job_client.sk.go index e059ed9fb..3186b16ec 100644 --- a/pkg/api/v1/resources/common/kubernetes/job_client.sk.go +++ b/pkg/api/v1/resources/common/kubernetes/job_client.sk.go @@ -19,6 +19,7 @@ type JobWatcher interface { type JobClient interface { BaseClient() clients.ResourceClient Register() error + RegisterNamespace(namespace string) error Read(namespace, name string, opts clients.ReadOpts) (*Job, error) Write(resource *Job, opts clients.WriteOpts) (*Job, error) Delete(namespace, name string, opts clients.DeleteOpts) error @@ -59,6 +60,10 @@ func (client *jobClient) Register() error { return client.rc.Register() } +func (client *jobClient) RegisterNamespace(namespace string) error { + return client.rc.RegisterNamespace(namespace) +} + func (client *jobClient) Read(namespace, name string, opts clients.ReadOpts) (*Job, error) { opts = 
opts.WithDefaults() diff --git a/pkg/api/v1/resources/common/kubernetes/kube_namespace_client.sk.go b/pkg/api/v1/resources/common/kubernetes/kube_namespace_client.sk.go index e89d918fb..8abcd58c6 100644 --- a/pkg/api/v1/resources/common/kubernetes/kube_namespace_client.sk.go +++ b/pkg/api/v1/resources/common/kubernetes/kube_namespace_client.sk.go @@ -19,6 +19,7 @@ type KubeNamespaceWatcher interface { type KubeNamespaceClient interface { BaseClient() clients.ResourceClient Register() error + RegisterNamespace(namespace string) error Read(name string, opts clients.ReadOpts) (*KubeNamespace, error) Write(resource *KubeNamespace, opts clients.WriteOpts) (*KubeNamespace, error) Delete(name string, opts clients.DeleteOpts) error @@ -59,6 +60,10 @@ func (client *kubeNamespaceClient) Register() error { return client.rc.Register() } +func (client *kubeNamespaceClient) RegisterNamespace(namespace string) error { + return client.rc.RegisterNamespace(namespace) +} + func (client *kubeNamespaceClient) Read(name string, opts clients.ReadOpts) (*KubeNamespace, error) { opts = opts.WithDefaults() diff --git a/pkg/api/v1/resources/common/kubernetes/pod_client.sk.go b/pkg/api/v1/resources/common/kubernetes/pod_client.sk.go index c84531b3f..fc936b5fb 100644 --- a/pkg/api/v1/resources/common/kubernetes/pod_client.sk.go +++ b/pkg/api/v1/resources/common/kubernetes/pod_client.sk.go @@ -19,6 +19,7 @@ type PodWatcher interface { type PodClient interface { BaseClient() clients.ResourceClient Register() error + RegisterNamespace(namespace string) error Read(namespace, name string, opts clients.ReadOpts) (*Pod, error) Write(resource *Pod, opts clients.WriteOpts) (*Pod, error) Delete(namespace, name string, opts clients.DeleteOpts) error @@ -59,6 +60,10 @@ func (client *podClient) Register() error { return client.rc.Register() } +func (client *podClient) RegisterNamespace(namespace string) error { + return client.rc.RegisterNamespace(namespace) +} + func (client *podClient) Read(namespace, name string, opts clients.ReadOpts) (*Pod, error) { opts = opts.WithDefaults() diff --git a/pkg/api/v1/resources/common/kubernetes/service_client.sk.go b/pkg/api/v1/resources/common/kubernetes/service_client.sk.go index c4f6efef6..b10d853aa 100644 --- a/pkg/api/v1/resources/common/kubernetes/service_client.sk.go +++ b/pkg/api/v1/resources/common/kubernetes/service_client.sk.go @@ -19,6 +19,7 @@ type ServiceWatcher interface { type ServiceClient interface { BaseClient() clients.ResourceClient Register() error + RegisterNamespace(namespace string) error Read(namespace, name string, opts clients.ReadOpts) (*Service, error) Write(resource *Service, opts clients.WriteOpts) (*Service, error) Delete(namespace, name string, opts clients.DeleteOpts) error @@ -59,6 +60,10 @@ func (client *serviceClient) Register() error { return client.rc.Register() } +func (client *serviceClient) RegisterNamespace(namespace string) error { + return client.rc.RegisterNamespace(namespace) +} + func (client *serviceClient) Read(namespace, name string, opts clients.ReadOpts) (*Service, error) { opts = opts.WithDefaults() diff --git a/pkg/api/v1/resources/resource_interface.go b/pkg/api/v1/resources/resource_interface.go index ba2b7b7ee..9d3ca6d55 100644 --- a/pkg/api/v1/resources/resource_interface.go +++ b/pkg/api/v1/resources/resource_interface.go @@ -1,6 +1,7 @@ package resources import ( + "context" "fmt" "reflect" "sort" @@ -80,6 +81,62 @@ type CustomInputResource interface { MarshalStatus() (v1.Status, error) } +// ResourceNamespaceListOptions provides 
the options for listing Resource Namespaces +type ResourceNamespaceListOptions struct { + // Ctx is the context + Ctx context.Context + + // Expression-based (set-based) label requirements + // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#set-based-requirement + // Set-based requirements allow filtering by label keys and sets of values. + // Matching namespaces must satisfy all of the specified requirements, + // though they may have additional labels as well. + // Example: + // "env in (test)" would return all namespaces whose env label has the value test + // If a client supports both ExpressionSelector and Selector, ExpressionSelector is preferred + ExpressionSelector string +} + +// ResourceNamespaceWatchOptions provides the options for watching Resource Namespaces +type ResourceNamespaceWatchOptions struct { + // Ctx is the context + Ctx context.Context + // Expression-based (set-based) label requirements + // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#set-based-requirement + // Set-based requirements allow filtering by label keys and sets of values. + // Matching namespaces must satisfy all of the specified requirements, + // though they may have additional labels as well. + // Example: + // "env in (test)" would return all namespaces whose env label has the value test + // If a client supports both ExpressionSelector and Selector, ExpressionSelector is preferred + ExpressionSelector string +} + +// ResourceNamespace is a namespace in which resources can be found. A ResourceNamespace is +// anything that contains resources independently of other resources; it represents an +// independent section of infrastructure or a region, e.g. a Kubernetes namespace +type ResourceNamespace struct { + // Name is the name of the namespace + Name string +} + +// ResourceNamespaceList contains a list of ResourceNamespaces +type ResourceNamespaceList []ResourceNamespace + +// ResourceNamespaceLister is anything that can list and watch the namespaces in which +// resources can be found. +type ResourceNamespaceLister interface { + // GetResourceNamespaceList returns the list of namespaces in which resources + // can be found. The returned list will not contain namespacesToFilter. + GetResourceNamespaceList(opts ResourceNamespaceListOptions, namespacesToFilter ResourceNamespaceList) (ResourceNamespaceList, error) + // GetResourceNamespaceWatch returns a watch that receives events when namespaces + // are updated or created. The channel will not return namespacesToFilter; errors that + // occur asynchronously are reported on the returned error channel.
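To make the interface concrete, a hedged consumer sketch follows (how the lister is constructed is out of scope here; the expression and the filtered namespace name are made up):

package example

import (
	"context"
	"fmt"

	"github.com/solo-io/solo-kit/pkg/api/v1/resources"
)

// watchLabeledNamespaces lists the currently matching namespaces once, then follows updates.
func watchLabeledNamespaces(ctx context.Context, lister resources.ResourceNamespaceLister) error {
	// Namespaces that are already watched explicitly and should be excluded from the results.
	filter := resources.ResourceNamespaceList{{Name: "gloo-system"}}

	initial, err := lister.GetResourceNamespaceList(resources.ResourceNamespaceListOptions{
		Ctx:                ctx,
		ExpressionSelector: "env in (test)",
	}, filter)
	if err != nil {
		return err
	}
	fmt.Printf("labeled namespaces right now: %v\n", initial)

	nsChan, errChan, err := lister.GetResourceNamespaceWatch(resources.ResourceNamespaceWatchOptions{
		Ctx:                ctx,
		ExpressionSelector: "env in (test)",
	}, filter)
	if err != nil {
		return err
	}
	for {
		select {
		case <-ctx.Done():
			return nil
		case nsList, ok := <-nsChan:
			if !ok {
				return nil
			}
			fmt.Printf("labeled namespaces changed: %v\n", nsList)
		case werr := <-errChan: // errChan may be nil, in which case this case never fires
			fmt.Printf("async namespace watch error: %v\n", werr)
		}
	}
}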
+ GetResourceNamespaceWatch(opts ResourceNamespaceWatchOptions, namespacesToFilter ResourceNamespaceList) (chan ResourceNamespaceList, <-chan error, error) +} + type ResourceList []Resource type ResourcesById map[string]Resource type ResourcesByKind map[string]ResourceList diff --git a/pkg/code-generator/codegen/templates/event_loop_test_template.go b/pkg/code-generator/codegen/templates/event_loop_test_template.go index 64076ad3d..321a43cc9 100644 --- a/pkg/code-generator/codegen/templates/event_loop_test_template.go +++ b/pkg/code-generator/codegen/templates/event_loop_test_template.go @@ -25,6 +25,9 @@ import ( "github.com/solo-io/solo-kit/pkg/api/v1/clients" "github.com/solo-io/solo-kit/pkg/api/v1/clients/factory" "github.com/solo-io/solo-kit/pkg/api/v1/clients/memory" + "github.com/solo-io/solo-kit/pkg/api/v1/clients/kube/cache" + skNamespace "github.com/solo-io/solo-kit/pkg/api/external/kubernetes/namespace" + "github.com/solo-io/solo-kit/test/helpers" ) var _ = Describe("{{ .GoName }}EventLoop", func() { @@ -37,6 +40,12 @@ var _ = Describe("{{ .GoName }}EventLoop", func() { BeforeEach(func() { ctx = context.Background() + + kube := helpers.MustKubeClient() + kubeCache, err := cache.NewKubeCoreCache(context.TODO(), kube) + Expect(err).NotTo(HaveOccurred()) + resourceNamespaceLister := skNamespace.NewKubeClientCacheResourceNamespaceLister(kube, kubeCache) + {{- range .Resources}} {{ lower_camel .Name }}ClientFactory := &factory.MemoryResourceClientFactory{ @@ -46,7 +55,7 @@ var _ = Describe("{{ .GoName }}EventLoop", func() { Expect(err).NotTo(HaveOccurred()) {{- end}} - emitter = New{{ .GoName }}Emitter({{ $clients }}) + emitter = New{{ .GoName }}Emitter({{ $clients }}, resourceNamespaceLister) }) It("runs sync function on a new snapshot", func() { {{- range .Resources }} diff --git a/pkg/code-generator/codegen/templates/funcs.go b/pkg/code-generator/codegen/templates/funcs.go index c9fd6642f..712fbabcf 100644 --- a/pkg/code-generator/codegen/templates/funcs.go +++ b/pkg/code-generator/codegen/templates/funcs.go @@ -88,6 +88,15 @@ var Funcs = template.FuncMap{ "backtick": func() string { return "`" }, + "minus": func(a, b int) int { + return a - b + }, + "inc": func(a int) int { + return a + 1 + }, + "ge": func(a, b int) bool { + return a >= b + }, } func printPointer(format string, p *string) string { diff --git a/pkg/code-generator/codegen/templates/resource_client_template.go b/pkg/code-generator/codegen/templates/resource_client_template.go index d285f928c..d92fdd718 100644 --- a/pkg/code-generator/codegen/templates/resource_client_template.go +++ b/pkg/code-generator/codegen/templates/resource_client_template.go @@ -28,6 +28,7 @@ type {{ .Name }}Watcher interface { type {{ .Name }}Client interface { BaseClient() clients.ResourceClient Register() error + RegisterNamespace(namespace string) error {{- if .ClusterScoped }} Read(name string, opts clients.ReadOpts) (*{{ .Name }}, error) {{- else }} @@ -77,6 +78,10 @@ func (client *{{ lower_camel .Name }}Client) Register() error { return client.rc.Register() } +func (client *{{ lower_camel .Name }}Client) RegisterNamespace(namespace string) error { + return client.rc.RegisterNamespace(namespace) +} + {{ if .ClusterScoped }} func (client *{{ lower_camel .Name }}Client) Read(name string, opts clients.ReadOpts) (*{{ .Name }}, error) { {{- else }} diff --git a/pkg/code-generator/codegen/templates/snapshot_emitter_template.go b/pkg/code-generator/codegen/templates/snapshot_emitter_template.go index a86d7adc0..fcdb17663 100644 --- 
a/pkg/code-generator/codegen/templates/snapshot_emitter_template.go +++ b/pkg/code-generator/codegen/templates/snapshot_emitter_template.go @@ -4,6 +4,19 @@ import ( "text/template" ) +// Snapshot Emitters are used to take snapshots of the current system, using either +// cluster scoped or non namespaced scoped selection. Watches are used to notify +// the snapshot emitter when new resources have been created or updated. +// Snapshot Emitters will delegate to Resource Clients to list and watch defined +// resources. +// Non-ClusterScoped - gets all the resources within the watched set of namespaces. If Watched Namespaces is +// set to [""] then watches all namespaces. If the Expression Selector on the WatchOpts is set, then it will watch +// namespaces that meet the label criteria on the Expression Selector in addition to the watched namespaces. +// If watched Namespaces is set to [""] with the Expression Selectors, then it only watches the namespaces that meet the +// label criteria on the Expression Selectors. +// ClusterScoped -- there is no namespacing. Gets all the resources within the entire cluster. There is one watch per resource. +// Setting the Expression Selectors will have no impact on namespaces. +// Not using ClusterScoped - allows for using namespacing, so that each namespace has it's own watch per resource. var ResourceGroupEmitterTemplate = template.Must(template.New("resource_group_emitter").Funcs(Funcs).Parse( `package {{ .Project.ProjectConfig.Version }} @@ -17,7 +30,7 @@ var ResourceGroupEmitterTemplate = template.Must(template.New("resource_group_em {{- $clients := (join_str_slice $clients ", ") }} import ( - "fmt" + "bytes" "sync" "time" @@ -30,6 +43,7 @@ import ( "github.com/solo-io/solo-kit/pkg/api/v1/clients" "github.com/solo-io/solo-kit/pkg/errors" + "github.com/solo-io/solo-kit/pkg/api/v1/resources" skstats "github.com/solo-io/solo-kit/pkg/stats" "github.com/solo-io/go-utils/errutils" @@ -109,15 +123,16 @@ type {{ .GoName }}Emitter interface { {{- end}} } -func New{{ .GoName }}Emitter({{ $client_declarations }}) {{ .GoName }}Emitter { - return New{{ .GoName }}EmitterWithEmit({{ $clients }}, make(chan struct{})) +func New{{ .GoName }}Emitter({{ $client_declarations }}, resourceNamespaceLister resources.ResourceNamespaceLister) {{ .GoName }}Emitter { + return New{{ .GoName }}EmitterWithEmit({{ $clients }}, resourceNamespaceLister, make(chan struct{})) } -func New{{ .GoName }}EmitterWithEmit({{ $client_declarations }}, emit <-chan struct{}) {{ .GoName }}Emitter { +func New{{ .GoName }}EmitterWithEmit({{ $client_declarations }}, resourceNamespaceLister resources.ResourceNamespaceLister, emit <-chan struct{}) {{ .GoName }}Emitter { return &{{ lower_camel .GoName }}Emitter{ {{- range .Resources}} {{ lower_camel .Name }}:{{ lower_camel .Name }}Client, {{- end}} + resourceNamespaceLister: resourceNamespaceLister, forceEmit: emit, } } @@ -127,6 +142,14 @@ type {{ lower_camel .GoName }}Emitter struct { {{- range .Resources}} {{ lower_camel .Name }} {{ .ImportPrefix }}{{ .Name }}Client {{- end}} + // resourceNamespaceLister is used to watch for new namespaces when they are created. + // It is used when Expression Selector is in the Watch Opts set in Snapshot(). + resourceNamespaceLister resources.ResourceNamespaceLister + // namespacesWatching is the set of namespaces that we are watching. This is helpful + // when Expression Selector is set on the Watch Opts in Snapshot(). 
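Putting the comment above into practice, here is a hedged sketch of how a caller might drive a generated emitter; FooSnapshot and the snapshotEmitter interface below stand in for whatever names the generator actually emits, and the namespace and label expression are made up:

package example

import (
	"context"
	"time"

	"github.com/solo-io/solo-kit/pkg/api/v1/clients"
)

// FooSnapshot is a placeholder for a generated snapshot type.
type FooSnapshot struct{}

// snapshotEmitter mirrors the Snapshots signature this template generates.
type snapshotEmitter interface {
	Snapshots(watchNamespaces []string, opts clients.WatchOpts) (<-chan *FooSnapshot, <-chan error, error)
}

// consumeSnapshots watches gloo-system explicitly plus every namespace labeled env=test.
func consumeSnapshots(ctx context.Context, emitter snapshotEmitter) error {
	snapshots, errs, err := emitter.Snapshots([]string{"gloo-system"}, clients.WatchOpts{
		Ctx:                ctx,
		RefreshRate:        time.Second,
		ExpressionSelector: "env in (test)",
	})
	if err != nil {
		return err
	}
	for {
		select {
		case <-ctx.Done():
			return nil
		case snap, ok := <-snapshots:
			if !ok {
				return nil
			}
			_ = snap // react to the new state of the world here
		case err := <-errs:
			_ = err // watch errors surface asynchronously; log or propagate as needed
		}
	}
}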
+ namespacesWatching sync.Map + // updateNamespaces is used to perform locks and unlocks when watches on namespaces are being updated/created + updateNamespaces sync.Mutex } func (c *{{ lower_camel .GoName }}Emitter) Register() error { @@ -145,6 +168,14 @@ func (c *{{ lower_camel $.GoName }}Emitter) {{ .Name }}() {{ .ImportPrefix }}{{ } {{- end}} +// Snapshots will return a channel that can be used to receive snapshots of the +// state of the resources it is watching +// when watching resources, you can set the watchNamespaces, and you can set the +// ExpressionSelector of the WatchOpts. Setting watchNamespaces will watch for all resources +// that are in the specified namespaces. In addition if ExpressionSelector of the WatchOpts is +// set, then all namespaces that meet the label criteria of the ExpressionSelector will +// also be watched. If Expression Selector is set and watched namespaces is set to [""], then it +// will only watch namespaces that meet the label expression selector criteria. func (c *{{ lower_camel .GoName }}Emitter) Snapshots(watchNamespaces []string, opts clients.WatchOpts) (<-chan *{{ .GoName }}Snapshot, <-chan error, error) { if len(watchNamespaces) == 0 { @@ -159,81 +190,309 @@ func (c *{{ lower_camel .GoName }}Emitter) Snapshots(watchNamespaces []string, o } errs := make(chan error) + hasWatchedNamespaces := len(watchNamespaces) > 1 || (len(watchNamespaces) == 1 && watchNamespaces[0] != "") + watchingLabeledNamespaces := ! (opts.ExpressionSelector == "") var done sync.WaitGroup ctx := opts.Ctx + // setting up the options for both listing and watching resources in namespaces + watchedNamespacesListOptions := clients.ListOpts{Ctx: opts.Ctx, Selector: opts.Selector } + watchedNamespacesWatchOptions := clients.WatchOpts{Ctx: opts.Ctx, Selector: opts.Selector } {{- range .Resources}} /* Create channel for {{ .Name }} */ {{- if (not .ClusterScoped) }} - type {{ lower_camel .Name }}ListWithNamespace struct { - list {{ .ImportPrefix }}{{ .Name }}List - namespace string - } +type {{ lower_camel .Name }}ListWithNamespace struct { + list {{ .ImportPrefix }}{{ .Name }}List + namespace string +} {{ lower_camel .Name }}Chan := make(chan {{ lower_camel .Name }}ListWithNamespace) - - var initial{{ upper_camel .Name }}List {{ .ImportPrefix }}{{ .Name }}List{{- end }} - -{{- end}} + var initial{{ upper_camel .Name }}List {{ .ImportPrefix }}{{ .Name }}List +{{- end }} +{{- end }} currentSnapshot := {{ .GoName }}Snapshot{} +{{- range .Resources}} +{{- if not .ClusterScoped }} + {{ lower_camel .PluralName }}ByNamespace := sync.Map{} +{{- end }} +{{- end }} + if hasWatchedNamespaces || ! watchingLabeledNamespaces { + // then watch all resources on watch Namespaces + + // watched namespaces + for _, namespace := range watchNamespaces { {{- range .Resources}} - {{- if not .ClusterScoped }} - {{ lower_camel .PluralName }}ByNamespace := make(map[string]{{ .ImportPrefix }}{{ .Name }}List) + {{- if (not .ClusterScoped) }} + /* Setup namespaced watch for {{ .Name }} */ + { + {{ lower_camel .PluralName }}, err := c.{{ lower_camel .Name }}.List(namespace, watchedNamespacesListOptions) + if err != nil { + return nil, nil, errors.Wrapf(err, "initial {{ .Name }} list") + } + initial{{ upper_camel .Name }}List = append(initial{{ upper_camel .Name }}List, {{ lower_camel .PluralName }}...) 
+ {{ lower_camel .PluralName }}ByNamespace.Store(namespace, {{ lower_camel .PluralName }}) + } + {{ lower_camel .Name }}NamespacesChan, {{ lower_camel .Name }}Errs, err := c.{{ lower_camel .Name }}.Watch(namespace, watchedNamespacesWatchOptions) + if err != nil { + return nil, nil, errors.Wrapf(err, "starting {{ .Name }} watch") + } + + done.Add(1) + go func(namespace string) { + defer done.Done() + errutils.AggregateErrs(ctx, errs, {{ lower_camel .Name }}Errs, namespace+"-{{ lower_camel .PluralName }}") + }(namespace) + {{- end }} {{- end }} + /* Watch for changes and update snapshot */ + go func(namespace string) { + defer func() { + c.namespacesWatching.Delete(namespace) + }() + c.namespacesWatching.Store(namespace, true) + for { + select { + case <-ctx.Done(): + return + {{- range .Resources}} + {{- if (not .ClusterScoped) }} + case {{ lower_camel .Name }}List, ok := <- {{ lower_camel .Name }}NamespacesChan: + if !ok { + return + } + select { + case <-ctx.Done(): + return + case {{ lower_camel .Name }}Chan <- {{ lower_camel .Name }}ListWithNamespace{list:{{ lower_camel .Name }}List, namespace:namespace}: + } + {{- end }} + {{- end}} + } + } + }(namespace) + } + } + // watch all other namespaces that are labeled and fit the Expression Selector + if opts.ExpressionSelector != "" { + // watch resources of non-watched namespaces that fit the expression selectors + namespaceListOptions := resources.ResourceNamespaceListOptions{ + Ctx: opts.Ctx, + ExpressionSelector: opts.ExpressionSelector, + } + namespaceWatchOptions := resources.ResourceNamespaceWatchOptions{ + Ctx: opts.Ctx, + ExpressionSelector: opts.ExpressionSelector, + } - for _, namespace := range watchNamespaces { -{{- range .Resources}} + filterNamespaces := resources.ResourceNamespaceList{} + for _, ns := range watchNamespaces { + // we do not want to filter out "" which equals all namespaces + // the reason is because we will never create a watch on ""(all namespaces) because + // doing so means we watch all resources regardless of namespace. Our intent is to + // watch only certain namespaces. 
+ if ns != "" { + filterNamespaces = append(filterNamespaces, resources.ResourceNamespace{Name: ns}) + } + } + namespacesResources, err := c.resourceNamespaceLister.GetResourceNamespaceList(namespaceListOptions, filterNamespaces) + if err != nil { + return nil, nil, err + } + newlyRegisteredNamespaces := make([]string, len(namespacesResources)) + // non watched namespaces that are labeled + for i, resourceNamespace := range namespacesResources { + c.namespacesWatching.Load(resourceNamespace) + namespace := resourceNamespace.Name + newlyRegisteredNamespaces[i] = namespace +{{- range .Resources }} {{- if (not .ClusterScoped) }} - /* Setup namespaced watch for {{ .Name }} */ - { - {{ lower_camel .PluralName }}, err := c.{{ lower_camel .Name }}.List(namespace, clients.ListOpts{Ctx: opts.Ctx, Selector: opts.Selector}) + err = c.{{ lower_camel .Name }}.RegisterNamespace(namespace) if err != nil { - return nil, nil, errors.Wrapf(err, "initial {{ .Name }} list") + return nil, nil, errors.Wrapf(err, "there was an error registering the namespace to the {{ lower_camel .Name }}") + } + /* Setup namespaced watch for {{ upper_camel .Name }} */ + { + {{ lower_camel .PluralName }}, err := c.{{ lower_camel .Name }}.List(namespace, clients.ListOpts{Ctx: opts.Ctx}) + if err != nil { + return nil, nil, errors.Wrapf(err, "initial {{ upper_camel .Name }} list with new namespace") + } + initial{{ upper_camel .Name }}List = append(initial{{ upper_camel .Name }}List,{{ lower_camel .PluralName }}...) + {{ lower_camel .PluralName }}ByNamespace.Store(namespace, {{ lower_camel .PluralName }}) + } + {{ lower_camel .Name }}NamespacesChan, {{ lower_camel .Name }}Errs, err := c.{{ lower_camel .Name }}.Watch(namespace, clients.WatchOpts{Ctx: opts.Ctx}) + if err != nil { + return nil, nil, errors.Wrapf(err, "starting {{ upper_camel .Name }} watch") } - initial{{ upper_camel .Name }}List = append(initial{{ upper_camel .Name }}List, {{ lower_camel .PluralName }}...) 
- {{ lower_camel .PluralName }}ByNamespace[namespace] = {{ lower_camel .PluralName }} + + done.Add(1) + go func(namespace string) { + defer done.Done() + errutils.AggregateErrs(ctx, errs, {{ lower_camel .Name }}Errs, namespace+"-{{ lower_camel .PluralName }}") + }(namespace) +{{- end }} +{{- end }} + /* Watch for changes and update snapshot */ + go func(namespace string) { + for { + select { + case <-ctx.Done(): + return +{{- range .Resources }} +{{- if (not .ClusterScoped) }} + case {{ lower_camel .Name }}List, ok := <-{{ lower_camel .Name }}NamespacesChan: + if !ok { + return + } + select { + case <-ctx.Done(): + return + case {{ lower_camel .Name }}Chan <- {{ lower_camel .Name }}ListWithNamespace{list: {{ lower_camel .Name }}List, namespace: namespace}: + } +{{- end }} +{{- end }} + } + } + }(namespace) } - {{ lower_camel .Name }}NamespacesChan, {{ lower_camel .Name }}Errs, err := c.{{ lower_camel .Name }}.Watch(namespace, opts) - if err != nil { - return nil, nil, errors.Wrapf(err, "starting {{ .Name }} watch") + if len(newlyRegisteredNamespaces) > 0 { + contextutils.LoggerFrom(ctx).Infof("registered the new namespace %v", newlyRegisteredNamespaces) } - done.Add(1) - go func(namespace string) { - defer done.Done() - errutils.AggregateErrs(ctx, errs, {{ lower_camel .Name }}Errs, namespace+"-{{ lower_camel .PluralName }}") - }(namespace) + // create watch on all namespaces, so that we can add all resources from new namespaces + // we will be watching namespaces that meet the Expression Selector filter -{{- end }} -{{- end}} + namespaceWatch, errsReceiver, err := c.resourceNamespaceLister.GetResourceNamespaceWatch(namespaceWatchOptions, filterNamespaces) + if err != nil { + return nil, nil, err + } + if errsReceiver != nil { + go func() { + for { + select{ + case <-ctx.Done(): + return + case err = <- errsReceiver: + errs <- errors.Wrapf(err, "received error from watch on resource namespaces") + } + } + }() + } - /* Watch for changes and update snapshot */ - go func(namespace string) { + go func() { for { select { case <-ctx.Done(): return -{{- range .Resources}} -{{- if (not .ClusterScoped) }} - case {{ lower_camel .Name }}List, ok := <- {{ lower_camel .Name }}NamespacesChan: + case resourceNamespaces, ok := <-namespaceWatch: if !ok { return } - select { - case <-ctx.Done(): - return - case {{ lower_camel .Name }}Chan <- {{ lower_camel .Name }}ListWithNamespace{list:{{ lower_camel .Name }}List, namespace:namespace}: + // get the list of new namespaces, if there is a new namespace + // get the list of resources from that namespace, and add + // a watch for new resources created/deleted on that namespace + c.updateNamespaces.Lock() + + // get the new namespaces, and get a map of the namespaces + mapOfResourceNamespaces := make(map[string]struct{}, len(resourceNamespaces)) + newNamespaces := []string{} + for _, ns := range resourceNamespaces { + if _, hit := c.namespacesWatching.Load(ns.Name); !hit { + newNamespaces = append(newNamespaces, ns.Name) + } + mapOfResourceNamespaces[ns.Name] = struct{}{} + } + + for _, ns := range watchNamespaces { + mapOfResourceNamespaces[ns] = struct{}{} } + + missingNamespaces := []string{} + // use the map of namespace resources to find missing/deleted namespaces + c.namespacesWatching.Range(func(key interface{}, value interface{}) bool { + name := key.(string) + if _, hit := mapOfResourceNamespaces[name]; !hit { + missingNamespaces = append(missingNamespaces, name) + } + return true + }) + + for _, ns := range missingNamespaces { +{{- range .Resources}} 
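Stripped of template markup, the reconciliation above is a small set-difference computation. A hedged standalone sketch (function and parameter names are illustrative):

package example

import "sync"

// reconcileNamespaces compares the namespaces currently being watched against a fresh list
// from the namespace watch. Namespaces that are new need informers started; namespaces that
// disappeared need an empty list pushed so their resources drop out of the snapshot.
func reconcileNamespaces(watching *sync.Map, incoming []string, alwaysWatched []string) (newNs, missingNs []string) {
	current := make(map[string]struct{}, len(incoming))
	for _, ns := range incoming {
		if _, ok := watching.Load(ns); !ok {
			newNs = append(newNs, ns)
		}
		current[ns] = struct{}{}
	}
	// The explicitly watched namespaces are never treated as missing.
	for _, ns := range alwaysWatched {
		current[ns] = struct{}{}
	}
	watching.Range(func(key, _ interface{}) bool {
		name := key.(string)
		if _, ok := current[name]; !ok {
			missingNs = append(missingNs, name)
		}
		return true
	})
	return newNs, missingNs
}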
+{{- if (not .ClusterScoped) }} + {{ lower_camel .Name }}Chan <- {{ lower_camel .Name }}ListWithNamespace{list: {{ .ImportPrefix }}{{ .Name }}List{}, namespace: ns} {{- end }} -{{- end}} +{{- end }} + } + + for _, namespace := range newNamespaces { + var err error +{{- range .Resources }} +{{- if (not .ClusterScoped) }} + err = c.{{ lower_camel .Name }}.RegisterNamespace(namespace) + if err != nil { + errs <- errors.Wrapf(err, "there was an error registering the namespace to the {{ lower_camel .Name }}") + continue + } + /* Setup namespaced watch for {{ upper_camel .Name }} for new namespace */ + { + {{ lower_camel .PluralName }}, err := c.{{ lower_camel .Name }}.List(namespace, clients.ListOpts{Ctx: opts.Ctx, Selector: opts.Selector}) + if err != nil { + errs <- errors.Wrapf(err, "initial new namespace {{ upper_camel .Name }} list in namespace watch") + continue + } + {{ lower_camel .PluralName }}ByNamespace.Store(namespace, {{ lower_camel .PluralName }}) + } + {{ lower_camel .Name }}NamespacesChan, {{ lower_camel .Name }}Errs, err := c.{{ lower_camel .Name }}.Watch(namespace, clients.WatchOpts{Ctx: opts.Ctx, Selector: opts.Selector}) + if err != nil { + errs <- errors.Wrapf(err, "starting new namespace {{ upper_camel .Name }} watch") + continue + } + + done.Add(1) + go func(namespace string) { + defer done.Done() + errutils.AggregateErrs(ctx, errs, {{ lower_camel .Name }}Errs, namespace+"-new-namespace-{{ lower_camel .PluralName }}") + }(namespace) +{{- end }} +{{- end }} + /* Watch for changes and update snapshot */ + go func(namespace string) { + defer func() { + c.namespacesWatching.Delete(namespace) + }() + c.namespacesWatching.Store(namespace, true) + for { + select { + case <-ctx.Done(): + return +{{- range .Resources }} +{{- if (not .ClusterScoped) }} + case {{ lower_camel .Name }}List, ok := <-{{ lower_camel .Name }}NamespacesChan: + if !ok { + return + } + select { + case <-ctx.Done(): + return + case {{ lower_camel .Name }}Chan <- {{ lower_camel .Name }}ListWithNamespace{list: {{ lower_camel .Name }}List, namespace: namespace}: + } +{{- end }} +{{- end }} + } + } + }(namespace) + } + if len(newNamespaces) > 0 { + contextutils.LoggerFrom(ctx).Infof("registered the new namespace %v", newNamespaces) + } + c.updateNamespaces.Unlock() } } - }(namespace) + }() } - {{- range .Resources}} {{- if .ClusterScoped }} /* Setup cluster-wide watch for {{ .Name }} */ @@ -242,7 +501,11 @@ func (c *{{ lower_camel .GoName }}Emitter) Snapshots(watchNamespaces []string, o if err != nil { return nil, nil, errors.Wrapf(err, "initial {{ .Name }} list") } - {{ lower_camel .Name }}Chan, {{ lower_camel .Name }}Errs, err := c.{{ lower_camel .Name }}.Watch(opts) + // for Cluster scoped resources, we do not use Expression Selectors + {{ lower_camel .Name }}Chan, {{ lower_camel .Name }}Errs, err := c.{{ lower_camel .Name }}.Watch(clients.WatchOpts{ + Ctx: opts.Ctx, + Selector: opts.Selector, + }) if err != nil { return nil, nil, errors.Wrapf(err, "starting {{ .Name }} watch") } @@ -340,17 +603,20 @@ func (c *{{ lower_camel .GoName }}Emitter) Snapshots(watchNamespaces []string, o ) // merge lists by namespace - {{ lower_camel .PluralName }}ByNamespace[namespace] = {{ lower_camel .Name }}NamespacedList.list + {{ lower_camel .PluralName }}ByNamespace.Store(namespace, {{ lower_camel .Name }}NamespacedList.list) var {{ lower_camel .Name }}List {{ .ImportPrefix }}{{ .Name }}List - for _, {{ lower_camel .PluralName }} := range {{ lower_camel .PluralName }}ByNamespace { - {{ lower_camel .Name }}List = append({{ 
lower_camel .Name }}List, {{ lower_camel .PluralName }}...) - } + {{ lower_camel .PluralName }}ByNamespace.Range(func(key interface{}, value interface{}) bool { + mocks := value.({{ .ImportPrefix }}{{ .Name }}List) + {{ lower_camel .Name }}List = append({{ lower_camel .Name }}List, mocks...) + return true + }) currentSnapshot.{{ upper_camel .PluralName }} = {{ lower_camel .Name }}List.Sort() {{- end }} -{{- end}} +{{- end }} } } }() return snapshots, errs, nil } + `)) diff --git a/pkg/code-generator/codegen/templates/snapshot_emitter_test_template.go b/pkg/code-generator/codegen/templates/snapshot_emitter_test_template.go index 15dfe78c0..169cdf94f 100644 --- a/pkg/code-generator/codegen/templates/snapshot_emitter_test_template.go +++ b/pkg/code-generator/codegen/templates/snapshot_emitter_test_template.go @@ -20,8 +20,10 @@ package {{ .Project.ProjectConfig.Version }} {{- $clients := (join_str_slice $clients ", ") }} import ( + "bytes" "context" "os" + "fmt" "time" {{ .Imports }} @@ -32,6 +34,10 @@ import ( "github.com/solo-io/solo-kit/pkg/api/v1/clients" "github.com/solo-io/solo-kit/pkg/api/v1/clients/factory" "github.com/solo-io/solo-kit/pkg/api/v1/clients/memory" + "github.com/solo-io/solo-kit/pkg/api/v1/resources/core" + "github.com/solo-io/solo-kit/pkg/api/v1/clients/kube/cache" + "github.com/solo-io/solo-kit/pkg/api/external/kubernetes/namespace" + "github.com/solo-io/solo-kit/pkg/api/v1/resources" "github.com/solo-io/solo-kit/test/helpers" "github.com/solo-io/solo-kit/test/setup" "github.com/solo-io/k8s-utils/kubeutils" @@ -39,6 +45,9 @@ import ( kuberc "github.com/solo-io/solo-kit/pkg/api/v1/clients/kube" "k8s.io/client-go/rest" apiext "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // Needed to run tests in GKE _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" @@ -52,11 +61,21 @@ var _ = Describe("{{ upper_camel .Project.ProjectConfig.Version }}Emitter", func log.Printf("This test creates kubernetes resources and is disabled by default. 
To enable, set RUN_KUBE_TESTS=1 in your env.") return } + + type metadataGetter interface { + GetMetadata() *core.Metadata + } + var ( ctx context.Context - namespace1 string - namespace2 string + namespace1, namespace2 string + namespace3, namespace4 string + namespace5, namespace6 string name1, name2 = "angela"+helpers.RandString(3), "bob"+helpers.RandString(3) + name3, name4 = "susan" + helpers.RandString(3), "jim" + helpers.RandString(3) + name5 = "melisa" + helpers.RandString(3) + labels1 = map[string]string{"env": "test"} + labelExpression1 = "env in (test)" {{- if $need_kube_config }} cfg *rest.Config clientset *apiext.Clientset @@ -66,7 +85,330 @@ var _ = Describe("{{ upper_camel .Project.ProjectConfig.Version }}Emitter", func {{- range .Resources }} {{ lower_camel .Name }}Client {{ .ImportPrefix }}{{ .Name }}Client {{- end}} + resourceNamespaceLister resources.ResourceNamespaceLister + kubeCache cache.KubeCoreCache ) + const ( + TIME_BETWEEN_MESSAGES = 5 + ) +{{- range .Resources }} + New{{ .Name }}WithLabels := func(namespace, name string, labels map[string]string) (*{{ .ImportPrefix }}{{ .Name }}) { + resource := {{ .ImportPrefix }}New{{ .Name }}(namespace, name) + resource.GetMetadata().Labels = labels + return resource + } +{{- end }} + + createNamespaces := func(ctx context.Context, kube kubernetes.Interface, namespaces ...string) { + err := kubeutils.CreateNamespacesInParallel(ctx, kube, namespaces...) + Expect(err).NotTo(HaveOccurred()) + } + + createNamespaceWithLabel := func(ctx context.Context, kube kubernetes.Interface, namespace string, labels map[string]string) { + _, err := kube.CoreV1().Namespaces().Create(ctx, &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: namespace, + Labels: labels, + }, + }, metav1.CreateOptions{}) + Expect(err).ToNot(HaveOccurred()) + } + + + deleteNamespaces := func(ctx context.Context, kube kubernetes.Interface, namespaces ...string) { + err := kubeutils.DeleteNamespacesInParallelBlocking(ctx, kube, namespaces...) + Expect(err).NotTo(HaveOccurred()) + } + + // getNewNamespaces is used to generate new namespace names, so that we do not have to wait + // when deleting namespaces in runNamespacedSelectorsWithWatchNamespaces. Since + // runNamespacedSelectorsWithWatchNamespaces uses watchNamespaces set to namespace1 and + // namespace2, this will work. Because the emitter willl only be watching namespaces that are + // labeled. + getNewNamespaces := func() { + namespace3 = helpers.RandString(8) + namespace4 = helpers.RandString(8) + namespace5 = helpers.RandString(8) + namespace6 = helpers.RandString(8) + } + + // getNewNamespaces1and2 is used to generate new namespaces for namespace 1 and 2. 
+ // used for the same reason as getNewNamespaces() above + getNewNamespaces1and2 := func() { + namespace1 = helpers.RandString(8) + namespace2 = helpers.RandString(8) + } + + getMapOfNamespaceResources := func(getList func(string) ([]metadataGetter,error)) map[string][]string { + namespaces := []string{namespace1, namespace2, namespace3, namespace4, namespace5, namespace6} + namespaceResources := make(map[string][]string, len(namespaces)) + for _, ns := range namespaces { + list, _ := getList(ns) + for _, snap := range list { + snapMeta := snap.GetMetadata() + if _, hit := namespaceResources[snapMeta.Namespace]; hit { + namespaceResources[snap.GetMetadata().Namespace] = make([]string, 1) + } + namespaceResources[snapMeta.Namespace] = append(namespaceResources[snapMeta.Namespace], snapMeta.Name) + } + } + return namespaceResources + } + + findNonMatchingResources := func(matchList, findList []metadataGetter) map[string][]string { + nonMatching := make(map[string][]string) + for _, snap := range matchList { + snapMeta := snap.GetMetadata() + matched := false + for _,pre := range findList { + preMeta := pre.GetMetadata() + if preMeta.Namespace == snapMeta.Namespace && preMeta.Name == snapMeta.Name { + matched = true + break + } + } + if ! matched { + if _, hit := nonMatching[snapMeta.Namespace]; hit { + nonMatching[snap.GetMetadata().Namespace] = make([]string, 1) + } + nonMatching[snapMeta.Namespace] = append(nonMatching[snapMeta.Namespace], snapMeta.Name) + } + } + return nonMatching + } + + findMatchingResources := func(matchList, findList []metadataGetter) map[string][]string { + matching := make(map[string][]string) + for _, snap := range matchList { + snapMeta := snap.GetMetadata() + matched := false + for _,pre := range findList { + preMeta := pre.GetMetadata() + if preMeta.Namespace == snapMeta.Namespace && preMeta.Name == snapMeta.Name { + matched = true + break + } + } + if matched { + if _, hit := matching[snapMeta.Namespace]; hit { + matching[snap.GetMetadata().Namespace] = make([]string, 1) + } + matching[snapMeta.Namespace] = append(matching[snapMeta.Namespace], snapMeta.Name) + } + } + return matching + } + + getMapOfResources := func(listOfResources []metadataGetter) map[string][]string { + resources := make(map[string][]string) + for _, snap := range listOfResources { + snapMeta := snap.GetMetadata() + if _, hit := resources[snapMeta.Namespace]; hit { + resources[snap.GetMetadata().Namespace] = make([]string, 1) + } + resources[snapMeta.Namespace] = append(resources[snapMeta.Namespace], snapMeta.Name) + } + return resources + } + +{{- range .Resources }} +{{- if not .ClusterScoped }} + convert{{ .PluralName }}ToMetadataGetter := func(rl {{ .ImportPrefix }}{{ .Name }}List) []metadataGetter { + listConv := make([]metadataGetter, len(rl)) + for i, r := range rl { + listConv[i] = r + } + return listConv + } +{{- end }} +{{- end }} + + runNamespacedSelectorsWithWatchNamespaces := func() { + ctx := context.Background() + err := emitter.Register() + Expect(err).NotTo(HaveOccurred()) + + // There is an error here in the code. 
+ snapshots, errs, err := emitter.Snapshots([]string{namespace1, namespace2}, clients.WatchOpts{ + Ctx: ctx, + RefreshRate: time.Second, + ExpressionSelector: labelExpression1, + }) + Expect(err).NotTo(HaveOccurred()) + + var snap *{{ .GoName }}Snapshot + var previous *{{ .GoName }}Snapshot + +{{- range .Resources }} + + /* + {{ .Name }} + */ + assertSnapshot{{ .PluralName }} := func(expect{{ .PluralName }} {{ .ImportPrefix }}{{ .Name }}List, unexpect{{ .PluralName }} {{ .ImportPrefix }}{{ .Name }}List) { + drain: + for { + select { + case snap = <-snapshots: + previous = snap + for _, expected := range expect{{ .PluralName }} { + if _, err := snap.{{ upper_camel .PluralName }}.Find(expected.GetMetadata().Ref().Strings()); err != nil { + continue drain + } + } + for _, unexpected := range unexpect{{ .PluralName }} { + if _, err := snap.{{ upper_camel .PluralName }}.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + continue drain + } + } + break drain + case err := <-errs: + Expect(err).NotTo(HaveOccurred()) + case <-time.After(time.Second * 10): + {{- if .ClusterScoped }} + combined, _ := {{ lower_camel .Name }}Client.List(clients.ListOpts{}) + Fail("expected final snapshot before 10 seconds. expected " + log.Sprintf("%v", combined)) + {{- else }} + var expectedResources map[string][]string + var unexpectedResource map[string][]string + + if previous != nil { + expectedResources = findNonMatchingResources(convert{{ .PluralName }}ToMetadataGetter(expect{{ .PluralName }}), convert{{ .PluralName }}ToMetadataGetter(previous.{{ upper_camel .PluralName }})) + unexpectedResource = findMatchingResources(convert{{ .PluralName }}ToMetadataGetter(unexpect{{ .PluralName }}), convert{{ .PluralName }}ToMetadataGetter(previous.{{ upper_camel .PluralName }})) + } else { + expectedResources = getMapOfResources(convert{{ .PluralName }}ToMetadataGetter(expect{{ .PluralName }})) + unexpectedResource = getMapOfResources(convert{{ .PluralName }}ToMetadataGetter(unexpect{{ .PluralName }})) + } + getList := func (ns string) ([]metadataGetter, error) { + l, err := {{ lower_camel .Name }}Client.List(ns, clients.ListOpts{}) + return convert{{ .PluralName }}ToMetadataGetter(l), err + } + namespaceResources := getMapOfNamespaceResources(getList) + Fail(fmt.Sprintf("expected final snapshot before 10 seconds. 
expected \nExpected:\n%#v\n\nUnexpected:\n%#v\n\nnamespaces:\n%#v", expectedResources, unexpectedResource, namespaceResources)) + {{- end }} + } + } + } + +{{- if .ClusterScoped }} + + {{ lower_camel .Name }}1a, err := {{ lower_camel .Name }}Client.Write({{ .ImportPrefix }}New{{ .Name }}(namespace1, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + {{ lower_camel .Name }}Watched := {{ .ImportPrefix }}{{ .Name }}List{ {{ lower_camel .Name }}1a } + assertSnapshot{{ .PluralName }}({{ lower_camel .Name }}Watched, nil) + + {{ lower_camel .Name }}3a, err := {{ lower_camel .Name }}Client.Write(New{{ .Name }}WithLabels(namespace1, name3, labels1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + {{ lower_camel .Name }}Watched = append({{ lower_camel .Name }}Watched, {{ lower_camel .Name }}3a ) + assertSnapshot{{ .PluralName }}({{ lower_camel .Name }}Watched, nil) + +{{- else }} + + {{ lower_camel .Name }}1a, err := {{ lower_camel .Name }}Client.Write({{ .ImportPrefix }}New{{ .Name }}(namespace1, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + {{ lower_camel .Name }}1b, err := {{ lower_camel .Name }}Client.Write({{ .ImportPrefix }}New{{ .Name }}(namespace2, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + {{ lower_camel .Name }}Watched := {{ .ImportPrefix }}{{ .Name }}List{ {{ lower_camel .Name }}1a, {{ lower_camel .Name }}1b } + assertSnapshot{{ .PluralName }}({{ lower_camel .Name }}Watched, nil) + + {{ lower_camel .Name }}3a, err := {{ lower_camel .Name }}Client.Write(New{{ .Name }}WithLabels(namespace1, name3, labels1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + {{ lower_camel .Name }}3b, err := {{ lower_camel .Name }}Client.Write(New{{ .Name }}WithLabels(namespace2, name3, labels1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + {{ lower_camel .Name }}Watched = append({{ lower_camel .Name }}Watched, {{ .ImportPrefix }}{{ .Name }}List{ {{ lower_camel .Name }}3a, {{ lower_camel .Name }}3b }...) 
+ assertSnapshot{{ .PluralName }}({{ lower_camel .Name }}Watched, nil) + +{{- end }} + + createNamespaceWithLabel(ctx, kube, namespace3, labels1) + createNamespaces(ctx, kube, namespace4) + +{{- if .ClusterScoped }} + + {{ lower_camel .Name }}4a, err := {{ lower_camel .Name }}Client.Write({{ .ImportPrefix }}New{{ .Name }}(namespace3, name4), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + {{ lower_camel .Name }}Watched = append({{ lower_camel .Name }}Watched, {{ lower_camel .Name }}4a ) + assertSnapshot{{ .PluralName }}({{ lower_camel .Name }}Watched, nil) + + {{ lower_camel .Name }}5a, err := {{ lower_camel .Name }}Client.Write(New{{ .Name }}WithLabels(namespace3, name5, labels1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + {{ lower_camel .Name }}Watched = append({{ lower_camel .Name }}Watched, {{ lower_camel .Name }}5a ) + assertSnapshot{{ .PluralName }}({{ lower_camel .Name }}Watched, nil) + +{{- else }} + + {{ lower_camel .Name }}4a, err := {{ lower_camel .Name }}Client.Write({{ .ImportPrefix }}New{{ .Name }}(namespace3, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + {{ lower_camel .Name }}4b, err := {{ lower_camel .Name }}Client.Write({{ .ImportPrefix }}New{{ .Name }}(namespace4, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + {{ lower_camel .Name }}Watched = append({{ lower_camel .Name }}Watched, {{ lower_camel .Name }}4a) + {{ lower_camel .Name }}NotWatched := {{ .ImportPrefix }}{{ .Name }}List{ {{ lower_camel .Name }}4b } + assertSnapshot{{ .PluralName }}({{ lower_camel .Name }}Watched, {{ lower_camel .Name }}NotWatched) + + {{ lower_camel .Name }}5a, err := {{ lower_camel .Name }}Client.Write(New{{ .Name }}WithLabels(namespace3, name2, labels1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + {{ lower_camel .Name }}5b, err := {{ lower_camel .Name }}Client.Write(New{{ .Name }}WithLabels(namespace4, name2, labels1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + {{ lower_camel .Name }}Watched = append({{ lower_camel .Name }}Watched, {{ lower_camel .Name }}5a) + {{ lower_camel .Name }}NotWatched = append({{ lower_camel .Name }}NotWatched, {{ lower_camel .Name }}5b) + assertSnapshot{{ .PluralName }}({{ lower_camel .Name }}Watched, {{ lower_camel .Name }}NotWatched) + + for _, r := range {{ lower_camel .Name }}NotWatched { + err = {{ lower_camel .Name }}Client.Delete(r.GetMetadata().Namespace, r.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + } + +{{- end }} + +{{- if .ClusterScoped }} + + err = {{ lower_camel .Name }}Client.Delete({{ lower_camel .Name }}1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = {{ lower_camel .Name }}Client.Delete({{ lower_camel .Name }}3a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + {{ lower_camel .Name }}NotWatched := {{ .ImportPrefix }}{{ .Name }}List{ {{ lower_camel .Name }}1a, {{ lower_camel .Name }}3a } + {{ lower_camel .Name }}Watched = {{ .ImportPrefix }}{{ .Name }}List{ {{ lower_camel .Name }}4a, {{ lower_camel .Name }}5a} + assertSnapshot{{ .PluralName }}({{ lower_camel .Name }}Watched, {{ lower_camel .Name }}NotWatched) + + err = {{ lower_camel .Name }}Client.Delete({{ lower_camel .Name }}4a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = {{ lower_camel .Name }}Client.Delete({{ lower_camel .Name }}5a.GetMetadata().Name, clients.DeleteOpts{Ctx: 
ctx}) + Expect(err).NotTo(HaveOccurred()) + {{ lower_camel .Name }}NotWatched = append({{ lower_camel .Name }}NotWatched, {{ .ImportPrefix }}{{ .Name }}List{ {{ lower_camel .Name }}4a, {{ lower_camel .Name }}5a}...) + assertSnapshot{{ .PluralName }}(nil, {{ lower_camel .Name }}NotWatched) + +{{- else }} + + err = {{ lower_camel .Name }}Client.Delete({{ lower_camel .Name }}1a.GetMetadata().Namespace, {{ lower_camel .Name }}1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = {{ lower_camel .Name }}Client.Delete({{ lower_camel .Name }}1b.GetMetadata().Namespace, {{ lower_camel .Name }}1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + {{ lower_camel .Name }}NotWatched = append({{ lower_camel .Name }}NotWatched, {{ .ImportPrefix }}{{ .Name }}List{ {{ lower_camel .Name }}1a, {{ lower_camel .Name }}1b}...) + {{ lower_camel .Name }}Watched = {{ .ImportPrefix }}{{ .Name }}List{ {{ lower_camel .Name }}3a, {{ lower_camel .Name }}3b, {{ lower_camel .Name }}4a, {{ lower_camel .Name }}5a} + assertSnapshot{{ .PluralName }}({{ lower_camel .Name }}Watched, {{ lower_camel .Name }}NotWatched) + + err = {{ lower_camel .Name }}Client.Delete({{ lower_camel .Name }}3a.GetMetadata().Namespace, {{ lower_camel .Name }}3a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = {{ lower_camel .Name }}Client.Delete({{ lower_camel .Name }}3b.GetMetadata().Namespace, {{ lower_camel .Name }}3b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + {{ lower_camel .Name }}NotWatched = append({{ lower_camel .Name }}NotWatched, {{ .ImportPrefix }}{{ .Name }}List{ {{ lower_camel .Name }}3a, {{ lower_camel .Name }}3b}...) + {{ lower_camel .Name }}Watched = {{ .ImportPrefix }}{{ .Name }}List{ {{ lower_camel .Name }}4a, {{ lower_camel .Name }}5a} + assertSnapshot{{ .PluralName }}({{ lower_camel .Name }}Watched, {{ lower_camel .Name }}NotWatched) + + err = {{ lower_camel .Name }}Client.Delete({{ lower_camel .Name }}4a.GetMetadata().Namespace, {{ lower_camel .Name }}4a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = {{ lower_camel .Name }}Client.Delete({{ lower_camel .Name }}5a.GetMetadata().Namespace, {{ lower_camel .Name }}5a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + {{ lower_camel .Name }}NotWatched = append({{ lower_camel .Name }}NotWatched, {{ .ImportPrefix }}{{ .Name }}List{ {{ lower_camel .Name }}5a, {{ lower_camel .Name }}5b}...) 
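+	// once every tracked resource has been deleted, the final snapshot should contain none of them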
+ assertSnapshot{{ .PluralName }}(nil, {{ lower_camel .Name }}NotWatched) + +{{- end }} + + // clean up environment + deleteNamespaces(ctx, kube, namespace3, namespace4) + getNewNamespaces() + +{{- end }} + } BeforeEach(func() { err := os.Setenv(statusutils.PodNamespaceEnvName, "default") @@ -75,15 +417,26 @@ var _ = Describe("{{ upper_camel .Project.ProjectConfig.Version }}Emitter", func ctx = context.Background() namespace1 = helpers.RandString(8) namespace2 = helpers.RandString(8) + namespace3 = helpers.RandString(8) + namespace4 = helpers.RandString(8) + namespace5 = helpers.RandString(8) + namespace6 = helpers.RandString(8) + kube = helpers.MustKubeClient() - err = kubeutils.CreateNamespacesInParallel(ctx, kube, namespace1, namespace2) + kubeCache, err = cache.NewKubeCoreCache(context.TODO(), kube) Expect(err).NotTo(HaveOccurred()) + resourceNamespaceLister = namespace.NewKubeClientCacheResourceNamespaceLister(kube, kubeCache) + + createNamespaces(ctx, kube, namespace1, namespace2) + {{- if $need_kube_config }} + cfg, err = kubeutils.GetConfig("", "") Expect(err).NotTo(HaveOccurred()) clientset, err = apiext.NewForConfig(cfg) Expect(err).NotTo(HaveOccurred()) + {{- end}} {{- range .Resources }} @@ -108,14 +461,13 @@ var _ = Describe("{{ upper_camel .Project.ProjectConfig.Version }}Emitter", func {{ lower_camel .Name }}Client, err = {{ .ImportPrefix }}New{{ .Name }}Client(ctx, {{ lower_camel .Name }}ClientFactory) Expect(err).NotTo(HaveOccurred()) {{- end}} - emitter = New{{ .GoName }}Emitter({{ $clients }}) + emitter = New{{ .GoName }}Emitter({{ $clients }}, resourceNamespaceLister) }) AfterEach(func() { err := os.Unsetenv(statusutils.PodNamespaceEnvName) Expect(err).NotTo(HaveOccurred()) - err = kubeutils.DeleteNamespacesInParallelBlocking(ctx, kube, namespace1, namespace2) - Expect(err).NotTo(HaveOccurred()) + kubeutils.DeleteNamespacesInParallelBlocking(ctx, kube, namespace1, namespace2) {{- range .Resources }} {{- if .ClusterScoped }} @@ -125,227 +477,718 @@ var _ = Describe("{{ upper_camel .Project.ProjectConfig.Version }}Emitter", func {{- end }} }) - It("tracks snapshots on changes to any resource", func() { - ctx := context.Background() - err := emitter.Register() - Expect(err).NotTo(HaveOccurred()) + Context("Tracking watched namespaces", func () { + It("tracks snapshots on changes to any resource", func() { + ctx := context.Background() + err := emitter.Register() + Expect(err).NotTo(HaveOccurred()) - snapshots, errs, err := emitter.Snapshots([]string{namespace1, namespace2}, clients.WatchOpts{ - Ctx: ctx, - RefreshRate: time.Second, - }) - Expect(err).NotTo(HaveOccurred()) + snapshots, errs, err := emitter.Snapshots([]string{namespace1, namespace2}, clients.WatchOpts{ + Ctx: ctx, + RefreshRate: time.Second, + }) + Expect(err).NotTo(HaveOccurred()) - var snap *{{ .GoName }}Snapshot -{{- range .Resources }} + var snap *{{ .GoName }}Snapshot + {{- range .Resources }} - /* - {{ .Name }} - */ - - assertSnapshot{{ .PluralName }} := func(expect{{ .PluralName }} {{ .ImportPrefix }}{{ .Name }}List, unexpect{{ .PluralName }} {{ .ImportPrefix }}{{ .Name }}List) { - drain: - for { - select { - case snap = <-snapshots: - for _, expected := range expect{{ .PluralName }} { - if _, err := snap.{{ upper_camel .PluralName }}.Find(expected.GetMetadata().Ref().Strings()); err != nil { - continue drain + /* + {{ .Name }} + */ + + assertSnapshot{{ .PluralName }} := func(expect{{ .PluralName }} {{ .ImportPrefix }}{{ .Name }}List, unexpect{{ .PluralName }} {{ .ImportPrefix }}{{ .Name }}List) { + 
drain: + for { + select { + case snap = <-snapshots: + for _, expected := range expect{{ .PluralName }} { + if _, err := snap.{{ upper_camel .PluralName }}.Find(expected.GetMetadata().Ref().Strings()); err != nil { + continue drain + } } + for _, unexpected := range unexpect{{ .PluralName }} { + if _, err := snap.{{ upper_camel .PluralName }}.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + continue drain + } + } + break drain + case err := <-errs: + Expect(err).NotTo(HaveOccurred()) + case <-time.After(time.Second * 10): + {{- if .ClusterScoped }} + combined, _ := {{ lower_camel .Name }}Client.List(clients.ListOpts{}) + {{- else }} + nsList1, _ := {{ lower_camel .Name }}Client.List(namespace1, clients.ListOpts{}) + nsList2, _ := {{ lower_camel .Name }}Client.List(namespace2, clients.ListOpts{}) + combined := append(nsList1, nsList2...) + {{- end }} + Fail("expected final snapshot before 10 seconds. expected " + log.Sprintf("%v", combined)) } - for _, unexpected := range unexpect{{ .PluralName }} { - if _, err := snap.{{ upper_camel .PluralName }}.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { - continue drain + } + } + + {{- if .ClusterScoped }} + {{ lower_camel .Name }}1a, err := {{ lower_camel .Name }}Client.Write({{ .ImportPrefix }}New{{ .Name }}(namespace1, name5), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshot{{ .PluralName }}({{ .ImportPrefix }}{{ .Name }}List{ {{ lower_camel .Name }}1a }, nil) + {{- else }} + {{ lower_camel .Name }}1a, err := {{ lower_camel .Name }}Client.Write({{ .ImportPrefix }}New{{ .Name }}(namespace1, name5), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + {{ lower_camel .Name }}1b, err := {{ lower_camel .Name }}Client.Write({{ .ImportPrefix }}New{{ .Name }}(namespace2, name5), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshot{{ .PluralName }}({{ .ImportPrefix }}{{ .Name }}List{ {{ lower_camel .Name }}1a, {{ lower_camel .Name }}1b }, nil) + {{- end }} + + {{- if .ClusterScoped }} + {{ lower_camel .Name }}2a, err := {{ lower_camel .Name }}Client.Write({{ .ImportPrefix }}New{{ .Name }}(namespace1, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshot{{ .PluralName }}({{ .ImportPrefix }}{{ .Name }}List{ {{ lower_camel .Name }}1a, {{ lower_camel .Name }}2a }, nil) + {{- else }} + {{ lower_camel .Name }}2a, err := {{ lower_camel .Name }}Client.Write({{ .ImportPrefix }}New{{ .Name }}(namespace1, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + {{ lower_camel .Name }}2b, err := {{ lower_camel .Name }}Client.Write({{ .ImportPrefix }}New{{ .Name }}(namespace2, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshot{{ .PluralName }}({{ .ImportPrefix }}{{ .Name }}List{ {{ lower_camel .Name }}1a, {{ lower_camel .Name }}1b, {{ lower_camel .Name }}2a, {{ lower_camel .Name }}2b }, nil) + {{- end }} + + {{- if .ClusterScoped }} + + err = {{ lower_camel .Name }}Client.Delete({{ lower_camel .Name }}2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshot{{ .PluralName }}({{ .ImportPrefix }}{{ .Name }}List{ {{ lower_camel .Name }}1a }, {{ .ImportPrefix }}{{ .Name }}List{ {{ lower_camel .Name }}2a }) + + {{- else }} + + err = {{ lower_camel .Name }}Client.Delete({{ lower_camel .Name }}2a.GetMetadata().Namespace, {{ lower_camel .Name }}2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + 
Expect(err).NotTo(HaveOccurred()) + err = {{ lower_camel .Name }}Client.Delete({{ lower_camel .Name }}2b.GetMetadata().Namespace, {{ lower_camel .Name }}2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshot{{ .PluralName }}({{ .ImportPrefix }}{{ .Name }}List{ {{ lower_camel .Name }}1a, {{ lower_camel .Name }}1b }, {{ .ImportPrefix }}{{ .Name }}List{ {{ lower_camel .Name }}2a, {{ lower_camel .Name }}2b }) + {{- end }} + + {{- if .ClusterScoped }} + + err = {{ lower_camel .Name }}Client.Delete({{ lower_camel .Name }}1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshot{{ .PluralName }}(nil, {{ .ImportPrefix }}{{ .Name }}List{ {{ lower_camel .Name }}1a, {{ lower_camel .Name }}2a }) + {{- else }} + + err = {{ lower_camel .Name }}Client.Delete({{ lower_camel .Name }}1a.GetMetadata().Namespace, {{ lower_camel .Name }}1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = {{ lower_camel .Name }}Client.Delete({{ lower_camel .Name }}1b.GetMetadata().Namespace, {{ lower_camel .Name }}1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshot{{ .PluralName }}(nil, {{ .ImportPrefix }}{{ .Name }}List{ {{ lower_camel .Name }}1a, {{ lower_camel .Name }}1b, {{ lower_camel .Name }}2a, {{ lower_camel .Name }}2b }) + {{- end }} + {{- end}} + }) + + It("should be able to track all resources that are on labeled namespaces", func() { + runNamespacedSelectorsWithWatchNamespaces() + }) + }) + + Context("Tracking empty watched namespaces", func () { + It("tracks snapshots on changes to any resource using AllNamespace", func() { + ctx := context.Background() + err := emitter.Register() + Expect(err).NotTo(HaveOccurred()) + + snapshots, errs, err := emitter.Snapshots([]string{""}, clients.WatchOpts{ + Ctx: ctx, + RefreshRate: time.Second, + }) + Expect(err).NotTo(HaveOccurred()) + + var snap *{{ .GoName }}Snapshot + {{- range .Resources }} + + /* + {{ .Name }} + */ + + assertSnapshot{{ .PluralName }} := func(expect{{ .PluralName }} {{ .ImportPrefix }}{{ .Name }}List, unexpect{{ .PluralName }} {{ .ImportPrefix }}{{ .Name }}List) { + drain: + for { + select { + case snap = <-snapshots: + for _, expected := range expect{{ .PluralName }} { + if _, err := snap.{{ upper_camel .PluralName }}.Find(expected.GetMetadata().Ref().Strings()); err != nil { + continue drain + } + } + for _, unexpected := range unexpect{{ .PluralName }} { + if _, err := snap.{{ upper_camel .PluralName }}.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + continue drain + } } + break drain + case err := <-errs: + Expect(err).NotTo(HaveOccurred()) + case <-time.After(time.Second * 10): + {{- if .ClusterScoped }} + combined, _ := {{ lower_camel .Name }}Client.List(clients.ListOpts{}) + {{- else }} + nsList1, _ := {{ lower_camel .Name }}Client.List(namespace1, clients.ListOpts{}) + nsList2, _ := {{ lower_camel .Name }}Client.List(namespace2, clients.ListOpts{}) + combined := append(nsList1, nsList2...) + {{- end }} + Fail("expected final snapshot before 10 seconds. 
expected " + log.Sprintf("%v", combined)) } - break drain - case err := <-errs: - Expect(err).NotTo(HaveOccurred()) - case <-time.After(time.Second * 10): + } + } + + {{- if .ClusterScoped }} + + {{ lower_camel .Name }}1a, err := {{ lower_camel .Name }}Client.Write({{ .ImportPrefix }}New{{ .Name }}(namespace1, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + assertSnapshot{{ .PluralName }}({{ .ImportPrefix }}{{ .Name }}List{ {{ lower_camel .Name }}1a }, nil) + + {{- else }} + + {{ lower_camel .Name }}1a, err := {{ lower_camel .Name }}Client.Write({{ .ImportPrefix }}New{{ .Name }}(namespace1, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + {{ lower_camel .Name }}1b, err := {{ lower_camel .Name }}Client.Write({{ .ImportPrefix }}New{{ .Name }}(namespace2, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + assertSnapshot{{ .PluralName }}({{ .ImportPrefix }}{{ .Name }}List{ {{ lower_camel .Name }}1a, {{ lower_camel .Name }}1b }, nil) + + {{- end }} + + {{- if .ClusterScoped }} + + {{ lower_camel .Name }}2a, err := {{ lower_camel .Name }}Client.Write({{ .ImportPrefix }}New{{ .Name }}(namespace1, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + assertSnapshot{{ .PluralName }}({{ .ImportPrefix }}{{ .Name }}List{ {{ lower_camel .Name }}1a, {{ lower_camel .Name }}2a }, nil) + + {{- else }} + + {{ lower_camel .Name }}2a, err := {{ lower_camel .Name }}Client.Write({{ .ImportPrefix }}New{{ .Name }}(namespace1, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + {{ lower_camel .Name }}2b, err := {{ lower_camel .Name }}Client.Write({{ .ImportPrefix }}New{{ .Name }}(namespace2, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + assertSnapshot{{ .PluralName }}({{ .ImportPrefix }}{{ .Name }}List{ {{ lower_camel .Name }}1a, {{ lower_camel .Name }}1b, {{ lower_camel .Name }}2a, {{ lower_camel .Name }}2b }, nil) + + {{- end }} + + {{- if .ClusterScoped }} + + err = {{ lower_camel .Name }}Client.Delete({{ lower_camel .Name }}2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + assertSnapshot{{ .PluralName }}({{ .ImportPrefix }}{{ .Name }}List{ {{ lower_camel .Name }}1a }, {{ .ImportPrefix }}{{ .Name }}List{ {{ lower_camel .Name }}2a }) + + {{- else }} + + err = {{ lower_camel .Name }}Client.Delete({{ lower_camel .Name }}2a.GetMetadata().Namespace, {{ lower_camel .Name }}2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = {{ lower_camel .Name }}Client.Delete({{ lower_camel .Name }}2b.GetMetadata().Namespace, {{ lower_camel .Name }}2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + assertSnapshot{{ .PluralName }}({{ .ImportPrefix }}{{ .Name }}List{ {{ lower_camel .Name }}1a, {{ lower_camel .Name }}1b }, {{ .ImportPrefix }}{{ .Name }}List{ {{ lower_camel .Name }}2a, {{ lower_camel .Name }}2b }) + + {{- end }} + + {{- if .ClusterScoped }} + + err = {{ lower_camel .Name }}Client.Delete({{ lower_camel .Name }}1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + assertSnapshot{{ .PluralName }}(nil, {{ .ImportPrefix }}{{ .Name }}List{ {{ lower_camel .Name }}1a, {{ lower_camel .Name }}2a }) + + {{- else }} + + err = {{ lower_camel .Name }}Client.Delete({{ lower_camel .Name }}1a.GetMetadata().Namespace, {{ lower_camel .Name }}1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + 
err = {{ lower_camel .Name }}Client.Delete({{ lower_camel .Name }}1b.GetMetadata().Namespace, {{ lower_camel .Name }}1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + assertSnapshot{{ .PluralName }}(nil, {{ .ImportPrefix }}{{ .Name }}List{ {{ lower_camel .Name }}1a, {{ lower_camel .Name }}1b, {{ lower_camel .Name }}2a, {{ lower_camel .Name }}2b }) + + {{- end }} + {{- end}} + }) + + It("should be able to track resources only made with the matching labels", func() { + ctx := context.Background() + err := emitter.Register() + Expect(err).NotTo(HaveOccurred()) + + snapshots, errs, err := emitter.Snapshots([]string{""}, clients.WatchOpts{ + Ctx: ctx, + RefreshRate: time.Second, + ExpressionSelector: labelExpression1, + }) + Expect(err).NotTo(HaveOccurred()) + + var snap *{{ .GoName }}Snapshot + var previous *{{ .GoName }}Snapshot + +{{- range .Resources }} + + /* + {{ .Name }} + */ + + assertSnapshot{{ .PluralName }} := func(expect{{ .PluralName }} {{ .ImportPrefix }}{{ .Name }}List, unexpect{{ .PluralName }} {{ .ImportPrefix }}{{ .Name }}List) { + drain: + for { + select { + case snap = <-snapshots: + previous = snap + for _, expected := range expect{{ .PluralName }} { + if _, err := snap.{{ upper_camel .PluralName }}.Find(expected.GetMetadata().Ref().Strings()); err != nil { + continue drain + } + } + for _, unexpected := range unexpect{{ .PluralName }} { + if _, err := snap.{{ upper_camel .PluralName }}.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + continue drain + } + } + break drain + case err := <-errs: + Expect(err).NotTo(HaveOccurred()) + case <-time.After(time.Second * 10): {{- if .ClusterScoped }} - combined, _ := {{ lower_camel .Name }}Client.List(clients.ListOpts{}) + combined, _ := {{ lower_camel .Name }}Client.List(clients.ListOpts{}) + Fail("expected final snapshot before 10 seconds. expected " + log.Sprintf("%v", combined)) {{- else }} - nsList1, _ := {{ lower_camel .Name }}Client.List(namespace1, clients.ListOpts{}) - nsList2, _ := {{ lower_camel .Name }}Client.List(namespace2, clients.ListOpts{}) - combined := append(nsList1, nsList2...) + + var expectedResources map[string][]string + var unexpectedResource map[string][]string + + if previous != nil { + expectedResources = findNonMatchingResources(convert{{ .PluralName }}ToMetadataGetter(expect{{ .PluralName }}), convert{{ .PluralName }}ToMetadataGetter(previous.{{ upper_camel .PluralName }})) + unexpectedResource = findMatchingResources(convert{{ .PluralName }}ToMetadataGetter(unexpect{{ .PluralName }}), convert{{ .PluralName }}ToMetadataGetter(previous.{{ upper_camel .PluralName }})) + } else { + expectedResources = getMapOfResources(convert{{ .PluralName }}ToMetadataGetter(expect{{ .PluralName }})) + unexpectedResource = getMapOfResources(convert{{ .PluralName }}ToMetadataGetter(unexpect{{ .PluralName }})) + } + getList := func (ns string) ([]metadataGetter, error) { + l, err := {{ lower_camel .Name }}Client.List(ns, clients.ListOpts{}) + return convert{{ .PluralName }}ToMetadataGetter(l), err + } + namespaceResources := getMapOfNamespaceResources(getList) + Fail(fmt.Sprintf("expected final snapshot before 10 seconds. expected \nExpected:\n%#v\n\nUnexpected:\n%#v\n\nnamespaces:\n%#v", expectedResources, unexpectedResource, namespaceResources)) {{- end }} - Fail("expected final snapshot before 10 seconds. 
expected " + log.Sprintf("%v", combined)) - } - } - } + } + } + } + {{- if .ClusterScoped }} - {{ lower_camel .Name }}1a, err := {{ lower_camel .Name }}Client.Write({{ .ImportPrefix }}New{{ .Name }}(namespace1, name1), clients.WriteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - assertSnapshot{{ .PluralName }}({{ .ImportPrefix }}{{ .Name }}List{ {{ lower_camel .Name }}1a }, nil) + {{ lower_camel .Name }}1a, err := {{ lower_camel .Name }}Client.Write({{ .ImportPrefix }}New{{ .Name }}(namespace1, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + {{ lower_camel .Name }}Watched := {{ .ImportPrefix }}{{ .Name }}List{ {{ lower_camel .Name }}1a } + assertSnapshot{{ .PluralName }}({{ lower_camel .Name }}Watched, nil) + {{- else }} - {{ lower_camel .Name }}1a, err := {{ lower_camel .Name }}Client.Write({{ .ImportPrefix }}New{{ .Name }}(namespace1, name1), clients.WriteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - {{ lower_camel .Name }}1b, err := {{ lower_camel .Name }}Client.Write({{ .ImportPrefix }}New{{ .Name }}(namespace2, name1), clients.WriteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - assertSnapshot{{ .PluralName }}({{ .ImportPrefix }}{{ .Name }}List{ {{ lower_camel .Name }}1a, {{ lower_camel .Name }}1b }, nil) + {{ lower_camel .Name }}1a, err := {{ lower_camel .Name }}Client.Write({{ .ImportPrefix }}New{{ .Name }}(namespace1, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + {{ lower_camel .Name }}1b, err := {{ lower_camel .Name }}Client.Write({{ .ImportPrefix }}New{{ .Name }}(namespace2, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + {{ lower_camel .Name }}NotWatched := {{ .ImportPrefix }}{{ .Name }}List{ {{ lower_camel .Name }}1a, {{ lower_camel .Name }}1b } + {{- end }} + createNamespaceWithLabel(ctx, kube, namespace3, labels1) + createNamespaceWithLabel(ctx, kube, namespace4, labels1) + {{- if .ClusterScoped }} - {{ lower_camel .Name }}2a, err := {{ lower_camel .Name }}Client.Write({{ .ImportPrefix }}New{{ .Name }}(namespace1, name2), clients.WriteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - assertSnapshot{{ .PluralName }}({{ .ImportPrefix }}{{ .Name }}List{ {{ lower_camel .Name }}1a, {{ lower_camel .Name }}2a }, nil) + {{ lower_camel .Name }}2a, err := {{ lower_camel .Name }}Client.Write({{ .ImportPrefix }}New{{ .Name }}(namespace3, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + {{ lower_camel .Name }}Watched = append({{ lower_camel .Name }}Watched, {{ lower_camel .Name }}2a ) + assertSnapshot{{ .PluralName }}({{ lower_camel .Name }}Watched, nil) + {{- else }} - {{ lower_camel .Name }}2a, err := {{ lower_camel .Name }}Client.Write({{ .ImportPrefix }}New{{ .Name }}(namespace1, name2), clients.WriteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - {{ lower_camel .Name }}2b, err := {{ lower_camel .Name }}Client.Write({{ .ImportPrefix }}New{{ .Name }}(namespace2, name2), clients.WriteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - assertSnapshot{{ .PluralName }}({{ .ImportPrefix }}{{ .Name }}List{ {{ lower_camel .Name }}1a, {{ lower_camel .Name }}1b, {{ lower_camel .Name }}2a, {{ lower_camel .Name }}2b }, nil) + {{ lower_camel .Name }}2a, err := {{ lower_camel .Name }}Client.Write({{ .ImportPrefix }}New{{ .Name }}(namespace3, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + {{ lower_camel .Name }}2b, err := {{ lower_camel .Name }}Client.Write({{ .ImportPrefix }}New{{ .Name }}(namespace4, name1), clients.WriteOpts{Ctx: 
ctx}) + Expect(err).NotTo(HaveOccurred()) + {{ lower_camel .Name }}Watched := {{ .ImportPrefix }}{{ .Name }}List{ {{ lower_camel .Name }}2a, {{ lower_camel .Name }}2b} + assertSnapshot{{ .PluralName }}({{ lower_camel .Name }}Watched, {{ lower_camel .Name }}NotWatched) + {{- end }} + createNamespaces(ctx, kube, namespace5) + createNamespaceWithLabel(ctx, kube, namespace6, labels1) + {{- if .ClusterScoped }} - err = {{ lower_camel .Name }}Client.Delete({{ lower_camel .Name }}2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) + {{ lower_camel .Name }}5a, err := {{ lower_camel .Name }}Client.Write({{ .ImportPrefix }}New{{ .Name }}(namespace5, name5), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + {{ lower_camel .Name }}Watched = append({{ lower_camel .Name }}Watched, {{ lower_camel .Name }}5a) + assertSnapshot{{ .PluralName }}({{ lower_camel .Name }}Watched, nil) - assertSnapshot{{ .PluralName }}({{ .ImportPrefix }}{{ .Name }}List{ {{ lower_camel .Name }}1a }, {{ .ImportPrefix }}{{ .Name }}List{ {{ lower_camel .Name }}2a }) {{- else }} - err = {{ lower_camel .Name }}Client.Delete({{ lower_camel .Name }}2a.GetMetadata().Namespace, {{ lower_camel .Name }}2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - err = {{ lower_camel .Name }}Client.Delete({{ lower_camel .Name }}2b.GetMetadata().Namespace, {{ lower_camel .Name }}2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) + {{ lower_camel .Name }}5a, err := {{ lower_camel .Name }}Client.Write({{ .ImportPrefix }}New{{ .Name }}(namespace5, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + {{ lower_camel .Name }}5b, err := {{ lower_camel .Name }}Client.Write({{ .ImportPrefix }}New{{ .Name }}(namespace6, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + {{ lower_camel .Name }}NotWatched = append({{ lower_camel .Name }}NotWatched, {{ lower_camel .Name }}5a) + {{ lower_camel .Name }}Watched = append({{ lower_camel .Name }}Watched, {{ lower_camel .Name }}5b) + assertSnapshot{{ .PluralName }}({{ lower_camel .Name }}Watched, {{ lower_camel .Name }}NotWatched) + + {{ lower_camel .Name }}7a, err := {{ lower_camel .Name }}Client.Write({{ .ImportPrefix }}New{{ .Name }}(namespace5, name4), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + {{ lower_camel .Name }}7b, err := {{ lower_camel .Name }}Client.Write({{ .ImportPrefix }}New{{ .Name }}(namespace6, name4), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + {{ lower_camel .Name }}NotWatched = append({{ lower_camel .Name }}NotWatched, {{ lower_camel .Name }}7a) + {{ lower_camel .Name }}Watched = append({{ lower_camel .Name }}Watched, {{ lower_camel .Name }}7b) + assertSnapshot{{ .PluralName }}({{ lower_camel .Name }}Watched, {{ lower_camel .Name }}NotWatched) + + for _, r := range {{ lower_camel .Name }}NotWatched { + err = {{ lower_camel .Name }}Client.Delete(r.GetMetadata().Namespace, r.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + } - assertSnapshot{{ .PluralName }}({{ .ImportPrefix }}{{ .Name }}List{ {{ lower_camel .Name }}1a, {{ lower_camel .Name }}1b }, {{ .ImportPrefix }}{{ .Name }}List{ {{ lower_camel .Name }}2a, {{ lower_camel .Name }}2b }) {{- end }} {{- if .ClusterScoped }} - err = {{ lower_camel .Name }}Client.Delete({{ lower_camel .Name }}1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) + err = {{ 
lower_camel .Name }}Client.Delete({{ lower_camel .Name }}1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + {{ lower_camel .Name }}Watched = {{ .ImportPrefix }}{{ .Name }}List{ {{ lower_camel .Name }}2a, {{ lower_camel .Name }}5a } + {{ lower_camel .Name }}NotWatched := {{ .ImportPrefix }}{{ .Name }}List{ {{ lower_camel .Name }}1a } + assertSnapshot{{ .PluralName }}({{ lower_camel .Name }}Watched, {{ lower_camel .Name }}NotWatched) + + err = {{ lower_camel .Name }}Client.Delete({{ lower_camel .Name }}2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + {{ lower_camel .Name }}Watched = {{ .ImportPrefix }}{{ .Name }}List{ {{ lower_camel .Name }}5a } + {{ lower_camel .Name }}NotWatched = append({{ lower_camel .Name }}NotWatched, {{ lower_camel .Name }}2a) + assertSnapshot{{ .PluralName }}({{ lower_camel .Name }}Watched, {{ lower_camel .Name }}NotWatched) + + err = {{ lower_camel .Name }}Client.Delete({{ lower_camel .Name }}5a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + {{ lower_camel .Name }}NotWatched = append({{ lower_camel .Name }}NotWatched, {{ lower_camel .Name }}5a) + assertSnapshot{{ .PluralName }}(nil, {{ lower_camel .Name }}NotWatched) - assertSnapshot{{ .PluralName }}(nil, {{ .ImportPrefix }}{{ .Name }}List{ {{ lower_camel .Name }}1a, {{ lower_camel .Name }}2a }) {{- else }} - err = {{ lower_camel .Name }}Client.Delete({{ lower_camel .Name }}1a.GetMetadata().Namespace, {{ lower_camel .Name }}1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - err = {{ lower_camel .Name }}Client.Delete({{ lower_camel .Name }}1b.GetMetadata().Namespace, {{ lower_camel .Name }}1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) + for _, r := range {{ lower_camel .Name }}Watched { + err = {{ lower_camel .Name }}Client.Delete(r.GetMetadata().Namespace, r.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + {{ lower_camel .Name }}NotWatched = append({{ lower_camel .Name }}NotWatched, r) + } + assertSnapshot{{ .PluralName }}(nil, {{ lower_camel .Name }}NotWatched) - assertSnapshot{{ .PluralName }}(nil, {{ .ImportPrefix }}{{ .Name }}List{ {{ lower_camel .Name }}1a, {{ lower_camel .Name }}1b, {{ lower_camel .Name }}2a, {{ lower_camel .Name }}2b }) {{- end }} -{{- end}} - }) - It("tracks snapshots on changes to any resource using AllNamespace", func() { - ctx := context.Background() - err := emitter.Register() - Expect(err).NotTo(HaveOccurred()) + // clean up environment + deleteNamespaces(ctx, kube, namespace3, namespace4, namespace5, namespace6) + getNewNamespaces() - snapshots, errs, err := emitter.Snapshots([]string{""}, clients.WatchOpts{ - Ctx: ctx, - RefreshRate: time.Second, +{{- end }} }) - Expect(err).NotTo(HaveOccurred()) + }) + + Context("Tracking resources on namespaces that are deleted", func () { + It("Should not contain resources from a deleted namespace", func () { + ctx := context.Background() + err := emitter.Register() + Expect(err).NotTo(HaveOccurred()) + + snapshots, errs, err := emitter.Snapshots([]string{""}, clients.WatchOpts{ + Ctx: ctx, + RefreshRate: time.Second, + }) + Expect(err).NotTo(HaveOccurred()) + + var snap *{{ .GoName }}Snapshot - var snap *{{ .GoName }}Snapshot {{- range .Resources }} - /* - {{ .Name }} - */ - - assertSnapshot{{ .PluralName }} := func(expect{{ .PluralName }} {{ .ImportPrefix }}{{ .Name }}List, unexpect{{ .PluralName }} {{ 
.ImportPrefix }}{{ .Name }}List) { - drain: - for { - select { - case snap = <-snapshots: - for _, expected := range expect{{ .PluralName }} { - if _, err := snap.{{ upper_camel .PluralName }}.Find(expected.GetMetadata().Ref().Strings()); err != nil { - continue drain - } - } - for _, unexpected := range unexpect{{ .PluralName }} { - if _, err := snap.{{ upper_camel .PluralName }}.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { - continue drain - } - } - break drain - case err := <-errs: - Expect(err).NotTo(HaveOccurred()) - case <-time.After(time.Second * 10): + /* + {{ .Name }} + */ + assertSnapshot{{ .PluralName }} := func(expect{{ .PluralName }} {{ .ImportPrefix }}{{ .Name }}List, unexpect{{ .PluralName }} {{ .ImportPrefix }}{{ .Name }}List) { + drain: + for { + select { + case snap = <-snapshots: + for _, expected := range expect{{ .PluralName }} { + if _, err := snap.{{ upper_camel .PluralName }}.Find(expected.GetMetadata().Ref().Strings()); err != nil { + continue drain + } + } + for _, unexpected := range unexpect{{ .PluralName }} { + if _, err := snap.{{ upper_camel .PluralName }}.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + continue drain + } + } + break drain + case err := <-errs: + Expect(err).NotTo(HaveOccurred()) + case <-time.After(time.Second * 10): {{- if .ClusterScoped }} - combined, _ := {{ lower_camel .Name }}Client.List(clients.ListOpts{}) + combined, _ := {{ lower_camel .Name }}Client.List(clients.ListOpts{}) {{- else }} - nsList1, _ := {{ lower_camel .Name }}Client.List(namespace1, clients.ListOpts{}) - nsList2, _ := {{ lower_camel .Name }}Client.List(namespace2, clients.ListOpts{}) - combined := append(nsList1, nsList2...) + nsList1, _ := {{ lower_camel .Name }}Client.List(namespace1, clients.ListOpts{}) + nsList2, _ := {{ lower_camel .Name }}Client.List(namespace2, clients.ListOpts{}) + combined := append(nsList1, nsList2...) {{- end }} - Fail("expected final snapshot before 10 seconds. expected " + log.Sprintf("%v", combined)) - } + Fail("expected final snapshot before 10 seconds. 
expected " + log.Sprintf("%v", combined)) + } + } } - } -{{- if .ClusterScoped }} - {{ lower_camel .Name }}1a, err := {{ lower_camel .Name }}Client.Write({{ .ImportPrefix }}New{{ .Name }}(namespace1, name1), clients.WriteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) + {{ lower_camel .Name }}1a, err := {{ lower_camel .Name }}Client.Write({{ .ImportPrefix }}New{{ .Name }}(namespace1, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + {{ lower_camel .Name }}1b, err := {{ lower_camel .Name }}Client.Write({{ .ImportPrefix }}New{{ .Name }}(namespace2, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + {{ lower_camel .Name }}Watched := {{ .ImportPrefix }}{{ .Name }}List{ {{ lower_camel .Name }}1a, {{ lower_camel .Name }}1b} + assertSnapshot{{ .PluralName }}({{ lower_camel .Name }}Watched, nil) - assertSnapshot{{ .PluralName }}({{ .ImportPrefix }}{{ .Name }}List{ {{ lower_camel .Name }}1a }, nil) +{{- if .ClusterScoped }} + err = {{ lower_camel .Name }}Client.Delete({{ lower_camel .Name }}1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = {{ lower_camel .Name }}Client.Delete({{ lower_camel .Name }}1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) {{- else }} - {{ lower_camel .Name }}1a, err := {{ lower_camel .Name }}Client.Write({{ .ImportPrefix }}New{{ .Name }}(namespace1, name1), clients.WriteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - {{ lower_camel .Name }}1b, err := {{ lower_camel .Name }}Client.Write({{ .ImportPrefix }}New{{ .Name }}(namespace2, name1), clients.WriteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - - assertSnapshot{{ .PluralName }}({{ .ImportPrefix }}{{ .Name }}List{ {{ lower_camel .Name }}1a, {{ lower_camel .Name }}1b }, nil) + err = {{ lower_camel .Name }}Client.Delete({{ lower_camel .Name }}1a.GetMetadata().Namespace, {{ lower_camel .Name }}1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = {{ lower_camel .Name }}Client.Delete({{ lower_camel .Name }}1b.GetMetadata().Namespace, {{ lower_camel .Name }}1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) {{- end }} -{{- if .ClusterScoped }} - {{ lower_camel .Name }}2a, err := {{ lower_camel .Name }}Client.Write({{ .ImportPrefix }}New{{ .Name }}(namespace1, name2), clients.WriteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) + {{ lower_camel .Name }}NotWatched := {{ .ImportPrefix }}{{ .Name }}List{ {{ lower_camel .Name }}1a, {{ lower_camel .Name }}1b} + assertSnapshot{{ .PluralName }}(nil, {{ lower_camel .Name }}NotWatched) - assertSnapshot{{ .PluralName }}({{ .ImportPrefix }}{{ .Name }}List{ {{ lower_camel .Name }}1a, {{ lower_camel .Name }}2a }, nil) -{{- else }} - {{ lower_camel .Name }}2a, err := {{ lower_camel .Name }}Client.Write({{ .ImportPrefix }}New{{ .Name }}(namespace1, name2), clients.WriteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - {{ lower_camel .Name }}2b, err := {{ lower_camel .Name }}Client.Write({{ .ImportPrefix }}New{{ .Name }}(namespace2, name2), clients.WriteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) + deleteNamespaces(ctx, kube, namespace1, namespace2) - assertSnapshot{{ .PluralName }}({{ .ImportPrefix }}{{ .Name }}List{ {{ lower_camel .Name }}1a, {{ lower_camel .Name }}1b, {{ lower_camel .Name }}2a, {{ lower_camel .Name }}2b }, nil) + getNewNamespaces1and2() + createNamespaces(ctx, kube, namespace1, namespace2) {{- end }} + }) + + It("Should not 
contain resources from a deleted namespace, that is filtered", func () { + ctx := context.Background() + err := emitter.Register() + Expect(err).NotTo(HaveOccurred()) + + snapshots, errs, err := emitter.Snapshots([]string{""}, clients.WatchOpts{ + Ctx: ctx, + RefreshRate: time.Second, + ExpressionSelector: labelExpression1, + }) + Expect(err).NotTo(HaveOccurred()) + + var snap *{{ .GoName }}Snapshot + +{{- $length := len .Resources }} +{{- $last_entry := minus $length 1 }} +{{- range $i, $r := .Resources }} +{{ with $r }} + /* + {{ .Name }} + */ + + assertSnapshot{{ .PluralName }} := func(expect{{ .PluralName }} {{ .ImportPrefix }}{{ .Name }}List, unexpect{{ .PluralName }} {{ .ImportPrefix }}{{ .Name }}List) { + drain: + for { + select { + case snap = <-snapshots: + for _, expected := range expect{{ .PluralName }} { + if _, err := snap.{{ upper_camel .PluralName }}.Find(expected.GetMetadata().Ref().Strings()); err != nil { + continue drain + } + } + for _, unexpected := range unexpect{{ .PluralName }} { + if _, err := snap.{{ upper_camel .PluralName }}.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + continue drain + } + } + break drain + case err := <-errs: + Expect(err).NotTo(HaveOccurred()) + case <-time.After(time.Second * 10): + {{- if .ClusterScoped }} + combined, _ := {{ lower_camel .Name }}Client.List(clients.ListOpts{}) + {{- else }} + nsList1, _ := {{ lower_camel .Name }}Client.List(namespace1, clients.ListOpts{}) + nsList2, _ := {{ lower_camel .Name }}Client.List(namespace2, clients.ListOpts{}) + combined := append(nsList1, nsList2...) + {{- end }} + Fail("expected final snapshot before 10 seconds. expected " + log.Sprintf("%v", combined)) + } + } + } + + // create namespaces + createNamespaceWithLabel(ctx, kube, namespace3, labels1) + createNamespaceWithLabel(ctx, kube, namespace4, labels1) {{- if .ClusterScoped }} - err = {{ lower_camel .Name }}Client.Delete({{ lower_camel .Name }}2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) + {{ lower_camel .Name }}2a, err := {{ lower_camel .Name }}Client.Write({{ .ImportPrefix }}New{{ .Name }}(namespace3, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + {{ lower_camel .Name }}NotWatched := {{ .ImportPrefix }}{{ .Name }}List{} + {{ lower_camel .Name }}Watched := {{ .ImportPrefix }}{{ .Name }}List{ {{ lower_camel .Name }}2a } + assertSnapshot{{ .PluralName }}({{ lower_camel .Name }}Watched, nil) + + deleteNamespaces(ctx, kube, namespace3) + + for _, r := range {{ lower_camel .Name }}Watched { + err = {{ lower_camel .Name }}Client.Delete(r.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + {{ lower_camel .Name }}NotWatched = append({{ lower_camel .Name }}NotWatched , r) + } + assertSnapshot{{ .PluralName }}(nil, {{ lower_camel .Name }}NotWatched) - assertSnapshot{{ .PluralName }}({{ .ImportPrefix }}{{ .Name }}List{ {{ lower_camel .Name }}1a }, {{ .ImportPrefix }}{{ .Name }}List{ {{ lower_camel .Name }}2a }) {{- else }} - err = {{ lower_camel .Name }}Client.Delete({{ lower_camel .Name }}2a.GetMetadata().Namespace, {{ lower_camel .Name }}2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - err = {{ lower_camel .Name }}Client.Delete({{ lower_camel .Name }}2b.GetMetadata().Namespace, {{ lower_camel .Name }}2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) + {{ lower_camel .Name }}2a, err := {{ lower_camel .Name }}Client.Write({{ .ImportPrefix }}New{{ 
.Name }}(namespace3, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + {{ lower_camel .Name }}2b, err := {{ lower_camel .Name }}Client.Write({{ .ImportPrefix }}New{{ .Name }}(namespace4, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + {{ lower_camel .Name }}NotWatched := {{ .ImportPrefix }}{{ .Name }}List{} + {{ lower_camel .Name }}Watched := {{ .ImportPrefix }}{{ .Name }}List{ {{ lower_camel .Name }}2a, {{ lower_camel .Name }}2b } + assertSnapshot{{ .PluralName }}({{ lower_camel .Name }}Watched, {{ lower_camel .Name }}NotWatched) + + deleteNamespaces(ctx, kube, namespace3) + + {{ lower_camel .Name }}Watched = {{ .ImportPrefix }}{{ .Name }}List{ {{ lower_camel .Name }}2b } + {{ lower_camel .Name }}NotWatched = append({{ lower_camel .Name }}NotWatched, {{ lower_camel .Name }}2a) + assertSnapshot{{ .PluralName }}({{ lower_camel .Name }}Watched, {{ lower_camel .Name }}NotWatched) + + for _, r := range {{ lower_camel .Name }}Watched { + err = {{ lower_camel .Name }}Client.Delete(r.GetMetadata().Namespace, r.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + {{ lower_camel .Name }}NotWatched = append({{ lower_camel .Name }}NotWatched , r) + } + assertSnapshot{{ .PluralName }}(nil, {{ lower_camel .Name }}NotWatched) - assertSnapshot{{ .PluralName }}({{ .ImportPrefix }}{{ .Name }}List{ {{ lower_camel .Name }}1a, {{ lower_camel .Name }}1b }, {{ .ImportPrefix }}{{ .Name }}List{ {{ lower_camel .Name }}2a, {{ lower_camel .Name }}2b }) {{- end }} -{{- if .ClusterScoped }} + deleteNamespaces(ctx, kube, namespace4) + getNewNamespaces() - err = {{ lower_camel .Name }}Client.Delete({{ lower_camel .Name }}1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) +{{- end }}{{/* end of with */}} +{{- end }}{{/* end of range */}} + }) - assertSnapshot{{ .PluralName }}(nil, {{ .ImportPrefix }}{{ .Name }}List{ {{ lower_camel .Name }}1a, {{ lower_camel .Name }}2a }) -{{- else }} +{{- $num_of_clients_supported := 0 }} +{{- range .Resources }} +{{ if not .ClusterScoped }} +{{ if .HasStatus }} + {{ $num_of_clients_supported = inc $num_of_clients_supported }} +{{- end }} +{{- end }} +{{- end }} - err = {{ lower_camel .Name }}Client.Delete({{ lower_camel .Name }}1a.GetMetadata().Namespace, {{ lower_camel .Name }}1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - err = {{ lower_camel .Name }}Client.Delete({{ lower_camel .Name }}1b.GetMetadata().Namespace, {{ lower_camel .Name }}1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) +{{ if ge $num_of_clients_supported 1 }} + It("should be able to return a resource from a deleted namespace, after the namespace is re-created", func () { + ctx := context.Background() + err := emitter.Register() + Expect(err).NotTo(HaveOccurred()) + + snapshots, errs, err := emitter.Snapshots([]string{""}, clients.WatchOpts{ + Ctx: ctx, + RefreshRate: time.Second, + ExpressionSelector: labelExpression1, + }) + Expect(err).NotTo(HaveOccurred()) + + var snap *TestingSnapshot + var previous *TestingSnapshot + +{{- range .Resources }} +{{ if not .ClusterScoped }} +{{ if .HasStatus }} + +{{/* no need for anything else, this only works on clients that have kube resource factories, this will not work on clients that have memory resource factories.*/}} + + /* + {{ .Name }} + */ + assertSnapshot{{ .PluralName }} := func(expect{{ .PluralName }} {{ .ImportPrefix }}{{ .Name }}List, unexpect{{ .PluralName }} {{ 
.ImportPrefix }}{{ .Name }}List) { + drain: + for { + select { + case snap = <-snapshots: + previous = snap + for _, expected := range expect{{ .PluralName }} { + if _, err := snap.{{ upper_camel .PluralName }}.Find(expected.GetMetadata().Ref().Strings()); err != nil { + continue drain + } + } + for _, unexpected := range unexpect{{ .PluralName }} { + if _, err := snap.{{ upper_camel .PluralName }}.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + continue drain + } + } + break drain + case err := <-errs: + Expect(err).NotTo(HaveOccurred()) + case <-time.After(time.Second * 10): + var expectedResources map[string][]string + var unexpectedResource map[string][]string + + if previous != nil { + expectedResources = findNonMatchingResources(convert{{ .PluralName }}ToMetadataGetter(expect{{ .PluralName }}), convert{{ .PluralName }}ToMetadataGetter(previous.{{ upper_camel .PluralName }})) + unexpectedResource = findMatchingResources(convert{{ .PluralName }}ToMetadataGetter(unexpect{{ .PluralName }}), convert{{ .PluralName }}ToMetadataGetter(previous.{{ upper_camel .PluralName }})) + } else { + expectedResources = getMapOfResources(convert{{ .PluralName }}ToMetadataGetter(expect{{ .PluralName }})) + unexpectedResource = getMapOfResources(convert{{ .PluralName }}ToMetadataGetter(unexpect{{ .PluralName }})) + } + getList := func (ns string) ([]metadataGetter, error) { + l, err := {{ lower_camel .Name }}Client.List(ns, clients.ListOpts{}) + return convert{{ .PluralName }}ToMetadataGetter(l), err + } + namespaceResources := getMapOfNamespaceResources(getList) + Fail(fmt.Sprintf("expected final snapshot before 10 seconds. expected \nExpected:\n%#v\n\nUnexpected:\n%#v\n\nnamespaces:\n%#v", expectedResources, unexpectedResource, namespaceResources)) + } + } + } + + createNamespaceWithLabel(ctx, kube, namespace3, labels1) + + {{ lower_camel .Name }}1a, err := {{ lower_camel .Name }}Client.Write({{ .ImportPrefix }}New{{ .Name }}(namespace3, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + assertSnapshot{{ .PluralName }}({{ .ImportPrefix }}{{ .Name }}List{ {{ lower_camel .Name }}1a}, nil) + + deleteNamespaces(ctx, kube, namespace3) + Eventually(func () bool { + _, err = kube.CoreV1().Namespaces().Get(ctx, namespace3, metav1.GetOptions{}) + return apierrors.IsNotFound(err) + }, 15*time.Second, 1 * time.Second).Should(BeTrue()) + createNamespaceWithLabel(ctx, kube, namespace3, labels1) - assertSnapshot{{ .PluralName }}(nil, {{ .ImportPrefix }}{{ .Name }}List{ {{ lower_camel .Name }}1a, {{ lower_camel .Name }}1b, {{ lower_camel .Name }}2a, {{ lower_camel .Name }}2b }) + {{ lower_camel .Name }}2a, err := {{ lower_camel .Name }}Client.Write({{ .ImportPrefix }}New{{ .Name }}(namespace3, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + assertSnapshot{{ .PluralName }}({{ .ImportPrefix }}{{ .Name }}List{ {{ lower_camel .Name }}2a}, {{ .ImportPrefix }}{{ .Name }}List{ {{ lower_camel .Name }}1a}) + + deleteNamespaces(ctx, kube, namespace3) + Eventually(func () bool { + _, err = kube.CoreV1().Namespaces().Get(ctx, namespace3, metav1.GetOptions{}) + return apierrors.IsNotFound(err) + }, 15*time.Second, 1 * time.Second).Should(BeTrue()) {{- end }} -{{- end}} +{{- end }} +{{- end }} + }) +{{- end }}{{/* if $num_of_clients_supported */}} }) + + Context("use different resource namespace listers", func() { + BeforeEach(func () { + resourceNamespaceLister = namespace.NewKubeClientResourceNamespaceLister(kube) + emitter = New{{ .GoName }}Emitter({{ $clients }}, 
resourceNamespaceLister) + }) + + It("Should work with the Kube Client Namespace Lister", func () { + runNamespacedSelectorsWithWatchNamespaces() + }) + }) + }) `)) diff --git a/pkg/multicluster/v1/kube_config_client.sk.go b/pkg/multicluster/v1/kube_config_client.sk.go index 0deb1dcab..6689ff54b 100644 --- a/pkg/multicluster/v1/kube_config_client.sk.go +++ b/pkg/multicluster/v1/kube_config_client.sk.go @@ -19,6 +19,7 @@ type KubeConfigWatcher interface { type KubeConfigClient interface { BaseClient() clients.ResourceClient Register() error + RegisterNamespace(namespace string) error Read(namespace, name string, opts clients.ReadOpts) (*KubeConfig, error) Write(resource *KubeConfig, opts clients.WriteOpts) (*KubeConfig, error) Delete(namespace, name string, opts clients.DeleteOpts) error @@ -59,6 +60,10 @@ func (client *kubeConfigClient) Register() error { return client.rc.Register() } +func (client *kubeConfigClient) RegisterNamespace(namespace string) error { + return client.rc.RegisterNamespace(namespace) +} + func (client *kubeConfigClient) Read(namespace, name string, opts clients.ReadOpts) (*KubeConfig, error) { opts = opts.WithDefaults() diff --git a/pkg/multicluster/v1/kubeconfigs_event_loop_test.go b/pkg/multicluster/v1/kubeconfigs_event_loop_test.go index 0b39e01f2..94d36ef06 100644 --- a/pkg/multicluster/v1/kubeconfigs_event_loop_test.go +++ b/pkg/multicluster/v1/kubeconfigs_event_loop_test.go @@ -1,6 +1,5 @@ // Code generated by solo-kit. DO NOT EDIT. -//go:build solokit // +build solokit package v1 @@ -12,9 +11,12 @@ import ( . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" + skNamespace "github.com/solo-io/solo-kit/pkg/api/external/kubernetes/namespace" "github.com/solo-io/solo-kit/pkg/api/v1/clients" "github.com/solo-io/solo-kit/pkg/api/v1/clients/factory" + "github.com/solo-io/solo-kit/pkg/api/v1/clients/kube/cache" "github.com/solo-io/solo-kit/pkg/api/v1/clients/memory" + "github.com/solo-io/solo-kit/test/helpers" ) var _ = Describe("KubeconfigsEventLoop", func() { @@ -28,13 +30,18 @@ var _ = Describe("KubeconfigsEventLoop", func() { BeforeEach(func() { ctx = context.Background() + kube := helpers.MustKubeClient() + kubeCache, err := cache.NewKubeCoreCache(context.TODO(), kube) + Expect(err).NotTo(HaveOccurred()) + resourceNamespaceLister := skNamespace.NewKubeClientCacheResourceNamespaceLister(kube, kubeCache) + kubeConfigClientFactory := &factory.MemoryResourceClientFactory{ Cache: memory.NewInMemoryResourceCache(), } kubeConfigClient, err := NewKubeConfigClient(ctx, kubeConfigClientFactory) Expect(err).NotTo(HaveOccurred()) - emitter = NewKubeconfigsEmitter(kubeConfigClient) + emitter = NewKubeconfigsEmitter(kubeConfigClient, resourceNamespaceLister) }) It("runs sync function on a new snapshot", func() { _, err = emitter.KubeConfig().Write(NewKubeConfig(namespace, "jerry"), clients.WriteOpts{}) diff --git a/pkg/multicluster/v1/kubeconfigs_snapshot_emitter.sk.go b/pkg/multicluster/v1/kubeconfigs_snapshot_emitter.sk.go index f6e29546c..41755ac43 100644 --- a/pkg/multicluster/v1/kubeconfigs_snapshot_emitter.sk.go +++ b/pkg/multicluster/v1/kubeconfigs_snapshot_emitter.sk.go @@ -12,6 +12,7 @@ import ( "go.uber.org/zap" "github.com/solo-io/solo-kit/pkg/api/v1/clients" + "github.com/solo-io/solo-kit/pkg/api/v1/resources" "github.com/solo-io/solo-kit/pkg/errors" skstats "github.com/solo-io/solo-kit/pkg/stats" @@ -83,20 +84,29 @@ type KubeconfigsEmitter interface { KubeConfig() KubeConfigClient } -func NewKubeconfigsEmitter(kubeConfigClient KubeConfigClient) 
KubeconfigsEmitter {
-	return NewKubeconfigsEmitterWithEmit(kubeConfigClient, make(chan struct{}))
+func NewKubeconfigsEmitter(kubeConfigClient KubeConfigClient, resourceNamespaceLister resources.ResourceNamespaceLister) KubeconfigsEmitter {
+	return NewKubeconfigsEmitterWithEmit(kubeConfigClient, resourceNamespaceLister, make(chan struct{}))
 }
 
-func NewKubeconfigsEmitterWithEmit(kubeConfigClient KubeConfigClient, emit <-chan struct{}) KubeconfigsEmitter {
+func NewKubeconfigsEmitterWithEmit(kubeConfigClient KubeConfigClient, resourceNamespaceLister resources.ResourceNamespaceLister, emit <-chan struct{}) KubeconfigsEmitter {
 	return &kubeconfigsEmitter{
-		kubeConfig: kubeConfigClient,
-		forceEmit:  emit,
+		kubeConfig:              kubeConfigClient,
+		resourceNamespaceLister: resourceNamespaceLister,
+		forceEmit:               emit,
 	}
 }
 
 type kubeconfigsEmitter struct {
 	forceEmit  <-chan struct{}
 	kubeConfig KubeConfigClient
+	// resourceNamespaceLister is used to watch for namespaces as they are created.
+	// It is only consulted when an ExpressionSelector is set in the WatchOpts passed to Snapshots().
+	resourceNamespaceLister resources.ResourceNamespaceLister
+	// namespacesWatching is the set of namespaces currently being watched. It is maintained
+	// when an ExpressionSelector is set in the WatchOpts passed to Snapshots().
+	namespacesWatching sync.Map
+	// updateNamespaces guards the creation and update of namespace watches.
+	updateNamespaces sync.Mutex
 }
 
 func (c *kubeconfigsEmitter) Register() error {
@@ -110,6 +120,14 @@ func (c *kubeconfigsEmitter) KubeConfig() KubeConfigClient {
 	return c.kubeConfig
 }
 
+// Snapshots returns a channel that emits snapshots of the state of the
+// resources being watched.
+// Setting watchNamespaces watches all resources that live in the specified
+// namespaces. If the ExpressionSelector of the WatchOpts is also set, then every
+// namespace whose labels satisfy the ExpressionSelector is watched in addition
+// to the watched namespaces. If the ExpressionSelector is set and
+// watchNamespaces is set to [""], then only namespaces that satisfy the
+// ExpressionSelector are watched.
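+//
+// A minimal illustrative call (the namespace name and selector value below are
+// placeholders, not values defined by this package):
+//
+//	snapshots, errs, err := emitter.Snapshots([]string{"install-namespace"}, clients.WatchOpts{
+//		Ctx:                ctx,
+//		RefreshRate:        time.Second,
+//		ExpressionSelector: "solo.io/watched in (true)",
+//	})
+//
+// This emits snapshots containing resources from "install-namespace" plus any
+// namespace whose labels satisfy "solo.io/watched in (true)".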
func (c *kubeconfigsEmitter) Snapshots(watchNamespaces []string, opts clients.WatchOpts) (<-chan *KubeconfigsSnapshot, <-chan error, error) { if len(watchNamespaces) == 0 { @@ -124,59 +142,269 @@ func (c *kubeconfigsEmitter) Snapshots(watchNamespaces []string, opts clients.Wa } errs := make(chan error) + hasWatchedNamespaces := len(watchNamespaces) > 1 || (len(watchNamespaces) == 1 && watchNamespaces[0] != "") + watchingLabeledNamespaces := !(opts.ExpressionSelector == "") var done sync.WaitGroup ctx := opts.Ctx + + // setting up the options for both listing and watching resources in namespaces + watchedNamespacesListOptions := clients.ListOpts{Ctx: opts.Ctx, Selector: opts.Selector} + watchedNamespacesWatchOptions := clients.WatchOpts{Ctx: opts.Ctx, Selector: opts.Selector} /* Create channel for KubeConfig */ type kubeConfigListWithNamespace struct { list KubeConfigList namespace string } kubeConfigChan := make(chan kubeConfigListWithNamespace) - var initialKubeConfigList KubeConfigList currentSnapshot := KubeconfigsSnapshot{} - kubeconfigsByNamespace := make(map[string]KubeConfigList) - - for _, namespace := range watchNamespaces { - /* Setup namespaced watch for KubeConfig */ - { - kubeconfigs, err := c.kubeConfig.List(namespace, clients.ListOpts{Ctx: opts.Ctx, Selector: opts.Selector}) + kubeconfigsByNamespace := sync.Map{} + if hasWatchedNamespaces || !watchingLabeledNamespaces { + // then watch all resources on watch Namespaces + + // watched namespaces + for _, namespace := range watchNamespaces { + /* Setup namespaced watch for KubeConfig */ + { + kubeconfigs, err := c.kubeConfig.List(namespace, watchedNamespacesListOptions) + if err != nil { + return nil, nil, errors.Wrapf(err, "initial KubeConfig list") + } + initialKubeConfigList = append(initialKubeConfigList, kubeconfigs...) 
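+				// index the initial list by namespace for later snapshot assembly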
+ kubeconfigsByNamespace.Store(namespace, kubeconfigs) + } + kubeConfigNamespacesChan, kubeConfigErrs, err := c.kubeConfig.Watch(namespace, watchedNamespacesWatchOptions) if err != nil { - return nil, nil, errors.Wrapf(err, "initial KubeConfig list") + return nil, nil, errors.Wrapf(err, "starting KubeConfig watch") + } + + done.Add(1) + go func(namespace string) { + defer done.Done() + errutils.AggregateErrs(ctx, errs, kubeConfigErrs, namespace+"-kubeconfigs") + }(namespace) + /* Watch for changes and update snapshot */ + go func(namespace string) { + defer func() { + c.namespacesWatching.Delete(namespace) + }() + c.namespacesWatching.Store(namespace, true) + for { + select { + case <-ctx.Done(): + return + case kubeConfigList, ok := <-kubeConfigNamespacesChan: + if !ok { + return + } + select { + case <-ctx.Done(): + return + case kubeConfigChan <- kubeConfigListWithNamespace{list: kubeConfigList, namespace: namespace}: + } + } + } + }(namespace) + } + } + // watch all other namespaces that are labeled and fit the Expression Selector + if opts.ExpressionSelector != "" { + // watch resources of non-watched namespaces that fit the expression selectors + namespaceListOptions := resources.ResourceNamespaceListOptions{ + Ctx: opts.Ctx, + ExpressionSelector: opts.ExpressionSelector, + } + namespaceWatchOptions := resources.ResourceNamespaceWatchOptions{ + Ctx: opts.Ctx, + ExpressionSelector: opts.ExpressionSelector, + } + + filterNamespaces := resources.ResourceNamespaceList{} + for _, ns := range watchNamespaces { + // we do not want to filter out "" which equals all namespaces + // the reason is because we will never create a watch on ""(all namespaces) because + // doing so means we watch all resources regardless of namespace. Our intent is to + // watch only certain namespaces. + if ns != "" { + filterNamespaces = append(filterNamespaces, resources.ResourceNamespace{Name: ns}) } - initialKubeConfigList = append(initialKubeConfigList, kubeconfigs...) - kubeconfigsByNamespace[namespace] = kubeconfigs } - kubeConfigNamespacesChan, kubeConfigErrs, err := c.kubeConfig.Watch(namespace, opts) + namespacesResources, err := c.resourceNamespaceLister.GetResourceNamespaceList(namespaceListOptions, filterNamespaces) if err != nil { - return nil, nil, errors.Wrapf(err, "starting KubeConfig watch") + return nil, nil, err + } + newlyRegisteredNamespaces := make([]string, len(namespacesResources)) + // non watched namespaces that are labeled + for i, resourceNamespace := range namespacesResources { + c.namespacesWatching.Load(resourceNamespace) + namespace := resourceNamespace.Name + newlyRegisteredNamespaces[i] = namespace + err = c.kubeConfig.RegisterNamespace(namespace) + if err != nil { + return nil, nil, errors.Wrapf(err, "there was an error registering the namespace to the kubeConfig") + } + /* Setup namespaced watch for KubeConfig */ + { + kubeconfigs, err := c.kubeConfig.List(namespace, clients.ListOpts{Ctx: opts.Ctx}) + if err != nil { + return nil, nil, errors.Wrapf(err, "initial KubeConfig list with new namespace") + } + initialKubeConfigList = append(initialKubeConfigList, kubeconfigs...) 
+ kubeconfigsByNamespace.Store(namespace, kubeconfigs) + } + kubeConfigNamespacesChan, kubeConfigErrs, err := c.kubeConfig.Watch(namespace, clients.WatchOpts{Ctx: opts.Ctx}) + if err != nil { + return nil, nil, errors.Wrapf(err, "starting KubeConfig watch") + } + + done.Add(1) + go func(namespace string) { + defer done.Done() + errutils.AggregateErrs(ctx, errs, kubeConfigErrs, namespace+"-kubeconfigs") + }(namespace) + /* Watch for changes and update snapshot */ + go func(namespace string) { + for { + select { + case <-ctx.Done(): + return + case kubeConfigList, ok := <-kubeConfigNamespacesChan: + if !ok { + return + } + select { + case <-ctx.Done(): + return + case kubeConfigChan <- kubeConfigListWithNamespace{list: kubeConfigList, namespace: namespace}: + } + } + } + }(namespace) } + if len(newlyRegisteredNamespaces) > 0 { + contextutils.LoggerFrom(ctx).Infof("registered the new namespace %v", newlyRegisteredNamespaces) + } + + // create watch on all namespaces, so that we can add all resources from new namespaces + // we will be watching namespaces that meet the Expression Selector filter - done.Add(1) - go func(namespace string) { - defer done.Done() - errutils.AggregateErrs(ctx, errs, kubeConfigErrs, namespace+"-kubeconfigs") - }(namespace) + namespaceWatch, errsReceiver, err := c.resourceNamespaceLister.GetResourceNamespaceWatch(namespaceWatchOptions, filterNamespaces) + if err != nil { + return nil, nil, err + } + if errsReceiver != nil { + go func() { + for { + select { + case <-ctx.Done(): + return + case err = <-errsReceiver: + errs <- errors.Wrapf(err, "received error from watch on resource namespaces") + } + } + }() + } - /* Watch for changes and update snapshot */ - go func(namespace string) { + go func() { for { select { case <-ctx.Done(): return - case kubeConfigList, ok := <-kubeConfigNamespacesChan: + case resourceNamespaces, ok := <-namespaceWatch: if !ok { return } - select { - case <-ctx.Done(): - return - case kubeConfigChan <- kubeConfigListWithNamespace{list: kubeConfigList, namespace: namespace}: + // get the list of new namespaces, if there is a new namespace + // get the list of resources from that namespace, and add + // a watch for new resources created/deleted on that namespace + c.updateNamespaces.Lock() + + // get the new namespaces, and get a map of the namespaces + mapOfResourceNamespaces := make(map[string]struct{}, len(resourceNamespaces)) + newNamespaces := []string{} + for _, ns := range resourceNamespaces { + if _, hit := c.namespacesWatching.Load(ns.Name); !hit { + newNamespaces = append(newNamespaces, ns.Name) + } + mapOfResourceNamespaces[ns.Name] = struct{}{} } + + for _, ns := range watchNamespaces { + mapOfResourceNamespaces[ns] = struct{}{} + } + + missingNamespaces := []string{} + // use the map of namespace resources to find missing/deleted namespaces + c.namespacesWatching.Range(func(key interface{}, value interface{}) bool { + name := key.(string) + if _, hit := mapOfResourceNamespaces[name]; !hit { + missingNamespaces = append(missingNamespaces, name) + } + return true + }) + + for _, ns := range missingNamespaces { + kubeConfigChan <- kubeConfigListWithNamespace{list: KubeConfigList{}, namespace: ns} + } + + for _, namespace := range newNamespaces { + var err error + err = c.kubeConfig.RegisterNamespace(namespace) + if err != nil { + errs <- errors.Wrapf(err, "there was an error registering the namespace to the kubeConfig") + continue + } + /* Setup namespaced watch for KubeConfig for new namespace */ + { + kubeconfigs, err := 
c.kubeConfig.List(namespace, clients.ListOpts{Ctx: opts.Ctx, Selector: opts.Selector}) + if err != nil { + errs <- errors.Wrapf(err, "initial new namespace KubeConfig list in namespace watch") + continue + } + kubeconfigsByNamespace.Store(namespace, kubeconfigs) + } + kubeConfigNamespacesChan, kubeConfigErrs, err := c.kubeConfig.Watch(namespace, clients.WatchOpts{Ctx: opts.Ctx, Selector: opts.Selector}) + if err != nil { + errs <- errors.Wrapf(err, "starting new namespace KubeConfig watch") + continue + } + + done.Add(1) + go func(namespace string) { + defer done.Done() + errutils.AggregateErrs(ctx, errs, kubeConfigErrs, namespace+"-new-namespace-kubeconfigs") + }(namespace) + /* Watch for changes and update snapshot */ + go func(namespace string) { + defer func() { + c.namespacesWatching.Delete(namespace) + }() + c.namespacesWatching.Store(namespace, true) + for { + select { + case <-ctx.Done(): + return + case kubeConfigList, ok := <-kubeConfigNamespacesChan: + if !ok { + return + } + select { + case <-ctx.Done(): + return + case kubeConfigChan <- kubeConfigListWithNamespace{list: kubeConfigList, namespace: namespace}: + } + } + } + }(namespace) + } + if len(newNamespaces) > 0 { + contextutils.LoggerFrom(ctx).Infof("registered the new namespace %v", newNamespaces) + } + c.updateNamespaces.Unlock() } } - }(namespace) + }() } /* Initialize snapshot for Kubeconfigs */ currentSnapshot.Kubeconfigs = initialKubeConfigList.Sort() @@ -246,11 +474,13 @@ func (c *kubeconfigsEmitter) Snapshots(watchNamespaces []string, opts clients.Wa ) // merge lists by namespace - kubeconfigsByNamespace[namespace] = kubeConfigNamespacedList.list + kubeconfigsByNamespace.Store(namespace, kubeConfigNamespacedList.list) var kubeConfigList KubeConfigList - for _, kubeconfigs := range kubeconfigsByNamespace { - kubeConfigList = append(kubeConfigList, kubeconfigs...) - } + kubeconfigsByNamespace.Range(func(key interface{}, value interface{}) bool { + mocks := value.(KubeConfigList) + kubeConfigList = append(kubeConfigList, mocks...) + return true + }) currentSnapshot.Kubeconfigs = kubeConfigList.Sort() } } diff --git a/pkg/multicluster/v1/kubeconfigs_snapshot_emitter_test.go b/pkg/multicluster/v1/kubeconfigs_snapshot_emitter_test.go index 113dd484d..60cb4fc5b 100644 --- a/pkg/multicluster/v1/kubeconfigs_snapshot_emitter_test.go +++ b/pkg/multicluster/v1/kubeconfigs_snapshot_emitter_test.go @@ -7,6 +7,7 @@ package v1 import ( "context" + "fmt" "os" "time" @@ -14,11 +15,17 @@ import ( . "github.com/onsi/gomega" "github.com/solo-io/go-utils/log" "github.com/solo-io/k8s-utils/kubeutils" + "github.com/solo-io/solo-kit/pkg/api/external/kubernetes/namespace" "github.com/solo-io/solo-kit/pkg/api/v1/clients" "github.com/solo-io/solo-kit/pkg/api/v1/clients/factory" + "github.com/solo-io/solo-kit/pkg/api/v1/clients/kube/cache" "github.com/solo-io/solo-kit/pkg/api/v1/clients/memory" + "github.com/solo-io/solo-kit/pkg/api/v1/resources" + "github.com/solo-io/solo-kit/pkg/api/v1/resources/core" "github.com/solo-io/solo-kit/pkg/utils/statusutils" "github.com/solo-io/solo-kit/test/helpers" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" // Needed to run tests in GKE @@ -33,65 +40,179 @@ var _ = Describe("V1Emitter", func() { log.Printf("This test creates kubernetes resources and is disabled by default. 
To enable, set RUN_KUBE_TESTS=1 in your env.") return } + + type metadataGetter interface { + GetMetadata() *core.Metadata + } + var ( - ctx context.Context - namespace1 string - namespace2 string - name1, name2 = "angela" + helpers.RandString(3), "bob" + helpers.RandString(3) - kube kubernetes.Interface - emitter KubeconfigsEmitter - kubeConfigClient KubeConfigClient + ctx context.Context + namespace1, namespace2 string + namespace3, namespace4 string + namespace5, namespace6 string + name1, name2 = "angela" + helpers.RandString(3), "bob" + helpers.RandString(3) + name3, name4 = "susan" + helpers.RandString(3), "jim" + helpers.RandString(3) + name5 = "melisa" + helpers.RandString(3) + labels1 = map[string]string{"env": "test"} + labelExpression1 = "env in (test)" + kube kubernetes.Interface + emitter KubeconfigsEmitter + kubeConfigClient KubeConfigClient + resourceNamespaceLister resources.ResourceNamespaceLister + kubeCache cache.KubeCoreCache ) + const ( + TIME_BETWEEN_MESSAGES = 5 + ) + NewKubeConfigWithLabels := func(namespace, name string, labels map[string]string) *KubeConfig { + resource := NewKubeConfig(namespace, name) + resource.GetMetadata().Labels = labels + return resource + } - BeforeEach(func() { - err := os.Setenv(statusutils.PodNamespaceEnvName, "default") + createNamespaces := func(ctx context.Context, kube kubernetes.Interface, namespaces ...string) { + err := kubeutils.CreateNamespacesInParallel(ctx, kube, namespaces...) Expect(err).NotTo(HaveOccurred()) + } - ctx = context.Background() + createNamespaceWithLabel := func(ctx context.Context, kube kubernetes.Interface, namespace string, labels map[string]string) { + _, err := kube.CoreV1().Namespaces().Create(ctx, &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: namespace, + Labels: labels, + }, + }, metav1.CreateOptions{}) + Expect(err).ToNot(HaveOccurred()) + } + + deleteNamespaces := func(ctx context.Context, kube kubernetes.Interface, namespaces ...string) { + err := kubeutils.DeleteNamespacesInParallelBlocking(ctx, kube, namespaces...) + Expect(err).NotTo(HaveOccurred()) + } + + // getNewNamespaces is used to generate new namespace names, so that we do not have to wait + // when deleting namespaces in runNamespacedSelectorsWithWatchNamespaces. Since + // runNamespacedSelectorsWithWatchNamespaces uses watchNamespaces set to namespace1 and + // namespace2, this will work. Because the emitter willl only be watching namespaces that are + // labeled. + getNewNamespaces := func() { + namespace3 = helpers.RandString(8) + namespace4 = helpers.RandString(8) + namespace5 = helpers.RandString(8) + namespace6 = helpers.RandString(8) + } + + // getNewNamespaces1and2 is used to generate new namespaces for namespace 1 and 2. 
+ // used for the same reason as getNewNamespaces() above + getNewNamespaces1and2 := func() { namespace1 = helpers.RandString(8) namespace2 = helpers.RandString(8) - kube = helpers.MustKubeClient() - err = kubeutils.CreateNamespacesInParallel(ctx, kube, namespace1, namespace2) - Expect(err).NotTo(HaveOccurred()) - // KubeConfig Constructor - kubeConfigClientFactory := &factory.MemoryResourceClientFactory{ - Cache: memory.NewInMemoryResourceCache(), + } + + getMapOfNamespaceResources := func(getList func(string) ([]metadataGetter, error)) map[string][]string { + namespaces := []string{namespace1, namespace2, namespace3, namespace4, namespace5, namespace6} + namespaceResources := make(map[string][]string, len(namespaces)) + for _, ns := range namespaces { + list, _ := getList(ns) + for _, snap := range list { + snapMeta := snap.GetMetadata() + if _, hit := namespaceResources[snapMeta.Namespace]; hit { + namespaceResources[snap.GetMetadata().Namespace] = make([]string, 1) + } + namespaceResources[snapMeta.Namespace] = append(namespaceResources[snapMeta.Namespace], snapMeta.Name) + } } + return namespaceResources + } - kubeConfigClient, err = NewKubeConfigClient(ctx, kubeConfigClientFactory) - Expect(err).NotTo(HaveOccurred()) - emitter = NewKubeconfigsEmitter(kubeConfigClient) - }) - AfterEach(func() { - err := os.Unsetenv(statusutils.PodNamespaceEnvName) - Expect(err).NotTo(HaveOccurred()) + findNonMatchingResources := func(matchList, findList []metadataGetter) map[string][]string { + nonMatching := make(map[string][]string) + for _, snap := range matchList { + snapMeta := snap.GetMetadata() + matched := false + for _, pre := range findList { + preMeta := pre.GetMetadata() + if preMeta.Namespace == snapMeta.Namespace && preMeta.Name == snapMeta.Name { + matched = true + break + } + } + if !matched { + if _, hit := nonMatching[snapMeta.Namespace]; hit { + nonMatching[snap.GetMetadata().Namespace] = make([]string, 1) + } + nonMatching[snapMeta.Namespace] = append(nonMatching[snapMeta.Namespace], snapMeta.Name) + } + } + return nonMatching + } - err = kubeutils.DeleteNamespacesInParallelBlocking(ctx, kube, namespace1, namespace2) - Expect(err).NotTo(HaveOccurred()) - }) + findMatchingResources := func(matchList, findList []metadataGetter) map[string][]string { + matching := make(map[string][]string) + for _, snap := range matchList { + snapMeta := snap.GetMetadata() + matched := false + for _, pre := range findList { + preMeta := pre.GetMetadata() + if preMeta.Namespace == snapMeta.Namespace && preMeta.Name == snapMeta.Name { + matched = true + break + } + } + if matched { + if _, hit := matching[snapMeta.Namespace]; hit { + matching[snap.GetMetadata().Namespace] = make([]string, 1) + } + matching[snapMeta.Namespace] = append(matching[snapMeta.Namespace], snapMeta.Name) + } + } + return matching + } + + getMapOfResources := func(listOfResources []metadataGetter) map[string][]string { + resources := make(map[string][]string) + for _, snap := range listOfResources { + snapMeta := snap.GetMetadata() + if _, hit := resources[snapMeta.Namespace]; hit { + resources[snap.GetMetadata().Namespace] = make([]string, 1) + } + resources[snapMeta.Namespace] = append(resources[snapMeta.Namespace], snapMeta.Name) + } + return resources + } + convertkubeconfigsToMetadataGetter := func(rl KubeConfigList) []metadataGetter { + listConv := make([]metadataGetter, len(rl)) + for i, r := range rl { + listConv[i] = r + } + return listConv + } - It("tracks snapshots on changes to any resource", func() { + 
runNamespacedSelectorsWithWatchNamespaces := func() { ctx := context.Background() err := emitter.Register() Expect(err).NotTo(HaveOccurred()) + // There is an error here in the code. snapshots, errs, err := emitter.Snapshots([]string{namespace1, namespace2}, clients.WatchOpts{ - Ctx: ctx, - RefreshRate: time.Second, + Ctx: ctx, + RefreshRate: time.Second, + ExpressionSelector: labelExpression1, }) Expect(err).NotTo(HaveOccurred()) var snap *KubeconfigsSnapshot + var previous *KubeconfigsSnapshot /* KubeConfig */ - assertSnapshotkubeconfigs := func(expectkubeconfigs KubeConfigList, unexpectkubeconfigs KubeConfigList) { drain: for { select { case snap = <-snapshots: + previous = snap for _, expected := range expectkubeconfigs { if _, err := snap.Kubeconfigs.Find(expected.GetMetadata().Ref().Strings()); err != nil { continue drain @@ -106,109 +227,535 @@ var _ = Describe("V1Emitter", func() { case err := <-errs: Expect(err).NotTo(HaveOccurred()) case <-time.After(time.Second * 10): - nsList1, _ := kubeConfigClient.List(namespace1, clients.ListOpts{}) - nsList2, _ := kubeConfigClient.List(namespace2, clients.ListOpts{}) - combined := append(nsList1, nsList2...) - Fail("expected final snapshot before 10 seconds. expected " + log.Sprintf("%v", combined)) + var expectedResources map[string][]string + var unexpectedResource map[string][]string + + if previous != nil { + expectedResources = findNonMatchingResources(convertkubeconfigsToMetadataGetter(expectkubeconfigs), convertkubeconfigsToMetadataGetter(previous.Kubeconfigs)) + unexpectedResource = findMatchingResources(convertkubeconfigsToMetadataGetter(unexpectkubeconfigs), convertkubeconfigsToMetadataGetter(previous.Kubeconfigs)) + } else { + expectedResources = getMapOfResources(convertkubeconfigsToMetadataGetter(expectkubeconfigs)) + unexpectedResource = getMapOfResources(convertkubeconfigsToMetadataGetter(unexpectkubeconfigs)) + } + getList := func(ns string) ([]metadataGetter, error) { + l, err := kubeConfigClient.List(ns, clients.ListOpts{}) + return convertkubeconfigsToMetadataGetter(l), err + } + namespaceResources := getMapOfNamespaceResources(getList) + Fail(fmt.Sprintf("expected final snapshot before 10 seconds. expected \nExpected:\n%#v\n\nUnexpected:\n%#v\n\nnamespaces:\n%#v", expectedResources, unexpectedResource, namespaceResources)) } } } + kubeConfig1a, err := kubeConfigClient.Write(NewKubeConfig(namespace1, name1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) kubeConfig1b, err := kubeConfigClient.Write(NewKubeConfig(namespace2, name1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) + kubeConfigWatched := KubeConfigList{kubeConfig1a, kubeConfig1b} + assertSnapshotkubeconfigs(kubeConfigWatched, nil) - assertSnapshotkubeconfigs(KubeConfigList{kubeConfig1a, kubeConfig1b}, nil) - kubeConfig2a, err := kubeConfigClient.Write(NewKubeConfig(namespace1, name2), clients.WriteOpts{Ctx: ctx}) + kubeConfig3a, err := kubeConfigClient.Write(NewKubeConfigWithLabels(namespace1, name3, labels1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - kubeConfig2b, err := kubeConfigClient.Write(NewKubeConfig(namespace2, name2), clients.WriteOpts{Ctx: ctx}) + kubeConfig3b, err := kubeConfigClient.Write(NewKubeConfigWithLabels(namespace2, name3, labels1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) + kubeConfigWatched = append(kubeConfigWatched, KubeConfigList{kubeConfig3a, kubeConfig3b}...) 
+ assertSnapshotkubeconfigs(kubeConfigWatched, nil) - assertSnapshotkubeconfigs(KubeConfigList{kubeConfig1a, kubeConfig1b, kubeConfig2a, kubeConfig2b}, nil) + createNamespaceWithLabel(ctx, kube, namespace3, labels1) + createNamespaces(ctx, kube, namespace4) - err = kubeConfigClient.Delete(kubeConfig2a.GetMetadata().Namespace, kubeConfig2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + kubeConfig4a, err := kubeConfigClient.Write(NewKubeConfig(namespace3, name1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - err = kubeConfigClient.Delete(kubeConfig2b.GetMetadata().Namespace, kubeConfig2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + kubeConfig4b, err := kubeConfigClient.Write(NewKubeConfig(namespace4, name1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) + kubeConfigWatched = append(kubeConfigWatched, kubeConfig4a) + kubeConfigNotWatched := KubeConfigList{kubeConfig4b} + assertSnapshotkubeconfigs(kubeConfigWatched, kubeConfigNotWatched) - assertSnapshotkubeconfigs(KubeConfigList{kubeConfig1a, kubeConfig1b}, KubeConfigList{kubeConfig2a, kubeConfig2b}) + kubeConfig5a, err := kubeConfigClient.Write(NewKubeConfigWithLabels(namespace3, name2, labels1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + kubeConfig5b, err := kubeConfigClient.Write(NewKubeConfigWithLabels(namespace4, name2, labels1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + kubeConfigWatched = append(kubeConfigWatched, kubeConfig5a) + kubeConfigNotWatched = append(kubeConfigNotWatched, kubeConfig5b) + assertSnapshotkubeconfigs(kubeConfigWatched, kubeConfigNotWatched) + + for _, r := range kubeConfigNotWatched { + err = kubeConfigClient.Delete(r.GetMetadata().Namespace, r.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + } err = kubeConfigClient.Delete(kubeConfig1a.GetMetadata().Namespace, kubeConfig1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) err = kubeConfigClient.Delete(kubeConfig1b.GetMetadata().Namespace, kubeConfig1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) + kubeConfigNotWatched = append(kubeConfigNotWatched, KubeConfigList{kubeConfig1a, kubeConfig1b}...) + kubeConfigWatched = KubeConfigList{kubeConfig3a, kubeConfig3b, kubeConfig4a, kubeConfig5a} + assertSnapshotkubeconfigs(kubeConfigWatched, kubeConfigNotWatched) - assertSnapshotkubeconfigs(nil, KubeConfigList{kubeConfig1a, kubeConfig1b, kubeConfig2a, kubeConfig2b}) - }) + err = kubeConfigClient.Delete(kubeConfig3a.GetMetadata().Namespace, kubeConfig3a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = kubeConfigClient.Delete(kubeConfig3b.GetMetadata().Namespace, kubeConfig3b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + kubeConfigNotWatched = append(kubeConfigNotWatched, KubeConfigList{kubeConfig3a, kubeConfig3b}...) 
+ kubeConfigWatched = KubeConfigList{kubeConfig4a, kubeConfig5a} + assertSnapshotkubeconfigs(kubeConfigWatched, kubeConfigNotWatched) - It("tracks snapshots on changes to any resource using AllNamespace", func() { - ctx := context.Background() - err := emitter.Register() + err = kubeConfigClient.Delete(kubeConfig4a.GetMetadata().Namespace, kubeConfig4a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = kubeConfigClient.Delete(kubeConfig5a.GetMetadata().Namespace, kubeConfig5a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) + kubeConfigNotWatched = append(kubeConfigNotWatched, KubeConfigList{kubeConfig5a, kubeConfig5b}...) + assertSnapshotkubeconfigs(nil, kubeConfigNotWatched) - snapshots, errs, err := emitter.Snapshots([]string{""}, clients.WatchOpts{ - Ctx: ctx, - RefreshRate: time.Second, - }) + // clean up environment + deleteNamespaces(ctx, kube, namespace3, namespace4) + getNewNamespaces() + } + + BeforeEach(func() { + err := os.Setenv(statusutils.PodNamespaceEnvName, "default") Expect(err).NotTo(HaveOccurred()) - var snap *KubeconfigsSnapshot + ctx = context.Background() + namespace1 = helpers.RandString(8) + namespace2 = helpers.RandString(8) + namespace3 = helpers.RandString(8) + namespace4 = helpers.RandString(8) + namespace5 = helpers.RandString(8) + namespace6 = helpers.RandString(8) - /* - KubeConfig - */ + kube = helpers.MustKubeClient() + kubeCache, err = cache.NewKubeCoreCache(context.TODO(), kube) + Expect(err).NotTo(HaveOccurred()) + resourceNamespaceLister = namespace.NewKubeClientCacheResourceNamespaceLister(kube, kubeCache) - assertSnapshotkubeconfigs := func(expectkubeconfigs KubeConfigList, unexpectkubeconfigs KubeConfigList) { - drain: - for { - select { - case snap = <-snapshots: - for _, expected := range expectkubeconfigs { - if _, err := snap.Kubeconfigs.Find(expected.GetMetadata().Ref().Strings()); err != nil { - continue drain + createNamespaces(ctx, kube, namespace1, namespace2) + // KubeConfig Constructor + kubeConfigClientFactory := &factory.MemoryResourceClientFactory{ + Cache: memory.NewInMemoryResourceCache(), + } + + kubeConfigClient, err = NewKubeConfigClient(ctx, kubeConfigClientFactory) + Expect(err).NotTo(HaveOccurred()) + emitter = NewKubeconfigsEmitter(kubeConfigClient, resourceNamespaceLister) + }) + AfterEach(func() { + err := os.Unsetenv(statusutils.PodNamespaceEnvName) + Expect(err).NotTo(HaveOccurred()) + + kubeutils.DeleteNamespacesInParallelBlocking(ctx, kube, namespace1, namespace2) + }) + + Context("Tracking watched namespaces", func() { + It("tracks snapshots on changes to any resource", func() { + ctx := context.Background() + err := emitter.Register() + Expect(err).NotTo(HaveOccurred()) + + snapshots, errs, err := emitter.Snapshots([]string{namespace1, namespace2}, clients.WatchOpts{ + Ctx: ctx, + RefreshRate: time.Second, + }) + Expect(err).NotTo(HaveOccurred()) + + var snap *KubeconfigsSnapshot + + /* + KubeConfig + */ + + assertSnapshotkubeconfigs := func(expectkubeconfigs KubeConfigList, unexpectkubeconfigs KubeConfigList) { + drain: + for { + select { + case snap = <-snapshots: + for _, expected := range expectkubeconfigs { + if _, err := snap.Kubeconfigs.Find(expected.GetMetadata().Ref().Strings()); err != nil { + continue drain + } } + for _, unexpected := range unexpectkubeconfigs { + if _, err := snap.Kubeconfigs.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + continue drain + } + } + break drain + case err := <-errs: + 
Expect(err).NotTo(HaveOccurred()) + case <-time.After(time.Second * 10): + nsList1, _ := kubeConfigClient.List(namespace1, clients.ListOpts{}) + nsList2, _ := kubeConfigClient.List(namespace2, clients.ListOpts{}) + combined := append(nsList1, nsList2...) + Fail("expected final snapshot before 10 seconds. expected " + log.Sprintf("%v", combined)) } - for _, unexpected := range unexpectkubeconfigs { - if _, err := snap.Kubeconfigs.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { - continue drain + } + } + kubeConfig1a, err := kubeConfigClient.Write(NewKubeConfig(namespace1, name5), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + kubeConfig1b, err := kubeConfigClient.Write(NewKubeConfig(namespace2, name5), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotkubeconfigs(KubeConfigList{kubeConfig1a, kubeConfig1b}, nil) + kubeConfig2a, err := kubeConfigClient.Write(NewKubeConfig(namespace1, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + kubeConfig2b, err := kubeConfigClient.Write(NewKubeConfig(namespace2, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotkubeconfigs(KubeConfigList{kubeConfig1a, kubeConfig1b, kubeConfig2a, kubeConfig2b}, nil) + + err = kubeConfigClient.Delete(kubeConfig2a.GetMetadata().Namespace, kubeConfig2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = kubeConfigClient.Delete(kubeConfig2b.GetMetadata().Namespace, kubeConfig2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotkubeconfigs(KubeConfigList{kubeConfig1a, kubeConfig1b}, KubeConfigList{kubeConfig2a, kubeConfig2b}) + + err = kubeConfigClient.Delete(kubeConfig1a.GetMetadata().Namespace, kubeConfig1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = kubeConfigClient.Delete(kubeConfig1b.GetMetadata().Namespace, kubeConfig1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotkubeconfigs(nil, KubeConfigList{kubeConfig1a, kubeConfig1b, kubeConfig2a, kubeConfig2b}) + }) + + It("should be able to track all resources that are on labeled namespaces", func() { + runNamespacedSelectorsWithWatchNamespaces() + }) + }) + + Context("Tracking empty watched namespaces", func() { + It("tracks snapshots on changes to any resource using AllNamespace", func() { + ctx := context.Background() + err := emitter.Register() + Expect(err).NotTo(HaveOccurred()) + + snapshots, errs, err := emitter.Snapshots([]string{""}, clients.WatchOpts{ + Ctx: ctx, + RefreshRate: time.Second, + }) + Expect(err).NotTo(HaveOccurred()) + + var snap *KubeconfigsSnapshot + + /* + KubeConfig + */ + + assertSnapshotkubeconfigs := func(expectkubeconfigs KubeConfigList, unexpectkubeconfigs KubeConfigList) { + drain: + for { + select { + case snap = <-snapshots: + for _, expected := range expectkubeconfigs { + if _, err := snap.Kubeconfigs.Find(expected.GetMetadata().Ref().Strings()); err != nil { + continue drain + } } + for _, unexpected := range unexpectkubeconfigs { + if _, err := snap.Kubeconfigs.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + continue drain + } + } + break drain + case err := <-errs: + Expect(err).NotTo(HaveOccurred()) + case <-time.After(time.Second * 10): + nsList1, _ := kubeConfigClient.List(namespace1, clients.ListOpts{}) + nsList2, _ := kubeConfigClient.List(namespace2, clients.ListOpts{}) + combined := 
append(nsList1, nsList2...) + Fail("expected final snapshot before 10 seconds. expected " + log.Sprintf("%v", combined)) } - break drain - case err := <-errs: - Expect(err).NotTo(HaveOccurred()) - case <-time.After(time.Second * 10): - nsList1, _ := kubeConfigClient.List(namespace1, clients.ListOpts{}) - nsList2, _ := kubeConfigClient.List(namespace2, clients.ListOpts{}) - combined := append(nsList1, nsList2...) - Fail("expected final snapshot before 10 seconds. expected " + log.Sprintf("%v", combined)) } } - } - kubeConfig1a, err := kubeConfigClient.Write(NewKubeConfig(namespace1, name1), clients.WriteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - kubeConfig1b, err := kubeConfigClient.Write(NewKubeConfig(namespace2, name1), clients.WriteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - assertSnapshotkubeconfigs(KubeConfigList{kubeConfig1a, kubeConfig1b}, nil) - kubeConfig2a, err := kubeConfigClient.Write(NewKubeConfig(namespace1, name2), clients.WriteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - kubeConfig2b, err := kubeConfigClient.Write(NewKubeConfig(namespace2, name2), clients.WriteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) + kubeConfig1a, err := kubeConfigClient.Write(NewKubeConfig(namespace1, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + kubeConfig1b, err := kubeConfigClient.Write(NewKubeConfig(namespace2, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + assertSnapshotkubeconfigs(KubeConfigList{kubeConfig1a, kubeConfig1b}, nil) + + kubeConfig2a, err := kubeConfigClient.Write(NewKubeConfig(namespace1, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + kubeConfig2b, err := kubeConfigClient.Write(NewKubeConfig(namespace2, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + assertSnapshotkubeconfigs(KubeConfigList{kubeConfig1a, kubeConfig1b, kubeConfig2a, kubeConfig2b}, nil) + + err = kubeConfigClient.Delete(kubeConfig2a.GetMetadata().Namespace, kubeConfig2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = kubeConfigClient.Delete(kubeConfig2b.GetMetadata().Namespace, kubeConfig2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + assertSnapshotkubeconfigs(KubeConfigList{kubeConfig1a, kubeConfig1b}, KubeConfigList{kubeConfig2a, kubeConfig2b}) + + err = kubeConfigClient.Delete(kubeConfig1a.GetMetadata().Namespace, kubeConfig1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = kubeConfigClient.Delete(kubeConfig1b.GetMetadata().Namespace, kubeConfig1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + assertSnapshotkubeconfigs(nil, KubeConfigList{kubeConfig1a, kubeConfig1b, kubeConfig2a, kubeConfig2b}) + }) + + It("should be able to track resources only made with the matching labels", func() { + ctx := context.Background() + err := emitter.Register() + Expect(err).NotTo(HaveOccurred()) + + snapshots, errs, err := emitter.Snapshots([]string{""}, clients.WatchOpts{ + Ctx: ctx, + RefreshRate: time.Second, + ExpressionSelector: labelExpression1, + }) + Expect(err).NotTo(HaveOccurred()) + + var snap *KubeconfigsSnapshot + var previous *KubeconfigsSnapshot + + /* + KubeConfig + */ + + assertSnapshotkubeconfigs := func(expectkubeconfigs KubeConfigList, unexpectkubeconfigs KubeConfigList) { + drain: + for { + select { + case snap = <-snapshots: + previous = snap + for _, expected := range expectkubeconfigs { + 
if _, err := snap.Kubeconfigs.Find(expected.GetMetadata().Ref().Strings()); err != nil { + continue drain + } + } + for _, unexpected := range unexpectkubeconfigs { + if _, err := snap.Kubeconfigs.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + continue drain + } + } + break drain + case err := <-errs: + Expect(err).NotTo(HaveOccurred()) + case <-time.After(time.Second * 10): + + var expectedResources map[string][]string + var unexpectedResource map[string][]string + + if previous != nil { + expectedResources = findNonMatchingResources(convertkubeconfigsToMetadataGetter(expectkubeconfigs), convertkubeconfigsToMetadataGetter(previous.Kubeconfigs)) + unexpectedResource = findMatchingResources(convertkubeconfigsToMetadataGetter(unexpectkubeconfigs), convertkubeconfigsToMetadataGetter(previous.Kubeconfigs)) + } else { + expectedResources = getMapOfResources(convertkubeconfigsToMetadataGetter(expectkubeconfigs)) + unexpectedResource = getMapOfResources(convertkubeconfigsToMetadataGetter(unexpectkubeconfigs)) + } + getList := func(ns string) ([]metadataGetter, error) { + l, err := kubeConfigClient.List(ns, clients.ListOpts{}) + return convertkubeconfigsToMetadataGetter(l), err + } + namespaceResources := getMapOfNamespaceResources(getList) + Fail(fmt.Sprintf("expected final snapshot before 10 seconds. expected \nExpected:\n%#v\n\nUnexpected:\n%#v\n\nnamespaces:\n%#v", expectedResources, unexpectedResource, namespaceResources)) + } + } + } - assertSnapshotkubeconfigs(KubeConfigList{kubeConfig1a, kubeConfig1b, kubeConfig2a, kubeConfig2b}, nil) + kubeConfig1a, err := kubeConfigClient.Write(NewKubeConfig(namespace1, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + kubeConfig1b, err := kubeConfigClient.Write(NewKubeConfig(namespace2, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + kubeConfigNotWatched := KubeConfigList{kubeConfig1a, kubeConfig1b} + + createNamespaceWithLabel(ctx, kube, namespace3, labels1) + createNamespaceWithLabel(ctx, kube, namespace4, labels1) + + kubeConfig2a, err := kubeConfigClient.Write(NewKubeConfig(namespace3, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + kubeConfig2b, err := kubeConfigClient.Write(NewKubeConfig(namespace4, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + kubeConfigWatched := KubeConfigList{kubeConfig2a, kubeConfig2b} + assertSnapshotkubeconfigs(kubeConfigWatched, kubeConfigNotWatched) + + createNamespaces(ctx, kube, namespace5) + createNamespaceWithLabel(ctx, kube, namespace6, labels1) + + kubeConfig5a, err := kubeConfigClient.Write(NewKubeConfig(namespace5, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + kubeConfig5b, err := kubeConfigClient.Write(NewKubeConfig(namespace6, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + kubeConfigNotWatched = append(kubeConfigNotWatched, kubeConfig5a) + kubeConfigWatched = append(kubeConfigWatched, kubeConfig5b) + assertSnapshotkubeconfigs(kubeConfigWatched, kubeConfigNotWatched) + + kubeConfig7a, err := kubeConfigClient.Write(NewKubeConfig(namespace5, name4), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + kubeConfig7b, err := kubeConfigClient.Write(NewKubeConfig(namespace6, name4), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + kubeConfigNotWatched = append(kubeConfigNotWatched, kubeConfig7a) + kubeConfigWatched = append(kubeConfigWatched, kubeConfig7b) + assertSnapshotkubeconfigs(kubeConfigWatched, 
kubeConfigNotWatched) + + for _, r := range kubeConfigNotWatched { + err = kubeConfigClient.Delete(r.GetMetadata().Namespace, r.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + } - err = kubeConfigClient.Delete(kubeConfig2a.GetMetadata().Namespace, kubeConfig2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - err = kubeConfigClient.Delete(kubeConfig2b.GetMetadata().Namespace, kubeConfig2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) + for _, r := range kubeConfigWatched { + err = kubeConfigClient.Delete(r.GetMetadata().Namespace, r.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + kubeConfigNotWatched = append(kubeConfigNotWatched, r) + } + assertSnapshotkubeconfigs(nil, kubeConfigNotWatched) - assertSnapshotkubeconfigs(KubeConfigList{kubeConfig1a, kubeConfig1b}, KubeConfigList{kubeConfig2a, kubeConfig2b}) + // clean up environment + deleteNamespaces(ctx, kube, namespace3, namespace4, namespace5, namespace6) + getNewNamespaces() + }) + }) - err = kubeConfigClient.Delete(kubeConfig1a.GetMetadata().Namespace, kubeConfig1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - err = kubeConfigClient.Delete(kubeConfig1b.GetMetadata().Namespace, kubeConfig1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) + Context("Tracking resources on namespaces that are deleted", func() { + It("Should not contain resources from a deleted namespace", func() { + ctx := context.Background() + err := emitter.Register() + Expect(err).NotTo(HaveOccurred()) + + snapshots, errs, err := emitter.Snapshots([]string{""}, clients.WatchOpts{ + Ctx: ctx, + RefreshRate: time.Second, + }) + Expect(err).NotTo(HaveOccurred()) + + var snap *KubeconfigsSnapshot + + /* + KubeConfig + */ + assertSnapshotkubeconfigs := func(expectkubeconfigs KubeConfigList, unexpectkubeconfigs KubeConfigList) { + drain: + for { + select { + case snap = <-snapshots: + for _, expected := range expectkubeconfigs { + if _, err := snap.Kubeconfigs.Find(expected.GetMetadata().Ref().Strings()); err != nil { + continue drain + } + } + for _, unexpected := range unexpectkubeconfigs { + if _, err := snap.Kubeconfigs.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + continue drain + } + } + break drain + case err := <-errs: + Expect(err).NotTo(HaveOccurred()) + case <-time.After(time.Second * 10): + nsList1, _ := kubeConfigClient.List(namespace1, clients.ListOpts{}) + nsList2, _ := kubeConfigClient.List(namespace2, clients.ListOpts{}) + combined := append(nsList1, nsList2...) + Fail("expected final snapshot before 10 seconds. 
expected " + log.Sprintf("%v", combined)) + } + } + } + + kubeConfig1a, err := kubeConfigClient.Write(NewKubeConfig(namespace1, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + kubeConfig1b, err := kubeConfigClient.Write(NewKubeConfig(namespace2, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + kubeConfigWatched := KubeConfigList{kubeConfig1a, kubeConfig1b} + assertSnapshotkubeconfigs(kubeConfigWatched, nil) + err = kubeConfigClient.Delete(kubeConfig1a.GetMetadata().Namespace, kubeConfig1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = kubeConfigClient.Delete(kubeConfig1b.GetMetadata().Namespace, kubeConfig1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + kubeConfigNotWatched := KubeConfigList{kubeConfig1a, kubeConfig1b} + assertSnapshotkubeconfigs(nil, kubeConfigNotWatched) + + deleteNamespaces(ctx, kube, namespace1, namespace2) + + getNewNamespaces1and2() + createNamespaces(ctx, kube, namespace1, namespace2) + }) + + It("Should not contain resources from a deleted namespace, that is filtered", func() { + ctx := context.Background() + err := emitter.Register() + Expect(err).NotTo(HaveOccurred()) + + snapshots, errs, err := emitter.Snapshots([]string{""}, clients.WatchOpts{ + Ctx: ctx, + RefreshRate: time.Second, + ExpressionSelector: labelExpression1, + }) + Expect(err).NotTo(HaveOccurred()) + + var snap *KubeconfigsSnapshot + + /* + KubeConfig + */ + + assertSnapshotkubeconfigs := func(expectkubeconfigs KubeConfigList, unexpectkubeconfigs KubeConfigList) { + drain: + for { + select { + case snap = <-snapshots: + for _, expected := range expectkubeconfigs { + if _, err := snap.Kubeconfigs.Find(expected.GetMetadata().Ref().Strings()); err != nil { + continue drain + } + } + for _, unexpected := range unexpectkubeconfigs { + if _, err := snap.Kubeconfigs.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + continue drain + } + } + break drain + case err := <-errs: + Expect(err).NotTo(HaveOccurred()) + case <-time.After(time.Second * 10): + nsList1, _ := kubeConfigClient.List(namespace1, clients.ListOpts{}) + nsList2, _ := kubeConfigClient.List(namespace2, clients.ListOpts{}) + combined := append(nsList1, nsList2...) + Fail("expected final snapshot before 10 seconds. 
expected " + log.Sprintf("%v", combined)) + } + } + } + + // create namespaces + createNamespaceWithLabel(ctx, kube, namespace3, labels1) + createNamespaceWithLabel(ctx, kube, namespace4, labels1) + + kubeConfig2a, err := kubeConfigClient.Write(NewKubeConfig(namespace3, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + kubeConfig2b, err := kubeConfigClient.Write(NewKubeConfig(namespace4, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + kubeConfigNotWatched := KubeConfigList{} + kubeConfigWatched := KubeConfigList{kubeConfig2a, kubeConfig2b} + assertSnapshotkubeconfigs(kubeConfigWatched, kubeConfigNotWatched) + + deleteNamespaces(ctx, kube, namespace3) + + kubeConfigWatched = KubeConfigList{kubeConfig2b} + kubeConfigNotWatched = append(kubeConfigNotWatched, kubeConfig2a) + assertSnapshotkubeconfigs(kubeConfigWatched, kubeConfigNotWatched) + + for _, r := range kubeConfigWatched { + err = kubeConfigClient.Delete(r.GetMetadata().Namespace, r.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + kubeConfigNotWatched = append(kubeConfigNotWatched, r) + } + assertSnapshotkubeconfigs(nil, kubeConfigNotWatched) + + deleteNamespaces(ctx, kube, namespace4) + getNewNamespaces() + }) - assertSnapshotkubeconfigs(nil, KubeConfigList{kubeConfig1a, kubeConfig1b, kubeConfig2a, kubeConfig2b}) }) + + Context("use different resource namespace listers", func() { + BeforeEach(func() { + resourceNamespaceLister = namespace.NewKubeClientResourceNamespaceLister(kube) + emitter = NewKubeconfigsEmitter(kubeConfigClient, resourceNamespaceLister) + }) + + It("Should work with the Kube Client Namespace Lister", func() { + runNamespacedSelectorsWithWatchNamespaces() + }) + }) + }) diff --git a/test/mocks/v1/another_mock_resource_client.sk.go b/test/mocks/v1/another_mock_resource_client.sk.go index f5cbd2bda..fb55ec0cb 100644 --- a/test/mocks/v1/another_mock_resource_client.sk.go +++ b/test/mocks/v1/another_mock_resource_client.sk.go @@ -19,6 +19,7 @@ type AnotherMockResourceWatcher interface { type AnotherMockResourceClient interface { BaseClient() clients.ResourceClient Register() error + RegisterNamespace(namespace string) error Read(namespace, name string, opts clients.ReadOpts) (*AnotherMockResource, error) Write(resource *AnotherMockResource, opts clients.WriteOpts) (*AnotherMockResource, error) Delete(namespace, name string, opts clients.DeleteOpts) error @@ -59,6 +60,10 @@ func (client *anotherMockResourceClient) Register() error { return client.rc.Register() } +func (client *anotherMockResourceClient) RegisterNamespace(namespace string) error { + return client.rc.RegisterNamespace(namespace) +} + func (client *anotherMockResourceClient) Read(namespace, name string, opts clients.ReadOpts) (*AnotherMockResource, error) { opts = opts.WithDefaults() diff --git a/test/mocks/v1/cluster_resource_client.sk.go b/test/mocks/v1/cluster_resource_client.sk.go index ec6c8f1ca..d6b9040ae 100644 --- a/test/mocks/v1/cluster_resource_client.sk.go +++ b/test/mocks/v1/cluster_resource_client.sk.go @@ -19,6 +19,7 @@ type ClusterResourceWatcher interface { type ClusterResourceClient interface { BaseClient() clients.ResourceClient Register() error + RegisterNamespace(namespace string) error Read(name string, opts clients.ReadOpts) (*ClusterResource, error) Write(resource *ClusterResource, opts clients.WriteOpts) (*ClusterResource, error) Delete(name string, opts clients.DeleteOpts) error @@ -59,6 +60,10 @@ func (client *clusterResourceClient) 
Register() error { return client.rc.Register() } +func (client *clusterResourceClient) RegisterNamespace(namespace string) error { + return client.rc.RegisterNamespace(namespace) +} + func (client *clusterResourceClient) Read(name string, opts clients.ReadOpts) (*ClusterResource, error) { opts = opts.WithDefaults() diff --git a/test/mocks/v1/fake_resource_client.sk.go b/test/mocks/v1/fake_resource_client.sk.go index 33132ca6d..73a06329d 100644 --- a/test/mocks/v1/fake_resource_client.sk.go +++ b/test/mocks/v1/fake_resource_client.sk.go @@ -19,6 +19,7 @@ type FakeResourceWatcher interface { type FakeResourceClient interface { BaseClient() clients.ResourceClient Register() error + RegisterNamespace(namespace string) error Read(namespace, name string, opts clients.ReadOpts) (*FakeResource, error) Write(resource *FakeResource, opts clients.WriteOpts) (*FakeResource, error) Delete(namespace, name string, opts clients.DeleteOpts) error @@ -59,6 +60,10 @@ func (client *fakeResourceClient) Register() error { return client.rc.Register() } +func (client *fakeResourceClient) RegisterNamespace(namespace string) error { + return client.rc.RegisterNamespace(namespace) +} + func (client *fakeResourceClient) Read(namespace, name string, opts clients.ReadOpts) (*FakeResource, error) { opts = opts.WithDefaults() diff --git a/test/mocks/v1/mock_custom_spec_hash_type_client.sk.go b/test/mocks/v1/mock_custom_spec_hash_type_client.sk.go index 42df31aba..22ed10bb1 100644 --- a/test/mocks/v1/mock_custom_spec_hash_type_client.sk.go +++ b/test/mocks/v1/mock_custom_spec_hash_type_client.sk.go @@ -19,6 +19,7 @@ type MockCustomSpecHashTypeWatcher interface { type MockCustomSpecHashTypeClient interface { BaseClient() clients.ResourceClient Register() error + RegisterNamespace(namespace string) error Read(namespace, name string, opts clients.ReadOpts) (*MockCustomSpecHashType, error) Write(resource *MockCustomSpecHashType, opts clients.WriteOpts) (*MockCustomSpecHashType, error) Delete(namespace, name string, opts clients.DeleteOpts) error @@ -59,6 +60,10 @@ func (client *mockCustomSpecHashTypeClient) Register() error { return client.rc.Register() } +func (client *mockCustomSpecHashTypeClient) RegisterNamespace(namespace string) error { + return client.rc.RegisterNamespace(namespace) +} + func (client *mockCustomSpecHashTypeClient) Read(namespace, name string, opts clients.ReadOpts) (*MockCustomSpecHashType, error) { opts = opts.WithDefaults() diff --git a/test/mocks/v1/mock_custom_type_client.sk.go b/test/mocks/v1/mock_custom_type_client.sk.go index 3b21d9ae0..372b1de03 100644 --- a/test/mocks/v1/mock_custom_type_client.sk.go +++ b/test/mocks/v1/mock_custom_type_client.sk.go @@ -19,6 +19,7 @@ type MockCustomTypeWatcher interface { type MockCustomTypeClient interface { BaseClient() clients.ResourceClient Register() error + RegisterNamespace(namespace string) error Read(namespace, name string, opts clients.ReadOpts) (*MockCustomType, error) Write(resource *MockCustomType, opts clients.WriteOpts) (*MockCustomType, error) Delete(namespace, name string, opts clients.DeleteOpts) error @@ -59,6 +60,10 @@ func (client *mockCustomTypeClient) Register() error { return client.rc.Register() } +func (client *mockCustomTypeClient) RegisterNamespace(namespace string) error { + return client.rc.RegisterNamespace(namespace) +} + func (client *mockCustomTypeClient) Read(namespace, name string, opts clients.ReadOpts) (*MockCustomType, error) { opts = opts.WithDefaults() diff --git a/test/mocks/v1/mock_resource_client.sk.go 
b/test/mocks/v1/mock_resource_client.sk.go index 2ef1fb113..abca019b7 100644 --- a/test/mocks/v1/mock_resource_client.sk.go +++ b/test/mocks/v1/mock_resource_client.sk.go @@ -19,6 +19,7 @@ type MockResourceWatcher interface { type MockResourceClient interface { BaseClient() clients.ResourceClient Register() error + RegisterNamespace(namespace string) error Read(namespace, name string, opts clients.ReadOpts) (*MockResource, error) Write(resource *MockResource, opts clients.WriteOpts) (*MockResource, error) Delete(namespace, name string, opts clients.DeleteOpts) error @@ -59,6 +60,10 @@ func (client *mockResourceClient) Register() error { return client.rc.Register() } +func (client *mockResourceClient) RegisterNamespace(namespace string) error { + return client.rc.RegisterNamespace(namespace) +} + func (client *mockResourceClient) Read(namespace, name string, opts clients.ReadOpts) (*MockResource, error) { opts = opts.WithDefaults() diff --git a/test/mocks/v1/non_template_tests_test.go b/test/mocks/v1/non_template_tests_test.go index 4944acdbd..5b5499e23 100644 --- a/test/mocks/v1/non_template_tests_test.go +++ b/test/mocks/v1/non_template_tests_test.go @@ -11,6 +11,7 @@ import ( . "github.com/onsi/gomega" "github.com/solo-io/go-utils/log" "github.com/solo-io/k8s-utils/kubeutils" + "github.com/solo-io/solo-kit/pkg/api/external/kubernetes/namespace" "github.com/solo-io/solo-kit/pkg/api/v1/clients" "github.com/solo-io/solo-kit/pkg/api/v1/clients/factory" kuberc "github.com/solo-io/solo-kit/pkg/api/v1/clients/kube" @@ -141,7 +142,9 @@ var _ = Describe("V1Emitter", func() { podClient, err = github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.NewPodClient(ctx, podClientFactory) Expect(err).NotTo(HaveOccurred()) - emitter = NewTestingEmitter(simpleMockResourceClient, mockResourceClient, fakeResourceClient, anotherMockResourceClient, clusterResourceClient, mockCustomTypeClient, mockCustomSpecHashTypeClient, podClient) + + resourceNamespaceLister := namespace.NewKubeClientResourceNamespaceLister(kube) + emitter = NewTestingEmitter(simpleMockResourceClient, mockResourceClient, fakeResourceClient, anotherMockResourceClient, clusterResourceClient, mockCustomTypeClient, mockCustomSpecHashTypeClient, podClient, resourceNamespaceLister) // create `FakeResource`s in "namespace1" and "slowWatchNamespace" _, err = fakeResourceClient.Write(NewFakeResource(namespace1, name1), clients.WriteOpts{Ctx: ctx}) diff --git a/test/mocks/v1/simple_mock_resource_client.sk.go b/test/mocks/v1/simple_mock_resource_client.sk.go index 89bf97341..40babc6e8 100644 --- a/test/mocks/v1/simple_mock_resource_client.sk.go +++ b/test/mocks/v1/simple_mock_resource_client.sk.go @@ -19,6 +19,7 @@ type SimpleMockResourceWatcher interface { type SimpleMockResourceClient interface { BaseClient() clients.ResourceClient Register() error + RegisterNamespace(namespace string) error Read(namespace, name string, opts clients.ReadOpts) (*SimpleMockResource, error) Write(resource *SimpleMockResource, opts clients.WriteOpts) (*SimpleMockResource, error) Delete(namespace, name string, opts clients.DeleteOpts) error @@ -59,6 +60,10 @@ func (client *simpleMockResourceClient) Register() error { return client.rc.Register() } +func (client *simpleMockResourceClient) RegisterNamespace(namespace string) error { + return client.rc.RegisterNamespace(namespace) +} + func (client *simpleMockResourceClient) Read(namespace, name string, opts clients.ReadOpts) (*SimpleMockResource, error) { opts = opts.WithDefaults() diff --git 
a/test/mocks/v1/testing.solo.io_suite_test.go b/test/mocks/v1/testing.solo.io_suite_test.go index 3c5c8674c..a86964623 100644 --- a/test/mocks/v1/testing.solo.io_suite_test.go +++ b/test/mocks/v1/testing.solo.io_suite_test.go @@ -54,6 +54,7 @@ var ( testutils.ErrorNotOccuredOrNotFound(err) err = clientset.ApiextensionsV1().CustomResourceDefinitions().Delete(ctx, "simplemocks.testing.solo.io", metav1.DeleteOptions{}) testutils.ErrorNotOccuredOrNotFound(err) + // known race condition occurs here https://storage.googleapis.com/solo-public-build-logs/log-286333d3-2840-4323-b508-7e351539a9f9.txt Expect(lock.ReleaseLock()).NotTo(HaveOccurred()) }) diff --git a/test/mocks/v1/testing_event_loop_test.go b/test/mocks/v1/testing_event_loop_test.go index 8537ef489..c6a2c26bf 100644 --- a/test/mocks/v1/testing_event_loop_test.go +++ b/test/mocks/v1/testing_event_loop_test.go @@ -1,3 +1,5 @@ +// Code generated by solo-kit. DO NOT EDIT. + //go:build solokit // +build solokit @@ -12,9 +14,12 @@ import ( . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" + skNamespace "github.com/solo-io/solo-kit/pkg/api/external/kubernetes/namespace" "github.com/solo-io/solo-kit/pkg/api/v1/clients" "github.com/solo-io/solo-kit/pkg/api/v1/clients/factory" + "github.com/solo-io/solo-kit/pkg/api/v1/clients/kube/cache" "github.com/solo-io/solo-kit/pkg/api/v1/clients/memory" + "github.com/solo-io/solo-kit/test/helpers" ) var _ = Describe("TestingEventLoop", func() { @@ -28,6 +33,11 @@ var _ = Describe("TestingEventLoop", func() { BeforeEach(func() { ctx = context.Background() + kube := helpers.MustKubeClient() + kubeCache, err := cache.NewKubeCoreCache(context.TODO(), kube) + Expect(err).NotTo(HaveOccurred()) + resourceNamespaceLister := skNamespace.NewKubeClientCacheResourceNamespaceLister(kube, kubeCache) + simpleMockResourceClientFactory := &factory.MemoryResourceClientFactory{ Cache: memory.NewInMemoryResourceCache(), } @@ -76,7 +86,7 @@ var _ = Describe("TestingEventLoop", func() { podClient, err := github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.NewPodClient(ctx, podClientFactory) Expect(err).NotTo(HaveOccurred()) - emitter = NewTestingEmitter(simpleMockResourceClient, mockResourceClient, fakeResourceClient, anotherMockResourceClient, clusterResourceClient, mockCustomTypeClient, mockCustomSpecHashTypeClient, podClient) + emitter = NewTestingEmitter(simpleMockResourceClient, mockResourceClient, fakeResourceClient, anotherMockResourceClient, clusterResourceClient, mockCustomTypeClient, mockCustomSpecHashTypeClient, podClient, resourceNamespaceLister) }) It("runs sync function on a new snapshot", func() { _, err = emitter.SimpleMockResource().Write(NewSimpleMockResource(namespace, "jerry"), clients.WriteOpts{}) @@ -91,6 +101,8 @@ var _ = Describe("TestingEventLoop", func() { Expect(err).NotTo(HaveOccurred()) _, err = emitter.MockCustomType().Write(NewMockCustomType(namespace, "jerry"), clients.WriteOpts{}) Expect(err).NotTo(HaveOccurred()) + _, err = emitter.MockCustomSpecHashType().Write(NewMockCustomSpecHashType(namespace, "jerry"), clients.WriteOpts{}) + Expect(err).NotTo(HaveOccurred()) _, err = emitter.Pod().Write(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.NewPod(namespace, "jerry"), clients.WriteOpts{}) Expect(err).NotTo(HaveOccurred()) sync := &mockTestingSyncer{} diff --git a/test/mocks/v1/testing_snapshot_emitter.sk.go b/test/mocks/v1/testing_snapshot_emitter.sk.go index f3d7ee991..8315c2d4e 100644 --- a/test/mocks/v1/testing_snapshot_emitter.sk.go +++ 
b/test/mocks/v1/testing_snapshot_emitter.sk.go @@ -14,6 +14,7 @@ import ( "go.uber.org/zap" "github.com/solo-io/solo-kit/pkg/api/v1/clients" + "github.com/solo-io/solo-kit/pkg/api/v1/resources" "github.com/solo-io/solo-kit/pkg/errors" skstats "github.com/solo-io/solo-kit/pkg/stats" @@ -92,21 +93,22 @@ type TestingEmitter interface { Pod() github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodClient } -func NewTestingEmitter(simpleMockResourceClient SimpleMockResourceClient, mockResourceClient MockResourceClient, fakeResourceClient FakeResourceClient, anotherMockResourceClient AnotherMockResourceClient, clusterResourceClient ClusterResourceClient, mockCustomTypeClient MockCustomTypeClient, mockCustomSpecHashTypeClient MockCustomSpecHashTypeClient, podClient github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodClient) TestingEmitter { - return NewTestingEmitterWithEmit(simpleMockResourceClient, mockResourceClient, fakeResourceClient, anotherMockResourceClient, clusterResourceClient, mockCustomTypeClient, mockCustomSpecHashTypeClient, podClient, make(chan struct{})) +func NewTestingEmitter(simpleMockResourceClient SimpleMockResourceClient, mockResourceClient MockResourceClient, fakeResourceClient FakeResourceClient, anotherMockResourceClient AnotherMockResourceClient, clusterResourceClient ClusterResourceClient, mockCustomTypeClient MockCustomTypeClient, mockCustomSpecHashTypeClient MockCustomSpecHashTypeClient, podClient github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodClient, resourceNamespaceLister resources.ResourceNamespaceLister) TestingEmitter { + return NewTestingEmitterWithEmit(simpleMockResourceClient, mockResourceClient, fakeResourceClient, anotherMockResourceClient, clusterResourceClient, mockCustomTypeClient, mockCustomSpecHashTypeClient, podClient, resourceNamespaceLister, make(chan struct{})) } -func NewTestingEmitterWithEmit(simpleMockResourceClient SimpleMockResourceClient, mockResourceClient MockResourceClient, fakeResourceClient FakeResourceClient, anotherMockResourceClient AnotherMockResourceClient, clusterResourceClient ClusterResourceClient, mockCustomTypeClient MockCustomTypeClient, mockCustomSpecHashTypeClient MockCustomSpecHashTypeClient, podClient github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodClient, emit <-chan struct{}) TestingEmitter { +func NewTestingEmitterWithEmit(simpleMockResourceClient SimpleMockResourceClient, mockResourceClient MockResourceClient, fakeResourceClient FakeResourceClient, anotherMockResourceClient AnotherMockResourceClient, clusterResourceClient ClusterResourceClient, mockCustomTypeClient MockCustomTypeClient, mockCustomSpecHashTypeClient MockCustomSpecHashTypeClient, podClient github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodClient, resourceNamespaceLister resources.ResourceNamespaceLister, emit <-chan struct{}) TestingEmitter { return &testingEmitter{ - simpleMockResource: simpleMockResourceClient, - mockResource: mockResourceClient, - fakeResource: fakeResourceClient, - anotherMockResource: anotherMockResourceClient, - clusterResource: clusterResourceClient, - mockCustomType: mockCustomTypeClient, - mockCustomSpecHashType: mockCustomSpecHashTypeClient, - pod: podClient, - forceEmit: emit, + simpleMockResource: simpleMockResourceClient, + mockResource: mockResourceClient, + fakeResource: fakeResourceClient, + anotherMockResource: anotherMockResourceClient, + clusterResource: clusterResourceClient, + mockCustomType: mockCustomTypeClient, + 
mockCustomSpecHashType: mockCustomSpecHashTypeClient, + pod: podClient, + resourceNamespaceLister: resourceNamespaceLister, + forceEmit: emit, } } @@ -120,6 +122,14 @@ type testingEmitter struct { mockCustomType MockCustomTypeClient mockCustomSpecHashType MockCustomSpecHashTypeClient pod github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodClient + // resourceNamespaceLister is used to watch for new namespaces when they are created. + // It is used when Expression Selector is in the Watch Opts set in Snapshot(). + resourceNamespaceLister resources.ResourceNamespaceLister + // namespacesWatching is the set of namespaces that we are watching. This is helpful + // when Expression Selector is set on the Watch Opts in Snapshot(). + namespacesWatching sync.Map + // updateNamespaces is used to perform locks and unlocks when watches on namespaces are being updated/created + updateNamespaces sync.Mutex } func (c *testingEmitter) Register() error { @@ -182,6 +192,14 @@ func (c *testingEmitter) Pod() github_com_solo_io_solo_kit_pkg_api_v1_resources_ return c.pod } +// Snapshots will return a channel that can be used to receive snapshots of the +// state of the resources it is watching +// when watching resources, you can set the watchNamespaces, and you can set the +// ExpressionSelector of the WatchOpts. Setting watchNamespaces will watch for all resources +// that are in the specified namespaces. In addition if ExpressionSelector of the WatchOpts is +// set, then all namespaces that meet the label criteria of the ExpressionSelector will +// also be watched. If Expression Selector is set and watched namespaces is set to [""], then it +// will only watch namespaces that meet the label expression selector criteria. func (c *testingEmitter) Snapshots(watchNamespaces []string, opts clients.WatchOpts) (<-chan *TestingSnapshot, <-chan error, error) { if len(watchNamespaces) == 0 { @@ -196,15 +214,20 @@ func (c *testingEmitter) Snapshots(watchNamespaces []string, opts clients.WatchO } errs := make(chan error) + hasWatchedNamespaces := len(watchNamespaces) > 1 || (len(watchNamespaces) == 1 && watchNamespaces[0] != "") + watchingLabeledNamespaces := !(opts.ExpressionSelector == "") var done sync.WaitGroup ctx := opts.Ctx + + // setting up the options for both listing and watching resources in namespaces + watchedNamespacesListOptions := clients.ListOpts{Ctx: opts.Ctx, Selector: opts.Selector} + watchedNamespacesWatchOptions := clients.WatchOpts{Ctx: opts.Ctx, Selector: opts.Selector} /* Create channel for SimpleMockResource */ type simpleMockResourceListWithNamespace struct { list SimpleMockResourceList namespace string } simpleMockResourceChan := make(chan simpleMockResourceListWithNamespace) - var initialSimpleMockResourceList SimpleMockResourceList /* Create channel for MockResource */ type mockResourceListWithNamespace struct { @@ -212,7 +235,6 @@ func (c *testingEmitter) Snapshots(watchNamespaces []string, opts clients.WatchO namespace string } mockResourceChan := make(chan mockResourceListWithNamespace) - var initialMockResourceList MockResourceList /* Create channel for FakeResource */ type fakeResourceListWithNamespace struct { @@ -220,7 +242,6 @@ func (c *testingEmitter) Snapshots(watchNamespaces []string, opts clients.WatchO namespace string } fakeResourceChan := make(chan fakeResourceListWithNamespace) - var initialFakeResourceList FakeResourceList /* Create channel for AnotherMockResource */ type anotherMockResourceListWithNamespace struct { @@ -228,7 +249,6 @@ func (c 
*testingEmitter) Snapshots(watchNamespaces []string, opts clients.WatchO namespace string } anotherMockResourceChan := make(chan anotherMockResourceListWithNamespace) - var initialAnotherMockResourceList AnotherMockResourceList /* Create channel for ClusterResource */ /* Create channel for MockCustomType */ @@ -237,7 +257,6 @@ func (c *testingEmitter) Snapshots(watchNamespaces []string, opts clients.WatchO namespace string } mockCustomTypeChan := make(chan mockCustomTypeListWithNamespace) - var initialMockCustomTypeList MockCustomTypeList /* Create channel for MockCustomSpecHashType */ type mockCustomSpecHashTypeListWithNamespace struct { @@ -245,7 +264,6 @@ func (c *testingEmitter) Snapshots(watchNamespaces []string, opts clients.WatchO namespace string } mockCustomSpecHashTypeChan := make(chan mockCustomSpecHashTypeListWithNamespace) - var initialMockCustomSpecHashTypeList MockCustomSpecHashTypeList /* Create channel for Pod */ type podListWithNamespace struct { @@ -253,225 +271,831 @@ func (c *testingEmitter) Snapshots(watchNamespaces []string, opts clients.WatchO namespace string } podChan := make(chan podListWithNamespace) - var initialPodList github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList currentSnapshot := TestingSnapshot{} - simplemocksByNamespace := make(map[string]SimpleMockResourceList) - mocksByNamespace := make(map[string]MockResourceList) - fakesByNamespace := make(map[string]FakeResourceList) - anothermockresourcesByNamespace := make(map[string]AnotherMockResourceList) - mctsByNamespace := make(map[string]MockCustomTypeList) - mcshtsByNamespace := make(map[string]MockCustomSpecHashTypeList) - podsByNamespace := make(map[string]github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList) - - for _, namespace := range watchNamespaces { - /* Setup namespaced watch for SimpleMockResource */ - { - simplemocks, err := c.simpleMockResource.List(namespace, clients.ListOpts{Ctx: opts.Ctx, Selector: opts.Selector}) + simplemocksByNamespace := sync.Map{} + mocksByNamespace := sync.Map{} + fakesByNamespace := sync.Map{} + anothermockresourcesByNamespace := sync.Map{} + mctsByNamespace := sync.Map{} + mcshtsByNamespace := sync.Map{} + podsByNamespace := sync.Map{} + if hasWatchedNamespaces || !watchingLabeledNamespaces { + // then watch all resources on watch Namespaces + + // watched namespaces + for _, namespace := range watchNamespaces { + /* Setup namespaced watch for SimpleMockResource */ + { + simplemocks, err := c.simpleMockResource.List(namespace, watchedNamespacesListOptions) + if err != nil { + return nil, nil, errors.Wrapf(err, "initial SimpleMockResource list") + } + initialSimpleMockResourceList = append(initialSimpleMockResourceList, simplemocks...) + simplemocksByNamespace.Store(namespace, simplemocks) + } + simpleMockResourceNamespacesChan, simpleMockResourceErrs, err := c.simpleMockResource.Watch(namespace, watchedNamespacesWatchOptions) if err != nil { - return nil, nil, errors.Wrapf(err, "initial SimpleMockResource list") + return nil, nil, errors.Wrapf(err, "starting SimpleMockResource watch") } - initialSimpleMockResourceList = append(initialSimpleMockResourceList, simplemocks...) 
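For orientation before the rest of the regenerated Snapshots body: a minimal consumer sketch, assuming it lives in the generated test/mocks/v1 package (so TestingEmitter, TestingSnapshot, and the clients import are in scope) and that "context" and "time" are imported. The namespace names and the env in (test) selector are placeholders; only the Snapshots signature and the WatchOpts fields are taken from this change.

func consumeSnapshots(ctx context.Context, emitter TestingEmitter) error {
	// Register the underlying resource clients before watching (mirrors the test usage).
	if err := emitter.Register(); err != nil {
		return err
	}
	// ns1 and ns2 are watched directly; any other namespace whose labels satisfy
	// "env in (test)" is picked up through the new ExpressionSelector path.
	snapshots, errs, err := emitter.Snapshots([]string{"ns1", "ns2"}, clients.WatchOpts{
		Ctx:                ctx,
		RefreshRate:        time.Second,
		ExpressionSelector: "env in (test)",
	})
	if err != nil {
		return err
	}
	for {
		select {
		case <-ctx.Done():
			return nil
		case err := <-errs:
			return err
		case snap, ok := <-snapshots:
			if !ok {
				return nil
			}
			_ = snap // hand the snapshot to a syncer here
		}
	}
}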
- simplemocksByNamespace[namespace] = simplemocks - } - simpleMockResourceNamespacesChan, simpleMockResourceErrs, err := c.simpleMockResource.Watch(namespace, opts) - if err != nil { - return nil, nil, errors.Wrapf(err, "starting SimpleMockResource watch") - } - done.Add(1) - go func(namespace string) { - defer done.Done() - errutils.AggregateErrs(ctx, errs, simpleMockResourceErrs, namespace+"-simplemocks") - }(namespace) - /* Setup namespaced watch for MockResource */ - { - mocks, err := c.mockResource.List(namespace, clients.ListOpts{Ctx: opts.Ctx, Selector: opts.Selector}) + done.Add(1) + go func(namespace string) { + defer done.Done() + errutils.AggregateErrs(ctx, errs, simpleMockResourceErrs, namespace+"-simplemocks") + }(namespace) + /* Setup namespaced watch for MockResource */ + { + mocks, err := c.mockResource.List(namespace, watchedNamespacesListOptions) + if err != nil { + return nil, nil, errors.Wrapf(err, "initial MockResource list") + } + initialMockResourceList = append(initialMockResourceList, mocks...) + mocksByNamespace.Store(namespace, mocks) + } + mockResourceNamespacesChan, mockResourceErrs, err := c.mockResource.Watch(namespace, watchedNamespacesWatchOptions) if err != nil { - return nil, nil, errors.Wrapf(err, "initial MockResource list") + return nil, nil, errors.Wrapf(err, "starting MockResource watch") + } + + done.Add(1) + go func(namespace string) { + defer done.Done() + errutils.AggregateErrs(ctx, errs, mockResourceErrs, namespace+"-mocks") + }(namespace) + /* Setup namespaced watch for FakeResource */ + { + fakes, err := c.fakeResource.List(namespace, watchedNamespacesListOptions) + if err != nil { + return nil, nil, errors.Wrapf(err, "initial FakeResource list") + } + initialFakeResourceList = append(initialFakeResourceList, fakes...) + fakesByNamespace.Store(namespace, fakes) + } + fakeResourceNamespacesChan, fakeResourceErrs, err := c.fakeResource.Watch(namespace, watchedNamespacesWatchOptions) + if err != nil { + return nil, nil, errors.Wrapf(err, "starting FakeResource watch") } - initialMockResourceList = append(initialMockResourceList, mocks...) - mocksByNamespace[namespace] = mocks - } - mockResourceNamespacesChan, mockResourceErrs, err := c.mockResource.Watch(namespace, opts) - if err != nil { - return nil, nil, errors.Wrapf(err, "starting MockResource watch") - } - done.Add(1) - go func(namespace string) { - defer done.Done() - errutils.AggregateErrs(ctx, errs, mockResourceErrs, namespace+"-mocks") - }(namespace) - /* Setup namespaced watch for FakeResource */ - { - fakes, err := c.fakeResource.List(namespace, clients.ListOpts{Ctx: opts.Ctx, Selector: opts.Selector}) + done.Add(1) + go func(namespace string) { + defer done.Done() + errutils.AggregateErrs(ctx, errs, fakeResourceErrs, namespace+"-fakes") + }(namespace) + /* Setup namespaced watch for AnotherMockResource */ + { + anothermockresources, err := c.anotherMockResource.List(namespace, watchedNamespacesListOptions) + if err != nil { + return nil, nil, errors.Wrapf(err, "initial AnotherMockResource list") + } + initialAnotherMockResourceList = append(initialAnotherMockResourceList, anothermockresources...) 
+ anothermockresourcesByNamespace.Store(namespace, anothermockresources) + } + anotherMockResourceNamespacesChan, anotherMockResourceErrs, err := c.anotherMockResource.Watch(namespace, watchedNamespacesWatchOptions) if err != nil { - return nil, nil, errors.Wrapf(err, "initial FakeResource list") + return nil, nil, errors.Wrapf(err, "starting AnotherMockResource watch") } - initialFakeResourceList = append(initialFakeResourceList, fakes...) - fakesByNamespace[namespace] = fakes - } - fakeResourceNamespacesChan, fakeResourceErrs, err := c.fakeResource.Watch(namespace, opts) - if err != nil { - return nil, nil, errors.Wrapf(err, "starting FakeResource watch") - } - done.Add(1) - go func(namespace string) { - defer done.Done() - errutils.AggregateErrs(ctx, errs, fakeResourceErrs, namespace+"-fakes") - }(namespace) - /* Setup namespaced watch for AnotherMockResource */ - { - anothermockresources, err := c.anotherMockResource.List(namespace, clients.ListOpts{Ctx: opts.Ctx, Selector: opts.Selector}) + done.Add(1) + go func(namespace string) { + defer done.Done() + errutils.AggregateErrs(ctx, errs, anotherMockResourceErrs, namespace+"-anothermockresources") + }(namespace) + /* Setup namespaced watch for MockCustomType */ + { + mcts, err := c.mockCustomType.List(namespace, watchedNamespacesListOptions) + if err != nil { + return nil, nil, errors.Wrapf(err, "initial MockCustomType list") + } + initialMockCustomTypeList = append(initialMockCustomTypeList, mcts...) + mctsByNamespace.Store(namespace, mcts) + } + mockCustomTypeNamespacesChan, mockCustomTypeErrs, err := c.mockCustomType.Watch(namespace, watchedNamespacesWatchOptions) if err != nil { - return nil, nil, errors.Wrapf(err, "initial AnotherMockResource list") + return nil, nil, errors.Wrapf(err, "starting MockCustomType watch") } - initialAnotherMockResourceList = append(initialAnotherMockResourceList, anothermockresources...) - anothermockresourcesByNamespace[namespace] = anothermockresources - } - anotherMockResourceNamespacesChan, anotherMockResourceErrs, err := c.anotherMockResource.Watch(namespace, opts) - if err != nil { - return nil, nil, errors.Wrapf(err, "starting AnotherMockResource watch") - } - done.Add(1) - go func(namespace string) { - defer done.Done() - errutils.AggregateErrs(ctx, errs, anotherMockResourceErrs, namespace+"-anothermockresources") - }(namespace) - /* Setup namespaced watch for MockCustomType */ - { - mcts, err := c.mockCustomType.List(namespace, clients.ListOpts{Ctx: opts.Ctx, Selector: opts.Selector}) + done.Add(1) + go func(namespace string) { + defer done.Done() + errutils.AggregateErrs(ctx, errs, mockCustomTypeErrs, namespace+"-mcts") + }(namespace) + /* Setup namespaced watch for MockCustomSpecHashType */ + { + mcshts, err := c.mockCustomSpecHashType.List(namespace, watchedNamespacesListOptions) + if err != nil { + return nil, nil, errors.Wrapf(err, "initial MockCustomSpecHashType list") + } + initialMockCustomSpecHashTypeList = append(initialMockCustomSpecHashTypeList, mcshts...) + mcshtsByNamespace.Store(namespace, mcshts) + } + mockCustomSpecHashTypeNamespacesChan, mockCustomSpecHashTypeErrs, err := c.mockCustomSpecHashType.Watch(namespace, watchedNamespacesWatchOptions) if err != nil { - return nil, nil, errors.Wrapf(err, "initial MockCustomType list") + return nil, nil, errors.Wrapf(err, "starting MockCustomSpecHashType watch") } - initialMockCustomTypeList = append(initialMockCustomTypeList, mcts...) 
- mctsByNamespace[namespace] = mcts - } - mockCustomTypeNamespacesChan, mockCustomTypeErrs, err := c.mockCustomType.Watch(namespace, opts) - if err != nil { - return nil, nil, errors.Wrapf(err, "starting MockCustomType watch") - } - done.Add(1) - go func(namespace string) { - defer done.Done() - errutils.AggregateErrs(ctx, errs, mockCustomTypeErrs, namespace+"-mcts") - }(namespace) - /* Setup namespaced watch for MockCustomSpecHashType */ - { - mcshts, err := c.mockCustomSpecHashType.List(namespace, clients.ListOpts{Ctx: opts.Ctx, Selector: opts.Selector}) + done.Add(1) + go func(namespace string) { + defer done.Done() + errutils.AggregateErrs(ctx, errs, mockCustomSpecHashTypeErrs, namespace+"-mcshts") + }(namespace) + /* Setup namespaced watch for Pod */ + { + pods, err := c.pod.List(namespace, watchedNamespacesListOptions) + if err != nil { + return nil, nil, errors.Wrapf(err, "initial Pod list") + } + initialPodList = append(initialPodList, pods...) + podsByNamespace.Store(namespace, pods) + } + podNamespacesChan, podErrs, err := c.pod.Watch(namespace, watchedNamespacesWatchOptions) if err != nil { - return nil, nil, errors.Wrapf(err, "initial MockCustomSpecHashType list") + return nil, nil, errors.Wrapf(err, "starting Pod watch") } - initialMockCustomSpecHashTypeList = append(initialMockCustomSpecHashTypeList, mcshts...) - mcshtsByNamespace[namespace] = mcshts + + done.Add(1) + go func(namespace string) { + defer done.Done() + errutils.AggregateErrs(ctx, errs, podErrs, namespace+"-pods") + }(namespace) + /* Watch for changes and update snapshot */ + go func(namespace string) { + defer func() { + c.namespacesWatching.Delete(namespace) + }() + c.namespacesWatching.Store(namespace, true) + for { + select { + case <-ctx.Done(): + return + case simpleMockResourceList, ok := <-simpleMockResourceNamespacesChan: + if !ok { + return + } + select { + case <-ctx.Done(): + return + case simpleMockResourceChan <- simpleMockResourceListWithNamespace{list: simpleMockResourceList, namespace: namespace}: + } + case mockResourceList, ok := <-mockResourceNamespacesChan: + if !ok { + return + } + select { + case <-ctx.Done(): + return + case mockResourceChan <- mockResourceListWithNamespace{list: mockResourceList, namespace: namespace}: + } + case fakeResourceList, ok := <-fakeResourceNamespacesChan: + if !ok { + return + } + select { + case <-ctx.Done(): + return + case fakeResourceChan <- fakeResourceListWithNamespace{list: fakeResourceList, namespace: namespace}: + } + case anotherMockResourceList, ok := <-anotherMockResourceNamespacesChan: + if !ok { + return + } + select { + case <-ctx.Done(): + return + case anotherMockResourceChan <- anotherMockResourceListWithNamespace{list: anotherMockResourceList, namespace: namespace}: + } + case mockCustomTypeList, ok := <-mockCustomTypeNamespacesChan: + if !ok { + return + } + select { + case <-ctx.Done(): + return + case mockCustomTypeChan <- mockCustomTypeListWithNamespace{list: mockCustomTypeList, namespace: namespace}: + } + case mockCustomSpecHashTypeList, ok := <-mockCustomSpecHashTypeNamespacesChan: + if !ok { + return + } + select { + case <-ctx.Done(): + return + case mockCustomSpecHashTypeChan <- mockCustomSpecHashTypeListWithNamespace{list: mockCustomSpecHashTypeList, namespace: namespace}: + } + case podList, ok := <-podNamespacesChan: + if !ok { + return + } + select { + case <-ctx.Done(): + return + case podChan <- podListWithNamespace{list: podList, namespace: namespace}: + } + } + } + }(namespace) } - mockCustomSpecHashTypeNamespacesChan, 
mockCustomSpecHashTypeErrs, err := c.mockCustomSpecHashType.Watch(namespace, opts) - if err != nil { - return nil, nil, errors.Wrapf(err, "starting MockCustomSpecHashType watch") + } + // watch all other namespaces that are labeled and fit the Expression Selector + if opts.ExpressionSelector != "" { + // watch resources of non-watched namespaces that fit the expression selectors + namespaceListOptions := resources.ResourceNamespaceListOptions{ + Ctx: opts.Ctx, + ExpressionSelector: opts.ExpressionSelector, + } + namespaceWatchOptions := resources.ResourceNamespaceWatchOptions{ + Ctx: opts.Ctx, + ExpressionSelector: opts.ExpressionSelector, } - done.Add(1) - go func(namespace string) { - defer done.Done() - errutils.AggregateErrs(ctx, errs, mockCustomSpecHashTypeErrs, namespace+"-mcshts") - }(namespace) - /* Setup namespaced watch for Pod */ - { - pods, err := c.pod.List(namespace, clients.ListOpts{Ctx: opts.Ctx, Selector: opts.Selector}) - if err != nil { - return nil, nil, errors.Wrapf(err, "initial Pod list") + filterNamespaces := resources.ResourceNamespaceList{} + for _, ns := range watchNamespaces { + // we do not want to filter out "" which equals all namespaces + // the reason is because we will never create a watch on ""(all namespaces) because + // doing so means we watch all resources regardless of namespace. Our intent is to + // watch only certain namespaces. + if ns != "" { + filterNamespaces = append(filterNamespaces, resources.ResourceNamespace{Name: ns}) } - initialPodList = append(initialPodList, pods...) - podsByNamespace[namespace] = pods } - podNamespacesChan, podErrs, err := c.pod.Watch(namespace, opts) + namespacesResources, err := c.resourceNamespaceLister.GetResourceNamespaceList(namespaceListOptions, filterNamespaces) if err != nil { - return nil, nil, errors.Wrapf(err, "starting Pod watch") + return nil, nil, err } + newlyRegisteredNamespaces := make([]string, len(namespacesResources)) + // non watched namespaces that are labeled + for i, resourceNamespace := range namespacesResources { + c.namespacesWatching.Load(resourceNamespace) + namespace := resourceNamespace.Name + newlyRegisteredNamespaces[i] = namespace + err = c.simpleMockResource.RegisterNamespace(namespace) + if err != nil { + return nil, nil, errors.Wrapf(err, "there was an error registering the namespace to the simpleMockResource") + } + /* Setup namespaced watch for SimpleMockResource */ + { + simplemocks, err := c.simpleMockResource.List(namespace, clients.ListOpts{Ctx: opts.Ctx}) + if err != nil { + return nil, nil, errors.Wrapf(err, "initial SimpleMockResource list with new namespace") + } + initialSimpleMockResourceList = append(initialSimpleMockResourceList, simplemocks...) 
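The branch above inlines namespace discovery for the ExpressionSelector path. Restated as a stand-alone helper for readability, this is a sketch rather than part of the generated file; it assumes only the resources.ResourceNamespaceLister types used above, and reads filterNamespaces the way the surrounding code uses it, namely as namespaces to exclude because they are already watched.

// labeledNamespaces returns the names of namespaces matching the expression
// selector, excluding the explicitly watched ones ("" is skipped because it
// means all namespaces and is never used as a filter).
func labeledNamespaces(ctx context.Context, lister resources.ResourceNamespaceLister,
	watchNamespaces []string, expressionSelector string) ([]string, error) {
	filter := resources.ResourceNamespaceList{}
	for _, ns := range watchNamespaces {
		if ns != "" {
			filter = append(filter, resources.ResourceNamespace{Name: ns})
		}
	}
	nsList, err := lister.GetResourceNamespaceList(resources.ResourceNamespaceListOptions{
		Ctx:                ctx,
		ExpressionSelector: expressionSelector,
	}, filter)
	if err != nil {
		return nil, err
	}
	names := make([]string, 0, len(nsList))
	for _, ns := range nsList {
		names = append(names, ns.Name)
	}
	return names, nil
}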
+ simplemocksByNamespace.Store(namespace, simplemocks) + } + simpleMockResourceNamespacesChan, simpleMockResourceErrs, err := c.simpleMockResource.Watch(namespace, clients.WatchOpts{Ctx: opts.Ctx}) + if err != nil { + return nil, nil, errors.Wrapf(err, "starting SimpleMockResource watch") + } - done.Add(1) - go func(namespace string) { - defer done.Done() - errutils.AggregateErrs(ctx, errs, podErrs, namespace+"-pods") - }(namespace) + done.Add(1) + go func(namespace string) { + defer done.Done() + errutils.AggregateErrs(ctx, errs, simpleMockResourceErrs, namespace+"-simplemocks") + }(namespace) + err = c.mockResource.RegisterNamespace(namespace) + if err != nil { + return nil, nil, errors.Wrapf(err, "there was an error registering the namespace to the mockResource") + } + /* Setup namespaced watch for MockResource */ + { + mocks, err := c.mockResource.List(namespace, clients.ListOpts{Ctx: opts.Ctx}) + if err != nil { + return nil, nil, errors.Wrapf(err, "initial MockResource list with new namespace") + } + initialMockResourceList = append(initialMockResourceList, mocks...) + mocksByNamespace.Store(namespace, mocks) + } + mockResourceNamespacesChan, mockResourceErrs, err := c.mockResource.Watch(namespace, clients.WatchOpts{Ctx: opts.Ctx}) + if err != nil { + return nil, nil, errors.Wrapf(err, "starting MockResource watch") + } - /* Watch for changes and update snapshot */ - go func(namespace string) { - for { - select { - case <-ctx.Done(): - return - case simpleMockResourceList, ok := <-simpleMockResourceNamespacesChan: - if !ok { - return - } - select { - case <-ctx.Done(): - return - case simpleMockResourceChan <- simpleMockResourceListWithNamespace{list: simpleMockResourceList, namespace: namespace}: - } - case mockResourceList, ok := <-mockResourceNamespacesChan: - if !ok { - return - } - select { - case <-ctx.Done(): - return - case mockResourceChan <- mockResourceListWithNamespace{list: mockResourceList, namespace: namespace}: - } - case fakeResourceList, ok := <-fakeResourceNamespacesChan: - if !ok { - return - } + done.Add(1) + go func(namespace string) { + defer done.Done() + errutils.AggregateErrs(ctx, errs, mockResourceErrs, namespace+"-mocks") + }(namespace) + err = c.fakeResource.RegisterNamespace(namespace) + if err != nil { + return nil, nil, errors.Wrapf(err, "there was an error registering the namespace to the fakeResource") + } + /* Setup namespaced watch for FakeResource */ + { + fakes, err := c.fakeResource.List(namespace, clients.ListOpts{Ctx: opts.Ctx}) + if err != nil { + return nil, nil, errors.Wrapf(err, "initial FakeResource list with new namespace") + } + initialFakeResourceList = append(initialFakeResourceList, fakes...) 
+ fakesByNamespace.Store(namespace, fakes) + } + fakeResourceNamespacesChan, fakeResourceErrs, err := c.fakeResource.Watch(namespace, clients.WatchOpts{Ctx: opts.Ctx}) + if err != nil { + return nil, nil, errors.Wrapf(err, "starting FakeResource watch") + } + + done.Add(1) + go func(namespace string) { + defer done.Done() + errutils.AggregateErrs(ctx, errs, fakeResourceErrs, namespace+"-fakes") + }(namespace) + err = c.anotherMockResource.RegisterNamespace(namespace) + if err != nil { + return nil, nil, errors.Wrapf(err, "there was an error registering the namespace to the anotherMockResource") + } + /* Setup namespaced watch for AnotherMockResource */ + { + anothermockresources, err := c.anotherMockResource.List(namespace, clients.ListOpts{Ctx: opts.Ctx}) + if err != nil { + return nil, nil, errors.Wrapf(err, "initial AnotherMockResource list with new namespace") + } + initialAnotherMockResourceList = append(initialAnotherMockResourceList, anothermockresources...) + anothermockresourcesByNamespace.Store(namespace, anothermockresources) + } + anotherMockResourceNamespacesChan, anotherMockResourceErrs, err := c.anotherMockResource.Watch(namespace, clients.WatchOpts{Ctx: opts.Ctx}) + if err != nil { + return nil, nil, errors.Wrapf(err, "starting AnotherMockResource watch") + } + + done.Add(1) + go func(namespace string) { + defer done.Done() + errutils.AggregateErrs(ctx, errs, anotherMockResourceErrs, namespace+"-anothermockresources") + }(namespace) + err = c.mockCustomType.RegisterNamespace(namespace) + if err != nil { + return nil, nil, errors.Wrapf(err, "there was an error registering the namespace to the mockCustomType") + } + /* Setup namespaced watch for MockCustomType */ + { + mcts, err := c.mockCustomType.List(namespace, clients.ListOpts{Ctx: opts.Ctx}) + if err != nil { + return nil, nil, errors.Wrapf(err, "initial MockCustomType list with new namespace") + } + initialMockCustomTypeList = append(initialMockCustomTypeList, mcts...) + mctsByNamespace.Store(namespace, mcts) + } + mockCustomTypeNamespacesChan, mockCustomTypeErrs, err := c.mockCustomType.Watch(namespace, clients.WatchOpts{Ctx: opts.Ctx}) + if err != nil { + return nil, nil, errors.Wrapf(err, "starting MockCustomType watch") + } + + done.Add(1) + go func(namespace string) { + defer done.Done() + errutils.AggregateErrs(ctx, errs, mockCustomTypeErrs, namespace+"-mcts") + }(namespace) + err = c.mockCustomSpecHashType.RegisterNamespace(namespace) + if err != nil { + return nil, nil, errors.Wrapf(err, "there was an error registering the namespace to the mockCustomSpecHashType") + } + /* Setup namespaced watch for MockCustomSpecHashType */ + { + mcshts, err := c.mockCustomSpecHashType.List(namespace, clients.ListOpts{Ctx: opts.Ctx}) + if err != nil { + return nil, nil, errors.Wrapf(err, "initial MockCustomSpecHashType list with new namespace") + } + initialMockCustomSpecHashTypeList = append(initialMockCustomSpecHashTypeList, mcshts...) 
+ mcshtsByNamespace.Store(namespace, mcshts) + } + mockCustomSpecHashTypeNamespacesChan, mockCustomSpecHashTypeErrs, err := c.mockCustomSpecHashType.Watch(namespace, clients.WatchOpts{Ctx: opts.Ctx}) + if err != nil { + return nil, nil, errors.Wrapf(err, "starting MockCustomSpecHashType watch") + } + + done.Add(1) + go func(namespace string) { + defer done.Done() + errutils.AggregateErrs(ctx, errs, mockCustomSpecHashTypeErrs, namespace+"-mcshts") + }(namespace) + err = c.pod.RegisterNamespace(namespace) + if err != nil { + return nil, nil, errors.Wrapf(err, "there was an error registering the namespace to the pod") + } + /* Setup namespaced watch for Pod */ + { + pods, err := c.pod.List(namespace, clients.ListOpts{Ctx: opts.Ctx}) + if err != nil { + return nil, nil, errors.Wrapf(err, "initial Pod list with new namespace") + } + initialPodList = append(initialPodList, pods...) + podsByNamespace.Store(namespace, pods) + } + podNamespacesChan, podErrs, err := c.pod.Watch(namespace, clients.WatchOpts{Ctx: opts.Ctx}) + if err != nil { + return nil, nil, errors.Wrapf(err, "starting Pod watch") + } + + done.Add(1) + go func(namespace string) { + defer done.Done() + errutils.AggregateErrs(ctx, errs, podErrs, namespace+"-pods") + }(namespace) + /* Watch for changes and update snapshot */ + go func(namespace string) { + for { select { case <-ctx.Done(): return - case fakeResourceChan <- fakeResourceListWithNamespace{list: fakeResourceList, namespace: namespace}: - } - case anotherMockResourceList, ok := <-anotherMockResourceNamespacesChan: - if !ok { - return + case simpleMockResourceList, ok := <-simpleMockResourceNamespacesChan: + if !ok { + return + } + select { + case <-ctx.Done(): + return + case simpleMockResourceChan <- simpleMockResourceListWithNamespace{list: simpleMockResourceList, namespace: namespace}: + } + case mockResourceList, ok := <-mockResourceNamespacesChan: + if !ok { + return + } + select { + case <-ctx.Done(): + return + case mockResourceChan <- mockResourceListWithNamespace{list: mockResourceList, namespace: namespace}: + } + case fakeResourceList, ok := <-fakeResourceNamespacesChan: + if !ok { + return + } + select { + case <-ctx.Done(): + return + case fakeResourceChan <- fakeResourceListWithNamespace{list: fakeResourceList, namespace: namespace}: + } + case anotherMockResourceList, ok := <-anotherMockResourceNamespacesChan: + if !ok { + return + } + select { + case <-ctx.Done(): + return + case anotherMockResourceChan <- anotherMockResourceListWithNamespace{list: anotherMockResourceList, namespace: namespace}: + } + case mockCustomTypeList, ok := <-mockCustomTypeNamespacesChan: + if !ok { + return + } + select { + case <-ctx.Done(): + return + case mockCustomTypeChan <- mockCustomTypeListWithNamespace{list: mockCustomTypeList, namespace: namespace}: + } + case mockCustomSpecHashTypeList, ok := <-mockCustomSpecHashTypeNamespacesChan: + if !ok { + return + } + select { + case <-ctx.Done(): + return + case mockCustomSpecHashTypeChan <- mockCustomSpecHashTypeListWithNamespace{list: mockCustomSpecHashTypeList, namespace: namespace}: + } + case podList, ok := <-podNamespacesChan: + if !ok { + return + } + select { + case <-ctx.Done(): + return + case podChan <- podListWithNamespace{list: podList, namespace: namespace}: + } } + } + }(namespace) + } + if len(newlyRegisteredNamespaces) > 0 { + contextutils.LoggerFrom(ctx).Infof("registered the new namespace %v", newlyRegisteredNamespaces) + } + + // create watch on all namespaces, so that we can add all resources from new 
namespaces + // we will be watching namespaces that meet the Expression Selector filter + + namespaceWatch, errsReceiver, err := c.resourceNamespaceLister.GetResourceNamespaceWatch(namespaceWatchOptions, filterNamespaces) + if err != nil { + return nil, nil, err + } + if errsReceiver != nil { + go func() { + for { select { case <-ctx.Done(): return - case anotherMockResourceChan <- anotherMockResourceListWithNamespace{list: anotherMockResourceList, namespace: namespace}: + case err = <-errsReceiver: + errs <- errors.Wrapf(err, "received error from watch on resource namespaces") } - case mockCustomTypeList, ok := <-mockCustomTypeNamespacesChan: + } + }() + } + + go func() { + for { + select { + case <-ctx.Done(): + return + case resourceNamespaces, ok := <-namespaceWatch: if !ok { return } - select { - case <-ctx.Done(): - return - case mockCustomTypeChan <- mockCustomTypeListWithNamespace{list: mockCustomTypeList, namespace: namespace}: + // get the list of new namespaces, if there is a new namespace + // get the list of resources from that namespace, and add + // a watch for new resources created/deleted on that namespace + c.updateNamespaces.Lock() + + // get the new namespaces, and get a map of the namespaces + mapOfResourceNamespaces := make(map[string]struct{}, len(resourceNamespaces)) + newNamespaces := []string{} + for _, ns := range resourceNamespaces { + if _, hit := c.namespacesWatching.Load(ns.Name); !hit { + newNamespaces = append(newNamespaces, ns.Name) + } + mapOfResourceNamespaces[ns.Name] = struct{}{} } - case mockCustomSpecHashTypeList, ok := <-mockCustomSpecHashTypeNamespacesChan: - if !ok { - return + + for _, ns := range watchNamespaces { + mapOfResourceNamespaces[ns] = struct{}{} } - select { - case <-ctx.Done(): - return - case mockCustomSpecHashTypeChan <- mockCustomSpecHashTypeListWithNamespace{list: mockCustomSpecHashTypeList, namespace: namespace}: + + missingNamespaces := []string{} + // use the map of namespace resources to find missing/deleted namespaces + c.namespacesWatching.Range(func(key interface{}, value interface{}) bool { + name := key.(string) + if _, hit := mapOfResourceNamespaces[name]; !hit { + missingNamespaces = append(missingNamespaces, name) + } + return true + }) + + for _, ns := range missingNamespaces { + simpleMockResourceChan <- simpleMockResourceListWithNamespace{list: SimpleMockResourceList{}, namespace: ns} + mockResourceChan <- mockResourceListWithNamespace{list: MockResourceList{}, namespace: ns} + fakeResourceChan <- fakeResourceListWithNamespace{list: FakeResourceList{}, namespace: ns} + anotherMockResourceChan <- anotherMockResourceListWithNamespace{list: AnotherMockResourceList{}, namespace: ns} + mockCustomTypeChan <- mockCustomTypeListWithNamespace{list: MockCustomTypeList{}, namespace: ns} + mockCustomSpecHashTypeChan <- mockCustomSpecHashTypeListWithNamespace{list: MockCustomSpecHashTypeList{}, namespace: ns} + podChan <- podListWithNamespace{list: github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList{}, namespace: ns} } - case podList, ok := <-podNamespacesChan: - if !ok { - return + + for _, namespace := range newNamespaces { + var err error + err = c.simpleMockResource.RegisterNamespace(namespace) + if err != nil { + errs <- errors.Wrapf(err, "there was an error registering the namespace to the simpleMockResource") + continue + } + /* Setup namespaced watch for SimpleMockResource for new namespace */ + { + simplemocks, err := c.simpleMockResource.List(namespace, clients.ListOpts{Ctx: opts.Ctx, Selector: 
opts.Selector}) + if err != nil { + errs <- errors.Wrapf(err, "initial new namespace SimpleMockResource list in namespace watch") + continue + } + simplemocksByNamespace.Store(namespace, simplemocks) + } + simpleMockResourceNamespacesChan, simpleMockResourceErrs, err := c.simpleMockResource.Watch(namespace, clients.WatchOpts{Ctx: opts.Ctx, Selector: opts.Selector}) + if err != nil { + errs <- errors.Wrapf(err, "starting new namespace SimpleMockResource watch") + continue + } + + done.Add(1) + go func(namespace string) { + defer done.Done() + errutils.AggregateErrs(ctx, errs, simpleMockResourceErrs, namespace+"-new-namespace-simplemocks") + }(namespace) + err = c.mockResource.RegisterNamespace(namespace) + if err != nil { + errs <- errors.Wrapf(err, "there was an error registering the namespace to the mockResource") + continue + } + /* Setup namespaced watch for MockResource for new namespace */ + { + mocks, err := c.mockResource.List(namespace, clients.ListOpts{Ctx: opts.Ctx, Selector: opts.Selector}) + if err != nil { + errs <- errors.Wrapf(err, "initial new namespace MockResource list in namespace watch") + continue + } + mocksByNamespace.Store(namespace, mocks) + } + mockResourceNamespacesChan, mockResourceErrs, err := c.mockResource.Watch(namespace, clients.WatchOpts{Ctx: opts.Ctx, Selector: opts.Selector}) + if err != nil { + errs <- errors.Wrapf(err, "starting new namespace MockResource watch") + continue + } + + done.Add(1) + go func(namespace string) { + defer done.Done() + errutils.AggregateErrs(ctx, errs, mockResourceErrs, namespace+"-new-namespace-mocks") + }(namespace) + err = c.fakeResource.RegisterNamespace(namespace) + if err != nil { + errs <- errors.Wrapf(err, "there was an error registering the namespace to the fakeResource") + continue + } + /* Setup namespaced watch for FakeResource for new namespace */ + { + fakes, err := c.fakeResource.List(namespace, clients.ListOpts{Ctx: opts.Ctx, Selector: opts.Selector}) + if err != nil { + errs <- errors.Wrapf(err, "initial new namespace FakeResource list in namespace watch") + continue + } + fakesByNamespace.Store(namespace, fakes) + } + fakeResourceNamespacesChan, fakeResourceErrs, err := c.fakeResource.Watch(namespace, clients.WatchOpts{Ctx: opts.Ctx, Selector: opts.Selector}) + if err != nil { + errs <- errors.Wrapf(err, "starting new namespace FakeResource watch") + continue + } + + done.Add(1) + go func(namespace string) { + defer done.Done() + errutils.AggregateErrs(ctx, errs, fakeResourceErrs, namespace+"-new-namespace-fakes") + }(namespace) + err = c.anotherMockResource.RegisterNamespace(namespace) + if err != nil { + errs <- errors.Wrapf(err, "there was an error registering the namespace to the anotherMockResource") + continue + } + /* Setup namespaced watch for AnotherMockResource for new namespace */ + { + anothermockresources, err := c.anotherMockResource.List(namespace, clients.ListOpts{Ctx: opts.Ctx, Selector: opts.Selector}) + if err != nil { + errs <- errors.Wrapf(err, "initial new namespace AnotherMockResource list in namespace watch") + continue + } + anothermockresourcesByNamespace.Store(namespace, anothermockresources) + } + anotherMockResourceNamespacesChan, anotherMockResourceErrs, err := c.anotherMockResource.Watch(namespace, clients.WatchOpts{Ctx: opts.Ctx, Selector: opts.Selector}) + if err != nil { + errs <- errors.Wrapf(err, "starting new namespace AnotherMockResource watch") + continue + } + + done.Add(1) + go func(namespace string) { + defer done.Done() + errutils.AggregateErrs(ctx, errs, 
anotherMockResourceErrs, namespace+"-new-namespace-anothermockresources") + }(namespace) + err = c.mockCustomType.RegisterNamespace(namespace) + if err != nil { + errs <- errors.Wrapf(err, "there was an error registering the namespace to the mockCustomType") + continue + } + /* Setup namespaced watch for MockCustomType for new namespace */ + { + mcts, err := c.mockCustomType.List(namespace, clients.ListOpts{Ctx: opts.Ctx, Selector: opts.Selector}) + if err != nil { + errs <- errors.Wrapf(err, "initial new namespace MockCustomType list in namespace watch") + continue + } + mctsByNamespace.Store(namespace, mcts) + } + mockCustomTypeNamespacesChan, mockCustomTypeErrs, err := c.mockCustomType.Watch(namespace, clients.WatchOpts{Ctx: opts.Ctx, Selector: opts.Selector}) + if err != nil { + errs <- errors.Wrapf(err, "starting new namespace MockCustomType watch") + continue + } + + done.Add(1) + go func(namespace string) { + defer done.Done() + errutils.AggregateErrs(ctx, errs, mockCustomTypeErrs, namespace+"-new-namespace-mcts") + }(namespace) + err = c.mockCustomSpecHashType.RegisterNamespace(namespace) + if err != nil { + errs <- errors.Wrapf(err, "there was an error registering the namespace to the mockCustomSpecHashType") + continue + } + /* Setup namespaced watch for MockCustomSpecHashType for new namespace */ + { + mcshts, err := c.mockCustomSpecHashType.List(namespace, clients.ListOpts{Ctx: opts.Ctx, Selector: opts.Selector}) + if err != nil { + errs <- errors.Wrapf(err, "initial new namespace MockCustomSpecHashType list in namespace watch") + continue + } + mcshtsByNamespace.Store(namespace, mcshts) + } + mockCustomSpecHashTypeNamespacesChan, mockCustomSpecHashTypeErrs, err := c.mockCustomSpecHashType.Watch(namespace, clients.WatchOpts{Ctx: opts.Ctx, Selector: opts.Selector}) + if err != nil { + errs <- errors.Wrapf(err, "starting new namespace MockCustomSpecHashType watch") + continue + } + + done.Add(1) + go func(namespace string) { + defer done.Done() + errutils.AggregateErrs(ctx, errs, mockCustomSpecHashTypeErrs, namespace+"-new-namespace-mcshts") + }(namespace) + err = c.pod.RegisterNamespace(namespace) + if err != nil { + errs <- errors.Wrapf(err, "there was an error registering the namespace to the pod") + continue + } + /* Setup namespaced watch for Pod for new namespace */ + { + pods, err := c.pod.List(namespace, clients.ListOpts{Ctx: opts.Ctx, Selector: opts.Selector}) + if err != nil { + errs <- errors.Wrapf(err, "initial new namespace Pod list in namespace watch") + continue + } + podsByNamespace.Store(namespace, pods) + } + podNamespacesChan, podErrs, err := c.pod.Watch(namespace, clients.WatchOpts{Ctx: opts.Ctx, Selector: opts.Selector}) + if err != nil { + errs <- errors.Wrapf(err, "starting new namespace Pod watch") + continue + } + + done.Add(1) + go func(namespace string) { + defer done.Done() + errutils.AggregateErrs(ctx, errs, podErrs, namespace+"-new-namespace-pods") + }(namespace) + /* Watch for changes and update snapshot */ + go func(namespace string) { + defer func() { + c.namespacesWatching.Delete(namespace) + }() + c.namespacesWatching.Store(namespace, true) + for { + select { + case <-ctx.Done(): + return + case simpleMockResourceList, ok := <-simpleMockResourceNamespacesChan: + if !ok { + return + } + select { + case <-ctx.Done(): + return + case simpleMockResourceChan <- simpleMockResourceListWithNamespace{list: simpleMockResourceList, namespace: namespace}: + } + case mockResourceList, ok := <-mockResourceNamespacesChan: + if !ok { + return + } + select { + 
case <-ctx.Done(): + return + case mockResourceChan <- mockResourceListWithNamespace{list: mockResourceList, namespace: namespace}: + } + case fakeResourceList, ok := <-fakeResourceNamespacesChan: + if !ok { + return + } + select { + case <-ctx.Done(): + return + case fakeResourceChan <- fakeResourceListWithNamespace{list: fakeResourceList, namespace: namespace}: + } + case anotherMockResourceList, ok := <-anotherMockResourceNamespacesChan: + if !ok { + return + } + select { + case <-ctx.Done(): + return + case anotherMockResourceChan <- anotherMockResourceListWithNamespace{list: anotherMockResourceList, namespace: namespace}: + } + case mockCustomTypeList, ok := <-mockCustomTypeNamespacesChan: + if !ok { + return + } + select { + case <-ctx.Done(): + return + case mockCustomTypeChan <- mockCustomTypeListWithNamespace{list: mockCustomTypeList, namespace: namespace}: + } + case mockCustomSpecHashTypeList, ok := <-mockCustomSpecHashTypeNamespacesChan: + if !ok { + return + } + select { + case <-ctx.Done(): + return + case mockCustomSpecHashTypeChan <- mockCustomSpecHashTypeListWithNamespace{list: mockCustomSpecHashTypeList, namespace: namespace}: + } + case podList, ok := <-podNamespacesChan: + if !ok { + return + } + select { + case <-ctx.Done(): + return + case podChan <- podListWithNamespace{list: podList, namespace: namespace}: + } + } + } + }(namespace) } - select { - case <-ctx.Done(): - return - case podChan <- podListWithNamespace{list: podList, namespace: namespace}: + if len(newNamespaces) > 0 { + contextutils.LoggerFrom(ctx).Infof("registered the new namespace %v", newNamespaces) } + c.updateNamespaces.Unlock() } } - }(namespace) + }() } /* Initialize snapshot for Simplemocks */ currentSnapshot.Simplemocks = initialSimpleMockResourceList.Sort() @@ -487,7 +1111,11 @@ func (c *testingEmitter) Snapshots(watchNamespaces []string, opts clients.WatchO if err != nil { return nil, nil, errors.Wrapf(err, "initial ClusterResource list") } - clusterResourceChan, clusterResourceErrs, err := c.clusterResource.Watch(opts) + // for Cluster scoped resources, we do not use Expression Selectors + clusterResourceChan, clusterResourceErrs, err := c.clusterResource.Watch(clients.WatchOpts{ + Ctx: opts.Ctx, + Selector: opts.Selector, + }) if err != nil { return nil, nil, errors.Wrapf(err, "starting ClusterResource watch") } @@ -568,11 +1196,13 @@ func (c *testingEmitter) Snapshots(watchNamespaces []string, opts clients.WatchO ) // merge lists by namespace - simplemocksByNamespace[namespace] = simpleMockResourceNamespacedList.list + simplemocksByNamespace.Store(namespace, simpleMockResourceNamespacedList.list) var simpleMockResourceList SimpleMockResourceList - for _, simplemocks := range simplemocksByNamespace { - simpleMockResourceList = append(simpleMockResourceList, simplemocks...) - } + simplemocksByNamespace.Range(func(key interface{}, value interface{}) bool { + mocks := value.(SimpleMockResourceList) + simpleMockResourceList = append(simpleMockResourceList, mocks...) 
+ return true + }) currentSnapshot.Simplemocks = simpleMockResourceList.Sort() case mockResourceNamespacedList, ok := <-mockResourceChan: if !ok { @@ -590,11 +1220,13 @@ func (c *testingEmitter) Snapshots(watchNamespaces []string, opts clients.WatchO ) // merge lists by namespace - mocksByNamespace[namespace] = mockResourceNamespacedList.list + mocksByNamespace.Store(namespace, mockResourceNamespacedList.list) var mockResourceList MockResourceList - for _, mocks := range mocksByNamespace { + mocksByNamespace.Range(func(key interface{}, value interface{}) bool { + mocks := value.(MockResourceList) mockResourceList = append(mockResourceList, mocks...) - } + return true + }) currentSnapshot.Mocks = mockResourceList.Sort() case fakeResourceNamespacedList, ok := <-fakeResourceChan: if !ok { @@ -612,11 +1244,13 @@ func (c *testingEmitter) Snapshots(watchNamespaces []string, opts clients.WatchO ) // merge lists by namespace - fakesByNamespace[namespace] = fakeResourceNamespacedList.list + fakesByNamespace.Store(namespace, fakeResourceNamespacedList.list) var fakeResourceList FakeResourceList - for _, fakes := range fakesByNamespace { - fakeResourceList = append(fakeResourceList, fakes...) - } + fakesByNamespace.Range(func(key interface{}, value interface{}) bool { + mocks := value.(FakeResourceList) + fakeResourceList = append(fakeResourceList, mocks...) + return true + }) currentSnapshot.Fakes = fakeResourceList.Sort() case anotherMockResourceNamespacedList, ok := <-anotherMockResourceChan: if !ok { @@ -634,11 +1268,13 @@ func (c *testingEmitter) Snapshots(watchNamespaces []string, opts clients.WatchO ) // merge lists by namespace - anothermockresourcesByNamespace[namespace] = anotherMockResourceNamespacedList.list + anothermockresourcesByNamespace.Store(namespace, anotherMockResourceNamespacedList.list) var anotherMockResourceList AnotherMockResourceList - for _, anothermockresources := range anothermockresourcesByNamespace { - anotherMockResourceList = append(anotherMockResourceList, anothermockresources...) - } + anothermockresourcesByNamespace.Range(func(key interface{}, value interface{}) bool { + mocks := value.(AnotherMockResourceList) + anotherMockResourceList = append(anotherMockResourceList, mocks...) + return true + }) currentSnapshot.Anothermockresources = anotherMockResourceList.Sort() case clusterResourceList, ok := <-clusterResourceChan: if !ok { @@ -670,11 +1306,13 @@ func (c *testingEmitter) Snapshots(watchNamespaces []string, opts clients.WatchO ) // merge lists by namespace - mctsByNamespace[namespace] = mockCustomTypeNamespacedList.list + mctsByNamespace.Store(namespace, mockCustomTypeNamespacedList.list) var mockCustomTypeList MockCustomTypeList - for _, mcts := range mctsByNamespace { - mockCustomTypeList = append(mockCustomTypeList, mcts...) - } + mctsByNamespace.Range(func(key interface{}, value interface{}) bool { + mocks := value.(MockCustomTypeList) + mockCustomTypeList = append(mockCustomTypeList, mocks...) 
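The per-namespace maps above were switched from plain maps to sync.Map, presumably because the namespace watch introduced by this change can add entries from a separate goroutine. Every resource type in the merge blocks here rebuilds its snapshot list with the same Store/Range pattern, shown once in isolation as a sketch using the generated MockResourceList type (with "sync" imported).

// mergeMockLists stores the latest list for one namespace and rebuilds the
// flat, sorted list that goes into the snapshot.
func mergeMockLists(byNamespace *sync.Map, namespace string, latest MockResourceList) MockResourceList {
	byNamespace.Store(namespace, latest)
	var merged MockResourceList
	byNamespace.Range(func(key, value interface{}) bool {
		merged = append(merged, value.(MockResourceList)...)
		return true
	})
	return merged.Sort()
}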
+ return true + }) currentSnapshot.Mcts = mockCustomTypeList.Sort() case mockCustomSpecHashTypeNamespacedList, ok := <-mockCustomSpecHashTypeChan: if !ok { @@ -692,11 +1330,13 @@ func (c *testingEmitter) Snapshots(watchNamespaces []string, opts clients.WatchO ) // merge lists by namespace - mcshtsByNamespace[namespace] = mockCustomSpecHashTypeNamespacedList.list + mcshtsByNamespace.Store(namespace, mockCustomSpecHashTypeNamespacedList.list) var mockCustomSpecHashTypeList MockCustomSpecHashTypeList - for _, mcshts := range mcshtsByNamespace { - mockCustomSpecHashTypeList = append(mockCustomSpecHashTypeList, mcshts...) - } + mcshtsByNamespace.Range(func(key interface{}, value interface{}) bool { + mocks := value.(MockCustomSpecHashTypeList) + mockCustomSpecHashTypeList = append(mockCustomSpecHashTypeList, mocks...) + return true + }) currentSnapshot.Mcshts = mockCustomSpecHashTypeList.Sort() case podNamespacedList, ok := <-podChan: if !ok { @@ -714,11 +1354,13 @@ func (c *testingEmitter) Snapshots(watchNamespaces []string, opts clients.WatchO ) // merge lists by namespace - podsByNamespace[namespace] = podNamespacedList.list + podsByNamespace.Store(namespace, podNamespacedList.list) var podList github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList - for _, pods := range podsByNamespace { - podList = append(podList, pods...) - } + podsByNamespace.Range(func(key interface{}, value interface{}) bool { + mocks := value.(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList) + podList = append(podList, mocks...) + return true + }) currentSnapshot.Pods = podList.Sort() } } diff --git a/test/mocks/v1/testing_snapshot_emitter_test.go b/test/mocks/v1/testing_snapshot_emitter_test.go index 7f133e7a9..0b77bf73e 100644 --- a/test/mocks/v1/testing_snapshot_emitter_test.go +++ b/test/mocks/v1/testing_snapshot_emitter_test.go @@ -1,3 +1,5 @@ +// Code generated by solo-kit. DO NOT EDIT. + //go:build solokit // +build solokit @@ -5,6 +7,7 @@ package v1 import ( "context" + "fmt" "os" "time" @@ -15,12 +18,19 @@ import ( . "github.com/onsi/gomega" "github.com/solo-io/go-utils/log" "github.com/solo-io/k8s-utils/kubeutils" + "github.com/solo-io/solo-kit/pkg/api/external/kubernetes/namespace" "github.com/solo-io/solo-kit/pkg/api/v1/clients" "github.com/solo-io/solo-kit/pkg/api/v1/clients/factory" kuberc "github.com/solo-io/solo-kit/pkg/api/v1/clients/kube" + "github.com/solo-io/solo-kit/pkg/api/v1/clients/kube/cache" "github.com/solo-io/solo-kit/pkg/api/v1/clients/memory" + "github.com/solo-io/solo-kit/pkg/api/v1/resources" + "github.com/solo-io/solo-kit/pkg/api/v1/resources/core" "github.com/solo-io/solo-kit/test/helpers" + corev1 "k8s.io/api/core/v1" apiext "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" @@ -36,144 +46,265 @@ var _ = Describe("V1Emitter", func() { log.Printf("This test creates kubernetes resources and is disabled by default. 
To enable, set RUN_KUBE_TESTS=1 in your env.") return } + + type metadataGetter interface { + GetMetadata() *core.Metadata + } + var ( - ctx context.Context - namespace1 string - namespace2 string - name1, name2 = "angela" + helpers.RandString(3), "bob" + helpers.RandString(3) - cfg *rest.Config - clientset *apiext.Clientset - kube kubernetes.Interface - emitter TestingEmitter - simpleMockResourceClient SimpleMockResourceClient - mockResourceClient MockResourceClient - fakeResourceClient FakeResourceClient - anotherMockResourceClient AnotherMockResourceClient - clusterResourceClient ClusterResourceClient - mockCustomTypeClient MockCustomTypeClient - podClient github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodClient + ctx context.Context + namespace1, namespace2 string + namespace3, namespace4 string + namespace5, namespace6 string + name1, name2 = "angela" + helpers.RandString(3), "bob" + helpers.RandString(3) + name3, name4 = "susan" + helpers.RandString(3), "jim" + helpers.RandString(3) + name5 = "melisa" + helpers.RandString(3) + labels1 = map[string]string{"env": "test"} + labelExpression1 = "env in (test)" + cfg *rest.Config + clientset *apiext.Clientset + kube kubernetes.Interface + emitter TestingEmitter + simpleMockResourceClient SimpleMockResourceClient + mockResourceClient MockResourceClient + fakeResourceClient FakeResourceClient + anotherMockResourceClient AnotherMockResourceClient + clusterResourceClient ClusterResourceClient + mockCustomTypeClient MockCustomTypeClient + mockCustomSpecHashTypeClient MockCustomSpecHashTypeClient + podClient github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodClient + resourceNamespaceLister resources.ResourceNamespaceLister + kubeCache cache.KubeCoreCache ) + const ( + TIME_BETWEEN_MESSAGES = 5 + ) + NewSimpleMockResourceWithLabels := func(namespace, name string, labels map[string]string) *SimpleMockResource { + resource := NewSimpleMockResource(namespace, name) + resource.GetMetadata().Labels = labels + return resource + } + NewMockResourceWithLabels := func(namespace, name string, labels map[string]string) *MockResource { + resource := NewMockResource(namespace, name) + resource.GetMetadata().Labels = labels + return resource + } + NewFakeResourceWithLabels := func(namespace, name string, labels map[string]string) *FakeResource { + resource := NewFakeResource(namespace, name) + resource.GetMetadata().Labels = labels + return resource + } + NewAnotherMockResourceWithLabels := func(namespace, name string, labels map[string]string) *AnotherMockResource { + resource := NewAnotherMockResource(namespace, name) + resource.GetMetadata().Labels = labels + return resource + } + NewClusterResourceWithLabels := func(namespace, name string, labels map[string]string) *ClusterResource { + resource := NewClusterResource(namespace, name) + resource.GetMetadata().Labels = labels + return resource + } + NewMockCustomTypeWithLabels := func(namespace, name string, labels map[string]string) *MockCustomType { + resource := NewMockCustomType(namespace, name) + resource.GetMetadata().Labels = labels + return resource + } + NewMockCustomSpecHashTypeWithLabels := func(namespace, name string, labels map[string]string) *MockCustomSpecHashType { + resource := NewMockCustomSpecHashType(namespace, name) + resource.GetMetadata().Labels = labels + return resource + } + NewPodWithLabels := func(namespace, name string, labels map[string]string) *github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.Pod { + resource := 
github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.NewPod(namespace, name) + resource.GetMetadata().Labels = labels + return resource + } - BeforeEach(func() { - err := os.Setenv(statusutils.PodNamespaceEnvName, "default") + createNamespaces := func(ctx context.Context, kube kubernetes.Interface, namespaces ...string) { + err := kubeutils.CreateNamespacesInParallel(ctx, kube, namespaces...) Expect(err).NotTo(HaveOccurred()) + } - ctx = context.Background() - namespace1 = helpers.RandString(8) - namespace2 = helpers.RandString(8) - kube = helpers.MustKubeClient() - err = kubeutils.CreateNamespacesInParallel(ctx, kube, namespace1, namespace2) - Expect(err).NotTo(HaveOccurred()) - cfg, err = kubeutils.GetConfig("", "") - Expect(err).NotTo(HaveOccurred()) + createNamespaceWithLabel := func(ctx context.Context, kube kubernetes.Interface, namespace string, labels map[string]string) { + _, err := kube.CoreV1().Namespaces().Create(ctx, &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: namespace, + Labels: labels, + }, + }, metav1.CreateOptions{}) + Expect(err).ToNot(HaveOccurred()) + } - clientset, err = apiext.NewForConfig(cfg) + deleteNamespaces := func(ctx context.Context, kube kubernetes.Interface, namespaces ...string) { + err := kubeutils.DeleteNamespacesInParallelBlocking(ctx, kube, namespaces...) Expect(err).NotTo(HaveOccurred()) - // SimpleMockResource Constructor - simpleMockResourceClientFactory := &factory.MemoryResourceClientFactory{ - Cache: memory.NewInMemoryResourceCache(), - } + } - simpleMockResourceClient, err = NewSimpleMockResourceClient(ctx, simpleMockResourceClientFactory) - Expect(err).NotTo(HaveOccurred()) - // MockResource Constructor - mockResourceClientFactory := &factory.KubeResourceClientFactory{ - Crd: MockResourceCrd, - Cfg: cfg, - SharedCache: kuberc.NewKubeCache(context.TODO()), - } + // getNewNamespaces is used to generate new namespace names, so that we do not have to wait + // when deleting namespaces in runNamespacedSelectorsWithWatchNamespaces. Since + // runNamespacedSelectorsWithWatchNamespaces uses watchNamespaces set to namespace1 and + // namespace2, this will work. Because the emitter willl only be watching namespaces that are + // labeled. + getNewNamespaces := func() { + namespace3 = helpers.RandString(8) + namespace4 = helpers.RandString(8) + namespace5 = helpers.RandString(8) + namespace6 = helpers.RandString(8) + } - err = helpers.AddAndRegisterCrd(ctx, MockResourceCrd, clientset) - Expect(err).NotTo(HaveOccurred()) + // getNewNamespaces1and2 is used to generate new namespaces for namespace 1 and 2. 
+ // used for the same reason as getNewNamespaces() above + getNewNamespaces1and2 := func() { + namespace1 = helpers.RandString(8) + namespace2 = helpers.RandString(8) + } - mockResourceClient, err = NewMockResourceClient(ctx, mockResourceClientFactory) - Expect(err).NotTo(HaveOccurred()) - // FakeResource Constructor - fakeResourceClientFactory := &factory.MemoryResourceClientFactory{ - Cache: memory.NewInMemoryResourceCache(), + getMapOfNamespaceResources := func(getList func(string) ([]metadataGetter, error)) map[string][]string { + namespaces := []string{namespace1, namespace2, namespace3, namespace4, namespace5, namespace6} + namespaceResources := make(map[string][]string, len(namespaces)) + for _, ns := range namespaces { + list, _ := getList(ns) + for _, snap := range list { + snapMeta := snap.GetMetadata() + if _, hit := namespaceResources[snapMeta.Namespace]; hit { + namespaceResources[snap.GetMetadata().Namespace] = make([]string, 1) + } + namespaceResources[snapMeta.Namespace] = append(namespaceResources[snapMeta.Namespace], snapMeta.Name) + } } + return namespaceResources + } - fakeResourceClient, err = NewFakeResourceClient(ctx, fakeResourceClientFactory) - Expect(err).NotTo(HaveOccurred()) - // AnotherMockResource Constructor - anotherMockResourceClientFactory := &factory.KubeResourceClientFactory{ - Crd: AnotherMockResourceCrd, - Cfg: cfg, - SharedCache: kuberc.NewKubeCache(context.TODO()), + findNonMatchingResources := func(matchList, findList []metadataGetter) map[string][]string { + nonMatching := make(map[string][]string) + for _, snap := range matchList { + snapMeta := snap.GetMetadata() + matched := false + for _, pre := range findList { + preMeta := pre.GetMetadata() + if preMeta.Namespace == snapMeta.Namespace && preMeta.Name == snapMeta.Name { + matched = true + break + } + } + if !matched { + if _, hit := nonMatching[snapMeta.Namespace]; hit { + nonMatching[snap.GetMetadata().Namespace] = make([]string, 1) + } + nonMatching[snapMeta.Namespace] = append(nonMatching[snapMeta.Namespace], snapMeta.Name) + } } + return nonMatching + } - err = helpers.AddAndRegisterCrd(ctx, AnotherMockResourceCrd, clientset) - Expect(err).NotTo(HaveOccurred()) - - anotherMockResourceClient, err = NewAnotherMockResourceClient(ctx, anotherMockResourceClientFactory) - Expect(err).NotTo(HaveOccurred()) - // ClusterResource Constructor - clusterResourceClientFactory := &factory.KubeResourceClientFactory{ - Crd: ClusterResourceCrd, - Cfg: cfg, - SharedCache: kuberc.NewKubeCache(context.TODO()), + findMatchingResources := func(matchList, findList []metadataGetter) map[string][]string { + matching := make(map[string][]string) + for _, snap := range matchList { + snapMeta := snap.GetMetadata() + matched := false + for _, pre := range findList { + preMeta := pre.GetMetadata() + if preMeta.Namespace == snapMeta.Namespace && preMeta.Name == snapMeta.Name { + matched = true + break + } + } + if matched { + if _, hit := matching[snapMeta.Namespace]; hit { + matching[snap.GetMetadata().Namespace] = make([]string, 1) + } + matching[snapMeta.Namespace] = append(matching[snapMeta.Namespace], snapMeta.Name) + } } + return matching + } - err = helpers.AddAndRegisterCrd(ctx, ClusterResourceCrd, clientset) - Expect(err).NotTo(HaveOccurred()) - - clusterResourceClient, err = NewClusterResourceClient(ctx, clusterResourceClientFactory) - Expect(err).NotTo(HaveOccurred()) - // MockCustomType Constructor - mockCustomTypeClientFactory := &factory.MemoryResourceClientFactory{ - Cache: 
memory.NewInMemoryResourceCache(), + getMapOfResources := func(listOfResources []metadataGetter) map[string][]string { + resources := make(map[string][]string) + for _, snap := range listOfResources { + snapMeta := snap.GetMetadata() + if _, hit := resources[snapMeta.Namespace]; hit { + resources[snap.GetMetadata().Namespace] = make([]string, 1) + } + resources[snapMeta.Namespace] = append(resources[snapMeta.Namespace], snapMeta.Name) } - - mockCustomTypeClient, err = NewMockCustomTypeClient(ctx, mockCustomTypeClientFactory) - Expect(err).NotTo(HaveOccurred()) - - mockCustomSpecHashTypeClientFactory := &factory.MemoryResourceClientFactory{ - Cache: memory.NewInMemoryResourceCache(), + return resources + } + convertSimplemocksToMetadataGetter := func(rl SimpleMockResourceList) []metadataGetter { + listConv := make([]metadataGetter, len(rl)) + for i, r := range rl { + listConv[i] = r } - mockCustomSpecHashTypeClient, err := NewMockCustomSpecHashTypeClient(ctx, mockCustomSpecHashTypeClientFactory) - Expect(err).NotTo(HaveOccurred()) - - // Pod Constructor - podClientFactory := &factory.MemoryResourceClientFactory{ - Cache: memory.NewInMemoryResourceCache(), + return listConv + } + convertMocksToMetadataGetter := func(rl MockResourceList) []metadataGetter { + listConv := make([]metadataGetter, len(rl)) + for i, r := range rl { + listConv[i] = r } + return listConv + } + convertFakesToMetadataGetter := func(rl FakeResourceList) []metadataGetter { + listConv := make([]metadataGetter, len(rl)) + for i, r := range rl { + listConv[i] = r + } + return listConv + } + convertAnothermockresourcesToMetadataGetter := func(rl AnotherMockResourceList) []metadataGetter { + listConv := make([]metadataGetter, len(rl)) + for i, r := range rl { + listConv[i] = r + } + return listConv + } + convertmctsToMetadataGetter := func(rl MockCustomTypeList) []metadataGetter { + listConv := make([]metadataGetter, len(rl)) + for i, r := range rl { + listConv[i] = r + } + return listConv + } + convertmcshtsToMetadataGetter := func(rl MockCustomSpecHashTypeList) []metadataGetter { + listConv := make([]metadataGetter, len(rl)) + for i, r := range rl { + listConv[i] = r + } + return listConv + } + convertpodsToMetadataGetter := func(rl github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList) []metadataGetter { + listConv := make([]metadataGetter, len(rl)) + for i, r := range rl { + listConv[i] = r + } + return listConv + } - podClient, err = github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.NewPodClient(ctx, podClientFactory) - Expect(err).NotTo(HaveOccurred()) - emitter = NewTestingEmitter(simpleMockResourceClient, mockResourceClient, fakeResourceClient, anotherMockResourceClient, clusterResourceClient, mockCustomTypeClient, mockCustomSpecHashTypeClient, podClient) - }) - AfterEach(func() { - err := os.Unsetenv(statusutils.PodNamespaceEnvName) - Expect(err).NotTo(HaveOccurred()) - - err = kubeutils.DeleteNamespacesInParallelBlocking(ctx, kube, namespace1, namespace2) - Expect(err).NotTo(HaveOccurred()) - clusterResourceClient.Delete(name1, clients.DeleteOpts{}) - clusterResourceClient.Delete(name2, clients.DeleteOpts{}) - }) - - It("tracks snapshots on changes to any resource", func() { + runNamespacedSelectorsWithWatchNamespaces := func() { ctx := context.Background() err := emitter.Register() Expect(err).NotTo(HaveOccurred()) + // There is an error here in the code. 
snapshots, errs, err := emitter.Snapshots([]string{namespace1, namespace2}, clients.WatchOpts{ - Ctx: ctx, - RefreshRate: time.Second, + Ctx: ctx, + RefreshRate: time.Second, + ExpressionSelector: labelExpression1, }) Expect(err).NotTo(HaveOccurred()) var snap *TestingSnapshot + var previous *TestingSnapshot /* SimpleMockResource */ - assertSnapshotSimplemocks := func(expectSimplemocks SimpleMockResourceList, unexpectSimplemocks SimpleMockResourceList) { drain: for { select { case snap = <-snapshots: + previous = snap for _, expected := range expectSimplemocks { if _, err := snap.Simplemocks.Find(expected.GetMetadata().Ref().Strings()); err != nil { continue drain @@ -188,49 +319,100 @@ var _ = Describe("V1Emitter", func() { case err := <-errs: Expect(err).NotTo(HaveOccurred()) case <-time.After(time.Second * 10): - nsList1, _ := simpleMockResourceClient.List(namespace1, clients.ListOpts{}) - nsList2, _ := simpleMockResourceClient.List(namespace2, clients.ListOpts{}) - combined := append(nsList1, nsList2...) - Fail("expected final snapshot before 10 seconds. expected " + log.Sprintf("%v", combined)) + var expectedResources map[string][]string + var unexpectedResource map[string][]string + + if previous != nil { + expectedResources = findNonMatchingResources(convertSimplemocksToMetadataGetter(expectSimplemocks), convertSimplemocksToMetadataGetter(previous.Simplemocks)) + unexpectedResource = findMatchingResources(convertSimplemocksToMetadataGetter(unexpectSimplemocks), convertSimplemocksToMetadataGetter(previous.Simplemocks)) + } else { + expectedResources = getMapOfResources(convertSimplemocksToMetadataGetter(expectSimplemocks)) + unexpectedResource = getMapOfResources(convertSimplemocksToMetadataGetter(unexpectSimplemocks)) + } + getList := func(ns string) ([]metadataGetter, error) { + l, err := simpleMockResourceClient.List(ns, clients.ListOpts{}) + return convertSimplemocksToMetadataGetter(l), err + } + namespaceResources := getMapOfNamespaceResources(getList) + Fail(fmt.Sprintf("expected final snapshot before 10 seconds. 
expected \nExpected:\n%#v\n\nUnexpected:\n%#v\n\nnamespaces:\n%#v", expectedResources, unexpectedResource, namespaceResources)) } } } + simpleMockResource1a, err := simpleMockResourceClient.Write(NewSimpleMockResource(namespace1, name1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) simpleMockResource1b, err := simpleMockResourceClient.Write(NewSimpleMockResource(namespace2, name1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) + simpleMockResourceWatched := SimpleMockResourceList{simpleMockResource1a, simpleMockResource1b} + assertSnapshotSimplemocks(simpleMockResourceWatched, nil) - assertSnapshotSimplemocks(SimpleMockResourceList{simpleMockResource1a, simpleMockResource1b}, nil) - simpleMockResource2a, err := simpleMockResourceClient.Write(NewSimpleMockResource(namespace1, name2), clients.WriteOpts{Ctx: ctx}) + simpleMockResource3a, err := simpleMockResourceClient.Write(NewSimpleMockResourceWithLabels(namespace1, name3, labels1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - simpleMockResource2b, err := simpleMockResourceClient.Write(NewSimpleMockResource(namespace2, name2), clients.WriteOpts{Ctx: ctx}) + simpleMockResource3b, err := simpleMockResourceClient.Write(NewSimpleMockResourceWithLabels(namespace2, name3, labels1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) + simpleMockResourceWatched = append(simpleMockResourceWatched, SimpleMockResourceList{simpleMockResource3a, simpleMockResource3b}...) + assertSnapshotSimplemocks(simpleMockResourceWatched, nil) + + createNamespaceWithLabel(ctx, kube, namespace3, labels1) + createNamespaces(ctx, kube, namespace4) - assertSnapshotSimplemocks(SimpleMockResourceList{simpleMockResource1a, simpleMockResource1b, simpleMockResource2a, simpleMockResource2b}, nil) + simpleMockResource4a, err := simpleMockResourceClient.Write(NewSimpleMockResource(namespace3, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + simpleMockResource4b, err := simpleMockResourceClient.Write(NewSimpleMockResource(namespace4, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + simpleMockResourceWatched = append(simpleMockResourceWatched, simpleMockResource4a) + simpleMockResourceNotWatched := SimpleMockResourceList{simpleMockResource4b} + assertSnapshotSimplemocks(simpleMockResourceWatched, simpleMockResourceNotWatched) - err = simpleMockResourceClient.Delete(simpleMockResource2a.GetMetadata().Namespace, simpleMockResource2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + simpleMockResource5a, err := simpleMockResourceClient.Write(NewSimpleMockResourceWithLabels(namespace3, name2, labels1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - err = simpleMockResourceClient.Delete(simpleMockResource2b.GetMetadata().Namespace, simpleMockResource2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + simpleMockResource5b, err := simpleMockResourceClient.Write(NewSimpleMockResourceWithLabels(namespace4, name2, labels1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) + simpleMockResourceWatched = append(simpleMockResourceWatched, simpleMockResource5a) + simpleMockResourceNotWatched = append(simpleMockResourceNotWatched, simpleMockResource5b) + assertSnapshotSimplemocks(simpleMockResourceWatched, simpleMockResourceNotWatched) - assertSnapshotSimplemocks(SimpleMockResourceList{simpleMockResource1a, simpleMockResource1b}, SimpleMockResourceList{simpleMockResource2a, simpleMockResource2b}) + for _, r := range 
simpleMockResourceNotWatched { + err = simpleMockResourceClient.Delete(r.GetMetadata().Namespace, r.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + } err = simpleMockResourceClient.Delete(simpleMockResource1a.GetMetadata().Namespace, simpleMockResource1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) err = simpleMockResourceClient.Delete(simpleMockResource1b.GetMetadata().Namespace, simpleMockResource1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) + simpleMockResourceNotWatched = append(simpleMockResourceNotWatched, SimpleMockResourceList{simpleMockResource1a, simpleMockResource1b}...) + simpleMockResourceWatched = SimpleMockResourceList{simpleMockResource3a, simpleMockResource3b, simpleMockResource4a, simpleMockResource5a} + assertSnapshotSimplemocks(simpleMockResourceWatched, simpleMockResourceNotWatched) + + err = simpleMockResourceClient.Delete(simpleMockResource3a.GetMetadata().Namespace, simpleMockResource3a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = simpleMockResourceClient.Delete(simpleMockResource3b.GetMetadata().Namespace, simpleMockResource3b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + simpleMockResourceNotWatched = append(simpleMockResourceNotWatched, SimpleMockResourceList{simpleMockResource3a, simpleMockResource3b}...) + simpleMockResourceWatched = SimpleMockResourceList{simpleMockResource4a, simpleMockResource5a} + assertSnapshotSimplemocks(simpleMockResourceWatched, simpleMockResourceNotWatched) - assertSnapshotSimplemocks(nil, SimpleMockResourceList{simpleMockResource1a, simpleMockResource1b, simpleMockResource2a, simpleMockResource2b}) + err = simpleMockResourceClient.Delete(simpleMockResource4a.GetMetadata().Namespace, simpleMockResource4a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = simpleMockResourceClient.Delete(simpleMockResource5a.GetMetadata().Namespace, simpleMockResource5a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + simpleMockResourceNotWatched = append(simpleMockResourceNotWatched, SimpleMockResourceList{simpleMockResource5a, simpleMockResource5b}...) + assertSnapshotSimplemocks(nil, simpleMockResourceNotWatched) + + // clean up environment + deleteNamespaces(ctx, kube, namespace3, namespace4) + getNewNamespaces() /* MockResource */ - assertSnapshotMocks := func(expectMocks MockResourceList, unexpectMocks MockResourceList) { drain: for { select { case snap = <-snapshots: + previous = snap for _, expected := range expectMocks { if _, err := snap.Mocks.Find(expected.GetMetadata().Ref().Strings()); err != nil { continue drain @@ -245,49 +427,100 @@ var _ = Describe("V1Emitter", func() { case err := <-errs: Expect(err).NotTo(HaveOccurred()) case <-time.After(time.Second * 10): - nsList1, _ := mockResourceClient.List(namespace1, clients.ListOpts{}) - nsList2, _ := mockResourceClient.List(namespace2, clients.ListOpts{}) - combined := append(nsList1, nsList2...) - Fail("expected final snapshot before 10 seconds. 
expected " + log.Sprintf("%v", combined)) + var expectedResources map[string][]string + var unexpectedResource map[string][]string + + if previous != nil { + expectedResources = findNonMatchingResources(convertMocksToMetadataGetter(expectMocks), convertMocksToMetadataGetter(previous.Mocks)) + unexpectedResource = findMatchingResources(convertMocksToMetadataGetter(unexpectMocks), convertMocksToMetadataGetter(previous.Mocks)) + } else { + expectedResources = getMapOfResources(convertMocksToMetadataGetter(expectMocks)) + unexpectedResource = getMapOfResources(convertMocksToMetadataGetter(unexpectMocks)) + } + getList := func(ns string) ([]metadataGetter, error) { + l, err := mockResourceClient.List(ns, clients.ListOpts{}) + return convertMocksToMetadataGetter(l), err + } + namespaceResources := getMapOfNamespaceResources(getList) + Fail(fmt.Sprintf("expected final snapshot before 10 seconds. expected \nExpected:\n%#v\n\nUnexpected:\n%#v\n\nnamespaces:\n%#v", expectedResources, unexpectedResource, namespaceResources)) } } } + mockResource1a, err := mockResourceClient.Write(NewMockResource(namespace1, name1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) mockResource1b, err := mockResourceClient.Write(NewMockResource(namespace2, name1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) + mockResourceWatched := MockResourceList{mockResource1a, mockResource1b} + assertSnapshotMocks(mockResourceWatched, nil) - assertSnapshotMocks(MockResourceList{mockResource1a, mockResource1b}, nil) - mockResource2a, err := mockResourceClient.Write(NewMockResource(namespace1, name2), clients.WriteOpts{Ctx: ctx}) + mockResource3a, err := mockResourceClient.Write(NewMockResourceWithLabels(namespace1, name3, labels1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - mockResource2b, err := mockResourceClient.Write(NewMockResource(namespace2, name2), clients.WriteOpts{Ctx: ctx}) + mockResource3b, err := mockResourceClient.Write(NewMockResourceWithLabels(namespace2, name3, labels1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) + mockResourceWatched = append(mockResourceWatched, MockResourceList{mockResource3a, mockResource3b}...) 
+ assertSnapshotMocks(mockResourceWatched, nil) - assertSnapshotMocks(MockResourceList{mockResource1a, mockResource1b, mockResource2a, mockResource2b}, nil) + createNamespaceWithLabel(ctx, kube, namespace3, labels1) + createNamespaces(ctx, kube, namespace4) - err = mockResourceClient.Delete(mockResource2a.GetMetadata().Namespace, mockResource2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + mockResource4a, err := mockResourceClient.Write(NewMockResource(namespace3, name1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - err = mockResourceClient.Delete(mockResource2b.GetMetadata().Namespace, mockResource2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + mockResource4b, err := mockResourceClient.Write(NewMockResource(namespace4, name1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) + mockResourceWatched = append(mockResourceWatched, mockResource4a) + mockResourceNotWatched := MockResourceList{mockResource4b} + assertSnapshotMocks(mockResourceWatched, mockResourceNotWatched) - assertSnapshotMocks(MockResourceList{mockResource1a, mockResource1b}, MockResourceList{mockResource2a, mockResource2b}) + mockResource5a, err := mockResourceClient.Write(NewMockResourceWithLabels(namespace3, name2, labels1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockResource5b, err := mockResourceClient.Write(NewMockResourceWithLabels(namespace4, name2, labels1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockResourceWatched = append(mockResourceWatched, mockResource5a) + mockResourceNotWatched = append(mockResourceNotWatched, mockResource5b) + assertSnapshotMocks(mockResourceWatched, mockResourceNotWatched) + + for _, r := range mockResourceNotWatched { + err = mockResourceClient.Delete(r.GetMetadata().Namespace, r.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + } err = mockResourceClient.Delete(mockResource1a.GetMetadata().Namespace, mockResource1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) err = mockResourceClient.Delete(mockResource1b.GetMetadata().Namespace, mockResource1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) + mockResourceNotWatched = append(mockResourceNotWatched, MockResourceList{mockResource1a, mockResource1b}...) + mockResourceWatched = MockResourceList{mockResource3a, mockResource3b, mockResource4a, mockResource5a} + assertSnapshotMocks(mockResourceWatched, mockResourceNotWatched) + + err = mockResourceClient.Delete(mockResource3a.GetMetadata().Namespace, mockResource3a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = mockResourceClient.Delete(mockResource3b.GetMetadata().Namespace, mockResource3b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockResourceNotWatched = append(mockResourceNotWatched, MockResourceList{mockResource3a, mockResource3b}...) 
+ mockResourceWatched = MockResourceList{mockResource4a, mockResource5a} + assertSnapshotMocks(mockResourceWatched, mockResourceNotWatched) + + err = mockResourceClient.Delete(mockResource4a.GetMetadata().Namespace, mockResource4a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = mockResourceClient.Delete(mockResource5a.GetMetadata().Namespace, mockResource5a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockResourceNotWatched = append(mockResourceNotWatched, MockResourceList{mockResource5a, mockResource5b}...) + assertSnapshotMocks(nil, mockResourceNotWatched) - assertSnapshotMocks(nil, MockResourceList{mockResource1a, mockResource1b, mockResource2a, mockResource2b}) + // clean up environment + deleteNamespaces(ctx, kube, namespace3, namespace4) + getNewNamespaces() /* FakeResource */ - assertSnapshotFakes := func(expectFakes FakeResourceList, unexpectFakes FakeResourceList) { drain: for { select { case snap = <-snapshots: + previous = snap for _, expected := range expectFakes { if _, err := snap.Fakes.Find(expected.GetMetadata().Ref().Strings()); err != nil { continue drain @@ -302,49 +535,100 @@ var _ = Describe("V1Emitter", func() { case err := <-errs: Expect(err).NotTo(HaveOccurred()) case <-time.After(time.Second * 10): - nsList1, _ := fakeResourceClient.List(namespace1, clients.ListOpts{}) - nsList2, _ := fakeResourceClient.List(namespace2, clients.ListOpts{}) - combined := append(nsList1, nsList2...) - Fail("expected final snapshot before 10 seconds. expected " + log.Sprintf("%v", combined)) + var expectedResources map[string][]string + var unexpectedResource map[string][]string + + if previous != nil { + expectedResources = findNonMatchingResources(convertFakesToMetadataGetter(expectFakes), convertFakesToMetadataGetter(previous.Fakes)) + unexpectedResource = findMatchingResources(convertFakesToMetadataGetter(unexpectFakes), convertFakesToMetadataGetter(previous.Fakes)) + } else { + expectedResources = getMapOfResources(convertFakesToMetadataGetter(expectFakes)) + unexpectedResource = getMapOfResources(convertFakesToMetadataGetter(unexpectFakes)) + } + getList := func(ns string) ([]metadataGetter, error) { + l, err := fakeResourceClient.List(ns, clients.ListOpts{}) + return convertFakesToMetadataGetter(l), err + } + namespaceResources := getMapOfNamespaceResources(getList) + Fail(fmt.Sprintf("expected final snapshot before 10 seconds. 
expected \nExpected:\n%#v\n\nUnexpected:\n%#v\n\nnamespaces:\n%#v", expectedResources, unexpectedResource, namespaceResources)) } } } + fakeResource1a, err := fakeResourceClient.Write(NewFakeResource(namespace1, name1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) fakeResource1b, err := fakeResourceClient.Write(NewFakeResource(namespace2, name1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) + fakeResourceWatched := FakeResourceList{fakeResource1a, fakeResource1b} + assertSnapshotFakes(fakeResourceWatched, nil) - assertSnapshotFakes(FakeResourceList{fakeResource1a, fakeResource1b}, nil) - fakeResource2a, err := fakeResourceClient.Write(NewFakeResource(namespace1, name2), clients.WriteOpts{Ctx: ctx}) + fakeResource3a, err := fakeResourceClient.Write(NewFakeResourceWithLabels(namespace1, name3, labels1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - fakeResource2b, err := fakeResourceClient.Write(NewFakeResource(namespace2, name2), clients.WriteOpts{Ctx: ctx}) + fakeResource3b, err := fakeResourceClient.Write(NewFakeResourceWithLabels(namespace2, name3, labels1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) + fakeResourceWatched = append(fakeResourceWatched, FakeResourceList{fakeResource3a, fakeResource3b}...) + assertSnapshotFakes(fakeResourceWatched, nil) + + createNamespaceWithLabel(ctx, kube, namespace3, labels1) + createNamespaces(ctx, kube, namespace4) - assertSnapshotFakes(FakeResourceList{fakeResource1a, fakeResource1b, fakeResource2a, fakeResource2b}, nil) + fakeResource4a, err := fakeResourceClient.Write(NewFakeResource(namespace3, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + fakeResource4b, err := fakeResourceClient.Write(NewFakeResource(namespace4, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + fakeResourceWatched = append(fakeResourceWatched, fakeResource4a) + fakeResourceNotWatched := FakeResourceList{fakeResource4b} + assertSnapshotFakes(fakeResourceWatched, fakeResourceNotWatched) - err = fakeResourceClient.Delete(fakeResource2a.GetMetadata().Namespace, fakeResource2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + fakeResource5a, err := fakeResourceClient.Write(NewFakeResourceWithLabels(namespace3, name2, labels1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - err = fakeResourceClient.Delete(fakeResource2b.GetMetadata().Namespace, fakeResource2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + fakeResource5b, err := fakeResourceClient.Write(NewFakeResourceWithLabels(namespace4, name2, labels1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) + fakeResourceWatched = append(fakeResourceWatched, fakeResource5a) + fakeResourceNotWatched = append(fakeResourceNotWatched, fakeResource5b) + assertSnapshotFakes(fakeResourceWatched, fakeResourceNotWatched) - assertSnapshotFakes(FakeResourceList{fakeResource1a, fakeResource1b}, FakeResourceList{fakeResource2a, fakeResource2b}) + for _, r := range fakeResourceNotWatched { + err = fakeResourceClient.Delete(r.GetMetadata().Namespace, r.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + } err = fakeResourceClient.Delete(fakeResource1a.GetMetadata().Namespace, fakeResource1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) err = fakeResourceClient.Delete(fakeResource1b.GetMetadata().Namespace, fakeResource1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) + 
fakeResourceNotWatched = append(fakeResourceNotWatched, FakeResourceList{fakeResource1a, fakeResource1b}...) + fakeResourceWatched = FakeResourceList{fakeResource3a, fakeResource3b, fakeResource4a, fakeResource5a} + assertSnapshotFakes(fakeResourceWatched, fakeResourceNotWatched) + + err = fakeResourceClient.Delete(fakeResource3a.GetMetadata().Namespace, fakeResource3a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = fakeResourceClient.Delete(fakeResource3b.GetMetadata().Namespace, fakeResource3b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + fakeResourceNotWatched = append(fakeResourceNotWatched, FakeResourceList{fakeResource3a, fakeResource3b}...) + fakeResourceWatched = FakeResourceList{fakeResource4a, fakeResource5a} + assertSnapshotFakes(fakeResourceWatched, fakeResourceNotWatched) + + err = fakeResourceClient.Delete(fakeResource4a.GetMetadata().Namespace, fakeResource4a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = fakeResourceClient.Delete(fakeResource5a.GetMetadata().Namespace, fakeResource5a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + fakeResourceNotWatched = append(fakeResourceNotWatched, FakeResourceList{fakeResource5a, fakeResource5b}...) + assertSnapshotFakes(nil, fakeResourceNotWatched) - assertSnapshotFakes(nil, FakeResourceList{fakeResource1a, fakeResource1b, fakeResource2a, fakeResource2b}) + // clean up environment + deleteNamespaces(ctx, kube, namespace3, namespace4) + getNewNamespaces() /* AnotherMockResource */ - assertSnapshotAnothermockresources := func(expectAnothermockresources AnotherMockResourceList, unexpectAnothermockresources AnotherMockResourceList) { drain: for { select { case snap = <-snapshots: + previous = snap for _, expected := range expectAnothermockresources { if _, err := snap.Anothermockresources.Find(expected.GetMetadata().Ref().Strings()); err != nil { continue drain @@ -359,49 +643,100 @@ var _ = Describe("V1Emitter", func() { case err := <-errs: Expect(err).NotTo(HaveOccurred()) case <-time.After(time.Second * 10): - nsList1, _ := anotherMockResourceClient.List(namespace1, clients.ListOpts{}) - nsList2, _ := anotherMockResourceClient.List(namespace2, clients.ListOpts{}) - combined := append(nsList1, nsList2...) - Fail("expected final snapshot before 10 seconds. expected " + log.Sprintf("%v", combined)) + var expectedResources map[string][]string + var unexpectedResource map[string][]string + + if previous != nil { + expectedResources = findNonMatchingResources(convertAnothermockresourcesToMetadataGetter(expectAnothermockresources), convertAnothermockresourcesToMetadataGetter(previous.Anothermockresources)) + unexpectedResource = findMatchingResources(convertAnothermockresourcesToMetadataGetter(unexpectAnothermockresources), convertAnothermockresourcesToMetadataGetter(previous.Anothermockresources)) + } else { + expectedResources = getMapOfResources(convertAnothermockresourcesToMetadataGetter(expectAnothermockresources)) + unexpectedResource = getMapOfResources(convertAnothermockresourcesToMetadataGetter(unexpectAnothermockresources)) + } + getList := func(ns string) ([]metadataGetter, error) { + l, err := anotherMockResourceClient.List(ns, clients.ListOpts{}) + return convertAnothermockresourcesToMetadataGetter(l), err + } + namespaceResources := getMapOfNamespaceResources(getList) + Fail(fmt.Sprintf("expected final snapshot before 10 seconds. 
expected \nExpected:\n%#v\n\nUnexpected:\n%#v\n\nnamespaces:\n%#v", expectedResources, unexpectedResource, namespaceResources)) } } } + anotherMockResource1a, err := anotherMockResourceClient.Write(NewAnotherMockResource(namespace1, name1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) anotherMockResource1b, err := anotherMockResourceClient.Write(NewAnotherMockResource(namespace2, name1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) + anotherMockResourceWatched := AnotherMockResourceList{anotherMockResource1a, anotherMockResource1b} + assertSnapshotAnothermockresources(anotherMockResourceWatched, nil) - assertSnapshotAnothermockresources(AnotherMockResourceList{anotherMockResource1a, anotherMockResource1b}, nil) - anotherMockResource2a, err := anotherMockResourceClient.Write(NewAnotherMockResource(namespace1, name2), clients.WriteOpts{Ctx: ctx}) + anotherMockResource3a, err := anotherMockResourceClient.Write(NewAnotherMockResourceWithLabels(namespace1, name3, labels1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - anotherMockResource2b, err := anotherMockResourceClient.Write(NewAnotherMockResource(namespace2, name2), clients.WriteOpts{Ctx: ctx}) + anotherMockResource3b, err := anotherMockResourceClient.Write(NewAnotherMockResourceWithLabels(namespace2, name3, labels1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) + anotherMockResourceWatched = append(anotherMockResourceWatched, AnotherMockResourceList{anotherMockResource3a, anotherMockResource3b}...) + assertSnapshotAnothermockresources(anotherMockResourceWatched, nil) - assertSnapshotAnothermockresources(AnotherMockResourceList{anotherMockResource1a, anotherMockResource1b, anotherMockResource2a, anotherMockResource2b}, nil) + createNamespaceWithLabel(ctx, kube, namespace3, labels1) + createNamespaces(ctx, kube, namespace4) + + anotherMockResource4a, err := anotherMockResourceClient.Write(NewAnotherMockResource(namespace3, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + anotherMockResource4b, err := anotherMockResourceClient.Write(NewAnotherMockResource(namespace4, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + anotherMockResourceWatched = append(anotherMockResourceWatched, anotherMockResource4a) + anotherMockResourceNotWatched := AnotherMockResourceList{anotherMockResource4b} + assertSnapshotAnothermockresources(anotherMockResourceWatched, anotherMockResourceNotWatched) - err = anotherMockResourceClient.Delete(anotherMockResource2a.GetMetadata().Namespace, anotherMockResource2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + anotherMockResource5a, err := anotherMockResourceClient.Write(NewAnotherMockResourceWithLabels(namespace3, name2, labels1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - err = anotherMockResourceClient.Delete(anotherMockResource2b.GetMetadata().Namespace, anotherMockResource2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + anotherMockResource5b, err := anotherMockResourceClient.Write(NewAnotherMockResourceWithLabels(namespace4, name2, labels1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) + anotherMockResourceWatched = append(anotherMockResourceWatched, anotherMockResource5a) + anotherMockResourceNotWatched = append(anotherMockResourceNotWatched, anotherMockResource5b) + assertSnapshotAnothermockresources(anotherMockResourceWatched, anotherMockResourceNotWatched) - assertSnapshotAnothermockresources(AnotherMockResourceList{anotherMockResource1a, 
anotherMockResource1b}, AnotherMockResourceList{anotherMockResource2a, anotherMockResource2b}) + for _, r := range anotherMockResourceNotWatched { + err = anotherMockResourceClient.Delete(r.GetMetadata().Namespace, r.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + } err = anotherMockResourceClient.Delete(anotherMockResource1a.GetMetadata().Namespace, anotherMockResource1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) err = anotherMockResourceClient.Delete(anotherMockResource1b.GetMetadata().Namespace, anotherMockResource1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) + anotherMockResourceNotWatched = append(anotherMockResourceNotWatched, AnotherMockResourceList{anotherMockResource1a, anotherMockResource1b}...) + anotherMockResourceWatched = AnotherMockResourceList{anotherMockResource3a, anotherMockResource3b, anotherMockResource4a, anotherMockResource5a} + assertSnapshotAnothermockresources(anotherMockResourceWatched, anotherMockResourceNotWatched) + + err = anotherMockResourceClient.Delete(anotherMockResource3a.GetMetadata().Namespace, anotherMockResource3a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = anotherMockResourceClient.Delete(anotherMockResource3b.GetMetadata().Namespace, anotherMockResource3b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + anotherMockResourceNotWatched = append(anotherMockResourceNotWatched, AnotherMockResourceList{anotherMockResource3a, anotherMockResource3b}...) + anotherMockResourceWatched = AnotherMockResourceList{anotherMockResource4a, anotherMockResource5a} + assertSnapshotAnothermockresources(anotherMockResourceWatched, anotherMockResourceNotWatched) + + err = anotherMockResourceClient.Delete(anotherMockResource4a.GetMetadata().Namespace, anotherMockResource4a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = anotherMockResourceClient.Delete(anotherMockResource5a.GetMetadata().Namespace, anotherMockResource5a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + anotherMockResourceNotWatched = append(anotherMockResourceNotWatched, AnotherMockResourceList{anotherMockResource5a, anotherMockResource5b}...) 
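
Every assertSnapshot* helper in this hunk follows the same shape: drain the snapshots channel until a snapshot contains all expected refs and none of the unexpected ones, surface emitter errors immediately, and fail after ten seconds. A stripped-down sketch of that control flow, over plain string keys instead of solo-kit resource lists, looks roughly like this:

package main

import (
	"fmt"
	"time"
)

// awaitSnapshot drains snapshots until one contains every expected key and none
// of the unexpected keys, or gives up after the timeout. It mirrors the labelled
// "drain" loop used by the generated assertSnapshot* helpers; the snapshot type
// here is a plain string set purely for illustration.
func awaitSnapshot(snapshots <-chan map[string]bool, expect, unexpect []string, timeout time.Duration) error {
	deadline := time.After(timeout)
drain:
	for {
		select {
		case snap := <-snapshots:
			for _, key := range expect {
				if !snap[key] {
					continue drain // an expected resource is not there yet; keep draining
				}
			}
			for _, key := range unexpect {
				if snap[key] {
					continue drain // an unexpected resource is still present; keep draining
				}
			}
			return nil
		case <-deadline:
			return fmt.Errorf("expected final snapshot before %s", timeout)
		}
	}
}

func main() {
	ch := make(chan map[string]bool, 1)
	ch <- map[string]bool{"namespace1.name1": true}
	fmt.Println(awaitSnapshot(ch, []string{"namespace1.name1"}, []string{"namespace4.name1"}, time.Second))
}
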
+ assertSnapshotAnothermockresources(nil, anotherMockResourceNotWatched) - assertSnapshotAnothermockresources(nil, AnotherMockResourceList{anotherMockResource1a, anotherMockResource1b, anotherMockResource2a, anotherMockResource2b}) + // clean up environment + deleteNamespaces(ctx, kube, namespace3, namespace4) + getNewNamespaces() /* ClusterResource */ - assertSnapshotClusterresources := func(expectClusterresources ClusterResourceList, unexpectClusterresources ClusterResourceList) { drain: for { select { case snap = <-snapshots: + previous = snap for _, expected := range expectClusterresources { if _, err := snap.Clusterresources.Find(expected.GetMetadata().Ref().Strings()); err != nil { continue drain @@ -421,34 +756,58 @@ var _ = Describe("V1Emitter", func() { } } } + clusterResource1a, err := clusterResourceClient.Write(NewClusterResource(namespace1, name1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) + clusterResourceWatched := ClusterResourceList{clusterResource1a} + assertSnapshotClusterresources(clusterResourceWatched, nil) - assertSnapshotClusterresources(ClusterResourceList{clusterResource1a}, nil) - clusterResource2a, err := clusterResourceClient.Write(NewClusterResource(namespace1, name2), clients.WriteOpts{Ctx: ctx}) + clusterResource3a, err := clusterResourceClient.Write(NewClusterResourceWithLabels(namespace1, name3, labels1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) + clusterResourceWatched = append(clusterResourceWatched, clusterResource3a) + assertSnapshotClusterresources(clusterResourceWatched, nil) - assertSnapshotClusterresources(ClusterResourceList{clusterResource1a, clusterResource2a}, nil) + createNamespaceWithLabel(ctx, kube, namespace3, labels1) + createNamespaces(ctx, kube, namespace4) - err = clusterResourceClient.Delete(clusterResource2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + clusterResource4a, err := clusterResourceClient.Write(NewClusterResource(namespace3, name4), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) + clusterResourceWatched = append(clusterResourceWatched, clusterResource4a) + assertSnapshotClusterresources(clusterResourceWatched, nil) - assertSnapshotClusterresources(ClusterResourceList{clusterResource1a}, ClusterResourceList{clusterResource2a}) + clusterResource5a, err := clusterResourceClient.Write(NewClusterResourceWithLabels(namespace3, name5, labels1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + clusterResourceWatched = append(clusterResourceWatched, clusterResource5a) + assertSnapshotClusterresources(clusterResourceWatched, nil) err = clusterResourceClient.Delete(clusterResource1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) + err = clusterResourceClient.Delete(clusterResource3a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + clusterResourceNotWatched := ClusterResourceList{clusterResource1a, clusterResource3a} + clusterResourceWatched = ClusterResourceList{clusterResource4a, clusterResource5a} + assertSnapshotClusterresources(clusterResourceWatched, clusterResourceNotWatched) - assertSnapshotClusterresources(nil, ClusterResourceList{clusterResource1a, clusterResource2a}) + err = clusterResourceClient.Delete(clusterResource4a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = clusterResourceClient.Delete(clusterResource5a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + 
clusterResourceNotWatched = append(clusterResourceNotWatched, ClusterResourceList{clusterResource4a, clusterResource5a}...) + assertSnapshotClusterresources(nil, clusterResourceNotWatched) + + // clean up environment + deleteNamespaces(ctx, kube, namespace3, namespace4) + getNewNamespaces() /* MockCustomType */ - assertSnapshotmcts := func(expectmcts MockCustomTypeList, unexpectmcts MockCustomTypeList) { drain: for { select { case snap = <-snapshots: + previous = snap for _, expected := range expectmcts { if _, err := snap.Mcts.Find(expected.GetMetadata().Ref().Strings()); err != nil { continue drain @@ -463,56 +822,107 @@ var _ = Describe("V1Emitter", func() { case err := <-errs: Expect(err).NotTo(HaveOccurred()) case <-time.After(time.Second * 10): - nsList1, _ := mockCustomTypeClient.List(namespace1, clients.ListOpts{}) - nsList2, _ := mockCustomTypeClient.List(namespace2, clients.ListOpts{}) - combined := append(nsList1, nsList2...) - Fail("expected final snapshot before 10 seconds. expected " + log.Sprintf("%v", combined)) + var expectedResources map[string][]string + var unexpectedResource map[string][]string + + if previous != nil { + expectedResources = findNonMatchingResources(convertmctsToMetadataGetter(expectmcts), convertmctsToMetadataGetter(previous.Mcts)) + unexpectedResource = findMatchingResources(convertmctsToMetadataGetter(unexpectmcts), convertmctsToMetadataGetter(previous.Mcts)) + } else { + expectedResources = getMapOfResources(convertmctsToMetadataGetter(expectmcts)) + unexpectedResource = getMapOfResources(convertmctsToMetadataGetter(unexpectmcts)) + } + getList := func(ns string) ([]metadataGetter, error) { + l, err := mockCustomTypeClient.List(ns, clients.ListOpts{}) + return convertmctsToMetadataGetter(l), err + } + namespaceResources := getMapOfNamespaceResources(getList) + Fail(fmt.Sprintf("expected final snapshot before 10 seconds. expected \nExpected:\n%#v\n\nUnexpected:\n%#v\n\nnamespaces:\n%#v", expectedResources, unexpectedResource, namespaceResources)) } } } + mockCustomType1a, err := mockCustomTypeClient.Write(NewMockCustomType(namespace1, name1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) mockCustomType1b, err := mockCustomTypeClient.Write(NewMockCustomType(namespace2, name1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) + mockCustomTypeWatched := MockCustomTypeList{mockCustomType1a, mockCustomType1b} + assertSnapshotmcts(mockCustomTypeWatched, nil) - assertSnapshotmcts(MockCustomTypeList{mockCustomType1a, mockCustomType1b}, nil) - mockCustomType2a, err := mockCustomTypeClient.Write(NewMockCustomType(namespace1, name2), clients.WriteOpts{Ctx: ctx}) + mockCustomType3a, err := mockCustomTypeClient.Write(NewMockCustomTypeWithLabels(namespace1, name3, labels1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - mockCustomType2b, err := mockCustomTypeClient.Write(NewMockCustomType(namespace2, name2), clients.WriteOpts{Ctx: ctx}) + mockCustomType3b, err := mockCustomTypeClient.Write(NewMockCustomTypeWithLabels(namespace2, name3, labels1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) + mockCustomTypeWatched = append(mockCustomTypeWatched, MockCustomTypeList{mockCustomType3a, mockCustomType3b}...) 
+ assertSnapshotmcts(mockCustomTypeWatched, nil) - assertSnapshotmcts(MockCustomTypeList{mockCustomType1a, mockCustomType1b, mockCustomType2a, mockCustomType2b}, nil) + createNamespaceWithLabel(ctx, kube, namespace3, labels1) + createNamespaces(ctx, kube, namespace4) - err = mockCustomTypeClient.Delete(mockCustomType2a.GetMetadata().Namespace, mockCustomType2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + mockCustomType4a, err := mockCustomTypeClient.Write(NewMockCustomType(namespace3, name1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - err = mockCustomTypeClient.Delete(mockCustomType2b.GetMetadata().Namespace, mockCustomType2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + mockCustomType4b, err := mockCustomTypeClient.Write(NewMockCustomType(namespace4, name1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) + mockCustomTypeWatched = append(mockCustomTypeWatched, mockCustomType4a) + mockCustomTypeNotWatched := MockCustomTypeList{mockCustomType4b} + assertSnapshotmcts(mockCustomTypeWatched, mockCustomTypeNotWatched) - assertSnapshotmcts(MockCustomTypeList{mockCustomType1a, mockCustomType1b}, MockCustomTypeList{mockCustomType2a, mockCustomType2b}) + mockCustomType5a, err := mockCustomTypeClient.Write(NewMockCustomTypeWithLabels(namespace3, name2, labels1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockCustomType5b, err := mockCustomTypeClient.Write(NewMockCustomTypeWithLabels(namespace4, name2, labels1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockCustomTypeWatched = append(mockCustomTypeWatched, mockCustomType5a) + mockCustomTypeNotWatched = append(mockCustomTypeNotWatched, mockCustomType5b) + assertSnapshotmcts(mockCustomTypeWatched, mockCustomTypeNotWatched) + + for _, r := range mockCustomTypeNotWatched { + err = mockCustomTypeClient.Delete(r.GetMetadata().Namespace, r.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + } err = mockCustomTypeClient.Delete(mockCustomType1a.GetMetadata().Namespace, mockCustomType1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) err = mockCustomTypeClient.Delete(mockCustomType1b.GetMetadata().Namespace, mockCustomType1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) + mockCustomTypeNotWatched = append(mockCustomTypeNotWatched, MockCustomTypeList{mockCustomType1a, mockCustomType1b}...) + mockCustomTypeWatched = MockCustomTypeList{mockCustomType3a, mockCustomType3b, mockCustomType4a, mockCustomType5a} + assertSnapshotmcts(mockCustomTypeWatched, mockCustomTypeNotWatched) - assertSnapshotmcts(nil, MockCustomTypeList{mockCustomType1a, mockCustomType1b, mockCustomType2a, mockCustomType2b}) + err = mockCustomTypeClient.Delete(mockCustomType3a.GetMetadata().Namespace, mockCustomType3a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = mockCustomTypeClient.Delete(mockCustomType3b.GetMetadata().Namespace, mockCustomType3b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockCustomTypeNotWatched = append(mockCustomTypeNotWatched, MockCustomTypeList{mockCustomType3a, mockCustomType3b}...) 
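
When one of these waits times out, the new failure message is assembled by comparing the expected and unexpected lists against the previous snapshot and by grouping what each client currently returns as a namespace-to-names map. The sketch below shows that bookkeeping in miniature, using a small stand-in struct instead of the generated metadataGetter interface.

package main

import "fmt"

// meta is a minimal stand-in for a resource's metadata (namespace + name); the
// real helpers range over a metadataGetter interface implemented by the
// generated solo-kit resources.
type meta struct{ Namespace, Name string }

// groupByNamespace buckets resource names by namespace, the shape used in the
// enriched timeout message (namespace -> resource names).
func groupByNamespace(items []meta) map[string][]string {
	out := make(map[string][]string, len(items))
	for _, m := range items {
		out[m.Namespace] = append(out[m.Namespace], m.Name)
	}
	return out
}

// missingFrom returns the entries of want that are absent from have, grouped by
// namespace; this is the role findNonMatchingResources plays once a previous
// snapshot has been seen (findMatchingResources is the mirror image).
func missingFrom(want, have []meta) map[string][]string {
	seen := make(map[meta]bool, len(have))
	for _, m := range have {
		seen[m] = true
	}
	var missing []meta
	for _, m := range want {
		if !seen[m] {
			missing = append(missing, m)
		}
	}
	return groupByNamespace(missing)
}

func main() {
	want := []meta{{"namespace1", "name1"}, {"namespace3", "name2"}}
	have := []meta{{"namespace1", "name1"}}
	fmt.Println(missingFrom(want, have)) // map[namespace3:[name2]]
}
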
+ mockCustomTypeWatched = MockCustomTypeList{mockCustomType4a, mockCustomType5a} + assertSnapshotmcts(mockCustomTypeWatched, mockCustomTypeNotWatched) + + err = mockCustomTypeClient.Delete(mockCustomType4a.GetMetadata().Namespace, mockCustomType4a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = mockCustomTypeClient.Delete(mockCustomType5a.GetMetadata().Namespace, mockCustomType5a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockCustomTypeNotWatched = append(mockCustomTypeNotWatched, MockCustomTypeList{mockCustomType5a, mockCustomType5b}...) + assertSnapshotmcts(nil, mockCustomTypeNotWatched) + + // clean up environment + deleteNamespaces(ctx, kube, namespace3, namespace4) + getNewNamespaces() /* - Pod + MockCustomSpecHashType */ - - assertSnapshotpods := func(expectpods github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList, unexpectpods github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList) { + assertSnapshotmcshts := func(expectmcshts MockCustomSpecHashTypeList, unexpectmcshts MockCustomSpecHashTypeList) { drain: for { select { case snap = <-snapshots: - for _, expected := range expectpods { - if _, err := snap.Pods.Find(expected.GetMetadata().Ref().Strings()); err != nil { + previous = snap + for _, expected := range expectmcshts { + if _, err := snap.Mcshts.Find(expected.GetMetadata().Ref().Strings()); err != nil { continue drain } } - for _, unexpected := range unexpectpods { - if _, err := snap.Pods.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + for _, unexpected := range unexpectmcshts { + if _, err := snap.Mcshts.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { continue drain } } @@ -520,70 +930,107 @@ var _ = Describe("V1Emitter", func() { case err := <-errs: Expect(err).NotTo(HaveOccurred()) case <-time.After(time.Second * 10): - nsList1, _ := podClient.List(namespace1, clients.ListOpts{}) - nsList2, _ := podClient.List(namespace2, clients.ListOpts{}) - combined := append(nsList1, nsList2...) - Fail("expected final snapshot before 10 seconds. expected " + log.Sprintf("%v", combined)) + var expectedResources map[string][]string + var unexpectedResource map[string][]string + + if previous != nil { + expectedResources = findNonMatchingResources(convertmcshtsToMetadataGetter(expectmcshts), convertmcshtsToMetadataGetter(previous.Mcshts)) + unexpectedResource = findMatchingResources(convertmcshtsToMetadataGetter(unexpectmcshts), convertmcshtsToMetadataGetter(previous.Mcshts)) + } else { + expectedResources = getMapOfResources(convertmcshtsToMetadataGetter(expectmcshts)) + unexpectedResource = getMapOfResources(convertmcshtsToMetadataGetter(unexpectmcshts)) + } + getList := func(ns string) ([]metadataGetter, error) { + l, err := mockCustomSpecHashTypeClient.List(ns, clients.ListOpts{}) + return convertmcshtsToMetadataGetter(l), err + } + namespaceResources := getMapOfNamespaceResources(getList) + Fail(fmt.Sprintf("expected final snapshot before 10 seconds. 
expected \nExpected:\n%#v\n\nUnexpected:\n%#v\n\nnamespaces:\n%#v", expectedResources, unexpectedResource, namespaceResources)) } } } - pod1a, err := podClient.Write(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.NewPod(namespace1, name1), clients.WriteOpts{Ctx: ctx}) + + mockCustomSpecHashType1a, err := mockCustomSpecHashTypeClient.Write(NewMockCustomSpecHashType(namespace1, name1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - pod1b, err := podClient.Write(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.NewPod(namespace2, name1), clients.WriteOpts{Ctx: ctx}) + mockCustomSpecHashType1b, err := mockCustomSpecHashTypeClient.Write(NewMockCustomSpecHashType(namespace2, name1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) + mockCustomSpecHashTypeWatched := MockCustomSpecHashTypeList{mockCustomSpecHashType1a, mockCustomSpecHashType1b} + assertSnapshotmcshts(mockCustomSpecHashTypeWatched, nil) - assertSnapshotpods(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList{pod1a, pod1b}, nil) - pod2a, err := podClient.Write(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.NewPod(namespace1, name2), clients.WriteOpts{Ctx: ctx}) + mockCustomSpecHashType3a, err := mockCustomSpecHashTypeClient.Write(NewMockCustomSpecHashTypeWithLabels(namespace1, name3, labels1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - pod2b, err := podClient.Write(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.NewPod(namespace2, name2), clients.WriteOpts{Ctx: ctx}) + mockCustomSpecHashType3b, err := mockCustomSpecHashTypeClient.Write(NewMockCustomSpecHashTypeWithLabels(namespace2, name3, labels1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) + mockCustomSpecHashTypeWatched = append(mockCustomSpecHashTypeWatched, MockCustomSpecHashTypeList{mockCustomSpecHashType3a, mockCustomSpecHashType3b}...) 
+ assertSnapshotmcshts(mockCustomSpecHashTypeWatched, nil) - assertSnapshotpods(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList{pod1a, pod1b, pod2a, pod2b}, nil) + createNamespaceWithLabel(ctx, kube, namespace3, labels1) + createNamespaces(ctx, kube, namespace4) - err = podClient.Delete(pod2a.GetMetadata().Namespace, pod2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + mockCustomSpecHashType4a, err := mockCustomSpecHashTypeClient.Write(NewMockCustomSpecHashType(namespace3, name1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - err = podClient.Delete(pod2b.GetMetadata().Namespace, pod2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + mockCustomSpecHashType4b, err := mockCustomSpecHashTypeClient.Write(NewMockCustomSpecHashType(namespace4, name1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) + mockCustomSpecHashTypeWatched = append(mockCustomSpecHashTypeWatched, mockCustomSpecHashType4a) + mockCustomSpecHashTypeNotWatched := MockCustomSpecHashTypeList{mockCustomSpecHashType4b} + assertSnapshotmcshts(mockCustomSpecHashTypeWatched, mockCustomSpecHashTypeNotWatched) - assertSnapshotpods(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList{pod1a, pod1b}, github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList{pod2a, pod2b}) - - err = podClient.Delete(pod1a.GetMetadata().Namespace, pod1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + mockCustomSpecHashType5a, err := mockCustomSpecHashTypeClient.Write(NewMockCustomSpecHashTypeWithLabels(namespace3, name2, labels1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - err = podClient.Delete(pod1b.GetMetadata().Namespace, pod1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + mockCustomSpecHashType5b, err := mockCustomSpecHashTypeClient.Write(NewMockCustomSpecHashTypeWithLabels(namespace4, name2, labels1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) + mockCustomSpecHashTypeWatched = append(mockCustomSpecHashTypeWatched, mockCustomSpecHashType5a) + mockCustomSpecHashTypeNotWatched = append(mockCustomSpecHashTypeNotWatched, mockCustomSpecHashType5b) + assertSnapshotmcshts(mockCustomSpecHashTypeWatched, mockCustomSpecHashTypeNotWatched) - assertSnapshotpods(nil, github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList{pod1a, pod1b, pod2a, pod2b}) - }) + for _, r := range mockCustomSpecHashTypeNotWatched { + err = mockCustomSpecHashTypeClient.Delete(r.GetMetadata().Namespace, r.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + } - It("tracks snapshots on changes to any resource using AllNamespace", func() { - ctx := context.Background() - err := emitter.Register() + err = mockCustomSpecHashTypeClient.Delete(mockCustomSpecHashType1a.GetMetadata().Namespace, mockCustomSpecHashType1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) + err = mockCustomSpecHashTypeClient.Delete(mockCustomSpecHashType1b.GetMetadata().Namespace, mockCustomSpecHashType1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockCustomSpecHashTypeNotWatched = append(mockCustomSpecHashTypeNotWatched, MockCustomSpecHashTypeList{mockCustomSpecHashType1a, mockCustomSpecHashType1b}...) 
+ mockCustomSpecHashTypeWatched = MockCustomSpecHashTypeList{mockCustomSpecHashType3a, mockCustomSpecHashType3b, mockCustomSpecHashType4a, mockCustomSpecHashType5a} + assertSnapshotmcshts(mockCustomSpecHashTypeWatched, mockCustomSpecHashTypeNotWatched) - snapshots, errs, err := emitter.Snapshots([]string{""}, clients.WatchOpts{ - Ctx: ctx, - RefreshRate: time.Second, - }) + err = mockCustomSpecHashTypeClient.Delete(mockCustomSpecHashType3a.GetMetadata().Namespace, mockCustomSpecHashType3a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) + err = mockCustomSpecHashTypeClient.Delete(mockCustomSpecHashType3b.GetMetadata().Namespace, mockCustomSpecHashType3b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockCustomSpecHashTypeNotWatched = append(mockCustomSpecHashTypeNotWatched, MockCustomSpecHashTypeList{mockCustomSpecHashType3a, mockCustomSpecHashType3b}...) + mockCustomSpecHashTypeWatched = MockCustomSpecHashTypeList{mockCustomSpecHashType4a, mockCustomSpecHashType5a} + assertSnapshotmcshts(mockCustomSpecHashTypeWatched, mockCustomSpecHashTypeNotWatched) - var snap *TestingSnapshot + err = mockCustomSpecHashTypeClient.Delete(mockCustomSpecHashType4a.GetMetadata().Namespace, mockCustomSpecHashType4a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = mockCustomSpecHashTypeClient.Delete(mockCustomSpecHashType5a.GetMetadata().Namespace, mockCustomSpecHashType5a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockCustomSpecHashTypeNotWatched = append(mockCustomSpecHashTypeNotWatched, MockCustomSpecHashTypeList{mockCustomSpecHashType5a, mockCustomSpecHashType5b}...) + assertSnapshotmcshts(nil, mockCustomSpecHashTypeNotWatched) + + // clean up environment + deleteNamespaces(ctx, kube, namespace3, namespace4) + getNewNamespaces() /* - SimpleMockResource + Pod */ - - assertSnapshotSimplemocks := func(expectSimplemocks SimpleMockResourceList, unexpectSimplemocks SimpleMockResourceList) { + assertSnapshotpods := func(expectpods github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList, unexpectpods github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList) { drain: for { select { case snap = <-snapshots: - for _, expected := range expectSimplemocks { - if _, err := snap.Simplemocks.Find(expected.GetMetadata().Ref().Strings()); err != nil { + previous = snap + for _, expected := range expectpods { + if _, err := snap.Pods.Find(expected.GetMetadata().Ref().Strings()); err != nil { continue drain } } - for _, unexpected := range unexpectSimplemocks { - if _, err := snap.Simplemocks.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + for _, unexpected := range unexpectpods { + if _, err := snap.Pods.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { continue drain } } @@ -591,370 +1038,2913 @@ var _ = Describe("V1Emitter", func() { case err := <-errs: Expect(err).NotTo(HaveOccurred()) case <-time.After(time.Second * 10): - nsList1, _ := simpleMockResourceClient.List(namespace1, clients.ListOpts{}) - nsList2, _ := simpleMockResourceClient.List(namespace2, clients.ListOpts{}) - combined := append(nsList1, nsList2...) - Fail("expected final snapshot before 10 seconds. 
expected " + log.Sprintf("%v", combined)) + var expectedResources map[string][]string + var unexpectedResource map[string][]string + + if previous != nil { + expectedResources = findNonMatchingResources(convertpodsToMetadataGetter(expectpods), convertpodsToMetadataGetter(previous.Pods)) + unexpectedResource = findMatchingResources(convertpodsToMetadataGetter(unexpectpods), convertpodsToMetadataGetter(previous.Pods)) + } else { + expectedResources = getMapOfResources(convertpodsToMetadataGetter(expectpods)) + unexpectedResource = getMapOfResources(convertpodsToMetadataGetter(unexpectpods)) + } + getList := func(ns string) ([]metadataGetter, error) { + l, err := podClient.List(ns, clients.ListOpts{}) + return convertpodsToMetadataGetter(l), err + } + namespaceResources := getMapOfNamespaceResources(getList) + Fail(fmt.Sprintf("expected final snapshot before 10 seconds. expected \nExpected:\n%#v\n\nUnexpected:\n%#v\n\nnamespaces:\n%#v", expectedResources, unexpectedResource, namespaceResources)) } } } - simpleMockResource1a, err := simpleMockResourceClient.Write(NewSimpleMockResource(namespace1, name1), clients.WriteOpts{Ctx: ctx}) + + pod1a, err := podClient.Write(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.NewPod(namespace1, name1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - simpleMockResource1b, err := simpleMockResourceClient.Write(NewSimpleMockResource(namespace2, name1), clients.WriteOpts{Ctx: ctx}) + pod1b, err := podClient.Write(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.NewPod(namespace2, name1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) + podWatched := github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList{pod1a, pod1b} + assertSnapshotpods(podWatched, nil) - assertSnapshotSimplemocks(SimpleMockResourceList{simpleMockResource1a, simpleMockResource1b}, nil) - simpleMockResource2a, err := simpleMockResourceClient.Write(NewSimpleMockResource(namespace1, name2), clients.WriteOpts{Ctx: ctx}) + pod3a, err := podClient.Write(NewPodWithLabels(namespace1, name3, labels1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - simpleMockResource2b, err := simpleMockResourceClient.Write(NewSimpleMockResource(namespace2, name2), clients.WriteOpts{Ctx: ctx}) + pod3b, err := podClient.Write(NewPodWithLabels(namespace2, name3, labels1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) + podWatched = append(podWatched, github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList{pod3a, pod3b}...) 
+ assertSnapshotpods(podWatched, nil) - assertSnapshotSimplemocks(SimpleMockResourceList{simpleMockResource1a, simpleMockResource1b, simpleMockResource2a, simpleMockResource2b}, nil) + createNamespaceWithLabel(ctx, kube, namespace3, labels1) + createNamespaces(ctx, kube, namespace4) - err = simpleMockResourceClient.Delete(simpleMockResource2a.GetMetadata().Namespace, simpleMockResource2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + pod4a, err := podClient.Write(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.NewPod(namespace3, name1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - err = simpleMockResourceClient.Delete(simpleMockResource2b.GetMetadata().Namespace, simpleMockResource2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + pod4b, err := podClient.Write(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.NewPod(namespace4, name1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) + podWatched = append(podWatched, pod4a) + podNotWatched := github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList{pod4b} + assertSnapshotpods(podWatched, podNotWatched) - assertSnapshotSimplemocks(SimpleMockResourceList{simpleMockResource1a, simpleMockResource1b}, SimpleMockResourceList{simpleMockResource2a, simpleMockResource2b}) - - err = simpleMockResourceClient.Delete(simpleMockResource1a.GetMetadata().Namespace, simpleMockResource1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + pod5a, err := podClient.Write(NewPodWithLabels(namespace3, name2, labels1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - err = simpleMockResourceClient.Delete(simpleMockResource1b.GetMetadata().Namespace, simpleMockResource1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + pod5b, err := podClient.Write(NewPodWithLabels(namespace4, name2, labels1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + podWatched = append(podWatched, pod5a) + podNotWatched = append(podNotWatched, pod5b) + assertSnapshotpods(podWatched, podNotWatched) + + for _, r := range podNotWatched { + err = podClient.Delete(r.GetMetadata().Namespace, r.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + } + + err = podClient.Delete(pod1a.GetMetadata().Namespace, pod1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = podClient.Delete(pod1b.GetMetadata().Namespace, pod1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + podNotWatched = append(podNotWatched, github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList{pod1a, pod1b}...) + podWatched = github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList{pod3a, pod3b, pod4a, pod5a} + assertSnapshotpods(podWatched, podNotWatched) + + err = podClient.Delete(pod3a.GetMetadata().Namespace, pod3a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = podClient.Delete(pod3b.GetMetadata().Namespace, pod3b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + podNotWatched = append(podNotWatched, github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList{pod3a, pod3b}...) 
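// The helper exercised here relies on the emitter watching labeled namespaces in
// addition to the explicitly watched ones: namespace3 is created with labels1 via
// createNamespaceWithLabel, so pod4a written into it joins podWatched, while pod4b
// in the unlabeled namespace4 stays in podNotWatched. The snapshot watch itself is
// started earlier in the file; as a rough sketch (argument values are illustrative,
// the real ones are set up outside this excerpt), it has the shape used by the
// "matching labels" test further below:
//
//	snapshots, errs, err := emitter.Snapshots([]string{namespace1, namespace2}, clients.WatchOpts{
//		Ctx:                ctx,
//		RefreshRate:        time.Second,
//		ExpressionSelector: labelExpression1,
//	})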
+ podWatched = github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList{pod4a, pod5a} + assertSnapshotpods(podWatched, podNotWatched) + + err = podClient.Delete(pod4a.GetMetadata().Namespace, pod4a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = podClient.Delete(pod5a.GetMetadata().Namespace, pod5a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + podNotWatched = append(podNotWatched, github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList{pod5a, pod5b}...) + assertSnapshotpods(nil, podNotWatched) + + // clean up environment + deleteNamespaces(ctx, kube, namespace3, namespace4) + getNewNamespaces() + } + + BeforeEach(func() { + err := os.Setenv(statusutils.PodNamespaceEnvName, "default") + Expect(err).NotTo(HaveOccurred()) + + ctx = context.Background() + namespace1 = helpers.RandString(8) + namespace2 = helpers.RandString(8) + namespace3 = helpers.RandString(8) + namespace4 = helpers.RandString(8) + namespace5 = helpers.RandString(8) + namespace6 = helpers.RandString(8) + + kube = helpers.MustKubeClient() + kubeCache, err = cache.NewKubeCoreCache(context.TODO(), kube) + Expect(err).NotTo(HaveOccurred()) + resourceNamespaceLister = namespace.NewKubeClientCacheResourceNamespaceLister(kube, kubeCache) + + createNamespaces(ctx, kube, namespace1, namespace2) + + cfg, err = kubeutils.GetConfig("", "") + Expect(err).NotTo(HaveOccurred()) + + clientset, err = apiext.NewForConfig(cfg) + Expect(err).NotTo(HaveOccurred()) + // SimpleMockResource Constructor + simpleMockResourceClientFactory := &factory.MemoryResourceClientFactory{ + Cache: memory.NewInMemoryResourceCache(), + } + + simpleMockResourceClient, err = NewSimpleMockResourceClient(ctx, simpleMockResourceClientFactory) + Expect(err).NotTo(HaveOccurred()) + // MockResource Constructor + mockResourceClientFactory := &factory.KubeResourceClientFactory{ + Crd: MockResourceCrd, + Cfg: cfg, + SharedCache: kuberc.NewKubeCache(context.TODO()), + } + + err = helpers.AddAndRegisterCrd(ctx, MockResourceCrd, clientset) + Expect(err).NotTo(HaveOccurred()) + + mockResourceClient, err = NewMockResourceClient(ctx, mockResourceClientFactory) + Expect(err).NotTo(HaveOccurred()) + // FakeResource Constructor + fakeResourceClientFactory := &factory.MemoryResourceClientFactory{ + Cache: memory.NewInMemoryResourceCache(), + } + + fakeResourceClient, err = NewFakeResourceClient(ctx, fakeResourceClientFactory) + Expect(err).NotTo(HaveOccurred()) + // AnotherMockResource Constructor + anotherMockResourceClientFactory := &factory.KubeResourceClientFactory{ + Crd: AnotherMockResourceCrd, + Cfg: cfg, + SharedCache: kuberc.NewKubeCache(context.TODO()), + } + + err = helpers.AddAndRegisterCrd(ctx, AnotherMockResourceCrd, clientset) + Expect(err).NotTo(HaveOccurred()) + + anotherMockResourceClient, err = NewAnotherMockResourceClient(ctx, anotherMockResourceClientFactory) + Expect(err).NotTo(HaveOccurred()) + // ClusterResource Constructor + clusterResourceClientFactory := &factory.KubeResourceClientFactory{ + Crd: ClusterResourceCrd, + Cfg: cfg, + SharedCache: kuberc.NewKubeCache(context.TODO()), + } + + err = helpers.AddAndRegisterCrd(ctx, ClusterResourceCrd, clientset) + Expect(err).NotTo(HaveOccurred()) + + clusterResourceClient, err = NewClusterResourceClient(ctx, clusterResourceClientFactory) Expect(err).NotTo(HaveOccurred()) + // MockCustomType Constructor + mockCustomTypeClientFactory := &factory.MemoryResourceClientFactory{ + Cache: 
memory.NewInMemoryResourceCache(), + } + + mockCustomTypeClient, err = NewMockCustomTypeClient(ctx, mockCustomTypeClientFactory) + Expect(err).NotTo(HaveOccurred()) + // MockCustomSpecHashType Constructor + mockCustomSpecHashTypeClientFactory := &factory.MemoryResourceClientFactory{ + Cache: memory.NewInMemoryResourceCache(), + } + + mockCustomSpecHashTypeClient, err = NewMockCustomSpecHashTypeClient(ctx, mockCustomSpecHashTypeClientFactory) + Expect(err).NotTo(HaveOccurred()) + // Pod Constructor + podClientFactory := &factory.MemoryResourceClientFactory{ + Cache: memory.NewInMemoryResourceCache(), + } + + podClient, err = github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.NewPodClient(ctx, podClientFactory) + Expect(err).NotTo(HaveOccurred()) + emitter = NewTestingEmitter(simpleMockResourceClient, mockResourceClient, fakeResourceClient, anotherMockResourceClient, clusterResourceClient, mockCustomTypeClient, mockCustomSpecHashTypeClient, podClient, resourceNamespaceLister) + }) + AfterEach(func() { + err := os.Unsetenv(statusutils.PodNamespaceEnvName) + Expect(err).NotTo(HaveOccurred()) + + kubeutils.DeleteNamespacesInParallelBlocking(ctx, kube, namespace1, namespace2) + clusterResourceClient.Delete(name1, clients.DeleteOpts{}) + clusterResourceClient.Delete(name2, clients.DeleteOpts{}) + }) + + Context("Tracking watched namespaces", func() { + It("tracks snapshots on changes to any resource", func() { + ctx := context.Background() + err := emitter.Register() + Expect(err).NotTo(HaveOccurred()) + + snapshots, errs, err := emitter.Snapshots([]string{namespace1, namespace2}, clients.WatchOpts{ + Ctx: ctx, + RefreshRate: time.Second, + }) + Expect(err).NotTo(HaveOccurred()) + + var snap *TestingSnapshot + + /* + SimpleMockResource + */ + + assertSnapshotSimplemocks := func(expectSimplemocks SimpleMockResourceList, unexpectSimplemocks SimpleMockResourceList) { + drain: + for { + select { + case snap = <-snapshots: + for _, expected := range expectSimplemocks { + if _, err := snap.Simplemocks.Find(expected.GetMetadata().Ref().Strings()); err != nil { + continue drain + } + } + for _, unexpected := range unexpectSimplemocks { + if _, err := snap.Simplemocks.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + continue drain + } + } + break drain + case err := <-errs: + Expect(err).NotTo(HaveOccurred()) + case <-time.After(time.Second * 10): + nsList1, _ := simpleMockResourceClient.List(namespace1, clients.ListOpts{}) + nsList2, _ := simpleMockResourceClient.List(namespace2, clients.ListOpts{}) + combined := append(nsList1, nsList2...) + Fail("expected final snapshot before 10 seconds. 
expected " + log.Sprintf("%v", combined)) + } + } + } + simpleMockResource1a, err := simpleMockResourceClient.Write(NewSimpleMockResource(namespace1, name5), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + simpleMockResource1b, err := simpleMockResourceClient.Write(NewSimpleMockResource(namespace2, name5), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotSimplemocks(SimpleMockResourceList{simpleMockResource1a, simpleMockResource1b}, nil) + simpleMockResource2a, err := simpleMockResourceClient.Write(NewSimpleMockResource(namespace1, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + simpleMockResource2b, err := simpleMockResourceClient.Write(NewSimpleMockResource(namespace2, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotSimplemocks(SimpleMockResourceList{simpleMockResource1a, simpleMockResource1b, simpleMockResource2a, simpleMockResource2b}, nil) + + err = simpleMockResourceClient.Delete(simpleMockResource2a.GetMetadata().Namespace, simpleMockResource2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = simpleMockResourceClient.Delete(simpleMockResource2b.GetMetadata().Namespace, simpleMockResource2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotSimplemocks(SimpleMockResourceList{simpleMockResource1a, simpleMockResource1b}, SimpleMockResourceList{simpleMockResource2a, simpleMockResource2b}) + + err = simpleMockResourceClient.Delete(simpleMockResource1a.GetMetadata().Namespace, simpleMockResource1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = simpleMockResourceClient.Delete(simpleMockResource1b.GetMetadata().Namespace, simpleMockResource1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotSimplemocks(nil, SimpleMockResourceList{simpleMockResource1a, simpleMockResource1b, simpleMockResource2a, simpleMockResource2b}) + + /* + MockResource + */ + + assertSnapshotMocks := func(expectMocks MockResourceList, unexpectMocks MockResourceList) { + drain: + for { + select { + case snap = <-snapshots: + for _, expected := range expectMocks { + if _, err := snap.Mocks.Find(expected.GetMetadata().Ref().Strings()); err != nil { + continue drain + } + } + for _, unexpected := range unexpectMocks { + if _, err := snap.Mocks.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + continue drain + } + } + break drain + case err := <-errs: + Expect(err).NotTo(HaveOccurred()) + case <-time.After(time.Second * 10): + nsList1, _ := mockResourceClient.List(namespace1, clients.ListOpts{}) + nsList2, _ := mockResourceClient.List(namespace2, clients.ListOpts{}) + combined := append(nsList1, nsList2...) + Fail("expected final snapshot before 10 seconds. 
expected " + log.Sprintf("%v", combined)) + } + } + } + mockResource1a, err := mockResourceClient.Write(NewMockResource(namespace1, name5), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockResource1b, err := mockResourceClient.Write(NewMockResource(namespace2, name5), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotMocks(MockResourceList{mockResource1a, mockResource1b}, nil) + mockResource2a, err := mockResourceClient.Write(NewMockResource(namespace1, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockResource2b, err := mockResourceClient.Write(NewMockResource(namespace2, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotMocks(MockResourceList{mockResource1a, mockResource1b, mockResource2a, mockResource2b}, nil) + + err = mockResourceClient.Delete(mockResource2a.GetMetadata().Namespace, mockResource2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = mockResourceClient.Delete(mockResource2b.GetMetadata().Namespace, mockResource2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotMocks(MockResourceList{mockResource1a, mockResource1b}, MockResourceList{mockResource2a, mockResource2b}) + + err = mockResourceClient.Delete(mockResource1a.GetMetadata().Namespace, mockResource1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = mockResourceClient.Delete(mockResource1b.GetMetadata().Namespace, mockResource1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotMocks(nil, MockResourceList{mockResource1a, mockResource1b, mockResource2a, mockResource2b}) + + /* + FakeResource + */ + + assertSnapshotFakes := func(expectFakes FakeResourceList, unexpectFakes FakeResourceList) { + drain: + for { + select { + case snap = <-snapshots: + for _, expected := range expectFakes { + if _, err := snap.Fakes.Find(expected.GetMetadata().Ref().Strings()); err != nil { + continue drain + } + } + for _, unexpected := range unexpectFakes { + if _, err := snap.Fakes.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + continue drain + } + } + break drain + case err := <-errs: + Expect(err).NotTo(HaveOccurred()) + case <-time.After(time.Second * 10): + nsList1, _ := fakeResourceClient.List(namespace1, clients.ListOpts{}) + nsList2, _ := fakeResourceClient.List(namespace2, clients.ListOpts{}) + combined := append(nsList1, nsList2...) + Fail("expected final snapshot before 10 seconds. 
expected " + log.Sprintf("%v", combined)) + } + } + } + fakeResource1a, err := fakeResourceClient.Write(NewFakeResource(namespace1, name5), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + fakeResource1b, err := fakeResourceClient.Write(NewFakeResource(namespace2, name5), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotFakes(FakeResourceList{fakeResource1a, fakeResource1b}, nil) + fakeResource2a, err := fakeResourceClient.Write(NewFakeResource(namespace1, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + fakeResource2b, err := fakeResourceClient.Write(NewFakeResource(namespace2, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotFakes(FakeResourceList{fakeResource1a, fakeResource1b, fakeResource2a, fakeResource2b}, nil) + + err = fakeResourceClient.Delete(fakeResource2a.GetMetadata().Namespace, fakeResource2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = fakeResourceClient.Delete(fakeResource2b.GetMetadata().Namespace, fakeResource2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotFakes(FakeResourceList{fakeResource1a, fakeResource1b}, FakeResourceList{fakeResource2a, fakeResource2b}) + + err = fakeResourceClient.Delete(fakeResource1a.GetMetadata().Namespace, fakeResource1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = fakeResourceClient.Delete(fakeResource1b.GetMetadata().Namespace, fakeResource1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotFakes(nil, FakeResourceList{fakeResource1a, fakeResource1b, fakeResource2a, fakeResource2b}) + + /* + AnotherMockResource + */ + + assertSnapshotAnothermockresources := func(expectAnothermockresources AnotherMockResourceList, unexpectAnothermockresources AnotherMockResourceList) { + drain: + for { + select { + case snap = <-snapshots: + for _, expected := range expectAnothermockresources { + if _, err := snap.Anothermockresources.Find(expected.GetMetadata().Ref().Strings()); err != nil { + continue drain + } + } + for _, unexpected := range unexpectAnothermockresources { + if _, err := snap.Anothermockresources.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + continue drain + } + } + break drain + case err := <-errs: + Expect(err).NotTo(HaveOccurred()) + case <-time.After(time.Second * 10): + nsList1, _ := anotherMockResourceClient.List(namespace1, clients.ListOpts{}) + nsList2, _ := anotherMockResourceClient.List(namespace2, clients.ListOpts{}) + combined := append(nsList1, nsList2...) + Fail("expected final snapshot before 10 seconds. 
expected " + log.Sprintf("%v", combined)) + } + } + } + anotherMockResource1a, err := anotherMockResourceClient.Write(NewAnotherMockResource(namespace1, name5), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + anotherMockResource1b, err := anotherMockResourceClient.Write(NewAnotherMockResource(namespace2, name5), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotAnothermockresources(AnotherMockResourceList{anotherMockResource1a, anotherMockResource1b}, nil) + anotherMockResource2a, err := anotherMockResourceClient.Write(NewAnotherMockResource(namespace1, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + anotherMockResource2b, err := anotherMockResourceClient.Write(NewAnotherMockResource(namespace2, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotAnothermockresources(AnotherMockResourceList{anotherMockResource1a, anotherMockResource1b, anotherMockResource2a, anotherMockResource2b}, nil) + + err = anotherMockResourceClient.Delete(anotherMockResource2a.GetMetadata().Namespace, anotherMockResource2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = anotherMockResourceClient.Delete(anotherMockResource2b.GetMetadata().Namespace, anotherMockResource2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotAnothermockresources(AnotherMockResourceList{anotherMockResource1a, anotherMockResource1b}, AnotherMockResourceList{anotherMockResource2a, anotherMockResource2b}) + + err = anotherMockResourceClient.Delete(anotherMockResource1a.GetMetadata().Namespace, anotherMockResource1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = anotherMockResourceClient.Delete(anotherMockResource1b.GetMetadata().Namespace, anotherMockResource1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotAnothermockresources(nil, AnotherMockResourceList{anotherMockResource1a, anotherMockResource1b, anotherMockResource2a, anotherMockResource2b}) + + /* + ClusterResource + */ + + assertSnapshotClusterresources := func(expectClusterresources ClusterResourceList, unexpectClusterresources ClusterResourceList) { + drain: + for { + select { + case snap = <-snapshots: + for _, expected := range expectClusterresources { + if _, err := snap.Clusterresources.Find(expected.GetMetadata().Ref().Strings()); err != nil { + continue drain + } + } + for _, unexpected := range unexpectClusterresources { + if _, err := snap.Clusterresources.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + continue drain + } + } + break drain + case err := <-errs: + Expect(err).NotTo(HaveOccurred()) + case <-time.After(time.Second * 10): + combined, _ := clusterResourceClient.List(clients.ListOpts{}) + Fail("expected final snapshot before 10 seconds. 
expected " + log.Sprintf("%v", combined)) + } + } + } + clusterResource1a, err := clusterResourceClient.Write(NewClusterResource(namespace1, name5), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotClusterresources(ClusterResourceList{clusterResource1a}, nil) + clusterResource2a, err := clusterResourceClient.Write(NewClusterResource(namespace1, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotClusterresources(ClusterResourceList{clusterResource1a, clusterResource2a}, nil) + + err = clusterResourceClient.Delete(clusterResource2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotClusterresources(ClusterResourceList{clusterResource1a}, ClusterResourceList{clusterResource2a}) + + err = clusterResourceClient.Delete(clusterResource1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotClusterresources(nil, ClusterResourceList{clusterResource1a, clusterResource2a}) + + /* + MockCustomType + */ + + assertSnapshotmcts := func(expectmcts MockCustomTypeList, unexpectmcts MockCustomTypeList) { + drain: + for { + select { + case snap = <-snapshots: + for _, expected := range expectmcts { + if _, err := snap.Mcts.Find(expected.GetMetadata().Ref().Strings()); err != nil { + continue drain + } + } + for _, unexpected := range unexpectmcts { + if _, err := snap.Mcts.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + continue drain + } + } + break drain + case err := <-errs: + Expect(err).NotTo(HaveOccurred()) + case <-time.After(time.Second * 10): + nsList1, _ := mockCustomTypeClient.List(namespace1, clients.ListOpts{}) + nsList2, _ := mockCustomTypeClient.List(namespace2, clients.ListOpts{}) + combined := append(nsList1, nsList2...) + Fail("expected final snapshot before 10 seconds. 
expected " + log.Sprintf("%v", combined)) + } + } + } + mockCustomType1a, err := mockCustomTypeClient.Write(NewMockCustomType(namespace1, name5), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockCustomType1b, err := mockCustomTypeClient.Write(NewMockCustomType(namespace2, name5), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotmcts(MockCustomTypeList{mockCustomType1a, mockCustomType1b}, nil) + mockCustomType2a, err := mockCustomTypeClient.Write(NewMockCustomType(namespace1, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockCustomType2b, err := mockCustomTypeClient.Write(NewMockCustomType(namespace2, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotmcts(MockCustomTypeList{mockCustomType1a, mockCustomType1b, mockCustomType2a, mockCustomType2b}, nil) + + err = mockCustomTypeClient.Delete(mockCustomType2a.GetMetadata().Namespace, mockCustomType2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = mockCustomTypeClient.Delete(mockCustomType2b.GetMetadata().Namespace, mockCustomType2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotmcts(MockCustomTypeList{mockCustomType1a, mockCustomType1b}, MockCustomTypeList{mockCustomType2a, mockCustomType2b}) + + err = mockCustomTypeClient.Delete(mockCustomType1a.GetMetadata().Namespace, mockCustomType1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = mockCustomTypeClient.Delete(mockCustomType1b.GetMetadata().Namespace, mockCustomType1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotmcts(nil, MockCustomTypeList{mockCustomType1a, mockCustomType1b, mockCustomType2a, mockCustomType2b}) + + /* + MockCustomSpecHashType + */ + + assertSnapshotmcshts := func(expectmcshts MockCustomSpecHashTypeList, unexpectmcshts MockCustomSpecHashTypeList) { + drain: + for { + select { + case snap = <-snapshots: + for _, expected := range expectmcshts { + if _, err := snap.Mcshts.Find(expected.GetMetadata().Ref().Strings()); err != nil { + continue drain + } + } + for _, unexpected := range unexpectmcshts { + if _, err := snap.Mcshts.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + continue drain + } + } + break drain + case err := <-errs: + Expect(err).NotTo(HaveOccurred()) + case <-time.After(time.Second * 10): + nsList1, _ := mockCustomSpecHashTypeClient.List(namespace1, clients.ListOpts{}) + nsList2, _ := mockCustomSpecHashTypeClient.List(namespace2, clients.ListOpts{}) + combined := append(nsList1, nsList2...) + Fail("expected final snapshot before 10 seconds. 
expected " + log.Sprintf("%v", combined)) + } + } + } + mockCustomSpecHashType1a, err := mockCustomSpecHashTypeClient.Write(NewMockCustomSpecHashType(namespace1, name5), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockCustomSpecHashType1b, err := mockCustomSpecHashTypeClient.Write(NewMockCustomSpecHashType(namespace2, name5), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotmcshts(MockCustomSpecHashTypeList{mockCustomSpecHashType1a, mockCustomSpecHashType1b}, nil) + mockCustomSpecHashType2a, err := mockCustomSpecHashTypeClient.Write(NewMockCustomSpecHashType(namespace1, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockCustomSpecHashType2b, err := mockCustomSpecHashTypeClient.Write(NewMockCustomSpecHashType(namespace2, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotmcshts(MockCustomSpecHashTypeList{mockCustomSpecHashType1a, mockCustomSpecHashType1b, mockCustomSpecHashType2a, mockCustomSpecHashType2b}, nil) + + err = mockCustomSpecHashTypeClient.Delete(mockCustomSpecHashType2a.GetMetadata().Namespace, mockCustomSpecHashType2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = mockCustomSpecHashTypeClient.Delete(mockCustomSpecHashType2b.GetMetadata().Namespace, mockCustomSpecHashType2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotmcshts(MockCustomSpecHashTypeList{mockCustomSpecHashType1a, mockCustomSpecHashType1b}, MockCustomSpecHashTypeList{mockCustomSpecHashType2a, mockCustomSpecHashType2b}) + + err = mockCustomSpecHashTypeClient.Delete(mockCustomSpecHashType1a.GetMetadata().Namespace, mockCustomSpecHashType1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = mockCustomSpecHashTypeClient.Delete(mockCustomSpecHashType1b.GetMetadata().Namespace, mockCustomSpecHashType1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotmcshts(nil, MockCustomSpecHashTypeList{mockCustomSpecHashType1a, mockCustomSpecHashType1b, mockCustomSpecHashType2a, mockCustomSpecHashType2b}) + + /* + Pod + */ + + assertSnapshotpods := func(expectpods github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList, unexpectpods github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList) { + drain: + for { + select { + case snap = <-snapshots: + for _, expected := range expectpods { + if _, err := snap.Pods.Find(expected.GetMetadata().Ref().Strings()); err != nil { + continue drain + } + } + for _, unexpected := range unexpectpods { + if _, err := snap.Pods.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + continue drain + } + } + break drain + case err := <-errs: + Expect(err).NotTo(HaveOccurred()) + case <-time.After(time.Second * 10): + nsList1, _ := podClient.List(namespace1, clients.ListOpts{}) + nsList2, _ := podClient.List(namespace2, clients.ListOpts{}) + combined := append(nsList1, nsList2...) + Fail("expected final snapshot before 10 seconds. 
expected " + log.Sprintf("%v", combined)) + } + } + } + pod1a, err := podClient.Write(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.NewPod(namespace1, name5), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + pod1b, err := podClient.Write(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.NewPod(namespace2, name5), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotpods(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList{pod1a, pod1b}, nil) + pod2a, err := podClient.Write(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.NewPod(namespace1, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + pod2b, err := podClient.Write(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.NewPod(namespace2, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotpods(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList{pod1a, pod1b, pod2a, pod2b}, nil) + + err = podClient.Delete(pod2a.GetMetadata().Namespace, pod2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = podClient.Delete(pod2b.GetMetadata().Namespace, pod2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) - assertSnapshotSimplemocks(nil, SimpleMockResourceList{simpleMockResource1a, simpleMockResource1b, simpleMockResource2a, simpleMockResource2b}) + assertSnapshotpods(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList{pod1a, pod1b}, github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList{pod2a, pod2b}) + + err = podClient.Delete(pod1a.GetMetadata().Namespace, pod1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = podClient.Delete(pod1b.GetMetadata().Namespace, pod1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotpods(nil, github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList{pod1a, pod1b, pod2a, pod2b}) + }) + + It("should be able to track all resources that are on labeled namespaces", func() { + runNamespacedSelectorsWithWatchNamespaces() + }) + }) + + Context("Tracking empty watched namespaces", func() { + It("tracks snapshots on changes to any resource using AllNamespace", func() { + ctx := context.Background() + err := emitter.Register() + Expect(err).NotTo(HaveOccurred()) + + snapshots, errs, err := emitter.Snapshots([]string{""}, clients.WatchOpts{ + Ctx: ctx, + RefreshRate: time.Second, + }) + Expect(err).NotTo(HaveOccurred()) + + var snap *TestingSnapshot + + /* + SimpleMockResource + */ + + assertSnapshotSimplemocks := func(expectSimplemocks SimpleMockResourceList, unexpectSimplemocks SimpleMockResourceList) { + drain: + for { + select { + case snap = <-snapshots: + for _, expected := range expectSimplemocks { + if _, err := snap.Simplemocks.Find(expected.GetMetadata().Ref().Strings()); err != nil { + continue drain + } + } + for _, unexpected := range unexpectSimplemocks { + if _, err := snap.Simplemocks.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + continue drain + } + } + break drain + case err := <-errs: + Expect(err).NotTo(HaveOccurred()) + case <-time.After(time.Second * 10): + nsList1, _ := simpleMockResourceClient.List(namespace1, clients.ListOpts{}) + nsList2, _ := simpleMockResourceClient.List(namespace2, clients.ListOpts{}) + combined := 
append(nsList1, nsList2...) + Fail("expected final snapshot before 10 seconds. expected " + log.Sprintf("%v", combined)) + } + } + } + + simpleMockResource1a, err := simpleMockResourceClient.Write(NewSimpleMockResource(namespace1, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + simpleMockResource1b, err := simpleMockResourceClient.Write(NewSimpleMockResource(namespace2, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + assertSnapshotSimplemocks(SimpleMockResourceList{simpleMockResource1a, simpleMockResource1b}, nil) + + simpleMockResource2a, err := simpleMockResourceClient.Write(NewSimpleMockResource(namespace1, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + simpleMockResource2b, err := simpleMockResourceClient.Write(NewSimpleMockResource(namespace2, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + assertSnapshotSimplemocks(SimpleMockResourceList{simpleMockResource1a, simpleMockResource1b, simpleMockResource2a, simpleMockResource2b}, nil) + + err = simpleMockResourceClient.Delete(simpleMockResource2a.GetMetadata().Namespace, simpleMockResource2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = simpleMockResourceClient.Delete(simpleMockResource2b.GetMetadata().Namespace, simpleMockResource2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + assertSnapshotSimplemocks(SimpleMockResourceList{simpleMockResource1a, simpleMockResource1b}, SimpleMockResourceList{simpleMockResource2a, simpleMockResource2b}) + + err = simpleMockResourceClient.Delete(simpleMockResource1a.GetMetadata().Namespace, simpleMockResource1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = simpleMockResourceClient.Delete(simpleMockResource1b.GetMetadata().Namespace, simpleMockResource1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + assertSnapshotSimplemocks(nil, SimpleMockResourceList{simpleMockResource1a, simpleMockResource1b, simpleMockResource2a, simpleMockResource2b}) + + /* + MockResource + */ + + assertSnapshotMocks := func(expectMocks MockResourceList, unexpectMocks MockResourceList) { + drain: + for { + select { + case snap = <-snapshots: + for _, expected := range expectMocks { + if _, err := snap.Mocks.Find(expected.GetMetadata().Ref().Strings()); err != nil { + continue drain + } + } + for _, unexpected := range unexpectMocks { + if _, err := snap.Mocks.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + continue drain + } + } + break drain + case err := <-errs: + Expect(err).NotTo(HaveOccurred()) + case <-time.After(time.Second * 10): + nsList1, _ := mockResourceClient.List(namespace1, clients.ListOpts{}) + nsList2, _ := mockResourceClient.List(namespace2, clients.ListOpts{}) + combined := append(nsList1, nsList2...) + Fail("expected final snapshot before 10 seconds. 
expected " + log.Sprintf("%v", combined)) + } + } + } + + mockResource1a, err := mockResourceClient.Write(NewMockResource(namespace1, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockResource1b, err := mockResourceClient.Write(NewMockResource(namespace2, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + assertSnapshotMocks(MockResourceList{mockResource1a, mockResource1b}, nil) + + mockResource2a, err := mockResourceClient.Write(NewMockResource(namespace1, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockResource2b, err := mockResourceClient.Write(NewMockResource(namespace2, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + assertSnapshotMocks(MockResourceList{mockResource1a, mockResource1b, mockResource2a, mockResource2b}, nil) + + err = mockResourceClient.Delete(mockResource2a.GetMetadata().Namespace, mockResource2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = mockResourceClient.Delete(mockResource2b.GetMetadata().Namespace, mockResource2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + assertSnapshotMocks(MockResourceList{mockResource1a, mockResource1b}, MockResourceList{mockResource2a, mockResource2b}) + + err = mockResourceClient.Delete(mockResource1a.GetMetadata().Namespace, mockResource1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = mockResourceClient.Delete(mockResource1b.GetMetadata().Namespace, mockResource1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + assertSnapshotMocks(nil, MockResourceList{mockResource1a, mockResource1b, mockResource2a, mockResource2b}) + + /* + FakeResource + */ + + assertSnapshotFakes := func(expectFakes FakeResourceList, unexpectFakes FakeResourceList) { + drain: + for { + select { + case snap = <-snapshots: + for _, expected := range expectFakes { + if _, err := snap.Fakes.Find(expected.GetMetadata().Ref().Strings()); err != nil { + continue drain + } + } + for _, unexpected := range unexpectFakes { + if _, err := snap.Fakes.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + continue drain + } + } + break drain + case err := <-errs: + Expect(err).NotTo(HaveOccurred()) + case <-time.After(time.Second * 10): + nsList1, _ := fakeResourceClient.List(namespace1, clients.ListOpts{}) + nsList2, _ := fakeResourceClient.List(namespace2, clients.ListOpts{}) + combined := append(nsList1, nsList2...) + Fail("expected final snapshot before 10 seconds. 
expected " + log.Sprintf("%v", combined)) + } + } + } + + fakeResource1a, err := fakeResourceClient.Write(NewFakeResource(namespace1, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + fakeResource1b, err := fakeResourceClient.Write(NewFakeResource(namespace2, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + assertSnapshotFakes(FakeResourceList{fakeResource1a, fakeResource1b}, nil) + + fakeResource2a, err := fakeResourceClient.Write(NewFakeResource(namespace1, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + fakeResource2b, err := fakeResourceClient.Write(NewFakeResource(namespace2, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + assertSnapshotFakes(FakeResourceList{fakeResource1a, fakeResource1b, fakeResource2a, fakeResource2b}, nil) + + err = fakeResourceClient.Delete(fakeResource2a.GetMetadata().Namespace, fakeResource2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = fakeResourceClient.Delete(fakeResource2b.GetMetadata().Namespace, fakeResource2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + assertSnapshotFakes(FakeResourceList{fakeResource1a, fakeResource1b}, FakeResourceList{fakeResource2a, fakeResource2b}) + + err = fakeResourceClient.Delete(fakeResource1a.GetMetadata().Namespace, fakeResource1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = fakeResourceClient.Delete(fakeResource1b.GetMetadata().Namespace, fakeResource1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + assertSnapshotFakes(nil, FakeResourceList{fakeResource1a, fakeResource1b, fakeResource2a, fakeResource2b}) + + /* + AnotherMockResource + */ + + assertSnapshotAnothermockresources := func(expectAnothermockresources AnotherMockResourceList, unexpectAnothermockresources AnotherMockResourceList) { + drain: + for { + select { + case snap = <-snapshots: + for _, expected := range expectAnothermockresources { + if _, err := snap.Anothermockresources.Find(expected.GetMetadata().Ref().Strings()); err != nil { + continue drain + } + } + for _, unexpected := range unexpectAnothermockresources { + if _, err := snap.Anothermockresources.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + continue drain + } + } + break drain + case err := <-errs: + Expect(err).NotTo(HaveOccurred()) + case <-time.After(time.Second * 10): + nsList1, _ := anotherMockResourceClient.List(namespace1, clients.ListOpts{}) + nsList2, _ := anotherMockResourceClient.List(namespace2, clients.ListOpts{}) + combined := append(nsList1, nsList2...) + Fail("expected final snapshot before 10 seconds. 
expected " + log.Sprintf("%v", combined)) + } + } + } + + anotherMockResource1a, err := anotherMockResourceClient.Write(NewAnotherMockResource(namespace1, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + anotherMockResource1b, err := anotherMockResourceClient.Write(NewAnotherMockResource(namespace2, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + assertSnapshotAnothermockresources(AnotherMockResourceList{anotherMockResource1a, anotherMockResource1b}, nil) + + anotherMockResource2a, err := anotherMockResourceClient.Write(NewAnotherMockResource(namespace1, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + anotherMockResource2b, err := anotherMockResourceClient.Write(NewAnotherMockResource(namespace2, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + assertSnapshotAnothermockresources(AnotherMockResourceList{anotherMockResource1a, anotherMockResource1b, anotherMockResource2a, anotherMockResource2b}, nil) + + err = anotherMockResourceClient.Delete(anotherMockResource2a.GetMetadata().Namespace, anotherMockResource2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = anotherMockResourceClient.Delete(anotherMockResource2b.GetMetadata().Namespace, anotherMockResource2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + assertSnapshotAnothermockresources(AnotherMockResourceList{anotherMockResource1a, anotherMockResource1b}, AnotherMockResourceList{anotherMockResource2a, anotherMockResource2b}) + + err = anotherMockResourceClient.Delete(anotherMockResource1a.GetMetadata().Namespace, anotherMockResource1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = anotherMockResourceClient.Delete(anotherMockResource1b.GetMetadata().Namespace, anotherMockResource1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + assertSnapshotAnothermockresources(nil, AnotherMockResourceList{anotherMockResource1a, anotherMockResource1b, anotherMockResource2a, anotherMockResource2b}) + + /* + ClusterResource + */ + + assertSnapshotClusterresources := func(expectClusterresources ClusterResourceList, unexpectClusterresources ClusterResourceList) { + drain: + for { + select { + case snap = <-snapshots: + for _, expected := range expectClusterresources { + if _, err := snap.Clusterresources.Find(expected.GetMetadata().Ref().Strings()); err != nil { + continue drain + } + } + for _, unexpected := range unexpectClusterresources { + if _, err := snap.Clusterresources.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + continue drain + } + } + break drain + case err := <-errs: + Expect(err).NotTo(HaveOccurred()) + case <-time.After(time.Second * 10): + combined, _ := clusterResourceClient.List(clients.ListOpts{}) + Fail("expected final snapshot before 10 seconds. 
expected " + log.Sprintf("%v", combined)) + } + } + } + + clusterResource1a, err := clusterResourceClient.Write(NewClusterResource(namespace1, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + assertSnapshotClusterresources(ClusterResourceList{clusterResource1a}, nil) + + clusterResource2a, err := clusterResourceClient.Write(NewClusterResource(namespace1, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + assertSnapshotClusterresources(ClusterResourceList{clusterResource1a, clusterResource2a}, nil) + + err = clusterResourceClient.Delete(clusterResource2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + assertSnapshotClusterresources(ClusterResourceList{clusterResource1a}, ClusterResourceList{clusterResource2a}) + + err = clusterResourceClient.Delete(clusterResource1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + assertSnapshotClusterresources(nil, ClusterResourceList{clusterResource1a, clusterResource2a}) + + /* + MockCustomType + */ + + assertSnapshotmcts := func(expectmcts MockCustomTypeList, unexpectmcts MockCustomTypeList) { + drain: + for { + select { + case snap = <-snapshots: + for _, expected := range expectmcts { + if _, err := snap.Mcts.Find(expected.GetMetadata().Ref().Strings()); err != nil { + continue drain + } + } + for _, unexpected := range unexpectmcts { + if _, err := snap.Mcts.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + continue drain + } + } + break drain + case err := <-errs: + Expect(err).NotTo(HaveOccurred()) + case <-time.After(time.Second * 10): + nsList1, _ := mockCustomTypeClient.List(namespace1, clients.ListOpts{}) + nsList2, _ := mockCustomTypeClient.List(namespace2, clients.ListOpts{}) + combined := append(nsList1, nsList2...) + Fail("expected final snapshot before 10 seconds. 
expected " + log.Sprintf("%v", combined)) + } + } + } + + mockCustomType1a, err := mockCustomTypeClient.Write(NewMockCustomType(namespace1, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockCustomType1b, err := mockCustomTypeClient.Write(NewMockCustomType(namespace2, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + assertSnapshotmcts(MockCustomTypeList{mockCustomType1a, mockCustomType1b}, nil) + + mockCustomType2a, err := mockCustomTypeClient.Write(NewMockCustomType(namespace1, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockCustomType2b, err := mockCustomTypeClient.Write(NewMockCustomType(namespace2, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + assertSnapshotmcts(MockCustomTypeList{mockCustomType1a, mockCustomType1b, mockCustomType2a, mockCustomType2b}, nil) + + err = mockCustomTypeClient.Delete(mockCustomType2a.GetMetadata().Namespace, mockCustomType2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = mockCustomTypeClient.Delete(mockCustomType2b.GetMetadata().Namespace, mockCustomType2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + assertSnapshotmcts(MockCustomTypeList{mockCustomType1a, mockCustomType1b}, MockCustomTypeList{mockCustomType2a, mockCustomType2b}) + + err = mockCustomTypeClient.Delete(mockCustomType1a.GetMetadata().Namespace, mockCustomType1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = mockCustomTypeClient.Delete(mockCustomType1b.GetMetadata().Namespace, mockCustomType1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + assertSnapshotmcts(nil, MockCustomTypeList{mockCustomType1a, mockCustomType1b, mockCustomType2a, mockCustomType2b}) + + /* + MockCustomSpecHashType + */ + + assertSnapshotmcshts := func(expectmcshts MockCustomSpecHashTypeList, unexpectmcshts MockCustomSpecHashTypeList) { + drain: + for { + select { + case snap = <-snapshots: + for _, expected := range expectmcshts { + if _, err := snap.Mcshts.Find(expected.GetMetadata().Ref().Strings()); err != nil { + continue drain + } + } + for _, unexpected := range unexpectmcshts { + if _, err := snap.Mcshts.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + continue drain + } + } + break drain + case err := <-errs: + Expect(err).NotTo(HaveOccurred()) + case <-time.After(time.Second * 10): + nsList1, _ := mockCustomSpecHashTypeClient.List(namespace1, clients.ListOpts{}) + nsList2, _ := mockCustomSpecHashTypeClient.List(namespace2, clients.ListOpts{}) + combined := append(nsList1, nsList2...) + Fail("expected final snapshot before 10 seconds. 
expected " + log.Sprintf("%v", combined)) + } + } + } + + mockCustomSpecHashType1a, err := mockCustomSpecHashTypeClient.Write(NewMockCustomSpecHashType(namespace1, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockCustomSpecHashType1b, err := mockCustomSpecHashTypeClient.Write(NewMockCustomSpecHashType(namespace2, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + assertSnapshotmcshts(MockCustomSpecHashTypeList{mockCustomSpecHashType1a, mockCustomSpecHashType1b}, nil) + + mockCustomSpecHashType2a, err := mockCustomSpecHashTypeClient.Write(NewMockCustomSpecHashType(namespace1, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockCustomSpecHashType2b, err := mockCustomSpecHashTypeClient.Write(NewMockCustomSpecHashType(namespace2, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + assertSnapshotmcshts(MockCustomSpecHashTypeList{mockCustomSpecHashType1a, mockCustomSpecHashType1b, mockCustomSpecHashType2a, mockCustomSpecHashType2b}, nil) + + err = mockCustomSpecHashTypeClient.Delete(mockCustomSpecHashType2a.GetMetadata().Namespace, mockCustomSpecHashType2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = mockCustomSpecHashTypeClient.Delete(mockCustomSpecHashType2b.GetMetadata().Namespace, mockCustomSpecHashType2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + assertSnapshotmcshts(MockCustomSpecHashTypeList{mockCustomSpecHashType1a, mockCustomSpecHashType1b}, MockCustomSpecHashTypeList{mockCustomSpecHashType2a, mockCustomSpecHashType2b}) + + err = mockCustomSpecHashTypeClient.Delete(mockCustomSpecHashType1a.GetMetadata().Namespace, mockCustomSpecHashType1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = mockCustomSpecHashTypeClient.Delete(mockCustomSpecHashType1b.GetMetadata().Namespace, mockCustomSpecHashType1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + assertSnapshotmcshts(nil, MockCustomSpecHashTypeList{mockCustomSpecHashType1a, mockCustomSpecHashType1b, mockCustomSpecHashType2a, mockCustomSpecHashType2b}) + + /* + Pod + */ + + assertSnapshotpods := func(expectpods github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList, unexpectpods github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList) { + drain: + for { + select { + case snap = <-snapshots: + for _, expected := range expectpods { + if _, err := snap.Pods.Find(expected.GetMetadata().Ref().Strings()); err != nil { + continue drain + } + } + for _, unexpected := range unexpectpods { + if _, err := snap.Pods.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + continue drain + } + } + break drain + case err := <-errs: + Expect(err).NotTo(HaveOccurred()) + case <-time.After(time.Second * 10): + nsList1, _ := podClient.List(namespace1, clients.ListOpts{}) + nsList2, _ := podClient.List(namespace2, clients.ListOpts{}) + combined := append(nsList1, nsList2...) + Fail("expected final snapshot before 10 seconds. 
expected " + log.Sprintf("%v", combined)) + } + } + } + + pod1a, err := podClient.Write(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.NewPod(namespace1, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + pod1b, err := podClient.Write(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.NewPod(namespace2, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + assertSnapshotpods(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList{pod1a, pod1b}, nil) + + pod2a, err := podClient.Write(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.NewPod(namespace1, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + pod2b, err := podClient.Write(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.NewPod(namespace2, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + assertSnapshotpods(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList{pod1a, pod1b, pod2a, pod2b}, nil) + + err = podClient.Delete(pod2a.GetMetadata().Namespace, pod2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = podClient.Delete(pod2b.GetMetadata().Namespace, pod2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + assertSnapshotpods(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList{pod1a, pod1b}, github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList{pod2a, pod2b}) + + err = podClient.Delete(pod1a.GetMetadata().Namespace, pod1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = podClient.Delete(pod1b.GetMetadata().Namespace, pod1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + assertSnapshotpods(nil, github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList{pod1a, pod1b, pod2a, pod2b}) + }) + + It("should be able to track resources only made with the matching labels", func() { + ctx := context.Background() + err := emitter.Register() + Expect(err).NotTo(HaveOccurred()) + + snapshots, errs, err := emitter.Snapshots([]string{""}, clients.WatchOpts{ + Ctx: ctx, + RefreshRate: time.Second, + ExpressionSelector: labelExpression1, + }) + Expect(err).NotTo(HaveOccurred()) + + var snap *TestingSnapshot + var previous *TestingSnapshot + + /* + SimpleMockResource + */ + + assertSnapshotSimplemocks := func(expectSimplemocks SimpleMockResourceList, unexpectSimplemocks SimpleMockResourceList) { + drain: + for { + select { + case snap = <-snapshots: + previous = snap + for _, expected := range expectSimplemocks { + if _, err := snap.Simplemocks.Find(expected.GetMetadata().Ref().Strings()); err != nil { + continue drain + } + } + for _, unexpected := range unexpectSimplemocks { + if _, err := snap.Simplemocks.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + continue drain + } + } + break drain + case err := <-errs: + Expect(err).NotTo(HaveOccurred()) + case <-time.After(time.Second * 10): + + var expectedResources map[string][]string + var unexpectedResource map[string][]string + + if previous != nil { + expectedResources = findNonMatchingResources(convertSimplemocksToMetadataGetter(expectSimplemocks), convertSimplemocksToMetadataGetter(previous.Simplemocks)) + unexpectedResource = findMatchingResources(convertSimplemocksToMetadataGetter(unexpectSimplemocks), convertSimplemocksToMetadataGetter(previous.Simplemocks)) 
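// When at least one snapshot has already been received, the timeout report is
// narrowed against it: findNonMatchingResources lists expected refs that never
// showed up in the previous snapshot, and findMatchingResources lists unexpected
// refs that are still present in it. Without a previous snapshot (the else branch
// below) the full expected/unexpected sets are reported instead, alongside a
// per-namespace listing gathered via getMapOfNamespaceResources.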
+ } else { + expectedResources = getMapOfResources(convertSimplemocksToMetadataGetter(expectSimplemocks)) + unexpectedResource = getMapOfResources(convertSimplemocksToMetadataGetter(unexpectSimplemocks)) + } + getList := func(ns string) ([]metadataGetter, error) { + l, err := simpleMockResourceClient.List(ns, clients.ListOpts{}) + return convertSimplemocksToMetadataGetter(l), err + } + namespaceResources := getMapOfNamespaceResources(getList) + Fail(fmt.Sprintf("expected final snapshot before 10 seconds. expected \nExpected:\n%#v\n\nUnexpected:\n%#v\n\nnamespaces:\n%#v", expectedResources, unexpectedResource, namespaceResources)) + } + } + } + + simpleMockResource1a, err := simpleMockResourceClient.Write(NewSimpleMockResource(namespace1, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + simpleMockResource1b, err := simpleMockResourceClient.Write(NewSimpleMockResource(namespace2, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + simpleMockResourceNotWatched := SimpleMockResourceList{simpleMockResource1a, simpleMockResource1b} + + createNamespaceWithLabel(ctx, kube, namespace3, labels1) + createNamespaceWithLabel(ctx, kube, namespace4, labels1) + + simpleMockResource2a, err := simpleMockResourceClient.Write(NewSimpleMockResource(namespace3, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + simpleMockResource2b, err := simpleMockResourceClient.Write(NewSimpleMockResource(namespace4, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + simpleMockResourceWatched := SimpleMockResourceList{simpleMockResource2a, simpleMockResource2b} + assertSnapshotSimplemocks(simpleMockResourceWatched, simpleMockResourceNotWatched) + + createNamespaces(ctx, kube, namespace5) + createNamespaceWithLabel(ctx, kube, namespace6, labels1) + + simpleMockResource5a, err := simpleMockResourceClient.Write(NewSimpleMockResource(namespace5, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + simpleMockResource5b, err := simpleMockResourceClient.Write(NewSimpleMockResource(namespace6, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + simpleMockResourceNotWatched = append(simpleMockResourceNotWatched, simpleMockResource5a) + simpleMockResourceWatched = append(simpleMockResourceWatched, simpleMockResource5b) + assertSnapshotSimplemocks(simpleMockResourceWatched, simpleMockResourceNotWatched) + + simpleMockResource7a, err := simpleMockResourceClient.Write(NewSimpleMockResource(namespace5, name4), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + simpleMockResource7b, err := simpleMockResourceClient.Write(NewSimpleMockResource(namespace6, name4), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + simpleMockResourceNotWatched = append(simpleMockResourceNotWatched, simpleMockResource7a) + simpleMockResourceWatched = append(simpleMockResourceWatched, simpleMockResource7b) + assertSnapshotSimplemocks(simpleMockResourceWatched, simpleMockResourceNotWatched) + + for _, r := range simpleMockResourceNotWatched { + err = simpleMockResourceClient.Delete(r.GetMetadata().Namespace, r.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + } + + for _, r := range simpleMockResourceWatched { + err = simpleMockResourceClient.Delete(r.GetMetadata().Namespace, r.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + simpleMockResourceNotWatched = append(simpleMockResourceNotWatched, r) + } + 
assertSnapshotSimplemocks(nil, simpleMockResourceNotWatched) + + // clean up environment + deleteNamespaces(ctx, kube, namespace3, namespace4, namespace5, namespace6) + getNewNamespaces() + + /* + MockResource + */ + + assertSnapshotMocks := func(expectMocks MockResourceList, unexpectMocks MockResourceList) { + drain: + for { + select { + case snap = <-snapshots: + previous = snap + for _, expected := range expectMocks { + if _, err := snap.Mocks.Find(expected.GetMetadata().Ref().Strings()); err != nil { + continue drain + } + } + for _, unexpected := range unexpectMocks { + if _, err := snap.Mocks.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + continue drain + } + } + break drain + case err := <-errs: + Expect(err).NotTo(HaveOccurred()) + case <-time.After(time.Second * 10): + + var expectedResources map[string][]string + var unexpectedResource map[string][]string + + if previous != nil { + expectedResources = findNonMatchingResources(convertMocksToMetadataGetter(expectMocks), convertMocksToMetadataGetter(previous.Mocks)) + unexpectedResource = findMatchingResources(convertMocksToMetadataGetter(unexpectMocks), convertMocksToMetadataGetter(previous.Mocks)) + } else { + expectedResources = getMapOfResources(convertMocksToMetadataGetter(expectMocks)) + unexpectedResource = getMapOfResources(convertMocksToMetadataGetter(unexpectMocks)) + } + getList := func(ns string) ([]metadataGetter, error) { + l, err := mockResourceClient.List(ns, clients.ListOpts{}) + return convertMocksToMetadataGetter(l), err + } + namespaceResources := getMapOfNamespaceResources(getList) + Fail(fmt.Sprintf("expected final snapshot before 10 seconds. expected \nExpected:\n%#v\n\nUnexpected:\n%#v\n\nnamespaces:\n%#v", expectedResources, unexpectedResource, namespaceResources)) + } + } + } + + mockResource1a, err := mockResourceClient.Write(NewMockResource(namespace1, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockResource1b, err := mockResourceClient.Write(NewMockResource(namespace2, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockResourceNotWatched := MockResourceList{mockResource1a, mockResource1b} + + createNamespaceWithLabel(ctx, kube, namespace3, labels1) + createNamespaceWithLabel(ctx, kube, namespace4, labels1) + + mockResource2a, err := mockResourceClient.Write(NewMockResource(namespace3, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockResource2b, err := mockResourceClient.Write(NewMockResource(namespace4, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockResourceWatched := MockResourceList{mockResource2a, mockResource2b} + assertSnapshotMocks(mockResourceWatched, mockResourceNotWatched) + + createNamespaces(ctx, kube, namespace5) + createNamespaceWithLabel(ctx, kube, namespace6, labels1) + + mockResource5a, err := mockResourceClient.Write(NewMockResource(namespace5, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockResource5b, err := mockResourceClient.Write(NewMockResource(namespace6, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockResourceNotWatched = append(mockResourceNotWatched, mockResource5a) + mockResourceWatched = append(mockResourceWatched, mockResource5b) + assertSnapshotMocks(mockResourceWatched, mockResourceNotWatched) + + mockResource7a, err := mockResourceClient.Write(NewMockResource(namespace5, name4), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockResource7b, err := 
mockResourceClient.Write(NewMockResource(namespace6, name4), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockResourceNotWatched = append(mockResourceNotWatched, mockResource7a) + mockResourceWatched = append(mockResourceWatched, mockResource7b) + assertSnapshotMocks(mockResourceWatched, mockResourceNotWatched) + + for _, r := range mockResourceNotWatched { + err = mockResourceClient.Delete(r.GetMetadata().Namespace, r.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + } + + for _, r := range mockResourceWatched { + err = mockResourceClient.Delete(r.GetMetadata().Namespace, r.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockResourceNotWatched = append(mockResourceNotWatched, r) + } + assertSnapshotMocks(nil, mockResourceNotWatched) + + // clean up environment + deleteNamespaces(ctx, kube, namespace3, namespace4, namespace5, namespace6) + getNewNamespaces() + + /* + FakeResource + */ + + assertSnapshotFakes := func(expectFakes FakeResourceList, unexpectFakes FakeResourceList) { + drain: + for { + select { + case snap = <-snapshots: + previous = snap + for _, expected := range expectFakes { + if _, err := snap.Fakes.Find(expected.GetMetadata().Ref().Strings()); err != nil { + continue drain + } + } + for _, unexpected := range unexpectFakes { + if _, err := snap.Fakes.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + continue drain + } + } + break drain + case err := <-errs: + Expect(err).NotTo(HaveOccurred()) + case <-time.After(time.Second * 10): + + var expectedResources map[string][]string + var unexpectedResource map[string][]string + + if previous != nil { + expectedResources = findNonMatchingResources(convertFakesToMetadataGetter(expectFakes), convertFakesToMetadataGetter(previous.Fakes)) + unexpectedResource = findMatchingResources(convertFakesToMetadataGetter(unexpectFakes), convertFakesToMetadataGetter(previous.Fakes)) + } else { + expectedResources = getMapOfResources(convertFakesToMetadataGetter(expectFakes)) + unexpectedResource = getMapOfResources(convertFakesToMetadataGetter(unexpectFakes)) + } + getList := func(ns string) ([]metadataGetter, error) { + l, err := fakeResourceClient.List(ns, clients.ListOpts{}) + return convertFakesToMetadataGetter(l), err + } + namespaceResources := getMapOfNamespaceResources(getList) + Fail(fmt.Sprintf("expected final snapshot before 10 seconds. 
expected \nExpected:\n%#v\n\nUnexpected:\n%#v\n\nnamespaces:\n%#v", expectedResources, unexpectedResource, namespaceResources)) + } + } + } + + fakeResource1a, err := fakeResourceClient.Write(NewFakeResource(namespace1, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + fakeResource1b, err := fakeResourceClient.Write(NewFakeResource(namespace2, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + fakeResourceNotWatched := FakeResourceList{fakeResource1a, fakeResource1b} + + createNamespaceWithLabel(ctx, kube, namespace3, labels1) + createNamespaceWithLabel(ctx, kube, namespace4, labels1) + + fakeResource2a, err := fakeResourceClient.Write(NewFakeResource(namespace3, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + fakeResource2b, err := fakeResourceClient.Write(NewFakeResource(namespace4, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + fakeResourceWatched := FakeResourceList{fakeResource2a, fakeResource2b} + assertSnapshotFakes(fakeResourceWatched, fakeResourceNotWatched) + + createNamespaces(ctx, kube, namespace5) + createNamespaceWithLabel(ctx, kube, namespace6, labels1) + + fakeResource5a, err := fakeResourceClient.Write(NewFakeResource(namespace5, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + fakeResource5b, err := fakeResourceClient.Write(NewFakeResource(namespace6, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + fakeResourceNotWatched = append(fakeResourceNotWatched, fakeResource5a) + fakeResourceWatched = append(fakeResourceWatched, fakeResource5b) + assertSnapshotFakes(fakeResourceWatched, fakeResourceNotWatched) + + fakeResource7a, err := fakeResourceClient.Write(NewFakeResource(namespace5, name4), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + fakeResource7b, err := fakeResourceClient.Write(NewFakeResource(namespace6, name4), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + fakeResourceNotWatched = append(fakeResourceNotWatched, fakeResource7a) + fakeResourceWatched = append(fakeResourceWatched, fakeResource7b) + assertSnapshotFakes(fakeResourceWatched, fakeResourceNotWatched) + + for _, r := range fakeResourceNotWatched { + err = fakeResourceClient.Delete(r.GetMetadata().Namespace, r.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + } + + for _, r := range fakeResourceWatched { + err = fakeResourceClient.Delete(r.GetMetadata().Namespace, r.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + fakeResourceNotWatched = append(fakeResourceNotWatched, r) + } + assertSnapshotFakes(nil, fakeResourceNotWatched) + + // clean up environment + deleteNamespaces(ctx, kube, namespace3, namespace4, namespace5, namespace6) + getNewNamespaces() + + /* + AnotherMockResource + */ + + assertSnapshotAnothermockresources := func(expectAnothermockresources AnotherMockResourceList, unexpectAnothermockresources AnotherMockResourceList) { + drain: + for { + select { + case snap = <-snapshots: + previous = snap + for _, expected := range expectAnothermockresources { + if _, err := snap.Anothermockresources.Find(expected.GetMetadata().Ref().Strings()); err != nil { + continue drain + } + } + for _, unexpected := range unexpectAnothermockresources { + if _, err := snap.Anothermockresources.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + continue drain + } + } + break drain + case err := <-errs: + Expect(err).NotTo(HaveOccurred()) + 
case <-time.After(time.Second * 10): + + var expectedResources map[string][]string + var unexpectedResource map[string][]string + + if previous != nil { + expectedResources = findNonMatchingResources(convertAnothermockresourcesToMetadataGetter(expectAnothermockresources), convertAnothermockresourcesToMetadataGetter(previous.Anothermockresources)) + unexpectedResource = findMatchingResources(convertAnothermockresourcesToMetadataGetter(unexpectAnothermockresources), convertAnothermockresourcesToMetadataGetter(previous.Anothermockresources)) + } else { + expectedResources = getMapOfResources(convertAnothermockresourcesToMetadataGetter(expectAnothermockresources)) + unexpectedResource = getMapOfResources(convertAnothermockresourcesToMetadataGetter(unexpectAnothermockresources)) + } + getList := func(ns string) ([]metadataGetter, error) { + l, err := anotherMockResourceClient.List(ns, clients.ListOpts{}) + return convertAnothermockresourcesToMetadataGetter(l), err + } + namespaceResources := getMapOfNamespaceResources(getList) + Fail(fmt.Sprintf("expected final snapshot before 10 seconds. expected \nExpected:\n%#v\n\nUnexpected:\n%#v\n\nnamespaces:\n%#v", expectedResources, unexpectedResource, namespaceResources)) + } + } + } + + anotherMockResource1a, err := anotherMockResourceClient.Write(NewAnotherMockResource(namespace1, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + anotherMockResource1b, err := anotherMockResourceClient.Write(NewAnotherMockResource(namespace2, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + anotherMockResourceNotWatched := AnotherMockResourceList{anotherMockResource1a, anotherMockResource1b} + + createNamespaceWithLabel(ctx, kube, namespace3, labels1) + createNamespaceWithLabel(ctx, kube, namespace4, labels1) + + anotherMockResource2a, err := anotherMockResourceClient.Write(NewAnotherMockResource(namespace3, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + anotherMockResource2b, err := anotherMockResourceClient.Write(NewAnotherMockResource(namespace4, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + anotherMockResourceWatched := AnotherMockResourceList{anotherMockResource2a, anotherMockResource2b} + assertSnapshotAnothermockresources(anotherMockResourceWatched, anotherMockResourceNotWatched) + + createNamespaces(ctx, kube, namespace5) + createNamespaceWithLabel(ctx, kube, namespace6, labels1) + + anotherMockResource5a, err := anotherMockResourceClient.Write(NewAnotherMockResource(namespace5, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + anotherMockResource5b, err := anotherMockResourceClient.Write(NewAnotherMockResource(namespace6, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + anotherMockResourceNotWatched = append(anotherMockResourceNotWatched, anotherMockResource5a) + anotherMockResourceWatched = append(anotherMockResourceWatched, anotherMockResource5b) + assertSnapshotAnothermockresources(anotherMockResourceWatched, anotherMockResourceNotWatched) + + anotherMockResource7a, err := anotherMockResourceClient.Write(NewAnotherMockResource(namespace5, name4), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + anotherMockResource7b, err := anotherMockResourceClient.Write(NewAnotherMockResource(namespace6, name4), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + anotherMockResourceNotWatched = append(anotherMockResourceNotWatched, anotherMockResource7a) + anotherMockResourceWatched 
= append(anotherMockResourceWatched, anotherMockResource7b) + assertSnapshotAnothermockresources(anotherMockResourceWatched, anotherMockResourceNotWatched) + + for _, r := range anotherMockResourceNotWatched { + err = anotherMockResourceClient.Delete(r.GetMetadata().Namespace, r.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + } + + for _, r := range anotherMockResourceWatched { + err = anotherMockResourceClient.Delete(r.GetMetadata().Namespace, r.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + anotherMockResourceNotWatched = append(anotherMockResourceNotWatched, r) + } + assertSnapshotAnothermockresources(nil, anotherMockResourceNotWatched) + + // clean up environment + deleteNamespaces(ctx, kube, namespace3, namespace4, namespace5, namespace6) + getNewNamespaces() + + /* + ClusterResource + */ + + assertSnapshotClusterresources := func(expectClusterresources ClusterResourceList, unexpectClusterresources ClusterResourceList) { + drain: + for { + select { + case snap = <-snapshots: + previous = snap + for _, expected := range expectClusterresources { + if _, err := snap.Clusterresources.Find(expected.GetMetadata().Ref().Strings()); err != nil { + continue drain + } + } + for _, unexpected := range unexpectClusterresources { + if _, err := snap.Clusterresources.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + continue drain + } + } + break drain + case err := <-errs: + Expect(err).NotTo(HaveOccurred()) + case <-time.After(time.Second * 10): + combined, _ := clusterResourceClient.List(clients.ListOpts{}) + Fail("expected final snapshot before 10 seconds. expected " + log.Sprintf("%v", combined)) + } + } + } + + clusterResource1a, err := clusterResourceClient.Write(NewClusterResource(namespace1, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + clusterResourceWatched := ClusterResourceList{clusterResource1a} + assertSnapshotClusterresources(clusterResourceWatched, nil) + + createNamespaceWithLabel(ctx, kube, namespace3, labels1) + createNamespaceWithLabel(ctx, kube, namespace4, labels1) + + clusterResource2a, err := clusterResourceClient.Write(NewClusterResource(namespace3, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + clusterResourceWatched = append(clusterResourceWatched, clusterResource2a) + assertSnapshotClusterresources(clusterResourceWatched, nil) + + createNamespaces(ctx, kube, namespace5) + createNamespaceWithLabel(ctx, kube, namespace6, labels1) + + clusterResource5a, err := clusterResourceClient.Write(NewClusterResource(namespace5, name5), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + clusterResourceWatched = append(clusterResourceWatched, clusterResource5a) + assertSnapshotClusterresources(clusterResourceWatched, nil) + + err = clusterResourceClient.Delete(clusterResource1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + clusterResourceWatched = ClusterResourceList{clusterResource2a, clusterResource5a} + clusterResourceNotWatched := ClusterResourceList{clusterResource1a} + assertSnapshotClusterresources(clusterResourceWatched, clusterResourceNotWatched) + + err = clusterResourceClient.Delete(clusterResource2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + clusterResourceWatched = ClusterResourceList{clusterResource5a} + clusterResourceNotWatched = append(clusterResourceNotWatched, clusterResource2a) + 
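Unlike the namespaced resources above, ClusterResource is cluster scoped, so the namespace expression selector never filters it: every ClusterResource written in this test is expected in the snapshot no matter which namespace constant it was created against, and it only moves to the not-watched list once it is deleted by name. The small sketch below contrasts the two delete signatures the test exercises; the interface names are illustrative and assume only the clients package already imported by this file.

package example

import "github.com/solo-io/solo-kit/pkg/api/v1/clients"

// Illustrative only: namespaced clients (mocks, fakes, pods, ...) delete by
// namespace and name, while the cluster-scoped client deletes by name alone,
// which is why namespace selection never applies to ClusterResource here.
type namespacedDeleter interface {
	Delete(namespace, name string, opts clients.DeleteOpts) error
}

type clusterScopedDeleter interface {
	Delete(name string, opts clients.DeleteOpts) error
}
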
assertSnapshotClusterresources(clusterResourceWatched, clusterResourceNotWatched) + + err = clusterResourceClient.Delete(clusterResource5a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + clusterResourceNotWatched = append(clusterResourceNotWatched, clusterResource5a) + assertSnapshotClusterresources(nil, clusterResourceNotWatched) + + // clean up environment + deleteNamespaces(ctx, kube, namespace3, namespace4, namespace5, namespace6) + getNewNamespaces() + + /* + MockCustomType + */ + + assertSnapshotmcts := func(expectmcts MockCustomTypeList, unexpectmcts MockCustomTypeList) { + drain: + for { + select { + case snap = <-snapshots: + previous = snap + for _, expected := range expectmcts { + if _, err := snap.Mcts.Find(expected.GetMetadata().Ref().Strings()); err != nil { + continue drain + } + } + for _, unexpected := range unexpectmcts { + if _, err := snap.Mcts.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + continue drain + } + } + break drain + case err := <-errs: + Expect(err).NotTo(HaveOccurred()) + case <-time.After(time.Second * 10): + + var expectedResources map[string][]string + var unexpectedResource map[string][]string + + if previous != nil { + expectedResources = findNonMatchingResources(convertmctsToMetadataGetter(expectmcts), convertmctsToMetadataGetter(previous.Mcts)) + unexpectedResource = findMatchingResources(convertmctsToMetadataGetter(unexpectmcts), convertmctsToMetadataGetter(previous.Mcts)) + } else { + expectedResources = getMapOfResources(convertmctsToMetadataGetter(expectmcts)) + unexpectedResource = getMapOfResources(convertmctsToMetadataGetter(unexpectmcts)) + } + getList := func(ns string) ([]metadataGetter, error) { + l, err := mockCustomTypeClient.List(ns, clients.ListOpts{}) + return convertmctsToMetadataGetter(l), err + } + namespaceResources := getMapOfNamespaceResources(getList) + Fail(fmt.Sprintf("expected final snapshot before 10 seconds. 
expected \nExpected:\n%#v\n\nUnexpected:\n%#v\n\nnamespaces:\n%#v", expectedResources, unexpectedResource, namespaceResources)) + } + } + } + + mockCustomType1a, err := mockCustomTypeClient.Write(NewMockCustomType(namespace1, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockCustomType1b, err := mockCustomTypeClient.Write(NewMockCustomType(namespace2, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockCustomTypeNotWatched := MockCustomTypeList{mockCustomType1a, mockCustomType1b} + + createNamespaceWithLabel(ctx, kube, namespace3, labels1) + createNamespaceWithLabel(ctx, kube, namespace4, labels1) + + mockCustomType2a, err := mockCustomTypeClient.Write(NewMockCustomType(namespace3, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockCustomType2b, err := mockCustomTypeClient.Write(NewMockCustomType(namespace4, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockCustomTypeWatched := MockCustomTypeList{mockCustomType2a, mockCustomType2b} + assertSnapshotmcts(mockCustomTypeWatched, mockCustomTypeNotWatched) + + createNamespaces(ctx, kube, namespace5) + createNamespaceWithLabel(ctx, kube, namespace6, labels1) + + mockCustomType5a, err := mockCustomTypeClient.Write(NewMockCustomType(namespace5, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockCustomType5b, err := mockCustomTypeClient.Write(NewMockCustomType(namespace6, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockCustomTypeNotWatched = append(mockCustomTypeNotWatched, mockCustomType5a) + mockCustomTypeWatched = append(mockCustomTypeWatched, mockCustomType5b) + assertSnapshotmcts(mockCustomTypeWatched, mockCustomTypeNotWatched) + + mockCustomType7a, err := mockCustomTypeClient.Write(NewMockCustomType(namespace5, name4), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockCustomType7b, err := mockCustomTypeClient.Write(NewMockCustomType(namespace6, name4), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockCustomTypeNotWatched = append(mockCustomTypeNotWatched, mockCustomType7a) + mockCustomTypeWatched = append(mockCustomTypeWatched, mockCustomType7b) + assertSnapshotmcts(mockCustomTypeWatched, mockCustomTypeNotWatched) + + for _, r := range mockCustomTypeNotWatched { + err = mockCustomTypeClient.Delete(r.GetMetadata().Namespace, r.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + } + + for _, r := range mockCustomTypeWatched { + err = mockCustomTypeClient.Delete(r.GetMetadata().Namespace, r.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockCustomTypeNotWatched = append(mockCustomTypeNotWatched, r) + } + assertSnapshotmcts(nil, mockCustomTypeNotWatched) + + // clean up environment + deleteNamespaces(ctx, kube, namespace3, namespace4, namespace5, namespace6) + getNewNamespaces() + + /* + MockCustomSpecHashType + */ + + assertSnapshotmcshts := func(expectmcshts MockCustomSpecHashTypeList, unexpectmcshts MockCustomSpecHashTypeList) { + drain: + for { + select { + case snap = <-snapshots: + previous = snap + for _, expected := range expectmcshts { + if _, err := snap.Mcshts.Find(expected.GetMetadata().Ref().Strings()); err != nil { + continue drain + } + } + for _, unexpected := range unexpectmcshts { + if _, err := snap.Mcshts.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + continue drain + } + } + break drain + case err := <-errs: + 
Expect(err).NotTo(HaveOccurred()) + case <-time.After(time.Second * 10): + + var expectedResources map[string][]string + var unexpectedResource map[string][]string + + if previous != nil { + expectedResources = findNonMatchingResources(convertmcshtsToMetadataGetter(expectmcshts), convertmcshtsToMetadataGetter(previous.Mcshts)) + unexpectedResource = findMatchingResources(convertmcshtsToMetadataGetter(unexpectmcshts), convertmcshtsToMetadataGetter(previous.Mcshts)) + } else { + expectedResources = getMapOfResources(convertmcshtsToMetadataGetter(expectmcshts)) + unexpectedResource = getMapOfResources(convertmcshtsToMetadataGetter(unexpectmcshts)) + } + getList := func(ns string) ([]metadataGetter, error) { + l, err := mockCustomSpecHashTypeClient.List(ns, clients.ListOpts{}) + return convertmcshtsToMetadataGetter(l), err + } + namespaceResources := getMapOfNamespaceResources(getList) + Fail(fmt.Sprintf("expected final snapshot before 10 seconds. expected \nExpected:\n%#v\n\nUnexpected:\n%#v\n\nnamespaces:\n%#v", expectedResources, unexpectedResource, namespaceResources)) + } + } + } + + mockCustomSpecHashType1a, err := mockCustomSpecHashTypeClient.Write(NewMockCustomSpecHashType(namespace1, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockCustomSpecHashType1b, err := mockCustomSpecHashTypeClient.Write(NewMockCustomSpecHashType(namespace2, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockCustomSpecHashTypeNotWatched := MockCustomSpecHashTypeList{mockCustomSpecHashType1a, mockCustomSpecHashType1b} + + createNamespaceWithLabel(ctx, kube, namespace3, labels1) + createNamespaceWithLabel(ctx, kube, namespace4, labels1) + + mockCustomSpecHashType2a, err := mockCustomSpecHashTypeClient.Write(NewMockCustomSpecHashType(namespace3, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockCustomSpecHashType2b, err := mockCustomSpecHashTypeClient.Write(NewMockCustomSpecHashType(namespace4, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockCustomSpecHashTypeWatched := MockCustomSpecHashTypeList{mockCustomSpecHashType2a, mockCustomSpecHashType2b} + assertSnapshotmcshts(mockCustomSpecHashTypeWatched, mockCustomSpecHashTypeNotWatched) + + createNamespaces(ctx, kube, namespace5) + createNamespaceWithLabel(ctx, kube, namespace6, labels1) + + mockCustomSpecHashType5a, err := mockCustomSpecHashTypeClient.Write(NewMockCustomSpecHashType(namespace5, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockCustomSpecHashType5b, err := mockCustomSpecHashTypeClient.Write(NewMockCustomSpecHashType(namespace6, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockCustomSpecHashTypeNotWatched = append(mockCustomSpecHashTypeNotWatched, mockCustomSpecHashType5a) + mockCustomSpecHashTypeWatched = append(mockCustomSpecHashTypeWatched, mockCustomSpecHashType5b) + assertSnapshotmcshts(mockCustomSpecHashTypeWatched, mockCustomSpecHashTypeNotWatched) + + mockCustomSpecHashType7a, err := mockCustomSpecHashTypeClient.Write(NewMockCustomSpecHashType(namespace5, name4), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockCustomSpecHashType7b, err := mockCustomSpecHashTypeClient.Write(NewMockCustomSpecHashType(namespace6, name4), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockCustomSpecHashTypeNotWatched = append(mockCustomSpecHashTypeNotWatched, mockCustomSpecHashType7a) + mockCustomSpecHashTypeWatched = 
append(mockCustomSpecHashTypeWatched, mockCustomSpecHashType7b) + assertSnapshotmcshts(mockCustomSpecHashTypeWatched, mockCustomSpecHashTypeNotWatched) + + for _, r := range mockCustomSpecHashTypeNotWatched { + err = mockCustomSpecHashTypeClient.Delete(r.GetMetadata().Namespace, r.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + } + + for _, r := range mockCustomSpecHashTypeWatched { + err = mockCustomSpecHashTypeClient.Delete(r.GetMetadata().Namespace, r.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockCustomSpecHashTypeNotWatched = append(mockCustomSpecHashTypeNotWatched, r) + } + assertSnapshotmcshts(nil, mockCustomSpecHashTypeNotWatched) + + // clean up environment + deleteNamespaces(ctx, kube, namespace3, namespace4, namespace5, namespace6) + getNewNamespaces() + + /* + Pod + */ + + assertSnapshotpods := func(expectpods github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList, unexpectpods github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList) { + drain: + for { + select { + case snap = <-snapshots: + previous = snap + for _, expected := range expectpods { + if _, err := snap.Pods.Find(expected.GetMetadata().Ref().Strings()); err != nil { + continue drain + } + } + for _, unexpected := range unexpectpods { + if _, err := snap.Pods.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + continue drain + } + } + break drain + case err := <-errs: + Expect(err).NotTo(HaveOccurred()) + case <-time.After(time.Second * 10): + + var expectedResources map[string][]string + var unexpectedResource map[string][]string + + if previous != nil { + expectedResources = findNonMatchingResources(convertpodsToMetadataGetter(expectpods), convertpodsToMetadataGetter(previous.Pods)) + unexpectedResource = findMatchingResources(convertpodsToMetadataGetter(unexpectpods), convertpodsToMetadataGetter(previous.Pods)) + } else { + expectedResources = getMapOfResources(convertpodsToMetadataGetter(expectpods)) + unexpectedResource = getMapOfResources(convertpodsToMetadataGetter(unexpectpods)) + } + getList := func(ns string) ([]metadataGetter, error) { + l, err := podClient.List(ns, clients.ListOpts{}) + return convertpodsToMetadataGetter(l), err + } + namespaceResources := getMapOfNamespaceResources(getList) + Fail(fmt.Sprintf("expected final snapshot before 10 seconds. 
expected \nExpected:\n%#v\n\nUnexpected:\n%#v\n\nnamespaces:\n%#v", expectedResources, unexpectedResource, namespaceResources)) + } + } + } + + pod1a, err := podClient.Write(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.NewPod(namespace1, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + pod1b, err := podClient.Write(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.NewPod(namespace2, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + podNotWatched := github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList{pod1a, pod1b} + + createNamespaceWithLabel(ctx, kube, namespace3, labels1) + createNamespaceWithLabel(ctx, kube, namespace4, labels1) + + pod2a, err := podClient.Write(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.NewPod(namespace3, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + pod2b, err := podClient.Write(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.NewPod(namespace4, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + podWatched := github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList{pod2a, pod2b} + assertSnapshotpods(podWatched, podNotWatched) + + createNamespaces(ctx, kube, namespace5) + createNamespaceWithLabel(ctx, kube, namespace6, labels1) + + pod5a, err := podClient.Write(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.NewPod(namespace5, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + pod5b, err := podClient.Write(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.NewPod(namespace6, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + podNotWatched = append(podNotWatched, pod5a) + podWatched = append(podWatched, pod5b) + assertSnapshotpods(podWatched, podNotWatched) + + pod7a, err := podClient.Write(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.NewPod(namespace5, name4), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + pod7b, err := podClient.Write(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.NewPod(namespace6, name4), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + podNotWatched = append(podNotWatched, pod7a) + podWatched = append(podWatched, pod7b) + assertSnapshotpods(podWatched, podNotWatched) + + for _, r := range podNotWatched { + err = podClient.Delete(r.GetMetadata().Namespace, r.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + } + + for _, r := range podWatched { + err = podClient.Delete(r.GetMetadata().Namespace, r.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + podNotWatched = append(podNotWatched, r) + } + assertSnapshotpods(nil, podNotWatched) + + // clean up environment + deleteNamespaces(ctx, kube, namespace3, namespace4, namespace5, namespace6) + getNewNamespaces() + }) + }) + + Context("Tracking resources on namespaces that are deleted", func() { + It("Should not contain resources from a deleted namespace", func() { + ctx := context.Background() + err := emitter.Register() + Expect(err).NotTo(HaveOccurred()) + + snapshots, errs, err := emitter.Snapshots([]string{""}, clients.WatchOpts{ + Ctx: ctx, + RefreshRate: time.Second, + }) + Expect(err).NotTo(HaveOccurred()) + + var snap *TestingSnapshot + + /* + SimpleMockResource + */ + assertSnapshotSimplemocks := func(expectSimplemocks SimpleMockResourceList, 
unexpectSimplemocks SimpleMockResourceList) { + drain: + for { + select { + case snap = <-snapshots: + for _, expected := range expectSimplemocks { + if _, err := snap.Simplemocks.Find(expected.GetMetadata().Ref().Strings()); err != nil { + continue drain + } + } + for _, unexpected := range unexpectSimplemocks { + if _, err := snap.Simplemocks.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + continue drain + } + } + break drain + case err := <-errs: + Expect(err).NotTo(HaveOccurred()) + case <-time.After(time.Second * 10): + nsList1, _ := simpleMockResourceClient.List(namespace1, clients.ListOpts{}) + nsList2, _ := simpleMockResourceClient.List(namespace2, clients.ListOpts{}) + combined := append(nsList1, nsList2...) + Fail("expected final snapshot before 10 seconds. expected " + log.Sprintf("%v", combined)) + } + } + } + + simpleMockResource1a, err := simpleMockResourceClient.Write(NewSimpleMockResource(namespace1, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + simpleMockResource1b, err := simpleMockResourceClient.Write(NewSimpleMockResource(namespace2, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + simpleMockResourceWatched := SimpleMockResourceList{simpleMockResource1a, simpleMockResource1b} + assertSnapshotSimplemocks(simpleMockResourceWatched, nil) + err = simpleMockResourceClient.Delete(simpleMockResource1a.GetMetadata().Namespace, simpleMockResource1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = simpleMockResourceClient.Delete(simpleMockResource1b.GetMetadata().Namespace, simpleMockResource1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + simpleMockResourceNotWatched := SimpleMockResourceList{simpleMockResource1a, simpleMockResource1b} + assertSnapshotSimplemocks(nil, simpleMockResourceNotWatched) + + deleteNamespaces(ctx, kube, namespace1, namespace2) + + getNewNamespaces1and2() + createNamespaces(ctx, kube, namespace1, namespace2) + + /* + MockResource + */ + assertSnapshotMocks := func(expectMocks MockResourceList, unexpectMocks MockResourceList) { + drain: + for { + select { + case snap = <-snapshots: + for _, expected := range expectMocks { + if _, err := snap.Mocks.Find(expected.GetMetadata().Ref().Strings()); err != nil { + continue drain + } + } + for _, unexpected := range unexpectMocks { + if _, err := snap.Mocks.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + continue drain + } + } + break drain + case err := <-errs: + Expect(err).NotTo(HaveOccurred()) + case <-time.After(time.Second * 10): + nsList1, _ := mockResourceClient.List(namespace1, clients.ListOpts{}) + nsList2, _ := mockResourceClient.List(namespace2, clients.ListOpts{}) + combined := append(nsList1, nsList2...) + Fail("expected final snapshot before 10 seconds. 
expected " + log.Sprintf("%v", combined)) + } + } + } + + mockResource1a, err := mockResourceClient.Write(NewMockResource(namespace1, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockResource1b, err := mockResourceClient.Write(NewMockResource(namespace2, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockResourceWatched := MockResourceList{mockResource1a, mockResource1b} + assertSnapshotMocks(mockResourceWatched, nil) + err = mockResourceClient.Delete(mockResource1a.GetMetadata().Namespace, mockResource1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = mockResourceClient.Delete(mockResource1b.GetMetadata().Namespace, mockResource1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + mockResourceNotWatched := MockResourceList{mockResource1a, mockResource1b} + assertSnapshotMocks(nil, mockResourceNotWatched) + + deleteNamespaces(ctx, kube, namespace1, namespace2) + + getNewNamespaces1and2() + createNamespaces(ctx, kube, namespace1, namespace2) + + /* + FakeResource + */ + assertSnapshotFakes := func(expectFakes FakeResourceList, unexpectFakes FakeResourceList) { + drain: + for { + select { + case snap = <-snapshots: + for _, expected := range expectFakes { + if _, err := snap.Fakes.Find(expected.GetMetadata().Ref().Strings()); err != nil { + continue drain + } + } + for _, unexpected := range unexpectFakes { + if _, err := snap.Fakes.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + continue drain + } + } + break drain + case err := <-errs: + Expect(err).NotTo(HaveOccurred()) + case <-time.After(time.Second * 10): + nsList1, _ := fakeResourceClient.List(namespace1, clients.ListOpts{}) + nsList2, _ := fakeResourceClient.List(namespace2, clients.ListOpts{}) + combined := append(nsList1, nsList2...) + Fail("expected final snapshot before 10 seconds. 
expected " + log.Sprintf("%v", combined)) + } + } + } + + fakeResource1a, err := fakeResourceClient.Write(NewFakeResource(namespace1, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + fakeResource1b, err := fakeResourceClient.Write(NewFakeResource(namespace2, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + fakeResourceWatched := FakeResourceList{fakeResource1a, fakeResource1b} + assertSnapshotFakes(fakeResourceWatched, nil) + err = fakeResourceClient.Delete(fakeResource1a.GetMetadata().Namespace, fakeResource1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = fakeResourceClient.Delete(fakeResource1b.GetMetadata().Namespace, fakeResource1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + fakeResourceNotWatched := FakeResourceList{fakeResource1a, fakeResource1b} + assertSnapshotFakes(nil, fakeResourceNotWatched) + + deleteNamespaces(ctx, kube, namespace1, namespace2) + + getNewNamespaces1and2() + createNamespaces(ctx, kube, namespace1, namespace2) + + /* + AnotherMockResource + */ + assertSnapshotAnothermockresources := func(expectAnothermockresources AnotherMockResourceList, unexpectAnothermockresources AnotherMockResourceList) { + drain: + for { + select { + case snap = <-snapshots: + for _, expected := range expectAnothermockresources { + if _, err := snap.Anothermockresources.Find(expected.GetMetadata().Ref().Strings()); err != nil { + continue drain + } + } + for _, unexpected := range unexpectAnothermockresources { + if _, err := snap.Anothermockresources.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + continue drain + } + } + break drain + case err := <-errs: + Expect(err).NotTo(HaveOccurred()) + case <-time.After(time.Second * 10): + nsList1, _ := anotherMockResourceClient.List(namespace1, clients.ListOpts{}) + nsList2, _ := anotherMockResourceClient.List(namespace2, clients.ListOpts{}) + combined := append(nsList1, nsList2...) + Fail("expected final snapshot before 10 seconds. 
expected " + log.Sprintf("%v", combined)) + } + } + } + + anotherMockResource1a, err := anotherMockResourceClient.Write(NewAnotherMockResource(namespace1, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + anotherMockResource1b, err := anotherMockResourceClient.Write(NewAnotherMockResource(namespace2, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + anotherMockResourceWatched := AnotherMockResourceList{anotherMockResource1a, anotherMockResource1b} + assertSnapshotAnothermockresources(anotherMockResourceWatched, nil) + err = anotherMockResourceClient.Delete(anotherMockResource1a.GetMetadata().Namespace, anotherMockResource1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = anotherMockResourceClient.Delete(anotherMockResource1b.GetMetadata().Namespace, anotherMockResource1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + anotherMockResourceNotWatched := AnotherMockResourceList{anotherMockResource1a, anotherMockResource1b} + assertSnapshotAnothermockresources(nil, anotherMockResourceNotWatched) + + deleteNamespaces(ctx, kube, namespace1, namespace2) + + getNewNamespaces1and2() + createNamespaces(ctx, kube, namespace1, namespace2) + + /* + ClusterResource + */ + assertSnapshotClusterresources := func(expectClusterresources ClusterResourceList, unexpectClusterresources ClusterResourceList) { + drain: + for { + select { + case snap = <-snapshots: + for _, expected := range expectClusterresources { + if _, err := snap.Clusterresources.Find(expected.GetMetadata().Ref().Strings()); err != nil { + continue drain + } + } + for _, unexpected := range unexpectClusterresources { + if _, err := snap.Clusterresources.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + continue drain + } + } + break drain + case err := <-errs: + Expect(err).NotTo(HaveOccurred()) + case <-time.After(time.Second * 10): + combined, _ := clusterResourceClient.List(clients.ListOpts{}) + Fail("expected final snapshot before 10 seconds. 
expected " + log.Sprintf("%v", combined)) + } + } + } - /* - MockResource - */ + clusterResource1a, err := clusterResourceClient.Write(NewClusterResource(namespace1, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + clusterResource1b, err := clusterResourceClient.Write(NewClusterResource(namespace2, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + clusterResourceWatched := ClusterResourceList{clusterResource1a, clusterResource1b} + assertSnapshotClusterresources(clusterResourceWatched, nil) + err = clusterResourceClient.Delete(clusterResource1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = clusterResourceClient.Delete(clusterResource1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + clusterResourceNotWatched := ClusterResourceList{clusterResource1a, clusterResource1b} + assertSnapshotClusterresources(nil, clusterResourceNotWatched) + + deleteNamespaces(ctx, kube, namespace1, namespace2) + + getNewNamespaces1and2() + createNamespaces(ctx, kube, namespace1, namespace2) + + /* + MockCustomType + */ + assertSnapshotmcts := func(expectmcts MockCustomTypeList, unexpectmcts MockCustomTypeList) { + drain: + for { + select { + case snap = <-snapshots: + for _, expected := range expectmcts { + if _, err := snap.Mcts.Find(expected.GetMetadata().Ref().Strings()); err != nil { + continue drain + } + } + for _, unexpected := range unexpectmcts { + if _, err := snap.Mcts.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + continue drain + } + } + break drain + case err := <-errs: + Expect(err).NotTo(HaveOccurred()) + case <-time.After(time.Second * 10): + nsList1, _ := mockCustomTypeClient.List(namespace1, clients.ListOpts{}) + nsList2, _ := mockCustomTypeClient.List(namespace2, clients.ListOpts{}) + combined := append(nsList1, nsList2...) + Fail("expected final snapshot before 10 seconds. 
expected " + log.Sprintf("%v", combined)) + } + } + } - assertSnapshotMocks := func(expectMocks MockResourceList, unexpectMocks MockResourceList) { - drain: - for { - select { - case snap = <-snapshots: - for _, expected := range expectMocks { - if _, err := snap.Mocks.Find(expected.GetMetadata().Ref().Strings()); err != nil { - continue drain + mockCustomType1a, err := mockCustomTypeClient.Write(NewMockCustomType(namespace1, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockCustomType1b, err := mockCustomTypeClient.Write(NewMockCustomType(namespace2, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockCustomTypeWatched := MockCustomTypeList{mockCustomType1a, mockCustomType1b} + assertSnapshotmcts(mockCustomTypeWatched, nil) + err = mockCustomTypeClient.Delete(mockCustomType1a.GetMetadata().Namespace, mockCustomType1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = mockCustomTypeClient.Delete(mockCustomType1b.GetMetadata().Namespace, mockCustomType1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + mockCustomTypeNotWatched := MockCustomTypeList{mockCustomType1a, mockCustomType1b} + assertSnapshotmcts(nil, mockCustomTypeNotWatched) + + deleteNamespaces(ctx, kube, namespace1, namespace2) + + getNewNamespaces1and2() + createNamespaces(ctx, kube, namespace1, namespace2) + + /* + MockCustomSpecHashType + */ + assertSnapshotmcshts := func(expectmcshts MockCustomSpecHashTypeList, unexpectmcshts MockCustomSpecHashTypeList) { + drain: + for { + select { + case snap = <-snapshots: + for _, expected := range expectmcshts { + if _, err := snap.Mcshts.Find(expected.GetMetadata().Ref().Strings()); err != nil { + continue drain + } + } + for _, unexpected := range unexpectmcshts { + if _, err := snap.Mcshts.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + continue drain + } } + break drain + case err := <-errs: + Expect(err).NotTo(HaveOccurred()) + case <-time.After(time.Second * 10): + nsList1, _ := mockCustomSpecHashTypeClient.List(namespace1, clients.ListOpts{}) + nsList2, _ := mockCustomSpecHashTypeClient.List(namespace2, clients.ListOpts{}) + combined := append(nsList1, nsList2...) + Fail("expected final snapshot before 10 seconds. 
expected " + log.Sprintf("%v", combined)) } - for _, unexpected := range unexpectMocks { - if _, err := snap.Mocks.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { - continue drain + } + } + + mockCustomSpecHashType1a, err := mockCustomSpecHashTypeClient.Write(NewMockCustomSpecHashType(namespace1, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockCustomSpecHashType1b, err := mockCustomSpecHashTypeClient.Write(NewMockCustomSpecHashType(namespace2, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockCustomSpecHashTypeWatched := MockCustomSpecHashTypeList{mockCustomSpecHashType1a, mockCustomSpecHashType1b} + assertSnapshotmcshts(mockCustomSpecHashTypeWatched, nil) + err = mockCustomSpecHashTypeClient.Delete(mockCustomSpecHashType1a.GetMetadata().Namespace, mockCustomSpecHashType1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = mockCustomSpecHashTypeClient.Delete(mockCustomSpecHashType1b.GetMetadata().Namespace, mockCustomSpecHashType1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + mockCustomSpecHashTypeNotWatched := MockCustomSpecHashTypeList{mockCustomSpecHashType1a, mockCustomSpecHashType1b} + assertSnapshotmcshts(nil, mockCustomSpecHashTypeNotWatched) + + deleteNamespaces(ctx, kube, namespace1, namespace2) + + getNewNamespaces1and2() + createNamespaces(ctx, kube, namespace1, namespace2) + + /* + Pod + */ + assertSnapshotpods := func(expectpods github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList, unexpectpods github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList) { + drain: + for { + select { + case snap = <-snapshots: + for _, expected := range expectpods { + if _, err := snap.Pods.Find(expected.GetMetadata().Ref().Strings()); err != nil { + continue drain + } } + for _, unexpected := range unexpectpods { + if _, err := snap.Pods.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + continue drain + } + } + break drain + case err := <-errs: + Expect(err).NotTo(HaveOccurred()) + case <-time.After(time.Second * 10): + nsList1, _ := podClient.List(namespace1, clients.ListOpts{}) + nsList2, _ := podClient.List(namespace2, clients.ListOpts{}) + combined := append(nsList1, nsList2...) + Fail("expected final snapshot before 10 seconds. expected " + log.Sprintf("%v", combined)) } - break drain - case err := <-errs: - Expect(err).NotTo(HaveOccurred()) - case <-time.After(time.Second * 10): - nsList1, _ := mockResourceClient.List(namespace1, clients.ListOpts{}) - nsList2, _ := mockResourceClient.List(namespace2, clients.ListOpts{}) - combined := append(nsList1, nsList2...) - Fail("expected final snapshot before 10 seconds. 
expected " + log.Sprintf("%v", combined)) } } - } - mockResource1a, err := mockResourceClient.Write(NewMockResource(namespace1, name1), clients.WriteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - mockResource1b, err := mockResourceClient.Write(NewMockResource(namespace2, name1), clients.WriteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - assertSnapshotMocks(MockResourceList{mockResource1a, mockResource1b}, nil) - mockResource2a, err := mockResourceClient.Write(NewMockResource(namespace1, name2), clients.WriteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - mockResource2b, err := mockResourceClient.Write(NewMockResource(namespace2, name2), clients.WriteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) + pod1a, err := podClient.Write(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.NewPod(namespace1, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + pod1b, err := podClient.Write(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.NewPod(namespace2, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + podWatched := github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList{pod1a, pod1b} + assertSnapshotpods(podWatched, nil) + err = podClient.Delete(pod1a.GetMetadata().Namespace, pod1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = podClient.Delete(pod1b.GetMetadata().Namespace, pod1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) - assertSnapshotMocks(MockResourceList{mockResource1a, mockResource1b, mockResource2a, mockResource2b}, nil) + podNotWatched := github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList{pod1a, pod1b} + assertSnapshotpods(nil, podNotWatched) - err = mockResourceClient.Delete(mockResource2a.GetMetadata().Namespace, mockResource2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - err = mockResourceClient.Delete(mockResource2b.GetMetadata().Namespace, mockResource2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) + deleteNamespaces(ctx, kube, namespace1, namespace2) - assertSnapshotMocks(MockResourceList{mockResource1a, mockResource1b}, MockResourceList{mockResource2a, mockResource2b}) + getNewNamespaces1and2() + createNamespaces(ctx, kube, namespace1, namespace2) + }) - err = mockResourceClient.Delete(mockResource1a.GetMetadata().Namespace, mockResource1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - err = mockResourceClient.Delete(mockResource1b.GetMetadata().Namespace, mockResource1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) + It("Should not contain resources from a deleted namespace, that is filtered", func() { + ctx := context.Background() + err := emitter.Register() + Expect(err).NotTo(HaveOccurred()) + + snapshots, errs, err := emitter.Snapshots([]string{""}, clients.WatchOpts{ + Ctx: ctx, + RefreshRate: time.Second, + ExpressionSelector: labelExpression1, + }) + Expect(err).NotTo(HaveOccurred()) + + var snap *TestingSnapshot + + /* + SimpleMockResource + */ + + assertSnapshotSimplemocks := func(expectSimplemocks SimpleMockResourceList, unexpectSimplemocks SimpleMockResourceList) { + drain: + for { + select { + case snap = <-snapshots: + for _, expected := range expectSimplemocks { + if _, err := snap.Simplemocks.Find(expected.GetMetadata().Ref().Strings()); err != nil { + continue drain + 
} + } + for _, unexpected := range unexpectSimplemocks { + if _, err := snap.Simplemocks.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + continue drain + } + } + break drain + case err := <-errs: + Expect(err).NotTo(HaveOccurred()) + case <-time.After(time.Second * 10): + nsList1, _ := simpleMockResourceClient.List(namespace1, clients.ListOpts{}) + nsList2, _ := simpleMockResourceClient.List(namespace2, clients.ListOpts{}) + combined := append(nsList1, nsList2...) + Fail("expected final snapshot before 10 seconds. expected " + log.Sprintf("%v", combined)) + } + } + } - assertSnapshotMocks(nil, MockResourceList{mockResource1a, mockResource1b, mockResource2a, mockResource2b}) + // create namespaces + createNamespaceWithLabel(ctx, kube, namespace3, labels1) + createNamespaceWithLabel(ctx, kube, namespace4, labels1) - /* - FakeResource - */ + simpleMockResource2a, err := simpleMockResourceClient.Write(NewSimpleMockResource(namespace3, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + simpleMockResource2b, err := simpleMockResourceClient.Write(NewSimpleMockResource(namespace4, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + simpleMockResourceNotWatched := SimpleMockResourceList{} + simpleMockResourceWatched := SimpleMockResourceList{simpleMockResource2a, simpleMockResource2b} + assertSnapshotSimplemocks(simpleMockResourceWatched, simpleMockResourceNotWatched) - assertSnapshotFakes := func(expectFakes FakeResourceList, unexpectFakes FakeResourceList) { - drain: - for { - select { - case snap = <-snapshots: - for _, expected := range expectFakes { - if _, err := snap.Fakes.Find(expected.GetMetadata().Ref().Strings()); err != nil { - continue drain + deleteNamespaces(ctx, kube, namespace3) + + simpleMockResourceWatched = SimpleMockResourceList{simpleMockResource2b} + simpleMockResourceNotWatched = append(simpleMockResourceNotWatched, simpleMockResource2a) + assertSnapshotSimplemocks(simpleMockResourceWatched, simpleMockResourceNotWatched) + + for _, r := range simpleMockResourceWatched { + err = simpleMockResourceClient.Delete(r.GetMetadata().Namespace, r.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + simpleMockResourceNotWatched = append(simpleMockResourceNotWatched, r) + } + assertSnapshotSimplemocks(nil, simpleMockResourceNotWatched) + + deleteNamespaces(ctx, kube, namespace4) + getNewNamespaces() + + /* + MockResource + */ + + assertSnapshotMocks := func(expectMocks MockResourceList, unexpectMocks MockResourceList) { + drain: + for { + select { + case snap = <-snapshots: + for _, expected := range expectMocks { + if _, err := snap.Mocks.Find(expected.GetMetadata().Ref().Strings()); err != nil { + continue drain + } } - } - for _, unexpected := range unexpectFakes { - if _, err := snap.Fakes.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { - continue drain + for _, unexpected := range unexpectMocks { + if _, err := snap.Mocks.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + continue drain + } } + break drain + case err := <-errs: + Expect(err).NotTo(HaveOccurred()) + case <-time.After(time.Second * 10): + nsList1, _ := mockResourceClient.List(namespace1, clients.ListOpts{}) + nsList2, _ := mockResourceClient.List(namespace2, clients.ListOpts{}) + combined := append(nsList1, nsList2...) + Fail("expected final snapshot before 10 seconds. 
expected " + log.Sprintf("%v", combined)) } - break drain - case err := <-errs: - Expect(err).NotTo(HaveOccurred()) - case <-time.After(time.Second * 10): - nsList1, _ := fakeResourceClient.List(namespace1, clients.ListOpts{}) - nsList2, _ := fakeResourceClient.List(namespace2, clients.ListOpts{}) - combined := append(nsList1, nsList2...) - Fail("expected final snapshot before 10 seconds. expected " + log.Sprintf("%v", combined)) } } - } - fakeResource1a, err := fakeResourceClient.Write(NewFakeResource(namespace1, name1), clients.WriteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - fakeResource1b, err := fakeResourceClient.Write(NewFakeResource(namespace2, name1), clients.WriteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - assertSnapshotFakes(FakeResourceList{fakeResource1a, fakeResource1b}, nil) - fakeResource2a, err := fakeResourceClient.Write(NewFakeResource(namespace1, name2), clients.WriteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - fakeResource2b, err := fakeResourceClient.Write(NewFakeResource(namespace2, name2), clients.WriteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) + // create namespaces + createNamespaceWithLabel(ctx, kube, namespace3, labels1) + createNamespaceWithLabel(ctx, kube, namespace4, labels1) - assertSnapshotFakes(FakeResourceList{fakeResource1a, fakeResource1b, fakeResource2a, fakeResource2b}, nil) + mockResource2a, err := mockResourceClient.Write(NewMockResource(namespace3, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockResource2b, err := mockResourceClient.Write(NewMockResource(namespace4, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockResourceNotWatched := MockResourceList{} + mockResourceWatched := MockResourceList{mockResource2a, mockResource2b} + assertSnapshotMocks(mockResourceWatched, mockResourceNotWatched) - err = fakeResourceClient.Delete(fakeResource2a.GetMetadata().Namespace, fakeResource2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - err = fakeResourceClient.Delete(fakeResource2b.GetMetadata().Namespace, fakeResource2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) + deleteNamespaces(ctx, kube, namespace3) - assertSnapshotFakes(FakeResourceList{fakeResource1a, fakeResource1b}, FakeResourceList{fakeResource2a, fakeResource2b}) + mockResourceWatched = MockResourceList{mockResource2b} + mockResourceNotWatched = append(mockResourceNotWatched, mockResource2a) + assertSnapshotMocks(mockResourceWatched, mockResourceNotWatched) - err = fakeResourceClient.Delete(fakeResource1a.GetMetadata().Namespace, fakeResource1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - err = fakeResourceClient.Delete(fakeResource1b.GetMetadata().Namespace, fakeResource1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) + for _, r := range mockResourceWatched { + err = mockResourceClient.Delete(r.GetMetadata().Namespace, r.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockResourceNotWatched = append(mockResourceNotWatched, r) + } + assertSnapshotMocks(nil, mockResourceNotWatched) + + deleteNamespaces(ctx, kube, namespace4) + getNewNamespaces() + + /* + FakeResource + */ + + assertSnapshotFakes := func(expectFakes FakeResourceList, unexpectFakes FakeResourceList) { + drain: + for { + select { + case snap = <-snapshots: + for _, expected := range expectFakes { + if _, err := 
snap.Fakes.Find(expected.GetMetadata().Ref().Strings()); err != nil { + continue drain + } + } + for _, unexpected := range unexpectFakes { + if _, err := snap.Fakes.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + continue drain + } + } + break drain + case err := <-errs: + Expect(err).NotTo(HaveOccurred()) + case <-time.After(time.Second * 10): + nsList1, _ := fakeResourceClient.List(namespace1, clients.ListOpts{}) + nsList2, _ := fakeResourceClient.List(namespace2, clients.ListOpts{}) + combined := append(nsList1, nsList2...) + Fail("expected final snapshot before 10 seconds. expected " + log.Sprintf("%v", combined)) + } + } + } - assertSnapshotFakes(nil, FakeResourceList{fakeResource1a, fakeResource1b, fakeResource2a, fakeResource2b}) + // create namespaces + createNamespaceWithLabel(ctx, kube, namespace3, labels1) + createNamespaceWithLabel(ctx, kube, namespace4, labels1) - /* - AnotherMockResource - */ + fakeResource2a, err := fakeResourceClient.Write(NewFakeResource(namespace3, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + fakeResource2b, err := fakeResourceClient.Write(NewFakeResource(namespace4, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + fakeResourceNotWatched := FakeResourceList{} + fakeResourceWatched := FakeResourceList{fakeResource2a, fakeResource2b} + assertSnapshotFakes(fakeResourceWatched, fakeResourceNotWatched) - assertSnapshotAnothermockresources := func(expectAnothermockresources AnotherMockResourceList, unexpectAnothermockresources AnotherMockResourceList) { - drain: - for { - select { - case snap = <-snapshots: - for _, expected := range expectAnothermockresources { - if _, err := snap.Anothermockresources.Find(expected.GetMetadata().Ref().Strings()); err != nil { - continue drain + deleteNamespaces(ctx, kube, namespace3) + + fakeResourceWatched = FakeResourceList{fakeResource2b} + fakeResourceNotWatched = append(fakeResourceNotWatched, fakeResource2a) + assertSnapshotFakes(fakeResourceWatched, fakeResourceNotWatched) + + for _, r := range fakeResourceWatched { + err = fakeResourceClient.Delete(r.GetMetadata().Namespace, r.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + fakeResourceNotWatched = append(fakeResourceNotWatched, r) + } + assertSnapshotFakes(nil, fakeResourceNotWatched) + + deleteNamespaces(ctx, kube, namespace4) + getNewNamespaces() + + /* + AnotherMockResource + */ + + assertSnapshotAnothermockresources := func(expectAnothermockresources AnotherMockResourceList, unexpectAnothermockresources AnotherMockResourceList) { + drain: + for { + select { + case snap = <-snapshots: + for _, expected := range expectAnothermockresources { + if _, err := snap.Anothermockresources.Find(expected.GetMetadata().Ref().Strings()); err != nil { + continue drain + } } - } - for _, unexpected := range unexpectAnothermockresources { - if _, err := snap.Anothermockresources.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { - continue drain + for _, unexpected := range unexpectAnothermockresources { + if _, err := snap.Anothermockresources.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + continue drain + } } + break drain + case err := <-errs: + Expect(err).NotTo(HaveOccurred()) + case <-time.After(time.Second * 10): + nsList1, _ := anotherMockResourceClient.List(namespace1, clients.ListOpts{}) + nsList2, _ := anotherMockResourceClient.List(namespace2, clients.ListOpts{}) + combined := append(nsList1, nsList2...) 
+ Fail("expected final snapshot before 10 seconds. expected " + log.Sprintf("%v", combined)) } - break drain - case err := <-errs: - Expect(err).NotTo(HaveOccurred()) - case <-time.After(time.Second * 10): - nsList1, _ := anotherMockResourceClient.List(namespace1, clients.ListOpts{}) - nsList2, _ := anotherMockResourceClient.List(namespace2, clients.ListOpts{}) - combined := append(nsList1, nsList2...) - Fail("expected final snapshot before 10 seconds. expected " + log.Sprintf("%v", combined)) } } - } - anotherMockResource1a, err := anotherMockResourceClient.Write(NewAnotherMockResource(namespace1, name1), clients.WriteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - anotherMockResource1b, err := anotherMockResourceClient.Write(NewAnotherMockResource(namespace2, name1), clients.WriteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - assertSnapshotAnothermockresources(AnotherMockResourceList{anotherMockResource1a, anotherMockResource1b}, nil) - anotherMockResource2a, err := anotherMockResourceClient.Write(NewAnotherMockResource(namespace1, name2), clients.WriteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - anotherMockResource2b, err := anotherMockResourceClient.Write(NewAnotherMockResource(namespace2, name2), clients.WriteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) + // create namespaces + createNamespaceWithLabel(ctx, kube, namespace3, labels1) + createNamespaceWithLabel(ctx, kube, namespace4, labels1) - assertSnapshotAnothermockresources(AnotherMockResourceList{anotherMockResource1a, anotherMockResource1b, anotherMockResource2a, anotherMockResource2b}, nil) + anotherMockResource2a, err := anotherMockResourceClient.Write(NewAnotherMockResource(namespace3, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + anotherMockResource2b, err := anotherMockResourceClient.Write(NewAnotherMockResource(namespace4, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + anotherMockResourceNotWatched := AnotherMockResourceList{} + anotherMockResourceWatched := AnotherMockResourceList{anotherMockResource2a, anotherMockResource2b} + assertSnapshotAnothermockresources(anotherMockResourceWatched, anotherMockResourceNotWatched) - err = anotherMockResourceClient.Delete(anotherMockResource2a.GetMetadata().Namespace, anotherMockResource2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - err = anotherMockResourceClient.Delete(anotherMockResource2b.GetMetadata().Namespace, anotherMockResource2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) + deleteNamespaces(ctx, kube, namespace3) - assertSnapshotAnothermockresources(AnotherMockResourceList{anotherMockResource1a, anotherMockResource1b}, AnotherMockResourceList{anotherMockResource2a, anotherMockResource2b}) + anotherMockResourceWatched = AnotherMockResourceList{anotherMockResource2b} + anotherMockResourceNotWatched = append(anotherMockResourceNotWatched, anotherMockResource2a) + assertSnapshotAnothermockresources(anotherMockResourceWatched, anotherMockResourceNotWatched) - err = anotherMockResourceClient.Delete(anotherMockResource1a.GetMetadata().Namespace, anotherMockResource1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - err = anotherMockResourceClient.Delete(anotherMockResource1b.GetMetadata().Namespace, anotherMockResource1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) + for _, r := range anotherMockResourceWatched { + err = 
anotherMockResourceClient.Delete(r.GetMetadata().Namespace, r.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + anotherMockResourceNotWatched = append(anotherMockResourceNotWatched, r) + } + assertSnapshotAnothermockresources(nil, anotherMockResourceNotWatched) + + deleteNamespaces(ctx, kube, namespace4) + getNewNamespaces() + + /* + ClusterResource + */ + + assertSnapshotClusterresources := func(expectClusterresources ClusterResourceList, unexpectClusterresources ClusterResourceList) { + drain: + for { + select { + case snap = <-snapshots: + for _, expected := range expectClusterresources { + if _, err := snap.Clusterresources.Find(expected.GetMetadata().Ref().Strings()); err != nil { + continue drain + } + } + for _, unexpected := range unexpectClusterresources { + if _, err := snap.Clusterresources.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + continue drain + } + } + break drain + case err := <-errs: + Expect(err).NotTo(HaveOccurred()) + case <-time.After(time.Second * 10): + combined, _ := clusterResourceClient.List(clients.ListOpts{}) + Fail("expected final snapshot before 10 seconds. expected " + log.Sprintf("%v", combined)) + } + } + } - assertSnapshotAnothermockresources(nil, AnotherMockResourceList{anotherMockResource1a, anotherMockResource1b, anotherMockResource2a, anotherMockResource2b}) + // create namespaces + createNamespaceWithLabel(ctx, kube, namespace3, labels1) + createNamespaceWithLabel(ctx, kube, namespace4, labels1) - /* - ClusterResource - */ + clusterResource2a, err := clusterResourceClient.Write(NewClusterResource(namespace3, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + clusterResourceNotWatched := ClusterResourceList{} + clusterResourceWatched := ClusterResourceList{clusterResource2a} + assertSnapshotClusterresources(clusterResourceWatched, nil) - assertSnapshotClusterresources := func(expectClusterresources ClusterResourceList, unexpectClusterresources ClusterResourceList) { - drain: - for { - select { - case snap = <-snapshots: - for _, expected := range expectClusterresources { - if _, err := snap.Clusterresources.Find(expected.GetMetadata().Ref().Strings()); err != nil { - continue drain + deleteNamespaces(ctx, kube, namespace3) + + for _, r := range clusterResourceWatched { + err = clusterResourceClient.Delete(r.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + clusterResourceNotWatched = append(clusterResourceNotWatched, r) + } + assertSnapshotClusterresources(nil, clusterResourceNotWatched) + + deleteNamespaces(ctx, kube, namespace4) + getNewNamespaces() + + /* + MockCustomType + */ + + assertSnapshotmcts := func(expectmcts MockCustomTypeList, unexpectmcts MockCustomTypeList) { + drain: + for { + select { + case snap = <-snapshots: + for _, expected := range expectmcts { + if _, err := snap.Mcts.Find(expected.GetMetadata().Ref().Strings()); err != nil { + continue drain + } } - } - for _, unexpected := range unexpectClusterresources { - if _, err := snap.Clusterresources.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { - continue drain + for _, unexpected := range unexpectmcts { + if _, err := snap.Mcts.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + continue drain + } } + break drain + case err := <-errs: + Expect(err).NotTo(HaveOccurred()) + case <-time.After(time.Second * 10): + nsList1, _ := mockCustomTypeClient.List(namespace1, clients.ListOpts{}) + nsList2, _ := mockCustomTypeClient.List(namespace2, 
clients.ListOpts{}) + combined := append(nsList1, nsList2...) + Fail("expected final snapshot before 10 seconds. expected " + log.Sprintf("%v", combined)) } - break drain - case err := <-errs: - Expect(err).NotTo(HaveOccurred()) - case <-time.After(time.Second * 10): - combined, _ := clusterResourceClient.List(clients.ListOpts{}) - Fail("expected final snapshot before 10 seconds. expected " + log.Sprintf("%v", combined)) } } - } - clusterResource1a, err := clusterResourceClient.Write(NewClusterResource(namespace1, name1), clients.WriteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - assertSnapshotClusterresources(ClusterResourceList{clusterResource1a}, nil) - clusterResource2a, err := clusterResourceClient.Write(NewClusterResource(namespace1, name2), clients.WriteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) + // create namespaces + createNamespaceWithLabel(ctx, kube, namespace3, labels1) + createNamespaceWithLabel(ctx, kube, namespace4, labels1) - assertSnapshotClusterresources(ClusterResourceList{clusterResource1a, clusterResource2a}, nil) + mockCustomType2a, err := mockCustomTypeClient.Write(NewMockCustomType(namespace3, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockCustomType2b, err := mockCustomTypeClient.Write(NewMockCustomType(namespace4, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockCustomTypeNotWatched := MockCustomTypeList{} + mockCustomTypeWatched := MockCustomTypeList{mockCustomType2a, mockCustomType2b} + assertSnapshotmcts(mockCustomTypeWatched, mockCustomTypeNotWatched) - err = clusterResourceClient.Delete(clusterResource2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) + deleteNamespaces(ctx, kube, namespace3) - assertSnapshotClusterresources(ClusterResourceList{clusterResource1a}, ClusterResourceList{clusterResource2a}) + mockCustomTypeWatched = MockCustomTypeList{mockCustomType2b} + mockCustomTypeNotWatched = append(mockCustomTypeNotWatched, mockCustomType2a) + assertSnapshotmcts(mockCustomTypeWatched, mockCustomTypeNotWatched) - err = clusterResourceClient.Delete(clusterResource1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) + for _, r := range mockCustomTypeWatched { + err = mockCustomTypeClient.Delete(r.GetMetadata().Namespace, r.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockCustomTypeNotWatched = append(mockCustomTypeNotWatched, r) + } + assertSnapshotmcts(nil, mockCustomTypeNotWatched) + + deleteNamespaces(ctx, kube, namespace4) + getNewNamespaces() + + /* + MockCustomSpecHashType + */ + + assertSnapshotmcshts := func(expectmcshts MockCustomSpecHashTypeList, unexpectmcshts MockCustomSpecHashTypeList) { + drain: + for { + select { + case snap = <-snapshots: + for _, expected := range expectmcshts { + if _, err := snap.Mcshts.Find(expected.GetMetadata().Ref().Strings()); err != nil { + continue drain + } + } + for _, unexpected := range unexpectmcshts { + if _, err := snap.Mcshts.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + continue drain + } + } + break drain + case err := <-errs: + Expect(err).NotTo(HaveOccurred()) + case <-time.After(time.Second * 10): + nsList1, _ := mockCustomSpecHashTypeClient.List(namespace1, clients.ListOpts{}) + nsList2, _ := mockCustomSpecHashTypeClient.List(namespace2, clients.ListOpts{}) + combined := append(nsList1, nsList2...) + Fail("expected final snapshot before 10 seconds. 
expected " + log.Sprintf("%v", combined)) + } + } + } - assertSnapshotClusterresources(nil, ClusterResourceList{clusterResource1a, clusterResource2a}) + // create namespaces + createNamespaceWithLabel(ctx, kube, namespace3, labels1) + createNamespaceWithLabel(ctx, kube, namespace4, labels1) - /* - MockCustomType - */ + mockCustomSpecHashType2a, err := mockCustomSpecHashTypeClient.Write(NewMockCustomSpecHashType(namespace3, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockCustomSpecHashType2b, err := mockCustomSpecHashTypeClient.Write(NewMockCustomSpecHashType(namespace4, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockCustomSpecHashTypeNotWatched := MockCustomSpecHashTypeList{} + mockCustomSpecHashTypeWatched := MockCustomSpecHashTypeList{mockCustomSpecHashType2a, mockCustomSpecHashType2b} + assertSnapshotmcshts(mockCustomSpecHashTypeWatched, mockCustomSpecHashTypeNotWatched) - assertSnapshotmcts := func(expectmcts MockCustomTypeList, unexpectmcts MockCustomTypeList) { - drain: - for { - select { - case snap = <-snapshots: - for _, expected := range expectmcts { - if _, err := snap.Mcts.Find(expected.GetMetadata().Ref().Strings()); err != nil { - continue drain + deleteNamespaces(ctx, kube, namespace3) + + mockCustomSpecHashTypeWatched = MockCustomSpecHashTypeList{mockCustomSpecHashType2b} + mockCustomSpecHashTypeNotWatched = append(mockCustomSpecHashTypeNotWatched, mockCustomSpecHashType2a) + assertSnapshotmcshts(mockCustomSpecHashTypeWatched, mockCustomSpecHashTypeNotWatched) + + for _, r := range mockCustomSpecHashTypeWatched { + err = mockCustomSpecHashTypeClient.Delete(r.GetMetadata().Namespace, r.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockCustomSpecHashTypeNotWatched = append(mockCustomSpecHashTypeNotWatched, r) + } + assertSnapshotmcshts(nil, mockCustomSpecHashTypeNotWatched) + + deleteNamespaces(ctx, kube, namespace4) + getNewNamespaces() + + /* + Pod + */ + + assertSnapshotpods := func(expectpods github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList, unexpectpods github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList) { + drain: + for { + select { + case snap = <-snapshots: + for _, expected := range expectpods { + if _, err := snap.Pods.Find(expected.GetMetadata().Ref().Strings()); err != nil { + continue drain + } } - } - for _, unexpected := range unexpectmcts { - if _, err := snap.Mcts.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { - continue drain + for _, unexpected := range unexpectpods { + if _, err := snap.Pods.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + continue drain + } } + break drain + case err := <-errs: + Expect(err).NotTo(HaveOccurred()) + case <-time.After(time.Second * 10): + nsList1, _ := podClient.List(namespace1, clients.ListOpts{}) + nsList2, _ := podClient.List(namespace2, clients.ListOpts{}) + combined := append(nsList1, nsList2...) + Fail("expected final snapshot before 10 seconds. expected " + log.Sprintf("%v", combined)) } - break drain - case err := <-errs: - Expect(err).NotTo(HaveOccurred()) - case <-time.After(time.Second * 10): - nsList1, _ := mockCustomTypeClient.List(namespace1, clients.ListOpts{}) - nsList2, _ := mockCustomTypeClient.List(namespace2, clients.ListOpts{}) - combined := append(nsList1, nsList2...) - Fail("expected final snapshot before 10 seconds. 
expected " + log.Sprintf("%v", combined)) } } - } - mockCustomType1a, err := mockCustomTypeClient.Write(NewMockCustomType(namespace1, name1), clients.WriteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - mockCustomType1b, err := mockCustomTypeClient.Write(NewMockCustomType(namespace2, name1), clients.WriteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - - assertSnapshotmcts(MockCustomTypeList{mockCustomType1a, mockCustomType1b}, nil) - mockCustomType2a, err := mockCustomTypeClient.Write(NewMockCustomType(namespace1, name2), clients.WriteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - mockCustomType2b, err := mockCustomTypeClient.Write(NewMockCustomType(namespace2, name2), clients.WriteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - assertSnapshotmcts(MockCustomTypeList{mockCustomType1a, mockCustomType1b, mockCustomType2a, mockCustomType2b}, nil) + // create namespaces + createNamespaceWithLabel(ctx, kube, namespace3, labels1) + createNamespaceWithLabel(ctx, kube, namespace4, labels1) - err = mockCustomTypeClient.Delete(mockCustomType2a.GetMetadata().Namespace, mockCustomType2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - err = mockCustomTypeClient.Delete(mockCustomType2b.GetMetadata().Namespace, mockCustomType2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) + pod2a, err := podClient.Write(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.NewPod(namespace3, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + pod2b, err := podClient.Write(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.NewPod(namespace4, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + podNotWatched := github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList{} + podWatched := github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList{pod2a, pod2b} + assertSnapshotpods(podWatched, podNotWatched) - assertSnapshotmcts(MockCustomTypeList{mockCustomType1a, mockCustomType1b}, MockCustomTypeList{mockCustomType2a, mockCustomType2b}) + deleteNamespaces(ctx, kube, namespace3) - err = mockCustomTypeClient.Delete(mockCustomType1a.GetMetadata().Namespace, mockCustomType1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - err = mockCustomTypeClient.Delete(mockCustomType1b.GetMetadata().Namespace, mockCustomType1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) + podWatched = github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList{pod2b} + podNotWatched = append(podNotWatched, pod2a) + assertSnapshotpods(podWatched, podNotWatched) - assertSnapshotmcts(nil, MockCustomTypeList{mockCustomType1a, mockCustomType1b, mockCustomType2a, mockCustomType2b}) + for _, r := range podWatched { + err = podClient.Delete(r.GetMetadata().Namespace, r.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + podNotWatched = append(podNotWatched, r) + } + assertSnapshotpods(nil, podNotWatched) - /* - Pod - */ + deleteNamespaces(ctx, kube, namespace4) + getNewNamespaces() + }) - assertSnapshotpods := func(expectpods github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList, unexpectpods github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList) { - drain: - for { - select { - case snap = <-snapshots: - for _, expected := range expectpods { - if _, err := 
snap.Pods.Find(expected.GetMetadata().Ref().Strings()); err != nil { - continue drain + It("should be able to return a resource from a deleted namespace, after the namespace is re-created", func() { + ctx := context.Background() + err := emitter.Register() + Expect(err).NotTo(HaveOccurred()) + + snapshots, errs, err := emitter.Snapshots([]string{""}, clients.WatchOpts{ + Ctx: ctx, + RefreshRate: time.Second, + ExpressionSelector: labelExpression1, + }) + Expect(err).NotTo(HaveOccurred()) + + var snap *TestingSnapshot + var previous *TestingSnapshot + + /* + MockResource + */ + assertSnapshotMocks := func(expectMocks MockResourceList, unexpectMocks MockResourceList) { + drain: + for { + select { + case snap = <-snapshots: + previous = snap + for _, expected := range expectMocks { + if _, err := snap.Mocks.Find(expected.GetMetadata().Ref().Strings()); err != nil { + continue drain + } + } + for _, unexpected := range unexpectMocks { + if _, err := snap.Mocks.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + continue drain + } } + break drain + case err := <-errs: + Expect(err).NotTo(HaveOccurred()) + case <-time.After(time.Second * 10): + var expectedResources map[string][]string + var unexpectedResource map[string][]string + + if previous != nil { + expectedResources = findNonMatchingResources(convertMocksToMetadataGetter(expectMocks), convertMocksToMetadataGetter(previous.Mocks)) + unexpectedResource = findMatchingResources(convertMocksToMetadataGetter(unexpectMocks), convertMocksToMetadataGetter(previous.Mocks)) + } else { + expectedResources = getMapOfResources(convertMocksToMetadataGetter(expectMocks)) + unexpectedResource = getMapOfResources(convertMocksToMetadataGetter(unexpectMocks)) + } + getList := func(ns string) ([]metadataGetter, error) { + l, err := mockResourceClient.List(ns, clients.ListOpts{}) + return convertMocksToMetadataGetter(l), err + } + namespaceResources := getMapOfNamespaceResources(getList) + Fail(fmt.Sprintf("expected final snapshot before 10 seconds. 
expected \nExpected:\n%#v\n\nUnexpected:\n%#v\n\nnamespaces:\n%#v", expectedResources, unexpectedResource, namespaceResources)) } - for _, unexpected := range unexpectpods { - if _, err := snap.Pods.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { - continue drain + } + } + + createNamespaceWithLabel(ctx, kube, namespace3, labels1) + + mockResource1a, err := mockResourceClient.Write(NewMockResource(namespace3, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + assertSnapshotMocks(MockResourceList{mockResource1a}, nil) + + deleteNamespaces(ctx, kube, namespace3) + Eventually(func() bool { + _, err = kube.CoreV1().Namespaces().Get(ctx, namespace3, metav1.GetOptions{}) + return apierrors.IsNotFound(err) + }, 15*time.Second, 1*time.Second).Should(BeTrue()) + createNamespaceWithLabel(ctx, kube, namespace3, labels1) + + mockResource2a, err := mockResourceClient.Write(NewMockResource(namespace3, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + assertSnapshotMocks(MockResourceList{mockResource2a}, MockResourceList{mockResource1a}) + + deleteNamespaces(ctx, kube, namespace3) + Eventually(func() bool { + _, err = kube.CoreV1().Namespaces().Get(ctx, namespace3, metav1.GetOptions{}) + return apierrors.IsNotFound(err) + }, 15*time.Second, 1*time.Second).Should(BeTrue()) + + /* + AnotherMockResource + */ + assertSnapshotAnothermockresources := func(expectAnothermockresources AnotherMockResourceList, unexpectAnothermockresources AnotherMockResourceList) { + drain: + for { + select { + case snap = <-snapshots: + previous = snap + for _, expected := range expectAnothermockresources { + if _, err := snap.Anothermockresources.Find(expected.GetMetadata().Ref().Strings()); err != nil { + continue drain + } } + for _, unexpected := range unexpectAnothermockresources { + if _, err := snap.Anothermockresources.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + continue drain + } + } + break drain + case err := <-errs: + Expect(err).NotTo(HaveOccurred()) + case <-time.After(time.Second * 10): + var expectedResources map[string][]string + var unexpectedResource map[string][]string + + if previous != nil { + expectedResources = findNonMatchingResources(convertAnothermockresourcesToMetadataGetter(expectAnothermockresources), convertAnothermockresourcesToMetadataGetter(previous.Anothermockresources)) + unexpectedResource = findMatchingResources(convertAnothermockresourcesToMetadataGetter(unexpectAnothermockresources), convertAnothermockresourcesToMetadataGetter(previous.Anothermockresources)) + } else { + expectedResources = getMapOfResources(convertAnothermockresourcesToMetadataGetter(expectAnothermockresources)) + unexpectedResource = getMapOfResources(convertAnothermockresourcesToMetadataGetter(unexpectAnothermockresources)) + } + getList := func(ns string) ([]metadataGetter, error) { + l, err := anotherMockResourceClient.List(ns, clients.ListOpts{}) + return convertAnothermockresourcesToMetadataGetter(l), err + } + namespaceResources := getMapOfNamespaceResources(getList) + Fail(fmt.Sprintf("expected final snapshot before 10 seconds. expected \nExpected:\n%#v\n\nUnexpected:\n%#v\n\nnamespaces:\n%#v", expectedResources, unexpectedResource, namespaceResources)) } - break drain - case err := <-errs: - Expect(err).NotTo(HaveOccurred()) - case <-time.After(time.Second * 10): - nsList1, _ := podClient.List(namespace1, clients.ListOpts{}) - nsList2, _ := podClient.List(namespace2, clients.ListOpts{}) - combined := append(nsList1, nsList2...) 
- Fail("expected final snapshot before 10 seconds. expected " + log.Sprintf("%v", combined)) } } - } - pod1a, err := podClient.Write(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.NewPod(namespace1, name1), clients.WriteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - pod1b, err := podClient.Write(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.NewPod(namespace2, name1), clients.WriteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - assertSnapshotpods(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList{pod1a, pod1b}, nil) - pod2a, err := podClient.Write(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.NewPod(namespace1, name2), clients.WriteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - pod2b, err := podClient.Write(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.NewPod(namespace2, name2), clients.WriteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) + createNamespaceWithLabel(ctx, kube, namespace3, labels1) - assertSnapshotpods(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList{pod1a, pod1b, pod2a, pod2b}, nil) + anotherMockResource1a, err := anotherMockResourceClient.Write(NewAnotherMockResource(namespace3, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + assertSnapshotAnothermockresources(AnotherMockResourceList{anotherMockResource1a}, nil) - err = podClient.Delete(pod2a.GetMetadata().Namespace, pod2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - err = podClient.Delete(pod2b.GetMetadata().Namespace, pod2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) + deleteNamespaces(ctx, kube, namespace3) + Eventually(func() bool { + _, err = kube.CoreV1().Namespaces().Get(ctx, namespace3, metav1.GetOptions{}) + return apierrors.IsNotFound(err) + }, 15*time.Second, 1*time.Second).Should(BeTrue()) + createNamespaceWithLabel(ctx, kube, namespace3, labels1) - assertSnapshotpods(github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList{pod1a, pod1b}, github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList{pod2a, pod2b}) + anotherMockResource2a, err := anotherMockResourceClient.Write(NewAnotherMockResource(namespace3, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + assertSnapshotAnothermockresources(AnotherMockResourceList{anotherMockResource2a}, AnotherMockResourceList{anotherMockResource1a}) - err = podClient.Delete(pod1a.GetMetadata().Namespace, pod1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - err = podClient.Delete(pod1b.GetMetadata().Namespace, pod1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) + deleteNamespaces(ctx, kube, namespace3) + Eventually(func() bool { + _, err = kube.CoreV1().Namespaces().Get(ctx, namespace3, metav1.GetOptions{}) + return apierrors.IsNotFound(err) + }, 15*time.Second, 1*time.Second).Should(BeTrue()) + + }) + }) - assertSnapshotpods(nil, github_com_solo_io_solo_kit_pkg_api_v1_resources_common_kubernetes.PodList{pod1a, pod1b, pod2a, pod2b}) + Context("use different resource namespace listers", func() { + BeforeEach(func() { + resourceNamespaceLister = namespace.NewKubeClientResourceNamespaceLister(kube) + emitter = NewTestingEmitter(simpleMockResourceClient, mockResourceClient, fakeResourceClient, anotherMockResourceClient, clusterResourceClient, mockCustomTypeClient, 
mockCustomSpecHashTypeClient, podClient, resourceNamespaceLister) + }) + + It("Should work with the Kube Client Namespace Lister", func() { + runNamespacedSelectorsWithWatchNamespaces() + }) }) + }) diff --git a/test/mocks/v1alpha1/fake_resource_client.sk.go b/test/mocks/v1alpha1/fake_resource_client.sk.go index 4d6e3e461..a7aadcdc4 100644 --- a/test/mocks/v1alpha1/fake_resource_client.sk.go +++ b/test/mocks/v1alpha1/fake_resource_client.sk.go @@ -19,6 +19,7 @@ type FakeResourceWatcher interface { type FakeResourceClient interface { BaseClient() clients.ResourceClient Register() error + RegisterNamespace(namespace string) error Read(namespace, name string, opts clients.ReadOpts) (*FakeResource, error) Write(resource *FakeResource, opts clients.WriteOpts) (*FakeResource, error) Delete(namespace, name string, opts clients.DeleteOpts) error @@ -59,6 +60,10 @@ func (client *fakeResourceClient) Register() error { return client.rc.Register() } +func (client *fakeResourceClient) RegisterNamespace(namespace string) error { + return client.rc.RegisterNamespace(namespace) +} + func (client *fakeResourceClient) Read(namespace, name string, opts clients.ReadOpts) (*FakeResource, error) { opts = opts.WithDefaults() diff --git a/test/mocks/v1alpha1/mock_resource_client.sk.go b/test/mocks/v1alpha1/mock_resource_client.sk.go index d6dfd501a..05ccc0210 100644 --- a/test/mocks/v1alpha1/mock_resource_client.sk.go +++ b/test/mocks/v1alpha1/mock_resource_client.sk.go @@ -19,6 +19,7 @@ type MockResourceWatcher interface { type MockResourceClient interface { BaseClient() clients.ResourceClient Register() error + RegisterNamespace(namespace string) error Read(namespace, name string, opts clients.ReadOpts) (*MockResource, error) Write(resource *MockResource, opts clients.WriteOpts) (*MockResource, error) Delete(namespace, name string, opts clients.DeleteOpts) error @@ -59,6 +60,10 @@ func (client *mockResourceClient) Register() error { return client.rc.Register() } +func (client *mockResourceClient) RegisterNamespace(namespace string) error { + return client.rc.RegisterNamespace(namespace) +} + func (client *mockResourceClient) Read(namespace, name string, opts clients.ReadOpts) (*MockResource, error) { opts = opts.WithDefaults() diff --git a/test/mocks/v1alpha1/testing_event_loop_test.go b/test/mocks/v1alpha1/testing_event_loop_test.go index fb3255d4f..4bdbb5a80 100644 --- a/test/mocks/v1alpha1/testing_event_loop_test.go +++ b/test/mocks/v1alpha1/testing_event_loop_test.go @@ -1,6 +1,5 @@ // Code generated by solo-kit. DO NOT EDIT. -//go:build solokit // +build solokit package v1alpha1 @@ -12,9 +11,12 @@ import ( . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" + skNamespace "github.com/solo-io/solo-kit/pkg/api/external/kubernetes/namespace" "github.com/solo-io/solo-kit/pkg/api/v1/clients" "github.com/solo-io/solo-kit/pkg/api/v1/clients/factory" + "github.com/solo-io/solo-kit/pkg/api/v1/clients/kube/cache" "github.com/solo-io/solo-kit/pkg/api/v1/clients/memory" + "github.com/solo-io/solo-kit/test/helpers" ) var _ = Describe("TestingEventLoop", func() { @@ -28,13 +30,18 @@ var _ = Describe("TestingEventLoop", func() { BeforeEach(func() { ctx = context.Background() + kube := helpers.MustKubeClient() + kubeCache, err := cache.NewKubeCoreCache(context.TODO(), kube) + Expect(err).NotTo(HaveOccurred()) + resourceNamespaceLister := skNamespace.NewKubeClientCacheResourceNamespaceLister(kube, kubeCache) + mockResourceClientFactory := &factory.MemoryResourceClientFactory{ Cache: memory.NewInMemoryResourceCache(), } mockResourceClient, err := NewMockResourceClient(ctx, mockResourceClientFactory) Expect(err).NotTo(HaveOccurred()) - emitter = NewTestingEmitter(mockResourceClient) + emitter = NewTestingEmitter(mockResourceClient, resourceNamespaceLister) }) It("runs sync function on a new snapshot", func() { _, err = emitter.MockResource().Write(NewMockResource(namespace, "jerry"), clients.WriteOpts{}) diff --git a/test/mocks/v1alpha1/testing_snapshot_emitter.sk.go b/test/mocks/v1alpha1/testing_snapshot_emitter.sk.go index 7cc0459ed..f01941dcf 100644 --- a/test/mocks/v1alpha1/testing_snapshot_emitter.sk.go +++ b/test/mocks/v1alpha1/testing_snapshot_emitter.sk.go @@ -12,6 +12,7 @@ import ( "go.uber.org/zap" "github.com/solo-io/solo-kit/pkg/api/v1/clients" + "github.com/solo-io/solo-kit/pkg/api/v1/resources" "github.com/solo-io/solo-kit/pkg/errors" skstats "github.com/solo-io/solo-kit/pkg/stats" @@ -83,20 +84,29 @@ type TestingEmitter interface { MockResource() MockResourceClient } -func NewTestingEmitter(mockResourceClient MockResourceClient) TestingEmitter { - return NewTestingEmitterWithEmit(mockResourceClient, make(chan struct{})) +func NewTestingEmitter(mockResourceClient MockResourceClient, resourceNamespaceLister resources.ResourceNamespaceLister) TestingEmitter { + return NewTestingEmitterWithEmit(mockResourceClient, resourceNamespaceLister, make(chan struct{})) } -func NewTestingEmitterWithEmit(mockResourceClient MockResourceClient, emit <-chan struct{}) TestingEmitter { +func NewTestingEmitterWithEmit(mockResourceClient MockResourceClient, resourceNamespaceLister resources.ResourceNamespaceLister, emit <-chan struct{}) TestingEmitter { return &testingEmitter{ - mockResource: mockResourceClient, - forceEmit: emit, + mockResource: mockResourceClient, + resourceNamespaceLister: resourceNamespaceLister, + forceEmit: emit, } } type testingEmitter struct { forceEmit <-chan struct{} mockResource MockResourceClient + // resourceNamespaceLister is used to watch for new namespaces when they are created. + // It is used when Expression Selector is in the Watch Opts set in Snapshot(). + resourceNamespaceLister resources.ResourceNamespaceLister + // namespacesWatching is the set of namespaces that we are watching. This is helpful + // when Expression Selector is set on the Watch Opts in Snapshot(). 
+ namespacesWatching sync.Map + // updateNamespaces is used to perform locks and unlocks when watches on namespaces are being updated/created + updateNamespaces sync.Mutex } func (c *testingEmitter) Register() error { @@ -110,6 +120,14 @@ func (c *testingEmitter) MockResource() MockResourceClient { return c.mockResource } +// Snapshots will return a channel that can be used to receive snapshots of the +// state of the resources it is watching +// when watching resources, you can set the watchNamespaces, and you can set the +// ExpressionSelector of the WatchOpts. Setting watchNamespaces will watch for all resources +// that are in the specified namespaces. In addition if ExpressionSelector of the WatchOpts is +// set, then all namespaces that meet the label criteria of the ExpressionSelector will +// also be watched. If Expression Selector is set and watched namespaces is set to [""], then it +// will only watch namespaces that meet the label expression selector criteria. func (c *testingEmitter) Snapshots(watchNamespaces []string, opts clients.WatchOpts) (<-chan *TestingSnapshot, <-chan error, error) { if len(watchNamespaces) == 0 { @@ -124,59 +142,269 @@ func (c *testingEmitter) Snapshots(watchNamespaces []string, opts clients.WatchO } errs := make(chan error) + hasWatchedNamespaces := len(watchNamespaces) > 1 || (len(watchNamespaces) == 1 && watchNamespaces[0] != "") + watchingLabeledNamespaces := !(opts.ExpressionSelector == "") var done sync.WaitGroup ctx := opts.Ctx + + // setting up the options for both listing and watching resources in namespaces + watchedNamespacesListOptions := clients.ListOpts{Ctx: opts.Ctx, Selector: opts.Selector} + watchedNamespacesWatchOptions := clients.WatchOpts{Ctx: opts.Ctx, Selector: opts.Selector} /* Create channel for MockResource */ type mockResourceListWithNamespace struct { list MockResourceList namespace string } mockResourceChan := make(chan mockResourceListWithNamespace) - var initialMockResourceList MockResourceList currentSnapshot := TestingSnapshot{} - mocksByNamespace := make(map[string]MockResourceList) - - for _, namespace := range watchNamespaces { - /* Setup namespaced watch for MockResource */ - { - mocks, err := c.mockResource.List(namespace, clients.ListOpts{Ctx: opts.Ctx, Selector: opts.Selector}) + mocksByNamespace := sync.Map{} + if hasWatchedNamespaces || !watchingLabeledNamespaces { + // then watch all resources on watch Namespaces + + // watched namespaces + for _, namespace := range watchNamespaces { + /* Setup namespaced watch for MockResource */ + { + mocks, err := c.mockResource.List(namespace, watchedNamespacesListOptions) + if err != nil { + return nil, nil, errors.Wrapf(err, "initial MockResource list") + } + initialMockResourceList = append(initialMockResourceList, mocks...) 
+ mocksByNamespace.Store(namespace, mocks) + } + mockResourceNamespacesChan, mockResourceErrs, err := c.mockResource.Watch(namespace, watchedNamespacesWatchOptions) if err != nil { - return nil, nil, errors.Wrapf(err, "initial MockResource list") + return nil, nil, errors.Wrapf(err, "starting MockResource watch") + } + + done.Add(1) + go func(namespace string) { + defer done.Done() + errutils.AggregateErrs(ctx, errs, mockResourceErrs, namespace+"-mocks") + }(namespace) + /* Watch for changes and update snapshot */ + go func(namespace string) { + defer func() { + c.namespacesWatching.Delete(namespace) + }() + c.namespacesWatching.Store(namespace, true) + for { + select { + case <-ctx.Done(): + return + case mockResourceList, ok := <-mockResourceNamespacesChan: + if !ok { + return + } + select { + case <-ctx.Done(): + return + case mockResourceChan <- mockResourceListWithNamespace{list: mockResourceList, namespace: namespace}: + } + } + } + }(namespace) + } + } + // watch all other namespaces that are labeled and fit the Expression Selector + if opts.ExpressionSelector != "" { + // watch resources of non-watched namespaces that fit the expression selectors + namespaceListOptions := resources.ResourceNamespaceListOptions{ + Ctx: opts.Ctx, + ExpressionSelector: opts.ExpressionSelector, + } + namespaceWatchOptions := resources.ResourceNamespaceWatchOptions{ + Ctx: opts.Ctx, + ExpressionSelector: opts.ExpressionSelector, + } + + filterNamespaces := resources.ResourceNamespaceList{} + for _, ns := range watchNamespaces { + // we do not want to filter out "" which equals all namespaces + // the reason is because we will never create a watch on ""(all namespaces) because + // doing so means we watch all resources regardless of namespace. Our intent is to + // watch only certain namespaces. + if ns != "" { + filterNamespaces = append(filterNamespaces, resources.ResourceNamespace{Name: ns}) } - initialMockResourceList = append(initialMockResourceList, mocks...) - mocksByNamespace[namespace] = mocks } - mockResourceNamespacesChan, mockResourceErrs, err := c.mockResource.Watch(namespace, opts) + namespacesResources, err := c.resourceNamespaceLister.GetResourceNamespaceList(namespaceListOptions, filterNamespaces) if err != nil { - return nil, nil, errors.Wrapf(err, "starting MockResource watch") + return nil, nil, err + } + newlyRegisteredNamespaces := make([]string, len(namespacesResources)) + // non watched namespaces that are labeled + for i, resourceNamespace := range namespacesResources { + c.namespacesWatching.Load(resourceNamespace) + namespace := resourceNamespace.Name + newlyRegisteredNamespaces[i] = namespace + err = c.mockResource.RegisterNamespace(namespace) + if err != nil { + return nil, nil, errors.Wrapf(err, "there was an error registering the namespace to the mockResource") + } + /* Setup namespaced watch for MockResource */ + { + mocks, err := c.mockResource.List(namespace, clients.ListOpts{Ctx: opts.Ctx}) + if err != nil { + return nil, nil, errors.Wrapf(err, "initial MockResource list with new namespace") + } + initialMockResourceList = append(initialMockResourceList, mocks...) 
+ mocksByNamespace.Store(namespace, mocks) + } + mockResourceNamespacesChan, mockResourceErrs, err := c.mockResource.Watch(namespace, clients.WatchOpts{Ctx: opts.Ctx}) + if err != nil { + return nil, nil, errors.Wrapf(err, "starting MockResource watch") + } + + done.Add(1) + go func(namespace string) { + defer done.Done() + errutils.AggregateErrs(ctx, errs, mockResourceErrs, namespace+"-mocks") + }(namespace) + /* Watch for changes and update snapshot */ + go func(namespace string) { + for { + select { + case <-ctx.Done(): + return + case mockResourceList, ok := <-mockResourceNamespacesChan: + if !ok { + return + } + select { + case <-ctx.Done(): + return + case mockResourceChan <- mockResourceListWithNamespace{list: mockResourceList, namespace: namespace}: + } + } + } + }(namespace) } + if len(newlyRegisteredNamespaces) > 0 { + contextutils.LoggerFrom(ctx).Infof("registered the new namespace %v", newlyRegisteredNamespaces) + } + + // create watch on all namespaces, so that we can add all resources from new namespaces + // we will be watching namespaces that meet the Expression Selector filter - done.Add(1) - go func(namespace string) { - defer done.Done() - errutils.AggregateErrs(ctx, errs, mockResourceErrs, namespace+"-mocks") - }(namespace) + namespaceWatch, errsReceiver, err := c.resourceNamespaceLister.GetResourceNamespaceWatch(namespaceWatchOptions, filterNamespaces) + if err != nil { + return nil, nil, err + } + if errsReceiver != nil { + go func() { + for { + select { + case <-ctx.Done(): + return + case err = <-errsReceiver: + errs <- errors.Wrapf(err, "received error from watch on resource namespaces") + } + } + }() + } - /* Watch for changes and update snapshot */ - go func(namespace string) { + go func() { for { select { case <-ctx.Done(): return - case mockResourceList, ok := <-mockResourceNamespacesChan: + case resourceNamespaces, ok := <-namespaceWatch: if !ok { return } - select { - case <-ctx.Done(): - return - case mockResourceChan <- mockResourceListWithNamespace{list: mockResourceList, namespace: namespace}: + // get the list of new namespaces, if there is a new namespace + // get the list of resources from that namespace, and add + // a watch for new resources created/deleted on that namespace + c.updateNamespaces.Lock() + + // get the new namespaces, and get a map of the namespaces + mapOfResourceNamespaces := make(map[string]struct{}, len(resourceNamespaces)) + newNamespaces := []string{} + for _, ns := range resourceNamespaces { + if _, hit := c.namespacesWatching.Load(ns.Name); !hit { + newNamespaces = append(newNamespaces, ns.Name) + } + mapOfResourceNamespaces[ns.Name] = struct{}{} } + + for _, ns := range watchNamespaces { + mapOfResourceNamespaces[ns] = struct{}{} + } + + missingNamespaces := []string{} + // use the map of namespace resources to find missing/deleted namespaces + c.namespacesWatching.Range(func(key interface{}, value interface{}) bool { + name := key.(string) + if _, hit := mapOfResourceNamespaces[name]; !hit { + missingNamespaces = append(missingNamespaces, name) + } + return true + }) + + for _, ns := range missingNamespaces { + mockResourceChan <- mockResourceListWithNamespace{list: MockResourceList{}, namespace: ns} + } + + for _, namespace := range newNamespaces { + var err error + err = c.mockResource.RegisterNamespace(namespace) + if err != nil { + errs <- errors.Wrapf(err, "there was an error registering the namespace to the mockResource") + continue + } + /* Setup namespaced watch for MockResource for new namespace */ + { + mocks, err 
:= c.mockResource.List(namespace, clients.ListOpts{Ctx: opts.Ctx, Selector: opts.Selector}) + if err != nil { + errs <- errors.Wrapf(err, "initial new namespace MockResource list in namespace watch") + continue + } + mocksByNamespace.Store(namespace, mocks) + } + mockResourceNamespacesChan, mockResourceErrs, err := c.mockResource.Watch(namespace, clients.WatchOpts{Ctx: opts.Ctx, Selector: opts.Selector}) + if err != nil { + errs <- errors.Wrapf(err, "starting new namespace MockResource watch") + continue + } + + done.Add(1) + go func(namespace string) { + defer done.Done() + errutils.AggregateErrs(ctx, errs, mockResourceErrs, namespace+"-new-namespace-mocks") + }(namespace) + /* Watch for changes and update snapshot */ + go func(namespace string) { + defer func() { + c.namespacesWatching.Delete(namespace) + }() + c.namespacesWatching.Store(namespace, true) + for { + select { + case <-ctx.Done(): + return + case mockResourceList, ok := <-mockResourceNamespacesChan: + if !ok { + return + } + select { + case <-ctx.Done(): + return + case mockResourceChan <- mockResourceListWithNamespace{list: mockResourceList, namespace: namespace}: + } + } + } + }(namespace) + } + if len(newNamespaces) > 0 { + contextutils.LoggerFrom(ctx).Infof("registered the new namespace %v", newNamespaces) + } + c.updateNamespaces.Unlock() } } - }(namespace) + }() } /* Initialize snapshot for Mocks */ currentSnapshot.Mocks = initialMockResourceList.Sort() @@ -246,11 +474,13 @@ func (c *testingEmitter) Snapshots(watchNamespaces []string, opts clients.WatchO ) // merge lists by namespace - mocksByNamespace[namespace] = mockResourceNamespacedList.list + mocksByNamespace.Store(namespace, mockResourceNamespacedList.list) var mockResourceList MockResourceList - for _, mocks := range mocksByNamespace { + mocksByNamespace.Range(func(key interface{}, value interface{}) bool { + mocks := value.(MockResourceList) mockResourceList = append(mockResourceList, mocks...) - } + return true + }) currentSnapshot.Mocks = mockResourceList.Sort() } } diff --git a/test/mocks/v1alpha1/testing_snapshot_emitter_test.go b/test/mocks/v1alpha1/testing_snapshot_emitter_test.go index 4ac4c6719..cfbe39220 100644 --- a/test/mocks/v1alpha1/testing_snapshot_emitter_test.go +++ b/test/mocks/v1alpha1/testing_snapshot_emitter_test.go @@ -7,6 +7,7 @@ package v1alpha1 import ( "context" + "fmt" "os" "time" @@ -14,12 +15,19 @@ import ( . "github.com/onsi/gomega" "github.com/solo-io/go-utils/log" "github.com/solo-io/k8s-utils/kubeutils" + "github.com/solo-io/solo-kit/pkg/api/external/kubernetes/namespace" "github.com/solo-io/solo-kit/pkg/api/v1/clients" "github.com/solo-io/solo-kit/pkg/api/v1/clients/factory" kuberc "github.com/solo-io/solo-kit/pkg/api/v1/clients/kube" + "github.com/solo-io/solo-kit/pkg/api/v1/clients/kube/cache" + "github.com/solo-io/solo-kit/pkg/api/v1/resources" + "github.com/solo-io/solo-kit/pkg/api/v1/resources/core" "github.com/solo-io/solo-kit/pkg/utils/statusutils" "github.com/solo-io/solo-kit/test/helpers" + corev1 "k8s.io/api/core/v1" apiext "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" @@ -35,77 +43,181 @@ var _ = Describe("V1Alpha1Emitter", func() { log.Printf("This test creates kubernetes resources and is disabled by default. 
To enable, set RUN_KUBE_TESTS=1 in your env.") return } + + type metadataGetter interface { + GetMetadata() *core.Metadata + } + var ( - ctx context.Context - namespace1 string - namespace2 string - name1, name2 = "angela" + helpers.RandString(3), "bob" + helpers.RandString(3) - cfg *rest.Config - clientset *apiext.Clientset - kube kubernetes.Interface - emitter TestingEmitter - mockResourceClient MockResourceClient + ctx context.Context + namespace1, namespace2 string + namespace3, namespace4 string + namespace5, namespace6 string + name1, name2 = "angela" + helpers.RandString(3), "bob" + helpers.RandString(3) + name3, name4 = "susan" + helpers.RandString(3), "jim" + helpers.RandString(3) + name5 = "melisa" + helpers.RandString(3) + labels1 = map[string]string{"env": "test"} + labelExpression1 = "env in (test)" + cfg *rest.Config + clientset *apiext.Clientset + kube kubernetes.Interface + emitter TestingEmitter + mockResourceClient MockResourceClient + resourceNamespaceLister resources.ResourceNamespaceLister + kubeCache cache.KubeCoreCache + ) + const ( + TIME_BETWEEN_MESSAGES = 5 ) + NewMockResourceWithLabels := func(namespace, name string, labels map[string]string) *MockResource { + resource := NewMockResource(namespace, name) + resource.GetMetadata().Labels = labels + return resource + } - BeforeEach(func() { - err := os.Setenv(statusutils.PodNamespaceEnvName, "default") + createNamespaces := func(ctx context.Context, kube kubernetes.Interface, namespaces ...string) { + err := kubeutils.CreateNamespacesInParallel(ctx, kube, namespaces...) Expect(err).NotTo(HaveOccurred()) + } - ctx = context.Background() + createNamespaceWithLabel := func(ctx context.Context, kube kubernetes.Interface, namespace string, labels map[string]string) { + _, err := kube.CoreV1().Namespaces().Create(ctx, &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: namespace, + Labels: labels, + }, + }, metav1.CreateOptions{}) + Expect(err).ToNot(HaveOccurred()) + } + + deleteNamespaces := func(ctx context.Context, kube kubernetes.Interface, namespaces ...string) { + err := kubeutils.DeleteNamespacesInParallelBlocking(ctx, kube, namespaces...) + Expect(err).NotTo(HaveOccurred()) + } + + // getNewNamespaces is used to generate new namespace names, so that we do not have to wait + // when deleting namespaces in runNamespacedSelectorsWithWatchNamespaces. Since + // runNamespacedSelectorsWithWatchNamespaces uses watchNamespaces set to namespace1 and + // namespace2, this will work. Because the emitter willl only be watching namespaces that are + // labeled. + getNewNamespaces := func() { + namespace3 = helpers.RandString(8) + namespace4 = helpers.RandString(8) + namespace5 = helpers.RandString(8) + namespace6 = helpers.RandString(8) + } + + // getNewNamespaces1and2 is used to generate new namespaces for namespace 1 and 2. 
+ // used for the same reason as getNewNamespaces() above + getNewNamespaces1and2 := func() { namespace1 = helpers.RandString(8) namespace2 = helpers.RandString(8) - kube = helpers.MustKubeClient() - err = kubeutils.CreateNamespacesInParallel(ctx, kube, namespace1, namespace2) - Expect(err).NotTo(HaveOccurred()) - cfg, err = kubeutils.GetConfig("", "") - Expect(err).NotTo(HaveOccurred()) + } - clientset, err = apiext.NewForConfig(cfg) - Expect(err).NotTo(HaveOccurred()) - // MockResource Constructor - mockResourceClientFactory := &factory.KubeResourceClientFactory{ - Crd: MockResourceCrd, - Cfg: cfg, - SharedCache: kuberc.NewKubeCache(context.TODO()), + getMapOfNamespaceResources := func(getList func(string) ([]metadataGetter, error)) map[string][]string { + namespaces := []string{namespace1, namespace2, namespace3, namespace4, namespace5, namespace6} + namespaceResources := make(map[string][]string, len(namespaces)) + for _, ns := range namespaces { + list, _ := getList(ns) + for _, snap := range list { + snapMeta := snap.GetMetadata() + if _, hit := namespaceResources[snapMeta.Namespace]; hit { + namespaceResources[snap.GetMetadata().Namespace] = make([]string, 1) + } + namespaceResources[snapMeta.Namespace] = append(namespaceResources[snapMeta.Namespace], snapMeta.Name) + } } + return namespaceResources + } - err = helpers.AddAndRegisterCrd(ctx, MockResourceCrd, clientset) - Expect(err).NotTo(HaveOccurred()) + findNonMatchingResources := func(matchList, findList []metadataGetter) map[string][]string { + nonMatching := make(map[string][]string) + for _, snap := range matchList { + snapMeta := snap.GetMetadata() + matched := false + for _, pre := range findList { + preMeta := pre.GetMetadata() + if preMeta.Namespace == snapMeta.Namespace && preMeta.Name == snapMeta.Name { + matched = true + break + } + } + if !matched { + if _, hit := nonMatching[snapMeta.Namespace]; hit { + nonMatching[snap.GetMetadata().Namespace] = make([]string, 1) + } + nonMatching[snapMeta.Namespace] = append(nonMatching[snapMeta.Namespace], snapMeta.Name) + } + } + return nonMatching + } - mockResourceClient, err = NewMockResourceClient(ctx, mockResourceClientFactory) - Expect(err).NotTo(HaveOccurred()) - emitter = NewTestingEmitter(mockResourceClient) - }) - AfterEach(func() { - err := os.Unsetenv(statusutils.PodNamespaceEnvName) - Expect(err).NotTo(HaveOccurred()) + findMatchingResources := func(matchList, findList []metadataGetter) map[string][]string { + matching := make(map[string][]string) + for _, snap := range matchList { + snapMeta := snap.GetMetadata() + matched := false + for _, pre := range findList { + preMeta := pre.GetMetadata() + if preMeta.Namespace == snapMeta.Namespace && preMeta.Name == snapMeta.Name { + matched = true + break + } + } + if matched { + if _, hit := matching[snapMeta.Namespace]; hit { + matching[snap.GetMetadata().Namespace] = make([]string, 1) + } + matching[snapMeta.Namespace] = append(matching[snapMeta.Namespace], snapMeta.Name) + } + } + return matching + } - err = kubeutils.DeleteNamespacesInParallelBlocking(ctx, kube, namespace1, namespace2) - Expect(err).NotTo(HaveOccurred()) - }) + getMapOfResources := func(listOfResources []metadataGetter) map[string][]string { + resources := make(map[string][]string) + for _, snap := range listOfResources { + snapMeta := snap.GetMetadata() + if _, hit := resources[snapMeta.Namespace]; hit { + resources[snap.GetMetadata().Namespace] = make([]string, 1) + } + resources[snapMeta.Namespace] = append(resources[snapMeta.Namespace], 
snapMeta.Name) + } + return resources + } + convertMocksToMetadataGetter := func(rl MockResourceList) []metadataGetter { + listConv := make([]metadataGetter, len(rl)) + for i, r := range rl { + listConv[i] = r + } + return listConv + } - It("tracks snapshots on changes to any resource", func() { + runNamespacedSelectorsWithWatchNamespaces := func() { ctx := context.Background() err := emitter.Register() Expect(err).NotTo(HaveOccurred()) + // There is an error here in the code. snapshots, errs, err := emitter.Snapshots([]string{namespace1, namespace2}, clients.WatchOpts{ - Ctx: ctx, - RefreshRate: time.Second, + Ctx: ctx, + RefreshRate: time.Second, + ExpressionSelector: labelExpression1, }) Expect(err).NotTo(HaveOccurred()) var snap *TestingSnapshot + var previous *TestingSnapshot /* MockResource */ - assertSnapshotMocks := func(expectMocks MockResourceList, unexpectMocks MockResourceList) { drain: for { select { case snap = <-snapshots: + previous = snap for _, expected := range expectMocks { if _, err := snap.Mocks.Find(expected.GetMetadata().Ref().Strings()); err != nil { continue drain @@ -120,109 +232,627 @@ var _ = Describe("V1Alpha1Emitter", func() { case err := <-errs: Expect(err).NotTo(HaveOccurred()) case <-time.After(time.Second * 10): - nsList1, _ := mockResourceClient.List(namespace1, clients.ListOpts{}) - nsList2, _ := mockResourceClient.List(namespace2, clients.ListOpts{}) - combined := append(nsList1, nsList2...) - Fail("expected final snapshot before 10 seconds. expected " + log.Sprintf("%v", combined)) + var expectedResources map[string][]string + var unexpectedResource map[string][]string + + if previous != nil { + expectedResources = findNonMatchingResources(convertMocksToMetadataGetter(expectMocks), convertMocksToMetadataGetter(previous.Mocks)) + unexpectedResource = findMatchingResources(convertMocksToMetadataGetter(unexpectMocks), convertMocksToMetadataGetter(previous.Mocks)) + } else { + expectedResources = getMapOfResources(convertMocksToMetadataGetter(expectMocks)) + unexpectedResource = getMapOfResources(convertMocksToMetadataGetter(unexpectMocks)) + } + getList := func(ns string) ([]metadataGetter, error) { + l, err := mockResourceClient.List(ns, clients.ListOpts{}) + return convertMocksToMetadataGetter(l), err + } + namespaceResources := getMapOfNamespaceResources(getList) + Fail(fmt.Sprintf("expected final snapshot before 10 seconds. 
expected \nExpected:\n%#v\n\nUnexpected:\n%#v\n\nnamespaces:\n%#v", expectedResources, unexpectedResource, namespaceResources)) } } } + mockResource1a, err := mockResourceClient.Write(NewMockResource(namespace1, name1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) mockResource1b, err := mockResourceClient.Write(NewMockResource(namespace2, name1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) + mockResourceWatched := MockResourceList{mockResource1a, mockResource1b} + assertSnapshotMocks(mockResourceWatched, nil) - assertSnapshotMocks(MockResourceList{mockResource1a, mockResource1b}, nil) - mockResource2a, err := mockResourceClient.Write(NewMockResource(namespace1, name2), clients.WriteOpts{Ctx: ctx}) + mockResource3a, err := mockResourceClient.Write(NewMockResourceWithLabels(namespace1, name3, labels1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - mockResource2b, err := mockResourceClient.Write(NewMockResource(namespace2, name2), clients.WriteOpts{Ctx: ctx}) + mockResource3b, err := mockResourceClient.Write(NewMockResourceWithLabels(namespace2, name3, labels1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) + mockResourceWatched = append(mockResourceWatched, MockResourceList{mockResource3a, mockResource3b}...) + assertSnapshotMocks(mockResourceWatched, nil) + + createNamespaceWithLabel(ctx, kube, namespace3, labels1) + createNamespaces(ctx, kube, namespace4) - assertSnapshotMocks(MockResourceList{mockResource1a, mockResource1b, mockResource2a, mockResource2b}, nil) + mockResource4a, err := mockResourceClient.Write(NewMockResource(namespace3, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockResource4b, err := mockResourceClient.Write(NewMockResource(namespace4, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockResourceWatched = append(mockResourceWatched, mockResource4a) + mockResourceNotWatched := MockResourceList{mockResource4b} + assertSnapshotMocks(mockResourceWatched, mockResourceNotWatched) - err = mockResourceClient.Delete(mockResource2a.GetMetadata().Namespace, mockResource2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + mockResource5a, err := mockResourceClient.Write(NewMockResourceWithLabels(namespace3, name2, labels1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - err = mockResourceClient.Delete(mockResource2b.GetMetadata().Namespace, mockResource2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + mockResource5b, err := mockResourceClient.Write(NewMockResourceWithLabels(namespace4, name2, labels1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) + mockResourceWatched = append(mockResourceWatched, mockResource5a) + mockResourceNotWatched = append(mockResourceNotWatched, mockResource5b) + assertSnapshotMocks(mockResourceWatched, mockResourceNotWatched) - assertSnapshotMocks(MockResourceList{mockResource1a, mockResource1b}, MockResourceList{mockResource2a, mockResource2b}) + for _, r := range mockResourceNotWatched { + err = mockResourceClient.Delete(r.GetMetadata().Namespace, r.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + } err = mockResourceClient.Delete(mockResource1a.GetMetadata().Namespace, mockResource1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) err = mockResourceClient.Delete(mockResource1b.GetMetadata().Namespace, mockResource1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) + 
mockResourceNotWatched = append(mockResourceNotWatched, MockResourceList{mockResource1a, mockResource1b}...) + mockResourceWatched = MockResourceList{mockResource3a, mockResource3b, mockResource4a, mockResource5a} + assertSnapshotMocks(mockResourceWatched, mockResourceNotWatched) - assertSnapshotMocks(nil, MockResourceList{mockResource1a, mockResource1b, mockResource2a, mockResource2b}) - }) + err = mockResourceClient.Delete(mockResource3a.GetMetadata().Namespace, mockResource3a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = mockResourceClient.Delete(mockResource3b.GetMetadata().Namespace, mockResource3b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockResourceNotWatched = append(mockResourceNotWatched, MockResourceList{mockResource3a, mockResource3b}...) + mockResourceWatched = MockResourceList{mockResource4a, mockResource5a} + assertSnapshotMocks(mockResourceWatched, mockResourceNotWatched) - It("tracks snapshots on changes to any resource using AllNamespace", func() { - ctx := context.Background() - err := emitter.Register() + err = mockResourceClient.Delete(mockResource4a.GetMetadata().Namespace, mockResource4a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = mockResourceClient.Delete(mockResource5a.GetMetadata().Namespace, mockResource5a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) + mockResourceNotWatched = append(mockResourceNotWatched, MockResourceList{mockResource5a, mockResource5b}...) + assertSnapshotMocks(nil, mockResourceNotWatched) - snapshots, errs, err := emitter.Snapshots([]string{""}, clients.WatchOpts{ - Ctx: ctx, - RefreshRate: time.Second, - }) + // clean up environment + deleteNamespaces(ctx, kube, namespace3, namespace4) + getNewNamespaces() + } + + BeforeEach(func() { + err := os.Setenv(statusutils.PodNamespaceEnvName, "default") Expect(err).NotTo(HaveOccurred()) - var snap *TestingSnapshot + ctx = context.Background() + namespace1 = helpers.RandString(8) + namespace2 = helpers.RandString(8) + namespace3 = helpers.RandString(8) + namespace4 = helpers.RandString(8) + namespace5 = helpers.RandString(8) + namespace6 = helpers.RandString(8) - /* - MockResource - */ + kube = helpers.MustKubeClient() + kubeCache, err = cache.NewKubeCoreCache(context.TODO(), kube) + Expect(err).NotTo(HaveOccurred()) + resourceNamespaceLister = namespace.NewKubeClientCacheResourceNamespaceLister(kube, kubeCache) - assertSnapshotMocks := func(expectMocks MockResourceList, unexpectMocks MockResourceList) { - drain: - for { - select { - case snap = <-snapshots: - for _, expected := range expectMocks { - if _, err := snap.Mocks.Find(expected.GetMetadata().Ref().Strings()); err != nil { - continue drain + createNamespaces(ctx, kube, namespace1, namespace2) + + cfg, err = kubeutils.GetConfig("", "") + Expect(err).NotTo(HaveOccurred()) + + clientset, err = apiext.NewForConfig(cfg) + Expect(err).NotTo(HaveOccurred()) + // MockResource Constructor + mockResourceClientFactory := &factory.KubeResourceClientFactory{ + Crd: MockResourceCrd, + Cfg: cfg, + SharedCache: kuberc.NewKubeCache(context.TODO()), + } + + err = helpers.AddAndRegisterCrd(ctx, MockResourceCrd, clientset) + Expect(err).NotTo(HaveOccurred()) + + mockResourceClient, err = NewMockResourceClient(ctx, mockResourceClientFactory) + Expect(err).NotTo(HaveOccurred()) + emitter = NewTestingEmitter(mockResourceClient, resourceNamespaceLister) + }) + AfterEach(func() { + err 
:= os.Unsetenv(statusutils.PodNamespaceEnvName) + Expect(err).NotTo(HaveOccurred()) + + kubeutils.DeleteNamespacesInParallelBlocking(ctx, kube, namespace1, namespace2) + }) + + Context("Tracking watched namespaces", func() { + It("tracks snapshots on changes to any resource", func() { + ctx := context.Background() + err := emitter.Register() + Expect(err).NotTo(HaveOccurred()) + + snapshots, errs, err := emitter.Snapshots([]string{namespace1, namespace2}, clients.WatchOpts{ + Ctx: ctx, + RefreshRate: time.Second, + }) + Expect(err).NotTo(HaveOccurred()) + + var snap *TestingSnapshot + + /* + MockResource + */ + + assertSnapshotMocks := func(expectMocks MockResourceList, unexpectMocks MockResourceList) { + drain: + for { + select { + case snap = <-snapshots: + for _, expected := range expectMocks { + if _, err := snap.Mocks.Find(expected.GetMetadata().Ref().Strings()); err != nil { + continue drain + } } + for _, unexpected := range unexpectMocks { + if _, err := snap.Mocks.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + continue drain + } + } + break drain + case err := <-errs: + Expect(err).NotTo(HaveOccurred()) + case <-time.After(time.Second * 10): + nsList1, _ := mockResourceClient.List(namespace1, clients.ListOpts{}) + nsList2, _ := mockResourceClient.List(namespace2, clients.ListOpts{}) + combined := append(nsList1, nsList2...) + Fail("expected final snapshot before 10 seconds. expected " + log.Sprintf("%v", combined)) } - for _, unexpected := range unexpectMocks { - if _, err := snap.Mocks.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { - continue drain + } + } + mockResource1a, err := mockResourceClient.Write(NewMockResource(namespace1, name5), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockResource1b, err := mockResourceClient.Write(NewMockResource(namespace2, name5), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotMocks(MockResourceList{mockResource1a, mockResource1b}, nil) + mockResource2a, err := mockResourceClient.Write(NewMockResource(namespace1, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockResource2b, err := mockResourceClient.Write(NewMockResource(namespace2, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotMocks(MockResourceList{mockResource1a, mockResource1b, mockResource2a, mockResource2b}, nil) + + err = mockResourceClient.Delete(mockResource2a.GetMetadata().Namespace, mockResource2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = mockResourceClient.Delete(mockResource2b.GetMetadata().Namespace, mockResource2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotMocks(MockResourceList{mockResource1a, mockResource1b}, MockResourceList{mockResource2a, mockResource2b}) + + err = mockResourceClient.Delete(mockResource1a.GetMetadata().Namespace, mockResource1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = mockResourceClient.Delete(mockResource1b.GetMetadata().Namespace, mockResource1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotMocks(nil, MockResourceList{mockResource1a, mockResource1b, mockResource2a, mockResource2b}) + }) + + It("should be able to track all resources that are on labeled namespaces", func() { + runNamespacedSelectorsWithWatchNamespaces() + }) + }) + + Context("Tracking empty watched namespaces", 
func() { + It("tracks snapshots on changes to any resource using AllNamespace", func() { + ctx := context.Background() + err := emitter.Register() + Expect(err).NotTo(HaveOccurred()) + + snapshots, errs, err := emitter.Snapshots([]string{""}, clients.WatchOpts{ + Ctx: ctx, + RefreshRate: time.Second, + }) + Expect(err).NotTo(HaveOccurred()) + + var snap *TestingSnapshot + + /* + MockResource + */ + + assertSnapshotMocks := func(expectMocks MockResourceList, unexpectMocks MockResourceList) { + drain: + for { + select { + case snap = <-snapshots: + for _, expected := range expectMocks { + if _, err := snap.Mocks.Find(expected.GetMetadata().Ref().Strings()); err != nil { + continue drain + } } + for _, unexpected := range unexpectMocks { + if _, err := snap.Mocks.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + continue drain + } + } + break drain + case err := <-errs: + Expect(err).NotTo(HaveOccurred()) + case <-time.After(time.Second * 10): + nsList1, _ := mockResourceClient.List(namespace1, clients.ListOpts{}) + nsList2, _ := mockResourceClient.List(namespace2, clients.ListOpts{}) + combined := append(nsList1, nsList2...) + Fail("expected final snapshot before 10 seconds. expected " + log.Sprintf("%v", combined)) } - break drain - case err := <-errs: - Expect(err).NotTo(HaveOccurred()) - case <-time.After(time.Second * 10): - nsList1, _ := mockResourceClient.List(namespace1, clients.ListOpts{}) - nsList2, _ := mockResourceClient.List(namespace2, clients.ListOpts{}) - combined := append(nsList1, nsList2...) - Fail("expected final snapshot before 10 seconds. expected " + log.Sprintf("%v", combined)) } } - } - mockResource1a, err := mockResourceClient.Write(NewMockResource(namespace1, name1), clients.WriteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - mockResource1b, err := mockResourceClient.Write(NewMockResource(namespace2, name1), clients.WriteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - assertSnapshotMocks(MockResourceList{mockResource1a, mockResource1b}, nil) - mockResource2a, err := mockResourceClient.Write(NewMockResource(namespace1, name2), clients.WriteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - mockResource2b, err := mockResourceClient.Write(NewMockResource(namespace2, name2), clients.WriteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) + mockResource1a, err := mockResourceClient.Write(NewMockResource(namespace1, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockResource1b, err := mockResourceClient.Write(NewMockResource(namespace2, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + assertSnapshotMocks(MockResourceList{mockResource1a, mockResource1b}, nil) + + mockResource2a, err := mockResourceClient.Write(NewMockResource(namespace1, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockResource2b, err := mockResourceClient.Write(NewMockResource(namespace2, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + assertSnapshotMocks(MockResourceList{mockResource1a, mockResource1b, mockResource2a, mockResource2b}, nil) + + err = mockResourceClient.Delete(mockResource2a.GetMetadata().Namespace, mockResource2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = mockResourceClient.Delete(mockResource2b.GetMetadata().Namespace, mockResource2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + assertSnapshotMocks(MockResourceList{mockResource1a, mockResource1b}, 
MockResourceList{mockResource2a, mockResource2b}) + + err = mockResourceClient.Delete(mockResource1a.GetMetadata().Namespace, mockResource1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = mockResourceClient.Delete(mockResource1b.GetMetadata().Namespace, mockResource1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + assertSnapshotMocks(nil, MockResourceList{mockResource1a, mockResource1b, mockResource2a, mockResource2b}) + }) - assertSnapshotMocks(MockResourceList{mockResource1a, mockResource1b, mockResource2a, mockResource2b}, nil) + It("should be able to track resources only made with the matching labels", func() { + ctx := context.Background() + err := emitter.Register() + Expect(err).NotTo(HaveOccurred()) + + snapshots, errs, err := emitter.Snapshots([]string{""}, clients.WatchOpts{ + Ctx: ctx, + RefreshRate: time.Second, + ExpressionSelector: labelExpression1, + }) + Expect(err).NotTo(HaveOccurred()) + + var snap *TestingSnapshot + var previous *TestingSnapshot + + /* + MockResource + */ + + assertSnapshotMocks := func(expectMocks MockResourceList, unexpectMocks MockResourceList) { + drain: + for { + select { + case snap = <-snapshots: + previous = snap + for _, expected := range expectMocks { + if _, err := snap.Mocks.Find(expected.GetMetadata().Ref().Strings()); err != nil { + continue drain + } + } + for _, unexpected := range unexpectMocks { + if _, err := snap.Mocks.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + continue drain + } + } + break drain + case err := <-errs: + Expect(err).NotTo(HaveOccurred()) + case <-time.After(time.Second * 10): + + var expectedResources map[string][]string + var unexpectedResource map[string][]string + + if previous != nil { + expectedResources = findNonMatchingResources(convertMocksToMetadataGetter(expectMocks), convertMocksToMetadataGetter(previous.Mocks)) + unexpectedResource = findMatchingResources(convertMocksToMetadataGetter(unexpectMocks), convertMocksToMetadataGetter(previous.Mocks)) + } else { + expectedResources = getMapOfResources(convertMocksToMetadataGetter(expectMocks)) + unexpectedResource = getMapOfResources(convertMocksToMetadataGetter(unexpectMocks)) + } + getList := func(ns string) ([]metadataGetter, error) { + l, err := mockResourceClient.List(ns, clients.ListOpts{}) + return convertMocksToMetadataGetter(l), err + } + namespaceResources := getMapOfNamespaceResources(getList) + Fail(fmt.Sprintf("expected final snapshot before 10 seconds. 
expected \nExpected:\n%#v\n\nUnexpected:\n%#v\n\nnamespaces:\n%#v", expectedResources, unexpectedResource, namespaceResources)) + } + } + } - err = mockResourceClient.Delete(mockResource2a.GetMetadata().Namespace, mockResource2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - err = mockResourceClient.Delete(mockResource2b.GetMetadata().Namespace, mockResource2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) + mockResource1a, err := mockResourceClient.Write(NewMockResource(namespace1, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockResource1b, err := mockResourceClient.Write(NewMockResource(namespace2, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockResourceNotWatched := MockResourceList{mockResource1a, mockResource1b} + + createNamespaceWithLabel(ctx, kube, namespace3, labels1) + createNamespaceWithLabel(ctx, kube, namespace4, labels1) + + mockResource2a, err := mockResourceClient.Write(NewMockResource(namespace3, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockResource2b, err := mockResourceClient.Write(NewMockResource(namespace4, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockResourceWatched := MockResourceList{mockResource2a, mockResource2b} + assertSnapshotMocks(mockResourceWatched, mockResourceNotWatched) + + createNamespaces(ctx, kube, namespace5) + createNamespaceWithLabel(ctx, kube, namespace6, labels1) + + mockResource5a, err := mockResourceClient.Write(NewMockResource(namespace5, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockResource5b, err := mockResourceClient.Write(NewMockResource(namespace6, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockResourceNotWatched = append(mockResourceNotWatched, mockResource5a) + mockResourceWatched = append(mockResourceWatched, mockResource5b) + assertSnapshotMocks(mockResourceWatched, mockResourceNotWatched) + + mockResource7a, err := mockResourceClient.Write(NewMockResource(namespace5, name4), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockResource7b, err := mockResourceClient.Write(NewMockResource(namespace6, name4), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockResourceNotWatched = append(mockResourceNotWatched, mockResource7a) + mockResourceWatched = append(mockResourceWatched, mockResource7b) + assertSnapshotMocks(mockResourceWatched, mockResourceNotWatched) + + for _, r := range mockResourceNotWatched { + err = mockResourceClient.Delete(r.GetMetadata().Namespace, r.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + } - assertSnapshotMocks(MockResourceList{mockResource1a, mockResource1b}, MockResourceList{mockResource2a, mockResource2b}) + for _, r := range mockResourceWatched { + err = mockResourceClient.Delete(r.GetMetadata().Namespace, r.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockResourceNotWatched = append(mockResourceNotWatched, r) + } + assertSnapshotMocks(nil, mockResourceNotWatched) - err = mockResourceClient.Delete(mockResource1a.GetMetadata().Namespace, mockResource1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - err = mockResourceClient.Delete(mockResource1b.GetMetadata().Namespace, mockResource1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) + // clean 
up environment + deleteNamespaces(ctx, kube, namespace3, namespace4, namespace5, namespace6) + getNewNamespaces() + }) + }) + + Context("Tracking resources on namespaces that are deleted", func() { + It("Should not contain resources from a deleted namespace", func() { + ctx := context.Background() + err := emitter.Register() + Expect(err).NotTo(HaveOccurred()) + + snapshots, errs, err := emitter.Snapshots([]string{""}, clients.WatchOpts{ + Ctx: ctx, + RefreshRate: time.Second, + }) + Expect(err).NotTo(HaveOccurred()) + + var snap *TestingSnapshot + + /* + MockResource + */ + assertSnapshotMocks := func(expectMocks MockResourceList, unexpectMocks MockResourceList) { + drain: + for { + select { + case snap = <-snapshots: + for _, expected := range expectMocks { + if _, err := snap.Mocks.Find(expected.GetMetadata().Ref().Strings()); err != nil { + continue drain + } + } + for _, unexpected := range unexpectMocks { + if _, err := snap.Mocks.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + continue drain + } + } + break drain + case err := <-errs: + Expect(err).NotTo(HaveOccurred()) + case <-time.After(time.Second * 10): + nsList1, _ := mockResourceClient.List(namespace1, clients.ListOpts{}) + nsList2, _ := mockResourceClient.List(namespace2, clients.ListOpts{}) + combined := append(nsList1, nsList2...) + Fail("expected final snapshot before 10 seconds. expected " + log.Sprintf("%v", combined)) + } + } + } + + mockResource1a, err := mockResourceClient.Write(NewMockResource(namespace1, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockResource1b, err := mockResourceClient.Write(NewMockResource(namespace2, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockResourceWatched := MockResourceList{mockResource1a, mockResource1b} + assertSnapshotMocks(mockResourceWatched, nil) + err = mockResourceClient.Delete(mockResource1a.GetMetadata().Namespace, mockResource1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = mockResourceClient.Delete(mockResource1b.GetMetadata().Namespace, mockResource1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + mockResourceNotWatched := MockResourceList{mockResource1a, mockResource1b} + assertSnapshotMocks(nil, mockResourceNotWatched) + + deleteNamespaces(ctx, kube, namespace1, namespace2) + + getNewNamespaces1and2() + createNamespaces(ctx, kube, namespace1, namespace2) + }) + + It("Should not contain resources from a deleted namespace, that is filtered", func() { + ctx := context.Background() + err := emitter.Register() + Expect(err).NotTo(HaveOccurred()) + + snapshots, errs, err := emitter.Snapshots([]string{""}, clients.WatchOpts{ + Ctx: ctx, + RefreshRate: time.Second, + ExpressionSelector: labelExpression1, + }) + Expect(err).NotTo(HaveOccurred()) + + var snap *TestingSnapshot + + /* + MockResource + */ + + assertSnapshotMocks := func(expectMocks MockResourceList, unexpectMocks MockResourceList) { + drain: + for { + select { + case snap = <-snapshots: + for _, expected := range expectMocks { + if _, err := snap.Mocks.Find(expected.GetMetadata().Ref().Strings()); err != nil { + continue drain + } + } + for _, unexpected := range unexpectMocks { + if _, err := snap.Mocks.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + continue drain + } + } + break drain + case err := <-errs: + Expect(err).NotTo(HaveOccurred()) + case <-time.After(time.Second * 10): + nsList1, _ := mockResourceClient.List(namespace1, 
clients.ListOpts{}) + nsList2, _ := mockResourceClient.List(namespace2, clients.ListOpts{}) + combined := append(nsList1, nsList2...) + Fail("expected final snapshot before 10 seconds. expected " + log.Sprintf("%v", combined)) + } + } + } + + // create namespaces + createNamespaceWithLabel(ctx, kube, namespace3, labels1) + createNamespaceWithLabel(ctx, kube, namespace4, labels1) - assertSnapshotMocks(nil, MockResourceList{mockResource1a, mockResource1b, mockResource2a, mockResource2b}) + mockResource2a, err := mockResourceClient.Write(NewMockResource(namespace3, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockResource2b, err := mockResourceClient.Write(NewMockResource(namespace4, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockResourceNotWatched := MockResourceList{} + mockResourceWatched := MockResourceList{mockResource2a, mockResource2b} + assertSnapshotMocks(mockResourceWatched, mockResourceNotWatched) + + deleteNamespaces(ctx, kube, namespace3) + + mockResourceWatched = MockResourceList{mockResource2b} + mockResourceNotWatched = append(mockResourceNotWatched, mockResource2a) + assertSnapshotMocks(mockResourceWatched, mockResourceNotWatched) + + for _, r := range mockResourceWatched { + err = mockResourceClient.Delete(r.GetMetadata().Namespace, r.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockResourceNotWatched = append(mockResourceNotWatched, r) + } + assertSnapshotMocks(nil, mockResourceNotWatched) + + deleteNamespaces(ctx, kube, namespace4) + getNewNamespaces() + }) + + It("should be able to return a resource from a deleted namespace, after the namespace is re-created", func() { + ctx := context.Background() + err := emitter.Register() + Expect(err).NotTo(HaveOccurred()) + + snapshots, errs, err := emitter.Snapshots([]string{""}, clients.WatchOpts{ + Ctx: ctx, + RefreshRate: time.Second, + ExpressionSelector: labelExpression1, + }) + Expect(err).NotTo(HaveOccurred()) + + var snap *TestingSnapshot + var previous *TestingSnapshot + + /* + MockResource + */ + assertSnapshotMocks := func(expectMocks MockResourceList, unexpectMocks MockResourceList) { + drain: + for { + select { + case snap = <-snapshots: + previous = snap + for _, expected := range expectMocks { + if _, err := snap.Mocks.Find(expected.GetMetadata().Ref().Strings()); err != nil { + continue drain + } + } + for _, unexpected := range unexpectMocks { + if _, err := snap.Mocks.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + continue drain + } + } + break drain + case err := <-errs: + Expect(err).NotTo(HaveOccurred()) + case <-time.After(time.Second * 10): + var expectedResources map[string][]string + var unexpectedResource map[string][]string + + if previous != nil { + expectedResources = findNonMatchingResources(convertMocksToMetadataGetter(expectMocks), convertMocksToMetadataGetter(previous.Mocks)) + unexpectedResource = findMatchingResources(convertMocksToMetadataGetter(unexpectMocks), convertMocksToMetadataGetter(previous.Mocks)) + } else { + expectedResources = getMapOfResources(convertMocksToMetadataGetter(expectMocks)) + unexpectedResource = getMapOfResources(convertMocksToMetadataGetter(unexpectMocks)) + } + getList := func(ns string) ([]metadataGetter, error) { + l, err := mockResourceClient.List(ns, clients.ListOpts{}) + return convertMocksToMetadataGetter(l), err + } + namespaceResources := getMapOfNamespaceResources(getList) + Fail(fmt.Sprintf("expected final snapshot before 10 seconds. 
expected \nExpected:\n%#v\n\nUnexpected:\n%#v\n\nnamespaces:\n%#v", expectedResources, unexpectedResource, namespaceResources)) + } + } + } + + createNamespaceWithLabel(ctx, kube, namespace3, labels1) + + mockResource1a, err := mockResourceClient.Write(NewMockResource(namespace3, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + assertSnapshotMocks(MockResourceList{mockResource1a}, nil) + + deleteNamespaces(ctx, kube, namespace3) + Eventually(func() bool { + _, err = kube.CoreV1().Namespaces().Get(ctx, namespace3, metav1.GetOptions{}) + return apierrors.IsNotFound(err) + }, 15*time.Second, 1*time.Second).Should(BeTrue()) + createNamespaceWithLabel(ctx, kube, namespace3, labels1) + + mockResource2a, err := mockResourceClient.Write(NewMockResource(namespace3, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + assertSnapshotMocks(MockResourceList{mockResource2a}, MockResourceList{mockResource1a}) + + deleteNamespaces(ctx, kube, namespace3) + Eventually(func() bool { + _, err = kube.CoreV1().Namespaces().Get(ctx, namespace3, metav1.GetOptions{}) + return apierrors.IsNotFound(err) + }, 15*time.Second, 1*time.Second).Should(BeTrue()) + }) }) + + Context("use different resource namespace listers", func() { + BeforeEach(func() { + resourceNamespaceLister = namespace.NewKubeClientResourceNamespaceLister(kube) + emitter = NewTestingEmitter(mockResourceClient, resourceNamespaceLister) + }) + + It("Should work with the Kube Client Namespace Lister", func() { + runNamespacedSelectorsWithWatchNamespaces() + }) + }) + }) diff --git a/test/mocks/v2alpha1/frequently_changing_annotations_resource_client.sk.go b/test/mocks/v2alpha1/frequently_changing_annotations_resource_client.sk.go index 70c60a40d..0aa490399 100644 --- a/test/mocks/v2alpha1/frequently_changing_annotations_resource_client.sk.go +++ b/test/mocks/v2alpha1/frequently_changing_annotations_resource_client.sk.go @@ -19,6 +19,7 @@ type FrequentlyChangingAnnotationsResourceWatcher interface { type FrequentlyChangingAnnotationsResourceClient interface { BaseClient() clients.ResourceClient Register() error + RegisterNamespace(namespace string) error Read(namespace, name string, opts clients.ReadOpts) (*FrequentlyChangingAnnotationsResource, error) Write(resource *FrequentlyChangingAnnotationsResource, opts clients.WriteOpts) (*FrequentlyChangingAnnotationsResource, error) Delete(namespace, name string, opts clients.DeleteOpts) error @@ -59,6 +60,10 @@ func (client *frequentlyChangingAnnotationsResourceClient) Register() error { return client.rc.Register() } +func (client *frequentlyChangingAnnotationsResourceClient) RegisterNamespace(namespace string) error { + return client.rc.RegisterNamespace(namespace) +} + func (client *frequentlyChangingAnnotationsResourceClient) Read(namespace, name string, opts clients.ReadOpts) (*FrequentlyChangingAnnotationsResource, error) { opts = opts.WithDefaults() diff --git a/test/mocks/v2alpha1/mock_resource_client.sk.go b/test/mocks/v2alpha1/mock_resource_client.sk.go index 05b2cac3e..993b4ba26 100644 --- a/test/mocks/v2alpha1/mock_resource_client.sk.go +++ b/test/mocks/v2alpha1/mock_resource_client.sk.go @@ -19,6 +19,7 @@ type MockResourceWatcher interface { type MockResourceClient interface { BaseClient() clients.ResourceClient Register() error + RegisterNamespace(namespace string) error Read(namespace, name string, opts clients.ReadOpts) (*MockResource, error) Write(resource *MockResource, opts clients.WriteOpts) (*MockResource, error) Delete(namespace, name 
string, opts clients.DeleteOpts) error @@ -59,6 +60,10 @@ func (client *mockResourceClient) Register() error { return client.rc.Register() } +func (client *mockResourceClient) RegisterNamespace(namespace string) error { + return client.rc.RegisterNamespace(namespace) +} + func (client *mockResourceClient) Read(namespace, name string, opts clients.ReadOpts) (*MockResource, error) { opts = opts.WithDefaults() diff --git a/test/mocks/v2alpha1/mock_resource_client_test.go b/test/mocks/v2alpha1/mock_resource_client_test.go index c0f1fd56e..eba59bcc5 100644 --- a/test/mocks/v2alpha1/mock_resource_client_test.go +++ b/test/mocks/v2alpha1/mock_resource_client_test.go @@ -1,6 +1,5 @@ // Code generated by solo-kit. DO NOT EDIT. -//go:build solokit // +build solokit package v2alpha1 diff --git a/test/mocks/v2alpha1/testing_event_loop_test.go b/test/mocks/v2alpha1/testing_event_loop_test.go index 3985d62d8..867460177 100644 --- a/test/mocks/v2alpha1/testing_event_loop_test.go +++ b/test/mocks/v2alpha1/testing_event_loop_test.go @@ -1,6 +1,5 @@ // Code generated by solo-kit. DO NOT EDIT. -//go:build solokit // +build solokit package v2alpha1 @@ -14,9 +13,12 @@ import ( . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" + skNamespace "github.com/solo-io/solo-kit/pkg/api/external/kubernetes/namespace" "github.com/solo-io/solo-kit/pkg/api/v1/clients" "github.com/solo-io/solo-kit/pkg/api/v1/clients/factory" + "github.com/solo-io/solo-kit/pkg/api/v1/clients/kube/cache" "github.com/solo-io/solo-kit/pkg/api/v1/clients/memory" + "github.com/solo-io/solo-kit/test/helpers" ) var _ = Describe("TestingEventLoop", func() { @@ -30,6 +32,11 @@ var _ = Describe("TestingEventLoop", func() { BeforeEach(func() { ctx = context.Background() + kube := helpers.MustKubeClient() + kubeCache, err := cache.NewKubeCoreCache(context.TODO(), kube) + Expect(err).NotTo(HaveOccurred()) + resourceNamespaceLister := skNamespace.NewKubeClientCacheResourceNamespaceLister(kube, kubeCache) + mockResourceClientFactory := &factory.MemoryResourceClientFactory{ Cache: memory.NewInMemoryResourceCache(), } @@ -48,7 +55,7 @@ var _ = Describe("TestingEventLoop", func() { fakeResourceClient, err := testing_solo_io.NewFakeResourceClient(ctx, fakeResourceClientFactory) Expect(err).NotTo(HaveOccurred()) - emitter = NewTestingEmitter(mockResourceClient, frequentlyChangingAnnotationsResourceClient, fakeResourceClient) + emitter = NewTestingEmitter(mockResourceClient, frequentlyChangingAnnotationsResourceClient, fakeResourceClient, resourceNamespaceLister) }) It("runs sync function on a new snapshot", func() { _, err = emitter.MockResource().Write(NewMockResource(namespace, "jerry"), clients.WriteOpts{}) diff --git a/test/mocks/v2alpha1/testing_snapshot_emitter.sk.go b/test/mocks/v2alpha1/testing_snapshot_emitter.sk.go index fab0272bf..cf86b012b 100644 --- a/test/mocks/v2alpha1/testing_snapshot_emitter.sk.go +++ b/test/mocks/v2alpha1/testing_snapshot_emitter.sk.go @@ -14,6 +14,7 @@ import ( "go.uber.org/zap" "github.com/solo-io/solo-kit/pkg/api/v1/clients" + "github.com/solo-io/solo-kit/pkg/api/v1/resources" "github.com/solo-io/solo-kit/pkg/errors" skstats "github.com/solo-io/solo-kit/pkg/stats" @@ -87,15 +88,16 @@ type TestingEmitter interface { FakeResource() testing_solo_io.FakeResourceClient } -func NewTestingEmitter(mockResourceClient MockResourceClient, frequentlyChangingAnnotationsResourceClient FrequentlyChangingAnnotationsResourceClient, fakeResourceClient testing_solo_io.FakeResourceClient) TestingEmitter { - return 
NewTestingEmitterWithEmit(mockResourceClient, frequentlyChangingAnnotationsResourceClient, fakeResourceClient, make(chan struct{})) +func NewTestingEmitter(mockResourceClient MockResourceClient, frequentlyChangingAnnotationsResourceClient FrequentlyChangingAnnotationsResourceClient, fakeResourceClient testing_solo_io.FakeResourceClient, resourceNamespaceLister resources.ResourceNamespaceLister) TestingEmitter { + return NewTestingEmitterWithEmit(mockResourceClient, frequentlyChangingAnnotationsResourceClient, fakeResourceClient, resourceNamespaceLister, make(chan struct{})) } -func NewTestingEmitterWithEmit(mockResourceClient MockResourceClient, frequentlyChangingAnnotationsResourceClient FrequentlyChangingAnnotationsResourceClient, fakeResourceClient testing_solo_io.FakeResourceClient, emit <-chan struct{}) TestingEmitter { +func NewTestingEmitterWithEmit(mockResourceClient MockResourceClient, frequentlyChangingAnnotationsResourceClient FrequentlyChangingAnnotationsResourceClient, fakeResourceClient testing_solo_io.FakeResourceClient, resourceNamespaceLister resources.ResourceNamespaceLister, emit <-chan struct{}) TestingEmitter { return &testingEmitter{ mockResource: mockResourceClient, frequentlyChangingAnnotationsResource: frequentlyChangingAnnotationsResourceClient, fakeResource: fakeResourceClient, + resourceNamespaceLister: resourceNamespaceLister, forceEmit: emit, } } @@ -105,6 +107,14 @@ type testingEmitter struct { mockResource MockResourceClient frequentlyChangingAnnotationsResource FrequentlyChangingAnnotationsResourceClient fakeResource testing_solo_io.FakeResourceClient + // resourceNamespaceLister is used to watch for new namespaces when they are created. + // It is used when Expression Selector is in the Watch Opts set in Snapshot(). + resourceNamespaceLister resources.ResourceNamespaceLister + // namespacesWatching is the set of namespaces that we are watching. This is helpful + // when Expression Selector is set on the Watch Opts in Snapshot(). + namespacesWatching sync.Map + // updateNamespaces is used to perform locks and unlocks when watches on namespaces are being updated/created + updateNamespaces sync.Mutex } func (c *testingEmitter) Register() error { @@ -132,6 +142,14 @@ func (c *testingEmitter) FakeResource() testing_solo_io.FakeResourceClient { return c.fakeResource } +// Snapshots will return a channel that can be used to receive snapshots of the +// state of the resources it is watching +// when watching resources, you can set the watchNamespaces, and you can set the +// ExpressionSelector of the WatchOpts. Setting watchNamespaces will watch for all resources +// that are in the specified namespaces. In addition if ExpressionSelector of the WatchOpts is +// set, then all namespaces that meet the label criteria of the ExpressionSelector will +// also be watched. If Expression Selector is set and watched namespaces is set to [""], then it +// will only watch namespaces that meet the label expression selector criteria. 
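To make the watch semantics described above concrete, here is a rough, illustrative sketch of how a caller might drive this generated emitter with both an explicit watched namespace and an expression selector. It is not part of the patch: the wrapper function, the "watched-ns" literal, and the package/import framing are assumptions; only TestingEmitter, clients.WatchOpts, and the "env in (test)" selector used by the tests come from this change.

package v2alpha1 // sketch only: assumes it sits alongside the generated emitter code

import (
	"context"
	"time"

	"github.com/solo-io/go-utils/contextutils"
	"github.com/solo-io/solo-kit/pkg/api/v1/clients"
	"k8s.io/apimachinery/pkg/labels"
)

// watchWithSelector is a hypothetical helper, not part of this change.
func watchWithSelector(ctx context.Context, emitter TestingEmitter) error {
	// "env in (test)" is a set-based selector: a namespace labeled env=test
	// satisfies it, so the emitter watches it in addition to "watched-ns".
	if sel, err := labels.Parse("env in (test)"); err == nil {
		_ = sel.Matches(labels.Set{"env": "test"}) // true
	}

	snapshots, errs, err := emitter.Snapshots([]string{"watched-ns"}, clients.WatchOpts{
		Ctx:                ctx,
		RefreshRate:        time.Second,
		ExpressionSelector: "env in (test)",
	})
	if err != nil {
		return err
	}
	for {
		select {
		case snap, ok := <-snapshots:
			if !ok {
				return nil
			}
			_ = snap.Mocks // merged, sorted lists across watched and labeled namespaces
		case err := <-errs:
			contextutils.LoggerFrom(ctx).Warnf("emitter error: %v", err)
		case <-ctx.Done():
			return nil
		}
	}
}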
func (c *testingEmitter) Snapshots(watchNamespaces []string, opts clients.WatchOpts) (<-chan *TestingSnapshot, <-chan error, error) { if len(watchNamespaces) == 0 { @@ -146,15 +164,20 @@ func (c *testingEmitter) Snapshots(watchNamespaces []string, opts clients.WatchO } errs := make(chan error) + hasWatchedNamespaces := len(watchNamespaces) > 1 || (len(watchNamespaces) == 1 && watchNamespaces[0] != "") + watchingLabeledNamespaces := !(opts.ExpressionSelector == "") var done sync.WaitGroup ctx := opts.Ctx + + // setting up the options for both listing and watching resources in namespaces + watchedNamespacesListOptions := clients.ListOpts{Ctx: opts.Ctx, Selector: opts.Selector} + watchedNamespacesWatchOptions := clients.WatchOpts{Ctx: opts.Ctx, Selector: opts.Selector} /* Create channel for MockResource */ type mockResourceListWithNamespace struct { list MockResourceList namespace string } mockResourceChan := make(chan mockResourceListWithNamespace) - var initialMockResourceList MockResourceList /* Create channel for FrequentlyChangingAnnotationsResource */ type frequentlyChangingAnnotationsResourceListWithNamespace struct { @@ -162,7 +185,6 @@ func (c *testingEmitter) Snapshots(watchNamespaces []string, opts clients.WatchO namespace string } frequentlyChangingAnnotationsResourceChan := make(chan frequentlyChangingAnnotationsResourceListWithNamespace) - var initialFrequentlyChangingAnnotationsResourceList FrequentlyChangingAnnotationsResourceList /* Create channel for FakeResource */ type fakeResourceListWithNamespace struct { @@ -170,109 +192,447 @@ func (c *testingEmitter) Snapshots(watchNamespaces []string, opts clients.WatchO namespace string } fakeResourceChan := make(chan fakeResourceListWithNamespace) - var initialFakeResourceList testing_solo_io.FakeResourceList currentSnapshot := TestingSnapshot{} - mocksByNamespace := make(map[string]MockResourceList) - fcarsByNamespace := make(map[string]FrequentlyChangingAnnotationsResourceList) - fakesByNamespace := make(map[string]testing_solo_io.FakeResourceList) - - for _, namespace := range watchNamespaces { - /* Setup namespaced watch for MockResource */ - { - mocks, err := c.mockResource.List(namespace, clients.ListOpts{Ctx: opts.Ctx, Selector: opts.Selector}) + mocksByNamespace := sync.Map{} + fcarsByNamespace := sync.Map{} + fakesByNamespace := sync.Map{} + if hasWatchedNamespaces || !watchingLabeledNamespaces { + // then watch all resources on watch Namespaces + + // watched namespaces + for _, namespace := range watchNamespaces { + /* Setup namespaced watch for MockResource */ + { + mocks, err := c.mockResource.List(namespace, watchedNamespacesListOptions) + if err != nil { + return nil, nil, errors.Wrapf(err, "initial MockResource list") + } + initialMockResourceList = append(initialMockResourceList, mocks...) 
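+ // remember this namespace's initial list; the merge logic further below ranges over this sync.Map to rebuild the snapshot whenever any watched namespace reports a change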
+ mocksByNamespace.Store(namespace, mocks) + } + mockResourceNamespacesChan, mockResourceErrs, err := c.mockResource.Watch(namespace, watchedNamespacesWatchOptions) + if err != nil { + return nil, nil, errors.Wrapf(err, "starting MockResource watch") + } + + done.Add(1) + go func(namespace string) { + defer done.Done() + errutils.AggregateErrs(ctx, errs, mockResourceErrs, namespace+"-mocks") + }(namespace) + /* Setup namespaced watch for FrequentlyChangingAnnotationsResource */ + { + fcars, err := c.frequentlyChangingAnnotationsResource.List(namespace, watchedNamespacesListOptions) + if err != nil { + return nil, nil, errors.Wrapf(err, "initial FrequentlyChangingAnnotationsResource list") + } + initialFrequentlyChangingAnnotationsResourceList = append(initialFrequentlyChangingAnnotationsResourceList, fcars...) + fcarsByNamespace.Store(namespace, fcars) + } + frequentlyChangingAnnotationsResourceNamespacesChan, frequentlyChangingAnnotationsResourceErrs, err := c.frequentlyChangingAnnotationsResource.Watch(namespace, watchedNamespacesWatchOptions) + if err != nil { + return nil, nil, errors.Wrapf(err, "starting FrequentlyChangingAnnotationsResource watch") + } + + done.Add(1) + go func(namespace string) { + defer done.Done() + errutils.AggregateErrs(ctx, errs, frequentlyChangingAnnotationsResourceErrs, namespace+"-fcars") + }(namespace) + /* Setup namespaced watch for FakeResource */ + { + fakes, err := c.fakeResource.List(namespace, watchedNamespacesListOptions) + if err != nil { + return nil, nil, errors.Wrapf(err, "initial FakeResource list") + } + initialFakeResourceList = append(initialFakeResourceList, fakes...) + fakesByNamespace.Store(namespace, fakes) + } + fakeResourceNamespacesChan, fakeResourceErrs, err := c.fakeResource.Watch(namespace, watchedNamespacesWatchOptions) if err != nil { - return nil, nil, errors.Wrapf(err, "initial MockResource list") + return nil, nil, errors.Wrapf(err, "starting FakeResource watch") } - initialMockResourceList = append(initialMockResourceList, mocks...) 
- mocksByNamespace[namespace] = mocks + + done.Add(1) + go func(namespace string) { + defer done.Done() + errutils.AggregateErrs(ctx, errs, fakeResourceErrs, namespace+"-fakes") + }(namespace) + /* Watch for changes and update snapshot */ + go func(namespace string) { + defer func() { + c.namespacesWatching.Delete(namespace) + }() + c.namespacesWatching.Store(namespace, true) + for { + select { + case <-ctx.Done(): + return + case mockResourceList, ok := <-mockResourceNamespacesChan: + if !ok { + return + } + select { + case <-ctx.Done(): + return + case mockResourceChan <- mockResourceListWithNamespace{list: mockResourceList, namespace: namespace}: + } + case frequentlyChangingAnnotationsResourceList, ok := <-frequentlyChangingAnnotationsResourceNamespacesChan: + if !ok { + return + } + select { + case <-ctx.Done(): + return + case frequentlyChangingAnnotationsResourceChan <- frequentlyChangingAnnotationsResourceListWithNamespace{list: frequentlyChangingAnnotationsResourceList, namespace: namespace}: + } + case fakeResourceList, ok := <-fakeResourceNamespacesChan: + if !ok { + return + } + select { + case <-ctx.Done(): + return + case fakeResourceChan <- fakeResourceListWithNamespace{list: fakeResourceList, namespace: namespace}: + } + } + } + }(namespace) } - mockResourceNamespacesChan, mockResourceErrs, err := c.mockResource.Watch(namespace, opts) - if err != nil { - return nil, nil, errors.Wrapf(err, "starting MockResource watch") + } + // watch all other namespaces that are labeled and fit the Expression Selector + if opts.ExpressionSelector != "" { + // watch resources of non-watched namespaces that fit the expression selectors + namespaceListOptions := resources.ResourceNamespaceListOptions{ + Ctx: opts.Ctx, + ExpressionSelector: opts.ExpressionSelector, + } + namespaceWatchOptions := resources.ResourceNamespaceWatchOptions{ + Ctx: opts.Ctx, + ExpressionSelector: opts.ExpressionSelector, } - done.Add(1) - go func(namespace string) { - defer done.Done() - errutils.AggregateErrs(ctx, errs, mockResourceErrs, namespace+"-mocks") - }(namespace) - /* Setup namespaced watch for FrequentlyChangingAnnotationsResource */ - { - fcars, err := c.frequentlyChangingAnnotationsResource.List(namespace, clients.ListOpts{Ctx: opts.Ctx, Selector: opts.Selector}) - if err != nil { - return nil, nil, errors.Wrapf(err, "initial FrequentlyChangingAnnotationsResource list") + filterNamespaces := resources.ResourceNamespaceList{} + for _, ns := range watchNamespaces { + // we do not want to filter out "" which equals all namespaces + // the reason is because we will never create a watch on ""(all namespaces) because + // doing so means we watch all resources regardless of namespace. Our intent is to + // watch only certain namespaces. + if ns != "" { + filterNamespaces = append(filterNamespaces, resources.ResourceNamespace{Name: ns}) } - initialFrequentlyChangingAnnotationsResourceList = append(initialFrequentlyChangingAnnotationsResourceList, fcars...) 
- fcarsByNamespace[namespace] = fcars } - frequentlyChangingAnnotationsResourceNamespacesChan, frequentlyChangingAnnotationsResourceErrs, err := c.frequentlyChangingAnnotationsResource.Watch(namespace, opts) + namespacesResources, err := c.resourceNamespaceLister.GetResourceNamespaceList(namespaceListOptions, filterNamespaces) if err != nil { - return nil, nil, errors.Wrapf(err, "starting FrequentlyChangingAnnotationsResource watch") + return nil, nil, err } + newlyRegisteredNamespaces := make([]string, len(namespacesResources)) + // non watched namespaces that are labeled + for i, resourceNamespace := range namespacesResources { + c.namespacesWatching.Load(resourceNamespace) + namespace := resourceNamespace.Name + newlyRegisteredNamespaces[i] = namespace + err = c.mockResource.RegisterNamespace(namespace) + if err != nil { + return nil, nil, errors.Wrapf(err, "there was an error registering the namespace to the mockResource") + } + /* Setup namespaced watch for MockResource */ + { + mocks, err := c.mockResource.List(namespace, clients.ListOpts{Ctx: opts.Ctx}) + if err != nil { + return nil, nil, errors.Wrapf(err, "initial MockResource list with new namespace") + } + initialMockResourceList = append(initialMockResourceList, mocks...) + mocksByNamespace.Store(namespace, mocks) + } + mockResourceNamespacesChan, mockResourceErrs, err := c.mockResource.Watch(namespace, clients.WatchOpts{Ctx: opts.Ctx}) + if err != nil { + return nil, nil, errors.Wrapf(err, "starting MockResource watch") + } + + done.Add(1) + go func(namespace string) { + defer done.Done() + errutils.AggregateErrs(ctx, errs, mockResourceErrs, namespace+"-mocks") + }(namespace) + err = c.frequentlyChangingAnnotationsResource.RegisterNamespace(namespace) + if err != nil { + return nil, nil, errors.Wrapf(err, "there was an error registering the namespace to the frequentlyChangingAnnotationsResource") + } + /* Setup namespaced watch for FrequentlyChangingAnnotationsResource */ + { + fcars, err := c.frequentlyChangingAnnotationsResource.List(namespace, clients.ListOpts{Ctx: opts.Ctx}) + if err != nil { + return nil, nil, errors.Wrapf(err, "initial FrequentlyChangingAnnotationsResource list with new namespace") + } + initialFrequentlyChangingAnnotationsResourceList = append(initialFrequentlyChangingAnnotationsResourceList, fcars...) + fcarsByNamespace.Store(namespace, fcars) + } + frequentlyChangingAnnotationsResourceNamespacesChan, frequentlyChangingAnnotationsResourceErrs, err := c.frequentlyChangingAnnotationsResource.Watch(namespace, clients.WatchOpts{Ctx: opts.Ctx}) + if err != nil { + return nil, nil, errors.Wrapf(err, "starting FrequentlyChangingAnnotationsResource watch") + } - done.Add(1) - go func(namespace string) { - defer done.Done() - errutils.AggregateErrs(ctx, errs, frequentlyChangingAnnotationsResourceErrs, namespace+"-fcars") - }(namespace) - /* Setup namespaced watch for FakeResource */ - { - fakes, err := c.fakeResource.List(namespace, clients.ListOpts{Ctx: opts.Ctx, Selector: opts.Selector}) + done.Add(1) + go func(namespace string) { + defer done.Done() + errutils.AggregateErrs(ctx, errs, frequentlyChangingAnnotationsResourceErrs, namespace+"-fcars") + }(namespace) + err = c.fakeResource.RegisterNamespace(namespace) if err != nil { - return nil, nil, errors.Wrapf(err, "initial FakeResource list") + return nil, nil, errors.Wrapf(err, "there was an error registering the namespace to the fakeResource") } - initialFakeResourceList = append(initialFakeResourceList, fakes...) 
- fakesByNamespace[namespace] = fakes + /* Setup namespaced watch for FakeResource */ + { + fakes, err := c.fakeResource.List(namespace, clients.ListOpts{Ctx: opts.Ctx}) + if err != nil { + return nil, nil, errors.Wrapf(err, "initial FakeResource list with new namespace") + } + initialFakeResourceList = append(initialFakeResourceList, fakes...) + fakesByNamespace.Store(namespace, fakes) + } + fakeResourceNamespacesChan, fakeResourceErrs, err := c.fakeResource.Watch(namespace, clients.WatchOpts{Ctx: opts.Ctx}) + if err != nil { + return nil, nil, errors.Wrapf(err, "starting FakeResource watch") + } + + done.Add(1) + go func(namespace string) { + defer done.Done() + errutils.AggregateErrs(ctx, errs, fakeResourceErrs, namespace+"-fakes") + }(namespace) + /* Watch for changes and update snapshot */ + go func(namespace string) { + for { + select { + case <-ctx.Done(): + return + case mockResourceList, ok := <-mockResourceNamespacesChan: + if !ok { + return + } + select { + case <-ctx.Done(): + return + case mockResourceChan <- mockResourceListWithNamespace{list: mockResourceList, namespace: namespace}: + } + case frequentlyChangingAnnotationsResourceList, ok := <-frequentlyChangingAnnotationsResourceNamespacesChan: + if !ok { + return + } + select { + case <-ctx.Done(): + return + case frequentlyChangingAnnotationsResourceChan <- frequentlyChangingAnnotationsResourceListWithNamespace{list: frequentlyChangingAnnotationsResourceList, namespace: namespace}: + } + case fakeResourceList, ok := <-fakeResourceNamespacesChan: + if !ok { + return + } + select { + case <-ctx.Done(): + return + case fakeResourceChan <- fakeResourceListWithNamespace{list: fakeResourceList, namespace: namespace}: + } + } + } + }(namespace) } - fakeResourceNamespacesChan, fakeResourceErrs, err := c.fakeResource.Watch(namespace, opts) - if err != nil { - return nil, nil, errors.Wrapf(err, "starting FakeResource watch") + if len(newlyRegisteredNamespaces) > 0 { + contextutils.LoggerFrom(ctx).Infof("registered the new namespace %v", newlyRegisteredNamespaces) } - done.Add(1) - go func(namespace string) { - defer done.Done() - errutils.AggregateErrs(ctx, errs, fakeResourceErrs, namespace+"-fakes") - }(namespace) + // create watch on all namespaces, so that we can add all resources from new namespaces + // we will be watching namespaces that meet the Expression Selector filter - /* Watch for changes and update snapshot */ - go func(namespace string) { + namespaceWatch, errsReceiver, err := c.resourceNamespaceLister.GetResourceNamespaceWatch(namespaceWatchOptions, filterNamespaces) + if err != nil { + return nil, nil, err + } + if errsReceiver != nil { + go func() { + for { + select { + case <-ctx.Done(): + return + case err = <-errsReceiver: + errs <- errors.Wrapf(err, "received error from watch on resource namespaces") + } + } + }() + } + + go func() { for { select { case <-ctx.Done(): return - case mockResourceList, ok := <-mockResourceNamespacesChan: + case resourceNamespaces, ok := <-namespaceWatch: if !ok { return } - select { - case <-ctx.Done(): - return - case mockResourceChan <- mockResourceListWithNamespace{list: mockResourceList, namespace: namespace}: + // get the list of new namespaces, if there is a new namespace + // get the list of resources from that namespace, and add + // a watch for new resources created/deleted on that namespace + c.updateNamespaces.Lock() + + // get the new namespaces, and get a map of the namespaces + mapOfResourceNamespaces := make(map[string]struct{}, len(resourceNamespaces)) + 
newNamespaces := []string{} + for _, ns := range resourceNamespaces { + if _, hit := c.namespacesWatching.Load(ns.Name); !hit { + newNamespaces = append(newNamespaces, ns.Name) + } + mapOfResourceNamespaces[ns.Name] = struct{}{} } - case frequentlyChangingAnnotationsResourceList, ok := <-frequentlyChangingAnnotationsResourceNamespacesChan: - if !ok { - return + + for _, ns := range watchNamespaces { + mapOfResourceNamespaces[ns] = struct{}{} } - select { - case <-ctx.Done(): - return - case frequentlyChangingAnnotationsResourceChan <- frequentlyChangingAnnotationsResourceListWithNamespace{list: frequentlyChangingAnnotationsResourceList, namespace: namespace}: + + missingNamespaces := []string{} + // use the map of namespace resources to find missing/deleted namespaces + c.namespacesWatching.Range(func(key interface{}, value interface{}) bool { + name := key.(string) + if _, hit := mapOfResourceNamespaces[name]; !hit { + missingNamespaces = append(missingNamespaces, name) + } + return true + }) + + for _, ns := range missingNamespaces { + mockResourceChan <- mockResourceListWithNamespace{list: MockResourceList{}, namespace: ns} + frequentlyChangingAnnotationsResourceChan <- frequentlyChangingAnnotationsResourceListWithNamespace{list: FrequentlyChangingAnnotationsResourceList{}, namespace: ns} + fakeResourceChan <- fakeResourceListWithNamespace{list: testing_solo_io.FakeResourceList{}, namespace: ns} } - case fakeResourceList, ok := <-fakeResourceNamespacesChan: - if !ok { - return + + for _, namespace := range newNamespaces { + var err error + err = c.mockResource.RegisterNamespace(namespace) + if err != nil { + errs <- errors.Wrapf(err, "there was an error registering the namespace to the mockResource") + continue + } + /* Setup namespaced watch for MockResource for new namespace */ + { + mocks, err := c.mockResource.List(namespace, clients.ListOpts{Ctx: opts.Ctx, Selector: opts.Selector}) + if err != nil { + errs <- errors.Wrapf(err, "initial new namespace MockResource list in namespace watch") + continue + } + mocksByNamespace.Store(namespace, mocks) + } + mockResourceNamespacesChan, mockResourceErrs, err := c.mockResource.Watch(namespace, clients.WatchOpts{Ctx: opts.Ctx, Selector: opts.Selector}) + if err != nil { + errs <- errors.Wrapf(err, "starting new namespace MockResource watch") + continue + } + + done.Add(1) + go func(namespace string) { + defer done.Done() + errutils.AggregateErrs(ctx, errs, mockResourceErrs, namespace+"-new-namespace-mocks") + }(namespace) + err = c.frequentlyChangingAnnotationsResource.RegisterNamespace(namespace) + if err != nil { + errs <- errors.Wrapf(err, "there was an error registering the namespace to the frequentlyChangingAnnotationsResource") + continue + } + /* Setup namespaced watch for FrequentlyChangingAnnotationsResource for new namespace */ + { + fcars, err := c.frequentlyChangingAnnotationsResource.List(namespace, clients.ListOpts{Ctx: opts.Ctx, Selector: opts.Selector}) + if err != nil { + errs <- errors.Wrapf(err, "initial new namespace FrequentlyChangingAnnotationsResource list in namespace watch") + continue + } + fcarsByNamespace.Store(namespace, fcars) + } + frequentlyChangingAnnotationsResourceNamespacesChan, frequentlyChangingAnnotationsResourceErrs, err := c.frequentlyChangingAnnotationsResource.Watch(namespace, clients.WatchOpts{Ctx: opts.Ctx, Selector: opts.Selector}) + if err != nil { + errs <- errors.Wrapf(err, "starting new namespace FrequentlyChangingAnnotationsResource watch") + continue + } + + done.Add(1) + go func(namespace 
string) { + defer done.Done() + errutils.AggregateErrs(ctx, errs, frequentlyChangingAnnotationsResourceErrs, namespace+"-new-namespace-fcars") + }(namespace) + err = c.fakeResource.RegisterNamespace(namespace) + if err != nil { + errs <- errors.Wrapf(err, "there was an error registering the namespace to the fakeResource") + continue + } + /* Setup namespaced watch for FakeResource for new namespace */ + { + fakes, err := c.fakeResource.List(namespace, clients.ListOpts{Ctx: opts.Ctx, Selector: opts.Selector}) + if err != nil { + errs <- errors.Wrapf(err, "initial new namespace FakeResource list in namespace watch") + continue + } + fakesByNamespace.Store(namespace, fakes) + } + fakeResourceNamespacesChan, fakeResourceErrs, err := c.fakeResource.Watch(namespace, clients.WatchOpts{Ctx: opts.Ctx, Selector: opts.Selector}) + if err != nil { + errs <- errors.Wrapf(err, "starting new namespace FakeResource watch") + continue + } + + done.Add(1) + go func(namespace string) { + defer done.Done() + errutils.AggregateErrs(ctx, errs, fakeResourceErrs, namespace+"-new-namespace-fakes") + }(namespace) + /* Watch for changes and update snapshot */ + go func(namespace string) { + defer func() { + c.namespacesWatching.Delete(namespace) + }() + c.namespacesWatching.Store(namespace, true) + for { + select { + case <-ctx.Done(): + return + case mockResourceList, ok := <-mockResourceNamespacesChan: + if !ok { + return + } + select { + case <-ctx.Done(): + return + case mockResourceChan <- mockResourceListWithNamespace{list: mockResourceList, namespace: namespace}: + } + case frequentlyChangingAnnotationsResourceList, ok := <-frequentlyChangingAnnotationsResourceNamespacesChan: + if !ok { + return + } + select { + case <-ctx.Done(): + return + case frequentlyChangingAnnotationsResourceChan <- frequentlyChangingAnnotationsResourceListWithNamespace{list: frequentlyChangingAnnotationsResourceList, namespace: namespace}: + } + case fakeResourceList, ok := <-fakeResourceNamespacesChan: + if !ok { + return + } + select { + case <-ctx.Done(): + return + case fakeResourceChan <- fakeResourceListWithNamespace{list: fakeResourceList, namespace: namespace}: + } + } + } + }(namespace) } - select { - case <-ctx.Done(): - return - case fakeResourceChan <- fakeResourceListWithNamespace{list: fakeResourceList, namespace: namespace}: + if len(newNamespaces) > 0 { + contextutils.LoggerFrom(ctx).Infof("registered the new namespace %v", newNamespaces) } + c.updateNamespaces.Unlock() } } - }(namespace) + }() } /* Initialize snapshot for Mocks */ currentSnapshot.Mocks = initialMockResourceList.Sort() @@ -346,11 +706,13 @@ func (c *testingEmitter) Snapshots(watchNamespaces []string, opts clients.WatchO ) // merge lists by namespace - mocksByNamespace[namespace] = mockResourceNamespacedList.list + mocksByNamespace.Store(namespace, mockResourceNamespacedList.list) var mockResourceList MockResourceList - for _, mocks := range mocksByNamespace { + mocksByNamespace.Range(func(key interface{}, value interface{}) bool { + mocks := value.(MockResourceList) mockResourceList = append(mockResourceList, mocks...) 
- } + return true + }) currentSnapshot.Mocks = mockResourceList.Sort() case frequentlyChangingAnnotationsResourceNamespacedList, ok := <-frequentlyChangingAnnotationsResourceChan: if !ok { @@ -368,11 +730,13 @@ func (c *testingEmitter) Snapshots(watchNamespaces []string, opts clients.WatchO ) // merge lists by namespace - fcarsByNamespace[namespace] = frequentlyChangingAnnotationsResourceNamespacedList.list + fcarsByNamespace.Store(namespace, frequentlyChangingAnnotationsResourceNamespacedList.list) var frequentlyChangingAnnotationsResourceList FrequentlyChangingAnnotationsResourceList - for _, fcars := range fcarsByNamespace { - frequentlyChangingAnnotationsResourceList = append(frequentlyChangingAnnotationsResourceList, fcars...) - } + fcarsByNamespace.Range(func(key interface{}, value interface{}) bool { + mocks := value.(FrequentlyChangingAnnotationsResourceList) + frequentlyChangingAnnotationsResourceList = append(frequentlyChangingAnnotationsResourceList, mocks...) + return true + }) currentSnapshot.Fcars = frequentlyChangingAnnotationsResourceList.Sort() case fakeResourceNamespacedList, ok := <-fakeResourceChan: if !ok { @@ -390,11 +754,13 @@ func (c *testingEmitter) Snapshots(watchNamespaces []string, opts clients.WatchO ) // merge lists by namespace - fakesByNamespace[namespace] = fakeResourceNamespacedList.list + fakesByNamespace.Store(namespace, fakeResourceNamespacedList.list) var fakeResourceList testing_solo_io.FakeResourceList - for _, fakes := range fakesByNamespace { - fakeResourceList = append(fakeResourceList, fakes...) - } + fakesByNamespace.Range(func(key interface{}, value interface{}) bool { + mocks := value.(testing_solo_io.FakeResourceList) + fakeResourceList = append(fakeResourceList, mocks...) + return true + }) currentSnapshot.Fakes = fakeResourceList.Sort() } } diff --git a/test/mocks/v2alpha1/testing_snapshot_emitter_test.go b/test/mocks/v2alpha1/testing_snapshot_emitter_test.go index 175bf4e3e..816b72eef 100644 --- a/test/mocks/v2alpha1/testing_snapshot_emitter_test.go +++ b/test/mocks/v2alpha1/testing_snapshot_emitter_test.go @@ -7,6 +7,7 @@ package v2alpha1 import ( "context" + "fmt" "os" "time" @@ -16,13 +17,20 @@ import ( . "github.com/onsi/gomega" "github.com/solo-io/go-utils/log" "github.com/solo-io/k8s-utils/kubeutils" + "github.com/solo-io/solo-kit/pkg/api/external/kubernetes/namespace" "github.com/solo-io/solo-kit/pkg/api/v1/clients" "github.com/solo-io/solo-kit/pkg/api/v1/clients/factory" kuberc "github.com/solo-io/solo-kit/pkg/api/v1/clients/kube" + "github.com/solo-io/solo-kit/pkg/api/v1/clients/kube/cache" "github.com/solo-io/solo-kit/pkg/api/v1/clients/memory" + "github.com/solo-io/solo-kit/pkg/api/v1/resources" + "github.com/solo-io/solo-kit/pkg/api/v1/resources/core" "github.com/solo-io/solo-kit/pkg/utils/statusutils" "github.com/solo-io/solo-kit/test/helpers" + corev1 "k8s.io/api/core/v1" apiext "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" @@ -38,11 +46,21 @@ var _ = Describe("V2Alpha1Emitter", func() { log.Printf("This test creates kubernetes resources and is disabled by default. 
To enable, set RUN_KUBE_TESTS=1 in your env.") return } + + type metadataGetter interface { + GetMetadata() *core.Metadata + } + var ( ctx context.Context - namespace1 string - namespace2 string + namespace1, namespace2 string + namespace3, namespace4 string + namespace5, namespace6 string name1, name2 = "angela" + helpers.RandString(3), "bob" + helpers.RandString(3) + name3, name4 = "susan" + helpers.RandString(3), "jim" + helpers.RandString(3) + name5 = "melisa" + helpers.RandString(3) + labels1 = map[string]string{"env": "test"} + labelExpression1 = "env in (test)" cfg *rest.Config clientset *apiext.Clientset kube kubernetes.Interface @@ -50,81 +68,185 @@ var _ = Describe("V2Alpha1Emitter", func() { mockResourceClient MockResourceClient frequentlyChangingAnnotationsResourceClient FrequentlyChangingAnnotationsResourceClient fakeResourceClient testing_solo_io.FakeResourceClient + resourceNamespaceLister resources.ResourceNamespaceLister + kubeCache cache.KubeCoreCache ) + const ( + TIME_BETWEEN_MESSAGES = 5 + ) + NewMockResourceWithLabels := func(namespace, name string, labels map[string]string) *MockResource { + resource := NewMockResource(namespace, name) + resource.GetMetadata().Labels = labels + return resource + } + NewFrequentlyChangingAnnotationsResourceWithLabels := func(namespace, name string, labels map[string]string) *FrequentlyChangingAnnotationsResource { + resource := NewFrequentlyChangingAnnotationsResource(namespace, name) + resource.GetMetadata().Labels = labels + return resource + } + NewFakeResourceWithLabels := func(namespace, name string, labels map[string]string) *testing_solo_io.FakeResource { + resource := testing_solo_io.NewFakeResource(namespace, name) + resource.GetMetadata().Labels = labels + return resource + } - BeforeEach(func() { - err := os.Setenv(statusutils.PodNamespaceEnvName, "default") + createNamespaces := func(ctx context.Context, kube kubernetes.Interface, namespaces ...string) { + err := kubeutils.CreateNamespacesInParallel(ctx, kube, namespaces...) Expect(err).NotTo(HaveOccurred()) + } - ctx = context.Background() - namespace1 = helpers.RandString(8) - namespace2 = helpers.RandString(8) - kube = helpers.MustKubeClient() - err = kubeutils.CreateNamespacesInParallel(ctx, kube, namespace1, namespace2) - Expect(err).NotTo(HaveOccurred()) - cfg, err = kubeutils.GetConfig("", "") - Expect(err).NotTo(HaveOccurred()) + createNamespaceWithLabel := func(ctx context.Context, kube kubernetes.Interface, namespace string, labels map[string]string) { + _, err := kube.CoreV1().Namespaces().Create(ctx, &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: namespace, + Labels: labels, + }, + }, metav1.CreateOptions{}) + Expect(err).ToNot(HaveOccurred()) + } - clientset, err = apiext.NewForConfig(cfg) + deleteNamespaces := func(ctx context.Context, kube kubernetes.Interface, namespaces ...string) { + err := kubeutils.DeleteNamespacesInParallelBlocking(ctx, kube, namespaces...) Expect(err).NotTo(HaveOccurred()) - // MockResource Constructor - mockResourceClientFactory := &factory.KubeResourceClientFactory{ - Crd: MockResourceCrd, - Cfg: cfg, - SharedCache: kuberc.NewKubeCache(context.TODO()), - } + } - err = helpers.AddAndRegisterCrd(ctx, MockResourceCrd, clientset) - Expect(err).NotTo(HaveOccurred()) + // getNewNamespaces is used to generate new namespace names, so that we do not have to wait + // when deleting namespaces in runNamespacedSelectorsWithWatchNamespaces. 
Since + // runNamespacedSelectorsWithWatchNamespaces uses watchNamespaces set to namespace1 and + // namespace2, this will work. Because the emitter willl only be watching namespaces that are + // labeled. + getNewNamespaces := func() { + namespace3 = helpers.RandString(8) + namespace4 = helpers.RandString(8) + namespace5 = helpers.RandString(8) + namespace6 = helpers.RandString(8) + } - mockResourceClient, err = NewMockResourceClient(ctx, mockResourceClientFactory) - Expect(err).NotTo(HaveOccurred()) - // FrequentlyChangingAnnotationsResource Constructor - frequentlyChangingAnnotationsResourceClientFactory := &factory.MemoryResourceClientFactory{ - Cache: memory.NewInMemoryResourceCache(), + // getNewNamespaces1and2 is used to generate new namespaces for namespace 1 and 2. + // used for the same reason as getNewNamespaces() above + getNewNamespaces1and2 := func() { + namespace1 = helpers.RandString(8) + namespace2 = helpers.RandString(8) + } + + getMapOfNamespaceResources := func(getList func(string) ([]metadataGetter, error)) map[string][]string { + namespaces := []string{namespace1, namespace2, namespace3, namespace4, namespace5, namespace6} + namespaceResources := make(map[string][]string, len(namespaces)) + for _, ns := range namespaces { + list, _ := getList(ns) + for _, snap := range list { + snapMeta := snap.GetMetadata() + if _, hit := namespaceResources[snapMeta.Namespace]; hit { + namespaceResources[snap.GetMetadata().Namespace] = make([]string, 1) + } + namespaceResources[snapMeta.Namespace] = append(namespaceResources[snapMeta.Namespace], snapMeta.Name) + } } + return namespaceResources + } - frequentlyChangingAnnotationsResourceClient, err = NewFrequentlyChangingAnnotationsResourceClient(ctx, frequentlyChangingAnnotationsResourceClientFactory) - Expect(err).NotTo(HaveOccurred()) - // FakeResource Constructor - fakeResourceClientFactory := &factory.MemoryResourceClientFactory{ - Cache: memory.NewInMemoryResourceCache(), + findNonMatchingResources := func(matchList, findList []metadataGetter) map[string][]string { + nonMatching := make(map[string][]string) + for _, snap := range matchList { + snapMeta := snap.GetMetadata() + matched := false + for _, pre := range findList { + preMeta := pre.GetMetadata() + if preMeta.Namespace == snapMeta.Namespace && preMeta.Name == snapMeta.Name { + matched = true + break + } + } + if !matched { + if _, hit := nonMatching[snapMeta.Namespace]; hit { + nonMatching[snap.GetMetadata().Namespace] = make([]string, 1) + } + nonMatching[snapMeta.Namespace] = append(nonMatching[snapMeta.Namespace], snapMeta.Name) + } } + return nonMatching + } - fakeResourceClient, err = testing_solo_io.NewFakeResourceClient(ctx, fakeResourceClientFactory) - Expect(err).NotTo(HaveOccurred()) - emitter = NewTestingEmitter(mockResourceClient, frequentlyChangingAnnotationsResourceClient, fakeResourceClient) - }) - AfterEach(func() { - err := os.Unsetenv(statusutils.PodNamespaceEnvName) - Expect(err).NotTo(HaveOccurred()) + findMatchingResources := func(matchList, findList []metadataGetter) map[string][]string { + matching := make(map[string][]string) + for _, snap := range matchList { + snapMeta := snap.GetMetadata() + matched := false + for _, pre := range findList { + preMeta := pre.GetMetadata() + if preMeta.Namespace == snapMeta.Namespace && preMeta.Name == snapMeta.Name { + matched = true + break + } + } + if matched { + if _, hit := matching[snapMeta.Namespace]; hit { + matching[snap.GetMetadata().Namespace] = make([]string, 1) + } + matching[snapMeta.Namespace] = 
append(matching[snapMeta.Namespace], snapMeta.Name) + } + } + return matching + } - err = kubeutils.DeleteNamespacesInParallelBlocking(ctx, kube, namespace1, namespace2) - Expect(err).NotTo(HaveOccurred()) - }) + getMapOfResources := func(listOfResources []metadataGetter) map[string][]string { + resources := make(map[string][]string) + for _, snap := range listOfResources { + snapMeta := snap.GetMetadata() + if _, hit := resources[snapMeta.Namespace]; hit { + resources[snap.GetMetadata().Namespace] = make([]string, 1) + } + resources[snapMeta.Namespace] = append(resources[snapMeta.Namespace], snapMeta.Name) + } + return resources + } + convertMocksToMetadataGetter := func(rl MockResourceList) []metadataGetter { + listConv := make([]metadataGetter, len(rl)) + for i, r := range rl { + listConv[i] = r + } + return listConv + } + convertFcarsToMetadataGetter := func(rl FrequentlyChangingAnnotationsResourceList) []metadataGetter { + listConv := make([]metadataGetter, len(rl)) + for i, r := range rl { + listConv[i] = r + } + return listConv + } + convertFakesToMetadataGetter := func(rl testing_solo_io.FakeResourceList) []metadataGetter { + listConv := make([]metadataGetter, len(rl)) + for i, r := range rl { + listConv[i] = r + } + return listConv + } - It("tracks snapshots on changes to any resource", func() { + runNamespacedSelectorsWithWatchNamespaces := func() { ctx := context.Background() err := emitter.Register() Expect(err).NotTo(HaveOccurred()) + // There is an error here in the code. snapshots, errs, err := emitter.Snapshots([]string{namespace1, namespace2}, clients.WatchOpts{ - Ctx: ctx, - RefreshRate: time.Second, + Ctx: ctx, + RefreshRate: time.Second, + ExpressionSelector: labelExpression1, }) Expect(err).NotTo(HaveOccurred()) var snap *TestingSnapshot + var previous *TestingSnapshot /* MockResource */ - assertSnapshotMocks := func(expectMocks MockResourceList, unexpectMocks MockResourceList) { drain: for { select { case snap = <-snapshots: + previous = snap for _, expected := range expectMocks { if _, err := snap.Mocks.Find(expected.GetMetadata().Ref().Strings()); err != nil { continue drain @@ -139,49 +261,100 @@ var _ = Describe("V2Alpha1Emitter", func() { case err := <-errs: Expect(err).NotTo(HaveOccurred()) case <-time.After(time.Second * 10): - nsList1, _ := mockResourceClient.List(namespace1, clients.ListOpts{}) - nsList2, _ := mockResourceClient.List(namespace2, clients.ListOpts{}) - combined := append(nsList1, nsList2...) - Fail("expected final snapshot before 10 seconds. expected " + log.Sprintf("%v", combined)) + var expectedResources map[string][]string + var unexpectedResource map[string][]string + + if previous != nil { + expectedResources = findNonMatchingResources(convertMocksToMetadataGetter(expectMocks), convertMocksToMetadataGetter(previous.Mocks)) + unexpectedResource = findMatchingResources(convertMocksToMetadataGetter(unexpectMocks), convertMocksToMetadataGetter(previous.Mocks)) + } else { + expectedResources = getMapOfResources(convertMocksToMetadataGetter(expectMocks)) + unexpectedResource = getMapOfResources(convertMocksToMetadataGetter(unexpectMocks)) + } + getList := func(ns string) ([]metadataGetter, error) { + l, err := mockResourceClient.List(ns, clients.ListOpts{}) + return convertMocksToMetadataGetter(l), err + } + namespaceResources := getMapOfNamespaceResources(getList) + Fail(fmt.Sprintf("expected final snapshot before 10 seconds. 
expected \nExpected:\n%#v\n\nUnexpected:\n%#v\n\nnamespaces:\n%#v", expectedResources, unexpectedResource, namespaceResources)) } } } + mockResource1a, err := mockResourceClient.Write(NewMockResource(namespace1, name1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) mockResource1b, err := mockResourceClient.Write(NewMockResource(namespace2, name1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) + mockResourceWatched := MockResourceList{mockResource1a, mockResource1b} + assertSnapshotMocks(mockResourceWatched, nil) - assertSnapshotMocks(MockResourceList{mockResource1a, mockResource1b}, nil) - mockResource2a, err := mockResourceClient.Write(NewMockResource(namespace1, name2), clients.WriteOpts{Ctx: ctx}) + mockResource3a, err := mockResourceClient.Write(NewMockResourceWithLabels(namespace1, name3, labels1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - mockResource2b, err := mockResourceClient.Write(NewMockResource(namespace2, name2), clients.WriteOpts{Ctx: ctx}) + mockResource3b, err := mockResourceClient.Write(NewMockResourceWithLabels(namespace2, name3, labels1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) + mockResourceWatched = append(mockResourceWatched, MockResourceList{mockResource3a, mockResource3b}...) + assertSnapshotMocks(mockResourceWatched, nil) - assertSnapshotMocks(MockResourceList{mockResource1a, mockResource1b, mockResource2a, mockResource2b}, nil) + createNamespaceWithLabel(ctx, kube, namespace3, labels1) + createNamespaces(ctx, kube, namespace4) - err = mockResourceClient.Delete(mockResource2a.GetMetadata().Namespace, mockResource2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + mockResource4a, err := mockResourceClient.Write(NewMockResource(namespace3, name1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - err = mockResourceClient.Delete(mockResource2b.GetMetadata().Namespace, mockResource2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + mockResource4b, err := mockResourceClient.Write(NewMockResource(namespace4, name1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) + mockResourceWatched = append(mockResourceWatched, mockResource4a) + mockResourceNotWatched := MockResourceList{mockResource4b} + assertSnapshotMocks(mockResourceWatched, mockResourceNotWatched) - assertSnapshotMocks(MockResourceList{mockResource1a, mockResource1b}, MockResourceList{mockResource2a, mockResource2b}) + mockResource5a, err := mockResourceClient.Write(NewMockResourceWithLabels(namespace3, name2, labels1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockResource5b, err := mockResourceClient.Write(NewMockResourceWithLabels(namespace4, name2, labels1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockResourceWatched = append(mockResourceWatched, mockResource5a) + mockResourceNotWatched = append(mockResourceNotWatched, mockResource5b) + assertSnapshotMocks(mockResourceWatched, mockResourceNotWatched) + + for _, r := range mockResourceNotWatched { + err = mockResourceClient.Delete(r.GetMetadata().Namespace, r.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + } err = mockResourceClient.Delete(mockResource1a.GetMetadata().Namespace, mockResource1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) err = mockResourceClient.Delete(mockResource1b.GetMetadata().Namespace, mockResource1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) + 
mockResourceNotWatched = append(mockResourceNotWatched, MockResourceList{mockResource1a, mockResource1b}...) + mockResourceWatched = MockResourceList{mockResource3a, mockResource3b, mockResource4a, mockResource5a} + assertSnapshotMocks(mockResourceWatched, mockResourceNotWatched) + + err = mockResourceClient.Delete(mockResource3a.GetMetadata().Namespace, mockResource3a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = mockResourceClient.Delete(mockResource3b.GetMetadata().Namespace, mockResource3b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockResourceNotWatched = append(mockResourceNotWatched, MockResourceList{mockResource3a, mockResource3b}...) + mockResourceWatched = MockResourceList{mockResource4a, mockResource5a} + assertSnapshotMocks(mockResourceWatched, mockResourceNotWatched) + + err = mockResourceClient.Delete(mockResource4a.GetMetadata().Namespace, mockResource4a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = mockResourceClient.Delete(mockResource5a.GetMetadata().Namespace, mockResource5a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockResourceNotWatched = append(mockResourceNotWatched, MockResourceList{mockResource5a, mockResource5b}...) + assertSnapshotMocks(nil, mockResourceNotWatched) - assertSnapshotMocks(nil, MockResourceList{mockResource1a, mockResource1b, mockResource2a, mockResource2b}) + // clean up environment + deleteNamespaces(ctx, kube, namespace3, namespace4) + getNewNamespaces() /* FrequentlyChangingAnnotationsResource */ - assertSnapshotFcars := func(expectFcars FrequentlyChangingAnnotationsResourceList, unexpectFcars FrequentlyChangingAnnotationsResourceList) { drain: for { select { case snap = <-snapshots: + previous = snap for _, expected := range expectFcars { if _, err := snap.Fcars.Find(expected.GetMetadata().Ref().Strings()); err != nil { continue drain @@ -196,49 +369,100 @@ var _ = Describe("V2Alpha1Emitter", func() { case err := <-errs: Expect(err).NotTo(HaveOccurred()) case <-time.After(time.Second * 10): - nsList1, _ := frequentlyChangingAnnotationsResourceClient.List(namespace1, clients.ListOpts{}) - nsList2, _ := frequentlyChangingAnnotationsResourceClient.List(namespace2, clients.ListOpts{}) - combined := append(nsList1, nsList2...) - Fail("expected final snapshot before 10 seconds. expected " + log.Sprintf("%v", combined)) + var expectedResources map[string][]string + var unexpectedResource map[string][]string + + if previous != nil { + expectedResources = findNonMatchingResources(convertFcarsToMetadataGetter(expectFcars), convertFcarsToMetadataGetter(previous.Fcars)) + unexpectedResource = findMatchingResources(convertFcarsToMetadataGetter(unexpectFcars), convertFcarsToMetadataGetter(previous.Fcars)) + } else { + expectedResources = getMapOfResources(convertFcarsToMetadataGetter(expectFcars)) + unexpectedResource = getMapOfResources(convertFcarsToMetadataGetter(unexpectFcars)) + } + getList := func(ns string) ([]metadataGetter, error) { + l, err := frequentlyChangingAnnotationsResourceClient.List(ns, clients.ListOpts{}) + return convertFcarsToMetadataGetter(l), err + } + namespaceResources := getMapOfNamespaceResources(getList) + Fail(fmt.Sprintf("expected final snapshot before 10 seconds. 
expected \nExpected:\n%#v\n\nUnexpected:\n%#v\n\nnamespaces:\n%#v", expectedResources, unexpectedResource, namespaceResources)) } } } + frequentlyChangingAnnotationsResource1a, err := frequentlyChangingAnnotationsResourceClient.Write(NewFrequentlyChangingAnnotationsResource(namespace1, name1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) frequentlyChangingAnnotationsResource1b, err := frequentlyChangingAnnotationsResourceClient.Write(NewFrequentlyChangingAnnotationsResource(namespace2, name1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) + frequentlyChangingAnnotationsResourceWatched := FrequentlyChangingAnnotationsResourceList{frequentlyChangingAnnotationsResource1a, frequentlyChangingAnnotationsResource1b} + assertSnapshotFcars(frequentlyChangingAnnotationsResourceWatched, nil) - assertSnapshotFcars(FrequentlyChangingAnnotationsResourceList{frequentlyChangingAnnotationsResource1a, frequentlyChangingAnnotationsResource1b}, nil) - frequentlyChangingAnnotationsResource2a, err := frequentlyChangingAnnotationsResourceClient.Write(NewFrequentlyChangingAnnotationsResource(namespace1, name2), clients.WriteOpts{Ctx: ctx}) + frequentlyChangingAnnotationsResource3a, err := frequentlyChangingAnnotationsResourceClient.Write(NewFrequentlyChangingAnnotationsResourceWithLabels(namespace1, name3, labels1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - frequentlyChangingAnnotationsResource2b, err := frequentlyChangingAnnotationsResourceClient.Write(NewFrequentlyChangingAnnotationsResource(namespace2, name2), clients.WriteOpts{Ctx: ctx}) + frequentlyChangingAnnotationsResource3b, err := frequentlyChangingAnnotationsResourceClient.Write(NewFrequentlyChangingAnnotationsResourceWithLabels(namespace2, name3, labels1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) + frequentlyChangingAnnotationsResourceWatched = append(frequentlyChangingAnnotationsResourceWatched, FrequentlyChangingAnnotationsResourceList{frequentlyChangingAnnotationsResource3a, frequentlyChangingAnnotationsResource3b}...) 
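// The scenario above drives the emitter with ExpressionSelector set to labelExpression1
// ("env in (test)") and expects resources in the labeled namespace3 to appear in snapshots
// while resources in the unlabeled namespace4 do not. This hunk does not show how solo-kit
// evaluates the expression internally; the sketch below only demonstrates the Kubernetes
// set-based selector semantics the test relies on, using the apimachinery labels package.
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// The same expression the test passes as ExpressionSelector.
	selector, err := labels.Parse("env in (test)")
	if err != nil {
		panic(err)
	}

	labeled := labels.Set{"env": "test"} // like namespace3 in the test
	unlabeled := labels.Set{}            // like namespace4 in the test

	fmt.Println(selector.Matches(labeled))   // true  -> namespace is picked up
	fmt.Println(selector.Matches(unlabeled)) // false -> namespace is ignored
}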
+ assertSnapshotFcars(frequentlyChangingAnnotationsResourceWatched, nil) + + createNamespaceWithLabel(ctx, kube, namespace3, labels1) + createNamespaces(ctx, kube, namespace4) - assertSnapshotFcars(FrequentlyChangingAnnotationsResourceList{frequentlyChangingAnnotationsResource1a, frequentlyChangingAnnotationsResource1b, frequentlyChangingAnnotationsResource2a, frequentlyChangingAnnotationsResource2b}, nil) + frequentlyChangingAnnotationsResource4a, err := frequentlyChangingAnnotationsResourceClient.Write(NewFrequentlyChangingAnnotationsResource(namespace3, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + frequentlyChangingAnnotationsResource4b, err := frequentlyChangingAnnotationsResourceClient.Write(NewFrequentlyChangingAnnotationsResource(namespace4, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + frequentlyChangingAnnotationsResourceWatched = append(frequentlyChangingAnnotationsResourceWatched, frequentlyChangingAnnotationsResource4a) + frequentlyChangingAnnotationsResourceNotWatched := FrequentlyChangingAnnotationsResourceList{frequentlyChangingAnnotationsResource4b} + assertSnapshotFcars(frequentlyChangingAnnotationsResourceWatched, frequentlyChangingAnnotationsResourceNotWatched) - err = frequentlyChangingAnnotationsResourceClient.Delete(frequentlyChangingAnnotationsResource2a.GetMetadata().Namespace, frequentlyChangingAnnotationsResource2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + frequentlyChangingAnnotationsResource5a, err := frequentlyChangingAnnotationsResourceClient.Write(NewFrequentlyChangingAnnotationsResourceWithLabels(namespace3, name2, labels1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - err = frequentlyChangingAnnotationsResourceClient.Delete(frequentlyChangingAnnotationsResource2b.GetMetadata().Namespace, frequentlyChangingAnnotationsResource2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + frequentlyChangingAnnotationsResource5b, err := frequentlyChangingAnnotationsResourceClient.Write(NewFrequentlyChangingAnnotationsResourceWithLabels(namespace4, name2, labels1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) + frequentlyChangingAnnotationsResourceWatched = append(frequentlyChangingAnnotationsResourceWatched, frequentlyChangingAnnotationsResource5a) + frequentlyChangingAnnotationsResourceNotWatched = append(frequentlyChangingAnnotationsResourceNotWatched, frequentlyChangingAnnotationsResource5b) + assertSnapshotFcars(frequentlyChangingAnnotationsResourceWatched, frequentlyChangingAnnotationsResourceNotWatched) - assertSnapshotFcars(FrequentlyChangingAnnotationsResourceList{frequentlyChangingAnnotationsResource1a, frequentlyChangingAnnotationsResource1b}, FrequentlyChangingAnnotationsResourceList{frequentlyChangingAnnotationsResource2a, frequentlyChangingAnnotationsResource2b}) + for _, r := range frequentlyChangingAnnotationsResourceNotWatched { + err = frequentlyChangingAnnotationsResourceClient.Delete(r.GetMetadata().Namespace, r.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + } err = frequentlyChangingAnnotationsResourceClient.Delete(frequentlyChangingAnnotationsResource1a.GetMetadata().Namespace, frequentlyChangingAnnotationsResource1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) err = frequentlyChangingAnnotationsResourceClient.Delete(frequentlyChangingAnnotationsResource1b.GetMetadata().Namespace, frequentlyChangingAnnotationsResource1b.GetMetadata().Name, clients.DeleteOpts{Ctx: 
ctx}) Expect(err).NotTo(HaveOccurred()) + frequentlyChangingAnnotationsResourceNotWatched = append(frequentlyChangingAnnotationsResourceNotWatched, FrequentlyChangingAnnotationsResourceList{frequentlyChangingAnnotationsResource1a, frequentlyChangingAnnotationsResource1b}...) + frequentlyChangingAnnotationsResourceWatched = FrequentlyChangingAnnotationsResourceList{frequentlyChangingAnnotationsResource3a, frequentlyChangingAnnotationsResource3b, frequentlyChangingAnnotationsResource4a, frequentlyChangingAnnotationsResource5a} + assertSnapshotFcars(frequentlyChangingAnnotationsResourceWatched, frequentlyChangingAnnotationsResourceNotWatched) - assertSnapshotFcars(nil, FrequentlyChangingAnnotationsResourceList{frequentlyChangingAnnotationsResource1a, frequentlyChangingAnnotationsResource1b, frequentlyChangingAnnotationsResource2a, frequentlyChangingAnnotationsResource2b}) + err = frequentlyChangingAnnotationsResourceClient.Delete(frequentlyChangingAnnotationsResource3a.GetMetadata().Namespace, frequentlyChangingAnnotationsResource3a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = frequentlyChangingAnnotationsResourceClient.Delete(frequentlyChangingAnnotationsResource3b.GetMetadata().Namespace, frequentlyChangingAnnotationsResource3b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + frequentlyChangingAnnotationsResourceNotWatched = append(frequentlyChangingAnnotationsResourceNotWatched, FrequentlyChangingAnnotationsResourceList{frequentlyChangingAnnotationsResource3a, frequentlyChangingAnnotationsResource3b}...) + frequentlyChangingAnnotationsResourceWatched = FrequentlyChangingAnnotationsResourceList{frequentlyChangingAnnotationsResource4a, frequentlyChangingAnnotationsResource5a} + assertSnapshotFcars(frequentlyChangingAnnotationsResourceWatched, frequentlyChangingAnnotationsResourceNotWatched) + + err = frequentlyChangingAnnotationsResourceClient.Delete(frequentlyChangingAnnotationsResource4a.GetMetadata().Namespace, frequentlyChangingAnnotationsResource4a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = frequentlyChangingAnnotationsResourceClient.Delete(frequentlyChangingAnnotationsResource5a.GetMetadata().Namespace, frequentlyChangingAnnotationsResource5a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + frequentlyChangingAnnotationsResourceNotWatched = append(frequentlyChangingAnnotationsResourceNotWatched, FrequentlyChangingAnnotationsResourceList{frequentlyChangingAnnotationsResource5a, frequentlyChangingAnnotationsResource5b}...) + assertSnapshotFcars(nil, frequentlyChangingAnnotationsResourceNotWatched) + + // clean up environment + deleteNamespaces(ctx, kube, namespace3, namespace4) + getNewNamespaces() /* FakeResource */ - assertSnapshotFakes := func(expectFakes testing_solo_io.FakeResourceList, unexpectFakes testing_solo_io.FakeResourceList) { drain: for { select { case snap = <-snapshots: + previous = snap for _, expected := range expectFakes { if _, err := snap.Fakes.Find(expected.GetMetadata().Ref().Strings()); err != nil { continue drain @@ -253,223 +477,1274 @@ var _ = Describe("V2Alpha1Emitter", func() { case err := <-errs: Expect(err).NotTo(HaveOccurred()) case <-time.After(time.Second * 10): - nsList1, _ := fakeResourceClient.List(namespace1, clients.ListOpts{}) - nsList2, _ := fakeResourceClient.List(namespace2, clients.ListOpts{}) - combined := append(nsList1, nsList2...) 
- Fail("expected final snapshot before 10 seconds. expected " + log.Sprintf("%v", combined)) + var expectedResources map[string][]string + var unexpectedResource map[string][]string + + if previous != nil { + expectedResources = findNonMatchingResources(convertFakesToMetadataGetter(expectFakes), convertFakesToMetadataGetter(previous.Fakes)) + unexpectedResource = findMatchingResources(convertFakesToMetadataGetter(unexpectFakes), convertFakesToMetadataGetter(previous.Fakes)) + } else { + expectedResources = getMapOfResources(convertFakesToMetadataGetter(expectFakes)) + unexpectedResource = getMapOfResources(convertFakesToMetadataGetter(unexpectFakes)) + } + getList := func(ns string) ([]metadataGetter, error) { + l, err := fakeResourceClient.List(ns, clients.ListOpts{}) + return convertFakesToMetadataGetter(l), err + } + namespaceResources := getMapOfNamespaceResources(getList) + Fail(fmt.Sprintf("expected final snapshot before 10 seconds. expected \nExpected:\n%#v\n\nUnexpected:\n%#v\n\nnamespaces:\n%#v", expectedResources, unexpectedResource, namespaceResources)) } } } + fakeResource1a, err := fakeResourceClient.Write(testing_solo_io.NewFakeResource(namespace1, name1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) fakeResource1b, err := fakeResourceClient.Write(testing_solo_io.NewFakeResource(namespace2, name1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) + fakeResourceWatched := testing_solo_io.FakeResourceList{fakeResource1a, fakeResource1b} + assertSnapshotFakes(fakeResourceWatched, nil) - assertSnapshotFakes(testing_solo_io.FakeResourceList{fakeResource1a, fakeResource1b}, nil) - fakeResource2a, err := fakeResourceClient.Write(testing_solo_io.NewFakeResource(namespace1, name2), clients.WriteOpts{Ctx: ctx}) + fakeResource3a, err := fakeResourceClient.Write(NewFakeResourceWithLabels(namespace1, name3, labels1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - fakeResource2b, err := fakeResourceClient.Write(testing_solo_io.NewFakeResource(namespace2, name2), clients.WriteOpts{Ctx: ctx}) + fakeResource3b, err := fakeResourceClient.Write(NewFakeResourceWithLabels(namespace2, name3, labels1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) + fakeResourceWatched = append(fakeResourceWatched, testing_solo_io.FakeResourceList{fakeResource3a, fakeResource3b}...) 
+ assertSnapshotFakes(fakeResourceWatched, nil) + + createNamespaceWithLabel(ctx, kube, namespace3, labels1) + createNamespaces(ctx, kube, namespace4) - assertSnapshotFakes(testing_solo_io.FakeResourceList{fakeResource1a, fakeResource1b, fakeResource2a, fakeResource2b}, nil) + fakeResource4a, err := fakeResourceClient.Write(testing_solo_io.NewFakeResource(namespace3, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + fakeResource4b, err := fakeResourceClient.Write(testing_solo_io.NewFakeResource(namespace4, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + fakeResourceWatched = append(fakeResourceWatched, fakeResource4a) + fakeResourceNotWatched := testing_solo_io.FakeResourceList{fakeResource4b} + assertSnapshotFakes(fakeResourceWatched, fakeResourceNotWatched) - err = fakeResourceClient.Delete(fakeResource2a.GetMetadata().Namespace, fakeResource2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + fakeResource5a, err := fakeResourceClient.Write(NewFakeResourceWithLabels(namespace3, name2, labels1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) - err = fakeResourceClient.Delete(fakeResource2b.GetMetadata().Namespace, fakeResource2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + fakeResource5b, err := fakeResourceClient.Write(NewFakeResourceWithLabels(namespace4, name2, labels1), clients.WriteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) + fakeResourceWatched = append(fakeResourceWatched, fakeResource5a) + fakeResourceNotWatched = append(fakeResourceNotWatched, fakeResource5b) + assertSnapshotFakes(fakeResourceWatched, fakeResourceNotWatched) - assertSnapshotFakes(testing_solo_io.FakeResourceList{fakeResource1a, fakeResource1b}, testing_solo_io.FakeResourceList{fakeResource2a, fakeResource2b}) + for _, r := range fakeResourceNotWatched { + err = fakeResourceClient.Delete(r.GetMetadata().Namespace, r.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + } err = fakeResourceClient.Delete(fakeResource1a.GetMetadata().Namespace, fakeResource1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) err = fakeResourceClient.Delete(fakeResource1b.GetMetadata().Namespace, fakeResource1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) + fakeResourceNotWatched = append(fakeResourceNotWatched, testing_solo_io.FakeResourceList{fakeResource1a, fakeResource1b}...) + fakeResourceWatched = testing_solo_io.FakeResourceList{fakeResource3a, fakeResource3b, fakeResource4a, fakeResource5a} + assertSnapshotFakes(fakeResourceWatched, fakeResourceNotWatched) - assertSnapshotFakes(nil, testing_solo_io.FakeResourceList{fakeResource1a, fakeResource1b, fakeResource2a, fakeResource2b}) - }) + err = fakeResourceClient.Delete(fakeResource3a.GetMetadata().Namespace, fakeResource3a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = fakeResourceClient.Delete(fakeResource3b.GetMetadata().Namespace, fakeResource3b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + fakeResourceNotWatched = append(fakeResourceNotWatched, testing_solo_io.FakeResourceList{fakeResource3a, fakeResource3b}...) 
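// Every assertSnapshot* helper in this test follows the same shape: keep draining snapshots
// from the emitter's channel until one contains all expected resources and none of the
// unexpected ones, surface watch errors immediately, and fail with diagnostic context after
// a ten second timeout. Below is a stripped-down, generic version of that drain loop,
// assuming plain string IDs and an error return instead of Gomega's Fail; it is an
// illustrative sketch, not the generated helper itself.
package main

import (
	"fmt"
	"time"
)

// awaitSnapshot drains snaps until a snapshot contains every member of expect and no member
// of unexpect, mirroring the labelled "drain" loop in the generated test.
func awaitSnapshot(snaps <-chan []string, errs <-chan error, expect, unexpect []string, timeout time.Duration) error {
drain:
	for {
		select {
		case snap := <-snaps:
			set := map[string]bool{}
			for _, s := range snap {
				set[s] = true
			}
			for _, e := range expect {
				if !set[e] {
					continue drain // keep draining until everything expected shows up
				}
			}
			for _, u := range unexpect {
				if set[u] {
					continue drain // keep draining until the unexpected entries are gone
				}
			}
			return nil
		case err := <-errs:
			return err
		case <-time.After(timeout):
			return fmt.Errorf("no matching snapshot within %s", timeout)
		}
	}
}

func main() {
	snaps := make(chan []string, 2)
	errs := make(chan error)
	snaps <- []string{"ns1.mock1"}              // partial snapshot, gets drained
	snaps <- []string{"ns1.mock1", "ns2.mock1"} // matching snapshot
	fmt.Println(awaitSnapshot(snaps, errs, []string{"ns1.mock1", "ns2.mock1"}, nil, time.Second))
}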
+ fakeResourceWatched = testing_solo_io.FakeResourceList{fakeResource4a, fakeResource5a} + assertSnapshotFakes(fakeResourceWatched, fakeResourceNotWatched) - It("tracks snapshots on changes to any resource using AllNamespace", func() { - ctx := context.Background() - err := emitter.Register() + err = fakeResourceClient.Delete(fakeResource4a.GetMetadata().Namespace, fakeResource4a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = fakeResourceClient.Delete(fakeResource5a.GetMetadata().Namespace, fakeResource5a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) Expect(err).NotTo(HaveOccurred()) + fakeResourceNotWatched = append(fakeResourceNotWatched, testing_solo_io.FakeResourceList{fakeResource5a, fakeResource5b}...) + assertSnapshotFakes(nil, fakeResourceNotWatched) - snapshots, errs, err := emitter.Snapshots([]string{""}, clients.WatchOpts{ - Ctx: ctx, - RefreshRate: time.Second, - }) + // clean up environment + deleteNamespaces(ctx, kube, namespace3, namespace4) + getNewNamespaces() + } + + BeforeEach(func() { + err := os.Setenv(statusutils.PodNamespaceEnvName, "default") Expect(err).NotTo(HaveOccurred()) - var snap *TestingSnapshot + ctx = context.Background() + namespace1 = helpers.RandString(8) + namespace2 = helpers.RandString(8) + namespace3 = helpers.RandString(8) + namespace4 = helpers.RandString(8) + namespace5 = helpers.RandString(8) + namespace6 = helpers.RandString(8) - /* - MockResource - */ + kube = helpers.MustKubeClient() + kubeCache, err = cache.NewKubeCoreCache(context.TODO(), kube) + Expect(err).NotTo(HaveOccurred()) + resourceNamespaceLister = namespace.NewKubeClientCacheResourceNamespaceLister(kube, kubeCache) - assertSnapshotMocks := func(expectMocks MockResourceList, unexpectMocks MockResourceList) { - drain: - for { - select { - case snap = <-snapshots: - for _, expected := range expectMocks { - if _, err := snap.Mocks.Find(expected.GetMetadata().Ref().Strings()); err != nil { - continue drain - } - } - for _, unexpected := range unexpectMocks { - if _, err := snap.Mocks.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { - continue drain - } - } - break drain - case err := <-errs: - Expect(err).NotTo(HaveOccurred()) - case <-time.After(time.Second * 10): - nsList1, _ := mockResourceClient.List(namespace1, clients.ListOpts{}) - nsList2, _ := mockResourceClient.List(namespace2, clients.ListOpts{}) - combined := append(nsList1, nsList2...) - Fail("expected final snapshot before 10 seconds. 
expected " + log.Sprintf("%v", combined)) - } - } - } - mockResource1a, err := mockResourceClient.Write(NewMockResource(namespace1, name1), clients.WriteOpts{Ctx: ctx}) + createNamespaces(ctx, kube, namespace1, namespace2) + + cfg, err = kubeutils.GetConfig("", "") Expect(err).NotTo(HaveOccurred()) - mockResource1b, err := mockResourceClient.Write(NewMockResource(namespace2, name1), clients.WriteOpts{Ctx: ctx}) + + clientset, err = apiext.NewForConfig(cfg) Expect(err).NotTo(HaveOccurred()) + // MockResource Constructor + mockResourceClientFactory := &factory.KubeResourceClientFactory{ + Crd: MockResourceCrd, + Cfg: cfg, + SharedCache: kuberc.NewKubeCache(context.TODO()), + } - assertSnapshotMocks(MockResourceList{mockResource1a, mockResource1b}, nil) - mockResource2a, err := mockResourceClient.Write(NewMockResource(namespace1, name2), clients.WriteOpts{Ctx: ctx}) + err = helpers.AddAndRegisterCrd(ctx, MockResourceCrd, clientset) Expect(err).NotTo(HaveOccurred()) - mockResource2b, err := mockResourceClient.Write(NewMockResource(namespace2, name2), clients.WriteOpts{Ctx: ctx}) + + mockResourceClient, err = NewMockResourceClient(ctx, mockResourceClientFactory) Expect(err).NotTo(HaveOccurred()) + // FrequentlyChangingAnnotationsResource Constructor + frequentlyChangingAnnotationsResourceClientFactory := &factory.MemoryResourceClientFactory{ + Cache: memory.NewInMemoryResourceCache(), + } - assertSnapshotMocks(MockResourceList{mockResource1a, mockResource1b, mockResource2a, mockResource2b}, nil) + frequentlyChangingAnnotationsResourceClient, err = NewFrequentlyChangingAnnotationsResourceClient(ctx, frequentlyChangingAnnotationsResourceClientFactory) + Expect(err).NotTo(HaveOccurred()) + // FakeResource Constructor + fakeResourceClientFactory := &factory.MemoryResourceClientFactory{ + Cache: memory.NewInMemoryResourceCache(), + } - err = mockResourceClient.Delete(mockResource2a.GetMetadata().Namespace, mockResource2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + fakeResourceClient, err = testing_solo_io.NewFakeResourceClient(ctx, fakeResourceClientFactory) Expect(err).NotTo(HaveOccurred()) - err = mockResourceClient.Delete(mockResource2b.GetMetadata().Namespace, mockResource2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + emitter = NewTestingEmitter(mockResourceClient, frequentlyChangingAnnotationsResourceClient, fakeResourceClient, resourceNamespaceLister) + }) + AfterEach(func() { + err := os.Unsetenv(statusutils.PodNamespaceEnvName) Expect(err).NotTo(HaveOccurred()) - assertSnapshotMocks(MockResourceList{mockResource1a, mockResource1b}, MockResourceList{mockResource2a, mockResource2b}) + kubeutils.DeleteNamespacesInParallelBlocking(ctx, kube, namespace1, namespace2) + }) - err = mockResourceClient.Delete(mockResource1a.GetMetadata().Namespace, mockResource1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - err = mockResourceClient.Delete(mockResource1b.GetMetadata().Namespace, mockResource1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) + Context("Tracking watched namespaces", func() { + It("tracks snapshots on changes to any resource", func() { + ctx := context.Background() + err := emitter.Register() + Expect(err).NotTo(HaveOccurred()) + + snapshots, errs, err := emitter.Snapshots([]string{namespace1, namespace2}, clients.WatchOpts{ + Ctx: ctx, + RefreshRate: time.Second, + }) + Expect(err).NotTo(HaveOccurred()) + + var snap *TestingSnapshot + + /* + MockResource + */ + + assertSnapshotMocks := 
func(expectMocks MockResourceList, unexpectMocks MockResourceList) { + drain: + for { + select { + case snap = <-snapshots: + for _, expected := range expectMocks { + if _, err := snap.Mocks.Find(expected.GetMetadata().Ref().Strings()); err != nil { + continue drain + } + } + for _, unexpected := range unexpectMocks { + if _, err := snap.Mocks.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + continue drain + } + } + break drain + case err := <-errs: + Expect(err).NotTo(HaveOccurred()) + case <-time.After(time.Second * 10): + nsList1, _ := mockResourceClient.List(namespace1, clients.ListOpts{}) + nsList2, _ := mockResourceClient.List(namespace2, clients.ListOpts{}) + combined := append(nsList1, nsList2...) + Fail("expected final snapshot before 10 seconds. expected " + log.Sprintf("%v", combined)) + } + } + } + mockResource1a, err := mockResourceClient.Write(NewMockResource(namespace1, name5), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockResource1b, err := mockResourceClient.Write(NewMockResource(namespace2, name5), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotMocks(MockResourceList{mockResource1a, mockResource1b}, nil) + mockResource2a, err := mockResourceClient.Write(NewMockResource(namespace1, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockResource2b, err := mockResourceClient.Write(NewMockResource(namespace2, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotMocks(MockResourceList{mockResource1a, mockResource1b, mockResource2a, mockResource2b}, nil) + + err = mockResourceClient.Delete(mockResource2a.GetMetadata().Namespace, mockResource2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = mockResourceClient.Delete(mockResource2b.GetMetadata().Namespace, mockResource2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotMocks(MockResourceList{mockResource1a, mockResource1b}, MockResourceList{mockResource2a, mockResource2b}) + + err = mockResourceClient.Delete(mockResource1a.GetMetadata().Namespace, mockResource1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = mockResourceClient.Delete(mockResource1b.GetMetadata().Namespace, mockResource1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotMocks(nil, MockResourceList{mockResource1a, mockResource1b, mockResource2a, mockResource2b}) + + /* + FrequentlyChangingAnnotationsResource + */ + + assertSnapshotFcars := func(expectFcars FrequentlyChangingAnnotationsResourceList, unexpectFcars FrequentlyChangingAnnotationsResourceList) { + drain: + for { + select { + case snap = <-snapshots: + for _, expected := range expectFcars { + if _, err := snap.Fcars.Find(expected.GetMetadata().Ref().Strings()); err != nil { + continue drain + } + } + for _, unexpected := range unexpectFcars { + if _, err := snap.Fcars.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + continue drain + } + } + break drain + case err := <-errs: + Expect(err).NotTo(HaveOccurred()) + case <-time.After(time.Second * 10): + nsList1, _ := frequentlyChangingAnnotationsResourceClient.List(namespace1, clients.ListOpts{}) + nsList2, _ := frequentlyChangingAnnotationsResourceClient.List(namespace2, clients.ListOpts{}) + combined := append(nsList1, nsList2...) + Fail("expected final snapshot before 10 seconds. 
expected " + log.Sprintf("%v", combined)) + } + } + } + frequentlyChangingAnnotationsResource1a, err := frequentlyChangingAnnotationsResourceClient.Write(NewFrequentlyChangingAnnotationsResource(namespace1, name5), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + frequentlyChangingAnnotationsResource1b, err := frequentlyChangingAnnotationsResourceClient.Write(NewFrequentlyChangingAnnotationsResource(namespace2, name5), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotFcars(FrequentlyChangingAnnotationsResourceList{frequentlyChangingAnnotationsResource1a, frequentlyChangingAnnotationsResource1b}, nil) + frequentlyChangingAnnotationsResource2a, err := frequentlyChangingAnnotationsResourceClient.Write(NewFrequentlyChangingAnnotationsResource(namespace1, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + frequentlyChangingAnnotationsResource2b, err := frequentlyChangingAnnotationsResourceClient.Write(NewFrequentlyChangingAnnotationsResource(namespace2, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotFcars(FrequentlyChangingAnnotationsResourceList{frequentlyChangingAnnotationsResource1a, frequentlyChangingAnnotationsResource1b, frequentlyChangingAnnotationsResource2a, frequentlyChangingAnnotationsResource2b}, nil) + + err = frequentlyChangingAnnotationsResourceClient.Delete(frequentlyChangingAnnotationsResource2a.GetMetadata().Namespace, frequentlyChangingAnnotationsResource2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = frequentlyChangingAnnotationsResourceClient.Delete(frequentlyChangingAnnotationsResource2b.GetMetadata().Namespace, frequentlyChangingAnnotationsResource2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotFcars(FrequentlyChangingAnnotationsResourceList{frequentlyChangingAnnotationsResource1a, frequentlyChangingAnnotationsResource1b}, FrequentlyChangingAnnotationsResourceList{frequentlyChangingAnnotationsResource2a, frequentlyChangingAnnotationsResource2b}) + + err = frequentlyChangingAnnotationsResourceClient.Delete(frequentlyChangingAnnotationsResource1a.GetMetadata().Namespace, frequentlyChangingAnnotationsResource1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = frequentlyChangingAnnotationsResourceClient.Delete(frequentlyChangingAnnotationsResource1b.GetMetadata().Namespace, frequentlyChangingAnnotationsResource1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotFcars(nil, FrequentlyChangingAnnotationsResourceList{frequentlyChangingAnnotationsResource1a, frequentlyChangingAnnotationsResource1b, frequentlyChangingAnnotationsResource2a, frequentlyChangingAnnotationsResource2b}) + + /* + FakeResource + */ + + assertSnapshotFakes := func(expectFakes testing_solo_io.FakeResourceList, unexpectFakes testing_solo_io.FakeResourceList) { + drain: + for { + select { + case snap = <-snapshots: + for _, expected := range expectFakes { + if _, err := snap.Fakes.Find(expected.GetMetadata().Ref().Strings()); err != nil { + continue drain + } + } + for _, unexpected := range unexpectFakes { + if _, err := snap.Fakes.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + continue drain + } + } + break drain + case err := <-errs: + Expect(err).NotTo(HaveOccurred()) + case <-time.After(time.Second * 10): + nsList1, _ := fakeResourceClient.List(namespace1, 
clients.ListOpts{}) + nsList2, _ := fakeResourceClient.List(namespace2, clients.ListOpts{}) + combined := append(nsList1, nsList2...) + Fail("expected final snapshot before 10 seconds. expected " + log.Sprintf("%v", combined)) + } + } + } + fakeResource1a, err := fakeResourceClient.Write(testing_solo_io.NewFakeResource(namespace1, name5), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + fakeResource1b, err := fakeResourceClient.Write(testing_solo_io.NewFakeResource(namespace2, name5), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) - assertSnapshotMocks(nil, MockResourceList{mockResource1a, mockResource1b, mockResource2a, mockResource2b}) + assertSnapshotFakes(testing_solo_io.FakeResourceList{fakeResource1a, fakeResource1b}, nil) + fakeResource2a, err := fakeResourceClient.Write(testing_solo_io.NewFakeResource(namespace1, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + fakeResource2b, err := fakeResourceClient.Write(testing_solo_io.NewFakeResource(namespace2, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) - /* - FrequentlyChangingAnnotationsResource - */ + assertSnapshotFakes(testing_solo_io.FakeResourceList{fakeResource1a, fakeResource1b, fakeResource2a, fakeResource2b}, nil) - assertSnapshotFcars := func(expectFcars FrequentlyChangingAnnotationsResourceList, unexpectFcars FrequentlyChangingAnnotationsResourceList) { - drain: - for { - select { - case snap = <-snapshots: - for _, expected := range expectFcars { - if _, err := snap.Fcars.Find(expected.GetMetadata().Ref().Strings()); err != nil { - continue drain + err = fakeResourceClient.Delete(fakeResource2a.GetMetadata().Namespace, fakeResource2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = fakeResourceClient.Delete(fakeResource2b.GetMetadata().Namespace, fakeResource2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotFakes(testing_solo_io.FakeResourceList{fakeResource1a, fakeResource1b}, testing_solo_io.FakeResourceList{fakeResource2a, fakeResource2b}) + + err = fakeResourceClient.Delete(fakeResource1a.GetMetadata().Namespace, fakeResource1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = fakeResourceClient.Delete(fakeResource1b.GetMetadata().Namespace, fakeResource1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + assertSnapshotFakes(nil, testing_solo_io.FakeResourceList{fakeResource1a, fakeResource1b, fakeResource2a, fakeResource2b}) + }) + + It("should be able to track all resources that are on labeled namespaces", func() { + runNamespacedSelectorsWithWatchNamespaces() + }) + }) + + Context("Tracking empty watched namespaces", func() { + It("tracks snapshots on changes to any resource using AllNamespace", func() { + ctx := context.Background() + err := emitter.Register() + Expect(err).NotTo(HaveOccurred()) + + snapshots, errs, err := emitter.Snapshots([]string{""}, clients.WatchOpts{ + Ctx: ctx, + RefreshRate: time.Second, + }) + Expect(err).NotTo(HaveOccurred()) + + var snap *TestingSnapshot + + /* + MockResource + */ + + assertSnapshotMocks := func(expectMocks MockResourceList, unexpectMocks MockResourceList) { + drain: + for { + select { + case snap = <-snapshots: + for _, expected := range expectMocks { + if _, err := snap.Mocks.Find(expected.GetMetadata().Ref().Strings()); err != nil { + continue drain + } + } + for _, unexpected := range unexpectMocks 
{ + if _, err := snap.Mocks.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + continue drain + } } + break drain + case err := <-errs: + Expect(err).NotTo(HaveOccurred()) + case <-time.After(time.Second * 10): + nsList1, _ := mockResourceClient.List(namespace1, clients.ListOpts{}) + nsList2, _ := mockResourceClient.List(namespace2, clients.ListOpts{}) + combined := append(nsList1, nsList2...) + Fail("expected final snapshot before 10 seconds. expected " + log.Sprintf("%v", combined)) } - for _, unexpected := range unexpectFcars { - if _, err := snap.Fcars.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { - continue drain + } + } + + mockResource1a, err := mockResourceClient.Write(NewMockResource(namespace1, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockResource1b, err := mockResourceClient.Write(NewMockResource(namespace2, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + assertSnapshotMocks(MockResourceList{mockResource1a, mockResource1b}, nil) + + mockResource2a, err := mockResourceClient.Write(NewMockResource(namespace1, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockResource2b, err := mockResourceClient.Write(NewMockResource(namespace2, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + assertSnapshotMocks(MockResourceList{mockResource1a, mockResource1b, mockResource2a, mockResource2b}, nil) + + err = mockResourceClient.Delete(mockResource2a.GetMetadata().Namespace, mockResource2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = mockResourceClient.Delete(mockResource2b.GetMetadata().Namespace, mockResource2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + assertSnapshotMocks(MockResourceList{mockResource1a, mockResource1b}, MockResourceList{mockResource2a, mockResource2b}) + + err = mockResourceClient.Delete(mockResource1a.GetMetadata().Namespace, mockResource1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = mockResourceClient.Delete(mockResource1b.GetMetadata().Namespace, mockResource1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + assertSnapshotMocks(nil, MockResourceList{mockResource1a, mockResource1b, mockResource2a, mockResource2b}) + + /* + FrequentlyChangingAnnotationsResource + */ + + assertSnapshotFcars := func(expectFcars FrequentlyChangingAnnotationsResourceList, unexpectFcars FrequentlyChangingAnnotationsResourceList) { + drain: + for { + select { + case snap = <-snapshots: + for _, expected := range expectFcars { + if _, err := snap.Fcars.Find(expected.GetMetadata().Ref().Strings()); err != nil { + continue drain + } } + for _, unexpected := range unexpectFcars { + if _, err := snap.Fcars.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + continue drain + } + } + break drain + case err := <-errs: + Expect(err).NotTo(HaveOccurred()) + case <-time.After(time.Second * 10): + nsList1, _ := frequentlyChangingAnnotationsResourceClient.List(namespace1, clients.ListOpts{}) + nsList2, _ := frequentlyChangingAnnotationsResourceClient.List(namespace2, clients.ListOpts{}) + combined := append(nsList1, nsList2...) + Fail("expected final snapshot before 10 seconds. 
expected " + log.Sprintf("%v", combined)) } - break drain - case err := <-errs: - Expect(err).NotTo(HaveOccurred()) - case <-time.After(time.Second * 10): - nsList1, _ := frequentlyChangingAnnotationsResourceClient.List(namespace1, clients.ListOpts{}) - nsList2, _ := frequentlyChangingAnnotationsResourceClient.List(namespace2, clients.ListOpts{}) - combined := append(nsList1, nsList2...) - Fail("expected final snapshot before 10 seconds. expected " + log.Sprintf("%v", combined)) } } - } - frequentlyChangingAnnotationsResource1a, err := frequentlyChangingAnnotationsResourceClient.Write(NewFrequentlyChangingAnnotationsResource(namespace1, name1), clients.WriteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - frequentlyChangingAnnotationsResource1b, err := frequentlyChangingAnnotationsResourceClient.Write(NewFrequentlyChangingAnnotationsResource(namespace2, name1), clients.WriteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - assertSnapshotFcars(FrequentlyChangingAnnotationsResourceList{frequentlyChangingAnnotationsResource1a, frequentlyChangingAnnotationsResource1b}, nil) - frequentlyChangingAnnotationsResource2a, err := frequentlyChangingAnnotationsResourceClient.Write(NewFrequentlyChangingAnnotationsResource(namespace1, name2), clients.WriteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - frequentlyChangingAnnotationsResource2b, err := frequentlyChangingAnnotationsResourceClient.Write(NewFrequentlyChangingAnnotationsResource(namespace2, name2), clients.WriteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) + frequentlyChangingAnnotationsResource1a, err := frequentlyChangingAnnotationsResourceClient.Write(NewFrequentlyChangingAnnotationsResource(namespace1, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + frequentlyChangingAnnotationsResource1b, err := frequentlyChangingAnnotationsResourceClient.Write(NewFrequentlyChangingAnnotationsResource(namespace2, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + assertSnapshotFcars(FrequentlyChangingAnnotationsResourceList{frequentlyChangingAnnotationsResource1a, frequentlyChangingAnnotationsResource1b}, nil) + + frequentlyChangingAnnotationsResource2a, err := frequentlyChangingAnnotationsResourceClient.Write(NewFrequentlyChangingAnnotationsResource(namespace1, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + frequentlyChangingAnnotationsResource2b, err := frequentlyChangingAnnotationsResourceClient.Write(NewFrequentlyChangingAnnotationsResource(namespace2, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + assertSnapshotFcars(FrequentlyChangingAnnotationsResourceList{frequentlyChangingAnnotationsResource1a, frequentlyChangingAnnotationsResource1b, frequentlyChangingAnnotationsResource2a, frequentlyChangingAnnotationsResource2b}, nil) + + err = frequentlyChangingAnnotationsResourceClient.Delete(frequentlyChangingAnnotationsResource2a.GetMetadata().Namespace, frequentlyChangingAnnotationsResource2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = frequentlyChangingAnnotationsResourceClient.Delete(frequentlyChangingAnnotationsResource2b.GetMetadata().Namespace, frequentlyChangingAnnotationsResource2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + assertSnapshotFcars(FrequentlyChangingAnnotationsResourceList{frequentlyChangingAnnotationsResource1a, frequentlyChangingAnnotationsResource1b}, 
FrequentlyChangingAnnotationsResourceList{frequentlyChangingAnnotationsResource2a, frequentlyChangingAnnotationsResource2b}) + + err = frequentlyChangingAnnotationsResourceClient.Delete(frequentlyChangingAnnotationsResource1a.GetMetadata().Namespace, frequentlyChangingAnnotationsResource1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = frequentlyChangingAnnotationsResourceClient.Delete(frequentlyChangingAnnotationsResource1b.GetMetadata().Namespace, frequentlyChangingAnnotationsResource1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + assertSnapshotFcars(nil, FrequentlyChangingAnnotationsResourceList{frequentlyChangingAnnotationsResource1a, frequentlyChangingAnnotationsResource1b, frequentlyChangingAnnotationsResource2a, frequentlyChangingAnnotationsResource2b}) + + /* + FakeResource + */ + + assertSnapshotFakes := func(expectFakes testing_solo_io.FakeResourceList, unexpectFakes testing_solo_io.FakeResourceList) { + drain: + for { + select { + case snap = <-snapshots: + for _, expected := range expectFakes { + if _, err := snap.Fakes.Find(expected.GetMetadata().Ref().Strings()); err != nil { + continue drain + } + } + for _, unexpected := range unexpectFakes { + if _, err := snap.Fakes.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + continue drain + } + } + break drain + case err := <-errs: + Expect(err).NotTo(HaveOccurred()) + case <-time.After(time.Second * 10): + nsList1, _ := fakeResourceClient.List(namespace1, clients.ListOpts{}) + nsList2, _ := fakeResourceClient.List(namespace2, clients.ListOpts{}) + combined := append(nsList1, nsList2...) + Fail("expected final snapshot before 10 seconds. expected " + log.Sprintf("%v", combined)) + } + } + } - assertSnapshotFcars(FrequentlyChangingAnnotationsResourceList{frequentlyChangingAnnotationsResource1a, frequentlyChangingAnnotationsResource1b, frequentlyChangingAnnotationsResource2a, frequentlyChangingAnnotationsResource2b}, nil) + fakeResource1a, err := fakeResourceClient.Write(testing_solo_io.NewFakeResource(namespace1, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + fakeResource1b, err := fakeResourceClient.Write(testing_solo_io.NewFakeResource(namespace2, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + assertSnapshotFakes(testing_solo_io.FakeResourceList{fakeResource1a, fakeResource1b}, nil) + + fakeResource2a, err := fakeResourceClient.Write(testing_solo_io.NewFakeResource(namespace1, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + fakeResource2b, err := fakeResourceClient.Write(testing_solo_io.NewFakeResource(namespace2, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + assertSnapshotFakes(testing_solo_io.FakeResourceList{fakeResource1a, fakeResource1b, fakeResource2a, fakeResource2b}, nil) + + err = fakeResourceClient.Delete(fakeResource2a.GetMetadata().Namespace, fakeResource2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = fakeResourceClient.Delete(fakeResource2b.GetMetadata().Namespace, fakeResource2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + assertSnapshotFakes(testing_solo_io.FakeResourceList{fakeResource1a, fakeResource1b}, testing_solo_io.FakeResourceList{fakeResource2a, fakeResource2b}) + + err = fakeResourceClient.Delete(fakeResource1a.GetMetadata().Namespace, fakeResource1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + 
Expect(err).NotTo(HaveOccurred())
+      err = fakeResourceClient.Delete(fakeResource1b.GetMetadata().Namespace, fakeResource1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx})
+      Expect(err).NotTo(HaveOccurred())
+      assertSnapshotFakes(nil, testing_solo_io.FakeResourceList{fakeResource1a, fakeResource1b, fakeResource2a, fakeResource2b})
+    })
-      err = frequentlyChangingAnnotationsResourceClient.Delete(frequentlyChangingAnnotationsResource2a.GetMetadata().Namespace, frequentlyChangingAnnotationsResource2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx})
-      Expect(err).NotTo(HaveOccurred())
-      err = frequentlyChangingAnnotationsResourceClient.Delete(frequentlyChangingAnnotationsResource2b.GetMetadata().Namespace, frequentlyChangingAnnotationsResource2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx})
-      Expect(err).NotTo(HaveOccurred())
+    It("should be able to track resources only made with the matching labels", func() {
+      ctx := context.Background()
+      err := emitter.Register()
+      Expect(err).NotTo(HaveOccurred())
+
+      snapshots, errs, err := emitter.Snapshots([]string{""}, clients.WatchOpts{
+        Ctx: ctx,
+        RefreshRate: time.Second,
+        ExpressionSelector: labelExpression1,
+      })
+      Expect(err).NotTo(HaveOccurred())
+
+      var snap *TestingSnapshot
+      var previous *TestingSnapshot
+
+      /*
+        MockResource
+      */
+
+      assertSnapshotMocks := func(expectMocks MockResourceList, unexpectMocks MockResourceList) {
+      drain:
+        for {
+          select {
+          case snap = <-snapshots:
+            previous = snap
+            for _, expected := range expectMocks {
+              if _, err := snap.Mocks.Find(expected.GetMetadata().Ref().Strings()); err != nil {
+                continue drain
+              }
+            }
+            for _, unexpected := range unexpectMocks {
+              if _, err := snap.Mocks.Find(unexpected.GetMetadata().Ref().Strings()); err == nil {
+                continue drain
+              }
+            }
+            break drain
+          case err := <-errs:
+            Expect(err).NotTo(HaveOccurred())
+          case <-time.After(time.Second * 10):
+
+            var expectedResources map[string][]string
+            var unexpectedResource map[string][]string
+
+            if previous != nil {
+              expectedResources = findNonMatchingResources(convertMocksToMetadataGetter(expectMocks), convertMocksToMetadataGetter(previous.Mocks))
+              unexpectedResource = findMatchingResources(convertMocksToMetadataGetter(unexpectMocks), convertMocksToMetadataGetter(previous.Mocks))
+            } else {
+              expectedResources = getMapOfResources(convertMocksToMetadataGetter(expectMocks))
+              unexpectedResource = getMapOfResources(convertMocksToMetadataGetter(unexpectMocks))
+            }
+            getList := func(ns string) ([]metadataGetter, error) {
+              l, err := mockResourceClient.List(ns, clients.ListOpts{})
+              return convertMocksToMetadataGetter(l), err
+            }
+            namespaceResources := getMapOfNamespaceResources(getList)
+            Fail(fmt.Sprintf("expected final snapshot before 10 seconds. expected \nExpected:\n%#v\n\nUnexpected:\n%#v\n\nnamespaces:\n%#v", expectedResources, unexpectedResource, namespaceResources))
+          }
+        }
+      }
-      assertSnapshotFcars(FrequentlyChangingAnnotationsResourceList{frequentlyChangingAnnotationsResource1a, frequentlyChangingAnnotationsResource1b}, FrequentlyChangingAnnotationsResourceList{frequentlyChangingAnnotationsResource2a, frequentlyChangingAnnotationsResource2b})
+      mockResource1a, err := mockResourceClient.Write(NewMockResource(namespace1, name1), clients.WriteOpts{Ctx: ctx})
+      Expect(err).NotTo(HaveOccurred())
+      mockResource1b, err := mockResourceClient.Write(NewMockResource(namespace2, name1), clients.WriteOpts{Ctx: ctx})
+      Expect(err).NotTo(HaveOccurred())
+      mockResourceNotWatched := MockResourceList{mockResource1a, mockResource1b}
+
+      createNamespaceWithLabel(ctx, kube, namespace3, labels1)
+      createNamespaceWithLabel(ctx, kube, namespace4, labels1)
+
+      mockResource2a, err := mockResourceClient.Write(NewMockResource(namespace3, name1), clients.WriteOpts{Ctx: ctx})
+      Expect(err).NotTo(HaveOccurred())
+      mockResource2b, err := mockResourceClient.Write(NewMockResource(namespace4, name1), clients.WriteOpts{Ctx: ctx})
+      Expect(err).NotTo(HaveOccurred())
+      mockResourceWatched := MockResourceList{mockResource2a, mockResource2b}
+      assertSnapshotMocks(mockResourceWatched, mockResourceNotWatched)
+
+      createNamespaces(ctx, kube, namespace5)
+      createNamespaceWithLabel(ctx, kube, namespace6, labels1)
+
+      mockResource5a, err := mockResourceClient.Write(NewMockResource(namespace5, name2), clients.WriteOpts{Ctx: ctx})
+      Expect(err).NotTo(HaveOccurred())
+      mockResource5b, err := mockResourceClient.Write(NewMockResource(namespace6, name2), clients.WriteOpts{Ctx: ctx})
+      Expect(err).NotTo(HaveOccurred())
+      mockResourceNotWatched = append(mockResourceNotWatched, mockResource5a)
+      mockResourceWatched = append(mockResourceWatched, mockResource5b)
+      assertSnapshotMocks(mockResourceWatched, mockResourceNotWatched)
+
+      mockResource7a, err := mockResourceClient.Write(NewMockResource(namespace5, name4), clients.WriteOpts{Ctx: ctx})
+      Expect(err).NotTo(HaveOccurred())
+      mockResource7b, err := mockResourceClient.Write(NewMockResource(namespace6, name4), clients.WriteOpts{Ctx: ctx})
+      Expect(err).NotTo(HaveOccurred())
+      mockResourceNotWatched = append(mockResourceNotWatched, mockResource7a)
+      mockResourceWatched = append(mockResourceWatched, mockResource7b)
+      assertSnapshotMocks(mockResourceWatched, mockResourceNotWatched)
+
+      for _, r := range mockResourceNotWatched {
+        err = mockResourceClient.Delete(r.GetMetadata().Namespace, r.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx})
+        Expect(err).NotTo(HaveOccurred())
+      }
-      err = frequentlyChangingAnnotationsResourceClient.Delete(frequentlyChangingAnnotationsResource1a.GetMetadata().Namespace, frequentlyChangingAnnotationsResource1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx})
-      Expect(err).NotTo(HaveOccurred())
-      err = frequentlyChangingAnnotationsResourceClient.Delete(frequentlyChangingAnnotationsResource1b.GetMetadata().Namespace, frequentlyChangingAnnotationsResource1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx})
-      Expect(err).NotTo(HaveOccurred())
+      for _, r := range mockResourceWatched {
+        err = mockResourceClient.Delete(r.GetMetadata().Namespace, r.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx})
+        Expect(err).NotTo(HaveOccurred())
+        mockResourceNotWatched = append(mockResourceNotWatched, r)
+      }
+      assertSnapshotMocks(nil, mockResourceNotWatched)
+
+      // clean up environment
+      deleteNamespaces(ctx, kube,
namespace3, namespace4, namespace5, namespace6) + getNewNamespaces() + + /* + FrequentlyChangingAnnotationsResource + */ + + assertSnapshotFcars := func(expectFcars FrequentlyChangingAnnotationsResourceList, unexpectFcars FrequentlyChangingAnnotationsResourceList) { + drain: + for { + select { + case snap = <-snapshots: + previous = snap + for _, expected := range expectFcars { + if _, err := snap.Fcars.Find(expected.GetMetadata().Ref().Strings()); err != nil { + continue drain + } + } + for _, unexpected := range unexpectFcars { + if _, err := snap.Fcars.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + continue drain + } + } + break drain + case err := <-errs: + Expect(err).NotTo(HaveOccurred()) + case <-time.After(time.Second * 10): + + var expectedResources map[string][]string + var unexpectedResource map[string][]string + + if previous != nil { + expectedResources = findNonMatchingResources(convertFcarsToMetadataGetter(expectFcars), convertFcarsToMetadataGetter(previous.Fcars)) + unexpectedResource = findMatchingResources(convertFcarsToMetadataGetter(unexpectFcars), convertFcarsToMetadataGetter(previous.Fcars)) + } else { + expectedResources = getMapOfResources(convertFcarsToMetadataGetter(expectFcars)) + unexpectedResource = getMapOfResources(convertFcarsToMetadataGetter(unexpectFcars)) + } + getList := func(ns string) ([]metadataGetter, error) { + l, err := frequentlyChangingAnnotationsResourceClient.List(ns, clients.ListOpts{}) + return convertFcarsToMetadataGetter(l), err + } + namespaceResources := getMapOfNamespaceResources(getList) + Fail(fmt.Sprintf("expected final snapshot before 10 seconds. expected \nExpected:\n%#v\n\nUnexpected:\n%#v\n\nnamespaces:\n%#v", expectedResources, unexpectedResource, namespaceResources)) + } + } + } - assertSnapshotFcars(nil, FrequentlyChangingAnnotationsResourceList{frequentlyChangingAnnotationsResource1a, frequentlyChangingAnnotationsResource1b, frequentlyChangingAnnotationsResource2a, frequentlyChangingAnnotationsResource2b}) + frequentlyChangingAnnotationsResource1a, err := frequentlyChangingAnnotationsResourceClient.Write(NewFrequentlyChangingAnnotationsResource(namespace1, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + frequentlyChangingAnnotationsResource1b, err := frequentlyChangingAnnotationsResourceClient.Write(NewFrequentlyChangingAnnotationsResource(namespace2, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + frequentlyChangingAnnotationsResourceNotWatched := FrequentlyChangingAnnotationsResourceList{frequentlyChangingAnnotationsResource1a, frequentlyChangingAnnotationsResource1b} + + createNamespaceWithLabel(ctx, kube, namespace3, labels1) + createNamespaceWithLabel(ctx, kube, namespace4, labels1) + + frequentlyChangingAnnotationsResource2a, err := frequentlyChangingAnnotationsResourceClient.Write(NewFrequentlyChangingAnnotationsResource(namespace3, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + frequentlyChangingAnnotationsResource2b, err := frequentlyChangingAnnotationsResourceClient.Write(NewFrequentlyChangingAnnotationsResource(namespace4, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + frequentlyChangingAnnotationsResourceWatched := FrequentlyChangingAnnotationsResourceList{frequentlyChangingAnnotationsResource2a, frequentlyChangingAnnotationsResource2b} + assertSnapshotFcars(frequentlyChangingAnnotationsResourceWatched, frequentlyChangingAnnotationsResourceNotWatched) + + createNamespaces(ctx, kube, 
namespace5) + createNamespaceWithLabel(ctx, kube, namespace6, labels1) + + frequentlyChangingAnnotationsResource5a, err := frequentlyChangingAnnotationsResourceClient.Write(NewFrequentlyChangingAnnotationsResource(namespace5, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + frequentlyChangingAnnotationsResource5b, err := frequentlyChangingAnnotationsResourceClient.Write(NewFrequentlyChangingAnnotationsResource(namespace6, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + frequentlyChangingAnnotationsResourceNotWatched = append(frequentlyChangingAnnotationsResourceNotWatched, frequentlyChangingAnnotationsResource5a) + frequentlyChangingAnnotationsResourceWatched = append(frequentlyChangingAnnotationsResourceWatched, frequentlyChangingAnnotationsResource5b) + assertSnapshotFcars(frequentlyChangingAnnotationsResourceWatched, frequentlyChangingAnnotationsResourceNotWatched) + + frequentlyChangingAnnotationsResource7a, err := frequentlyChangingAnnotationsResourceClient.Write(NewFrequentlyChangingAnnotationsResource(namespace5, name4), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + frequentlyChangingAnnotationsResource7b, err := frequentlyChangingAnnotationsResourceClient.Write(NewFrequentlyChangingAnnotationsResource(namespace6, name4), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + frequentlyChangingAnnotationsResourceNotWatched = append(frequentlyChangingAnnotationsResourceNotWatched, frequentlyChangingAnnotationsResource7a) + frequentlyChangingAnnotationsResourceWatched = append(frequentlyChangingAnnotationsResourceWatched, frequentlyChangingAnnotationsResource7b) + assertSnapshotFcars(frequentlyChangingAnnotationsResourceWatched, frequentlyChangingAnnotationsResourceNotWatched) + + for _, r := range frequentlyChangingAnnotationsResourceNotWatched { + err = frequentlyChangingAnnotationsResourceClient.Delete(r.GetMetadata().Namespace, r.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + } - /* - FakeResource - */ + for _, r := range frequentlyChangingAnnotationsResourceWatched { + err = frequentlyChangingAnnotationsResourceClient.Delete(r.GetMetadata().Namespace, r.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + frequentlyChangingAnnotationsResourceNotWatched = append(frequentlyChangingAnnotationsResourceNotWatched, r) + } + assertSnapshotFcars(nil, frequentlyChangingAnnotationsResourceNotWatched) + + // clean up environment + deleteNamespaces(ctx, kube, namespace3, namespace4, namespace5, namespace6) + getNewNamespaces() + + /* + FakeResource + */ + + assertSnapshotFakes := func(expectFakes testing_solo_io.FakeResourceList, unexpectFakes testing_solo_io.FakeResourceList) { + drain: + for { + select { + case snap = <-snapshots: + previous = snap + for _, expected := range expectFakes { + if _, err := snap.Fakes.Find(expected.GetMetadata().Ref().Strings()); err != nil { + continue drain + } + } + for _, unexpected := range unexpectFakes { + if _, err := snap.Fakes.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + continue drain + } + } + break drain + case err := <-errs: + Expect(err).NotTo(HaveOccurred()) + case <-time.After(time.Second * 10): + + var expectedResources map[string][]string + var unexpectedResource map[string][]string + + if previous != nil { + expectedResources = findNonMatchingResources(convertFakesToMetadataGetter(expectFakes), convertFakesToMetadataGetter(previous.Fakes)) + unexpectedResource = 
findMatchingResources(convertFakesToMetadataGetter(unexpectFakes), convertFakesToMetadataGetter(previous.Fakes)) + } else { + expectedResources = getMapOfResources(convertFakesToMetadataGetter(expectFakes)) + unexpectedResource = getMapOfResources(convertFakesToMetadataGetter(unexpectFakes)) + } + getList := func(ns string) ([]metadataGetter, error) { + l, err := fakeResourceClient.List(ns, clients.ListOpts{}) + return convertFakesToMetadataGetter(l), err + } + namespaceResources := getMapOfNamespaceResources(getList) + Fail(fmt.Sprintf("expected final snapshot before 10 seconds. expected \nExpected:\n%#v\n\nUnexpected:\n%#v\n\nnamespaces:\n%#v", expectedResources, unexpectedResource, namespaceResources)) + } + } + } - assertSnapshotFakes := func(expectFakes testing_solo_io.FakeResourceList, unexpectFakes testing_solo_io.FakeResourceList) { - drain: - for { - select { - case snap = <-snapshots: - for _, expected := range expectFakes { - if _, err := snap.Fakes.Find(expected.GetMetadata().Ref().Strings()); err != nil { - continue drain + fakeResource1a, err := fakeResourceClient.Write(testing_solo_io.NewFakeResource(namespace1, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + fakeResource1b, err := fakeResourceClient.Write(testing_solo_io.NewFakeResource(namespace2, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + fakeResourceNotWatched := testing_solo_io.FakeResourceList{fakeResource1a, fakeResource1b} + + createNamespaceWithLabel(ctx, kube, namespace3, labels1) + createNamespaceWithLabel(ctx, kube, namespace4, labels1) + + fakeResource2a, err := fakeResourceClient.Write(testing_solo_io.NewFakeResource(namespace3, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + fakeResource2b, err := fakeResourceClient.Write(testing_solo_io.NewFakeResource(namespace4, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + fakeResourceWatched := testing_solo_io.FakeResourceList{fakeResource2a, fakeResource2b} + assertSnapshotFakes(fakeResourceWatched, fakeResourceNotWatched) + + createNamespaces(ctx, kube, namespace5) + createNamespaceWithLabel(ctx, kube, namespace6, labels1) + + fakeResource5a, err := fakeResourceClient.Write(testing_solo_io.NewFakeResource(namespace5, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + fakeResource5b, err := fakeResourceClient.Write(testing_solo_io.NewFakeResource(namespace6, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + fakeResourceNotWatched = append(fakeResourceNotWatched, fakeResource5a) + fakeResourceWatched = append(fakeResourceWatched, fakeResource5b) + assertSnapshotFakes(fakeResourceWatched, fakeResourceNotWatched) + + fakeResource7a, err := fakeResourceClient.Write(testing_solo_io.NewFakeResource(namespace5, name4), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + fakeResource7b, err := fakeResourceClient.Write(testing_solo_io.NewFakeResource(namespace6, name4), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + fakeResourceNotWatched = append(fakeResourceNotWatched, fakeResource7a) + fakeResourceWatched = append(fakeResourceWatched, fakeResource7b) + assertSnapshotFakes(fakeResourceWatched, fakeResourceNotWatched) + + for _, r := range fakeResourceNotWatched { + err = fakeResourceClient.Delete(r.GetMetadata().Namespace, r.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + } + + for _, r := range fakeResourceWatched { + err = 
fakeResourceClient.Delete(r.GetMetadata().Namespace, r.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + fakeResourceNotWatched = append(fakeResourceNotWatched, r) + } + assertSnapshotFakes(nil, fakeResourceNotWatched) + + // clean up environment + deleteNamespaces(ctx, kube, namespace3, namespace4, namespace5, namespace6) + getNewNamespaces() + }) + }) + + Context("Tracking resources on namespaces that are deleted", func() { + It("Should not contain resources from a deleted namespace", func() { + ctx := context.Background() + err := emitter.Register() + Expect(err).NotTo(HaveOccurred()) + + snapshots, errs, err := emitter.Snapshots([]string{""}, clients.WatchOpts{ + Ctx: ctx, + RefreshRate: time.Second, + }) + Expect(err).NotTo(HaveOccurred()) + + var snap *TestingSnapshot + + /* + MockResource + */ + assertSnapshotMocks := func(expectMocks MockResourceList, unexpectMocks MockResourceList) { + drain: + for { + select { + case snap = <-snapshots: + for _, expected := range expectMocks { + if _, err := snap.Mocks.Find(expected.GetMetadata().Ref().Strings()); err != nil { + continue drain + } + } + for _, unexpected := range unexpectMocks { + if _, err := snap.Mocks.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + continue drain + } } + break drain + case err := <-errs: + Expect(err).NotTo(HaveOccurred()) + case <-time.After(time.Second * 10): + nsList1, _ := mockResourceClient.List(namespace1, clients.ListOpts{}) + nsList2, _ := mockResourceClient.List(namespace2, clients.ListOpts{}) + combined := append(nsList1, nsList2...) + Fail("expected final snapshot before 10 seconds. expected " + log.Sprintf("%v", combined)) } - for _, unexpected := range unexpectFakes { - if _, err := snap.Fakes.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { - continue drain + } + } + + mockResource1a, err := mockResourceClient.Write(NewMockResource(namespace1, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockResource1b, err := mockResourceClient.Write(NewMockResource(namespace2, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockResourceWatched := MockResourceList{mockResource1a, mockResource1b} + assertSnapshotMocks(mockResourceWatched, nil) + err = mockResourceClient.Delete(mockResource1a.GetMetadata().Namespace, mockResource1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = mockResourceClient.Delete(mockResource1b.GetMetadata().Namespace, mockResource1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + mockResourceNotWatched := MockResourceList{mockResource1a, mockResource1b} + assertSnapshotMocks(nil, mockResourceNotWatched) + + deleteNamespaces(ctx, kube, namespace1, namespace2) + + getNewNamespaces1and2() + createNamespaces(ctx, kube, namespace1, namespace2) + + /* + FrequentlyChangingAnnotationsResource + */ + assertSnapshotFcars := func(expectFcars FrequentlyChangingAnnotationsResourceList, unexpectFcars FrequentlyChangingAnnotationsResourceList) { + drain: + for { + select { + case snap = <-snapshots: + for _, expected := range expectFcars { + if _, err := snap.Fcars.Find(expected.GetMetadata().Ref().Strings()); err != nil { + continue drain + } } + for _, unexpected := range unexpectFcars { + if _, err := snap.Fcars.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + continue drain + } + } + break drain + case err := <-errs: + Expect(err).NotTo(HaveOccurred()) + case 
<-time.After(time.Second * 10): + nsList1, _ := frequentlyChangingAnnotationsResourceClient.List(namespace1, clients.ListOpts{}) + nsList2, _ := frequentlyChangingAnnotationsResourceClient.List(namespace2, clients.ListOpts{}) + combined := append(nsList1, nsList2...) + Fail("expected final snapshot before 10 seconds. expected " + log.Sprintf("%v", combined)) } - break drain - case err := <-errs: - Expect(err).NotTo(HaveOccurred()) - case <-time.After(time.Second * 10): - nsList1, _ := fakeResourceClient.List(namespace1, clients.ListOpts{}) - nsList2, _ := fakeResourceClient.List(namespace2, clients.ListOpts{}) - combined := append(nsList1, nsList2...) - Fail("expected final snapshot before 10 seconds. expected " + log.Sprintf("%v", combined)) } } - } - fakeResource1a, err := fakeResourceClient.Write(testing_solo_io.NewFakeResource(namespace1, name1), clients.WriteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - fakeResource1b, err := fakeResourceClient.Write(testing_solo_io.NewFakeResource(namespace2, name1), clients.WriteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - assertSnapshotFakes(testing_solo_io.FakeResourceList{fakeResource1a, fakeResource1b}, nil) - fakeResource2a, err := fakeResourceClient.Write(testing_solo_io.NewFakeResource(namespace1, name2), clients.WriteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - fakeResource2b, err := fakeResourceClient.Write(testing_solo_io.NewFakeResource(namespace2, name2), clients.WriteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) + frequentlyChangingAnnotationsResource1a, err := frequentlyChangingAnnotationsResourceClient.Write(NewFrequentlyChangingAnnotationsResource(namespace1, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + frequentlyChangingAnnotationsResource1b, err := frequentlyChangingAnnotationsResourceClient.Write(NewFrequentlyChangingAnnotationsResource(namespace2, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + frequentlyChangingAnnotationsResourceWatched := FrequentlyChangingAnnotationsResourceList{frequentlyChangingAnnotationsResource1a, frequentlyChangingAnnotationsResource1b} + assertSnapshotFcars(frequentlyChangingAnnotationsResourceWatched, nil) + err = frequentlyChangingAnnotationsResourceClient.Delete(frequentlyChangingAnnotationsResource1a.GetMetadata().Namespace, frequentlyChangingAnnotationsResource1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = frequentlyChangingAnnotationsResourceClient.Delete(frequentlyChangingAnnotationsResource1b.GetMetadata().Namespace, frequentlyChangingAnnotationsResource1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + + frequentlyChangingAnnotationsResourceNotWatched := FrequentlyChangingAnnotationsResourceList{frequentlyChangingAnnotationsResource1a, frequentlyChangingAnnotationsResource1b} + assertSnapshotFcars(nil, frequentlyChangingAnnotationsResourceNotWatched) + + deleteNamespaces(ctx, kube, namespace1, namespace2) + + getNewNamespaces1and2() + createNamespaces(ctx, kube, namespace1, namespace2) + + /* + FakeResource + */ + assertSnapshotFakes := func(expectFakes testing_solo_io.FakeResourceList, unexpectFakes testing_solo_io.FakeResourceList) { + drain: + for { + select { + case snap = <-snapshots: + for _, expected := range expectFakes { + if _, err := snap.Fakes.Find(expected.GetMetadata().Ref().Strings()); err != nil { + continue drain + } + } + for _, unexpected := range unexpectFakes { + if _, err := 
snap.Fakes.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + continue drain + } + } + break drain + case err := <-errs: + Expect(err).NotTo(HaveOccurred()) + case <-time.After(time.Second * 10): + nsList1, _ := fakeResourceClient.List(namespace1, clients.ListOpts{}) + nsList2, _ := fakeResourceClient.List(namespace2, clients.ListOpts{}) + combined := append(nsList1, nsList2...) + Fail("expected final snapshot before 10 seconds. expected " + log.Sprintf("%v", combined)) + } + } + } - assertSnapshotFakes(testing_solo_io.FakeResourceList{fakeResource1a, fakeResource1b, fakeResource2a, fakeResource2b}, nil) + fakeResource1a, err := fakeResourceClient.Write(testing_solo_io.NewFakeResource(namespace1, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + fakeResource1b, err := fakeResourceClient.Write(testing_solo_io.NewFakeResource(namespace2, name2), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + fakeResourceWatched := testing_solo_io.FakeResourceList{fakeResource1a, fakeResource1b} + assertSnapshotFakes(fakeResourceWatched, nil) + err = fakeResourceClient.Delete(fakeResource1a.GetMetadata().Namespace, fakeResource1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + err = fakeResourceClient.Delete(fakeResource1b.GetMetadata().Namespace, fakeResource1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) - err = fakeResourceClient.Delete(fakeResource2a.GetMetadata().Namespace, fakeResource2a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - err = fakeResourceClient.Delete(fakeResource2b.GetMetadata().Namespace, fakeResource2b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) + fakeResourceNotWatched := testing_solo_io.FakeResourceList{fakeResource1a, fakeResource1b} + assertSnapshotFakes(nil, fakeResourceNotWatched) - assertSnapshotFakes(testing_solo_io.FakeResourceList{fakeResource1a, fakeResource1b}, testing_solo_io.FakeResourceList{fakeResource2a, fakeResource2b}) + deleteNamespaces(ctx, kube, namespace1, namespace2) - err = fakeResourceClient.Delete(fakeResource1a.GetMetadata().Namespace, fakeResource1a.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) - err = fakeResourceClient.Delete(fakeResource1b.GetMetadata().Namespace, fakeResource1b.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) - Expect(err).NotTo(HaveOccurred()) + getNewNamespaces1and2() + createNamespaces(ctx, kube, namespace1, namespace2) + }) + + It("Should not contain resources from a deleted namespace, that is filtered", func() { + ctx := context.Background() + err := emitter.Register() + Expect(err).NotTo(HaveOccurred()) + + snapshots, errs, err := emitter.Snapshots([]string{""}, clients.WatchOpts{ + Ctx: ctx, + RefreshRate: time.Second, + ExpressionSelector: labelExpression1, + }) + Expect(err).NotTo(HaveOccurred()) + + var snap *TestingSnapshot + + /* + MockResource + */ + + assertSnapshotMocks := func(expectMocks MockResourceList, unexpectMocks MockResourceList) { + drain: + for { + select { + case snap = <-snapshots: + for _, expected := range expectMocks { + if _, err := snap.Mocks.Find(expected.GetMetadata().Ref().Strings()); err != nil { + continue drain + } + } + for _, unexpected := range unexpectMocks { + if _, err := snap.Mocks.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + continue drain + } + } + break drain + case err := <-errs: + 
Expect(err).NotTo(HaveOccurred()) + case <-time.After(time.Second * 10): + nsList1, _ := mockResourceClient.List(namespace1, clients.ListOpts{}) + nsList2, _ := mockResourceClient.List(namespace2, clients.ListOpts{}) + combined := append(nsList1, nsList2...) + Fail("expected final snapshot before 10 seconds. expected " + log.Sprintf("%v", combined)) + } + } + } + + // create namespaces + createNamespaceWithLabel(ctx, kube, namespace3, labels1) + createNamespaceWithLabel(ctx, kube, namespace4, labels1) + + mockResource2a, err := mockResourceClient.Write(NewMockResource(namespace3, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockResource2b, err := mockResourceClient.Write(NewMockResource(namespace4, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockResourceNotWatched := MockResourceList{} + mockResourceWatched := MockResourceList{mockResource2a, mockResource2b} + assertSnapshotMocks(mockResourceWatched, mockResourceNotWatched) + + deleteNamespaces(ctx, kube, namespace3) - assertSnapshotFakes(nil, testing_solo_io.FakeResourceList{fakeResource1a, fakeResource1b, fakeResource2a, fakeResource2b}) + mockResourceWatched = MockResourceList{mockResource2b} + mockResourceNotWatched = append(mockResourceNotWatched, mockResource2a) + assertSnapshotMocks(mockResourceWatched, mockResourceNotWatched) + + for _, r := range mockResourceWatched { + err = mockResourceClient.Delete(r.GetMetadata().Namespace, r.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + mockResourceNotWatched = append(mockResourceNotWatched, r) + } + assertSnapshotMocks(nil, mockResourceNotWatched) + + deleteNamespaces(ctx, kube, namespace4) + getNewNamespaces() + + /* + FrequentlyChangingAnnotationsResource + */ + + assertSnapshotFcars := func(expectFcars FrequentlyChangingAnnotationsResourceList, unexpectFcars FrequentlyChangingAnnotationsResourceList) { + drain: + for { + select { + case snap = <-snapshots: + for _, expected := range expectFcars { + if _, err := snap.Fcars.Find(expected.GetMetadata().Ref().Strings()); err != nil { + continue drain + } + } + for _, unexpected := range unexpectFcars { + if _, err := snap.Fcars.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + continue drain + } + } + break drain + case err := <-errs: + Expect(err).NotTo(HaveOccurred()) + case <-time.After(time.Second * 10): + nsList1, _ := frequentlyChangingAnnotationsResourceClient.List(namespace1, clients.ListOpts{}) + nsList2, _ := frequentlyChangingAnnotationsResourceClient.List(namespace2, clients.ListOpts{}) + combined := append(nsList1, nsList2...) + Fail("expected final snapshot before 10 seconds. 
expected " + log.Sprintf("%v", combined)) + } + } + } + + // create namespaces + createNamespaceWithLabel(ctx, kube, namespace3, labels1) + createNamespaceWithLabel(ctx, kube, namespace4, labels1) + + frequentlyChangingAnnotationsResource2a, err := frequentlyChangingAnnotationsResourceClient.Write(NewFrequentlyChangingAnnotationsResource(namespace3, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + frequentlyChangingAnnotationsResource2b, err := frequentlyChangingAnnotationsResourceClient.Write(NewFrequentlyChangingAnnotationsResource(namespace4, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + frequentlyChangingAnnotationsResourceNotWatched := FrequentlyChangingAnnotationsResourceList{} + frequentlyChangingAnnotationsResourceWatched := FrequentlyChangingAnnotationsResourceList{frequentlyChangingAnnotationsResource2a, frequentlyChangingAnnotationsResource2b} + assertSnapshotFcars(frequentlyChangingAnnotationsResourceWatched, frequentlyChangingAnnotationsResourceNotWatched) + + deleteNamespaces(ctx, kube, namespace3) + + frequentlyChangingAnnotationsResourceWatched = FrequentlyChangingAnnotationsResourceList{frequentlyChangingAnnotationsResource2b} + frequentlyChangingAnnotationsResourceNotWatched = append(frequentlyChangingAnnotationsResourceNotWatched, frequentlyChangingAnnotationsResource2a) + assertSnapshotFcars(frequentlyChangingAnnotationsResourceWatched, frequentlyChangingAnnotationsResourceNotWatched) + + for _, r := range frequentlyChangingAnnotationsResourceWatched { + err = frequentlyChangingAnnotationsResourceClient.Delete(r.GetMetadata().Namespace, r.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + frequentlyChangingAnnotationsResourceNotWatched = append(frequentlyChangingAnnotationsResourceNotWatched, r) + } + assertSnapshotFcars(nil, frequentlyChangingAnnotationsResourceNotWatched) + + deleteNamespaces(ctx, kube, namespace4) + getNewNamespaces() + + /* + FakeResource + */ + + assertSnapshotFakes := func(expectFakes testing_solo_io.FakeResourceList, unexpectFakes testing_solo_io.FakeResourceList) { + drain: + for { + select { + case snap = <-snapshots: + for _, expected := range expectFakes { + if _, err := snap.Fakes.Find(expected.GetMetadata().Ref().Strings()); err != nil { + continue drain + } + } + for _, unexpected := range unexpectFakes { + if _, err := snap.Fakes.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + continue drain + } + } + break drain + case err := <-errs: + Expect(err).NotTo(HaveOccurred()) + case <-time.After(time.Second * 10): + nsList1, _ := fakeResourceClient.List(namespace1, clients.ListOpts{}) + nsList2, _ := fakeResourceClient.List(namespace2, clients.ListOpts{}) + combined := append(nsList1, nsList2...) + Fail("expected final snapshot before 10 seconds. 
expected " + log.Sprintf("%v", combined)) + } + } + } + + // create namespaces + createNamespaceWithLabel(ctx, kube, namespace3, labels1) + createNamespaceWithLabel(ctx, kube, namespace4, labels1) + + fakeResource2a, err := fakeResourceClient.Write(testing_solo_io.NewFakeResource(namespace3, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + fakeResource2b, err := fakeResourceClient.Write(testing_solo_io.NewFakeResource(namespace4, name1), clients.WriteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + fakeResourceNotWatched := testing_solo_io.FakeResourceList{} + fakeResourceWatched := testing_solo_io.FakeResourceList{fakeResource2a, fakeResource2b} + assertSnapshotFakes(fakeResourceWatched, fakeResourceNotWatched) + + deleteNamespaces(ctx, kube, namespace3) + + fakeResourceWatched = testing_solo_io.FakeResourceList{fakeResource2b} + fakeResourceNotWatched = append(fakeResourceNotWatched, fakeResource2a) + assertSnapshotFakes(fakeResourceWatched, fakeResourceNotWatched) + + for _, r := range fakeResourceWatched { + err = fakeResourceClient.Delete(r.GetMetadata().Namespace, r.GetMetadata().Name, clients.DeleteOpts{Ctx: ctx}) + Expect(err).NotTo(HaveOccurred()) + fakeResourceNotWatched = append(fakeResourceNotWatched, r) + } + assertSnapshotFakes(nil, fakeResourceNotWatched) + + deleteNamespaces(ctx, kube, namespace4) + getNewNamespaces() + }) + + It("should be able to return a resource from a deleted namespace, after the namespace is re-created", func() { + ctx := context.Background() + err := emitter.Register() + Expect(err).NotTo(HaveOccurred()) + + snapshots, errs, err := emitter.Snapshots([]string{""}, clients.WatchOpts{ + Ctx: ctx, + RefreshRate: time.Second, + ExpressionSelector: labelExpression1, + }) + Expect(err).NotTo(HaveOccurred()) + + var snap *TestingSnapshot + var previous *TestingSnapshot + + /* + MockResource + */ + assertSnapshotMocks := func(expectMocks MockResourceList, unexpectMocks MockResourceList) { + drain: + for { + select { + case snap = <-snapshots: + previous = snap + for _, expected := range expectMocks { + if _, err := snap.Mocks.Find(expected.GetMetadata().Ref().Strings()); err != nil { + continue drain + } + } + for _, unexpected := range unexpectMocks { + if _, err := snap.Mocks.Find(unexpected.GetMetadata().Ref().Strings()); err == nil { + continue drain + } + } + break drain + case err := <-errs: + Expect(err).NotTo(HaveOccurred()) + case <-time.After(time.Second * 10): + var expectedResources map[string][]string + var unexpectedResource map[string][]string + + if previous != nil { + expectedResources = findNonMatchingResources(convertMocksToMetadataGetter(expectMocks), convertMocksToMetadataGetter(previous.Mocks)) + unexpectedResource = findMatchingResources(convertMocksToMetadataGetter(unexpectMocks), convertMocksToMetadataGetter(previous.Mocks)) + } else { + expectedResources = getMapOfResources(convertMocksToMetadataGetter(expectMocks)) + unexpectedResource = getMapOfResources(convertMocksToMetadataGetter(unexpectMocks)) + } + getList := func(ns string) ([]metadataGetter, error) { + l, err := mockResourceClient.List(ns, clients.ListOpts{}) + return convertMocksToMetadataGetter(l), err + } + namespaceResources := getMapOfNamespaceResources(getList) + Fail(fmt.Sprintf("expected final snapshot before 10 seconds. 
expected \nExpected:\n%#v\n\nUnexpected:\n%#v\n\nnamespaces:\n%#v", expectedResources, unexpectedResource, namespaceResources))
+          }
+        }
+      }
+
+      createNamespaceWithLabel(ctx, kube, namespace3, labels1)
+
+      mockResource1a, err := mockResourceClient.Write(NewMockResource(namespace3, name1), clients.WriteOpts{Ctx: ctx})
+      Expect(err).NotTo(HaveOccurred())
+      assertSnapshotMocks(MockResourceList{mockResource1a}, nil)
+
+      deleteNamespaces(ctx, kube, namespace3)
+      Eventually(func() bool {
+        _, err = kube.CoreV1().Namespaces().Get(ctx, namespace3, metav1.GetOptions{})
+        return apierrors.IsNotFound(err)
+      }, 15*time.Second, 1*time.Second).Should(BeTrue())
+      createNamespaceWithLabel(ctx, kube, namespace3, labels1)
+
+      mockResource2a, err := mockResourceClient.Write(NewMockResource(namespace3, name2), clients.WriteOpts{Ctx: ctx})
+      Expect(err).NotTo(HaveOccurred())
+      assertSnapshotMocks(MockResourceList{mockResource2a}, MockResourceList{mockResource1a})
+
+      deleteNamespaces(ctx, kube, namespace3)
+      Eventually(func() bool {
+        _, err = kube.CoreV1().Namespaces().Get(ctx, namespace3, metav1.GetOptions{})
+        return apierrors.IsNotFound(err)
+      }, 15*time.Second, 1*time.Second).Should(BeTrue())
+
+    })
  })
+
+  Context("use different resource namespace listers", func() {
+    BeforeEach(func() {
+      resourceNamespaceLister = namespace.NewKubeClientResourceNamespaceLister(kube)
+      emitter = NewTestingEmitter(mockResourceClient, frequentlyChangingAnnotationsResourceClient, fakeResourceClient, resourceNamespaceLister)
+    })
+
+    It("Should work with the Kube Client Namespace Lister", func() {
+      runNamespacedSelectorsWithWatchNamespaces()
+    })
+  })
+})