From c18800e19da2e993c108df70fe9dd6c8c5e40d76 Mon Sep 17 00:00:00 2001 From: Hasan Turken Date: Wed, 1 May 2024 00:32:46 +0300 Subject: [PATCH 01/19] Bump build submodule and dependencies to latest Signed-off-by: Hasan Turken --- Makefile | 4 ++-- build | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Makefile b/Makefile index 5534faec..4bc23a4f 100644 --- a/Makefile +++ b/Makefile @@ -35,8 +35,8 @@ GOLANGCILINT_VERSION = 1.55.2 # ==================================================================================== # Setup Kubernetes tools -KIND_VERSION = v0.18.0 -UP_VERSION = v0.21.0 +KIND_VERSION = v0.22.0 +UP_VERSION = v0.28.0 UPTEST_VERSION = v0.9.0 UP_CHANNEL = stable USE_HELM3 = true diff --git a/build b/build index 7233e364..cdee0068 160000 --- a/build +++ b/build @@ -1 +1 @@ -Subproject commit 7233e36491dc8298d33f1feb1bf8c5056a960cac +Subproject commit cdee00685cfc90fce40d3f6a5eb9c8f044edbaa3 From 3138fe04ce4a60fc4c9abb59fc3d1e836b71bf8e Mon Sep 17 00:00:00 2001 From: Hasan Turken Date: Wed, 1 May 2024 00:33:06 +0300 Subject: [PATCH 02/19] Implement watching referenced resources Signed-off-by: Hasan Turken --- go.mod | 2 +- internal/controller/object/indexes.go | 105 ++++++++++++ internal/controller/object/informers.go | 205 ++++++++++++++++++++++++ internal/controller/object/object.go | 67 +++++++- 4 files changed, 377 insertions(+), 2 deletions(-) create mode 100644 internal/controller/object/indexes.go create mode 100644 internal/controller/object/informers.go diff --git a/go.mod b/go.mod index aef6678d..3d5e4b54 100644 --- a/go.mod +++ b/go.mod @@ -14,6 +14,7 @@ require ( golang.org/x/oauth2 v0.16.0 gopkg.in/alecthomas/kingpin.v2 v2.2.6 k8s.io/api v0.29.1 + k8s.io/apiextensions-apiserver v0.29.1 k8s.io/apimachinery v0.29.1 k8s.io/client-go v0.29.1 k8s.io/utils v0.0.0-20230726121419-3b25d923346b @@ -95,7 +96,6 @@ require ( gopkg.in/retry.v1 v1.0.3 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - 
k8s.io/apiextensions-apiserver v0.29.1 // indirect k8s.io/component-base v0.29.1 // indirect k8s.io/klog/v2 v2.110.1 // indirect k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect diff --git a/internal/controller/object/indexes.go b/internal/controller/object/indexes.go new file mode 100644 index 00000000..f831c5dc --- /dev/null +++ b/internal/controller/object/indexes.go @@ -0,0 +1,105 @@ +/* +Copyright 2023 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package object + +import ( + "context" + "fmt" + "github.com/crossplane-contrib/provider-kubernetes/apis/object/v1alpha2" + + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/client" + runtimeevent "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/crossplane/crossplane-runtime/pkg/logging" +) + +const ( + // objectRefGVKsIndex is an index of all GroupKinds that + // are in use by a Composition. It indexes from spec.resourceRefs, not + // from spec.resources. Hence, it will also work with functions. + objectRefGVKsIndex = "objectsRefsGVKs" + // objectsRefsIndex is an index of resourceRefs that are owned + // by a composite. 
+ objectsRefsIndex = "objectsRefs" +) + +var ( + _ client.IndexerFunc = IndexReferencedResourceRefGVKs + _ client.IndexerFunc = IndexReferencesResourcesRefs +) + +// IndexReferencedResourceRefGVKs assumes the passed object is a composite. It +// returns gvk keys for every resource referenced in the composite. +func IndexReferencedResourceRefGVKs(o client.Object) []string { + obj, ok := o.(*v1alpha2.Object) + if !ok { + return nil // should never happen + } + refs := obj.Spec.References + keys := make([]string, 0, len(refs)) + for _, ref := range refs { + refAPIVersion, refKind, _, _ := getReferenceInfo(ref) + group, version := parseAPIVersion(refAPIVersion) + keys = append(keys, schema.GroupVersionKind{Group: group, Version: version, Kind: refKind}.String()) + } + // unification is done by the informer. + return keys +} + +// IndexReferencesResourcesRefs assumes the passed object is a composite. It +// returns keys for every composed resource referenced in the composite. +func IndexReferencesResourcesRefs(o client.Object) []string { + obj, ok := o.(*v1alpha2.Object) + if !ok { + return nil // should never happen + } + refs := obj.Spec.References + keys := make([]string, 0, len(refs)) + for _, ref := range refs { + refAPIVersion, refKind, refNamespace, refName := getReferenceInfo(ref) + keys = append(keys, refKey(refNamespace, refName, refKind, refAPIVersion)) + } + return keys +} + +func refKey(ns, name, kind, apiVersion string) string { + return fmt.Sprintf("%s.%s.%s.%s", name, ns, kind, apiVersion) +} + +func enqueueObjectsForReferences(ca cache.Cache, log logging.Logger) func(ctx context.Context, ev runtimeevent.UpdateEvent, q workqueue.RateLimitingInterface) { + return func(ctx context.Context, ev runtimeevent.UpdateEvent, q workqueue.RateLimitingInterface) { + rGVK := ev.ObjectNew.GetObjectKind().GroupVersionKind() + key := refKey(ev.ObjectNew.GetNamespace(), ev.ObjectNew.GetName(), rGVK.Kind, rGVK.GroupVersion().String()) + + objects := v1alpha2.ObjectList{} 
+ if err := ca.List(ctx, &objects, client.MatchingFields{objectsRefsIndex: key}); err != nil { + log.Debug("cannot list objects related to a reference change", "error", err, "fieldSelector", objectsRefsIndex+"="+key) + return + } + + // queue those Objects for reconciliation + for _, o := range objects.Items { + log.Info("Enqueueing Object because referenced resource changed", "name", o.GetName(), "referencedGVK", rGVK.String(), "referencedName", ev.ObjectNew.GetName()) + q.Add(reconcile.Request{NamespacedName: types.NamespacedName{Name: o.GetName()}}) + } + } +} diff --git a/internal/controller/object/informers.go b/internal/controller/object/informers.go new file mode 100644 index 00000000..0426ac0e --- /dev/null +++ b/internal/controller/object/informers.go @@ -0,0 +1,205 @@ +package object + +import ( + "context" + "github.com/crossplane-contrib/provider-kubernetes/apis/object/v1alpha2" + "strings" + "sync" + + "github.com/google/uuid" + kunstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + kcache "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + cache "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/cluster" + runtimeevent "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/source" + + "github.com/crossplane/crossplane-runtime/pkg/controller" + "github.com/crossplane/crossplane-runtime/pkg/logging" +) + +// referencedResourceInformers manages composed resource informers referenced by +// composite resources. It serves as an event source for realtime notifications +// of changed composed resources, with the composite reconcilers as sinks. +// It keeps composed resource informers alive as long as there are composites +// referencing them. 
In parallel, the composite reconcilers keep track of // references to composed resources, and inform referencedResourceInformers about // them via the WatchReferencedResources method. +type referencedResourceInformers struct { + log logging.Logger + cluster cluster.Cluster + + gvkRoutedCache *controller.GVKRoutedCache + + lock sync.RWMutex // everything below is protected by this lock + + // cdCaches holds the composed resource informers. These are dynamically + // started and stopped based on the composites that reference them. + cdCaches map[schema.GroupVersionKind]cdCache + objectsCache cache.Cache + sinks map[string]func(ev runtimeevent.UpdateEvent) // by some uid +} + +type cdCache struct { + cache cache.Cache + cancelFn context.CancelFunc +} + +var _ source.Source = &referencedResourceInformers{} + +// Start implements source.Source, i.e. starting referencedResourceInformers as +// source with h as the sink of update events. It keeps sending events until +// ctx is done. +// Note that Start can be called multiple times to deliver events to multiple +// (composite resource) controllers. +func (i *referencedResourceInformers) Start(ctx context.Context, h handler.EventHandler, q workqueue.RateLimitingInterface, ps ...predicate.Predicate) error { + id := uuid.New().String() + + i.lock.Lock() + defer i.lock.Unlock() + i.sinks[id] = func(ev runtimeevent.UpdateEvent) { + for _, p := range ps { + if !p.Update(ev) { + return + } + } + h.Update(ctx, ev, q) + } + + go func() { + <-ctx.Done() + + i.lock.Lock() + defer i.lock.Unlock() + delete(i.sinks, id) + }() + + return nil +} + +// WatchReferencedResources starts informers for the given composed resource GVKs. +// This is wired into the composite reconciler, which will call this method on +// every reconcile to make referencedResourceInformers aware of the composed +// resources the given composite resource references. 
+// +// Note that this complements cleanupReferencedResourceInformers which regularly +// garbage collects composed resource informers that are no longer referenced by +// any composite. +func (i *referencedResourceInformers) WatchReferencedResources(gvks ...schema.GroupVersionKind) { + i.lock.RLock() + defer i.lock.RUnlock() + + // start new informers + for _, gvk := range gvks { + if _, found := i.cdCaches[gvk]; found { + continue + } + + log := i.log.WithValues("gvk", gvk.String()) + + ca, err := cache.New(i.cluster.GetConfig(), cache.Options{}) + if err != nil { + log.Debug("failed creating a cache", "error", err) + continue + } + + // don't forget to call cancelFn in error cases to avoid leaks. In the + // happy case it's called from the go routine starting the cache below. + ctx, cancelFn := context.WithCancel(context.Background()) + + u := kunstructured.Unstructured{} + u.SetGroupVersionKind(gvk) + inf, err := ca.GetInformer(ctx, &u, cache.BlockUntilSynced(false)) // don't block. We wait in the go routine below. + if err != nil { + cancelFn() + log.Debug("failed getting informer", "error", err) + continue + } + + if _, err := inf.AddEventHandler(kcache.ResourceEventHandlerFuncs{ + UpdateFunc: func(oldObj, newObj interface{}) { + old := oldObj.(client.Object) //nolint:forcetypeassert // Will always be client.Object. + obj := newObj.(client.Object) //nolint:forcetypeassert // Will always be client.Object. 
+ if old.GetResourceVersion() == obj.GetResourceVersion() { + return + } + + i.lock.RLock() + defer i.lock.RUnlock() + + ev := runtimeevent.UpdateEvent{ + ObjectOld: old, + ObjectNew: obj, + } + for _, handleFn := range i.sinks { + handleFn(ev) + } + }, + }); err != nil { + cancelFn() + log.Debug("failed adding event handler", "error", err) + continue + } + + go func() { + defer cancelFn() + + log.Info("Starting composed resource watch") + _ = ca.Start(ctx) + }() + + i.cdCaches[gvk] = cdCache{ + cache: ca, + cancelFn: cancelFn, + } + + // wait for in the background, and only when synced add to the routed cache + go func(gvk schema.GroupVersionKind) { + if synced := ca.WaitForCacheSync(ctx); synced { + log.Debug("Composed resource cache synced") + i.gvkRoutedCache.AddDelegate(gvk, ca) + } + }(gvk) + } +} + +// cleanupReferencedResourceInformers garbage collects composed resource informers +// that are no longer referenced by any composite resource. +// +// Note that this complements WatchReferencedResources which starts informers for +// the composed resources referenced by a composite resource. 
+func (i *referencedResourceInformers) cleanupReferencedResourceInformers(ctx context.Context) { + // stop old informers + for gvk, inf := range i.cdCaches { + list := v1alpha2.ObjectList{} + if err := i.objectsCache.List(ctx, &list, client.MatchingFields{objectRefGVKsIndex: gvk.String()}); err != nil { + i.log.Debug("cannot list objects referencing a certain resource GVK", "error", err, "fieldSelector", objectRefGVKsIndex+"="+gvk.String()) + } + + if len(list.Items) > 0 { + continue + } + + inf.cancelFn() + i.gvkRoutedCache.RemoveDelegate(gvk) + i.log.Info("Stopped referenced resource watch", "gvk", gvk.String()) + delete(i.cdCaches, gvk) + } +} + +func parseAPIVersion(v string) (string, string) { + parts := strings.SplitN(v, "/", 2) + switch len(parts) { + case 1: + return "", parts[0] + case 2: + return parts[0], parts[1] + default: + return "", "" + } +} diff --git a/internal/controller/object/object.go b/internal/controller/object/object.go index b1fd63f6..733f3717 100644 --- a/internal/controller/object/object.go +++ b/internal/controller/object/object.go @@ -20,7 +20,13 @@ import ( "context" "encoding/base64" "fmt" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/util/workqueue" "math/rand" + runtimeevent "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/manager" "strings" "time" @@ -82,12 +88,47 @@ const ( errSanitizeSecretData = "cannot sanitize secret data" ) +// KindObserver tracks kinds of referenced composed resources in order to start +// watches for them for realtime events. +type KindObserver interface { + // WatchReferencedResources starts a watch of the given kinds to trigger + // reconciles when a referenced object of those kinds changes. + WatchReferencedResources(kind ...schema.GroupVersionKind) +} + // Setup adds a controller that reconciles Object managed resources. 
func Setup(mgr ctrl.Manager, o controller.Options, sanitizeSecrets bool, pollJitter time.Duration) error { name := managed.ControllerName(v1alpha2.ObjectGroupKind) + l := o.Logger.WithValues("controller", name) cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + ca := mgr.GetCache() + if err := ca.IndexField(context.Background(), &v1alpha2.Object{}, objectRefGVKsIndex, IndexReferencedResourceRefGVKs); err != nil { + return errors.Wrap(err, "cannot add index for object reference GVKs") + } + if err := ca.IndexField(context.Background(), &v1alpha2.Object{}, objectsRefsIndex, IndexReferencesResourcesRefs); err != nil { + return errors.Wrap(err, "cannot add index for object references") + } + + i := referencedResourceInformers{ + log: l, + cluster: mgr, + + gvkRoutedCache: controller.NewGVKRoutedCache(mgr.GetScheme(), mgr.GetCache()), + cdCaches: make(map[schema.GroupVersionKind]cdCache), + objectsCache: ca, + sinks: make(map[string]func(ev runtimeevent.UpdateEvent)), + } + + if err := mgr.Add(manager.RunnableFunc(func(ctx context.Context) error { + // Run every 5 seconds. 
+ wait.UntilWithContext(ctx, i.cleanupReferencedResourceInformers, 5*time.Second) + return nil + })); err != nil { + return errors.Wrap(err, "cannot add cleanup referenced resource informers runnable") + } + reconcilerOptions := []managed.ReconcilerOption{ managed.WithExternalConnecter(&connector{ logger: o.Logger, @@ -95,6 +136,7 @@ func Setup(mgr ctrl.Manager, o controller.Options, sanitizeSecrets bool, pollJit kube: mgr.GetClient(), usage: resource.NewProviderConfigUsageTracker(mgr.GetClient(), &apisv1alpha1.ProviderConfigUsage{}), clientForProviderFn: clients.ClientForProvider, + kindObserver: &i, }), managed.WithFinalizer(&objFinalizer{client: mgr.GetClient()}), managed.WithPollInterval(o.PollInterval), @@ -107,7 +149,7 @@ func Setup(mgr ctrl.Manager, o controller.Options, sanitizeSecrets bool, pollJit // https://github.com/crossplane/crossplane-runtime/blob/7fcb8c5cad6fc4abb6649813b92ab92e1832d368/pkg/reconciler/managed/reconciler.go#L573 return pollInterval + time.Duration((rand.Float64()-0.5)*2*float64(pollJitter)) //nolint G404 // No need for secure randomness }), - managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithLogger(l), managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), managed.WithConnectionPublishers(cps...), } @@ -125,6 +167,11 @@ func Setup(mgr ctrl.Manager, o controller.Options, sanitizeSecrets bool, pollJit Named(name). WithOptions(o.ForControllerRuntime()). For(&v1alpha2.Object{}). + WatchesRawSource(&i, handler.Funcs{ + UpdateFunc: func(ctx context.Context, ev runtimeevent.UpdateEvent, q workqueue.RateLimitingInterface) { + enqueueObjectsForReferences(ca, l)(ctx, ev, q) + }, + }). 
Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) } @@ -134,6 +181,8 @@ type connector struct { logger logging.Logger sanitizeSecrets bool + kindObserver KindObserver + clientForProviderFn func(ctx context.Context, inclusterClient client.Client, providerConfigName string) (client.Client, error) } @@ -164,6 +213,8 @@ func (c *connector) Connect(ctx context.Context, mg resource.Managed) (managed.E }, localClient: c.kube, sanitizeSecrets: c.sanitizeSecrets, + + kindObserver: c.kindObserver, }, nil } @@ -173,6 +224,8 @@ type external struct { // localClient is specifically used to connect to local cluster localClient client.Client sanitizeSecrets bool + + kindObserver KindObserver } func (c *external) Observe(ctx context.Context, mg resource.Managed) (managed.ExternalObservation, error) { @@ -407,6 +460,7 @@ func (c *external) resolveReferencies(ctx context.Context, obj *v1alpha2.Object) c.logger.Debug("Resolving referencies.") // Loop through references to resolve each referenced resource + var gvks []schema.GroupVersionKind for _, ref := range obj.Spec.References { if ref.DependsOn == nil && ref.PatchesFrom == nil { continue @@ -432,6 +486,17 @@ func (c *external) resolveReferencies(ctx context.Context, obj *v1alpha2.Object) return errors.Wrap(err, errPatchFromReferencedResource) } } + + g, v := parseAPIVersion(refAPIVersion) + gvks = append(gvks, schema.GroupVersionKind{ + Group: g, + Version: v, + Kind: refKind, + }) + } + + if c.kindObserver != nil { + c.kindObserver.WatchReferencedResources(gvks...) 
} return nil From 096e3ac3a82186d0faec9a9f01ade8e208801058 Mon Sep 17 00:00:00 2001 From: Hasan Turken Date: Thu, 2 May 2024 11:49:33 +0300 Subject: [PATCH 03/19] Watch managed resources as well Signed-off-by: Hasan Turken --- go.mod | 2 +- internal/clients/client.go | 35 ++++++++++- internal/controller/object/indexes.go | 46 ++++++++++++--- internal/controller/object/informers.go | 59 +++++++++++-------- internal/controller/object/object.go | 51 +++++++++------- .../observedobjectcollection/reconciler.go | 2 +- 6 files changed, 138 insertions(+), 57 deletions(-) diff --git a/go.mod b/go.mod index 3d5e4b54..aef6678d 100644 --- a/go.mod +++ b/go.mod @@ -14,7 +14,6 @@ require ( golang.org/x/oauth2 v0.16.0 gopkg.in/alecthomas/kingpin.v2 v2.2.6 k8s.io/api v0.29.1 - k8s.io/apiextensions-apiserver v0.29.1 k8s.io/apimachinery v0.29.1 k8s.io/client-go v0.29.1 k8s.io/utils v0.0.0-20230726121419-3b25d923346b @@ -96,6 +95,7 @@ require ( gopkg.in/retry.v1 v1.0.3 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect + k8s.io/apiextensions-apiserver v0.29.1 // indirect k8s.io/component-base v0.29.1 // indirect k8s.io/klog/v2 v2.110.1 // indirect k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect diff --git a/internal/clients/client.go b/internal/clients/client.go index eff1a1a0..0c410a17 100644 --- a/internal/clients/client.go +++ b/internal/clients/client.go @@ -107,8 +107,28 @@ func restConfigFromAPIConfig(c *api.Config) (*rest.Config, error) { return config, nil } +type Cluster interface { + // GetConfig returns an initialized Config + GetConfig() *rest.Config +} + +type ClusterClient interface { + client.Client + resource.Applicator + Cluster +} + +type clusterClient struct { + resource.ClientApplicator + config *rest.Config +} + +func (c *clusterClient) GetConfig() *rest.Config { + return c.config +} + // ClientForProvider returns the client for the given provider config -func ClientForProvider(ctx context.Context, 
inclusterClient client.Client, providerConfigName string) (client.Client, error) { //nolint:gocyclo +func ClientForProvider(ctx context.Context, inclusterClient client.Client, providerConfigName string) (ClusterClient, error) { //nolint:gocyclo pc := &v1alpha1.ProviderConfig{} if err := inclusterClient.Get(ctx, types.NamespacedName{Name: providerConfigName}, pc); err != nil { return nil, errors.Wrap(err, errGetPC) @@ -171,6 +191,15 @@ func ClientForProvider(ctx context.Context, inclusterClient client.Client, provi return nil, errors.Errorf("unknown identity type: %s", id.Type) } } - - return NewKubeClient(rc) + k, err := NewKubeClient(rc) + if err != nil { + return nil, errors.Wrap(err, "cannot create Kubernetes client") + } + return &clusterClient{ + ClientApplicator: resource.ClientApplicator{ + Client: k, + Applicator: resource.NewAPIPatchingApplicator(k), + }, + config: rc, + }, nil } diff --git a/internal/controller/object/indexes.go b/internal/controller/object/indexes.go index f831c5dc..01c80007 100644 --- a/internal/controller/object/indexes.go +++ b/internal/controller/object/indexes.go @@ -21,7 +21,6 @@ import ( "fmt" "github.com/crossplane-contrib/provider-kubernetes/apis/object/v1alpha2" - "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/util/workqueue" "sigs.k8s.io/controller-runtime/pkg/cache" @@ -59,12 +58,25 @@ func IndexReferencedResourceRefGVKs(o client.Object) []string { for _, ref := range refs { refAPIVersion, refKind, _, _ := getReferenceInfo(ref) group, version := parseAPIVersion(refAPIVersion) - keys = append(keys, schema.GroupVersionKind{Group: group, Version: version, Kind: refKind}.String()) + // References are always on control plane, so we don't pass the config name. 
+ keys = append(keys, refKeyGKV("", refKind, group, version)) } + + d, err := getDesired(obj) + if err == nil { + keys = append(keys, refKeyGKV(obj.Spec.ProviderConfigReference.Name, d.GetKind(), d.GroupVersionKind().Group, d.GroupVersionKind().Version)) // unification is done by the informer. + } else { + // TODO: what to do here? + } + // unification is done by the informer. return keys } +func refKeyGKV(config, kind, group, version string) string { + return fmt.Sprintf("%s.%s.%s.%s", config, kind, group, version) +} + // IndexReferencesResourcesRefs assumes the passed object is a composite. It // returns keys for every composed resource referenced in the composite. func IndexReferencesResourcesRefs(o client.Object) []string { @@ -76,30 +88,50 @@ func IndexReferencesResourcesRefs(o client.Object) []string { keys := make([]string, 0, len(refs)) for _, ref := range refs { refAPIVersion, refKind, refNamespace, refName := getReferenceInfo(ref) - keys = append(keys, refKey(refNamespace, refName, refKind, refAPIVersion)) + // References are always on control plane, so we don't pass the config name. + keys = append(keys, refKey("", refNamespace, refName, refKind, refAPIVersion)) + } + d, err := getDesired(obj) + if err == nil { + keys = append(keys, refKey(obj.Spec.ProviderConfigReference.Name, d.GetNamespace(), d.GetName(), d.GetKind(), d.GetAPIVersion())) // unification is done by the informer. + } else { + // TODO: what to do here? 
} return keys } -func refKey(ns, name, kind, apiVersion string) string { - return fmt.Sprintf("%s.%s.%s.%s", name, ns, kind, apiVersion) +func refKey(config, ns, name, kind, apiVersion string) string { + return fmt.Sprintf("%s.%s.%s.%s.%s", config, name, ns, kind, apiVersion) } func enqueueObjectsForReferences(ca cache.Cache, log logging.Logger) func(ctx context.Context, ev runtimeevent.UpdateEvent, q workqueue.RateLimitingInterface) { return func(ctx context.Context, ev runtimeevent.UpdateEvent, q workqueue.RateLimitingInterface) { + config := getConfigName(ev.ObjectNew) + // "config" is the provider config name, which is the value for the + // provider config ref annotation. It will be empty for objects that + // are not controlled by the "Object" resource, i.e. referenced objects. rGVK := ev.ObjectNew.GetObjectKind().GroupVersionKind() - key := refKey(ev.ObjectNew.GetNamespace(), ev.ObjectNew.GetName(), rGVK.Kind, rGVK.GroupVersion().String()) + key := refKey(config, ev.ObjectNew.GetNamespace(), ev.ObjectNew.GetName(), rGVK.Kind, rGVK.GroupVersion().String()) objects := v1alpha2.ObjectList{} if err := ca.List(ctx, &objects, client.MatchingFields{objectsRefsIndex: key}); err != nil { log.Debug("cannot list objects related to a reference change", "error", err, "fieldSelector", objectsRefsIndex+"="+key) return } + log.Info("Will enqueue objects for references", "len", len(objects.Items)) // queue those Objects for reconciliation for _, o := range objects.Items { - log.Info("Enqueueing Object because referenced resource changed", "name", o.GetName(), "referencedGVK", rGVK.String(), "referencedName", ev.ObjectNew.GetName()) + log.Info("Enqueueing Object because referenced resource changed", "len", len(objects.Items), "name", o.GetName(), "referencedGVK", rGVK.String(), "referencedName", ev.ObjectNew.GetName()) q.Add(reconcile.Request{NamespacedName: types.NamespacedName{Name: o.GetName()}}) } } } + +func getConfigName(o client.Object) string { + ann := 
o.GetAnnotations() + if ann == nil { + return "" + } + return ann["kubernetes.crossplane.io/provider-config-ref"] +} diff --git a/internal/controller/object/informers.go b/internal/controller/object/informers.go index 0426ac0e..31305633 100644 --- a/internal/controller/object/informers.go +++ b/internal/controller/object/informers.go @@ -2,7 +2,6 @@ package object import ( "context" - "github.com/crossplane-contrib/provider-kubernetes/apis/object/v1alpha2" "strings" "sync" @@ -11,15 +10,17 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" kcache "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" - cache "sigs.k8s.io/controller-runtime/pkg/cache" + + "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/cluster" + runtimeevent "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/source" - "github.com/crossplane/crossplane-runtime/pkg/controller" + "github.com/crossplane-contrib/provider-kubernetes/apis/object/v1alpha2" + "github.com/crossplane-contrib/provider-kubernetes/internal/clients" "github.com/crossplane/crossplane-runtime/pkg/logging" ) @@ -32,19 +33,26 @@ import ( // them via the WatchReferencedResources method. type referencedResourceInformers struct { log logging.Logger - cluster cluster.Cluster - - gvkRoutedCache *controller.GVKRoutedCache + cluster clients.Cluster lock sync.RWMutex // everything below is protected by this lock // cdCaches holds the composed resource informers. These are dynamically // started and stopped based on the composites that reference them. 
- cdCaches map[schema.GroupVersionKind]cdCache + cdCaches map[gvkWithConfig]cdCache objectsCache cache.Cache sinks map[string]func(ev runtimeevent.UpdateEvent) // by some uid } +type gvkWithConfig struct { + config string + gvk schema.GroupVersionKind +} + +func (g gvkWithConfig) String() string { + return g.config + "." + g.gvk.String() +} + type cdCache struct { cache cache.Cache cancelFn context.CancelFunc @@ -90,19 +98,24 @@ func (i *referencedResourceInformers) Start(ctx context.Context, h handler.Event // Note that this complements cleanupReferencedResourceInformers which regularly // garbage collects composed resource informers that are no longer referenced by // any composite. -func (i *referencedResourceInformers) WatchReferencedResources(gvks ...schema.GroupVersionKind) { +func (i *referencedResourceInformers) WatchReferencedResources(cluster clients.Cluster, gcs ...gvkWithConfig) { i.lock.RLock() defer i.lock.RUnlock() // start new informers - for _, gvk := range gvks { - if _, found := i.cdCaches[gvk]; found { + for _, gc := range gcs { + if _, found := i.cdCaches[gc]; found { continue } - log := i.log.WithValues("gvk", gvk.String()) + log := i.log.WithValues("config", gc.config, "gvk", gc.gvk.String()) + + if cluster == nil { + // Default to control plane cluster. + cluster = i.cluster + } - ca, err := cache.New(i.cluster.GetConfig(), cache.Options{}) + ca, err := cache.New(cluster.GetConfig(), cache.Options{}) if err != nil { log.Debug("failed creating a cache", "error", err) continue @@ -113,7 +126,7 @@ func (i *referencedResourceInformers) WatchReferencedResources(gvks ...schema.Gr ctx, cancelFn := context.WithCancel(context.Background()) u := kunstructured.Unstructured{} - u.SetGroupVersionKind(gvk) + u.SetGroupVersionKind(gc.gvk) inf, err := ca.GetInformer(ctx, &u, cache.BlockUntilSynced(false)) // don't block. We wait in the go routine below. 
if err != nil { cancelFn() @@ -153,18 +166,17 @@ func (i *referencedResourceInformers) WatchReferencedResources(gvks ...schema.Gr _ = ca.Start(ctx) }() - i.cdCaches[gvk] = cdCache{ + i.cdCaches[gc] = cdCache{ cache: ca, cancelFn: cancelFn, } // wait for in the background, and only when synced add to the routed cache - go func(gvk schema.GroupVersionKind) { + go func() { if synced := ca.WaitForCacheSync(ctx); synced { log.Debug("Composed resource cache synced") - i.gvkRoutedCache.AddDelegate(gvk, ca) } - }(gvk) + }() } } @@ -175,10 +187,10 @@ func (i *referencedResourceInformers) WatchReferencedResources(gvks ...schema.Gr // the composed resources referenced by a composite resource. func (i *referencedResourceInformers) cleanupReferencedResourceInformers(ctx context.Context) { // stop old informers - for gvk, inf := range i.cdCaches { + for gc, inf := range i.cdCaches { list := v1alpha2.ObjectList{} - if err := i.objectsCache.List(ctx, &list, client.MatchingFields{objectRefGVKsIndex: gvk.String()}); err != nil { - i.log.Debug("cannot list objects referencing a certain resource GVK", "error", err, "fieldSelector", objectRefGVKsIndex+"="+gvk.String()) + if err := i.objectsCache.List(ctx, &list, client.MatchingFields{objectRefGVKsIndex: refKeyGKV(gc.config, gc.gvk.Kind, gc.gvk.Group, gc.gvk.Version)}); err != nil { + i.log.Debug("cannot list objects referencing a certain resource GVK", "error", err, "fieldSelector", objectRefGVKsIndex+"="+gc.String()) } if len(list.Items) > 0 { @@ -186,9 +198,8 @@ func (i *referencedResourceInformers) cleanupReferencedResourceInformers(ctx con } inf.cancelFn() - i.gvkRoutedCache.RemoveDelegate(gvk) - i.log.Info("Stopped referenced resource watch", "gvk", gvk.String()) - delete(i.cdCaches, gvk) + i.log.Info("Stopped referenced resource watch", "gc", gc.String()) + delete(i.cdCaches, gc) } } diff --git a/internal/controller/object/object.go b/internal/controller/object/object.go index 733f3717..b183ba84 100644 --- 
a/internal/controller/object/object.go +++ b/internal/controller/object/object.go @@ -93,7 +93,7 @@ const ( type KindObserver interface { // WatchReferencedResources starts a watch of the given kinds to trigger // reconciles when a referenced object of those kinds changes. - WatchReferencedResources(kind ...schema.GroupVersionKind) + WatchReferencedResources(cluster clients.Cluster, gcs ...gvkWithConfig) } // Setup adds a controller that reconciles Object managed resources. @@ -115,10 +115,9 @@ func Setup(mgr ctrl.Manager, o controller.Options, sanitizeSecrets bool, pollJit log: l, cluster: mgr, - gvkRoutedCache: controller.NewGVKRoutedCache(mgr.GetScheme(), mgr.GetCache()), - cdCaches: make(map[schema.GroupVersionKind]cdCache), - objectsCache: ca, - sinks: make(map[string]func(ev runtimeevent.UpdateEvent)), + cdCaches: make(map[gvkWithConfig]cdCache), + objectsCache: ca, + sinks: make(map[string]func(ev runtimeevent.UpdateEvent)), } if err := mgr.Add(manager.RunnableFunc(func(ctx context.Context) error { @@ -183,7 +182,7 @@ type connector struct { kindObserver KindObserver - clientForProviderFn func(ctx context.Context, inclusterClient client.Client, providerConfigName string) (client.Client, error) + clientForProviderFn func(ctx context.Context, inclusterClient client.Client, providerConfigName string) (clients.ClusterClient, error) } func (c *connector) Connect(ctx context.Context, mg resource.Managed) (managed.ExternalClient, error) { //nolint:gocyclo @@ -206,11 +205,8 @@ func (c *connector) Connect(ctx context.Context, mg resource.Managed) (managed.E } return &external{ - logger: c.logger, - client: resource.ClientApplicator{ - Client: k, - Applicator: resource.NewAPIPatchingApplicator(k), - }, + logger: c.logger, + client: k, localClient: c.kube, sanitizeSecrets: c.sanitizeSecrets, @@ -220,7 +216,7 @@ func (c *connector) Connect(ctx context.Context, mg resource.Managed) (managed.E type external struct { logger logging.Logger - client resource.ClientApplicator 
+ client clients.ClusterClient // localClient is specifically used to connect to local cluster localClient client.Client sanitizeSecrets bool @@ -235,7 +231,7 @@ func (c *external) Observe(ctx context.Context, mg resource.Managed) (managed.Ex } c.logger.Debug("Observing", "resource", cr) - + if err := c.resolveReferencies(ctx, cr); err != nil { return managed.ExternalObservation{}, errors.Wrap(err, errResolveResourceReferences) } @@ -256,6 +252,13 @@ func (c *external) Observe(ctx context.Context, mg resource.Managed) (managed.Ex return managed.ExternalObservation{ResourceExists: false}, nil } + if c.kindObserver != nil { + c.kindObserver.WatchReferencedResources(c.client, gvkWithConfig{ + config: cr.Spec.ProviderConfigReference.Name, + gvk: observed.GroupVersionKind(), + }) + } + if err != nil { return managed.ExternalObservation{}, errors.Wrap(err, errGetObject) } @@ -285,7 +288,8 @@ func (c *external) Create(ctx context.Context, mg resource.Managed) (managed.Ext } meta.AddAnnotations(obj, map[string]string{ - v1.LastAppliedConfigAnnotation: string(cr.Spec.ForProvider.Manifest.Raw), + v1.LastAppliedConfigAnnotation: string(cr.Spec.ForProvider.Manifest.Raw), + "kubernetes.crossplane.io/provider-config-ref": cr.Spec.ProviderConfigReference.Name, }) if err := c.client.Create(ctx, obj); err != nil { @@ -309,7 +313,8 @@ func (c *external) Update(ctx context.Context, mg resource.Managed) (managed.Ext } meta.AddAnnotations(obj, map[string]string{ - v1.LastAppliedConfigAnnotation: string(cr.Spec.ForProvider.Manifest.Raw), + v1.LastAppliedConfigAnnotation: string(cr.Spec.ForProvider.Manifest.Raw), + "kubernetes.crossplane.io/provider-config-ref": cr.Spec.ProviderConfigReference.Name, }) if err := c.client.Apply(ctx, obj); err != nil { @@ -344,6 +349,7 @@ func getDesired(obj *v1alpha2.Object) (*unstructured.Unstructured, error) { if desired.GetName() == "" { desired.SetName(obj.Name) } + return desired, nil } @@ -460,7 +466,7 @@ func (c *external) resolveReferencies(ctx 
context.Context, obj *v1alpha2.Object) c.logger.Debug("Resolving referencies.") // Loop through references to resolve each referenced resource - var gvks []schema.GroupVersionKind + var gcs []gvkWithConfig for _, ref := range obj.Spec.References { if ref.DependsOn == nil && ref.PatchesFrom == nil { continue @@ -488,15 +494,18 @@ func (c *external) resolveReferencies(ctx context.Context, obj *v1alpha2.Object) } g, v := parseAPIVersion(refAPIVersion) - gvks = append(gvks, schema.GroupVersionKind{ - Group: g, - Version: v, - Kind: refKind, + gcs = append(gcs, gvkWithConfig{ + config: "", + gvk: schema.GroupVersionKind{ + Group: g, + Version: v, + Kind: refKind, + }, }) } if c.kindObserver != nil { - c.kindObserver.WatchReferencedResources(gvks...) + c.kindObserver.WatchReferencedResources(nil, gcs...) } return nil diff --git a/internal/controller/observedobjectcollection/reconciler.go b/internal/controller/observedobjectcollection/reconciler.go index 619e2a80..7620051c 100644 --- a/internal/controller/observedobjectcollection/reconciler.go +++ b/internal/controller/observedobjectcollection/reconciler.go @@ -59,7 +59,7 @@ type Reconciler struct { client client.Client log logging.Logger pollInterval func() time.Duration - clientForProvider func(ctx context.Context, inclusterClient client.Client, providerConfigName string) (client.Client, error) + clientForProvider func(ctx context.Context, inclusterClient client.Client, providerConfigName string) (clients.ClusterClient, error) observedObjectName func(collection client.Object, matchedObject client.Object) (string, error) } From 143476bfd16591b0a1efaa065317396517ae490d Mon Sep 17 00:00:00 2001 From: Hasan Turken Date: Thu, 2 May 2024 13:00:51 +0300 Subject: [PATCH 04/19] Do not start multiple caches against the same host Signed-off-by: Hasan Turken --- internal/controller/object/indexes.go | 2 - internal/controller/object/informers.go | 71 +++++++++++++------------ internal/controller/object/object.go | 48 
++++++++--------- 3 files changed, 61 insertions(+), 60 deletions(-) diff --git a/internal/controller/object/indexes.go b/internal/controller/object/indexes.go index 01c80007..b0be1711 100644 --- a/internal/controller/object/indexes.go +++ b/internal/controller/object/indexes.go @@ -118,8 +118,6 @@ func enqueueObjectsForReferences(ca cache.Cache, log logging.Logger) func(ctx co log.Debug("cannot list objects related to a reference change", "error", err, "fieldSelector", objectsRefsIndex+"="+key) return } - log.Info("Will enqueue objects for references", "len", len(objects.Items)) - // queue those Objects for reconciliation for _, o := range objects.Items { log.Info("Enqueueing Object because referenced resource changed", "len", len(objects.Items), "name", o.GetName(), "referencedGVK", rGVK.String(), "referencedName", ev.ObjectNew.GetName()) diff --git a/internal/controller/object/informers.go b/internal/controller/object/informers.go index 31305633..c48b1742 100644 --- a/internal/controller/object/informers.go +++ b/internal/controller/object/informers.go @@ -2,6 +2,7 @@ package object import ( "context" + "k8s.io/client-go/rest" "strings" "sync" @@ -20,7 +21,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/source" "github.com/crossplane-contrib/provider-kubernetes/apis/object/v1alpha2" - "github.com/crossplane-contrib/provider-kubernetes/internal/clients" "github.com/crossplane/crossplane-runtime/pkg/logging" ) @@ -32,30 +32,32 @@ import ( // references to composed resources, and inform referencedResourceInformers about // them via the WatchReferencedResources method. type referencedResourceInformers struct { - log logging.Logger - cluster clients.Cluster + log logging.Logger + config *rest.Config lock sync.RWMutex // everything below is protected by this lock - // cdCaches holds the composed resource informers. These are dynamically - // started and stopped based on the composites that reference them. 
- cdCaches map[gvkWithConfig]cdCache - objectsCache cache.Cache - sinks map[string]func(ev runtimeevent.UpdateEvent) // by some uid + // resourceCaches holds the managed resource informers. These are + // dynamically started and stopped based on the Objects that reference OR + // manages them. + resourceCaches map[gvkWithHost]resourceCache + objectsCache cache.Cache + sinks map[string]func(ev runtimeevent.UpdateEvent) // by some uid } -type gvkWithConfig struct { - config string - gvk schema.GroupVersionKind +type gvkWithHost struct { + host string + gvk schema.GroupVersionKind } -func (g gvkWithConfig) String() string { - return g.config + "." + g.gvk.String() -} - -type cdCache struct { +type resourceCache struct { cache cache.Cache cancelFn context.CancelFunc + + // Which provider config was used to create this cache. We will use this + // information to figure out whether there are Objects relying on this cache + // left during garbage collection of caches. + providerConfig string } var _ source.Source = &referencedResourceInformers{} @@ -98,24 +100,23 @@ func (i *referencedResourceInformers) Start(ctx context.Context, h handler.Event // Note that this complements cleanupReferencedResourceInformers which regularly // garbage collects composed resource informers that are no longer referenced by // any composite. 
-func (i *referencedResourceInformers) WatchReferencedResources(cluster clients.Cluster, gcs ...gvkWithConfig) { +func (i *referencedResourceInformers) WatchResources(rc *rest.Config, providerConfig string, gvks ...schema.GroupVersionKind) { i.lock.RLock() defer i.lock.RUnlock() + if rc == nil { + rc = i.config + } + // start new informers - for _, gc := range gcs { - if _, found := i.cdCaches[gc]; found { + for _, gvk := range gvks { + if _, found := i.resourceCaches[gvkWithHost{host: rc.Host, gvk: gvk}]; found { continue } - log := i.log.WithValues("config", gc.config, "gvk", gc.gvk.String()) - - if cluster == nil { - // Default to control plane cluster. - cluster = i.cluster - } + log := i.log.WithValues("host", rc.Host, "gvk", gvk.String()) - ca, err := cache.New(cluster.GetConfig(), cache.Options{}) + ca, err := cache.New(rc, cache.Options{}) if err != nil { log.Debug("failed creating a cache", "error", err) continue @@ -126,7 +127,7 @@ func (i *referencedResourceInformers) WatchReferencedResources(cluster clients.C ctx, cancelFn := context.WithCancel(context.Background()) u := kunstructured.Unstructured{} - u.SetGroupVersionKind(gc.gvk) + u.SetGroupVersionKind(gvk) inf, err := ca.GetInformer(ctx, &u, cache.BlockUntilSynced(false)) // don't block. We wait in the go routine below. if err != nil { cancelFn() @@ -166,9 +167,11 @@ func (i *referencedResourceInformers) WatchReferencedResources(cluster clients.C _ = ca.Start(ctx) }() - i.cdCaches[gc] = cdCache{ + i.resourceCaches[gvkWithHost{host: rc.Host, gvk: gvk}] = resourceCache{ cache: ca, cancelFn: cancelFn, + + providerConfig: providerConfig, } // wait for in the background, and only when synced add to the routed cache @@ -187,19 +190,19 @@ func (i *referencedResourceInformers) WatchReferencedResources(cluster clients.C // the composed resources referenced by a composite resource. 
func (i *referencedResourceInformers) cleanupReferencedResourceInformers(ctx context.Context) { // stop old informers - for gc, inf := range i.cdCaches { + for gh, ca := range i.resourceCaches { list := v1alpha2.ObjectList{} - if err := i.objectsCache.List(ctx, &list, client.MatchingFields{objectRefGVKsIndex: refKeyGKV(gc.config, gc.gvk.Kind, gc.gvk.Group, gc.gvk.Version)}); err != nil { - i.log.Debug("cannot list objects referencing a certain resource GVK", "error", err, "fieldSelector", objectRefGVKsIndex+"="+gc.String()) + if err := i.objectsCache.List(ctx, &list, client.MatchingFields{objectRefGVKsIndex: refKeyGKV(ca.providerConfig, gh.gvk.Kind, gh.gvk.Group, gh.gvk.Version)}); err != nil { + i.log.Debug("cannot list objects referencing a certain resource GVK", "error", err, "fieldSelector", objectRefGVKsIndex+"="+refKeyGKV(ca.providerConfig, gh.gvk.Kind, gh.gvk.Group, gh.gvk.Version)) } if len(list.Items) > 0 { continue } - inf.cancelFn() - i.log.Info("Stopped referenced resource watch", "gc", gc.String()) - delete(i.cdCaches, gc) + ca.cancelFn() + i.log.Info("Stopped referenced resource watch", "provider config", ca.providerConfig, "host", gh.host, "gvk", gh.gvk) + delete(i.resourceCaches, gh) } } diff --git a/internal/controller/object/object.go b/internal/controller/object/object.go index b183ba84..b72f1956 100644 --- a/internal/controller/object/object.go +++ b/internal/controller/object/object.go @@ -22,6 +22,7 @@ import ( "fmt" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/rest" "k8s.io/client-go/util/workqueue" "math/rand" runtimeevent "sigs.k8s.io/controller-runtime/pkg/event" @@ -91,9 +92,9 @@ const ( // KindObserver tracks kinds of referenced composed resources in order to start // watches for them for realtime events. type KindObserver interface { - // WatchReferencedResources starts a watch of the given kinds to trigger - // reconciles when a referenced object of those kinds changes. 
- WatchReferencedResources(cluster clients.Cluster, gcs ...gvkWithConfig) + // WatchResources starts a watch of the given kinds to trigger reconciles + // when a referenced or managed objects of those kinds changes. + WatchResources(rc *rest.Config, providerConfig string, gvks ...schema.GroupVersionKind) } // Setup adds a controller that reconciles Object managed resources. @@ -112,12 +113,12 @@ func Setup(mgr ctrl.Manager, o controller.Options, sanitizeSecrets bool, pollJit } i := referencedResourceInformers{ - log: l, - cluster: mgr, + log: l, + config: mgr.GetConfig(), - cdCaches: make(map[gvkWithConfig]cdCache), - objectsCache: ca, - sinks: make(map[string]func(ev runtimeevent.UpdateEvent)), + objectsCache: ca, + resourceCaches: make(map[gvkWithHost]resourceCache), + sinks: make(map[string]func(ev runtimeevent.UpdateEvent)), } if err := mgr.Add(manager.RunnableFunc(func(ctx context.Context) error { @@ -252,17 +253,16 @@ func (c *external) Observe(ctx context.Context, mg resource.Managed) (managed.Ex return managed.ExternalObservation{ResourceExists: false}, nil } - if c.kindObserver != nil { - c.kindObserver.WatchReferencedResources(c.client, gvkWithConfig{ - config: cr.Spec.ProviderConfigReference.Name, - gvk: observed.GroupVersionKind(), - }) - } - if err != nil { return managed.ExternalObservation{}, errors.Wrap(err, errGetObject) } + // We know the resource exists, so we can start watching it for realtime + // events. 
+ if c.kindObserver != nil { + c.kindObserver.WatchResources(c.client.GetConfig(), cr.Spec.ProviderConfigReference.Name, observed.GroupVersionKind()) + } + if err = c.setObserved(cr, observed); err != nil { return managed.ExternalObservation{}, err } @@ -466,7 +466,7 @@ func (c *external) resolveReferencies(ctx context.Context, obj *v1alpha2.Object) c.logger.Debug("Resolving referencies.") // Loop through references to resolve each referenced resource - var gcs []gvkWithConfig + var gvks []schema.GroupVersionKind for _, ref := range obj.Spec.References { if ref.DependsOn == nil && ref.PatchesFrom == nil { continue @@ -494,18 +494,18 @@ func (c *external) resolveReferencies(ctx context.Context, obj *v1alpha2.Object) } g, v := parseAPIVersion(refAPIVersion) - gcs = append(gcs, gvkWithConfig{ - config: "", - gvk: schema.GroupVersionKind{ - Group: g, - Version: v, - Kind: refKind, - }, + gvks = append(gvks, schema.GroupVersionKind{ + Group: g, + Version: v, + Kind: refKind, }) } if c.kindObserver != nil { - c.kindObserver.WatchReferencedResources(nil, gcs...) + // Referenced resources always live on the control plane (i.e. local cluster), + // so we don't pass an extra rest config or provider config with the + // watch call. + c.kindObserver.WatchResources(nil, "", gvks...) 
} return nil From f5769b42c6ea71b21a52655562c3f4ceacd99f4d Mon Sep 17 00:00:00 2001 From: Hasan Turken Date: Thu, 2 May 2024 13:58:20 +0300 Subject: [PATCH 05/19] Do not attempt resolving references if object is being deleted Fixes #138 Signed-off-by: Hasan Turken --- internal/controller/object/object.go | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/internal/controller/object/object.go b/internal/controller/object/object.go index b72f1956..8f8ecffe 100644 --- a/internal/controller/object/object.go +++ b/internal/controller/object/object.go @@ -232,9 +232,12 @@ func (c *external) Observe(ctx context.Context, mg resource.Managed) (managed.Ex } c.logger.Debug("Observing", "resource", cr) - - if err := c.resolveReferencies(ctx, cr); err != nil { - return managed.ExternalObservation{}, errors.Wrap(err, errResolveResourceReferences) + + if !meta.WasDeleted(cr) { + // If the object is not being deleted, we need to resolve references + if err := c.resolveReferencies(ctx, cr); err != nil { + return managed.ExternalObservation{}, errors.Wrap(err, errResolveResourceReferences) + } } desired, err := getDesired(cr) From 27de77f33808af1d36f45f6efda51327a17a19af Mon Sep 17 00:00:00 2001 From: Hasan Turken Date: Thu, 2 May 2024 14:20:46 +0300 Subject: [PATCH 06/19] Put resource watching behind feature flag Signed-off-by: Hasan Turken --- cmd/provider/main.go | 24 ++++--- internal/controller/object/informers.go | 7 +- internal/controller/object/object.go | 96 +++++++++++++------------ internal/features/features.go | 23 ++++++ 4 files changed, 94 insertions(+), 56 deletions(-) create mode 100644 internal/features/features.go diff --git a/cmd/provider/main.go b/cmd/provider/main.go index ab235cc7..742b3db0 100644 --- a/cmd/provider/main.go +++ b/cmd/provider/main.go @@ -17,6 +17,7 @@ limitations under the License. 
package main import ( + "github.com/crossplane-contrib/provider-kubernetes/internal/features" "io" "os" "path/filepath" @@ -50,15 +51,17 @@ const ( func main() { var ( - app = kingpin.New(filepath.Base(os.Args[0]), "Template support for Crossplane.").DefaultEnvars() - debug = app.Flag("debug", "Run with debug logging.").Short('d').Bool() - syncInterval = app.Flag("sync", "Controller manager sync period such as 300ms, 1.5h, or 2h45m").Short('s').Default("1h").Duration() - pollInterval = app.Flag("poll", "Poll interval controls how often an individual resource should be checked for drift.").Default("10m").Duration() - pollJitterPercentage = app.Flag("poll-jitter-percentage", "Percentage of jitter to apply to poll interval. It cannot be negative, and must be less than 100.").Default("10").Uint() - leaderElection = app.Flag("leader-election", "Use leader election for the controller manager.").Short('l').Default("false").Envar("LEADER_ELECTION").Bool() - maxReconcileRate = app.Flag("max-reconcile-rate", "The number of concurrent reconciliations that may be running at one time.").Default("100").Int() + app = kingpin.New(filepath.Base(os.Args[0]), "Template support for Crossplane.").DefaultEnvars() + debug = app.Flag("debug", "Run with debug logging.").Short('d').Bool() + syncInterval = app.Flag("sync", "Controller manager sync period such as 300ms, 1.5h, or 2h45m").Short('s').Default("1h").Duration() + pollInterval = app.Flag("poll", "Poll interval controls how often an individual resource should be checked for drift.").Default("10m").Duration() + pollJitterPercentage = app.Flag("poll-jitter-percentage", "Percentage of jitter to apply to poll interval. 
It cannot be negative, and must be less than 100.").Default("10").Uint() + leaderElection = app.Flag("leader-election", "Use leader election for the controller manager.").Short('l').Default("false").Envar("LEADER_ELECTION").Bool() + maxReconcileRate = app.Flag("max-reconcile-rate", "The number of concurrent reconciliations that may be running at one time.").Default("100").Int() + sanitizeSecrets = app.Flag("sanitize-secrets", "when enabled, redacts Secret data from Object status").Default("false").Envar("SANITIZE_SECRETS").Bool() + enableManagementPolicies = app.Flag("enable-management-policies", "Enable support for Management Policies.").Default("true").Envar("ENABLE_MANAGEMENT_POLICIES").Bool() - sanitizeSecrets = app.Flag("sanitize-secrets", "when enabled, redacts Secret data from Object status").Default("false").Envar("SANITIZE_SECRETS").Bool() + enableWatches = app.Flag("enable-watches", "Enable support for watching resources.").Default("false").Envar("ENABLE_WATCHES").Bool() ) kingpin.MustParse(app.Parse(os.Args[1:])) @@ -135,6 +138,11 @@ func main() { log.Info("Beta feature enabled", "flag", feature.EnableBetaManagementPolicies) } + if *enableWatches { + o.Features.Enable(features.EnableAlphaWatches) + log.Info("Alpha feature enabled", "flag", features.EnableAlphaWatches) + } + // NOTE(lsviben): We are registering the conversion webhook with v1alpha1 // Object. As far as I can see and based on some tests, it doesn't matter // which version we use here. Leaving it as v1alpha1 as it will be easy to diff --git a/internal/controller/object/informers.go b/internal/controller/object/informers.go index c48b1742..8fe57d94 100644 --- a/internal/controller/object/informers.go +++ b/internal/controller/object/informers.go @@ -97,7 +97,7 @@ func (i *referencedResourceInformers) Start(ctx context.Context, h handler.Event // every reconcile to make referencedResourceInformers aware of the composed // resources the given composite resource references. 
// -// Note that this complements cleanupReferencedResourceInformers which regularly +// Note that this complements cleanupResourceInformers which regularly // garbage collects composed resource informers that are no longer referenced by // any composite. func (i *referencedResourceInformers) WatchResources(rc *rest.Config, providerConfig string, gvks ...schema.GroupVersionKind) { @@ -183,13 +183,14 @@ func (i *referencedResourceInformers) WatchResources(rc *rest.Config, providerCo } } -// cleanupReferencedResourceInformers garbage collects composed resource informers +// cleanupResourceInformers garbage collects composed resource informers // that are no longer referenced by any composite resource. // // Note that this complements WatchReferencedResources which starts informers for // the composed resources referenced by a composite resource. -func (i *referencedResourceInformers) cleanupReferencedResourceInformers(ctx context.Context) { +func (i *referencedResourceInformers) cleanupResourceInformers(ctx context.Context) { // stop old informers + i.log.Debug("Running garbage collection for resource informers", "count", len(i.resourceCaches)) for gh, ca := range i.resourceCaches { list := v1alpha2.ObjectList{} if err := i.objectsCache.List(ctx, &list, client.MatchingFields{objectRefGVKsIndex: refKeyGKV(ca.providerConfig, gh.gvk.Kind, gh.gvk.Group, gh.gvk.Version)}); err != nil { diff --git a/internal/controller/object/object.go b/internal/controller/object/object.go index 8f8ecffe..e2acd2b8 100644 --- a/internal/controller/object/object.go +++ b/internal/controller/object/object.go @@ -20,6 +20,7 @@ import ( "context" "encoding/base64" "fmt" + "github.com/crossplane-contrib/provider-kubernetes/internal/features" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/rest" @@ -104,40 +105,7 @@ func Setup(mgr ctrl.Manager, o controller.Options, sanitizeSecrets bool, pollJit cps := 
[]managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} - ca := mgr.GetCache() - if err := ca.IndexField(context.Background(), &v1alpha2.Object{}, objectRefGVKsIndex, IndexReferencedResourceRefGVKs); err != nil { - return errors.Wrap(err, "cannot add index for object reference GVKs") - } - if err := ca.IndexField(context.Background(), &v1alpha2.Object{}, objectsRefsIndex, IndexReferencesResourcesRefs); err != nil { - return errors.Wrap(err, "cannot add index for object references") - } - - i := referencedResourceInformers{ - log: l, - config: mgr.GetConfig(), - - objectsCache: ca, - resourceCaches: make(map[gvkWithHost]resourceCache), - sinks: make(map[string]func(ev runtimeevent.UpdateEvent)), - } - - if err := mgr.Add(manager.RunnableFunc(func(ctx context.Context) error { - // Run every 5 minutes. - wait.UntilWithContext(ctx, i.cleanupReferencedResourceInformers, 5*time.Second) - return nil - })); err != nil { - return errors.Wrap(err, "cannot add cleanup referenced resource informers runnable") - } - reconcilerOptions := []managed.ReconcilerOption{ - managed.WithExternalConnecter(&connector{ - logger: o.Logger, - sanitizeSecrets: sanitizeSecrets, - kube: mgr.GetClient(), - usage: resource.NewProviderConfigUsageTracker(mgr.GetClient(), &apisv1alpha1.ProviderConfigUsage{}), - clientForProviderFn: clients.ClientForProvider, - kindObserver: &i, - }), managed.WithFinalizer(&objFinalizer{client: mgr.GetClient()}), managed.WithPollInterval(o.PollInterval), managed.WithPollIntervalHook(func(mg resource.Managed, pollInterval time.Duration) time.Duration { @@ -154,25 +122,63 @@ func Setup(mgr ctrl.Manager, o controller.Options, sanitizeSecrets bool, pollJit managed.WithConnectionPublishers(cps...), } - if o.Features.Enabled(feature.EnableBetaManagementPolicies) { - reconcilerOptions = append(reconcilerOptions, managed.WithManagementPolicies()) + conn := &connector{ + logger: o.Logger, + sanitizeSecrets: sanitizeSecrets, + kube: 
mgr.GetClient(), + usage: resource.NewProviderConfigUsageTracker(mgr.GetClient(), &apisv1alpha1.ProviderConfigUsage{}), + clientForProviderFn: clients.ClientForProvider, } - r := managed.NewReconciler(mgr, - resource.ManagedKind(v1alpha2.ObjectGroupVersionKind), - reconcilerOptions..., - ) - - return ctrl.NewControllerManagedBy(mgr). + cb := ctrl.NewControllerManagedBy(mgr). Named(name). WithOptions(o.ForControllerRuntime()). - For(&v1alpha2.Object{}). - WatchesRawSource(&i, handler.Funcs{ + For(&v1alpha2.Object{}) + + if o.Features.Enabled(features.EnableAlphaWatches) { + ca := mgr.GetCache() + if err := ca.IndexField(context.Background(), &v1alpha2.Object{}, objectRefGVKsIndex, IndexReferencedResourceRefGVKs); err != nil { + return errors.Wrap(err, "cannot add index for object reference GVKs") + } + if err := ca.IndexField(context.Background(), &v1alpha2.Object{}, objectsRefsIndex, IndexReferencesResourcesRefs); err != nil { + return errors.Wrap(err, "cannot add index for object references") + } + + i := referencedResourceInformers{ + log: l, + config: mgr.GetConfig(), + + objectsCache: ca, + resourceCaches: make(map[gvkWithHost]resourceCache), + sinks: make(map[string]func(ev runtimeevent.UpdateEvent)), + } + conn.kindObserver = &i + + if err := mgr.Add(manager.RunnableFunc(func(ctx context.Context) error { + // Run every 5 minutes. + // TODO: Fix the period after development is done. + wait.UntilWithContext(ctx, i.cleanupResourceInformers, 5*time.Second) + return nil + })); err != nil { + return errors.Wrap(err, "cannot add cleanup referenced resource informers runnable") + } + + cb = cb.WatchesRawSource(&i, handler.Funcs{ UpdateFunc: func(ctx context.Context, ev runtimeevent.UpdateEvent, q workqueue.RateLimitingInterface) { enqueueObjectsForReferences(ca, l)(ctx, ev, q) }, - }). 
- Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) + }) + } + reconcilerOptions = append(reconcilerOptions, managed.WithExternalConnecter(conn)) + + if o.Features.Enabled(feature.EnableBetaManagementPolicies) { + reconcilerOptions = append(reconcilerOptions, managed.WithManagementPolicies()) + } + + return cb.Complete(ratelimiter.NewReconciler(name, managed.NewReconciler(mgr, + resource.ManagedKind(v1alpha2.ObjectGroupVersionKind), + reconcilerOptions..., + ), o.GlobalRateLimiter)) } type connector struct { diff --git a/internal/features/features.go b/internal/features/features.go new file mode 100644 index 00000000..3c294983 --- /dev/null +++ b/internal/features/features.go @@ -0,0 +1,23 @@ +/* + Copyright 2022 The Crossplane Authors. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package features + +import "github.com/crossplane/crossplane-runtime/pkg/feature" + +// Feature flags. +const ( + // EnableAlphaWatches enables alpha support for watching referenced and + // managed resources. 
+ EnableAlphaWatches feature.Flag = "EnableAlphaWatches" +) From 95fe431a6a299ea8b89c74191aec4a9d5b568a9e Mon Sep 17 00:00:00 2001 From: Hasan Turken Date: Thu, 2 May 2024 15:24:58 +0300 Subject: [PATCH 07/19] Fix unit tests and code comments Signed-off-by: Hasan Turken --- cmd/provider/main.go | 2 +- internal/clients/{client.go => clients.go} | 15 +- internal/controller/object/indexes.go | 74 ++-- internal/controller/object/informers.go | 88 ++-- internal/controller/object/object.go | 58 ++- internal/controller/object/object_test.go | 381 ++++++++++-------- .../reconciler_test.go | 10 +- 7 files changed, 352 insertions(+), 276 deletions(-) rename internal/clients/{client.go => clients.go} (92%) diff --git a/cmd/provider/main.go b/cmd/provider/main.go index 742b3db0..fca9670b 100644 --- a/cmd/provider/main.go +++ b/cmd/provider/main.go @@ -17,7 +17,6 @@ limitations under the License. package main import ( - "github.com/crossplane-contrib/provider-kubernetes/internal/features" "io" "os" "path/filepath" @@ -39,6 +38,7 @@ import ( "github.com/crossplane-contrib/provider-kubernetes/apis" "github.com/crossplane-contrib/provider-kubernetes/apis/object/v1alpha1" object "github.com/crossplane-contrib/provider-kubernetes/internal/controller" + "github.com/crossplane-contrib/provider-kubernetes/internal/features" _ "k8s.io/client-go/plugin/pkg/client/auth" ) diff --git a/internal/clients/client.go b/internal/clients/clients.go similarity index 92% rename from internal/clients/client.go rename to internal/clients/clients.go index 0c410a17..34b22d30 100644 --- a/internal/clients/client.go +++ b/internal/clients/clients.go @@ -107,23 +107,28 @@ func restConfigFromAPIConfig(c *api.Config) (*rest.Config, error) { return config, nil } -type Cluster interface { +// RestConfigGetter is an interface that provides a REST config. 
+type RestConfigGetter interface { // GetConfig returns an initialized Config GetConfig() *rest.Config } +// ClusterClient is a client that can be used to interact with a Kubernetes +// cluster including getting its rest config. type ClusterClient interface { client.Client resource.Applicator - Cluster + RestConfigGetter } -type clusterClient struct { +// ApplicatorClientWithConfig is a ClusterClient that also has a rest config. +type ApplicatorClientWithConfig struct { resource.ClientApplicator config *rest.Config } -func (c *clusterClient) GetConfig() *rest.Config { +// GetConfig returns the rest config for the client. +func (c *ApplicatorClientWithConfig) GetConfig() *rest.Config { return c.config } @@ -195,7 +200,7 @@ func ClientForProvider(ctx context.Context, inclusterClient client.Client, provi if err != nil { return nil, errors.Wrap(err, "cannot create Kubernetes client") } - return &clusterClient{ + return &ApplicatorClientWithConfig{ ClientApplicator: resource.ClientApplicator{ Client: k, Applicator: resource.NewAPIPatchingApplicator(k), diff --git a/internal/controller/object/indexes.go b/internal/controller/object/indexes.go index b0be1711..d127f81c 100644 --- a/internal/controller/object/indexes.go +++ b/internal/controller/object/indexes.go @@ -19,7 +19,6 @@ package object import ( "context" "fmt" - "github.com/crossplane-contrib/provider-kubernetes/apis/object/v1alpha2" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/util/workqueue" @@ -29,16 +28,17 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" "github.com/crossplane/crossplane-runtime/pkg/logging" + + "github.com/crossplane-contrib/provider-kubernetes/apis/object/v1alpha2" ) const ( - // objectRefGVKsIndex is an index of all GroupKinds that - // are in use by a Composition. It indexes from spec.resourceRefs, not - // from spec.resources. Hence, it will also work with functions. 
- objectRefGVKsIndex = "objectsRefsGVKs" - // objectsRefsIndex is an index of resourceRefs that are owned - // by a composite. - objectsRefsIndex = "objectsRefs" + // resourceRefGVKsIndex is an index of all GroupKinds that + // are in use by an Object. + resourceRefGVKsIndex = "objectsRefsGVKs" + // resourceRefsIndex is an index of resourceRefs that are referenced or + // managed by an Object. + resourceRefsIndex = "objectsRefs" ) var ( @@ -46,13 +46,15 @@ var ( _ client.IndexerFunc = IndexReferencesResourcesRefs ) -// IndexReferencedResourceRefGVKs assumes the passed object is a composite. It -// returns gvk keys for every resource referenced in the composite. +// IndexReferencedResourceRefGVKs assumes the passed object is an Object. It +// returns gvk keys for every resource referenced or managed by the Object. func IndexReferencedResourceRefGVKs(o client.Object) []string { obj, ok := o.(*v1alpha2.Object) if !ok { return nil // should never happen } + + // Index references. refs := obj.Spec.References keys := make([]string, 0, len(refs)) for _, ref := range refs { @@ -62,46 +64,48 @@ func IndexReferencedResourceRefGVKs(o client.Object) []string { keys = append(keys, refKeyGKV("", refKind, group, version)) } - d, err := getDesired(obj) - if err == nil { - keys = append(keys, refKeyGKV(obj.Spec.ProviderConfigReference.Name, d.GetKind(), d.GroupVersionKind().Group, d.GroupVersionKind().Version)) // unification is done by the informer. - } else { - // TODO: what to do here? - } + // Index the desired object. + // We don't expect errors here, as the getDesired function is already called + // in the reconciler and the desired object already validated. + d, _ := getDesired(obj) + keys = append(keys, refKeyGKV(obj.Spec.ProviderConfigReference.Name, d.GetKind(), d.GroupVersionKind().Group, d.GroupVersionKind().Version)) // unification is done by the informer. // unification is done by the informer. 
return keys } -func refKeyGKV(config, kind, group, version string) string { - return fmt.Sprintf("%s.%s.%s.%s", config, kind, group, version) +func refKeyGKV(providerConfig, kind, group, version string) string { + return fmt.Sprintf("%s.%s.%s.%s", providerConfig, kind, group, version) } -// IndexReferencesResourcesRefs assumes the passed object is a composite. It -// returns keys for every composed resource referenced in the composite. +// IndexReferencesResourcesRefs assumes the passed object is an Object. It +// returns keys for every resource referenced or managed by the Object. func IndexReferencesResourcesRefs(o client.Object) []string { obj, ok := o.(*v1alpha2.Object) if !ok { return nil // should never happen } + + // Index references. refs := obj.Spec.References keys := make([]string, 0, len(refs)) for _, ref := range refs { refAPIVersion, refKind, refNamespace, refName := getReferenceInfo(ref) - // References are always on control plane, so we don't pass the config name. + // References are always on control plane, so we don't pass the provider config name. keys = append(keys, refKey("", refNamespace, refName, refKind, refAPIVersion)) } - d, err := getDesired(obj) - if err == nil { - keys = append(keys, refKey(obj.Spec.ProviderConfigReference.Name, d.GetNamespace(), d.GetName(), d.GetKind(), d.GetAPIVersion())) // unification is done by the informer. - } else { - // TODO: what to do here? - } + + // Index the desired object. + // We don't expect errors here, as the getDesired function is already called + // in the reconciler and the desired object already validated. + d, _ := getDesired(obj) + keys = append(keys, refKey(obj.Spec.ProviderConfigReference.Name, d.GetNamespace(), d.GetName(), d.GetKind(), d.GetAPIVersion())) // unification is done by the informer. 
+ return keys } -func refKey(config, ns, name, kind, apiVersion string) string { - return fmt.Sprintf("%s.%s.%s.%s.%s", config, name, ns, kind, apiVersion) +func refKey(providerConfig, ns, name, kind, apiVersion string) string { + return fmt.Sprintf("%s.%s.%s.%s.%s", providerConfig, name, ns, kind, apiVersion) } func enqueueObjectsForReferences(ca cache.Cache, log logging.Logger) func(ctx context.Context, ev runtimeevent.UpdateEvent, q workqueue.RateLimitingInterface) { @@ -114,22 +118,22 @@ func enqueueObjectsForReferences(ca cache.Cache, log logging.Logger) func(ctx co key := refKey(config, ev.ObjectNew.GetNamespace(), ev.ObjectNew.GetName(), rGVK.Kind, rGVK.GroupVersion().String()) objects := v1alpha2.ObjectList{} - if err := ca.List(ctx, &objects, client.MatchingFields{objectsRefsIndex: key}); err != nil { - log.Debug("cannot list objects related to a reference change", "error", err, "fieldSelector", objectsRefsIndex+"="+key) + if err := ca.List(ctx, &objects, client.MatchingFields{resourceRefsIndex: key}); err != nil { + log.Debug("cannot list objects related to a reference change", "error", err, "fieldSelector", resourceRefsIndex+"="+key) return } // queue those Objects for reconciliation for _, o := range objects.Items { - log.Info("Enqueueing Object because referenced resource changed", "len", len(objects.Items), "name", o.GetName(), "referencedGVK", rGVK.String(), "referencedName", ev.ObjectNew.GetName()) + log.Info("Enqueueing Object because referenced resource changed", "name", o.GetName(), "referencedGVK", rGVK.String(), "referencedName", ev.ObjectNew.GetName()) q.Add(reconcile.Request{NamespacedName: types.NamespacedName{Name: o.GetName()}}) } } } func getConfigName(o client.Object) string { - ann := o.GetAnnotations() - if ann == nil { + a := o.GetAnnotations() + if a == nil { return "" } - return ann["kubernetes.crossplane.io/provider-config-ref"] + return a[AnnotationKeyProviderConfigRef] } diff --git a/internal/controller/object/informers.go 
b/internal/controller/object/informers.go index 8fe57d94..bcca7fde 100644 --- a/internal/controller/object/informers.go +++ b/internal/controller/object/informers.go @@ -2,47 +2,45 @@ package object import ( "context" - "k8s.io/client-go/rest" + "github.com/crossplane/crossplane-runtime/pkg/errors" "strings" "sync" - "github.com/google/uuid" kunstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/rest" kcache "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" - "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client" - runtimeevent "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/source" - "github.com/crossplane-contrib/provider-kubernetes/apis/object/v1alpha2" "github.com/crossplane/crossplane-runtime/pkg/logging" + + "github.com/crossplane-contrib/provider-kubernetes/apis/object/v1alpha2" ) -// referencedResourceInformers manages composed resource informers referenced by -// composite resources. It serves as an event source for realtime notifications -// of changed composed resources, with the composite reconcilers as sinks. -// It keeps composed resource informers alive as long as there are composites -// referencing them. In parallel, the composite reconcilers keep track of -// references to composed resources, and inform referencedResourceInformers about -// them via the WatchReferencedResources method. -type referencedResourceInformers struct { +// resourceInformers manages resource informers referenced or managed +// by Objects. It serves as an event source for realtime notifications of +// changed resources, with the Object reconcilers as sinks. +// It keeps resource informers alive as long as there are Objects referencing +// them. 
In parallel, the Object reconcilers keep track of references to +// resources, and inform resourceInformers about them via the +// WatchReferencedResources method. +type resourceInformers struct { log logging.Logger config *rest.Config lock sync.RWMutex // everything below is protected by this lock - // resourceCaches holds the managed resource informers. These are - // dynamically started and stopped based on the Objects that reference OR - // manages them. + // resourceCaches holds the resource caches. These are dynamically started + // and stopped based on the Objects that reference or managing them. resourceCaches map[gvkWithHost]resourceCache objectsCache cache.Cache - sinks map[string]func(ev runtimeevent.UpdateEvent) // by some uid + sink func(ev runtimeevent.UpdateEvent) } type gvkWithHost struct { @@ -60,19 +58,18 @@ type resourceCache struct { providerConfig string } -var _ source.Source = &referencedResourceInformers{} +var _ source.Source = &resourceInformers{} -// Start implements source.Source, i.e. starting referencedResourceInformers as +// Start implements source.Source, i.e. starting resourceInformers as // source with h as the sink of update events. It keeps sending events until // ctx is done. -// Note that Start can be called multiple times to deliver events to multiple -// (composite resource) controllers. 
-func (i *referencedResourceInformers) Start(ctx context.Context, h handler.EventHandler, q workqueue.RateLimitingInterface, ps ...predicate.Predicate) error { - id := uuid.New().String() - +func (i *resourceInformers) Start(ctx context.Context, h handler.EventHandler, q workqueue.RateLimitingInterface, ps ...predicate.Predicate) error { i.lock.Lock() defer i.lock.Unlock() - i.sinks[id] = func(ev runtimeevent.UpdateEvent) { + if i.sink != nil { + return errors.New("source already started, cannot start it again") + } + i.sink = func(ev runtimeevent.UpdateEvent) { for _, p := range ps { if !p.Update(ev) { return @@ -86,21 +83,22 @@ func (i *referencedResourceInformers) Start(ctx context.Context, h handler.Event i.lock.Lock() defer i.lock.Unlock() - delete(i.sinks, id) + i.sink = nil }() return nil } -// WatchReferencedResources starts informers for the given composed resource GVKs. -// The is wired into the composite reconciler, which will call this method on -// every reconcile to make referencedResourceInformers aware of the composed -// resources the given composite resource references. +// WatchResources starts informers for the given resource GVKs for the given +// cluster (i.e. rest.Config & providerConfig). +// The is wired into the Object reconciler, which will call this method on +// every reconcile to make resourceInformers aware of the referenced or managed +// resources of the given Object. // // Note that this complements cleanupResourceInformers which regularly -// garbage collects composed resource informers that are no longer referenced by -// any composite. -func (i *referencedResourceInformers) WatchResources(rc *rest.Config, providerConfig string, gvks ...schema.GroupVersionKind) { +// garbage collects resource informers that are no longer referenced by +// any Object. 
+func (i *resourceInformers) WatchResources(rc *rest.Config, providerConfig string, gvks ...schema.GroupVersionKind) { i.lock.RLock() defer i.lock.RUnlock() @@ -150,9 +148,7 @@ func (i *referencedResourceInformers) WatchResources(rc *rest.Config, providerCo ObjectOld: old, ObjectNew: obj, } - for _, handleFn := range i.sinks { - handleFn(ev) - } + i.sink(ev) }, }); err != nil { cancelFn() @@ -163,7 +159,7 @@ func (i *referencedResourceInformers) WatchResources(rc *rest.Config, providerCo go func() { defer cancelFn() - log.Info("Starting composed resource watch") + log.Info("Starting resource watch") _ = ca.Start(ctx) }() @@ -177,24 +173,24 @@ func (i *referencedResourceInformers) WatchResources(rc *rest.Config, providerCo // wait for in the background, and only when synced add to the routed cache go func() { if synced := ca.WaitForCacheSync(ctx); synced { - log.Debug("Composed resource cache synced") + log.Debug("Resource cache synced") } }() } } -// cleanupResourceInformers garbage collects composed resource informers -// that are no longer referenced by any composite resource. +// cleanupResourceInformers garbage collects resource informers that are no +// longer referenced by any Object. // -// Note that this complements WatchReferencedResources which starts informers for -// the composed resources referenced by a composite resource. -func (i *referencedResourceInformers) cleanupResourceInformers(ctx context.Context) { +// Note that this complements WatchResources which starts informers for +// the resources referenced or managed by an Object. 
+func (i *resourceInformers) cleanupResourceInformers(ctx context.Context) { // stop old informers i.log.Debug("Running garbage collection for resource informers", "count", len(i.resourceCaches)) for gh, ca := range i.resourceCaches { list := v1alpha2.ObjectList{} - if err := i.objectsCache.List(ctx, &list, client.MatchingFields{objectRefGVKsIndex: refKeyGKV(ca.providerConfig, gh.gvk.Kind, gh.gvk.Group, gh.gvk.Version)}); err != nil { - i.log.Debug("cannot list objects referencing a certain resource GVK", "error", err, "fieldSelector", objectRefGVKsIndex+"="+refKeyGKV(ca.providerConfig, gh.gvk.Kind, gh.gvk.Group, gh.gvk.Version)) + if err := i.objectsCache.List(ctx, &list, client.MatchingFields{resourceRefGVKsIndex: refKeyGKV(ca.providerConfig, gh.gvk.Kind, gh.gvk.Group, gh.gvk.Version)}); err != nil { + i.log.Debug("cannot list objects referencing a certain resource GVK", "error", err, "fieldSelector", resourceRefGVKsIndex+"="+refKeyGKV(ca.providerConfig, gh.gvk.Kind, gh.gvk.Group, gh.gvk.Version)) } if len(list.Items) > 0 { @@ -202,7 +198,7 @@ func (i *referencedResourceInformers) cleanupResourceInformers(ctx context.Conte } ca.cancelFn() - i.log.Info("Stopped referenced resource watch", "provider config", ca.providerConfig, "host", gh.host, "gvk", gh.gvk) + i.log.Info("Stopped resource watch", "provider config", ca.providerConfig, "host", gh.host, "gvk", gh.gvk) delete(i.resourceCaches, gh) } } diff --git a/internal/controller/object/object.go b/internal/controller/object/object.go index e2acd2b8..690ddf9a 100644 --- a/internal/controller/object/object.go +++ b/internal/controller/object/object.go @@ -20,15 +20,7 @@ import ( "context" "encoding/base64" "fmt" - "github.com/crossplane-contrib/provider-kubernetes/internal/features" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/rest" - "k8s.io/client-go/util/workqueue" "math/rand" - runtimeevent "sigs.k8s.io/controller-runtime/pkg/event" - 
"sigs.k8s.io/controller-runtime/pkg/handler" - "sigs.k8s.io/controller-runtime/pkg/manager" "strings" "time" @@ -37,11 +29,18 @@ import ( "k8s.io/apimachinery/pkg/api/equality" kerrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/json" "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/rest" + "k8s.io/client-go/util/workqueue" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" + runtimeevent "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/manager" xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" "github.com/crossplane/crossplane-runtime/pkg/controller" @@ -57,6 +56,15 @@ import ( "github.com/crossplane-contrib/provider-kubernetes/apis/object/v1alpha2" apisv1alpha1 "github.com/crossplane-contrib/provider-kubernetes/apis/v1alpha1" "github.com/crossplane-contrib/provider-kubernetes/internal/clients" + "github.com/crossplane-contrib/provider-kubernetes/internal/features" +) + +const ( + // AnnotationKeyProviderConfigRef is the annotation key for the provider config + // reference on the managed kubernetes resource. We use this to trigger the + // reconciler for the Object with that provider config in case of watch + // events. Only used when alpha watches are enabled. 
+ AnnotationKeyProviderConfigRef = "kubernetes.crossplane.io/provider-config-ref" ) const ( @@ -137,27 +145,25 @@ func Setup(mgr ctrl.Manager, o controller.Options, sanitizeSecrets bool, pollJit if o.Features.Enabled(features.EnableAlphaWatches) { ca := mgr.GetCache() - if err := ca.IndexField(context.Background(), &v1alpha2.Object{}, objectRefGVKsIndex, IndexReferencedResourceRefGVKs); err != nil { + if err := ca.IndexField(context.Background(), &v1alpha2.Object{}, resourceRefGVKsIndex, IndexReferencedResourceRefGVKs); err != nil { return errors.Wrap(err, "cannot add index for object reference GVKs") } - if err := ca.IndexField(context.Background(), &v1alpha2.Object{}, objectsRefsIndex, IndexReferencesResourcesRefs); err != nil { + if err := ca.IndexField(context.Background(), &v1alpha2.Object{}, resourceRefsIndex, IndexReferencesResourcesRefs); err != nil { return errors.Wrap(err, "cannot add index for object references") } - i := referencedResourceInformers{ + i := resourceInformers{ log: l, config: mgr.GetConfig(), objectsCache: ca, resourceCaches: make(map[gvkWithHost]resourceCache), - sinks: make(map[string]func(ev runtimeevent.UpdateEvent)), } conn.kindObserver = &i if err := mgr.Add(manager.RunnableFunc(func(ctx context.Context) error { // Run every 5 minutes. - // TODO: Fix the period after development is done. - wait.UntilWithContext(ctx, i.cleanupResourceInformers, 5*time.Second) + wait.UntilWithContext(ctx, i.cleanupResourceInformers, 5*time.Minute) return nil })); err != nil { return errors.Wrap(err, "cannot add cleanup referenced resource informers runnable") @@ -267,7 +273,7 @@ func (c *external) Observe(ctx context.Context, mg resource.Managed) (managed.Ex } // We know the resource exists, so we can start watching it for realtime - // events. + // events if we have the kindObserver (i.e. watches enabled). 
if c.kindObserver != nil { c.kindObserver.WatchResources(c.client.GetConfig(), cr.Spec.ProviderConfigReference.Name, observed.GroupVersionKind()) } @@ -297,9 +303,13 @@ func (c *external) Create(ctx context.Context, mg resource.Managed) (managed.Ext } meta.AddAnnotations(obj, map[string]string{ - v1.LastAppliedConfigAnnotation: string(cr.Spec.ForProvider.Manifest.Raw), - "kubernetes.crossplane.io/provider-config-ref": cr.Spec.ProviderConfigReference.Name, + v1.LastAppliedConfigAnnotation: string(cr.Spec.ForProvider.Manifest.Raw), }) + if c.kindObserver != nil { + meta.AddAnnotations(obj, map[string]string{ + AnnotationKeyProviderConfigRef: cr.Spec.ProviderConfigReference.Name, + }) + } if err := c.client.Create(ctx, obj); err != nil { return managed.ExternalCreation{}, errors.Wrap(err, errCreateObject) @@ -322,9 +332,13 @@ func (c *external) Update(ctx context.Context, mg resource.Managed) (managed.Ext } meta.AddAnnotations(obj, map[string]string{ - v1.LastAppliedConfigAnnotation: string(cr.Spec.ForProvider.Manifest.Raw), - "kubernetes.crossplane.io/provider-config-ref": cr.Spec.ProviderConfigReference.Name, + v1.LastAppliedConfigAnnotation: string(cr.Spec.ForProvider.Manifest.Raw), }) + if c.kindObserver != nil { + meta.AddAnnotations(obj, map[string]string{ + AnnotationKeyProviderConfigRef: cr.Spec.ProviderConfigReference.Name, + }) + } if err := c.client.Apply(ctx, obj); err != nil { return managed.ExternalUpdate{}, errors.Wrap(CleanErr(err), errApplyObject) @@ -475,7 +489,7 @@ func (c *external) resolveReferencies(ctx context.Context, obj *v1alpha2.Object) c.logger.Debug("Resolving referencies.") // Loop through references to resolve each referenced resource - var gvks []schema.GroupVersionKind + gvks := make([]schema.GroupVersionKind, 0, len(obj.Spec.References)) for _, ref := range obj.Spec.References { if ref.DependsOn == nil && ref.PatchesFrom == nil { continue @@ -512,8 +526,8 @@ func (c *external) resolveReferencies(ctx context.Context, obj 
*v1alpha2.Object) if c.kindObserver != nil { // Referenced resources always live on the control plane (i.e. local cluster), - // so we don't pass an extra rest config or provider config with the - // watch call. + // so we don't pass an extra rest config (defaulting local rest config) + // or provider config with the watch call. c.kindObserver.WatchResources(nil, "", gvks...) } diff --git a/internal/controller/object/object_test.go b/internal/controller/object/object_test.go index 7e151bcd..814d685a 100644 --- a/internal/controller/object/object_test.go +++ b/internal/controller/object/object_test.go @@ -43,6 +43,7 @@ import ( "github.com/crossplane-contrib/provider-kubernetes/apis/object/v1alpha2" kubernetesv1alpha1 "github.com/crossplane-contrib/provider-kubernetes/apis/v1alpha1" + "github.com/crossplane-contrib/provider-kubernetes/internal/clients" ) const ( @@ -238,7 +239,7 @@ func Test_connector_Connect(t *testing.T) { type args struct { client client.Client - clientForProvider func(ctx context.Context, inclusterClient client.Client, providerConfigName string) (client.Client, error) + clientForProvider func(ctx context.Context, inclusterClient client.Client, providerConfigName string) (clients.ClusterClient, error) usage resource.Tracker mg resource.Managed } @@ -268,8 +269,12 @@ func Test_connector_Connect(t *testing.T) { }, "Success": { args: args{ - clientForProvider: func(ctx context.Context, inclusterClient client.Client, providerConfigName string) (client.Client, error) { - return &test.MockClient{}, nil + clientForProvider: func(ctx context.Context, inclusterClient client.Client, providerConfigName string) (clients.ClusterClient, error) { + return &clients.ApplicatorClientWithConfig{ + ClientApplicator: resource.ClientApplicator{ + Client: &test.MockClient{}, + }, + }, nil }, usage: resource.TrackerFn(func(ctx context.Context, mg resource.Managed) error { return nil }), mg: kubernetesObject(), @@ -280,7 +285,7 @@ func Test_connector_Connect(t 
*testing.T) { }, "ErrorGettingClientForProvider": { args: args{ - clientForProvider: func(ctx context.Context, inclusterClient client.Client, providerConfigName string) (client.Client, error) { + clientForProvider: func(ctx context.Context, inclusterClient client.Client, providerConfigName string) (clients.ClusterClient, error) { return nil, errBoom }, usage: resource.TrackerFn(func(ctx context.Context, mg resource.Managed) error { return nil }), @@ -309,7 +314,7 @@ func Test_connector_Connect(t *testing.T) { func Test_helmExternal_Observe(t *testing.T) { type args struct { - client resource.ClientApplicator + client clients.ClusterClient mg resource.Managed } type want struct { @@ -331,9 +336,11 @@ func Test_helmExternal_Observe(t *testing.T) { "NoKubernetesObjectExists": { args: args{ mg: kubernetesObject(), - client: resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(kerrors.NewNotFound(schema.GroupResource{}, "")), + client: &clients.ApplicatorClientWithConfig{ + ClientApplicator: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(kerrors.NewNotFound(schema.GroupResource{}, "")), + }, }, }, }, @@ -355,9 +362,11 @@ func Test_helmExternal_Observe(t *testing.T) { "FailedToGet": { args: args{ mg: kubernetesObject(), - client: resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(errBoom), + client: &clients.ApplicatorClientWithConfig{ + ClientApplicator: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(errBoom), + }, }, }, }, @@ -368,12 +377,14 @@ func Test_helmExternal_Observe(t *testing.T) { "NoLastAppliedAnnotation": { args: args{ mg: kubernetesObject(), - client: resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { - *obj.(*unstructured.Unstructured) = *externalResource() - return nil - }), + client: &clients.ApplicatorClientWithConfig{ + ClientApplicator: 
resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { + *obj.(*unstructured.Unstructured) = *externalResource() + return nil + }), + }, }, }, }, @@ -385,15 +396,17 @@ func Test_helmExternal_Observe(t *testing.T) { "NotUpToDate": { args: args{ mg: kubernetesObject(), - client: resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { - *obj.(*unstructured.Unstructured) = - *externalResourceWithLastAppliedConfigAnnotation( - `{"apiVersion":"v1","kind":"Namespace","metadata":{"name":"crossplane-system", "labels": {"old-label":"gone"}}}`, - ) - return nil - }), + client: &clients.ApplicatorClientWithConfig{ + ClientApplicator: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { + *obj.(*unstructured.Unstructured) = + *externalResourceWithLastAppliedConfigAnnotation( + `{"apiVersion":"v1","kind":"Namespace","metadata":{"name":"crossplane-system", "labels": {"old-label":"gone"}}}`, + ) + return nil + }), + }, }, }, }, @@ -409,15 +422,17 @@ func Test_helmExternal_Observe(t *testing.T) { "apiVersion": "v1", "kind": "Namespace" }`) }), - client: resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { - *obj.(*unstructured.Unstructured) = - *externalResourceWithLastAppliedConfigAnnotation( - `{"apiVersion":"v1","kind":"Namespace"}`, - ) - return nil - }), + client: &clients.ApplicatorClientWithConfig{ + ClientApplicator: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { + *obj.(*unstructured.Unstructured) = + *externalResourceWithLastAppliedConfigAnnotation( + `{"apiVersion":"v1","kind":"Namespace"}`, + ) + return nil + }), + }, }, }, }, @@ -433,12 +448,14 @@ func Test_helmExternal_Observe(t *testing.T) { "UpToDate": { args: args{ mg: 
kubernetesObject(), - client: resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { - *obj.(*unstructured.Unstructured) = *upToDateExternalResource() - return nil - }), + client: &clients.ApplicatorClientWithConfig{ + ClientApplicator: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { + *obj.(*unstructured.Unstructured) = *upToDateExternalResource() + return nil + }), + }, }, }, }, @@ -457,12 +474,14 @@ func Test_helmExternal_Observe(t *testing.T) { obj.Spec.References = objectReferences() obj.Spec.References[0].PatchesFrom.FieldPath = ptr.To("nonexistent_field") }), - client: resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { - *obj.(*unstructured.Unstructured) = *referenceObject() - return nil - }), + client: &clients.ApplicatorClientWithConfig{ + ClientApplicator: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { + *obj.(*unstructured.Unstructured) = *referenceObject() + return nil + }), + }, }, }, }, @@ -477,9 +496,11 @@ func Test_helmExternal_Observe(t *testing.T) { mg: kubernetesObject(func(obj *v1alpha2.Object) { obj.Spec.References = objectReferences() }), - client: resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(errBoom), + client: &clients.ApplicatorClientWithConfig{ + ClientApplicator: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(errBoom), + }, }, }, }, @@ -495,18 +516,20 @@ func Test_helmExternal_Observe(t *testing.T) { obj.ObjectMeta.DeletionTimestamp = &metav1.Time{Time: time.Now()} obj.Spec.References = objectReferences() }), - client: resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { - if key.Name == 
testReferenceObjectName { - *obj.(*unstructured.Unstructured) = *referenceObject() - return nil - } else if key.Name == externalResourceName { - return kerrors.NewNotFound(schema.GroupResource{}, "") - } - return errBoom + client: &clients.ApplicatorClientWithConfig{ + ClientApplicator: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + if key.Name == testReferenceObjectName { + *obj.(*unstructured.Unstructured) = *referenceObject() + return nil + } else if key.Name == externalResourceName { + return kerrors.NewNotFound(schema.GroupResource{}, "") + } + return errBoom + }, + MockUpdate: test.NewMockUpdateFn(nil), }, - MockUpdate: test.NewMockUpdateFn(nil), }, }, }, @@ -520,19 +543,21 @@ func Test_helmExternal_Observe(t *testing.T) { mg: kubernetesObject(func(obj *v1alpha2.Object) { obj.Spec.References = objectReferences() }), - client: resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { - if key.Name == testReferenceObjectName { - *obj.(*unstructured.Unstructured) = *referenceObject() - return nil - } else if key.Name == externalResourceName { - *obj.(*unstructured.Unstructured) = *upToDateExternalResource() - return nil - } - return errBoom + client: &clients.ApplicatorClientWithConfig{ + ClientApplicator: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + if key.Name == testReferenceObjectName { + *obj.(*unstructured.Unstructured) = *referenceObject() + return nil + } else if key.Name == externalResourceName { + *obj.(*unstructured.Unstructured) = *upToDateExternalResource() + return nil + } + return errBoom + }, + MockUpdate: test.NewMockUpdateFn(nil), }, - MockUpdate: test.NewMockUpdateFn(nil), }, }, }, @@ -550,9 +575,11 @@ func Test_helmExternal_Observe(t *testing.T) { mg: 
kubernetesObject(func(obj *v1alpha2.Object) { obj.Spec.References = []v1alpha2.Reference{{}} }), - client: resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil), + client: &clients.ApplicatorClientWithConfig{ + ClientApplicator: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil), + }, }, }, }, @@ -578,22 +605,24 @@ func Test_helmExternal_Observe(t *testing.T) { }, } }), - client: resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { - switch key.Name { - case externalResourceName: - *obj.(*unstructured.Unstructured) = *upToDateExternalResource() - case testSecretName: - *obj.(*unstructured.Unstructured) = unstructured.Unstructured{ - Object: map[string]interface{}{ - "data": map[string]interface{}{ - "db-password": "MTIzNDU=", + client: &clients.ApplicatorClientWithConfig{ + ClientApplicator: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + switch key.Name { + case externalResourceName: + *obj.(*unstructured.Unstructured) = *upToDateExternalResource() + case testSecretName: + *obj.(*unstructured.Unstructured) = unstructured.Unstructured{ + Object: map[string]interface{}{ + "data": map[string]interface{}{ + "db-password": "MTIzNDU=", + }, }, - }, + } } - } - return nil + return nil + }, }, }, }, @@ -624,16 +653,18 @@ func Test_helmExternal_Observe(t *testing.T) { }, } }), - client: resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { - switch key.Name { - case externalResourceName: - *obj.(*unstructured.Unstructured) = *upToDateExternalResource() - case testSecretName: - return errBoom - } - return nil + client: &clients.ApplicatorClientWithConfig{ + ClientApplicator: resource.ClientApplicator{ + Client: &test.MockClient{ + 
MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + switch key.Name { + case externalResourceName: + *obj.(*unstructured.Unstructured) = *upToDateExternalResource() + case testSecretName: + return errBoom + } + return nil + }, }, }, }, @@ -647,15 +678,17 @@ func Test_helmExternal_Observe(t *testing.T) { mg: kubernetesObject(func(obj *v1alpha2.Object) { obj.Spec.ManagementPolicies = xpv1.ManagementPolicies{xpv1.ManagementActionObserve} }), - client: resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { - *obj.(*unstructured.Unstructured) = - *externalResourceWithLastAppliedConfigAnnotation( - `{"apiVersion":"v1","kind":"Namespace","metadata":{"name":"crossplane-system", "labels": {"old-label":"gone"}}}`, - ) - return nil - }), + client: &clients.ApplicatorClientWithConfig{ + ClientApplicator: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { + *obj.(*unstructured.Unstructured) = + *externalResourceWithLastAppliedConfigAnnotation( + `{"apiVersion":"v1","kind":"Namespace","metadata":{"name":"crossplane-system", "labels": {"old-label":"gone"}}}`, + ) + return nil + }), + }, }, }, }, @@ -686,7 +719,7 @@ func Test_helmExternal_Observe(t *testing.T) { func Test_helmExternal_Create(t *testing.T) { type args struct { - client resource.ClientApplicator + client clients.ClusterClient mg resource.Managed } type want struct { @@ -718,9 +751,11 @@ func Test_helmExternal_Create(t *testing.T) { "FailedToCreate": { args: args{ mg: kubernetesObject(), - client: resource.ClientApplicator{ - Client: &test.MockClient{ - MockCreate: test.NewMockCreateFn(errBoom), + client: &clients.ApplicatorClientWithConfig{ + ClientApplicator: resource.ClientApplicator{ + Client: &test.MockClient{ + MockCreate: test.NewMockCreateFn(errBoom), + }, }, }, }, @@ -735,18 +770,20 @@ func Test_helmExternal_Create(t *testing.T) { 
"apiVersion": "v1", "kind": "Namespace" }`) }), - client: resource.ClientApplicator{ - Client: &test.MockClient{ - MockCreate: test.NewMockCreateFn(nil, func(obj client.Object) error { - _, ok := obj.GetAnnotations()[corev1.LastAppliedConfigAnnotation] - if !ok { - t.Errorf("Last applied annotation not set with create") - } - if obj.GetName() != testObjectName { - t.Errorf("Name should default to object name when not provider in manifest") - } - return nil - }), + client: &clients.ApplicatorClientWithConfig{ + ClientApplicator: resource.ClientApplicator{ + Client: &test.MockClient{ + MockCreate: test.NewMockCreateFn(nil, func(obj client.Object) error { + _, ok := obj.GetAnnotations()[corev1.LastAppliedConfigAnnotation] + if !ok { + t.Errorf("Last applied annotation not set with create") + } + if obj.GetName() != testObjectName { + t.Errorf("Name should default to object name when not provider in manifest") + } + return nil + }), + }, }, }, }, @@ -757,15 +794,17 @@ func Test_helmExternal_Create(t *testing.T) { "Success": { args: args{ mg: kubernetesObject(), - client: resource.ClientApplicator{ - Client: &test.MockClient{ - MockCreate: test.NewMockCreateFn(nil, func(obj client.Object) error { - _, ok := obj.GetAnnotations()[corev1.LastAppliedConfigAnnotation] - if !ok { - t.Errorf("Last applied annotation not set with create") - } - return nil - }), + client: &clients.ApplicatorClientWithConfig{ + ClientApplicator: resource.ClientApplicator{ + Client: &test.MockClient{ + MockCreate: test.NewMockCreateFn(nil, func(obj client.Object) error { + _, ok := obj.GetAnnotations()[corev1.LastAppliedConfigAnnotation] + if !ok { + t.Errorf("Last applied annotation not set with create") + } + return nil + }), + }, }, }, }, @@ -794,7 +833,7 @@ func Test_helmExternal_Create(t *testing.T) { func Test_helmExternal_Update(t *testing.T) { type args struct { - client resource.ClientApplicator + client clients.ClusterClient mg resource.Managed } type want struct { @@ -826,10 +865,12 @@ 
func Test_helmExternal_Update(t *testing.T) { "FailedToApply": { args: args{ mg: kubernetesObject(), - client: resource.ClientApplicator{ - Applicator: resource.ApplyFn(func(context.Context, client.Object, ...resource.ApplyOption) error { - return errBoom - }), + client: &clients.ApplicatorClientWithConfig{ + ClientApplicator: resource.ClientApplicator{ + Applicator: resource.ApplyFn(func(context.Context, client.Object, ...resource.ApplyOption) error { + return errBoom + }), + }, }, }, want: want{ @@ -843,13 +884,15 @@ func Test_helmExternal_Update(t *testing.T) { "apiVersion": "v1", "kind": "Namespace" }`) }), - client: resource.ClientApplicator{ - Applicator: resource.ApplyFn(func(ctx context.Context, obj client.Object, op ...resource.ApplyOption) error { - if obj.GetName() != testObjectName { - t.Errorf("Name should default to object name when not provider in manifest") - } - return nil - }), + client: &clients.ApplicatorClientWithConfig{ + ClientApplicator: resource.ClientApplicator{ + Applicator: resource.ApplyFn(func(ctx context.Context, obj client.Object, op ...resource.ApplyOption) error { + if obj.GetName() != testObjectName { + t.Errorf("Name should default to object name when not provider in manifest") + } + return nil + }), + }, }, }, want: want{ @@ -859,10 +902,12 @@ func Test_helmExternal_Update(t *testing.T) { "Success": { args: args{ mg: kubernetesObject(), - client: resource.ClientApplicator{ - Applicator: resource.ApplyFn(func(context.Context, client.Object, ...resource.ApplyOption) error { - return nil - }), + client: &clients.ApplicatorClientWithConfig{ + ClientApplicator: resource.ClientApplicator{ + Applicator: resource.ApplyFn(func(context.Context, client.Object, ...resource.ApplyOption) error { + return nil + }), + }, }, }, want: want{ @@ -890,7 +935,7 @@ func Test_helmExternal_Update(t *testing.T) { func Test_helmExternal_Delete(t *testing.T) { type args struct { - client resource.ClientApplicator + client clients.ClusterClient mg 
resource.Managed } type want struct { @@ -921,9 +966,11 @@ func Test_helmExternal_Delete(t *testing.T) { "FailedToDelete": { args: args{ mg: kubernetesObject(), - client: resource.ClientApplicator{ - Client: &test.MockClient{ - MockDelete: test.NewMockDeleteFn(errBoom), + client: &clients.ApplicatorClientWithConfig{ + ClientApplicator: resource.ClientApplicator{ + Client: &test.MockClient{ + MockDelete: test.NewMockDeleteFn(errBoom), + }, }, }, }, @@ -938,14 +985,16 @@ func Test_helmExternal_Delete(t *testing.T) { "apiVersion": "v1", "kind": "Namespace" }`) }), - client: resource.ClientApplicator{ - Client: &test.MockClient{ - MockDelete: test.NewMockDeleteFn(nil, func(obj client.Object) error { - if obj.GetName() != testObjectName { - t.Errorf("Name should default to object name when not provider in manifest") - } - return nil - }), + client: &clients.ApplicatorClientWithConfig{ + ClientApplicator: resource.ClientApplicator{ + Client: &test.MockClient{ + MockDelete: test.NewMockDeleteFn(nil, func(obj client.Object) error { + if obj.GetName() != testObjectName { + t.Errorf("Name should default to object name when not provider in manifest") + } + return nil + }), + }, }, }, }, @@ -956,9 +1005,11 @@ func Test_helmExternal_Delete(t *testing.T) { "Success": { args: args{ mg: kubernetesObject(), - client: resource.ClientApplicator{ - Client: &test.MockClient{ - MockDelete: test.NewMockDeleteFn(nil), + client: &clients.ApplicatorClientWithConfig{ + ClientApplicator: resource.ClientApplicator{ + Client: &test.MockClient{ + MockDelete: test.NewMockDeleteFn(nil), + }, }, }, }, diff --git a/internal/controller/observedobjectcollection/reconciler_test.go b/internal/controller/observedobjectcollection/reconciler_test.go index f7faaa30..ad984cde 100644 --- a/internal/controller/observedobjectcollection/reconciler_test.go +++ b/internal/controller/observedobjectcollection/reconciler_test.go @@ -38,10 +38,12 @@ import ( xpv1 
"github.com/crossplane/crossplane-runtime/apis/common/v1" "github.com/crossplane/crossplane-runtime/pkg/logging" + "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/test" "github.com/crossplane-contrib/provider-kubernetes/apis/object/v1alpha2" "github.com/crossplane-contrib/provider-kubernetes/apis/observedobjectcollection/v1alpha1" + "github.com/crossplane-contrib/provider-kubernetes/internal/clients" ) func TestReconciler(t *testing.T) { @@ -380,8 +382,12 @@ func TestReconciler(t *testing.T) { r := &Reconciler{ client: tc.args.client, log: logging.NewNopLogger(), - clientForProvider: func(ctx context.Context, inclusterClient client.Client, providerConfigName string) (client.Client, error) { - return tc.args.client, nil + clientForProvider: func(ctx context.Context, inclusterClient client.Client, providerConfigName string) (clients.ClusterClient, error) { + return &clients.ApplicatorClientWithConfig{ + ClientApplicator: resource.ClientApplicator{ + Client: tc.args.client, + }, + }, nil }, observedObjectName: func(collection client.Object, matchedObject client.Object) (string, error) { return fmt.Sprintf("%s-%s", collection.GetName(), matchedObject.GetName()), nil From d9740785723e3b8cfc946aade70aa3d1a3845999 Mon Sep 17 00:00:00 2001 From: Hasan Turken Date: Thu, 2 May 2024 18:16:45 +0300 Subject: [PATCH 08/19] Start an informer per provider config & gvk pair Signed-off-by: Hasan Turken --- internal/controller/object/indexes.go | 19 +++--------- internal/controller/object/informers.go | 41 ++++++++++++------------- internal/controller/object/object.go | 20 +++--------- 3 files changed, 28 insertions(+), 52 deletions(-) diff --git a/internal/controller/object/indexes.go b/internal/controller/object/indexes.go index d127f81c..2ed9d634 100644 --- a/internal/controller/object/indexes.go +++ b/internal/controller/object/indexes.go @@ -110,12 +110,11 @@ func refKey(providerConfig, ns, name, kind, apiVersion string) 
string { func enqueueObjectsForReferences(ca cache.Cache, log logging.Logger) func(ctx context.Context, ev runtimeevent.UpdateEvent, q workqueue.RateLimitingInterface) { return func(ctx context.Context, ev runtimeevent.UpdateEvent, q workqueue.RateLimitingInterface) { - config := getConfigName(ev.ObjectNew) - // "config" is the provider config name, which is the value for the - // provider config ref annotation. It will be empty for objects that - // are not controlled by the "Object" resource, i.e. referenced objects. + // "pc" is the provider pc name. It will be empty for referenced + // resources, as they are always on the control plane. + pc, _ := ctx.Value(keyProviderConfigName).(string) rGVK := ev.ObjectNew.GetObjectKind().GroupVersionKind() - key := refKey(config, ev.ObjectNew.GetNamespace(), ev.ObjectNew.GetName(), rGVK.Kind, rGVK.GroupVersion().String()) + key := refKey(pc, ev.ObjectNew.GetNamespace(), ev.ObjectNew.GetName(), rGVK.Kind, rGVK.GroupVersion().String()) objects := v1alpha2.ObjectList{} if err := ca.List(ctx, &objects, client.MatchingFields{resourceRefsIndex: key}); err != nil { @@ -124,16 +123,8 @@ func enqueueObjectsForReferences(ca cache.Cache, log logging.Logger) func(ctx co } // queue those Objects for reconciliation for _, o := range objects.Items { - log.Info("Enqueueing Object because referenced resource changed", "name", o.GetName(), "referencedGVK", rGVK.String(), "referencedName", ev.ObjectNew.GetName()) + log.Info("Enqueueing Object because referenced resource changed", "name", o.GetName(), "referencedGVK", rGVK.String(), "referencedName", ev.ObjectNew.GetName(), "providerConfig", pc) q.Add(reconcile.Request{NamespacedName: types.NamespacedName{Name: o.GetName()}}) } } } - -func getConfigName(o client.Object) string { - a := o.GetAnnotations() - if a == nil { - return "" - } - return a[AnnotationKeyProviderConfigRef] -} diff --git a/internal/controller/object/informers.go b/internal/controller/object/informers.go index 
bcca7fde..85fadac0 100644 --- a/internal/controller/object/informers.go +++ b/internal/controller/object/informers.go @@ -2,7 +2,6 @@ package object import ( "context" - "github.com/crossplane/crossplane-runtime/pkg/errors" "strings" "sync" @@ -18,6 +17,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/source" + "github.com/crossplane/crossplane-runtime/pkg/errors" "github.com/crossplane/crossplane-runtime/pkg/logging" "github.com/crossplane-contrib/provider-kubernetes/apis/object/v1alpha2" @@ -38,24 +38,22 @@ type resourceInformers struct { // resourceCaches holds the resource caches. These are dynamically started // and stopped based on the Objects that reference or managing them. - resourceCaches map[gvkWithHost]resourceCache + resourceCaches map[gvkWithConfig]resourceCache objectsCache cache.Cache - sink func(ev runtimeevent.UpdateEvent) + sink func(providerConfig string, ev runtimeevent.UpdateEvent) } -type gvkWithHost struct { - host string - gvk schema.GroupVersionKind +type gvkWithConfig struct { + // Which provider config was used to create this cache. We will use this + // information to figure out whether there are Objects relying on this cache + // left during garbage collection of caches. + providerConfig string + gvk schema.GroupVersionKind } type resourceCache struct { cache cache.Cache cancelFn context.CancelFunc - - // Which provider config was used to create this cache. We will use this - // information to figure out whether there are Objects relying on this cache - // left during garbage collection of caches. 
- providerConfig string } var _ source.Source = &resourceInformers{} @@ -69,13 +67,13 @@ func (i *resourceInformers) Start(ctx context.Context, h handler.EventHandler, q if i.sink != nil { return errors.New("source already started, cannot start it again") } - i.sink = func(ev runtimeevent.UpdateEvent) { + i.sink = func(providerConfig string, ev runtimeevent.UpdateEvent) { for _, p := range ps { if !p.Update(ev) { return } } - h.Update(ctx, ev, q) + h.Update(context.WithValue(ctx, keyProviderConfigName, providerConfig), ev, q) } go func() { @@ -108,11 +106,11 @@ func (i *resourceInformers) WatchResources(rc *rest.Config, providerConfig strin // start new informers for _, gvk := range gvks { - if _, found := i.resourceCaches[gvkWithHost{host: rc.Host, gvk: gvk}]; found { + if _, found := i.resourceCaches[gvkWithConfig{providerConfig: providerConfig, gvk: gvk}]; found { continue } - log := i.log.WithValues("host", rc.Host, "gvk", gvk.String()) + log := i.log.WithValues("providerConfig", providerConfig, "gvk", gvk.String()) ca, err := cache.New(rc, cache.Options{}) if err != nil { @@ -148,7 +146,8 @@ func (i *resourceInformers) WatchResources(rc *rest.Config, providerConfig strin ObjectOld: old, ObjectNew: obj, } - i.sink(ev) + + i.sink(providerConfig, ev) }, }); err != nil { cancelFn() @@ -163,11 +162,9 @@ func (i *resourceInformers) WatchResources(rc *rest.Config, providerConfig strin _ = ca.Start(ctx) }() - i.resourceCaches[gvkWithHost{host: rc.Host, gvk: gvk}] = resourceCache{ + i.resourceCaches[gvkWithConfig{providerConfig: providerConfig, gvk: gvk}] = resourceCache{ cache: ca, cancelFn: cancelFn, - - providerConfig: providerConfig, } // wait for in the background, and only when synced add to the routed cache @@ -189,8 +186,8 @@ func (i *resourceInformers) cleanupResourceInformers(ctx context.Context) { i.log.Debug("Running garbage collection for resource informers", "count", len(i.resourceCaches)) for gh, ca := range i.resourceCaches { list := 
v1alpha2.ObjectList{} - if err := i.objectsCache.List(ctx, &list, client.MatchingFields{resourceRefGVKsIndex: refKeyGKV(ca.providerConfig, gh.gvk.Kind, gh.gvk.Group, gh.gvk.Version)}); err != nil { - i.log.Debug("cannot list objects referencing a certain resource GVK", "error", err, "fieldSelector", resourceRefGVKsIndex+"="+refKeyGKV(ca.providerConfig, gh.gvk.Kind, gh.gvk.Group, gh.gvk.Version)) + if err := i.objectsCache.List(ctx, &list, client.MatchingFields{resourceRefGVKsIndex: refKeyGKV(gh.providerConfig, gh.gvk.Kind, gh.gvk.Group, gh.gvk.Version)}); err != nil { + i.log.Debug("cannot list objects referencing a certain resource GVK", "error", err, "fieldSelector", resourceRefGVKsIndex+"="+refKeyGKV(gh.providerConfig, gh.gvk.Kind, gh.gvk.Group, gh.gvk.Version)) } if len(list.Items) > 0 { @@ -198,7 +195,7 @@ func (i *resourceInformers) cleanupResourceInformers(ctx context.Context) { } ca.cancelFn() - i.log.Info("Stopped resource watch", "provider config", ca.providerConfig, "host", gh.host, "gvk", gh.gvk) + i.log.Info("Stopped resource watch", "provider config", gh.providerConfig, "gvk", gh.gvk) delete(i.resourceCaches, gh) } } diff --git a/internal/controller/object/object.go b/internal/controller/object/object.go index 690ddf9a..d8ce3f3a 100644 --- a/internal/controller/object/object.go +++ b/internal/controller/object/object.go @@ -59,12 +59,10 @@ import ( "github.com/crossplane-contrib/provider-kubernetes/internal/features" ) +type key int + const ( - // AnnotationKeyProviderConfigRef is the annotation key for the provider config - // reference on the managed kubernetes resource. We use this to trigger the - // reconciler for the Object with that provider config in case of watch - // events. Only used when alpha watches are enabled. 
- AnnotationKeyProviderConfigRef = "kubernetes.crossplane.io/provider-config-ref" + keyProviderConfigName key = iota ) const ( @@ -157,7 +155,7 @@ func Setup(mgr ctrl.Manager, o controller.Options, sanitizeSecrets bool, pollJit config: mgr.GetConfig(), objectsCache: ca, - resourceCaches: make(map[gvkWithHost]resourceCache), + resourceCaches: make(map[gvkWithConfig]resourceCache), } conn.kindObserver = &i @@ -305,11 +303,6 @@ func (c *external) Create(ctx context.Context, mg resource.Managed) (managed.Ext meta.AddAnnotations(obj, map[string]string{ v1.LastAppliedConfigAnnotation: string(cr.Spec.ForProvider.Manifest.Raw), }) - if c.kindObserver != nil { - meta.AddAnnotations(obj, map[string]string{ - AnnotationKeyProviderConfigRef: cr.Spec.ProviderConfigReference.Name, - }) - } if err := c.client.Create(ctx, obj); err != nil { return managed.ExternalCreation{}, errors.Wrap(err, errCreateObject) @@ -334,11 +327,6 @@ func (c *external) Update(ctx context.Context, mg resource.Managed) (managed.Ext meta.AddAnnotations(obj, map[string]string{ v1.LastAppliedConfigAnnotation: string(cr.Spec.ForProvider.Manifest.Raw), }) - if c.kindObserver != nil { - meta.AddAnnotations(obj, map[string]string{ - AnnotationKeyProviderConfigRef: cr.Spec.ProviderConfigReference.Name, - }) - } if err := c.client.Apply(ctx, obj); err != nil { return managed.ExternalUpdate{}, errors.Wrap(CleanErr(err), errApplyObject) From 8d2223781e9875125494cc0e8ea2116e4a6b95d8 Mon Sep 17 00:00:00 2001 From: Hasan Turken Date: Thu, 2 May 2024 23:47:29 +0300 Subject: [PATCH 09/19] Start watching even if it does not exist (yet) Signed-off-by: Hasan Turken --- internal/controller/object/object.go | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/internal/controller/object/object.go b/internal/controller/object/object.go index d8ce3f3a..8f34bab5 100644 --- a/internal/controller/object/object.go +++ b/internal/controller/object/object.go @@ -254,6 +254,10 @@ func (c *external) Observe(ctx 
context.Context, mg resource.Managed) (managed.Ex if err != nil { return managed.ExternalObservation{}, err } + + if c.kindObserver != nil { + c.kindObserver.WatchResources(c.client.GetConfig(), cr.Spec.ProviderConfigReference.Name, desired.GroupVersionKind()) + } observed := desired.DeepCopy() @@ -270,12 +274,6 @@ func (c *external) Observe(ctx context.Context, mg resource.Managed) (managed.Ex return managed.ExternalObservation{}, errors.Wrap(err, errGetObject) } - // We know the resource exists, so we can start watching it for realtime - // events if we have the kindObserver (i.e. watches enabled). - if c.kindObserver != nil { - c.kindObserver.WatchResources(c.client.GetConfig(), cr.Spec.ProviderConfigReference.Name, observed.GroupVersionKind()) - } - if err = c.setObserved(cr, observed); err != nil { return managed.ExternalObservation{}, err } From 3f2e36e36f442d30d41bb69aa8c0e28730296115 Mon Sep 17 00:00:00 2001 From: Hasan Turken Date: Fri, 3 May 2024 08:54:14 +0300 Subject: [PATCH 10/19] Catch add or delete events as well Signed-off-by: Hasan Turken --- internal/controller/object/indexes.go | 10 ++++---- internal/controller/object/informers.go | 33 +++++++++++++++++++------ internal/controller/object/object.go | 4 +-- 3 files changed, 33 insertions(+), 14 deletions(-) diff --git a/internal/controller/object/indexes.go b/internal/controller/object/indexes.go index 2ed9d634..2bddca7f 100644 --- a/internal/controller/object/indexes.go +++ b/internal/controller/object/indexes.go @@ -108,13 +108,13 @@ func refKey(providerConfig, ns, name, kind, apiVersion string) string { return fmt.Sprintf("%s.%s.%s.%s.%s", providerConfig, name, ns, kind, apiVersion) } -func enqueueObjectsForReferences(ca cache.Cache, log logging.Logger) func(ctx context.Context, ev runtimeevent.UpdateEvent, q workqueue.RateLimitingInterface) { - return func(ctx context.Context, ev runtimeevent.UpdateEvent, q workqueue.RateLimitingInterface) { +func enqueueObjectsForReferences(ca cache.Cache, log 
logging.Logger) func(ctx context.Context, ev runtimeevent.GenericEvent, q workqueue.RateLimitingInterface) { + return func(ctx context.Context, ev runtimeevent.GenericEvent, q workqueue.RateLimitingInterface) { // "pc" is the provider pc name. It will be empty for referenced // resources, as they are always on the control plane. pc, _ := ctx.Value(keyProviderConfigName).(string) - rGVK := ev.ObjectNew.GetObjectKind().GroupVersionKind() - key := refKey(pc, ev.ObjectNew.GetNamespace(), ev.ObjectNew.GetName(), rGVK.Kind, rGVK.GroupVersion().String()) + rGVK := ev.Object.GetObjectKind().GroupVersionKind() + key := refKey(pc, ev.Object.GetNamespace(), ev.Object.GetName(), rGVK.Kind, rGVK.GroupVersion().String()) objects := v1alpha2.ObjectList{} if err := ca.List(ctx, &objects, client.MatchingFields{resourceRefsIndex: key}); err != nil { @@ -123,7 +123,7 @@ func enqueueObjectsForReferences(ca cache.Cache, log logging.Logger) func(ctx co } // queue those Objects for reconciliation for _, o := range objects.Items { - log.Info("Enqueueing Object because referenced resource changed", "name", o.GetName(), "referencedGVK", rGVK.String(), "referencedName", ev.ObjectNew.GetName(), "providerConfig", pc) + log.Info("Enqueueing Object because referenced resource changed", "name", o.GetName(), "referencedGVK", rGVK.String(), "referencedName", ev.Object.GetName(), "providerConfig", pc) q.Add(reconcile.Request{NamespacedName: types.NamespacedName{Name: o.GetName()}}) } } diff --git a/internal/controller/object/informers.go b/internal/controller/object/informers.go index 85fadac0..26367fcf 100644 --- a/internal/controller/object/informers.go +++ b/internal/controller/object/informers.go @@ -40,7 +40,7 @@ type resourceInformers struct { // and stopped based on the Objects that reference or managing them. 
resourceCaches map[gvkWithConfig]resourceCache objectsCache cache.Cache - sink func(providerConfig string, ev runtimeevent.UpdateEvent) + sink func(providerConfig string, ev runtimeevent.GenericEvent) } type gvkWithConfig struct { @@ -67,13 +67,13 @@ func (i *resourceInformers) Start(ctx context.Context, h handler.EventHandler, q if i.sink != nil { return errors.New("source already started, cannot start it again") } - i.sink = func(providerConfig string, ev runtimeevent.UpdateEvent) { + i.sink = func(providerConfig string, ev runtimeevent.GenericEvent) { for _, p := range ps { - if !p.Update(ev) { + if !p.Generic(ev) { return } } - h.Update(context.WithValue(ctx, keyProviderConfigName, providerConfig), ev, q) + h.Generic(context.WithValue(ctx, keyProviderConfigName, providerConfig), ev, q) } go func() { @@ -132,6 +132,16 @@ func (i *resourceInformers) WatchResources(rc *rest.Config, providerConfig strin } if _, err := inf.AddEventHandler(kcache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { + i.lock.RLock() + defer i.lock.RUnlock() + + ev := runtimeevent.GenericEvent{ + Object: obj.(client.Object), + } + + i.sink(providerConfig, ev) + }, UpdateFunc: func(oldObj, newObj interface{}) { old := oldObj.(client.Object) //nolint:forcetypeassert // Will always be client.Object. obj := newObj.(client.Object) //nolint:forcetypeassert // Will always be client.Object. 
@@ -142,9 +152,18 @@ func (i *resourceInformers) WatchResources(rc *rest.Config, providerConfig strin i.lock.RLock() defer i.lock.RUnlock() - ev := runtimeevent.UpdateEvent{ - ObjectOld: old, - ObjectNew: obj, + ev := runtimeevent.GenericEvent{ + Object: obj, + } + + i.sink(providerConfig, ev) + }, + DeleteFunc: func(obj interface{}) { + i.lock.RLock() + defer i.lock.RUnlock() + + ev := runtimeevent.GenericEvent{ + Object: obj.(client.Object), } i.sink(providerConfig, ev) diff --git a/internal/controller/object/object.go b/internal/controller/object/object.go index 8f34bab5..ede6c134 100644 --- a/internal/controller/object/object.go +++ b/internal/controller/object/object.go @@ -168,7 +168,7 @@ func Setup(mgr ctrl.Manager, o controller.Options, sanitizeSecrets bool, pollJit } cb = cb.WatchesRawSource(&i, handler.Funcs{ - UpdateFunc: func(ctx context.Context, ev runtimeevent.UpdateEvent, q workqueue.RateLimitingInterface) { + GenericFunc: func(ctx context.Context, ev runtimeevent.GenericEvent, q workqueue.RateLimitingInterface) { enqueueObjectsForReferences(ca, l)(ctx, ev, q) }, }) @@ -254,7 +254,7 @@ func (c *external) Observe(ctx context.Context, mg resource.Managed) (managed.Ex if err != nil { return managed.ExternalObservation{}, err } - + if c.kindObserver != nil { c.kindObserver.WatchResources(c.client.GetConfig(), cr.Spec.ProviderConfigReference.Name, desired.GroupVersionKind()) } From 227a30c60477742f4ed17d0e814d9bc05737277d Mon Sep 17 00:00:00 2001 From: Hasan Turken Date: Fri, 3 May 2024 12:18:22 +0300 Subject: [PATCH 11/19] Stop watching on Object deletion Signed-off-by: Hasan Turken --- internal/controller/object/informers.go | 85 ++++++++++++++++--------- internal/controller/object/object.go | 28 +++++++- 2 files changed, 81 insertions(+), 32 deletions(-) diff --git a/internal/controller/object/informers.go b/internal/controller/object/informers.go index 26367fcf..ce93064b 100644 --- a/internal/controller/object/informers.go +++ 
b/internal/controller/object/informers.go @@ -31,16 +31,15 @@ import ( // resources, and inform resourceInformers about them via the // WatchReferencedResources method. type resourceInformers struct { - log logging.Logger - config *rest.Config + log logging.Logger + config *rest.Config + objectsCache cache.Cache + sink func(providerConfig string, ev runtimeevent.GenericEvent) lock sync.RWMutex // everything below is protected by this lock - // resourceCaches holds the resource caches. These are dynamically started // and stopped based on the Objects that reference or managing them. resourceCaches map[gvkWithConfig]resourceCache - objectsCache cache.Cache - sink func(providerConfig string, ev runtimeevent.GenericEvent) } type gvkWithConfig struct { @@ -62,8 +61,6 @@ var _ source.Source = &resourceInformers{} // source with h as the sink of update events. It keeps sending events until // ctx is done. func (i *resourceInformers) Start(ctx context.Context, h handler.EventHandler, q workqueue.RateLimitingInterface, ps ...predicate.Predicate) error { - i.lock.Lock() - defer i.lock.Unlock() if i.sink != nil { return errors.New("source already started, cannot start it again") } @@ -78,9 +75,6 @@ func (i *resourceInformers) Start(ctx context.Context, h handler.EventHandler, q go func() { <-ctx.Done() - - i.lock.Lock() - defer i.lock.Unlock() i.sink = nil }() @@ -93,20 +87,20 @@ func (i *resourceInformers) Start(ctx context.Context, h handler.EventHandler, q // every reconcile to make resourceInformers aware of the referenced or managed // resources of the given Object. // -// Note that this complements cleanupResourceInformers which regularly +// Note that this complements garbageCollectResourceInformers which regularly // garbage collects resource informers that are no longer referenced by // any Object. 
func (i *resourceInformers) WatchResources(rc *rest.Config, providerConfig string, gvks ...schema.GroupVersionKind) { - i.lock.RLock() - defer i.lock.RUnlock() - if rc == nil { rc = i.config } // start new informers for _, gvk := range gvks { - if _, found := i.resourceCaches[gvkWithConfig{providerConfig: providerConfig, gvk: gvk}]; found { + i.lock.RLock() + _, found := i.resourceCaches[gvkWithConfig{providerConfig: providerConfig, gvk: gvk}] + i.lock.RUnlock() + if found { continue } @@ -133,9 +127,6 @@ func (i *resourceInformers) WatchResources(rc *rest.Config, providerConfig strin if _, err := inf.AddEventHandler(kcache.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { - i.lock.RLock() - defer i.lock.RUnlock() - ev := runtimeevent.GenericEvent{ Object: obj.(client.Object), } @@ -149,9 +140,6 @@ func (i *resourceInformers) WatchResources(rc *rest.Config, providerConfig strin return } - i.lock.RLock() - defer i.lock.RUnlock() - ev := runtimeevent.GenericEvent{ Object: obj, } @@ -159,9 +147,6 @@ func (i *resourceInformers) WatchResources(rc *rest.Config, providerConfig strin i.sink(providerConfig, ev) }, DeleteFunc: func(obj interface{}) { - i.lock.RLock() - defer i.lock.RUnlock() - ev := runtimeevent.GenericEvent{ Object: obj.(client.Object), } @@ -181,10 +166,12 @@ func (i *resourceInformers) WatchResources(rc *rest.Config, providerConfig strin _ = ca.Start(ctx) }() + i.lock.Lock() i.resourceCaches[gvkWithConfig{providerConfig: providerConfig, gvk: gvk}] = resourceCache{ cache: ca, cancelFn: cancelFn, } + i.lock.Unlock() // wait for in the background, and only when synced add to the routed cache go func() { @@ -195,12 +182,50 @@ func (i *resourceInformers) WatchResources(rc *rest.Config, providerConfig strin } } -// cleanupResourceInformers garbage collects resource informers that are no -// longer referenced by any Object. -// -// Note that this complements WatchResources which starts informers for -// the resources referenced or managed by an Object. 
-func (i *resourceInformers) cleanupResourceInformers(ctx context.Context) { +func (i *resourceInformers) StopWatchingResources(ctx context.Context, providerConfig string, gvks ...schema.GroupVersionKind) { + i.lock.Lock() + defer i.lock.Unlock() + + for _, gvk := range gvks { + ca, found := i.resourceCaches[gvkWithConfig{providerConfig: providerConfig, gvk: gvk}] + if !found { + continue + } + // Check if there are any other objects referencing this resource GVK. + list := v1alpha2.ObjectList{} + if err := i.objectsCache.List(ctx, &list, client.MatchingFields{resourceRefGVKsIndex: refKeyGKV(providerConfig, gvk.Kind, gvk.Group, gvk.Version)}); err != nil { + i.log.Debug("cannot list objects referencing a certain resource GVK", "error", err, "fieldSelector", resourceRefGVKsIndex+"="+refKeyGKV(providerConfig, gvk.Kind, gvk.Group, gvk.Version)) + } + + inUse := false + for _, o := range list.Items { + // We only care about objects that are not being deleted. Otherwise, + // we are getting into deadlocks while stopping the watches during + // deletion. + if o.GetDeletionTimestamp().IsZero() { + inUse = true + break + } + } + if inUse { + continue + } + + ca.cancelFn() + i.log.Info("Stopped resource watch", "provider config", providerConfig, "gvk", gvk) + delete(i.resourceCaches, gvkWithConfig{providerConfig: providerConfig, gvk: gvk}) + } +} + +// garbageCollectResourceInformers garbage collects resource informers that are +// no longer referenced by any Object. Ideally, all resource informers should +// be stopped/cleaned up when the Object is deleted. However, in practice, this +// is not always the case. This method is a safety net to clean up resource +// informers that are no longer referenced by any Object.
+func (i *resourceInformers) garbageCollectResourceInformers(ctx context.Context) { + i.lock.Lock() + defer i.lock.Unlock() + // stop old informers i.log.Debug("Running garbage collection for resource informers", "count", len(i.resourceCaches)) for gh, ca := range i.resourceCaches { diff --git a/internal/controller/object/object.go b/internal/controller/object/object.go index ede6c134..90422bdf 100644 --- a/internal/controller/object/object.go +++ b/internal/controller/object/object.go @@ -102,6 +102,10 @@ type KindObserver interface { // WatchResources starts a watch of the given kinds to trigger reconciles // when a referenced or managed objects of those kinds changes. WatchResources(rc *rest.Config, providerConfig string, gvks ...schema.GroupVersionKind) + + // StopWatchingResources stops watching the given kinds if they are no longer + // referenced or managed by any other Object that is not being deleted. + StopWatchingResources(ctx context.Context, providerConfig string, gvks ...schema.GroupVersionKind) } // Setup adds a controller that reconciles Object managed resources. @@ -160,8 +164,7 @@ func Setup(mgr ctrl.Manager, o controller.Options, sanitizeSecrets bool, pollJit conn.kindObserver = &i if err := mgr.Add(manager.RunnableFunc(func(ctx context.Context) error { - // Run every 5 minutes.
- wait.UntilWithContext(ctx, i.cleanupResourceInformers, 5*time.Minute) + wait.UntilWithContext(ctx, i.garbageCollectResourceInformers, time.Minute) return nil })); err != nil { return errors.Wrap(err, "cannot add cleanup referenced resource informers runnable") @@ -346,6 +349,27 @@ func (c *external) Delete(ctx context.Context, mg resource.Managed) error { return err } + if c.kindObserver != nil { + c.kindObserver.StopWatchingResources(ctx, cr.Spec.ProviderConfigReference.Name, obj.GroupVersionKind()) + if len(cr.Spec.References) > 0 { + gvks := make([]schema.GroupVersionKind, 0, len(cr.Spec.References)) + for _, ref := range cr.Spec.References { + if ref.DependsOn == nil && ref.PatchesFrom == nil { + continue + } + + refAPIVersion, refKind, _, _ := getReferenceInfo(ref) + g, v := parseAPIVersion(refAPIVersion) + gvks = append(gvks, schema.GroupVersionKind{ + Group: g, + Version: v, + Kind: refKind, + }) + } + c.kindObserver.StopWatchingResources(ctx, "", gvks...) + } + } + return errors.Wrap(resource.IgnoreNotFound(c.client.Delete(ctx, obj)), errDeleteObject) } From f6b36b3340eb5ab164dfd889920ead5786660aa4 Mon Sep 17 00:00:00 2001 From: Hasan Turken Date: Fri, 3 May 2024 14:55:17 +0300 Subject: [PATCH 12/19] Only watch the resource if desired Signed-off-by: Hasan Turken --- apis/object/v1alpha1/types.go | 8 ++++++++ apis/object/v1alpha1/zz_generated.deepcopy.go | 5 +++++ apis/object/v1alpha2/types.go | 8 ++++++++ apis/object/v1alpha2/zz_generated.deepcopy.go | 5 +++++ examples/object/object.yaml | 4 ++++ .../references/patches-from-resource.yaml | 4 ++++ internal/controller/object/object.go | 11 +++++++--- .../kubernetes.crossplane.io_objects.yaml | 20 +++++++++++++++++++ 8 files changed, 62 insertions(+), 3 deletions(-) diff --git a/apis/object/v1alpha1/types.go b/apis/object/v1alpha1/types.go index d44eca82..76f65f55 100644 --- a/apis/object/v1alpha1/types.go +++ b/apis/object/v1alpha1/types.go @@ -124,6 +124,14 @@ type ObjectSpec struct { ManagementPolicy 
`json:"managementPolicy,omitempty"` References []Reference `json:"references,omitempty"` Readiness Readiness `json:"readiness,omitempty"` + // Watch enables watching the referenced or managed kubernetes resources. + // + // THIS IS AN ALPHA FIELD. Do not use it in production. It is not honored + // unless the relevant Crossplane feature flag is enabled, and may be + // changed or removed without notice. + // +optional + // +kubebuilder:default=false + Watch *bool `json:"watch,omitempty"` } // ReadinessPolicy defines how the Object's readiness condition should be computed. diff --git a/apis/object/v1alpha1/zz_generated.deepcopy.go b/apis/object/v1alpha1/zz_generated.deepcopy.go index aad34718..b0968cc2 100644 --- a/apis/object/v1alpha1/zz_generated.deepcopy.go +++ b/apis/object/v1alpha1/zz_generated.deepcopy.go @@ -165,6 +165,11 @@ func (in *ObjectSpec) DeepCopyInto(out *ObjectSpec) { } } out.Readiness = in.Readiness + if in.Watch != nil { + in, out := &in.Watch, &out.Watch + *out = new(bool) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectSpec. diff --git a/apis/object/v1alpha2/types.go b/apis/object/v1alpha2/types.go index 75210ed8..fb8fb136 100644 --- a/apis/object/v1alpha2/types.go +++ b/apis/object/v1alpha2/types.go @@ -97,6 +97,14 @@ type ObjectSpec struct { ForProvider ObjectParameters `json:"forProvider"` References []Reference `json:"references,omitempty"` Readiness Readiness `json:"readiness,omitempty"` + // Watch enables watching the referenced or managed kubernetes resources. + // + // THIS IS AN ALPHA FIELD. Do not use it in production. It is not honored + // unless the relevant Crossplane feature flag is enabled, and may be + // changed or removed without notice. + // +optional + // +kubebuilder:default=false + Watch *bool `json:"watch,omitempty"` } // ReadinessPolicy defines how the Object's readiness condition should be computed. 
diff --git a/apis/object/v1alpha2/zz_generated.deepcopy.go b/apis/object/v1alpha2/zz_generated.deepcopy.go index c0d78570..ef2e20e8 100644 --- a/apis/object/v1alpha2/zz_generated.deepcopy.go +++ b/apis/object/v1alpha2/zz_generated.deepcopy.go @@ -164,6 +164,11 @@ func (in *ObjectSpec) DeepCopyInto(out *ObjectSpec) { } } out.Readiness = in.Readiness + if in.Watch != nil { + in, out := &in.Watch, &out.Watch + *out = new(bool) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectSpec. diff --git a/examples/object/object.yaml b/examples/object/object.yaml index 255e5b7b..b028dd23 100644 --- a/examples/object/object.yaml +++ b/examples/object/object.yaml @@ -3,6 +3,10 @@ kind: Object metadata: name: sample-namespace spec: + # Watch for changes to the Namespace object. + # Watching resources is an alpha feature and needs to be enabled with --enable-watches + # in the provider to get this configuration working. + # watch: true forProvider: manifest: apiVersion: v1 diff --git a/examples/object/references/patches-from-resource.yaml b/examples/object/references/patches-from-resource.yaml index 843aaf3d..3f06366b 100644 --- a/examples/object/references/patches-from-resource.yaml +++ b/examples/object/references/patches-from-resource.yaml @@ -4,6 +4,10 @@ kind: Object metadata: name: foo spec: + # Watch for changes to the Namespace object. + # Watching resources is an alpha feature and needs to be enabled with --enable-watches + # in the provider to get this configuration working. 
+ # watch: true references: # Use patchesFrom to patch field from other k8s resource to this object - patchesFrom: diff --git a/internal/controller/object/object.go b/internal/controller/object/object.go index 90422bdf..00b6b8e0 100644 --- a/internal/controller/object/object.go +++ b/internal/controller/object/object.go @@ -36,6 +36,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/rest" "k8s.io/client-go/util/workqueue" + "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" runtimeevent "sigs.k8s.io/controller-runtime/pkg/event" @@ -258,7 +259,7 @@ func (c *external) Observe(ctx context.Context, mg resource.Managed) (managed.Ex return managed.ExternalObservation{}, err } - if c.kindObserver != nil { + if c.shouldWatch(cr) { c.kindObserver.WatchResources(c.client.GetConfig(), cr.Spec.ProviderConfigReference.Name, desired.GroupVersionKind()) } @@ -349,7 +350,7 @@ func (c *external) Delete(ctx context.Context, mg resource.Managed) error { return err } - if c.kindObserver != nil { + if c.shouldWatch(cr) { c.kindObserver.StopWatchingResources(ctx, cr.Spec.ProviderConfigReference.Name, obj.GroupVersionKind()) if len(cr.Spec.References) > 0 { gvks := make([]schema.GroupVersionKind, 0, len(cr.Spec.References)) @@ -534,7 +535,7 @@ func (c *external) resolveReferencies(ctx context.Context, obj *v1alpha2.Object) }) } - if c.kindObserver != nil { + if c.shouldWatch(obj) { // Referenced resources always live on the control plane (i.e. local cluster), // so we don't pass an extra rest config (defaulting local rest config) // or provider config with the watch call. 
@@ -715,6 +716,10 @@ func connectionDetails(ctx context.Context, kube client.Client, connDetails []v1 return mcd, nil } +func (c *external) shouldWatch(cr *v1alpha2.Object) bool { + return c.kindObserver != nil && ptr.Deref(cr.Spec.Watch, false) +} + func unstructuredFromObjectRef(r v1.ObjectReference) unstructured.Unstructured { u := unstructured.Unstructured{} u.SetAPIVersion(r.APIVersion) diff --git a/package/crds/kubernetes.crossplane.io_objects.yaml b/package/crds/kubernetes.crossplane.io_objects.yaml index 4a652679..f3aded13 100644 --- a/package/crds/kubernetes.crossplane.io_objects.yaml +++ b/package/crds/kubernetes.crossplane.io_objects.yaml @@ -400,6 +400,16 @@ spec: type: string type: object type: array + watch: + default: false + description: |- + Watch enables watching the referenced or managed kubernetes resources. + + + THIS IS AN ALPHA FIELD. Do not use it in production. It is not honored + unless the relevant Crossplane feature flag is enabled, and may be + changed or removed without notice. + type: boolean writeConnectionSecretToRef: description: |- WriteConnectionSecretToReference specifies the namespace and name of a @@ -834,6 +844,16 @@ spec: type: string type: object type: array + watch: + default: false + description: |- + Watch enables watching the referenced or managed kubernetes resources. + + + THIS IS AN ALPHA FIELD. Do not use it in production. It is not honored + unless the relevant Crossplane feature flag is enabled, and may be + changed or removed without notice. 
+ type: boolean writeConnectionSecretToRef: description: |- WriteConnectionSecretToReference specifies the namespace and name of a From bf010873171dd1fefcec48bccf2f25dcd460cc04 Mon Sep 17 00:00:00 2001 From: Hasan Turken Date: Sat, 4 May 2024 09:45:27 +0300 Subject: [PATCH 13/19] Better naming for index functions Signed-off-by: Hasan Turken --- internal/controller/object/indexes.go | 40 ++++++++++++------------- internal/controller/object/informers.go | 24 ++++++++++++--- internal/controller/object/object.go | 4 +-- internal/features/features.go | 23 +++++++------- 4 files changed, 55 insertions(+), 36 deletions(-) diff --git a/internal/controller/object/indexes.go b/internal/controller/object/indexes.go index 2bddca7f..25f28283 100644 --- a/internal/controller/object/indexes.go +++ b/internal/controller/object/indexes.go @@ -1,5 +1,5 @@ /* -Copyright 2023 The Crossplane Authors. +Copyright 2024 The Crossplane Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -42,13 +42,14 @@ const ( ) var ( - _ client.IndexerFunc = IndexReferencedResourceRefGVKs - _ client.IndexerFunc = IndexReferencesResourcesRefs + _ client.IndexerFunc = IndexByProviderGVK + _ client.IndexerFunc = IndexByProviderNamespacedNameGVK ) -// IndexReferencedResourceRefGVKs assumes the passed object is an Object. It -// returns gvk keys for every resource referenced or managed by the Object. -func IndexReferencedResourceRefGVKs(o client.Object) []string { +// IndexByProviderGVK assumes the passed object is an Object. It returns keys +// with "ProviderConfig + GVK" for every resource referenced or managed by the +// Object. 
+func IndexByProviderGVK(o client.Object) []string { obj, ok := o.(*v1alpha2.Object) if !ok { return nil // should never happen @@ -60,27 +61,28 @@ func IndexReferencedResourceRefGVKs(o client.Object) []string { for _, ref := range refs { refAPIVersion, refKind, _, _ := getReferenceInfo(ref) group, version := parseAPIVersion(refAPIVersion) - // References are always on control plane, so we don't pass the config name. - keys = append(keys, refKeyGKV("", refKind, group, version)) + providerConfig := "" // references are always local (i.e. on the control plane), which we represent as an empty provider config. + keys = append(keys, refKeyProviderGVK(providerConfig, refKind, group, version)) } // Index the desired object. // We don't expect errors here, as the getDesired function is already called // in the reconciler and the desired object already validated. d, _ := getDesired(obj) - keys = append(keys, refKeyGKV(obj.Spec.ProviderConfigReference.Name, d.GetKind(), d.GroupVersionKind().Group, d.GroupVersionKind().Version)) // unification is done by the informer. + keys = append(keys, refKeyProviderGVK(obj.Spec.ProviderConfigReference.Name, d.GetKind(), d.GroupVersionKind().Group, d.GroupVersionKind().Version)) // unification is done by the informer. // unification is done by the informer. return keys } -func refKeyGKV(providerConfig, kind, group, version string) string { +func refKeyProviderGVK(providerConfig, kind, group, version string) string { return fmt.Sprintf("%s.%s.%s.%s", providerConfig, kind, group, version) } -// IndexReferencesResourcesRefs assumes the passed object is an Object. It -// returns keys for every resource referenced or managed by the Object. -func IndexReferencesResourcesRefs(o client.Object) []string { +// IndexByProviderNamespacedNameGVK assumes the passed object is an Object. It +// returns keys with "ProviderConfig + NamespacedName + GVK" for every resource +// referenced or managed by the Object. 
+func IndexByProviderNamespacedNameGVK(o client.Object) []string { obj, ok := o.(*v1alpha2.Object) if !ok { return nil // should never happen @@ -91,30 +93,28 @@ func IndexReferencesResourcesRefs(o client.Object) []string { keys := make([]string, 0, len(refs)) for _, ref := range refs { refAPIVersion, refKind, refNamespace, refName := getReferenceInfo(ref) - // References are always on control plane, so we don't pass the provider config name. - keys = append(keys, refKey("", refNamespace, refName, refKind, refAPIVersion)) + providerConfig := "" // references are always local (i.e. on the control plane), which we represent as an empty provider config. + keys = append(keys, refKeyProviderNamespacedNameGVK(providerConfig, refNamespace, refName, refKind, refAPIVersion)) } // Index the desired object. // We don't expect errors here, as the getDesired function is already called // in the reconciler and the desired object already validated. d, _ := getDesired(obj) - keys = append(keys, refKey(obj.Spec.ProviderConfigReference.Name, d.GetNamespace(), d.GetName(), d.GetKind(), d.GetAPIVersion())) // unification is done by the informer. + keys = append(keys, refKeyProviderNamespacedNameGVK(obj.Spec.ProviderConfigReference.Name, d.GetNamespace(), d.GetName(), d.GetKind(), d.GetAPIVersion())) // unification is done by the informer. return keys } -func refKey(providerConfig, ns, name, kind, apiVersion string) string { +func refKeyProviderNamespacedNameGVK(providerConfig, ns, name, kind, apiVersion string) string { return fmt.Sprintf("%s.%s.%s.%s.%s", providerConfig, name, ns, kind, apiVersion) } func enqueueObjectsForReferences(ca cache.Cache, log logging.Logger) func(ctx context.Context, ev runtimeevent.GenericEvent, q workqueue.RateLimitingInterface) { return func(ctx context.Context, ev runtimeevent.GenericEvent, q workqueue.RateLimitingInterface) { - // "pc" is the provider pc name. It will be empty for referenced - // resources, as they are always on the control plane. 
pc, _ := ctx.Value(keyProviderConfigName).(string) rGVK := ev.Object.GetObjectKind().GroupVersionKind() - key := refKey(pc, ev.Object.GetNamespace(), ev.Object.GetName(), rGVK.Kind, rGVK.GroupVersion().String()) + key := refKeyProviderNamespacedNameGVK(pc, ev.Object.GetNamespace(), ev.Object.GetName(), rGVK.Kind, rGVK.GroupVersion().String()) objects := v1alpha2.ObjectList{} if err := ca.List(ctx, &objects, client.MatchingFields{resourceRefsIndex: key}); err != nil { diff --git a/internal/controller/object/informers.go b/internal/controller/object/informers.go index ce93064b..af3a76c7 100644 --- a/internal/controller/object/informers.go +++ b/internal/controller/object/informers.go @@ -1,3 +1,19 @@ +/* +Copyright 2024 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + package object import ( @@ -193,8 +209,8 @@ func (i *resourceInformers) StopWatchingResources(ctx context.Context, providerC } // Check if there are any other objects referencing this resource GVK. 
list := v1alpha2.ObjectList{} - if err := i.objectsCache.List(ctx, &list, client.MatchingFields{resourceRefGVKsIndex: refKeyGKV(providerConfig, gvk.Kind, gvk.Group, gvk.Version)}); err != nil { - i.log.Debug("cannot list objects referencing a certain resource GVK", "error", err, "fieldSelector", resourceRefGVKsIndex+"="+refKeyGKV(providerConfig, gvk.Kind, gvk.Group, gvk.Version)) + if err := i.objectsCache.List(ctx, &list, client.MatchingFields{resourceRefGVKsIndex: refKeyProviderGVK(providerConfig, gvk.Kind, gvk.Group, gvk.Version)}); err != nil { + i.log.Debug("cannot list objects referencing a certain resource GVK", "error", err, "fieldSelector", resourceRefGVKsIndex+"="+refKeyProviderGVK(providerConfig, gvk.Kind, gvk.Group, gvk.Version)) } inUse := false @@ -230,8 +246,8 @@ func (i *resourceInformers) garbageCollectResourceInformers(ctx context.Context) i.log.Debug("Running garbage collection for resource informers", "count", len(i.resourceCaches)) for gh, ca := range i.resourceCaches { list := v1alpha2.ObjectList{} - if err := i.objectsCache.List(ctx, &list, client.MatchingFields{resourceRefGVKsIndex: refKeyGKV(gh.providerConfig, gh.gvk.Kind, gh.gvk.Group, gh.gvk.Version)}); err != nil { - i.log.Debug("cannot list objects referencing a certain resource GVK", "error", err, "fieldSelector", resourceRefGVKsIndex+"="+refKeyGKV(gh.providerConfig, gh.gvk.Kind, gh.gvk.Group, gh.gvk.Version)) + if err := i.objectsCache.List(ctx, &list, client.MatchingFields{resourceRefGVKsIndex: refKeyProviderGVK(gh.providerConfig, gh.gvk.Kind, gh.gvk.Group, gh.gvk.Version)}); err != nil { + i.log.Debug("cannot list objects referencing a certain resource GVK", "error", err, "fieldSelector", resourceRefGVKsIndex+"="+refKeyProviderGVK(gh.providerConfig, gh.gvk.Kind, gh.gvk.Group, gh.gvk.Version)) } if len(list.Items) > 0 { diff --git a/internal/controller/object/object.go b/internal/controller/object/object.go index 00b6b8e0..769d7c98 100644 --- a/internal/controller/object/object.go 
+++ b/internal/controller/object/object.go @@ -148,10 +148,10 @@ func Setup(mgr ctrl.Manager, o controller.Options, sanitizeSecrets bool, pollJit if o.Features.Enabled(features.EnableAlphaWatches) { ca := mgr.GetCache() - if err := ca.IndexField(context.Background(), &v1alpha2.Object{}, resourceRefGVKsIndex, IndexReferencedResourceRefGVKs); err != nil { + if err := ca.IndexField(context.Background(), &v1alpha2.Object{}, resourceRefGVKsIndex, IndexByProviderGVK); err != nil { return errors.Wrap(err, "cannot add index for object reference GVKs") } - if err := ca.IndexField(context.Background(), &v1alpha2.Object{}, resourceRefsIndex, IndexReferencesResourcesRefs); err != nil { + if err := ca.IndexField(context.Background(), &v1alpha2.Object{}, resourceRefsIndex, IndexByProviderNamespacedNameGVK); err != nil { return errors.Wrap(err, "cannot add index for object references") } diff --git a/internal/features/features.go b/internal/features/features.go index 3c294983..41b4a4bf 100644 --- a/internal/features/features.go +++ b/internal/features/features.go @@ -1,14 +1,17 @@ /* - Copyright 2022 The Crossplane Authors. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. +Copyright 2024 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ package features From bc5a3b20753f79394fd17ed0ae896f2f424812c2 Mon Sep 17 00:00:00 2001 From: Hasan Turken Date: Thu, 9 May 2024 15:29:15 +0300 Subject: [PATCH 14/19] Do not use pointer for optional bool watch Signed-off-by: Hasan Turken --- apis/object/v1alpha1/types.go | 2 +- apis/object/v1alpha1/zz_generated.deepcopy.go | 5 ----- apis/object/v1alpha2/types.go | 2 +- apis/object/v1alpha2/zz_generated.deepcopy.go | 5 ----- internal/controller/object/object.go | 3 +-- 5 files changed, 3 insertions(+), 14 deletions(-) diff --git a/apis/object/v1alpha1/types.go b/apis/object/v1alpha1/types.go index 76f65f55..dfed2914 100644 --- a/apis/object/v1alpha1/types.go +++ b/apis/object/v1alpha1/types.go @@ -131,7 +131,7 @@ type ObjectSpec struct { // changed or removed without notice. // +optional // +kubebuilder:default=false - Watch *bool `json:"watch,omitempty"` + Watch bool `json:"watch,omitempty"` } // ReadinessPolicy defines how the Object's readiness condition should be computed. diff --git a/apis/object/v1alpha1/zz_generated.deepcopy.go b/apis/object/v1alpha1/zz_generated.deepcopy.go index b0968cc2..aad34718 100644 --- a/apis/object/v1alpha1/zz_generated.deepcopy.go +++ b/apis/object/v1alpha1/zz_generated.deepcopy.go @@ -165,11 +165,6 @@ func (in *ObjectSpec) DeepCopyInto(out *ObjectSpec) { } } out.Readiness = in.Readiness - if in.Watch != nil { - in, out := &in.Watch, &out.Watch - *out = new(bool) - **out = **in - } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectSpec. 
diff --git a/apis/object/v1alpha2/types.go b/apis/object/v1alpha2/types.go index fb8fb136..426def82 100644 --- a/apis/object/v1alpha2/types.go +++ b/apis/object/v1alpha2/types.go @@ -104,7 +104,7 @@ type ObjectSpec struct { // changed or removed without notice. // +optional // +kubebuilder:default=false - Watch *bool `json:"watch,omitempty"` + Watch bool `json:"watch,omitempty"` } // ReadinessPolicy defines how the Object's readiness condition should be computed. diff --git a/apis/object/v1alpha2/zz_generated.deepcopy.go b/apis/object/v1alpha2/zz_generated.deepcopy.go index ef2e20e8..c0d78570 100644 --- a/apis/object/v1alpha2/zz_generated.deepcopy.go +++ b/apis/object/v1alpha2/zz_generated.deepcopy.go @@ -164,11 +164,6 @@ func (in *ObjectSpec) DeepCopyInto(out *ObjectSpec) { } } out.Readiness = in.Readiness - if in.Watch != nil { - in, out := &in.Watch, &out.Watch - *out = new(bool) - **out = **in - } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectSpec. 
diff --git a/internal/controller/object/object.go b/internal/controller/object/object.go index 769d7c98..2d8d4e32 100644 --- a/internal/controller/object/object.go +++ b/internal/controller/object/object.go @@ -36,7 +36,6 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/rest" "k8s.io/client-go/util/workqueue" - "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" runtimeevent "sigs.k8s.io/controller-runtime/pkg/event" @@ -717,7 +716,7 @@ func connectionDetails(ctx context.Context, kube client.Client, connDetails []v1 } func (c *external) shouldWatch(cr *v1alpha2.Object) bool { - return c.kindObserver != nil && ptr.Deref(cr.Spec.Watch, false) + return c.kindObserver != nil && cr.Spec.Watch } func unstructuredFromObjectRef(r v1.ObjectReference) unstructured.Unstructured { From 39f37b4422fe6a1a2e23a704e673db275f4fa52b Mon Sep 17 00:00:00 2001 From: Hasan Turken Date: Thu, 9 May 2024 22:25:24 +0300 Subject: [PATCH 15/19] Double check if cache is started already Signed-off-by: Hasan Turken --- internal/controller/object/informers.go | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/internal/controller/object/informers.go b/internal/controller/object/informers.go index af3a76c7..b38fe235 100644 --- a/internal/controller/object/informers.go +++ b/internal/controller/object/informers.go @@ -183,13 +183,21 @@ func (i *resourceInformers) WatchResources(rc *rest.Config, providerConfig strin }() i.lock.Lock() + _, ok := i.resourceCaches[gvkWithConfig{providerConfig: providerConfig, gvk: gvk}] + if ok { + // Another goroutine already started the cache in parallel. We + // should cancel the new one. 
+ cancelFn() + i.lock.Unlock() + continue + } i.resourceCaches[gvkWithConfig{providerConfig: providerConfig, gvk: gvk}] = resourceCache{ cache: ca, cancelFn: cancelFn, } i.lock.Unlock() - // wait for in the background, and only when synced add to the routed cache + // wait for in the background. go func() { if synced := ca.WaitForCacheSync(ctx); synced { log.Debug("Resource cache synced") @@ -211,6 +219,7 @@ func (i *resourceInformers) StopWatchingResources(ctx context.Context, providerC list := v1alpha2.ObjectList{} if err := i.objectsCache.List(ctx, &list, client.MatchingFields{resourceRefGVKsIndex: refKeyProviderGVK(providerConfig, gvk.Kind, gvk.Group, gvk.Version)}); err != nil { i.log.Debug("cannot list objects referencing a certain resource GVK", "error", err, "fieldSelector", resourceRefGVKsIndex+"="+refKeyProviderGVK(providerConfig, gvk.Kind, gvk.Group, gvk.Version)) + continue } inUse := false @@ -248,6 +257,7 @@ func (i *resourceInformers) garbageCollectResourceInformers(ctx context.Context) list := v1alpha2.ObjectList{} if err := i.objectsCache.List(ctx, &list, client.MatchingFields{resourceRefGVKsIndex: refKeyProviderGVK(gh.providerConfig, gh.gvk.Kind, gh.gvk.Group, gh.gvk.Version)}); err != nil { i.log.Debug("cannot list objects referencing a certain resource GVK", "error", err, "fieldSelector", resourceRefGVKsIndex+"="+refKeyProviderGVK(gh.providerConfig, gh.gvk.Kind, gh.gvk.Group, gh.gvk.Version)) + continue } if len(list.Items) > 0 { From 94541bdaec57be40c48aa74edd6ffefe8e07e85e Mon Sep 17 00:00:00 2001 From: Hasan Turken Date: Fri, 10 May 2024 12:35:09 +0300 Subject: [PATCH 16/19] Refactor clients package Signed-off-by: Hasan Turken --- internal/clients/clients.go | 198 +-------- internal/clients/kube/kube.go | 175 ++++++++ internal/controller/object/object.go | 23 +- internal/controller/object/object_test.go | 406 ++++++++---------- .../observedobjectcollection/reconciler.go | 9 +- .../reconciler_test.go | 11 +- 6 files changed, 370 insertions(+), 
452 deletions(-) create mode 100644 internal/clients/kube/kube.go diff --git a/internal/clients/clients.go b/internal/clients/clients.go index 34b22d30..2a31392e 100644 --- a/internal/clients/clients.go +++ b/internal/clients/clients.go @@ -1,5 +1,5 @@ /* -Copyright 2021 The Crossplane Authors. +Copyright 2024 The Crossplane Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -12,199 +12,3 @@ limitations under the License. */ package clients - -import ( - "context" - - "github.com/pkg/errors" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/rest" - "k8s.io/client-go/tools/clientcmd" - "k8s.io/client-go/tools/clientcmd/api" - "sigs.k8s.io/controller-runtime/pkg/client" - - xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" - "github.com/crossplane/crossplane-runtime/pkg/resource" - - "github.com/crossplane-contrib/provider-kubernetes/apis/v1alpha1" - "github.com/crossplane-contrib/provider-kubernetes/internal/clients/azure" - "github.com/crossplane-contrib/provider-kubernetes/internal/clients/gke" -) - -const ( - errGetPC = "cannot get ProviderConfig" - errGetCreds = "cannot get credentials" - errCreateRestConfig = "cannot create new REST config using provider secret" - errExtractGoogleCredentials = "cannot extract Google Application Credentials" - errInjectGoogleCredentials = "cannot wrap REST client with Google Application Credentials" - errExtractAzureCredentials = "failed to extract Azure Application Credentials" - errInjectAzureCredentials = "failed to wrap REST client with Azure Application Credentials" -) - -// NewRESTConfig returns a rest config given a secret with connection information. 
-func NewRESTConfig(kubeconfig []byte) (*rest.Config, error) { - ac, err := clientcmd.Load(kubeconfig) - if err != nil { - return nil, errors.Wrap(err, "failed to load kubeconfig") - } - return restConfigFromAPIConfig(ac) -} - -// NewKubeClient returns a kubernetes client given a secret with connection -// information. -func NewKubeClient(config *rest.Config) (client.Client, error) { - kc, err := client.New(config, client.Options{}) - if err != nil { - return nil, errors.Wrap(err, "cannot create Kubernetes client") - } - - return kc, nil -} - -func restConfigFromAPIConfig(c *api.Config) (*rest.Config, error) { - if c.CurrentContext == "" { - return nil, errors.New("currentContext not set in kubeconfig") - } - ctx := c.Contexts[c.CurrentContext] - cluster := c.Clusters[ctx.Cluster] - if cluster == nil { - return nil, errors.Errorf("cluster for currentContext (%s) not found", c.CurrentContext) - } - user := c.AuthInfos[ctx.AuthInfo] - if user == nil { - // We don't require a user because it's possible user - // authorization configuration will be loaded from a separate - // set of identity credentials (e.g. Google Application Creds). - user = &api.AuthInfo{} - } - config := &rest.Config{ - Host: cluster.Server, - Username: user.Username, - Password: user.Password, - BearerToken: user.Token, - BearerTokenFile: user.TokenFile, - Impersonate: rest.ImpersonationConfig{ - UserName: user.Impersonate, - Groups: user.ImpersonateGroups, - Extra: user.ImpersonateUserExtra, - }, - AuthProvider: user.AuthProvider, - ExecProvider: user.Exec, - TLSClientConfig: rest.TLSClientConfig{ - Insecure: cluster.InsecureSkipTLSVerify, - ServerName: cluster.TLSServerName, - CertData: user.ClientCertificateData, - KeyData: user.ClientKeyData, - CAData: cluster.CertificateAuthorityData, - }, - } - - // NOTE(tnthornton): these values match the burst and QPS values in kubectl. 
- // xref: https://github.com/kubernetes/kubernetes/pull/105520 - config.Burst = 300 - config.QPS = 50 - - return config, nil -} - -// RestConfigGetter is an interface that provides a REST config. -type RestConfigGetter interface { - // GetConfig returns an initialized Config - GetConfig() *rest.Config -} - -// ClusterClient is a client that can be used to interact with a Kubernetes -// cluster including getting its rest config. -type ClusterClient interface { - client.Client - resource.Applicator - RestConfigGetter -} - -// ApplicatorClientWithConfig is a ClusterClient that also has a rest config. -type ApplicatorClientWithConfig struct { - resource.ClientApplicator - config *rest.Config -} - -// GetConfig returns the rest config for the client. -func (c *ApplicatorClientWithConfig) GetConfig() *rest.Config { - return c.config -} - -// ClientForProvider returns the client for the given provider config -func ClientForProvider(ctx context.Context, inclusterClient client.Client, providerConfigName string) (ClusterClient, error) { //nolint:gocyclo - pc := &v1alpha1.ProviderConfig{} - if err := inclusterClient.Get(ctx, types.NamespacedName{Name: providerConfigName}, pc); err != nil { - return nil, errors.Wrap(err, errGetPC) - } - - var rc *rest.Config - var err error - - switch cd := pc.Spec.Credentials; cd.Source { //nolint:exhaustive - case xpv1.CredentialsSourceInjectedIdentity: - rc, err = rest.InClusterConfig() - if err != nil { - return nil, errors.Wrap(err, errCreateRestConfig) - } - default: - kc, err := resource.CommonCredentialExtractor(ctx, cd.Source, inclusterClient, cd.CommonCredentialSelectors) - if err != nil { - return nil, errors.Wrap(err, errGetCreds) - } - - if rc, err = NewRESTConfig(kc); err != nil { - return nil, errors.Wrap(err, errCreateRestConfig) - } - } - - if id := pc.Spec.Identity; id != nil { - switch id.Type { - case v1alpha1.IdentityTypeGoogleApplicationCredentials: - switch id.Source { //nolint:exhaustive - case 
xpv1.CredentialsSourceInjectedIdentity: - if err := gke.WrapRESTConfig(ctx, rc, nil, gke.DefaultScopes...); err != nil { - return nil, errors.Wrap(err, errInjectGoogleCredentials) - } - default: - creds, err := resource.CommonCredentialExtractor(ctx, id.Source, inclusterClient, id.CommonCredentialSelectors) - if err != nil { - return nil, errors.Wrap(err, errExtractGoogleCredentials) - } - - if err := gke.WrapRESTConfig(ctx, rc, creds, gke.DefaultScopes...); err != nil { - return nil, errors.Wrap(err, errInjectGoogleCredentials) - } - } - case v1alpha1.IdentityTypeAzureServicePrincipalCredentials: - switch id.Source { //nolint:exhaustive - case xpv1.CredentialsSourceInjectedIdentity: - return nil, errors.Errorf("%s is not supported as identity source for identity type %s", - xpv1.CredentialsSourceInjectedIdentity, v1alpha1.IdentityTypeAzureServicePrincipalCredentials) - default: - creds, err := resource.CommonCredentialExtractor(ctx, id.Source, inclusterClient, id.CommonCredentialSelectors) - if err != nil { - return nil, errors.Wrap(err, errExtractAzureCredentials) - } - - if err := azure.WrapRESTConfig(ctx, rc, creds); err != nil { - return nil, errors.Wrap(err, errInjectAzureCredentials) - } - } - default: - return nil, errors.Errorf("unknown identity type: %s", id.Type) - } - } - k, err := NewKubeClient(rc) - if err != nil { - return nil, errors.Wrap(err, "cannot create Kubernetes client") - } - return &ApplicatorClientWithConfig{ - ClientApplicator: resource.ClientApplicator{ - Client: k, - Applicator: resource.NewAPIPatchingApplicator(k), - }, - config: rc, - }, nil -} diff --git a/internal/clients/kube/kube.go b/internal/clients/kube/kube.go new file mode 100644 index 00000000..f7420045 --- /dev/null +++ b/internal/clients/kube/kube.go @@ -0,0 +1,175 @@ +/* +Copyright 2024 The Crossplane Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kube + +import ( + "context" + + "github.com/pkg/errors" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + "k8s.io/client-go/tools/clientcmd/api" + "sigs.k8s.io/controller-runtime/pkg/client" + + xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + "github.com/crossplane/crossplane-runtime/pkg/resource" + + "github.com/crossplane-contrib/provider-kubernetes/apis/v1alpha1" + "github.com/crossplane-contrib/provider-kubernetes/internal/clients/azure" + "github.com/crossplane-contrib/provider-kubernetes/internal/clients/gke" +) + +const ( + errGetPC = "cannot get ProviderConfig" + errGetCreds = "cannot get credentials" + errCreateRestConfig = "cannot create new REST config using provider secret" + errExtractGoogleCredentials = "cannot extract Google Application Credentials" + errInjectGoogleCredentials = "cannot wrap REST client with Google Application Credentials" + errExtractAzureCredentials = "failed to extract Azure Application Credentials" + errInjectAzureCredentials = "failed to wrap REST client with Azure Application Credentials" +) + +// ClientForProvider returns the client and *rest.config for the given provider +// config. 
+func ClientForProvider(ctx context.Context, inclusterClient client.Client, providerConfigName string) (client.Client, *rest.Config, error) { //nolint:gocyclo + rc, err := configForProvider(ctx, inclusterClient, providerConfigName) + if err != nil { + return nil, nil, errors.Wrapf(err, "cannot get REST config for provider %q", providerConfigName) + } + k, err := client.New(rc, client.Options{}) + if err != nil { + return nil, nil, errors.Wrapf(err, "cannot create Kubernetes client for provider %q", providerConfigName) + } + return k, rc, nil +} + +// ConfigForProvider returns the *rest.config for the given provider config. +func configForProvider(ctx context.Context, local client.Client, providerConfigName string) (*rest.Config, error) { // nolint:gocyclo + pc := &v1alpha1.ProviderConfig{} + if err := local.Get(ctx, types.NamespacedName{Name: providerConfigName}, pc); err != nil { + return nil, errors.Wrap(err, errGetPC) + } + + var rc *rest.Config + var err error + + switch cd := pc.Spec.Credentials; cd.Source { //nolint:exhaustive + case xpv1.CredentialsSourceInjectedIdentity: + rc, err = rest.InClusterConfig() + if err != nil { + return nil, errors.Wrap(err, errCreateRestConfig) + } + default: + kc, err := resource.CommonCredentialExtractor(ctx, cd.Source, local, cd.CommonCredentialSelectors) + if err != nil { + return nil, errors.Wrap(err, errGetCreds) + } + + ac, err := clientcmd.Load(kc) + if err != nil { + return nil, errors.Wrap(err, "failed to load kubeconfig") + } + + if rc, err = fromAPIConfig(ac); err != nil { + return nil, errors.Wrap(err, errCreateRestConfig) + } + } + + if id := pc.Spec.Identity; id != nil { + switch id.Type { + case v1alpha1.IdentityTypeGoogleApplicationCredentials: + switch id.Source { //nolint:exhaustive + case xpv1.CredentialsSourceInjectedIdentity: + if err := gke.WrapRESTConfig(ctx, rc, nil, gke.DefaultScopes...); err != nil { + return nil, errors.Wrap(err, errInjectGoogleCredentials) + } + default: + creds, err := 
resource.CommonCredentialExtractor(ctx, id.Source, local, id.CommonCredentialSelectors) + if err != nil { + return nil, errors.Wrap(err, errExtractGoogleCredentials) + } + + if err := gke.WrapRESTConfig(ctx, rc, creds, gke.DefaultScopes...); err != nil { + return nil, errors.Wrap(err, errInjectGoogleCredentials) + } + } + case v1alpha1.IdentityTypeAzureServicePrincipalCredentials: + switch id.Source { //nolint:exhaustive + case xpv1.CredentialsSourceInjectedIdentity: + return nil, errors.Errorf("%s is not supported as identity source for identity type %s", + xpv1.CredentialsSourceInjectedIdentity, v1alpha1.IdentityTypeAzureServicePrincipalCredentials) + default: + creds, err := resource.CommonCredentialExtractor(ctx, id.Source, local, id.CommonCredentialSelectors) + if err != nil { + return nil, errors.Wrap(err, errExtractAzureCredentials) + } + + if err := azure.WrapRESTConfig(ctx, rc, creds); err != nil { + return nil, errors.Wrap(err, errInjectAzureCredentials) + } + } + default: + return nil, errors.Errorf("unknown identity type: %s", id.Type) + } + } + + return rc, nil +} + +func fromAPIConfig(c *api.Config) (*rest.Config, error) { + if c.CurrentContext == "" { + return nil, errors.New("currentContext not set in kubeconfig") + } + ctx := c.Contexts[c.CurrentContext] + cluster := c.Clusters[ctx.Cluster] + if cluster == nil { + return nil, errors.Errorf("cluster for currentContext (%s) not found", c.CurrentContext) + } + user := c.AuthInfos[ctx.AuthInfo] + if user == nil { + // We don't require a user because it's possible user + // authorization configuration will be loaded from a separate + // set of identity credentials (e.g. Google Application Creds). 
+ user = &api.AuthInfo{} + } + config := &rest.Config{ + Host: cluster.Server, + Username: user.Username, + Password: user.Password, + BearerToken: user.Token, + BearerTokenFile: user.TokenFile, + Impersonate: rest.ImpersonationConfig{ + UserName: user.Impersonate, + Groups: user.ImpersonateGroups, + Extra: user.ImpersonateUserExtra, + }, + AuthProvider: user.AuthProvider, + ExecProvider: user.Exec, + TLSClientConfig: rest.TLSClientConfig{ + Insecure: cluster.InsecureSkipTLSVerify, + ServerName: cluster.TLSServerName, + CertData: user.ClientCertificateData, + KeyData: user.ClientKeyData, + CAData: cluster.CertificateAuthorityData, + }, + } + + // NOTE(tnthornton): these values match the burst and QPS values in kubectl. + // xref: https://github.com/kubernetes/kubernetes/pull/105520 + config.Burst = 300 + config.QPS = 50 + + return config, nil +} diff --git a/internal/controller/object/object.go b/internal/controller/object/object.go index 2d8d4e32..d42026c1 100644 --- a/internal/controller/object/object.go +++ b/internal/controller/object/object.go @@ -55,7 +55,7 @@ import ( "github.com/crossplane-contrib/provider-kubernetes/apis/object/v1alpha2" apisv1alpha1 "github.com/crossplane-contrib/provider-kubernetes/apis/v1alpha1" - "github.com/crossplane-contrib/provider-kubernetes/internal/clients" + "github.com/crossplane-contrib/provider-kubernetes/internal/clients/kube" "github.com/crossplane-contrib/provider-kubernetes/internal/features" ) @@ -137,7 +137,7 @@ func Setup(mgr ctrl.Manager, o controller.Options, sanitizeSecrets bool, pollJit sanitizeSecrets: sanitizeSecrets, kube: mgr.GetClient(), usage: resource.NewProviderConfigUsageTracker(mgr.GetClient(), &apisv1alpha1.ProviderConfigUsage{}), - clientForProviderFn: clients.ClientForProvider, + clientForProviderFn: kube.ClientForProvider, } cb := ctrl.NewControllerManagedBy(mgr). 
@@ -196,7 +196,7 @@ type connector struct { kindObserver KindObserver - clientForProviderFn func(ctx context.Context, inclusterClient client.Client, providerConfigName string) (clients.ClusterClient, error) + clientForProviderFn func(ctx context.Context, local client.Client, providerConfigName string) (client.Client, *rest.Config, error) } func (c *connector) Connect(ctx context.Context, mg resource.Managed) (managed.ExternalClient, error) { //nolint:gocyclo @@ -212,15 +212,19 @@ func (c *connector) Connect(ctx context.Context, mg resource.Managed) (managed.E return nil, errors.Wrap(err, errTrackPCUsage) } - k, err := c.clientForProviderFn(ctx, c.kube, cr.GetProviderConfigReference().Name) + k, rc, err := c.clientForProviderFn(ctx, c.kube, cr.GetProviderConfigReference().Name) if err != nil { return nil, errors.Wrap(err, errNewKubernetesClient) } return &external{ - logger: c.logger, - client: k, + logger: c.logger, + client: resource.ClientApplicator{ + Client: k, + Applicator: resource.NewAPIPatchingApplicator(k), + }, + rest: rc, localClient: c.kube, sanitizeSecrets: c.sanitizeSecrets, @@ -230,8 +234,9 @@ func (c *connector) Connect(ctx context.Context, mg resource.Managed) (managed.E type external struct { logger logging.Logger - client clients.ClusterClient - // localClient is specifically used to connect to local cluster + client resource.ClientApplicator + rest *rest.Config + // localClient is specifically used to connect to local cluster, a.k.a control plane. 
localClient client.Client sanitizeSecrets bool @@ -259,7 +264,7 @@ func (c *external) Observe(ctx context.Context, mg resource.Managed) (managed.Ex } if c.shouldWatch(cr) { - c.kindObserver.WatchResources(c.client.GetConfig(), cr.Spec.ProviderConfigReference.Name, desired.GroupVersionKind()) + c.kindObserver.WatchResources(c.rest, cr.Spec.ProviderConfigReference.Name, desired.GroupVersionKind()) } observed := desired.DeepCopy() diff --git a/internal/controller/object/object_test.go b/internal/controller/object/object_test.go index 814d685a..048deb4d 100644 --- a/internal/controller/object/object_test.go +++ b/internal/controller/object/object_test.go @@ -32,6 +32,7 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/rest" "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" @@ -43,7 +44,6 @@ import ( "github.com/crossplane-contrib/provider-kubernetes/apis/object/v1alpha2" kubernetesv1alpha1 "github.com/crossplane-contrib/provider-kubernetes/apis/v1alpha1" - "github.com/crossplane-contrib/provider-kubernetes/internal/clients" ) const ( @@ -239,7 +239,7 @@ func Test_connector_Connect(t *testing.T) { type args struct { client client.Client - clientForProvider func(ctx context.Context, inclusterClient client.Client, providerConfigName string) (clients.ClusterClient, error) + clientForProvider client.Client usage resource.Tracker mg resource.Managed } @@ -269,40 +269,24 @@ func Test_connector_Connect(t *testing.T) { }, "Success": { args: args{ - clientForProvider: func(ctx context.Context, inclusterClient client.Client, providerConfigName string) (clients.ClusterClient, error) { - return &clients.ApplicatorClientWithConfig{ - ClientApplicator: resource.ClientApplicator{ - Client: &test.MockClient{}, - }, - }, nil - }, - usage: resource.TrackerFn(func(ctx context.Context, mg resource.Managed) error { return nil }), - mg: kubernetesObject(), + 
clientForProvider: &test.MockClient{}, + usage: resource.TrackerFn(func(ctx context.Context, mg resource.Managed) error { return nil }), + mg: kubernetesObject(), }, want: want{ err: nil, }, }, - "ErrorGettingClientForProvider": { - args: args{ - clientForProvider: func(ctx context.Context, inclusterClient client.Client, providerConfigName string) (clients.ClusterClient, error) { - return nil, errBoom - }, - usage: resource.TrackerFn(func(ctx context.Context, mg resource.Managed) error { return nil }), - mg: kubernetesObject(), - }, - want: want{ - err: errors.Wrap(errBoom, errNewKubernetesClient), - }, - }, } for name, tc := range cases { t.Run(name, func(t *testing.T) { c := &connector{ - logger: logging.NewNopLogger(), - kube: tc.args.client, - clientForProviderFn: tc.args.clientForProvider, - usage: tc.usage, + logger: logging.NewNopLogger(), + kube: tc.args.client, + clientForProviderFn: func(ctx context.Context, local client.Client, providerConfigName string) (client.Client, *rest.Config, error) { + return tc.args.clientForProvider, nil, nil + }, + usage: tc.usage, } _, gotErr := c.Connect(context.Background(), tc.args.mg) if diff := cmp.Diff(tc.want.err, gotErr, test.EquateErrors()); diff != "" { @@ -314,7 +298,7 @@ func Test_connector_Connect(t *testing.T) { func Test_helmExternal_Observe(t *testing.T) { type args struct { - client clients.ClusterClient + client resource.ClientApplicator mg resource.Managed } type want struct { @@ -336,11 +320,9 @@ func Test_helmExternal_Observe(t *testing.T) { "NoKubernetesObjectExists": { args: args{ mg: kubernetesObject(), - client: &clients.ApplicatorClientWithConfig{ - ClientApplicator: resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(kerrors.NewNotFound(schema.GroupResource{}, "")), - }, + client: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(kerrors.NewNotFound(schema.GroupResource{}, "")), }, }, }, @@ -362,11 +344,9 @@ func 
Test_helmExternal_Observe(t *testing.T) { "FailedToGet": { args: args{ mg: kubernetesObject(), - client: &clients.ApplicatorClientWithConfig{ - ClientApplicator: resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(errBoom), - }, + client: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(errBoom), }, }, }, @@ -377,14 +357,12 @@ func Test_helmExternal_Observe(t *testing.T) { "NoLastAppliedAnnotation": { args: args{ mg: kubernetesObject(), - client: &clients.ApplicatorClientWithConfig{ - ClientApplicator: resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { - *obj.(*unstructured.Unstructured) = *externalResource() - return nil - }), - }, + client: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { + *obj.(*unstructured.Unstructured) = *externalResource() + return nil + }), }, }, }, @@ -396,17 +374,15 @@ func Test_helmExternal_Observe(t *testing.T) { "NotUpToDate": { args: args{ mg: kubernetesObject(), - client: &clients.ApplicatorClientWithConfig{ - ClientApplicator: resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { - *obj.(*unstructured.Unstructured) = - *externalResourceWithLastAppliedConfigAnnotation( - `{"apiVersion":"v1","kind":"Namespace","metadata":{"name":"crossplane-system", "labels": {"old-label":"gone"}}}`, - ) - return nil - }), - }, + client: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { + *obj.(*unstructured.Unstructured) = + *externalResourceWithLastAppliedConfigAnnotation( + `{"apiVersion":"v1","kind":"Namespace","metadata":{"name":"crossplane-system", "labels": {"old-label":"gone"}}}`, + ) + return nil + }), }, }, }, @@ -422,17 +398,15 @@ func Test_helmExternal_Observe(t *testing.T) { "apiVersion": "v1", 
"kind": "Namespace" }`) }), - client: &clients.ApplicatorClientWithConfig{ - ClientApplicator: resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { - *obj.(*unstructured.Unstructured) = - *externalResourceWithLastAppliedConfigAnnotation( - `{"apiVersion":"v1","kind":"Namespace"}`, - ) - return nil - }), - }, + client: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { + *obj.(*unstructured.Unstructured) = + *externalResourceWithLastAppliedConfigAnnotation( + `{"apiVersion":"v1","kind":"Namespace"}`, + ) + return nil + }), }, }, }, @@ -448,14 +422,12 @@ func Test_helmExternal_Observe(t *testing.T) { "UpToDate": { args: args{ mg: kubernetesObject(), - client: &clients.ApplicatorClientWithConfig{ - ClientApplicator: resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { - *obj.(*unstructured.Unstructured) = *upToDateExternalResource() - return nil - }), - }, + client: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { + *obj.(*unstructured.Unstructured) = *upToDateExternalResource() + return nil + }), }, }, }, @@ -474,14 +446,12 @@ func Test_helmExternal_Observe(t *testing.T) { obj.Spec.References = objectReferences() obj.Spec.References[0].PatchesFrom.FieldPath = ptr.To("nonexistent_field") }), - client: &clients.ApplicatorClientWithConfig{ - ClientApplicator: resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { - *obj.(*unstructured.Unstructured) = *referenceObject() - return nil - }), - }, + client: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { + *obj.(*unstructured.Unstructured) = *referenceObject() + return nil + }), }, }, }, @@ -496,11 +466,9 @@ func 
Test_helmExternal_Observe(t *testing.T) { mg: kubernetesObject(func(obj *v1alpha2.Object) { obj.Spec.References = objectReferences() }), - client: &clients.ApplicatorClientWithConfig{ - ClientApplicator: resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(errBoom), - }, + client: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(errBoom), }, }, }, @@ -516,20 +484,18 @@ func Test_helmExternal_Observe(t *testing.T) { obj.ObjectMeta.DeletionTimestamp = &metav1.Time{Time: time.Now()} obj.Spec.References = objectReferences() }), - client: &clients.ApplicatorClientWithConfig{ - ClientApplicator: resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { - if key.Name == testReferenceObjectName { - *obj.(*unstructured.Unstructured) = *referenceObject() - return nil - } else if key.Name == externalResourceName { - return kerrors.NewNotFound(schema.GroupResource{}, "") - } - return errBoom - }, - MockUpdate: test.NewMockUpdateFn(nil), + client: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + if key.Name == testReferenceObjectName { + *obj.(*unstructured.Unstructured) = *referenceObject() + return nil + } else if key.Name == externalResourceName { + return kerrors.NewNotFound(schema.GroupResource{}, "") + } + return errBoom }, + MockUpdate: test.NewMockUpdateFn(nil), }, }, }, @@ -543,21 +509,19 @@ func Test_helmExternal_Observe(t *testing.T) { mg: kubernetesObject(func(obj *v1alpha2.Object) { obj.Spec.References = objectReferences() }), - client: &clients.ApplicatorClientWithConfig{ - ClientApplicator: resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { - if key.Name == testReferenceObjectName { - *obj.(*unstructured.Unstructured) = 
*referenceObject() - return nil - } else if key.Name == externalResourceName { - *obj.(*unstructured.Unstructured) = *upToDateExternalResource() - return nil - } - return errBoom - }, - MockUpdate: test.NewMockUpdateFn(nil), + client: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + if key.Name == testReferenceObjectName { + *obj.(*unstructured.Unstructured) = *referenceObject() + return nil + } else if key.Name == externalResourceName { + *obj.(*unstructured.Unstructured) = *upToDateExternalResource() + return nil + } + return errBoom }, + MockUpdate: test.NewMockUpdateFn(nil), }, }, }, @@ -575,11 +539,9 @@ func Test_helmExternal_Observe(t *testing.T) { mg: kubernetesObject(func(obj *v1alpha2.Object) { obj.Spec.References = []v1alpha2.Reference{{}} }), - client: &clients.ApplicatorClientWithConfig{ - ClientApplicator: resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil), - }, + client: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil), }, }, }, @@ -605,24 +567,22 @@ func Test_helmExternal_Observe(t *testing.T) { }, } }), - client: &clients.ApplicatorClientWithConfig{ - ClientApplicator: resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { - switch key.Name { - case externalResourceName: - *obj.(*unstructured.Unstructured) = *upToDateExternalResource() - case testSecretName: - *obj.(*unstructured.Unstructured) = unstructured.Unstructured{ - Object: map[string]interface{}{ - "data": map[string]interface{}{ - "db-password": "MTIzNDU=", - }, + client: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + switch key.Name { + case externalResourceName: + *obj.(*unstructured.Unstructured) = *upToDateExternalResource() + case 
testSecretName: + *obj.(*unstructured.Unstructured) = unstructured.Unstructured{ + Object: map[string]interface{}{ + "data": map[string]interface{}{ + "db-password": "MTIzNDU=", }, - } + }, } - return nil - }, + } + return nil }, }, }, @@ -653,18 +613,16 @@ func Test_helmExternal_Observe(t *testing.T) { }, } }), - client: &clients.ApplicatorClientWithConfig{ - ClientApplicator: resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { - switch key.Name { - case externalResourceName: - *obj.(*unstructured.Unstructured) = *upToDateExternalResource() - case testSecretName: - return errBoom - } - return nil - }, + client: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + switch key.Name { + case externalResourceName: + *obj.(*unstructured.Unstructured) = *upToDateExternalResource() + case testSecretName: + return errBoom + } + return nil }, }, }, @@ -678,17 +636,15 @@ func Test_helmExternal_Observe(t *testing.T) { mg: kubernetesObject(func(obj *v1alpha2.Object) { obj.Spec.ManagementPolicies = xpv1.ManagementPolicies{xpv1.ManagementActionObserve} }), - client: &clients.ApplicatorClientWithConfig{ - ClientApplicator: resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { - *obj.(*unstructured.Unstructured) = - *externalResourceWithLastAppliedConfigAnnotation( - `{"apiVersion":"v1","kind":"Namespace","metadata":{"name":"crossplane-system", "labels": {"old-label":"gone"}}}`, - ) - return nil - }), - }, + client: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { + *obj.(*unstructured.Unstructured) = + *externalResourceWithLastAppliedConfigAnnotation( + `{"apiVersion":"v1","kind":"Namespace","metadata":{"name":"crossplane-system", "labels": {"old-label":"gone"}}}`, + 
) + return nil + }), }, }, }, @@ -719,7 +675,7 @@ func Test_helmExternal_Observe(t *testing.T) { func Test_helmExternal_Create(t *testing.T) { type args struct { - client clients.ClusterClient + client resource.ClientApplicator mg resource.Managed } type want struct { @@ -751,11 +707,9 @@ func Test_helmExternal_Create(t *testing.T) { "FailedToCreate": { args: args{ mg: kubernetesObject(), - client: &clients.ApplicatorClientWithConfig{ - ClientApplicator: resource.ClientApplicator{ - Client: &test.MockClient{ - MockCreate: test.NewMockCreateFn(errBoom), - }, + client: resource.ClientApplicator{ + Client: &test.MockClient{ + MockCreate: test.NewMockCreateFn(errBoom), }, }, }, @@ -770,20 +724,18 @@ func Test_helmExternal_Create(t *testing.T) { "apiVersion": "v1", "kind": "Namespace" }`) }), - client: &clients.ApplicatorClientWithConfig{ - ClientApplicator: resource.ClientApplicator{ - Client: &test.MockClient{ - MockCreate: test.NewMockCreateFn(nil, func(obj client.Object) error { - _, ok := obj.GetAnnotations()[corev1.LastAppliedConfigAnnotation] - if !ok { - t.Errorf("Last applied annotation not set with create") - } - if obj.GetName() != testObjectName { - t.Errorf("Name should default to object name when not provider in manifest") - } - return nil - }), - }, + client: resource.ClientApplicator{ + Client: &test.MockClient{ + MockCreate: test.NewMockCreateFn(nil, func(obj client.Object) error { + _, ok := obj.GetAnnotations()[corev1.LastAppliedConfigAnnotation] + if !ok { + t.Errorf("Last applied annotation not set with create") + } + if obj.GetName() != testObjectName { + t.Errorf("Name should default to object name when not provider in manifest") + } + return nil + }), }, }, }, @@ -794,17 +746,15 @@ func Test_helmExternal_Create(t *testing.T) { "Success": { args: args{ mg: kubernetesObject(), - client: &clients.ApplicatorClientWithConfig{ - ClientApplicator: resource.ClientApplicator{ - Client: &test.MockClient{ - MockCreate: test.NewMockCreateFn(nil, func(obj 
client.Object) error { - _, ok := obj.GetAnnotations()[corev1.LastAppliedConfigAnnotation] - if !ok { - t.Errorf("Last applied annotation not set with create") - } - return nil - }), - }, + client: resource.ClientApplicator{ + Client: &test.MockClient{ + MockCreate: test.NewMockCreateFn(nil, func(obj client.Object) error { + _, ok := obj.GetAnnotations()[corev1.LastAppliedConfigAnnotation] + if !ok { + t.Errorf("Last applied annotation not set with create") + } + return nil + }), }, }, }, @@ -833,7 +783,7 @@ func Test_helmExternal_Create(t *testing.T) { func Test_helmExternal_Update(t *testing.T) { type args struct { - client clients.ClusterClient + client resource.ClientApplicator mg resource.Managed } type want struct { @@ -865,12 +815,10 @@ func Test_helmExternal_Update(t *testing.T) { "FailedToApply": { args: args{ mg: kubernetesObject(), - client: &clients.ApplicatorClientWithConfig{ - ClientApplicator: resource.ClientApplicator{ - Applicator: resource.ApplyFn(func(context.Context, client.Object, ...resource.ApplyOption) error { - return errBoom - }), - }, + client: resource.ClientApplicator{ + Applicator: resource.ApplyFn(func(context.Context, client.Object, ...resource.ApplyOption) error { + return errBoom + }), }, }, want: want{ @@ -884,15 +832,13 @@ func Test_helmExternal_Update(t *testing.T) { "apiVersion": "v1", "kind": "Namespace" }`) }), - client: &clients.ApplicatorClientWithConfig{ - ClientApplicator: resource.ClientApplicator{ - Applicator: resource.ApplyFn(func(ctx context.Context, obj client.Object, op ...resource.ApplyOption) error { - if obj.GetName() != testObjectName { - t.Errorf("Name should default to object name when not provider in manifest") - } - return nil - }), - }, + client: resource.ClientApplicator{ + Applicator: resource.ApplyFn(func(ctx context.Context, obj client.Object, op ...resource.ApplyOption) error { + if obj.GetName() != testObjectName { + t.Errorf("Name should default to object name when not provider in manifest") + } + 
return nil + }), }, }, want: want{ @@ -902,12 +848,10 @@ func Test_helmExternal_Update(t *testing.T) { "Success": { args: args{ mg: kubernetesObject(), - client: &clients.ApplicatorClientWithConfig{ - ClientApplicator: resource.ClientApplicator{ - Applicator: resource.ApplyFn(func(context.Context, client.Object, ...resource.ApplyOption) error { - return nil - }), - }, + client: resource.ClientApplicator{ + Applicator: resource.ApplyFn(func(context.Context, client.Object, ...resource.ApplyOption) error { + return nil + }), }, }, want: want{ @@ -935,7 +879,7 @@ func Test_helmExternal_Update(t *testing.T) { func Test_helmExternal_Delete(t *testing.T) { type args struct { - client clients.ClusterClient + client resource.ClientApplicator mg resource.Managed } type want struct { @@ -966,11 +910,9 @@ func Test_helmExternal_Delete(t *testing.T) { "FailedToDelete": { args: args{ mg: kubernetesObject(), - client: &clients.ApplicatorClientWithConfig{ - ClientApplicator: resource.ClientApplicator{ - Client: &test.MockClient{ - MockDelete: test.NewMockDeleteFn(errBoom), - }, + client: resource.ClientApplicator{ + Client: &test.MockClient{ + MockDelete: test.NewMockDeleteFn(errBoom), }, }, }, @@ -985,16 +927,14 @@ func Test_helmExternal_Delete(t *testing.T) { "apiVersion": "v1", "kind": "Namespace" }`) }), - client: &clients.ApplicatorClientWithConfig{ - ClientApplicator: resource.ClientApplicator{ - Client: &test.MockClient{ - MockDelete: test.NewMockDeleteFn(nil, func(obj client.Object) error { - if obj.GetName() != testObjectName { - t.Errorf("Name should default to object name when not provider in manifest") - } - return nil - }), - }, + client: resource.ClientApplicator{ + Client: &test.MockClient{ + MockDelete: test.NewMockDeleteFn(nil, func(obj client.Object) error { + if obj.GetName() != testObjectName { + t.Errorf("Name should default to object name when not provider in manifest") + } + return nil + }), }, }, }, @@ -1005,11 +945,9 @@ func Test_helmExternal_Delete(t 
*testing.T) { "Success": { args: args{ mg: kubernetesObject(), - client: &clients.ApplicatorClientWithConfig{ - ClientApplicator: resource.ClientApplicator{ - Client: &test.MockClient{ - MockDelete: test.NewMockDeleteFn(nil), - }, + client: resource.ClientApplicator{ + Client: &test.MockClient{ + MockDelete: test.NewMockDeleteFn(nil), }, }, }, diff --git a/internal/controller/observedobjectcollection/reconciler.go b/internal/controller/observedobjectcollection/reconciler.go index 7620051c..825e0da7 100644 --- a/internal/controller/observedobjectcollection/reconciler.go +++ b/internal/controller/observedobjectcollection/reconciler.go @@ -29,6 +29,7 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/client-go/rest" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/predicate" @@ -43,7 +44,7 @@ import ( "github.com/crossplane-contrib/provider-kubernetes/apis/object/v1alpha2" "github.com/crossplane-contrib/provider-kubernetes/apis/observedobjectcollection/v1alpha1" - "github.com/crossplane-contrib/provider-kubernetes/internal/clients" + "github.com/crossplane-contrib/provider-kubernetes/internal/clients/kube" ) const ( @@ -59,7 +60,7 @@ type Reconciler struct { client client.Client log logging.Logger pollInterval func() time.Duration - clientForProvider func(ctx context.Context, inclusterClient client.Client, providerConfigName string) (clients.ClusterClient, error) + clientForProvider func(ctx context.Context, inclusterClient client.Client, providerConfigName string) (client.Client, *rest.Config, error) observedObjectName func(collection client.Object, matchedObject client.Object) (string, error) } @@ -73,7 +74,7 @@ func Setup(mgr ctrl.Manager, o controller.Options, pollJitter time.Duration) err pollInterval: func() time.Duration { return o.PollInterval + 
+time.Duration((rand.Float64()-0.5)*2*float64(pollJitter)) //nolint }, - clientForProvider: clients.ClientForProvider, + clientForProvider: kube.ClientForProvider, observedObjectName: observedObjectName, } @@ -124,7 +125,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (result ct log.Info("Reconciling") // Get client for the referenced provider config. - clusterClient, err := r.clientForProvider(ctx, r.client, c.Spec.ProviderConfigReference.Name) + clusterClient, _, err := r.clientForProvider(ctx, r.client, c.Spec.ProviderConfigReference.Name) if err != nil { werr := errors.Wrap(err, errNewKubernetesClient) c.Status.SetConditions(xpv1.ReconcileError(werr)) diff --git a/internal/controller/observedobjectcollection/reconciler_test.go b/internal/controller/observedobjectcollection/reconciler_test.go index ad984cde..a3f898bf 100644 --- a/internal/controller/observedobjectcollection/reconciler_test.go +++ b/internal/controller/observedobjectcollection/reconciler_test.go @@ -32,18 +32,17 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/client-go/rest" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" "github.com/crossplane/crossplane-runtime/pkg/logging" - "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/test" "github.com/crossplane-contrib/provider-kubernetes/apis/object/v1alpha2" "github.com/crossplane-contrib/provider-kubernetes/apis/observedobjectcollection/v1alpha1" - "github.com/crossplane-contrib/provider-kubernetes/internal/clients" ) func TestReconciler(t *testing.T) { @@ -382,12 +381,8 @@ func TestReconciler(t *testing.T) { r := &Reconciler{ client: tc.args.client, log: logging.NewNopLogger(), - clientForProvider: func(ctx context.Context, inclusterClient 
client.Client, providerConfigName string) (clients.ClusterClient, error) { - return &clients.ApplicatorClientWithConfig{ - ClientApplicator: resource.ClientApplicator{ - Client: tc.args.client, - }, - }, nil + clientForProvider: func(ctx context.Context, inclusterClient client.Client, providerConfigName string) (client.Client, *rest.Config, error) { + return tc.args.client, nil, nil }, observedObjectName: func(collection client.Object, matchedObject client.Object) (string, error) { return fmt.Sprintf("%s-%s", collection.GetName(), matchedObject.GetName()), nil From c7edad5931dc396fe744d588b877be2615be57b4 Mon Sep 17 00:00:00 2001 From: Hasan Turken Date: Fri, 10 May 2024 15:08:05 +0300 Subject: [PATCH 17/19] Rely on garbage collect and override default DefaultWatchErrorHandler Signed-off-by: Hasan Turken --- apis/object/v1alpha1/types.go | 4 +- apis/object/v1alpha2/types.go | 4 +- internal/controller/object/informers.go | 79 ++++++++----------- internal/controller/object/object.go | 27 +------ .../kubernetes.crossplane.io_objects.yaml | 8 +- 5 files changed, 40 insertions(+), 82 deletions(-) diff --git a/apis/object/v1alpha1/types.go b/apis/object/v1alpha1/types.go index dfed2914..beaff8f0 100644 --- a/apis/object/v1alpha1/types.go +++ b/apis/object/v1alpha1/types.go @@ -127,8 +127,8 @@ type ObjectSpec struct { // Watch enables watching the referenced or managed kubernetes resources. // // THIS IS AN ALPHA FIELD. Do not use it in production. It is not honored - // unless the relevant Crossplane feature flag is enabled, and may be - // changed or removed without notice. + // unless "watches" feature gate is enabled, and may be changed or removed + // without notice. 
// +optional // +kubebuilder:default=false Watch bool `json:"watch,omitempty"` diff --git a/apis/object/v1alpha2/types.go b/apis/object/v1alpha2/types.go index 426def82..337212c0 100644 --- a/apis/object/v1alpha2/types.go +++ b/apis/object/v1alpha2/types.go @@ -100,8 +100,8 @@ type ObjectSpec struct { // Watch enables watching the referenced or managed kubernetes resources. // // THIS IS AN ALPHA FIELD. Do not use it in production. It is not honored - // unless the relevant Crossplane feature flag is enabled, and may be - // changed or removed without notice. + // unless "watches" feature gate is enabled, and may be changed or removed + // without notice. // +optional // +kubebuilder:default=false Watch bool `json:"watch,omitempty"` diff --git a/internal/controller/object/informers.go b/internal/controller/object/informers.go index b38fe235..6a43cc67 100644 --- a/internal/controller/object/informers.go +++ b/internal/controller/object/informers.go @@ -18,6 +18,7 @@ package object import ( "context" + "io" "strings" "sync" @@ -103,10 +104,10 @@ func (i *resourceInformers) Start(ctx context.Context, h handler.EventHandler, q // every reconcile to make resourceInformers aware of the referenced or managed // resources of the given Object. // -// Note that this complements garbageCollectResourceInformers which regularly +// Note that this complements cleanupResourceInformers which regularly // garbage collects resource informers that are no longer referenced by // any Object. -func (i *resourceInformers) WatchResources(rc *rest.Config, providerConfig string, gvks ...schema.GroupVersionKind) { +func (i *resourceInformers) WatchResources(rc *rest.Config, providerConfig string, gvks ...schema.GroupVersionKind) { // nolint:gocyclo // we need to handle all cases. 
if rc == nil { rc = i.config } @@ -122,7 +123,15 @@ func (i *resourceInformers) WatchResources(rc *rest.Config, providerConfig strin log := i.log.WithValues("providerConfig", providerConfig, "gvk", gvk.String()) - ca, err := cache.New(rc, cache.Options{}) + ca, err := cache.New(rc, cache.Options{ + DefaultWatchErrorHandler: func(r *kcache.Reflector, err error) { + if errors.Is(err, io.EOF) { + // Watch closed normally. + return + } + log.Debug("Watch error - probably remote cluster api is gone", "error", err) + }, + }) if err != nil { log.Debug("failed creating a cache", "error", err) continue @@ -163,6 +172,9 @@ func (i *resourceInformers) WatchResources(rc *rest.Config, providerConfig strin i.sink(providerConfig, ev) }, DeleteFunc: func(obj interface{}) { + if final, ok := obj.(kcache.DeletedFinalStateUnknown); ok { + obj = final.Obj + } ev := runtimeevent.GenericEvent{ Object: obj.(client.Object), } @@ -206,57 +218,26 @@ func (i *resourceInformers) WatchResources(rc *rest.Config, providerConfig strin } } -func (i *resourceInformers) StopWatchingResources(ctx context.Context, providerConfig string, gvks ...schema.GroupVersionKind) { - i.lock.Lock() - defer i.lock.Unlock() - - for _, gvk := range gvks { - ca, found := i.resourceCaches[gvkWithConfig{providerConfig: providerConfig, gvk: gvk}] - if !found { - continue - } - // Check if there are any other objects referencing this resource GVK. - list := v1alpha2.ObjectList{} - if err := i.objectsCache.List(ctx, &list, client.MatchingFields{resourceRefGVKsIndex: refKeyProviderGVK(providerConfig, gvk.Kind, gvk.Group, gvk.Version)}); err != nil { - i.log.Debug("cannot list objects referencing a certain resource GVK", "error", err, "fieldSelector", resourceRefGVKsIndex+"="+refKeyProviderGVK(providerConfig, gvk.Kind, gvk.Group, gvk.Version)) - continue - } - - inUse := false - for _, o := range list.Items { - // We only care about objects that are not being deleted.
Otherwise, - // we are getting into deadlocks while stopping the watches during - // deletion. - if o.GetDeletionTimestamp().IsZero() { - inUse = true - break - } - } - if inUse { - continue - } - - ca.cancelFn() - i.log.Info("Stopped resource watch", "provider config", providerConfig, "gvk", gvk) - delete(i.resourceCaches, gvkWithConfig{providerConfig: providerConfig, gvk: gvk}) - } -} - -// garbageCollectResourceInformers garbage collects resource informers that are +// cleanupResourceInformers garbage collects resource informers that are // no longer referenced by any Object. Ideally, all resource informers should // stopped/cleaned up when the Object is deleted. However, in practice, this // is not always the case. This method is a safety net to clean up resource // informers that are no longer referenced by any Object. -func (i *resourceInformers) garbageCollectResourceInformers(ctx context.Context) { - i.lock.Lock() - defer i.lock.Unlock() +func (i *resourceInformers) cleanupResourceInformers(ctx context.Context) { + // copy map to avoid locking it for the entire duration of the loop + i.lock.RLock() + resourceCaches := make(map[gvkWithConfig]resourceCache, len(i.resourceCaches)) + for gc, ca := range i.resourceCaches { + resourceCaches[gc] = ca + } + i.lock.RUnlock() // stop old informers i.log.Debug("Running garbage collection for resource informers", "count", len(i.resourceCaches)) - for gh, ca := range i.resourceCaches { + for gc, ca := range resourceCaches { list := v1alpha2.ObjectList{} - if err := i.objectsCache.List(ctx, &list, client.MatchingFields{resourceRefGVKsIndex: refKeyProviderGVK(gh.providerConfig, gh.gvk.Kind, gh.gvk.Group, gh.gvk.Version)}); err != nil { - i.log.Debug("cannot list objects referencing a certain resource GVK", "error", err, "fieldSelector", resourceRefGVKsIndex+"="+refKeyProviderGVK(gh.providerConfig, gh.gvk.Kind, gh.gvk.Group, gh.gvk.Version)) + if err := i.objectsCache.List(ctx, &list, 
client.MatchingFields{resourceRefGVKsIndex: refKeyProviderGVK(gc.providerConfig, gc.gvk.Kind, gc.gvk.Group, gc.gvk.Version)}); err != nil { + i.log.Debug("cannot list objects referencing a certain resource GVK", "error", err, "fieldSelector", resourceRefGVKsIndex+"="+refKeyProviderGVK(gc.providerConfig, gc.gvk.Kind, gc.gvk.Group, gc.gvk.Version)) continue } @@ -265,8 +246,10 @@ func (i *resourceInformers) garbageCollectResourceInformers(ctx context.Context) } ca.cancelFn() - i.log.Info("Stopped resource watch", "provider config", gh.providerConfig, "gvk", gh.gvk) - delete(i.resourceCaches, gh) + i.log.Info("Stopped resource watch", "provider config", gc.providerConfig, "gvk", gc.gvk) + i.lock.Lock() + delete(i.resourceCaches, gc) + i.lock.Unlock() } } diff --git a/internal/controller/object/object.go b/internal/controller/object/object.go index d42026c1..072be877 100644 --- a/internal/controller/object/object.go +++ b/internal/controller/object/object.go @@ -102,10 +102,6 @@ type KindObserver interface { // WatchResources starts a watch of the given kinds to trigger reconciles // when a referenced or managed objects of those kinds changes. WatchResources(rc *rest.Config, providerConfig string, gvks ...schema.GroupVersionKind) - - // UnwatchResources stops watching the given kinds if they are no longer - // referenced or managed by any other Object that is not being deleted. - StopWatchingResources(ctx context.Context, providerConfig string, gvks ...schema.GroupVersionKind) } // Setup adds a controller that reconciles Object managed resources. 
@@ -164,7 +160,7 @@ func Setup(mgr ctrl.Manager, o controller.Options, sanitizeSecrets bool, pollJit conn.kindObserver = &i if err := mgr.Add(manager.RunnableFunc(func(ctx context.Context) error { - wait.UntilWithContext(ctx, i.garbageCollectResourceInformers, time.Minute) + wait.UntilWithContext(ctx, i.cleanupResourceInformers, time.Minute) return nil })); err != nil { return errors.Wrap(err, "cannot add cleanup referenced resource informers runnable") @@ -354,27 +350,6 @@ func (c *external) Delete(ctx context.Context, mg resource.Managed) error { return err } - if c.shouldWatch(cr) { - c.kindObserver.StopWatchingResources(ctx, cr.Spec.ProviderConfigReference.Name, obj.GroupVersionKind()) - if len(cr.Spec.References) > 0 { - gvks := make([]schema.GroupVersionKind, 0, len(cr.Spec.References)) - for _, ref := range cr.Spec.References { - if ref.DependsOn == nil && ref.PatchesFrom == nil { - continue - } - - refAPIVersion, refKind, _, _ := getReferenceInfo(ref) - g, v := parseAPIVersion(refAPIVersion) - gvks = append(gvks, schema.GroupVersionKind{ - Group: g, - Version: v, - Kind: refKind, - }) - } - c.kindObserver.StopWatchingResources(ctx, "", gvks...) - } - } - return errors.Wrap(resource.IgnoreNotFound(c.client.Delete(ctx, obj)), errDeleteObject) } diff --git a/package/crds/kubernetes.crossplane.io_objects.yaml b/package/crds/kubernetes.crossplane.io_objects.yaml index f3aded13..13ee7dea 100644 --- a/package/crds/kubernetes.crossplane.io_objects.yaml +++ b/package/crds/kubernetes.crossplane.io_objects.yaml @@ -407,8 +407,8 @@ spec: THIS IS AN ALPHA FIELD. Do not use it in production. It is not honored - unless the relevant Crossplane feature flag is enabled, and may be - changed or removed without notice. + unless "watches" feature gate is enabled, and may be changed or removed + without notice. type: boolean writeConnectionSecretToRef: description: |- @@ -851,8 +851,8 @@ spec: THIS IS AN ALPHA FIELD. Do not use it in production. 
It is not honored - unless the relevant Crossplane feature flag is enabled, and may be - changed or removed without notice. + unless "watches" feature gate is enabled, and may be changed or removed + without notice. type: boolean writeConnectionSecretToRef: description: |- From 8a4ecf4df7b320f0c85f6617777ca47105ea1488 Mon Sep 17 00:00:00 2001 From: Hasan Turken Date: Fri, 10 May 2024 16:22:06 +0300 Subject: [PATCH 18/19] Add e2e for watching object Signed-off-by: Hasan Turken --- Makefile | 2 +- build | 2 +- examples/object/object-watching.yaml | 40 ++++++++++++++ examples/object/object.yaml | 2 + .../object/testhooks/validate-watching.sh | 53 +++++++++++++++++++ 5 files changed, 97 insertions(+), 2 deletions(-) create mode 100644 examples/object/object-watching.yaml create mode 100755 examples/object/testhooks/validate-watching.sh diff --git a/Makefile b/Makefile index 4bc23a4f..7abe9c54 100644 --- a/Makefile +++ b/Makefile @@ -89,7 +89,7 @@ CROSSPLANE_NAMESPACE = crossplane-system -include build/makelib/local.xpkg.mk -include build/makelib/controlplane.mk -UPTEST_EXAMPLE_LIST ?= "examples/object/object.yaml" +UPTEST_EXAMPLE_LIST ?= "examples/object/object.yaml,examples/object/object-watching.yaml" uptest: $(UPTEST) $(KUBECTL) $(KUTTL) @$(INFO) running automated tests @KUBECTL=$(KUBECTL) KUTTL=$(KUTTL) CROSSPLANE_NAMESPACE=${CROSSPLANE_NAMESPACE} $(UPTEST) e2e "$(UPTEST_EXAMPLE_LIST)" --setup-script=cluster/test/setup.sh || $(FAIL) diff --git a/build b/build index cdee0068..93d68794 160000 --- a/build +++ b/build @@ -1 +1 @@ -Subproject commit cdee00685cfc90fce40d3f6a5eb9c8f044edbaa3 +Subproject commit 93d68794581d7991b0d6816a077e0ab60eb0eb24 diff --git a/examples/object/object-watching.yaml b/examples/object/object-watching.yaml new file mode 100644 index 00000000..cfd9e012 --- /dev/null +++ b/examples/object/object-watching.yaml @@ -0,0 +1,40 @@ +--- +apiVersion: kubernetes.crossplane.io/v1alpha2 +kind: Object +metadata: + name: foo + annotations: + 
uptest.upbound.io/post-assert-hook: testhooks/validate-watching.sh + uptest.upbound.io/timeout: "60" +spec: + # Watch for changes to the Namespace object. + # Watching resources is an alpha feature and needs to be enabled with --enable-watches + # in the provider to get this configuration working. + watch: true + references: + # Use patchesFrom to patch field from other k8s resource to this object + - patchesFrom: + apiVersion: v1 + kind: Secret + name: bar + namespace: default + fieldPath: data.key + toFieldPath: data.key-from-bar + forProvider: + manifest: + apiVersion: v1 + kind: Secret + metadata: + namespace: default + stringData: + another-key: another-value + providerConfigRef: + name: kubernetes-provider +--- +apiVersion: v1 +kind: Secret +metadata: + name: bar + namespace: default +stringData: + key: some-value diff --git a/examples/object/object.yaml b/examples/object/object.yaml index b028dd23..749b8c3f 100644 --- a/examples/object/object.yaml +++ b/examples/object/object.yaml @@ -2,6 +2,8 @@ apiVersion: kubernetes.crossplane.io/v1alpha2 kind: Object metadata: name: sample-namespace + annotations: + uptest.upbound.io/timeout: "60" spec: # Watch for changes to the Namespace object. 
# Watching resources is an alpha feature and needs to be enabled with --enable-watches diff --git a/examples/object/testhooks/validate-watching.sh b/examples/object/testhooks/validate-watching.sh new file mode 100755 index 00000000..29a7bb8d --- /dev/null +++ b/examples/object/testhooks/validate-watching.sh @@ -0,0 +1,53 @@ +#!/usr/bin/env bash +set -aeuo pipefail + +# This script is used to validate the watch feature of the object, triggered by the +# uptest framework via `uptest.upbound.io/post-assert-hook`: https://github.com/crossplane/uptest/tree/e64457e2cce153ada54da686c8bf96143f3f6329?tab=readme-ov-file#hooks +KUBECTL="kubectl" + +VALUE=$(${KUBECTL} get secret bar -o jsonpath='{.data.key}' | base64 -d) +if [ "${VALUE}" == "new-value" ]; then + echo "This test has to pass in the first run since we're validating the realtime watching behaviour." + exit 1 +fi + +echo "Enabling watch feature for the provider" +${KUBECTL} patch deploymentruntimeconfig runtimeconfig-provider-kubernetes --type='json' -p='[{"op":"replace","path":"/spec/deploymentTemplate/spec/template/spec/containers/0/args", "value":["--debug", "--enable-watches"]}]' + +sleep 3 + +echo "Patching referenced secret" +${KUBECTL} patch secret bar --type='merge' -p='{"stringData":{"key":"new-value"}}' + +sleep 3 + +echo "Checking if the managed secret has been updated" +VALUE=$(${KUBECTL} get secret foo -o jsonpath='{.data.key-from-bar}' | base64 -d) +if [ "${VALUE}" != "new-value" ]; then + echo "Expected value to be 'new-value' but got '${VALUE}'" + exit 1 +fi +echo "Checking if the managed secret has been updated...Success" + +echo "Patching managed secret" +${KUBECTL} patch secret foo --type='merge' -p='{"stringData":{"a-new-key":"with-new-value"}}' + +sleep 3 + +echo "Checking if the object grabbed the new value at status.atProvider" +VALUE=$(${KUBECTL} get object foo -o jsonpath='{.status.atProvider.manifest.data.a-new-key}' | base64 -d) + +if [ "${VALUE}" != "with-new-value" ]; then + echo 
"Expected value to be 'with-new-value' but got '${VALUE}'" + exit 1 +fi +echo "Checking if the object grabbed the new value at status.atProvider...Success" + +# TODO(turkenh): Add one more test case to validate the drift is reverted back to the desired state +# in realtime after https://github.com/crossplane-contrib/provider-kubernetes/issues/37 is resolved. + +echo "Successfully validated the watch feature!" + +echo "Disabling watch feature for the provider" +${KUBECTL} patch deploymentruntimeconfig runtimeconfig-provider-kubernetes --type='json' -p='[{"op":"replace","path":"/spec/deploymentTemplate/spec/template/spec/containers/0/args", "value":["--debug"]}]' + From c334ea4f917065bc051a97dc007971cfbdeda190 Mon Sep 17 00:00:00 2001 From: Hasan Turken Date: Fri, 10 May 2024 18:27:14 +0300 Subject: [PATCH 19/19] Always pass update event to reconciler Signed-off-by: Hasan Turken --- internal/controller/object/informers.go | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/internal/controller/object/informers.go b/internal/controller/object/informers.go index 6a43cc67..0daa8fc2 100644 --- a/internal/controller/object/informers.go +++ b/internal/controller/object/informers.go @@ -159,14 +159,8 @@ func (i *resourceInformers) WatchResources(rc *rest.Config, providerConfig strin i.sink(providerConfig, ev) }, UpdateFunc: func(oldObj, newObj interface{}) { - old := oldObj.(client.Object) //nolint:forcetypeassert // Will always be client.Object. - obj := newObj.(client.Object) //nolint:forcetypeassert // Will always be client.Object. - if old.GetResourceVersion() == obj.GetResourceVersion() { - return - } - ev := runtimeevent.GenericEvent{ - Object: obj, + Object: newObj.(client.Object), } i.sink(providerConfig, ev)