diff --git a/pkg/controllers/hpareplicassyncer/hpa_replicas_syncer_controller.go b/pkg/controllers/hpareplicassyncer/hpa_replicas_syncer_controller.go
index 43754e4b2386..f43945f957c7 100644
--- a/pkg/controllers/hpareplicassyncer/hpa_replicas_syncer_controller.go
+++ b/pkg/controllers/hpareplicassyncer/hpa_replicas_syncer_controller.go
@@ -9,12 +9,15 @@ import (
 	"k8s.io/apimachinery/pkg/api/meta"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/client-go/dynamic"
 	"k8s.io/client-go/scale"
+	"k8s.io/client-go/util/retry"
 	"k8s.io/klog/v2"
 	controllerruntime "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/builder"
 	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"

 	"github.com/karmada-io/karmada/pkg/util"
 )
@@ -72,13 +75,27 @@ func (r *HPAReplicasSyncer) Reconcile(ctx context.Context, req controllerruntime
 		return controllerruntime.Result{}, err
 	}

-	err = r.updateScaleIfNeed(ctx, workloadGR, scale.DeepCopy(), hpa)
+	synced := isReplicasSynced(scale, hpa)
+
+	if !hpa.DeletionTimestamp.IsZero() && synced {
+		return controllerruntime.Result{}, r.removeFinalizerIfNeed(ctx, hpa)
+	}
+
+	err = r.addFinalizerIfNeed(ctx, hpa)
 	if err != nil {
 		return controllerruntime.Result{}, err
 	}

-	// TODO(@lxtywypc): Add finalizer for HPA and remove them
-	// when the HPA is deleting and the replicas have been synced.
+	if !synced {
+		err = r.updateScale(ctx, workloadGR, scale, hpa)
+		if err != nil {
+			return controllerruntime.Result{}, err
+		}
+
+		// If a sync was needed, requeue once immediately to make sure the sync
+		// has succeeded and the finalizer can then be removed.
+		return controllerruntime.Result{Requeue: true}, nil
+	}

 	return controllerruntime.Result{}, nil
 }
@@ -102,6 +119,9 @@ func (r *HPAReplicasSyncer) getGroupResourceAndScaleForWorkloadFromHPA(ctx conte
 	if err != nil {
 		if apierrors.IsNotFound(err) {
 			// If the scale of workload is not found, skip processing.
+			klog.V(4).Infof("Scale of resource(kind=%s, %s/%s) not found, the resource might have been removed",
+				hpa.Spec.ScaleTargetRef.Kind, hpa.Namespace, hpa.Spec.ScaleTargetRef.Name)
+
 			return gr, nil, nil
 		}

@@ -114,32 +134,98 @@ func (r *HPAReplicasSyncer) getGroupResourceAndScaleForWorkloadFromHPA(ctx conte
 	return gr, scale, nil
 }

-// updateScaleIfNeed would update the scale of workload on fed-control plane
-// if the replicas declared in the workload on karmada-control-plane does not match
-// the actual replicas in member clusters effected by HPA.
-func (r *HPAReplicasSyncer) updateScaleIfNeed(ctx context.Context, workloadGR schema.GroupResource, scale *autoscalingv1.Scale, hpa *autoscalingv2.HorizontalPodAutoscaler) error {
-	// If the scale of workload is not found, skip processing.
-	if scale == nil {
-		klog.V(4).Infof("Scale of resource(kind=%s, %s/%s) not found, the resource might have been removed, skip",
-			hpa.Spec.ScaleTargetRef.Kind, hpa.Namespace, hpa.Spec.ScaleTargetRef.Name)
+// updateScale updates the scale of the workload on the fed-control plane
+// with the actual replicas in member clusters affected by the HPA.
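+// It writes the HPA's status.desiredReplicas into the workload's scale subresource (spec.replicas).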
+func (r *HPAReplicasSyncer) updateScale(ctx context.Context, workloadGR schema.GroupResource, scale *autoscalingv1.Scale, hpa *autoscalingv2.HorizontalPodAutoscaler) error {
+	scaleCopy := scale.DeepCopy()
+
+	scaleCopy.Spec.Replicas = hpa.Status.DesiredReplicas
+	_, err := r.ScaleClient.Scales(hpa.Namespace).Update(ctx, workloadGR, scaleCopy, metav1.UpdateOptions{})
+	if err != nil {
+		klog.Errorf("Failed to sync scale for resource(kind=%s, %s/%s) from %d to %d: %v",
+			hpa.Spec.ScaleTargetRef.Kind, hpa.Namespace, hpa.Spec.ScaleTargetRef.Name, scale.Spec.Replicas, hpa.Status.DesiredReplicas, err)
+
+		return err
+	}
+
+	klog.V(4).Infof("Successfully synced scale for resource(kind=%s, %s/%s) from %d to %d",
+		hpa.Spec.ScaleTargetRef.Kind, hpa.Namespace, hpa.Spec.ScaleTargetRef.Name, scale.Spec.Replicas, hpa.Status.DesiredReplicas)
+
+	return nil
+}
+
+// removeFinalizerIfNeed tries to remove HPAReplicasSyncerFinalizer from the HPA if the finalizer exists.
+func (r *HPAReplicasSyncer) removeFinalizerIfNeed(ctx context.Context, hpa *autoscalingv2.HorizontalPodAutoscaler) error {
+	hpaCopy := hpa.DeepCopy()
+
+	err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
+		updated := controllerutil.RemoveFinalizer(hpaCopy, util.HPAReplicasSyncerFinalizer)
+		if !updated {
+			// If the hpa does not have the finalizer, return immediately without error.
+			return nil
+		}
+
+		updateErr := r.Client.Update(ctx, hpaCopy)
+		if updateErr == nil {
+			klog.V(4).Infof("Remove HPAReplicasSyncerFinalizer from horizontalpodautoscaler %s/%s successfully", hpa.Namespace, hpa.Name)
-		return nil
+			return nil
+		}
+
+		err := r.Client.Get(ctx, types.NamespacedName{Namespace: hpa.Namespace, Name: hpa.Name}, hpaCopy)
+		if err != nil {
+			return err
+		}
+
+		return updateErr
+	})
+	if err != nil {
+		klog.Errorf("Failed to remove HPAReplicasSyncerFinalizer from horizontalpodautoscaler %s/%s: %v", hpa.Namespace, hpa.Name, err)
 	}

-	if scale.Spec.Replicas != hpa.Status.DesiredReplicas {
-		oldReplicas := scale.Spec.Replicas
+	return err
+}
+
+// addFinalizerIfNeed tries to add HPAReplicasSyncerFinalizer to the HPA if the finalizer does not exist.
+func (r *HPAReplicasSyncer) addFinalizerIfNeed(ctx context.Context, hpa *autoscalingv2.HorizontalPodAutoscaler) error {
+	hpaCopy := hpa.DeepCopy()
+
+	err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
+		updated := controllerutil.AddFinalizer(hpaCopy, util.HPAReplicasSyncerFinalizer)
+		if !updated {
+			// If the hpa already has the finalizer, return immediately without error.
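+			// Note: controllerutil.AddFinalizer reports whether it actually changed the finalizers list.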
+			return nil
+		}
+
+		updateErr := r.Client.Update(ctx, hpaCopy)
+		if updateErr == nil {
+			klog.V(4).Infof("Add HPAReplicasSyncerFinalizer to horizontalpodautoscaler %s/%s successfully", hpa.Namespace, hpa.Name)
-		scale.Spec.Replicas = hpa.Status.DesiredReplicas
-		_, err := r.ScaleClient.Scales(hpa.Namespace).Update(ctx, workloadGR, scale, metav1.UpdateOptions{})
+			return nil
+		}
+
+		err := r.Client.Get(ctx, types.NamespacedName{Namespace: hpa.Namespace, Name: hpa.Name}, hpaCopy)
 		if err != nil {
-			klog.Errorf("Failed to try to sync scale for resource(kind=%s, %s/%s) from %d to %d: %v",
-				hpa.Spec.ScaleTargetRef.Kind, hpa.Namespace, hpa.Spec.ScaleTargetRef.Name, oldReplicas, hpa.Status.DesiredReplicas, err)
 			return err
 		}

-		klog.V(4).Infof("Successfully synced scale for resource(kind=%s, %s/%s) from %d to %d",
-			hpa.Spec.ScaleTargetRef.Kind, hpa.Namespace, hpa.Spec.ScaleTargetRef.Name, oldReplicas, hpa.Status.DesiredReplicas)
+		return updateErr
+	})
+	if err != nil {
+		klog.Errorf("Failed to add HPAReplicasSyncerFinalizer to horizontalpodautoscaler %s/%s: %v", hpa.Namespace, hpa.Name, err)
+	}
+
+	return err
+}
+
+// isReplicasSynced reports whether the replicas of the workload scale have been synced.
+func isReplicasSynced(scale *autoscalingv1.Scale, hpa *autoscalingv2.HorizontalPodAutoscaler) bool {
+	if scale == nil || hpa == nil {
+		// If the scale is not found, the workload might have been removed;
+		// if the hpa is not found, the workload is not managed by an HPA;
+		// in both cases we treat the replicas as synced.
+		return true
 	}

-	return nil
+	return scale.Spec.Replicas == hpa.Status.DesiredReplicas
 }
diff --git a/pkg/controllers/hpareplicassyncer/hpa_replicas_syncer_controller_test.go b/pkg/controllers/hpareplicassyncer/hpa_replicas_syncer_controller_test.go
index 74bda4c32689..28f6e070e903 100644
--- a/pkg/controllers/hpareplicassyncer/hpa_replicas_syncer_controller_test.go
+++ b/pkg/controllers/hpareplicassyncer/hpa_replicas_syncer_controller_test.go
@@ -17,10 +17,12 @@ import (
 	"k8s.io/apimachinery/pkg/types"
 	scalefake "k8s.io/client-go/scale/fake"
 	coretesting "k8s.io/client-go/testing"
+	controllerruntime "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/client/fake"

 	workloadv1alpha1 "github.com/karmada-io/karmada/examples/customresourceinterpreter/apis/workload/v1alpha1"
+	"github.com/karmada-io/karmada/pkg/util"
 	"github.com/karmada-io/karmada/pkg/util/gclient"
 )

@@ -86,7 +88,7 @@ func TestGetGroupResourceAndScaleForWorkloadFromHPA(t *testing.T) {
 	}
 }

-func TestUpdateScaleIfNeed(t *testing.T) {
+func TestUpdateScale(t *testing.T) {
 	cases := []struct {
 		name   string
 		object client.Object
@@ -124,7 +126,7 @@ func TestUpdateScaleIfNeed(t *testing.T) {
 	for _, tt := range cases {
 		t.Run(tt.name, func(t *testing.T) {
 			syncer := newHPAReplicasSyncer(tt.object)
-			err := syncer.updateScaleIfNeed(context.TODO(), tt.gr, tt.scale, tt.hpa)
+			err := syncer.updateScale(context.TODO(), tt.gr, tt.scale, tt.hpa)
 			if tt.expectedError {
 				assert.NotEmpty(t, err)
 				return
@@ -152,6 +154,369 @@ func TestUpdateScaleIfNeed(t *testing.T) {
 	}
 }

+func TestRemoveFinalizerIfNeed(t *testing.T) {
+	cases := []struct {
+		name          string
+		hpa           *autoscalingv2.HorizontalPodAutoscaler
+		withFinalizer bool
+		expectedError bool
+	}{
+		{
+			name:          "hpa with finalizer",
+			hpa:           newHPA(appsv1.SchemeGroupVersion.String(), "Deployment", "deployment-1", 3),
+			withFinalizer: true,
+			expectedError: false,
+		},
+		{
+			name:          "hpa without finalizer",
+			hpa:           newHPA(appsv1.SchemeGroupVersion.String(), "Deployment", "deployment-1", 3),
"Deployment", "deployment-1", 3), + withFinalizer: false, + expectedError: false, + }, + { + name: "hpa not found", + hpa: newHPA(appsv1.SchemeGroupVersion.String(), "Deployment", "deployment-1", 3), + withFinalizer: true, + expectedError: true, + }, + } + + for _, tt := range cases { + t.Run(tt.name, func(t *testing.T) { + if tt.withFinalizer { + tt.hpa.Finalizers = []string{util.HPAReplicasSyncerFinalizer} + } + + syncer := newHPAReplicasSyncer() + if !tt.expectedError { + err := syncer.Client.Create(context.TODO(), tt.hpa) + assert.Empty(t, err) + if err != nil { + return + } + } + + err := syncer.removeFinalizerIfNeed(context.TODO(), tt.hpa) + if tt.expectedError { + assert.NotEmpty(t, err) + return + } + assert.Empty(t, err) + + newHPA := &autoscalingv2.HorizontalPodAutoscaler{} + err = syncer.Client.Get(context.TODO(), types.NamespacedName{Namespace: tt.hpa.Namespace, Name: tt.hpa.Name}, newHPA) + assert.Empty(t, err) + if err != nil { + return + } + + assert.Empty(t, newHPA.Finalizers) + + if tt.withFinalizer { + assert.Equal(t, "2", newHPA.ResourceVersion) + } else { + assert.Equal(t, "1", newHPA.ResourceVersion) + } + }) + } +} + +func TestAddFinalizerIfNeed(t *testing.T) { + cases := []struct { + name string + hpa *autoscalingv2.HorizontalPodAutoscaler + withFinalizer bool + expectedError bool + }{ + { + name: "hpa without finalizer", + hpa: newHPA(appsv1.SchemeGroupVersion.String(), "Deployment", "deployment-1", 3), + withFinalizer: false, + expectedError: false, + }, + { + name: "hpa with finalizer", + hpa: newHPA(appsv1.SchemeGroupVersion.String(), "Deployment", "deployment-1", 3), + withFinalizer: true, + expectedError: false, + }, + { + name: "hpa not found", + hpa: newHPA(appsv1.SchemeGroupVersion.String(), "Deployment", "deployment-1", 3), + withFinalizer: false, + expectedError: true, + }, + } + + for _, tt := range cases { + t.Run(tt.name, func(t *testing.T) { + if tt.withFinalizer { + tt.hpa.Finalizers = []string{util.HPAReplicasSyncerFinalizer} + } + + syncer := newHPAReplicasSyncer() + if !tt.expectedError { + err := syncer.Client.Create(context.TODO(), tt.hpa) + assert.Empty(t, err) + if err != nil { + return + } + } + + err := syncer.addFinalizerIfNeed(context.TODO(), tt.hpa) + if tt.expectedError { + assert.NotEmpty(t, err) + return + } + assert.Empty(t, err) + + newHPA := &autoscalingv2.HorizontalPodAutoscaler{} + err = syncer.Client.Get(context.TODO(), types.NamespacedName{Namespace: tt.hpa.Namespace, Name: tt.hpa.Name}, newHPA) + assert.Empty(t, err) + if err != nil { + return + } + + assert.NotEmpty(t, newHPA.Finalizers) + + if !tt.withFinalizer { + assert.Equal(t, "2", newHPA.ResourceVersion) + } else { + assert.Equal(t, "1", newHPA.ResourceVersion) + } + }) + } +} + +func TestIsReplicasSynced(t *testing.T) { + cases := []struct { + name string + hpa *autoscalingv2.HorizontalPodAutoscaler + scale *autoscalingv1.Scale + expected bool + }{ + { + name: "nil hpa", + hpa: nil, + scale: newScale("deployment-1", 0), + expected: true, + }, + { + name: "nil scale", + hpa: newHPA(appsv1.SchemeGroupVersion.String(), "Deployment", "deployment-1", 3), + scale: nil, + expected: true, + }, + { + name: "nil scale and hpa", + hpa: nil, + scale: nil, + expected: true, + }, + { + name: "replicas has not been synced", + hpa: newHPA(appsv1.SchemeGroupVersion.String(), "Deployment", "deployment-1", 3), + scale: newScale("deployment-1", 0), + expected: false, + }, + { + name: "replicas has been synced", + hpa: newHPA(appsv1.SchemeGroupVersion.String(), "Deployment", "deployment-1", 
+			scale:    newScale("deployment-1", 3),
+			expected: true,
+		},
+	}
+
+	for _, tt := range cases {
+		t.Run(tt.name, func(t *testing.T) {
+			res := isReplicasSynced(tt.scale, tt.hpa)
+
+			assert.Equal(t, tt.expected, res)
+		})
+	}
+}
+
+func TestReconcile(t *testing.T) {
+	cases := []struct {
+		name              string
+		object            client.Object
+		gr                schema.GroupResource
+		hpa               *autoscalingv2.HorizontalPodAutoscaler
+		request           controllerruntime.Request
+		withFinalizer     bool
+		isDeleting        bool
+		expectedError     bool
+		expectedFinalizer bool
+		expectedResult    controllerruntime.Result
+		expectedScale     *autoscalingv1.Scale
+	}{
+		{
+			name:              "normal case",
+			object:            newDeployment("deployment-1", 0),
+			gr:                schema.GroupResource{Group: appsv1.SchemeGroupVersion.Group, Resource: "deployments"},
+			hpa:               newHPA(appsv1.SchemeGroupVersion.String(), "Deployment", "deployment-1", 3),
+			request:           controllerruntime.Request{NamespacedName: types.NamespacedName{Namespace: "default", Name: "deployment-1"}},
+			withFinalizer:     true,
+			isDeleting:        false,
+			expectedError:     false,
+			expectedFinalizer: true,
+			expectedResult:    controllerruntime.Result{Requeue: true},
+			expectedScale:     newScale("deployment-1", 3),
+		},
+		{
+			name:              "customized resource normal case",
+			object:            newWorkload("workload-1", 0),
+			gr:                schema.GroupResource{Group: workloadv1alpha1.SchemeGroupVersion.Group, Resource: "workloads"},
+			hpa:               newHPA(workloadv1alpha1.SchemeGroupVersion.String(), "Workload", "workload-1", 3),
+			request:           controllerruntime.Request{NamespacedName: types.NamespacedName{Namespace: "default", Name: "workload-1"}},
+			withFinalizer:     true,
+			isDeleting:        false,
+			expectedError:     false,
+			expectedFinalizer: true,
+			expectedResult:    controllerruntime.Result{Requeue: true},
+			expectedScale:     newScale("workload-1", 3),
+		},
+		{
+			name:              "replicas have been synced",
+			object:            newDeployment("deployment-1", 3),
+			gr:                schema.GroupResource{Group: appsv1.SchemeGroupVersion.Group, Resource: "deployments"},
+			hpa:               newHPA(appsv1.SchemeGroupVersion.String(), "Deployment", "deployment-1", 3),
+			request:           controllerruntime.Request{NamespacedName: types.NamespacedName{Namespace: "default", Name: "deployment-1"}},
+			withFinalizer:     true,
+			isDeleting:        false,
+			expectedError:     false,
+			expectedFinalizer: true,
+			expectedResult:    controllerruntime.Result{},
+			expectedScale:     newScale("deployment-1", 3),
+		},
+		{
+			name:              "hpa not found",
+			object:            newDeployment("deployment-1", 0),
+			gr:                schema.GroupResource{Group: appsv1.SchemeGroupVersion.Group, Resource: "deployments"},
+			hpa:               nil,
+			request:           controllerruntime.Request{NamespacedName: types.NamespacedName{Namespace: "default", Name: "deployment-1"}},
+			withFinalizer:     false,
+			isDeleting:        false,
+			expectedError:     false,
+			expectedFinalizer: false,
+			expectedResult:    controllerruntime.Result{},
+			expectedScale:     nil,
+		},
+		{
+			name:              "scale not found",
+			object:            nil,
+			gr:                schema.GroupResource{Group: appsv1.SchemeGroupVersion.Group, Resource: "deployments"},
+			hpa:               newHPA(appsv1.SchemeGroupVersion.String(), "Deployment", "deployment-1", 3),
+			request:           controllerruntime.Request{NamespacedName: types.NamespacedName{Namespace: "default", Name: "deployment-1"}},
+			withFinalizer:     false,
+			isDeleting:        false,
+			expectedError:     false,
+			expectedFinalizer: true,
+			expectedResult:    controllerruntime.Result{},
+			expectedScale:     nil,
+		},
+		{
+			name:              "without finalizer",
+			object:            newDeployment("deployment-1", 0),
+			gr:                schema.GroupResource{Group: appsv1.SchemeGroupVersion.Group, Resource: "deployments"},
+			hpa:               newHPA(appsv1.SchemeGroupVersion.String(), "Deployment", "deployment-1", 3),
"Deployment", "deployment-1", 3), + request: controllerruntime.Request{NamespacedName: types.NamespacedName{Namespace: "default", Name: "deployment-1"}}, + withFinalizer: false, + isDeleting: false, + expectedError: false, + expectedFinalizer: true, + expectedResult: controllerruntime.Result{Requeue: true}, + expectedScale: newScale("deployment-1", 3), + }, + { + name: "hpa is deleting but replicas has not been synced", + object: newDeployment("deployment-1", 0), + gr: schema.GroupResource{Group: appsv1.SchemeGroupVersion.Group, Resource: "deployments"}, + hpa: newHPA(appsv1.SchemeGroupVersion.String(), "Deployment", "deployment-1", 3), + request: controllerruntime.Request{NamespacedName: types.NamespacedName{Namespace: "default", Name: "deployment-1"}}, + withFinalizer: true, + isDeleting: true, + expectedError: false, + expectedFinalizer: true, + expectedResult: controllerruntime.Result{Requeue: true}, + expectedScale: newScale("deployment-1", 3), + }, + { + name: "hpa is deleting with replicas has been synced", + object: newDeployment("deployment-1", 3), + gr: schema.GroupResource{Group: appsv1.SchemeGroupVersion.Group, Resource: "deployments"}, + hpa: newHPA(appsv1.SchemeGroupVersion.String(), "Deployment", "deployment-1", 3), + request: controllerruntime.Request{NamespacedName: types.NamespacedName{Namespace: "default", Name: "deployment-1"}}, + withFinalizer: true, + isDeleting: true, + expectedError: false, + expectedFinalizer: false, + expectedResult: controllerruntime.Result{}, + expectedScale: newScale("deployment-1", 3), + }, + } + + for _, tt := range cases { + t.Run(tt.name, func(t *testing.T) { + syncer := newHPAReplicasSyncer() + + if tt.object != nil { + err := syncer.Client.Create(context.TODO(), tt.object) + assert.Empty(t, err) + if err != nil { + return + } + } + + if tt.hpa != nil { + if tt.withFinalizer { + tt.hpa.Finalizers = []string{util.HPAReplicasSyncerFinalizer} + } + + err := syncer.Client.Create(context.TODO(), tt.hpa) + assert.Empty(t, err) + if err != nil { + return + } + } + + if tt.isDeleting { + err := syncer.Client.Delete(context.TODO(), tt.hpa) + assert.Empty(t, err) + if err != nil { + return + } + } + + res, err := syncer.Reconcile(context.TODO(), tt.request) + if tt.expectedError { + assert.NotEmpty(t, err) + return + } + assert.Empty(t, err) + + assert.Equal(t, tt.expectedResult, res) + + if tt.expectedScale != nil { + scale, _ := syncer.ScaleClient.Scales(tt.hpa.Namespace).Get(context.TODO(), tt.gr, tt.hpa.Spec.ScaleTargetRef.Name, metav1.GetOptions{}) + + assert.Equal(t, tt.expectedScale, scale) + } + + if tt.hpa != nil { + hpa := &autoscalingv2.HorizontalPodAutoscaler{} + _ = syncer.Client.Get(context.TODO(), types.NamespacedName{Namespace: tt.hpa.Namespace, Name: tt.hpa.Name}, hpa) + + if tt.expectedFinalizer { + assert.NotEmpty(t, hpa.Finalizers) + } else { + assert.Empty(t, hpa.Finalizers) + } + } + }) + } +} + func newHPAReplicasSyncer(objs ...client.Object) *HPAReplicasSyncer { scheme := gclient.NewSchema() _ = workloadv1alpha1.AddToScheme(scheme) @@ -214,6 +579,10 @@ func getScaleFromUnstructured(obj *unstructured.Unstructured) (*autoscalingv1.Sc } return &autoscalingv1.Scale{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: obj.GetNamespace(), + Name: obj.GetName(), + }, Spec: autoscalingv1.ScaleSpec{ Replicas: replicas, }, @@ -321,5 +690,8 @@ func newScale(name string, replicas int32) *autoscalingv1.Scale { Spec: autoscalingv1.ScaleSpec{ Replicas: replicas, }, + Status: autoscalingv1.ScaleStatus{ + Replicas: replicas, + }, } } diff --git 
diff --git a/pkg/util/constants.go b/pkg/util/constants.go
index 9b560e3b9192..5780fbf3b434 100644
--- a/pkg/util/constants.go
+++ b/pkg/util/constants.go
@@ -73,6 +73,10 @@ const (
 	// ClusterResourceBindingControllerFinalizer is added to ClusterResourceBinding to ensure related Works are deleted
 	// before ClusterResourceBinding itself is deleted.
 	ClusterResourceBindingControllerFinalizer = "karmada.io/cluster-resource-binding-controller"
+
+	// HPAReplicasSyncerFinalizer is added to a HorizontalPodAutoscaler propagated by karmada to ensure
+	// that the actual replicas in member clusters affected by the HPA have been synced before the HPA itself is deleted.
+	HPAReplicasSyncerFinalizer = "karmada.io/hpa-replicas-syncer-controller"
 )

 const (