From a1e8f164cb249659be25de1fdde37f33ede92f46 Mon Sep 17 00:00:00 2001 From: RainbowMango Date: Mon, 30 Jan 2023 12:23:31 +0800 Subject: [PATCH] adopt static check issues Signed-off-by: RainbowMango --- cmd/agent/app/agent.go | 2 +- .../app/options/options.go | 3 +++ .../app/controllermanager.go | 4 +-- cmd/karmada-search/app/karmada-search.go | 3 +++ operator/cmd/operator/app/operator.go | 4 +-- operator/pkg/controller/context/context.go | 4 +-- .../v1alpha1/propagation_helper_test.go | 4 +-- .../v1alpha2/binding_types_helper_test.go | 8 +++--- pkg/controllers/context/context.go | 4 +-- pkg/controllers/context/context_test.go | 2 +- pkg/descheduler/descheduler.go | 6 ++++- pkg/estimator/server/eventhandlers.go | 11 ++++++-- pkg/karmadactl/get/get.go | 2 +- .../defaultinterpreter/dependencies.go | 16 ++++++------ .../defaultinterpreter/dependencies_test.go | 26 +++++++++---------- pkg/scheduler/cache/snapshot.go | 4 +-- pkg/scheduler/event_handler.go | 26 +++++++++++++++---- pkg/search/controller.go | 11 ++++++-- pkg/search/proxy/controller.go | 12 +++++++-- .../proxy/store/multi_cluster_cache_test.go | 22 ++++++++-------- pkg/util/binding.go | 4 +-- pkg/util/binding_test.go | 6 ++--- .../genericmanager/single-cluster-manager.go | 8 +++++- .../typedmanager/single-cluster-manager.go | 8 +++++- pkg/util/helper/binding.go | 4 +-- pkg/util/helper/binding_test.go | 16 ++++++------ pkg/util/helper/workstatus.go | 2 +- pkg/util/helper/workstatus_test.go | 12 ++++----- pkg/util/lifted/retain_test.go | 2 +- pkg/util/lifted/scheduler/cache/cache.go | 10 +++++-- pkg/util/lifted/scheduler/cache/interface.go | 2 ++ pkg/util/lifted/scheduler/cache/snapshot.go | 12 +++++++++ pkg/util/lifted/scheduler/framework/types.go | 7 +++++ pkg/util/lifted/taint.go | 4 +++ pkg/util/lifted/validateclustertaints.go | 4 +++ pkg/util/lifted/visitpod_test.go | 3 +++ test/e2e/framework/dynamic.go | 4 +-- test/e2e/propagationpolicy_test.go | 2 +- test/e2e/rescheduling_test.go | 6 ++--- test/e2e/resource_test.go | 4 +-- test/e2e/resourceinterpreter_test.go | 6 ++--- test/e2e/scheduling_test.go | 2 +- test/e2e/search_test.go | 10 +++---- test/helper/resource.go | 12 ++++----- 44 files changed, 211 insertions(+), 113 deletions(-) diff --git a/cmd/agent/app/agent.go b/cmd/agent/app/agent.go index 1a01a086283a..018b3f068767 100644 --- a/cmd/agent/app/agent.go +++ b/cmd/agent/app/agent.go @@ -99,7 +99,7 @@ cluster and manifests to the Karmada control plane.`, var controllers = make(controllerscontext.Initializers) -var controllersDisabledByDefault = sets.NewString( +var controllersDisabledByDefault = sets.New( "certRotation", ) diff --git a/cmd/aggregated-apiserver/app/options/options.go b/cmd/aggregated-apiserver/app/options/options.go index 9b506ad339a4..0f80327a6768 100644 --- a/cmd/aggregated-apiserver/app/options/options.go +++ b/cmd/aggregated-apiserver/app/options/options.go @@ -143,6 +143,9 @@ func (o *Options) Config() (*aggregatedapiserver.Config, error) { return config, nil } +// disable `deprecation` check until the underlying genericfilters.BasicLongRunningRequestCheck starts using generic Set. 
+// +//nolint:staticcheck func customLongRunningRequestCheck(longRunningVerbs, longRunningSubresources sets.String) apirequest.LongRunningRequestCheck { return func(r *http.Request, requestInfo *apirequest.RequestInfo) bool { reqClone := r.Clone(context.Background()) diff --git a/cmd/controller-manager/app/controllermanager.go b/cmd/controller-manager/app/controllermanager.go index b40fefb286e8..0f0376d04a75 100644 --- a/cmd/controller-manager/app/controllermanager.go +++ b/cmd/controller-manager/app/controllermanager.go @@ -89,7 +89,7 @@ to create regular Kubernetes resources.`, // and update the flag usage. genericFlagSet.AddGoFlagSet(flag.CommandLine) genericFlagSet.Lookup("kubeconfig").Usage = "Path to karmada control plane kubeconfig file." - opts.AddFlags(genericFlagSet, controllers.ControllerNames(), controllersDisabledByDefault.List()) + opts.AddFlags(genericFlagSet, controllers.ControllerNames(), sets.List(controllersDisabledByDefault)) // Set klog flags logsFlagSet := fss.FlagSet("logs") @@ -175,7 +175,7 @@ func Run(ctx context.Context, opts *options.Options) error { var controllers = make(controllerscontext.Initializers) // controllersDisabledByDefault is the set of controllers which is disabled by default -var controllersDisabledByDefault = sets.NewString( +var controllersDisabledByDefault = sets.New( "hpa", ) diff --git a/cmd/karmada-search/app/karmada-search.go b/cmd/karmada-search/app/karmada-search.go index f35734138973..d8e75a131d8b 100644 --- a/cmd/karmada-search/app/karmada-search.go +++ b/cmd/karmada-search/app/karmada-search.go @@ -209,6 +209,9 @@ func config(o *options.Options, outOfTreeRegistryOptions ...Option) (*search.Con return config, nil } +// disable `deprecation` check until the underlying genericfilters.BasicLongRunningRequestCheck starts using generic Set. +// +//nolint:staticcheck func customLongRunningRequestCheck(longRunningVerbs, longRunningSubresources sets.String) request.LongRunningRequestCheck { return func(r *http.Request, requestInfo *request.RequestInfo) bool { if requestInfo.APIGroup == "search.karmada.io" && requestInfo.Resource == "proxying" { diff --git a/operator/cmd/operator/app/operator.go b/operator/cmd/operator/app/operator.go index 2dd2087c3e9f..fc54a3795785 100644 --- a/operator/cmd/operator/app/operator.go +++ b/operator/cmd/operator/app/operator.go @@ -61,7 +61,7 @@ func NewOperatorCommand(ctx context.Context) *cobra.Command { // Add the flag(--kubeconfig) that is added by controller-runtime // (https://github.com/kubernetes-sigs/controller-runtime/blob/v0.11.1/pkg/client/config/config.go#L39). 
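For reference, a minimal sketch (not part of this patch) of the set migration applied throughout this change: the deprecated string-only sets.String/sets.NewString API is replaced by the generic Set[string] API that k8s.io/apimachinery introduced around v0.26. Variable names below are illustrative only.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	// Old, deprecated form:
	//   disabled := sets.NewString("hpa", "certRotation") // sets.String
	//   names := disabled.List()                          // sorted []string

	// Generic form used by this patch:
	disabled := sets.New("hpa", "certRotation") // sets.Set[string], element type inferred
	disabled.Insert("example")                  // mutators keep the same names
	fmt.Println(sets.List(disabled))            // sorting moved to a package-level helper
}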
genericFlagSet.AddGoFlagSet(flag.CommandLine) - o.AddFlags(genericFlagSet, controllers.ControllerNames(), controllersDisabledByDefault.List()) + o.AddFlags(genericFlagSet, controllers.ControllerNames(), sets.List(controllersDisabledByDefault)) // Set klog flags logsFlagSet := fss.FlagSet("logs") @@ -112,7 +112,7 @@ func Run(ctx context.Context, o *options.Options) error { var controllers = make(ctrlctx.Initializers) // controllersDisabledByDefault is the set of controllers which is disabled by default -var controllersDisabledByDefault = sets.NewString() +var controllersDisabledByDefault = sets.New[string]() func init() { controllers["karmada"] = startKarmadaController diff --git a/operator/pkg/controller/context/context.go b/operator/pkg/controller/context/context.go index 24666f8afe85..e6d65367c9e4 100644 --- a/operator/pkg/controller/context/context.go +++ b/operator/pkg/controller/context/context.go @@ -19,7 +19,7 @@ type Context struct { } // IsControllerEnabled checks if the context's controllers enabled or not -func (c Context) IsControllerEnabled(name string, disabledByDefaultControllers sets.String) bool { +func (c Context) IsControllerEnabled(name string, disabledByDefaultControllers sets.Set[string]) bool { hasStar := false for _, ctrl := range c.Controllers { if ctrl == name { @@ -55,7 +55,7 @@ func (i Initializers) ControllerNames() []string { } // StartControllers starts a set of controllers with a specified ControllerContext -func (i Initializers) StartControllers(ctx Context, controllersDisabledByDefault sets.String) error { +func (i Initializers) StartControllers(ctx Context, controllersDisabledByDefault sets.Set[string]) error { for controllerName, initFn := range i { if !ctx.IsControllerEnabled(controllerName, controllersDisabledByDefault) { klog.Warningf("%q is disabled", controllerName) diff --git a/pkg/apis/policy/v1alpha1/propagation_helper_test.go b/pkg/apis/policy/v1alpha1/propagation_helper_test.go index 7798f38b7428..18ea8e5e0f8a 100644 --- a/pkg/apis/policy/v1alpha1/propagation_helper_test.go +++ b/pkg/apis/policy/v1alpha1/propagation_helper_test.go @@ -18,7 +18,7 @@ func TestPropagationPolicy_ExplicitPriority(t *testing.T) { }, { name: "no priority declared should defaults to zero", - declaredPriority: pointer.Int32Ptr(20), + declaredPriority: pointer.Int32(20), expectedPriority: 20, }, } @@ -46,7 +46,7 @@ func TestClusterPropagationPolicy_ExplicitPriority(t *testing.T) { }, { name: "no priority declared should defaults to zero", - declaredPriority: pointer.Int32Ptr(20), + declaredPriority: pointer.Int32(20), expectedPriority: 20, }, } diff --git a/pkg/apis/work/v1alpha2/binding_types_helper_test.go b/pkg/apis/work/v1alpha2/binding_types_helper_test.go index 8f8cf3f7f44b..c535e2fec3f8 100644 --- a/pkg/apis/work/v1alpha2/binding_types_helper_test.go +++ b/pkg/apis/work/v1alpha2/binding_types_helper_test.go @@ -165,7 +165,7 @@ func TestResourceBindingSpec_GracefulEvictCluster(t *testing.T) { GracefulEvictionTasks: []GracefulEvictionTask{ { FromCluster: "m1", - Replicas: pointer.Int32Ptr(1), + Replicas: pointer.Int32(1), Reason: EvictionReasonTaintUntolerated, Message: "graceful eviction", Producer: EvictionProducerTaintManager, @@ -189,7 +189,7 @@ func TestResourceBindingSpec_GracefulEvictCluster(t *testing.T) { GracefulEvictionTasks: []GracefulEvictionTask{ { FromCluster: "m2", - Replicas: pointer.Int32Ptr(2), + Replicas: pointer.Int32(2), Reason: EvictionReasonTaintUntolerated, Message: "graceful eviction", Producer: EvictionProducerTaintManager, @@ -213,7 
+213,7 @@ func TestResourceBindingSpec_GracefulEvictCluster(t *testing.T) { GracefulEvictionTasks: []GracefulEvictionTask{ { FromCluster: "m3", - Replicas: pointer.Int32Ptr(3), + Replicas: pointer.Int32(3), Reason: EvictionReasonTaintUntolerated, Message: "graceful eviction", Producer: EvictionProducerTaintManager, @@ -241,7 +241,7 @@ func TestResourceBindingSpec_GracefulEvictCluster(t *testing.T) { }, { FromCluster: "m3", - Replicas: pointer.Int32Ptr(3), + Replicas: pointer.Int32(3), Reason: EvictionReasonTaintUntolerated, Message: "graceful eviction", Producer: EvictionProducerTaintManager, diff --git a/pkg/controllers/context/context.go b/pkg/controllers/context/context.go index 605fc8867042..992d28998c2e 100644 --- a/pkg/controllers/context/context.go +++ b/pkg/controllers/context/context.go @@ -93,7 +93,7 @@ type Context struct { } // IsControllerEnabled check if a specified controller enabled or not. -func (c Context) IsControllerEnabled(name string, disabledByDefaultControllers sets.String) bool { +func (c Context) IsControllerEnabled(name string, disabledByDefaultControllers sets.Set[string]) bool { hasStar := false for _, ctrl := range c.Opts.Controllers { if ctrl == name { @@ -129,7 +129,7 @@ func (i Initializers) ControllerNames() []string { } // StartControllers starts a set of controllers with a specified ControllerContext -func (i Initializers) StartControllers(ctx Context, controllersDisabledByDefault sets.String) error { +func (i Initializers) StartControllers(ctx Context, controllersDisabledByDefault sets.Set[string]) error { for controllerName, initFn := range i { if !ctx.IsControllerEnabled(controllerName, controllersDisabledByDefault) { klog.Warningf("%q is disabled", controllerName) diff --git a/pkg/controllers/context/context_test.go b/pkg/controllers/context/context_test.go index 7f56632d7fc2..1cc604d8ec20 100644 --- a/pkg/controllers/context/context_test.go +++ b/pkg/controllers/context/context_test.go @@ -71,7 +71,7 @@ func TestContext_IsControllerEnabled(t *testing.T) { Controllers: tt.controllers, }, } - if got := c.IsControllerEnabled(tt.controllerName, sets.NewString(tt.disabledByDefaultControllers...)); got != tt.expected { + if got := c.IsControllerEnabled(tt.controllerName, sets.New(tt.disabledByDefaultControllers...)); got != tt.expected { t.Errorf("expected %v, but got %v", tt.expected, got) } }) diff --git a/pkg/descheduler/descheduler.go b/pkg/descheduler/descheduler.go index 24873276095f..da7c4564f725 100644 --- a/pkg/descheduler/descheduler.go +++ b/pkg/descheduler/descheduler.go @@ -93,11 +93,15 @@ func NewDescheduler(karmadaClient karmadaclientset.Interface, kubeClient kuberne } desched.deschedulerWorker = util.NewAsyncWorker(deschedulerWorkerOptions) - desched.clusterInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ + _, err := desched.clusterInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: desched.addCluster, UpdateFunc: desched.updateCluster, DeleteFunc: desched.deleteCluster, }) + if err != nil { + klog.Errorf("Failed add handler for Clusters: %v", err) + return nil + } eventBroadcaster := record.NewBroadcaster() eventBroadcaster.StartStructuredLogging(0) diff --git a/pkg/estimator/server/eventhandlers.go b/pkg/estimator/server/eventhandlers.go index 73b45a6610a9..c5f3bf3c9021 100644 --- a/pkg/estimator/server/eventhandlers.go +++ b/pkg/estimator/server/eventhandlers.go @@ -15,7 +15,7 @@ import ( func addAllEventHandlers(es *AccurateSchedulerEstimatorServer, informerFactory informers.SharedInformerFactory) { // scheduled 
pod cache - informerFactory.Core().V1().Pods().Informer().AddEventHandler( + _, err := informerFactory.Core().V1().Pods().Informer().AddEventHandler( cache.FilteringResourceEventHandler{ FilterFunc: func(obj interface{}) bool { switch t := obj.(type) { @@ -41,13 +41,20 @@ func addAllEventHandlers(es *AccurateSchedulerEstimatorServer, informerFactory i }, }, ) - informerFactory.Core().V1().Nodes().Informer().AddEventHandler( + if err != nil { + klog.Errorf("Failed to add handler for Pods: %v", err) + } + + _, err = informerFactory.Core().V1().Nodes().Informer().AddEventHandler( cache.ResourceEventHandlerFuncs{ AddFunc: es.addNodeToCache, UpdateFunc: es.updateNodeInCache, DeleteFunc: es.deleteNodeFromCache, }, ) + if err != nil { + klog.Errorf("Failed to add handler for Pods: %v", err) + } } func (es *AccurateSchedulerEstimatorServer) addPodToCache(obj interface{}) { diff --git a/pkg/karmadactl/get/get.go b/pkg/karmadactl/get/get.go index 0318b5ed08ae..30a694b601da 100644 --- a/pkg/karmadactl/get/get.go +++ b/pkg/karmadactl/get/get.go @@ -624,7 +624,7 @@ func (g *CommandGetOptions) watch(watchObjs []WatchObj) error { info := infos[0] mapping := info.ResourceMapping() - outputObjects := utilpointer.BoolPtr(!g.WatchOnly) + outputObjects := utilpointer.Bool(!g.WatchOnly) printer, err := g.ToPrinter(mapping, outputObjects, g.AllNamespaces, false) if err != nil { diff --git a/pkg/resourceinterpreter/defaultinterpreter/dependencies.go b/pkg/resourceinterpreter/defaultinterpreter/dependencies.go index 1bdc8e912582..4d51da97bb9b 100644 --- a/pkg/resourceinterpreter/defaultinterpreter/dependencies.go +++ b/pkg/resourceinterpreter/defaultinterpreter/dependencies.go @@ -157,8 +157,8 @@ func getDependenciesFromPodTemplate(podObj *corev1.Pod) ([]configv1alpha1.Depend return dependentObjectRefs, nil } -func getSecretNames(pod *corev1.Pod) sets.String { - result := sets.NewString() +func getSecretNames(pod *corev1.Pod) sets.Set[string] { + result := sets.New[string]() lifted.VisitPodSecretNames(pod, func(name string) bool { result.Insert(name) return true @@ -166,16 +166,16 @@ func getSecretNames(pod *corev1.Pod) sets.String { return result } -func getServiceAccountNames(pod *corev1.Pod) sets.String { - result := sets.NewString() +func getServiceAccountNames(pod *corev1.Pod) sets.Set[string] { + result := sets.New[string]() if pod.Spec.ServiceAccountName != "" && pod.Spec.ServiceAccountName != "default" { result.Insert(pod.Spec.ServiceAccountName) } return result } -func getConfigMapNames(pod *corev1.Pod) sets.String { - result := sets.NewString() +func getConfigMapNames(pod *corev1.Pod) sets.Set[string] { + result := sets.New[string]() lifted.VisitPodConfigmapNames(pod, func(name string) bool { result.Insert(name) return true @@ -183,8 +183,8 @@ func getConfigMapNames(pod *corev1.Pod) sets.String { return result } -func getPVCNames(pod *corev1.Pod) sets.String { - result := sets.NewString() +func getPVCNames(pod *corev1.Pod) sets.Set[string] { + result := sets.New[string]() for i := range pod.Spec.Volumes { volume := pod.Spec.Volumes[i] if volume.PersistentVolumeClaim != nil { diff --git a/pkg/resourceinterpreter/defaultinterpreter/dependencies_test.go b/pkg/resourceinterpreter/defaultinterpreter/dependencies_test.go index 62f701cfd125..b981eb9e6ec5 100644 --- a/pkg/resourceinterpreter/defaultinterpreter/dependencies_test.go +++ b/pkg/resourceinterpreter/defaultinterpreter/dependencies_test.go @@ -35,12 +35,12 @@ func TestGetSecretNames(t *testing.T) { tests := []struct { name string pod *corev1.Pod - 
expected sets.String + expected sets.Set[string] }{ { name: "get secret names from pod", pod: fakePod, - expected: sets.NewString("fake-foo", "fake-bar"), + expected: sets.New("fake-foo", "fake-bar"), }, } @@ -49,7 +49,7 @@ func TestGetSecretNames(t *testing.T) { t.Run(tt.name, func(t *testing.T) { t.Parallel() res := getSecretNames(tt.pod) - if !reflect.DeepEqual(res, tt.expected) { + if !res.Equal(tt.expected) { t.Errorf("getSecretNames() = %v, want %v", res, tt.expected) } }) @@ -84,12 +84,12 @@ func TestGetConfigMapNames(t *testing.T) { tests := []struct { name string pod *corev1.Pod - expected sets.String + expected sets.Set[string] }{ { name: "get configMap names from pod", pod: fakePod, - expected: sets.NewString("fake-foo", "fake-bar"), + expected: sets.New("fake-foo", "fake-bar"), }, } @@ -98,7 +98,7 @@ func TestGetConfigMapNames(t *testing.T) { t.Run(tt.name, func(t *testing.T) { t.Parallel() res := getConfigMapNames(tt.pod) - if !reflect.DeepEqual(res, tt.expected) { + if !res.Equal(tt.expected) { t.Errorf("getConfigMapNames() = %v, want %v", res, tt.expected) } }) @@ -131,12 +131,12 @@ func TestGetPVCNames(t *testing.T) { tests := []struct { name string pod *corev1.Pod - expected sets.String + expected sets.Set[string] }{ { name: "get pvc names from pod", pod: fakePod, - expected: sets.NewString("fake-foo", "fake-bar"), + expected: sets.New("fake-foo", "fake-bar"), }, } @@ -145,7 +145,7 @@ func TestGetPVCNames(t *testing.T) { t.Run(tt.name, func(t *testing.T) { t.Parallel() res := getPVCNames(tt.pod) - if !reflect.DeepEqual(res, tt.expected) { + if !res.Equal(tt.expected) { t.Errorf("getPVCNames() = %v, want %v", res, tt.expected) } }) @@ -219,24 +219,24 @@ func Test_getServiceAccountNames(t *testing.T) { tests := []struct { name string args args - want sets.String + want sets.Set[string] }{ { name: "get ServiceAccountName from pod ", args: args{pod: &corev1.Pod{Spec: corev1.PodSpec{ServiceAccountName: "test"}}}, - want: sets.NewString("test"), + want: sets.New("test"), }, { name: "get default ServiceAccountName from pod ", args: args{pod: &corev1.Pod{Spec: corev1.PodSpec{ServiceAccountName: "default"}}}, - want: sets.NewString(), + want: sets.New[string](), }, } for i := range tests { tt := tests[i] t.Run(tt.name, func(t *testing.T) { t.Parallel() - if got := getServiceAccountNames(tt.args.pod); !reflect.DeepEqual(got, tt.want) { + if got := getServiceAccountNames(tt.args.pod); !got.Equal(tt.want) { t.Errorf("getServiceAccountNames() = %v, want %v", got, tt.want) } }) diff --git a/pkg/scheduler/cache/snapshot.go b/pkg/scheduler/cache/snapshot.go index 2bc5e319aa8e..2e22f4f3ec1c 100644 --- a/pkg/scheduler/cache/snapshot.go +++ b/pkg/scheduler/cache/snapshot.go @@ -42,8 +42,8 @@ func (s *Snapshot) GetReadyClusters() []*framework.ClusterInfo { } // GetReadyClusterNames returns the clusterNames in ready status. -func (s *Snapshot) GetReadyClusterNames() sets.String { - readyClusterNames := sets.NewString() +func (s *Snapshot) GetReadyClusterNames() sets.Set[string] { + readyClusterNames := sets.New[string]() for _, c := range s.clusterInfoList { if util.IsClusterReady(&c.Cluster().Status) { readyClusterNames.Insert(c.Cluster().Name) diff --git a/pkg/scheduler/event_handler.go b/pkg/scheduler/event_handler.go index 0ec5202b766b..d2393ea8d196 100644 --- a/pkg/scheduler/event_handler.go +++ b/pkg/scheduler/event_handler.go @@ -23,41 +23,57 @@ import ( // to add event handlers for various informers. 
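The informer-related hunks in this patch all follow the same pattern: since client-go v0.26, AddEventHandler returns a registration handle and an error instead of nothing, so the error is now captured and logged rather than silently dropped. A minimal sketch with a hypothetical helper name, assuming client-go >= v0.26:

package example

import (
	"k8s.io/client-go/tools/cache"
	"k8s.io/klog/v2"
)

// addHandler registers an add handler on any shared informer and logs,
// rather than ignores, the error returned by AddEventHandler.
func addHandler(informer cache.SharedInformer, onAdd func(obj interface{})) {
	reg, err := informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: onAdd,
	})
	if err != nil {
		klog.Errorf("Failed to add event handler: %v", err)
		return
	}
	// reg could later be passed to informer.RemoveEventHandler(reg) to unregister.
	_ = reg
}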
func (s *Scheduler) addAllEventHandlers() { bindingInformer := s.informerFactory.Work().V1alpha2().ResourceBindings().Informer() - bindingInformer.AddEventHandler(cache.FilteringResourceEventHandler{ + _, err := bindingInformer.AddEventHandler(cache.FilteringResourceEventHandler{ FilterFunc: s.resourceBindingEventFilter, Handler: cache.ResourceEventHandlerFuncs{ AddFunc: s.onResourceBindingAdd, UpdateFunc: s.onResourceBindingUpdate, }, }) + if err != nil { + klog.Errorf("Failed to add handlers for ResourceBindings: %v", err) + } policyInformer := s.informerFactory.Policy().V1alpha1().PropagationPolicies().Informer() - policyInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ + _, err = policyInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ UpdateFunc: s.onPropagationPolicyUpdate, }) + if err != nil { + klog.Errorf("Failed to add handlers for PropagationPolicies: %v", err) + } clusterBindingInformer := s.informerFactory.Work().V1alpha2().ClusterResourceBindings().Informer() - clusterBindingInformer.AddEventHandler(cache.FilteringResourceEventHandler{ + _, err = clusterBindingInformer.AddEventHandler(cache.FilteringResourceEventHandler{ FilterFunc: s.resourceBindingEventFilter, Handler: cache.ResourceEventHandlerFuncs{ AddFunc: s.onResourceBindingAdd, UpdateFunc: s.onResourceBindingUpdate, }, }) + if err != nil { + klog.Errorf("Failed to add handlers for ClusterResourceBindings: %v", err) + } clusterPolicyInformer := s.informerFactory.Policy().V1alpha1().ClusterPropagationPolicies().Informer() - clusterPolicyInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ + _, err = clusterPolicyInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ UpdateFunc: s.onClusterPropagationPolicyUpdate, }) + if err != nil { + klog.Errorf("Failed to add handlers for ClusterPropagationPolicies: %v", err) + } memClusterInformer := s.informerFactory.Cluster().V1alpha1().Clusters().Informer() - memClusterInformer.AddEventHandler( + _, err = memClusterInformer.AddEventHandler( cache.ResourceEventHandlerFuncs{ AddFunc: s.addCluster, UpdateFunc: s.updateCluster, DeleteFunc: s.deleteCluster, }, ) + if err != nil { + klog.Errorf("Failed to add handlers for Clusters: %v", err) + } + // ignore the error here because the informers haven't been started _ = bindingInformer.SetTransform(fedinformer.StripUnusedFields) _ = policyInformer.SetTransform(fedinformer.StripUnusedFields) diff --git a/pkg/search/controller.go b/pkg/search/controller.go index 61e321508469..ecc90a46cd8b 100644 --- a/pkg/search/controller.go +++ b/pkg/search/controller.go @@ -83,18 +83,25 @@ func NewController(restConfig *rest.Config, factory informerfactory.SharedInform // addAllEventHandlers adds all event handlers to the informer func (c *Controller) addAllEventHandlers() { clusterInformer := c.informerFactory.Cluster().V1alpha1().Clusters().Informer() - clusterInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ + _, err := clusterInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: c.addCluster, UpdateFunc: c.updateCluster, DeleteFunc: c.deleteCluster, }) + if err != nil { + klog.Errorf("Failed to add handlers for Clusters: %v", err) + } resourceRegistryInformer := c.informerFactory.Search().V1alpha1().ResourceRegistries().Informer() - resourceRegistryInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ + _, err = resourceRegistryInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: c.addResourceRegistry, UpdateFunc: c.updateResourceRegistry, DeleteFunc: c.deleteResourceRegistry, }) + if err 
!= nil { + klog.Errorf("Failed to add handlers for Clusters: %v", err) + } + // ignore the error here because the informers haven't been started _ = clusterInformer.SetTransform(fedinformer.StripUnusedFields) _ = resourceRegistryInformer.SetTransform(fedinformer.StripUnusedFields) diff --git a/pkg/search/proxy/controller.go b/pkg/search/proxy/controller.go index 41e3a3c263e7..c072b23e7517 100644 --- a/pkg/search/proxy/controller.go +++ b/pkg/search/proxy/controller.go @@ -108,8 +108,16 @@ func NewController(option NewControllerOption) (*Controller, error) { }, } - option.KarmadaFactory.Cluster().V1alpha1().Clusters().Informer().AddEventHandler(resourceEventHandler) - option.KarmadaFactory.Search().V1alpha1().ResourceRegistries().Informer().AddEventHandler(resourceEventHandler) + _, err = option.KarmadaFactory.Cluster().V1alpha1().Clusters().Informer().AddEventHandler(resourceEventHandler) + if err != nil { + klog.Errorf("Failed to add handler for Clusters: %v", err) + return nil, err + } + _, err = option.KarmadaFactory.Search().V1alpha1().ResourceRegistries().Informer().AddEventHandler(resourceEventHandler) + if err != nil { + klog.Errorf("Failed to add handler for ResourceRegistries: %v", err) + return nil, err + } return ctl, nil } diff --git a/pkg/search/proxy/store/multi_cluster_cache_test.go b/pkg/search/proxy/store/multi_cluster_cache_test.go index 0788cffe0448..f1402a12a852 100644 --- a/pkg/search/proxy/store/multi_cluster_cache_test.go +++ b/pkg/search/proxy/store/multi_cluster_cache_test.go @@ -438,7 +438,7 @@ func TestMultiClusterCache_List(t *testing.T) { } type want struct { resourceVersion string - names sets.String + names sets.Set[string] errAssert func(error) bool } tests := []struct { @@ -460,7 +460,7 @@ func TestMultiClusterCache_List(t *testing.T) { want: want{ // fakeDynamic returns list with resourceVersion="" resourceVersion: "", - names: sets.NewString(), + names: sets.New[string](), errAssert: noError, }, }, @@ -478,7 +478,7 @@ func TestMultiClusterCache_List(t *testing.T) { want: want{ // fakeDynamic returns list with resourceVersion="" resourceVersion: buildMultiClusterRV("cluster1", "", "cluster2", ""), - names: sets.NewString("pod11", "pod12", "pod13", "pod14", "pod15", "pod21", "pod22", "pod23", "pod24", "pod25"), + names: sets.New[string]("pod11", "pod12", "pod13", "pod14", "pod15", "pod21", "pod22", "pod23", "pod24", "pod25"), errAssert: noError, }, }, @@ -498,7 +498,7 @@ func TestMultiClusterCache_List(t *testing.T) { want: want{ // fakeDynamic returns list with resourceVersion="" resourceVersion: buildMultiClusterRV("cluster1", "", "cluster2", ""), - names: sets.NewString("pod11", "pod13", "pod21", "pod23"), + names: sets.New[string]("pod11", "pod13", "pod21", "pod23"), errAssert: noError, }, }, @@ -531,7 +531,7 @@ func TestMultiClusterCache_List(t *testing.T) { if tt.want.resourceVersion != object.GetResourceVersion() { t.Errorf("ResourceVersion want=%v, actual=%v", tt.want.resourceVersion, object.GetResourceVersion()) } - names := sets.NewString() + names := sets.New[string]() err = meta.EachListItem(obj, func(o runtime.Object) error { a, err := meta.Accessor(o) @@ -547,7 +547,7 @@ func TestMultiClusterCache_List(t *testing.T) { } if !tt.want.names.Equal(names) { - t.Errorf("List items want=%v, actual=%v", strings.Join(tt.want.names.List(), ","), strings.Join(names.List(), ",")) + t.Errorf("List items want=%v, actual=%v", strings.Join(sets.List(tt.want.names), ","), strings.Join(sets.List(names), ",")) } }) } @@ -652,7 +652,7 @@ func 
TestMultiClusterCache_Watch(t *testing.T) { options *metainternalversion.ListOptions } type want struct { - gets sets.String + gets sets.Set[string] } tests := []struct { name string @@ -667,7 +667,7 @@ func TestMultiClusterCache_Watch(t *testing.T) { }, }, want: want{ - gets: sets.NewString("pod11", "pod12", "pod13", "pod21", "pod22", "pod23"), + gets: sets.New[string]("pod11", "pod12", "pod13", "pod21", "pod22", "pod23"), }, }, { @@ -678,7 +678,7 @@ func TestMultiClusterCache_Watch(t *testing.T) { }, }, want: want{ - gets: sets.NewString("pod13", "pod21", "pod22", "pod23"), + gets: sets.New[string]("pod13", "pod21", "pod22", "pod23"), }, }, { @@ -689,7 +689,7 @@ func TestMultiClusterCache_Watch(t *testing.T) { }, }, want: want{ - gets: sets.NewString("pod13", "pod23"), + gets: sets.New[string]("pod13", "pod23"), }, }, } @@ -706,7 +706,7 @@ func TestMultiClusterCache_Watch(t *testing.T) { defer watcher.Stop() timeout := time.After(time.Second * 5) - gets := sets.NewString() + gets := sets.New[string]() LOOP: for { select { diff --git a/pkg/util/binding.go b/pkg/util/binding.go index fcb16fc92530..f23a0f38a3ae 100644 --- a/pkg/util/binding.go +++ b/pkg/util/binding.go @@ -46,8 +46,8 @@ func GetSumOfReplicas(clusters []workv1alpha2.TargetCluster) int32 { } // ConvertToClusterNames will convert a cluster slice to clusterName's sets.String -func ConvertToClusterNames(clusters []workv1alpha2.TargetCluster) sets.String { - clusterNames := sets.NewString() +func ConvertToClusterNames(clusters []workv1alpha2.TargetCluster) sets.Set[string] { + clusterNames := sets.New[string]() for _, cluster := range clusters { clusterNames.Insert(cluster.Name) } diff --git a/pkg/util/binding_test.go b/pkg/util/binding_test.go index 44a970789e07..0474b4b1f55d 100644 --- a/pkg/util/binding_test.go +++ b/pkg/util/binding_test.go @@ -215,12 +215,12 @@ func TestConvertToClusterNames(t *testing.T) { tests := []struct { name string clusters []workv1alpha2.TargetCluster - expected sets.String + expected sets.Set[string] }{ { name: "empty", clusters: []workv1alpha2.TargetCluster{}, - expected: sets.String{}, + expected: sets.New[string](), }, { name: "not empty", @@ -234,7 +234,7 @@ func TestConvertToClusterNames(t *testing.T) { Replicas: 3, }, }, - expected: sets.NewString(ClusterMember1, ClusterMember2), + expected: sets.New(ClusterMember1, ClusterMember2), }, } diff --git a/pkg/util/fedinformer/genericmanager/single-cluster-manager.go b/pkg/util/fedinformer/genericmanager/single-cluster-manager.go index 1f940b6c1529..1875e3a52d1d 100644 --- a/pkg/util/fedinformer/genericmanager/single-cluster-manager.go +++ b/pkg/util/fedinformer/genericmanager/single-cluster-manager.go @@ -9,6 +9,7 @@ import ( "k8s.io/client-go/dynamic" "k8s.io/client-go/dynamic/dynamicinformer" "k8s.io/client-go/tools/cache" + "k8s.io/klog/v2" "github.com/karmada-io/karmada/pkg/util" ) @@ -91,7 +92,12 @@ func (s *singleClusterInformerManagerImpl) ForResource(resource schema.GroupVers return } - s.informerFactory.ForResource(resource).Informer().AddEventHandler(handler) + _, err := s.informerFactory.ForResource(resource).Informer().AddEventHandler(handler) + if err != nil { + klog.Errorf("Failed to add handler for resource(%s): %v", resource.String(), err) + return + } + s.appendHandler(resource, handler) } diff --git a/pkg/util/fedinformer/typedmanager/single-cluster-manager.go b/pkg/util/fedinformer/typedmanager/single-cluster-manager.go index a627c99bf9c0..5bf2ea67b1e4 100644 --- a/pkg/util/fedinformer/typedmanager/single-cluster-manager.go +++ 
b/pkg/util/fedinformer/typedmanager/single-cluster-manager.go @@ -11,6 +11,7 @@ import ( "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/cache" + "k8s.io/klog/v2" "github.com/karmada-io/karmada/pkg/util" ) @@ -131,7 +132,12 @@ func (s *singleClusterInformerManagerImpl) ForResource(resource schema.GroupVers } s.lock.RUnlock() - resourceInformer.Informer().AddEventHandler(handler) + _, err = resourceInformer.Informer().AddEventHandler(handler) + if err != nil { + klog.Errorf("Failed to add handler for resource(%s): %v", resource.String(), err) + return err + } + s.appendHandler(resource, handler) return nil } diff --git a/pkg/util/helper/binding.go b/pkg/util/helper/binding.go index 5ed57ed4da59..e6a140ce82db 100644 --- a/pkg/util/helper/binding.go +++ b/pkg/util/helper/binding.go @@ -159,7 +159,7 @@ func HasScheduledReplica(scheduleResult []workv1alpha2.TargetCluster) bool { } // ObtainBindingSpecExistingClusters will obtain the cluster slice existing in the binding's spec field. -func ObtainBindingSpecExistingClusters(bindingSpec workv1alpha2.ResourceBindingSpec) sets.String { +func ObtainBindingSpecExistingClusters(bindingSpec workv1alpha2.ResourceBindingSpec) sets.Set[string] { clusterNames := util.ConvertToClusterNames(bindingSpec.Clusters) for _, binding := range bindingSpec.RequiredBy { for _, targetCluster := range binding.Clusters { @@ -176,7 +176,7 @@ func ObtainBindingSpecExistingClusters(bindingSpec workv1alpha2.ResourceBindingS // FindOrphanWorks retrieves all works that labeled with current binding(ResourceBinding or ClusterResourceBinding) objects, // then pick the works that not meet current binding declaration. -func FindOrphanWorks(c client.Client, bindingNamespace, bindingName string, expectClusters sets.String) ([]workv1alpha1.Work, error) { +func FindOrphanWorks(c client.Client, bindingNamespace, bindingName string, expectClusters sets.Set[string]) ([]workv1alpha1.Work, error) { var needJudgeWorks []workv1alpha1.Work workList, err := GetWorksByBindingNamespaceName(c, bindingNamespace, bindingName) if err != nil { diff --git a/pkg/util/helper/binding_test.go b/pkg/util/helper/binding_test.go index f0621e65418c..1f0eeb48d89e 100644 --- a/pkg/util/helper/binding_test.go +++ b/pkg/util/helper/binding_test.go @@ -253,7 +253,7 @@ func TestObtainBindingSpecExistingClusters(t *testing.T) { tests := []struct { name string bindingSpec workv1alpha2.ResourceBindingSpec - want sets.String + want sets.Set[string] }{ { name: "unique cluster name without GracefulEvictionTasks field", @@ -279,7 +279,7 @@ func TestObtainBindingSpecExistingClusters(t *testing.T) { }, }, }, - want: sets.NewString("member1", "member2", "member3"), + want: sets.New("member1", "member2", "member3"), }, { name: "all spec fields do not contain duplicate cluster names", @@ -310,7 +310,7 @@ func TestObtainBindingSpecExistingClusters(t *testing.T) { }, }, }, - want: sets.NewString("member1", "member2", "member3", "member4"), + want: sets.New("member1", "member2", "member3", "member4"), }, { name: "duplicate cluster name", @@ -341,7 +341,7 @@ func TestObtainBindingSpecExistingClusters(t *testing.T) { }, }, }, - want: sets.NewString("member1", "member2", "member3"), + want: sets.New("member1", "member2", "member3"), }, } for _, tt := range tests { @@ -412,7 +412,7 @@ func TestFindOrphanWorks(t *testing.T) { c client.Client bindingNamespace string bindingName string - expectClusters sets.String + expectClusters sets.Set[string] } tests := []struct { name string @@ -441,7 +441,7 @@ 
func TestFindOrphanWorks(t *testing.T) { ).Build(), bindingNamespace: "default", bindingName: "binding", - expectClusters: sets.NewString("clusterx"), + expectClusters: sets.New("clusterx"), }, want: nil, wantErr: true, @@ -503,7 +503,7 @@ func TestFindOrphanWorks(t *testing.T) { ).Build(), bindingNamespace: "default", bindingName: "binding", - expectClusters: sets.NewString("clusterx"), + expectClusters: sets.New("clusterx"), }, want: []workv1alpha1.Work{ { @@ -577,7 +577,7 @@ func TestFindOrphanWorks(t *testing.T) { ).Build(), bindingNamespace: "", bindingName: "binding", - expectClusters: sets.NewString("clusterx"), + expectClusters: sets.New("clusterx"), }, want: []workv1alpha1.Work{ { diff --git a/pkg/util/helper/workstatus.go b/pkg/util/helper/workstatus.go index bc194c176991..74059a81ea13 100644 --- a/pkg/util/helper/workstatus.go +++ b/pkg/util/helper/workstatus.go @@ -259,7 +259,7 @@ func equalIdentifier(targetIdentifier *workv1alpha1.ResourceIdentifier, ordinal } // worksFullyApplied checks if all works are applied according the scheduled result and collected status. -func worksFullyApplied(aggregatedStatuses []workv1alpha2.AggregatedStatusItem, targetClusters sets.String) bool { +func worksFullyApplied(aggregatedStatuses []workv1alpha2.AggregatedStatusItem, targetClusters sets.Set[string]) bool { // short path: not scheduled if len(targetClusters) == 0 { return false diff --git a/pkg/util/helper/workstatus_test.go b/pkg/util/helper/workstatus_test.go index 33e8857a0846..3dec98487bae 100644 --- a/pkg/util/helper/workstatus_test.go +++ b/pkg/util/helper/workstatus_test.go @@ -11,7 +11,7 @@ import ( func TestWorksFullyApplied(t *testing.T) { type args struct { aggregatedStatuses []workv1alpha2.AggregatedStatusItem - targetClusters sets.String + targetClusters sets.Set[string] } tests := []struct { name string @@ -35,7 +35,7 @@ func TestWorksFullyApplied(t *testing.T) { name: "no aggregatedStatuses", args: args{ aggregatedStatuses: nil, - targetClusters: sets.NewString("member1"), + targetClusters: sets.New("member1"), }, want: false, }, @@ -48,7 +48,7 @@ func TestWorksFullyApplied(t *testing.T) { Applied: true, }, }, - targetClusters: sets.NewString("member1", "member2"), + targetClusters: sets.New("member1", "member2"), }, want: false, }, @@ -65,7 +65,7 @@ func TestWorksFullyApplied(t *testing.T) { Applied: true, }, }, - targetClusters: sets.NewString("member1", "member2"), + targetClusters: sets.New("member1", "member2"), }, want: true, }, @@ -82,7 +82,7 @@ func TestWorksFullyApplied(t *testing.T) { Applied: false, }, }, - targetClusters: sets.NewString("member1", "member2"), + targetClusters: sets.New("member1", "member2"), }, want: false, }, @@ -95,7 +95,7 @@ func TestWorksFullyApplied(t *testing.T) { Applied: true, }, }, - targetClusters: sets.NewString("member2"), + targetClusters: sets.New("member2"), }, want: false, }, diff --git a/pkg/util/lifted/retain_test.go b/pkg/util/lifted/retain_test.go index 2e4eab128624..26570b733d56 100644 --- a/pkg/util/lifted/retain_test.go +++ b/pkg/util/lifted/retain_test.go @@ -93,7 +93,7 @@ func TestRetainHealthCheckNodePortInServiceFields(t *testing.T) { }, }, true, - pointer.Int64Ptr(1000), + pointer.Int64(1000), }, } for _, test := range tests { diff --git a/pkg/util/lifted/scheduler/cache/cache.go b/pkg/util/lifted/scheduler/cache/cache.go index ab7be0790d85..136b8726b998 100644 --- a/pkg/util/lifted/scheduler/cache/cache.go +++ b/pkg/util/lifted/scheduler/cache/cache.go @@ -65,6 +65,8 @@ type cacheImpl struct { mu sync.RWMutex // a set 
of assumed pod keys. // The key could further be used to get an entry in podStates. + //nolint:staticcheck + // disable `deprecation` check for lifted code. assumedPods sets.String // a map from pod key to podState. podStates map[string]*podState @@ -90,6 +92,8 @@ type imageState struct { // Size of the image size int64 // A set of node names for nodes having this image present + //nolint:staticcheck + // disable `deprecation` check for lifted code. nodes sets.String } @@ -107,8 +111,10 @@ func newCache(ttl, period time.Duration, stop <-chan struct{}) *cacheImpl { period: period, stop: stop, - nodes: make(map[string]*nodeInfoListItem), - nodeTree: newNodeTree(nil), + nodes: make(map[string]*nodeInfoListItem), + nodeTree: newNodeTree(nil), + //nolint:staticcheck + // disable `deprecation` check for lifted code. assumedPods: make(sets.String), podStates: make(map[string]*podState), imageStates: make(map[string]*imageState), diff --git a/pkg/util/lifted/scheduler/cache/interface.go b/pkg/util/lifted/scheduler/cache/interface.go index 7c162451ab65..0ffbb2973197 100644 --- a/pkg/util/lifted/scheduler/cache/interface.go +++ b/pkg/util/lifted/scheduler/cache/interface.go @@ -122,6 +122,8 @@ type Cache interface { // Dump is a dump of the cache state. type Dump struct { + //nolint:staticcheck + // disable `deprecation` check for lifted code. AssumedPods sets.String Nodes map[string]*framework.NodeInfo } diff --git a/pkg/util/lifted/scheduler/cache/snapshot.go b/pkg/util/lifted/scheduler/cache/snapshot.go index 5b31adc91ab8..f0fd9402aa7b 100644 --- a/pkg/util/lifted/scheduler/cache/snapshot.go +++ b/pkg/util/lifted/scheduler/cache/snapshot.go @@ -43,6 +43,8 @@ type Snapshot struct { havePodsWithRequiredAntiAffinityNodeInfoList []*framework.NodeInfo // usedPVCSet contains a set of PVC names that have one or more scheduled pods using them, // keyed in the format "namespace/name". + //nolint:staticcheck + // disable `deprecation` check for lifted code. usedPVCSet sets.String generation int64 } @@ -108,6 +110,9 @@ func createNodeInfoMap(pods []*corev1.Pod, nodes []*corev1.Node) map[string]*fra return nodeNameToInfo } +// disable `deprecation` check for lifted code. +// +//nolint:staticcheck func createUsedPVCSet(pods []*corev1.Pod) sets.String { usedPVCSet := sets.NewString() for _, pod := range pods { @@ -128,6 +133,9 @@ func createUsedPVCSet(pods []*corev1.Pod) sets.String { } // getNodeImageStates returns the given node's image states based on the given imageExistence map. +// disable `deprecation` check for lifted code. +// +//nolint:staticcheck func getNodeImageStates(node *corev1.Node, imageExistenceMap map[string]sets.String) map[string]*framework.ImageStateSummary { imageStates := make(map[string]*framework.ImageStateSummary) @@ -143,6 +151,10 @@ func getNodeImageStates(node *corev1.Node, imageExistenceMap map[string]sets.Str } // createImageExistenceMap returns a map recording on which nodes the images exist, keyed by the images' names. +// +// disable `deprecation` check for lifted code. +// +//nolint:staticcheck func createImageExistenceMap(nodes []*corev1.Node) map[string]sets.String { imageExistenceMap := make(map[string]sets.String) for _, node := range nodes { diff --git a/pkg/util/lifted/scheduler/framework/types.go b/pkg/util/lifted/scheduler/framework/types.go index 6d9a6009a4fe..15aba5580aa7 100644 --- a/pkg/util/lifted/scheduler/framework/types.go +++ b/pkg/util/lifted/scheduler/framework/types.go @@ -105,6 +105,8 @@ type QueuedPodInfo struct { // latency for a pod. 
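The lifted packages keep the deprecated sets.String on purpose: instead of rewriting code copied from upstream Kubernetes, the patch silences the linter. A sketch of the directive placement it uses, mirroring the patch's own style and assuming golangci-lint with the staticcheck linter enabled (its deprecation check is what these comments target); names are hypothetical:

package lifted

import "k8s.io/apimachinery/pkg/util/sets"

// assumedPods stays on the deprecated type to match the upstream source.
//
//nolint:staticcheck
// disable `deprecation` check for lifted code.
var assumedPods = sets.NewString()

// newAssumedPods mirrors how the patch annotates whole functions: the
// directive lives in the doc-comment block so it covers the signature and body.
//
//nolint:staticcheck
func newAssumedPods() sets.String {
	return sets.NewString()
}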
InitialAttemptTimestamp time.Time // If a Pod failed in a scheduling cycle, record the plugin names it failed by. + //nolint:staticcheck + // disable `deprecation` check for lifted code. UnschedulablePlugins sets.String } @@ -192,6 +194,8 @@ func (pi *PodInfo) Update(pod *corev1.Pod) { // AffinityTerm is a processed version of v1.PodAffinityTerm. type AffinityTerm struct { + //nolint:staticcheck + // disable `deprecation` check for lifted code. Namespaces sets.String Selector labels.Selector TopologyKey string @@ -299,6 +303,9 @@ func getPodAntiAffinityTerms(affinity *corev1.Affinity) (terms []corev1.PodAffin // returns a set of names according to the namespaces indicated in podAffinityTerm. // If namespaces is empty it considers the given pod's namespace. +// disable `deprecation` check for lifted code. +// +//nolint:staticcheck func getNamespacesFromPodAffinityTerm(pod *corev1.Pod, podAffinityTerm *corev1.PodAffinityTerm) sets.String { names := sets.String{} if len(podAffinityTerm.Namespaces) == 0 && podAffinityTerm.NamespaceSelector == nil { diff --git a/pkg/util/lifted/taint.go b/pkg/util/lifted/taint.go index af42f370c4ed..d0e8c35ff8e4 100644 --- a/pkg/util/lifted/taint.go +++ b/pkg/util/lifted/taint.go @@ -35,6 +35,8 @@ import ( // It also validates the spec. For example, the form `` may be used to remove a taint, but not to add one. func ParseTaints(spec []string) ([]corev1.Taint, []corev1.Taint, error) { var taints, taintsToRemove []corev1.Taint + //nolint:staticcheck + // disable `deprecation` check for lifted code. uniqueTaints := map[corev1.TaintEffect]sets.String{} for _, taintSpec := range spec { @@ -59,6 +61,8 @@ func ParseTaints(spec []string) ([]corev1.Taint, []corev1.Taint, error) { } // add taint to existingTaints for uniqueness check if len(uniqueTaints[newTaint.Effect]) == 0 { + //nolint:staticcheck + // disable `deprecation` check for lifted code. uniqueTaints[newTaint.Effect] = sets.String{} } uniqueTaints[newTaint.Effect].Insert(newTaint.Key) diff --git a/pkg/util/lifted/validateclustertaints.go b/pkg/util/lifted/validateclustertaints.go index 19d779f378d5..8e52d62963e8 100644 --- a/pkg/util/lifted/validateclustertaints.go +++ b/pkg/util/lifted/validateclustertaints.go @@ -38,6 +38,8 @@ import ( func ValidateClusterTaints(taints []corev1.Taint, fldPath *field.Path) field.ErrorList { allErrors := field.ErrorList{} + //nolint:staticcheck + // disable `deprecation` check for lifted code. uniqueTaints := map[corev1.TaintEffect]sets.String{} for i, currTaint := range taints { @@ -61,6 +63,8 @@ func ValidateClusterTaints(taints []corev1.Taint, fldPath *field.Path) field.Err // add taint to existingTaints for uniqueness check if len(uniqueTaints[currTaint.Effect]) == 0 { + //nolint:staticcheck + // disable `deprecation` check for lifted code. uniqueTaints[currTaint.Effect] = sets.String{} } uniqueTaints[currTaint.Effect].Insert(currTaint.Key) diff --git a/pkg/util/lifted/visitpod_test.go b/pkg/util/lifted/visitpod_test.go index 598b92d38c2b..ec3525b40446 100644 --- a/pkg/util/lifted/visitpod_test.go +++ b/pkg/util/lifted/visitpod_test.go @@ -438,6 +438,9 @@ func TestPodSecrets(t *testing.T) { // +lifted:source=https://github.com/kubernetes/kubernetes/blob/release-1.23/pkg/api/v1/pod/util_test.go#L553-L591 // collectResourcePaths traverses the object, computing all the struct paths that lead to fields with resourcename in the name. +// disable `deprecation` check for lifted code. 
+// +//nolint:staticcheck func collectResourcePaths(t *testing.T, resourcename string, path *field.Path, name string, tp reflect.Type) sets.String { resourcename = strings.ToLower(resourcename) resourcePaths := sets.NewString() diff --git a/test/e2e/framework/dynamic.go b/test/e2e/framework/dynamic.go index ad2d78965a52..803d8094b941 100644 --- a/test/e2e/framework/dynamic.go +++ b/test/e2e/framework/dynamic.go @@ -12,11 +12,11 @@ import ( ) // GetResourceNames list resources and return their names. -func GetResourceNames(client dynamic.ResourceInterface) sets.String { +func GetResourceNames(client dynamic.ResourceInterface) sets.Set[string] { list, err := client.List(context.TODO(), metav1.ListOptions{}) gomega.Expect(err).ShouldNot(gomega.HaveOccurred()) - names := sets.NewString() + names := sets.New[string]() for _, item := range list.Items { names.Insert(item.GetName()) } diff --git a/test/e2e/propagationpolicy_test.go b/test/e2e/propagationpolicy_test.go index 9847dcbe19a0..eea7f249b3ff 100644 --- a/test/e2e/propagationpolicy_test.go +++ b/test/e2e/propagationpolicy_test.go @@ -392,7 +392,7 @@ var _ = ginkgo.Describe("[BasicPropagation] propagation testing", func() { return true }) - patch := []map[string]interface{}{{"op": "replace", "path": "/spec/backoffLimit", "value": pointer.Int32Ptr(updateBackoffLimit)}} + patch := []map[string]interface{}{{"op": "replace", "path": "/spec/backoffLimit", "value": pointer.Int32(updateBackoffLimit)}} bytes, err := json.Marshal(patch) gomega.Expect(err).ShouldNot(gomega.HaveOccurred()) framework.UpdateJobWithPatchBytes(kubeClient, job.Namespace, job.Name, bytes, types.JSONPatchType) diff --git a/test/e2e/rescheduling_test.go b/test/e2e/rescheduling_test.go index ed0c821d21d2..76da740f059b 100644 --- a/test/e2e/rescheduling_test.go +++ b/test/e2e/rescheduling_test.go @@ -72,7 +72,7 @@ var _ = ginkgo.Describe("[cluster unjoined] reschedule testing", func() { deploymentNamespace = testNamespace deploymentName = policyName deployment = testhelper.NewDeployment(deploymentNamespace, deploymentName) - deployment.Spec.Replicas = pointer.Int32Ptr(10) + deployment.Spec.Replicas = pointer.Int32(10) policy = testhelper.NewPropagationPolicy(policyNamespace, policyName, []policyv1alpha1.ResourceSelector{ { @@ -200,7 +200,7 @@ var _ = ginkgo.Describe("[cluster joined] reschedule testing", func() { deploymentNamespace = testNamespace deploymentName = policyName deployment = testhelper.NewDeployment(deploymentNamespace, deploymentName) - deployment.Spec.Replicas = pointer.Int32Ptr(1) + deployment.Spec.Replicas = pointer.Int32(1) // set ReplicaSchedulingType=Duplicated. policy = testhelper.NewPropagationPolicy(policyNamespace, policyName, []policyv1alpha1.ResourceSelector{ { @@ -256,7 +256,7 @@ var _ = ginkgo.Describe("[cluster joined] reschedule testing", func() { deploymentNamespace = testNamespace deploymentName = policyName deployment = testhelper.NewDeployment(deploymentNamespace, deploymentName) - deployment.Spec.Replicas = pointer.Int32Ptr(1) + deployment.Spec.Replicas = pointer.Int32(1) // set clusterAffinity for Placement. 
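The test hunks also swap the deprecated *Ptr constructors from k8s.io/utils/pointer for their shorter equivalents. A tiny illustrative sketch (the helper name is hypothetical):

package example

import "k8s.io/utils/pointer"

// defaultReplicas builds a *int32 the way the updated tests do.
func defaultReplicas() *int32 {
	// pointer.Int32Ptr(3), pointer.Int64Ptr(...) and pointer.BoolPtr(...) are
	// deprecated aliases; the short forms are drop-in replacements.
	return pointer.Int32(3)
}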
policy = testhelper.NewPropagationPolicy(policyNamespace, policyName, []policyv1alpha1.ResourceSelector{ { diff --git a/test/e2e/resource_test.go b/test/e2e/resource_test.go index 7a365b3d4dc9..f844cbceea9e 100644 --- a/test/e2e/resource_test.go +++ b/test/e2e/resource_test.go @@ -290,7 +290,7 @@ var _ = ginkgo.Describe("[resource-status collection] resource status collection }) ginkgo.It("ingress status collection testing", func() { - ingLoadBalancer := corev1.LoadBalancerStatus{} + ingLoadBalancer := networkingv1.IngressLoadBalancerStatus{} // simulate the update of the ingress status in member clusters. ginkgo.By("Update ingress status in member clusters", func() { @@ -300,7 +300,7 @@ var _ = ginkgo.Describe("[resource-status collection] resource status collection ingresses := []networkingv1.IngressLoadBalancerIngress{{IP: fmt.Sprintf("172.19.2.%d", index+6)}} for _, ingress := range ingresses { - ingLoadBalancer.Ingress = append(ingLoadBalancer.Ingress, corev1.LoadBalancerIngress{ + ingLoadBalancer.Ingress = append(ingLoadBalancer.Ingress, networkingv1.IngressLoadBalancerIngress{ IP: ingress.IP, Hostname: clusterName, }) diff --git a/test/e2e/resourceinterpreter_test.go b/test/e2e/resourceinterpreter_test.go index 0e7dc1083cba..f2283f3a8c77 100644 --- a/test/e2e/resourceinterpreter_test.go +++ b/test/e2e/resourceinterpreter_test.go @@ -96,7 +96,7 @@ var _ = ginkgo.Describe("Resource interpreter webhook testing", func() { gomega.Eventually(func(g gomega.Gomega) error { curWorkload := framework.GetWorkload(dynamicClient, workloadNamespace, workloadName) // construct two values that need to be changed, and only one value is retained. - curWorkload.Spec.Replicas = pointer.Int32Ptr(2) + curWorkload.Spec.Replicas = pointer.Int32(2) curWorkload.Spec.Paused = true newUnstructuredObj, err := helper.ToUnstructured(curWorkload) @@ -131,7 +131,7 @@ var _ = ginkgo.Describe("Resource interpreter webhook testing", func() { sumWeight += index + 1 staticWeightLists = append(staticWeightLists, staticWeightList) } - workload.Spec.Replicas = pointer.Int32Ptr(int32(sumWeight)) + workload.Spec.Replicas = pointer.Int32(int32(sumWeight)) policy = testhelper.NewPropagationPolicy(policyNamespace, policyName, []policyv1alpha1.ResourceSelector{ { APIVersion: workload.APIVersion, @@ -440,7 +440,7 @@ var _ = framework.SerialDescribe("Resource interpreter customization testing", f sumWeight += index + 1 staticWeightLists = append(staticWeightLists, staticWeightList) } - deployment.Spec.Replicas = pointer.Int32Ptr(int32(sumWeight)) + deployment.Spec.Replicas = pointer.Int32(int32(sumWeight)) policy.Spec.Placement = policyv1alpha1.Placement{ ClusterAffinity: &policyv1alpha1.ClusterAffinity{ ClusterNames: framework.ClusterNames(), diff --git a/test/e2e/scheduling_test.go b/test/e2e/scheduling_test.go index 0feff61474c1..95806fade5fa 100644 --- a/test/e2e/scheduling_test.go +++ b/test/e2e/scheduling_test.go @@ -323,7 +323,7 @@ var _ = ginkgo.Describe("propagation with label and group constraints testing", gomega.Expect(minGroups == len(groupMatchedClusters)).ShouldNot(gomega.BeFalse()) }) - patch := map[string]interface{}{"spec": map[string]interface{}{"parallelism": pointer.Int32Ptr(updateParallelism)}} + patch := map[string]interface{}{"spec": map[string]interface{}{"parallelism": pointer.Int32(updateParallelism)}} bytes, err := json.Marshal(patch) gomega.Expect(err).ShouldNot(gomega.HaveOccurred()) framework.UpdateJobWithPatchBytes(kubeClient, job.Namespace, job.Name, bytes, types.StrategicMergePatchType) diff --git 
a/test/e2e/search_test.go b/test/e2e/search_test.go index 9dc4d710b4a8..58a07ab9fa89 100644 --- a/test/e2e/search_test.go +++ b/test/e2e/search_test.go @@ -565,10 +565,10 @@ var _ = ginkgo.Describe("[karmada-search] karmada search testing", ginkgo.Ordere ginkgo.It("could list nodes", func() { fromM1 := framework.GetResourceNames(m1Dynamic.Resource(nodeGVR)) - ginkgo.By("list nodes from member1: " + strings.Join(fromM1.List(), ",")) + ginkgo.By("list nodes from member1: " + strings.Join(sets.List(fromM1), ",")) fromM2 := framework.GetResourceNames(m2Dynamic.Resource(nodeGVR)) - ginkgo.By("list nodes from member2: " + strings.Join(fromM2.List(), ",")) - fromMembers := sets.NewString().Union(fromM1).Union(fromM2) + ginkgo.By("list nodes from member2: " + strings.Join(sets.List(fromM2), ",")) + fromMembers := sets.New[string]().Union(fromM1).Union(fromM2) var proxyList *corev1.NodeList gomega.Eventually(func(g gomega.Gomega) { @@ -576,7 +576,7 @@ var _ = ginkgo.Describe("[karmada-search] karmada search testing", ginkgo.Ordere proxyList, err = proxyClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) g.Expect(err).ShouldNot(gomega.HaveOccurred()) - fromProxy := sets.NewString() + fromProxy := sets.New[string]() for _, item := range proxyList.Items { fromProxy.Insert(item.Name) } @@ -584,7 +584,7 @@ var _ = ginkgo.Describe("[karmada-search] karmada search testing", ginkgo.Ordere }, pollTimeout, pollInterval).Should(gomega.Succeed()) // assert cache source annotation - groupM1, groupM2 := sets.NewString(), sets.NewString() + groupM1, groupM2 := sets.New[string](), sets.New[string]() for _, item := range proxyList.Items { cluster := item.Annotations[clusterv1alpha1.CacheSourceAnnotationKey] switch cluster { diff --git a/test/helper/resource.go b/test/helper/resource.go index 1235c55aa547..8cdb49a86ae3 100644 --- a/test/helper/resource.go +++ b/test/helper/resource.go @@ -45,7 +45,7 @@ func NewDeployment(namespace string, name string) *appsv1.Deployment { Name: name, }, Spec: appsv1.DeploymentSpec{ - Replicas: pointer.Int32Ptr(3), + Replicas: pointer.Int32(3), Selector: &metav1.LabelSelector{ MatchLabels: podLabels, }, @@ -115,7 +115,7 @@ func NewStatefulSet(namespace string, name string) *appsv1.StatefulSet { Name: name, }, Spec: appsv1.StatefulSetSpec{ - Replicas: pointer.Int32Ptr(3), + Replicas: pointer.Int32(3), Selector: &metav1.LabelSelector{ MatchLabels: podLabels, }, @@ -327,7 +327,7 @@ func NewJob(namespace string, name string) *batchv1.Job { RestartPolicy: corev1.RestartPolicyNever, }, }, - BackoffLimit: pointer.Int32Ptr(4), + BackoffLimit: pointer.Int32(4), }, } } @@ -519,7 +519,7 @@ func NewWorkload(namespace, name string) *workloadv1alpha1.Workload { Name: name, }, Spec: workloadv1alpha1.WorkloadSpec{ - Replicas: pointer.Int32Ptr(3), + Replicas: pointer.Int32(3), Template: corev1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ Labels: podLabels, @@ -549,7 +549,7 @@ func NewDeploymentWithVolumes(namespace, deploymentName string, volumes []corev1 Name: deploymentName, }, Spec: appsv1.DeploymentSpec{ - Replicas: pointer.Int32Ptr(3), + Replicas: pointer.Int32(3), Selector: &metav1.LabelSelector{ MatchLabels: podLabels, }, @@ -584,7 +584,7 @@ func NewDeploymentWithServiceAccount(namespace, deploymentName string, serviceAc }, Spec: appsv1.DeploymentSpec{ - Replicas: pointer.Int32Ptr(3), + Replicas: pointer.Int32(3), Selector: &metav1.LabelSelector{ MatchLabels: podLabels, },
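For the test comparisons that moved from reflect.DeepEqual to the set's own Equal method (for example in dependencies_test.go above), a small sketch of the updated idiom; the test name is illustrative:

package example_test

import (
	"testing"

	"k8s.io/apimachinery/pkg/util/sets"
)

func TestSetComparison(t *testing.T) {
	got := sets.New("fake-foo", "fake-bar")
	want := sets.New("fake-bar", "fake-foo")

	// Compare membership via the set's own method, as the updated tests do,
	// instead of reflect.DeepEqual on the underlying map representation.
	if !got.Equal(want) {
		t.Errorf("got %v, want %v", sets.List(got), sets.List(want))
	}
}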
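The resource_test.go hunk earlier in the patch also tracks the Kubernetes 1.26 change that gave Ingress load-balancer status its own types under networking/v1 instead of reusing corev1.LoadBalancerStatus. A minimal sketch with a hypothetical helper:

package example

import networkingv1 "k8s.io/api/networking/v1"

// buildIngressStatus shows the networking/v1 status types used since k8s 1.26.
func buildIngressStatus(ip, host string) networkingv1.IngressStatus {
	return networkingv1.IngressStatus{
		LoadBalancer: networkingv1.IngressLoadBalancerStatus{
			Ingress: []networkingv1.IngressLoadBalancerIngress{
				{IP: ip, Hostname: host},
			},
		},
	}
}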