karmada-metrics-adapter: reduce memory usage
When a member cluster runs a large number of pods, karmada-metrics-adapter consumes a lot of memory because it caches the complete objects of every pod in the cluster. The adapter needs only a small part of that data, so we trim the cached information to reduce memory usage (a sketch of the trimming idea follows the change summary below).

Signed-off-by: chaunceyjiang <[email protected]>
chaunceyjiang committed Apr 3, 2024
1 parent 57c1989 commit be10451
Showing 1 changed file with 7 additions and 5 deletions.
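
For context on how the trimming works: the change swaps the generic (dynamic) informer manager for Karmada's typed manager, and the standard client-go mechanism for shrinking an informer cache is a transform function that prunes each object before it is stored. The sketch below shows that mechanism in plain client-go; stripPod, the field list it keeps, and NewTrimmedPodLister are illustrative assumptions for this commit's idea, not Karmada's actual trim code in the typedmanager package.

// Sketch (plain client-go): store only trimmed pods in the informer cache.
package trimcache

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	listerscorev1 "k8s.io/client-go/listers/core/v1"
)

// stripPod keeps only the fields a metrics adapter plausibly needs
// (identity, labels, container names); the exact field list here is an
// assumption, not Karmada's actual trim list.
func stripPod(obj interface{}) (interface{}, error) {
	pod, ok := obj.(*corev1.Pod)
	if !ok {
		// Tombstones (cache.DeletedFinalStateUnknown) pass through untouched.
		return obj, nil
	}
	trimmed := &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      pod.Name,
			Namespace: pod.Namespace,
			Labels:    pod.Labels,
		},
	}
	for _, c := range pod.Spec.Containers {
		trimmed.Spec.Containers = append(trimmed.Spec.Containers, corev1.Container{Name: c.Name})
	}
	return trimmed, nil
}

// NewTrimmedPodLister wires the transform into a shared informer factory:
// every object is pruned by stripPod before it enters the cache, so the
// memory saving applies for the whole lifetime of the cache.
func NewTrimmedPodLister(client kubernetes.Interface, stopCh <-chan struct{}) listerscorev1.PodLister {
	factory := informers.NewSharedInformerFactoryWithOptions(
		client, 0, informers.WithTransform(stripPod),
	)
	lister := factory.Core().V1().Pods().Lister() // requesting the lister registers the informer
	factory.Start(stopCh)
	factory.WaitForCacheSync(stopCh)
	return lister
}

One design note: transforms run inside the informer machinery, so every consumer of the cache sees the trimmed objects; anything the adapter later needs must survive the prune.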
pkg/metricsadapter/controller.go (12 changes: 7 additions & 5 deletions)

@@ -37,6 +37,7 @@ import (
 	"github.com/karmada-io/karmada/pkg/metricsadapter/provider"
 	"github.com/karmada-io/karmada/pkg/util"
 	"github.com/karmada-io/karmada/pkg/util/fedinformer/genericmanager"
+	"github.com/karmada-io/karmada/pkg/util/fedinformer/typedmanager"
 	"github.com/karmada-io/karmada/pkg/util/gclient"
 )
@@ -49,7 +50,7 @@
 type MetricsController struct {
 	InformerFactory       informerfactory.SharedInformerFactory
 	ClusterLister         clusterlister.ClusterLister
-	InformerManager       genericmanager.MultiClusterInformerManager
+	InformerManager       typedmanager.MultiClusterInformerManager
 	MultiClusterDiscovery multiclient.MultiClusterDiscoveryInterface
 	queue                 workqueue.RateLimitingInterface
 	restConfig            *rest.Config
@@ -62,7 +63,7 @@ func NewMetricsController(restConfig *rest.Config, factory informerfactory.Share
 		InformerFactory:       factory,
 		ClusterLister:         clusterLister,
 		MultiClusterDiscovery: multiclient.NewMultiClusterDiscoveryClient(clusterLister, kubeFactory),
-		InformerManager:       genericmanager.GetInstance(),
+		InformerManager:       typedmanager.GetInstance(),
 		restConfig:            restConfig,
 		queue: workqueue.NewRateLimitingQueueWithConfig(workqueue.DefaultControllerRateLimiter(), workqueue.RateLimitingQueueConfig{
 			Name: "metrics-adapter",
@@ -171,6 +172,7 @@ func (m *MetricsController) handleClusters() bool {
 	if !m.InformerManager.IsManagerExist(clusterName) {
 		klog.Info("Try to build informer manager for cluster ", clusterName)
 		controlPlaneClient := gclient.NewForConfigOrDie(m.restConfig)
+		clusterClient, err := util.NewClusterClientSet(clusterName, controlPlaneClient, nil)
 		clusterDynamicClient, err := util.NewClusterDynamicClientSet(clusterName, controlPlaneClient)
 		if err != nil {
 			return false
@@ -181,7 +183,7 @@
 			klog.Warningf("unable to access cluster %s, Error: %+v", clusterName, err)
 			return true
 		}
-		_ = m.InformerManager.ForCluster(clusterName, clusterDynamicClient.DynamicClientSet, 0)
+		_ = m.InformerManager.ForCluster(clusterName, clusterClient.KubeClient, 0)
 	}
 	err = m.MultiClusterDiscovery.Set(clusterName)
 	if err != nil {
@@ -190,8 +192,8 @@
 	sci := m.InformerManager.GetSingleClusterManager(clusterName)
 	// Just trigger the informer to work
-	_ = sci.Lister(provider.PodsGVR)
-	_ = sci.Lister(provider.NodesGVR)
+	_, _ = sci.Lister(provider.PodsGVR)
+	_, _ = sci.Lister(provider.NodesGVR)
 
 	sci.Start()
 	_ = sci.WaitForCacheSync()
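
A note on the two-value assignments in the last hunk: the typed manager's Lister evidently returns an error alongside the lister (hence the "_, _ =" form), and the calls exist only to register the pod and node informers before Start and WaitForCacheSync run. Below is a plain client-go sketch of the same warm-up pattern, under the assumption that the typed manager wraps a shared informer factory internally; warmPodAndNodeCaches is a hypothetical helper name.

package trimcache

import (
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
)

// warmPodAndNodeCaches mirrors the final hunk: touching the listers
// registers the informers; Start then runs them and WaitForCacheSync
// blocks until both caches are populated.
func warmPodAndNodeCaches(client kubernetes.Interface, stopCh <-chan struct{}) {
	factory := informers.NewSharedInformerFactory(client, 0)
	_ = factory.Core().V1().Pods().Lister()  // registers the pod informer
	_ = factory.Core().V1().Nodes().Lister() // registers the node informer
	factory.Start(stopCh)
	factory.WaitForCacheSync(stopCh)
}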
