Make ds pods eviction best effort when draining empty nodes (nodes wit… #7236

Merged 1 commit on Sep 4, 2024
7 changes: 2 additions & 5 deletions cluster-autoscaler/core/scaledown/actuation/drain.go
@@ -85,14 +85,11 @@ func (e Evictor) DrainNode(ctx *acontext.AutoscalingContext, nodeInfo *framework
return e.drainNodeWithPodsBasedOnPodPriority(ctx, node, pods, dsPods)
}

// EvictDaemonSetPods groups daemonSet pods in the node in to priority groups and, evicts daemonSet pods in the ascending order of priorities.
// If priority evictor is not enable, eviction of daemonSet pods is the best effort.
// EvictDaemonSetPods creates eviction objects for all DaemonSet pods on the node.
// Eviction of DaemonSet pods is best effort. Does not wait for evictions to finish.
func (e Evictor) EvictDaemonSetPods(ctx *acontext.AutoscalingContext, nodeInfo *framework.NodeInfo) (map[string]status.PodEvictionResult, error) {
node := nodeInfo.Node()
dsPods, _ := podsToEvict(nodeInfo, ctx.DaemonSetEvictionForEmptyNodes)
if e.fullDsEviction {
return e.drainNodeWithPodsBasedOnPodPriority(ctx, node, dsPods, nil)
}
return e.drainNodeWithPodsBasedOnPodPriority(ctx, node, nil, dsPods)
}

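For readers skimming the diff: the behavior change hinges on which argument slot DaemonSet pods occupy in drainNodeWithPodsBasedOnPodPriority. Below is a minimal, self-contained Go sketch of that distinction — the pod type and drainSketch helper are illustrative stand-ins, not cluster-autoscaler code — matching the new comment's semantics: pods in the first slice are evicted and waited on, pods in the second slice are evicted best effort.

```go
package main

import "fmt"

// pod is a toy stand-in for the real pod type used by drain.go.
type pod struct{ name string }

// drainSketch mimics the split the diff relies on: the first slice is
// treated as required evictions that are waited on, the second slice is
// evicted best effort with no wait.
func drainSketch(required, bestEffort []pod) {
	for _, p := range required {
		fmt.Printf("evict %s and wait for completion\n", p.name)
	}
	for _, p := range bestEffort {
		fmt.Printf("evict %s best effort (no wait)\n", p.name)
	}
}

func main() {
	dsPods := []pod{{"d1"}, {"d2"}}

	// Before this PR (with fullDsEviction): DaemonSet pods were passed in
	// the required slot, so draining an empty node waited on their evictions.
	drainSketch(dsPods, nil)

	// After this PR: DaemonSet pods always go into the best-effort slot.
	drainSketch(nil, dsPods)
}
```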
72 changes: 1 addition & 71 deletions cluster-autoscaler/core/scaledown/actuation/drain_test.go
@@ -42,7 +42,6 @@ import (
. "k8s.io/autoscaler/cluster-autoscaler/utils/test"
"k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
kubelet_config "k8s.io/kubernetes/pkg/kubelet/apis/config"
"k8s.io/kubernetes/pkg/kubelet/types"
)

@@ -57,7 +56,6 @@ func TestDaemonSetEvictionForEmptyNodes(t *testing.T) {
evictByDefault bool
extraAnnotationValue map[string]string
expectNotEvicted map[string]struct{}
fullDsEviction bool
podPriorities []int32
}{
{
@@ -84,57 +82,6 @@
extraAnnotationValue: map[string]string{"d1": "false"},
expectNotEvicted: map[string]struct{}{"d1": {}},
},
{
name: "Failed to create DaemonSet eviction",
dsPods: []string{"d1", "d2"},
dsEvictionTimeout: 5000 * time.Millisecond,
evictionSuccess: false,
err: fmt.Errorf("Failed to drain node /n1, due to following errors"),
evictByDefault: true,
fullDsEviction: true,
podPriorities: []int32{0, 1000},
},
{
name: "Eviction timeout exceed",
dsPods: []string{"d1", "d2", "d3"},
evictionTimeoutExceed: true,
dsEvictionTimeout: 100 * time.Millisecond,
evictionSuccess: false,
err: fmt.Errorf("Failed to drain node /n1, due to following errors"),
evictByDefault: true,
fullDsEviction: true,
podPriorities: []int32{0, 1000, 2000},
},
{
name: "Successful attempt to evict DaemonSet pods",
dsPods: []string{"d1", "d2"},
dsEvictionTimeout: 5000 * time.Millisecond,
evictionSuccess: true,
evictByDefault: true,
fullDsEviction: true,
podPriorities: []int32{0, 1000},
},
{
name: "Evict single pod due to annotation",
dsPods: []string{"d1", "d2"},
dsEvictionTimeout: 5000 * time.Millisecond,
evictionSuccess: true,
extraAnnotationValue: map[string]string{"d1": "true"},
expectNotEvicted: map[string]struct{}{"d2": {}},
fullDsEviction: true,
podPriorities: []int32{0, 1000},
},
{
name: "Don't evict single pod due to annotation",
dsPods: []string{"d1", "d2"},
dsEvictionTimeout: 5000 * time.Millisecond,
evictionSuccess: true,
evictByDefault: true,
extraAnnotationValue: map[string]string{"d1": "false"},
expectNotEvicted: map[string]struct{}{"d1": {}},
fullDsEviction: true,
podPriorities: []int32{0, 1000},
},
}

for _, scenario := range testScenarios {
@@ -160,9 +107,6 @@ func TestDaemonSetEvictionForEmptyNodes(t *testing.T) {
for i, dsName := range scenario.dsPods {
ds := BuildTestPod(dsName, 100, 0, WithDSController())
ds.Spec.NodeName = "n1"
if scenario.fullDsEviction {
ds.Spec.Priority = &scenario.podPriorities[i]
}
if v, ok := scenario.extraAnnotationValue[dsName]; ok {
ds.Annotations[daemonset.EnableDsEvictionKey] = v
}
@@ -198,19 +142,9 @@ func TestDaemonSetEvictionForEmptyNodes(t *testing.T) {
clustersnapshot.InitializeClusterSnapshotOrDie(t, context.ClusterSnapshot, []*apiv1.Node{n1}, dsPods)

drainConfig := SingleRuleDrainConfig(context.MaxGracefulTerminationSec)
if scenario.fullDsEviction {
drainConfig = []kubelet_config.ShutdownGracePeriodByPodPriority{}
for _, priority := range scenario.podPriorities {
drainConfig = append(drainConfig, kubelet_config.ShutdownGracePeriodByPodPriority{
Priority: priority,
ShutdownGracePeriodSeconds: int64(context.MaxGracefulTerminationSec),
})
}
}
evictor := Evictor{
EvictionRetryTime: waitBetweenRetries,
shutdownGracePeriodByPodPriority: drainConfig,
fullDsEviction: scenario.fullDsEviction,
}
nodeInfo, err := context.ClusterSnapshot.NodeInfos().Get(n1.Name)
assert.NoError(t, err)
@@ -232,11 +166,7 @@ func TestDaemonSetEvictionForEmptyNodes(t *testing.T) {
for i := 0; i < len(expectEvicted); i++ {
deleted[i] = utils.GetStringFromChan(deletedPods)
}
if scenario.fullDsEviction {
assert.Equal(t, expectEvicted, deleted)
} else {
assert.ElementsMatch(t, deleted, expectEvicted)
}
assert.ElementsMatch(t, deleted, expectEvicted)
})
}
}
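With the fullDsEviction scenarios gone, the test no longer asserts a strict eviction order and always uses assert.ElementsMatch. A small standalone sketch of why that matters, using the testify library the test file already imports (the package and test names here are hypothetical): Equal is order-sensitive, ElementsMatch is not.

```go
package actuation_sketch

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

// ElementsMatch only requires that both slices contain the same elements,
// which is all the best-effort DaemonSet eviction path can guarantee.
func TestEvictionOrderDoesNotMatter(t *testing.T) {
	expectEvicted := []string{"d1", "d2"}
	deleted := []string{"d2", "d1"}

	assert.ElementsMatch(t, deleted, expectEvicted) // passes: same elements, any order
	// assert.Equal(t, expectEvicted, deleted)      // would fail: order differs
}
```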