Merge pull request #323 from denkensk/move-podgroup-key-to-api
move PodGroup label to pkg/api
k8s-ci-robot authored Jan 27, 2022
2 parents ec632c3 + abbdb9f commit ac5c8be
Showing 10 changed files with 125 additions and 144 deletions.
4 changes: 4 additions & 0 deletions pkg/apis/scheduling/v1alpha1/types.go
@@ -19,6 +19,7 @@ package v1alpha1
import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/scheduler-plugins/pkg/apis/scheduling"
)

// +genclient
@@ -107,6 +108,9 @@ const (

// PodGroupFailed means at least one of `spec.minMember` pods is failed.
PodGroupFailed PodGroupPhase = "Failed"

+ // PodGroupLabel is the default label of coscheduling
+ PodGroupLabel = "pod-group." + scheduling.GroupName
)

// +genclient
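For reference, a minimal sketch (not part of this diff) of how a Pod opts into a PodGroup via the relocated label. The printed key assumes scheduling.GroupName resolves to "scheduling.sigs.k8s.io", which would make the full key "pod-group.scheduling.sigs.k8s.io"; the Pod name and group name are made up for illustration.

package main

import (
    "fmt"

    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

    "sigs.k8s.io/scheduler-plugins/pkg/apis/scheduling/v1alpha1"
)

func main() {
    // A Pod opts into PodGroup "pg1" simply by carrying the label.
    pod := &corev1.Pod{
        ObjectMeta: metav1.ObjectMeta{
            Name:      "demo-pod",
            Namespace: "ns1",
            Labels:    map[string]string{v1alpha1.PodGroupLabel: "pg1"},
        },
    }
    // Expected to print "pod-group.scheduling.sigs.k8s.io=pg1" if
    // scheduling.GroupName is "scheduling.sigs.k8s.io".
    fmt.Printf("%s=%s\n", v1alpha1.PodGroupLabel, pod.Labels[v1alpha1.PodGroupLabel])
}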
2 changes: 1 addition & 1 deletion pkg/controller/podgroup.go
@@ -206,7 +206,7 @@ func (ctrl *PodGroupController) syncHandler(key string) error {
}

pgCopy := pg.DeepCopy()
- selector := labels.Set(map[string]string{util.PodGroupLabel: pgCopy.Name}).AsSelector()
+ selector := labels.Set(map[string]string{schedv1alpha1.PodGroupLabel: pgCopy.Name}).AsSelector()
pods, err := ctrl.podLister.List(selector)
if err != nil {
klog.ErrorS(err, "List pods for group failed", "podGroup", klog.KObj(pgCopy))
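A small standalone sketch of the selector the controller now builds from the relocated constant. The hypothetical main below only prints the selector; the expected output assumes the label key resolves to "pod-group.scheduling.sigs.k8s.io", and "pg1" is an illustrative PodGroup name.

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/labels"

    schedv1alpha1 "sigs.k8s.io/scheduler-plugins/pkg/apis/scheduling/v1alpha1"
)

func main() {
    // Same shape as syncHandler above: select every Pod whose PodGroup
    // label equals the PodGroup's name.
    selector := labels.Set(map[string]string{schedv1alpha1.PodGroupLabel: "pg1"}).AsSelector()
    // Prints something like "pod-group.scheduling.sigs.k8s.io=pg1".
    fmt.Println(selector.String())
}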
3 changes: 1 addition & 2 deletions pkg/controller/podgroup_test.go
@@ -17,7 +17,6 @@ import (
"sigs.k8s.io/scheduler-plugins/pkg/apis/scheduling/v1alpha1"
pgfake "sigs.k8s.io/scheduler-plugins/pkg/generated/clientset/versioned/fake"
schedinformer "sigs.k8s.io/scheduler-plugins/pkg/generated/informers/externalversions"
"sigs.k8s.io/scheduler-plugins/pkg/util"
)

func Test_Run(t *testing.T) {
@@ -188,7 +187,7 @@ func makePods(podNames []string, pgName string, phase v1.PodPhase) []*v1.Pod {
pds := make([]*v1.Pod, 0)
for _, name := range podNames {
pod := st.MakePod().Namespace("default").Name(name).Obj()
- pod.Labels = map[string]string{util.PodGroupLabel: pgName}
+ pod.Labels = map[string]string{v1alpha1.PodGroupLabel: pgName}
pod.Status.Phase = phase
pds = append(pds, pod)
}
6 changes: 3 additions & 3 deletions pkg/coscheduling/core/core.go
Expand Up @@ -115,7 +115,7 @@ func (pgMgr *PodGroupManager) ActivateSiblings(pod *corev1.Pod, state *framework
}

pods, err := pgMgr.podLister.Pods(pod.Namespace).List(
- labels.SelectorFromSet(labels.Set{util.PodGroupLabel: pgName}),
+ labels.SelectorFromSet(labels.Set{v1alpha1.PodGroupLabel: pgName}),
)
if err != nil {
klog.ErrorS(err, "Failed to obtain pods belong to a PodGroup", "podGroup", pgName)
@@ -156,7 +156,7 @@ func (pgMgr *PodGroupManager) PreFilter(ctx context.Context, pod *corev1.Pod) er
return fmt.Errorf("pod with pgName: %v last failed in 3s, deny", pgFullName)
}
pods, err := pgMgr.podLister.Pods(pod.Namespace).List(
- labels.SelectorFromSet(labels.Set{util.PodGroupLabel: util.GetPodGroupLabel(pod)}),
+ labels.SelectorFromSet(labels.Set{v1alpha1.PodGroupLabel: util.GetPodGroupLabel(pod)}),
)
if err != nil {
return fmt.Errorf("podLister list pods failed: %v", err)
@@ -312,7 +312,7 @@ func (pgMgr *PodGroupManager) CalculateAssignedPods(podGroupName, namespace stri
for _, nodeInfo := range nodeInfos {
for _, podInfo := range nodeInfo.Pods {
pod := podInfo.Pod
- if pod.Labels[util.PodGroupLabel] == podGroupName && pod.Namespace == namespace && pod.Spec.NodeName != "" {
+ if pod.Labels[v1alpha1.PodGroupLabel] == podGroupName && pod.Namespace == namespace && pod.Spec.NodeName != "" {
count++
}
}
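A simplified, standalone sketch of the counting rule used by CalculateAssignedPods: a Pod counts toward a PodGroup only if it carries the group label, sits in the group's namespace, and is already bound to a node. countAssigned is a hypothetical helper over a plain Pod slice, not the plugin's actual snapshot-based implementation.

package core

import (
    corev1 "k8s.io/api/core/v1"

    "sigs.k8s.io/scheduler-plugins/pkg/apis/scheduling/v1alpha1"
)

// countAssigned mirrors the condition in the hunk above.
func countAssigned(pods []*corev1.Pod, podGroupName, namespace string) int {
    count := 0
    for _, pod := range pods {
        if pod.Labels[v1alpha1.PodGroupLabel] == podGroupName &&
            pod.Namespace == namespace &&
            pod.Spec.NodeName != "" {
            count++
        }
    }
    return count
}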
60 changes: 30 additions & 30 deletions pkg/coscheduling/core/core_test.go
@@ -70,82 +70,82 @@ func TestPreFilter(t *testing.T) {
name: "pod does not belong to any pg",
pod: st.MakePod().Name("p").UID("p").Namespace("ns1").Obj(),
pods: []*corev1.Pod{
st.MakePod().Name("pg1-1").UID("pg1-1").Namespace("ns1").Label(util.PodGroupLabel, "pg1").Obj(),
st.MakePod().Name("pg2-1").UID("pg2-1").Namespace("ns1").Label(util.PodGroupLabel, "pg2").Obj(),
st.MakePod().Name("pg1-1").UID("pg1-1").Namespace("ns1").Label(v1alpha1.PodGroupLabel, "pg1").Obj(),
st.MakePod().Name("pg2-1").UID("pg2-1").Namespace("ns1").Label(v1alpha1.PodGroupLabel, "pg2").Obj(),
},
lastDeniedPG: newCache(),
expectedSuccess: true,
},
{
name: "pg was previously denied",
pod: st.MakePod().Name("p1").UID("p1").Namespace("ns1").Label(util.PodGroupLabel, "pg1").Obj(),
pod: st.MakePod().Name("p1").UID("p1").Namespace("ns1").Label(v1alpha1.PodGroupLabel, "pg1").Obj(),
lastDeniedPG: denyCache,
expectedSuccess: false,
},
{
name: "pod belongs to a non-existing pg",
pod: st.MakePod().Name("p2").UID("p2").Namespace("ns1").Label(util.PodGroupLabel, "pg-notexisting").Obj(),
pod: st.MakePod().Name("p2").UID("p2").Namespace("ns1").Label(v1alpha1.PodGroupLabel, "pg-notexisting").Obj(),
lastDeniedPG: newCache(),
expectedSuccess: true,
},
{
name: "pod count less than minMember",
pod: st.MakePod().Name("p2").UID("p2").Namespace("ns1").Label(util.PodGroupLabel, "pg1").Obj(),
pod: st.MakePod().Name("p2").UID("p2").Namespace("ns1").Label(v1alpha1.PodGroupLabel, "pg1").Obj(),
pods: []*corev1.Pod{
st.MakePod().Name("pg2-1").UID("pg2-1").Namespace("ns1").Label(util.PodGroupLabel, "pg2").Obj(),
st.MakePod().Name("pg2-1").UID("pg2-1").Namespace("ns1").Label(v1alpha1.PodGroupLabel, "pg2").Obj(),
},
lastDeniedPG: newCache(),
expectedSuccess: false,
},
{
name: "pod count equal minMember",
pod: st.MakePod().Name("p2").UID("p2").Namespace("ns1").Label(util.PodGroupLabel, "pg1").Obj(),
pod: st.MakePod().Name("p2").UID("p2").Namespace("ns1").Label(v1alpha1.PodGroupLabel, "pg1").Obj(),
pods: []*corev1.Pod{
st.MakePod().Name("pg1-1").UID("pg1-1").Namespace("ns1").Label(util.PodGroupLabel, "pg1").Obj(),
st.MakePod().Name("pg2-1").UID("pg2-1").Namespace("ns1").Label(util.PodGroupLabel, "pg1").Obj(),
st.MakePod().Name("pg1-1").UID("pg1-1").Namespace("ns1").Label(v1alpha1.PodGroupLabel, "pg1").Obj(),
st.MakePod().Name("pg2-1").UID("pg2-1").Namespace("ns1").Label(v1alpha1.PodGroupLabel, "pg1").Obj(),
},
lastDeniedPG: newCache(),
expectedSuccess: true,
},
{
name: "pod count more minMember",
pod: st.MakePod().Name("p2").UID("p2").Namespace("ns1").Label(util.PodGroupLabel, "pg1").Obj(),
pod: st.MakePod().Name("p2").UID("p2").Namespace("ns1").Label(v1alpha1.PodGroupLabel, "pg1").Obj(),
pods: []*corev1.Pod{
st.MakePod().Name("pg1-1").UID("pg1-1").Namespace("ns1").Label(util.PodGroupLabel, "pg1").Obj(),
st.MakePod().Name("pg2-1").UID("pg2-1").Namespace("ns1").Label(util.PodGroupLabel, "pg1").Obj(),
st.MakePod().Name("pg3-1").UID("pg3-1").Namespace("ns1").Label(util.PodGroupLabel, "pg1").Obj(),
st.MakePod().Name("pg1-1").UID("pg1-1").Namespace("ns1").Label(v1alpha1.PodGroupLabel, "pg1").Obj(),
st.MakePod().Name("pg2-1").UID("pg2-1").Namespace("ns1").Label(v1alpha1.PodGroupLabel, "pg1").Obj(),
st.MakePod().Name("pg3-1").UID("pg3-1").Namespace("ns1").Label(v1alpha1.PodGroupLabel, "pg1").Obj(),
},
lastDeniedPG: newCache(),
expectedSuccess: true,
},
{
name: "cluster resource enough, min Resource",
pod: st.MakePod().Name("p2-1").UID("p2-1").Namespace("ns1").Label(util.PodGroupLabel, "pg2").
pod: st.MakePod().Name("p2-1").UID("p2-1").Namespace("ns1").Label(v1alpha1.PodGroupLabel, "pg2").
Req(map[corev1.ResourceName]string{corev1.ResourceCPU: "1"}).Obj(),
pods: []*corev1.Pod{
st.MakePod().Name("pg1-1").UID("pg1-1").Namespace("ns1").Label(util.PodGroupLabel, "pg2").Obj(),
st.MakePod().Name("pg2-1").UID("pg2-1").Namespace("ns1").Label(util.PodGroupLabel, "pg2").Obj(),
st.MakePod().Name("pg1-1").UID("pg1-1").Namespace("ns1").Label(v1alpha1.PodGroupLabel, "pg2").Obj(),
st.MakePod().Name("pg2-1").UID("pg2-1").Namespace("ns1").Label(v1alpha1.PodGroupLabel, "pg2").Obj(),
},
lastDeniedPG: newCache(),
expectedSuccess: true,
},
{
name: "cluster resource not enough, min Resource",
pod: st.MakePod().Name("p2-1").UID("p2-1").Namespace("ns1").Label(util.PodGroupLabel, "pg3").
pod: st.MakePod().Name("p2-1").UID("p2-1").Namespace("ns1").Label(v1alpha1.PodGroupLabel, "pg3").
Req(map[corev1.ResourceName]string{corev1.ResourceCPU: "20"}).Obj(),
pods: []*corev1.Pod{
st.MakePod().Name("pg1-1").UID("pg1-1").Namespace("ns1").Label(util.PodGroupLabel, "pg3").Obj(),
st.MakePod().Name("pg2-1").UID("pg2-1").Namespace("ns1").Label(util.PodGroupLabel, "pg3").Obj(),
st.MakePod().Name("pg1-1").UID("pg1-1").Namespace("ns1").Label(v1alpha1.PodGroupLabel, "pg3").Obj(),
st.MakePod().Name("pg2-1").UID("pg2-1").Namespace("ns1").Label(v1alpha1.PodGroupLabel, "pg3").Obj(),
},
lastDeniedPG: newCache(),
expectedSuccess: false,
},
{
name: "cluster resource enough not required",
pod: st.MakePod().Name("p2-1").UID("p2-1").Namespace("ns1").Label(util.PodGroupLabel, "pg1").Obj(),
pod: st.MakePod().Name("p2-1").UID("p2-1").Namespace("ns1").Label(v1alpha1.PodGroupLabel, "pg1").Obj(),
pods: []*corev1.Pod{
st.MakePod().Name("pg1-1").UID("pg1-1").Namespace("ns1").Label(util.PodGroupLabel, "pg1").Obj(),
st.MakePod().Name("pg2-1").UID("pg2-1").Namespace("ns1").Label(util.PodGroupLabel, "pg1").Obj(),
st.MakePod().Name("pg1-1").UID("pg1-1").Namespace("ns1").Label(v1alpha1.PodGroupLabel, "pg1").Obj(),
st.MakePod().Name("pg2-1").UID("pg2-1").Namespace("ns1").Label(v1alpha1.PodGroupLabel, "pg1").Obj(),
},
lastDeniedPG: newCache(),
expectedSuccess: true,
@@ -189,7 +189,7 @@ func TestPermit(t *testing.T) {
pgInformer.Informer().GetStore().Add(pg1)
pgLister := pgInformer.Lister()

- existingPods, allNodes := testutil.MakeNodesAndPods(map[string]string{util.PodGroupLabel: "pg1"}, 1, 1)
+ existingPods, allNodes := testutil.MakeNodesAndPods(map[string]string{v1alpha1.PodGroupLabel: "pg1"}, 1, 1)
existingPods[0].Spec.NodeName = allNodes[0].Name
existingPods[0].Namespace = "ns1"
snapshot := testutil.NewFakeSharedLister(existingPods, allNodes)
@@ -207,18 +207,18 @@ func TestPermit(t *testing.T) {
},
{
name: "pod belongs to a non-existing pg",
pod: st.MakePod().Name("p").UID("p").Namespace("ns1").Label(util.PodGroupLabel, "pg-noexist").Obj(),
pod: st.MakePod().Name("p").UID("p").Namespace("ns1").Label(v1alpha1.PodGroupLabel, "pg-noexist").Obj(),
want: PodGroupNotFound,
},
{
name: "pod belongs to a pg that doesn't have enough pods",
pod: st.MakePod().Name("p").UID("p").Namespace("ns1").Label(util.PodGroupLabel, "pg1").Obj(),
pod: st.MakePod().Name("p").UID("p").Namespace("ns1").Label(v1alpha1.PodGroupLabel, "pg1").Obj(),
snapshot: testutil.NewFakeSharedLister([]*corev1.Pod{}, []*corev1.Node{}),
want: Wait,
},
{
name: "pod belongs to a pg that has enough pods",
pod: st.MakePod().Name("p").UID("p").Namespace("ns1").Label(util.PodGroupLabel, "pg1").Obj(),
pod: st.MakePod().Name("p").UID("p").Namespace("ns1").Label(v1alpha1.PodGroupLabel, "pg1").Obj(),
snapshot: snapshot,
want: Success,
},
@@ -259,19 +259,19 @@ func TestPostBind(t *testing.T) {
}{
{
name: "pg status convert to scheduled",
pod: st.MakePod().Name("p").UID("p").Namespace("ns1").Label(util.PodGroupLabel, "pg").Obj(),
pod: st.MakePod().Name("p").UID("p").Namespace("ns1").Label(v1alpha1.PodGroupLabel, "pg").Obj(),
desiredGroupPhase: v1alpha1.PodGroupScheduled,
desiredScheduled: 1,
},
{
name: "pg status convert to scheduling",
pod: st.MakePod().Name("p").UID("p").Namespace("ns1").Label(util.PodGroupLabel, "pg1").Obj(),
pod: st.MakePod().Name("p").UID("p").Namespace("ns1").Label(v1alpha1.PodGroupLabel, "pg1").Obj(),
desiredGroupPhase: v1alpha1.PodGroupScheduling,
desiredScheduled: 1,
},
{
name: "pg status does not convert, although scheduled pods change",
pod: st.MakePod().Name("p").UID("p").Namespace("ns1").Label(util.PodGroupLabel, "pg2").Obj(),
pod: st.MakePod().Name("p").UID("p").Namespace("ns1").Label(v1alpha1.PodGroupLabel, "pg2").Obj(),
desiredGroupPhase: v1alpha1.PodGroupScheduling,
desiredScheduled: 1,
},
@@ -306,7 +306,7 @@ func TestCheckClusterResource(t *testing.T) {
snapshot := testutil.NewFakeSharedLister(nil, []*corev1.Node{node})
nodeInfo, _ := snapshot.NodeInfos().List()

pod := st.MakePod().Name("t1-p1-3").Req(map[corev1.ResourceName]string{corev1.ResourceMemory: "100"}).Label(util.PodGroupLabel,
pod := st.MakePod().Name("t1-p1-3").Req(map[corev1.ResourceName]string{corev1.ResourceMemory: "100"}).Label(v1alpha1.PodGroupLabel,
"pg1-1").ZeroTerminationGracePeriod().Obj()
snapshotWithAssumedPod := testutil.NewFakeSharedLister([]*corev1.Pod{pod}, []*corev1.Node{node})
scheduledNodeInfo, _ := snapshotWithAssumedPod.NodeInfos().List()
5 changes: 3 additions & 2 deletions pkg/coscheduling/coscheduling.go
@@ -30,6 +30,7 @@ import (
"k8s.io/kubernetes/pkg/scheduler/framework"

"sigs.k8s.io/scheduler-plugins/pkg/apis/config"
"sigs.k8s.io/scheduler-plugins/pkg/apis/scheduling/v1alpha1"
"sigs.k8s.io/scheduler-plugins/pkg/coscheduling/core"
pgclientset "sigs.k8s.io/scheduler-plugins/pkg/generated/clientset/versioned"
pgformers "sigs.k8s.io/scheduler-plugins/pkg/generated/informers/externalversions"
@@ -159,7 +160,7 @@ func (cs *Coscheduling) PostFilter(ctx context.Context, state *framework.CycleSt
// It's based on an implicit assumption: if the nth Pod failed,
// it's inferrable other Pods belonging to the same PodGroup would be very likely to fail.
cs.frameworkHandler.IterateOverWaitingPods(func(waitingPod framework.WaitingPod) {
- if waitingPod.GetPod().Namespace == pod.Namespace && waitingPod.GetPod().Labels[util.PodGroupLabel] == pg.Name {
+ if waitingPod.GetPod().Namespace == pod.Namespace && waitingPod.GetPod().Labels[v1alpha1.PodGroupLabel] == pg.Name {
klog.V(3).InfoS("PostFilter rejects the pod", "podGroup", klog.KObj(pg), "pod", klog.KObj(waitingPod.GetPod()))
waitingPod.Reject(cs.Name(), "optimistic rejection in PostFilter")
}
@@ -222,7 +223,7 @@ func (cs *Coscheduling) Unreserve(ctx context.Context, state *framework.CycleSta
return
}
cs.frameworkHandler.IterateOverWaitingPods(func(waitingPod framework.WaitingPod) {
- if waitingPod.GetPod().Namespace == pod.Namespace && waitingPod.GetPod().Labels[util.PodGroupLabel] == pg.Name {
+ if waitingPod.GetPod().Namespace == pod.Namespace && waitingPod.GetPod().Labels[v1alpha1.PodGroupLabel] == pg.Name {
klog.V(3).InfoS("Unreserve rejects", "pod", klog.KObj(waitingPod.GetPod()), "podGroup", klog.KObj(pg))
waitingPod.Reject(cs.Name(), "rejection in Unreserve")
}
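A hedged sketch of the optimistic-rejection pattern shared by PostFilter and Unreserve: once one member of a PodGroup fails, every sibling still waiting at the Permit stage is rejected. rejectSiblings is a hypothetical helper written for illustration, not part of the plugin's exported API.

package coscheduling

import (
    corev1 "k8s.io/api/core/v1"
    "k8s.io/kubernetes/pkg/scheduler/framework"

    "sigs.k8s.io/scheduler-plugins/pkg/apis/scheduling/v1alpha1"
)

// rejectSiblings rejects every waiting Pod that belongs to the same PodGroup
// (same namespace, same PodGroupLabel value) as the Pod that just failed.
func rejectSiblings(handle framework.Handle, pluginName string, pod *corev1.Pod, pgName string) {
    handle.IterateOverWaitingPods(func(waitingPod framework.WaitingPod) {
        wp := waitingPod.GetPod()
        if wp.Namespace == pod.Namespace && wp.Labels[v1alpha1.PodGroupLabel] == pgName {
            waitingPod.Reject(pluginName, "a sibling in the PodGroup failed scheduling")
        }
    })
}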
(Diffs for the remaining 4 changed files are not shown here.)
