diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index f6ba791bbfb4..4b3cf197b80e 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -19151,6 +19151,11 @@ "$ref": "#/definitions/com.github.karmada-io.karmada.pkg.apis.work.v1alpha2.BindingSnapshot" } }, + "rescheduleTriggeredAt": { + "description": "RescheduleTriggeredAt is a timestamp representing when the referenced resource is triggered rescheduling. Only when this timestamp is later than timestamp in status.lastScheduledTime will the rescheduling actually execute.\n\nIt is represented in RFC3339 form (like '2006-01-02T15:04:05Z') and is in UTC. It is recommended to be populated by the REST handler of command.karmada.io/Reschedule API.", + "default": {}, + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" + }, "resource": { "description": "Resource represents the Kubernetes resource to be propagated.", "default": {}, @@ -19182,6 +19187,11 @@ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Condition" } }, + "lastScheduledTime": { + "description": "LastScheduledTime is a timestamp representing scheduler successfully finished a scheduling. It is represented in RFC3339 form (like '2006-01-02T15:04:05Z') and is in UTC.", + "default": {}, + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" + }, "schedulerObservedGeneration": { "description": "SchedulerObservedGeneration is the generation(.metadata.generation) observed by the scheduler. 
If SchedulerObservedGeneration is less than the generation in metadata means the scheduler hasn't confirmed the scheduling result or hasn't done the schedule yet.", "type": "integer", diff --git a/charts/karmada/_crds/bases/work/work.karmada.io_clusterresourcebindings.yaml b/charts/karmada/_crds/bases/work/work.karmada.io_clusterresourcebindings.yaml index 9c00dfe94555..221b49f58249 100644 --- a/charts/karmada/_crds/bases/work/work.karmada.io_clusterresourcebindings.yaml +++ b/charts/karmada/_crds/bases/work/work.karmada.io_clusterresourcebindings.yaml @@ -1128,6 +1128,16 @@ spec: - name type: object type: array + rescheduleTriggeredAt: + description: "RescheduleTriggeredAt is a timestamp representing when + the referenced resource is triggered rescheduling. Only when this + timestamp is later than timestamp in status.lastScheduledTime will + the rescheduling actually execute. \n It is represented in RFC3339 + form (like '2006-01-02T15:04:05Z') and is in UTC. It is recommended + to be populated by the REST handler of command.karmada.io/Reschedule + API." + format: date-time + type: string resource: description: Resource represents the Kubernetes resource to be propagated. properties: @@ -1279,6 +1289,12 @@ spec: - type type: object type: array + lastScheduledTime: + description: LastScheduledTime is a timestamp representing scheduler + successfully finished a scheduling. It is represented in RFC3339 + form (like '2006-01-02T15:04:05Z') and is in UTC. + format: date-time + type: string schedulerObservedGeneration: description: SchedulerObservedGeneration is the generation(.metadata.generation) observed by the scheduler. 
If SchedulerObservedGeneration is less diff --git a/charts/karmada/_crds/bases/work/work.karmada.io_resourcebindings.yaml b/charts/karmada/_crds/bases/work/work.karmada.io_resourcebindings.yaml index 6216500ea01f..4c3839b9b434 100644 --- a/charts/karmada/_crds/bases/work/work.karmada.io_resourcebindings.yaml +++ b/charts/karmada/_crds/bases/work/work.karmada.io_resourcebindings.yaml @@ -1128,6 +1128,16 @@ spec: - name type: object type: array + rescheduleTriggeredAt: + description: "RescheduleTriggeredAt is a timestamp representing when + the referenced resource is triggered rescheduling. Only when this + timestamp is later than timestamp in status.lastScheduledTime will + the rescheduling actually execute. \n It is represented in RFC3339 + form (like '2006-01-02T15:04:05Z') and is in UTC. It is recommended + to be populated by the REST handler of command.karmada.io/Reschedule + API." + format: date-time + type: string resource: description: Resource represents the Kubernetes resource to be propagated. properties: @@ -1279,6 +1289,12 @@ spec: - type type: object type: array + lastScheduledTime: + description: LastScheduledTime is a timestamp representing scheduler + successfully finished a scheduling. It is represented in RFC3339 + form (like '2006-01-02T15:04:05Z') and is in UTC. + format: date-time + type: string schedulerObservedGeneration: description: SchedulerObservedGeneration is the generation(.metadata.generation) observed by the scheduler. 
If SchedulerObservedGeneration is less diff --git a/pkg/apis/work/v1alpha2/binding_types.go b/pkg/apis/work/v1alpha2/binding_types.go index b9868f32528d..30d4860c127a 100644 --- a/pkg/apis/work/v1alpha2/binding_types.go +++ b/pkg/apis/work/v1alpha2/binding_types.go @@ -136,6 +136,14 @@ type ResourceBindingSpec struct { // +kubebuilder:validation:Enum=Abort;Overwrite // +optional ConflictResolution policyv1alpha1.ConflictResolution `json:"conflictResolution,omitempty"` + + // RescheduleTriggeredAt is a timestamp representing when the referenced resource is triggered rescheduling. + // Only when this timestamp is later than timestamp in status.lastScheduledTime will the rescheduling actually execute. + // + // It is represented in RFC3339 form (like '2006-01-02T15:04:05Z') and is in UTC. + // It is recommended to be populated by the REST handler of command.karmada.io/Reschedule API. + // +optional + RescheduleTriggeredAt metav1.Time `json:"rescheduleTriggeredAt,omitempty"` } // ObjectReference contains enough information to locate the referenced object inside current cluster. @@ -297,6 +305,11 @@ type ResourceBindingStatus struct { // +optional SchedulerObservedAffinityName string `json:"schedulerObservingAffinityName,omitempty"` + // LastScheduledTime is a timestamp representing scheduler successfully finished a scheduling. + // It is represented in RFC3339 form (like '2006-01-02T15:04:05Z') and is in UTC. + // +optional + LastScheduledTime metav1.Time `json:"lastScheduledTime,omitempty"` + // Conditions contain the different condition statuses. 
// +optional Conditions []metav1.Condition `json:"conditions,omitempty"` diff --git a/pkg/apis/work/v1alpha2/zz_generated.deepcopy.go b/pkg/apis/work/v1alpha2/zz_generated.deepcopy.go index 0a05f8f7a486..2447b3dee7ca 100644 --- a/pkg/apis/work/v1alpha2/zz_generated.deepcopy.go +++ b/pkg/apis/work/v1alpha2/zz_generated.deepcopy.go @@ -344,6 +344,7 @@ func (in *ResourceBindingSpec) DeepCopyInto(out *ResourceBindingSpec) { *out = new(v1alpha1.FailoverBehavior) (*in).DeepCopyInto(*out) } + in.RescheduleTriggeredAt.DeepCopyInto(&out.RescheduleTriggeredAt) return } @@ -360,6 +361,7 @@ func (in *ResourceBindingSpec) DeepCopy() *ResourceBindingSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ResourceBindingStatus) DeepCopyInto(out *ResourceBindingStatus) { *out = *in + in.LastScheduledTime.DeepCopyInto(&out.LastScheduledTime) if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions *out = make([]metav1.Condition, len(*in)) diff --git a/pkg/generated/openapi/zz_generated.openapi.go b/pkg/generated/openapi/zz_generated.openapi.go index 722792e6877c..2f9243d8fdf8 100755 --- a/pkg/generated/openapi/zz_generated.openapi.go +++ b/pkg/generated/openapi/zz_generated.openapi.go @@ -6748,12 +6748,19 @@ func schema_pkg_apis_work_v1alpha2_ResourceBindingSpec(ref common.ReferenceCallb Format: "", }, }, + "rescheduleTriggeredAt": { + SchemaProps: spec.SchemaProps{ + Description: "RescheduleTriggeredAt is a timestamp representing when the referenced resource is triggered rescheduling. Only when this timestamp is later than timestamp in status.lastScheduledTime will the rescheduling actually execute.\n\nIt is represented in RFC3339 form (like '2006-01-02T15:04:05Z') and is in UTC. 
It is recommended to be populated by the REST handler of command.karmada.io/Reschedule API.", + Default: map[string]interface{}{}, + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), + }, + }, }, Required: []string{"resource"}, }, }, Dependencies: []string{ - "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1.FailoverBehavior", "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1.Placement", "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2.BindingSnapshot", "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2.GracefulEvictionTask", "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2.ObjectReference", "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2.ReplicaRequirements", "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2.TargetCluster"}, + "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1.FailoverBehavior", "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1.Placement", "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2.BindingSnapshot", "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2.GracefulEvictionTask", "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2.ObjectReference", "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2.ReplicaRequirements", "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2.TargetCluster", "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, } } @@ -6778,6 +6785,13 @@ func schema_pkg_apis_work_v1alpha2_ResourceBindingStatus(ref common.ReferenceCal Format: "", }, }, + "lastScheduledTime": { + SchemaProps: spec.SchemaProps{ + Description: "LastScheduledTime is a timestamp representing scheduler successfully finished a scheduling. 
It is represented in RFC3339 form (like '2006-01-02T15:04:05Z') and is in UTC.", + Default: map[string]interface{}{}, + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), + }, + }, "conditions": { SchemaProps: spec.SchemaProps{ Description: "Conditions contain the different condition statuses.", @@ -6810,7 +6824,7 @@ func schema_pkg_apis_work_v1alpha2_ResourceBindingStatus(ref common.ReferenceCal }, }, Dependencies: []string{ - "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2.AggregatedStatusItem", "k8s.io/apimachinery/pkg/apis/meta/v1.Condition"}, + "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2.AggregatedStatusItem", "k8s.io/apimachinery/pkg/apis/meta/v1.Condition", "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, } } diff --git a/pkg/scheduler/core/assignment.go b/pkg/scheduler/core/assignment.go index 0f2e3c6d8481..e9785b5d3644 100644 --- a/pkg/scheduler/core/assignment.go +++ b/pkg/scheduler/core/assignment.go @@ -65,28 +65,34 @@ type assignState struct { // targetReplicas is the replicas that we need to schedule in this round targetReplicas int32 + + // rescheduleSpecified when spec.rescheduleTriggeredAt later than status.lastScheduledTime in binding, means + // there is a rescheduling explicitly specified by user, and scheduler should do a purely rescale. 
+ rescheduleSpecified bool } -func newAssignState(candidates []*clusterv1alpha1.Cluster, placement *policyv1alpha1.Placement, obj *workv1alpha2.ResourceBindingSpec) *assignState { +func newAssignState(candidates []*clusterv1alpha1.Cluster, spec *workv1alpha2.ResourceBindingSpec, + status *workv1alpha2.ResourceBindingStatus) *assignState { var strategyType string - switch placement.ReplicaSchedulingType() { + switch spec.Placement.ReplicaSchedulingType() { case policyv1alpha1.ReplicaSchedulingTypeDuplicated: strategyType = DuplicatedStrategy case policyv1alpha1.ReplicaSchedulingTypeDivided: - switch placement.ReplicaScheduling.ReplicaDivisionPreference { + switch spec.Placement.ReplicaScheduling.ReplicaDivisionPreference { case policyv1alpha1.ReplicaDivisionPreferenceAggregated: strategyType = AggregatedStrategy case policyv1alpha1.ReplicaDivisionPreferenceWeighted: - if placement.ReplicaScheduling.WeightPreference != nil && len(placement.ReplicaScheduling.WeightPreference.DynamicWeight) != 0 { + if spec.Placement.ReplicaScheduling.WeightPreference != nil && len(spec.Placement.ReplicaScheduling.WeightPreference.DynamicWeight) != 0 { strategyType = DynamicWeightStrategy } else { strategyType = StaticWeightStrategy } } } + rescheduleSpecified := spec.RescheduleTriggeredAt.After(status.LastScheduledTime.Time) - return &assignState{candidates: candidates, strategy: placement.ReplicaScheduling, spec: obj, strategyType: strategyType} + return &assignState{candidates: candidates, strategy: spec.Placement.ReplicaScheduling, spec: spec, strategyType: strategyType, rescheduleSpecified: rescheduleSpecified} } func (as *assignState) buildScheduledClusters() { @@ -194,6 +200,13 @@ func assignByDynamicStrategy(state *assignState) ([]workv1alpha2.TargetCluster, return nil, fmt.Errorf("failed to scale up: %v", err) } return result, nil + } else if state.rescheduleSpecified { + // when a rescheduling is explicitly specified by user, the scheduler should do a purely rescale. 
+ result, err := dynamicReScale(state) + if err != nil { + return nil, fmt.Errorf("failed to do rescale: %v", err) + } + return result, nil } return state.scheduledClusters, nil diff --git a/pkg/scheduler/core/assignment_test.go b/pkg/scheduler/core/assignment_test.go index 28518bb7531f..fe8506a85def 100644 --- a/pkg/scheduler/core/assignment_test.go +++ b/pkg/scheduler/core/assignment_test.go @@ -299,7 +299,6 @@ func Test_dynamicScale(t *testing.T) { name string candidates []*clusterv1alpha1.Cluster object *workv1alpha2.ResourceBindingSpec - placement *policyv1alpha1.Placement want []workv1alpha2.TargetCluster wantErr bool }{ @@ -326,9 +325,9 @@ func Test_dynamicScale(t *testing.T) { {Name: ClusterMember2, Replicas: 4}, {Name: ClusterMember3, Replicas: 6}, }, - }, - placement: &policyv1alpha1.Placement{ - ReplicaScheduling: dynamicWeightStrategy, + Placement: &policyv1alpha1.Placement{ + ReplicaScheduling: dynamicWeightStrategy, + }, }, want: []workv1alpha2.TargetCluster{ {Name: ClusterMember1, Replicas: 1}, @@ -360,9 +359,9 @@ func Test_dynamicScale(t *testing.T) { {Name: ClusterMember2, Replicas: 4}, {Name: ClusterMember3, Replicas: 6}, }, - }, - placement: &policyv1alpha1.Placement{ - ReplicaScheduling: dynamicWeightStrategy, + Placement: &policyv1alpha1.Placement{ + ReplicaScheduling: dynamicWeightStrategy, + }, }, want: []workv1alpha2.TargetCluster{ {Name: ClusterMember1, Replicas: 6}, @@ -394,9 +393,9 @@ func Test_dynamicScale(t *testing.T) { {Name: ClusterMember2, Replicas: 4}, {Name: ClusterMember3, Replicas: 6}, }, - }, - placement: &policyv1alpha1.Placement{ - ReplicaScheduling: dynamicWeightStrategy, + Placement: &policyv1alpha1.Placement{ + ReplicaScheduling: dynamicWeightStrategy, + }, }, wantErr: true, }, @@ -422,9 +421,9 @@ func Test_dynamicScale(t *testing.T) { {Name: ClusterMember1, Replicas: 4}, {Name: ClusterMember2, Replicas: 8}, }, - }, - placement: &policyv1alpha1.Placement{ - ReplicaScheduling: aggregatedStrategy, + Placement: 
&policyv1alpha1.Placement{ + ReplicaScheduling: aggregatedStrategy, + }, }, want: []workv1alpha2.TargetCluster{ {Name: ClusterMember2, Replicas: 6}, @@ -450,9 +449,9 @@ func Test_dynamicScale(t *testing.T) { {Name: ClusterMember1, Replicas: 4}, {Name: ClusterMember2, Replicas: 8}, }, - }, - placement: &policyv1alpha1.Placement{ - ReplicaScheduling: aggregatedStrategy, + Placement: &policyv1alpha1.Placement{ + ReplicaScheduling: aggregatedStrategy, + }, }, want: []workv1alpha2.TargetCluster{ {Name: ClusterMember2, Replicas: 8}, @@ -481,9 +480,9 @@ func Test_dynamicScale(t *testing.T) { {Name: ClusterMember1, Replicas: 4}, {Name: ClusterMember2, Replicas: 8}, }, - }, - placement: &policyv1alpha1.Placement{ - ReplicaScheduling: aggregatedStrategy, + Placement: &policyv1alpha1.Placement{ + ReplicaScheduling: aggregatedStrategy, + }, }, want: []workv1alpha2.TargetCluster{ {Name: ClusterMember1, Replicas: 6}, @@ -514,9 +513,9 @@ func Test_dynamicScale(t *testing.T) { {Name: ClusterMember1, Replicas: 4}, {Name: ClusterMember2, Replicas: 8}, }, - }, - placement: &policyv1alpha1.Placement{ - ReplicaScheduling: aggregatedStrategy, + Placement: &policyv1alpha1.Placement{ + ReplicaScheduling: aggregatedStrategy, + }, }, want: []workv1alpha2.TargetCluster{ {Name: ClusterMember1, Replicas: 10}, @@ -546,9 +545,9 @@ func Test_dynamicScale(t *testing.T) { {Name: ClusterMember1, Replicas: 4}, {Name: ClusterMember2, Replicas: 8}, }, - }, - placement: &policyv1alpha1.Placement{ - ReplicaScheduling: aggregatedStrategy, + Placement: &policyv1alpha1.Placement{ + ReplicaScheduling: aggregatedStrategy, + }, }, wantErr: true, }, @@ -574,9 +573,9 @@ func Test_dynamicScale(t *testing.T) { {Name: ClusterMember1, Replicas: 4}, {Name: ClusterMember2, Replicas: 8}, }, - }, - placement: &policyv1alpha1.Placement{ - ReplicaScheduling: aggregatedStrategy, + Placement: &policyv1alpha1.Placement{ + ReplicaScheduling: aggregatedStrategy, + }, }, want: []workv1alpha2.TargetCluster{ {Name: 
ClusterMember1, Replicas: 7}, @@ -588,7 +587,7 @@ func Test_dynamicScale(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - state := newAssignState(tt.candidates, tt.placement, tt.object) + state := newAssignState(tt.candidates, tt.object, &workv1alpha2.ResourceBindingStatus{}) got, err := assignByDynamicStrategy(state) if (err != nil) != tt.wantErr { t.Errorf("assignByDynamicStrategy() error = %v, wantErr %v", err, tt.wantErr) @@ -819,7 +818,7 @@ func Test_dynamicScaleUp(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - state := newAssignState(tt.candidates, tt.placement, tt.object) + state := newAssignState(tt.candidates, tt.object, &workv1alpha2.ResourceBindingStatus{}) state.buildScheduledClusters() got, err := dynamicScaleUp(state) if (err != nil) != tt.wantErr { diff --git a/pkg/scheduler/core/common.go b/pkg/scheduler/core/common.go index a34d4add07e1..374d4d64c088 100644 --- a/pkg/scheduler/core/common.go +++ b/pkg/scheduler/core/common.go @@ -41,8 +41,8 @@ func SelectClusters(clustersScore framework.ClusterScoreList, // AssignReplicas assigns replicas to clusters based on the placement and resource binding spec. 
func AssignReplicas( clusters []*clusterv1alpha1.Cluster, - placement *policyv1alpha1.Placement, - object *workv1alpha2.ResourceBindingSpec, + spec *workv1alpha2.ResourceBindingSpec, + status *workv1alpha2.ResourceBindingStatus, ) ([]workv1alpha2.TargetCluster, error) { startTime := time.Now() defer metrics.ScheduleStep(metrics.ScheduleStepAssignReplicas, startTime) @@ -51,13 +51,13 @@ func AssignReplicas( return nil, fmt.Errorf("no clusters available to schedule") } - if object.Replicas > 0 { - state := newAssignState(clusters, placement, object) + if spec.Replicas > 0 { + state := newAssignState(clusters, spec, status) assignFunc, ok := assignFuncMap[state.strategyType] if !ok { // should never happen at present return nil, fmt.Errorf("unsupported replica scheduling strategy, replicaSchedulingType: %s, replicaDivisionPreference: %s, "+ - "please try another scheduling strategy", placement.ReplicaSchedulingType(), placement.ReplicaScheduling.ReplicaDivisionPreference) + "please try another scheduling strategy", spec.Placement.ReplicaSchedulingType(), spec.Placement.ReplicaScheduling.ReplicaDivisionPreference) } assignResults, err := assignFunc(state) if err != nil { diff --git a/pkg/scheduler/core/division_algorithm.go b/pkg/scheduler/core/division_algorithm.go index 5fbadc45cce6..3bbb03172bb1 100644 --- a/pkg/scheduler/core/division_algorithm.go +++ b/pkg/scheduler/core/division_algorithm.go @@ -126,3 +126,27 @@ func dynamicScaleUp(state *assignState) ([]workv1alpha2.TargetCluster, error) { }) return dynamicDivideReplicas(state) } + +// dynamicReScale re-calculates target cluster replicas once more, considering this action as a first schedule. +func dynamicReScale(state *assignState) ([]workv1alpha2.TargetCluster, error) { + // 1.
targetReplicas is set to desired replicas + state.targetReplicas = state.spec.Replicas + state.buildAvailableClusters(func(clusters []*clusterv1alpha1.Cluster, spec *workv1alpha2.ResourceBindingSpec) []workv1alpha2.TargetCluster { + clusterAvailableReplicas := calAvailableReplicas(clusters, spec) + // 2. clusterAvailableReplicas should take into account the replicas already allocated + for _, scheduledCluster := range state.scheduledClusters { + for i, availableCluster := range clusterAvailableReplicas { + if availableCluster.Name != scheduledCluster.Name { + continue + } + clusterAvailableReplicas[i].Replicas += scheduledCluster.Replicas + break + } + } + sort.Sort(TargetClustersList(clusterAvailableReplicas)) + return clusterAvailableReplicas + }) + // 3. scheduledClusters are not set, which implicates we consider this action as a first schedule. + state.scheduledClusters = nil + return dynamicDivideReplicas(state) +} diff --git a/pkg/scheduler/core/generic_scheduler.go b/pkg/scheduler/core/generic_scheduler.go index 8c750fb80d0f..ffa2a172e556 100644 --- a/pkg/scheduler/core/generic_scheduler.go +++ b/pkg/scheduler/core/generic_scheduler.go @@ -100,7 +100,7 @@ func (g *genericScheduler) Schedule( } klog.V(4).Infof("Selected clusters: %v", clusters) - clustersWithReplicas, err := g.assignReplicas(clusters, spec.Placement, spec) + clustersWithReplicas, err := g.assignReplicas(clusters, spec, status) if err != nil { return result, fmt.Errorf("failed to assign replicas: %w", err) } @@ -179,7 +179,7 @@ func (g *genericScheduler) selectClusters(clustersScore framework.ClusterScoreLi return SelectClusters(clustersScore, placement, spec) } -func (g *genericScheduler) assignReplicas(clusters []*clusterv1alpha1.Cluster, placement *policyv1alpha1.Placement, - object *workv1alpha2.ResourceBindingSpec) ([]workv1alpha2.TargetCluster, error) { - return AssignReplicas(clusters, placement, object) +func (g *genericScheduler) assignReplicas(clusters []*clusterv1alpha1.Cluster, 
spec *workv1alpha2.ResourceBindingSpec, + status *workv1alpha2.ResourceBindingStatus) ([]workv1alpha2.TargetCluster, error) { + return AssignReplicas(clusters, spec, status) } diff --git a/pkg/scheduler/core/generic_scheduler_test.go b/pkg/scheduler/core/generic_scheduler_test.go index 57e68754a854..14bfb30102bd 100644 --- a/pkg/scheduler/core/generic_scheduler_test.go +++ b/pkg/scheduler/core/generic_scheduler_test.go @@ -31,7 +31,6 @@ type testcase struct { name string clusters []*clusterv1alpha1.Cluster object workv1alpha2.ResourceBindingSpec - placement *policyv1alpha1.Placement previousResultToNewResult map[string][]string wantErr bool } @@ -74,15 +73,15 @@ func Test_EvenDistributionOfReplicas(t *testing.T) { }, object: workv1alpha2.ResourceBindingSpec{ Replicas: 3, - }, - placement: &policyv1alpha1.Placement{ - ReplicaScheduling: &policyv1alpha1.ReplicaSchedulingStrategy{ - ReplicaSchedulingType: policyv1alpha1.ReplicaSchedulingTypeDivided, - ReplicaDivisionPreference: policyv1alpha1.ReplicaDivisionPreferenceWeighted, - WeightPreference: &policyv1alpha1.ClusterPreferences{ - StaticWeightList: []policyv1alpha1.StaticClusterWeight{ - {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember1}}, Weight: 1}, - {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember2}}, Weight: 1}, + Placement: &policyv1alpha1.Placement{ + ReplicaScheduling: &policyv1alpha1.ReplicaSchedulingStrategy{ + ReplicaSchedulingType: policyv1alpha1.ReplicaSchedulingTypeDivided, + ReplicaDivisionPreference: policyv1alpha1.ReplicaDivisionPreferenceWeighted, + WeightPreference: &policyv1alpha1.ClusterPreferences{ + StaticWeightList: []policyv1alpha1.StaticClusterWeight{ + {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember1}}, Weight: 1}, + {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember2}}, Weight: 1}, + }, }, }, }, @@ -106,16 +105,16 @@ func 
Test_EvenDistributionOfReplicas(t *testing.T) { }, object: workv1alpha2.ResourceBindingSpec{ Replicas: 3, - }, - placement: &policyv1alpha1.Placement{ - ReplicaScheduling: &policyv1alpha1.ReplicaSchedulingStrategy{ - ReplicaSchedulingType: policyv1alpha1.ReplicaSchedulingTypeDivided, - ReplicaDivisionPreference: policyv1alpha1.ReplicaDivisionPreferenceWeighted, - WeightPreference: &policyv1alpha1.ClusterPreferences{ - StaticWeightList: []policyv1alpha1.StaticClusterWeight{ - {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember1}}, Weight: 1}, - {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember2}}, Weight: 1}, - {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember3}}, Weight: 1}, + Placement: &policyv1alpha1.Placement{ + ReplicaScheduling: &policyv1alpha1.ReplicaSchedulingStrategy{ + ReplicaSchedulingType: policyv1alpha1.ReplicaSchedulingTypeDivided, + ReplicaDivisionPreference: policyv1alpha1.ReplicaDivisionPreferenceWeighted, + WeightPreference: &policyv1alpha1.ClusterPreferences{ + StaticWeightList: []policyv1alpha1.StaticClusterWeight{ + {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember1}}, Weight: 1}, + {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember2}}, Weight: 1}, + {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember3}}, Weight: 1}, + }, }, }, }, @@ -134,16 +133,16 @@ func Test_EvenDistributionOfReplicas(t *testing.T) { }, object: workv1alpha2.ResourceBindingSpec{ Replicas: 5, // change replicas from 3 to 5 - }, - placement: &policyv1alpha1.Placement{ - ReplicaScheduling: &policyv1alpha1.ReplicaSchedulingStrategy{ - ReplicaSchedulingType: policyv1alpha1.ReplicaSchedulingTypeDivided, - ReplicaDivisionPreference: policyv1alpha1.ReplicaDivisionPreferenceWeighted, - WeightPreference: &policyv1alpha1.ClusterPreferences{ - StaticWeightList: 
[]policyv1alpha1.StaticClusterWeight{ - {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember1}}, Weight: 1}, - {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember2}}, Weight: 1}, - {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember3}}, Weight: 1}, + Placement: &policyv1alpha1.Placement{ + ReplicaScheduling: &policyv1alpha1.ReplicaSchedulingStrategy{ + ReplicaSchedulingType: policyv1alpha1.ReplicaSchedulingTypeDivided, + ReplicaDivisionPreference: policyv1alpha1.ReplicaDivisionPreferenceWeighted, + WeightPreference: &policyv1alpha1.ClusterPreferences{ + StaticWeightList: []policyv1alpha1.StaticClusterWeight{ + {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember1}}, Weight: 1}, + {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember2}}, Weight: 1}, + {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember3}}, Weight: 1}, + }, }, }, }, @@ -171,17 +170,17 @@ func Test_EvenDistributionOfReplicas(t *testing.T) { }, object: workv1alpha2.ResourceBindingSpec{ Replicas: 7, - }, - placement: &policyv1alpha1.Placement{ - ReplicaScheduling: &policyv1alpha1.ReplicaSchedulingStrategy{ - ReplicaSchedulingType: policyv1alpha1.ReplicaSchedulingTypeDivided, - ReplicaDivisionPreference: policyv1alpha1.ReplicaDivisionPreferenceWeighted, - WeightPreference: &policyv1alpha1.ClusterPreferences{ - StaticWeightList: []policyv1alpha1.StaticClusterWeight{ - {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember1}}, Weight: 2}, - {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember2}}, Weight: 1}, - {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember3}}, Weight: 1}, - {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember4}}, Weight: 1}, + Placement: &policyv1alpha1.Placement{ + 
ReplicaScheduling: &policyv1alpha1.ReplicaSchedulingStrategy{ + ReplicaSchedulingType: policyv1alpha1.ReplicaSchedulingTypeDivided, + ReplicaDivisionPreference: policyv1alpha1.ReplicaDivisionPreferenceWeighted, + WeightPreference: &policyv1alpha1.ClusterPreferences{ + StaticWeightList: []policyv1alpha1.StaticClusterWeight{ + {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember1}}, Weight: 2}, + {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember2}}, Weight: 1}, + {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember3}}, Weight: 1}, + {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember4}}, Weight: 1}, + }, }, }, }, @@ -201,17 +200,17 @@ func Test_EvenDistributionOfReplicas(t *testing.T) { }, object: workv1alpha2.ResourceBindingSpec{ Replicas: 8, // change replicas from 7 to 8 - }, - placement: &policyv1alpha1.Placement{ - ReplicaScheduling: &policyv1alpha1.ReplicaSchedulingStrategy{ - ReplicaSchedulingType: policyv1alpha1.ReplicaSchedulingTypeDivided, - ReplicaDivisionPreference: policyv1alpha1.ReplicaDivisionPreferenceWeighted, - WeightPreference: &policyv1alpha1.ClusterPreferences{ - StaticWeightList: []policyv1alpha1.StaticClusterWeight{ - {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember1}}, Weight: 2}, - {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember2}}, Weight: 1}, - {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember3}}, Weight: 1}, - {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember4}}, Weight: 1}, + Placement: &policyv1alpha1.Placement{ + ReplicaScheduling: &policyv1alpha1.ReplicaSchedulingStrategy{ + ReplicaSchedulingType: policyv1alpha1.ReplicaSchedulingTypeDivided, + ReplicaDivisionPreference: policyv1alpha1.ReplicaDivisionPreferenceWeighted, + WeightPreference: &policyv1alpha1.ClusterPreferences{ 
+ StaticWeightList: []policyv1alpha1.StaticClusterWeight{ + {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember1}}, Weight: 2}, + {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember2}}, Weight: 1}, + {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember3}}, Weight: 1}, + {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember4}}, Weight: 1}, + }, }, }, }, @@ -241,17 +240,17 @@ func Test_EvenDistributionOfReplicas(t *testing.T) { }, object: workv1alpha2.ResourceBindingSpec{ Replicas: 9, - }, - placement: &policyv1alpha1.Placement{ - ReplicaScheduling: &policyv1alpha1.ReplicaSchedulingStrategy{ - ReplicaSchedulingType: policyv1alpha1.ReplicaSchedulingTypeDivided, - ReplicaDivisionPreference: policyv1alpha1.ReplicaDivisionPreferenceWeighted, - WeightPreference: &policyv1alpha1.ClusterPreferences{ - StaticWeightList: []policyv1alpha1.StaticClusterWeight{ - {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember1}}, Weight: 2}, - {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember2}}, Weight: 1}, - {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember3}}, Weight: 1}, - {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember4}}, Weight: 1}, + Placement: &policyv1alpha1.Placement{ + ReplicaScheduling: &policyv1alpha1.ReplicaSchedulingStrategy{ + ReplicaSchedulingType: policyv1alpha1.ReplicaSchedulingTypeDivided, + ReplicaDivisionPreference: policyv1alpha1.ReplicaDivisionPreferenceWeighted, + WeightPreference: &policyv1alpha1.ClusterPreferences{ + StaticWeightList: []policyv1alpha1.StaticClusterWeight{ + {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember1}}, Weight: 2}, + {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember2}}, Weight: 1}, + {TargetCluster: 
policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember3}}, Weight: 1}, + {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember4}}, Weight: 1}, + }, }, }, }, @@ -271,17 +270,17 @@ func Test_EvenDistributionOfReplicas(t *testing.T) { }, object: workv1alpha2.ResourceBindingSpec{ Replicas: 8, - }, - placement: &policyv1alpha1.Placement{ - ReplicaScheduling: &policyv1alpha1.ReplicaSchedulingStrategy{ - ReplicaSchedulingType: policyv1alpha1.ReplicaSchedulingTypeDivided, - ReplicaDivisionPreference: policyv1alpha1.ReplicaDivisionPreferenceWeighted, - WeightPreference: &policyv1alpha1.ClusterPreferences{ - StaticWeightList: []policyv1alpha1.StaticClusterWeight{ - {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember1}}, Weight: 2}, - {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember2}}, Weight: 1}, - {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember3}}, Weight: 1}, - {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember4}}, Weight: 1}, + Placement: &policyv1alpha1.Placement{ + ReplicaScheduling: &policyv1alpha1.ReplicaSchedulingStrategy{ + ReplicaSchedulingType: policyv1alpha1.ReplicaSchedulingTypeDivided, + ReplicaDivisionPreference: policyv1alpha1.ReplicaDivisionPreferenceWeighted, + WeightPreference: &policyv1alpha1.ClusterPreferences{ + StaticWeightList: []policyv1alpha1.StaticClusterWeight{ + {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember1}}, Weight: 2}, + {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember2}}, Weight: 1}, + {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember3}}, Weight: 1}, + {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember4}}, Weight: 1}, + }, }, }, }, @@ -308,17 +307,17 @@ func Test_EvenDistributionOfReplicas(t *testing.T) { }, object: 
workv1alpha2.ResourceBindingSpec{ Replicas: 6, - }, - placement: &policyv1alpha1.Placement{ - ReplicaScheduling: &policyv1alpha1.ReplicaSchedulingStrategy{ - ReplicaSchedulingType: policyv1alpha1.ReplicaSchedulingTypeDivided, - ReplicaDivisionPreference: policyv1alpha1.ReplicaDivisionPreferenceWeighted, - WeightPreference: &policyv1alpha1.ClusterPreferences{ - StaticWeightList: []policyv1alpha1.StaticClusterWeight{ - {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember1}}, Weight: 1}, - {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember2}}, Weight: 1}, - {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember3}}, Weight: 1}, - {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember4}}, Weight: 1}, + Placement: &policyv1alpha1.Placement{ + ReplicaScheduling: &policyv1alpha1.ReplicaSchedulingStrategy{ + ReplicaSchedulingType: policyv1alpha1.ReplicaSchedulingTypeDivided, + ReplicaDivisionPreference: policyv1alpha1.ReplicaDivisionPreferenceWeighted, + WeightPreference: &policyv1alpha1.ClusterPreferences{ + StaticWeightList: []policyv1alpha1.StaticClusterWeight{ + {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember1}}, Weight: 1}, + {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember2}}, Weight: 1}, + {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember3}}, Weight: 1}, + {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember4}}, Weight: 1}, + }, }, }, }, @@ -338,17 +337,17 @@ func Test_EvenDistributionOfReplicas(t *testing.T) { }, object: workv1alpha2.ResourceBindingSpec{ Replicas: 6, - }, - placement: &policyv1alpha1.Placement{ - ReplicaScheduling: &policyv1alpha1.ReplicaSchedulingStrategy{ - ReplicaSchedulingType: policyv1alpha1.ReplicaSchedulingTypeDivided, - ReplicaDivisionPreference: 
policyv1alpha1.ReplicaDivisionPreferenceWeighted, - WeightPreference: &policyv1alpha1.ClusterPreferences{ - StaticWeightList: []policyv1alpha1.StaticClusterWeight{ - {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember1}}, Weight: 2}, - {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember2}}, Weight: 1}, - {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember3}}, Weight: 1}, - {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember4}}, Weight: 1}, + Placement: &policyv1alpha1.Placement{ + ReplicaScheduling: &policyv1alpha1.ReplicaSchedulingStrategy{ + ReplicaSchedulingType: policyv1alpha1.ReplicaSchedulingTypeDivided, + ReplicaDivisionPreference: policyv1alpha1.ReplicaDivisionPreferenceWeighted, + WeightPreference: &policyv1alpha1.ClusterPreferences{ + StaticWeightList: []policyv1alpha1.StaticClusterWeight{ + {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember1}}, Weight: 2}, + {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember2}}, Weight: 1}, + {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember3}}, Weight: 1}, + {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember4}}, Weight: 1}, + }, }, }, }, @@ -380,16 +379,16 @@ func Test_EvenDistributionOfReplicas(t *testing.T) { }, object: workv1alpha2.ResourceBindingSpec{ Replicas: 5, - }, - placement: &policyv1alpha1.Placement{ - ReplicaScheduling: &policyv1alpha1.ReplicaSchedulingStrategy{ - ReplicaSchedulingType: policyv1alpha1.ReplicaSchedulingTypeDivided, - ReplicaDivisionPreference: policyv1alpha1.ReplicaDivisionPreferenceWeighted, - WeightPreference: &policyv1alpha1.ClusterPreferences{ - StaticWeightList: []policyv1alpha1.StaticClusterWeight{ - {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember1}}, Weight: 1}, - {TargetCluster: 
policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember2}}, Weight: 1}, - {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember3}}, Weight: 1}, + Placement: &policyv1alpha1.Placement{ + ReplicaScheduling: &policyv1alpha1.ReplicaSchedulingStrategy{ + ReplicaSchedulingType: policyv1alpha1.ReplicaSchedulingTypeDivided, + ReplicaDivisionPreference: policyv1alpha1.ReplicaDivisionPreferenceWeighted, + WeightPreference: &policyv1alpha1.ClusterPreferences{ + StaticWeightList: []policyv1alpha1.StaticClusterWeight{ + {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember1}}, Weight: 1}, + {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember2}}, Weight: 1}, + {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember3}}, Weight: 1}, + }, }, }, }, @@ -409,17 +408,17 @@ func Test_EvenDistributionOfReplicas(t *testing.T) { }, object: workv1alpha2.ResourceBindingSpec{ Replicas: 5, - }, - placement: &policyv1alpha1.Placement{ - ReplicaScheduling: &policyv1alpha1.ReplicaSchedulingStrategy{ - ReplicaSchedulingType: policyv1alpha1.ReplicaSchedulingTypeDivided, - ReplicaDivisionPreference: policyv1alpha1.ReplicaDivisionPreferenceWeighted, - WeightPreference: &policyv1alpha1.ClusterPreferences{ - StaticWeightList: []policyv1alpha1.StaticClusterWeight{ - {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember1}}, Weight: 1}, - {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember2}}, Weight: 1}, - {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember3}}, Weight: 1}, - {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember4}}, Weight: 1}, + Placement: &policyv1alpha1.Placement{ + ReplicaScheduling: &policyv1alpha1.ReplicaSchedulingStrategy{ + ReplicaSchedulingType: policyv1alpha1.ReplicaSchedulingTypeDivided, + ReplicaDivisionPreference: 
policyv1alpha1.ReplicaDivisionPreferenceWeighted, + WeightPreference: &policyv1alpha1.ClusterPreferences{ + StaticWeightList: []policyv1alpha1.StaticClusterWeight{ + {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember1}}, Weight: 1}, + {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember2}}, Weight: 1}, + {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember3}}, Weight: 1}, + {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember4}}, Weight: 1}, + }, }, }, }, @@ -449,17 +448,17 @@ func Test_EvenDistributionOfReplicas(t *testing.T) { }, object: workv1alpha2.ResourceBindingSpec{ Replicas: 6, - }, - placement: &policyv1alpha1.Placement{ - ReplicaScheduling: &policyv1alpha1.ReplicaSchedulingStrategy{ - ReplicaSchedulingType: policyv1alpha1.ReplicaSchedulingTypeDivided, - ReplicaDivisionPreference: policyv1alpha1.ReplicaDivisionPreferenceWeighted, - WeightPreference: &policyv1alpha1.ClusterPreferences{ - StaticWeightList: []policyv1alpha1.StaticClusterWeight{ - {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember1}}, Weight: 1}, - {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember2}}, Weight: 1}, - {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember3}}, Weight: 1}, - {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember4}}, Weight: 1}, + Placement: &policyv1alpha1.Placement{ + ReplicaScheduling: &policyv1alpha1.ReplicaSchedulingStrategy{ + ReplicaSchedulingType: policyv1alpha1.ReplicaSchedulingTypeDivided, + ReplicaDivisionPreference: policyv1alpha1.ReplicaDivisionPreferenceWeighted, + WeightPreference: &policyv1alpha1.ClusterPreferences{ + StaticWeightList: []policyv1alpha1.StaticClusterWeight{ + {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember1}}, Weight: 1}, + {TargetCluster: 
policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember2}}, Weight: 1}, + {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember3}}, Weight: 1}, + {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember4}}, Weight: 1}, + }, }, }, }, @@ -478,16 +477,16 @@ func Test_EvenDistributionOfReplicas(t *testing.T) { }, object: workv1alpha2.ResourceBindingSpec{ Replicas: 6, - }, - placement: &policyv1alpha1.Placement{ - ReplicaScheduling: &policyv1alpha1.ReplicaSchedulingStrategy{ - ReplicaSchedulingType: policyv1alpha1.ReplicaSchedulingTypeDivided, - ReplicaDivisionPreference: policyv1alpha1.ReplicaDivisionPreferenceWeighted, - WeightPreference: &policyv1alpha1.ClusterPreferences{ - StaticWeightList: []policyv1alpha1.StaticClusterWeight{ - {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember1}}, Weight: 1}, - {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember2}}, Weight: 1}, - {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember3}}, Weight: 1}, + Placement: &policyv1alpha1.Placement{ + ReplicaScheduling: &policyv1alpha1.ReplicaSchedulingStrategy{ + ReplicaSchedulingType: policyv1alpha1.ReplicaSchedulingTypeDivided, + ReplicaDivisionPreference: policyv1alpha1.ReplicaDivisionPreferenceWeighted, + WeightPreference: &policyv1alpha1.ClusterPreferences{ + StaticWeightList: []policyv1alpha1.StaticClusterWeight{ + {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember1}}, Weight: 1}, + {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember2}}, Weight: 1}, + {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember3}}, Weight: 1}, + }, }, }, }, @@ -512,7 +511,7 @@ func Test_EvenDistributionOfReplicas(t *testing.T) { } // 2. 
schedule basing on previous schedule result - got, err := g.assignReplicas(tt.clusters, tt.placement, &obj) + got, err := g.assignReplicas(tt.clusters, &obj, &workv1alpha2.ResourceBindingStatus{}) if (err != nil) != tt.wantErr { t.Errorf("AssignReplicas() error = %v, wantErr %v", err, tt.wantErr) return diff --git a/pkg/scheduler/scheduler.go b/pkg/scheduler/scheduler.go index 1bd2c6af58bf..70489337c892 100644 --- a/pkg/scheduler/scheduler.go +++ b/pkg/scheduler/scheduler.go @@ -22,6 +22,7 @@ import ( "errors" "fmt" "reflect" + "strings" "time" corev1 "k8s.io/api/core/v1" @@ -68,6 +69,9 @@ const ( // ScaleSchedule means the replicas of binding object has been changed. ScaleSchedule ScheduleType = "ScaleSchedule" + + // ExplicitlyTriggerReschedule explicitly triggered reschedule + ExplicitlyTriggerReschedule ScheduleType = "ExplicitlyTriggerReschedule" ) const ( @@ -357,6 +361,13 @@ func (s *Scheduler) doScheduleBinding(namespace, name string) (err error) { metrics.BindingSchedule(string(ScaleSchedule), utilmetrics.DurationInSeconds(start), err) return err } + if rb.Spec.RescheduleTriggeredAt.After(rb.Status.LastScheduledTime.Time) { + // explicitly triggered reschedule + klog.Infof("Reschedule ResourceBinding(%s/%s) as explicitly triggered reschedule", namespace, name) + err = s.scheduleResourceBinding(rb) + metrics.BindingSchedule(string(ExplicitlyTriggerReschedule), utilmetrics.DurationInSeconds(start), err) + return err + } if rb.Spec.Replicas == 0 || rb.Spec.Placement.ReplicaSchedulingType() == policyv1alpha1.ReplicaSchedulingTypeDuplicated { // Duplicated resources should always be scheduled. 
Note: non-workload is considered as duplicated @@ -414,6 +425,13 @@ func (s *Scheduler) doScheduleClusterBinding(name string) (err error) { metrics.BindingSchedule(string(ScaleSchedule), utilmetrics.DurationInSeconds(start), err) return err } + if crb.Spec.RescheduleTriggeredAt.After(crb.Status.LastScheduledTime.Time) { + // explicitly triggered reschedule + klog.Infof("Start to schedule ClusterResourceBinding(%s) as explicitly triggered reschedule", name) + err = s.scheduleClusterResourceBinding(crb) + metrics.BindingSchedule(string(ExplicitlyTriggerReschedule), utilmetrics.DurationInSeconds(start), err) + return err + } if crb.Spec.Replicas == 0 || crb.Spec.Placement.ReplicaSchedulingType() == policyv1alpha1.ReplicaSchedulingTypeDuplicated { // Duplicated resources should always be scheduled. Note: non-workload is considered as duplicated @@ -770,6 +788,7 @@ func patchBindingStatusCondition(karmadaClient karmadaclientset.Interface, rb *w // will succeed eventually. if newScheduledCondition.Status == metav1.ConditionTrue { updateRB.Status.SchedulerObservedGeneration = rb.Generation + updateRB.Status.LastScheduledTime = metav1.Now() } if reflect.DeepEqual(rb.Status, updateRB.Status) { @@ -819,6 +838,7 @@ func patchClusterBindingStatusCondition(karmadaClient karmadaclientset.Interface // will succeed eventually. 
if newScheduledCondition.Status == metav1.ConditionTrue { updateCRB.Status.SchedulerObservedGeneration = crb.Generation + updateCRB.Status.LastScheduledTime = metav1.Now() } if reflect.DeepEqual(crb.Status, updateCRB.Status) { @@ -870,10 +890,11 @@ func (s *Scheduler) recordScheduleResultEventForResourceBinding(rb *workv1alpha2 Name: rb.Spec.Resource.Name, UID: rb.Spec.Resource.UID, } + successMsg := fmt.Sprintf("%s Result: {%s}", successfulSchedulingMessage, targetClustersToString(rb.Spec.Clusters)) if schedulerErr == nil { - s.eventRecorder.Event(rb, corev1.EventTypeNormal, events.EventReasonScheduleBindingSucceed, successfulSchedulingMessage) - s.eventRecorder.Event(ref, corev1.EventTypeNormal, events.EventReasonScheduleBindingSucceed, successfulSchedulingMessage) + s.eventRecorder.Event(rb, corev1.EventTypeNormal, events.EventReasonScheduleBindingSucceed, successMsg) + s.eventRecorder.Event(ref, corev1.EventTypeNormal, events.EventReasonScheduleBindingSucceed, successMsg) } else { s.eventRecorder.Event(rb, corev1.EventTypeWarning, events.EventReasonScheduleBindingFailed, schedulerErr.Error()) s.eventRecorder.Event(ref, corev1.EventTypeWarning, events.EventReasonScheduleBindingFailed, schedulerErr.Error()) @@ -892,12 +913,22 @@ func (s *Scheduler) recordScheduleResultEventForClusterResourceBinding(crb *work Name: crb.Spec.Resource.Name, UID: crb.Spec.Resource.UID, } + successMsg := fmt.Sprintf("%s Result: {%s}", successfulSchedulingMessage, targetClustersToString(crb.Spec.Clusters)) if schedulerErr == nil { - s.eventRecorder.Event(crb, corev1.EventTypeNormal, events.EventReasonScheduleBindingSucceed, successfulSchedulingMessage) - s.eventRecorder.Event(ref, corev1.EventTypeNormal, events.EventReasonScheduleBindingSucceed, successfulSchedulingMessage) + s.eventRecorder.Event(crb, corev1.EventTypeNormal, events.EventReasonScheduleBindingSucceed, successMsg) + s.eventRecorder.Event(ref, corev1.EventTypeNormal, events.EventReasonScheduleBindingSucceed, successMsg) } 
else { s.eventRecorder.Event(crb, corev1.EventTypeWarning, events.EventReasonScheduleBindingFailed, schedulerErr.Error()) s.eventRecorder.Event(ref, corev1.EventTypeWarning, events.EventReasonScheduleBindingFailed, schedulerErr.Error()) } } + +// targetClustersToString converts []workv1alpha2.TargetCluster to a string in format like "member1:1, member2:2". +func targetClustersToString(tcs []workv1alpha2.TargetCluster) string { + tcsStrs := make([]string, 0, len(tcs)) + for _, cluster := range tcs { + tcsStrs = append(tcsStrs, fmt.Sprintf("%s:%d", cluster.Name, cluster.Replicas)) + } + return strings.Join(tcsStrs, ", ") +} diff --git a/pkg/scheduler/scheduler_test.go b/pkg/scheduler/scheduler_test.go index 0826ac1c75ec..a36ea35946d6 100644 --- a/pkg/scheduler/scheduler_test.go +++ b/pkg/scheduler/scheduler_test.go @@ -236,6 +236,7 @@ func Test_patchBindingStatusCondition(t *testing.T) { if err != nil { t.Fatal(err) } + res.Status.LastScheduledTime = metav1.Time{} if !reflect.DeepEqual(res.Status, test.expected.Status) { t.Errorf("expected status: %v, but got: %v", test.expected.Status, res.Status) } @@ -439,6 +440,7 @@ func Test_patchClusterBindingStatusCondition(t *testing.T) { if err != nil { t.Fatal(err) } + res.Status.LastScheduledTime = metav1.Time{} if !reflect.DeepEqual(res.Status, test.expected.Status) { t.Errorf("expected status: %v, but got: %v", test.expected.Status, res.Status) }