@@ -34,6 +34,7 @@ type Stack interface {
 type SelectOptions struct {
 	PenaltyNodeIDs map[string]struct{}
 	PreferredNodes []*structs.Node
+	Preempt        bool
 }
 
 // GenericStack is the Stack used for the Generic scheduler. It is
@@ -62,77 +63,6 @@ type GenericStack struct {
 	scoreNorm *ScoreNormalizationIterator
 }
 
-// NewGenericStack constructs a stack used for selecting service placements
-func NewGenericStack(batch bool, ctx Context) *GenericStack {
-	// Create a new stack
-	s := &GenericStack{
-		batch: batch,
-		ctx:   ctx,
-	}
-
-	// Create the source iterator. We randomize the order we visit nodes
-	// to reduce collisions between schedulers and to do a basic load
-	// balancing across eligible nodes.
-	s.source = NewRandomIterator(ctx, nil)
-
-	// Create the quota iterator to determine if placements would result in the
-	// quota attached to the namespace of the job to go over.
-	s.quota = NewQuotaIterator(ctx, s.source)
-
-	// Attach the job constraints. The job is filled in later.
-	s.jobConstraint = NewConstraintChecker(ctx, nil)
-
-	// Filter on task group drivers first as they are faster
-	s.taskGroupDrivers = NewDriverChecker(ctx, nil)
-
-	// Filter on task group constraints second
-	s.taskGroupConstraint = NewConstraintChecker(ctx, nil)
-
-	// Filter on task group devices
-	s.taskGroupDevices = NewDeviceChecker(ctx)
-
-	// Create the feasibility wrapper which wraps all feasibility checks in
-	// which feasibility checking can be skipped if the computed node class has
-	// previously been marked as eligible or ineligible. Generally this will be
-	// checks that only needs to examine the single node to determine feasibility.
-	jobs := []FeasibilityChecker{s.jobConstraint}
-	tgs := []FeasibilityChecker{s.taskGroupDrivers, s.taskGroupConstraint, s.taskGroupDevices}
-	s.wrappedChecks = NewFeasibilityWrapper(ctx, s.quota, jobs, tgs)
-
-	// Filter on distinct host constraints.
-	s.distinctHostsConstraint = NewDistinctHostsIterator(ctx, s.wrappedChecks)
-
-	// Filter on distinct property constraints.
-	s.distinctPropertyConstraint = NewDistinctPropertyIterator(ctx, s.distinctHostsConstraint)
-
-	// Upgrade from feasible to rank iterator
-	rankSource := NewFeasibleRankIterator(ctx, s.distinctPropertyConstraint)
-
-	// Apply the bin packing, this depends on the resources needed
-	// by a particular task group.
-
-	s.binPack = NewBinPackIterator(ctx, rankSource, false, 0)
-
-	// Apply the job anti-affinity iterator. This is to avoid placing
-	// multiple allocations on the same node for this job.
-	s.jobAntiAff = NewJobAntiAffinityIterator(ctx, s.binPack, "")
-
-	s.nodeReschedulingPenalty = NewNodeReschedulingPenaltyIterator(ctx, s.jobAntiAff)
-
-	s.nodeAffinity = NewNodeAffinityIterator(ctx, s.nodeReschedulingPenalty)
-
-	s.spread = NewSpreadIterator(ctx, s.nodeAffinity)
-
-	s.scoreNorm = NewScoreNormalizationIterator(ctx, s.spread)
-
-	// Apply a limit function. This is to avoid scanning *every* possible node.
-	s.limit = NewLimitIterator(ctx, s.scoreNorm, 2, skipScoreThreshold, maxSkip)
-
-	// Select the node with the maximum score for placement
-	s.maxScore = NewMaxScoreIterator(ctx, s.limit)
-	return s
-}
-
 func (s *GenericStack) SetNodes(baseNodes []*structs.Node) {
 	// Shuffle base nodes
 	shuffleNodes(baseNodes)
@@ -159,7 +89,7 @@ func (s *GenericStack) SetJob(job *structs.Job) {
 	s.jobConstraint.SetConstraints(job.Constraints)
 	s.distinctHostsConstraint.SetJob(job)
 	s.distinctPropertyConstraint.SetJob(job)
-	s.binPack.SetPriority(job.Priority)
+	s.binPack.SetJob(job)
 	s.jobAntiAff.SetJob(job)
 	s.nodeAffinity.SetJob(job)
 	s.spread.SetJob(job)
@@ -203,6 +133,9 @@ func (s *GenericStack) Select(tg *structs.TaskGroup, options *SelectOptions) *Ra
 	s.distinctPropertyConstraint.SetTaskGroup(tg)
 	s.wrappedChecks.SetTaskGroup(tg.Name)
 	s.binPack.SetTaskGroup(tg)
+	if options != nil {
+		s.binPack.evict = options.Preempt
+	}
 	s.jobAntiAff.SetTaskGroup(tg)
 	if options != nil {
 		s.nodeReschedulingPenalty.SetPenaltyNodes(options.PenaltyNodeIDs)
@@ -306,7 +239,7 @@ func (s *SystemStack) SetNodes(baseNodes []*structs.Node) {
 func (s *SystemStack) SetJob(job *structs.Job) {
 	s.jobConstraint.SetConstraints(job.Constraints)
 	s.distinctPropertyConstraint.SetJob(job)
-	s.binPack.SetPriority(job.Priority)
+	s.binPack.SetJob(job)
 	s.ctx.Eligibility().SetJob(job)
 
 	if contextual, ok := s.quota.(ContextualIterator); ok {
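
Taken together, the hunks above thread one new knob through both stacks: SelectOptions gains a Preempt flag, Select copies it into the bin-packing iterator's unexported evict field, and SetJob replaces SetPriority so the iterator receives the whole job rather than only its priority. A minimal caller-side sketch follows; it assumes it sits in the scheduler package, that NewGenericStack remains available after its removal from this file, and that Select returns a *RankedNode (per the truncated signature in the hunk header above). The helper name selectWithPreemption is illustrative, not part of the change:

	// Illustrative sketch (not part of this diff): run one placement
	// with preemption enabled.
	func selectWithPreemption(ctx Context, nodes []*structs.Node,
		job *structs.Job, tg *structs.TaskGroup) *RankedNode {

		stack := NewGenericStack(false, ctx) // false: not a batch scheduler
		stack.SetNodes(nodes)
		stack.SetJob(job) // now also hands the job to the bin-packing iterator

		// Preempt: true flows into s.binPack.evict in Select above, so the
		// bin-packing iterator may rank nodes where lower-priority
		// allocations would have to be evicted.
		return stack.Select(tg, &SelectOptions{Preempt: true})
	}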