Skip to content

Commit 7bba295

Browse files
authored
Merge pull request #1568 from hashicorp/b-plan-system-constraints
Plan on system scheduler doesn't count nodes that don't meet constraints
2 parents 518d2a3 + bc0e60d commit 7bba295

File tree

3 files changed

+37
-10
lines changed

3 files changed

+37
-10
lines changed

command/plan.go

+9-3
Original file line numberDiff line numberDiff line change
@@ -10,6 +10,7 @@ import (
1010

1111
"github.com/hashicorp/nomad/api"
1212
"github.com/hashicorp/nomad/jobspec"
13+
"github.com/hashicorp/nomad/nomad/structs"
1314
"github.com/hashicorp/nomad/scheduler"
1415
"github.com/mitchellh/colorstring"
1516
)
@@ -172,7 +173,7 @@ func (c *PlanCommand) Run(args []string) int {
172173

173174
// Print the scheduler dry-run output
174175
c.Ui.Output(c.Colorize().Color("[bold]Scheduler dry-run:[reset]"))
175-
c.Ui.Output(c.Colorize().Color(formatDryRun(resp)))
176+
c.Ui.Output(c.Colorize().Color(formatDryRun(resp, job)))
176177
c.Ui.Output("")
177178

178179
// Print the job index info
@@ -203,7 +204,7 @@ func formatJobModifyIndex(jobModifyIndex uint64, jobName string) string {
203204
}
204205

205206
// formatDryRun produces a string explaining the results of the dry run.
206-
func formatDryRun(resp *api.JobPlanResponse) string {
207+
func formatDryRun(resp *api.JobPlanResponse, job *structs.Job) string {
207208
var rolling *api.Evaluation
208209
for _, eval := range resp.CreatedEvals {
209210
if eval.TriggeredBy == "rolling-update" {
@@ -215,7 +216,12 @@ func formatDryRun(resp *api.JobPlanResponse) string {
215216
if len(resp.FailedTGAllocs) == 0 {
216217
out = "[bold][green]- All tasks successfully allocated.[reset]\n"
217218
} else {
218-
out = "[bold][yellow]- WARNING: Failed to place all allocations.[reset]\n"
219+
// Change the output depending on if we are a system job or not
220+
if job.Type == "system" {
221+
out = "[bold][yellow]- WARNING: Failed to place allocations on all nodes.[reset]\n"
222+
} else {
223+
out = "[bold][yellow]- WARNING: Failed to place all allocations.[reset]\n"
224+
}
219225
sorted := sortedTaskGroupFromMetrics(resp.FailedTGAllocs)
220226
for _, tg := range sorted {
221227
metrics := resp.FailedTGAllocs[tg]

scheduler/system_sched.go

+11-2
Original file line numberDiff line numberDiff line change
@@ -277,13 +277,22 @@ func (s *SystemScheduler) computePlacements(place []allocTuple) error {
277277
option, _ := s.stack.Select(missing.TaskGroup)
278278

279279
if option == nil {
280-
// if nodes were filtered because of constain mismatches and we
280+
// If nodes were filtered because of constraint mismatches and we
281281
// couldn't create an allocation then decrement queued for that
282282
// task group
283283
if s.ctx.metrics.NodesFiltered > nodesFiltered {
284284
s.queuedAllocs[missing.TaskGroup.Name] -= 1
285+
286+
// If we are annotating the plan, then decrement the desired
287+
// placements based on whether the node meets the constraints
288+
if s.eval.AnnotatePlan && s.plan.Annotations != nil &&
289+
s.plan.Annotations.DesiredTGUpdates != nil {
290+
desired := s.plan.Annotations.DesiredTGUpdates[missing.TaskGroup.Name]
291+
desired.Place -= 1
292+
}
285293
}
286-
// record the current number of nodes filtered in this iteration
294+
295+
// Record the current number of nodes filtered in this iteration
287296
nodesFiltered = s.ctx.metrics.NodesFiltered
288297

289298
// Check if this task group has already failed

scheduler/system_sched_test.go

+17-5
Original file line numberDiff line numberDiff line change
@@ -136,11 +136,23 @@ func TestSystemSched_JobRegister_Annotate(t *testing.T) {
136136
// Create some nodes
137137
for i := 0; i < 10; i++ {
138138
node := mock.Node()
139+
if i < 9 {
140+
node.NodeClass = "foo"
141+
} else {
142+
node.NodeClass = "bar"
143+
}
144+
node.ComputeClass()
139145
noErr(t, h.State.UpsertNode(h.NextIndex(), node))
140146
}
141147

142-
// Create a job
148+
// Create a job constraining on node class
143149
job := mock.SystemJob()
150+
fooConstraint := &structs.Constraint{
151+
LTarget: "${node.class}",
152+
RTarget: "foo",
153+
Operand: "==",
154+
}
155+
job.Constraints = append(job.Constraints, fooConstraint)
144156
noErr(t, h.State.UpsertJob(h.NextIndex(), job))
145157

146158
// Create a mock evaluation to deregister the job
@@ -169,16 +181,16 @@ func TestSystemSched_JobRegister_Annotate(t *testing.T) {
169181
for _, allocList := range plan.NodeAllocation {
170182
planned = append(planned, allocList...)
171183
}
172-
if len(planned) != 10 {
173-
t.Fatalf("bad: %#v", plan)
184+
if len(planned) != 9 {
185+
t.Fatalf("bad: %#v %d", planned, len(planned))
174186
}
175187

176188
// Lookup the allocations by JobID
177189
out, err := h.State.AllocsByJob(job.ID)
178190
noErr(t, err)
179191

180192
// Ensure all allocations placed
181-
if len(out) != 10 {
193+
if len(out) != 9 {
182194
t.Fatalf("bad: %#v", out)
183195
}
184196

@@ -204,7 +216,7 @@ func TestSystemSched_JobRegister_Annotate(t *testing.T) {
204216
t.Fatalf("expected task group web to have desired changes")
205217
}
206218

207-
expected := &structs.DesiredUpdates{Place: 10}
219+
expected := &structs.DesiredUpdates{Place: 9}
208220
if !reflect.DeepEqual(desiredChanges, expected) {
209221
t.Fatalf("Unexpected desired updates; got %#v; want %#v", desiredChanges, expected)
210222
}

0 commit comments

Comments
 (0)