prestart-restart #10017

Closed
wants to merge 2 commits into from
1 change: 1 addition & 0 deletions client/allocrunner/alloc_runner.go
@@ -260,6 +260,7 @@ func (ar *allocRunner) initTaskRunners(tasks []*structs.Task) error {
DriverManager: ar.driverManager,
ServersContactedCh: ar.serversContactedCh,
StartConditionMetCtx: ar.taskHookCoordinator.startConditionForTask(task),
EndConditionMetCtx: ar.taskHookCoordinator.endConditionForTask(task),
}

// Create, but do not Run, the task runner
5 changes: 5 additions & 0 deletions client/allocrunner/task_hook_coordinator.go
@@ -121,6 +121,11 @@ func (c *taskHookCoordinator) startConditionForTask(task *structs.Task) <-chan struct{} {
}
}

// Tasks are able to exit the taskrunner Run() loop when poststop tasks are ready to start
func (c *taskHookCoordinator) endConditionForTask(task *structs.Task) <-chan struct{} {
return c.poststopTaskCtx.Done()
}

// This is not thread safe! This must only be called from one thread per alloc runner.
func (c *taskHookCoordinator) taskStateUpdated(states map[string]*structs.TaskState) {
for task := range c.prestartSidecar {
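The new endConditionForTask hands every task runner the same Done() channel from a context the coordinator cancels once poststop tasks are cleared to start. A minimal, self-contained sketch of that gate pattern (hypothetical names, not the Nomad implementation):

package main

import (
    "context"
    "fmt"
    "sync"
    "time"
)

// coordinator is a stand-in for taskHookCoordinator: it owns a context whose
// cancellation signals "the allocation is ready to finish".
type coordinator struct {
    endCtx    context.Context
    signalEnd context.CancelFunc
}

func newCoordinator() *coordinator {
    ctx, cancel := context.WithCancel(context.Background())
    return &coordinator{endCtx: ctx, signalEnd: cancel}
}

// endConditionForTask mirrors the diff: every task gets the same Done() channel.
func (c *coordinator) endConditionForTask() <-chan struct{} {
    return c.endCtx.Done()
}

func main() {
    c := newCoordinator()
    var wg sync.WaitGroup

    for i := 0; i < 3; i++ {
        wg.Add(1)
        gate := c.endConditionForTask()
        go func(id int) {
            defer wg.Done()
            <-gate // block until the allocation is ready to finish
            fmt.Printf("task %d may exit its run loop\n", id)
        }(i)
    }

    time.Sleep(100 * time.Millisecond)
    c.signalEnd() // e.g. poststop tasks are ready to start
    wg.Wait()
}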
4 changes: 3 additions & 1 deletion client/allocrunner/taskrunner/lifecycle.go
@@ -2,6 +2,7 @@ package taskrunner

import (
"context"
"fmt"

"github.com/hashicorp/nomad/nomad/structs"
)
@@ -15,7 +16,8 @@ func (tr *TaskRunner) Restart(ctx context.Context, event *structs.TaskEvent, failure bool) error {
handle := tr.getDriverHandle()

// Check it is running
if handle == nil {
if handle == nil && !tr.IsPrestartTask() {
fmt.Println("!!! task not running, not restarting")
return ErrTaskNotRunning
}

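The relaxed check above means a restart is rejected only when the task has no driver handle and is not a prestart task, so a completed prestart ephemeral task can still be restarted. A small sketch of that decision in isolation (hypothetical helper, not Nomad code):

package main

import (
    "errors"
    "fmt"
)

var errTaskNotRunning = errors.New("Task not running")

// canRestart reproduces the guard: only a non-prestart task without a live
// driver handle is refused a restart.
func canRestart(hasHandle, isPrestart bool) error {
    if !hasHandle && !isPrestart {
        return errTaskNotRunning
    }
    return nil
}

func main() {
    fmt.Println(canRestart(false, false)) // Task not running
    fmt.Println(canRestart(false, true))  // <nil>: stopped prestart task may restart
    fmt.Println(canRestart(true, false))  // <nil>: running task may restart
}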
1 change: 1 addition & 0 deletions client/allocrunner/taskrunner/restarts/restarts.go
@@ -164,6 +164,7 @@ func (r *RestartTracker) GetState() (string, time.Duration) {

// Hot path if a restart was triggered
if r.restartTriggered {
fmt.Println("!!! restart triggered")
r.reason = ""
return structs.TaskRestarting, 0
}
25 changes: 24 additions & 1 deletion client/allocrunner/taskrunner/task_runner.go
@@ -215,9 +215,16 @@ type TaskRunner struct {
// GetClientAllocs has been called in case of a failed restore.
serversContactedCh <-chan struct{}

// startConditionMetCtx is done when TR should start the task
// startConditionMetCtx is done when TaskRunner should start the task
// within the allocation lifecycle. This will allow the task to proceed
// runtime execution.
startConditionMetCtx <-chan struct{}

// endConditionMetCtx blocks tasks from exiting the taskrunner Run loop
// until the whole allocation is ready to finish. This allows prestart ephemeral
// tasks to be restarted successfully
endConditionMetCtx <-chan struct{}

// waitOnServers defaults to false but will be set true if a restore
// fails and the Run method should wait until serversContactedCh is
// closed.
@@ -278,6 +285,10 @@ type Config struct {

// startConditionMetCtx is done when TR should start the task
StartConditionMetCtx <-chan struct{}

// EndConditionMetCtx is done when TR can let the task exit the run loop
// i.e. when the whole allocation is ready to finish
EndConditionMetCtx <-chan struct{}
}

func NewTaskRunner(config *Config) (*TaskRunner, error) {
@@ -332,6 +343,7 @@ func NewTaskRunner(config *Config) (*TaskRunner, error) {
maxEvents: defaultMaxEvents,
serversContactedCh: config.ServersContactedCh,
startConditionMetCtx: config.StartConditionMetCtx,
endConditionMetCtx: config.EndConditionMetCtx,
}

// Create the logger based on the allocation ID
@@ -598,6 +610,7 @@ MAIN:
tr.logger.Trace("gracefully shutting down during restart delay")
return
}

}

// Ensure handle is cleaned up. Restore could have recovered a task
@@ -623,6 +636,16 @@ MAIN:
tr.logger.Error("stop failed", "error", err)
}

// Block until the allocation is ready to finish
select {
case <-tr.endConditionMetCtx:
tr.logger.Debug("lifecycle end condition has been met, proceeding")
// yay proceed
case <-tr.killCtx.Done():
case <-tr.shutdownCtx.Done():
return
}

tr.logger.Debug("task run loop exiting")
}

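The select added at the end of the Run loop acts as an exit gate: the task proceeds once the lifecycle end condition is met, falls through if it was killed, and returns immediately on agent shutdown. A standalone sketch of that gate (hypothetical helper, not the TaskRunner method):

package main

import (
    "context"
    "fmt"
)

// waitForExit blocks like the gate in Run: it returns true when the task may
// continue past the gate (end condition met or task killed) and false when the
// runner is shutting down and should return immediately.
func waitForExit(endCond <-chan struct{}, killCtx, shutdownCtx context.Context) bool {
    select {
    case <-endCond:
        fmt.Println("lifecycle end condition has been met, proceeding")
    case <-killCtx.Done():
        // a killed task no longer waits for the rest of the allocation
    case <-shutdownCtx.Done():
        return false
    }
    return true
}

func main() {
    end := make(chan struct{})
    close(end) // pretend poststop tasks are ready to start

    if waitForExit(end, context.Background(), context.Background()) {
        fmt.Println("task run loop exiting")
    }
}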
5 changes: 5 additions & 0 deletions client/allocrunner/taskrunner/task_runner_getters.go
@@ -28,6 +28,11 @@ func (tr *TaskRunner) IsLeader() bool {
return tr.taskLeader
}

// IsPrestartTask returns true if this task is a prestart task in its task group.
func (tr *TaskRunner) IsPrestartTask() bool {
return tr.Task().Lifecycle != nil && tr.Task().Lifecycle.Hook == structs.TaskLifecycleHookPrestart
}

// IsPoststopTask returns true if this task is a poststop task in its task group.
func (tr *TaskRunner) IsPoststopTask() bool {
return tr.Task().Lifecycle != nil && tr.Task().Lifecycle.Hook == structs.TaskLifecycleHookPoststop
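Both getters classify a task purely by the Hook value on its Lifecycle block; a task without a lifecycle block is a main task. A tiny sketch of that classification with stand-in types (not the structs package):

package main

import "fmt"

const (
    hookPrestart = "prestart"
    hookPoststop = "poststop"
)

type lifecycle struct{ Hook string }

type task struct{ Lifecycle *lifecycle }

func isPrestart(t *task) bool {
    return t.Lifecycle != nil && t.Lifecycle.Hook == hookPrestart
}

func isPoststop(t *task) bool {
    return t.Lifecycle != nil && t.Lifecycle.Hook == hookPoststop
}

func main() {
    fmt.Println(isPrestart(&task{Lifecycle: &lifecycle{Hook: hookPrestart}})) // true
    fmt.Println(isPoststop(&task{}))                                          // false: no lifecycle block means a main task
}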