cgroups: make sure cgroup still exists after task restart
This PR modifies raw_exec and exec to ensure the cgroup for a task
they are driving still exists during a task restart. These drivers
have the same bug but with different root causes.

For raw_exec, we were removing the cgroup in two places: the cpuset
manager, and the unix containment implementation (the thing that uses
the freezer cgroup to clean house). During a task restart, the
containment would remove the cgroup, and when the task runner hooks
went to start again they would block waiting for the cgroup to exist,
which would never happen because the cgroup is created by the cpuset
manager, which only runs as an alloc pre-start hook. The fix here is
simply to not delete the cgroup in the containment implementation;
killing the PIDs is enough. The removal happens later in the cpuset
manager anyway.

For exec, it's the same idea, except DestroyTask is called on task
failure, which calls into libcontainer, which in turn deletes the
cgroup. In this case we do not have control over the deletion of the
cgroup, so instead we hack the cgroup back into life after the call
to DestroyTask.

All of this only applies to cgroups v2.
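
For reference, one minimal way to detect whether a host is running the
unified v2 hierarchy is to check for /sys/fs/cgroup/cgroup.controllers,
which exists only on v2. The sketch below is illustrative and is not
necessarily how Nomad computes its cgutil.UseV2 flag.

package main

import (
	"fmt"
	"os"
)

// isCgroupsV2 reports whether the unified cgroup v2 hierarchy is mounted
// at the conventional location. The cgroup.controllers file exists only
// on a v2 hierarchy.
func isCgroupsV2() bool {
	_, err := os.Stat("/sys/fs/cgroup/cgroup.controllers")
	return err == nil
}

func main() {
	fmt.Println("cgroups v2:", isCgroupsV2())
}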
shoenig committed May 4, 2022
1 parent 52faa16 commit 3f0b766
Showing 3 changed files with 27 additions and 2 deletions.
1 change: 1 addition & 0 deletions client/allocrunner/taskrunner/task_runner.go
@@ -783,6 +783,7 @@ func (tr *TaskRunner) runDriver() error {

taskConfig := tr.buildTaskConfig()
if tr.cpusetCgroupPathGetter != nil {
tr.logger.Trace("waiting for cgroup to exist for", "allocID", tr.allocID, "task", tr.task)
cpusetCgroupPath, err := tr.cpusetCgroupPathGetter(tr.killCtx)
if err != nil {
return err
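
The hunk above is where a restart used to hang: the task runner blocks
on cpusetCgroupPathGetter until the cgroup path exists or the kill
context fires. A hedged sketch of what such a getter could look like
(the function name, package, and polling interval are illustrative, not
Nomad's actual implementation):

package cgsketch

import (
	"context"
	"fmt"
	"os"
	"time"
)

// waitForCgroup polls until the cgroup directory exists or ctx is
// cancelled. If the cgroup was deleted and never recreated, this blocks
// until the kill context fires, which is exactly the hang described in
// the commit message.
func waitForCgroup(ctx context.Context, path string) (string, error) {
	ticker := time.NewTicker(50 * time.Millisecond)
	defer ticker.Stop()
	for {
		if _, err := os.Stat(path); err == nil {
			return path, nil
		}
		select {
		case <-ctx.Done():
			return "", fmt.Errorf("cgroup %q never appeared: %w", path, ctx.Err())
		case <-ticker.C:
		}
	}
}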
6 changes: 4 additions & 2 deletions client/lib/cgutil/group_killer.go
@@ -143,8 +143,10 @@ func (d *killer) v2(cgroup *configs.Cgroup) error {
return err
}

// remove the cgroup from disk
return mgr.Destroy()
// note: do NOT remove the cgroup from disk; leave that to the alloc-level
// cpuset manager.

return nil
}

// kill is used to SIGKILL all processes in cgroup
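
For illustration only, the "kill the PIDs but leave the directory" idea
can be expressed directly against the cgroup v2 kernel interface. Nomad's
killer goes through libcontainer's manager instead; cgroup.kill requires
kernel 5.14+ and is used here purely as a sketch:

package cgsketch

import (
	"os"
	"path/filepath"
)

// killCgroupProcesses SIGKILLs every process in the given v2 cgroup by
// writing to its cgroup.kill file, and deliberately does not remove the
// directory; that cleanup is left to the alloc-level cpuset manager.
func killCgroupProcesses(cgroupDir string) error {
	killFile := filepath.Join(cgroupDir, "cgroup.kill")
	return os.WriteFile(killFile, []byte("1"), 0o644)
}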
22 changes: 22 additions & 0 deletions drivers/exec/driver.go
@@ -581,6 +581,25 @@ func (d *Driver) StopTask(taskID string, timeout time.Duration, signal string) e
return nil
}

// resetCgroup will re-create the v2 cgroup for the task after the task has been
// destroyed by libcontainer. In the case of a task restart we call DestroyTask
// which removes the cgroup - but we still need it!
//
// Ideally the cgroup management would be more unified - and we could do the creation
// on a task runner pre-start hook, eliminating the need for this hack.
func (d *Driver) resetCgroup(handle *taskHandle) {
if cgutil.UseV2 {
if handle.taskConfig.Resources != nil &&
handle.taskConfig.Resources.LinuxResources != nil &&
handle.taskConfig.Resources.LinuxResources.CpusetCgroupPath != "" {
err := os.Mkdir(handle.taskConfig.Resources.LinuxResources.CpusetCgroupPath, 0755)
if err != nil {
d.logger.Trace("failed to reset cgroup", "path", handle.taskConfig.Resources.LinuxResources.CpusetCgroupPath)
}
}
}
}

func (d *Driver) DestroyTask(taskID string, force bool) error {
handle, ok := d.tasks.Get(taskID)
if !ok {
@@ -599,6 +618,9 @@ func (d *Driver) DestroyTask(taskID string, force bool) error {
handle.pluginClient.Kill()
}

// workaround for the case where the StopTask was issued on task restart
d.resetCgroup(handle)

d.tasks.Delete(taskID)
return nil
}
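
Putting the pieces together, the exec workaround boils down to
recreating the directory that libcontainer just removed so the task
runner's pre-start wait can succeed. The following self-contained
simulation of that sequence uses made-up paths; in the driver the real
path comes from the task's LinuxResources.CpusetCgroupPath:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	base, _ := os.MkdirTemp("", "cgsim")
	cpusetPath := filepath.Join(base, "nomad.slice", "example.scope")

	// alloc pre-start: the cpuset manager creates the task's cgroup
	_ = os.MkdirAll(cpusetPath, 0o755)

	// task failure: DestroyTask -> libcontainer removes the cgroup
	_ = os.Remove(cpusetPath)

	// workaround: hack the cgroup back into life, mirroring resetCgroup
	if err := os.Mkdir(cpusetPath, 0o755); err != nil {
		fmt.Println("failed to reset cgroup:", err)
	}

	// the task runner's wait for the cgroup path now succeeds
	if _, err := os.Stat(cpusetPath); err == nil {
		fmt.Println("cgroup exists again:", cpusetPath)
	}
}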
