author      Li Zefan <lizefan@huawei.com>    2014-07-09 16:48:54 +0800
committer   Tejun Heo <tj@kernel.org>        2014-07-09 15:56:17 -0400
commit      390a36aadf39e241c83035469aae48ed1a144088 (patch)
tree        4a19ec08869bf82e0dc387524d14ebb602692ffc /kernel/cpuset.c
parent      7e88291beefbb758fa3b27e500ee2e0c888d6e44 (diff)
cpuset: refactor cpuset_hotplug_update_tasks()
We mix the handling of the default hierarchy and the legacy hierarchy in the same function, and it's quite messy, so split it into two functions.

Signed-off-by: Li Zefan <lizefan@huawei.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
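A condensed sketch of the resulting control flow, assembled from the hunks below, may help when reading the diff. This is not the full function: the attach_in_progress wait/retry loop and the cpuset_mutex handling are elided here; see the diff for the complete version.

	static void cpuset_hotplug_update_tasks(struct cpuset *cs)
	{
		static cpumask_t off_cpus;
		static nodemask_t off_mems;

		/* Compute what went offline relative to the top cpuset. */
		cpumask_andnot(&off_cpus, cs->effective_cpus,
			       top_cpuset.effective_cpus);
		nodes_andnot(off_mems, cs->effective_mems,
			     top_cpuset.effective_mems);

		/*
		 * One call per hierarchy type, instead of on_dfl tests
		 * interleaved throughout a single function body.
		 */
		if (cgroup_on_dfl(cs->css.cgroup))
			hotplug_update_tasks(cs, &off_cpus, &off_mems);
		else
			hotplug_update_tasks_legacy(cs, &off_cpus, &off_mems);
	}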
Diffstat (limited to 'kernel/cpuset.c')
-rw-r--r--    kernel/cpuset.c    121
1 file changed, 66 insertions(+), 55 deletions(-)
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 4b409d2ecbb9..41822e2027c1 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -2080,6 +2080,65 @@ static void remove_tasks_in_empty_cpuset(struct cpuset *cs)
}
}
+static void hotplug_update_tasks_legacy(struct cpuset *cs,
+ struct cpumask *off_cpus,
+ nodemask_t *off_mems)
+{
+ bool is_empty;
+
+ mutex_lock(&callback_mutex);
+ cpumask_andnot(cs->cpus_allowed, cs->cpus_allowed, off_cpus);
+ cpumask_andnot(cs->effective_cpus, cs->effective_cpus, off_cpus);
+ nodes_andnot(cs->mems_allowed, cs->mems_allowed, *off_mems);
+ nodes_andnot(cs->effective_mems, cs->effective_mems, *off_mems);
+ mutex_unlock(&callback_mutex);
+
+ /*
+ * Don't call update_tasks_cpumask() if the cpuset becomes empty,
+ * as the tasks will be migrated to an ancestor.
+ */
+ if (!cpumask_empty(off_cpus) && !cpumask_empty(cs->cpus_allowed))
+ update_tasks_cpumask(cs);
+ if (!nodes_empty(*off_mems) && !nodes_empty(cs->mems_allowed))
+ update_tasks_nodemask(cs);
+
+ is_empty = cpumask_empty(cs->cpus_allowed) ||
+ nodes_empty(cs->mems_allowed);
+
+ mutex_unlock(&cpuset_mutex);
+
+ /*
+ * Move tasks to the nearest ancestor with execution resources. This
+ * is a full cgroup operation which will also call back into
+ * cpuset. It should be done outside any lock.
+ */
+ if (is_empty)
+ remove_tasks_in_empty_cpuset(cs);
+
+ mutex_lock(&cpuset_mutex);
+}
+
+static void hotplug_update_tasks(struct cpuset *cs,
+ struct cpumask *off_cpus,
+ nodemask_t *off_mems)
+{
+ mutex_lock(&callback_mutex);
+ cpumask_andnot(cs->effective_cpus, cs->effective_cpus, off_cpus);
+ if (cpumask_empty(cs->effective_cpus))
+ cpumask_copy(cs->effective_cpus,
+ parent_cs(cs)->effective_cpus);
+
+ nodes_andnot(cs->effective_mems, cs->effective_mems, *off_mems);
+ if (nodes_empty(cs->effective_mems))
+ cs->effective_mems = parent_cs(cs)->effective_mems;
+ mutex_unlock(&callback_mutex);
+
+ if (!cpumask_empty(off_cpus))
+ update_tasks_cpumask(cs);
+ if (!nodes_empty(*off_mems))
+ update_tasks_nodemask(cs);
+}
+
/**
* cpuset_hotplug_update_tasks - update tasks in a cpuset for hotunplug
* @cs: cpuset in interest
@@ -2092,9 +2151,6 @@ static void cpuset_hotplug_update_tasks(struct cpuset *cs)
{
static cpumask_t off_cpus;
static nodemask_t off_mems;
- bool is_empty;
- bool on_dfl = cgroup_on_dfl(cs->css.cgroup);
-
retry:
wait_event(cpuset_attach_wq, cs->attach_in_progress == 0);
@@ -2109,61 +2165,16 @@ retry:
goto retry;
}
- cpumask_andnot(&off_cpus, cs->cpus_allowed, top_cpuset.cpus_allowed);
- nodes_andnot(off_mems, cs->mems_allowed, top_cpuset.mems_allowed);
-
- mutex_lock(&callback_mutex);
- cpumask_andnot(cs->cpus_allowed, cs->cpus_allowed, &off_cpus);
-
- /* Inherit the effective mask of the parent, if it becomes empty. */
- cpumask_andnot(cs->effective_cpus, cs->effective_cpus, &off_cpus);
- if (on_dfl && cpumask_empty(cs->effective_cpus))
- cpumask_copy(cs->effective_cpus, parent_cs(cs)->effective_cpus);
- mutex_unlock(&callback_mutex);
-
- /*
- * If on_dfl, we need to update tasks' cpumask for empty cpuset to
- * take on ancestor's cpumask. Otherwise, don't call
- * update_tasks_cpumask() if the cpuset becomes empty, as the tasks
- * in it will be migrated to an ancestor.
- */
- if ((on_dfl && cpumask_empty(cs->cpus_allowed)) ||
- (!cpumask_empty(&off_cpus) && !cpumask_empty(cs->cpus_allowed)))
- update_tasks_cpumask(cs);
-
- mutex_lock(&callback_mutex);
- nodes_andnot(cs->mems_allowed, cs->mems_allowed, off_mems);
+ cpumask_andnot(&off_cpus, cs->effective_cpus,
+ top_cpuset.effective_cpus);
+ nodes_andnot(off_mems, cs->effective_mems, top_cpuset.effective_mems);
- /* Inherit the effective mask of the parent, if it becomes empty */
- nodes_andnot(cs->effective_mems, cs->effective_mems, off_mems);
- if (on_dfl && nodes_empty(cs->effective_mems))
- cs->effective_mems = parent_cs(cs)->effective_mems;
- mutex_unlock(&callback_mutex);
-
- /*
- * If on_dfl, we need to update tasks' nodemask for empty cpuset to
- * take on ancestor's nodemask. Otherwise, don't call
- * update_tasks_nodemask() if the cpuset becomes empty, as the
- * tasks in it will be migrated to an ancestor.
- */
- if ((on_dfl && nodes_empty(cs->mems_allowed)) ||
- (!nodes_empty(off_mems) && !nodes_empty(cs->mems_allowed)))
- update_tasks_nodemask(cs);
-
- is_empty = cpumask_empty(cs->cpus_allowed) ||
- nodes_empty(cs->mems_allowed);
+ if (cgroup_on_dfl(cs->css.cgroup))
+ hotplug_update_tasks(cs, &off_cpus, &off_mems);
+ else
+ hotplug_update_tasks_legacy(cs, &off_cpus, &off_mems);
mutex_unlock(&cpuset_mutex);
-
- /*
- * If on_dfl, we'll keep tasks in empty cpusets.
- *
- * Otherwise move tasks to the nearest ancestor with execution
- * resources. This is a full cgroup operation which will
- * also call back into cpuset. Should be done outside any lock.
- */
- if (!on_dfl && is_empty)
- remove_tasks_in_empty_cpuset(cs);
}
/**