author    Mandeep Singh Baines <msb@chromium.org>  2011-12-21 20:18:37 -0800
committer Tejun Heo <tj@kernel.org>                2011-12-22 07:32:57 -0800
commit    892a2b90ba15cb7dbee40979f23fdb492913abf8 (patch)
tree      db1cb827649a846b84e9c5da03d2b2cf4aedc656 /kernel/cgroup.c
parent    b07ef7741122a83575499c11417e514877941e76 (diff)
cgroup: only need to check oldcgrp==newcgrp once
In cgroup_attach_proc it is now sufficient to only check that
oldcgrp==newcgrp once. Now that we are using threadgroup_lock()
during the migrations, oldcgrp will not change.

Signed-off-by: Mandeep Singh Baines <msb@chromium.org>
Acked-by: Li Zefan <lizf@cn.fujitsu.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: containers@lists.linux-foundation.org
Cc: cgroups@vger.kernel.org
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Paul Menage <paul@paulmenage.org>
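For illustration, here is a minimal standalone C sketch of the pattern the
patch adopts (struct task, build_migration_group, and the other names below
are simplified stand-ins, not the kernel's types or functions): tasks already
in the destination cgroup are filtered out while the array is built, so the
count of stored entries is itself the number of migrating tasks and the later
passes need no per-entry re-check. In the kernel this is only safe because
threadgroup_lock() keeps each task's cgroup from changing between the build
pass and the migration passes.

#include <stdio.h>

/* Simplified stand-in types -- not the kernel's definitions. */
struct cgroup { int id; };
struct task { int pid; struct cgroup *cgrp; };

/* Store only tasks that actually need to move; the return value plays
 * the role of group_size in the patched function. */
static int build_migration_group(struct task *threads, int nthreads,
                                 struct cgroup *dest, struct task **group)
{
        int i, n = 0;

        for (i = 0; i < nthreads; i++) {
                /* nothing to do if this task is already in the cgroup */
                if (threads[i].cgrp == dest)
                        continue;
                group[n++] = &threads[i];
        }
        return n;
}

int main(void)
{
        struct cgroup src = { 1 }, dest = { 2 };
        struct task threads[] = { {100, &src}, {101, &dest}, {102, &src} };
        struct task *group[3];
        int i, group_size = build_migration_group(threads, 3, &dest, group);

        if (!group_size)        /* replaces the old !nr_migrating_tasks test */
                return 0;

        for (i = 0; i < group_size; i++)        /* no "already there?" check */
                printf("migrate pid %d\n", group[i]->pid);
        return 0;
}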
Diffstat (limited to 'kernel/cgroup.c')
-rw-r--r--  kernel/cgroup.c | 22 ++++++----------------
1 file changed, 6 insertions(+), 16 deletions(-)
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index a85a7002ca33..1042b3c41314 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -2067,7 +2067,7 @@ static int css_set_prefetch(struct cgroup *cgrp, struct css_set *cg,
  */
 int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
 {
-        int retval, i, group_size, nr_migrating_tasks;
+        int retval, i, group_size;
         struct cgroup_subsys *ss, *failed_ss = NULL;
         /* guaranteed to be initialized later, but the compiler needs this */
         struct css_set *oldcg;
@@ -2118,7 +2118,7 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
         }
 
         tsk = leader;
-        i = nr_migrating_tasks = 0;
+        i = 0;
         do {
                 struct task_and_cgroup ent;
 
@@ -2134,11 +2134,12 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
                  */
                 ent.task = tsk;
                 ent.cgrp = task_cgroup_from_root(tsk, root);
+                /* nothing to do if this task is already in the cgroup */
+                if (ent.cgrp == cgrp)
+                        continue;
                 retval = flex_array_put(group, i, &ent, GFP_ATOMIC);
                 BUG_ON(retval != 0);
                 i++;
-                if (ent.cgrp != cgrp)
-                        nr_migrating_tasks++;
         } while_each_thread(leader, tsk);
         /* remember the number of threads in the array for later. */
         group_size = i;
@@ -2148,7 +2149,7 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
 
         /* methods shouldn't be called if no task is actually migrating */
         retval = 0;
-        if (!nr_migrating_tasks)
+        if (!group_size)
                 goto out_free_group_list;
 
         /*
@@ -2171,14 +2172,6 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
 
         INIT_LIST_HEAD(&newcg_list);
         for (i = 0; i < group_size; i++) {
                 tc = flex_array_get(group, i);
-                /* nothing to do if this task is already in the cgroup */
-                if (tc->cgrp == cgrp)
-                        continue;
-                /*
-                 * get old css_set pointer. threadgroup is locked so this is
-                 * safe against concurrent cgroup_exit() changing this to
-                 * init_css_set.
-                 */
                 oldcg = tc->task->cgroups;
                 /* if we don't already have it in the list get a new one */
@@ -2194,9 +2187,6 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
          */
         for (i = 0; i < group_size; i++) {
                 tc = flex_array_get(group, i);
-                /* leave current thread as it is if it's already there */
-                if (tc->cgrp == cgrp)
-                        continue;
                 retval = cgroup_task_migrate(cgrp, tc->cgrp, tc->task, true);
                 BUG_ON(retval);
         }