author     Gerald Schaefer <gerald.schaefer@de.ibm.com>  2013-05-24 18:07:49 +0200
committer  Ingo Molnar <mingo@kernel.org>                2013-05-28 09:40:22 +0200
commit     41261b6a832ea0e788627f6a8707854423f9ff49 (patch)
tree       da525ae676d3794ff05ff7cdb9fbf2a74ca65066 /kernel/sched
parent     d07e75a6e0e8582bdecefe8868b0bfbdf2ee7085 (diff)
sched/autogroup: Fix race with task_groups list
In autogroup_create(), a tg is allocated and added to the task_groups
list. If CONFIG_RT_GROUP_SCHED is set, this tg is then modified while on
the list, without locking. This can race with someone walking the list,
like __enable_runtime() during CPU unplug, and result in a use-after-free
bug.

To fix this, move sched_online_group(), which adds the tg to the list, to
the end of the autogroup_create() function after the modification.

Signed-off-by: Gerald Schaefer <gerald.schaefer@de.ibm.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1369411669-46971-2-git-send-email-gerald.schaefer@de.ibm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
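[Editor's note: the bug class fixed here is the classic publish-before-initialize
ordering. Below is a minimal userspace C sketch of the pattern, for illustration
only; all names (struct item, global_list, create_broken, create_fixed) are
hypothetical, not kernel code, and real concurrent code would additionally need
locking or barriers around the publish step, as sched_online_group() gets via
the task_groups locking.]

    #include <stdlib.h>

    struct item {
            int data;
            struct item *next;
    };

    static struct item *global_list;        /* concurrently walked by readers */

    /* Buggy ordering: publish first, finish initialization afterwards. */
    static void create_broken(void)
    {
            struct item *it = malloc(sizeof(*it));
            if (!it)
                    return;
            it->next = global_list;
            global_list = it;       /* readers can now see 'it'...          */
            it->data = 42;          /* ...while we still modify it: race    */
    }

    /* Fixed ordering, mirroring this patch: initialize fully, publish last. */
    static void create_fixed(void)
    {
            struct item *it = malloc(sizeof(*it));
            if (!it)
                    return;
            it->data = 42;          /* complete all initialization first    */
            it->next = global_list;
            global_list = it;       /* publish only once 'it' is consistent */
    }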
Diffstat (limited to 'kernel/sched')
 kernel/sched/auto_group.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
index 64de5f8b0c9e..4a073539c58e 100644
--- a/kernel/sched/auto_group.c
+++ b/kernel/sched/auto_group.c
@@ -77,8 +77,6 @@ static inline struct autogroup *autogroup_create(void)
 	if (IS_ERR(tg))
 		goto out_free;
 
-	sched_online_group(tg, &root_task_group);
-
 	kref_init(&ag->kref);
 	init_rwsem(&ag->lock);
 	ag->id = atomic_inc_return(&autogroup_seq_nr);
@@ -98,6 +96,7 @@ static inline struct autogroup *autogroup_create(void)
 #endif
 	tg->autogroup = ag;
 
+	sched_online_group(tg, &root_task_group);
 	return ag;
 
 out_free:
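[Editor's note: for context, after this patch autogroup_create() roughly reads
as below. This is a condensed sketch assembled only from the hunks above; the
parts marked /* ... */ are not reproduced by this diff and are merely
indicated.]

    static inline struct autogroup *autogroup_create(void)
    {
            /* ... allocate ag, create tg via sched_create_group() ... */
            if (IS_ERR(tg))
                    goto out_free;

            kref_init(&ag->kref);
            init_rwsem(&ag->lock);
            ag->id = atomic_inc_return(&autogroup_seq_nr);
            /* ... CONFIG_RT_GROUP_SCHED tweaks to tg while still private ... */
            tg->autogroup = ag;

            /* tg is now fully set up; publishing it on task_groups is safe */
            sched_online_group(tg, &root_task_group);
            return ag;

    out_free:
            /* ... */
    }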