path: root/kernel
author		Frederic Weisbecker <fweisbec@gmail.com>	2013-10-11 17:41:11 +0200
committer	Frederic Weisbecker <fweisbec@gmail.com>	2013-12-09 16:53:51 +0100
commit		50875788a1d4a3f662a27ed13cd05282d835939a (patch)
tree		254cb6e105b576170aac22c195289de7b1998749 /kernel
parent		33ab0fec33527e8b5ab124cff6aefd4746508e04 (diff)
posix-timers: Use sighand lock instead of tasklist_lock for task clock sample
There is no need for the tasklist_lock just to take a process-wide clock sample.

All we need is to get a coherent sample that doesn't race with exit() and exec():

* exit() may be concurrently reaping a task and flushing its time

* sighand is unstable under exit() and exec(), and the latter also results in a group leader that can change

To protect against these, locking the target's sighand is enough.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Kosaki Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
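For context, a minimal sketch of the locking pattern this change adopts: the group clock is sampled under the target's sighand lock rather than tasklist_lock, bailing out when lock_task_sighand() fails because the task is already being reaped. The standalone helper name sample_group_clock_locked() is illustrative only; the actual change lives inside posix_cpu_clock_get_task() as shown in the diff below.

/* Illustrative sketch only; the helper name is hypothetical. */
static int sample_group_clock_locked(const clockid_t which_clock,
				     struct task_struct *tsk,
				     unsigned long long *sample)
{
	struct sighand_struct *sighand;
	unsigned long flags;
	int err = -EINVAL;

	/* Returns NULL if the task is concurrently being reaped by exit() */
	sighand = lock_task_sighand(tsk, &flags);
	if (!sighand)
		return err;

	/* exec() can change the group leader, so check under the lock */
	if (tsk == current || thread_group_leader(tsk))
		err = cpu_clock_sample_group(which_clock, tsk, sample);

	unlock_task_sighand(tsk, &flags);
	return err;
}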
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/posix-cpu-timers.c | 16 +++++++++++++---
1 files changed, 13 insertions(+), 3 deletions(-)
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 03c5d6c3e614..71a07699a36b 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -271,12 +271,22 @@ static int posix_cpu_clock_get_task(struct task_struct *tsk,
 		if (same_thread_group(tsk, current))
 			err = cpu_clock_sample(which_clock, tsk, &rtn);
 	} else {
-		read_lock(&tasklist_lock);
+		unsigned long flags;
+		struct sighand_struct *sighand;
 
-		if (tsk->sighand && (tsk == current || thread_group_leader(tsk)))
+		/*
+		 * while_each_thread() is not yet entirely RCU safe,
+		 * keep locking the group while sampling process
+		 * clock for now.
+		 */
+		sighand = lock_task_sighand(tsk, &flags);
+		if (!sighand)
+			return err;
+
+		if (tsk == current || thread_group_leader(tsk))
 			err = cpu_clock_sample_group(which_clock, tsk, &rtn);
 
-		read_unlock(&tasklist_lock);
+		unlock_task_sighand(tsk, &flags);
 	}
 
 	if (!err)