author     Peter Zijlstra <peterz@infradead.org>  2015-02-17 13:07:38 +0100
committer  Ingo Molnar <mingo@kernel.org>         2015-02-18 14:27:28 +0100
commit     74b8a4cb6ce3685049ee124243a52238c5cabe55 (patch)
tree       72ea9b09e55fbe3a59e365e8e51a712057309347 /kernel/sched
parent     e07e0d4cb0c4bfe822ec8491cc06269096a38bea (diff)
sched: Clarify ordering between task_rq_lock() and move_queued_task()
There was a wee bit of confusion around the exact ordering here; clarify things.

Reported-by: Kirill Tkhai <ktkhai@parallels.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Link: http://lkml.kernel.org/r/20150217121258.GM5029@twins.programming.kicks-ass.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched')
-rw-r--r--  kernel/sched/core.c | 16
1 file changed, 16 insertions, 0 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 1f37fe7f77a4..fd0a6ed21849 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -341,6 +341,22 @@ static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
 	raw_spin_lock_irqsave(&p->pi_lock, *flags);
 	rq = task_rq(p);
 	raw_spin_lock(&rq->lock);
+	/*
+	 *	move_queued_task()		task_rq_lock()
+	 *
+	 *	ACQUIRE (rq->lock)
+	 *	[S] ->on_rq = MIGRATING		[L] rq = task_rq()
+	 *	WMB (__set_task_cpu())		ACQUIRE (rq->lock);
+	 *	[S] ->cpu = new_cpu		[L] task_rq()
+	 *					[L] ->on_rq
+	 *	RELEASE (rq->lock)
+	 *
+	 *	If we observe the old cpu in task_rq_lock, the acquire of
+	 *	the old rq->lock will fully serialize against the stores.
+	 *
+	 *	If we observe the new cpu in task_rq_lock, the acquire will
+	 *	pair with the WMB to ensure we must then also see migrating.
+	 */
 	if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
 		return rq;
 	raw_spin_unlock(&rq->lock);
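
As a rough illustration of the pairing the new comment describes, here is a minimal userspace sketch using C11 atomics. It is not kernel code: the names migrate_side() and lock_side_sees_migrating() are invented for this example, and release/acquire atomics stand in for the WMB in __set_task_cpu() and the rq->lock acquire.

/* Sketch only: models the ->on_rq / ->cpu publication order with C11 atomics. */
#include <stdatomic.h>
#include <stdbool.h>

#define MIGRATING 2

static _Atomic int on_rq = 1;	/* models p->on_rq */
static _Atomic int cpu   = 0;	/* models p->cpu   */

/*
 * move_queued_task() side: mark the task migrating, then publish the
 * new cpu. The release store plays the role of the WMB in
 * __set_task_cpu(): it orders the ->on_rq store before the ->cpu store.
 */
static void migrate_side(int new_cpu)
{
	atomic_store_explicit(&on_rq, MIGRATING, memory_order_relaxed);
	atomic_store_explicit(&cpu, new_cpu, memory_order_release);
}

/*
 * task_rq_lock() side: an acquire load of ->cpu pairs with the release
 * store above, so a thread that observes the new cpu must also observe
 * ->on_rq == MIGRATING, and therefore retries instead of returning a
 * stale rq.
 */
static bool lock_side_sees_migrating(int new_cpu)
{
	if (atomic_load_explicit(&cpu, memory_order_acquire) == new_cpu)
		return atomic_load_explicit(&on_rq,
					    memory_order_relaxed) == MIGRATING;
	return true;	/* old cpu seen: the old rq->lock serializes instead */
}

In the actual kernel path the guarantee is stronger, since task_rq_lock() takes rq->lock (a full acquire) and loops until rq == task_rq(p) && !task_on_rq_migrating(p) holds; the sketch only captures why observing the new cpu implies observing the MIGRATING store.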