| field | value | date |
|---|---|---|
| author | Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 2017-04-11 15:50:41 -0700 |
| committer | Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 2017-04-21 05:59:27 -0700 |
| commit | bcbfdd01dce5556a952fae84ef16fd0f12525e7b | |
| tree | d674b23e7a573c6e5234acb5e914fc60e581594a /kernel | |
| parent | 0497b489b8255054f113fd31faeb72f6dbc50a68 | |
rcu: Make non-preemptive schedule be Tasks RCU quiescent state
Currently, a call to schedule() acts as a Tasks RCU quiescent state
only if a context switch actually takes place. However, just the
call to schedule() guarantees that the calling task has moved off of
whatever tracing trampoline it might previously have been running on.
This commit therefore plumbs schedule()'s "preempt" parameter into
rcu_note_context_switch(), which then records the Tasks RCU quiescent
state, but only if this call to schedule() was -not- due to a preemption.
To avoid adding overhead to the common-case context-switch path,
this commit hides the rcu_note_context_switch() check under an existing
non-common-case check.
Suggested-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
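
The gist of the tree.c change is easiest to see in isolation: a voluntary call to schedule() (preempt == false) lets the calling task be treated as having passed a Tasks RCU quiescent state, while an involuntary preemption does not. The following is a minimal, self-contained sketch of that control flow, not the kernel code; the names task_model, report_tasks_rcu_qs, and note_context_switch are hypothetical stand-ins for current->rcu_tasks_holdout, rcu_note_voluntary_context_switch_lite(), and rcu_note_context_switch().

```c
/*
 * Illustrative model only: not kernel code.  A voluntary schedule()
 * (preempt == false) clears the Tasks RCU holdout flag; a preemption
 * (preempt == true) leaves it alone, because the preempted task may
 * still be executing on a tracing trampoline.
 */
#include <stdbool.h>
#include <stdio.h>

struct task_model {
	bool rcu_tasks_holdout;		/* Set while Tasks RCU waits on this task. */
};

/* Hypothetical stand-in for rcu_note_voluntary_context_switch_lite(). */
static void report_tasks_rcu_qs(struct task_model *t)
{
	t->rcu_tasks_holdout = false;	/* Task is past any old trampoline. */
}

/* Hypothetical stand-in for rcu_note_context_switch(bool preempt). */
static void note_context_switch(struct task_model *curr, bool preempt)
{
	/* ... ordinary (non-Tasks) RCU quiescent-state bookkeeping ... */
	if (!preempt)
		report_tasks_rcu_qs(curr);	/* Voluntary schedule() only. */
}

int main(void)
{
	struct task_model t = { .rcu_tasks_holdout = true };

	note_context_switch(&t, true);	/* Preemption: still a holdout. */
	printf("after preemption:       holdout=%d\n", t.rcu_tasks_holdout);

	note_context_switch(&t, false);	/* Voluntary: quiescent state reported. */
	printf("after voluntary switch: holdout=%d\n", t.rcu_tasks_holdout);
	return 0;
}
```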
Diffstat (limited to 'kernel')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | kernel/rcu/tree.c | 22 |
| -rw-r--r-- | kernel/rcu/update.c | 1 |
| -rw-r--r-- | kernel/sched/core.c | 2 |

3 files changed, 23 insertions, 2 deletions
```diff
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 3c23435d2083..891d97109e09 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -458,7 +458,7 @@ static void rcu_momentary_dyntick_idle(void)
  * and requires special handling for preemptible RCU.
  * The caller must have disabled interrupts.
  */
-void rcu_note_context_switch(void)
+void rcu_note_context_switch(bool preempt)
 {
 	barrier(); /* Avoid RCU read-side critical sections leaking down. */
 	trace_rcu_utilization(TPS("Start context switch"));
@@ -471,6 +471,8 @@ void rcu_note_context_switch(void)
 	if (unlikely(raw_cpu_read(rcu_dynticks.rcu_need_heavy_qs)))
 		rcu_momentary_dyntick_idle();
 	this_cpu_inc(rcu_dynticks.rcu_qs_ctr);
+	if (!preempt)
+		rcu_note_voluntary_context_switch_lite(current);
 out:
 	trace_rcu_utilization(TPS("End context switch"));
 	barrier(); /* Avoid RCU read-side critical sections leaking up. */
@@ -1149,6 +1151,24 @@ bool notrace rcu_is_watching(void)
 }
 EXPORT_SYMBOL_GPL(rcu_is_watching);
 
+/*
+ * If a holdout task is actually running, request an urgent quiescent
+ * state from its CPU. This is unsynchronized, so migrations can cause
+ * the request to go to the wrong CPU. Which is OK, all that will happen
+ * is that the CPU's next context switch will be a bit slower and next
+ * time around this task will generate another request.
+ */
+void rcu_request_urgent_qs_task(struct task_struct *t)
+{
+	int cpu;
+
+	barrier();
+	cpu = task_cpu(t);
+	if (!task_curr(t))
+		return; /* This task is not running on that CPU. */
+	smp_store_release(per_cpu_ptr(&rcu_dynticks.rcu_urgent_qs, cpu), true);
+}
+
 #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)
 
 /*
diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
index c5df0d756900..273e869ca21d 100644
--- a/kernel/rcu/update.c
+++ b/kernel/rcu/update.c
@@ -665,6 +665,7 @@ static void check_holdout_task(struct task_struct *t,
 		put_task_struct(t);
 		return;
 	}
+	rcu_request_urgent_qs_task(t);
 	if (!needreport)
 		return;
 	if (*firstreport) {
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 3b31fc05a0f1..2adf7b6c04e7 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3378,7 +3378,7 @@ static void __sched notrace __schedule(bool preempt)
 		hrtick_clear(rq);
 
 	local_irq_disable();
-	rcu_note_context_switch();
+	rcu_note_context_switch(preempt);
 
 	/*
 	 * Make sure that signal_pending_state()->signal_pending() below
```
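
The new rcu_request_urgent_qs_task() is deliberately tolerant of races with task migration, as its comment notes: the CPU is sampled once, and if the task moves afterward the urgent-quiescent-state flag merely lands on a CPU the task has already left, costing that CPU one slightly slower context switch. The sketch below is a hypothetical, single-threaded model of that design choice; the types cpu_model and task_model and the helper request_urgent_qs are invented for illustration and are not kernel APIs.

```c
/* Illustrative model only: not kernel code. */
#include <stdbool.h>
#include <stdio.h>

#define N_CPUS 4

struct cpu_model {
	bool rcu_urgent_qs;	/* Models the per-CPU rcu_dynticks.rcu_urgent_qs flag. */
};

struct task_model {
	int cpu;		/* CPU the task was last seen running on. */
	bool running;		/* Is the task currently on a CPU? */
};

static struct cpu_model cpus[N_CPUS];

/* Hypothetical stand-in for rcu_request_urgent_qs_task(). */
static void request_urgent_qs(const struct task_model *t)
{
	int cpu = t->cpu;	/* One unsynchronized sample; the task may move later. */

	if (!t->running)
		return;		/* Not running: no CPU to prod. */
	cpus[cpu].rcu_urgent_qs = true;	/* Possibly a stale CPU; that is tolerated. */
}

int main(void)
{
	struct task_model holdout = { .cpu = 2, .running = true };

	request_urgent_qs(&holdout);
	for (int i = 0; i < N_CPUS; i++)
		printf("cpu%d urgent_qs=%d\n", i, cpus[i].rcu_urgent_qs);
	return 0;
}
```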