author     Paul E. McKenney <paulmck@kernel.org>    2019-11-26 18:05:45 -0800
committer  Paul E. McKenney <paulmck@kernel.org>    2020-01-24 10:27:33 -0800
commit     5b14557b073c96a7cf79adc4d7b6c4a8c26b2a43 (patch)
tree       4c4590af8b9c59b6408350d387c5aed3c4e76204 /kernel/rcu
parent     77339e61aa309310a535bd01eb3388f7a27b36f9 (diff)
rcu: Avoid tick_dep_set_cpu() misordering
In the current code, rcu_nmi_enter_common() might decide to turn on
the tick using tick_dep_set_cpu(), but be delayed just before doing so.
Then the grace-period kthread might notice that the CPU in question had
in fact gone through a quiescent state, thus turning off the tick using
tick_dep_clear_cpu(). The later invocation of tick_dep_set_cpu() would
then incorrectly leave the tick on.
This commit therefore enlists the aid of the leaf rcu_node structure's
->lock to ensure that decisions to enable or disable the tick are
carried out before they can be reversed.
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
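
For readers who want to see the locking idiom in isolation: below is a minimal
userspace sketch of the fix's "unlocked fast-path check, then recheck under the
lock" pattern. Every name here (node_lock, urgent_qs, forced_tick,
maybe_force_tick(), disable_urgency()) is a simplified stand-in invented for
illustration, with a pthread mutex standing in for the leaf rcu_node
structure's ->lock; this is not kernel code.

/*
 * Illustrative only: a userspace model of the recheck-under-lock pattern.
 * node_lock stands in for the leaf rcu_node structure's ->lock;
 * urgent_qs and forced_tick stand in for rdp->rcu_urgent_qs and
 * rdp->rcu_forced_tick.  Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t node_lock = PTHREAD_MUTEX_INITIALIZER;
static bool urgent_qs;
static bool forced_tick;

/* Model of rcu_nmi_enter_common()'s tick-enabling path. */
static void maybe_force_tick(void)
{
	if (urgent_qs && !forced_tick) {	/* unlocked fast-path check */
		pthread_mutex_lock(&node_lock);
		/*
		 * Recheck under the lock: a quiescent state might have
		 * cleared urgent_qs between the check above and here.
		 */
		if (urgent_qs && !forced_tick) {
			forced_tick = true;
			printf("tick dependency set\n");
		}
		pthread_mutex_unlock(&node_lock);
	}
}

/*
 * Model of rcu_disable_urgency_upon_qs(); the caller must hold node_lock,
 * mirroring the raw_lockdep_assert_held_rcu_node() the patch adds.
 */
static void disable_urgency(void)
{
	urgent_qs = false;
	if (forced_tick) {
		forced_tick = false;
		printf("tick dependency cleared\n");
	}
}

int main(void)
{
	urgent_qs = true;
	maybe_force_tick();		/* sets the tick dependency */

	pthread_mutex_lock(&node_lock);
	disable_urgency();		/* clears it, under the lock */
	pthread_mutex_unlock(&node_lock);

	maybe_force_tick();		/* recheck fails; tick stays off */
	return 0;
}

Because both the set and the clear now happen while the lock is held, a delayed
setter can no longer fire after a clear: it re-examines urgent_qs under the
lock and backs off.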
Diffstat (limited to 'kernel/rcu')
-rw-r--r--    kernel/rcu/tree.c    12
1 file changed, 9 insertions, 3 deletions
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 5445da2326a0..b0e0612392a9 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -800,8 +800,8 @@ void rcu_user_exit(void)
  */
 static __always_inline void rcu_nmi_enter_common(bool irq)
 {
-	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
 	long incby = 2;
+	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
 
 	/* Complain about underflow. */
 	WARN_ON_ONCE(rdp->dynticks_nmi_nesting < 0);
@@ -828,8 +828,13 @@ static __always_inline void rcu_nmi_enter_common(bool irq)
 	} else if (tick_nohz_full_cpu(rdp->cpu) &&
 		   rdp->dynticks_nmi_nesting == DYNTICK_IRQ_NONIDLE &&
 		   READ_ONCE(rdp->rcu_urgent_qs) && !rdp->rcu_forced_tick) {
-		rdp->rcu_forced_tick = true;
-		tick_dep_set_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
+		raw_spin_lock_rcu_node(rdp->mynode);
+		// Recheck under lock.
+		if (rdp->rcu_urgent_qs && !rdp->rcu_forced_tick) {
+			rdp->rcu_forced_tick = true;
+			tick_dep_set_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
+		}
+		raw_spin_unlock_rcu_node(rdp->mynode);
 	}
 	trace_rcu_dyntick(incby == 1 ? TPS("Endirq") : TPS("++="),
 			  rdp->dynticks_nmi_nesting,
@@ -898,6 +903,7 @@ void rcu_irq_enter_irqson(void)
  */
 static void rcu_disable_urgency_upon_qs(struct rcu_data *rdp)
 {
+	raw_lockdep_assert_held_rcu_node(rdp->mynode);
 	WRITE_ONCE(rdp->rcu_urgent_qs, false);
 	WRITE_ONCE(rdp->rcu_need_heavy_qs, false);
 	if (tick_nohz_full_cpu(rdp->cpu) && rdp->rcu_forced_tick) {
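
Note the second half of the fix: the raw_lockdep_assert_held_rcu_node() added
to rcu_disable_urgency_upon_qs() documents, and verifies when lockdep is
enabled, that every caller clearing the tick dependency already holds the leaf
rcu_node structure's ->lock, which is the same lock the rechecking path in
rcu_nmi_enter_common() now acquires. That shared lock is what orders the
enable and disable decisions.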