author     Peter Zijlstra <peterz@infradead.org>            2018-05-22 09:50:53 -0700
committer  Paul E. McKenney <paulmck@linux.vnet.ibm.com>    2018-05-22 16:12:26 -0700
commit     f64c6013a2029316ea552f99823d116ee5f5f955
tree       07426bae4e4500acc8bc64a0edd0c6fe6b298e6e
parent     22df7316ac71dc1ac57415349938737d2a229c59
rcu/x86: Provide early rcu_cpu_starting() callback
The x86/mtrr code does horrific things because of the hardware. It uses
stop_machine_from_inactive_cpu(), which does a wakeup (of the stopper
thread on another CPU), which uses RCU, all before the CPU is onlined.

RCU complains about this, because wakeups use RCU and RCU (rightfully)
does not consider offline CPUs for grace periods.

Fix this by initializing RCU early in the MTRR case: make
rcu_cpu_starting() safe to call repeatedly and invoke it from
mtrr_ap_init() before the offending wakeup.
Tested-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
[ paulmck: Add !SMP support, per 0day Test Robot report. ]
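To see why the early call is safe, consider the guard the patch adds to
rcu_cpu_starting(): the first call for a CPU does the bring-up work,
repeated calls become no-ops, and rcu_report_dead() rearms the guard for
the next online cycle. Below is a minimal standalone C model of that
idempotency pattern (hypothetical illustration only -- the array, printf()
calls, and main() are not kernel code; the kernel uses a per-CPU variable):

#include <stdio.h>
#include <stdbool.h>

#define NR_CPUS 4

/* Models DEFINE_PER_CPU(int, rcu_cpu_started) from the patch. */
static bool rcu_cpu_started[NR_CPUS];

static void rcu_cpu_starting(unsigned int cpu)
{
	if (rcu_cpu_started[cpu])
		return;			/* already marked online: no-op */
	rcu_cpu_started[cpu] = true;
	printf("cpu %u: marked online for grace periods\n", cpu);
}

static void rcu_report_dead(unsigned int cpu)
{
	rcu_cpu_started[cpu] = false;	/* rearm for the next online cycle */
	printf("cpu %u: marked offline\n", cpu);
}

int main(void)
{
	rcu_cpu_starting(1);	/* early call, as from mtrr_ap_init() */
	rcu_cpu_starting(1);	/* later CPU-hotplug call: no-op */
	rcu_report_dead(1);	/* going offline rearms the guard */
	rcu_cpu_starting(1);	/* re-onlining does the work again */
	return 0;
}

Because the second invocation is a no-op, the normal CPU-hotplug path can
still call rcu_cpu_starting() unconditionally after the MTRR code has
already done so.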
 arch/x86/kernel/cpu/mtrr/main.c | 4 ++++
 include/linux/rcupdate.h        | 1 -
 include/linux/rcutiny.h         | 1 +
 include/linux/rcutree.h         | 1 +
 kernel/rcu/tree.c               | 9 +++++++++
 5 files changed, 15 insertions(+), 1 deletion(-)
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index 7468de429087..3ea0047beb40 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -46,6 +46,7 @@
 #include <linux/pci.h>
 #include <linux/smp.h>
 #include <linux/syscore_ops.h>
+#include <linux/rcupdate.h>
 
 #include <asm/cpufeature.h>
 #include <asm/e820/api.h>
@@ -793,6 +794,9 @@ void mtrr_ap_init(void)
 
 	if (!use_intel() || mtrr_aps_delayed_init)
 		return;
+
+	rcu_cpu_starting(smp_processor_id());
+
 	/*
 	 * Ideally we should hold mtrr_mutex here to avoid mtrr entries
 	 * changed, but this routine will be called in cpu boot time,
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 19d235fefdb9..e679b175b411 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -108,7 +108,6 @@ void rcu_sched_qs(void);
 void rcu_bh_qs(void);
 void rcu_check_callbacks(int user);
 void rcu_report_dead(unsigned int cpu);
-void rcu_cpu_starting(unsigned int cpu);
 void rcutree_migrate_callbacks(int cpu);
 
 #ifdef CONFIG_RCU_STALL_COMMON
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index ce9beec35e34..7b3c82e8a625 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -132,5 +132,6 @@ static inline void rcu_all_qs(void) { barrier(); }
 #define rcutree_offline_cpu NULL
 #define rcutree_dead_cpu NULL
 #define rcutree_dying_cpu NULL
+static inline void rcu_cpu_starting(unsigned int cpu) { }
 
 #endif /* __LINUX_RCUTINY_H */
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 448f20f27396..914655848ef6 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -101,5 +101,6 @@ int rcutree_online_cpu(unsigned int cpu);
 int rcutree_offline_cpu(unsigned int cpu);
 int rcutree_dead_cpu(unsigned int cpu);
 int rcutree_dying_cpu(unsigned int cpu);
+void rcu_cpu_starting(unsigned int cpu);
 
 #endif /* __LINUX_RCUTREE_H */
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 4fccdfa25716..aa7cade1b9f3 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -3665,6 +3665,8 @@ int rcutree_dead_cpu(unsigned int cpu)
 	return 0;
 }
 
+static DEFINE_PER_CPU(int, rcu_cpu_started);
+
 /*
  * Mark the specified CPU as being online so that subsequent grace periods
  * (both expedited and normal) will wait on it.  Note that this means that
@@ -3686,6 +3688,11 @@ void rcu_cpu_starting(unsigned int cpu)
 	struct rcu_node *rnp;
 	struct rcu_state *rsp;
 
+	if (per_cpu(rcu_cpu_started, cpu))
+		return;
+
+	per_cpu(rcu_cpu_started, cpu) = 1;
+
 	for_each_rcu_flavor(rsp) {
 		rdp = per_cpu_ptr(rsp->rda, cpu);
 		rnp = rdp->mynode;
@@ -3742,6 +3749,8 @@ void rcu_report_dead(unsigned int cpu)
 	preempt_enable();
 	for_each_rcu_flavor(rsp)
 		rcu_cleanup_dying_idle_cpu(cpu, rsp);
+
+	per_cpu(rcu_cpu_started, cpu) = 0;
 }
 
 /* Migrate the dead CPU's callbacks to the current CPU. */