author     Paul E. McKenney <paul.mckenney@linaro.org>   2011-07-10 15:57:35 -0700
committer  Paul E. McKenney <paulmck@linux.vnet.ibm.com> 2011-07-13 08:17:56 -0700
commit     b0d304172f49061b4ff78f9e2b02719ac69c8a7e
tree       882123e565ced895910490514f6e676c708b5001 /kernel/rcutree_plugin.h
parent     620917de59eeb934b9f8cf35cc2d95c1ac8ed0fc
rcu: Prevent RCU callbacks from executing before scheduler initialized
Under some rare but real combinations of configuration parameters, RCU
callbacks are posted during early boot that use kernel facilities which
are not yet initialized, so hard hangs and crashes ensue when those
callbacks are invoked. This commit therefore prevents RCU callbacks
from being invoked until the scheduler is fully up and running, that
is, until after multiple tasks have been spawned.
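
To make the gating concrete, here is a minimal userspace model of the idea,
assuming nothing beyond standard C. Every name except the
rcu_scheduler_fully_active flag is hypothetical; the real logic lives in
kernel/rcutree.c and kernel/rcutree_plugin.h. Registration always succeeds,
as call_rcu() must, but invocation is refused until the flag is set:

#include <stdio.h>

#define MAX_CBS 8

static int rcu_scheduler_fully_active;	/* 0 during "early boot" */

static void (*queued[MAX_CBS])(void);	/* callbacks registered so far */
static int nqueued;

/* Registration always succeeds, even early (as callback posting must). */
static void register_callback(void (*func)(void))
{
	if (nqueued < MAX_CBS)
		queued[nqueued++] = func;
}

/* Invocation is gated on the flag; this gating is the point of the patch. */
static void invoke_callbacks(void)
{
	int i;

	if (!rcu_scheduler_fully_active)
		return;		/* too early: leave the callbacks queued */
	for (i = 0; i < nqueued; i++)
		queued[i]();
	nqueued = 0;
}

static void my_cb(void)
{
	puts("callback invoked");
}

int main(void)
{
	register_callback(my_cb);
	invoke_callbacks();		/* no-op: "scheduler" not yet active */
	rcu_scheduler_fully_active = 1;	/* as the new early_initcall does */
	invoke_callbacks();		/* now the callback actually runs */
	return 0;
}

Built with any C compiler, this prints "callback invoked" exactly once, after
the flag flips, mirroring how early-boot callbacks stay merely registered
until the scheduler is fully active.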
It might well turn out that a better approach is to identify the specific
RCU callbacks that are causing this problem, but that discussion will
wait until such time as someone really needs an RCU callback to be invoked
(as opposed to merely registered) during early boot.
Reported-by: julie Sullivan <kernelmail.jms@gmail.com>
Reported-by: RKK <kulkarni.ravi4@gmail.com>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Tested-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Tested-by: julie Sullivan <kernelmail.jms@gmail.com>
Tested-by: RKK <kulkarni.ravi4@gmail.com>
Diffstat (limited to 'kernel/rcutree_plugin.h')
-rw-r--r--  kernel/rcutree_plugin.h | 15 +++++++++++----
1 file changed, 11 insertions(+), 4 deletions(-)
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 14dc7dd00902..75113cb7c4fb 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -1532,7 +1532,7 @@ static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu)
 	struct sched_param sp;
 	struct task_struct *t;
 
-	if (!rcu_kthreads_spawnable ||
+	if (!rcu_scheduler_fully_active ||
 	    per_cpu(rcu_cpu_kthread_task, cpu) != NULL)
 		return 0;
 	t = kthread_create(rcu_cpu_kthread, (void *)(long)cpu, "rcuc%d", cpu);
@@ -1639,7 +1639,7 @@ static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp,
 	struct sched_param sp;
 	struct task_struct *t;
 
-	if (!rcu_kthreads_spawnable ||
+	if (!rcu_scheduler_fully_active ||
 	    rnp->qsmaskinit == 0)
 		return 0;
 	if (rnp->node_kthread_task == NULL) {
@@ -1665,7 +1665,7 @@ static int __init rcu_spawn_kthreads(void)
 	int cpu;
 	struct rcu_node *rnp;
 
-	rcu_kthreads_spawnable = 1;
+	rcu_scheduler_fully_active = 1;
 	for_each_possible_cpu(cpu) {
 		per_cpu(rcu_cpu_has_work, cpu) = 0;
 		if (cpu_online(cpu))
@@ -1687,7 +1687,7 @@ static void __cpuinit rcu_prepare_kthreads(int cpu)
 	struct rcu_node *rnp = rdp->mynode;
 
 	/* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
-	if (rcu_kthreads_spawnable) {
+	if (rcu_scheduler_fully_active) {
 		(void)rcu_spawn_one_cpu_kthread(cpu);
 		if (rnp->node_kthread_task == NULL)
 			(void)rcu_spawn_one_node_kthread(rcu_state, rnp);
@@ -1726,6 +1726,13 @@ static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
 {
 }
 
+static int __init rcu_scheduler_really_started(void)
+{
+	rcu_scheduler_fully_active = 1;
+	return 0;
+}
+early_initcall(rcu_scheduler_really_started);
+
 static void __cpuinit rcu_prepare_kthreads(int cpu)
 {
 }
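
A note on the resulting design, as far as these hunks show it: two
configuration-dependent paths set the flag. When the per-CPU kthread support
is built, rcu_spawn_kthreads() sets rcu_scheduler_fully_active = 1 as it
spawns the kthreads; when it is not (the empty stub functions around the
final hunk mark that branch of the file), the new rcu_scheduler_really_started()
early initcall does nothing but set the flag. Since early initcalls run out
of the kernel_init task, the flag can only flip once the scheduler is running
and multiple tasks exist, which is precisely the condition the commit message
calls for.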