author		Paul E. McKenney <paul.mckenney@linaro.org>	2011-11-22 17:07:11 -0800
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2011-12-11 10:32:00 -0800
commit		3084f2f80cc8a1fd66233722d88beac0fe85e26f (patch)
tree		bce77805235278599eda0572d5d36b11419681d7 /kernel/rcutree_plugin.h
parent		433cdddcd9ac5558068edd7f8d4707a70f7710f5 (diff)
download	linux-3084f2f80cc8a1fd66233722d88beac0fe85e26f.tar.gz
		linux-3084f2f80cc8a1fd66233722d88beac0fe85e26f.tar.bz2
		linux-3084f2f80cc8a1fd66233722d88beac0fe85e26f.zip
rcu: Go dyntick-idle more quickly if CPU has serviced current grace period
The earlier version would attempt to push callbacks through five times
before going into dyntick-idle mode if callbacks remained, but the CPU
had done all that it needed to do for the current RCU grace periods.
This is wasteful: In most cases, once the CPU has done all that it needs
to for the current RCU grace periods, it will make no further progress
on the callbacks no matter how many times it loops through the RCU core
processing and the idle-entry code.

This commit therefore goes to dyntick-idle mode whenever the current CPU
has done all it can for the current grace period.

Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
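As a rough illustration of the new decision flow, the sketch below is a
self-contained user-space model, not the kernel code itself; the struct,
field names, and the helper may_enter_dyntick_idle() are simplified
stand-ins for the per-CPU RCU state touched in the diff. It shows the new
fast path: if callbacks are queued but RCU has nothing further for this
CPU to do in the current grace period, the CPU goes idle immediately
instead of looping through drain attempts.

/* Illustrative model only; names below are hypothetical stand-ins. */
#include <stdbool.h>
#include <stdio.h>

struct cpu_rcu_state {
	bool has_callbacks;	/* callbacks queued on this CPU */
	bool work_pending;	/* RCU still needs this CPU this grace period */
	int dyntick_drain;	/* remaining callback-push attempts */
	bool awake_at_gp_end;	/* wake this CPU when the grace period ends */
};

/* Decide whether the CPU may enter dyntick-idle mode right away. */
static bool may_enter_dyntick_idle(struct cpu_rcu_state *s)
{
	if (!s->has_callbacks) {
		s->dyntick_drain = 0;		/* reset for later attempts */
		return true;			/* "No callbacks" */
	}
	if (!s->work_pending) {
		/* New fast path: callbacks exist, but nothing to do now. */
		s->dyntick_drain = 0;
		s->awake_at_gp_end = true;	/* handle callbacks at GP end */
		return true;			/* "Dyntick with callbacks" */
	}
	return false;	/* keep trying to push callbacks before idling */
}

int main(void)
{
	struct cpu_rcu_state s = { .has_callbacks = true, .work_pending = false };

	printf("enter dyntick-idle now? %s\n",
	       may_enter_dyntick_idle(&s) ? "yes" : "no");
	return 0;
}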
Diffstat (limited to 'kernel/rcutree_plugin.h')
-rw-r--r--	kernel/rcutree_plugin.h	24
1 files changed, 18 insertions, 6 deletions
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 6467f5669ab7..45790bfb6e8c 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -2028,12 +2028,29 @@ static void rcu_prepare_for_idle(int cpu)
 {
 	int c = 0;
 
-	/* If no callbacks or in the holdoff period, enter dyntick-idle. */
+	/*
+	 * If there are no callbacks on this CPU or if RCU has no further
+	 * need for this CPU at the moment, enter dyntick-idle mode.
+	 * Also reset state so as to not prejudice later attempts.
+	 */
 	if (!rcu_cpu_has_callbacks(cpu)) {
 		per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
+		per_cpu(rcu_dyntick_drain, cpu) = 0;
 		trace_rcu_prep_idle("No callbacks");
 		return;
 	}
+	if (!rcu_pending(cpu)) {
+		trace_rcu_prep_idle("Dyntick with callbacks");
+		per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
+		per_cpu(rcu_dyntick_drain, cpu) = 0;
+		per_cpu(rcu_awake_at_gp_end, cpu) = 1;
+		return; /* Nothing to do immediately. */
+	}
+
+	/*
+	 * If in holdoff mode, just return.  We will presumably have
+	 * refrained from disabling the scheduling-clock tick.
+	 */
 	if (per_cpu(rcu_dyntick_holdoff, cpu) == jiffies) {
 		trace_rcu_prep_idle("In holdoff");
 		return;
@@ -2046,11 +2063,6 @@ static void rcu_prepare_for_idle(int cpu)
 	} else if (--per_cpu(rcu_dyntick_drain, cpu) <= 0) {
 		/* We have hit the limit, so time to give up. */
 		per_cpu(rcu_dyntick_holdoff, cpu) = jiffies;
-		if (!rcu_pending(cpu)) {
-			trace_rcu_prep_idle("Dyntick with callbacks");
-			per_cpu(rcu_awake_at_gp_end, cpu) = 1;
-			return; /* Nothing to do immediately. */
-		}
 		trace_rcu_prep_idle("Begin holdoff");
 		invoke_rcu_core(); /* Force the CPU out of dyntick-idle. */
 		return;