author    Paul E. McKenney <paulmck@linux.ibm.com>  2019-04-01 14:12:50 -0700
committer Paul E. McKenney <paulmck@linux.ibm.com>  2019-05-25 14:50:48 -0700
commit    25102de65fdd246eb6801114ce6dfa3a076bb678 (patch)
tree      86ed797ae9362153f3e61289db3d324c17e3b9b9 /kernel/rcu
parent    23634ebc1d946f19eb112d4455c1d84948875e31 (diff)
rcu: Only do rcu_read_unlock_special() wakeups if expedited
Currently, rcu_read_unlock_special() will do wakeups whenever it is safe to do so. However, wakeups are expensive, and they are really needed only when the just-ended RCU read-side critical section is blocking an expedited grace period (in which case speed is of the essence) or ran on a nohz_full CPU (where it might be a good long time before an interrupt arrives). This commit therefore checks for these conditions, and does the expensive wakeups only if doing so would be useful.

Note that it can be rather expensive to determine whether or not the current task (as opposed to the current CPU) is blocking the current expedited grace period. Doing so requires traversing the ->blkd_tasks list, which can be quite long. This commit therefore cheats: if the current task is on a given ->blkd_tasks list, and some task on that list is blocking the current expedited grace period, the code assumes that the current task is blocking that expedited grace period.

Reported-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.ibm.com>
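For illustration, below is a minimal, self-contained user-space sketch of the new wakeup test. The structure layouts, the helper name unlock_needs_expedited_wakeup(), and the tick_nohz_full_cpu() stub are simplified stand-ins invented for this sketch, not the kernel's real definitions; only the three-way predicate mirrors the patch's computation of "exp" in the diff below.

/*
 * Toy model of the "is an expensive wakeup worth it?" heuristic.
 * Field names (exp_tasks, expmask, grpmask, mynode) echo the kernel's
 * rcu_node/rcu_data, but these structs are illustrative stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>

struct rcu_node {
        void *exp_tasks;        /* First task blocking the expedited GP, if any. */
        unsigned long expmask;  /* CPUs still holding up the expedited GP. */
};

struct rcu_data {
        unsigned long grpmask;  /* This CPU's bit within its rcu_node. */
        int cpu;
        struct rcu_node *mynode;
};

/* Stand-in for tick_nohz_full_cpu(): pretend only CPU 3 is nohz_full. */
static bool tick_nohz_full_cpu(int cpu)
{
        return cpu == 3;
}

/*
 * The "cheat": if any task queued on the rcu_node this task blocked on
 * is holding up the current expedited GP (->exp_tasks non-NULL), assume
 * this task is too, rather than walking the whole ->blkd_tasks list.
 * Also wake up if this CPU still blocks the expedited GP or is nohz_full.
 */
static bool unlock_needs_expedited_wakeup(struct rcu_node *blocked_node,
                                          struct rcu_data *rdp)
{
        struct rcu_node *rnp = rdp->mynode;

        return (blocked_node && blocked_node->exp_tasks) ||
               (rdp->grpmask & rnp->expmask) ||
               tick_nohz_full_cpu(rdp->cpu);
}

int main(void)
{
        struct rcu_node rnp = { .exp_tasks = NULL, .expmask = 0x2 };
        struct rcu_data rdp = { .grpmask = 0x2, .cpu = 0, .mynode = &rnp };

        /* CPU's bit is still set in ->expmask, so the wakeup is worth it. */
        printf("wakeup needed: %d\n", unlock_needs_expedited_wakeup(NULL, &rdp));
        return 0;
}

The cheat is deliberately conservative: a false positive merely costs one unneeded wakeup, whereas computing the exact answer would require the long ->blkd_tasks traversal that the commit message calls out as expensive.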
Diffstat (limited to 'kernel/rcu')
-rw-r--r--  kernel/rcu/tree_plugin.h | 12 ++++++++++--
1 file changed, 10 insertions(+), 2 deletions(-)
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 75110ea75d01..d15cdab6aeb4 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -606,20 +606,28 @@ static void rcu_read_unlock_special(struct task_struct *t)
local_irq_save(flags);
irqs_were_disabled = irqs_disabled_flags(flags);
if (preempt_bh_were_disabled || irqs_were_disabled) {
+ bool exp;
+ struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
+ struct rcu_node *rnp = rdp->mynode;
+
t->rcu_read_unlock_special.b.exp_hint = false;
+ exp = (t->rcu_blocked_node && t->rcu_blocked_node->exp_tasks) ||
+ (rdp->grpmask & rnp->expmask) ||
+ tick_nohz_full_cpu(rdp->cpu);
// Need to defer quiescent state until everything is enabled.
- if (irqs_were_disabled && use_softirq &&
+ if (exp && irqs_were_disabled && use_softirq &&
(in_irq() || !t->rcu_read_unlock_special.b.deferred_qs)) {
// Using softirq, safe to awaken, and we get
// no help from enabling irqs, unlike bh/preempt.
raise_softirq_irqoff(RCU_SOFTIRQ);
- } else if (irqs_were_disabled && !use_softirq &&
+ } else if (exp && irqs_were_disabled && !use_softirq &&
!t->rcu_read_unlock_special.b.deferred_qs) {
// Safe to awaken and we get no help from enabling
// irqs, unlike bh/preempt.
invoke_rcu_core();
} else {
// Enabling BH or preempt does reschedule, so...
+ // Also if no expediting or NO_HZ_FULL, slow is OK.
set_tsk_need_resched(current);
set_preempt_need_resched();
}