author    Paul E. McKenney <paulmck@kernel.org>    2024-02-01 06:10:26 -0800
committer Boqun Feng <boqun.feng@gmail.com>        2024-02-25 14:27:49 -0800
commit  0bb11a372fc8d7006b4d0f42a2882939747bdbff (patch)
tree    b87f28feefac58a52950abe40bdf1901bc7b32bb /kernel/rcu
parent  1612160b91272f5b1596f499584d6064bf5be794 (diff)
rcu-tasks: Maintain real-time response in rcu_tasks_postscan()
The current code will scan the entirety of each per-CPU list of exiting
tasks in ->rtp_exit_list with interrupts disabled.  This is normally just
fine, because each CPU typically won't have very many tasks in this state.
However, if a large number of tasks block late in do_exit(), these lists
could be arbitrarily long.  Low probability, perhaps, but it really could
happen.

This commit therefore occasionally re-enables interrupts while traversing
these lists, inserting a dummy element to hold the current place in the
list.  In kernels built with CONFIG_PREEMPT_RT=y, this re-enabling happens
after each list element is processed, otherwise every one-to-two jiffies.

[ paulmck: Apply Frederic Weisbecker feedback. ]

Link: https://lore.kernel.org/all/ZdeI_-RfdLR8jlsm@localhost.localdomain/
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Sebastian Siewior <bigeasy@linutronix.de>
Cc: Anna-Maria Behnsen <anna-maria@linutronix.de>
Cc: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Boqun Feng <boqun.feng@gmail.com>
Diffstat (limited to 'kernel/rcu')
-rw-r--r--  kernel/rcu/tasks.h  22
1 file changed, 21 insertions, 1 deletion
diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h
index c61dc92537db..147b5945d67a 100644
--- a/kernel/rcu/tasks.h
+++ b/kernel/rcu/tasks.h
@@ -968,13 +968,33 @@ static void rcu_tasks_postscan(struct list_head *hop)
	 */

	for_each_possible_cpu(cpu) {
+		unsigned long j = jiffies + 1;
		struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rcu_tasks.rtpcpu, cpu);
		struct task_struct *t;
+		struct task_struct *t1;
+		struct list_head tmp;

		raw_spin_lock_irq_rcu_node(rtpcp);
-		list_for_each_entry(t, &rtpcp->rtp_exit_list, rcu_tasks_exit_list)
+		list_for_each_entry_safe(t, t1, &rtpcp->rtp_exit_list, rcu_tasks_exit_list) {
			if (list_empty(&t->rcu_tasks_holdout_list))
				rcu_tasks_pertask(t, hop);
+
+			// RT kernels need frequent pauses, otherwise
+			// pause at least once per pair of jiffies.
+			if (!IS_ENABLED(CONFIG_PREEMPT_RT) && time_before(jiffies, j))
+				continue;
+
+			// Keep our place in the list while pausing.
+			// Nothing else traverses this list, so adding a
+			// bare list_head is OK.
+			list_add(&tmp, &t->rcu_tasks_exit_list);
+			raw_spin_unlock_irq_rcu_node(rtpcp);
+			cond_resched(); // For CONFIG_PREEMPT=n kernels
+			raw_spin_lock_irq_rcu_node(rtpcp);
+			t1 = list_entry(tmp.next, struct task_struct, rcu_tasks_exit_list);
+			list_del(&tmp);
+			j = jiffies + 1;
+		}
		raw_spin_unlock_irq_rcu_node(rtpcp);
	}
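
For readers unfamiliar with the bookmark-node idiom the commit message describes, the following is a minimal userspace C sketch of the same idea: while walking a lock-protected doubly-linked list, insert a dummy element to hold the current position, drop the lock so other work can run, then re-acquire the lock and resume from the dummy element's successor. This is an illustrative sketch only, not kernel code; the node layout, the scan_with_pauses() function, and the use of a pthread mutex are invented for the example, and it pauses after every element, which corresponds to the CONFIG_PREEMPT_RT=y behavior in the patch. No second thread is spawned here; the unlock/lock window simply marks where concurrent list updates would be allowed.

/*
 * Minimal userspace sketch of the bookmark-node idiom (illustrative
 * only; none of this is kernel code).
 */
#include <pthread.h>
#include <stdio.h>

struct node {
	struct node *next, *prev;
	int payload;
};

static struct node head = { &head, &head, 0 };	/* circular list sentinel */
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static void insert_after(struct node *pos, struct node *n)
{
	n->next = pos->next;
	n->prev = pos;
	pos->next->prev = n;
	pos->next = n;
}

static void remove_node(struct node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

static void scan_with_pauses(void)
{
	struct node bookmark;	/* dummy element, lives on the stack */
	struct node *n, *next;

	pthread_mutex_lock(&list_lock);
	for (n = head.next; n != &head; n = next) {
		printf("visiting %d\n", n->payload);

		/*
		 * Hold our place with the dummy element, drop the lock
		 * so other threads could modify the list, then re-acquire
		 * the lock and resume from the dummy element's successor.
		 */
		insert_after(n, &bookmark);
		pthread_mutex_unlock(&list_lock);
		/* ...concurrent additions and removals could happen here... */
		pthread_mutex_lock(&list_lock);
		next = bookmark.next;
		remove_node(&bookmark);
	}
	pthread_mutex_unlock(&list_lock);
}

int main(void)
{
	struct node a = { .payload = 1 }, b = { .payload = 2 };

	insert_after(&head, &b);
	insert_after(&head, &a);	/* list is now: 1, 2 */
	scan_with_pauses();
	return 0;
}

The sketch resumes via the bookmark's next pointer rather than the visited node's, mirroring the patch's list_entry(tmp.next, ...) after list_del(&tmp): while the lock was dropped, neighbors of the bookmark may have changed, but the bookmark itself stays a valid cursor into the list.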