author	Paul E. McKenney <paulmck@kernel.org>	2021-05-24 11:26:53 -0700
committer	Paul E. McKenney <paulmck@kernel.org>	2021-07-20 13:43:44 -0700
commit	45f4b4a202c03de14e315aaae3d305820cd12221
tree	6452dd2f25e25c186e2656c80400946e0be0f026 /kernel
parent	2734d6c1b1a089fb593ef6a23d4b70903526fe0c
rcu-tasks: Add comments explaining task_struct strategy
Accesses to task_struct structures must be either protected by RCU or by get_task_struct(). Tasks trace RCU uses these in a non-obvious combination, in conjunction with an IPI handler. This commit therefore adds comments explaining this usage.

Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
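For readers unfamiliar with the two protection schemes named in the commit message, here is a minimal, self-contained sketch (not part of this patch) of each. The helper names my_show_comm() and my_grab_task() are invented for illustration.

#include <linux/sched.h>	/* struct task_struct, find_task_by_vpid() */
#include <linux/sched/task.h>	/* get_task_struct(), put_task_struct() */
#include <linux/rcupdate.h>	/* rcu_read_lock(), rcu_read_unlock() */
#include <linux/printk.h>

/* Option 1: rely on RCU; the task pointer is valid only inside the section. */
static void my_show_comm(pid_t pid)
{
	struct task_struct *t;

	rcu_read_lock();
	t = find_task_by_vpid(pid);
	if (t)
		pr_info("pid %d is %s\n", pid, t->comm);
	rcu_read_unlock();	/* t must not be dereferenced after this point. */
}

/* Option 2: take a reference so the task outlives the RCU read-side section. */
static struct task_struct *my_grab_task(pid_t pid)
{
	struct task_struct *t;

	rcu_read_lock();
	t = find_task_by_vpid(pid);
	if (t)
		get_task_struct(t);	/* Caller must later put_task_struct(). */
	rcu_read_unlock();
	return t;
}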
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/rcu/tasks.h	11
1 file changed, 10 insertions(+), 1 deletion(-)
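The first hunk below documents that trc_read_check_handler() is invoked by IPI on the CPU of a running task, so that the task's own accesses to the shared reader state need no extra memory-ordering. As a rough illustration of that general pattern only (not the actual RCU Tasks Trace code), here is a sketch using smp_call_function_single() with the invented names my_check_handler() and my_sample_task().

#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/printk.h>

/* Runs on the target CPU via IPI; hypothetical stand-in for the real handler. */
static void my_check_handler(void *info)
{
	struct task_struct *t = info;

	/* On the task's own CPU, its state can be read without extra barriers. */
	if (t == current)
		pr_info("sampled %s/%d in place\n", t->comm, t->pid);
}

/* Ask CPU 'cpu' to run the handler and wait for it to finish. */
static void my_sample_task(struct task_struct *t, int cpu)
{
	smp_call_function_single(cpu, my_check_handler, t, 1);
}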
diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h
index 8536c55df514..6a117375a62a 100644
--- a/kernel/rcu/tasks.h
+++ b/kernel/rcu/tasks.h
@@ -785,7 +785,10 @@ EXPORT_SYMBOL_GPL(show_rcu_tasks_rude_gp_kthread);
// set that task's .need_qs flag so that task's next outermost
// rcu_read_unlock_trace() will report the quiescent state (in which
// case the count of readers is incremented). If both attempts fail,
-// the task is added to a "holdout" list.
+// the task is added to a "holdout" list. Note that IPIs are used
+// to invoke trc_read_check_handler() in the context of running tasks
+// in order to avoid ordering overhead on common-case shared-variable
+// accesses.
// rcu_tasks_trace_postscan():
// Initialize state and attempt to identify an immediate quiescent
// state as above (but only for idle tasks), unblock CPU-hotplug
@@ -994,6 +997,12 @@ static void trc_wait_for_one_reader(struct task_struct *t,
}
put_task_struct(t);
+ // If this task is not yet on the holdout list, then we are in
+ // an RCU read-side critical section. Otherwise, the invocation of
+ // rcu_add_holdout() that added it to the list did the necessary
+ // get_task_struct(). Either way, the task cannot be freed out
+ // from under this code.
+
// If currently running, send an IPI, either way, add to list.
trc_add_holdout(t, bhp);
if (task_curr(t) &&
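The comment added in the second hunk points out that rcu_add_holdout() takes the get_task_struct() reference that keeps a task pinned while it sits on the holdout list. A simplified sketch of that ownership convention follows, using a hypothetical wrapper structure (my_holdout) and helpers (my_add_holdout(), my_del_holdout()) rather than the real per-task list field.

#include <linux/list.h>
#include <linux/sched.h>
#include <linux/sched/task.h>	/* get_task_struct(), put_task_struct() */
#include <linux/slab.h>

/* Hypothetical wrapper; the real code links tasks via a field in task_struct. */
struct my_holdout {
	struct list_head list;
	struct task_struct *task;
};

static LIST_HEAD(my_holdouts);

/* Adding a task to the list transfers a reference to the list entry. */
static int my_add_holdout(struct task_struct *t)
{
	struct my_holdout *h = kmalloc(sizeof(*h), GFP_KERNEL);

	if (!h)
		return -ENOMEM;
	get_task_struct(t);		/* The list now pins the task. */
	h->task = t;
	list_add(&h->list, &my_holdouts);
	return 0;
}

/* Removing the entry drops the list's reference; the task may then be freed. */
static void my_del_holdout(struct my_holdout *h)
{
	list_del(&h->list);
	put_task_struct(h->task);
	kfree(h);
}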