author    Neeraj Upadhyay <quic_neeraju@quicinc.com>  2022-02-22 11:39:01 +0530
committer Paul E. McKenney <paulmck@kernel.org>       2022-04-11 15:31:03 -0700
commit    0b56f953908a751716f2c8f907942674b60d8db5 (patch)
tree      57793e5db7561ed1bb53e5a41bfea5cb3dbe0f42 /kernel/rcu
parent    cbdc98e93efa7bbf6f2fcd68c73df82c37b5fa65 (diff)
srcu: Ensure snp nodes tree is fully initialized before traversal
For configurations where the snp node tree is not initialized at init time (added in subsequent commits), srcu_funnel_gp_start() and srcu_funnel_exp_start() can potentially traverse and observe the snp nodes' transient (uninitialized) states. This can happen when init_srcu_struct_nodes()'s initialization of sdp->mynode races with srcu_funnel_gp_start() and srcu_funnel_exp_start().

Consider the case below, where srcu_funnel_gp_start() observes sdp->mynode to be non-NULL and uses an uninitialized sdp->grpmask:

        P1                                      P2

init_srcu_struct_nodes()                void srcu_funnel_gp_start(...)
{
  for_each_possible_cpu(cpu) {
    ...
    sdp->mynode = &snp_first[...];
                                          for (snp = sdp->mynode;...)
                                            struct srcu_node *snp_leaf =
                                              smp_load_acquire(&sdp->mynode)
                                            ...
                                            if (snp_leaf) {
                                              for (snp = snp_leaf; ...)
                                                ...
                                                if (snp == snp_leaf)
                                                  snp->srcu_data_have_cbs[idx] |=
                                                    sdp->grpmask;
    sdp->grpmask = 1 <<
      (cpu - sdp->mynode->grplo);
  }
}

Similarly, init_srcu_struct_nodes() and srcu_funnel_exp_start() can race, where srcu_funnel_exp_start() could observe the state of an snp lock before spin_lock_init():

        P1                                      P2

init_srcu_struct_nodes()                void srcu_funnel_exp_start(...)
{
  srcu_for_each_node_breadth_first(ssp, snp) {
                                          for (; ...) {
                                            spin_lock_...(snp, )
    spin_lock_init(&ACCESS_PRIVATE(snp, lock));
    ...
  }
  for_each_possible_cpu(cpu) {
    ...
    sdp->mynode = &snp_first[...];

To avoid these issues, ensure that the snp node tree initialization is complete, that is, that the SRCU_SIZE_WAIT_BARRIER srcu_size_state has been reached, before traversing the tree. Given that srcu_funnel_gp_start() and srcu_funnel_exp_start() are called within SRCU read-side critical sections, this check is safe in the sense that all callbacks are enqueued on CPU0's srcu_cblist until SRCU_SIZE_WAIT_CALL is entered, and these read-side critical sections (containing srcu_funnel_gp_start() and srcu_funnel_exp_start()) must complete before SRCU_SIZE_WAIT_CALL is reached.

Signed-off-by: Neeraj Upadhyay <quic_neeraju@quicinc.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
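Editor's note: the patch gates the funnel traversals on ssp->srcu_size_state. As a reading aid, here is a minimal paraphrased sketch of the release/acquire pairing the new check relies on. The publisher half, init_srcu_struct_nodes() publishing the completed tree with a store-release of ->srcu_size_state, is assumed from the related sized-SRCU patches and is not part of this diff; the function names below are illustrative, not verbatim kernel code.

/* Publisher side (assumed): build the snp tree, then publish it. */
static void init_srcu_struct_nodes_sketch(struct srcu_struct *ssp)
{
        /*
         * ... allocate and link the snp tree, spin_lock_init() each node,
         * set sdp->mynode and compute sdp->grpmask for every CPU ...
         */

        /* All of the initialization above is ordered before this store. */
        smp_store_release(&ssp->srcu_size_state, SRCU_SIZE_WAIT_BARRIER);
}

/*
 * Consumer side (what this patch adds): dereference sdp->mynode only
 * once the tree has been published.
 */
static struct srcu_node *snp_leaf_if_ready(struct srcu_struct *ssp,
                                           struct srcu_data *sdp)
{
        /* Acquire pairs with the store-release above. */
        if (smp_load_acquire(&ssp->srcu_size_state) < SRCU_SIZE_WAIT_BARRIER)
                return NULL;    /* Tree may be half-built: take the tree-less path. */
        return sdp->mynode;     /* Safe: node fields (lock, grplo, ...) are initialized. */
}

Note also that in srcu_gp_start_if_needed() the patch samples ->srcu_size_state once into ss_state, so both decisions (CPU0 vs. per-CPU callback list at SRCU_SIZE_WAIT_CALL, and NULL vs. sdp->mynode at SRCU_SIZE_WAIT_BARRIER) are made against a single consistent snapshot.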
Diffstat (limited to 'kernel/rcu')
-rw-r--r--  kernel/rcu/srcutree.c  |  22
1 file changed, 19 insertions(+), 3 deletions(-)
diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c
index 155c430c6a73..2e7ed67646db 100644
--- a/kernel/rcu/srcutree.c
+++ b/kernel/rcu/srcutree.c
@@ -705,9 +705,15 @@ static void srcu_funnel_gp_start(struct srcu_struct *ssp, struct srcu_data *sdp,
 	int idx = rcu_seq_ctr(s) % ARRAY_SIZE(sdp->mynode->srcu_have_cbs);
 	unsigned long sgsne;
 	struct srcu_node *snp;
-	struct srcu_node *snp_leaf = smp_load_acquire(&sdp->mynode);
+	struct srcu_node *snp_leaf;
 	unsigned long snp_seq;
 
+	/* Ensure that snp node tree is fully initialized before traversing it */
+	if (smp_load_acquire(&ssp->srcu_size_state) < SRCU_SIZE_WAIT_BARRIER)
+		snp_leaf = NULL;
+	else
+		snp_leaf = sdp->mynode;
+
 	if (snp_leaf)
 		/* Each pass through the loop does one level of the srcu_node tree. */
 		for (snp = snp_leaf; snp != NULL; snp = snp->srcu_parent) {
@@ -889,10 +895,13 @@ static unsigned long srcu_gp_start_if_needed(struct srcu_struct *ssp,
 	bool needgp = false;
 	unsigned long s;
 	struct srcu_data *sdp;
+	struct srcu_node *sdp_mynode;
+	int ss_state;
 
 	check_init_srcu_struct(ssp);
 	idx = srcu_read_lock(ssp);
-	if (smp_load_acquire(&ssp->srcu_size_state) < SRCU_SIZE_WAIT_CALL)
+	ss_state = smp_load_acquire(&ssp->srcu_size_state);
+	if (ss_state < SRCU_SIZE_WAIT_CALL)
 		sdp = per_cpu_ptr(ssp->sda, 0);
 	else
 		sdp = raw_cpu_ptr(ssp->sda);
@@ -912,10 +921,17 @@ static unsigned long srcu_gp_start_if_needed(struct srcu_struct *ssp,
 			needexp = true;
 	}
 	spin_unlock_irqrestore_rcu_node(sdp, flags);
+
+	/* Ensure that snp node tree is fully initialized before traversing it */
+	if (ss_state < SRCU_SIZE_WAIT_BARRIER)
+		sdp_mynode = NULL;
+	else
+		sdp_mynode = sdp->mynode;
+
 	if (needgp)
 		srcu_funnel_gp_start(ssp, sdp, s, do_norm);
 	else if (needexp)
-		srcu_funnel_exp_start(ssp, smp_load_acquire(&sdp->mynode), s);
+		srcu_funnel_exp_start(ssp, sdp_mynode, s);
 	srcu_read_unlock(ssp, idx);
 	return s;
 }
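Editor's note: for readers less familiar with the kernel primitives, the same publication pattern can be reproduced with plain C11 atomics, where memory_order_release and memory_order_acquire play the roles of smp_store_release() and smp_load_acquire(). The program below is a self-contained userspace analogy with made-up names (node, size_state, STATE_WAIT_BARRIER), not kernel code; it builds with "cc -pthread".

/* Userspace analogy of the publish/consume pattern used by this patch. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct node { int grplo; };

static struct node leaf;          /* stands in for a leaf srcu_node     */
static struct node *mynode;       /* stands in for sdp->mynode          */
static atomic_int size_state;     /* stands in for ssp->srcu_size_state */

#define STATE_WAIT_BARRIER 2      /* arbitrary "tree is ready" value    */

static void *publisher(void *arg)
{
        (void)arg;
        leaf.grplo = 42;          /* initialize the "tree" ...           */
        mynode = &leaf;
        /* ... then publish it: everything above is ordered before this store. */
        atomic_store_explicit(&size_state, STATE_WAIT_BARRIER, memory_order_release);
        return NULL;
}

static void *consumer(void *arg)
{
        (void)arg;
        /* Acquire load: touch mynode only if the state says the tree is ready. */
        if (atomic_load_explicit(&size_state, memory_order_acquire) < STATE_WAIT_BARRIER)
                puts("tree not published yet, taking the tree-less path");
        else
                printf("tree published, leaf grplo = %d\n", mynode->grplo);
        return NULL;
}

int main(void)
{
        pthread_t p, c;

        pthread_create(&p, NULL, publisher, NULL);
        pthread_create(&c, NULL, consumer, NULL);
        pthread_join(p, NULL);
        pthread_join(c, NULL);
        return 0;
}

As in the patch, the consumer never dereferences the possibly half-initialized pointer unless the acquire load has already observed the "ready" state published by the release store.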