author     Paul E. McKenney <paulmck@linux.vnet.ibm.com>   2017-03-21 07:28:14 -0700
committer  Paul E. McKenney <paulmck@linux.vnet.ibm.com>   2017-04-18 11:38:21 -0700
commit     031aeee000e895247eafa4233c2d193dd39d4ea2 (patch)
tree       8a8f567d15a94788150ec924c726ddc324c55ead /kernel
parent     91e27c356c96d760f5c283f05627ed976377debb (diff)
srcu: Improve rcu_seq grace-period-counter abstraction
The expedited grace-period code contains several open-coded shifts that
know the format of an rcu_seq grace-period counter, which is not
particularly good style. This commit therefore creates a new
rcu_seq_ctr() function that extracts the counter portion of the
sequence number, and an rcu_seq_state() function that extracts the low-order
state bit. This commit prepares for SRCU callback parallelization,
which will require two state bits.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
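
[Editor's note] For readers not steeped in the rcu_seq format: the low-order
bit of a sequence number records whether a grace period is in progress, and
the remaining upper bits count grace periods. The following user-space sketch
mirrors the two helpers added by this patch; the main() driver is illustrative
only and is not part of the kernel code.

	#include <stdio.h>

	#define RCU_SEQ_CTR_SHIFT	1
	#define RCU_SEQ_STATE_MASK	((1 << RCU_SEQ_CTR_SHIFT) - 1)

	/* Counter portion: the bits above the low-order state bit. */
	static unsigned long rcu_seq_ctr(unsigned long s)
	{
		return s >> RCU_SEQ_CTR_SHIFT;
	}

	/* State portion: nonzero means a grace period is in flight. */
	static int rcu_seq_state(unsigned long s)
	{
		return s & RCU_SEQ_STATE_MASK;
	}

	int main(void)
	{
		unsigned long s = 7;	/* counter 3, state 1: GP #3 in progress */

		printf("ctr=%lu state=%d\n", rcu_seq_ctr(s), rcu_seq_state(s));
		/* prints: ctr=3 state=1 */
		return 0;
	}

Keeping the shift and mask behind named helpers means the planned move to two
state bits for SRCU callback parallelization only needs RCU_SEQ_CTR_SHIFT to
change.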
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/rcu/rcu.h       29
-rw-r--r--  kernel/rcu/tree_exp.h   9
2 files changed, 29 insertions, 9 deletions
diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h
index 4303b880ac99..c62df93bfc1b 100644
--- a/kernel/rcu/rcu.h
+++ b/kernel/rcu/rcu.h
@@ -61,20 +61,41 @@
  * Grace-period counter management.
  */
 
+#define RCU_SEQ_CTR_SHIFT	1
+#define RCU_SEQ_STATE_MASK	((1 << RCU_SEQ_CTR_SHIFT) - 1)
+
+/*
+ * Return the counter portion of a sequence number previously returned
+ * by rcu_seq_snap() or rcu_seq_current().
+ */
+static inline unsigned long rcu_seq_ctr(unsigned long s)
+{
+	return s >> RCU_SEQ_CTR_SHIFT;
+}
+
+/*
+ * Return the state portion of a sequence number previously returned
+ * by rcu_seq_snap() or rcu_seq_current().
+ */
+static inline int rcu_seq_state(unsigned long s)
+{
+	return s & RCU_SEQ_STATE_MASK;
+}
+
 /* Adjust sequence number for start of update-side operation. */
 static inline void rcu_seq_start(unsigned long *sp)
 {
 	WRITE_ONCE(*sp, *sp + 1);
 	smp_mb(); /* Ensure update-side operation after counter increment. */
-	WARN_ON_ONCE(!(*sp & 0x1));
+	WARN_ON_ONCE(rcu_seq_state(*sp) != 1);
 }
 
 /* Adjust sequence number for end of update-side operation. */
 static inline void rcu_seq_end(unsigned long *sp)
 {
 	smp_mb(); /* Ensure update-side operation before counter increment. */
-	WARN_ON_ONCE(!(*sp & 0x1));
-	WRITE_ONCE(*sp, *sp + 1);
+	WARN_ON_ONCE(!rcu_seq_state(*sp));
+	WRITE_ONCE(*sp, (*sp | RCU_SEQ_STATE_MASK) + 1);
 }
 
 /* Take a snapshot of the update side's sequence number. */
@@ -82,7 +103,7 @@ static inline unsigned long rcu_seq_snap(unsigned long *sp)
 {
 	unsigned long s;
 
-	s = (READ_ONCE(*sp) + 3) & ~0x1;
+	s = (READ_ONCE(*sp) + 2 * RCU_SEQ_STATE_MASK + 1) & ~RCU_SEQ_STATE_MASK;
 	smp_mb(); /* Above access must not bleed into critical section. */
 	return s;
 }
diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
index 027e123d93c7..e513b4ab1197 100644
--- a/kernel/rcu/tree_exp.h
+++ b/kernel/rcu/tree_exp.h
@@ -292,7 +292,7 @@ static bool exp_funnel_lock(struct rcu_state *rsp, unsigned long s)
 			trace_rcu_exp_funnel_lock(rsp->name, rnp->level,
 						  rnp->grplo, rnp->grphi,
 						  TPS("wait"));
-			wait_event(rnp->exp_wq[(s >> 1) & 0x3],
+			wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
 				   sync_exp_work_done(rsp,
 						      &rdp->exp_workdone2, s));
 			return true;
@@ -534,7 +534,7 @@ static void rcu_exp_wait_wake(struct rcu_state *rsp, unsigned long s)
 			spin_unlock(&rnp->exp_lock);
 		}
 		smp_mb(); /* All above changes before wakeup. */
-		wake_up_all(&rnp->exp_wq[(rsp->expedited_sequence >> 1) & 0x3]);
+		wake_up_all(&rnp->exp_wq[rcu_seq_ctr(rsp->expedited_sequence) & 0x3]);
 	}
 	trace_rcu_exp_grace_period(rsp->name, s, TPS("endwake"));
 	mutex_unlock(&rsp->exp_wake_mutex);
@@ -612,9 +612,8 @@ static void _synchronize_rcu_expedited(struct rcu_state *rsp,
 	/* Wait for expedited grace period to complete. */
 	rdp = per_cpu_ptr(rsp->rda, raw_smp_processor_id());
 	rnp = rcu_get_root(rsp);
-	wait_event(rnp->exp_wq[(s >> 1) & 0x3],
-		   sync_exp_work_done(rsp,
-				      &rdp->exp_workdone0, s));
+	wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
+		   sync_exp_work_done(rsp, &rdp->exp_workdone0, s));
 	smp_mb(); /* Workqueue actions happen before return. */
 
 	/* Let the next expedited grace period start. */
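
[Editor's note] One detail worth spelling out: with RCU_SEQ_CTR_SHIFT == 1,
the generalized rcu_seq_snap() expression is identical to the old open-coded
one, since 2 * RCU_SEQ_STATE_MASK + 1 == 3 and ~RCU_SEQ_STATE_MASK == ~0x1.
A standalone sanity check (assumed user-space harness, not part of the patch):

	#include <assert.h>

	#define RCU_SEQ_CTR_SHIFT	1
	#define RCU_SEQ_STATE_MASK	((1 << RCU_SEQ_CTR_SHIFT) - 1)

	int main(void)
	{
		unsigned long s;

		for (s = 0; s < 64; s++) {
			unsigned long old_snap = (s + 3) & ~0x1UL;
			unsigned long new_snap = (s + 2 * RCU_SEQ_STATE_MASK + 1) &
						 ~(unsigned long)RCU_SEQ_STATE_MASK;

			/* The refactoring must not change the snapshot. */
			assert(old_snap == new_snap);
		}
		return 0;
	}

Either form rounds the snapshot past any grace period already in progress and
clears the state bit, yielding the sequence value at which a full grace period
beginning after the snapshot will have completed.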