author     Paul E. McKenney <paul.mckenney@linaro.org>    2012-05-09 15:44:42 -0700
committer  Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2012-07-02 12:34:22 -0700
commit     1d1fb395f6dbc07b36285bbedcf01a73b57f7cb5 (patch)
tree       450c8e5e2d69c6e8b6fd7820d6a7dc112d60325b /kernel/rcutree.c
parent     3f5d3ea64f1783f0d4ea0d35890ae3297f045a8b (diff)
rcu: Add ACCESS_ONCE() to ->qlen accesses
The _rcu_barrier() function accesses other CPUs' rcu_data structures'
->qlen fields without benefit of locking. This commit therefore adds
the required ACCESS_ONCE() wrappers around the accesses and updates
that need them.
ACCESS_ONCE() is not needed when a CPU accesses its own ->qlen, or
in code that cannot run while _rcu_barrier() is sampling ->qlen fields.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
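For context, the ACCESS_ONCE() macro of this kernel era is simply a volatile cast, which forces the compiler to emit exactly one load or store for the marked access instead of tearing, fusing, or re-reading it. The following is a minimal user-space sketch of the pattern the commit message describes, not the kernel code itself: the structure and function names (fake_rdp, enqueue_callback, barrier_sample_qlen) are hypothetical, the per-CPU machinery is faked with a plain array, and the macro relies on GCC's typeof extension just as the kernel's definition does.

#include <stdio.h>

/* Same shape as the 2012-era kernel definition: a volatile cast that
 * forces the compiler to perform exactly one access to the object. */
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

/* Hypothetical stand-in for the per-CPU rcu_data structure. */
struct fake_rdp {
	long qlen;	/* callbacks queued on this "CPU" */
	long qlen_lazy;
};

static struct fake_rdp per_cpu_rdp[4];

/* Updater side: each "CPU" modifies only its own counter, but the store
 * goes through ACCESS_ONCE() so a concurrent lockless reader never sees
 * a torn or compiler-invented intermediate value. */
static void enqueue_callback(int cpu)
{
	ACCESS_ONCE(per_cpu_rdp[cpu].qlen)++;
}

/* Reader side: a _rcu_barrier()-style pass that samples every CPU's
 * ->qlen without holding any lock, which is why the updates above are
 * wrapped as well. */
static long barrier_sample_qlen(void)
{
	long sum = 0;

	for (int cpu = 0; cpu < 4; cpu++)
		sum += ACCESS_ONCE(per_cpu_rdp[cpu].qlen);
	return sum;
}

int main(void)
{
	enqueue_callback(0);
	enqueue_callback(2);
	printf("sampled qlen total: %ld\n", barrier_sample_qlen());
	return 0;
}

Note that ACCESS_ONCE() does not make the increment atomic; as the commit message says, each CPU is the only updater of its own ->qlen, so the wrapper is only there to keep the compiler from mangling the accesses that the lockless _rcu_barrier() sampling races against.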
Diffstat (limited to 'kernel/rcutree.c')
-rw-r--r--  kernel/rcutree.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 81e0394e46af..89addada3e3a 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -1350,7 +1350,7 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
 	rsp->qlen += rdp->qlen;
 	rdp->n_cbs_orphaned += rdp->qlen;
 	rdp->qlen_lazy = 0;
-	rdp->qlen = 0;
+	ACCESS_ONCE(rdp->qlen) = 0;
 }
 
 /*
@@ -1600,7 +1600,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
 	}
 	smp_mb(); /* List handling before counting for rcu_barrier(). */
 	rdp->qlen_lazy -= count_lazy;
-	rdp->qlen -= count;
+	ACCESS_ONCE(rdp->qlen) -= count;
 	rdp->n_cbs_invoked += count;
 
 	/* Reinstate batch limit if we have worked down the excess. */
@@ -1889,7 +1889,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
 	rdp = this_cpu_ptr(rsp->rda);
 
 	/* Add the callback to our list. */
-	rdp->qlen++;
+	ACCESS_ONCE(rdp->qlen)++;
 	if (lazy)
 		rdp->qlen_lazy++;
 	else
@@ -2423,7 +2423,7 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
 	rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
 	init_callback_list(rdp);
 	rdp->qlen_lazy = 0;
-	rdp->qlen = 0;
+	ACCESS_ONCE(rdp->qlen) = 0;
 	rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
 	WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
 	WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);