author     Nikolay Borisov <nborisov@suse.com>    2017-06-20 21:01:20 +0300
committer  Tejun Heo <tj@kernel.org>              2017-06-20 15:42:32 -0400
commit     104b4e5139fe384431ac11c3b8a6cf4a529edf4a
tree       e8a0157f2294f006f31e535949327b3484a5dcb8 /lib
parent     df95e795a722892a9e0603ce4b9b62fab9f02967
percpu_counter: Rename __percpu_counter_add to percpu_counter_add_batch
Currently, percpu_counter_add is a wrapper around __percpu_counter_add,
which is preempt-safe due to its explicit calls to preempt_disable. Given
how the __ prefix is used in percpu-related interfaces, the naming
unfortunately creates the false sense that __percpu_counter_add is
less safe than percpu_counter_add. In terms of context-safety,
they're equivalent. The only difference is that the __ version takes
a batch parameter.
Make this a bit more explicit by just renaming __percpu_counter_add to
percpu_counter_add_batch.
This patch doesn't cause any functional changes.
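For orientation, the non-batch helper stays a thin wrapper that supplies a
default batch size. A minimal sketch of that post-rename relationship,
assuming the default batch is the variable percpu_counter_batch (which is
not shown in this patch):

/*
 * Sketch only: this patch renames the batch-taking function; the default
 * batch name percpu_counter_batch is an assumption, not part of the diff.
 */
static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
	/* Same context-safety as the batch variant; only the batch differs. */
	percpu_counter_add_batch(fbc, amount, percpu_counter_batch);
}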
tj: Minor updates to patch description for clarity. Cosmetic
indentation updates.
Signed-off-by: Nikolay Borisov <nborisov@suse.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Chris Mason <clm@fb.com>
Cc: Josef Bacik <jbacik@fb.com>
Cc: David Sterba <dsterba@suse.com>
Cc: Darrick J. Wong <darrick.wong@oracle.com>
Cc: Jan Kara <jack@suse.com>
Cc: Jens Axboe <axboe@fb.com>
Cc: linux-mm@kvack.org
Cc: "David S. Miller" <davem@davemloft.net>
Diffstat (limited to 'lib')
 lib/flex_proportions.c | 6 +++---
 lib/percpu_counter.c   | 4 ++--
 2 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/lib/flex_proportions.c b/lib/flex_proportions.c
index a71cf1bdd4c9..2cc1f94e03a1 100644
--- a/lib/flex_proportions.c
+++ b/lib/flex_proportions.c
@@ -207,7 +207,7 @@ static void fprop_reflect_period_percpu(struct fprop_global *p,
 		if (val < (nr_cpu_ids * PROP_BATCH))
 			val = percpu_counter_sum(&pl->events);
 
-		__percpu_counter_add(&pl->events,
+		percpu_counter_add_batch(&pl->events,
 			-val + (val >> (period-pl->period)), PROP_BATCH);
 	} else
 		percpu_counter_set(&pl->events, 0);
@@ -219,7 +219,7 @@ static void fprop_reflect_period_percpu(struct fprop_global *p,
 void __fprop_inc_percpu(struct fprop_global *p, struct fprop_local_percpu *pl)
 {
 	fprop_reflect_period_percpu(p, pl);
-	__percpu_counter_add(&pl->events, 1, PROP_BATCH);
+	percpu_counter_add_batch(&pl->events, 1, PROP_BATCH);
 	percpu_counter_add(&p->events, 1);
 }
 
@@ -267,6 +267,6 @@ void __fprop_inc_percpu_max(struct fprop_global *p,
 		return;
 	} else
 		fprop_reflect_period_percpu(p, pl);
-	__percpu_counter_add(&pl->events, 1, PROP_BATCH);
+	percpu_counter_add_batch(&pl->events, 1, PROP_BATCH);
 	percpu_counter_add(&p->events, 1);
 }
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index 9c21000df0b5..8ee7e5ec21be 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -72,7 +72,7 @@ void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
 }
 EXPORT_SYMBOL(percpu_counter_set);
 
-void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
+void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
 {
 	s64 count;
 
@@ -89,7 +89,7 @@ void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
 	}
 	preempt_enable();
 }
-EXPORT_SYMBOL(__percpu_counter_add);
+EXPORT_SYMBOL(percpu_counter_add_batch);
 
 /*
  * Add up all the per-cpu counts, return the result. This is a more accurate
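For reference, a minimal caller sketch of the renamed interface, in the
style of the flex_proportions users above. my_events, MY_BATCH,
my_counter_init and my_account_event are hypothetical names for
illustration, not part of this patch:

#include <linux/gfp.h>
#include <linux/percpu_counter.h>

#define MY_BATCH 32			/* assumed subsystem-specific fold threshold */

static struct percpu_counter my_events;	/* hypothetical counter */

static int my_counter_init(void)
{
	/* Allocate the per-cpu slots and start the shared count at 0. */
	return percpu_counter_init(&my_events, 0, GFP_KERNEL);
}

static void my_account_event(void)
{
	/*
	 * Per-CPU deltas accumulate locally until they reach MY_BATCH and
	 * are then folded into the shared count, mirroring how the
	 * flex_proportions callers pass PROP_BATCH.
	 */
	percpu_counter_add_batch(&my_events, 1, MY_BATCH);
}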