author     Mateusz Guzik <mjguzik@gmail.com>       2023-08-23 07:06:08 +0200
committer  Dennis Zhou <dennis@kernel.org>         2023-08-25 08:06:53 -0700
commit     c439d5e8a0deb7310b5bb4e5f2fe47c40ff5297f
tree       ec3fa53c5487f15036a02f6eb29d70896302baa5 /include/linux/percpu_counter.h
parent     f7d77dfc91f747f64cb00884fd6d7940c3b49fca
pcpcntr: add group allocation/free
Allocations and frees are globally serialized on the pcpu lock (and the
CPU hotplug lock if enabled, which is the case on Debian).
At least one frequent consumer allocates 4 back-to-back counters (and
frees them in the same manner), exacerbating the problem.
While this does not fully remedy the scalability problem, it is a step
in that direction and provides immediate relief.
Signed-off-by: Mateusz Guzik <mjguzik@gmail.com>
Reviewed-by: Dennis Zhou <dennis@kernel.org>
Reviewed-by: Vegard Nossum <vegard.nossum@oracle.com>
Link: https://lore.kernel.org/r/20230823050609.2228718-2-mjguzik@gmail.com
[Dennis: reflowed a few lines]
Signed-off-by: Dennis Zhou <dennis@kernel.org>
Diffstat (limited to 'include/linux/percpu_counter.h')
-rw-r--r--  include/linux/percpu_counter.h | 41 ++++++++++++++++++++++++++++++++++-------
1 file changed, 34 insertions(+), 7 deletions(-)
diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h
index 75b73c83bc9d..d01351b1526f 100644
--- a/include/linux/percpu_counter.h
+++ b/include/linux/percpu_counter.h
@@ -30,17 +30,28 @@ struct percpu_counter {
 
 extern int percpu_counter_batch;
 
-int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
-			  struct lock_class_key *key);
+int __percpu_counter_init_many(struct percpu_counter *fbc, s64 amount,
+			       gfp_t gfp, u32 nr_counters,
+			       struct lock_class_key *key);
 
-#define percpu_counter_init(fbc, value, gfp)				\
+#define percpu_counter_init_many(fbc, value, gfp, nr_counters)		\
 	({								\
 		static struct lock_class_key __key;			\
 									\
-		__percpu_counter_init(fbc, value, gfp, &__key);		\
+		__percpu_counter_init_many(fbc, value, gfp, nr_counters,\
+					   &__key);			\
 	})
 
-void percpu_counter_destroy(struct percpu_counter *fbc);
+
+#define percpu_counter_init(fbc, value, gfp)				\
+	percpu_counter_init_many(fbc, value, gfp, 1)
+
+void percpu_counter_destroy_many(struct percpu_counter *fbc, u32 nr_counters);
+static inline void percpu_counter_destroy(struct percpu_counter *fbc)
+{
+	percpu_counter_destroy_many(fbc, 1);
+}
+
 void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
 void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount,
 			      s32 batch);
@@ -116,11 +127,27 @@ struct percpu_counter {
 	s64 count;
 };
 
+static inline int percpu_counter_init_many(struct percpu_counter *fbc,
+					   s64 amount, gfp_t gfp,
+					   u32 nr_counters)
+{
+	u32 i;
+
+	for (i = 0; i < nr_counters; i++)
+		fbc[i].count = amount;
+
+	return 0;
+}
+
 static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount,
 				      gfp_t gfp)
 {
-	fbc->count = amount;
-	return 0;
+	return percpu_counter_init_many(fbc, amount, gfp, 1);
+}
+
+static inline void percpu_counter_destroy_many(struct percpu_counter *fbc,
+					       u32 nr_counters)
+{
+}
 
 static inline void percpu_counter_destroy(struct percpu_counter *fbc)
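
For illustration only: a sketch of how a consumer holding four adjacent
counters (the back-to-back pattern mentioned in the commit message) could
adopt the batched API. The foo_stats struct, NR_FOO_STATS, and the helper
names below are hypothetical; only the percpu_counter_init_many() and
percpu_counter_destroy_many() calls come from the declarations in the diff
above.

#include <linux/gfp.h>
#include <linux/percpu_counter.h>

#define NR_FOO_STATS	4	/* hypothetical consumer with 4 counters */

struct foo_stats {
	struct percpu_counter stat[NR_FOO_STATS];
};

static int foo_stats_init(struct foo_stats *fs)
{
	/*
	 * One batched call: the pcpu allocator lock (and the CPU hotplug
	 * lock, where enabled) is taken once for all four counters instead
	 * of once per individual percpu_counter_init() call.
	 */
	return percpu_counter_init_many(fs->stat, 0, GFP_KERNEL,
					NR_FOO_STATS);
}

static void foo_stats_destroy(struct foo_stats *fs)
{
	/* Likewise, one batched free instead of four serialized ones. */
	percpu_counter_destroy_many(fs->stat, NR_FOO_STATS);
}

On !CONFIG_SMP builds the _many variants compile to the trivial loop and
empty stub shown in the second hunk, so callers need no conditional code.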