author | Eric Dumazet <dada1@cosmosbay.com> | 2009-01-06 14:41:04 -0800
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-01-06 15:59:13 -0800
commit | 179f7ebff6be45738c6e2fa68c8d2cc5c2c6308e (patch)
tree | 3d48b5f825cfa29f5b39656503c5157872454e9f /lib/percpu_counter.c
parent | e3d5a27d5862b6425d0879272e24abecf7245105 (diff)
download | linux-179f7ebff6be45738c6e2fa68c8d2cc5c2c6308e.tar.gz linux-179f7ebff6be45738c6e2fa68c8d2cc5c2c6308e.tar.bz2 linux-179f7ebff6be45738c6e2fa68c8d2cc5c2c6308e.zip
percpu_counter: FBC_BATCH should be a variable
For NR_CPUS >= 16, FBC_BATCH is defined as 2*NR_CPUS.
Considering that more and more distros ship kernels with high NR_CPUS values, it
makes sense to use a more sensible value for FBC_BATCH and to stop deriving it from NR_CPUS.
A sensible value is 2*num_online_cpus(), with a minimum of 32 (the
minimum helps branch prediction in __percpu_counter_add()).
We already have a hotcpu notifier, so we can adjust the batch value dynamically as CPUs come and go.
We rename FBC_BATCH to percpu_counter_batch since it is no longer a
constant.
Signed-off-by: Eric Dumazet <dada1@cosmosbay.com>
Acked-by: David S. Miller <davem@davemloft.net>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
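For context, and not part of this patch: a simplified sketch of how the renamed batch value is consumed on the counter fast path. The header-side wiring (include/linux/percpu_counter.h) is outside this diffstat, so the percpu_counter_add() wrapper shown here, and the exact body of __percpu_counter_add(), are approximations of the code of this era rather than the literal source.

```c
/* Sketch only: approximates the percpu_counter fast path, not verbatim source. */
#include <linux/percpu_counter.h>

extern int percpu_counter_batch;	/* introduced by this patch */

/* Assumed wrapper: forwards the global batch as the fold threshold. */
static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
	__percpu_counter_add(fbc, amount, percpu_counter_batch);
}

void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
{
	s64 count;
	s32 *pcount;
	int cpu = get_cpu();

	pcount = per_cpu_ptr(fbc->counters, cpu);
	count = *pcount + amount;
	if (count >= batch || count <= -batch) {
		/* Per-cpu delta reached the batch: fold it into the shared count. */
		spin_lock(&fbc->lock);
		fbc->count += count;
		*pcount = 0;
		spin_unlock(&fbc->lock);
	} else {
		*pcount = count;
	}
	put_cpu();
}
```

A larger batch means fewer acquisitions of fbc->lock but a larger worst-case error in the approximate read; scaling the batch with num_online_cpus() instead of NR_CPUS keeps that error proportional to the CPUs actually present.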
Diffstat (limited to 'lib/percpu_counter.c')
-rw-r--r-- | lib/percpu_counter.c | 18
1 file changed, 14 insertions, 4 deletions
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index b255b939bc1b..a60bd8046095 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -9,10 +9,8 @@
 #include <linux/cpu.h>
 #include <linux/module.h>
 
-#ifdef CONFIG_HOTPLUG_CPU
 static LIST_HEAD(percpu_counters);
 static DEFINE_MUTEX(percpu_counters_lock);
-#endif
 
 void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
 {
@@ -111,13 +109,24 @@ void percpu_counter_destroy(struct percpu_counter *fbc)
 }
 EXPORT_SYMBOL(percpu_counter_destroy);
 
-#ifdef CONFIG_HOTPLUG_CPU
+int percpu_counter_batch __read_mostly = 32;
+EXPORT_SYMBOL(percpu_counter_batch);
+
+static void compute_batch_value(void)
+{
+        int nr = num_online_cpus();
+
+        percpu_counter_batch = max(32, nr*2);
+}
+
 static int __cpuinit percpu_counter_hotcpu_callback(struct notifier_block *nb,
                                         unsigned long action, void *hcpu)
 {
+#ifdef CONFIG_HOTPLUG_CPU
         unsigned int cpu;
         struct percpu_counter *fbc;
 
+        compute_batch_value();
         if (action != CPU_DEAD)
                 return NOTIFY_OK;
 
@@ -134,13 +143,14 @@ static int __cpuinit percpu_counter_hotcpu_callback(struct notifier_block *nb,
                 spin_unlock_irqrestore(&fbc->lock, flags);
         }
         mutex_unlock(&percpu_counters_lock);
+#endif
         return NOTIFY_OK;
 }
 
 static int __init percpu_counter_startup(void)
 {
+        compute_batch_value();
         hotcpu_notifier(percpu_counter_hotcpu_callback, 0);
         return 0;
 }
 module_init(percpu_counter_startup);
-#endif
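A quick worked example of the new sizing rule, as a standalone userspace model (not part of the patch). The helper below mirrors compute_batch_value() from the diff, but takes the CPU count as a parameter so several values can be printed at once:

```c
#include <stdio.h>

/* Models percpu_counter_batch = max(32, 2 * num_online_cpus()). */
static int model_batch(int online_cpus)
{
	int batch = 2 * online_cpus;

	return batch < 32 ? 32 : batch;		/* enforce the minimum of 32 */
}

int main(void)
{
	int cpus[] = { 1, 4, 16, 64, 256 };

	for (unsigned int i = 0; i < sizeof(cpus) / sizeof(cpus[0]); i++)
		printf("%3d online CPUs -> batch %d\n",
		       cpus[i], model_batch(cpus[i]));
	return 0;
}
```

Compare this with the old scheme described in the commit message, where a kernel built with NR_CPUS=256 used FBC_BATCH=512 no matter how many CPUs were actually online; after this patch, a 4-core machine running the same kernel uses a batch of 32.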