author     Eric Dumazet <dada1@cosmosbay.com>              2009-01-06 14:41:04 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2009-01-06 15:59:13 -0800
commit     179f7ebff6be45738c6e2fa68c8d2cc5c2c6308e (patch)
tree       3d48b5f825cfa29f5b39656503c5157872454e9f /include/linux/percpu_counter.h
parent     e3d5a27d5862b6425d0879272e24abecf7245105 (diff)
percpu_counter: FBC_BATCH should be a variable
For NR_CPUS >= 16, FBC_BATCH is 2*NR_CPUS. Considering that more and more
distros are built with high NR_CPUS values, it makes sense to use a more
sensible value for FBC_BATCH and get rid of NR_CPUS.

A sensible value is 2*num_online_cpus(), with a minimum of 32 (this minimum
helps branch prediction in __percpu_counter_add()).

We already have a hotcpu notifier, so we can adjust FBC_BATCH dynamically.

We rename FBC_BATCH to percpu_counter_batch since it is no longer a constant.

Signed-off-by: Eric Dumazet <dada1@cosmosbay.com>
Acked-by: David S. Miller <davem@davemloft.net>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
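The lib/percpu_counter.c side of the change is outside the diffstat below. As a
rough, illustrative sketch only (the helper name compute_batch_value() and the
exact wiring into the hotcpu notifier are assumptions, not taken from this page),
the runtime batch value could be maintained along these lines:

    #include <linux/kernel.h>
    #include <linux/cpu.h>
    #include <linux/percpu_counter.h>

    /* Runtime batch value, replacing the old compile-time FBC_BATCH. */
    int percpu_counter_batch __read_mostly = 32;   /* safe default before CPUs are counted */
    EXPORT_SYMBOL(percpu_counter_batch);

    /*
     * Sketch: recompute the batch as 2*num_online_cpus(), but never below 32 so
     * the threshold comparison in __percpu_counter_add() stays well predicted on
     * small machines.  Intended to be called from the existing hotcpu notifier
     * whenever a CPU comes online or goes offline.
     */
    static void compute_batch_value(void)
    {
    	int nr = num_online_cpus();

    	percpu_counter_batch = max(32, nr * 2);
    }
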
Diffstat (limited to 'include/linux/percpu_counter.h')
-rw-r--r--  include/linux/percpu_counter.h  8

1 file changed, 2 insertions(+), 6 deletions(-)
diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h
index 9007ccdfc112..99de7a31bab8 100644
--- a/include/linux/percpu_counter.h
+++ b/include/linux/percpu_counter.h
@@ -24,11 +24,7 @@ struct percpu_counter {
 	s32 *counters;
 };
 
-#if NR_CPUS >= 16
-#define FBC_BATCH (NR_CPUS*2)
-#else
-#define FBC_BATCH (NR_CPUS*4)
-#endif
+extern int percpu_counter_batch;
 
 int percpu_counter_init(struct percpu_counter *fbc, s64 amount);
 int percpu_counter_init_irq(struct percpu_counter *fbc, s64 amount);
@@ -39,7 +35,7 @@ s64 __percpu_counter_sum(struct percpu_counter *fbc);
 
 static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
 {
-	__percpu_counter_add(fbc, amount, FBC_BATCH);
+	__percpu_counter_add(fbc, amount, percpu_counter_batch);
 }
 
 static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
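
For context on what the batch argument controls: __percpu_counter_add() only
takes the spinlock and folds a CPU's local delta into the shared s64 count once
that delta reaches the batch threshold. The following is a simplified sketch of
that fold logic, not part of this patch, to show why the threshold matters:

    #include <linux/percpu.h>
    #include <linux/percpu_counter.h>
    #include <linux/smp.h>
    #include <linux/spinlock.h>

    void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
    {
    	s64 count;
    	s32 *pcount;
    	int cpu = get_cpu();

    	pcount = per_cpu_ptr(fbc->counters, cpu);
    	count = *pcount + amount;
    	if (count >= batch || count <= -batch) {
    		/* Local delta reached the batch: fold it into the shared count. */
    		spin_lock(&fbc->lock);
    		fbc->count += count;
    		*pcount = 0;
    		spin_unlock(&fbc->lock);
    	} else {
    		/* Stay on the cheap, lock-free per-CPU path. */
    		*pcount = count;
    	}
    	put_cpu();
    }

A larger batch means fewer spinlock acquisitions but a bigger worst-case error
in percpu_counter_read(); tying it to 2*num_online_cpus() rather than 2*NR_CPUS
keeps that error proportional to the CPUs actually present instead of the
compile-time maximum.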