path: root/include/linux/slab_def.h
author     Eric Dumazet <eric.dumazet@gmail.com>   2011-07-20 19:04:23 +0200
committer  Pekka Enberg <penberg@kernel.org>       2011-07-20 20:27:56 +0300
commit     b56efcf0a45aa7fc32de90d5f9838541082fbc19 (patch)
tree       e1de7ba007c79042d30454c11b78aa942e2666b3 /include/linux/slab_def.h
parent     c225150b86fef9f7663219b6e9f7606ea1607312 (diff)
download   linux-stable-b56efcf0a45aa7fc32de90d5f9838541082fbc19.tar.gz
           linux-stable-b56efcf0a45aa7fc32de90d5f9838541082fbc19.tar.bz2
           linux-stable-b56efcf0a45aa7fc32de90d5f9838541082fbc19.zip
slab: shrink sizeof(struct kmem_cache)
Reduce high order allocations for some setups.
(NR_CPUS=4096 -> we need 64KB per kmem_cache struct)

We now allocate exact needed size (using nr_cpu_ids and nr_node_ids)

This also makes code a bit smaller on x86_64, since some field offsets
are less than the 127 limit :

Before patch :

# size mm/slab.o
   text    data     bss     dec     hex filename
  22605  361665      32  384302   5dd2e mm/slab.o

After patch :

# size mm/slab.o
   text    data     bss     dec     hex filename
  22349  353473    8224  384046   5dc2e mm/slab.o

CC: Andrew Morton <akpm@linux-foundation.org>
Reported-by: Konstantin Khlebnikov <khlebnikov@openvz.org>
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Acked-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
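To illustrate the idea behind the patch, here is a minimal, self-contained C sketch (not the kernel's code; fake_cache, NR_CPUS_MAX and the local nr_cpu_ids are illustrative stand-ins): the per-cpu pointer array stays as the last member of the struct, and the allocation is trimmed to the number of CPUs actually possible instead of the compile-time maximum.

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-ins; the kernel derives nr_cpu_ids from the possible-cpu mask. */
#define NR_CPUS_MAX 4096
static const int nr_cpu_ids = 8;

struct fake_cache {
	unsigned int batchcount;
	unsigned int limit;
	/* ... other fields ... */
	void *array[NR_CPUS_MAX];	/* declared with the maximum, allocated trimmed */
};

int main(void)
{
	/* Allocate only what is needed: everything up to array[], plus
	 * nr_cpu_ids pointers, instead of NR_CPUS_MAX pointers. */
	size_t sz = offsetof(struct fake_cache, array) +
		    nr_cpu_ids * sizeof(void *);
	struct fake_cache *c = malloc(sz);

	if (!c)
		return 1;
	printf("full struct: %zu bytes, trimmed allocation: %zu bytes\n",
	       sizeof(struct fake_cache), sz);
	free(c);
	return 0;
}

As the comment added in the diff below notes, the statically defined cache_cache keeps the full [NR_CPUS] declaration so its storage is reserved at compile time; only dynamically created caches benefit from the exact-size allocation.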
Diffstat (limited to 'include/linux/slab_def.h')
-rw-r--r--   include/linux/slab_def.h   26
1 file changed, 13 insertions(+), 13 deletions(-)
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index d7f63112f63c..d00e0bacda93 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -24,21 +24,19 @@
*/
struct kmem_cache {
-/* 1) per-cpu data, touched during every alloc/free */
- struct array_cache *array[NR_CPUS];
-/* 2) Cache tunables. Protected by cache_chain_mutex */
+/* 1) Cache tunables. Protected by cache_chain_mutex */
unsigned int batchcount;
unsigned int limit;
unsigned int shared;
unsigned int buffer_size;
u32 reciprocal_buffer_size;
-/* 3) touched by every alloc & free from the backend */
+/* 2) touched by every alloc & free from the backend */
unsigned int flags; /* constant flags */
unsigned int num; /* # of objs per slab */
-/* 4) cache_grow/shrink */
+/* 3) cache_grow/shrink */
/* order of pgs per slab (2^n) */
unsigned int gfporder;
@@ -54,11 +52,11 @@ struct kmem_cache {
/* constructor func */
void (*ctor)(void *obj);
-/* 5) cache creation/removal */
+/* 4) cache creation/removal */
const char *name;
struct list_head next;
-/* 6) statistics */
+/* 5) statistics */
#ifdef CONFIG_DEBUG_SLAB
unsigned long num_active;
unsigned long num_allocations;
@@ -85,16 +83,18 @@ struct kmem_cache {
int obj_size;
#endif /* CONFIG_DEBUG_SLAB */
+/* 6) per-cpu/per-node data, touched during every alloc/free */
/*
- * We put nodelists[] at the end of kmem_cache, because we want to size
- * this array to nr_node_ids slots instead of MAX_NUMNODES
+ * We put array[] at the end of kmem_cache, because we want to size
+ * this array to nr_cpu_ids slots instead of NR_CPUS
* (see kmem_cache_init())
- * We still use [MAX_NUMNODES] and not [1] or [0] because cache_cache
- * is statically defined, so we reserve the max number of nodes.
+ * We still use [NR_CPUS] and not [1] or [0] because cache_cache
+ * is statically defined, so we reserve the max number of cpus.
*/
- struct kmem_list3 *nodelists[MAX_NUMNODES];
+ struct kmem_list3 **nodelists;
+ struct array_cache *array[NR_CPUS];
/*
- * Do not add fields after nodelists[]
+ * Do not add fields after array[]
*/
};
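
For the nodelists change above (a fixed [MAX_NUMNODES] array becoming a plain pointer), here is a similarly hedged, self-contained sketch of sizing that array with the runtime node count; kmem_list3 below is only a stand-in shape, not the kernel definition, and the real allocation happens inside mm/slab.c.

#include <stdlib.h>

/* Stand-in shape only; the real struct kmem_list3 lives in mm/slab.c. */
struct kmem_list3 {
	unsigned long free_objects;
};

int main(void)
{
	int nr_node_ids = 2;	/* illustrative; the kernel computes this at boot */

	/* The embedded nodelists[MAX_NUMNODES] array becomes a pointer that is
	 * allocated with exactly nr_node_ids slots. */
	struct kmem_list3 **nodelists =
		calloc(nr_node_ids, sizeof(*nodelists));

	if (!nodelists)
		return 1;
	/* ... per-node list setup would go here ... */
	free(nodelists);
	return 0;
}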