author	Christoph Lameter <clameter@sgi.com>	2007-06-16 10:16:13 -0700
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-06-16 13:16:16 -0700
commit	4b356be019d0c28f67af02809df7072c1c8f7d32 (patch)
tree	03c340e3168a1cae72fd7c96855382ac0c195da6 /mm
parent	8dab5241d06bfc9ee141ea78c56cde5070d7460d (diff)
SLUB: minimum alignment fixes
If ARCH_KMALLOC_MINALIGN is set to a value greater than 8 (SLUB's smallest
kmalloc cache) then SLUB may generate duplicate slabs in sysfs (yes again)
because the object size is padded to reach ARCH_KMALLOC_MINALIGN. Thus the
sizes of the small slabs are all the same.

No arch sets ARCH_KMALLOC_MINALIGN larger than 8, except mips, which for
some reason wants a 128 byte alignment.

This patch increases the size of the smallest cache if ARCH_KMALLOC_MINALIGN
is greater than 8. In that case more and more of the smallest caches are
disabled.

If we do that, the count of the active general caches that is displayed on
boot is no longer correct, since we may skip elements of the kmalloc array.
So count them separately.

This approach was tested by Haavard yesterday.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: Haavard Skinnemoen <hskinnemoen@atmel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
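For illustration only (not part of the patch), a minimal user-space C sketch
of the padding arithmetic the first paragraph describes; the ALIGN() macro
mirrors the kernel's power-of-two round-up, and the size list is hypothetical.
With a MIPS-style 128-byte minimum alignment, every kmalloc cache of 128 bytes
or less pads to the same object size:

#include <stdio.h>

/* Power-of-two round-up, same arithmetic as the kernel's ALIGN() */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int minalign = 128;	/* MIPS-style ARCH_KMALLOC_MINALIGN */
	/* small kmalloc cache sizes, including the odd 96/192 ones */
	unsigned int sizes[] = { 8, 16, 32, 64, 96, 128, 192, 256 };
	unsigned int i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("kmalloc-%u -> padded object size %u\n",
		       sizes[i], ALIGN(sizes[i], minalign));
	return 0;
}

Everything from kmalloc-8 through kmalloc-128 pads to 128 bytes, so SLUB would
try to register several sysfs entries with identical object sizes. The patch
avoids this by not creating caches smaller than KMALLOC_MIN_SIZE, and counts
only the caches it actually creates.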
Diffstat (limited to 'mm')
-rw-r--r--	mm/slub.c	20
1 file changed, 15 insertions(+), 5 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 2a1338c516fc..fa28b1623644 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2436,6 +2436,7 @@ EXPORT_SYMBOL(krealloc);
 void __init kmem_cache_init(void)
 {
 	int i;
+	int caches = 0;
 
 #ifdef CONFIG_NUMA
 	/*
@@ -2446,20 +2447,29 @@ void __init kmem_cache_init(void)
 	create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
 		sizeof(struct kmem_cache_node), GFP_KERNEL);
 	kmalloc_caches[0].refcount = -1;
+	caches++;
 
 #endif
 
 	/* Able to allocate the per node structures */
 	slab_state = PARTIAL;
 
 	/* Caches that are not of the two-to-the-power-of size */
-	create_kmalloc_cache(&kmalloc_caches[1],
+	if (KMALLOC_MIN_SIZE <= 64) {
+		create_kmalloc_cache(&kmalloc_caches[1],
 				"kmalloc-96", 96, GFP_KERNEL);
-	create_kmalloc_cache(&kmalloc_caches[2],
+		caches++;
+	}
+	if (KMALLOC_MIN_SIZE <= 128) {
+		create_kmalloc_cache(&kmalloc_caches[2],
 				"kmalloc-192", 192, GFP_KERNEL);
+		caches++;
+	}
 
-	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++)
+	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
 		create_kmalloc_cache(&kmalloc_caches[i],
 			"kmalloc", 1 << i, GFP_KERNEL);
+		caches++;
+	}
 
 	slab_state = UP;
@@ -2476,8 +2486,8 @@ void __init kmem_cache_init(void)
 				nr_cpu_ids * sizeof(struct page *);
 
 	printk(KERN_INFO "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
-		" Processors=%d, Nodes=%d\n",
-		KMALLOC_SHIFT_HIGH, cache_line_size(),
+		" CPUs=%d, Nodes=%d\n",
+		caches, cache_line_size(),
 		slub_min_order, slub_max_order, slub_min_objects,
 		nr_cpu_ids, nr_node_ids);
 }
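For reference, a hypothetical example of the boot line the changed printk
would produce (the numbers here are made up; only the format comes from the
diff above):

SLUB: Genslabs=13, HWalign=64, Order=0-2, MinObjects=4, CPUs=4, Nodes=1

Genslabs now reports the separately maintained 'caches' counter rather than
KMALLOC_SHIFT_HIGH, so the skipped small caches no longer inflate the count,
and the 'Processors' label becomes 'CPUs'.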