author     Linus Torvalds <torvalds@linux-foundation.org>  2012-01-11 18:52:23 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-01-11 18:52:23 -0800
commit     6296e5d3c067df41980a5fd09ad4cc6765f79bb9
tree       ac10bc5321ac1d750612c0e0ae53d6c4097c5734 /mm
parent     c086ae4ed94f9a1d283318e006813268c2dbf9fc
parent     5878cf431ca7233a56819ca6970153ac0b129599
Merge branch 'slab/for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux
* 'slab/for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux:
slub: disallow changing cpu_partial from userspace for debug caches
slub: add missed accounting
slub: Extract get_freelist from __slab_alloc
slub: Switch per cpu partial page support off for debugging
slub: fix a possible memleak in __slab_alloc()
slub: fix slub_max_order Documentation
slub: add missed accounting
slab: add taint flag outputting to debug paths.
slub: add taint flag outputting to debug paths
slab: introduce slab_max_order kernel parameter
slab: rename slab_break_gfp_order to slab_max_order
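The slab_max_order parameter introduced above is parsed by the slab_max_order_setup() handler in the diff below, which clamps the value to the range [0, MAX_ORDER - 1]. A minimal usage sketch, assuming a 4 KiB base page size (the value 2 is arbitrary, chosen only for illustration):

    # Allow SLAB to use up to order-2 (16 KiB) pages for its slabs;
    # negative values fall back to 0, oversized ones to MAX_ORDER - 1.
    slab_max_order=2

As with any __setup() parameter, this is appended to the kernel command line by the boot loader.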
Diffstat (limited to 'mm')
-rw-r--r--  mm/slab.c | 39
-rw-r--r--  mm/slub.c | 77
2 files changed, 75 insertions(+), 41 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index 2acfa0d90943..f0bd7857ab3b 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -481,11 +481,13 @@ EXPORT_SYMBOL(slab_buffer_size);
 #endif
 
 /*
- * Do not go above this order unless 0 objects fit into the slab.
+ * Do not go above this order unless 0 objects fit into the slab or
+ * overridden on the command line.
  */
-#define BREAK_GFP_ORDER_HI	1
-#define BREAK_GFP_ORDER_LO	0
-static int slab_break_gfp_order = BREAK_GFP_ORDER_LO;
+#define SLAB_MAX_ORDER_HI	1
+#define SLAB_MAX_ORDER_LO	0
+static int slab_max_order = SLAB_MAX_ORDER_LO;
+static bool slab_max_order_set __initdata;
 
 /*
  * Functions for storing/retrieving the cachep and or slab from the page
@@ -854,6 +856,17 @@ static int __init noaliencache_setup(char *s)
 }
 __setup("noaliencache", noaliencache_setup);
 
+static int __init slab_max_order_setup(char *str)
+{
+	get_option(&str, &slab_max_order);
+	slab_max_order = slab_max_order < 0 ? 0 :
+				min(slab_max_order, MAX_ORDER - 1);
+	slab_max_order_set = true;
+
+	return 1;
+}
+__setup("slab_max_order=", slab_max_order_setup);
+
 #ifdef CONFIG_NUMA
 /*
  * Special reaping functions for NUMA systems called from cache_reap().
@@ -1502,10 +1515,11 @@ void __init kmem_cache_init(void)
 
 	/*
 	 * Fragmentation resistance on low memory - only use bigger
-	 * page orders on machines with more than 32MB of memory.
+	 * page orders on machines with more than 32MB of memory if
+	 * not overridden on the command line.
 	 */
-	if (totalram_pages > (32 << 20) >> PAGE_SHIFT)
-		slab_break_gfp_order = BREAK_GFP_ORDER_HI;
+	if (!slab_max_order_set && totalram_pages > (32 << 20) >> PAGE_SHIFT)
+		slab_max_order = SLAB_MAX_ORDER_HI;
 
 	/* Bootstrap is tricky, because several objects are allocated
 	 * from caches that do not exist yet:
@@ -1932,8 +1946,8 @@ static void check_poison_obj(struct kmem_cache *cachep, void *objp)
 			/* Print header */
 			if (lines == 0) {
 				printk(KERN_ERR
-					"Slab corruption: %s start=%p, len=%d\n",
-					cachep->name, realobj, size);
+					"Slab corruption (%s): %s start=%p, len=%d\n",
+					print_tainted(), cachep->name, realobj, size);
 				print_objinfo(cachep, objp, 0);
 			}
 			/* Hexdump the affected line */
@@ -2117,7 +2131,7 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
 		 * Large number of objects is good, but very large slabs are
 		 * currently bad for the gfp()s.
 		 */
-		if (gfporder >= slab_break_gfp_order)
+		if (gfporder >= slab_max_order)
 			break;
 
 		/*
@@ -3042,8 +3056,9 @@ static void check_slabp(struct kmem_cache *cachep, struct slab *slabp)
 	if (entries != cachep->num - slabp->inuse) {
 bad:
 		printk(KERN_ERR "slab: Internal list corruption detected in "
-				"cache '%s'(%d), slabp %p(%d). Hexdump:\n",
-				cachep->name, cachep->num, slabp, slabp->inuse);
+				"cache '%s'(%d), slabp %p(%d). Tainted(%s). Hexdump:\n",
+				cachep->name, cachep->num, slabp, slabp->inuse,
+				print_tainted());
 		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1,
 			slabp, sizeof(*slabp) +
 			cachep->num * sizeof(kmem_bufctl_t), 1);
diff --git a/mm/slub.c b/mm/slub.c
index d99acbf14e01..5d37b5e44140 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -570,7 +570,7 @@ static void slab_bug(struct kmem_cache *s, char *fmt, ...)
 	va_end(args);
 	printk(KERN_ERR "========================================"
 			"=====================================\n");
-	printk(KERN_ERR "BUG %s: %s\n", s->name, buf);
+	printk(KERN_ERR "BUG %s (%s): %s\n", s->name, print_tainted(), buf);
 	printk(KERN_ERR "----------------------------------------"
 			"-------------------------------------\n\n");
 }
@@ -1901,11 +1901,14 @@ static void unfreeze_partials(struct kmem_cache *s)
 			}
 
 			if (l != m) {
-				if (l == M_PARTIAL)
+				if (l == M_PARTIAL) {
 					remove_partial(n, page);
-				else
+					stat(s, FREE_REMOVE_PARTIAL);
+				} else {
 					add_partial(n, page,
 						DEACTIVATE_TO_TAIL);
+					stat(s, FREE_ADD_PARTIAL);
+				}
 
 				l = m;
 			}
@@ -2124,6 +2127,37 @@ static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
 }
 
 /*
+ * Check the page->freelist of a page and either transfer the freelist to the per cpu freelist
+ * or deactivate the page.
+ *
+ * The page is still frozen if the return value is not NULL.
+ *
+ * If this function returns NULL then the page has been unfrozen.
+ */
+static inline void *get_freelist(struct kmem_cache *s, struct page *page)
+{
+	struct page new;
+	unsigned long counters;
+	void *freelist;
+
+	do {
+		freelist = page->freelist;
+		counters = page->counters;
+		new.counters = counters;
+		VM_BUG_ON(!new.frozen);
+
+		new.inuse = page->objects;
+		new.frozen = freelist != NULL;
+
+	} while (!cmpxchg_double_slab(s, page,
+		freelist, counters,
+		NULL, new.counters,
+		"get_freelist"));
+
+	return freelist;
+}
+
+/*
  * Slow path. The lockless freelist is empty or we need to perform
  * debugging duties.
  *
@@ -2144,8 +2178,6 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 {
 	void **object;
 	unsigned long flags;
-	struct page new;
-	unsigned long counters;
 
 	local_irq_save(flags);
 #ifdef CONFIG_PREEMPT
@@ -2166,31 +2198,14 @@ redo:
 		goto new_slab;
 	}
 
-	stat(s, ALLOC_SLOWPATH);
-
-	do {
-		object = c->page->freelist;
-		counters = c->page->counters;
-		new.counters = counters;
-		VM_BUG_ON(!new.frozen);
-
-		/*
-		 * If there is no object left then we use this loop to
-		 * deactivate the slab which is simple since no objects
-		 * are left in the slab and therefore we do not need to
-		 * put the page back onto the partial list.
-		 *
-		 * If there are objects left then we retrieve them
-		 * and use them to refill the per cpu queue.
-		 */
+	/* must check again c->freelist in case of cpu migration or IRQ */
+	object = c->freelist;
+	if (object)
+		goto load_freelist;
 
-		new.inuse = c->page->objects;
-		new.frozen = object != NULL;
+	stat(s, ALLOC_SLOWPATH);
 
-	} while (!__cmpxchg_double_slab(s, c->page,
-			object, counters,
-			NULL, new.counters,
-			"__slab_alloc"));
+	object = get_freelist(s, c->page);
 
 	if (!object) {
 		c->page = NULL;
@@ -3028,7 +3043,9 @@ static int kmem_cache_open(struct kmem_cache *s,
 	 * per node list when we run out of per cpu objects. We only fetch 50%
 	 * to keep some capacity around for frees.
 	 */
-	if (s->size >= PAGE_SIZE)
+	if (kmem_cache_debug(s))
+		s->cpu_partial = 0;
+	else if (s->size >= PAGE_SIZE)
 		s->cpu_partial = 2;
 	else if (s->size >= 1024)
 		s->cpu_partial = 6;
@@ -4637,6 +4654,8 @@ static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf,
 	err = strict_strtoul(buf, 10, &objects);
 	if (err)
 		return err;
+	if (objects && kmem_cache_debug(s))
+		return -EINVAL;
 
 	s->cpu_partial = objects;
 	flush_all(s);
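The get_freelist() helper extracted above follows the standard lockless retry pattern: snapshot the page state, compute the replacement, and publish it with a compare-and-swap, looping on contention. A minimal userspace sketch of that pattern in C11 — an analogue only, since the kernel swaps freelist and counters together via cmpxchg_double_slab(), which a single atomic pointer cannot model:

    #include <stdatomic.h>
    #include <stddef.h>

    /* Sketch only: take an entire freelist in one atomic step, as
     * get_freelist() does for a slab page, retrying on contention. */
    static void *take_freelist(_Atomic(void *) *freelist)
    {
            void *old = atomic_load(freelist);

            /* On failure, atomic_compare_exchange_weak reloads the
             * current value into 'old' before the next iteration. */
            while (!atomic_compare_exchange_weak(freelist, &old, NULL))
                    ;

            return old; /* NULL means the list was already empty */
    }

The return-value convention mirrors the kernel function: a non-NULL result hands the caller every remaining object at once, while NULL tells the slow path the page has nothing left and has been unfrozen.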
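The cpu_partial_store() hunk above changes the SLUB sysfs interface: a debug cache now rejects any nonzero cpu_partial value with -EINVAL, since per-cpu partial pages bypass the checks that debugging relies on. A hedged shell sketch — which caches exist and which carry debug flags depend entirely on the running configuration:

    # Non-debug cache: still tunable as before.
    echo 4 > /sys/kernel/slab/kmalloc-64/cpu_partial

    # A cache with debug options enabled (name is hypothetical):
    echo 4 > /sys/kernel/slab/<debug-cache>/cpu_partial   # now fails: EINVAL
    echo 0 > /sys/kernel/slab/<debug-cache>/cpu_partial   # still accepted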
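Both taint patches route print_tainted() into the allocators' corruption reports, so a splat immediately shows whether the kernel was already tainted (for instance by a proprietary module or an earlier warning) when the corruption was found. From the check_poison_obj() hunk above, the header line now has the shape (placeholder values, not captured output):

    Slab corruption (<taint string>): <cache name> start=<address>, len=<bytes>

where the taint string is whatever print_tainted() returns, e.g. "Not tainted" on a clean kernel.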