-rw-r--r--  mm/slab.c        | 72
-rw-r--r--  mm/slab.h        |  6
-rw-r--r--  mm/slab_common.c |  1
-rw-r--r--  mm/slob.c        |  8
-rw-r--r--  mm/slub.c        |  2
5 files changed, 52 insertions, 37 deletions
diff --git a/mm/slab.c b/mm/slab.c
index 49a74b349e39..ef94799a1aa5 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -578,9 +578,9 @@ static struct arraycache_init initarray_generic =
     { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
 
 /* internal cache of cache description objs */
-static struct kmem_list3 *cache_cache_nodelists[MAX_NUMNODES];
-static struct kmem_cache cache_cache = {
-	.nodelists = cache_cache_nodelists,
+static struct kmem_list3 *kmem_cache_nodelists[MAX_NUMNODES];
+static struct kmem_cache kmem_cache_boot = {
+	.nodelists = kmem_cache_nodelists,
 	.batchcount = 1,
 	.limit = BOOT_CPUCACHE_ENTRIES,
 	.shared = 1,
@@ -1594,15 +1594,17 @@ void __init kmem_cache_init(void)
 	int order;
 	int node;
 
+	kmem_cache = &kmem_cache_boot;
+
 	if (num_possible_nodes() == 1)
 		use_alien_caches = 0;
 
 	for (i = 0; i < NUM_INIT_LISTS; i++) {
 		kmem_list3_init(&initkmem_list3[i]);
 		if (i < MAX_NUMNODES)
-			cache_cache.nodelists[i] = NULL;
+			kmem_cache->nodelists[i] = NULL;
 	}
-	set_up_list3s(&cache_cache, CACHE_CACHE);
+	set_up_list3s(kmem_cache, CACHE_CACHE);
 
 	/*
 	 * Fragmentation resistance on low memory - only use bigger
@@ -1614,9 +1616,9 @@ void __init kmem_cache_init(void)
 
 	/* Bootstrap is tricky, because several objects are allocated
 	 * from caches that do not exist yet:
-	 * 1) initialize the cache_cache cache: it contains the struct
-	 *    kmem_cache structures of all caches, except cache_cache itself:
-	 *    cache_cache is statically allocated.
+	 * 1) initialize the kmem_cache cache: it contains the struct
+	 *    kmem_cache structures of all caches, except kmem_cache itself:
+	 *    kmem_cache is statically allocated.
 	 *    Initially an __init data area is used for the head array and the
 	 *    kmem_list3 structures, it's replaced with a kmalloc allocated
 	 *    array at the end of the bootstrap.
@@ -1625,43 +1627,43 @@ void __init kmem_cache_init(void)
 	 *    An __init data area is used for the head array.
 	 * 3) Create the remaining kmalloc caches, with minimally sized
 	 *    head arrays.
-	 * 4) Replace the __init data head arrays for cache_cache and the first
+	 * 4) Replace the __init data head arrays for kmem_cache and the first
 	 *    kmalloc cache with kmalloc allocated arrays.
-	 * 5) Replace the __init data for kmem_list3 for cache_cache and
+	 * 5) Replace the __init data for kmem_list3 for kmem_cache and
 	 *    the other cache's with kmalloc allocated memory.
 	 * 6) Resize the head arrays of the kmalloc caches to their final sizes.
 	 */
 
 	node = numa_mem_id();
 
-	/* 1) create the cache_cache */
+	/* 1) create the kmem_cache */
 	INIT_LIST_HEAD(&slab_caches);
-	list_add(&cache_cache.list, &slab_caches);
-	cache_cache.colour_off = cache_line_size();
-	cache_cache.array[smp_processor_id()] = &initarray_cache.cache;
-	cache_cache.nodelists[node] = &initkmem_list3[CACHE_CACHE + node];
+	list_add(&kmem_cache->list, &slab_caches);
+	kmem_cache->colour_off = cache_line_size();
+	kmem_cache->array[smp_processor_id()] = &initarray_cache.cache;
+	kmem_cache->nodelists[node] = &initkmem_list3[CACHE_CACHE + node];
 
 	/*
 	 * struct kmem_cache size depends on nr_node_ids & nr_cpu_ids
 	 */
-	cache_cache.size = offsetof(struct kmem_cache, array[nr_cpu_ids]) +
+	kmem_cache->size = offsetof(struct kmem_cache, array[nr_cpu_ids]) +
 				  nr_node_ids * sizeof(struct kmem_list3 *);
-	cache_cache.object_size = cache_cache.size;
-	cache_cache.size = ALIGN(cache_cache.size,
+	kmem_cache->object_size = kmem_cache->size;
+	kmem_cache->size = ALIGN(kmem_cache->object_size,
 					cache_line_size());
-	cache_cache.reciprocal_buffer_size =
-		reciprocal_value(cache_cache.size);
+	kmem_cache->reciprocal_buffer_size =
+		reciprocal_value(kmem_cache->size);
 
 	for (order = 0; order < MAX_ORDER; order++) {
-		cache_estimate(order, cache_cache.size,
-			cache_line_size(), 0, &left_over, &cache_cache.num);
-		if (cache_cache.num)
+		cache_estimate(order, kmem_cache->size,
+			cache_line_size(), 0, &left_over, &kmem_cache->num);
+		if (kmem_cache->num)
 			break;
 	}
-	BUG_ON(!cache_cache.num);
-	cache_cache.gfporder = order;
-	cache_cache.colour = left_over / cache_cache.colour_off;
-	cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) +
+	BUG_ON(!kmem_cache->num);
+	kmem_cache->gfporder = order;
+	kmem_cache->colour = left_over / kmem_cache->colour_off;
+	kmem_cache->slab_size = ALIGN(kmem_cache->num * sizeof(kmem_bufctl_t) +
 				      sizeof(struct slab), cache_line_size());
 
 	/* 2+3) create the kmalloc caches */
@@ -1728,15 +1730,15 @@ void __init kmem_cache_init(void)
 
 		ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);
 
-		BUG_ON(cpu_cache_get(&cache_cache) != &initarray_cache.cache);
-		memcpy(ptr, cpu_cache_get(&cache_cache),
+		BUG_ON(cpu_cache_get(kmem_cache) != &initarray_cache.cache);
+		memcpy(ptr, cpu_cache_get(kmem_cache),
 		       sizeof(struct arraycache_init));
 		/*
 		 * Do not assume that spinlocks can be initialized via memcpy:
 		 */
 		spin_lock_init(&ptr->lock);
 
-		cache_cache.array[smp_processor_id()] = ptr;
+		kmem_cache->array[smp_processor_id()] = ptr;
 
 		ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);
 
@@ -1757,7 +1759,7 @@ void __init kmem_cache_init(void)
 		int nid;
 
 		for_each_online_node(nid) {
-			init_list(&cache_cache, &initkmem_list3[CACHE_CACHE + nid], nid);
+			init_list(kmem_cache, &initkmem_list3[CACHE_CACHE + nid], nid);
 
 			init_list(malloc_sizes[INDEX_AC].cs_cachep,
 				  &initkmem_list3[SIZE_AC + nid], nid);
@@ -2223,7 +2225,7 @@ void __kmem_cache_destroy(struct kmem_cache *cachep)
 			kfree(l3);
 		}
 	}
-	kmem_cache_free(&cache_cache, cachep);
+	kmem_cache_free(kmem_cache, cachep);
 }
 
@@ -2473,7 +2475,7 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
 		gfp = GFP_NOWAIT;
 
 	/* Get cache's description obj. */
-	cachep = kmem_cache_zalloc(&cache_cache, gfp);
+	cachep = kmem_cache_zalloc(kmem_cache, gfp);
 	if (!cachep)
 		return NULL;
 
@@ -2531,7 +2533,7 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
 	if (!cachep->num) {
 		printk(KERN_ERR
 		       "kmem_cache_create: couldn't create cache %s.\n", name);
-		kmem_cache_free(&cache_cache, cachep);
+		kmem_cache_free(kmem_cache, cachep);
 		return NULL;
 	}
 	slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t)
@@ -3299,7 +3301,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
 static bool slab_should_failslab(struct kmem_cache *cachep, gfp_t flags)
 {
-	if (cachep == &cache_cache)
+	if (cachep == kmem_cache)
 		return false;
 
 	return should_failslab(cachep->object_size, flags, cachep->flags);
diff --git a/mm/slab.h b/mm/slab.h
index 07a537ed5da3..6724aa6f662f 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -25,8 +25,14 @@ extern enum slab_state slab_state;
 
 /* The slab cache mutex protects the management structures during changes */
 extern struct mutex slab_mutex;
+
+/* The list of all slab caches on the system */
 extern struct list_head slab_caches;
 
+/* The slab cache that manages slab cache information */
+extern struct kmem_cache *kmem_cache;
+
+/* Functions provided by the slab allocators */
 struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
 	size_t align, unsigned long flags, void (*ctor)(void *));
diff --git a/mm/slab_common.c b/mm/slab_common.c
index a1c4f0b5aaed..5374150f5486 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -22,6 +22,7 @@
 enum slab_state slab_state;
 LIST_HEAD(slab_caches);
 DEFINE_MUTEX(slab_mutex);
+struct kmem_cache *kmem_cache;
 
 #ifdef CONFIG_DEBUG_VM
 static int kmem_cache_sanity_check(const char *name, size_t size)
diff --git a/mm/slob.c b/mm/slob.c
index 289be4f4681a..7d272c3dcc08 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -622,8 +622,16 @@ int kmem_cache_shrink(struct kmem_cache *d)
 }
 EXPORT_SYMBOL(kmem_cache_shrink);
 
+struct kmem_cache kmem_cache_boot = {
+	.name = "kmem_cache",
+	.size = sizeof(struct kmem_cache),
+	.flags = SLAB_PANIC,
+	.align = ARCH_KMALLOC_MINALIGN,
+};
+
 void __init kmem_cache_init(void)
 {
+	kmem_cache = &kmem_cache_boot;
 	slab_state = UP;
 }
 
diff --git a/mm/slub.c b/mm/slub.c
index 724adea34384..e0d1e0470309 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3221,8 +3221,6 @@ void __kmem_cache_destroy(struct kmem_cache *s)
 struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];
 EXPORT_SYMBOL(kmalloc_caches);
 
-static struct kmem_cache *kmem_cache;
-
 #ifdef CONFIG_ZONE_DMA
 static struct kmem_cache *kmalloc_dma_caches[SLUB_PAGE_SHIFT];
 #endif
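The change above boils down to one pattern, now shared by slab, slob and slub: a statically allocated "cache of caches" (kmem_cache_boot) that the common kmem_cache pointer is aimed at in kmem_cache_init(), so every later cache creation can allocate its struct kmem_cache descriptor from it. Below is a minimal userspace C sketch of that bootstrap pattern; boot_cache, meta_cache and cache_create() are illustrative stand-ins, not kernel APIs.

/*
 * Sketch of the boot-cache pattern: a static descriptor cache that the
 * shared pointer is aimed at before any dynamic allocation is possible.
 */
#include <stdio.h>
#include <stdlib.h>

struct cache {
	const char *name;
	size_t object_size;	/* size of one object managed by this cache */
};

/* Statically allocated boot instance, playing the role of kmem_cache_boot. */
static struct cache boot_cache = {
	.name = "cache",
	.object_size = sizeof(struct cache),
};

/* Shared pointer, playing the role of the common 'kmem_cache'. */
static struct cache *meta_cache;

/* Analog of kmem_cache_init(): point the shared name at the boot instance. */
static void cache_init(void)
{
	meta_cache = &boot_cache;
}

/* Analog of cache creation: the descriptor itself comes from meta_cache. */
static struct cache *cache_create(const char *name, size_t object_size)
{
	/* Stand-in for kmem_cache_zalloc(kmem_cache, gfp). */
	struct cache *c = calloc(1, meta_cache->object_size);

	if (!c)
		return NULL;
	c->name = name;
	c->object_size = object_size;
	return c;
}

int main(void)
{
	cache_init();

	struct cache *files = cache_create("files", 128);
	if (!files)
		return 1;
	printf("%s: %zu-byte objects, descriptor allocated from '%s'\n",
	       files->name, files->object_size, meta_cache->name);
	free(files);
	return 0;
}

The shared pointer is declared in mm/slab.h and defined in mm/slab_common.c, while each allocator assigns it in its own kmem_cache_init(), so code common to the three allocators can refer to the descriptor cache without knowing which allocator is compiled in.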