author	Christoph Lameter <cl@linux.com>	2012-09-05 00:20:33 +0000
committer	Pekka Enberg <penberg@kernel.org>	2012-09-05 12:00:36 +0300
commit	9b030cb865f137e1574596983face2a07e41e8b2 (patch)
tree	51caca89688beefd5ba910069d5c5754140906ae /mm/slab.c
parent	945cf2b6199be70ff03102b9e642c3bb05d01de9 (diff)
mm/sl[aou]b: Use "kmem_cache" name for slab cache with kmem_cache struct
Make all allocators use the "kmem_cache" slabname for the "kmem_cache" structure.

Reviewed-by: Glauber Costa <glommer@parallels.com>
Reviewed-by: Joonsoo Kim <js1304@gmail.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
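For readers skimming the diff below: the SLAB-specific static descriptor cache_cache becomes kmem_cache_boot, a shared kmem_cache pointer is aimed at it at the top of kmem_cache_init(), and every later use (kmem_cache_zalloc(), kmem_cache_free(), the failslab check) goes through that pointer. What follows is a minimal user-space sketch of that pattern, not kernel code: the struct is cut down to two illustrative fields and calloc() stands in for kmem_cache_zalloc().

/*
 * Simplified model of the bootstrap pattern in this patch.
 * Assumptions: reduced field set, calloc() in place of
 * kmem_cache_zalloc(), plain C in user space.
 */
#include <stdio.h>
#include <stdlib.h>

struct kmem_cache {
	const char *name;
	int batchcount;
	int limit;
};

/* statically allocated boot descriptor, analogous to kmem_cache_boot */
static struct kmem_cache kmem_cache_boot = {
	.name = "kmem_cache",
	.batchcount = 1,
	.limit = 16,
};

/* the shared descriptor cache pointer; set up first during init */
static struct kmem_cache *kmem_cache;

static void kmem_cache_init(void)
{
	/* mirrors the first hunk: kmem_cache = &kmem_cache_boot; */
	kmem_cache = &kmem_cache_boot;
}

/*
 * Later cache creation allocates descriptors "from" kmem_cache rather
 * than a file-local cache_cache (hypothetical helper for illustration).
 */
static struct kmem_cache *kmem_cache_create_sketch(const char *name)
{
	struct kmem_cache *cachep = calloc(1, sizeof(*cachep));

	if (!cachep)
		return NULL;
	cachep->name = name;
	return cachep;
}

int main(void)
{
	kmem_cache_init();
	struct kmem_cache *c = kmem_cache_create_sketch("my_cache");

	if (c) {
		printf("boot cache: %s, new cache: %s\n",
		       kmem_cache->name, c->name);
		free(c);
	}
	return 0;
}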
Diffstat (limited to 'mm/slab.c')
-rw-r--r--	mm/slab.c	72
1 file changed, 37 insertions(+), 35 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index 49a74b349e39..ef94799a1aa5 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -578,9 +578,9 @@ static struct arraycache_init initarray_generic =
{ {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
/* internal cache of cache description objs */
-static struct kmem_list3 *cache_cache_nodelists[MAX_NUMNODES];
-static struct kmem_cache cache_cache = {
- .nodelists = cache_cache_nodelists,
+static struct kmem_list3 *kmem_cache_nodelists[MAX_NUMNODES];
+static struct kmem_cache kmem_cache_boot = {
+ .nodelists = kmem_cache_nodelists,
.batchcount = 1,
.limit = BOOT_CPUCACHE_ENTRIES,
.shared = 1,
@@ -1594,15 +1594,17 @@ void __init kmem_cache_init(void)
int order;
int node;
+ kmem_cache = &kmem_cache_boot;
+
if (num_possible_nodes() == 1)
use_alien_caches = 0;
for (i = 0; i < NUM_INIT_LISTS; i++) {
kmem_list3_init(&initkmem_list3[i]);
if (i < MAX_NUMNODES)
- cache_cache.nodelists[i] = NULL;
+ kmem_cache->nodelists[i] = NULL;
}
- set_up_list3s(&cache_cache, CACHE_CACHE);
+ set_up_list3s(kmem_cache, CACHE_CACHE);
/*
* Fragmentation resistance on low memory - only use bigger
@@ -1614,9 +1616,9 @@ void __init kmem_cache_init(void)
/* Bootstrap is tricky, because several objects are allocated
* from caches that do not exist yet:
- * 1) initialize the cache_cache cache: it contains the struct
- * kmem_cache structures of all caches, except cache_cache itself:
- * cache_cache is statically allocated.
+ * 1) initialize the kmem_cache cache: it contains the struct
+ * kmem_cache structures of all caches, except kmem_cache itself:
+ * kmem_cache is statically allocated.
* Initially an __init data area is used for the head array and the
* kmem_list3 structures, it's replaced with a kmalloc allocated
* array at the end of the bootstrap.
@@ -1625,43 +1627,43 @@ void __init kmem_cache_init(void)
* An __init data area is used for the head array.
* 3) Create the remaining kmalloc caches, with minimally sized
* head arrays.
- * 4) Replace the __init data head arrays for cache_cache and the first
+ * 4) Replace the __init data head arrays for kmem_cache and the first
* kmalloc cache with kmalloc allocated arrays.
- * 5) Replace the __init data for kmem_list3 for cache_cache and
+ * 5) Replace the __init data for kmem_list3 for kmem_cache and
* the other cache's with kmalloc allocated memory.
* 6) Resize the head arrays of the kmalloc caches to their final sizes.
*/
node = numa_mem_id();
- /* 1) create the cache_cache */
+ /* 1) create the kmem_cache */
INIT_LIST_HEAD(&slab_caches);
- list_add(&cache_cache.list, &slab_caches);
- cache_cache.colour_off = cache_line_size();
- cache_cache.array[smp_processor_id()] = &initarray_cache.cache;
- cache_cache.nodelists[node] = &initkmem_list3[CACHE_CACHE + node];
+ list_add(&kmem_cache->list, &slab_caches);
+ kmem_cache->colour_off = cache_line_size();
+ kmem_cache->array[smp_processor_id()] = &initarray_cache.cache;
+ kmem_cache->nodelists[node] = &initkmem_list3[CACHE_CACHE + node];
/*
* struct kmem_cache size depends on nr_node_ids & nr_cpu_ids
*/
- cache_cache.size = offsetof(struct kmem_cache, array[nr_cpu_ids]) +
+ kmem_cache->size = offsetof(struct kmem_cache, array[nr_cpu_ids]) +
nr_node_ids * sizeof(struct kmem_list3 *);
- cache_cache.object_size = cache_cache.size;
- cache_cache.size = ALIGN(cache_cache.size,
+ kmem_cache->object_size = kmem_cache->size;
+ kmem_cache->size = ALIGN(kmem_cache->object_size,
cache_line_size());
- cache_cache.reciprocal_buffer_size =
- reciprocal_value(cache_cache.size);
+ kmem_cache->reciprocal_buffer_size =
+ reciprocal_value(kmem_cache->size);
for (order = 0; order < MAX_ORDER; order++) {
- cache_estimate(order, cache_cache.size,
- cache_line_size(), 0, &left_over, &cache_cache.num);
- if (cache_cache.num)
+ cache_estimate(order, kmem_cache->size,
+ cache_line_size(), 0, &left_over, &kmem_cache->num);
+ if (kmem_cache->num)
break;
}
- BUG_ON(!cache_cache.num);
- cache_cache.gfporder = order;
- cache_cache.colour = left_over / cache_cache.colour_off;
- cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) +
+ BUG_ON(!kmem_cache->num);
+ kmem_cache->gfporder = order;
+ kmem_cache->colour = left_over / kmem_cache->colour_off;
+ kmem_cache->slab_size = ALIGN(kmem_cache->num * sizeof(kmem_bufctl_t) +
sizeof(struct slab), cache_line_size());
/* 2+3) create the kmalloc caches */
@@ -1728,15 +1730,15 @@ void __init kmem_cache_init(void)
ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);
- BUG_ON(cpu_cache_get(&cache_cache) != &initarray_cache.cache);
- memcpy(ptr, cpu_cache_get(&cache_cache),
+ BUG_ON(cpu_cache_get(kmem_cache) != &initarray_cache.cache);
+ memcpy(ptr, cpu_cache_get(kmem_cache),
sizeof(struct arraycache_init));
/*
* Do not assume that spinlocks can be initialized via memcpy:
*/
spin_lock_init(&ptr->lock);
- cache_cache.array[smp_processor_id()] = ptr;
+ kmem_cache->array[smp_processor_id()] = ptr;
ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);
@@ -1757,7 +1759,7 @@ void __init kmem_cache_init(void)
int nid;
for_each_online_node(nid) {
- init_list(&cache_cache, &initkmem_list3[CACHE_CACHE + nid], nid);
+ init_list(kmem_cache, &initkmem_list3[CACHE_CACHE + nid], nid);
init_list(malloc_sizes[INDEX_AC].cs_cachep,
&initkmem_list3[SIZE_AC + nid], nid);
@@ -2223,7 +2225,7 @@ void __kmem_cache_destroy(struct kmem_cache *cachep)
kfree(l3);
}
}
- kmem_cache_free(&cache_cache, cachep);
+ kmem_cache_free(kmem_cache, cachep);
}
@@ -2473,7 +2475,7 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
gfp = GFP_NOWAIT;
/* Get cache's description obj. */
- cachep = kmem_cache_zalloc(&cache_cache, gfp);
+ cachep = kmem_cache_zalloc(kmem_cache, gfp);
if (!cachep)
return NULL;
@@ -2531,7 +2533,7 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
if (!cachep->num) {
printk(KERN_ERR
"kmem_cache_create: couldn't create cache %s.\n", name);
- kmem_cache_free(&cache_cache, cachep);
+ kmem_cache_free(kmem_cache, cachep);
return NULL;
}
slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t)
@@ -3299,7 +3301,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
static bool slab_should_failslab(struct kmem_cache *cachep, gfp_t flags)
{
- if (cachep == &cache_cache)
+ if (cachep == kmem_cache)
return false;
return should_failslab(cachep->object_size, flags, cachep->flags);