author     Paul Mundt <lethal@linux-sh.org>    2007-07-20 10:11:58 +0900
committer  Paul Mundt <lethal@linux-sh.org>    2007-07-20 10:11:58 +0900
commit     20c2df83d25c6a95affe6157a4c9cac4cf5ffaac (patch)
tree       415c4453d2b17a50abe7a3e515177e1fa337bd67 /mm
parent     64fb98fc40738ae1a98bcea9ca3145b89fb71524 (diff)
mm: Remove slab destructors from kmem_cache_create().
Slab destructors have not been supported since Christoph's
c59def9f222d44bb7e2f0a559f2906191a0862d7 change. Passing one has been
a BUG() for both slab and slub, and slob never supported them either.
This rips out support for the dtor pointer from kmem_cache_create()
completely and fixes up every single callsite in the kernel (there were
about 224, not including the slab allocator definitions themselves,
or the documentation references).
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
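
To illustrate the call-site change, here is a minimal sketch mirroring the
mm/mempolicy.c hunk below (the surrounding numa_policy_init() context is
omitted); only the trailing dtor argument disappears:

	/* Before: kmem_cache_create() took a dtor as its last argument,
	 * which callers always passed as NULL. */
	policy_cache = kmem_cache_create("numa_policy",
					 sizeof(struct mempolicy),
					 0, SLAB_PANIC, NULL, NULL);

	/* After: the dtor argument is gone; only the ctor remains
	 * (NULL here, since numa_policy has no constructor). */
	policy_cache = kmem_cache_create("numa_policy",
					 sizeof(struct mempolicy),
					 0, SLAB_PANIC, NULL);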
Diffstat (limited to 'mm')

 -rw-r--r--  mm/mempolicy.c |  4
 -rw-r--r--  mm/rmap.c      |  2
 -rw-r--r--  mm/shmem.c     |  2
 -rw-r--r--  mm/slab.c      | 17
 -rw-r--r--  mm/slob.c      |  3
 -rw-r--r--  mm/slub.c      |  4

 6 files changed, 13 insertions, 19 deletions
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 9f4e9b95e8f2..71b84b45154a 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1605,11 +1605,11 @@ void __init numa_policy_init(void)
 	policy_cache = kmem_cache_create("numa_policy",
 					 sizeof(struct mempolicy),
-					 0, SLAB_PANIC, NULL, NULL);
+					 0, SLAB_PANIC, NULL);
 
 	sn_cache = kmem_cache_create("shared_policy_node",
 				     sizeof(struct sp_node),
-				     0, SLAB_PANIC, NULL, NULL);
+				     0, SLAB_PANIC, NULL);
 
 	/*
 	 * Set interleaving policy for system init. Interleaving is only
diff --git a/mm/rmap.c b/mm/rmap.c
index fede5c7910be..41ac39749ef4 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -149,7 +149,7 @@ static void anon_vma_ctor(void *data, struct kmem_cache *cachep,
 void __init anon_vma_init(void)
 {
 	anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
-			0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor, NULL);
+			0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor);
 }
 
 /*
diff --git a/mm/shmem.c b/mm/shmem.c
index ad155c7745dc..fcd19d323f9f 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2322,7 +2322,7 @@ static int init_inodecache(void)
 {
 	shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
 				sizeof(struct shmem_inode_info),
-				0, 0, init_once, NULL);
+				0, 0, init_once);
 	if (shmem_inode_cachep == NULL)
 		return -ENOMEM;
 	return 0;
diff --git a/mm/slab.c b/mm/slab.c
index c3feeaab3875..bde271c001ba 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1484,7 +1484,7 @@ void __init kmem_cache_init(void)
 					sizes[INDEX_AC].cs_size,
 					ARCH_KMALLOC_MINALIGN,
 					ARCH_KMALLOC_FLAGS|SLAB_PANIC,
-					NULL, NULL);
+					NULL);
 
 	if (INDEX_AC != INDEX_L3) {
 		sizes[INDEX_L3].cs_cachep =
@@ -1492,7 +1492,7 @@ void __init kmem_cache_init(void)
 				sizes[INDEX_L3].cs_size,
 				ARCH_KMALLOC_MINALIGN,
 				ARCH_KMALLOC_FLAGS|SLAB_PANIC,
-				NULL, NULL);
+				NULL);
 	}
 
 	slab_early_init = 0;
@@ -1510,7 +1510,7 @@ void __init kmem_cache_init(void)
 					sizes->cs_size,
 					ARCH_KMALLOC_MINALIGN,
 					ARCH_KMALLOC_FLAGS|SLAB_PANIC,
-					NULL, NULL);
+					NULL);
 		}
 #ifdef CONFIG_ZONE_DMA
 		sizes->cs_dmacachep = kmem_cache_create(
@@ -1519,7 +1519,7 @@ void __init kmem_cache_init(void)
 					ARCH_KMALLOC_MINALIGN,
 					ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA|
 						SLAB_PANIC,
-					NULL, NULL);
+					NULL);
 #endif
 		sizes++;
 		names++;
@@ -2101,12 +2101,10 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep)
  * @align: The required alignment for the objects.
  * @flags: SLAB flags
  * @ctor: A constructor for the objects.
- * @dtor: A destructor for the objects (not implemented anymore).
  *
  * Returns a ptr to the cache on success, NULL on failure.
  * Cannot be called within a int, but can be interrupted.
- * The @ctor is run when new pages are allocated by the cache
- * and the @dtor is run before the pages are handed back.
+ * The @ctor is run when new pages are allocated by the cache.
  *
  * @name must be valid until the cache is destroyed. This implies that
  * the module calling this has to destroy the cache before getting unloaded.
@@ -2126,8 +2124,7 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep)
 struct kmem_cache *
 kmem_cache_create (const char *name, size_t size, size_t align,
 	unsigned long flags,
-	void (*ctor)(void*, struct kmem_cache *, unsigned long),
-	void (*dtor)(void*, struct kmem_cache *, unsigned long))
+	void (*ctor)(void*, struct kmem_cache *, unsigned long))
 {
 	size_t left_over, slab_size, ralign;
 	struct kmem_cache *cachep = NULL, *pc;
@@ -2136,7 +2133,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	 * Sanity checks... these are all serious usage bugs.
 	 */
 	if (!name || in_interrupt() || (size < BYTES_PER_WORD) ||
-	    size > KMALLOC_MAX_SIZE || dtor) {
+	    size > KMALLOC_MAX_SIZE) {
 		printk(KERN_ERR "%s: Early error in slab %s\n", __FUNCTION__,
 				name);
 		BUG();
diff --git a/mm/slob.c b/mm/slob.c
index c89ef116d7aa..d50920ecc02b 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -492,8 +492,7 @@ struct kmem_cache {
 
 struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 	size_t align, unsigned long flags,
-	void (*ctor)(void*, struct kmem_cache *, unsigned long),
-	void (*dtor)(void*, struct kmem_cache *, unsigned long))
+	void (*ctor)(void*, struct kmem_cache *, unsigned long))
 {
 	struct kmem_cache *c;
diff --git a/mm/slub.c b/mm/slub.c
index 322f3a5d72c7..9b2d6178d06c 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2668,12 +2668,10 @@ static struct kmem_cache *find_mergeable(size_t size,
 
 struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 		size_t align, unsigned long flags,
-		void (*ctor)(void *, struct kmem_cache *, unsigned long),
-		void (*dtor)(void *, struct kmem_cache *, unsigned long))
+		void (*ctor)(void *, struct kmem_cache *, unsigned long))
 {
 	struct kmem_cache *s;
 
-	BUG_ON(dtor);
 	down_write(&slub_lock);
 	s = find_mergeable(size, align, flags, ctor);
 	if (s) {