author		Vladimir Davydov <vdavydov@parallels.com>	2014-06-04 16:10:03 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-06-04 16:54:08 -0700
commit		0bd62b1190607e4f1b3c2927ba48672a1cf2a83d (patch)
tree		4f19d0dbbf7e82747328de5439b21d8e6013d01f
parent		776ed0f0377914d1e65fed903c052e9eef3f4cc3 (diff)
download	linux-stable-0bd62b1190607e4f1b3c2927ba48672a1cf2a83d.tar.gz
		linux-stable-0bd62b1190607e4f1b3c2927ba48672a1cf2a83d.tar.bz2
		linux-stable-0bd62b1190607e4f1b3c2927ba48672a1cf2a83d.zip
slab: delete cache from list after __kmem_cache_shutdown succeeds
Currently, on kmem_cache_destroy we delete the cache from the slab list before calling __kmem_cache_shutdown, inserting it back into the list on failure. Initially this was done because __kmem_cache_shutdown could release the slab_mutex to delete the sysfs SLUB entry, but since commit 41a212859a4d ("slub: use sysfs'es release mechanism for kmem_cache") the sysfs entry is removed later in kmem_cache_destroy, after the slab_mutex has been dropped, so no implementation of __kmem_cache_shutdown can ever release the lock. We can therefore simplify the code a bit by moving the list_del after __kmem_cache_shutdown.

Signed-off-by: Vladimir Davydov <vdavydov@parallels.com>
Cc: Christoph Lameter <cl@linux-foundation.org>
Cc: Pekka Enberg <penberg@kernel.org>
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--	mm/slab_common.c	4
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 48fafb61f35e..735e01a0db6f 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -346,15 +346,15 @@ void kmem_cache_destroy(struct kmem_cache *s)
 	if (memcg_cleanup_cache_params(s) != 0)
 		goto out_unlock;
 
-	list_del(&s->list);
 	if (__kmem_cache_shutdown(s) != 0) {
-		list_add(&s->list, &slab_caches);
 		printk(KERN_ERR "kmem_cache_destroy %s: "
 		       "Slab cache still has objects\n", s->name);
 		dump_stack();
 		goto out_unlock;
 	}
 
+	list_del(&s->list);
+
 	mutex_unlock(&slab_mutex);
 	if (s->flags & SLAB_DESTROY_BY_RCU)
 		rcu_barrier();
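
For context, the resulting kmem_cache_destroy() flow after this patch can be sketched as follows. The lines inside the hunk are taken from the diff above; the locking boilerplate, the out_unlock label, and the trailing teardown step are abbreviated from the surrounding code, so this is an illustrative sketch rather than the verbatim kernel function.

/*
 * Illustrative sketch of kmem_cache_destroy() after this patch.
 * The code covered by the hunk above is reproduced as-is; everything
 * outside it (locking boilerplate, refcount handling, final teardown)
 * is abbreviated and only approximates the real function.
 */
void kmem_cache_destroy(struct kmem_cache *s)
{
	mutex_lock(&slab_mutex);

	if (memcg_cleanup_cache_params(s) != 0)
		goto out_unlock;

	if (__kmem_cache_shutdown(s) != 0) {
		/*
		 * Shutdown failed: the cache was never taken off the
		 * list, so no list_add() rollback is needed any more.
		 */
		printk(KERN_ERR "kmem_cache_destroy %s: "
		       "Slab cache still has objects\n", s->name);
		dump_stack();
		goto out_unlock;
	}

	/* Only unlink the cache once shutdown has succeeded. */
	list_del(&s->list);

	mutex_unlock(&slab_mutex);
	if (s->flags & SLAB_DESTROY_BY_RCU)
		rcu_barrier();

	/* ... sysfs entry removal and freeing of the cache follow ... */
	return;

out_unlock:
	mutex_unlock(&slab_mutex);
}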