author	Jesper Dangaard Brouer <brouer@redhat.com>	2016-03-15 14:53:47 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-03-15 16:55:16 -0700
commit	d5e3ed66d6f260b3bb68cb5cf0fe79777e8febf0 (patch)
tree	29d6877b56a7c7b701ab6c8d78e45ef9aaea3fc9 /mm/slab.c
parent	0142eae3ae15b9b9f0ae2a8e68e3c8dc347a2394 (diff)
slab: use slab_post_alloc_hook in SLAB allocator shared with SLUB
Reviewers notice that the order in slab_post_alloc_hook() of
kmemcheck_slab_alloc() and kmemleak_alloc_recursive() gets swapped
compared to slab.c / SLAB allocator.  Also notice memset now occurs
before calling kmemcheck_slab_alloc() and kmemleak_alloc_recursive().

I assume this reordering of kmemcheck, kmemleak and memset is okay
because this is the order they are used by the SLUB allocator.

This patch completes the sharing of alloc_hooks between SLUB and SLAB.

Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Vladimir Davydov <vdavydov@virtuozzo.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
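For reference, the shared hook lives in mm/slab.h.  The following is a
sketch of its shape at the time of this series, reconstructed from the
call sites below and the ordering described above; it is not a verbatim
copy, and details such as the slab_ksize() argument may differ in the
actual tree:

static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
					size_t size, void **p)
{
	size_t i;

	flags &= gfp_allowed_mask;
	for (i = 0; i < size; i++) {
		void *object = p[i];

		/* kmemcheck now runs before kmemleak -- the swap the
		 * commit message calls out. */
		kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
		kmemleak_alloc_recursive(object, s->object_size, 1,
					 s->flags, flags);
	}
	memcg_kmem_put_cache(s);
}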
Diffstat (limited to 'mm/slab.c')
-rw-r--r--	mm/slab.c	22
1 file changed, 6 insertions(+), 16 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index 1857a652c928..f872208a0912 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3176,16 +3176,11 @@ slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
   out:
 	local_irq_restore(save_flags);
 	ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
-	kmemleak_alloc_recursive(ptr, cachep->object_size, 1, cachep->flags,
-				 flags);
 
-	if (likely(ptr)) {
-		kmemcheck_slab_alloc(cachep, flags, ptr, cachep->object_size);
-		if (unlikely(flags & __GFP_ZERO))
-			memset(ptr, 0, cachep->object_size);
-	}
+	if (unlikely(flags & __GFP_ZERO) && ptr)
+		memset(ptr, 0, cachep->object_size);
 
-	memcg_kmem_put_cache(cachep);
+	slab_post_alloc_hook(cachep, flags, 1, &ptr);
 	return ptr;
 }
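The hook's count-plus-array signature lets one implementation serve both
single allocations (1, &ptr as above) and the bulk API.  A minimal usage
sketch of the bulk side, assuming the 4.5-era kmem_cache_alloc_bulk()
signature (illustrative only, not part of this patch):

	void *objs[8];
	int got = kmem_cache_alloc_bulk(cachep, GFP_KERNEL, 8, objs);

	/* On success the allocator runs the same post-alloc hooks over
	 * each of the 8 objects that slab_alloc() runs over one. */
	if (got)
		kmem_cache_free_bulk(cachep, 8, objs);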
@@ -3237,17 +3232,12 @@ slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)
 	objp = __do_cache_alloc(cachep, flags);
 	local_irq_restore(save_flags);
 	objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
-	kmemleak_alloc_recursive(objp, cachep->object_size, 1, cachep->flags,
-				 flags);
 	prefetchw(objp);
 
-	if (likely(objp)) {
-		kmemcheck_slab_alloc(cachep, flags, objp, cachep->object_size);
-		if (unlikely(flags & __GFP_ZERO))
-			memset(objp, 0, cachep->object_size);
-	}
+	if (unlikely(flags & __GFP_ZERO) && objp)
+		memset(objp, 0, cachep->object_size);
 
-	memcg_kmem_put_cache(cachep);
+	slab_post_alloc_hook(cachep, flags, 1, &objp);
 	return objp;
 }
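Caller-visible semantics are unchanged: __GFP_ZERO still yields zeroed
objects; the memset simply happens before, rather than inside, the
debug-hook block.  A minimal sketch with a hypothetical cache (the name
and object size are illustrative, not from this patch):

	struct kmem_cache *c = kmem_cache_create("example", 64, 0, 0, NULL);
	void *obj = kmem_cache_alloc(c, GFP_KERNEL | __GFP_ZERO);

	/* obj, if non-NULL, points at 64 zeroed bytes; the memset ran
	 * before slab_post_alloc_hook() invoked kmemcheck/kmemleak. */
	kmem_cache_free(c, obj);
	kmem_cache_destroy(c);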