author		Joonsoo Kim <iamjoonsoo.kim@lge.com>		2014-10-09 15:26:02 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-10-09 22:25:50 -0400
commit		61f47105a2c9c60e950ca808b7560f776f9bfa31
tree		10bbc7fb213285e1e673073eedf21a1991a22ea1	/mm/slab.c
parent		07f361b2bee38896df8be17d8c3f8af3f3610606
mm/sl[ao]b: always track caller in kmalloc_(node_)track_caller()
Currently we track the caller only if tracing or slab debugging is enabled. When
both are disabled, we could save the overhead of passing one extra argument by
calling __kmalloc(_node)() directly, but that saving would be marginal.
Furthermore, the default slab allocator, SLUB, doesn't use this trick, so I
think it's fine to change the behaviour here as well.

After this change, CONFIG_DEBUG_SLAB can be turned on and off without a full
kernel rebuild, and some convoluted '#if' blocks can be removed. That looks
more beneficial to me.
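[Editor's note: a minimal, self-contained C sketch of the pattern the patch converges on, not kernel code; names such as my_kmalloc and do_alloc are illustrative. The idea is that the return address is captured once at the public entry point, in the kernel via _RET_IP_, i.e. (unsigned long)__builtin_return_address(0), and is forwarded unconditionally, whether or not a debug hook consumes it.]

/*
 * Illustrative sketch only: the caller address is always captured and
 * passed down, so no #ifdef split between debug and non-debug variants
 * of the entry point is needed.
 */
#include <stdio.h>
#include <stdlib.h>

/* Analogue of the kernel's _RET_IP_. */
#define RET_IP ((unsigned long)__builtin_return_address(0))

static void *do_alloc(size_t size, unsigned long caller)
{
	/* A debug or tracing hook could record 'caller' here; when such
	 * hooks are compiled out, the extra argument is simply unused. */
	printf("allocating %zu bytes for caller %#lx\n", size, caller);
	return malloc(size);
}

/* Single definition, always forwarding the caller -- the shape that
 * __kmalloc() takes after this patch. */
void *my_kmalloc(size_t size)
{
	return do_alloc(size, RET_IP);
}

int main(void)
{
	void *p = my_kmalloc(32);
	free(p);
	return 0;
}

The "one argument passing overhead" the message calls marginal is exactly the caller parameter that goes unused in the non-debug case.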
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Acked-by: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Zhang Yanfei <zhangyanfei@cn.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/slab.c')
-rw-r--r--	mm/slab.c	18
1 file changed, 0 insertions, 18 deletions
diff --git a/mm/slab.c b/mm/slab.c
index 7c52b3890d25..c52bc5aa6ba0 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3496,7 +3496,6 @@ __do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
 	return kmem_cache_alloc_node_trace(cachep, flags, node, size);
 }
 
-#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING)
 void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	return __do_kmalloc_node(size, flags, node, _RET_IP_);
@@ -3509,13 +3508,6 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
 	return __do_kmalloc_node(size, flags, node, caller);
 }
 EXPORT_SYMBOL(__kmalloc_node_track_caller);
-#else
-void *__kmalloc_node(size_t size, gfp_t flags, int node)
-{
-	return __do_kmalloc_node(size, flags, node, 0);
-}
-EXPORT_SYMBOL(__kmalloc_node);
-#endif /* CONFIG_DEBUG_SLAB || CONFIG_TRACING */
 #endif /* CONFIG_NUMA */
 
 /**
@@ -3541,8 +3533,6 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
 
 	return ret;
 }
-
-#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING)
 void *__kmalloc(size_t size, gfp_t flags)
 {
 	return __do_kmalloc(size, flags, _RET_IP_);
@@ -3555,14 +3545,6 @@ void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller)
 }
 EXPORT_SYMBOL(__kmalloc_track_caller);
 
-#else
-void *__kmalloc(size_t size, gfp_t flags)
-{
-	return __do_kmalloc(size, flags, 0);
-}
-EXPORT_SYMBOL(__kmalloc);
-#endif
-
 /**
  * kmem_cache_free - Deallocate an object
  * @cachep: The cache the allocation was from.
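[Editor's note: as a usage aside, the _track_caller variants exist mostly for small wrappers that allocate on behalf of their own caller, the same idea behind helpers such as kstrdup() using kmalloc_track_caller(). Below is a hedged sketch with illustrative names (my_strdup, alloc_track_caller), not the real kernel helpers, showing how a wrapper forwards its caller's return address so the allocation is attributed to the code that called the wrapper.]

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Sketch of a caller-tracking allocation entry point. */
static void *alloc_track_caller(size_t size, unsigned long caller)
{
	printf("%zu bytes requested on behalf of %#lx\n", size, caller);
	return malloc(size);
}

/* noinline so __builtin_return_address(0) really is the address in the
 * function that called my_strdup(); the allocation is blamed on that
 * caller, not on my_strdup() itself. */
__attribute__((noinline))
static char *my_strdup(const char *s)
{
	size_t len = strlen(s) + 1;
	char *p = alloc_track_caller(len,
			(unsigned long)__builtin_return_address(0));
	if (p)
		memcpy(p, s, len);
	return p;
}

int main(void)
{
	char *copy = my_strdup("hello");
	if (copy) {
		puts(copy);
		free(copy);
	}
	return 0;
}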