path: root/mm/percpu.c
author    Catalin Marinas <catalin.marinas@arm.com>  2011-09-26 17:12:53 +0100
committer Catalin Marinas <catalin.marinas@arm.com>  2011-12-02 16:12:42 +0000
commit    f528f0b8e53d73b18be71e96693cfab9322f33c7 (patch)
tree      5a25d6ac1c2f18561e554074aa7c06bf3d899183 /mm/percpu.c
parent    74341703edca6bc68a165a18453071b097828407 (diff)
kmemleak: Handle percpu memory allocation
This patch adds kmemleak callbacks from the percpu allocator, reducing a
number of false positives caused by kmemleak not scanning such memory
blocks. The percpu chunks are never reported as leaks because of current
kmemleak limitations with the __percpu pointer not pointing directly to
the actual chunks.

Reported-by: Huajun Li <huajun.li.lee@gmail.com>
Acked-by: Christoph Lameter <cl@gentwo.org>
Acked-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
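For illustration, a minimal sketch of the pattern the new callbacks address
(hypothetical code, not part of this commit; foo_pcpu, foo_init and the
sizes are invented): the only reference to a kmalloc'd buffer is stored
inside a percpu area, and since kmemleak previously did not scan percpu
memory, the buffer was reported as a leak even though it was still
reachable.

#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/slab.h>

struct foo_pcpu {
	void *buf;		/* the only reference to kmalloc'd memory */
};

static struct foo_pcpu __percpu *foo;

static int __init foo_init(void)
{
	int cpu;

	/* With this patch, pcpu_alloc() registers the areas with kmemleak. */
	foo = alloc_percpu(struct foo_pcpu);
	if (!foo)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		/*
		 * This pointer lives only in the percpu chunk; kmemleak now
		 * scans the per-CPU copies and finds the reference, instead
		 * of flagging the buffer as an unreferenced object.
		 */
		per_cpu_ptr(foo, cpu)->buf = kmalloc(64, GFP_KERNEL);
		if (!per_cpu_ptr(foo, cpu)->buf)
			return -ENOMEM;	/* error unwinding omitted in this sketch */
	}
	return 0;
}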
Diffstat (limited to 'mm/percpu.c')
-rw-r--r--	mm/percpu.c	12
1 file changed, 11 insertions(+), 1 deletion(-)
diff --git a/mm/percpu.c b/mm/percpu.c
index 3bb810a72006..86c5bdbdc370 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -67,6 +67,7 @@
 #include <linux/spinlock.h>
 #include <linux/vmalloc.h>
 #include <linux/workqueue.h>
+#include <linux/kmemleak.h>
 
 #include <asm/cacheflush.h>
 #include <asm/sections.h>
@@ -710,6 +711,7 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved)
 	const char *err;
 	int slot, off, new_alloc;
 	unsigned long flags;
+	void __percpu *ptr;
 
 	if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
 		WARN(true, "illegal size (%zu) or align (%zu) for "
@@ -802,7 +804,9 @@ area_found:
 	mutex_unlock(&pcpu_alloc_mutex);
 
 	/* return address relative to base address */
-	return __addr_to_pcpu_ptr(chunk->base_addr + off);
+	ptr = __addr_to_pcpu_ptr(chunk->base_addr + off);
+	kmemleak_alloc_percpu(ptr, size);
+	return ptr;
 
 fail_unlock:
 	spin_unlock_irqrestore(&pcpu_lock, flags);
@@ -916,6 +920,8 @@ void free_percpu(void __percpu *ptr)
 	if (!ptr)
 		return;
 
+	kmemleak_free_percpu(ptr);
+
 	addr = __pcpu_ptr_to_addr(ptr);
 
 	spin_lock_irqsave(&pcpu_lock, flags);
@@ -1637,6 +1643,8 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
 			rc = -ENOMEM;
 			goto out_free_areas;
 		}
+		/* kmemleak tracks the percpu allocations separately */
+		kmemleak_free(ptr);
 		areas[group] = ptr;
 
 		base = min(ptr, base);
@@ -1751,6 +1759,8 @@ int __init pcpu_page_first_chunk(size_t reserved_size,
 				pr_warning("PERCPU: failed to allocate %s page "
 					   "for cpu%u\n", psize_str, cpu);
 				goto enomem;
 			}
+			/* kmemleak tracks the percpu allocations separately */
+			kmemleak_free(ptr);
 			pages[j++] = virt_to_page(ptr);
 		}
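A note on the two kmemleak_free(ptr) calls in the first-chunk paths: that
memory comes from the early allocator, which kmemleak already tracks as
ordinary objects, so those objects are dropped to avoid accounting the same
region twice once the percpu allocator hands it out via
kmemleak_alloc_percpu(). On the allocation side, the companion mm/kmemleak.c
change (filtered out of this 'mm/percpu.c' view) implements
kmemleak_alloc_percpu() roughly as sketched below; this is a simplified
reconstruction that omits early-boot logging and enablement checks, not the
verbatim kernel code.

/*
 * Sketch of kmemleak_alloc_percpu(): one scannable kmemleak object per
 * possible CPU, created with a min_count of 0 so the percpu chunks are
 * only scanned, never reported as leaks themselves (the limitation
 * noted in the commit message).
 */
void kmemleak_alloc_percpu(const void __percpu *ptr, size_t size)
{
	unsigned int cpu;

	if (!ptr || IS_ERR(ptr))
		return;

	for_each_possible_cpu(cpu)
		create_object((unsigned long)per_cpu_ptr(ptr, cpu),
			      size, 0, GFP_KERNEL);
}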