author    | Catalin Marinas <catalin.marinas@arm.com>      | 2019-09-23 15:34:02 -0700
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2019-09-24 15:54:07 -0700
commit    | 0647398a8c7bd55e0b7565c5076e86b7c3c204c5 (patch)
tree      | 6613297ab1c94ffc0bfb60d810592d1d61b48319
parent    | dba82d9431770e68c45b03f0ffa2daa8abfb9429 (diff)
mm: kmemleak: simple memory allocation pool for kmemleak objects
Add a memory pool for struct kmemleak_object in case the normal
kmem_cache_alloc() fails under the gfp constraints passed by the caller.
The mem_pool[] array size is currently fixed at 16000.
We are not using the existing mempool kernel API since it requires the
slab allocator to be available (for pool->elements allocation). A
subsequent kmemleak patch will replace the static early log buffer with
the pool allocation introduced here, and that functionality must be
available before the slab allocator is initialised.
Link: http://lkml.kernel.org/r/20190812160642.52134-3-catalin.marinas@arm.com
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Qian Cai <cai@lca.pw>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
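The commit message above describes a simple pattern: try the normal allocator first, fall back to a fixed static pool when it fails, and recycle pool slots through a free list. As context for the diff below, here is a minimal userspace sketch of that pattern. It is illustrative only: POOL_SIZE, struct obj, obj_alloc() and obj_free() are made-up names, and malloc()/free() plus a pthread mutex stand in for kmem_cache_alloc(), kmem_cache_free() and kmemleak_lock; the real code is in the patch itself.

```c
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define POOL_SIZE 16				/* the patch uses 16000 */

struct obj {
	struct obj *next;			/* free-list link (stands in for list_head) */
	char payload[64];
};

static struct obj pool[POOL_SIZE];		/* static memory pool */
static int pool_free_count = POOL_SIZE;		/* never-used slots left in pool[] */
static struct obj *pool_free_list;		/* recycled pool slots */
static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;

static struct obj *obj_alloc(void)
{
	struct obj *o = malloc(sizeof(*o));	/* try the normal allocator first */

	if (o)
		return o;

	/* allocation failed: take a recycled slot, else a fresh one from pool[] */
	pthread_mutex_lock(&pool_lock);
	if (pool_free_list) {
		o = pool_free_list;
		pool_free_list = o->next;
	} else if (pool_free_count) {
		o = &pool[--pool_free_count];
	}
	pthread_mutex_unlock(&pool_lock);

	return o;				/* NULL only if the pool is exhausted too */
}

static void obj_free(struct obj *o)
{
	/* pointer-range check: objects inside pool[] go back to the free list */
	if (o < pool || o >= pool + POOL_SIZE) {
		free(o);			/* came from the normal allocator */
		return;
	}

	pthread_mutex_lock(&pool_lock);
	o->next = pool_free_list;
	pool_free_list = o;
	pthread_mutex_unlock(&pool_lock);
}

int main(void)
{
	struct obj *o = obj_alloc();

	printf("from pool? %s\n",
	       (o >= pool && o < pool + POOL_SIZE) ? "yes" : "no");
	obj_free(o);
	return 0;
}
```

The same two-level structure appears in the patch: mem_pool_free_count hands out slots that have never been used, while mem_pool_free_list collects slots that were freed and can be reused.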
-rw-r--r-- | mm/kmemleak.c | 54
1 file changed, 52 insertions(+), 2 deletions(-)
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 5ba7fad00fda..2fb86524d70b 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -180,11 +180,17 @@ struct kmemleak_object {
 #define HEX_ASCII		1
 /* max number of lines to be printed */
 #define HEX_MAX_LINES		2
+/* memory pool size */
+#define MEM_POOL_SIZE		16000
 
 /* the list of all allocated objects */
 static LIST_HEAD(object_list);
 /* the list of gray-colored objects (see color_gray comment below) */
 static LIST_HEAD(gray_list);
+/* memory pool allocation */
+static struct kmemleak_object mem_pool[MEM_POOL_SIZE];
+static int mem_pool_free_count = ARRAY_SIZE(mem_pool);
+static LIST_HEAD(mem_pool_free_list);
 /* search tree for object boundaries */
 static struct rb_root object_tree_root = RB_ROOT;
 /* rw_lock protecting the access to object_list and object_tree_root */
@@ -452,6 +458,50 @@ static int get_object(struct kmemleak_object *object)
 }
 
 /*
+ * Memory pool allocation and freeing. kmemleak_lock must not be held.
+ */
+static struct kmemleak_object *mem_pool_alloc(gfp_t gfp)
+{
+	unsigned long flags;
+	struct kmemleak_object *object;
+
+	/* try the slab allocator first */
+	object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
+	if (object)
+		return object;
+
+	/* slab allocation failed, try the memory pool */
+	write_lock_irqsave(&kmemleak_lock, flags);
+	object = list_first_entry_or_null(&mem_pool_free_list,
+					  typeof(*object), object_list);
+	if (object)
+		list_del(&object->object_list);
+	else if (mem_pool_free_count)
+		object = &mem_pool[--mem_pool_free_count];
+	write_unlock_irqrestore(&kmemleak_lock, flags);
+
+	return object;
+}
+
+/*
+ * Return the object to either the slab allocator or the memory pool.
+ */
+static void mem_pool_free(struct kmemleak_object *object)
+{
+	unsigned long flags;
+
+	if (object < mem_pool || object >= mem_pool + ARRAY_SIZE(mem_pool)) {
+		kmem_cache_free(object_cache, object);
+		return;
+	}
+
+	/* add the object to the memory pool free list */
+	write_lock_irqsave(&kmemleak_lock, flags);
+	list_add(&object->object_list, &mem_pool_free_list);
+	write_unlock_irqrestore(&kmemleak_lock, flags);
+}
+
+/*
  * RCU callback to free a kmemleak_object.
  */
 static void free_object_rcu(struct rcu_head *rcu)
@@ -469,7 +519,7 @@ static void free_object_rcu(struct rcu_head *rcu)
 		hlist_del(&area->node);
 		kmem_cache_free(scan_area_cache, area);
 	}
-	kmem_cache_free(object_cache, object);
+	mem_pool_free(object);
 }
 
 /*
@@ -552,7 +602,7 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
 	struct rb_node **link, *rb_parent;
 	unsigned long untagged_ptr;
 
-	object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
+	object = mem_pool_alloc(gfp);
 	if (!object) {
 		pr_warn("Cannot allocate a kmemleak_object structure\n");
 		kmemleak_disable();
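The least obvious piece of the new mem_pool_alloc() is probably the free-list pop via list_first_entry_or_null(), which returns the first entry of an intrusive list or NULL when the list is empty. The following standalone sketch re-creates that idea in userspace with a simplified container_of(); struct kmemleak_object_demo, list_add_front() and list_del_node() are illustrative stand-ins for the kernel's list helpers, not their actual implementations.

```c
#include <stddef.h>
#include <stdio.h>

/* minimal doubly-linked list node, analogous to the kernel's struct list_head */
struct list_head {
	struct list_head *next, *prev;
};

#define LIST_HEAD_INIT(name) { &(name), &(name) }

/* container_of: recover the enclosing structure from an embedded node */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* same idea as list_first_entry_or_null(): first entry, or NULL when empty */
#define list_first_entry_or_null(head, type, member) \
	((head)->next != (head) ? container_of((head)->next, type, member) : NULL)

struct kmemleak_object_demo {
	struct list_head object_list;	/* embedded node, as in struct kmemleak_object */
	int id;
};

static void list_add_front(struct list_head *new, struct list_head *head)
{
	new->next = head->next;
	new->prev = head;
	head->next->prev = new;
	head->next = new;
}

static void list_del_node(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

int main(void)
{
	struct list_head free_list = LIST_HEAD_INIT(free_list);
	struct kmemleak_object_demo a = { .id = 1 }, *first;

	first = list_first_entry_or_null(&free_list, struct kmemleak_object_demo,
					 object_list);
	printf("empty list -> %p\n", (void *)first);	/* NULL: nothing to reuse */

	list_add_front(&a.object_list, &free_list);
	first = list_first_entry_or_null(&free_list, struct kmemleak_object_demo,
					 object_list);
	if (first) {
		list_del_node(&first->object_list);	/* pop it, as mem_pool_alloc() does */
		printf("popped object id=%d\n", first->id);
	}
	return 0;
}
```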