author      Catalin Marinas <catalin.marinas@arm.com>    2015-09-14 18:16:02 +0100
committer   Jens Axboe <axboe@fb.com>                    2015-09-23 11:00:57 -0600
commit      f75782e4e067fd68249635699cb20dfe0489d743 (patch)
tree        9e8ed77c60570d04b8af9ce097bfbed26e4b19df
parent      bcee19f424a0d8c26ecf2607b73c690802658b29 (diff)
block: kmemleak: Track the page allocations for struct request
The pages allocated for struct request contain pointers to other slab
allocations (via ops->init_request). Since kmemleak does not track/scan
page allocations, the slab objects will be reported as leaks (false
positives). This patch adds kmemleak callbacks to allow tracking of such
pages.

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Reported-by: Bart Van Assche <bart.vanassche@sandisk.com>
Tested-by: Bart Van Assche <bart.vanassche@sandisk.com>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Jens Axboe <axboe@fb.com>
-rw-r--r--   block/blk-mq.c   11
1 file changed, 11 insertions, 0 deletions
diff --git a/block/blk-mq.c b/block/blk-mq.c
index f2d67b4047a0..2077f0d2f95f 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -9,6 +9,7 @@
 #include <linux/backing-dev.h>
 #include <linux/bio.h>
 #include <linux/blkdev.h>
+#include <linux/kmemleak.h>
 #include <linux/mm.h>
 #include <linux/init.h>
 #include <linux/slab.h>
@@ -1438,6 +1439,11 @@ static void blk_mq_free_rq_map(struct blk_mq_tag_set *set,
 	while (!list_empty(&tags->page_list)) {
 		page = list_first_entry(&tags->page_list, struct page, lru);
 		list_del_init(&page->lru);
+		/*
+		 * Remove kmemleak object previously allocated in
+		 * blk_mq_init_rq_map().
+		 */
+		kmemleak_free(page_address(page));
 		__free_pages(page, page->private);
 	}
@@ -1510,6 +1516,11 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
 		list_add_tail(&page->lru, &tags->page_list);
 		p = page_address(page);
+		/*
+		 * Allow kmemleak to scan these pages as they contain pointers
+		 * to additional allocations like via ops->init_request().
+		 */
+		kmemleak_alloc(p, order_to_size(this_order), 1, GFP_KERNEL);
 		entries_per_page = order_to_size(this_order) / rq_size;
 		to_do = min(entries_per_page, set->queue_depth - i);
 		left -= to_do * rq_size;
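
For readers unfamiliar with the kmemleak calls used above: the pattern is generic. Whenever a page allocation is used to store pointers to slab objects, pairing it with kmemleak_alloc()/kmemleak_free() lets the scanner see those pointers and avoids false-positive leak reports for the objects they reference. Below is a minimal, hypothetical sketch of the same pairing outside the block layer; struct my_entry and the my_table_* helpers are invented for illustration, error handling is elided, and the caller is assumed to pick nr small enough that the pointer array fits in the allocation.

/*
 * Hypothetical sketch, not part of this patch: the kmemleak_alloc()/
 * kmemleak_free() pairing applied to a driver-private lookup table.
 */
#include <linux/gfp.h>
#include <linux/kmemleak.h>
#include <linux/mm.h>
#include <linux/slab.h>

struct my_entry {
	int id;
};

/* Allocate a page-backed array of pointers to kmalloc'd entries. */
static struct page *my_table_create(unsigned int order, unsigned int nr)
{
	struct my_entry **tbl;
	struct page *page;
	unsigned int i;

	page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!page)
		return NULL;

	tbl = page_address(page);
	/*
	 * Register the page with kmemleak so the pointers stored in it are
	 * scanned; otherwise every entry allocated below would be reported
	 * as a leak even though the table still references it.
	 */
	kmemleak_alloc(tbl, PAGE_SIZE << order, 1, GFP_KERNEL);

	for (i = 0; i < nr; i++)
		tbl[i] = kzalloc(sizeof(*tbl[i]), GFP_KERNEL);

	return page;
}

static void my_table_destroy(struct page *page, unsigned int order,
			     unsigned int nr)
{
	struct my_entry **tbl = page_address(page);
	unsigned int i;

	for (i = 0; i < nr; i++)
		kfree(tbl[i]);

	/* Drop the kmemleak object before handing the pages back. */
	kmemleak_free(tbl);
	__free_pages(page, order);
}

As in the patch, the min_count argument of 1 to kmemleak_alloc() means the registered region itself is also reported if nothing references it, in addition to being scanned for pointers to other objects.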