author     Joonsoo Kim <iamjoonsoo.kim@lge.com>           2016-10-07 16:58:18 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org> 2016-10-07 18:46:27 -0700
commit     f1c1e9f7b5b3ddce6b4f1986939ec87b27515086
tree       114527f311ef32b5c8c0cc9f511a61c974c2c3f4 /mm
parent     acbc15a4b397f86d39416df143e30982b1da528b
mm/debug_pagealloc.c: don't allocate page_ext if we don't use guard page
debug_pagealloc only maps and unmaps page tables, so by itself it needs no additional memory to track state. With the guard page feature, however, it needs extra memory to record whether a page is a guard page or not. Guard pages are only used when debug_guardpage_minorder is non-zero, so this patch skips the additional memory allocation (page_ext) when debug_guardpage_minorder is zero. This saves memory when debug_pagealloc is used without guard pages.

Link: http://lkml.kernel.org/r/1471315879-32294-3-git-send-email-iamjoonsoo.kim@lge.com
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Reviewed-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Michal Hocko <mhocko@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
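For context: page_ext clients register a need()/init() callback pair, and the per-page extension memory is only allocated if at least one registered client's need() callback returns true — which is what this patch relies on. The sketch below is a self-contained userspace analogy of that pattern, not the kernel's actual page_ext API; the names, globals, and the trivial registry are illustrative stand-ins.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct page_ext_ops {
            bool (*need)(void);   /* "do I need per-page extension memory?" */
            void (*init)(void);   /* called only if the extension is allocated */
    };

    /* Stand-ins for the kernel's boot-time switches. */
    static bool debug_pagealloc = true;
    static unsigned long guardpage_minorder = 0;   /* 0 => guard pages disabled */

    static bool need_debug_guardpage(void)
    {
            if (!debug_pagealloc)
                    return false;
            /* The check the patch adds: no minorder, no guard pages, no page_ext. */
            if (!guardpage_minorder)
                    return false;
            return true;
    }

    static void init_debug_guardpage(void)
    {
            puts("guard-page tracking enabled");
    }

    static struct page_ext_ops guardpage_ops = {
            .need = need_debug_guardpage,
            .init = init_debug_guardpage,
    };

    static struct page_ext_ops *clients[] = { &guardpage_ops };

    int main(void)
    {
            size_t i, nr = sizeof(clients) / sizeof(clients[0]);
            bool needed = false;

            for (i = 0; i < nr; i++)
                    if (clients[i]->need())
                            needed = true;

            if (!needed) {
                    puts("page_ext not allocated: no registered client needs it");
                    return 0;
            }

            /* Allocate the per-page extension array, then let clients initialise. */
            for (i = 0; i < nr; i++)
                    clients[i]->init();
            return 0;
    }

The __setup() to early_param() switch in the last hunk is presumably needed for the same reason: the need() callback fires during early page_ext initialisation, so debug_guardpage_minorder has to be parsed before ordinary __setup() parameters would be.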
Diffstat (limited to 'mm')
-rw-r--r--  mm/page_alloc.c  |  8
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index e150ba9e1311..06ea805d1b14 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -607,6 +607,9 @@ static bool need_debug_guardpage(void)
if (!debug_pagealloc_enabled())
return false;
+ if (!debug_guardpage_minorder())
+ return false;
+
return true;
}
@@ -615,6 +618,9 @@ static void init_debug_guardpage(void)
if (!debug_pagealloc_enabled())
return;
+ if (!debug_guardpage_minorder())
+ return;
+
_debug_guardpage_enabled = true;
}
@@ -635,7 +641,7 @@ static int __init debug_guardpage_minorder_setup(char *buf)
pr_info("Setting debug_guardpage_minorder to %lu\n", res);
return 0;
}
-__setup("debug_guardpage_minorder=", debug_guardpage_minorder_setup);
+early_param("debug_guardpage_minorder", debug_guardpage_minorder_setup);
static inline bool set_page_guard(struct zone *zone, struct page *page,
unsigned int order, int migratetype)