author     Joonsoo Kim <js1304@gmail.com>  2015-07-17 16:24:18 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2015-07-17 16:39:54 -0700
commit     e2cfc91120fa01e3458167054af993fb83d7d0ec (patch)
tree       22ffd1c08779eb7ff49c313202cfe03492397c29 /mm/page_alloc.c
parent     f3a14ced32513d103a3ed0ce89c4e713fac01461 (diff)
mm/page_owner: set correct gfp_mask on page_owner
Currently, we set a wrong gfp_mask in the page_owner info for freepages isolated by compaction and for pages handled by split_page(). This leads to an incorrect mixed-pageblock count in '/proc/pagetypeinfo'. That metric is really useful for measuring the effect of fragmentation, so it should be accurate. This patch fixes it by recording the correct gfp_mask.

Without this patch, after a kernel build workload finishes, 112 of roughly 210 movable pageblocks are reported as mixed. With this fix, only 57 are reported as mixed.

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
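The split_page() hunk below stops stamping the tail pages with a gfp_mask of 0 and instead copies the mask that page_owner recorded when the page was originally allocated, via get_page_owner_gfp(). That helper is not part of the mm/page_alloc.c diff shown here; the following is only a rough sketch, assuming the page_owner machinery stores the allocating gfp_mask in the per-page struct page_ext:

#include <linux/gfp.h>
#include <linux/page_ext.h>

/*
 * Sketch only, not part of the mm/page_alloc.c diff below: the real
 * helper sits on the page_owner side of this series and very likely
 * also bails out when page_owner is not enabled.  The assumption here
 * is that page_owner saves the allocating gfp_mask in struct page_ext,
 * so split_page() can simply read it back and copy it to each sub-page.
 */
gfp_t get_page_owner_gfp(struct page *page)
{
	struct page_ext *page_ext = lookup_page_ext(page);

	return page_ext->gfp_mask;
}

In the __isolate_free_page() hunk, the isolated page is tagged with __GFP_MOVABLE rather than 0, presumably because compaction hands such pages out as migration targets for movable allocations; recording 0 there made them look like non-movable allocations landing in movable pageblocks and inflated the mixed-pageblock count.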
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--  mm/page_alloc.c | 8 +++++---
1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index fbba675a0bd9..ef19f22b2b7d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1948,6 +1948,7 @@ void free_hot_cold_page_list(struct list_head *list, bool cold)
 void split_page(struct page *page, unsigned int order)
 {
 	int i;
+	gfp_t gfp_mask;
 
 	VM_BUG_ON_PAGE(PageCompound(page), page);
 	VM_BUG_ON_PAGE(!page_count(page), page);
@@ -1961,10 +1962,11 @@ void split_page(struct page *page, unsigned int order)
 		split_page(virt_to_page(page[0].shadow), order);
 #endif
 
-	set_page_owner(page, 0, 0);
+	gfp_mask = get_page_owner_gfp(page);
+	set_page_owner(page, 0, gfp_mask);
 	for (i = 1; i < (1 << order); i++) {
 		set_page_refcounted(page + i);
-		set_page_owner(page + i, 0, 0);
+		set_page_owner(page + i, 0, gfp_mask);
 	}
 }
 EXPORT_SYMBOL_GPL(split_page);
@@ -1994,7 +1996,7 @@ int __isolate_free_page(struct page *page, unsigned int order)
 	zone->free_area[order].nr_free--;
 	rmv_page_order(page);
 
-	set_page_owner(page, order, 0);
+	set_page_owner(page, order, __GFP_MOVABLE);
 
 	/* Set the pageblock if the isolated page is at least a pageblock */
 	if (order >= pageblock_order - 1) {