-rw-r--r--  mm/compaction.c      |  8
-rw-r--r--  mm/internal.h        |  2
-rw-r--r--  mm/page_alloc.c      | 23
-rw-r--r--  mm/page_isolation.c  |  4
4 files changed, 18 insertions, 19 deletions
diff --git a/mm/compaction.c b/mm/compaction.c
index 4ae1294068a8..64df5fe052db 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -74,14 +74,8 @@ static void map_pages(struct list_head *list)
 
 		order = page_private(page);
 		nr_pages = 1 << order;
-		set_page_private(page, 0);
-		set_page_refcounted(page);
 
-		arch_alloc_page(page, order);
-		kernel_map_pages(page, nr_pages, 1);
-		kasan_alloc_pages(page, order);
-
-		set_page_owner(page, order, __GFP_MOVABLE);
+		post_alloc_hook(page, order, __GFP_MOVABLE);
 		if (order)
 			split_page(page, order);
 
diff --git a/mm/internal.h b/mm/internal.h
index 2524ec880e24..fbfba0cc2c35 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -150,6 +150,8 @@ extern int __isolate_free_page(struct page *page, unsigned int order);
 extern void __free_pages_bootmem(struct page *page, unsigned long pfn,
 					unsigned int order);
 extern void prep_compound_page(struct page *page, unsigned int order);
+extern void post_alloc_hook(struct page *page, unsigned int order,
+					gfp_t gfp_flags);
 extern int user_min_free_kbytes;
 
 #if defined CONFIG_COMPACTION || defined CONFIG_CMA
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a82b303c19b1..13cf4c665321 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1724,6 +1724,19 @@ static bool check_new_pages(struct page *page, unsigned int order)
 	return false;
 }
 
+inline void post_alloc_hook(struct page *page, unsigned int order,
+				gfp_t gfp_flags)
+{
+	set_page_private(page, 0);
+	set_page_refcounted(page);
+
+	arch_alloc_page(page, order);
+	kernel_map_pages(page, 1 << order, 1);
+	kernel_poison_pages(page, 1 << order, 1);
+	kasan_alloc_pages(page, order);
+	set_page_owner(page, order, gfp_flags);
+}
+
 static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
 							unsigned int alloc_flags)
 {
@@ -1736,13 +1749,7 @@ static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags
 		poisoned &= page_is_poisoned(p);
 	}
 
-	set_page_private(page, 0);
-	set_page_refcounted(page);
-
-	arch_alloc_page(page, order);
-	kernel_map_pages(page, 1 << order, 1);
-	kernel_poison_pages(page, 1 << order, 1);
-	kasan_alloc_pages(page, order);
+	post_alloc_hook(page, order, gfp_flags);
 
 	if (!free_pages_prezeroed(poisoned) && (gfp_flags & __GFP_ZERO))
 		for (i = 0; i < (1 << order); i++)
@@ -1751,8 +1758,6 @@ static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags
 	if (order && (gfp_flags & __GFP_COMP))
 		prep_compound_page(page, order);
 
-	set_page_owner(page, order, gfp_flags);
-
 	/*
 	 * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
 	 * allocate the page. The expectation is that the caller is taking
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 927f5ee24c87..4639163b78f9 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -128,9 +128,7 @@ static void unset_migratetype_isolate(struct page *page, unsigned migratetype)
 out:
 	spin_unlock_irqrestore(&zone->lock, flags);
 	if (isolated_page) {
-		kernel_map_pages(page, (1 << order), 1);
-		set_page_refcounted(page);
-		set_page_owner(page, order, __GFP_MOVABLE);
+		post_alloc_hook(page, order, __GFP_MOVABLE);
 		__free_pages(isolated_page, order);
 	}
 }
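
With the hook consolidated, a call site that hands an isolated page back to the buddy allocator collapses into a single preparation call followed by the free. The sketch below is illustrative only, assuming a hypothetical wrapper named free_isolated_page(); it mirrors the pattern now used by unset_migratetype_isolate() above but is not code introduced by this patch.

/*
 * Illustrative sketch only: mirrors what unset_migratetype_isolate()
 * does after this patch. free_isolated_page() is a hypothetical name,
 * not a function added by the change.
 */
static void free_isolated_page(struct page *page, unsigned int order)
{
	/*
	 * Run the consolidated post-allocation steps: clear
	 * page->private, set the refcount, and invoke the arch,
	 * page-poisoning, KASAN and page_owner hooks.
	 */
	post_alloc_hook(page, order, __GFP_MOVABLE);

	/* Return the fully prepared page(s) to the buddy allocator. */
	__free_pages(page, order);
}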