| | | |
|---|---|---|
| author | Matthew Wilcox (Oracle) <willy@infradead.org> | 2021-04-29 23:01:15 -0700 |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2021-04-30 11:20:42 -0700 |
| commit | 84172f4bb752424415756351a40f8da5714e1554 | |
| tree | daf3da208d6fa979bf4e32ca8dfe61e6b87e3fe0 /mm/mempolicy.c | |
| parent | 6e5e0f286eb0ecf12afaa3e73c321bc5bf599abb | |
mm/page_alloc: combine __alloc_pages and __alloc_pages_nodemask
There are only two callers of __alloc_pages(), so prune the thicket of
alloc_page variants by combining the two functions. Current callers of
__alloc_pages() simply pass an extra NULL nodemask argument, and current
callers of __alloc_pages_nodemask() call __alloc_pages() instead.
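
A minimal sketch (not part of the patch) of how call sites change, using the variable names seen in the mm/mempolicy.c hunks below:

```c
/* Before: two entry points */
page = __alloc_pages(gfp, order, nid);                           /* no nodemask */
page = __alloc_pages_nodemask(gfp, order, preferred_nid, nmask); /* with nodemask */

/* After: a single entry point; callers without a nodemask pass NULL */
page = __alloc_pages(gfp, order, nid, NULL);
page = __alloc_pages(gfp, order, preferred_nid, nmask);
```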
Link: https://lkml.kernel.org/r/20210225150642.2582252-4-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Mike Rapoport <rppt@linux.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/mempolicy.c')
-rw-r--r-- | mm/mempolicy.c | 6 |
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index ab51132547b8..5f0d20298736 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2140,7 +2140,7 @@ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
 {
 	struct page *page;
 
-	page = __alloc_pages(gfp, order, nid);
+	page = __alloc_pages(gfp, order, nid, NULL);
 	/* skip NUMA_INTERLEAVE_HIT counter update if numa stats is disabled */
 	if (!static_branch_likely(&vm_numa_stat_key))
 		return page;
@@ -2237,7 +2237,7 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
 
 	nmask = policy_nodemask(gfp, pol);
 	preferred_nid = policy_node(gfp, pol, node);
-	page = __alloc_pages_nodemask(gfp, order, preferred_nid, nmask);
+	page = __alloc_pages(gfp, order, preferred_nid, nmask);
 	mpol_cond_put(pol);
 out:
 	return page;
@@ -2274,7 +2274,7 @@ struct page *alloc_pages_current(gfp_t gfp, unsigned order)
 	if (pol->mode == MPOL_INTERLEAVE)
 		page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
 	else
-		page = __alloc_pages_nodemask(gfp, order,
+		page = __alloc_pages(gfp, order,
 				policy_node(gfp, pol, numa_node_id()),
 				policy_nodemask(gfp, pol));
 
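
For reference, a sketch of the combined entry point after this change; the declaration itself lives in include/linux/gfp.h and is outside this diffstat, which is limited to mm/mempolicy.c:

```c
/*
 * Single allocator entry point after the merge; a NULL nodemask means
 * "no explicit nodemask", matching the old three-argument __alloc_pages().
 */
struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
			   nodemask_t *nodemask);
```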