author		Sidhartha Kumar <sidhartha.kumar@oracle.com>	2023-01-13 16:30:55 -0600
committer	Andrew Morton <akpm@linux-foundation.org>	2023-02-13 15:54:27 -0800
commit		e37d3e838d9078538f920957d1e89682b6764977 (patch)
tree		469e711a0f828039941d301b30bfd502be1e86d6 /mm
parent		ff7d853b031302376a0d3640fa1c463d94079637 (diff)
mm/hugetlb: convert alloc_migrate_huge_page to folios
Change alloc_huge_page_nodemask() to alloc_hugetlb_folio_nodemask() and
alloc_migrate_huge_page() to alloc_migrate_hugetlb_folio().  Both
functions now return a folio rather than a page.

Link: https://lkml.kernel.org/r/20230113223057.173292-7-sidhartha.kumar@oracle.com
Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Muchun Song <songmuchun@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
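
For context, here is a minimal caller-side sketch (not part of this patch)
of the converted interface. The wrapper example_alloc_target() and its
parameter names are hypothetical; only alloc_hugetlb_folio_nodemask() and
the &folio->page conversion come from the diff that follows.

#include <linux/hugetlb.h>
#include <linux/mm.h>

/*
 * Illustrative sketch only -- not from this patch. Assumes kernel
 * context; example_alloc_target() is a hypothetical caller of the
 * interface converted below.
 */
static struct page *example_alloc_target(struct hstate *h, int nid,
					 nodemask_t *nmask, gfp_t gfp_mask)
{
	struct folio *folio;

	/* The allocator now hands back a folio ... */
	folio = alloc_hugetlb_folio_nodemask(h, nid, nmask, gfp_mask);
	if (!folio)
		return NULL;

	/*
	 * ... and callers that still traffic in pages convert at the
	 * edge. The patch itself returns &folio->page directly:
	 * struct page sits at offset zero in struct folio, so a NULL
	 * folio still yields a NULL page pointer.
	 */
	return &folio->page;
}

This mirrors what alloc_huge_page_vma() and alloc_migration_target() do in
the diff: the folio is the primary type, and a page pointer is derived only
where a legacy interface still requires one.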
Diffstat (limited to 'mm')
-rw-r--r--	mm/hugetlb.c	18
-rw-r--r--	mm/migrate.c	5
2 files changed, 13 insertions(+), 10 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 5d0d1efbe590..57894beb3382 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2419,7 +2419,7 @@ out_unlock:
 	return folio;
 }
 
-static struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
+static struct folio *alloc_migrate_hugetlb_folio(struct hstate *h, gfp_t gfp_mask,
 				     int nid, nodemask_t *nmask)
 {
 	struct folio *folio;
@@ -2439,7 +2439,7 @@ static struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
 	 */
 	folio_set_hugetlb_temporary(folio);
 
-	return &folio->page;
+	return folio;
 }
 
 /*
@@ -2472,8 +2472,8 @@ struct folio *alloc_buddy_hugetlb_folio_with_mpol(struct hstate *h,
 	return folio;
 }
 
-/* page migration callback function */
-struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
+/* folio migration callback function */
+struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
 		nodemask_t *nmask, gfp_t gfp_mask)
 {
 	spin_lock_irq(&hugetlb_lock);
@@ -2484,12 +2484,12 @@ struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
 			preferred_nid, nmask);
 		if (folio) {
 			spin_unlock_irq(&hugetlb_lock);
-			return &folio->page;
+			return folio;
 		}
 	}
 	spin_unlock_irq(&hugetlb_lock);
 
-	return alloc_migrate_huge_page(h, gfp_mask, preferred_nid, nmask);
+	return alloc_migrate_hugetlb_folio(h, gfp_mask, preferred_nid, nmask);
 }
 
 /* mempolicy aware migration callback */
@@ -2498,16 +2498,16 @@ struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
 {
 	struct mempolicy *mpol;
 	nodemask_t *nodemask;
-	struct page *page;
+	struct folio *folio;
 	gfp_t gfp_mask;
 	int node;
 
 	gfp_mask = htlb_alloc_mask(h);
 	node = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
-	page = alloc_huge_page_nodemask(h, node, nodemask, gfp_mask);
+	folio = alloc_hugetlb_folio_nodemask(h, node, nodemask, gfp_mask);
 	mpol_cond_put(mpol);
 
-	return page;
+	return &folio->page;
 }
 
 /*
diff --git a/mm/migrate.c b/mm/migrate.c
index f6464bce7678..811e76c6fac1 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1663,6 +1663,7 @@ struct page *alloc_migration_target(struct page *page, unsigned long private)
 	struct migration_target_control *mtc;
 	gfp_t gfp_mask;
 	unsigned int order = 0;
+	struct folio *hugetlb_folio = NULL;
 	struct folio *new_folio = NULL;
 	int nid;
 	int zidx;
@@ -1677,7 +1678,9 @@ struct page *alloc_migration_target(struct page *page, unsigned long private)
 		struct hstate *h = folio_hstate(folio);
 
 		gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
-		return alloc_huge_page_nodemask(h, nid, mtc->nmask, gfp_mask);
+		hugetlb_folio = alloc_hugetlb_folio_nodemask(h, nid,
+						mtc->nmask, gfp_mask);
+		return &hugetlb_folio->page;
 	}
 
 	if (folio_test_large(folio)) {