summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorKefeng Wang <wangkefeng.wang@huawei.com>2023-09-21 15:44:15 +0800
committerAndrew Morton <akpm@linux-foundation.org>2023-10-16 15:44:37 -0700
commitcda6d93672ac5dd8af778a3f3e6082e12233b65b (patch)
tree67b176761ce339e2c5eef0ee943543eacb62c8d2
parent6695cf68b15c215d33b8add64c33e01e3cbe236c (diff)
downloadlinux-cda6d93672ac5dd8af778a3f3e6082e12233b65b.tar.gz
linux-cda6d93672ac5dd8af778a3f3e6082e12233b65b.tar.bz2
linux-cda6d93672ac5dd8af778a3f3e6082e12233b65b.zip
mm: memory: make numa_migrate_prep() to take a folio
In preparation for large folio numa balancing, make numa_migrate_prep() to take a folio, no functional change intended.

Link: https://lkml.kernel.org/r/20230921074417.24004-5-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: "Huang, Ying" <ying.huang@intel.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
-rw-r--r--mm/huge_memory.c2
-rw-r--r--mm/internal.h2
-rw-r--r--mm/memory.c9
3 files changed, 6 insertions, 7 deletions
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 5baf9b6dc522..aa0224556132 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1556,7 +1556,7 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
*/
if (node_is_toptier(nid))
last_cpupid = page_cpupid_last(&folio->page);
- target_nid = numa_migrate_prep(&folio->page, vma, haddr, nid, &flags);
+ target_nid = numa_migrate_prep(folio, vma, haddr, nid, &flags);
if (target_nid == NUMA_NO_NODE) {
folio_put(folio);
goto out_map;
diff --git a/mm/internal.h b/mm/internal.h
index 9e62c8b952b8..5a5c923725d3 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -983,7 +983,7 @@ void vunmap_range_noflush(unsigned long start, unsigned long end);
void __vunmap_range_noflush(unsigned long start, unsigned long end);
-int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
+int numa_migrate_prep(struct folio *folio, struct vm_area_struct *vma,
unsigned long addr, int page_nid, int *flags);
void free_zone_device_page(struct page *page);
diff --git a/mm/memory.c b/mm/memory.c
index 865741d9b6b9..20b290c9dc87 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4724,10 +4724,10 @@ static vm_fault_t do_fault(struct vm_fault *vmf)
return ret;
}
-int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
+int numa_migrate_prep(struct folio *folio, struct vm_area_struct *vma,
unsigned long addr, int page_nid, int *flags)
{
- get_page(page);
+ folio_get(folio);
/* Record the current PID acceesing VMA */
vma_set_access_pid_bit(vma);
@@ -4738,7 +4738,7 @@ int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
*flags |= TNF_FAULT_LOCAL;
}
- return mpol_misplaced(page, vma, addr);
+ return mpol_misplaced(&folio->page, vma, addr);
}
static vm_fault_t do_numa_page(struct vm_fault *vmf)
@@ -4812,8 +4812,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
last_cpupid = (-1 & LAST_CPUPID_MASK);
else
last_cpupid = page_cpupid_last(&folio->page);
- target_nid = numa_migrate_prep(&folio->page, vma, vmf->address, nid,
- &flags);
+ target_nid = numa_migrate_prep(folio, vma, vmf->address, nid, &flags);
if (target_nid == NUMA_NO_NODE) {
folio_put(folio);
goto out_map;