author		Matthew Wilcox (Oracle) <willy@infradead.org>	2022-09-02 20:46:47 +0100
committer	Andrew Morton <akpm@linux-foundation.org>	2022-10-03 14:02:54 -0700
commit		c33db29231ad242b0c381c60b1603f5e1dec7e46
tree		14930d635d728afb20bb34bff2320f8c991f5777 /mm/migrate.c
parent		682a71a1b6b363bff71440f4eca6498f827a839d
migrate: convert unmap_and_move_huge_page() to use folios
Saves several calls to compound_head() and removes a couple of uses of
page->lru.

Link: https://lkml.kernel.org/r/20220902194653.1739778-52-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
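For readers unfamiliar with the pattern, here is a minimal sketch of the kind of
conversion this patch performs; the demo_hugetlb_check() helper and its body are
illustrative only and are not code from the patch. The idea is to resolve the head
page to a folio once with page_folio(), then call the folio_* accessors, which take
the folio directly and so skip the compound_head() lookup hidden in the page-based
helpers.

#include <linux/mm.h>		/* assumed to provide page_folio(), folio_mapped(), folio_ref_count() */
#include <linux/pagemap.h>	/* assumed to provide folio_trylock(), folio_unlock() */

/* Illustrative sketch only; not part of this patch. */
static int demo_hugetlb_check(struct page *hpage)
{
	struct folio *src = page_folio(hpage);	/* one compound_head() lookup, up front */

	if (folio_ref_count(src) == 1)		/* operates on the folio directly */
		return 0;			/* freed from under us; nothing to do */

	if (!folio_trylock(src))
		return -EAGAIN;

	/* ... folio_mapping(src), folio_test_anon(src), folio_mapped(src) ... */

	folio_unlock(src);
	return 0;
}

Each folio_* call above works on the folio it was handed, instead of re-deriving the
head page from a tail page the way the page-based helpers do.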
Diffstat (limited to 'mm/migrate.c')
-rw-r--r--	mm/migrate.c	28
1 file changed, 14 insertions(+), 14 deletions(-)
diff --git a/mm/migrate.c b/mm/migrate.c
index 1ea149f14f84..c1c2d9d9032b 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1263,7 +1263,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
 	if (!hugepage_migration_supported(page_hstate(hpage)))
 		return -ENOSYS;
 
-	if (page_count(hpage) == 1) {
+	if (folio_ref_count(src) == 1) {
 		/* page was freed from under us. So we are done. */
 		putback_active_hugepage(hpage);
 		return MIGRATEPAGE_SUCCESS;
@@ -1274,7 +1274,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
 		return -ENOMEM;
 	dst = page_folio(new_hpage);
 
-	if (!trylock_page(hpage)) {
+	if (!folio_trylock(src)) {
 		if (!force)
 			goto out;
 		switch (mode) {
@@ -1284,29 +1284,29 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
 		default:
 			goto out;
 		}
-		lock_page(hpage);
+		folio_lock(src);
 	}
 
 	/*
 	 * Check for pages which are in the process of being freed. Without
-	 * page_mapping() set, hugetlbfs specific move page routine will not
+	 * folio_mapping() set, hugetlbfs specific move page routine will not
 	 * be called and we could leak usage counts for subpools.
 	 */
-	if (hugetlb_page_subpool(hpage) && !page_mapping(hpage)) {
+	if (hugetlb_page_subpool(hpage) && !folio_mapping(src)) {
 		rc = -EBUSY;
 		goto out_unlock;
 	}
 
-	if (PageAnon(hpage))
-		anon_vma = page_get_anon_vma(hpage);
+	if (folio_test_anon(src))
+		anon_vma = page_get_anon_vma(&src->page);
 
-	if (unlikely(!trylock_page(new_hpage)))
+	if (unlikely(!folio_trylock(dst)))
 		goto put_anon;
 
-	if (page_mapped(hpage)) {
+	if (folio_mapped(src)) {
 		enum ttu_flags ttu = 0;
 
-		if (!PageAnon(hpage)) {
+		if (!folio_test_anon(src)) {
 			/*
 			 * In shared mappings, try_to_unmap could potentially
 			 * call huge_pmd_unshare. Because of this, take
@@ -1327,7 +1327,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
 		i_mmap_unlock_write(mapping);
 	}
 
-	if (!page_mapped(hpage))
+	if (!folio_mapped(src))
 		rc = move_to_new_folio(dst, src, mode);
 
 	if (page_was_mapped)
@@ -1335,7 +1335,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
 			rc == MIGRATEPAGE_SUCCESS ? dst : src, false);
 
 unlock_put_anon:
-	unlock_page(new_hpage);
+	folio_unlock(dst);
 
 put_anon:
 	if (anon_vma)
@@ -1347,12 +1347,12 @@ put_anon:
 	}
 
 out_unlock:
-	unlock_page(hpage);
+	folio_unlock(src);
 out:
 	if (rc == MIGRATEPAGE_SUCCESS)
 		putback_active_hugepage(hpage);
 	else if (rc != -EAGAIN)
-		list_move_tail(&hpage->lru, ret);
+		list_move_tail(&src->lru, ret);
 
 	/*
 	 * If migration was not successful and there's a freeing callback, use