author		Baolin Wang <baolin.wang@linux.alibaba.com>	2022-11-09 16:40:27 +0800
committer	Andrew Morton <akpm@linux-foundation.org>	2022-11-30 15:58:53 -0800
commit		16fd6b31dd9b24acf83d439a73a41c4138199424
tree		c12478b16715cca5ffb51f70bf82a2d955a16719
parent		c66b6ead74ffdad8659eb829468343a88afc2f2c
Revert "mm: migration: fix the FOLL_GET failure on following huge page"
Revert commit 831568214883 ("mm: migration: fix the FOLL_GET failure on
following huge page"), since after commit 1a6baaa0db73 ("s390/hugetlb:
switch to generic version of follow_huge_pud()") and commit 57a196a58421
("hugetlb: simplify hugetlb handling in follow_page_mask") were merged,
all of the huge page following routines now support the FOLL_GET operation.
Link: https://lkml.kernel.org/r/496786039852aba90ffa68f10d0df3f4236a990b.1667983080.git.baolin.wang@linux.alibaba.com
Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Acked-by: Haiyue Wang <haiyue.wang@intel.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: "Huang, Ying" <ying.huang@intel.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Muchun Song <songmuchun@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
 mm/migrate.c | 10 ++--------
 1 file changed, 2 insertions(+), 8 deletions(-)
diff --git a/mm/migrate.c b/mm/migrate.c
index 4aea647a0180..4eccf4e1da2c 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1899,7 +1899,6 @@ static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
 
 	for (i = 0; i < nr_pages; i++) {
 		unsigned long addr = (unsigned long)(*pages);
-		unsigned int foll_flags = FOLL_DUMP;
 		struct vm_area_struct *vma;
 		struct page *page;
 		int err = -EFAULT;
@@ -1908,12 +1907,8 @@ static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
 		if (!vma)
 			goto set_status;
 
-		/* Not all huge page follow APIs support 'FOLL_GET' */
-		if (!is_vm_hugetlb_page(vma))
-			foll_flags |= FOLL_GET;
-
 		/* FOLL_DUMP to ignore special (like zero) pages */
-		page = follow_page(vma, addr, foll_flags);
+		page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
 
 		err = PTR_ERR(page);
 		if (IS_ERR(page))
@@ -1926,8 +1921,7 @@ static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
 		if (!is_zone_device_page(page))
 			err = page_to_nid(page);
 
-		if (foll_flags & FOLL_GET)
-			put_page(page);
+		put_page(page);
 set_status:
 		*status = err;
 
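For readers following the reference counting: with the revert in place, follow_page() is always called with FOLL_GET, which takes a reference on the returned page that the caller must drop with put_page() once it has read the node id. Below is a minimal sketch of that caller contract, condensed from do_pages_stat_array() above; the helper name stat_node_of_addr() is hypothetical, and it assumes the caller already holds mmap_read_lock(mm), as the real per-page loop does.

#include <linux/mm.h>
#include <linux/err.h>
#include <linux/errno.h>

/*
 * Hypothetical helper, condensed from do_pages_stat_array():
 * report the NUMA node backing @addr in @mm, or a negative errno.
 * Caller must hold mmap_read_lock(mm).
 */
static int stat_node_of_addr(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;
	struct page *page;
	int err;

	vma = vma_lookup(mm, addr);
	if (!vma)
		return -EFAULT;

	/*
	 * FOLL_DUMP makes special (e.g. zero) pages fail; FOLL_GET takes a
	 * reference we must drop. After the two prerequisite commits this
	 * also works for hugetlb VMAs, so no special casing is needed.
	 */
	page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
	if (IS_ERR(page))
		return PTR_ERR(page);
	if (!page)
		return -ENOENT;

	err = is_zone_device_page(page) ? -ENOENT : page_to_nid(page);

	put_page(page);	/* drop the reference taken by FOLL_GET */
	return err;
}

Before the revert, the FOLL_GET bit was skipped for hugetlb VMAs and put_page() had to be guarded by the same condition; once the listed prerequisite commits landed, that asymmetry became unnecessary, which is all this revert restores.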