author	Pavel Tatashin <pasha.tatashin@soleen.com>	2021-05-04 18:39:08 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2021-05-05 11:27:26 -0700
commit	d1e153fea2a8940273174fc17733c44323d35cd5 (patch)
tree	d5a5caa26fdb5f316008627fbae539421a8bf627 /mm
parent	9afaf30f7a1aab2022961715a66f644275b8daec (diff)
mm/gup: migrate pinned pages out of movable zone
We should not pin pages in ZONE_MOVABLE.  Currently, the only pages we
avoid pinning are movable CMA pages.  Generalize the function that
migrates CMA pages to migrate all movable pages.  Use is_pinnable_page()
to check which pages need to be migrated.

Link: https://lkml.kernel.org/r/20210215161349.246722-10-pasha.tatashin@soleen.com
Signed-off-by: Pavel Tatashin <pasha.tatashin@soleen.com>
Reviewed-by: John Hubbard <jhubbard@nvidia.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Ira Weiny <ira.weiny@intel.com>
Cc: James Morris <jmorris@namei.org>
Cc: Jason Gunthorpe <jgg@nvidia.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Sasha Levin <sashal@kernel.org>
Cc: Steven Rostedt (VMware) <rostedt@goodmis.org>
Cc: Tyler Hicks <tyhicks@linux.microsoft.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
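For context, is_pinnable_page() was introduced earlier in this series in
include/linux/mm.h.  The following is a minimal sketch of what it checks,
reconstructed from the series rather than quoted verbatim (the exact
predicate in the real helper may be spelled differently):

#ifdef CONFIG_MIGRATION
/* Sketch: pages in ZONE_MOVABLE or in MIGRATE_CMA pageblocks must stay
 * migratable, so they are not allowed to be long-term pinned in place. */
static inline bool is_pinnable_page(struct page *page)
{
	return !(zone_idx(page_zone(page)) == ZONE_MOVABLE ||
		 is_migrate_cma_page(page));
}
#else
/* Without migration support nothing can be moved, so any page may be
 * pinned where it sits. */
static inline bool is_pinnable_page(struct page *page)
{
	return true;
}
#endif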
Diffstat (limited to 'mm')
-rw-r--r--	mm/gup.c	67
1 file changed, 34 insertions(+), 33 deletions(-)
diff --git a/mm/gup.c b/mm/gup.c
index a1eff7ad31da..4bc57420f535 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -87,11 +87,12 @@ __maybe_unused struct page *try_grab_compound_head(struct page *page,
int orig_refs = refs;
/*
- * Can't do FOLL_LONGTERM + FOLL_PIN with CMA in the gup fast
- * path, so fail and let the caller fall back to the slow path.
+ * Can't do FOLL_LONGTERM + FOLL_PIN gup fast path if not in a
+ * right zone, so fail and let the caller fall back to the slow
+ * path.
*/
- if (unlikely(flags & FOLL_LONGTERM) &&
- is_migrate_cma_page(page))
+ if (unlikely((flags & FOLL_LONGTERM) &&
+ !is_pinnable_page(page)))
return NULL;
/*
@@ -1600,17 +1601,17 @@ struct page *get_dump_page(unsigned long addr)
}
#endif /* CONFIG_ELF_CORE */
-#ifdef CONFIG_CMA
-static long check_and_migrate_cma_pages(struct mm_struct *mm,
- unsigned long start,
- unsigned long nr_pages,
- struct page **pages,
- struct vm_area_struct **vmas,
- unsigned int gup_flags)
+#ifdef CONFIG_MIGRATION
+static long check_and_migrate_movable_pages(struct mm_struct *mm,
+ unsigned long start,
+ unsigned long nr_pages,
+ struct page **pages,
+ struct vm_area_struct **vmas,
+ unsigned int gup_flags)
{
unsigned long i, isolation_error_count;
bool drain_allow;
- LIST_HEAD(cma_page_list);
+ LIST_HEAD(movable_page_list);
long ret = nr_pages;
struct page *prev_head, *head;
struct migration_target_control mtc = {
@@ -1628,13 +1629,12 @@ check_again:
continue;
prev_head = head;
/*
- * If we get a page from the CMA zone, since we are going to
- * be pinning these entries, we might as well move them out
- * of the CMA zone if possible.
+ * If we get a movable page, since we are going to be pinning
+ * these entries, try to move them out if possible.
*/
- if (is_migrate_cma_page(head)) {
+ if (!is_pinnable_page(head)) {
if (PageHuge(head)) {
- if (!isolate_huge_page(head, &cma_page_list))
+ if (!isolate_huge_page(head, &movable_page_list))
isolation_error_count++;
} else {
if (!PageLRU(head) && drain_allow) {
@@ -1646,7 +1646,7 @@ check_again:
isolation_error_count++;
continue;
}
- list_add_tail(&head->lru, &cma_page_list);
+ list_add_tail(&head->lru, &movable_page_list);
mod_node_page_state(page_pgdat(head),
NR_ISOLATED_ANON +
page_is_file_lru(head),
@@ -1659,10 +1659,10 @@ check_again:
* If list is empty, and no isolation errors, means that all pages are
* in the correct zone.
*/
- if (list_empty(&cma_page_list) && !isolation_error_count)
+ if (list_empty(&movable_page_list) && !isolation_error_count)
return ret;
- if (!list_empty(&cma_page_list)) {
+ if (!list_empty(&movable_page_list)) {
/*
* drop the above get_user_pages reference.
*/
@@ -1672,12 +1672,12 @@ check_again:
for (i = 0; i < nr_pages; i++)
put_page(pages[i]);
- ret = migrate_pages(&cma_page_list, alloc_migration_target,
+ ret = migrate_pages(&movable_page_list, alloc_migration_target,
NULL, (unsigned long)&mtc, MIGRATE_SYNC,
- MR_CONTIG_RANGE);
+ MR_LONGTERM_PIN);
if (ret) {
- if (!list_empty(&cma_page_list))
- putback_movable_pages(&cma_page_list);
+ if (!list_empty(&movable_page_list))
+ putback_movable_pages(&movable_page_list);
return ret > 0 ? -ENOMEM : ret;
}
@@ -1696,16 +1696,16 @@ check_again:
goto check_again;
}
#else
-static long check_and_migrate_cma_pages(struct mm_struct *mm,
- unsigned long start,
- unsigned long nr_pages,
- struct page **pages,
- struct vm_area_struct **vmas,
- unsigned int gup_flags)
+static long check_and_migrate_movable_pages(struct mm_struct *mm,
+ unsigned long start,
+ unsigned long nr_pages,
+ struct page **pages,
+ struct vm_area_struct **vmas,
+ unsigned int gup_flags)
{
return nr_pages;
}
-#endif /* CONFIG_CMA */
+#endif /* CONFIG_MIGRATION */
/*
* __gup_longterm_locked() is a wrapper for __get_user_pages_locked which
@@ -1729,8 +1729,9 @@ static long __gup_longterm_locked(struct mm_struct *mm,
if (gup_flags & FOLL_LONGTERM) {
if (rc > 0)
- rc = check_and_migrate_cma_pages(mm, start, rc, pages,
- vmas, gup_flags);
+ rc = check_and_migrate_movable_pages(mm, start, rc,
+ pages, vmas,
+ gup_flags);
memalloc_pin_restore(flags);
}
return rc;
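As a usage illustration (not part of this patch): after this change, a
driver taking a FOLL_LONGTERM pin transparently triggers the migration.
The gup-fast path refuses non-pinnable pages (first hunk above) and falls
back to the slow path, where check_and_migrate_movable_pages() moves them
out of ZONE_MOVABLE/CMA before the pin is granted.  The helper below is a
hypothetical sketch; only pin_user_pages_fast(), unpin_user_page(), and
the FOLL_* flags are real kernel API:

#include <linux/mm.h>

/* Hypothetical example: long-term pin one writable user page. */
static int example_longterm_pin(unsigned long uaddr)
{
	struct page *page;
	int ret;

	/* FOLL_LONGTERM makes gup migrate the page to a pinnable zone
	 * before pinning it, so the pin never lands in ZONE_MOVABLE. */
	ret = pin_user_pages_fast(uaddr, 1, FOLL_WRITE | FOLL_LONGTERM, &page);
	if (ret != 1)
		return ret < 0 ? ret : -EFAULT;

	/* ... the page can now stay pinned indefinitely, e.g. for DMA ... */

	unpin_user_page(page);
	return 0;
}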