author    Qian Cai <cai@lca.pw>  2020-01-30 22:15:01 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>  2020-01-31 10:30:39 -0800
commit    3d680bdf60a5bade3e8cbd049927e7f8b1d3fe97 (patch)
tree      b755d6cf6f1fd2c19dd7eb28f5ab4f2beab5607b /mm
parent    4a55c0474a92d5c418bcbbe122368de0910aeac2 (diff)
mm/page_isolation: fix potential warning from user
It makes sense to call WARN_ON_ONCE(zone_idx(zone) == ZONE_MOVABLE) from
start_isolate_page_range(), but triggering it from userspace, i.e., from
is_mem_section_removable(), must be avoided, because a non-root user could
crash the system if panic_on_warn is set. While at it, simplify the code
a bit by removing an unnecessary jump label.

Link: http://lkml.kernel.org/r/20200120163915.1469-1-cai@lca.pw
Signed-off-by: Qian Cai <cai@lca.pw>
Suggested-by: Michal Hocko <mhocko@kernel.org>
Acked-by: Michal Hocko <mhocko@suse.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
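The shape of the fix is a general one: the checker hands the first offending
page back to its caller, and only the caller that owns the policy decides
whether to warn. A minimal standalone C sketch of that split follows; it is
an illustration only, and every name in it (struct fake_page,
find_unmovable(), isolate_range(), check_removable()) is a hypothetical
stand-in for the kernel's has_unmovable_pages(), start_isolate_page_range()
and is_mem_section_removable().

/* sketch.c -- standalone illustration, not kernel code */
#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

struct fake_page {
	int pfn;
	bool reserved;
};

/*
 * Checker: report the first offending entry (or NULL) and leave any
 * warning entirely to the caller, mirroring how has_unmovable_pages()
 * now returns the page instead of warning itself.
 */
static struct fake_page *find_unmovable(struct fake_page *pages, size_t n)
{
	for (size_t i = 0; i < n; i++)
		if (pages[i].reserved)
			return &pages[i];
	return NULL;
}

/*
 * Kernel-internal path (stand-in for start_isolate_page_range()):
 * a failure here is unexpected, so it warns loudly.
 */
static int isolate_range(struct fake_page *pages, size_t n)
{
	struct fake_page *bad = find_unmovable(pages, n);

	if (bad) {
		fprintf(stderr, "WARN: unmovable pfn %d in movable range\n",
			bad->pfn);
		return -1;
	}
	return 0;
}

/*
 * User-triggerable path (stand-in for is_mem_section_removable()):
 * the same check, but it fails quietly.
 */
static bool check_removable(struct fake_page *pages, size_t n)
{
	return find_unmovable(pages, n) == NULL;
}

int main(void)
{
	struct fake_page pages[] = { { 1, false }, { 2, true } };

	printf("removable: %s\n", check_removable(pages, 2) ? "yes" : "no");
	return isolate_range(pages, 2) ? 1 : 0;
}

The warn-or-stay-quiet decision moves to the layer that knows who asked,
which is exactly what the two hunks below do.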
Diffstat (limited to 'mm')
-rw-r--r--  mm/page_alloc.c     | 11
-rw-r--r--  mm/page_isolation.c | 18
2 files changed, 15 insertions(+), 14 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2a1a816c7992..15e908ad933b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -8214,7 +8214,7 @@ struct page *has_unmovable_pages(struct zone *zone, struct page *page,
 		if (is_migrate_cma(migratetype))
 			return NULL;
 
-		goto unmovable;
+		return page;
 	}
 
 	for (; iter < pageblock_nr_pages; iter++) {
@@ -8224,7 +8224,7 @@ struct page *has_unmovable_pages(struct zone *zone, struct page *page,
 		page = pfn_to_page(pfn + iter);
 
 		if (PageReserved(page))
-			goto unmovable;
+			return page;
 
 		/*
 		 * If the zone is movable and we have ruled out all reserved
@@ -8244,7 +8244,7 @@ struct page *has_unmovable_pages(struct zone *zone, struct page *page,
 			unsigned int skip_pages;
 
 			if (!hugepage_migration_supported(page_hstate(head)))
-				goto unmovable;
+				return page;
 
 			skip_pages = compound_nr(head) - (page - head);
 			iter += skip_pages - 1;
@@ -8286,12 +8286,9 @@ struct page *has_unmovable_pages(struct zone *zone, struct page *page,
 		 * is set to both of a memory hole page and a _used_ kernel
 		 * page at boot.
 		 */
-		goto unmovable;
+		return page;
 	}
 	return NULL;
-unmovable:
-	WARN_ON_ONCE(zone_idx(zone) == ZONE_MOVABLE);
-	return pfn_to_page(pfn + iter);
 }
 
 #ifdef CONFIG_CONTIG_ALLOC
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index e70586523ca3..a9fd7c740c23 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -54,14 +54,18 @@ static int set_migratetype_isolate(struct page *page, int migratetype, int isol_
 
 out:
 	spin_unlock_irqrestore(&zone->lock, flags);
-	if (!ret)
+	if (!ret) {
 		drain_all_pages(zone);
-	else if ((isol_flags & REPORT_FAILURE) && unmovable)
-		/*
-		 * printk() with zone->lock held will guarantee to trigger a
-		 * lockdep splat, so defer it here.
-		 */
-		dump_page(unmovable, "unmovable page");
+	} else {
+		WARN_ON_ONCE(zone_idx(zone) == ZONE_MOVABLE);
+
+		if ((isol_flags & REPORT_FAILURE) && unmovable)
+			/*
+			 * printk() with zone->lock held will likely trigger a
+			 * lockdep splat, so defer it here.
+			 */
+			dump_page(unmovable, "unmovable page");
+	}
 
 	return ret;
 }
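With both hunks applied, has_unmovable_pages() only reports the first
unmovable page (or NULL), and the WARN_ON_ONCE() fires in
set_migratetype_isolate(), which is reached from start_isolate_page_range()
but not from is_mem_section_removable() (the path behind the memory hotplug
"removable" sysfs attribute), so an unprivileged read of that attribute can
no longer bring down a panic_on_warn kernel.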