path: root/mm/page_isolation.c
author	Zi Yan <ziy@nvidia.com>	2022-05-12 20:22:58 -0700
committer	Andrew Morton <akpm@linux-foundation.org>	2022-05-13 07:20:13 -0700
commit	6e263fff1de48fcd97b680b54cd8d1695fc3c776 (patch)
tree	393bfa41a800b63161c526772acebd12835d688b /mm/page_isolation.c
parent	b2c9e2fbba32539626522b6aed30d1dde7b7e971 (diff)
mm: page_isolation: enable arbitrary range page isolation.
Now start_isolate_page_range() is ready to handle arbitrary range isolation, so move the alignment check/adjustment into the function body. Do the same for its counterpart undo_isolate_page_range(). alloc_contig_range(), its caller, can pass an arbitrary range instead of a MAX_ORDER_NR_PAGES aligned one.

Link: https://lkml.kernel.org/r/20220425143118.2850746-5-zi.yan@sent.com
Signed-off-by: Zi Yan <ziy@nvidia.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: David Hildenbrand <david@redhat.com>
Cc: Eric Ren <renzhengeek@gmail.com>
Cc: kernel test robot <lkp@intel.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Mike Rapoport <rppt@linux.ibm.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
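As a rough illustration of the rounding this patch moves into the isolation helpers, here is a minimal user-space C sketch (not kernel code) of how an arbitrary [start_pfn, end_pfn) range is widened to the pageblock-aligned [isolate_start, isolate_end) range that start_isolate_page_range() and undo_isolate_page_range() now compute internally. The pageblock size of 512 pages and the example PFNs are assumptions for illustration only.

/*
 * Sketch of the pageblock rounding done inside start_isolate_page_range()
 * and undo_isolate_page_range() after this patch.  Plain user-space C;
 * PAGEBLOCK_NR_PAGES is an assumed example value, not the kernel constant.
 */
#include <stdio.h>

#define PAGEBLOCK_NR_PAGES	512UL	/* assumed: 2MB pageblock with 4KB pages */
/* Same semantics as the kernel's ALIGN_DOWN()/ALIGN() for power-of-two 'a'. */
#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))
#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	/* An arbitrary, unaligned range such as alloc_contig_range() may now pass. */
	unsigned long start_pfn = 1000, end_pfn = 3000;

	/* The helpers round outward to whole pageblocks before isolating. */
	unsigned long isolate_start = ALIGN_DOWN(start_pfn, PAGEBLOCK_NR_PAGES);
	unsigned long isolate_end = ALIGN(end_pfn, PAGEBLOCK_NR_PAGES);

	/* Prints: [1000, 3000) -> pageblock-aligned [512, 3072) */
	printf("[%lu, %lu) -> pageblock-aligned [%lu, %lu)\n",
	       start_pfn, end_pfn, isolate_start, isolate_end);
	return 0;
}

The aligned range always covers the requested one, so undo_isolate_page_range() called with the same unaligned [start_pfn, end_pfn) recomputes the identical pageblock boundaries and unisolates exactly what was isolated.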
Diffstat (limited to 'mm/page_isolation.c')
-rw-r--r--	mm/page_isolation.c	33
1 file changed, 16 insertions(+), 17 deletions(-)
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 8a0f16d2e4c3..b3f074d1682e 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -444,7 +444,6 @@ failed:
  * be MIGRATE_ISOLATE.
  * @start_pfn: The lower PFN of the range to be isolated.
  * @end_pfn: The upper PFN of the range to be isolated.
- * start_pfn/end_pfn must be aligned to pageblock_order.
  * @migratetype: Migrate type to set in error recovery.
  * @flags: The following flags are allowed (they can be combined in
  * a bit mask)
@@ -491,33 +490,33 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
 {
 	unsigned long pfn;
 	struct page *page;
+	/* isolation is done at page block granularity */
+	unsigned long isolate_start = ALIGN_DOWN(start_pfn, pageblock_nr_pages);
+	unsigned long isolate_end = ALIGN(end_pfn, pageblock_nr_pages);
 	int ret;
-	BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
-	BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));
-
-	/* isolate [start_pfn, start_pfn + pageblock_nr_pages) pageblock */
-	ret = isolate_single_pageblock(start_pfn, gfp_flags, false);
+	/* isolate [isolate_start, isolate_start + pageblock_nr_pages) pageblock */
+	ret = isolate_single_pageblock(isolate_start, gfp_flags, false);
 	if (ret)
 		return ret;
-	/* isolate [end_pfn - pageblock_nr_pages, end_pfn) pageblock */
-	ret = isolate_single_pageblock(end_pfn, gfp_flags, true);
+	/* isolate [isolate_end - pageblock_nr_pages, isolate_end) pageblock */
+	ret = isolate_single_pageblock(isolate_end, gfp_flags, true);
 	if (ret) {
-		unset_migratetype_isolate(pfn_to_page(start_pfn), migratetype);
+		unset_migratetype_isolate(pfn_to_page(isolate_start), migratetype);
 		return ret;
 	}
 	/* skip isolated pageblocks at the beginning and end */
-	for (pfn = start_pfn + pageblock_nr_pages;
-	     pfn < end_pfn - pageblock_nr_pages;
+	for (pfn = isolate_start + pageblock_nr_pages;
+	     pfn < isolate_end - pageblock_nr_pages;
 	     pfn += pageblock_nr_pages) {
 		page = __first_valid_page(pfn, pageblock_nr_pages);
 		if (page && set_migratetype_isolate(page, migratetype, flags,
 					start_pfn, end_pfn)) {
-			undo_isolate_page_range(start_pfn, pfn, migratetype);
+			undo_isolate_page_range(isolate_start, pfn, migratetype);
 			unset_migratetype_isolate(
-				pfn_to_page(end_pfn - pageblock_nr_pages),
+				pfn_to_page(isolate_end - pageblock_nr_pages),
 				migratetype);
 			return -EBUSY;
 		}
@@ -533,12 +532,12 @@ void undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
 {
 	unsigned long pfn;
 	struct page *page;
+	unsigned long isolate_start = ALIGN_DOWN(start_pfn, pageblock_nr_pages);
+	unsigned long isolate_end = ALIGN(end_pfn, pageblock_nr_pages);
-	BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
-	BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));
-	for (pfn = start_pfn;
-	     pfn < end_pfn;
+	for (pfn = isolate_start;
+	     pfn < isolate_end;
 	     pfn += pageblock_nr_pages) {
 		page = __first_valid_page(pfn, pageblock_nr_pages);
 		if (!page || !is_migrate_isolate_page(page))