author     Mike Rapoport <rppt@linux.ibm.com>  2021-05-17 21:15:15 +0300
committer  Mike Rapoport <rppt@linux.ibm.com>  2021-06-30 11:38:33 +0300
commit     e2a86800d58639b3acde7eaeb9eb393dca066e08 (patch)
tree       964e0c209239ff75e7aa615af95bade6f1eef368 /mm
parent     c4681547bcce777daf576925a966ffa824edd09d (diff)
memblock: free_unused_memmap: use pageblock units instead of MAX_ORDER
The code that frees the unused memory map rounds the start and end of the freed holes to MAX_ORDER_NR_PAGES to preserve continuity of the memory map for MAX_ORDER regions.

Lots of core memory management functionality relies on homogeneity of the memory map within each pageblock, whose size may differ from MAX_ORDER in certain configurations. Although currently, for the architectures that use free_unused_memmap(), pageblock_order and MAX_ORDER are equivalent, it is cleaner to use a common notation throughout mm code.

Replace MAX_ORDER_NR_PAGES with pageblock_nr_pages and update the comments to make it more clear why the alignment to pageblock boundaries is required.

Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
Tested-by: Tony Lindgren <tony@atomide.com>
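For illustration, a minimal, self-contained sketch (not kernel code) of the boundary trimming described above: the memmap freed for a hole between two banks is shrunk to pageblock boundaries, so every pageblock that still overlaps a memory bank keeps a complete memory map. PAGEBLOCK_NR_PAGES, the ROUND_DOWN()/ALIGN_UP() helpers, and the example pfn values below are stand-ins chosen for this sketch, not the kernel's definitions.

#include <stdio.h>

/* Example value only: a 2MB pageblock with 4K pages. */
#define PAGEBLOCK_NR_PAGES	512UL
/* Simplified stand-ins for the kernel's round_down()/ALIGN(); 'a' must be a power of two. */
#define ROUND_DOWN(x, a)	((x) & ~((a) - 1))
#define ALIGN_UP(x, a)		(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	/*
	 * Hypothetical hole in the memory map, in page frame numbers:
	 * the previous bank ends at pfn 1000, the next bank starts at pfn 5000.
	 */
	unsigned long prev_bank_end = 1000, next_bank_start = 5000;

	/*
	 * Trim the hole to pageblock boundaries before freeing its memmap:
	 * the end of the previous bank is aligned up and the start of the
	 * next bank is rounded down, so neither bank loses memmap entries
	 * inside a pageblock it still touches.
	 */
	unsigned long free_start = ALIGN_UP(prev_bank_end, PAGEBLOCK_NR_PAGES);
	unsigned long free_end = ROUND_DOWN(next_bank_start, PAGEBLOCK_NR_PAGES);

	if (free_start < free_end)
		printf("free memmap for pfns [%lu, %lu)\n", free_start, free_end);

	return 0;
}

With these example values the memmap for pfns [1024, 4608) would be freed; before this patch the same trimming used MAX_ORDER_NR_PAGES, which happens to equal pageblock_nr_pages on the architectures that use free_unused_memmap().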
Diffstat (limited to 'mm')
-rw-r--r--  mm/memblock.c  16
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/mm/memblock.c b/mm/memblock.c
index afaefa8fc6ab..97fa87541b5f 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -1943,11 +1943,11 @@ static void __init free_unused_memmap(void)
 		start = min(start, ALIGN(prev_end, PAGES_PER_SECTION));
 #else
 		/*
-		 * Align down here since the VM subsystem insists that the
-		 * memmap entries are valid from the bank start aligned to
-		 * MAX_ORDER_NR_PAGES.
+		 * Align down here since many operations in VM subsystem
+		 * presume that there are no holes in the memory map inside
+		 * a pageblock
 		 */
-		start = round_down(start, MAX_ORDER_NR_PAGES);
+		start = round_down(start, pageblock_nr_pages);
 #endif
 
 		/*
@@ -1958,11 +1958,11 @@ static void __init free_unused_memmap(void)
 			free_memmap(prev_end, start);
 
 		/*
-		 * Align up here since the VM subsystem insists that the
-		 * memmap entries are valid from the bank end aligned to
-		 * MAX_ORDER_NR_PAGES.
+		 * Align up here since many operations in VM subsystem
+		 * presume that there are no holes in the memory map inside
+		 * a pageblock
 		 */
-		prev_end = ALIGN(end, MAX_ORDER_NR_PAGES);
+		prev_end = ALIGN(end, pageblock_nr_pages);
 	}
 
 #ifdef CONFIG_SPARSEMEM