author		Mike Rapoport <rppt@linux.ibm.com>	2021-05-17 21:31:59 +0300
committer	Mike Rapoport <rppt@linux.ibm.com>	2021-06-30 11:38:51 +0300
commit		f921f53e089a12a192808ac4319f28727b35dc0f (patch)
tree		d8f12566c50a3310d3ed71a0f372bcfb76ceff5f /mm/memblock.c
parent		e2a86800d58639b3acde7eaeb9eb393dca066e08 (diff)
memblock: align freed memory map on pageblock boundaries with SPARSEMEM
When CONFIG_SPARSEMEM=y, the ranges of the memory map that are freed are not
aligned to pageblock boundaries, which breaks assumptions about the
homogeneity of the memory map throughout core mm code.
Make sure that the freed memory map is always aligned on pageblock
boundaries regardless of the memory model selection.
Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
Tested-by: Tony Lindgren <tony@atomide.com>
Diffstat (limited to 'mm/memblock.c')
-rw-r--r--	mm/memblock.c	7
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/mm/memblock.c b/mm/memblock.c
index 97fa87541b5f..2e25d69739e0 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -1941,14 +1941,13 @@ static void __init free_unused_memmap(void)
 		 * due to SPARSEMEM sections which aren't present.
 		 */
 		start = min(start, ALIGN(prev_end, PAGES_PER_SECTION));
-#else
+#endif
 		/*
 		 * Align down here since many operations in VM subsystem
 		 * presume that there are no holes in the memory map inside
 		 * a pageblock
 		 */
 		start = round_down(start, pageblock_nr_pages);
-#endif
 
 		/*
 		 * If we had a previous bank, and there is a space
@@ -1966,8 +1965,10 @@ static void __init free_unused_memmap(void)
 	}
 
 #ifdef CONFIG_SPARSEMEM
-	if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION))
+	if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION)) {
+		prev_end = ALIGN(end, pageblock_nr_pages);
 		free_memmap(prev_end, ALIGN(prev_end, PAGES_PER_SECTION));
+	}
 #endif
 }
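For reference, a minimal standalone sketch (plain userspace C, not kernel code) of the pfn rounding this patch relies on. The PAGES_PER_SECTION and pageblock_nr_pages values below are made-up example assumptions, and the round_down()/ALIGN() helpers simply mirror the kernel's power-of-two alignment macros:

/* Standalone illustration only -- the section and pageblock sizes are
 * example assumptions, not taken from any particular kernel config. */
#include <stdio.h>

#define PAGES_PER_SECTION	0x8000UL  /* e.g. 128 MiB sections with 4 KiB pages (assumed) */
#define pageblock_nr_pages	0x400UL   /* e.g. a pageblock of 1024 pages (assumed) */

/* Mirror the kernel macros for power-of-two alignments */
#define round_down(x, y)	((x) & ~((y) - 1))
#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long start = 0x12345;    /* arbitrary unaligned start pfn */
	unsigned long prev_end = 0x12ab3; /* arbitrary unaligned end pfn of the last range */

	/* The start of a freed hole is now rounded down to a pageblock
	 * boundary in every memory model, not only in the !SPARSEMEM case. */
	printf("start    %#lx rounds down to %#lx\n",
	       start, round_down(start, pageblock_nr_pages));

	/* With SPARSEMEM, the unused tail is freed from a pageblock-aligned
	 * pfn up to the end of the section. */
	printf("tail     freed from %#lx up to %#lx\n",
	       ALIGN(prev_end, pageblock_nr_pages),
	       ALIGN(prev_end, PAGES_PER_SECTION));
	return 0;
}

With the example values above, a start pfn of 0x12345 rounds down to 0x12000, and the tail is freed from 0x12c00 up to the section boundary at 0x18000, so the memory map is always present or absent in whole pageblocks.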