author     Ard Biesheuvel <ardb@kernel.org>    2022-08-26 18:48:00 +0200
committer  Will Deacon <will@kernel.org>       2022-09-01 12:02:39 +0100
commit     5fbc49cef91916140a305f22f7430e9a7ea0c6b4 (patch)
tree       6330fdc85832cfe957ae1ccce93822e589b3846f /arch/arm64
parent     6bb0d64c100091e131cd16710b62fda3319cd0af (diff)
arm64: mm: Reserve enough pages for the initial ID map
The logic that conditionally allocates one additional page at each
swapper page table level if KASLR is enabled is also applied to the
initial ID map, now that we have started using the same set of macros
to allocate the space for it.

However, the placement of the kernel in physical memory might result
in additional pages being needed at any level, even if KASLR is
disabled in the build. So account for this in the computation.

Fixes: c3cee924bd85 ("arm64: head: cover entire kernel image in initial ID map")
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Link: https://lore.kernel.org/r/20220826164800.2059148-1-ardb@kernel.org
Signed-off-by: Will Deacon <will@kernel.org>
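[Editor's note] To see why placement alone can cost an extra page per level,
consider the EARLY_ENTRIES() arithmetic in isolation. The following minimal
user-space sketch is not part of the patch; it assumes 4K pages and hard-codes
PGDIR_SHIFT = 39 purely for illustration. A region of fixed size spans one
table entry when it sits inside a single 512 GiB PGD block, but two when its
placement straddles a block boundary:

/* sketch.c: standalone illustration of the EARLY_ENTRIES() computation.
 * Assumes 4K pages, so PGDIR_SHIFT = 39 (512 GiB per PGD entry). */
#include <stdio.h>
#include <stdint.h>

#define PGDIR_SHIFT 39

/* Mirrors EARLY_ENTRIES(): table entries spanned by [vstart, vend) */
static uint64_t early_entries(uint64_t vstart, uint64_t vend,
                              unsigned int shift, uint64_t add)
{
        return ((vend - 1) >> shift) - (vstart >> shift) + 1 + add;
}

int main(void)
{
        uint64_t size = 64ULL << 20;    /* a 64 MiB image, for example */

        /* Start aligned to a PGD block: the image spans one entry. */
        uint64_t aligned = 1ULL << PGDIR_SHIFT;

        /* Same size, but straddling a PGD block boundary: two entries,
         * even with KASLR disabled in the build. This is why the ID map
         * must always reserve one extra entry per level (add = 1). */
        uint64_t straddle = (2ULL << PGDIR_SHIFT) - size / 2;

        printf("aligned:   %llu entries\n", (unsigned long long)
               early_entries(aligned, aligned + size, PGDIR_SHIFT, 0));
        printf("straddles: %llu entries\n", (unsigned long long)
               early_entries(straddle, straddle + size, PGDIR_SHIFT, 0));
        return 0;
}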
Diffstat (limited to 'arch/arm64')
 arch/arm64/include/asm/kernel-pgtable.h | 26 +++++++++++++-------------
 1 file changed, 13 insertions(+), 13 deletions(-)
diff --git a/arch/arm64/include/asm/kernel-pgtable.h b/arch/arm64/include/asm/kernel-pgtable.h
index 02e59fa8f293..32d14f481f0c 100644
--- a/arch/arm64/include/asm/kernel-pgtable.h
+++ b/arch/arm64/include/asm/kernel-pgtable.h
@@ -64,28 +64,28 @@
 #define EARLY_KASLR    (0)
 #endif
 
-#define EARLY_ENTRIES(vstart, vend, shift) \
-       ((((vend) - 1) >> (shift)) - ((vstart) >> (shift)) + 1 + EARLY_KASLR)
+#define EARLY_ENTRIES(vstart, vend, shift, add) \
+       ((((vend) - 1) >> (shift)) - ((vstart) >> (shift)) + 1 + add)
 
-#define EARLY_PGDS(vstart, vend) (EARLY_ENTRIES(vstart, vend, PGDIR_SHIFT))
+#define EARLY_PGDS(vstart, vend, add) (EARLY_ENTRIES(vstart, vend, PGDIR_SHIFT, add))
 
 #if SWAPPER_PGTABLE_LEVELS > 3
-#define EARLY_PUDS(vstart, vend) (EARLY_ENTRIES(vstart, vend, PUD_SHIFT))
+#define EARLY_PUDS(vstart, vend, add) (EARLY_ENTRIES(vstart, vend, PUD_SHIFT, add))
 #else
-#define EARLY_PUDS(vstart, vend) (0)
+#define EARLY_PUDS(vstart, vend, add) (0)
 #endif
 
 #if SWAPPER_PGTABLE_LEVELS > 2
-#define EARLY_PMDS(vstart, vend) (EARLY_ENTRIES(vstart, vend, SWAPPER_TABLE_SHIFT))
+#define EARLY_PMDS(vstart, vend, add) (EARLY_ENTRIES(vstart, vend, SWAPPER_TABLE_SHIFT, add))
 #else
-#define EARLY_PMDS(vstart, vend) (0)
+#define EARLY_PMDS(vstart, vend, add) (0)
 #endif
 
-#define EARLY_PAGES(vstart, vend) ( 1                  /* PGDIR page */
-                       + EARLY_PGDS((vstart), (vend))  /* each PGDIR needs a next level page table */  \
-                       + EARLY_PUDS((vstart), (vend))  /* each PUD needs a next level page table */    \
-                       + EARLY_PMDS((vstart), (vend))) /* each PMD needs a next level page table */
-#define INIT_DIR_SIZE (PAGE_SIZE * EARLY_PAGES(KIMAGE_VADDR, _end))
+#define EARLY_PAGES(vstart, vend, add) ( 1                  /* PGDIR page */                                \
+                       + EARLY_PGDS((vstart), (vend), add)  /* each PGDIR needs a next level page table */  \
+                       + EARLY_PUDS((vstart), (vend), add)  /* each PUD needs a next level page table */    \
+                       + EARLY_PMDS((vstart), (vend), add)) /* each PMD needs a next level page table */
+#define INIT_DIR_SIZE (PAGE_SIZE * EARLY_PAGES(KIMAGE_VADDR, _end, EARLY_KASLR))
 
 /* the initial ID map may need two extra pages if it needs to be extended */
 #if VA_BITS < 48
@@ -93,7 +93,7 @@
 #else
 #define INIT_IDMAP_DIR_SIZE    (INIT_IDMAP_DIR_PAGES * PAGE_SIZE)
 #endif
-#define INIT_IDMAP_DIR_PAGES   EARLY_PAGES(KIMAGE_VADDR, _end + MAX_FDT_SIZE + SWAPPER_BLOCK_SIZE)
+#define INIT_IDMAP_DIR_PAGES   EARLY_PAGES(KIMAGE_VADDR, _end + MAX_FDT_SIZE + SWAPPER_BLOCK_SIZE, 1)
 
 /* Initial memory map size */
 #if ARM64_KERNEL_USES_PMD_MAPS
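
[Editor's note] As a hypothetical worked example of what the new `add`
argument reserves, the sketch below mirrors the EARLY_PAGES() chain for one
assumed configuration: 4K pages, 48-bit VA, PMD block mappings, so
PGDIR_SHIFT = 39, SWAPPER_TABLE_SHIFT = PUD_SHIFT = 30 and EARLY_PUDS()
contributes nothing. The addresses and image size are made up; none of this
is from the patch itself:

/* pages.c: back-of-the-envelope EARLY_PAGES() totals for a small image.
 * Assumed config: 4K pages, 48-bit VA, PMD maps, so only the PGD and
 * PUD levels contribute intermediate tables here. */
#include <stdio.h>
#include <stdint.h>

static uint64_t entries(uint64_t s, uint64_t e, unsigned int shift,
                        uint64_t add)
{
        return ((e - 1) >> shift) - (s >> shift) + 1 + add;
}

static uint64_t early_pages(uint64_t s, uint64_t e, uint64_t add)
{
        return 1                        /* PGDIR page */
             + entries(s, e, 39, add)   /* a PUD table per PGD entry */
             + entries(s, e, 30, add);  /* a PMD table per PUD entry */
}

int main(void)
{
        uint64_t kimage = 0xffff800008000000ULL;  /* example link address */
        uint64_t end    = kimage + (46ULL << 20); /* example 46 MiB image */

        /* Swapper keeps its KASLR-only slack: 3 pages here when add=0. */
        printf("swapper, add=EARLY_KASLR(0): %llu pages\n",
               (unsigned long long)early_pages(kimage, end, 0));
        /* The ID map now always reserves the slack: 5 pages when add=1. */
        printf("ID map,  add=1:              %llu pages\n",
               (unsigned long long)early_pages(kimage, end, 1));
        return 0;
}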