author	Alexandre Ghiti <alexandre.ghiti@canonical.com>	2021-10-29 06:59:26 +0200
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2021-11-02 19:51:02 +0100
commit	4606bbb6b19ccaa74159e1f3628e8a2414557167 (patch)
tree	8364d8d3d5b4d910ed2d2b61c4fdbbdfff837a24
parent	7567abe63797aba9b2b7e16c543ec6af987f9baa (diff)
riscv: Do not re-populate shadow memory with kasan_populate_early_shadow
commit cf11d01135ea1ff7fddb612033e3cb5cde279ff2 upstream.

When calling this function, all the shadow memory is already populated
with kasan_early_shadow_pte, which has PAGE_KERNEL protection.

kasan_populate_early_shadow write-protects, in zero_pte_populate, the
mapping of the address range passed as argument. Since
kasan_early_shadow_pte is used for all the shadow memory at this point,
this actually write-protects the whole shadow memory mapping. Then,
when the memblock API is used to populate the shadow memory, the first
write access to the kernel stack triggers a trap. This becomes visible
with the next commit, which contains a fix for asan-stack.

We already manually populate all the shadow memory in kasan_early_init
and we write-protect kasan_early_shadow_pte at the end of kasan_init,
which makes the calls to kasan_populate_early_shadow superfluous, so we
can remove them.

Signed-off-by: Alexandre Ghiti <alexandre.ghiti@canonical.com>
Fixes: e178d670f251 ("riscv/kasan: add KASAN_VMALLOC support")
Fixes: 8ad8b72721d0 ("riscv: Add KASAN support")
Cc: stable@vger.kernel.org
Signed-off-by: Palmer Dabbelt <palmerdabbelt@google.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
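[Editor's note] For readers unfamiliar with how KASAN maps a kernel address to
its shadow byte, below is a minimal, stand-alone user-space sketch of the
translation that kasan_mem_to_shadow() performs in generic KASAN. It is not
kernel code: the scale shift of 3 (one shadow byte covers 8 bytes of memory)
matches generic KASAN, but the offset and the example address are placeholder
values, not the real riscv KASAN_SHADOW_OFFSET, and a 64-bit build is assumed.

/* kasan_shadow_sketch.c - illustrative only, assumes a 64-bit build */
#include <stdint.h>
#include <stdio.h>

#define KASAN_SHADOW_SCALE_SHIFT 3                    /* 1 shadow byte per 8 bytes */
#define KASAN_SHADOW_OFFSET      0xdfffffc000000000UL /* placeholder, not the riscv value */

/* Same arithmetic as the generic kasan_mem_to_shadow() helper:
 * shadow = (addr >> scale_shift) + offset
 */
static uintptr_t mem_to_shadow(uintptr_t addr)
{
	return (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET;
}

int main(void)
{
	uintptr_t start = 0xffffffd000000000UL;	/* made-up example kernel VA */
	uintptr_t end   = start + 0x1000;

	printf("shadow of [%#lx, %#lx) is [%#lx, %#lx)\n",
	       (unsigned long)start, (unsigned long)end,
	       (unsigned long)mem_to_shadow(start),
	       (unsigned long)mem_to_shadow(end));
	return 0;
}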
-rw-r--r--	arch/riscv/mm/kasan_init.c	11
1 file changed, 0 insertions, 11 deletions
diff --git a/arch/riscv/mm/kasan_init.c b/arch/riscv/mm/kasan_init.c
index d7189c8714a9..89a8376ce44e 100644
--- a/arch/riscv/mm/kasan_init.c
+++ b/arch/riscv/mm/kasan_init.c
@@ -172,21 +172,10 @@ void __init kasan_init(void)
 	phys_addr_t p_start, p_end;
 	u64 i;
 
-	/*
-	 * Populate all kernel virtual address space with kasan_early_shadow_page
-	 * except for the linear mapping and the modules/kernel/BPF mapping.
-	 */
-	kasan_populate_early_shadow((void *)KASAN_SHADOW_START,
-				    (void *)kasan_mem_to_shadow((void *)
-								VMEMMAP_END));
 	if (IS_ENABLED(CONFIG_KASAN_VMALLOC))
 		kasan_shallow_populate(
 			(void *)kasan_mem_to_shadow((void *)VMALLOC_START),
 			(void *)kasan_mem_to_shadow((void *)VMALLOC_END));
-	else
-		kasan_populate_early_shadow(
-			(void *)kasan_mem_to_shadow((void *)VMALLOC_START),
-			(void *)kasan_mem_to_shadow((void *)VMALLOC_END));
 
 	/* Populate the linear mapping */
 	for_each_mem_range(i, &p_start, &p_end) {