author     Mark Rutland <mark.rutland@arm.com>  2022-11-14 10:44:11 +0000
committer  Will Deacon <will@kernel.org>        2022-11-15 13:29:05 +0000
commit     0bb1fbffc631064db567ccaeb9ed6b6df6342b66 (patch)
tree       7a383af1a4f9e9c1ab1c92dab85e81a188ab11ee /arch/arm64/mm/fault.c
parent     a4ee28615c7a1e2925e1fcb4ba0fa1aeee633d78 (diff)
arm64: mm: kfence: only handle translation faults
Alexander noted that KFENCE only expects to handle faults from invalid page
table entries (i.e. translation faults), but arm64's fault handling logic will
call kfence_handle_page_fault() for other types of faults, including alignment
faults caused by unaligned atomics. This has the unfortunate property of
causing those other faults to be reported as "KFENCE: use-after-free", which
is misleading and hinders debugging.

Fix this by only forwarding unhandled translation faults to the KFENCE code,
similar to what x86 does already.

Alexander has verified that this passes all the tests in the KFENCE test
suite and avoids bogus reports on misaligned atomics.

Link: https://lore.kernel.org/all/20221102081620.1465154-1-zhongbaisong@huawei.com/
Fixes: 840b23986344 ("arm64, kfence: enable KFENCE for ARM64")
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Alexander Potapenko <glider@google.com>
Tested-by: Alexander Potapenko <glider@google.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Marco Elver <elver@google.com>
Cc: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/20221114104411.2853040-1-mark.rutland@arm.com
Signed-off-by: Will Deacon <will@kernel.org>
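For reference, a minimal standalone sketch of the fault classification this
patch relies on. The ESR_ELx_FSC_TYPE mask and ESR_ELx_FSC_FAULT value mirror
arch/arm64/include/asm/esr.h; the sample ESR values in main() are illustrative
assumptions, not taken from the patch:

#include <stdio.h>
#include <stdbool.h>

/* Mirrors arch/arm64/include/asm/esr.h: the fault status code (FSC) lives in
 * ESR bits [5:0]; masking out the level bits [1:0] groups translation faults
 * at all levels (FSC 0x04..0x07) under a single check. */
#define ESR_ELx_FSC_TYPE	0x3CUL
#define ESR_ELx_FSC_FAULT	0x04UL	/* translation fault, level 0 */

static bool is_translation_fault(unsigned long esr)
{
	return (esr & ESR_ELx_FSC_TYPE) == ESR_ELx_FSC_FAULT;
}

int main(void)
{
	/* Translation fault, level 1: forwarded to KFENCE after this patch. */
	printf("FSC 0x05 -> %d\n", is_translation_fault(0x05));
	/* Alignment fault (FSC 0x21), e.g. from an unaligned atomic: no
	 * longer misreported as "KFENCE: use-after-free". */
	printf("FSC 0x21 -> %d\n", is_translation_fault(0x21));
	return 0;
}

Compiled with a hosted C compiler this prints 1 for the translation fault and
0 for the alignment fault, matching the filtering behaviour the patch adds.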
Diffstat (limited to 'arch/arm64/mm/fault.c')
-rw-r--r--  arch/arm64/mm/fault.c | 8 +++++++-
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 3e9cf9826417..3eb2825d08cf 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -354,6 +354,11 @@ static bool is_el1_mte_sync_tag_check_fault(unsigned long esr)
 	return false;
 }
 
+static bool is_translation_fault(unsigned long esr)
+{
+	return (esr & ESR_ELx_FSC_TYPE) == ESR_ELx_FSC_FAULT;
+}
+
 static void __do_kernel_fault(unsigned long addr, unsigned long esr,
 			      struct pt_regs *regs)
 {
@@ -386,7 +391,8 @@ static void __do_kernel_fault(unsigned long addr, unsigned long esr,
 	} else if (addr < PAGE_SIZE) {
 		msg = "NULL pointer dereference";
 	} else {
-		if (kfence_handle_page_fault(addr, esr & ESR_ELx_WNR, regs))
+		if (is_translation_fault(esr) &&
+		    kfence_handle_page_fault(addr, esr & ESR_ELx_WNR, regs))
 			return;
 
 		msg = "paging request";