path: root/arch/arm64/mm
author    Steve Capper <steve.capper@arm.com>  2019-08-07 16:55:19 +0100
committer Will Deacon <will@kernel.org>        2019-08-09 11:17:24 +0100
commit    c812026c54cfaec23fa1d78cdbfd0e56e787470a (patch)
tree      21c649aa4bd867a2d871e7c2ad9192824999874f /arch/arm64/mm
parent    5383cc6efed13784ddb3cff2cc183b6b8c50c8db (diff)
arm64: mm: Logic to make offset_ttbr1 conditional
When running with a 52-bit userspace VA and a 48-bit kernel VA we offset ttbr1_el1 to allow the kernel pagetables with a 52-bit PTRS_PER_PGD to be used for both userspace and kernel.

Moving on to a 52-bit kernel VA we no longer require this offset to ttbr1_el1 should we be running on a system with HW support for 52-bit VAs.

This patch introduces conditional logic to offset_ttbr1 to query SYS_ID_AA64MMFR2_EL1 whenever 52-bit VAs are selected. If there is HW support for 52-bit VAs then the ttbr1 offset is skipped.

We choose to read a system register rather than vabits_actual because offset_ttbr1 can be called in places where the kernel data is not actually mapped.

Calls to offset_ttbr1 appear to be made from rarely called code paths so this extra logic is not expected to adversely affect performance.

Signed-off-by: Steve Capper <steve.capper@arm.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Will Deacon <will@kernel.org>
Diffstat (limited to 'arch/arm64/mm')
-rw-r--r--  arch/arm64/mm/proc.S | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 7dbf2be470f6..8d289ff7584d 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -168,7 +168,7 @@ ENDPROC(cpu_do_switch_mm)
.macro __idmap_cpu_set_reserved_ttbr1, tmp1, tmp2
adrp \tmp1, empty_zero_page
phys_to_ttbr \tmp2, \tmp1
- offset_ttbr1 \tmp2
+ offset_ttbr1 \tmp2, \tmp1
msr ttbr1_el1, \tmp2
isb
tlbi vmalle1
@@ -187,7 +187,7 @@ ENTRY(idmap_cpu_replace_ttbr1)
__idmap_cpu_set_reserved_ttbr1 x1, x3
- offset_ttbr1 x0
+ offset_ttbr1 x0, x3
msr ttbr1_el1, x0
isb
@@ -362,7 +362,7 @@ __idmap_kpti_secondary:
cbnz w18, 1b
/* All done, act like nothing happened */
- offset_ttbr1 swapper_ttb
+ offset_ttbr1 swapper_ttb, x18
msr ttbr1_el1, swapper_ttb
isb
ret
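
For reference, the offset_ttbr1 macro itself is defined in the arm64 assembler headers rather than under arch/arm64/mm, so its new body falls outside the diffstat shown above; only its call sites, which now pass a scratch register, appear here. Below is a minimal sketch of the conditional logic described in the commit message. It assumes the kernel's TTBR1_BADDR_4852_OFFSET constant, the architectural VARange field of ID_AA64MMFR2_EL1 at bits [19:16], and an illustrative config guard; the real definition may spell the register read via the kernel's SYS_ID_AA64MMFR2_EL1/mrs_s helpers and differ in detail.

	.macro	offset_ttbr1, ttbr, tmp
#ifdef CONFIG_ARM64_VA_BITS_52			// guard name illustrative
	mrs	\tmp, ID_AA64MMFR2_EL1		// read HW memory model feature register
	and	\tmp, \tmp, #(0xf << 16)	// isolate VARange: non-zero means HW 52-bit VA support
	cbnz	\tmp, .Lskip_\@			// HW handles 52-bit VAs: no ttbr1 offset needed
	orr	\ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET	// otherwise apply the 48/52-bit baddr offset
.Lskip_\@ :
#endif
	.endm

Passing a scratch register is what the call-site changes in the diff above are about: the feature-register check needs a temporary, so each caller now supplies one (x3, x18, or a macro argument) alongside the ttbr value.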