author    Will Deacon <will.deacon@arm.com>    2015-08-04 17:49:36 +0100
committer Will Deacon <will.deacon@arm.com>    2015-08-05 10:05:20 +0100
commit    8ec41987436d566f7c4559c6871738b869f7ef07 (patch)
tree      385d1fc297fc0521fd22ab0120751f0e76e3053d /arch
parent    04b8637be92f284409651088f3856f4290a931d8 (diff)
arm64: mm: ensure patched kernel text is fetched from PoU
The arm64 booting document requires that the bootloader has cleaned the kernel image to the PoC. However, when a CPU re-enters the kernel due to either a CPU hotplug "on" event or resuming from a low-power state (e.g. cpuidle), the kernel text may in fact be dirty at the PoU due to things like alternative patching or even module loading.

Thanks to I-cache speculation with the MMU off, stale instructions could be fetched prior to enabling the MMU, potentially leading to crashes when executing regions of code that have been modified at runtime.

This patch addresses the issue by ensuring that the local I-cache is invalidated immediately after a CPU has enabled its MMU but before jumping out of the identity mapping. Any stale instructions fetched from the PoC will then be discarded and refetched correctly from the PoU. Patching kernel text executed prior to the MMU being enabled is prohibited, so the early entry code will always be clean.

Reviewed-by: Mark Rutland <mark.rutland@arm.com>
Tested-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
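For context, this is roughly the by-VA maintenance that runtime patching performs on the writer's side (broadly the sequence done by __flush_icache_range); the sketch below is illustrative, with register roles chosen for this example and a fixed 64-byte line size assumed for brevity. It cleans the new instructions to the PoU and invalidates the I-cache to the PoU, but never writes them back to the PoC, which is why a CPU fetching with the MMU off can still see the stale, pre-patch text:

	// Illustrative registers: x0 = start of patched text, x1 = end (exclusive).
	// Assumes 64-byte cache lines; the kernel derives the real line sizes
	// from CTR_EL0 rather than hard-coding them.
	and	x4, x0, #~63		// align down to a D-cache line
1:	dc	cvau, x4		// clean each line to the PoU (not the PoC)
	add	x4, x4, #64
	cmp	x4, x1
	b.lo	1b
	dsb	ish			// complete the cleans
	and	x4, x0, #~63		// align down to an I-cache line
2:	ic	ivau, x4		// invalidate each I-cache line to the PoU
	add	x4, x4, #64
	cmp	x4, x1
	b.lo	2b
	dsb	ish			// complete the invalidates
	isb				// resynchronize the instruction stream

Nothing in that sequence pushes the patched text to the PoC, so a CPU that starts fetching before its MMU is enabled can speculate stale lines into its I-cache; hence the ic iallu added by the hunks below.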
Diffstat (limited to 'arch')
-rw-r--r--  arch/arm64/kernel/head.S  | 8 ++++++++
-rw-r--r--  arch/arm64/kernel/sleep.S | 8 ++++++++
-rw-r--r--  arch/arm64/mm/proc.S      | 1 -
3 files changed, 16 insertions(+), 1 deletion(-)
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 3a0654173997..a055be6125cf 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -634,5 +634,13 @@ __enable_mmu:
 	isb
 	msr	sctlr_el1, x0
 	isb
+	/*
+	 * Invalidate the local I-cache so that any instructions fetched
+	 * speculatively from the PoC are discarded, since they may have
+	 * been dynamically patched at the PoU.
+	 */
+	ic	iallu
+	dsb	nsh
+	isb
 	br	x27
 ENDPROC(__enable_mmu)
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
index fb3128ea3a4f..f586f7c875e2 100644
--- a/arch/arm64/kernel/sleep.S
+++ b/arch/arm64/kernel/sleep.S
@@ -133,6 +133,14 @@ ENTRY(cpu_resume_mmu)
 	ldr	x3, =cpu_resume_after_mmu
 	msr	sctlr_el1, x0		// restore sctlr_el1
 	isb
+	/*
+	 * Invalidate the local I-cache so that any instructions fetched
+	 * speculatively from the PoC are discarded, since they may have
+	 * been dynamically patched at the PoU.
+	 */
+	ic	iallu
+	dsb	nsh
+	isb
 	br	x3			// global jump to virtual address
 ENDPROC(cpu_resume_mmu)
 	.popsection
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 34da270f9e34..6e8765a2bddd 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -146,7 +146,6 @@ ENDPROC(cpu_do_switch_mm)
  *	value of the SCTLR_EL1 register.
  */
 ENTRY(__cpu_setup)
-	ic	iallu			// I+BTB cache invalidate
 	tlbi	vmalle1is		// invalidate I + D TLBs
 	dsb	ish
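Taken together, the two additions and this removal converge on one pattern: enable the MMU first, then invalidate the local I-cache while still executing from the identity mapping. A minimal sketch of that shape, with register roles as in the head.S hunk above (not the literal kernel code):

	msr	sctlr_el1, x0		// x0 = SCTLR_EL1 value with the M bit set
	isb				// synchronize the system register write
	ic	iallu			// invalidate the entire local I-cache
	dsb	nsh			// wait for completion; local CPU, so nsh suffices
	isb				// force a refetch of subsequent instructions
	br	x27			// leave the identity map (x27 = virtual entry)

The ic iallu previously in __cpu_setup ran before the MMU was on, so I-cache speculation could still pull stale lines back in from the PoC afterwards; performing the invalidation after the SCTLR_EL1 write closes that window, and dsb nsh is sufficient because only the local CPU's I-cache is being invalidated.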