author	Michael Neuling <mikey@neuling.org>	2018-09-25 19:36:47 +1000
committer	Michael Ellerman <mpe@ellerman.id.au>	2018-09-25 22:51:32 +1000
commit	96dc89d526ef77604376f06220e3d2931a0bfd58 (patch)
tree	f0654ed863e42de110a65900fdfaf2e82c10fb3e	/arch/powerpc/kernel/tm.S
parent	cf13435b730a502e814c63c84d93db131e563f5f (diff)
powerpc/tm: Avoid possible userspace r1 corruption on reclaim
Currently we store the userspace r1 to PACATMSCRATCH before finally saving it to the thread struct.

In theory an exception could be taken here (like a machine check or SLB miss) that could write PACATMSCRATCH and hence corrupt the userspace r1. The SLB fault currently doesn't touch PACATMSCRATCH, but others do. We've never actually seen this happen, but it's theoretically possible. Either way, the code is fragile as it is.

This patch saves r1 to the kernel stack (which can't fault) before we turn MSR[RI] back on. PACATMSCRATCH is still used, but only with MSR[RI] off. We then copy r1 from the kernel stack to the thread struct once we have MSR[RI] back on.

Suggested-by: Breno Leitao <leitao@debian.org>
Signed-off-by: Michael Neuling <mikey@neuling.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
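For orientation before the diff: the fix rearranges tm_reclaim so that PACATMSCRATCH is only read while MSR[RI] is still off. A simplified sketch of the two hunks below (the "..." stands for the intervening code, including the point where MSR[RI] is re-enabled, which is outside this patch's context):

	/* MSR[RI] is still off here, so PACATMSCRATCH cannot be clobbered yet */
	ld	r11, PACATMSCRATCH(r13)	/* fetch the user r1 stashed there earlier */
	std	r11, GPR1(r1)		/* park it on the kernel stack, which can't fault */
	...				/* MSR[RI] is turned back on somewhere in here */
	ld	r3, GPR1(r1)		/* reload user r1 from the kernel stack, not from */
					/* PACATMSCRATCH, when saving into the thread struct */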
Diffstat (limited to 'arch/powerpc/kernel/tm.S')
-rw-r--r--	arch/powerpc/kernel/tm.S	9
1 file changed, 8 insertions(+), 1 deletion(-)
diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S
index 183e8d75936f..7716374786bd 100644
--- a/arch/powerpc/kernel/tm.S
+++ b/arch/powerpc/kernel/tm.S
@@ -179,6 +179,13 @@ _GLOBAL(tm_reclaim)
 	std	r11, GPR11(r1)			/* Temporary stash */
 
 	/*
+	 * Move the saved user r1 to the kernel stack in case PACATMSCRATCH is
+	 * clobbered by an exception once we turn on MSR_RI below.
+	 */
+	ld	r11, PACATMSCRATCH(r13)
+	std	r11, GPR1(r1)
+
+	/*
 	 * Store r13 away so we can free up the scratch SPR for the SLB fault
 	 * handler (needed once we start accessing the thread_struct).
 	 */
@@ -214,7 +221,7 @@ _GLOBAL(tm_reclaim)
 	SAVE_GPR(8, r7)				/* user r8 */
 	SAVE_GPR(9, r7)				/* user r9 */
 	SAVE_GPR(10, r7)			/* user r10 */
-	ld	r3, PACATMSCRATCH(r13)		/* user r1 */
+	ld	r3, GPR1(r1)			/* user r1 */
 	ld	r4, GPR7(r1)			/* user r7 */
 	ld	r5, GPR11(r1)			/* user r11 */
 	ld	r6, GPR12(r1)			/* user r12 */