author     Linus Torvalds <torvalds@linux-foundation.org>  2019-09-06 08:54:45 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2019-09-06 08:54:45 -0700
commit     13da6ac106be36d7c2d7f201fd8850a9ac235e6b (patch)
tree       25394fce83181990f06505055a4658f88ed07850 /arch
parent     d41a3effbb53b1bcea41e328d16a4d046a508381 (diff)
parent     a8318c13e79badb92bc6640704a64cc022a6eb97 (diff)
Merge tag 'powerpc-5.3-5' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux
Pull powerpc fixes from Michael Ellerman:
 "One fix for a boot hang on some Freescale machines when PREEMPT is enabled.

  Two CVE fixes for bugs in our handling of FP registers and transactional memory, both of which can result in corrupted FP state, or FP state leaking between processes.

  Thanks to: Chris Packham, Christophe Leroy, Gustavo Romero, Michael Neuling"

* tag 'powerpc-5.3-5' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux:
  powerpc/tm: Fix restoring FP/VMX facility incorrectly on interrupts
  powerpc/tm: Fix FP/VMX unavailable exceptions inside a transaction
  powerpc/64e: Drop stale call to smp_processor_id() which hangs SMP startup
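For context on the first TM fix, the following is a minimal stand-alone sketch, not kernel code: the struct, constants, and helpers below are simplified stand-ins for thread_struct, MSR_FP, MSR_TM_ACTIVE() and the restore_fp() logic shown in the diff. It models the change in the reload decision: before this merge the FP reload could also be triggered by the TM-checkpointed MSR, which is what allowed stale checkpointed FP state to reach the live registers on an interrupt; after the fix the reload is keyed solely off load_fp.

    /* Stand-alone sketch only; models the restore_fp() decision, not the kernel's types. */
    #include <stdbool.h>
    #include <stdio.h>

    #define SKETCH_MSR_FP            0x2000  /* illustrative bit value */
    #define SKETCH_MSR_TM_ACTIVE(m)  ((m) & 0x60)  /* illustrative, not the real macro */

    struct thread_sketch {
            unsigned long msr;       /* live MSR */
            unsigned long ckpt_msr;  /* TM-checkpointed MSR */
            int load_fp;             /* "FP state should be reloaded" counter */
    };

    /* Old behaviour: also reload when an active transaction's checkpointed MSR had FP set. */
    static bool should_restore_fp_old(const struct thread_sketch *t)
    {
            return t->load_fp ||
                   (SKETCH_MSR_TM_ACTIVE(t->msr) && (t->ckpt_msr & SKETCH_MSR_FP));
    }

    /* New behaviour after the fix: only load_fp drives the reload. */
    static bool should_restore_fp_new(const struct thread_sketch *t)
    {
            return t->load_fp != 0;
    }

    int main(void)
    {
            /* A task in an active transaction whose checkpointed MSR has FP set,
             * but whose load_fp says no reload is needed. */
            struct thread_sketch t = { .msr = 0x20, .ckpt_msr = SKETCH_MSR_FP, .load_fp = 0 };

            printf("old decision: %d  new decision: %d\n",
                   should_restore_fp_old(&t), should_restore_fp_new(&t));
            return 0;
    }

The diff below removes the tm_active_with_fp()/tm_active_with_altivec() helpers that implemented the "old" branch of this decision.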
Diffstat (limited to 'arch')
-rw-r--r--  arch/powerpc/kernel/process.c  21
-rw-r--r--  arch/powerpc/mm/nohash/tlb.c    1
2 files changed, 4 insertions(+), 18 deletions(-)
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 8fc4de0d22b4..7a84c9f1778e 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -101,21 +101,8 @@ static void check_if_tm_restore_required(struct task_struct *tsk)
}
}
-static bool tm_active_with_fp(struct task_struct *tsk)
-{
- return MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
- (tsk->thread.ckpt_regs.msr & MSR_FP);
-}
-
-static bool tm_active_with_altivec(struct task_struct *tsk)
-{
- return MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
- (tsk->thread.ckpt_regs.msr & MSR_VEC);
-}
#else
static inline void check_if_tm_restore_required(struct task_struct *tsk) { }
-static inline bool tm_active_with_fp(struct task_struct *tsk) { return false; }
-static inline bool tm_active_with_altivec(struct task_struct *tsk) { return false; }
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
bool strict_msr_control;
@@ -252,7 +239,7 @@ EXPORT_SYMBOL(enable_kernel_fp);
static int restore_fp(struct task_struct *tsk)
{
- if (tsk->thread.load_fp || tm_active_with_fp(tsk)) {
+ if (tsk->thread.load_fp) {
load_fp_state(&current->thread.fp_state);
current->thread.load_fp++;
return 1;
@@ -334,8 +321,7 @@ EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
static int restore_altivec(struct task_struct *tsk)
{
- if (cpu_has_feature(CPU_FTR_ALTIVEC) &&
- (tsk->thread.load_vec || tm_active_with_altivec(tsk))) {
+ if (cpu_has_feature(CPU_FTR_ALTIVEC) && (tsk->thread.load_vec)) {
load_vr_state(&tsk->thread.vr_state);
tsk->thread.used_vr = 1;
tsk->thread.load_vec++;
@@ -497,13 +483,14 @@ void giveup_all(struct task_struct *tsk)
if (!tsk->thread.regs)
return;
+ check_if_tm_restore_required(tsk);
+
usermsr = tsk->thread.regs->msr;
if ((usermsr & msr_all_available) == 0)
return;
msr_check_and_set(msr_all_available);
- check_if_tm_restore_required(tsk);
WARN_ON((usermsr & MSR_VSX) && !((usermsr & MSR_FP) && (usermsr & MSR_VEC)));
diff --git a/arch/powerpc/mm/nohash/tlb.c b/arch/powerpc/mm/nohash/tlb.c
index d4acf6fa0596..bf60983a58c7 100644
--- a/arch/powerpc/mm/nohash/tlb.c
+++ b/arch/powerpc/mm/nohash/tlb.c
@@ -630,7 +630,6 @@ static void early_init_this_mmu(void)
#ifdef CONFIG_PPC_FSL_BOOK3E
if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
unsigned int num_cams;
- int __maybe_unused cpu = smp_processor_id();
bool map = true;
/* use a quarter of the TLBCAM for bolted linear map */