author		Paul Burton <paul.burton@imgtec.com>	2014-07-30 08:53:20 +0100
committer	Ralf Baechle <ralf@linux-mips.org>	2014-08-02 00:06:44 +0200
commit		c9017757c532d48bf43d6e7d3b7282443ad4207b (patch)
tree		169346d3830d35c20c8cce14cb9c91cb3bfc3b7a
parent		33c771ba5c5d067f85a5a6c4b11047219b5b8f4e (diff)
download	linux-c9017757c532d48bf43d6e7d3b7282443ad4207b.tar.gz
linux-c9017757c532d48bf43d6e7d3b7282443ad4207b.tar.bz2
linux-c9017757c532d48bf43d6e7d3b7282443ad4207b.zip
MIPS: init upper 64b of vector registers when MSA is first used
When a task first makes use of MSA we need to ensure that the upper 64b
of the vector registers are set to some value such that no information
can be leaked to it from the previous task to use MSA context on the
CPU. The architecture formerly specified that these bits would be
cleared to 0 when a scalar FP instruction wrote to the aliased FP
registers, which would have implicitly handled this as the kernel
restored scalar FP context. However, more recent versions of the
specification state that the value of these bits in such cases is
unpredictable. Initialise them explicitly to be sure, and set all the
bits to 1 rather than 0 for consistency with the least significant 64b.

Signed-off-by: Paul Burton <paul.burton@imgtec.com>
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/7497/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
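
For context on why only the upper halves need explicit initialisation: the least significant 64b of each 128-bit MSA vector register alias the corresponding scalar FP register, so they are filled whenever scalar FP context is restored, while the most significant 64b are reachable only through MSA instructions. Below is a minimal C model of that layout and of the all-ones initialisation the patch performs (illustrative only, not kernel code; the union and function names are hypothetical):

#include <stdint.h>

/*
 * Illustrative model only -- not kernel code. Each 128-bit MSA vector
 * register aliases its scalar FP register in the least significant 64b,
 * so a scalar FP restore fills d[0] but leaves d[1] holding whatever the
 * previous MSA user left behind. The hypothetical init_upper() mirrors
 * what the patch does in assembly: write all-ones to every upper half.
 */
union vreg_model {
	uint64_t d[2];	/* d[0]: scalar FPR alias, d[1]: MSA-only upper 64b */
	uint32_t w[4];	/* 32-bit word view, used by the !CONFIG_64BIT path */
};

static void init_upper(union vreg_model regs[32])
{
	for (int i = 0; i < 32; i++)
		regs[i].d[1] = ~(uint64_t)0;	/* all bits set to 1 */
}
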
-rw-r--r--	arch/mips/include/asm/asmmacro.h	20
-rw-r--r--	arch/mips/include/asm/msa.h		1
-rw-r--r--	arch/mips/kernel/r4k_switch.S		5
-rw-r--r--	arch/mips/kernel/traps.c		39
4 files changed, 56 insertions, 9 deletions
diff --git a/arch/mips/include/asm/asmmacro.h b/arch/mips/include/asm/asmmacro.h
index 4986bf5ffd29..cd9a98bc8f60 100644
--- a/arch/mips/include/asm/asmmacro.h
+++ b/arch/mips/include/asm/asmmacro.h
@@ -426,4 +426,24 @@
ld_d 31, THREAD_FPR31, \thread
.endm
+ .macro msa_init_upper wd
+#ifdef CONFIG_64BIT
+ insert_d \wd, 1
+#else
+ insert_w \wd, 2
+ insert_w \wd, 3
+#endif
+ .if 31-\wd
+ msa_init_upper (\wd+1)
+ .endif
+ .endm
+
+ .macro msa_init_all_upper
+ .set push
+ .set noat
+ not $1, zero
+ msa_init_upper 0
+ .set pop
+ .endm
+
#endif /* _ASM_ASMMACRO_H */
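
A note on how the recursive macro above covers all 32 vector registers: each expansion of msa_init_upper initialises one register's upper half and, while ".if 31-\wd" evaluates non-zero, expands itself again for the next register, so the assembler unrolls it into 32 copies ending at w31. msa_init_all_upper loads all-ones into $1 ($at) once with "not $1, zero" before starting the recursion. A rough C rendering of that unrolling (hypothetical names, illustrative only):

#include <stdint.h>

/* upper[] stands in for the MSA-only upper 64b of vector registers
 * w0..w31, and "at" for the all-ones value held in GPR $1. */
static void msa_init_upper_model(unsigned wd, uint64_t upper[32], uint64_t at)
{
	upper[wd] = at;			/* insert_d \wd, 1 (64-bit kernel) */
	if (31 - wd)			/* ".if 31-\wd": recurse until w31 */
		msa_init_upper_model(wd + 1, upper, at);
}

static void msa_init_all_upper_model(uint64_t upper[32])
{
	uint64_t at = ~(uint64_t)0;	/* "not $1, zero" */
	msa_init_upper_model(0, upper, at);
}
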
diff --git a/arch/mips/include/asm/msa.h b/arch/mips/include/asm/msa.h
index e80e85c1334f..fe25a17bc783 100644
--- a/arch/mips/include/asm/msa.h
+++ b/arch/mips/include/asm/msa.h
@@ -16,6 +16,7 @@
extern void _save_msa(struct task_struct *);
extern void _restore_msa(struct task_struct *);
+extern void _init_msa_upper(void);
static inline void enable_msa(void)
{
diff --git a/arch/mips/kernel/r4k_switch.S b/arch/mips/kernel/r4k_switch.S
index 1a1aef04312d..4c4ec1812420 100644
--- a/arch/mips/kernel/r4k_switch.S
+++ b/arch/mips/kernel/r4k_switch.S
@@ -144,6 +144,11 @@ LEAF(_restore_msa)
jr ra
END(_restore_msa)
+LEAF(_init_msa_upper)
+ msa_init_all_upper
+ jr ra
+ END(_init_msa_upper)
+
#endif
/*
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 649c151fe1db..1ed84577d3e3 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -1089,13 +1089,15 @@ static int default_cu2_call(struct notifier_block *nfb, unsigned long action,
static int enable_restore_fp_context(int msa)
{
- int err, was_fpu_owner;
+ int err, was_fpu_owner, prior_msa;
if (!used_math()) {
/* First time FP context user. */
err = init_fpu();
- if (msa && !err)
+ if (msa && !err) {
enable_msa();
+ _init_msa_upper();
+ }
if (!err)
set_used_math();
return err;
@@ -1147,18 +1149,37 @@ static int enable_restore_fp_context(int msa)
/*
* If this is the first time that the task is using MSA and it has
* previously used scalar FP in this time slice then we already have
- * FP context which we shouldn't clobber.
+ * FP context which we shouldn't clobber. We do however need to clear
+ * the upper 64b of each vector register so that this task has no
+ * opportunity to see data left behind by another.
*/
- if (!test_and_set_thread_flag(TIF_MSA_CTX_LIVE) && was_fpu_owner)
+ prior_msa = test_and_set_thread_flag(TIF_MSA_CTX_LIVE);
+ if (!prior_msa && was_fpu_owner) {
+ _init_msa_upper();
return 0;
+ }
- /* We need to restore the vector context. */
- restore_msa(current);
+ if (!prior_msa) {
+ /*
+ * Restore the least significant 64b of each vector register
+ * from the existing scalar FP context.
+ */
+ _restore_fp(current);
- /* Restore the scalar FP control & status register */
- if (!was_fpu_owner)
- asm volatile("ctc1 %0, $31" : : "r"(current->thread.fpu.fcr31));
+ /*
+ * The task has not formerly used MSA, so clear the upper 64b
+ * of each vector register such that it cannot see data left
+ * behind by another task.
+ */
+ _init_msa_upper();
+ } else {
+ /* We need to restore the vector context. */
+ restore_msa(current);
+ /* Restore the scalar FP control & status register */
+ if (!was_fpu_owner)
+ asm volatile("ctc1 %0, $31" : : "r"(current->thread.fpu.fcr31));
+ }
return 0;
}
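
To summarise the branches the second hunk adds for a task that already has FP context: first MSA use while still owning the FPU only scrubs the upper 64b; first MSA use without live FP state first rebuilds the lower 64b from the saved scalar FP context and then scrubs the upper 64b; a task with prior MSA context gets a full vector restore, plus an FCSR restore via ctc1 if the scalar FP restore path did not already run. A standalone C sketch of that decision table (hypothetical function name; the strings just name the kernel calls involved):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical summary of the branches added above, for a task whose
 * used_math() is already set. prior_msa is the old TIF_MSA_CTX_LIVE value,
 * was_fpu_owner reports whether the task's scalar FP state is still live. */
static const char *msa_restore_plan(bool prior_msa, bool was_fpu_owner)
{
	if (!prior_msa && was_fpu_owner)
		return "scrub upper 64b only (_init_msa_upper)";
	if (!prior_msa)
		return "restore scalar FP (_restore_fp), then scrub upper 64b";
	if (!was_fpu_owner)
		return "restore full vector context (restore_msa), then FCSR via ctc1";
	return "restore full vector context (restore_msa)";
}

int main(void)
{
	printf("first MSA use, FPU owner:     %s\n", msa_restore_plan(false, true));
	printf("first MSA use, not owner:     %s\n", msa_restore_plan(false, false));
	printf("prior MSA, not FPU owner:     %s\n", msa_restore_plan(true, false));
	printf("prior MSA, FPU owner:         %s\n", msa_restore_plan(true, true));
	return 0;
}
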