author    | Nicholas Piggin <npiggin@gmail.com>   | 2020-06-24 09:41:39 +1000
committer | Michael Ellerman <mpe@ellerman.id.au> | 2020-07-16 13:00:24 +1000
commit    | b2b46304e9360f3dda49c9d8ba4a1478b9eecf1d (patch)
tree      | 8274b88fe87bc0005a27b6478ebcdba9f8a4ca74
parent    | 01eb01877f3386d4bd5de75909abdd0af45a5fa2 (diff)
powerpc: re-initialise lazy FPU/VEC counters on every fault
When a FP/VEC/VSX unavailable fault loads registers and enables the
facility in the MSR, re-set the lazy restore counters to 1 rather
than incrementing them so every fault gets the same number of
restores before the next fault.
This probably shouldn't make any practical difference, because if a lazy
counter was non-zero then the register state should already have been
restored and would not cause a fault when userspace tries to access it.
However, the code and comment imply otherwise, which is misleading and
unnecessary.
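
For illustration only, the counter behaviour being changed can be pictured in
C. This is a sketch, not kernel code: thread_sketch and fp_unavailable_fault
are hypothetical names standing in for the assembly in fpu.S/vector.S and for
the THREAD_LOAD_FP/THREAD_LOAD_VEC byte in struct thread_struct; only the
increment-versus-set-to-1 change is taken from the patch.

/*
 * Illustrative sketch only -- the real logic is PowerPC assembly in
 * load_up_fpu (fpu.S) and load_up_altivec (vector.S).
 */
#include <stdio.h>

struct thread_sketch {
	unsigned char load_fp;	/* 8-bit lazy FP restore counter (stand-in for THREAD_LOAD_FP) */
};

/* Called on an FP-unavailable fault, after the registers have been loaded. */
static void fp_unavailable_fault(struct thread_sketch *t)
{
	/* Old behaviour: increment, deliberately letting the 8-bit
	 * counter wrap ("don't care if r4 overflows"). */
	/* t->load_fp++; */

	/* New behaviour: re-set to 1, so every fault starts the lazy
	 * restore count from the same value. */
	t->load_fp = 1;
}

int main(void)
{
	struct thread_sketch t = { .load_fp = 0 };

	fp_unavailable_fault(&t);
	printf("load_fp after fault: %u\n", t.load_fp);	/* always 1 with the new behaviour */
	return 0;
}

With the old increment, the value after a fault depended on whatever the
counter held beforehand; re-setting it to 1 gives every fault the same number
of restores before the next fault, as the message above describes.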
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20200623234139.2262227-3-npiggin@gmail.com
-rw-r--r-- | arch/powerpc/kernel/fpu.S    | 4
-rw-r--r-- | arch/powerpc/kernel/vector.S | 4
2 files changed, 2 insertions, 6 deletions
diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S
index cac22cb97a8c..4ae39db70044 100644
--- a/arch/powerpc/kernel/fpu.S
+++ b/arch/powerpc/kernel/fpu.S
@@ -107,9 +107,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
 	or	r12,r12,r4
 	std	r12,_MSR(r1)
 #endif
-	/* Don't care if r4 overflows, this is desired behaviour */
-	lbz	r4,THREAD_LOAD_FP(r5)
-	addi	r4,r4,1
+	li	r4,1
 	stb	r4,THREAD_LOAD_FP(r5)
 	addi	r10,r5,THREAD_FPSTATE
 	lfd	fr0,FPSTATE_FPSCR(r10)
diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S
index efc5b52f95d2..801dc28fdcca 100644
--- a/arch/powerpc/kernel/vector.S
+++ b/arch/powerpc/kernel/vector.S
@@ -76,9 +76,7 @@ _GLOBAL(load_up_altivec)
 	oris	r12,r12,MSR_VEC@h
 	std	r12,_MSR(r1)
 #endif
-	/* Don't care if r4 overflows, this is desired behaviour */
-	lbz	r4,THREAD_LOAD_VEC(r5)
-	addi	r4,r4,1
+	li	r4,1
 	stb	r4,THREAD_LOAD_VEC(r5)
 	addi	r6,r5,THREAD_VRSTATE
 	li	r4,1