author | Linus Torvalds <torvalds@linux-foundation.org> | 2019-10-26 19:43:12 -0400
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2019-10-26 19:43:12 -0400
commit | 5a1e843c66fa6438f389045981c37e4073917641
tree | 90b84c64ea215ed214f1d15b3a0b33200aff7370 /arch/mips/mm/tlbex.c
parent | 297689545916c40b65a86d4871e5610a024c8993
parent | b42aa3fd5957e4daf4b69129e5ce752a2a53e7d6
Merge tag 'mips_fixes_5.4_3' of git://git.kernel.org/pub/scm/linux/kernel/git/mips/linux
Pull MIPS fixes from Paul Burton:
"A few MIPS fixes:
- Fix VDSO time-related function behavior for systems where we need
to fall back to syscalls, but were instead returning bogus results.
- A fix to TLB exception handlers for Cavium Octeon systems where
they would inadvertently clobber the $1/$at register.
- A build fix for bcm63xx configurations.
- Switch to using my @kernel.org email address"
* tag 'mips_fixes_5.4_3' of git://git.kernel.org/pub/scm/linux/kernel/git/mips/linux:
MIPS: tlbex: Fix build_restore_pagemask KScratch restore
MIPS: bmips: mark exception vectors as char arrays
mips: vdso: Fix __arch_get_hw_counter()
MAINTAINERS: Use @kernel.org address for Paul Burton
Diffstat (limited to 'arch/mips/mm/tlbex.c')
-rw-r--r-- | arch/mips/mm/tlbex.c | 23
1 file changed, 15 insertions, 8 deletions
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index e01cb33bfa1a..41bb91f05688 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -653,6 +653,13 @@ static void build_restore_pagemask(u32 **p, struct uasm_reloc **r,
 				   int restore_scratch)
 {
 	if (restore_scratch) {
+		/*
+		 * Ensure the MFC0 below observes the value written to the
+		 * KScratch register by the prior MTC0.
+		 */
+		if (scratch_reg >= 0)
+			uasm_i_ehb(p);
+
 		/* Reset default page size */
 		if (PM_DEFAULT_MASK >> 16) {
 			uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
@@ -667,12 +674,10 @@ static void build_restore_pagemask(u32 **p, struct uasm_reloc **r,
 			uasm_i_mtc0(p, 0, C0_PAGEMASK);
 			uasm_il_b(p, r, lid);
 		}
-		if (scratch_reg >= 0) {
-			uasm_i_ehb(p);
+		if (scratch_reg >= 0)
 			UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
-		} else {
+		else
 			UASM_i_LW(p, 1, scratchpad_offset(0), 0);
-		}
 	} else {
 		/* Reset default page size */
 		if (PM_DEFAULT_MASK >> 16) {
@@ -921,6 +926,10 @@ build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
 	}
 	if (mode != not_refill && check_for_high_segbits) {
 		uasm_l_large_segbits_fault(l, *p);
+
+		if (mode == refill_scratch && scratch_reg >= 0)
+			uasm_i_ehb(p);
+
 		/*
 		 * We get here if we are an xsseg address, or if we are
 		 * an xuseg address above (PGDIR_SHIFT+PGDIR_BITS) boundary.
@@ -939,12 +948,10 @@ build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
 		uasm_i_jr(p, ptr);
 
 		if (mode == refill_scratch) {
-			if (scratch_reg >= 0) {
-				uasm_i_ehb(p);
+			if (scratch_reg >= 0)
 				UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
-			} else {
+			else
 				UASM_i_LW(p, 1, scratchpad_offset(0), 0);
-			}
 		} else {
 			uasm_i_nop(p);
 		}
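For context (an illustrative aside, not part of the commit): build_restore_pagemask() restores $1/$at by placing the final MFC0 from the KScratch register in the delay slot of the branch it emits just before it, which is why the patch moves the EHB hazard barrier ahead of the PageMask reset instead of leaving it between the branch and the MFC0. The sketch below is a minimal, hypothetical stand-in: it uses print-only mock emitters (not the kernel's uasm API) to show the instruction order the fixed restore_scratch path produces when a KScratch register is available.

```c
/*
 * Illustrative sketch only: print-only stand-ins for the uasm emitters,
 * showing the instruction sequence the patched restore_scratch path is
 * meant to produce.  The EHB now precedes the branch, so the MFC0 that
 * restores $1/$at lands in the branch delay slot rather than being
 * pushed out of it.
 */
#include <stdio.h>

static void emit(const char *insn)
{
	printf("\t%s\n", insn);
}

int main(void)
{
	emit("ehb                       # clear the MTC0 -> MFC0 KScratch hazard");
	emit("lui   $tmp, hi(PM_DEFAULT_MASK)");
	emit("ori   $tmp, $tmp, lo(PM_DEFAULT_MASK)");
	emit("mtc0  $tmp, CP0_PageMask  # reset default page size");
	emit("b     <lid>               # branch back to the requested label");
	emit("mfc0  $1, <KScratch>      # delay slot: restore $at");
	return 0;
}
```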