author    | James Hogan <james.hogan@imgtec.com> | 2016-09-10 23:55:07 +0100
committer | James Hogan <james.hogan@imgtec.com> | 2017-02-03 15:19:01 +0000
commit    | 722b45443146f425453525d3c2270ff2733f5dc4 (patch)
tree      | 6f4d172bf3da8f433b3b9d8942dbf02ebf270e21
parent    | 93a93c2461f9416a58c7d1e32fec201af8cc3aad (diff)
MIPS: Export some tlbex internals for KVM to use
Export the TLB exception code generating functions so that KVM can
construct a fast TLB refill handler for guest context without
reinventing the wheel quite so much.
Signed-off-by: James Hogan <james.hogan@imgtec.com>
Acked-by: Ralf Baechle <ralf@linux-mips.org>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: "Radim Krčmář" <rkrcmar@redhat.com>
Cc: linux-mips@linux-mips.org
Cc: kvm@vger.kernel.org
-rw-r--r-- | arch/mips/include/asm/tlbex.h | 26
-rw-r--r-- | arch/mips/mm/tlbex.c          | 33
2 files changed, 42 insertions, 17 deletions
diff --git a/arch/mips/include/asm/tlbex.h b/arch/mips/include/asm/tlbex.h
new file mode 100644
index 000000000000..53050e9dd2c9
--- /dev/null
+++ b/arch/mips/include/asm/tlbex.h
@@ -0,0 +1,26 @@
+#ifndef __ASM_TLBEX_H
+#define __ASM_TLBEX_H
+
+#include <asm/uasm.h>
+
+/*
+ * Write random or indexed TLB entry, and care about the hazards from
+ * the preceding mtc0 and for the following eret.
+ */
+enum tlb_write_entry {
+	tlb_random,
+	tlb_indexed
+};
+
+extern int pgd_reg;
+
+void build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
+		      unsigned int tmp, unsigned int ptr);
+void build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr);
+void build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr);
+void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep);
+void build_tlb_write_entry(u32 **p, struct uasm_label **l,
+			   struct uasm_reloc **r,
+			   enum tlb_write_entry wmode);
+
+#endif /* __ASM_TLBEX_H */
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index dc7bb1506103..2465f83c79c3 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -35,6 +35,7 @@
 #include <asm/war.h>
 #include <asm/uasm.h>
 #include <asm/setup.h>
+#include <asm/tlbex.h>
 
 static int mips_xpa_disabled;
 
@@ -345,7 +346,8 @@ static int allocate_kscratch(void)
 }
 
 static int scratch_reg;
-static int pgd_reg;
+int pgd_reg;
+EXPORT_SYMBOL_GPL(pgd_reg);
 enum vmalloc64_mode {not_refill, refill_scratch, refill_noscratch};
 
 static struct work_registers build_get_work_registers(u32 **p)
@@ -497,15 +499,9 @@ static void __maybe_unused build_tlb_probe_entry(u32 **p)
 	}
 }
 
-/*
- * Write random or indexed TLB entry, and care about the hazards from
- * the preceding mtc0 and for the following eret.
- */
-enum tlb_write_entry { tlb_random, tlb_indexed };
-
-static void build_tlb_write_entry(u32 **p, struct uasm_label **l,
-				  struct uasm_reloc **r,
-				  enum tlb_write_entry wmode)
+void build_tlb_write_entry(u32 **p, struct uasm_label **l,
+			   struct uasm_reloc **r,
+			   enum tlb_write_entry wmode)
 {
 	void(*tlbw)(u32 **) = NULL;
 
@@ -628,6 +624,7 @@ static void build_tlb_write_entry(u32 **p, struct uasm_label **l,
 		break;
 	}
 }
+EXPORT_SYMBOL_GPL(build_tlb_write_entry);
 
 static __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
 							unsigned int reg)
@@ -782,9 +779,8 @@ static void build_huge_handler_tail(u32 **p, struct uasm_reloc **r,
  * TMP and PTR are scratch.
  * TMP will be clobbered, PTR will hold the pmd entry.
  */
-static void
-build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
-		 unsigned int tmp, unsigned int ptr)
+void build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
+		      unsigned int tmp, unsigned int ptr)
 {
 #ifndef CONFIG_MIPS_PGD_C0_CONTEXT
 	long pgdc = (long)pgd_current;
@@ -860,6 +856,7 @@ build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
 	uasm_i_daddu(p, ptr, ptr, tmp); /* add in pmd offset */
 #endif
 }
+EXPORT_SYMBOL_GPL(build_get_pmde64);
 
 /*
  * BVADDR is the faulting address, PTR is scratch.
@@ -935,8 +932,7 @@ build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
  * TMP and PTR are scratch.
  * TMP will be clobbered, PTR will hold the pgd entry.
  */
-static void __maybe_unused
-build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
+void build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
 {
 	if (pgd_reg != -1) {
 		/* pgd is in pgd_reg */
@@ -961,6 +957,7 @@ build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
 	uasm_i_sll(p, tmp, tmp, PGD_T_LOG2);
 	uasm_i_addu(p, ptr, ptr, tmp); /* add in pgd offset */
 }
+EXPORT_SYMBOL_GPL(build_get_pgde32);
 
 #endif /* !CONFIG_64BIT */
 
@@ -990,7 +987,7 @@ static void build_adjust_context(u32 **p, unsigned int ctx)
 	uasm_i_andi(p, ctx, ctx, mask);
 }
 
-static void build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
+void build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
 {
 	/*
 	 * Bug workaround for the Nevada. It seems as if under certain
@@ -1014,8 +1011,9 @@ static void build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
 	build_adjust_context(p, tmp);
 	UASM_i_ADDU(p, ptr, ptr, tmp); /* add in offset */
 }
+EXPORT_SYMBOL_GPL(build_get_ptep);
 
-static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
+void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
 {
 	int pte_off_even = 0;
 	int pte_off_odd = sizeof(pte_t);
@@ -1064,6 +1062,7 @@ static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
 		UASM_i_MTC0(p, 0, C0_ENTRYLO1);
 	UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
 }
+EXPORT_SYMBOL_GPL(build_update_entries);
 
 struct mips_huge_tlb_info {
 	int huge_pte;
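For orientation, here is a minimal sketch of how a consumer such as KVM's guest
TLB refill builder could chain the newly exported helpers together with uasm.
It is not the actual KVM code that later builds on this export: the function
name build_guest_tlb_refill, the k0/k1 register choices, the label/reloc array
sizes and the elided guest register save/restore are illustrative assumptions.

/*
 * Hypothetical consumer of the exported tlbex helpers (not the real
 * KVM handler): emits a TLB refill sequence into a code buffer at p.
 */
#include <asm/tlbex.h>
#include <asm/uasm.h>

#define K0	26	/* MIPS k0 scratch register number */
#define K1	27	/* MIPS k1 scratch register number */

static u32 *build_guest_tlb_refill(u32 *p)
{
	struct uasm_label labels[8] = { };
	struct uasm_reloc relocs[8] = { };
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	/* ... save guest k0/k1 so they can be used as scratch here ... */

	/* Walk the pgd/pmd, then compute the pointer to the faulting pte pair */
#ifdef CONFIG_64BIT
	build_get_pmde64(&p, &l, &r, K0, K1);
#else
	build_get_pgde32(&p, K0, K1);
#endif
	build_get_ptep(&p, K0, K1);

	/* Load the even/odd PTEs into EntryLo0/EntryLo1 */
	build_update_entries(&p, K0, K1);

	/* Write a random TLB entry, handling the mtc0/eret hazards */
	build_tlb_write_entry(&p, &l, &r, tlb_random);

	/* ... restore guest k0/k1 ... */
	uasm_i_eret(&p);

	/* Patch any branches emitted against the recorded labels */
	uasm_resolve_relocs(relocs, labels);
	return p;
}

A real handler would additionally flush the generated buffer to memory and
install it at the exception vector; that plumbing is omitted from the sketch.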