author    James Hogan <james.hogan@imgtec.com>    2016-12-16 15:57:00 +0000
committer James Hogan <james.hogan@imgtec.com>    2017-02-03 15:20:56 +0000
commit    aba8592950f1c698bb9c1b42d4f4dab07a145674 (patch)
tree      a02c3bfe5ce29de735f063db6ea03b0332ca2fe6 /arch/mips/include
parent    a31b50d741bd85a127d5ef2c21c0788041bc41a9 (diff)
KVM: MIPS/MMU: Invalidate stale GVA PTEs on TLBW
Implement invalidation of specific pairs of GVA page table entries in one
or both of the GVA page tables. This is used when existing mappings are
replaced in the guest TLB by emulated TLBWI/TLBWR instructions. Due to the
sharing of page tables in the host kernel range, we should be careful not
to allow host pages to be invalidated.

Add a helper kvm_mips_walk_pgd() which can be used when walking of either
GPA (future patches) or GVA page tables is needed, optionally with
allocation of page tables along the way when they don't exist.

GPA page table walking will need to be protected by the kvm->mmu_lock, so
we also add a small MMU page cache in each KVM VCPU, like that found for
other architectures but smaller. This allows enough pages to be
pre-allocated to handle a single fault without holding the lock, allowing
the helper to run with the lock held without having to handle allocation
failures. Using the same mechanism for GVA allows the same code to be
used, and allows it to use the same cache of allocated pages if the GPA
walk didn't need to allocate any new tables.

Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: "Radim Krčmář" <rkrcmar@redhat.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: linux-mips@linux-mips.org
Cc: kvm@vger.kernel.org
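The cache described above works in two phases: the fault handler tops up
the per-vCPU cache before taking kvm->mmu_lock, and the walker then draws
pre-allocated pages from it, so it never has to handle allocation failure
while the lock is held. Below is a minimal sketch of that pattern,
modelled on the equivalent caches in other KVM architectures; the helper
names and GFP flags are illustrative, not necessarily the exact code added
to arch/mips/kvm/mmu.c.

/* Sketch: pre-fill the cache outside the lock, up to `max` pages. */
static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
				  int min, int max)
{
	void *page;

	BUG_ON(max > KVM_NR_MEM_OBJS);
	if (cache->nobjs >= min)
		return 0;
	while (cache->nobjs < max) {
		page = (void *)__get_free_page(GFP_KERNEL);
		if (!page)
			return -ENOMEM;
		cache->objects[cache->nobjs++] = page;
	}
	return 0;
}

/* Consume a pre-allocated page; safe to call under kvm->mmu_lock. */
static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *cache)
{
	BUG_ON(!cache->nobjs);
	return cache->objects[--cache->nobjs];
}

/* Return unused pages, e.g. from kvm_mmu_free_memory_caches(). */
static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *cache)
{
	while (cache->nobjs)
		free_page((unsigned long)cache->objects[--cache->nobjs]);
}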
Diffstat (limited to 'arch/mips/include')
-rw-r--r--  arch/mips/include/asm/kvm_host.h  17
1 file changed, 17 insertions(+), 0 deletions(-)
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index f5145dcab319..40aab4f5007c 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -261,6 +261,17 @@ struct kvm_mips_tlb {
long tlb_lo[2];
};
+#define KVM_NR_MEM_OBJS 4
+
+/*
+ * We don't want allocation failures within the mmu code, so we preallocate
+ * enough memory for a single page fault in a cache.
+ */
+struct kvm_mmu_memory_cache {
+ int nobjs;
+ void *objects[KVM_NR_MEM_OBJS];
+};
+
#define KVM_MIPS_AUX_FPU 0x1
#define KVM_MIPS_AUX_MSA 0x2
@@ -327,6 +338,9 @@ struct kvm_vcpu_arch {
/* Guest ASID of last user mode execution */
unsigned int last_user_gasid;
+ /* Cache some mmu pages needed inside spinlock regions */
+ struct kvm_mmu_memory_cache mmu_page_cache;
+
int last_sched_cpu;
/* WAIT executed */
@@ -631,6 +645,9 @@ enum kvm_mips_flush {
KMF_GPA = 0x2,
};
void kvm_mips_flush_gva_pt(pgd_t *pgd, enum kvm_mips_flush flags);
+void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);
+void kvm_trap_emul_invalidate_gva(struct kvm_vcpu *vcpu, unsigned long addr,
+ bool user);
extern unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
unsigned long gva);
extern void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
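For reference, the walk the commit message describes for
kvm_mips_walk_pgd() can be sketched as below. This is a reconstruction
from the description above, not necessarily the exact body in
arch/mips/kvm/mmu.c: it descends pgd -> pud -> pmd, and when a cache is
supplied (the pre-topped-up per-vCPU cache, usable under kvm->mmu_lock) it
fills in missing intermediate tables via the mmu_memory_cache_alloc()
sketched earlier; with a NULL cache it returns NULL if the PTE page does
not yet exist.

/* Sketch only: walk (and optionally build) a GPA or GVA page table. */
static pte_t *kvm_mips_walk_pgd(pgd_t *pgd, struct kvm_mmu_memory_cache *cache,
				unsigned long addr)
{
	pud_t *pud;
	pmd_t *pmd;

	pgd += pgd_index(addr);
	if (pgd_none(*pgd)) {
		/* Not handled in this sketch; MIPS folds the pud level. */
		BUG();
	}
	pud = pud_offset(pgd, addr);
	if (pud_none(*pud)) {
		pmd_t *new_pmd;

		if (!cache)
			return NULL;	/* caller wanted an existing PTE only */
		new_pmd = mmu_memory_cache_alloc(cache);
		pmd_init((unsigned long)new_pmd,
			 (unsigned long)invalid_pte_table);
		pud_populate(NULL, pud, new_pmd);
	}
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd)) {
		pte_t *new_pte;

		if (!cache)
			return NULL;
		new_pte = mmu_memory_cache_alloc(cache);
		clear_page(new_pte);
		pmd_populate_kernel(NULL, pmd, new_pte);
	}
	return pte_offset_kernel(pmd, addr);
}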