From 6c9dd6d26216ad9733e57f382e1669c142494aab Mon Sep 17 00:00:00 2001
From: Paolo Bonzini
Date: Fri, 2 Apr 2021 17:53:09 +0200
Subject: KVM: constify kvm_arch_flush_remote_tlbs_memslot

memslots are stored in RCU and there should be no need to change them.

Signed-off-by: Paolo Bonzini
---
 arch/mips/kvm/mips.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/mips/kvm')

diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index 58a8812e2fa5..7db8234a4407 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -997,7 +997,7 @@ void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
 }
 
 void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
-					struct kvm_memory_slot *memslot)
+					const struct kvm_memory_slot *memslot)
 {
 	/* Let implementation handle TLB/GVA invalidation */
 	kvm_mips_callbacks->flush_shadow_memslot(kvm, memslot);
--
cgit v1.2.3
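The point of the const qualifier is that kvm_arch_flush_remote_tlbs_memslot only reads the memslot it is given. A minimal sketch of that read-only contract, as stand-alone user-space C (struct kvm_memory_slot is trimmed to the two fields the flush path computes with; everything here is illustrative, not kernel code):

	#include <stdint.h>
	#include <stdio.h>

	typedef uint64_t gfn_t;

	/* Illustrative stand-in for struct kvm_memory_slot. */
	struct kvm_memory_slot {
		gfn_t base_gfn;
		unsigned long npages;
	};

	/* A const memslot suffices: the flush path only derives a GFN range. */
	static void flush_memslot_range(const struct kvm_memory_slot *slot)
	{
		gfn_t first = slot->base_gfn;
		gfn_t last = slot->base_gfn + slot->npages - 1;

		printf("flush gfns 0x%llx..0x%llx\n",
		       (unsigned long long)first, (unsigned long long)last);
	}

	int main(void)
	{
		const struct kvm_memory_slot slot = { .base_gfn = 0x100, .npages = 16 };

		flush_memslot_range(&slot);
		return 0;
	}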
From 5194552fb1ffb4248c8db3f7286aa4ac7ae3163b Mon Sep 17 00:00:00 2001
From: Paolo Bonzini
Date: Wed, 31 Mar 2021 09:38:16 +0200
Subject: KVM: MIPS: rework flush_shadow_* callbacks into one that prepares the flush

Both trap-and-emulate and VZ have a single implementation that covers
both .flush_shadow_all and .flush_shadow_memslot, and both of them end
with a call to kvm_flush_remote_tlbs.

Unify the callbacks into one and extract the call to
kvm_flush_remote_tlbs.  The next patches will pull it further out of
the architecture-specific MMU notifier functions kvm_unmap_hva_range
and kvm_set_spte_hva.

Signed-off-by: Paolo Bonzini
---
 arch/mips/kvm/mips.c      | 12 ++++++------
 arch/mips/kvm/mmu.c       |  9 ++++++---
 arch/mips/kvm/trap_emul.c | 13 ++-----------
 arch/mips/kvm/vz.c        | 19 ++++---------------
 4 files changed, 18 insertions(+), 35 deletions(-)

(limited to 'arch/mips/kvm')

diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index 7db8234a4407..867b8de0fc07 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -206,7 +206,8 @@ void kvm_arch_flush_shadow_all(struct kvm *kvm)
 	kvm_mips_flush_gpa_pt(kvm, 0, ~0);
 
 	/* Let implementation do the rest */
-	kvm_mips_callbacks->flush_shadow_all(kvm);
+	kvm_mips_callbacks->prepare_flush_shadow(kvm);
+	kvm_flush_remote_tlbs(kvm);
 }
 
 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
@@ -221,8 +222,7 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
 	/* Flush slot from GPA */
 	kvm_mips_flush_gpa_pt(kvm, slot->base_gfn,
 			      slot->base_gfn + slot->npages - 1);
-	/* Let implementation do the rest */
-	kvm_mips_callbacks->flush_shadow_memslot(kvm, slot);
+	kvm_arch_flush_remote_tlbs_memslot(kvm, slot);
 	spin_unlock(&kvm->mmu_lock);
 }
 
@@ -262,9 +262,8 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 		/* Write protect GPA page table entries */
 		needs_flush = kvm_mips_mkclean_gpa_pt(kvm, new->base_gfn,
 					new->base_gfn + new->npages - 1);
-		/* Let implementation do the rest */
 		if (needs_flush)
-			kvm_mips_callbacks->flush_shadow_memslot(kvm, new);
+			kvm_arch_flush_remote_tlbs_memslot(kvm, new);
 		spin_unlock(&kvm->mmu_lock);
 	}
 }
@@ -1000,7 +999,8 @@ void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
 				   const struct kvm_memory_slot *memslot)
 {
 	/* Let implementation handle TLB/GVA invalidation */
-	kvm_mips_callbacks->flush_shadow_memslot(kvm, memslot);
+	kvm_mips_callbacks->prepare_flush_shadow(kvm);
+	kvm_flush_remote_tlbs(kvm);
 }
 
 long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c
index 3dabeda82458..7e055e5dfd3c 100644
--- a/arch/mips/kvm/mmu.c
+++ b/arch/mips/kvm/mmu.c
@@ -491,7 +491,8 @@ int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
 			unsigned flags)
 {
 	handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
 
-	kvm_mips_callbacks->flush_shadow_all(kvm);
+	kvm_mips_callbacks->prepare_flush_shadow(kvm);
+	kvm_flush_remote_tlbs(kvm);
 	return 0;
 }
@@ -532,8 +533,10 @@ int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
 	int ret;
 
 	ret = handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &pte);
-	if (ret)
-		kvm_mips_callbacks->flush_shadow_all(kvm);
+	if (ret) {
+		kvm_mips_callbacks->prepare_flush_shadow(kvm);
+		kvm_flush_remote_tlbs(kvm);
+	}
 	return 0;
 }
diff --git a/arch/mips/kvm/trap_emul.c b/arch/mips/kvm/trap_emul.c
index 0788c00d7e94..5f2df497599c 100644
--- a/arch/mips/kvm/trap_emul.c
+++ b/arch/mips/kvm/trap_emul.c
@@ -687,16 +687,8 @@ static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
-static void kvm_trap_emul_flush_shadow_all(struct kvm *kvm)
+static void kvm_trap_emul_prepare_flush_shadow(struct kvm *kvm)
 {
-	/* Flush GVA page tables and invalidate GVA ASIDs on all VCPUs */
-	kvm_flush_remote_tlbs(kvm);
-}
-
-static void kvm_trap_emul_flush_shadow_memslot(struct kvm *kvm,
-					const struct kvm_memory_slot *slot)
-{
-	kvm_trap_emul_flush_shadow_all(kvm);
 }
 
 static u64 kvm_trap_emul_get_one_regs[] = {
@@ -1280,8 +1272,7 @@ static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
 	.vcpu_init = kvm_trap_emul_vcpu_init,
 	.vcpu_uninit = kvm_trap_emul_vcpu_uninit,
 	.vcpu_setup = kvm_trap_emul_vcpu_setup,
-	.flush_shadow_all = kvm_trap_emul_flush_shadow_all,
-	.flush_shadow_memslot = kvm_trap_emul_flush_shadow_memslot,
+	.prepare_flush_shadow = kvm_trap_emul_prepare_flush_shadow,
 	.gva_to_gpa = kvm_trap_emul_gva_to_gpa_cb,
 	.queue_timer_int = kvm_mips_queue_timer_int_cb,
 	.dequeue_timer_int = kvm_mips_dequeue_timer_int_cb,
diff --git a/arch/mips/kvm/vz.c b/arch/mips/kvm/vz.c
index 2ffbe9264a31..2c75571dc4a2 100644
--- a/arch/mips/kvm/vz.c
+++ b/arch/mips/kvm/vz.c
@@ -3211,32 +3211,22 @@ static int kvm_vz_vcpu_setup(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
-static void kvm_vz_flush_shadow_all(struct kvm *kvm)
+static void kvm_vz_prepare_flush_shadow(struct kvm *kvm)
 {
-	if (cpu_has_guestid) {
-		/* Flush GuestID for each VCPU individually */
-		kvm_flush_remote_tlbs(kvm);
-	} else {
+	if (!cpu_has_guestid) {
 		/*
 		 * For each CPU there is a single GPA ASID used by all VCPUs in
 		 * the VM, so it doesn't make sense for the VCPUs to handle
 		 * invalidation of these ASIDs individually.
 		 *
 		 * Instead mark all CPUs as needing ASID invalidation in
-		 * asid_flush_mask, and just use kvm_flush_remote_tlbs(kvm) to
+		 * asid_flush_mask, and kvm_flush_remote_tlbs(kvm) will
 		 * kick any running VCPUs so they check asid_flush_mask.
 		 */
		cpumask_setall(&kvm->arch.asid_flush_mask);
-		kvm_flush_remote_tlbs(kvm);
 	}
 }
 
-static void kvm_vz_flush_shadow_memslot(struct kvm *kvm,
-					const struct kvm_memory_slot *slot)
-{
-	kvm_vz_flush_shadow_all(kvm);
-}
-
 static void kvm_vz_vcpu_reenter(struct kvm_vcpu *vcpu)
 {
 	int cpu = smp_processor_id();
@@ -3292,8 +3282,7 @@ static struct kvm_mips_callbacks kvm_vz_callbacks = {
 	.vcpu_init = kvm_vz_vcpu_init,
 	.vcpu_uninit = kvm_vz_vcpu_uninit,
 	.vcpu_setup = kvm_vz_vcpu_setup,
-	.flush_shadow_all = kvm_vz_flush_shadow_all,
-	.flush_shadow_memslot = kvm_vz_flush_shadow_memslot,
+	.prepare_flush_shadow = kvm_vz_prepare_flush_shadow,
 	.gva_to_gpa = kvm_vz_gva_to_gpa_cb,
 	.queue_timer_int = kvm_vz_queue_timer_int_cb,
 	.dequeue_timer_int = kvm_vz_dequeue_timer_int_cb,
--
cgit v1.2.3
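After this patch every flush site follows the same two-step shape: an architecture hook that prepares state, then the one generic kvm_flush_remote_tlbs. A compilable model of the split (stand-alone C; the struct, the function bodies, and the "_model" names are simplifications for illustration, not the kernel's definitions):

	#include <stdio.h>

	struct kvm { int dummy; };

	/* Model of the single remaining callback. */
	struct kvm_mips_callbacks_model {
		void (*prepare_flush_shadow)(struct kvm *kvm);
	};

	static void vz_prepare_flush_shadow(struct kvm *kvm)
	{
		/* The real VZ hook marks all CPUs in asid_flush_mask. */
		printf("prepare: mark ASIDs for invalidation\n");
	}

	static struct kvm_mips_callbacks_model callbacks = {
		.prepare_flush_shadow = vz_prepare_flush_shadow,
	};

	static void kvm_flush_remote_tlbs_model(struct kvm *kvm)
	{
		/* The real function kicks every vCPU with KVM_REQ_TLB_FLUSH. */
		printf("flush: kick all vCPUs\n");
	}

	/* The pattern each converted call site now follows. */
	static void flush_shadow(struct kvm *kvm)
	{
		callbacks.prepare_flush_shadow(kvm);
		kvm_flush_remote_tlbs_model(kvm);
	}

	int main(void)
	{
		struct kvm vm = { 0 };

		flush_shadow(&vm);
		return 0;
	}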
From 566a0beef52c83f13b67aef02b2bc0aa63c0e0d6 Mon Sep 17 00:00:00 2001
From: Paolo Bonzini
Date: Fri, 2 Apr 2021 11:44:56 +0200
Subject: KVM: MIPS: let generic code call prepare_flush_shadow

Since all calls to kvm_flush_remote_tlbs must be preceded by
kvm_mips_callbacks->prepare_flush_shadow, repurpose
kvm_arch_flush_remote_tlb to invoke it.  This makes it possible to use
the TLB flushing mechanism provided by the generic MMU notifier code.

Signed-off-by: Paolo Bonzini
---
 arch/mips/kvm/mips.c | 11 ++++++-----
 arch/mips/kvm/mmu.c  |  6 +-----
 2 files changed, 7 insertions(+), 10 deletions(-)

(limited to 'arch/mips/kvm')

diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index 867b8de0fc07..4a22ba70c943 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -204,9 +204,6 @@ void kvm_arch_flush_shadow_all(struct kvm *kvm)
 {
 	/* Flush whole GPA */
 	kvm_mips_flush_gpa_pt(kvm, 0, ~0);
-
-	/* Let implementation do the rest */
-	kvm_mips_callbacks->prepare_flush_shadow(kvm);
 	kvm_flush_remote_tlbs(kvm);
 }
 
@@ -995,11 +992,15 @@ void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
 
 }
 
+int kvm_arch_flush_remote_tlb(struct kvm *kvm)
+{
+	kvm_mips_callbacks->prepare_flush_shadow(kvm);
+	return 1;
+}
+
 void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
 				   const struct kvm_memory_slot *memslot)
 {
-	/* Let implementation handle TLB/GVA invalidation */
-	kvm_mips_callbacks->prepare_flush_shadow(kvm);
 	kvm_flush_remote_tlbs(kvm);
 }
 
diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c
index 7e055e5dfd3c..2cedf908d744 100644
--- a/arch/mips/kvm/mmu.c
+++ b/arch/mips/kvm/mmu.c
@@ -490,8 +490,6 @@ int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
 			unsigned flags)
 {
 	handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
-
-	kvm_mips_callbacks->prepare_flush_shadow(kvm);
 	kvm_flush_remote_tlbs(kvm);
 	return 0;
 }
@@ -533,10 +531,8 @@ int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
 	int ret;
 
 	ret = handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &pte);
-	if (ret) {
-		kvm_mips_callbacks->prepare_flush_shadow(kvm);
+	if (ret)
 		kvm_flush_remote_tlbs(kvm);
-	}
 	return 0;
 }
--
cgit v1.2.3
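The hook works because generic code consults kvm_arch_flush_remote_tlb before kicking vCPUs: a nonzero return tells it to broadcast KVM_REQ_TLB_FLUSH itself. Roughly, and leaving out the flush statistics and tlbs_dirty bookkeeping that the real virt/kvm/kvm_main.c performs (this is a paraphrase as stand-alone C, not the actual source):

	#include <stdbool.h>
	#include <stdio.h>

	struct kvm { int dummy; };
	enum { KVM_REQ_TLB_FLUSH };

	/* Arch hook: nonzero means "generic code must still kick the vCPUs".
	 * MIPS runs prepare_flush_shadow here and returns 1. */
	static int kvm_arch_flush_remote_tlb(struct kvm *kvm)
	{
		printf("arch: prepare_flush_shadow\n");
		return 1;
	}

	static bool kvm_make_all_cpus_request(struct kvm *kvm, int req)
	{
		printf("request %d broadcast to all vCPUs\n", req);
		return true;
	}

	static void kvm_flush_remote_tlbs(struct kvm *kvm)
	{
		if (kvm_arch_flush_remote_tlb(kvm))
			kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH);
	}

	int main(void)
	{
		struct kvm vm = { 0 };

		kvm_flush_remote_tlbs(&vm);
		return 0;
	}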
From fe9a5b055116dff7fcee081abc2def4b14d24c21 Mon Sep 17 00:00:00 2001
From: Paolo Bonzini
Date: Wed, 31 Mar 2021 09:40:39 +0200
Subject: KVM: MIPS: defer flush to generic MMU notifier code

Return 1 from kvm_unmap_hva_range and kvm_set_spte_hva if a flush is
needed, so that the generic code can coalesce the flushes.

Signed-off-by: Paolo Bonzini
---
 arch/mips/kvm/mmu.c | 11 ++---------
 1 file changed, 2 insertions(+), 9 deletions(-)

(limited to 'arch/mips/kvm')

diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c
index 2cedf908d744..a3a49b009dc4 100644
--- a/arch/mips/kvm/mmu.c
+++ b/arch/mips/kvm/mmu.c
@@ -489,9 +489,7 @@ static int kvm_unmap_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
 int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
 			unsigned flags)
 {
-	handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
-	kvm_flush_remote_tlbs(kvm);
-	return 0;
+	return handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
 }
 
 static int kvm_set_spte_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
@@ -528,12 +526,7 @@ static int kvm_set_spte_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
 int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
 {
 	unsigned long end = hva + PAGE_SIZE;
-	int ret;
-
-	ret = handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &pte);
-	if (ret)
-		kvm_flush_remote_tlbs(kvm);
-	return 0;
+	return handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &pte);
 }
 
 static int kvm_age_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
--
cgit v1.2.3
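With the handlers only reporting whether a flush is needed, the caller can walk several ranges and issue a single flush at the end. A small model of that coalescing (stand-alone C; generic_invalidate is a made-up name for the role the common MMU notifier code plays):

	#include <stdio.h>

	struct kvm { int dummy; };

	/* Arch handler: nonzero return means "TLBs must be flushed". */
	static int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start,
				       unsigned long end, unsigned flags)
	{
		printf("unmap [0x%lx, 0x%lx)\n", start, end);
		return 1;
	}

	/* The generic caller ORs the results and flushes once. */
	static void generic_invalidate(struct kvm *kvm,
				       const unsigned long (*ranges)[2], int n)
	{
		int flush = 0;
		int i;

		for (i = 0; i < n; i++)
			flush |= kvm_unmap_hva_range(kvm, ranges[i][0],
						     ranges[i][1], 0);
		if (flush)
			printf("one kvm_flush_remote_tlbs() for all ranges\n");
	}

	int main(void)
	{
		const unsigned long r[2][2] = {
			{ 0x1000, 0x2000 },
			{ 0x8000, 0x9000 },
		};
		struct kvm vm = { 0 };

		generic_invalidate(&vm, r, 2);
		return 0;
	}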
From d923ff258423b7c30e257d7adcd791f845e1b5fb Mon Sep 17 00:00:00 2001
From: Sean Christopherson
Date: Thu, 1 Apr 2021 17:56:52 -0700
Subject: KVM: MIPS/MMU: Convert to the gfn-based MMU notifier callbacks

Move MIPS to the gfn-based MMU notifier APIs, which do the hva->gfn
lookup in common code, and whose code is nearly identical to MIPS'
lookup.

No meaningful functional change intended, though the exact order of
operations is slightly different since the memslot lookups occur before
calling into arch code.

Signed-off-by: Sean Christopherson
Message-Id: <20210402005658.3024832-5-seanjc@google.com>
Signed-off-by: Paolo Bonzini
---
 arch/mips/kvm/mmu.c | 92 ++++++++--------------------------------------------
 1 file changed, 13 insertions(+), 79 deletions(-)

(limited to 'arch/mips/kvm')

diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c
index a3a49b009dc4..8af002bd3c07 100644
--- a/arch/mips/kvm/mmu.c
+++ b/arch/mips/kvm/mmu.c
@@ -439,82 +439,34 @@ static int kvm_mips_mkold_gpa_pt(struct kvm *kvm, gfn_t start_gfn,
 			end_gfn << PAGE_SHIFT);
 }
 
-static int handle_hva_to_gpa(struct kvm *kvm,
-			     unsigned long start,
-			     unsigned long end,
-			     int (*handler)(struct kvm *kvm, gfn_t gfn,
-					    gpa_t gfn_end,
-					    struct kvm_memory_slot *memslot,
-					    void *data),
-			     void *data)
+bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
 {
-	struct kvm_memslots *slots;
-	struct kvm_memory_slot *memslot;
-	int ret = 0;
-
-	slots = kvm_memslots(kvm);
-
-	/* we only care about the pages that the guest sees */
-	kvm_for_each_memslot(memslot, slots) {
-		unsigned long hva_start, hva_end;
-		gfn_t gfn, gfn_end;
-
-		hva_start = max(start, memslot->userspace_addr);
-		hva_end = min(end, memslot->userspace_addr +
-					(memslot->npages << PAGE_SHIFT));
-		if (hva_start >= hva_end)
-			continue;
-
-		/*
-		 * {gfn(page) | page intersects with [hva_start, hva_end)} =
-		 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
-		 */
-		gfn = hva_to_gfn_memslot(hva_start, memslot);
-		gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
-
-		ret |= handler(kvm, gfn, gfn_end, memslot, data);
-	}
-
-	return ret;
-}
-
-
-static int kvm_unmap_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
-				 struct kvm_memory_slot *memslot, void *data)
-{
-	kvm_mips_flush_gpa_pt(kvm, gfn, gfn_end);
+	kvm_mips_flush_gpa_pt(kvm, range->start, range->end);
 	return 1;
 }
 
-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
-			unsigned flags)
-{
-	return handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
-}
-
-static int kvm_set_spte_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
-				struct kvm_memory_slot *memslot, void *data)
+bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
 {
-	gpa_t gpa = gfn << PAGE_SHIFT;
-	pte_t hva_pte = *(pte_t *)data;
+	gpa_t gpa = range->start << PAGE_SHIFT;
+	pte_t hva_pte = range->pte;
 	pte_t *gpa_pte = kvm_mips_pte_for_gpa(kvm, NULL, gpa);
 	pte_t old_pte;
 
 	if (!gpa_pte)
-		return 0;
+		return false;
 
 	/* Mapping may need adjusting depending on memslot flags */
 	old_pte = *gpa_pte;
-	if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES && !pte_dirty(old_pte))
+	if (range->slot->flags & KVM_MEM_LOG_DIRTY_PAGES && !pte_dirty(old_pte))
 		hva_pte = pte_mkclean(hva_pte);
-	else if (memslot->flags & KVM_MEM_READONLY)
+	else if (range->slot->flags & KVM_MEM_READONLY)
 		hva_pte = pte_wrprotect(hva_pte);
 
 	set_pte(gpa_pte, hva_pte);
 
 	/* Replacing an absent or old page doesn't need flushes */
 	if (!pte_present(old_pte) || !pte_young(old_pte))
-		return 0;
+		return false;
 
 	/* Pages swapped, aged, moved, or cleaned require flushes */
 	return !pte_present(hva_pte) ||
@@ -523,22 +475,14 @@ static int kvm_set_spte_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
 	       (pte_dirty(old_pte) && !pte_dirty(hva_pte));
 }
 
-int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
+bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
 {
-	unsigned long end = hva + PAGE_SIZE;
-	return handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &pte);
+	return kvm_mips_mkold_gpa_pt(kvm, range->start, range->end);
 }
 
-static int kvm_age_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
-			       struct kvm_memory_slot *memslot, void *data)
+bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
 {
-	return kvm_mips_mkold_gpa_pt(kvm, gfn, gfn_end);
-}
-
-static int kvm_test_age_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
-				    struct kvm_memory_slot *memslot, void *data)
-{
-	gpa_t gpa = gfn << PAGE_SHIFT;
+	gpa_t gpa = range->start << PAGE_SHIFT;
 	pte_t *gpa_pte = kvm_mips_pte_for_gpa(kvm, NULL, gpa);
 
 	if (!gpa_pte)
@@ -546,16 +490,6 @@ static int kvm_test_age_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
 	return pte_young(*gpa_pte);
 }
 
-int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
-{
-	return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL);
-}
-
-int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
-{
-	return handle_hva_to_gpa(kvm, hva, hva, kvm_test_age_hva_handler, NULL);
-}
-
 /**
  * _kvm_mips_map_page_fast() - Fast path GPA fault handler.
  * @vcpu:		VCPU pointer.
--
cgit v1.2.3
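In the gfn-based scheme, common code does the memslot walk and hands each architecture handler a pre-resolved range. The struct the handlers receive looks roughly like the one this series adds to include/linux/kvm_host.h (the real one also carries the new PTE consumed by kvm_set_spte_gfn; the sketch below is stand-alone C for illustration, not the kernel header):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	typedef uint64_t gfn_t;

	struct kvm { int dummy; };
	struct kvm_memory_slot { gfn_t base_gfn; unsigned long npages; };

	/* Approximate shape of struct kvm_gfn_range. */
	struct kvm_gfn_range {
		struct kvm_memory_slot *slot;
		gfn_t start;	/* first gfn of the range */
		gfn_t end;	/* one past the last gfn */
		bool may_block;
	};

	/* Arch handler: true means the caller must flush remote TLBs. */
	static bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
	{
		printf("flush GPA page tables for gfns [0x%llx, 0x%llx)\n",
		       (unsigned long long)range->start,
		       (unsigned long long)range->end);
		return true;
	}

	int main(void)
	{
		struct kvm vm = { 0 };
		struct kvm_memory_slot slot = { .base_gfn = 0x100, .npages = 16 };
		struct kvm_gfn_range range = {
			.slot = &slot, .start = 0x100, .end = 0x110,
		};

		if (kvm_unmap_gfn_range(&vm, &range))
			printf("generic code calls kvm_flush_remote_tlbs()\n");
		return 0;
	}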