author	Sean Christopherson <sean.j.christopherson@intel.com>	2019-09-12 19:46:04 -0700
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2019-10-05 13:14:05 +0200
commit	e1bbf7d2cd2181f336d612ae47a560cb2f644427 (patch)
tree	2b2c370d32cdc9e60b59a8aefc828e0ef56f1d49 /arch
parent	32076032e7db5d95c5cdc0cb1ef5e641d295076c (diff)
KVM: x86/mmu: Use fast invalidate mechanism to zap MMIO sptes
commit 92f58b5c0181596d9f1e317b49ada2e728fb76eb upstream.

Use the fast invalidate mechanism to zap MMIO sptes on an MMIO generation
wrap.  The fast invalidate flow was reintroduced to fix a livelock bug in
kvm_mmu_zap_all() that can occur if kvm_mmu_zap_all() is invoked when the
guest has live vCPUs, i.e. using kvm_mmu_zap_all() to handle the MMIO
generation wrap is theoretically susceptible to the livelock bug.

This effectively reverts commit 4771450c345dc ("Revert "KVM: MMU: drop
kvm_mmu_zap_mmio_sptes""), i.e. restores the behavior of commit
a8eca9dcc656a ("KVM: MMU: drop kvm_mmu_zap_mmio_sptes").

Note, this actually fixes commit 571c5af06e303 ("KVM: x86/mmu: Voluntarily
reschedule as needed when zapping MMIO sptes"), but there is no need to
incrementally revert back to using fast invalidate, e.g. doing so doesn't
provide any bisection or stability benefits.

Fixes: 571c5af06e303 ("KVM: x86/mmu: Voluntarily reschedule as needed when zapping MMIO sptes")
Cc: stable@vger.kernel.org
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
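For context, here is a minimal userspace C sketch of the idea behind the fast invalidate path, assuming a toy page list rather than KVM's real data structures: instead of eagerly walking and freeing every shadow page under mmu_lock (the pattern that leaves kvm_mmu_zap_all() open to livelock while vCPUs keep faulting in new pages), the fast path bumps a validity generation so every existing page becomes obsolete at once and can be reclaimed lazily. The toy_* names, the page list, and the reclaim loop are illustrative assumptions only; they are not kvm_mmu_zap_all_fast()'s actual implementation.

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-in for a shadow page; illustrative only, not KVM's struct. */
struct toy_sp {
	unsigned long valid_gen;	/* generation the page was created in */
	struct toy_sp *next;
};

static struct toy_sp *active_pages;
static unsigned long mmu_valid_gen = 1;

/* Eager zap: walk and free every page in one pass. */
static void toy_zap_all(void)
{
	struct toy_sp *sp = active_pages;

	while (sp) {
		struct toy_sp *next = sp->next;

		free(sp);
		sp = next;
	}
	active_pages = NULL;
}

/*
 * Fast invalidate: bump the generation; every existing page becomes
 * obsolete immediately and can be reclaimed later, off this path.
 */
static void toy_zap_all_fast(void)
{
	mmu_valid_gen++;
}

static int toy_sp_is_obsolete(const struct toy_sp *sp)
{
	return sp->valid_gen != mmu_valid_gen;
}

int main(void)
{
	struct toy_sp *sp = calloc(1, sizeof(*sp));

	if (!sp)
		return 1;
	sp->valid_gen = mmu_valid_gen;
	sp->next = active_pages;
	active_pages = sp;

	toy_zap_all_fast();
	printf("obsolete after fast invalidate: %d\n", toy_sp_is_obsolete(sp));

	toy_zap_all();	/* eventual reclaim of the now-obsolete pages */
	return 0;
}

The point of the sketch is the cost model: the generation bump is O(1) and never holds up other work, while the eager walk has to visit every page and, in the real code, can keep getting restarted while vCPUs repopulate the list.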
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/kvm/mmu.c	17
1 file changed, 3 insertions, 14 deletions
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 9130152d5ed8..3070b1dd923a 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -389,8 +389,6 @@ static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
 	mask |= (gpa & shadow_nonpresent_or_rsvd_mask)
 		<< shadow_nonpresent_or_rsvd_mask_len;
 
-	page_header(__pa(sptep))->mmio_cached = true;
-
 	trace_mark_mmio_spte(sptep, gfn, access, gen);
 	mmu_spte_set(sptep, mask);
 }
@@ -5952,7 +5950,7 @@ void kvm_mmu_slot_set_dirty(struct kvm *kvm,
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_slot_set_dirty);
 
-static void __kvm_mmu_zap_all(struct kvm *kvm, bool mmio_only)
+void kvm_mmu_zap_all(struct kvm *kvm)
 {
 	struct kvm_mmu_page *sp, *node;
 	LIST_HEAD(invalid_list);
@@ -5961,14 +5959,10 @@ static void __kvm_mmu_zap_all(struct kvm *kvm, bool mmio_only)
 	spin_lock(&kvm->mmu_lock);
 restart:
 	list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) {
-		if (mmio_only && !sp->mmio_cached)
-			continue;
 		if (sp->role.invalid && sp->root_count)
			continue;
-		if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, &ign)) {
-			WARN_ON_ONCE(mmio_only);
+		if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, &ign))
 			goto restart;
-		}
 		if (cond_resched_lock(&kvm->mmu_lock))
 			goto restart;
 	}
@@ -5977,11 +5971,6 @@ restart:
 	spin_unlock(&kvm->mmu_lock);
 }
 
-void kvm_mmu_zap_all(struct kvm *kvm)
-{
-	return __kvm_mmu_zap_all(kvm, false);
-}
-
 void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen)
 {
 	WARN_ON(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS);
@@ -6003,7 +5992,7 @@ void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen)
 	 */
 	if (unlikely(gen == 0)) {
 		kvm_debug_ratelimited("kvm: zapping shadow pages for mmio generation wraparound\n");
-		__kvm_mmu_zap_all(kvm, true);
+		kvm_mmu_zap_all_fast(kvm);
 	}
 }
 