author		David Matlack <dmatlack@google.com>	2022-09-21 10:35:43 -0700
committer	Paolo Bonzini <pbonzini@redhat.com>	2022-12-29 15:33:21 -0500
commit		e5e6f8d254a285e97b02180fdc75d02ffdf9f0ed
tree		77a043f89d5b067c491cc3711256735d1633effc
parent		354c908c068ec0a21850aef5dbf6b8331434a2e9
KVM: x86/mmu: Initialize fault.{gfn,slot} earlier for direct MMUs
Move the initialization of fault.{gfn,slot} earlier in the page fault
handling code for fully direct MMUs. This will enable a future commit
to split out TDP MMU page fault handling without needing to duplicate
the initialization of these 2 fields.

Opportunistically take advantage of the fact that fault.gfn is
initialized in kvm_tdp_page_fault() rather than recomputing it from
fault->addr.

No functional change intended.

Signed-off-by: David Matlack <dmatlack@google.com>
Reviewed-by: Isaku Yamahata <isaku.yamahata@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20220921173546.2674386-8-dmatlack@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
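In effect, the per-MMU fault handlers can now assume fault.gfn and fault.slot are valid on entry whenever the MMU is direct. Below is a minimal, standalone sketch of the reordered flow, not the kernel code itself: the types and helpers (fake_page_fault, lookup_memslot, handle_fault, DEMO_PAGE_SHIFT) are illustrative placeholders, not KVM APIs.

	#include <inttypes.h>
	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define DEMO_PAGE_SHIFT 12	/* stand-in for the kernel's PAGE_SHIFT */

	struct fake_memslot { uint64_t base_gfn; };

	struct fake_page_fault {
		uint64_t addr;		/* guest physical address on a direct MMU */
		uint64_t gfn;		/* filled in up front, as of this commit */
		struct fake_memslot *slot;
	};

	/* Stand-in for kvm_vcpu_gfn_to_memslot(); always "finds" one slot here. */
	static struct fake_memslot *lookup_memslot(uint64_t gfn)
	{
		static struct fake_memslot slot;
		(void)gfn;
		return &slot;
	}

	/* Stand-in fault handler: gfn and slot are already valid on entry. */
	static int handle_fault(struct fake_page_fault *fault)
	{
		printf("handling fault at gfn 0x%" PRIx64 "\n", fault->gfn);
		return 0;
	}

	int main(void)
	{
		bool direct = true;	/* models vcpu->arch.mmu->root_role.direct */
		struct fake_page_fault fault = { .addr = 0x12345678 };

		/* The reordering: derive gfn/slot once, before any handler runs. */
		if (direct) {
			fault.gfn = fault.addr >> DEMO_PAGE_SHIFT;
			fault.slot = lookup_memslot(fault.gfn);
		}
		return handle_fault(&fault);
	}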
-rw-r--r--	arch/x86/kvm/mmu/mmu.c		| 5 +----
-rw-r--r--	arch/x86/kvm/mmu/mmu_internal.h	| 5 +++++
2 files changed, 6 insertions(+), 4 deletions(-)
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index e2e8c4dfbaa5..cc2683419ff2 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -4306,9 +4306,6 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
 	bool is_tdp_mmu_fault = is_tdp_mmu(vcpu->arch.mmu);
 	int r;
 
-	fault->gfn = fault->addr >> PAGE_SHIFT;
-	fault->slot = kvm_vcpu_gfn_to_memslot(vcpu, fault->gfn);
-
 	if (page_fault_handle_page_track(vcpu, fault))
 		return RET_PF_EMULATE;
 
@@ -4412,7 +4409,7 @@ int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 	if (shadow_memtype_mask && kvm_arch_has_noncoherent_dma(vcpu->kvm)) {
 		for ( ; fault->max_level > PG_LEVEL_4K; --fault->max_level) {
 			int page_num = KVM_PAGES_PER_HPAGE(fault->max_level);
-			gfn_t base = (fault->addr >> PAGE_SHIFT) & ~(page_num - 1);
+			gfn_t base = fault->gfn & ~(page_num - 1);
 
 			if (kvm_mtrr_check_gfn_range_consistency(vcpu, base, page_num))
 				break;
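To see why this hunk is a pure simplification: for direct MMUs fault->gfn is now exactly fault->addr >> PAGE_SHIFT, so masking the low bits off the precomputed GFN yields the same huge-page-aligned base either way. A tiny standalone demo with illustrative constants (the address and shift are hypothetical; 512 is the number of 4 KiB pages per 2 MiB huge page):

	#include <inttypes.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t addr = 0x12345678;		/* hypothetical guest physical address */
		unsigned int page_shift = 12;		/* 4 KiB base pages */
		uint64_t page_num = 512;		/* 4 KiB pages per 2 MiB huge page */

		uint64_t gfn = addr >> page_shift;	/* the precomputed fault->gfn */

		/* New expression: mask the precomputed GFN down to the huge-page base. */
		uint64_t base = gfn & ~(page_num - 1);

		/* Old expression: recompute the GFN from the address first. */
		uint64_t old_base = (addr >> page_shift) & ~(page_num - 1);

		printf("base=0x%" PRIx64 ", old_base=0x%" PRIx64 ", equal=%d\n",
		       base, old_base, base == old_base);
		return 0;
	}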
diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h
index 1556f596a628..069890789faf 100644
--- a/arch/x86/kvm/mmu/mmu_internal.h
+++ b/arch/x86/kvm/mmu/mmu_internal.h
@@ -280,6 +280,11 @@ static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
 	};
 	int r;
 
+	if (vcpu->arch.mmu->root_role.direct) {
+		fault.gfn = fault.addr >> PAGE_SHIFT;
+		fault.slot = kvm_vcpu_gfn_to_memslot(vcpu, fault.gfn);
+	}
+
 	/*
 	 * Async #PF "faults", a.k.a. prefetch faults, are not faults from the
 	 * guest perspective and have already been counted at the time of the
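A note on why the new initialization is guarded by root_role.direct: on a direct MMU, fault.addr is a guest physical address, so the GFN falls straight out of a shift and can be derived before any handler runs. For shadow (indirect) MMUs the faulting GFN is only known after the guest page tables have been walked, so fault.gfn and fault.slot continue to be filled in later by the indirect fault path.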