author	David Matlack <dmatlack@google.com>	2022-09-21 10:35:40 -0700
committer	Paolo Bonzini <pbonzini@redhat.com>	2022-12-29 15:33:19 -0500
commit	56c3a4e4a2d5ef36ac12156e9d4d793e2841135c (patch)
tree	d413dee6704736aeedf3be4c337fdde96681590f
parent	ba6e3fe255439fd401a8d5487ad7c030db14afa8 (diff)
KVM: x86/mmu: Handle error PFNs in kvm_faultin_pfn()
Handle error PFNs in kvm_faultin_pfn() rather than relying on the caller to
invoke handle_abnormal_pfn() after kvm_faultin_pfn().

Opportunistically rename kvm_handle_bad_page() to kvm_handle_error_pfn() to
make it more consistent with is_error_pfn().

This commit moves KVM closer to being able to drop handle_abnormal_pfn(),
which will reduce the amount of duplicate code in the various page fault
handlers.

No functional change intended.

Signed-off-by: David Matlack <dmatlack@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20220921173546.2674386-5-dmatlack@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
 arch/x86/kvm/mmu/mmu.c | 15 ++++++++++-----
 1 file changed, 10 insertions(+), 5 deletions(-)
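The caller-visible effect is that the is_error_pfn() check now sits behind
kvm_faultin_pfn() itself. A minimal sketch of the resulting call shape in a
page fault handler, assuming a caller along the lines of direct_page_fault()
that passes ACC_ALL (the caller code below is paraphrased for illustration,
not copied from the patch):

	r = kvm_faultin_pfn(vcpu, fault);
	if (r != RET_PF_CONTINUE)
		return r;	/* error PFNs already routed to kvm_handle_error_pfn() */

	r = handle_abnormal_pfn(vcpu, fault, ACC_ALL);
	if (r != RET_PF_CONTINUE)
		return r;	/* left to handle only the no-slot case */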
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index efc540e43e17..d8a256f1b842 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3219,10 +3219,6 @@ static int kvm_handle_error_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn)
 static int handle_abnormal_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
			       unsigned int access)
 {
-	/* The pfn is invalid, report the error! */
-	if (unlikely(is_error_pfn(fault->pfn)))
-		return kvm_handle_error_pfn(vcpu, fault->gfn, fault->pfn);
-
 	if (unlikely(!fault->slot)) {
		gva_t gva = fault->is_tdp ? 0 : fault->addr;
 
@@ -4252,10 +4248,19 @@ static int __kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
 
 static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 {
+	int ret;
+
 	fault->mmu_seq = vcpu->kvm->mmu_invalidate_seq;
 	smp_rmb();
 
-	return __kvm_faultin_pfn(vcpu, fault);
+	ret = __kvm_faultin_pfn(vcpu, fault);
+	if (ret != RET_PF_CONTINUE)
+		return ret;
+
+	if (unlikely(is_error_pfn(fault->pfn)))
+		return kvm_handle_error_pfn(vcpu, fault->gfn, fault->pfn);
+
+	return RET_PF_CONTINUE;
 }
 
 /*
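Reassembled from the '+' and context lines of the hunk above, the post-patch
kvm_faultin_pfn() would read roughly as follows (the comments are added here
for explanation and are not part of the patch):

static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
{
	int ret;

	/* Snapshot the MMU invalidation sequence before resolving the pfn. */
	fault->mmu_seq = vcpu->kvm->mmu_invalidate_seq;
	smp_rmb();

	ret = __kvm_faultin_pfn(vcpu, fault);
	if (ret != RET_PF_CONTINUE)
		return ret;

	/* Error PFNs are now reported here rather than in handle_abnormal_pfn(). */
	if (unlikely(is_error_pfn(fault->pfn)))
		return kvm_handle_error_pfn(vcpu, fault->gfn, fault->pfn);

	return RET_PF_CONTINUE;
}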