author	Sean Christopherson <seanjc@google.com>	2024-01-09 16:39:36 -0800
committer	Sean Christopherson <seanjc@google.com>	2024-02-22 16:27:03 -0800
commit	9b8615c5d37fca15b330882bafceaf24f2398352 (patch)
tree	a8c9b88b0ea0944ea72013077efc9976b70ed55c
parent	77bcd9e6231a5297ef417a7d7f734d61c2bcceb6 (diff)
KVM: x86: Rely solely on preempted_in_kernel flag for directed yield
Snapshot preempted_in_kernel using kvm_arch_vcpu_in_kernel() so that the
flag is "accurate" (or rather, consistent and deterministic within KVM)
for guests with protected state, and explicitly use preempted_in_kernel
when checking if a vCPU was preempted in kernel mode instead of bouncing
through kvm_arch_vcpu_in_kernel().

Drop the gnarly logic in kvm_arch_vcpu_in_kernel() that redirects to
preempted_in_kernel if the target vCPU is not the "running", i.e. loaded,
vCPU, as the only reason that code existed was for the directed yield case
where KVM wants to check the CPL of a vCPU that may or may not be loaded
on the current pCPU.

Cc: Like Xu <like.xu.linux@gmail.com>
Reviewed-by: Yuan Yao <yuan.yao@intel.com>
Link: https://lore.kernel.org/r/20240110003938.490206-3-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
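For context, below is a condensed, illustrative sketch of the directed-yield
consumer in virt/kvm/kvm_main.c that ends up reading the snapshot via the
arch hook changed by this patch. Unrelated eligibility checks are elided and
the surrounding code is paraphrased from memory, so treat it as a sketch
under those assumptions rather than an excerpt of this patch.

/*
 * Sketch only: condensed from kvm_vcpu_on_spin(); several candidate checks
 * are elided and may not match the real function line for line.
 */
void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
{
	struct kvm *kvm = me->kvm;
	struct kvm_vcpu *vcpu;
	unsigned long i;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (vcpu == me || !READ_ONCE(vcpu->ready))
			continue;

		/*
		 * Only yield to a candidate that was preempted while in
		 * kernel mode.  With this patch the arch hook reads the
		 * preempted_in_kernel snapshot instead of querying the CPL
		 * of a vCPU that may not be loaded on the current pCPU.
		 */
		if (READ_ONCE(vcpu->preempted) && yield_to_kernel_mode &&
		    !kvm_arch_vcpu_preempted_in_kernel(vcpu))
			continue;

		if (kvm_vcpu_yield_to(vcpu) > 0)
			break;
	}
}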
-rw-r--r--	arch/x86/kvm/x86.c	8
1 file changed, 2 insertions(+), 6 deletions(-)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 94346490f407..ab7d2baaae7a 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5059,8 +5059,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 	int idx;
 
 	if (vcpu->preempted) {
-		if (!vcpu->arch.guest_state_protected)
-			vcpu->arch.preempted_in_kernel = !static_call(kvm_x86_get_cpl)(vcpu);
+		vcpu->arch.preempted_in_kernel = kvm_arch_vcpu_in_kernel(vcpu);
 
 		/*
 		 * Take the srcu lock as memslots will be accessed to check the gfn
@@ -13056,7 +13055,7 @@ bool kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu)
 
 bool kvm_arch_vcpu_preempted_in_kernel(struct kvm_vcpu *vcpu)
 {
-	return kvm_arch_vcpu_in_kernel(vcpu);
+	return vcpu->arch.preempted_in_kernel;
 }
 
 bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
@@ -13079,9 +13078,6 @@ bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
 	if (vcpu->arch.guest_state_protected)
 		return true;
 
-	if (vcpu != kvm_get_running_vcpu())
-		return vcpu->arch.preempted_in_kernel;
-
 	return static_call(kvm_x86_get_cpl)(vcpu) == 0;
 }
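
For reference, here is a condensed view of the affected x86 helpers as they
read after this patch. The srcu/memslot handling in kvm_arch_vcpu_put() is
summarized in comments; this is a reading aid built from the hunks above, not
a literal excerpt of the file.

/* kvm_arch_vcpu_put(), condensed: snapshot the flag at preemption time. */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	if (vcpu->preempted) {
		/*
		 * For guests with protected state, kvm_arch_vcpu_in_kernel()
		 * returns true, so the snapshot stays consistent and
		 * deterministic within KVM.
		 */
		vcpu->arch.preempted_in_kernel = kvm_arch_vcpu_in_kernel(vcpu);
	}
	/* srcu/memslot bookkeeping and the actual state save are elided. */
}

/* Directed yield reads only the snapshot; no CPL query on an unloaded vCPU. */
bool kvm_arch_vcpu_preempted_in_kernel(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.preempted_in_kernel;
}

/* Now meaningful only for the loaded vCPU, where the CPL can actually be read. */
bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.guest_state_protected)
		return true;

	return static_call(kvm_x86_get_cpl)(vcpu) == 0;
}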