author     Sean Christopherson <seanjc@google.com>  2024-01-09 16:39:35 -0800
committer  Sean Christopherson <seanjc@google.com>  2024-02-22 16:26:26 -0800
commit     77bcd9e6231a5297ef417a7d7f734d61c2bcceb6
tree       cab40abfd19ea43db33e84549d7588a6febd0623 /virt
parent     fc3c94142b3a4391cab94adde56fcfbea25723e5
KVM: Add dedicated arch hook for querying if vCPU was preempted in-kernel
Plumb in a dedicated hook for querying whether or not a vCPU was preempted
in-kernel. Unlike literally every other architecture, x86's VMX can check
if a vCPU is in kernel context if and only if the vCPU is loaded on the
current pCPU.

x86's kvm_arch_vcpu_in_kernel() works around the limitation by querying
kvm_get_running_vcpu() and redirecting to vcpu->arch.preempted_in_kernel
as needed. But that's unnecessary, confusing, and fragile, e.g. x86 has
had at least one bug where KVM incorrectly used a stale
preempted_in_kernel.

No functional change intended.

Reviewed-by: Yuan Yao <yuan.yao@intel.com>
Link: https://lore.kernel.org/r/20240110003938.490206-2-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
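With the weak default in place, an architecture that caches the preemption
state separately can override the hook and return that state directly. As a
minimal sketch (not part of this patch), an x86-style override built on the
vcpu->arch.preempted_in_kernel field mentioned above could look roughly like:

	/* Arch override: report the mode snapshotted when the vCPU was scheduled out. */
	bool kvm_arch_vcpu_preempted_in_kernel(struct kvm_vcpu *vcpu)
	{
		return vcpu->arch.preempted_in_kernel;
	}

Because the generic definition is marked __weak, the linker prefers an
arch-provided strong definition when one exists and falls back to the generic
implementation otherwise.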
Diffstat (limited to 'virt')
-rw-r--r--  virt/kvm/kvm_main.c  14
1 file changed, 13 insertions(+), 1 deletion(-)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 10bfc88a69f7..9b92858c8b72 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -4042,6 +4042,18 @@ static bool vcpu_dy_runnable(struct kvm_vcpu *vcpu)
 	return false;
 }
 
+/*
+ * By default, simply query the target vCPU's current mode when checking if a
+ * vCPU was preempted in kernel mode. All architectures except x86 (or more
+ * specifically, except VMX) allow querying whether or not a vCPU is in kernel
+ * mode even if the vCPU is NOT loaded, i.e. using kvm_arch_vcpu_in_kernel()
+ * directly for cross-vCPU checks is functionally correct and accurate.
+ */
+bool __weak kvm_arch_vcpu_preempted_in_kernel(struct kvm_vcpu *vcpu)
+{
+	return kvm_arch_vcpu_in_kernel(vcpu);
+}
+
 bool __weak kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu)
 {
 	return false;
@@ -4080,7 +4092,7 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
 			continue;
 		if (READ_ONCE(vcpu->preempted) && yield_to_kernel_mode &&
 		    !kvm_arch_dy_has_pending_interrupt(vcpu) &&
-		    !kvm_arch_vcpu_in_kernel(vcpu))
+		    !kvm_arch_vcpu_preempted_in_kernel(vcpu))
 			continue;
 		if (!kvm_vcpu_eligible_for_directed_yield(vcpu))
 			continue;