author    Paolo Bonzini <pbonzini@redhat.com>  2024-03-11 10:24:56 -0400
committer Paolo Bonzini <pbonzini@redhat.com>  2024-03-11 10:24:56 -0400
commit c9cd0beae9d9c3a675eb73ee45e29900d8c11bd2 (patch)
tree   0c92ae6dbe5ce4912bd78ddff774cc0290fe67f1 /virt
parent 507e72f899bd5968b6d3bc4f29cc534ada9ee509 (diff)
parent 78ccfce774435a08d9c69ce434099166cc7952c8 (diff)
Merge tag 'kvm-x86-misc-6.9' of https://github.com/kvm-x86/linux into HEAD
KVM x86 misc changes for 6.9:

- Explicitly initialize a variety of on-stack variables in the emulator that triggered KMSAN false positives (though in fairness to KMSAN, it's comically difficult to see that the uninitialized memory is never truly consumed).

- Fix the debugregs ABI for 32-bit KVM, and clean up code related to reading DR6 and DR7.

- Rework the "force immediate exit" code so that vendor code ultimately decides how and when to force the exit. This allows VMX to further optimize handling preemption timer exits, and allows SVM to avoid sending a duplicate IPI (SVM also has a need to force an exit).

- Fix a long-standing bug where kvm_has_noapic_vcpu could be left elevated if vCPU creation ultimately failed, and add a WARN to guard against similar bugs.

- Provide a dedicated arch hook for checking if a different vCPU was in-kernel (for directed yield), and simplify the logic for checking if the currently loaded vCPU is in-kernel.

- Misc cleanups and fixes.
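The dedicated arch hook mentioned above lands in this merge as a __weak default in virt/kvm/kvm_main.c (visible in the diff below); the x86 override itself is outside this virt/ diffstat. As a rough, non-authoritative sketch of how an architecture might implement the override, assuming a hypothetical per-vCPU preempted_in_kernel flag snapshotted while the vCPU's state is still loaded:

/* Sketch only: the field name and snapshot point are assumptions, not the merged x86 code. */
static void sketch_sched_out(struct kvm_vcpu *vcpu)
{
	/* Record the guest's mode while vCPU state is still loaded and queryable. */
	vcpu->arch.preempted_in_kernel = kvm_arch_vcpu_in_kernel(vcpu);
}

bool kvm_arch_vcpu_preempted_in_kernel(struct kvm_vcpu *vcpu)
{
	/*
	 * A non-weak definition like this replaces the __weak default added in
	 * kvm_main.c, reporting the snapshot instead of querying a vCPU that
	 * may no longer be loaded (which VMX cannot do from another context).
	 */
	return vcpu->arch.preempted_in_kernel;
}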
Diffstat (limited to 'virt')
-rw-r--r-- virt/kvm/kvm_main.c | 21
1 file changed, 20 insertions(+), 1 deletion(-)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 741d86ca06a1..fb49c2a60200 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -4046,6 +4046,18 @@ static bool vcpu_dy_runnable(struct kvm_vcpu *vcpu)
return false;
}
+/*
+ * By default, simply query the target vCPU's current mode when checking if a
+ * vCPU was preempted in kernel mode. All architectures except x86 (or more
+ * specifically, except VMX) allow querying whether or not a vCPU is in kernel
+ * mode even if the vCPU is NOT loaded, i.e. using kvm_arch_vcpu_in_kernel()
+ * directly for cross-vCPU checks is functionally correct and accurate.
+ */
+bool __weak kvm_arch_vcpu_preempted_in_kernel(struct kvm_vcpu *vcpu)
+{
+ return kvm_arch_vcpu_in_kernel(vcpu);
+}
+
bool __weak kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu)
{
return false;
@@ -4082,9 +4094,16 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
continue;
if (kvm_vcpu_is_blocking(vcpu) && !vcpu_dy_runnable(vcpu))
continue;
+
+ /*
+ * Treat the target vCPU as being in-kernel if it has a
+ * pending interrupt, as the vCPU trying to yield may
+ * be spinning waiting on IPI delivery, i.e. the target
+ * vCPU is in-kernel for the purposes of directed yield.
+ */
if (READ_ONCE(vcpu->preempted) && yield_to_kernel_mode &&
!kvm_arch_dy_has_pending_interrupt(vcpu) &&
- !kvm_arch_vcpu_in_kernel(vcpu))
+ !kvm_arch_vcpu_preempted_in_kernel(vcpu))
continue;
if (!kvm_vcpu_eligible_for_directed_yield(vcpu))
continue;
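For readers unfamiliar with the override mechanism used above: __weak (the kernel's shorthand for GCC/Clang weak linkage) makes the generic definition in kvm_main.c a fallback that any architecture can replace simply by linking in a strong definition of the same function. A minimal, self-contained userspace illustration of that linkage behavior (all names here are invented for the example):

/* weak_demo.c: generic code with a weak default, plus a caller. */
#include <stdbool.h>
#include <stdio.h>

/* Weak symbol: used only when no strong definition is linked in, mirroring
 * the __weak kvm_arch_vcpu_preempted_in_kernel() default in kvm_main.c. */
__attribute__((weak)) bool arch_preempted_in_kernel(void)
{
	return false;
}

int main(void)
{
	printf("preempted in kernel: %d\n", arch_preempted_in_kernel());
	return 0;
}

/* arch_override.c (separate file): an "architecture" supplying a strong
 * definition that wins over the weak default at link time:
 *
 *     #include <stdbool.h>
 *     bool arch_preempted_in_kernel(void) { return true; }
 *
 * "cc weak_demo.c" prints 0; "cc weak_demo.c arch_override.c" prints 1.
 */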