Diffstat (limited to 'arch/x86/kvm/svm/svm.c')
-rw-r--r--  arch/x86/kvm/svm/svm.c  27
1 file changed, 22 insertions(+), 5 deletions(-)
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 0227c4cbe642..23bac92f5b27 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -3399,12 +3399,21 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl);

/*
- * Tell context tracking that this CPU is about to enter guest
- * mode. This has to be after x86_spec_ctrl_set_guest() because
- * that can take locks (lockdep needs RCU) and calls into world and
- * some more.
+ * VMENTER enables interrupts (host state), but the kernel state is
+ * interrupts disabled when this is invoked. Also tell RCU about
+ * it. This is the same logic as for exit_to_user_mode().
+ *
+ * This ensures that e.g. latency analysis on the host observes
+ * guest mode as interrupt enabled.
+ *
+ * guest_enter_irqoff() informs context tracking about the
+ * transition to guest mode and if enabled adjusts RCU state
+ * accordingly.
*/
+ trace_hardirqs_on_prepare();
+ lockdep_hardirqs_on_prepare(CALLER_ADDR0);
guest_enter_irqoff();
+ lockdep_hardirqs_on(CALLER_ADDR0);

__svm_vcpu_run(svm->vmcb_pa, (unsigned long *)&svm->vcpu.arch.regs);
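For illustration only, not part of the patch: the entry-side calls added above can be read as one logical step, in the spirit of the exit_to_user_mode() pattern the new comment references. The wrapper below is a sketch with a made-up name that simply groups the four calls in the order the hunk adds them; the header locations are an assumption based on the kernel series this patch is against.

#include <linux/context_tracking.h>     /* guest_enter_irqoff() */
#include <linux/irqflags.h>             /* trace_/lockdep_hardirqs_*() */
#include <linux/ftrace.h>               /* CALLER_ADDR0 */

/*
 * Illustrative wrapper (hypothetical name) around the entry sequence.
 * Hardirqs are really disabled here, but VMENTER enables interrupts as
 * far as the host is concerned, so tracing and lockdep are switched to
 * 'on' before guest mode is entered, same as exit_to_user_mode().
 */
static __always_inline void svm_guest_enter_irqoff_sketch(void)
{
        trace_hardirqs_on_prepare();
        lockdep_hardirqs_on_prepare(CALLER_ADDR0);
        guest_enter_irqoff();                   /* context tracking / RCU */
        lockdep_hardirqs_on(CALLER_ADDR0);      /* last call before VMENTER */
}

As in exit_to_user_mode(), the final lockdep_hardirqs_on() is the last thing that runs before the guest is actually entered, which here is the __svm_vcpu_run() call above.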
@@ -3416,14 +3425,22 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
loadsegment(gs, svm->host.gs);
#endif
#endif
+
/*
- * Tell context tracking that this CPU is back.
+ * VMEXIT disables interrupts (host state), but tracing and lockdep
+ * have them in state 'on' as recorded before entering guest mode.
+ * Same as enter_from_user_mode().
+ *
+ * guest_exit_irqoff() restores host context and reinstates RCU if
+ * enabled and required.
*
* This needs to be done before the below as native_read_msr()
* contains a tracepoint and x86_spec_ctrl_restore_host() calls
* into world and some more.
*/
+ lockdep_hardirqs_off(CALLER_ADDR0);
guest_exit_irqoff();
+ trace_hardirqs_off_finish();

/*
* We do not use IBRS in the kernel. If this vCPU has used the
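Again for illustration only, not part of the patch: the exit side is the mirror image of the entry sequence. A sketch wrapper (hypothetical name, same assumed headers as the entry sketch above) grouping the calls added by the second hunk:

/*
 * Illustrative mirror of the exit sequence.  After VMEXIT hardirqs are
 * disabled again (host state): lockdep is told first, then context
 * tracking/RCU is restored, then the tracer finishes its bookkeeping,
 * same as enter_from_user_mode().
 */
static __always_inline void svm_guest_exit_irqoff_sketch(void)
{
        lockdep_hardirqs_off(CALLER_ADDR0);
        guest_exit_irqoff();            /* restore host context / RCU */
        trace_hardirqs_off_finish();
}

As the retained comment in the hunk notes, this block has to run before the code below it, since native_read_msr() contains a tracepoint and x86_spec_ctrl_restore_host() calls back into instrumented code.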