author		Maxim Levitsky <mlevitsk@redhat.com>	2022-10-25 15:47:41 +0300
committer	Paolo Bonzini <pbonzini@redhat.com>	2022-11-09 12:31:26 -0500
commit		fb28875fd7da184079150295da7ee8d80a70917e (patch)
tree		73c75fa069173b462517317739a0f891679b45f9 /arch/x86/kvm/smm.c
parent		95504c7c981b3260b3b238ace03f3519bd9a0b6d (diff)
KVM: x86: smm: preserve interrupt shadow in SMRAM
When #SMI is asserted, the CPU can be in an interrupt shadow due to sti or mov ss.

It is not mandatory in the Intel/AMD PRMs for the #SMI to be blocked during the shadow, and on top of that, since neither SVM nor VMX has true support for an SMI window, waiting for one instruction would mean single-stepping the guest.

Instead, allow #SMI in this case, but both reset the interrupt shadow and stash its value in SMRAM, so that it can be restored on exit from SMM.

This fixes rare failures seen mostly on Windows guests on VMX, when #SMI falls on the sti instruction, which manifests as a VM entry failure due to EFLAGS.IF not being set while the STI interrupt window is still set in the VMCS.

Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
Message-Id: <20221025124741.228045-24-mlevitsk@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
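For orientation, the pattern the patch implements is: save the interrupt shadow into SMRAM on SMM entry, clear it so the SMM handler starts without a pending shadow, and restore it from SMRAM on RSM. Below is a minimal, self-contained C sketch of that pattern; it is illustrative only and not kernel code, and every name in it (vcpu_model, smram_model, enter_smm_model, rsm_model) is hypothetical.

/*
 * Sketch of the save/clear/restore pattern for the interrupt shadow.
 * Not kernel code: types and function names are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

struct vcpu_model {
	uint8_t int_shadow;	/* nonzero while in an STI/MOV SS interrupt shadow */
};

struct smram_model {
	uint8_t int_shadow;	/* models the new SMRAM field added by the patch */
};

/* On #SMI: stash the current shadow in SMRAM, then clear it for the SMM handler. */
static void enter_smm_model(struct vcpu_model *vcpu, struct smram_model *smram)
{
	smram->int_shadow = vcpu->int_shadow;
	vcpu->int_shadow = 0;
}

/* On RSM: restore whatever shadow was active when #SMI was taken. */
static void rsm_model(struct vcpu_model *vcpu, const struct smram_model *smram)
{
	vcpu->int_shadow = smram->int_shadow;
}

int main(void)
{
	struct vcpu_model vcpu = { .int_shadow = 1 };	/* #SMI lands right after sti */
	struct smram_model smram = { 0 };

	enter_smm_model(&vcpu, &smram);
	printf("in SMM:    shadow=%u, saved=%u\n",
	       (unsigned)vcpu.int_shadow, (unsigned)smram.int_shadow);

	rsm_model(&vcpu, &smram);
	printf("after RSM: shadow=%u\n", (unsigned)vcpu.int_shadow);
	return 0;
}

In the actual diff below, the save side is the new smram->int_shadow assignments in enter_smm_save_state_32/64 plus the set_interrupt_shadow(vcpu, 0) call in enter_smm, and the restore side is the ctxt->interruptibility assignments in rsm_load_state_32/64.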
Diffstat (limited to 'arch/x86/kvm/smm.c')
-rw-r--r--	arch/x86/kvm/smm.c	29
1 file changed, 25 insertions(+), 4 deletions(-)
diff --git a/arch/x86/kvm/smm.c b/arch/x86/kvm/smm.c
index e3ecb1f84168..a9c1c2af8d94 100644
--- a/arch/x86/kvm/smm.c
+++ b/arch/x86/kvm/smm.c
@@ -27,7 +27,9 @@ static void check_smram_offsets(void)
CHECK_SMRAM32_OFFSET(io_restart_rsi, 0xFF0C);
CHECK_SMRAM32_OFFSET(io_restart_rip, 0xFF10);
CHECK_SMRAM32_OFFSET(cr4, 0xFF14);
- CHECK_SMRAM32_OFFSET(reserved3, 0xFF18);
+ CHECK_SMRAM32_OFFSET(reserved2, 0xFF18);
+ CHECK_SMRAM32_OFFSET(int_shadow, 0xFF1A);
+ CHECK_SMRAM32_OFFSET(reserved3, 0xFF1B);
CHECK_SMRAM32_OFFSET(ds, 0xFF2C);
CHECK_SMRAM32_OFFSET(fs, 0xFF38);
CHECK_SMRAM32_OFFSET(gs, 0xFF44);
@@ -73,7 +75,9 @@ static void check_smram_offsets(void)
CHECK_SMRAM64_OFFSET(reserved1, 0xFEC4);
CHECK_SMRAM64_OFFSET(io_inst_restart, 0xFEC8);
CHECK_SMRAM64_OFFSET(auto_hlt_restart, 0xFEC9);
- CHECK_SMRAM64_OFFSET(reserved2, 0xFECA);
+ CHECK_SMRAM64_OFFSET(amd_nmi_mask, 0xFECA);
+ CHECK_SMRAM64_OFFSET(int_shadow, 0xFECB);
+ CHECK_SMRAM64_OFFSET(reserved2, 0xFECC);
CHECK_SMRAM64_OFFSET(efer, 0xFED0);
CHECK_SMRAM64_OFFSET(svm_guest_flag, 0xFED8);
CHECK_SMRAM64_OFFSET(svm_guest_vmcb_gpa, 0xFEE0);
@@ -219,6 +223,8 @@ static void enter_smm_save_state_32(struct kvm_vcpu *vcpu,
smram->cr4 = kvm_read_cr4(vcpu);
smram->smm_revision = 0x00020000;
smram->smbase = vcpu->arch.smbase;
+
+ smram->int_shadow = static_call(kvm_x86_get_interrupt_shadow)(vcpu);
}
#ifdef CONFIG_X86_64
@@ -268,6 +274,8 @@ static void enter_smm_save_state_64(struct kvm_vcpu *vcpu,
enter_smm_save_seg_64(vcpu, &smram->ds, VCPU_SREG_DS);
enter_smm_save_seg_64(vcpu, &smram->fs, VCPU_SREG_FS);
enter_smm_save_seg_64(vcpu, &smram->gs, VCPU_SREG_GS);
+
+ smram->int_shadow = static_call(kvm_x86_get_interrupt_shadow)(vcpu);
}
#endif
@@ -313,6 +321,8 @@ void enter_smm(struct kvm_vcpu *vcpu)
kvm_set_rflags(vcpu, X86_EFLAGS_FIXED);
kvm_rip_write(vcpu, 0x8000);
+ static_call(kvm_x86_set_interrupt_shadow)(vcpu, 0);
+
cr0 = vcpu->arch.cr0 & ~(X86_CR0_PE | X86_CR0_EM | X86_CR0_TS | X86_CR0_PG);
static_call(kvm_x86_set_cr0)(vcpu, cr0);
vcpu->arch.cr0 = cr0;
@@ -460,7 +470,7 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt,
{
struct kvm_vcpu *vcpu = ctxt->vcpu;
struct desc_ptr dt;
- int i;
+ int i, r;
ctxt->eflags = smstate->eflags | X86_EFLAGS_FIXED;
ctxt->_eip = smstate->eip;
@@ -494,8 +504,16 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt,
vcpu->arch.smbase = smstate->smbase;
- return rsm_enter_protected_mode(vcpu, smstate->cr0,
+ r = rsm_enter_protected_mode(vcpu, smstate->cr0,
smstate->cr3, smstate->cr4);
+
+ if (r != X86EMUL_CONTINUE)
+ return r;
+
+ static_call(kvm_x86_set_interrupt_shadow)(vcpu, 0);
+ ctxt->interruptibility = (u8)smstate->int_shadow;
+
+ return r;
}
#ifdef CONFIG_X86_64
@@ -545,6 +563,9 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt,
rsm_load_seg_64(vcpu, &smstate->fs, VCPU_SREG_FS);
rsm_load_seg_64(vcpu, &smstate->gs, VCPU_SREG_GS);
+ static_call(kvm_x86_set_interrupt_shadow)(vcpu, 0);
+ ctxt->interruptibility = (u8)smstate->int_shadow;
+
return X86EMUL_CONTINUE;
}
#endif