author	Vitaly Kuznetsov <vkuznets@redhat.com>	2021-05-05 17:18:22 +0200
committer	Paolo Bonzini <pbonzini@redhat.com>	2021-05-07 06:06:12 -0400
commit	c9ecafaf0113a305f5085ceb9c7a4b64ca70eae9 (patch)
tree	26410b087af837d837e69bfee73d97d7d90babc0 /tools
parent	32d1b3ab588c1231dbfa9eb08819c50529ce77d7 (diff)
KVM: selftests: evmcs_test: Check that VMCS12 is always properly synced to eVMCS after restore
Add a test for the regression introduced by commit f2c7ef3ba955 ("KVM:
nSVM: cancel KVM_REQ_GET_NESTED_STATE_PAGES on nested vmexit"). When an
L2->L1 exit is forced immediately after restoring nested state, the
KVM_REQ_GET_NESTED_STATE_PAGES request is cleared and VMCS12 changes
(e.g. the fresh RIP) are not reflected to the eVMCS. The subsequent
nested vCPU run is then broken. Utilize NMI injection to do the job.

Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Message-Id: <20210505151823.1341678-3-vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
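On the host side, the test makes the NMI pending through the KVM_SET_VCPU_EVENTS ioctl (via the selftest library's vcpu_events_get()/vcpu_events_set() wrappers, see inject_nmi() in the diff below). As a rough standalone sketch of the same idea against the raw KVM ioctl interface, assuming vcpu_fd is a vCPU file descriptor obtained with KVM_CREATE_VCPU and inject_pending_nmi() is just an illustrative name:

#include <sys/ioctl.h>
#include <linux/kvm.h>

/*
 * Illustrative sketch, not part of the patch: mark an NMI as pending on a
 * vCPU so KVM must deliver it the next time the vCPU runs.
 * 'vcpu_fd' is assumed to be an open vCPU fd from KVM_CREATE_VCPU.
 */
static int inject_pending_nmi(int vcpu_fd)
{
	struct kvm_vcpu_events events;

	/* Fetch the current event state, then flip only the NMI bits. */
	if (ioctl(vcpu_fd, KVM_GET_VCPU_EVENTS, &events) < 0)
		return -1;

	events.nmi.pending = 1;
	events.flags |= KVM_VCPUEVENT_VALID_NMI_PENDING;

	return ioctl(vcpu_fd, KVM_SET_VCPU_EVENTS, &events);
}

Because the L1 guest in the test enables PIN_BASED_NMI_EXITING in the eVMCS, the pending NMI forces an L2->L1 exit on the very next vCPU run after restore, which is exactly the path on which the buggy kernel left VMCS12 out of sync with the eVMCS.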
Diffstat (limited to 'tools')
-rw-r--r--	tools/testing/selftests/kvm/x86_64/evmcs_test.c	66
1 file changed, 55 insertions(+), 11 deletions(-)
diff --git a/tools/testing/selftests/kvm/x86_64/evmcs_test.c b/tools/testing/selftests/kvm/x86_64/evmcs_test.c
index b01f64ac6ce3..63096cea26c6 100644
--- a/tools/testing/selftests/kvm/x86_64/evmcs_test.c
+++ b/tools/testing/selftests/kvm/x86_64/evmcs_test.c
@@ -18,30 +18,52 @@
#include "vmx.h"
#define VCPU_ID 5
+#define NMI_VECTOR 2
static int ud_count;
+void enable_x2apic(void)
+{
+ uint32_t spiv_reg = APIC_BASE_MSR + (APIC_SPIV >> 4);
+
+ wrmsr(MSR_IA32_APICBASE, rdmsr(MSR_IA32_APICBASE) |
+ MSR_IA32_APICBASE_ENABLE | MSR_IA32_APICBASE_EXTD);
+ wrmsr(spiv_reg, rdmsr(spiv_reg) | APIC_SPIV_APIC_ENABLED);
+}
+
static void guest_ud_handler(struct ex_regs *regs)
{
ud_count++;
regs->rip += 3; /* VMLAUNCH */
}
+static void guest_nmi_handler(struct ex_regs *regs)
+{
+}
+
void l2_guest_code(void)
{
GUEST_SYNC(7);
GUEST_SYNC(8);
+ /* Forced exit to L1 upon restore */
+ GUEST_SYNC(9);
+
/* Done, exit to L1 and never come back. */
vmcall();
}
-void l1_guest_code(struct vmx_pages *vmx_pages)
+void guest_code(struct vmx_pages *vmx_pages)
{
#define L2_GUEST_STACK_SIZE 64
unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
+ enable_x2apic();
+
+ GUEST_SYNC(1);
+ GUEST_SYNC(2);
+
enable_vp_assist(vmx_pages->vp_assist_gpa, vmx_pages->vp_assist);
GUEST_ASSERT(vmx_pages->vmcs_gpa);
@@ -63,21 +85,22 @@ void l1_guest_code(struct vmx_pages *vmx_pages)
current_evmcs->revision_id = EVMCS_VERSION;
GUEST_SYNC(6);
+ current_evmcs->pin_based_vm_exec_control |=
+ PIN_BASED_NMI_EXITING;
GUEST_ASSERT(!vmlaunch());
GUEST_ASSERT(vmptrstz() == vmx_pages->enlightened_vmcs_gpa);
- GUEST_SYNC(9);
+
+ /*
+ * The NMI forces an L2->L1 exit; resume L2 and hope that the eVMCS is
+ * up-to-date (RIP points where it should and not at the beginning
+ * of l2_guest_code()). GUEST_SYNC(9) checks that.
+ */
GUEST_ASSERT(!vmresume());
- GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
- GUEST_SYNC(10);
-}
-void guest_code(struct vmx_pages *vmx_pages)
-{
- GUEST_SYNC(1);
- GUEST_SYNC(2);
+ GUEST_SYNC(10);
- if (vmx_pages)
- l1_guest_code(vmx_pages);
+ GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
+ GUEST_SYNC(11);
/* Try enlightened vmptrld with an incorrect GPA */
evmcs_vmptrld(0xdeadbeef, vmx_pages->enlightened_vmcs);
@@ -86,6 +109,18 @@ void guest_code(struct vmx_pages *vmx_pages)
GUEST_DONE();
}
+void inject_nmi(struct kvm_vm *vm)
+{
+ struct kvm_vcpu_events events;
+
+ vcpu_events_get(vm, VCPU_ID, &events);
+
+ events.nmi.pending = 1;
+ events.flags |= KVM_VCPUEVENT_VALID_NMI_PENDING;
+
+ vcpu_events_set(vm, VCPU_ID, &events);
+}
+
int main(int argc, char *argv[])
{
vm_vaddr_t vmx_pages_gva = 0;
@@ -120,6 +155,9 @@ int main(int argc, char *argv[])
vm_init_descriptor_tables(vm);
vcpu_init_descriptor_tables(vm, VCPU_ID);
vm_handle_exception(vm, UD_VECTOR, guest_ud_handler);
+ vm_handle_exception(vm, NMI_VECTOR, guest_nmi_handler);
+
+ pr_info("Running L1 which uses EVMCS to run L2\n");
for (stage = 1;; stage++) {
_vcpu_run(vm, VCPU_ID);
@@ -166,6 +204,12 @@ int main(int argc, char *argv[])
TEST_ASSERT(!memcmp(&regs1, &regs2, sizeof(regs2)),
"Unexpected register values after vcpu_load_state; rdi: %lx rsi: %lx",
(ulong) regs2.rdi, (ulong) regs2.rsi);
+
+ /* Force immediate L2->L1 exit before resuming */
+ if (stage == 8) {
+ pr_info("Injecting NMI into L1 before L2 had a chance to run after restore\n");
+ inject_nmi(vm);
+ }
}
done: