author     Krish Sadhukhan <krish.sadhukhan@oracle.com>   2018-02-20 21:24:39 -0500
committer  Radim Krčmář <rkrcmar@redhat.com>              2018-03-08 16:54:03 +0100
commit     0c7f650e106c3f045d9f8d1c1c7eb6f5596508b8 (patch)
tree       f86e1120e27b5f05df02045b3887377b8d634321 /arch/x86
parent     1389309c811b0c954bf3b591b761d79b1700283d (diff)
KVM: nVMX: Enforce NMI controls on vmentry of L2 guests
According to Intel SDM 26.2.1.1, the following rules should be enforced on vmentry:

 * If the "NMI exiting" VM-execution control is 0, the "virtual NMIs" VM-execution control must be 0.
 * If the "virtual NMIs" VM-execution control is 0, the "NMI-window exiting" VM-execution control must be 0.

This patch enforces these rules when entering an L2 guest.

Signed-off-by: Krish Sadhukhan <krish.sadhukhan@oracle.com>
Reviewed-by: Liran Alon <liran.alon@oracle.com>
Reviewed-by: Jim Mattson <jmattson@google.com>
Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
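The two rules map onto individual bits of the pin-based and primary processor-based VM-execution control fields. As a rough, self-contained sketch of the consistency check the patch introduces (the struct, constants, and harness below are standalone stand-ins for the kernel's struct vmcs12 and VMX control definitions, not the kernel code itself; the bit positions follow the VM-execution control encodings), the logic can be modeled like this:

/*
 * Standalone model of the SDM 26.2.1.1 NMI-control consistency rules.
 * The struct below is a stand-in for the kernel's struct vmcs12.
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define PIN_BASED_NMI_EXITING           0x00000008u  /* pin-based bit 3 */
#define PIN_BASED_VIRTUAL_NMIS          0x00000020u  /* pin-based bit 5 */
#define CPU_BASED_VIRTUAL_NMI_PENDING   0x00400000u  /* proc-based bit 22, "NMI-window exiting" */

struct fake_vmcs12 {
	uint32_t pin_based_vm_exec_control;
	uint32_t cpu_based_vm_exec_control;
};

/* Mirrors the logic of the new nested_vmx_check_nmi_controls() helper. */
static int check_nmi_controls(const struct fake_vmcs12 *vmcs12)
{
	/* Rule 1: "virtual NMIs" requires "NMI exiting". */
	if (!(vmcs12->pin_based_vm_exec_control & PIN_BASED_NMI_EXITING) &&
	    (vmcs12->pin_based_vm_exec_control & PIN_BASED_VIRTUAL_NMIS))
		return -EINVAL;

	/* Rule 2: "NMI-window exiting" requires "virtual NMIs". */
	if (!(vmcs12->pin_based_vm_exec_control & PIN_BASED_VIRTUAL_NMIS) &&
	    (vmcs12->cpu_based_vm_exec_control & CPU_BASED_VIRTUAL_NMI_PENDING))
		return -EINVAL;

	return 0;
}

int main(void)
{
	/* Invalid: "virtual NMIs" set while "NMI exiting" is clear. */
	struct fake_vmcs12 bad1 = {
		.pin_based_vm_exec_control = PIN_BASED_VIRTUAL_NMIS,
	};
	/* Invalid: "NMI-window exiting" set while "virtual NMIs" is clear. */
	struct fake_vmcs12 bad2 = {
		.pin_based_vm_exec_control = PIN_BASED_NMI_EXITING,
		.cpu_based_vm_exec_control = CPU_BASED_VIRTUAL_NMI_PENDING,
	};
	/* Valid: all three controls enabled together. */
	struct fake_vmcs12 good = {
		.pin_based_vm_exec_control = PIN_BASED_NMI_EXITING |
					     PIN_BASED_VIRTUAL_NMIS,
		.cpu_based_vm_exec_control = CPU_BASED_VIRTUAL_NMI_PENDING,
	};

	printf("bad1=%d bad2=%d good=%d\n",
	       check_nmi_controls(&bad1), check_nmi_controls(&bad2),
	       check_nmi_controls(&good));
	return 0;
}

In the patch itself, either inconsistent combination makes nested_vmx_check_nmi_controls() return -EINVAL, which check_vmentry_prereqs() translates into VMXERR_ENTRY_INVALID_CONTROL_FIELD, so the L2 vmentry fails back to L1.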
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/kvm/vmx.c  29
1 file changed, 27 insertions, 2 deletions
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 66a77b15e38a..5726cee43f4f 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1340,6 +1340,16 @@ static inline bool nested_cpu_has_preemption_timer(struct vmcs12 *vmcs12)
 		PIN_BASED_VMX_PREEMPTION_TIMER;
 }
 
+static inline bool nested_cpu_has_nmi_exiting(struct vmcs12 *vmcs12)
+{
+	return vmcs12->pin_based_vm_exec_control & PIN_BASED_NMI_EXITING;
+}
+
+static inline bool nested_cpu_has_virtual_nmis(struct vmcs12 *vmcs12)
+{
+	return vmcs12->pin_based_vm_exec_control & PIN_BASED_VIRTUAL_NMIS;
+}
+
 static inline int nested_cpu_has_ept(struct vmcs12 *vmcs12)
 {
 	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_EPT);
@@ -5896,8 +5906,7 @@ static bool nested_exit_intr_ack_set(struct kvm_vcpu *vcpu)
 
 static bool nested_exit_on_nmi(struct kvm_vcpu *vcpu)
 {
-	return get_vmcs12(vcpu)->pin_based_vm_exec_control &
-		PIN_BASED_NMI_EXITING;
+	return nested_cpu_has_nmi_exiting(get_vmcs12(vcpu));
 }
 
 static void enable_irq_window(struct kvm_vcpu *vcpu)
@@ -10979,6 +10988,19 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
 	return 0;
 }
 
+static int nested_vmx_check_nmi_controls(struct vmcs12 *vmcs12)
+{
+	if (!nested_cpu_has_nmi_exiting(vmcs12) &&
+	    nested_cpu_has_virtual_nmis(vmcs12))
+		return -EINVAL;
+
+	if (!nested_cpu_has_virtual_nmis(vmcs12) &&
+	    nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_NMI_PENDING))
+		return -EINVAL;
+
+	return 0;
+}
+
 static int check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -11023,6 +11045,9 @@ static int check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 				vmx->nested.msrs.entry_ctls_high))
 		return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
 
+	if (nested_vmx_check_nmi_controls(vmcs12))
+		return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+
 	if (nested_cpu_has_vmfunc(vmcs12)) {
 		if (vmcs12->vm_function_control &
 		    ~vmx->nested.msrs.vmfunc_controls)