author	Sean Christopherson <sean.j.christopherson@intel.com>	2018-07-23 12:32:43 -0700
committer	Paolo Bonzini <pbonzini@redhat.com>	2018-08-06 17:59:12 +0200
commit	678e315e78a780dbef384b92339c8414309dbc11 (patch)
tree	e0d8d585653e5edf7c978d70296a20a505392298 /arch
parent	bd9966de4e14fb559e89a06f7f5c9aab2cc028b9 (diff)
download	linux-678e315e78a780dbef384b92339c8414309dbc11.tar.gz
linux-678e315e78a780dbef384b92339c8414309dbc11.tar.bz2
linux-678e315e78a780dbef384b92339c8414309dbc11.zip
KVM: vmx: add dedicated utility to access guest's kernel_gs_base
When lazy save/restore of MSR_KERNEL_GS_BASE was introduced[1], the MSR was intercepted in all modes and was only restored for the host when the guest is in 64-bit mode. So at the time, going through the full host restore prior to accessing MSR_KERNEL_GS_BASE was necessary to load host state and was not a significant waste of cycles.

Later, MSR_KERNEL_GS_BASE interception was disabled for a 64-bit guest[2], and then the MSR was unconditionally saved/restored for the host[3]. As a result, loading full host state is overkill for accesses to MSR_KERNEL_GS_BASE, and completely unnecessary when the guest is not in 64-bit mode.

Add a dedicated utility to read/write the guest's MSR_KERNEL_GS_BASE (outside of the save/restore flow) to minimize the overhead incurred when accessing the MSR. When setting EFER, only decache the MSR if the new EFER will disable long mode.

Removing out-of-band usage of vmx_load_host_state() also eliminates, or at least reduces, potential corner cases in its usage, which in turn will (hopefully) make it easier to reason about future changes to the save/restore flow, e.g. optimization of saving host state.

[1] commit 44ea2b1758d8 ("KVM: VMX: Move MSR_KERNEL_GS_BASE out of the vmx autoload msr area")
[2] commit 5897297bc228 ("KVM: VMX: Don't intercept MSR_KERNEL_GS_BASE")
[3] commit c8770e7ba63b ("KVM: VMX: Fix host userspace gsbase corruption")

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
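The heart of the change is a pair of accessors that touch MSR_KERNEL_GS_BASE directly instead of forcing a full vmx_load_host_state() round trip. Below is a condensed, annotated view of the read side as it appears in the diff; the comments are explanatory additions and not part of the patch itself:

#ifdef CONFIG_X86_64
static u64 vmx_read_guest_kernel_gs_base(struct vcpu_vmx *vmx)
{
	if (is_long_mode(&vmx->vcpu)) {
		/*
		 * Keep preemption disabled so loaded_cpu_state and the
		 * hardware MSR stay consistent across the check and the
		 * RDMSR; being scheduled out here would let KVM switch
		 * the CPU back to host state, and the RDMSR would then
		 * read the host's value instead of the guest's.
		 */
		preempt_disable();
		if (vmx->loaded_cpu_state)
			/* Guest state is live in hardware; refresh the cache. */
			rdmsrl(MSR_KERNEL_GS_BASE,
			       vmx->msr_guest_kernel_gs_base);
		preempt_enable();
	}
	/*
	 * Outside 64-bit mode the MSR is intercepted, so KVM's cached
	 * copy is the authoritative value.
	 */
	return vmx->msr_guest_kernel_gs_base;
}
#endif

vmx_write_guest_kernel_gs_base() mirrors this with wrmsrl(), and vmx_set_efer() calls the read helper when the new EFER clears EFER_LMA, refreshing the cache while is_long_mode() still reports 64-bit mode (i.e. before vcpu->arch.efer is updated).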
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/kvm/vmx.c	46
1 file changed, 35 insertions(+), 11 deletions(-)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 935b84f40f65..72c392525f61 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2772,13 +2772,31 @@ static void __vmx_load_host_state(struct vcpu_vmx *vmx)
 	load_fixmap_gdt(raw_smp_processor_id());
 }
 
-static void vmx_load_host_state(struct vcpu_vmx *vmx)
+#ifdef CONFIG_X86_64
+static u64 vmx_read_guest_kernel_gs_base(struct vcpu_vmx *vmx)
 {
-	preempt_disable();
-	__vmx_load_host_state(vmx);
-	preempt_enable();
+	if (is_long_mode(&vmx->vcpu)) {
+		preempt_disable();
+		if (vmx->loaded_cpu_state)
+			rdmsrl(MSR_KERNEL_GS_BASE,
+			       vmx->msr_guest_kernel_gs_base);
+		preempt_enable();
+	}
+	return vmx->msr_guest_kernel_gs_base;
 }
 
+static void vmx_write_guest_kernel_gs_base(struct vcpu_vmx *vmx, u64 data)
+{
+	if (is_long_mode(&vmx->vcpu)) {
+		preempt_disable();
+		if (vmx->loaded_cpu_state)
+			wrmsrl(MSR_KERNEL_GS_BASE, data);
+		preempt_enable();
+	}
+	vmx->msr_guest_kernel_gs_base = data;
+}
+#endif
+
 static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
 {
 	struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
@@ -3857,8 +3875,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		msr_info->data = vmcs_readl(GUEST_GS_BASE);
 		break;
 	case MSR_KERNEL_GS_BASE:
-		vmx_load_host_state(vmx);
-		msr_info->data = vmx->msr_guest_kernel_gs_base;
+		msr_info->data = vmx_read_guest_kernel_gs_base(vmx);
 		break;
 #endif
 	case MSR_EFER:
@@ -3958,8 +3975,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		vmcs_writel(GUEST_GS_BASE, data);
 		break;
 	case MSR_KERNEL_GS_BASE:
-		vmx_load_host_state(vmx);
-		vmx->msr_guest_kernel_gs_base = data;
+		vmx_write_guest_kernel_gs_base(vmx, data);
 		break;
 #endif
 	case MSR_IA32_SYSENTER_CS:
@@ -4849,10 +4865,18 @@ static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
 		return;
 
 	/*
-	 * Force kernel_gs_base reloading before EFER changes, as control
-	 * of this msr depends on is_long_mode().
+	 * MSR_KERNEL_GS_BASE is not intercepted when the guest is in
+	 * 64-bit mode as a 64-bit kernel may frequently access the
+	 * MSR. This means we need to manually save/restore the MSR
+	 * when switching between guest and host state, but only if
+	 * the guest is in 64-bit mode. Sync our cached value if the
+	 * guest is transitioning to 32-bit mode and the CPU contains
+	 * guest state, i.e. the cache is stale.
 	 */
-	vmx_load_host_state(to_vmx(vcpu));
+#ifdef CONFIG_X86_64
+	if (!(efer & EFER_LMA))
+		(void)vmx_read_guest_kernel_gs_base(vmx);
+#endif
 	vcpu->arch.efer = efer;
 	if (efer & EFER_LMA) {
 		vm_entry_controls_setbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);