author	Sheng Yang <sheng@linux.intel.com>	2010-05-17 17:08:27 +0800
committer	Avi Kivity <avi@redhat.com>	2010-08-01 10:35:48 +0300
commit	7cf30855e02be7a207ffebb8b9350986f2ba83e9 (patch)
tree	114f0f2dfbe425bfb7736ebbeb1655c23d636db4
parent	5ee481da7b62a992b91f958bf26aaaa92354c170 (diff)
KVM: x86: Use unlazy_fpu() for host FPU
We can avoid an unnecessary FPU load when the userspace process doesn't use the FPU frequently.

Derived from Avi's idea.

Signed-off-by: Sheng Yang <sheng@linux.intel.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
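The saving comes from the kernel's lazy FPU bookkeeping: unlazy_fpu() flushes the current task's FPU state back to its thread struct only if the task has actually touched the FPU since it was last scheduled, whereas the old code did an unconditional fxsave/fxrstor of a private host_fx_image around every guest FPU load. On the put side no explicit host restore is needed either; the host state is reloaded on demand by the kernel's normal lazy-FPU fault path the next time userspace uses it. Below is a minimal userspace sketch of that idea; the struct, field, and function names are illustrative stand-ins, not the kernel's API:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Illustrative stand-in for a task's FPU bookkeeping; the kernel of this
 * era tracked the same thing with a thread flag (TS_USEDFPU). */
struct task_model {
	bool used_fpu;      /* did this task touch the FPU since the last switch? */
	char fx_image[512]; /* stand-in for the fxsave area */
};

/* Model of unlazy_fpu(): save host FPU state only when it is dirty. */
static void unlazy_fpu_model(struct task_model *t)
{
	if (!t->used_fpu) {
		/* The fast path this patch buys: nothing to save. */
		puts("host FPU clean -> skip fxsave");
		return;
	}
	memset(t->fx_image, 0, sizeof(t->fx_image)); /* stands in for fxsave */
	t->used_fpu = false;
	puts("host FPU dirty -> fxsave once");
}

int main(void)
{
	struct task_model vcpu_thread = { .used_fpu = false };

	/* A VCPU thread that never used the FPU in userspace: no save needed. */
	unlazy_fpu_model(&vcpu_thread);

	/* After the thread touches the FPU, the next guest load pays for one save. */
	vcpu_thread.used_fpu = true;
	unlazy_fpu_model(&vcpu_thread);
	return 0;
}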
-rw-r--r--	arch/x86/include/asm/kvm_host.h	|  1 -
-rw-r--r--	arch/x86/kvm/x86.c	| 18 ++----------------
2 files changed, 2 insertions(+), 17 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 0c06148fa3b1..d93601c52902 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -301,7 +301,6 @@ struct kvm_vcpu_arch {
 		unsigned long mmu_seq;
 	} update_pte;
 
-	struct i387_fxsave_struct host_fx_image;
 	struct i387_fxsave_struct guest_fx_image;
 
 	gva_t mmio_fault_cr2;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 4c2096f30d90..54ce77582eda 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -52,6 +52,7 @@
 #include <asm/desc.h>
 #include <asm/mtrr.h>
 #include <asm/mce.h>
+#include <asm/i387.h>
 
 #define MAX_IO_MSRS 256
 #define CR0_RESERVED_BITS					\
@@ -5134,21 +5135,10 @@ void fx_init(struct kvm_vcpu *vcpu)
 {
 	unsigned after_mxcsr_mask;
 
-	/*
-	 * Touch the fpu the first time in non atomic context as if
-	 * this is the first fpu instruction the exception handler
-	 * will fire before the instruction returns and it'll have to
-	 * allocate ram with GFP_KERNEL.
-	 */
-	if (!used_math())
-		kvm_fx_save(&vcpu->arch.host_fx_image);
-
 	/* Initialize guest FPU by resetting ours and saving into guest's */
 	preempt_disable();
-	kvm_fx_save(&vcpu->arch.host_fx_image);
 	kvm_fx_finit();
 	kvm_fx_save(&vcpu->arch.guest_fx_image);
-	kvm_fx_restore(&vcpu->arch.host_fx_image);
 	preempt_enable();
 
 	vcpu->arch.cr0 |= X86_CR0_ET;
@@ -5165,7 +5155,7 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
 		return;
 
 	vcpu->guest_fpu_loaded = 1;
-	kvm_fx_save(&vcpu->arch.host_fx_image);
+	unlazy_fpu(current);
 	kvm_fx_restore(&vcpu->arch.guest_fx_image);
 	trace_kvm_fpu(1);
 }
@@ -5177,7 +5167,6 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
 
 	vcpu->guest_fpu_loaded = 0;
 	kvm_fx_save(&vcpu->arch.guest_fx_image);
-	kvm_fx_restore(&vcpu->arch.host_fx_image);
 	++vcpu->stat.fpu_reload;
 	set_bit(KVM_REQ_DEACTIVATE_FPU, &vcpu->requests);
 	trace_kvm_fpu(0);
@@ -5203,9 +5192,6 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 {
 	int r;
 
-	/* We do fxsave: this must be aligned. */
-	BUG_ON((unsigned long)&vcpu->arch.host_fx_image & 0xF);
-
 	vcpu->arch.mtrr_state.have_fixed = 1;
 	vcpu_load(vcpu);
 	r = kvm_arch_vcpu_reset(vcpu);