author    Marian Rotariu <mrotariu@bitdefender.com>  2018-04-30 12:23:01 +0300
committer Paolo Bonzini <pbonzini@redhat.com>        2018-05-11 11:21:10 +0200
commit    6356ee0c9602004e0a3b4b2dad68ee2ee9385b17 (patch)
tree      0b5978c4e855c52181e1a2004e95d87558794e79
parent    ddc9cfb79c1096a0855839631c091aa7e9602052 (diff)
x86: Delay skip of emulated hypercall instruction
The IP increment should be done after the hypercall emulation, after calling
the various handlers. In this way, these handlers can accurately identify
the IP of the VMCALL if they need it.

This patch keeps the same functionality for the Hyper-V handler, which does
not use the return code of the standard kvm_skip_emulated_instruction() call.

Signed-off-by: Marian Rotariu <mrotariu@bitdefender.com>
[Hyper-V hypercalls also need kvm_skip_emulated_instruction() - Paolo]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
 arch/x86/kvm/hyperv.c |  2 +-
 arch/x86/kvm/x86.c    | 19 +++++++++++--------
 2 files changed, 12 insertions(+), 9 deletions(-)
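As a minimal sketch of the point made in the commit message (not part of this
patch): because the instruction skip now happens at the end of
kvm_emulate_hypercall(), the guest RIP still points at the VMCALL/VMMCALL when
a handler runs, so a handler could inspect it directly. The function below is
hypothetical and only illustrative; kvm_rip_read() is the existing accessor
for the cached guest RIP.

/*
 * Hypothetical helper (not in this commit): with the skip delayed,
 * RIP read here is still the address of the VMCALL/VMMCALL itself.
 */
static void example_log_hypercall_site(struct kvm_vcpu *vcpu)
{
	/* Read the current guest RIP from the register cache. */
	unsigned long vmcall_rip = kvm_rip_read(vcpu);

	pr_debug("hypercall issued at guest RIP 0x%lx\n", vmcall_rip);
}

Before this change, kvm_skip_emulated_instruction() ran at the top of
kvm_emulate_hypercall(), so such a handler would only have seen the RIP of the
instruction following the hypercall.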
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index 98618e397342..14dd5e5010a2 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -1265,7 +1265,7 @@ static int kvm_hv_hypercall_complete_userspace(struct kvm_vcpu *vcpu)
struct kvm_run *run = vcpu->run;
kvm_hv_hypercall_set_result(vcpu, run->hyperv.u.hcall.result);
- return 1;
+ return kvm_skip_emulated_instruction(vcpu);
}
static u16 kvm_hvcall_signal_event(struct kvm_vcpu *vcpu, bool fast, u64 param)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 51ecd381793b..44bd4a23b59c 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6671,12 +6671,13 @@ void kvm_vcpu_deactivate_apicv(struct kvm_vcpu *vcpu)
int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
{
unsigned long nr, a0, a1, a2, a3, ret;
- int op_64_bit, r;
+ int op_64_bit;
- r = kvm_skip_emulated_instruction(vcpu);
-
- if (kvm_hv_hypercall_enabled(vcpu->kvm))
- return kvm_hv_hypercall(vcpu);
+ if (kvm_hv_hypercall_enabled(vcpu->kvm)) {
+ if (!kvm_hv_hypercall(vcpu))
+ return 0;
+ goto out;
+ }
nr = kvm_register_read(vcpu, VCPU_REGS_RAX);
a0 = kvm_register_read(vcpu, VCPU_REGS_RBX);
@@ -6697,7 +6698,7 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
if (kvm_x86_ops->get_cpl(vcpu) != 0) {
ret = -KVM_EPERM;
- goto out;
+ goto out_error;
}
switch (nr) {
@@ -6717,12 +6718,14 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
ret = -KVM_ENOSYS;
break;
}
-out:
+out_error:
if (!op_64_bit)
ret = (u32)ret;
kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
+
+out:
++vcpu->stat.hypercalls;
- return r;
+ return kvm_skip_emulated_instruction(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);