Diffstat (limited to 'arch/s390/kvm/kvm-s390.c')
-rw-r--r--  arch/s390/kvm/kvm-s390.c  49  +++++++++++++++++++++++++++++--------------------
1 file changed, 29 insertions(+), 20 deletions(-)
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index f8bcaefd7d34..49292869a5cd 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -23,6 +23,7 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
+#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
@@ -241,6 +242,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
kvm_free_physmem(kvm);
free_page((unsigned long)(kvm->arch.sca));
debug_unregister(kvm->arch.dbf);
+ cleanup_srcu_struct(&kvm->srcu);
kfree(kvm);
}

@@ -543,7 +545,7 @@ rerun_vcpu:
rc = -EINTR;
}

- if (rc == -ENOTSUPP) {
+ if (rc == -EOPNOTSUPP) {
/* intercept cannot be handled in-kernel, prepare kvm-run */
kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
@@ -603,45 +605,45 @@ int __kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
} else
prefix = 0;

- if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, fp_regs),
+ if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
vcpu->arch.guest_fpregs.fprs, 128, prefix))
return -EFAULT;

- if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, gp_regs),
+ if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
vcpu->arch.guest_gprs, 128, prefix))
return -EFAULT;

- if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, psw),
+ if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
&vcpu->arch.sie_block->gpsw, 16, prefix))
return -EFAULT;

- if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, pref_reg),
+ if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
&vcpu->arch.sie_block->prefix, 4, prefix))
return -EFAULT;

if (__guestcopy(vcpu,
- addr + offsetof(struct save_area_s390x, fp_ctrl_reg),
+ addr + offsetof(struct save_area, fp_ctrl_reg),
&vcpu->arch.guest_fpregs.fpc, 4, prefix))
return -EFAULT;

- if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, tod_reg),
+ if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
&vcpu->arch.sie_block->todpr, 4, prefix))
return -EFAULT;

- if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, timer),
+ if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
&vcpu->arch.sie_block->cputm, 8, prefix))
return -EFAULT;

- if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, clk_cmp),
+ if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
&vcpu->arch.sie_block->ckc, 8, prefix))
return -EFAULT;

- if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, acc_regs),
+ if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
&vcpu->arch.guest_acrs, 64, prefix))
return -EFAULT;

if (__guestcopy(vcpu,
- addr + offsetof(struct save_area_s390x, ctrl_regs),
+ addr + offsetof(struct save_area, ctrl_regs),
&vcpu->arch.sie_block->gcr, 128, prefix))
return -EFAULT;
return 0;
@@ -689,14 +691,12 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
}

/* Section: memory related */
-int kvm_arch_set_memory_region(struct kvm *kvm,
- struct kvm_userspace_memory_region *mem,
- struct kvm_memory_slot old,
- int user_alloc)
+int kvm_arch_prepare_memory_region(struct kvm *kvm,
+ struct kvm_memory_slot *memslot,
+ struct kvm_memory_slot old,
+ struct kvm_userspace_memory_region *mem,
+ int user_alloc)
{
- int i;
- struct kvm_vcpu *vcpu;
-
/* A few sanity checks. We can have exactly one memory slot which has
to start at guest virtual zero and which has to be located at a
page boundary in userland and which has to end at a page boundary.
@@ -719,14 +719,23 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
if (!user_alloc)
return -EINVAL;

+ return 0;
+}
+
+void kvm_arch_commit_memory_region(struct kvm *kvm,
+ struct kvm_userspace_memory_region *mem,
+ struct kvm_memory_slot old,
+ int user_alloc)
+{
+ int i;
+ struct kvm_vcpu *vcpu;
+
/* request update of sie control block for all available vcpus */
kvm_for_each_vcpu(i, vcpu, kvm) {
if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
continue;
kvm_s390_inject_sigp_stop(vcpu, ACTION_RELOADVCPU_ON_STOP);
}
-
- return 0;
}

void kvm_arch_flush_shadow(struct kvm *kvm)