author     Paolo Bonzini <pbonzini@redhat.com>    2015-11-02 10:42:36 +0100
committer  Paolo Bonzini <pbonzini@redhat.com>    2015-11-02 10:42:36 +0100
commit     4d5140c5799e676f5a8fb805105e8806f2db1902 (patch)
tree       b09eab65be4a658df3d1ad8543d7f7eed7cfca23
parent     8c85ac1c0a1b41299370857765bc0950666ed5d9 (diff)
parent     46b708ea875f14f5496109df053624199f3aae87 (diff)
Merge tag 'kvm-s390-next-20151028' of git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux into HEAD
KVM: s390: Bugfix and cleanups

There is one important bug fix for a potential memory corruption and/or
guest errors for guests with 63 or 64 vCPUs. This fix would qualify for
4.3 but is some days too late, given that we are about to release 4.3.
Since this patch is cc'd to stable >= 3.15 anyway, we can handle it via
the 4.4 merge window.

This pull request also contains two cleanups.
-rw-r--r--  arch/s390/kvm/intercept.c | 42
-rw-r--r--  arch/s390/kvm/kvm-s390.c  | 12
2 files changed, 28 insertions(+), 26 deletions(-)
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index 7365e8a46032..b4a5aa110cec 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -336,28 +336,28 @@ static int handle_partial_execution(struct kvm_vcpu *vcpu)
 	return -EOPNOTSUPP;
 }
 
-static const intercept_handler_t intercept_funcs[] = {
-	[0x00 >> 2] = handle_noop,
-	[0x04 >> 2] = handle_instruction,
-	[0x08 >> 2] = handle_prog,
-	[0x10 >> 2] = handle_noop,
-	[0x14 >> 2] = handle_external_interrupt,
-	[0x18 >> 2] = handle_noop,
-	[0x1C >> 2] = kvm_s390_handle_wait,
-	[0x20 >> 2] = handle_validity,
-	[0x28 >> 2] = handle_stop,
-	[0x38 >> 2] = handle_partial_execution,
-};
-
 int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu)
 {
-	intercept_handler_t func;
-	u8 code = vcpu->arch.sie_block->icptcode;
-
-	if (code & 3 || (code >> 2) >= ARRAY_SIZE(intercept_funcs))
+	switch (vcpu->arch.sie_block->icptcode) {
+	case 0x00:
+	case 0x10:
+	case 0x18:
+		return handle_noop(vcpu);
+	case 0x04:
+		return handle_instruction(vcpu);
+	case 0x08:
+		return handle_prog(vcpu);
+	case 0x14:
+		return handle_external_interrupt(vcpu);
+	case 0x1c:
+		return kvm_s390_handle_wait(vcpu);
+	case 0x20:
+		return handle_validity(vcpu);
+	case 0x28:
+		return handle_stop(vcpu);
+	case 0x38:
+		return handle_partial_execution(vcpu);
+	default:
 		return -EOPNOTSUPP;
-	func = intercept_funcs[code >> 2];
-	if (func)
-		return func(vcpu);
-	return -EOPNOTSUPP;
+	}
 }
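
Editor's note: the intercept.c change replaces a sparse function-pointer table, indexed by icptcode >> 2 (the SIE intercept codes are all multiples of 4, which is what the old "code & 3" guard checked), with an explicit switch. Below is a minimal stand-alone C sketch of the two dispatch styles, not kernel code: the handler names and the codes 0x00/0x20 mirror the hunk above, while the table size, the user-space harness, and the -1 error value are illustrative assumptions.

#include <stdio.h>

static int handle_noop(void)     { puts("noop");     return 0; }
static int handle_validity(void) { puts("validity"); return 0; }

typedef int (*handler_t)(void);

/* Old style: codes are multiples of 4, so ">> 2" packs the table; unhandled
 * codes leave NULL holes that must be checked at dispatch time. */
static const handler_t table[] = {
	[0x00 >> 2] = handle_noop,
	[0x20 >> 2] = handle_validity,
};

static int dispatch_table(unsigned char code)
{
	if (code & 3 || (code >> 2) >= sizeof(table) / sizeof(table[0]))
		return -1;
	return table[code >> 2] ? table[code >> 2]() : -1;
}

/* New style: the mapping is spelled out, with no index arithmetic,
 * no NULL slots, and a single default for everything unhandled. */
static int dispatch_switch(unsigned char code)
{
	switch (code) {
	case 0x00: return handle_noop();
	case 0x20: return handle_validity();
	default:   return -1;
	}
}

int main(void)
{
	int a = dispatch_table(0x20);   /* prints "validity" */
	int b = dispatch_switch(0x20);  /* prints "validity" */
	printf("table: %d, switch: %d\n", a, b);
	printf("unhandled 0x0c: table %d, switch %d\n",
	       dispatch_table(0x0c), dispatch_switch(0x0c));
	return 0;
}

Both dispatchers return the same results; the switch simply makes the code-to-handler mapping visible at the call site and lets the compiler check it.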
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 618c85411a51..07a6aa896c12 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -514,7 +514,7 @@ static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
 	if (gtod_high != 0)
 		return -EINVAL;
-	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x\n", gtod_high);
+	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);
 	return 0;
 }
@@ -527,7 +527,7 @@ static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
 		return -EFAULT;
 	kvm_s390_set_tod_clock(kvm, gtod);
-	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx\n", gtod);
+	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod);
 	return 0;
 }
@@ -559,7 +559,7 @@ static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
 	if (copy_to_user((void __user *)attr->addr, &gtod_high,
			 sizeof(gtod_high)))
 		return -EFAULT;
-	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x\n", gtod_high);
+	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);
 	return 0;
 }
@@ -571,7 +571,7 @@ static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
 	gtod = kvm_s390_get_tod_clock_fast(kvm);
 	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
 		return -EFAULT;
-	VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx\n", gtod);
+	VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);
 	return 0;
 }
@@ -1098,7 +1098,9 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 	if (!kvm->arch.sca)
 		goto out_err;
 	spin_lock(&kvm_lock);
-	sca_offset = (sca_offset + 16) & 0x7f0;
+	sca_offset += 16;
+	if (sca_offset + sizeof(struct sca_block) > PAGE_SIZE)
+		sca_offset = 0;
 	kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
 	spin_unlock(&kvm_lock);
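
Editor's note: the last hunk is the memory-corruption fix named in the merge message. Each VM's SCA (system control area) is staggered inside its page by sca_offset; with the old "(sca_offset + 16) & 0x7f0" wrap, the largest offsets let the block run past the 4 KiB page, and the entries beyond the boundary belong to the highest-numbered vCPUs, which lines up with the 63-or-64-vCPU guests called out above. Below is a stand-alone sketch of that arithmetic, not kernel code; PAGE_SIZE and the block size (assumed here to be a 64-byte header plus 64 entries of 32 bytes, i.e. 2112 bytes) are assumptions for illustration, the real struct sca_block lives in arch/s390/include/asm/kvm_host.h.

#include <stdio.h>

#define PAGE_SIZE      4096u
#define SCA_BLOCK_SIZE (64u + 64u * 32u)	/* assumed sizeof(struct sca_block) */

/* Old scheme: offsets cycle through 0, 16, ..., 0x7f0 via the mask. */
static unsigned int next_offset_old(unsigned int off)
{
	return (off + 16) & 0x7f0;
}

/* Fixed scheme: wrap to 0 before the block would cross the page boundary. */
static unsigned int next_offset_fixed(unsigned int off)
{
	off += 16;
	if (off + SCA_BLOCK_SIZE > PAGE_SIZE)
		off = 0;
	return off;
}

int main(void)
{
	unsigned int old = 0, fixed = 0, overruns = 0;

	for (int i = 0; i < 128; i++) {	/* one full cycle of the old scheme */
		old = next_offset_old(old);
		fixed = next_offset_fixed(fixed);
		if (old + SCA_BLOCK_SIZE > PAGE_SIZE)
			overruns++;	/* tail of the SCA spills into the next page */
		if (fixed + SCA_BLOCK_SIZE > PAGE_SIZE)
			puts("fixed scheme overran (should never happen)");
	}
	printf("old scheme: %u of 128 placements cross the page boundary\n",
	       overruns);
	return 0;
}

With these assumed sizes the old scheme overruns for offsets 0x7d0, 0x7e0 and 0x7f0, i.e. the SCA entries at the end of the block land in the following page, while the fixed scheme never lets the block cross the boundary.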