author		Linus Torvalds <torvalds@linux-foundation.org>	2018-01-06 17:05:05 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-01-06 17:05:05 -0800
commit		5b6c02f38315b720c593c6079364855d276886aa (patch)
tree		b1bf0e7739f563642a9f4a4e567c14b99824aeb0 /arch/s390
parent		3219e264b984ec0a13923aa66385819c2e98d582 (diff)
parent		bb4945e60dd0b5afb0e92bc8006ce560948fbc39 (diff)
download	linux-stable-5b6c02f38315b720c593c6079364855d276886aa.tar.gz
		linux-stable-5b6c02f38315b720c593c6079364855d276886aa.tar.bz2
		linux-stable-5b6c02f38315b720c593c6079364855d276886aa.zip
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM fixes from Radim Krčmář:
 "s390:
   - Two fixes for potential bitmap overruns in the cmma migration code

  x86:
   - Clear guest provided GPRs to defeat the Project Zero PoC for CVE
     2017-5715"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  kvm: vmx: Scrub hardware GPRs at VM-exit
  KVM: s390: prevent buffer overrun on memory hotplug during migration
  KVM: s390: fix cmma migration for multiple memory slots
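The x86 item in the pull message refers to code outside this arch/s390 diff. Purely as a hedged illustration of the idea behind "Scrub hardware GPRs at VM-exit" (not the actual arch/x86/kvm/vmx.c patch; the names demo_vcpu and demo_scrub_guest_gprs are invented):

#include <string.h>

#define DEMO_NR_GPRS 16	/* x86-64 exposes 16 general-purpose registers */

struct demo_vcpu {
	unsigned long guest_gprs[DEMO_NR_GPRS];	/* values left behind by the guest */
};

/*
 * Hedged sketch: the real fix zeroes the hardware registers in the VM-exit
 * assembly; zeroing the saved copies here only models the intent, namely
 * that guest-controlled register values must not survive into host code,
 * where they could seed speculative-execution gadgets (CVE 2017-5715).
 */
static void demo_scrub_guest_gprs(struct demo_vcpu *vcpu)
{
	memset(vcpu->guest_gprs, 0, sizeof(vcpu->guest_gprs));
}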
Diffstat (limited to 'arch/s390')
-rw-r--r--	arch/s390/kvm/kvm-s390.c	9
-rw-r--r--	arch/s390/kvm/priv.c		2
2 files changed, 6 insertions, 5 deletions
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index ec8b68e97d3c..2c93cbbcd15e 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -792,11 +792,12 @@ static int kvm_s390_vm_start_migration(struct kvm *kvm)
 
 	if (kvm->arch.use_cmma) {
 		/*
-		 * Get the last slot. They should be sorted by base_gfn, so the
-		 * last slot is also the one at the end of the address space.
-		 * We have verified above that at least one slot is present.
+		 * Get the first slot. They are reverse sorted by base_gfn, so
+		 * the first slot is also the one at the end of the address
+		 * space. We have verified above that at least one slot is
+		 * present.
 		 */
-		ms = slots->memslots + slots->used_slots - 1;
+		ms = slots->memslots;
 		/* round up so we only use full longs */
 		ram_pages = roundup(ms->base_gfn + ms->npages, BITS_PER_LONG);
 		/* allocate enough bytes to store all the bits */
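The hunk above relies on the memslot array being sorted by base_gfn in descending order, so the first entry, not the last, bounds the guest address space and therefore the CMMA bitmap. A minimal, hedged sketch of that sizing step, assuming descending order and using invented names (demo_slot, demo_bitmap_bits) rather than the kernel's structures:

#include <stdio.h>

#define DEMO_BITS_PER_LONG (8 * sizeof(unsigned long))

struct demo_slot {
	unsigned long base_gfn;	/* first guest frame number of the slot */
	unsigned long npages;	/* number of frames in the slot */
};

/* slots[] is assumed sorted by base_gfn, highest first, as in the hunk above */
static unsigned long demo_bitmap_bits(const struct demo_slot *slots)
{
	unsigned long top = slots[0].base_gfn + slots[0].npages;

	/* round up to full longs, mirroring roundup(..., BITS_PER_LONG) */
	return (top + DEMO_BITS_PER_LONG - 1) / DEMO_BITS_PER_LONG * DEMO_BITS_PER_LONG;
}

int main(void)
{
	/* descending base_gfn: the first slot ends the guest address space */
	struct demo_slot slots[] = { { 0x40000, 0x100 }, { 0x0, 0x10000 } };

	printf("bitmap must cover %lu bits\n", demo_bitmap_bits(slots));
	return 0;
}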
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 572496c688cc..0714bfa56da0 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -1006,7 +1006,7 @@ static inline int do_essa(struct kvm_vcpu *vcpu, const int orc)
 		cbrlo[entries] = gfn << PAGE_SHIFT;
 	}
 
-	if (orc) {
+	if (orc && gfn < ms->bitmap_size) {
 		/* increment only if we are really flipping the bit to 1 */
 		if (!test_and_set_bit(gfn, ms->pgste_bitmap))
 			atomic64_inc(&ms->dirty_pages);
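For illustration only, here is a hedged, user-space sketch of what the new gfn < ms->bitmap_size guard prevents: memory hotplugged after migration start lies beyond the bitmap that was sized when migration began, so out-of-range frames must be skipped rather than indexing past the allocation. The names (demo_migration_state, demo_set_dirty) are invented, not the kernel's:

#include <stdbool.h>
#include <limits.h>

#define DEMO_BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

struct demo_migration_state {
	unsigned long *pgste_bitmap;	/* one bit per guest frame number */
	unsigned long bitmap_size;	/* bits allocated when migration started */
	unsigned long dirty_pages;	/* counter; non-atomic in this sketch */
};

/* returns true only when the bit flips from 0 to 1, as the hunk's comment says */
static bool demo_set_dirty(struct demo_migration_state *ms, unsigned long gfn)
{
	unsigned long *word, mask;

	/* frames hotplugged after migration start are outside the bitmap;
	 * without this check the bit operation would run past the allocation */
	if (gfn >= ms->bitmap_size)
		return false;

	word = &ms->pgste_bitmap[gfn / DEMO_BITS_PER_LONG];
	mask = 1UL << (gfn % DEMO_BITS_PER_LONG);
	if (*word & mask)
		return false;	/* bit was already set */

	*word |= mask;
	ms->dirty_pages++;
	return true;
}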