author | Matt Delco <delco@chromium.org> | 2019-09-16 14:16:54 -0700 |
---|---|---|
committer | Paolo Bonzini <pbonzini@redhat.com> | 2019-09-18 15:56:55 +0200 |
commit | b60fe990c6b07ef6d4df67bc0530c7c90a62623a (patch) | |
tree | 49d837232351e9b86c9632ccc509f26596e269ed /virt | |
parent | a9c20bb0206ae9384bd470a6832dd8913730add9 (diff) | |
KVM: coalesced_mmio: add bounds checking
The first/last indexes are typically shared with a user app.
The app can change the 'last' index that the kernel uses
to store the next result. This change sanity-checks the index
before using it to write to a potentially arbitrary address.
This fixes CVE-2019-14821.
Cc: stable@vger.kernel.org
Fixes: 5f94c1741bdc ("KVM: Add coalesced MMIO support (common part)")
Signed-off-by: Matt Delco <delco@chromium.org>
Signed-off-by: Jim Mattson <jmattson@google.com>
Reported-by: syzbot+983c866c3dd6efa3662a@syzkaller.appspotmail.com
[Use READ_ONCE. - Paolo]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
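For context outside the kernel tree, the pattern the fix applies can be sketched in a few lines of plain C: the producer index lives in memory shared with an untrusted party, so it is read exactly once, bounds-checked, and only the validated local copy is used to index the ring. This is an illustrative stand-alone sketch, not the kernel code; RING_MAX, ring_push() and the simplified READ_ONCE() model are hypothetical names used only for illustration.

```c
#include <stdint.h>
#include <string.h>

#define RING_MAX 64	/* stand-in for KVM_COALESCED_MMIO_MAX */
/* simplified model of the kernel's READ_ONCE(): a single volatile load */
#define READ_ONCE(x) (*(volatile typeof(x) *)&(x))

struct entry {
	uint64_t phys_addr;
	uint32_t len;
	uint8_t  data[8];
};

struct ring {
	uint32_t first;		/* consumer index, written by the other side */
	uint32_t last;		/* producer index, also visible to the other side */
	struct entry slots[RING_MAX];
};

/* one slot is always left unused so "full" is distinguishable from "empty" */
static int ring_has_room(const struct ring *r, uint32_t last)
{
	return ((r->first - last - 1) % RING_MAX) != 0;
}

static int ring_push(struct ring *r, uint64_t addr, const void *val, uint32_t len)
{
	uint32_t insert = READ_ONCE(r->last);	/* fetch the shared index exactly once */

	/* reject both "no space" and an index the other side has corrupted */
	if (!ring_has_room(r, insert) || insert >= RING_MAX)
		return -1;

	r->slots[insert].phys_addr = addr;
	r->slots[insert].len = len;
	memcpy(r->slots[insert].data, val,
	       len < sizeof(r->slots[insert].data) ? len : sizeof(r->slots[insert].data));

	/* publish: the new index is derived from the validated local copy */
	r->last = (insert + 1) % RING_MAX;
	return 0;
}
```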
Diffstat (limited to 'virt')
-rw-r--r-- | virt/kvm/coalesced_mmio.c | 19 |
1 file changed, 11 insertions, 8 deletions
diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c
index 5294abb3f178..8ffd07e2a160 100644
--- a/virt/kvm/coalesced_mmio.c
+++ b/virt/kvm/coalesced_mmio.c
@@ -40,7 +40,7 @@ static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
 	return 1;
 }
 
-static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev)
+static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev, u32 last)
 {
 	struct kvm_coalesced_mmio_ring *ring;
 	unsigned avail;
@@ -52,7 +52,7 @@ static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev)
 	 * there is always one unused entry in the buffer
 	 */
 	ring = dev->kvm->coalesced_mmio_ring;
-	avail = (ring->first - ring->last - 1) % KVM_COALESCED_MMIO_MAX;
+	avail = (ring->first - last - 1) % KVM_COALESCED_MMIO_MAX;
 	if (avail == 0) {
 		/* full */
 		return 0;
@@ -67,25 +67,28 @@ static int coalesced_mmio_write(struct kvm_vcpu *vcpu,
 {
 	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
 	struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;
+	__u32 insert;
 
 	if (!coalesced_mmio_in_range(dev, addr, len))
 		return -EOPNOTSUPP;
 
 	spin_lock(&dev->kvm->ring_lock);
 
-	if (!coalesced_mmio_has_room(dev)) {
+	insert = READ_ONCE(ring->last);
+	if (!coalesced_mmio_has_room(dev, insert) ||
+	    insert >= KVM_COALESCED_MMIO_MAX) {
 		spin_unlock(&dev->kvm->ring_lock);
 		return -EOPNOTSUPP;
 	}
 
 	/* copy data in first free entry of the ring */
 
-	ring->coalesced_mmio[ring->last].phys_addr = addr;
-	ring->coalesced_mmio[ring->last].len = len;
-	memcpy(ring->coalesced_mmio[ring->last].data, val, len);
-	ring->coalesced_mmio[ring->last].pio = dev->zone.pio;
+	ring->coalesced_mmio[insert].phys_addr = addr;
+	ring->coalesced_mmio[insert].len = len;
+	memcpy(ring->coalesced_mmio[insert].data, val, len);
+	ring->coalesced_mmio[insert].pio = dev->zone.pio;
 	smp_wmb();
-	ring->last = (ring->last + 1) % KVM_COALESCED_MMIO_MAX;
+	ring->last = (insert + 1) % KVM_COALESCED_MMIO_MAX;
 	spin_unlock(&dev->kvm->ring_lock);
 	return 0;
 }
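Two details of the fix above are worth noting. READ_ONCE() forces ring->last to be loaded exactly once into the local 'insert', so the value that passes the checks is the same value later used as the array index, even if userspace rewrites the shared ring concurrently. The separate 'insert >= KVM_COALESCED_MMIO_MAX' test is needed in addition to coalesced_mmio_has_room(): the modular arithmetic there can report free space even for an index that is already out of range, and such an index would otherwise turn the ring->coalesced_mmio[insert] stores into writes at an attacker-chosen offset.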