author | Matt Delco <delco@chromium.org> | 2019-09-16 14:16:54 -0700
---|---|---
committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2019-09-21 07:12:47 +0200
commit | ae41539657ce0a4e9f4588e89e5e19a8b8f11928 (patch) |
tree | a0f77a1180d0b6e714c5690c9eee4829f87f6687 /virt |
parent | ac591429f94c43113dee9cc42ff69305a43365eb (diff) |
KVM: coalesced_mmio: add bounds checking
commit b60fe990c6b07ef6d4df67bc0530c7c90a62623a upstream.
The first/last indexes are typically shared with a user app.
The app can change the 'last' index that the kernel uses
to store the next result. This change sanity checks the index
before using it for writing to a potentially arbitrary address.
This fixes CVE-2019-14821.
Cc: stable@vger.kernel.org
Fixes: 5f94c1741bdc ("KVM: Add coalesced MMIO support (common part)")
Signed-off-by: Matt Delco <delco@chromium.org>
Signed-off-by: Jim Mattson <jmattson@google.com>
Reported-by: syzbot+983c866c3dd6efa3662a@syzkaller.appspotmail.com
[Use READ_ONCE. - Paolo]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'virt')
-rw-r--r-- | virt/kvm/coalesced_mmio.c | 17
1 file changed, 10 insertions, 7 deletions
diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c
index 571c1ce37d15..5c1efb869df2 100644
--- a/virt/kvm/coalesced_mmio.c
+++ b/virt/kvm/coalesced_mmio.c
@@ -39,7 +39,7 @@ static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
         return 1;
 }
 
-static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev)
+static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev, u32 last)
 {
         struct kvm_coalesced_mmio_ring *ring;
         unsigned avail;
@@ -51,7 +51,7 @@ static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev)
          * there is always one unused entry in the buffer
          */
         ring = dev->kvm->coalesced_mmio_ring;
-        avail = (ring->first - ring->last - 1) % KVM_COALESCED_MMIO_MAX;
+        avail = (ring->first - last - 1) % KVM_COALESCED_MMIO_MAX;
         if (avail == 0) {
                 /* full */
                 return 0;
@@ -66,24 +66,27 @@ static int coalesced_mmio_write(struct kvm_vcpu *vcpu,
 {
         struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
         struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;
+        __u32 insert;
 
         if (!coalesced_mmio_in_range(dev, addr, len))
                 return -EOPNOTSUPP;
 
         spin_lock(&dev->kvm->ring_lock);
 
-        if (!coalesced_mmio_has_room(dev)) {
+        insert = READ_ONCE(ring->last);
+        if (!coalesced_mmio_has_room(dev, insert) ||
+            insert >= KVM_COALESCED_MMIO_MAX) {
                 spin_unlock(&dev->kvm->ring_lock);
                 return -EOPNOTSUPP;
         }
 
         /* copy data in first free entry of the ring */
 
-        ring->coalesced_mmio[ring->last].phys_addr = addr;
-        ring->coalesced_mmio[ring->last].len = len;
-        memcpy(ring->coalesced_mmio[ring->last].data, val, len);
+        ring->coalesced_mmio[insert].phys_addr = addr;
+        ring->coalesced_mmio[insert].len = len;
+        memcpy(ring->coalesced_mmio[insert].data, val, len);
         smp_wmb();
-        ring->last = (ring->last + 1) % KVM_COALESCED_MMIO_MAX;
+        ring->last = (insert + 1) % KVM_COALESCED_MMIO_MAX;
         spin_unlock(&dev->kvm->ring_lock);
         return 0;
 }
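Why userspace controls the index in the first place: the coalesced MMIO ring is a page that KVM maps read-write into the VMM process, so nothing prevents that process from storing an arbitrary value into ring->last between kernel writes. The sketch below is illustrative only and is not part of the patch; it assumes a Linux host with /dev/kvm, uses only generic KVM ioctls, and omits all error handling.

```c
/*
 * Illustrative sketch, not part of the patch: how a VMM maps the
 * coalesced MMIO ring that coalesced_mmio_write() fills in.  Because the
 * page is shared read-write with userspace, a hostile process can put
 * any value into ring->last; before this fix the kernel used that value
 * directly as an array index.  Error handling is omitted for brevity.
 */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/kvm.h>

int main(void)
{
        int kvm  = open("/dev/kvm", O_RDWR);
        int vm   = ioctl(kvm, KVM_CREATE_VM, 0);
        int vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);

        /* The ring lives in the vcpu mmap area, at this page offset. */
        long ring_off  = ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_COALESCED_MMIO);
        long mmap_size = ioctl(kvm, KVM_GET_VCPU_MMAP_SIZE, 0);
        long page      = sysconf(_SC_PAGESIZE);

        char *run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
                         MAP_SHARED, vcpu, 0);
        struct kvm_coalesced_mmio_ring *ring =
                (struct kvm_coalesced_mmio_ring *)(run + ring_off * page);

        /*
         * A well-behaved VMM only advances ring->first after consuming
         * entries; 'last' belongs to the kernel.  A malicious process can
         * write an out-of-range value here anyway, which is exactly what
         * the new "insert >= KVM_COALESCED_MMIO_MAX" check guards against.
         */
        ring->last = 0xffffffff;

        return 0;
}
```

On the kernel side, READ_ONCE(ring->last) takes a single snapshot of the user-visible field, so the room check, the new insert >= KVM_COALESCED_MMIO_MAX bound, and the subsequent array writes all operate on the same value even if userspace keeps modifying it concurrently.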