author     David Woodhouse <dwmw@amazon.co.uk>      2021-12-10 16:36:20 +0000
committer  Paolo Bonzini <pbonzini@redhat.com>      2022-01-07 10:44:44 -0500
commit     2efd61a608b0039911924d2e5d7028eb37496e85 (patch)
tree       26f889c3710b9b263c0f2c748a6882ddc4e41971
parent     f3f26dae05e39f0f286f588669b54d49b61dcfb8 (diff)
KVM: Warn if mark_page_dirty() is called without an active vCPU
The various kvm_write_guest() and mark_page_dirty() functions must only
ever be called in the context of an active vCPU, because if dirty ring
tracking is enabled it may simply oops when kvm_get_running_vcpu()
returns NULL for the vcpu and then kvm_dirty_ring_get() dereferences it.

This oops was reported by "butt3rflyh4ck" <butterflyhuangxx@gmail.com> in
https://lore.kernel.org/kvm/CAFcO6XOmoS7EacN_n6v4Txk7xL7iqRa2gABg3F7E3Naf5uG94g@mail.gmail.com/

That actual bug will be fixed under separate cover but this warning
should help to prevent new ones from being added.

Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
Message-Id: <20211210163625.2886-2-dwmw2@infradead.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
-rw-r--r--   include/linux/kvm_dirty_ring.h   6
-rw-r--r--   virt/kvm/dirty_ring.c            9
-rw-r--r--   virt/kvm/kvm_main.c              7
3 files changed, 6 insertions, 16 deletions
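For context, the helper removed below trusts that a vCPU is always running: the result of kvm_get_running_vcpu() is dereferenced with no NULL check, so a guest write issued outside vCPU context oopses once dirty ring tracking is enabled. A minimal sketch of that failure path; the ioctl-style caller is a hypothetical illustration, not the reported reproducer:

/* Pre-patch helper (removed by this commit): assumes a running vCPU. */
struct kvm_dirty_ring *kvm_dirty_ring_get(struct kvm *kvm)
{
        struct kvm_vcpu *vcpu = kvm_get_running_vcpu();  /* NULL outside vCPU context */

        WARN_ON_ONCE(vcpu->kvm != kvm);                  /* NULL dereference right here */

        return &vcpu->dirty_ring;
}

/* Hypothetical non-vCPU call path: a VM-scoped ioctl writing guest memory.
 * kvm_write_guest() ends up in mark_page_dirty_in_slot(), which used to
 * call the helper above whenever kvm->dirty_ring_size is set. */
static int example_vm_ioctl_write(struct kvm *kvm, gpa_t gpa,
                                  const void *data, unsigned long len)
{
        return kvm_write_guest(kvm, gpa, data, len);
}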
diff --git a/include/linux/kvm_dirty_ring.h b/include/linux/kvm_dirty_ring.h
index 4da8d4a4140b..906f899813dc 100644
--- a/include/linux/kvm_dirty_ring.h
+++ b/include/linux/kvm_dirty_ring.h
@@ -43,11 +43,6 @@ static inline int kvm_dirty_ring_alloc(struct kvm_dirty_ring *ring,
         return 0;
 }
 
-static inline struct kvm_dirty_ring *kvm_dirty_ring_get(struct kvm *kvm)
-{
-        return NULL;
-}
-
 static inline int kvm_dirty_ring_reset(struct kvm *kvm,
                                        struct kvm_dirty_ring *ring)
 {
@@ -78,7 +73,6 @@ static inline bool kvm_dirty_ring_soft_full(struct kvm_dirty_ring *ring)
 
 u32 kvm_dirty_ring_get_rsvd_entries(void);
 int kvm_dirty_ring_alloc(struct kvm_dirty_ring *ring, int index, u32 size);
-struct kvm_dirty_ring *kvm_dirty_ring_get(struct kvm *kvm);
 
 /*
  * called with kvm->slots_lock held, returns the number of
diff --git a/virt/kvm/dirty_ring.c b/virt/kvm/dirty_ring.c
index 88f4683198ea..8e9874760fb3 100644
--- a/virt/kvm/dirty_ring.c
+++ b/virt/kvm/dirty_ring.c
@@ -36,15 +36,6 @@ static bool kvm_dirty_ring_full(struct kvm_dirty_ring *ring)
         return kvm_dirty_ring_used(ring) >= ring->size;
 }
 
-struct kvm_dirty_ring *kvm_dirty_ring_get(struct kvm *kvm)
-{
-        struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
-
-        WARN_ON_ONCE(vcpu->kvm != kvm);
-
-        return &vcpu->dirty_ring;
-}
-
 static void kvm_reset_dirty_gfn(struct kvm *kvm, u32 slot, u64 offset, u64 mask)
 {
         struct kvm_memory_slot *memslot;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index b0f7e6eb00ff..af5b4427b139 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -3155,12 +3155,17 @@ void mark_page_dirty_in_slot(struct kvm *kvm,
                              const struct kvm_memory_slot *memslot,
                              gfn_t gfn)
 {
+        struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
+
+        if (WARN_ON_ONCE(!vcpu) || WARN_ON_ONCE(vcpu->kvm != kvm))
+                return;
+
         if (memslot && kvm_slot_dirty_track_enabled(memslot)) {
                 unsigned long rel_gfn = gfn - memslot->base_gfn;
                 u32 slot = (memslot->as_id << 16) | memslot->id;
 
                 if (kvm->dirty_ring_size)
-                        kvm_dirty_ring_push(kvm_dirty_ring_get(kvm),
+                        kvm_dirty_ring_push(&vcpu->dirty_ring,
                                             slot, rel_gfn);
                 else
                         set_bit_le(rel_gfn, memslot->dirty_bitmap);
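With the hunk above applied, the guard runs before both the dirty-ring and dirty-bitmap paths, so a caller with no running vCPU, or with a vCPU belonging to a different struct kvm, warns once and returns without marking the page. A condensed, slightly paraphrased view of the function after this patch, not an exact quote of the upstream source:

void mark_page_dirty_in_slot(struct kvm *kvm,
                             const struct kvm_memory_slot *memslot, gfn_t gfn)
{
        struct kvm_vcpu *vcpu = kvm_get_running_vcpu();

        /* New guard: no running vCPU, or a vCPU from another VM, means
         * there is no dirty ring to push to; warn once and bail out. */
        if (WARN_ON_ONCE(!vcpu) || WARN_ON_ONCE(vcpu->kvm != kvm))
                return;

        if (memslot && kvm_slot_dirty_track_enabled(memslot)) {
                unsigned long rel_gfn = gfn - memslot->base_gfn;
                u32 slot = (memslot->as_id << 16) | memslot->id;

                if (kvm->dirty_ring_size)
                        /* Ring entries now come straight from the running vCPU. */
                        kvm_dirty_ring_push(&vcpu->dirty_ring, slot, rel_gfn);
                else
                        set_bit_le(rel_gfn, memslot->dirty_bitmap);
        }
}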