author     Paul Durrant <pdurrant@amazon.com>          2024-02-15 15:28:58 +0000
committer  Sean Christopherson <seanjc@google.com>     2024-02-20 07:37:41 -0800
commit     4438355ec6e165f73357203e0792079dec9e3059 (patch)
tree       2e903ae58b7636480b70ca6d9e83a4ccdf5929cf
parent     41496fffc0e1276e1c41344ef38513d5353da3ca (diff)
KVM: x86/xen: mark guest pages dirty with the pfncache lock held
Sampling gpa and memslot from an unlocked pfncache may yield inconsistent values, and there is no problem with calling mark_page_dirty_in_slot() with the pfncache lock held, so relocate the calls in kvm_xen_update_runstate_guest() and kvm_xen_inject_pending_events() accordingly.

Signed-off-by: Paul Durrant <pdurrant@amazon.com>
Reviewed-by: David Woodhouse <dwmw@amazon.co.uk>
Link: https://lore.kernel.org/r/20240215152916.1158-4-paul@xen.org
Signed-off-by: Sean Christopherson <seanjc@google.com>
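For context, here is a standalone sketch (my own illustration, not part of the patch or the kernel tree) of the locking pattern the change enforces: a pfncache's memslot and gpa are only guaranteed to be a consistent pair while its lock is held, so the page must be marked dirty before the lock is dropped. The pfncache struct, the mark_dirty() helper and the pthread rwlock below are simplified stand-ins for KVM's gfn_to_pfn_cache, mark_page_dirty_in_slot() and the kernel's rwlock.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for struct gfn_to_pfn_cache. */
struct pfncache {
        pthread_rwlock_t lock;
        uint64_t gpa;      /* guest physical address the cache currently maps */
        int memslot;       /* stand-in for the memslot the gpa belongs to */
};

/* Stand-in for mark_page_dirty_in_slot(): slot and gfn must be consistent. */
static void mark_dirty(int memslot, uint64_t gfn)
{
        printf("dirty: slot=%d gfn=%#llx\n", memslot, (unsigned long long)gfn);
}

/*
 * Buggy pattern: the fields are sampled after the lock is dropped, so a
 * concurrent cache refresh may have changed gpa and/or memslot and the
 * wrong (or an inconsistent) page gets marked dirty.
 */
static void update_then_mark_unlocked(struct pfncache *gpc)
{
        pthread_rwlock_rdlock(&gpc->lock);
        /* ... write guest data through the cached mapping ... */
        pthread_rwlock_unlock(&gpc->lock);

        mark_dirty(gpc->memslot, gpc->gpa >> 12);   /* racy sampling */
}

/*
 * Fixed pattern, mirroring the patch: sample memslot and gpa, and mark the
 * page dirty, while the read lock is still held.
 */
static void update_then_mark_locked(struct pfncache *gpc)
{
        pthread_rwlock_rdlock(&gpc->lock);
        /* ... write guest data through the cached mapping ... */
        mark_dirty(gpc->memslot, gpc->gpa >> 12);   /* consistent snapshot */
        pthread_rwlock_unlock(&gpc->lock);
}

int main(void)
{
        struct pfncache gpc = { .gpa = 0x1234000, .memslot = 1 };

        pthread_rwlock_init(&gpc.lock, NULL);
        update_then_mark_unlocked(&gpc);
        update_then_mark_locked(&gpc);
        pthread_rwlock_destroy(&gpc.lock);
        return 0;
}

Built with cc -pthread, the second helper always reports the slot/gfn pair that was actually written, regardless of any refresh that races with the unlock, which is the property the patch restores for the Xen runstate and event-channel paths.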
 arch/x86/kvm/xen.c | 13 ++++++-------
 1 file changed, 6 insertions(+), 7 deletions(-)
diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c
index 4b4e738c6f1b..f3327508ae41 100644
--- a/arch/x86/kvm/xen.c
+++ b/arch/x86/kvm/xen.c
@@ -452,14 +452,13 @@ static void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, bool atomic)
 		smp_wmb();
 	}
 
-	if (user_len2)
+	if (user_len2) {
+		mark_page_dirty_in_slot(v->kvm, gpc2->memslot, gpc2->gpa >> PAGE_SHIFT);
 		read_unlock(&gpc2->lock);
-
-	read_unlock_irqrestore(&gpc1->lock, flags);
+	}
 
 	mark_page_dirty_in_slot(v->kvm, gpc1->memslot, gpc1->gpa >> PAGE_SHIFT);
-	if (user_len2)
-		mark_page_dirty_in_slot(v->kvm, gpc2->memslot, gpc2->gpa >> PAGE_SHIFT);
+	read_unlock_irqrestore(&gpc1->lock, flags);
 }
 
 void kvm_xen_update_runstate(struct kvm_vcpu *v, int state)
@@ -565,13 +564,13 @@ void kvm_xen_inject_pending_events(struct kvm_vcpu *v)
 			     : "0" (evtchn_pending_sel32));
 		WRITE_ONCE(vi->evtchn_upcall_pending, 1);
 	}
+
+	mark_page_dirty_in_slot(v->kvm, gpc->memslot, gpc->gpa >> PAGE_SHIFT);
 	read_unlock_irqrestore(&gpc->lock, flags);
 
 	/* For the per-vCPU lapic vector, deliver it as MSI. */
 	if (v->arch.xen.upcall_vector)
 		kvm_xen_inject_vcpu_vector(v);
-
-	mark_page_dirty_in_slot(v->kvm, gpc->memslot, gpc->gpa >> PAGE_SHIFT);
 }
 
 int __kvm_xen_has_interrupt(struct kvm_vcpu *v)