author	Paul Durrant <pdurrant@amazon.com>	2024-02-15 15:29:14 +0000
committer	Sean Christopherson <seanjc@google.com>	2024-02-22 07:01:20 -0800
commit	9fa336e343b2c7f6bad6fd5fa2e1cf55e80d3ed1 (patch)
tree	fedea5f91c18f801ec7297d42f2d60e8e0ce92ce /virt
parent	615451d8cb3f82265e0ed60414b606b4fa120f5e (diff)
KVM: pfncache: check the need for invalidation under read lock first
When processing mmu_notifier invalidations for gpc caches, pre-check for
overlap with the invalidation event while holding gpc->lock for read, and
only take gpc->lock for write if the cache needs to be invalidated. Doing
a pre-check without taking gpc->lock for write avoids unnecessarily
contending the lock for unrelated invalidations, which is very beneficial
for caches that are heavily used (but rarely subjected to mmu_notifier
invalidations).

Signed-off-by: Paul Durrant <pdurrant@amazon.com>
Reviewed-by: David Woodhouse <dwmw@amazon.co.uk>
Link: https://lore.kernel.org/r/20240215152916.1158-20-paul@xen.org
Signed-off-by: Sean Christopherson <seanjc@google.com>
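The pattern being applied is a double-checked invalidation: a cheap overlap
test under the read lock, and a re-test under the write lock because the
cache may change in the window where no lock is held. A minimal userspace
sketch of that pattern, assuming a hypothetical "struct cache" protected by
a POSIX rwlock (the kernel code itself uses rwlock_t with read_lock_irq()
and write_lock_irq()):

#include <pthread.h>
#include <stdbool.h>

struct cache {
	pthread_rwlock_t lock;
	bool valid;
	unsigned long uhva;	/* host virtual address of the cached page */
};

static bool overlaps(struct cache *c, unsigned long start, unsigned long end)
{
	/* Only a single page is cached, so no length check is needed. */
	return c->valid && c->uhva >= start && c->uhva < end;
}

void cache_invalidate(struct cache *c, unsigned long start, unsigned long end)
{
	/*
	 * Pre-check under the read lock: unrelated invalidations return
	 * here and never contend the write lock.
	 */
	pthread_rwlock_rdlock(&c->lock);
	if (!overlaps(c, start, end)) {
		pthread_rwlock_unlock(&c->lock);
		return;
	}
	pthread_rwlock_unlock(&c->lock);

	/*
	 * The cache could have been modified while no lock was held, so
	 * re-check the condition once the write lock is acquired.
	 */
	pthread_rwlock_wrlock(&c->lock);
	if (overlaps(c, start, end))
		c->valid = false;
	pthread_rwlock_unlock(&c->lock);
}

The sketch omits the list walk over kvm->gpc_list and the PFN error check
present in the real code; it only illustrates the locking choreography that
the commit message describes.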
Diffstat (limited to 'virt')
-rw-r--r--	virt/kvm/pfncache.c	22
1 file changed, 19 insertions(+), 3 deletions(-)
diff --git a/virt/kvm/pfncache.c b/virt/kvm/pfncache.c
index a47ca6fd75c2..9ac8c9da4eda 100644
--- a/virt/kvm/pfncache.c
+++ b/virt/kvm/pfncache.c
@@ -29,14 +29,30 @@ void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, unsigned long start,
 
 	spin_lock(&kvm->gpc_lock);
 	list_for_each_entry(gpc, &kvm->gpc_list, list) {
-		write_lock_irq(&gpc->lock);
+		read_lock_irq(&gpc->lock);
 
 		/* Only a single page so no need to care about length */
 		if (gpc->valid && !is_error_noslot_pfn(gpc->pfn) &&
 		    gpc->uhva >= start && gpc->uhva < end) {
-			gpc->valid = false;
+			read_unlock_irq(&gpc->lock);
+
+			/*
+			 * There is a small window here where the cache could
+			 * be modified, and invalidation would no longer be
+			 * necessary. Hence check again whether invalidation
+			 * is still necessary once the write lock has been
+			 * acquired.
+			 */
+
+			write_lock_irq(&gpc->lock);
+			if (gpc->valid && !is_error_noslot_pfn(gpc->pfn) &&
+			    gpc->uhva >= start && gpc->uhva < end)
+				gpc->valid = false;
+			write_unlock_irq(&gpc->lock);
+			continue;
 		}
-		write_unlock_irq(&gpc->lock);
+
+		read_unlock_irq(&gpc->lock);
 	}
 	spin_unlock(&kvm->gpc_lock);
 }