From f39b80e3ff12ced77dddcacd28b4af878cac1433 Mon Sep 17 00:00:00 2001 From: Paul Durrant Date: Thu, 15 Feb 2024 15:28:56 +0000 Subject: KVM: pfncache: Add a map helper function There is a pfncache unmap helper but mapping is open-coded. Arguably this is fine because mapping is done in only one place, hva_to_pfn_retry(), but adding the helper does make that function more readable. No functional change intended. Signed-off-by: Paul Durrant Reviewed-by: David Woodhouse Link: https://lore.kernel.org/r/20240215152916.1158-2-paul@xen.org Signed-off-by: Sean Christopherson --- virt/kvm/pfncache.c | 47 +++++++++++++++++++++++++++++------------------ 1 file changed, 29 insertions(+), 18 deletions(-) (limited to 'virt') diff --git a/virt/kvm/pfncache.c b/virt/kvm/pfncache.c index 2d6aba677830..10842f1eeeae 100644 --- a/virt/kvm/pfncache.c +++ b/virt/kvm/pfncache.c @@ -96,17 +96,32 @@ bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, unsigned long len) } EXPORT_SYMBOL_GPL(kvm_gpc_check); -static void gpc_unmap_khva(kvm_pfn_t pfn, void *khva) +static void *gpc_map(kvm_pfn_t pfn) { - /* Unmap the old pfn/page if it was mapped before. */ - if (!is_error_noslot_pfn(pfn) && khva) { - if (pfn_valid(pfn)) - kunmap(pfn_to_page(pfn)); + if (pfn_valid(pfn)) + return kmap(pfn_to_page(pfn)); + #ifdef CONFIG_HAS_IOMEM - else - memunmap(khva); + return memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB); +#else + return NULL; #endif +} + +static void gpc_unmap(kvm_pfn_t pfn, void *khva) +{ + /* Unmap the old pfn/page if it was mapped before. */ + if (is_error_noslot_pfn(pfn) || !khva) + return; + + if (pfn_valid(pfn)) { + kunmap(pfn_to_page(pfn)); + return; } + +#ifdef CONFIG_HAS_IOMEM + memunmap(khva); +#endif } static inline bool mmu_notifier_retry_cache(struct kvm *kvm, unsigned long mmu_seq) @@ -175,7 +190,7 @@ static kvm_pfn_t hva_to_pfn_retry(struct gfn_to_pfn_cache *gpc) * the existing mapping and didn't create a new one. */ if (new_khva != old_khva) - gpc_unmap_khva(new_pfn, new_khva); + gpc_unmap(new_pfn, new_khva); kvm_release_pfn_clean(new_pfn); @@ -193,15 +208,11 @@ static kvm_pfn_t hva_to_pfn_retry(struct gfn_to_pfn_cache *gpc) * too must be done outside of gpc->lock! */ if (gpc->usage & KVM_HOST_USES_PFN) { - if (new_pfn == gpc->pfn) { + if (new_pfn == gpc->pfn) new_khva = old_khva; - } else if (pfn_valid(new_pfn)) { - new_khva = kmap(pfn_to_page(new_pfn)); -#ifdef CONFIG_HAS_IOMEM - } else { - new_khva = memremap(pfn_to_hpa(new_pfn), PAGE_SIZE, MEMREMAP_WB); -#endif - } + else + new_khva = gpc_map(new_pfn); + if (!new_khva) { kvm_release_pfn_clean(new_pfn); goto out_error; @@ -326,7 +337,7 @@ out_unlock: mutex_unlock(&gpc->refresh_lock); if (unmap_old) - gpc_unmap_khva(old_pfn, old_khva); + gpc_unmap(old_pfn, old_khva); return ret; } @@ -412,7 +423,7 @@ void kvm_gpc_deactivate(struct gfn_to_pfn_cache *gpc) list_del(&gpc->list); spin_unlock(&kvm->gpc_lock); - gpc_unmap_khva(old_pfn, old_khva); + gpc_unmap(old_pfn, old_khva); } } EXPORT_SYMBOL_GPL(kvm_gpc_deactivate); -- cgit v1.2.3 From 41496fffc0e1276e1c41344ef38513d5353da3ca Mon Sep 17 00:00:00 2001 From: Paul Durrant Date: Thu, 15 Feb 2024 15:28:57 +0000 Subject: KVM: pfncache: remove unnecessary exports There is no need for the existing kvm_gpc_XXX() functions to be exported. Clean up now before additional functions are added in subsequent patches. 
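For orientation, the kvm_gpc_*() functions touched here are driven by their callers in a check/refresh loop taken under gpc->lock. The snippet below is a minimal sketch of that consumer pattern, based only on the signatures visible in the diffs above; the helper name and the u64 payload are invented for illustration and it is not a copy of any in-tree caller.

#include <linux/kvm_host.h>

/* Hypothetical consumer: read a u64 through a gfn_to_pfn_cache. */
static int demo_read_u64(struct gfn_to_pfn_cache *gpc, u64 *val)
{
	unsigned long flags;

	read_lock_irqsave(&gpc->lock, flags);
	while (!kvm_gpc_check(gpc, sizeof(*val))) {
		read_unlock_irqrestore(&gpc->lock, flags);

		/* Re-resolve the hva/pfn and re-map gpc->khva. */
		if (kvm_gpc_refresh(gpc, sizeof(*val)))
			return -EFAULT;

		read_lock_irqsave(&gpc->lock, flags);
	}

	/* The mapping stays valid for as long as gpc->lock is held. */
	*val = *(u64 *)gpc->khva;
	read_unlock_irqrestore(&gpc->lock, flags);

	return 0;
}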
Signed-off-by: Paul Durrant Reviewed-by: David Woodhouse Link: https://lore.kernel.org/r/20240215152916.1158-3-paul@xen.org Signed-off-by: Sean Christopherson --- virt/kvm/pfncache.c | 5 ----- 1 file changed, 5 deletions(-) (limited to 'virt') diff --git a/virt/kvm/pfncache.c b/virt/kvm/pfncache.c index 10842f1eeeae..f3571f44d9af 100644 --- a/virt/kvm/pfncache.c +++ b/virt/kvm/pfncache.c @@ -94,7 +94,6 @@ bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, unsigned long len) return true; } -EXPORT_SYMBOL_GPL(kvm_gpc_check); static void *gpc_map(kvm_pfn_t pfn) { @@ -346,7 +345,6 @@ int kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, unsigned long len) { return __kvm_gpc_refresh(gpc, gpc->gpa, len); } -EXPORT_SYMBOL_GPL(kvm_gpc_refresh); void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm, struct kvm_vcpu *vcpu, enum pfn_cache_usage usage) @@ -363,7 +361,6 @@ void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm, gpc->pfn = KVM_PFN_ERR_FAULT; gpc->uhva = KVM_HVA_ERR_BAD; } -EXPORT_SYMBOL_GPL(kvm_gpc_init); int kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len) { @@ -388,7 +385,6 @@ int kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len) } return __kvm_gpc_refresh(gpc, gpa, len); } -EXPORT_SYMBOL_GPL(kvm_gpc_activate); void kvm_gpc_deactivate(struct gfn_to_pfn_cache *gpc) { @@ -426,4 +422,3 @@ void kvm_gpc_deactivate(struct gfn_to_pfn_cache *gpc) gpc_unmap(old_pfn, old_khva); } } -EXPORT_SYMBOL_GPL(kvm_gpc_deactivate); -- cgit v1.2.3 From a4bff3df51472f555ab8dea05a3d2faf4abbf199 Mon Sep 17 00:00:00 2001 From: Paul Durrant Date: Thu, 15 Feb 2024 15:29:00 +0000 Subject: KVM: pfncache: remove KVM_GUEST_USES_PFN usage As noted in [1] the KVM_GUEST_USES_PFN usage flag is never set by any callers of kvm_gpc_init(), and for good reason: the implementation is incomplete/broken. And it's not clear that there will ever be a user of KVM_GUEST_USES_PFN, as coordinating vCPUs with mmu_notifier events is non-trivial. Remove KVM_GUEST_USES_PFN and all related code, e.g. dropping KVM_GUEST_USES_PFN also makes the 'vcpu' argument redundant, to avoid having to reason about broken code as __kvm_gpc_refresh() evolves. Moreover, all existing callers specify KVM_HOST_USES_PFN so the usage check in hva_to_pfn_retry() and hence the 'usage' argument to kvm_gpc_init() are also redundant. 
[1] https://lore.kernel.org/all/ZQiR8IpqOZrOpzHC@google.com Signed-off-by: Paul Durrant Reviewed-by: David Woodhouse Link: https://lore.kernel.org/r/20240215152916.1158-6-paul@xen.org [sean: explicitly call out that guest usage is incomplete] Signed-off-by: Sean Christopherson --- virt/kvm/pfncache.c | 61 ++++++++--------------------------------------------- 1 file changed, 9 insertions(+), 52 deletions(-) (limited to 'virt') diff --git a/virt/kvm/pfncache.c b/virt/kvm/pfncache.c index f3571f44d9af..6f4b537eb25b 100644 --- a/virt/kvm/pfncache.c +++ b/virt/kvm/pfncache.c @@ -25,9 +25,7 @@ void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, unsigned long start, unsigned long end, bool may_block) { - DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS); struct gfn_to_pfn_cache *gpc; - bool evict_vcpus = false; spin_lock(&kvm->gpc_lock); list_for_each_entry(gpc, &kvm->gpc_list, list) { @@ -37,43 +35,10 @@ void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, unsigned long start, if (gpc->valid && !is_error_noslot_pfn(gpc->pfn) && gpc->uhva >= start && gpc->uhva < end) { gpc->valid = false; - - /* - * If a guest vCPU could be using the physical address, - * it needs to be forced out of guest mode. - */ - if (gpc->usage & KVM_GUEST_USES_PFN) { - if (!evict_vcpus) { - evict_vcpus = true; - bitmap_zero(vcpu_bitmap, KVM_MAX_VCPUS); - } - __set_bit(gpc->vcpu->vcpu_idx, vcpu_bitmap); - } } write_unlock_irq(&gpc->lock); } spin_unlock(&kvm->gpc_lock); - - if (evict_vcpus) { - /* - * KVM needs to ensure the vCPU is fully out of guest context - * before allowing the invalidation to continue. - */ - unsigned int req = KVM_REQ_OUTSIDE_GUEST_MODE; - bool called; - - /* - * If the OOM reaper is active, then all vCPUs should have - * been stopped already, so perform the request without - * KVM_REQUEST_WAIT and be sad if any needed to be IPI'd. - */ - if (!may_block) - req &= ~KVM_REQUEST_WAIT; - - called = kvm_make_vcpus_request_mask(kvm, req, vcpu_bitmap); - - WARN_ON_ONCE(called && !may_block); - } } bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, unsigned long len) @@ -206,16 +171,14 @@ static kvm_pfn_t hva_to_pfn_retry(struct gfn_to_pfn_cache *gpc) * pfn. Note, kmap() and memremap() can both sleep, so this * too must be done outside of gpc->lock! 
*/ - if (gpc->usage & KVM_HOST_USES_PFN) { - if (new_pfn == gpc->pfn) - new_khva = old_khva; - else - new_khva = gpc_map(new_pfn); - - if (!new_khva) { - kvm_release_pfn_clean(new_pfn); - goto out_error; - } + if (new_pfn == gpc->pfn) + new_khva = old_khva; + else + new_khva = gpc_map(new_pfn); + + if (!new_khva) { + kvm_release_pfn_clean(new_pfn); + goto out_error; } write_lock_irq(&gpc->lock); @@ -346,18 +309,12 @@ int kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, unsigned long len) return __kvm_gpc_refresh(gpc, gpc->gpa, len); } -void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm, - struct kvm_vcpu *vcpu, enum pfn_cache_usage usage) +void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm) { - WARN_ON_ONCE(!usage || (usage & KVM_GUEST_AND_HOST_USE_PFN) != usage); - WARN_ON_ONCE((usage & KVM_GUEST_USES_PFN) && !vcpu); - rwlock_init(&gpc->lock); mutex_init(&gpc->refresh_lock); gpc->kvm = kvm; - gpc->vcpu = vcpu; - gpc->usage = usage; gpc->pfn = KVM_PFN_ERR_FAULT; gpc->uhva = KVM_HVA_ERR_BAD; } -- cgit v1.2.3 From 53e63e953e14a66b7d0d7e9c616cd602f3da9291 Mon Sep 17 00:00:00 2001 From: Paul Durrant Date: Thu, 15 Feb 2024 15:29:01 +0000 Subject: KVM: pfncache: stop open-coding offset_in_page() Some code in pfncache uses offset_in_page() but in other places it is open- coded. Use offset_in_page() consistently everywhere. Signed-off-by: Paul Durrant Reviewed-by: David Woodhouse Link: https://lore.kernel.org/r/20240215152916.1158-7-paul@xen.org Signed-off-by: Sean Christopherson --- virt/kvm/pfncache.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'virt') diff --git a/virt/kvm/pfncache.c b/virt/kvm/pfncache.c index 6f4b537eb25b..0eeb034d0674 100644 --- a/virt/kvm/pfncache.c +++ b/virt/kvm/pfncache.c @@ -48,7 +48,7 @@ bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, unsigned long len) if (!gpc->active) return false; - if ((gpc->gpa & ~PAGE_MASK) + len > PAGE_SIZE) + if (offset_in_page(gpc->gpa) + len > PAGE_SIZE) return false; if (gpc->generation != slots->generation || kvm_is_error_hva(gpc->uhva)) @@ -192,7 +192,7 @@ static kvm_pfn_t hva_to_pfn_retry(struct gfn_to_pfn_cache *gpc) gpc->valid = true; gpc->pfn = new_pfn; - gpc->khva = new_khva + (gpc->gpa & ~PAGE_MASK); + gpc->khva = new_khva + offset_in_page(gpc->gpa); /* * Put the reference to the _new_ pfn. The pfn is now tracked by the @@ -213,7 +213,7 @@ static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len) { struct kvm_memslots *slots = kvm_memslots(gpc->kvm); - unsigned long page_offset = gpa & ~PAGE_MASK; + unsigned long page_offset = offset_in_page(gpa); bool unmap_old = false; unsigned long old_uhva; kvm_pfn_t old_pfn; -- cgit v1.2.3 From 406c10962a4cbb1e04aa7993ea466db1a4533ae4 Mon Sep 17 00:00:00 2001 From: Paul Durrant Date: Thu, 15 Feb 2024 15:29:02 +0000 Subject: KVM: pfncache: include page offset in uhva and use it consistently Currently the pfncache page offset is sometimes determined using the gpa and sometimes the khva, whilst the uhva is always page-aligned. After a subsequent patch is applied the gpa will not always be valid so adjust the code to include the page offset in the uhva and use it consistently as the source of truth. Also, where a page-aligned address is required, use PAGE_ALIGN_DOWN() for clarity. No functional change intended. 
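The split into a page-aligned base plus an in-page offset is lossless because a cached region never spans a page boundary. A userspace toy re-implementation of the two helpers, assuming a 4 KiB page size (the real offset_in_page() and PAGE_ALIGN_DOWN() come from the kernel headers), shows the arithmetic:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE		4096UL
#define PAGE_MASK		(~(PAGE_SIZE - 1))
#define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)
#define PAGE_ALIGN_DOWN(addr)	((unsigned long)(addr) & PAGE_MASK)

int main(void)
{
	unsigned long uhva = 0x7f1234567abcUL;	/* arbitrary example address */

	/* base + offset reconstructs the original uhva exactly. */
	printf("base   = %#lx\n", PAGE_ALIGN_DOWN(uhva));	/* 0x7f1234567000 */
	printf("offset = %#lx\n", offset_in_page(uhva));	/* 0xabc */
	return 0;
}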
Signed-off-by: Paul Durrant Reviewed-by: David Woodhouse Link: https://lore.kernel.org/r/20240215152916.1158-8-paul@xen.org Signed-off-by: Sean Christopherson --- virt/kvm/pfncache.c | 29 +++++++++++++++++++++-------- 1 file changed, 21 insertions(+), 8 deletions(-) (limited to 'virt') diff --git a/virt/kvm/pfncache.c b/virt/kvm/pfncache.c index 0eeb034d0674..97eec8ee3449 100644 --- a/virt/kvm/pfncache.c +++ b/virt/kvm/pfncache.c @@ -48,10 +48,10 @@ bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, unsigned long len) if (!gpc->active) return false; - if (offset_in_page(gpc->gpa) + len > PAGE_SIZE) + if (gpc->generation != slots->generation || kvm_is_error_hva(gpc->uhva)) return false; - if (gpc->generation != slots->generation || kvm_is_error_hva(gpc->uhva)) + if (offset_in_page(gpc->uhva) + len > PAGE_SIZE) return false; if (!gpc->valid) @@ -119,7 +119,7 @@ static inline bool mmu_notifier_retry_cache(struct kvm *kvm, unsigned long mmu_s static kvm_pfn_t hva_to_pfn_retry(struct gfn_to_pfn_cache *gpc) { /* Note, the new page offset may be different than the old! */ - void *old_khva = gpc->khva - offset_in_page(gpc->khva); + void *old_khva = (void *)PAGE_ALIGN_DOWN((uintptr_t)gpc->khva); kvm_pfn_t new_pfn = KVM_PFN_ERR_FAULT; void *new_khva = NULL; unsigned long mmu_seq; @@ -192,7 +192,7 @@ static kvm_pfn_t hva_to_pfn_retry(struct gfn_to_pfn_cache *gpc) gpc->valid = true; gpc->pfn = new_pfn; - gpc->khva = new_khva + offset_in_page(gpc->gpa); + gpc->khva = new_khva + offset_in_page(gpc->uhva); /* * Put the reference to the _new_ pfn. The pfn is now tracked by the @@ -217,6 +217,7 @@ static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa, bool unmap_old = false; unsigned long old_uhva; kvm_pfn_t old_pfn; + bool hva_change = false; void *old_khva; int ret; @@ -242,10 +243,10 @@ static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa, } old_pfn = gpc->pfn; - old_khva = gpc->khva - offset_in_page(gpc->khva); - old_uhva = gpc->uhva; + old_khva = (void *)PAGE_ALIGN_DOWN((uintptr_t)gpc->khva); + old_uhva = PAGE_ALIGN_DOWN(gpc->uhva); - /* If the userspace HVA is invalid, refresh that first */ + /* Refresh the userspace HVA if necessary */ if (gpc->gpa != gpa || gpc->generation != slots->generation || kvm_is_error_hva(gpc->uhva)) { gfn_t gfn = gpa_to_gfn(gpa); @@ -259,13 +260,25 @@ static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa, ret = -EFAULT; goto out; } + + /* + * Even if the GPA and/or the memslot generation changed, the + * HVA may still be the same. + */ + if (gpc->uhva != old_uhva) + hva_change = true; + } else { + gpc->uhva = old_uhva; } + /* Note: the offset must be correct before calling hva_to_pfn_retry() */ + gpc->uhva += page_offset; + /* * If the userspace HVA changed or the PFN was already invalid, * drop the lock and do the HVA to PFN lookup again. */ - if (!gpc->valid || old_uhva != gpc->uhva) { + if (!gpc->valid || hva_change) { ret = hva_to_pfn_retry(gpc); } else { /* -- cgit v1.2.3 From 721f5b0dda784829b833039fbb42f420b9f86575 Mon Sep 17 00:00:00 2001 From: Paul Durrant Date: Thu, 15 Feb 2024 15:29:04 +0000 Subject: KVM: pfncache: allow a cache to be activated with a fixed (userspace) HVA Some pfncache pages may actually be overlays on guest memory that have a fixed HVA within the VMM. It's pointless to invalidate such cached mappings if the overlay is moved so allow a cache to be activated directly with the HVA to cater for such cases. A subsequent patch will make use of this facility. 
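Below is a minimal sketch of the two activation flavours this patch ends up providing, using the kvm_gpc_activate()/kvm_gpc_activate_hva() signatures from the diff that follows; the wrapper, its convention of passing INVALID_GPA to mean "use the fixed HVA", and the u64-sized payload are invented for illustration and do not come from any in-tree user.

#include <linux/kvm_host.h>

static int demo_activate(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
			 gpa_t gpa, unsigned long vmm_hva)
{
	kvm_gpc_init(gpc, kvm);

	/* Guest-memory backed: the uhva is (re)derived from the memslots. */
	if (!kvm_is_error_gpa(gpa))
		return kvm_gpc_activate(gpc, gpa, sizeof(u64));

	/*
	 * Overlay backed by a fixed VMM mapping: track the userspace HVA
	 * directly, so memslot reconfiguration does not invalidate it.
	 */
	return kvm_gpc_activate_hva(gpc, vmm_hva, sizeof(u64));
}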
Signed-off-by: Paul Durrant Reviewed-by: David Woodhouse Link: https://lore.kernel.org/r/20240215152916.1158-10-paul@xen.org Signed-off-by: Sean Christopherson --- virt/kvm/pfncache.c | 98 ++++++++++++++++++++++++++++++++++++++--------------- 1 file changed, 70 insertions(+), 28 deletions(-) (limited to 'virt') diff --git a/virt/kvm/pfncache.c b/virt/kvm/pfncache.c index 97eec8ee3449..a47ca6fd75c2 100644 --- a/virt/kvm/pfncache.c +++ b/virt/kvm/pfncache.c @@ -48,7 +48,14 @@ bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, unsigned long len) if (!gpc->active) return false; - if (gpc->generation != slots->generation || kvm_is_error_hva(gpc->uhva)) + /* + * If the page was cached from a memslot, make sure the memslots have + * not been re-configured. + */ + if (!kvm_is_error_gpa(gpc->gpa) && gpc->generation != slots->generation) + return false; + + if (kvm_is_error_hva(gpc->uhva)) return false; if (offset_in_page(gpc->uhva) + len > PAGE_SIZE) @@ -209,11 +216,10 @@ out_error: return -EFAULT; } -static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa, +static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long uhva, unsigned long len) { - struct kvm_memslots *slots = kvm_memslots(gpc->kvm); - unsigned long page_offset = offset_in_page(gpa); + unsigned long page_offset; bool unmap_old = false; unsigned long old_uhva; kvm_pfn_t old_pfn; @@ -221,10 +227,16 @@ static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa, void *old_khva; int ret; + /* Either gpa or uhva must be valid, but not both */ + if (WARN_ON_ONCE(kvm_is_error_gpa(gpa) == kvm_is_error_hva(uhva))) + return -EINVAL; + /* - * If must fit within a single page. The 'len' argument is - * only to enforce that. + * The cached acces must fit within a single page. The 'len' argument + * exists only to enforce that. */ + page_offset = kvm_is_error_gpa(gpa) ? offset_in_page(uhva) : + offset_in_page(gpa); if (page_offset + len > PAGE_SIZE) return -EINVAL; @@ -246,29 +258,39 @@ static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa, old_khva = (void *)PAGE_ALIGN_DOWN((uintptr_t)gpc->khva); old_uhva = PAGE_ALIGN_DOWN(gpc->uhva); - /* Refresh the userspace HVA if necessary */ - if (gpc->gpa != gpa || gpc->generation != slots->generation || - kvm_is_error_hva(gpc->uhva)) { - gfn_t gfn = gpa_to_gfn(gpa); - - gpc->gpa = gpa; - gpc->generation = slots->generation; - gpc->memslot = __gfn_to_memslot(slots, gfn); - gpc->uhva = gfn_to_hva_memslot(gpc->memslot, gfn); + if (kvm_is_error_gpa(gpa)) { + gpc->gpa = INVALID_GPA; + gpc->memslot = NULL; + gpc->uhva = PAGE_ALIGN_DOWN(uhva); - if (kvm_is_error_hva(gpc->uhva)) { - ret = -EFAULT; - goto out; - } - - /* - * Even if the GPA and/or the memslot generation changed, the - * HVA may still be the same. - */ if (gpc->uhva != old_uhva) hva_change = true; } else { - gpc->uhva = old_uhva; + struct kvm_memslots *slots = kvm_memslots(gpc->kvm); + + if (gpc->gpa != gpa || gpc->generation != slots->generation || + kvm_is_error_hva(gpc->uhva)) { + gfn_t gfn = gpa_to_gfn(gpa); + + gpc->gpa = gpa; + gpc->generation = slots->generation; + gpc->memslot = __gfn_to_memslot(slots, gfn); + gpc->uhva = gfn_to_hva_memslot(gpc->memslot, gfn); + + if (kvm_is_error_hva(gpc->uhva)) { + ret = -EFAULT; + goto out; + } + + /* + * Even if the GPA and/or the memslot generation changed, the + * HVA may still be the same. 
+ */ + if (gpc->uhva != old_uhva) + hva_change = true; + } else { + gpc->uhva = old_uhva; + } } /* Note: the offset must be correct before calling hva_to_pfn_retry() */ @@ -319,7 +341,15 @@ out_unlock: int kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, unsigned long len) { - return __kvm_gpc_refresh(gpc, gpc->gpa, len); + /* + * If the GPA is valid then ignore the HVA, as a cache can be GPA-based + * or HVA-based, not both. For GPA-based caches, the HVA will be + * recomputed during refresh if necessary. + */ + unsigned long uhva = kvm_is_error_gpa(gpc->gpa) ? gpc->uhva : + KVM_HVA_ERR_BAD; + + return __kvm_gpc_refresh(gpc, gpc->gpa, uhva, len); } void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm) @@ -329,10 +359,12 @@ void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm) gpc->kvm = kvm; gpc->pfn = KVM_PFN_ERR_FAULT; + gpc->gpa = INVALID_GPA; gpc->uhva = KVM_HVA_ERR_BAD; } -int kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len) +static int __kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long uhva, + unsigned long len) { struct kvm *kvm = gpc->kvm; @@ -353,7 +385,17 @@ int kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len) gpc->active = true; write_unlock_irq(&gpc->lock); } - return __kvm_gpc_refresh(gpc, gpa, len); + return __kvm_gpc_refresh(gpc, gpa, uhva, len); +} + +int kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len) +{ + return __kvm_gpc_activate(gpc, gpa, KVM_HVA_ERR_BAD, len); +} + +int kvm_gpc_activate_hva(struct gfn_to_pfn_cache *gpc, unsigned long uhva, unsigned long len) +{ + return __kvm_gpc_activate(gpc, INVALID_GPA, uhva, len); } void kvm_gpc_deactivate(struct gfn_to_pfn_cache *gpc) -- cgit v1.2.3 From 9fa336e343b2c7f6bad6fd5fa2e1cf55e80d3ed1 Mon Sep 17 00:00:00 2001 From: Paul Durrant Date: Thu, 15 Feb 2024 15:29:14 +0000 Subject: KVM: pfncache: check the need for invalidation under read lock first When processing mmu_notifier invalidations for gpc caches, pre-check for overlap with the invalidation event while holding gpc->lock for read, and only take gpc->lock for write if the cache needs to be invalidated. Doing a pre-check without taking gpc->lock for write avoids unnecessarily contending the lock for unrelated invalidations, which is very beneficial for caches that are heavily used (but rarely subjected to mmu_notifier invalidations). Signed-off-by: Paul Durrant Reviewed-by: David Woodhouse Link: https://lore.kernel.org/r/20240215152916.1158-20-paul@xen.org Signed-off-by: Sean Christopherson --- virt/kvm/pfncache.c | 22 +++++++++++++++++++--- 1 file changed, 19 insertions(+), 3 deletions(-) (limited to 'virt') diff --git a/virt/kvm/pfncache.c b/virt/kvm/pfncache.c index a47ca6fd75c2..9ac8c9da4eda 100644 --- a/virt/kvm/pfncache.c +++ b/virt/kvm/pfncache.c @@ -29,14 +29,30 @@ void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, unsigned long start, spin_lock(&kvm->gpc_lock); list_for_each_entry(gpc, &kvm->gpc_list, list) { - write_lock_irq(&gpc->lock); + read_lock_irq(&gpc->lock); /* Only a single page so no need to care about length */ if (gpc->valid && !is_error_noslot_pfn(gpc->pfn) && gpc->uhva >= start && gpc->uhva < end) { - gpc->valid = false; + read_unlock_irq(&gpc->lock); + + /* + * There is a small window here where the cache could + * be modified, and invalidation would no longer be + * necessary. Hence check again whether invalidation + * is still necessary once the write lock has been + * acquired. 
+ */ + + write_lock_irq(&gpc->lock); + if (gpc->valid && !is_error_noslot_pfn(gpc->pfn) && + gpc->uhva >= start && gpc->uhva < end) + gpc->valid = false; + write_unlock_irq(&gpc->lock); + continue; } - write_unlock_irq(&gpc->lock); + + read_unlock_irq(&gpc->lock); } spin_unlock(&kvm->gpc_lock); } -- cgit v1.2.3 From 6addfcf27139da1356493f2a440af1252b5b7dbe Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Tue, 27 Feb 2024 11:49:18 +0000 Subject: KVM: pfncache: simplify locking and make more self-contained MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The locking on the gfn_to_pfn_cache is... interesting. And awful. There is a rwlock in ->lock which readers take to ensure protection against concurrent changes. But __kvm_gpc_refresh() makes assumptions that certain fields will not change even while it drops the write lock and performs MM operations to revalidate the target PFN and kernel mapping. Commit 93984f19e7bc ("KVM: Fully serialize gfn=>pfn cache refresh via mutex") partly addressed that — not by fixing it, but by adding a new mutex, ->refresh_lock. This prevented concurrent __kvm_gpc_refresh() calls on a given gfn_to_pfn_cache, but is still only a partial solution. There is still a theoretical race where __kvm_gpc_refresh() runs in parallel with kvm_gpc_deactivate(). While __kvm_gpc_refresh() has dropped the write lock, kvm_gpc_deactivate() clears the ->active flag and unmaps ->khva. Then __kvm_gpc_refresh() determines that the previous ->pfn and ->khva are still valid, and reinstalls those values into the structure. This leaves the gfn_to_pfn_cache with the ->valid bit set, but ->active clear. And a ->khva which looks like a reasonable kernel address but is actually unmapped. All it takes is a subsequent reactivation to cause that ->khva to be dereferenced. This would theoretically cause an oops which would look something like this: [1724749.564994] BUG: unable to handle page fault for address: ffffaa3540ace0e0 [1724749.565039] RIP: 0010:__kvm_xen_has_interrupt+0x8b/0xb0 I say "theoretically" because theoretically, that oops that was seen in production cannot happen. The code which uses the gfn_to_pfn_cache is supposed to have its *own* locking, to further paper over the fact that the gfn_to_pfn_cache's own papering-over (->refresh_lock) of its own rwlock abuse is not sufficient. For the Xen vcpu_info that external lock is the vcpu->mutex, and for the shared info it's kvm->arch.xen.xen_lock. Those locks ought to protect the gfn_to_pfn_cache against concurrent deactivation vs. refresh in all but the cases where the vcpu or kvm object is being *destroyed*, in which case the subsequent reactivation should never happen. Theoretically. Nevertheless, this locking abuse is awful and should be fixed, even if no clear explanation can be found for how the oops happened. So expand the use of the ->refresh_lock mutex to ensure serialization of activate/deactivate vs. refresh and make the pfncache locking entirely self-sufficient. This means that a future commit can simplify the locking in the callers, such as the Xen emulation code which has an outstanding problem with recursive locking of kvm->arch.xen.xen_lock, which will no longer be necessary. The rwlock abuse described above is still not best practice, although it's harmless now that the ->refresh_lock is held for the entire duration while the offending code drops the write lock, does some other stuff, then takes the write lock again and assumes nothing changed. 
That can also be fixed^W cleaned up in a subsequent commit, but this commit is a simpler basis for the Xen deadlock fix mentioned above. Signed-off-by: David Woodhouse Reviewed-by: Paul Durrant Link: https://lore.kernel.org/r/20240227115648.3104-5-dwmw2@infradead.org [sean: use guard(mutex) to fix a missed unlock] Signed-off-by: Sean Christopherson --- virt/kvm/pfncache.c | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) (limited to 'virt') diff --git a/virt/kvm/pfncache.c b/virt/kvm/pfncache.c index 9ac8c9da4eda..4e07112a24c2 100644 --- a/virt/kvm/pfncache.c +++ b/virt/kvm/pfncache.c @@ -256,12 +256,7 @@ static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned l if (page_offset + len > PAGE_SIZE) return -EINVAL; - /* - * If another task is refreshing the cache, wait for it to complete. - * There is no guarantee that concurrent refreshes will see the same - * gpa, memslots generation, etc..., so they must be fully serialized. - */ - mutex_lock(&gpc->refresh_lock); + lockdep_assert_held(&gpc->refresh_lock); write_lock_irq(&gpc->lock); @@ -347,8 +342,6 @@ static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned l out_unlock: write_unlock_irq(&gpc->lock); - mutex_unlock(&gpc->refresh_lock); - if (unmap_old) gpc_unmap(old_pfn, old_khva); @@ -357,13 +350,16 @@ out_unlock: int kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, unsigned long len) { + unsigned long uhva; + + guard(mutex)(&gpc->refresh_lock); + /* * If the GPA is valid then ignore the HVA, as a cache can be GPA-based * or HVA-based, not both. For GPA-based caches, the HVA will be * recomputed during refresh if necessary. */ - unsigned long uhva = kvm_is_error_gpa(gpc->gpa) ? gpc->uhva : - KVM_HVA_ERR_BAD; + uhva = kvm_is_error_gpa(gpc->gpa) ? gpc->uhva : KVM_HVA_ERR_BAD; return __kvm_gpc_refresh(gpc, gpc->gpa, uhva, len); } @@ -377,6 +373,7 @@ void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm) gpc->pfn = KVM_PFN_ERR_FAULT; gpc->gpa = INVALID_GPA; gpc->uhva = KVM_HVA_ERR_BAD; + gpc->active = gpc->valid = false; } static int __kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long uhva, @@ -384,6 +381,8 @@ static int __kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned { struct kvm *kvm = gpc->kvm; + guard(mutex)(&gpc->refresh_lock); + if (!gpc->active) { if (KVM_BUG_ON(gpc->valid, kvm)) return -EIO; @@ -420,6 +419,8 @@ void kvm_gpc_deactivate(struct gfn_to_pfn_cache *gpc) kvm_pfn_t old_pfn; void *old_khva; + guard(mutex)(&gpc->refresh_lock); + if (gpc->active) { /* * Deactivate the cache before removing it from the list, KVM -- cgit v1.2.3
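The invalidation pre-check patch earlier in this series drops the read lock and then re-checks under the write lock before clearing gpc->valid, a classic double-checked pattern. The userspace sketch below (pthreads, with a stand-in cache structure) illustrates only that pattern and is not the kernel code:

#include <pthread.h>
#include <stdbool.h>

struct demo_cache {
	pthread_rwlock_t lock;
	unsigned long uhva;
	bool valid;
};

static bool overlaps(const struct demo_cache *c, unsigned long start,
		     unsigned long end)
{
	return c->valid && c->uhva >= start && c->uhva < end;
}

static void demo_invalidate(struct demo_cache *c, unsigned long start,
			    unsigned long end)
{
	bool hit;

	/* Cheap pre-check: don't contend the write lock for unrelated ranges. */
	pthread_rwlock_rdlock(&c->lock);
	hit = overlaps(c, start, end);
	pthread_rwlock_unlock(&c->lock);

	if (!hit)
		return;

	/*
	 * The cache may have changed between dropping the read lock and
	 * taking the write lock, so re-check before invalidating.
	 */
	pthread_rwlock_wrlock(&c->lock);
	if (overlaps(c, start, end))
		c->valid = false;
	pthread_rwlock_unlock(&c->lock);
}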