| author | Paolo Bonzini <pbonzini@redhat.com> | 2022-12-02 14:00:53 -0500 |
|---|---|---|
| committer | Paolo Bonzini <pbonzini@redhat.com> | 2022-12-02 14:01:43 -0500 |
| commit | 5656374b168c98377b6feee8d7500993eebda230 (patch) | |
| tree | b7511b4b25fa38aa77fae2342420d85e3e05cd4a /virt | |
| parent | 74bee0cad8dcd8ddec5e763c369239fc5990676a (diff) | |
| parent | 06e155c44aa0e7921aa44d3c67f8ea464b16cb75 (diff) | |
Merge branch 'gpc-fixes' of git://git.infradead.org/users/dwmw2/linux into HEAD
Pull Xen-for-KVM changes from David Woodhouse:
* add support for 32-bit guests in SCHEDOP_poll
* the rest of the gfn-to-pfn cache API cleanup
"I still haven't reinstated the last of those patches to make gpc->len
immutable."
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
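
For reference, a minimal sketch (not part of the commit) of how a caller drives the reworked gfn-to-pfn cache API after this merge: the cache is bound to its struct kvm (and optional vCPU) at init time, so check/refresh/deactivate no longer take a kvm or gpa argument. The function signatures are taken from the diff below; the demo_* wrappers and the host-only usage flag are illustrative assumptions.

/*
 * Illustrative consumer of the post-merge gfn_to_pfn_cache API.
 */
#include <linux/kvm_host.h>

static int demo_init_cache(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
                           gpa_t gpa)
{
        /* Bind the cache to the VM; no vCPU needed for host-only usage. */
        kvm_gpc_init(gpc, kvm, NULL, KVM_HOST_USES_PFN);

        /* Map the guest page; the GPA is remembered inside the cache. */
        return kvm_gpc_activate(gpc, gpa, PAGE_SIZE);
}

static int demo_read_u64(struct gfn_to_pfn_cache *gpc, u64 *val)
{
        unsigned long flags;

        read_lock_irqsave(&gpc->lock, flags);
        while (!kvm_gpc_check(gpc, sizeof(*val))) {
                read_unlock_irqrestore(&gpc->lock, flags);

                /* Re-fault the mapping; only the length is passed now. */
                if (kvm_gpc_refresh(gpc, sizeof(*val)))
                        return -EFAULT;

                read_lock_irqsave(&gpc->lock, flags);
        }

        *val = *(u64 *)gpc->khva;
        read_unlock_irqrestore(&gpc->lock, flags);
        return 0;
}

static void demo_fini_cache(struct gfn_to_pfn_cache *gpc)
{
        /* Unmaps the page and drops the cache from the VM's gpc_list. */
        kvm_gpc_deactivate(gpc);
}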
Diffstat (limited to 'virt')
-rw-r--r-- | virt/kvm/pfncache.c | 104 |
1 file changed, 50 insertions, 54 deletions
diff --git a/virt/kvm/pfncache.c b/virt/kvm/pfncache.c
index b4295474519f..2d6aba677830 100644
--- a/virt/kvm/pfncache.c
+++ b/virt/kvm/pfncache.c
@@ -76,19 +76,17 @@ void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, unsigned long start,
 	}
 }
 
-bool kvm_gpc_check(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, gpa_t gpa,
-		   unsigned long len)
+bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, unsigned long len)
 {
-	struct kvm_memslots *slots = kvm_memslots(kvm);
+	struct kvm_memslots *slots = kvm_memslots(gpc->kvm);
 
 	if (!gpc->active)
 		return false;
 
-	if ((gpa & ~PAGE_MASK) + len > PAGE_SIZE)
+	if ((gpc->gpa & ~PAGE_MASK) + len > PAGE_SIZE)
 		return false;
 
-	if (gpc->gpa != gpa || gpc->generation != slots->generation ||
-	    kvm_is_error_hva(gpc->uhva))
+	if (gpc->generation != slots->generation || kvm_is_error_hva(gpc->uhva))
 		return false;
 
 	if (!gpc->valid)
@@ -139,7 +137,7 @@ static inline bool mmu_notifier_retry_cache(struct kvm *kvm, unsigned long mmu_s
 	return kvm->mmu_invalidate_seq != mmu_seq;
 }
 
-static kvm_pfn_t hva_to_pfn_retry(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
+static kvm_pfn_t hva_to_pfn_retry(struct gfn_to_pfn_cache *gpc)
 {
 	/* Note, the new page offset may be different than the old! */
 	void *old_khva = gpc->khva - offset_in_page(gpc->khva);
@@ -159,7 +157,7 @@ static kvm_pfn_t hva_to_pfn_retry(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
 	gpc->valid = false;
 
 	do {
-		mmu_seq = kvm->mmu_invalidate_seq;
+		mmu_seq = gpc->kvm->mmu_invalidate_seq;
 		smp_rmb();
 
 		write_unlock_irq(&gpc->lock);
@@ -217,7 +215,7 @@ static kvm_pfn_t hva_to_pfn_retry(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
 		 * attempting to refresh.
 		 */
 		WARN_ON_ONCE(gpc->valid);
-	} while (mmu_notifier_retry_cache(kvm, mmu_seq));
+	} while (mmu_notifier_retry_cache(gpc->kvm, mmu_seq));
 
 	gpc->valid = true;
 	gpc->pfn = new_pfn;
@@ -238,10 +236,10 @@ out_error:
 	return -EFAULT;
 }
 
-int kvm_gpc_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, gpa_t gpa,
-		    unsigned long len)
+static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa,
+			     unsigned long len)
 {
-	struct kvm_memslots *slots = kvm_memslots(kvm);
+	struct kvm_memslots *slots = kvm_memslots(gpc->kvm);
 	unsigned long page_offset = gpa & ~PAGE_MASK;
 	bool unmap_old = false;
 	unsigned long old_uhva;
@@ -295,7 +293,7 @@ int kvm_gpc_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, gpa_t gpa,
 	 * drop the lock and do the HVA to PFN lookup again.
 	 */
 	if (!gpc->valid || old_uhva != gpc->uhva) {
-		ret = hva_to_pfn_retry(kvm, gpc);
+		ret = hva_to_pfn_retry(gpc);
 	} else {
 		/*
 		 * If the HVA→PFN mapping was already valid, don't unmap it.
@@ -303,9 +301,8 @@ int kvm_gpc_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, gpa_t gpa,
 		 * may have changed.
 		 */
 		gpc->khva = old_khva + page_offset;
-		old_pfn = KVM_PFN_ERR_FAULT;
-		old_khva = NULL;
 		ret = 0;
+		goto out_unlock;
 	}
 
 out:
@@ -333,55 +330,37 @@ out_unlock:
 
 	return ret;
 }
-EXPORT_SYMBOL_GPL(kvm_gpc_refresh);
 
-void kvm_gpc_unmap(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
+int kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, unsigned long len)
 {
-	void *old_khva;
-	kvm_pfn_t old_pfn;
-
-	mutex_lock(&gpc->refresh_lock);
-	write_lock_irq(&gpc->lock);
-
-	gpc->valid = false;
-
-	old_khva = gpc->khva - offset_in_page(gpc->khva);
-	old_pfn = gpc->pfn;
-
-	/*
-	 * We can leave the GPA → uHVA map cache intact but the PFN
-	 * lookup will need to be redone even for the same page.
-	 */
-	gpc->khva = NULL;
-	gpc->pfn = KVM_PFN_ERR_FAULT;
-
-	write_unlock_irq(&gpc->lock);
-	mutex_unlock(&gpc->refresh_lock);
-
-	gpc_unmap_khva(old_pfn, old_khva);
+	return __kvm_gpc_refresh(gpc, gpc->gpa, len);
 }
-EXPORT_SYMBOL_GPL(kvm_gpc_unmap);
+EXPORT_SYMBOL_GPL(kvm_gpc_refresh);
 
-void kvm_gpc_init(struct gfn_to_pfn_cache *gpc)
+void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm,
+		  struct kvm_vcpu *vcpu, enum pfn_cache_usage usage)
 {
+	WARN_ON_ONCE(!usage || (usage & KVM_GUEST_AND_HOST_USE_PFN) != usage);
+	WARN_ON_ONCE((usage & KVM_GUEST_USES_PFN) && !vcpu);
+
 	rwlock_init(&gpc->lock);
 	mutex_init(&gpc->refresh_lock);
+
+	gpc->kvm = kvm;
+	gpc->vcpu = vcpu;
+	gpc->usage = usage;
+	gpc->pfn = KVM_PFN_ERR_FAULT;
+	gpc->uhva = KVM_HVA_ERR_BAD;
 }
 EXPORT_SYMBOL_GPL(kvm_gpc_init);
 
-int kvm_gpc_activate(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
-		     struct kvm_vcpu *vcpu, enum pfn_cache_usage usage,
-		     gpa_t gpa, unsigned long len)
+int kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len)
 {
-	WARN_ON_ONCE(!usage || (usage & KVM_GUEST_AND_HOST_USE_PFN) != usage);
+	struct kvm *kvm = gpc->kvm;
 
 	if (!gpc->active) {
-		gpc->khva = NULL;
-		gpc->pfn = KVM_PFN_ERR_FAULT;
-		gpc->uhva = KVM_HVA_ERR_BAD;
-		gpc->vcpu = vcpu;
-		gpc->usage = usage;
-		gpc->valid = false;
+		if (KVM_BUG_ON(gpc->valid, kvm))
+			return -EIO;
 
 		spin_lock(&kvm->gpc_lock);
 		list_add(&gpc->list, &kvm->gpc_list);
@@ -396,12 +375,16 @@ int kvm_gpc_activate(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
 		gpc->active = true;
 		write_unlock_irq(&gpc->lock);
 	}
-	return kvm_gpc_refresh(kvm, gpc, gpa, len);
+	return __kvm_gpc_refresh(gpc, gpa, len);
 }
 EXPORT_SYMBOL_GPL(kvm_gpc_activate);
 
-void kvm_gpc_deactivate(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
+void kvm_gpc_deactivate(struct gfn_to_pfn_cache *gpc)
 {
+	struct kvm *kvm = gpc->kvm;
+	kvm_pfn_t old_pfn;
+	void *old_khva;
+
 	if (gpc->active) {
 		/*
 		 * Deactivate the cache before removing it from the list, KVM
@@ -410,13 +393,26 @@ void kvm_gpc_deactivate(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
 		 */
 		write_lock_irq(&gpc->lock);
 		gpc->active = false;
+		gpc->valid = false;
+
+		/*
+		 * Leave the GPA => uHVA cache intact, it's protected by the
+		 * memslot generation.  The PFN lookup needs to be redone every
+		 * time as mmu_notifier protection is lost when the cache is
+		 * removed from the VM's gpc_list.
+		 */
+		old_khva = gpc->khva - offset_in_page(gpc->khva);
+		gpc->khva = NULL;
+
+		old_pfn = gpc->pfn;
+		gpc->pfn = KVM_PFN_ERR_FAULT;
 		write_unlock_irq(&gpc->lock);
 
 		spin_lock(&kvm->gpc_lock);
 		list_del(&gpc->list);
 		spin_unlock(&kvm->gpc_lock);
 
-		kvm_gpc_unmap(kvm, gpc);
+		gpc_unmap_khva(old_pfn, old_khva);
 	}
 }
 EXPORT_SYMBOL_GPL(kvm_gpc_deactivate);