Diffstat (limited to 'virt/kvm/pfncache.c')
-rw-r--r--  virt/kvm/pfncache.c | 251
1 file changed, 143 insertions(+), 108 deletions(-)
diff --git a/virt/kvm/pfncache.c b/virt/kvm/pfncache.c
index 2d6aba677830..4e07112a24c2 100644
--- a/virt/kvm/pfncache.c
+++ b/virt/kvm/pfncache.c
@@ -25,55 +25,36 @@
void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, unsigned long start,
unsigned long end, bool may_block)
{
- DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS);
struct gfn_to_pfn_cache *gpc;
- bool evict_vcpus = false;
spin_lock(&kvm->gpc_lock);
list_for_each_entry(gpc, &kvm->gpc_list, list) {
- write_lock_irq(&gpc->lock);
+ read_lock_irq(&gpc->lock);
/* Only a single page so no need to care about length */
if (gpc->valid && !is_error_noslot_pfn(gpc->pfn) &&
gpc->uhva >= start && gpc->uhva < end) {
- gpc->valid = false;
+ read_unlock_irq(&gpc->lock);
/*
- * If a guest vCPU could be using the physical address,
- * it needs to be forced out of guest mode.
+ * There is a small window here where the cache could
+ * be modified, and invalidation would no longer be
+ * necessary. Hence check again whether invalidation
+ * is still necessary once the write lock has been
+ * acquired.
*/
- if (gpc->usage & KVM_GUEST_USES_PFN) {
- if (!evict_vcpus) {
- evict_vcpus = true;
- bitmap_zero(vcpu_bitmap, KVM_MAX_VCPUS);
- }
- __set_bit(gpc->vcpu->vcpu_idx, vcpu_bitmap);
- }
- }
- write_unlock_irq(&gpc->lock);
- }
- spin_unlock(&kvm->gpc_lock);
-
- if (evict_vcpus) {
- /*
- * KVM needs to ensure the vCPU is fully out of guest context
- * before allowing the invalidation to continue.
- */
- unsigned int req = KVM_REQ_OUTSIDE_GUEST_MODE;
- bool called;
- /*
- * If the OOM reaper is active, then all vCPUs should have
- * been stopped already, so perform the request without
- * KVM_REQUEST_WAIT and be sad if any needed to be IPI'd.
- */
- if (!may_block)
- req &= ~KVM_REQUEST_WAIT;
-
- called = kvm_make_vcpus_request_mask(kvm, req, vcpu_bitmap);
+ write_lock_irq(&gpc->lock);
+ if (gpc->valid && !is_error_noslot_pfn(gpc->pfn) &&
+ gpc->uhva >= start && gpc->uhva < end)
+ gpc->valid = false;
+ write_unlock_irq(&gpc->lock);
+ continue;
+ }
- WARN_ON_ONCE(called && !may_block);
+ read_unlock_irq(&gpc->lock);
}
+ spin_unlock(&kvm->gpc_lock);
}
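
The hunk above replaces the eager write-lock scan (and the vCPU-eviction machinery) with an optimistic scheme: entries are scanned under the read lock, and the write lock is taken only for entries that actually fall in the invalidated range. Because the read lock must be dropped before the write lock can be taken, the predicate is re-tested once the write lock is held. A minimal userspace sketch of that re-check pattern, using pthreads rather than the kernel's rwlock_t (all names here are illustrative):

#include <pthread.h>
#include <stdbool.h>

struct cache_entry {
	pthread_rwlock_t lock;
	bool valid;
	unsigned long uhva;
};

static void invalidate_if_in_range(struct cache_entry *e,
				   unsigned long start, unsigned long end)
{
	pthread_rwlock_rdlock(&e->lock);
	if (!(e->valid && e->uhva >= start && e->uhva < end)) {
		pthread_rwlock_unlock(&e->lock);
		return;
	}
	pthread_rwlock_unlock(&e->lock);

	/*
	 * Window: another thread may refresh or invalidate the entry
	 * here, so the predicate must be re-tested under the write lock.
	 */
	pthread_rwlock_wrlock(&e->lock);
	if (e->valid && e->uhva >= start && e->uhva < end)
		e->valid = false;
	pthread_rwlock_unlock(&e->lock);
}
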
bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, unsigned long len)
@@ -83,10 +64,17 @@ bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, unsigned long len)
if (!gpc->active)
return false;
- if ((gpc->gpa & ~PAGE_MASK) + len > PAGE_SIZE)
+ /*
+ * If the page was cached from a memslot, make sure the memslots have
+ * not been re-configured.
+ */
+ if (!kvm_is_error_gpa(gpc->gpa) && gpc->generation != slots->generation)
+ return false;
+
+ if (kvm_is_error_hva(gpc->uhva))
return false;
- if (gpc->generation != slots->generation || kvm_is_error_hva(gpc->uhva))
+ if (offset_in_page(gpc->uhva) + len > PAGE_SIZE)
return false;
if (!gpc->valid)
@@ -94,19 +82,33 @@ bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, unsigned long len)
return true;
}
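
kvm_gpc_check() now validates the memslot generation only for GPA-based caches and bounds the access by the HVA's page offset rather than the GPA's. The arithmetic of that last check, restated as a standalone example (PAGE_SIZE and offset_in_page() are re-defined locally for the sketch, assuming 4 KiB pages):

#include <stdio.h>

#define PAGE_SIZE		4096UL
#define PAGE_MASK		(~(PAGE_SIZE - 1))
#define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)

int main(void)
{
	unsigned long uhva = 0x7f0000001f00UL;	/* offset 0xf00 within its page */

	/* 0xf00 + 0x100 == PAGE_SIZE: the access still fits in one page */
	printf("%s\n", offset_in_page(uhva) + 0x100 > PAGE_SIZE ? "reject" : "ok");

	/* 0xf00 + 0x101 crosses into the next page: check fails */
	printf("%s\n", offset_in_page(uhva) + 0x101 > PAGE_SIZE ? "reject" : "ok");
	return 0;
}
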
-EXPORT_SYMBOL_GPL(kvm_gpc_check);
-static void gpc_unmap_khva(kvm_pfn_t pfn, void *khva)
+static void *gpc_map(kvm_pfn_t pfn)
{
- /* Unmap the old pfn/page if it was mapped before. */
- if (!is_error_noslot_pfn(pfn) && khva) {
- if (pfn_valid(pfn))
- kunmap(pfn_to_page(pfn));
+ if (pfn_valid(pfn))
+ return kmap(pfn_to_page(pfn));
+
#ifdef CONFIG_HAS_IOMEM
- else
- memunmap(khva);
+ return memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB);
+#else
+ return NULL;
#endif
+}
+
+static void gpc_unmap(kvm_pfn_t pfn, void *khva)
+{
+ /* Unmap the old pfn/page if it was mapped before. */
+ if (is_error_noslot_pfn(pfn) || !khva)
+ return;
+
+ if (pfn_valid(pfn)) {
+ kunmap(pfn_to_page(pfn));
+ return;
}
+
+#ifdef CONFIG_HAS_IOMEM
+ memunmap(khva);
+#endif
}
static inline bool mmu_notifier_retry_cache(struct kvm *kvm, unsigned long mmu_seq)
@@ -140,7 +142,7 @@ static inline bool mmu_notifier_retry_cache(struct kvm *kvm, unsigned long mmu_seq)
static kvm_pfn_t hva_to_pfn_retry(struct gfn_to_pfn_cache *gpc)
{
/* Note, the new page offset may be different than the old! */
- void *old_khva = gpc->khva - offset_in_page(gpc->khva);
+ void *old_khva = (void *)PAGE_ALIGN_DOWN((uintptr_t)gpc->khva);
kvm_pfn_t new_pfn = KVM_PFN_ERR_FAULT;
void *new_khva = NULL;
unsigned long mmu_seq;
@@ -175,7 +177,7 @@ static kvm_pfn_t hva_to_pfn_retry(struct gfn_to_pfn_cache *gpc)
* the existing mapping and didn't create a new one.
*/
if (new_khva != old_khva)
- gpc_unmap_khva(new_pfn, new_khva);
+ gpc_unmap(new_pfn, new_khva);
kvm_release_pfn_clean(new_pfn);
@@ -192,20 +194,14 @@ static kvm_pfn_t hva_to_pfn_retry(struct gfn_to_pfn_cache *gpc)
* pfn. Note, kmap() and memremap() can both sleep, so this
* too must be done outside of gpc->lock!
*/
- if (gpc->usage & KVM_HOST_USES_PFN) {
- if (new_pfn == gpc->pfn) {
- new_khva = old_khva;
- } else if (pfn_valid(new_pfn)) {
- new_khva = kmap(pfn_to_page(new_pfn));
-#ifdef CONFIG_HAS_IOMEM
- } else {
- new_khva = memremap(pfn_to_hpa(new_pfn), PAGE_SIZE, MEMREMAP_WB);
-#endif
- }
- if (!new_khva) {
- kvm_release_pfn_clean(new_pfn);
- goto out_error;
- }
+ if (new_pfn == gpc->pfn)
+ new_khva = old_khva;
+ else
+ new_khva = gpc_map(new_pfn);
+
+ if (!new_khva) {
+ kvm_release_pfn_clean(new_pfn);
+ goto out_error;
}
write_lock_irq(&gpc->lock);
@@ -219,7 +215,7 @@ static kvm_pfn_t hva_to_pfn_retry(struct gfn_to_pfn_cache *gpc)
gpc->valid = true;
gpc->pfn = new_pfn;
- gpc->khva = new_khva + (gpc->gpa & ~PAGE_MASK);
+ gpc->khva = new_khva + offset_in_page(gpc->uhva);
/*
* Put the reference to the _new_ pfn. The pfn is now tracked by the
@@ -236,30 +232,31 @@ out_error:
return -EFAULT;
}
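
For context around the hunks above: hva_to_pfn_retry() resolves the pfn with gpc->lock dropped (the lookup and kmap()/memremap() can sleep) and retries if an mmu-notifier invalidation ran concurrently, as detected via a sequence count. A loose userspace analog of that sample/lookup/re-sample loop (the names and the atomic counter are stand-ins, not the kernel's mmu-notifier API):

#include <stdatomic.h>

extern atomic_ulong invalidate_seq;		/* bumped by each invalidation */
extern unsigned long slow_lookup(unsigned long uhva);

static unsigned long lookup_retry(unsigned long uhva)
{
	unsigned long seq, pfn;

	do {
		seq = atomic_load(&invalidate_seq);
		pfn = slow_lookup(uhva);	/* may sleep; no locks held */
		/* re-sample: retry if an invalidation raced the lookup */
	} while (atomic_load(&invalidate_seq) != seq);

	return pfn;
}
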
-static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa,
+static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long uhva,
unsigned long len)
{
- struct kvm_memslots *slots = kvm_memslots(gpc->kvm);
- unsigned long page_offset = gpa & ~PAGE_MASK;
+ unsigned long page_offset;
bool unmap_old = false;
unsigned long old_uhva;
kvm_pfn_t old_pfn;
+ bool hva_change = false;
void *old_khva;
int ret;
+ /* Either gpa or uhva must be valid, but not both */
+ if (WARN_ON_ONCE(kvm_is_error_gpa(gpa) == kvm_is_error_hva(uhva)))
+ return -EINVAL;
+
/*
- * If must fit within a single page. The 'len' argument is
- * only to enforce that.
+ * The cached access must fit within a single page. The 'len' argument
+ * exists only to enforce that.
*/
+ page_offset = kvm_is_error_gpa(gpa) ? offset_in_page(uhva) :
+ offset_in_page(gpa);
if (page_offset + len > PAGE_SIZE)
return -EINVAL;
- /*
- * If another task is refreshing the cache, wait for it to complete.
- * There is no guarantee that concurrent refreshes will see the same
- * gpa, memslots generation, etc..., so they must be fully serialized.
- */
- mutex_lock(&gpc->refresh_lock);
+ lockdep_assert_held(&gpc->refresh_lock);
write_lock_irq(&gpc->lock);
@@ -269,30 +266,52 @@ static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa,
}
old_pfn = gpc->pfn;
- old_khva = gpc->khva - offset_in_page(gpc->khva);
- old_uhva = gpc->uhva;
-
- /* If the userspace HVA is invalid, refresh that first */
- if (gpc->gpa != gpa || gpc->generation != slots->generation ||
- kvm_is_error_hva(gpc->uhva)) {
- gfn_t gfn = gpa_to_gfn(gpa);
-
- gpc->gpa = gpa;
- gpc->generation = slots->generation;
- gpc->memslot = __gfn_to_memslot(slots, gfn);
- gpc->uhva = gfn_to_hva_memslot(gpc->memslot, gfn);
-
- if (kvm_is_error_hva(gpc->uhva)) {
- ret = -EFAULT;
- goto out;
+ old_khva = (void *)PAGE_ALIGN_DOWN((uintptr_t)gpc->khva);
+ old_uhva = PAGE_ALIGN_DOWN(gpc->uhva);
+
+ if (kvm_is_error_gpa(gpa)) {
+ gpc->gpa = INVALID_GPA;
+ gpc->memslot = NULL;
+ gpc->uhva = PAGE_ALIGN_DOWN(uhva);
+
+ if (gpc->uhva != old_uhva)
+ hva_change = true;
+ } else {
+ struct kvm_memslots *slots = kvm_memslots(gpc->kvm);
+
+ if (gpc->gpa != gpa || gpc->generation != slots->generation ||
+ kvm_is_error_hva(gpc->uhva)) {
+ gfn_t gfn = gpa_to_gfn(gpa);
+
+ gpc->gpa = gpa;
+ gpc->generation = slots->generation;
+ gpc->memslot = __gfn_to_memslot(slots, gfn);
+ gpc->uhva = gfn_to_hva_memslot(gpc->memslot, gfn);
+
+ if (kvm_is_error_hva(gpc->uhva)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ /*
+ * Even if the GPA and/or the memslot generation changed, the
+ * HVA may still be the same.
+ */
+ if (gpc->uhva != old_uhva)
+ hva_change = true;
+ } else {
+ gpc->uhva = old_uhva;
}
}
+ /* Note: the offset must be correct before calling hva_to_pfn_retry() */
+ gpc->uhva += page_offset;
+
/*
* If the userspace HVA changed or the PFN was already invalid,
* drop the lock and do the HVA to PFN lookup again.
*/
- if (!gpc->valid || old_uhva != gpc->uhva) {
+ if (!gpc->valid || hva_change) {
ret = hva_to_pfn_retry(gpc);
} else {
/*
@@ -323,41 +342,47 @@ static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa,
out_unlock:
write_unlock_irq(&gpc->lock);
- mutex_unlock(&gpc->refresh_lock);
-
if (unmap_old)
- gpc_unmap_khva(old_pfn, old_khva);
+ gpc_unmap(old_pfn, old_khva);
return ret;
}
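
__kvm_gpc_refresh() now takes both a gpa and a uhva, and the WARN_ON_ONCE at its top encodes "exactly one of the two must be valid": the comparison is true, and the call rejected, precisely when both are errors or both are valid. The same check in self-contained form, with the error predicates stubbed for illustration:

#include <errno.h>
#include <stdbool.h>

/* Stubbed predicates, for illustration only. */
static bool is_error_gpa(unsigned long long gpa) { return gpa == ~0ULL; }
static bool is_error_hva(unsigned long hva)      { return hva == ~0UL; }

/* Accept GPA-based or HVA-based, reject both-valid and both-invalid. */
static int check_refresh_args(unsigned long long gpa, unsigned long uhva)
{
	if (is_error_gpa(gpa) == is_error_hva(uhva))
		return -EINVAL;
	return 0;
}
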
int kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, unsigned long len)
{
- return __kvm_gpc_refresh(gpc, gpc->gpa, len);
+ unsigned long uhva;
+
+ guard(mutex)(&gpc->refresh_lock);
+
+ /*
+ * If the GPA is valid then ignore the HVA, as a cache can be GPA-based
+ * or HVA-based, not both. For GPA-based caches, the HVA will be
+ * recomputed during refresh if necessary.
+ */
+ uhva = kvm_is_error_gpa(gpc->gpa) ? gpc->uhva : KVM_HVA_ERR_BAD;
+
+ return __kvm_gpc_refresh(gpc, gpc->gpa, uhva, len);
}
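
kvm_gpc_refresh() and the activate/deactivate paths below now serialize on refresh_lock via guard(mutex) from the kernel's <linux/cleanup.h>, which drops the mutex automatically when the scope is left, so the explicit unlock in __kvm_gpc_refresh() goes away. A rough userspace equivalent built on the same compiler feature, __attribute__((cleanup)) (GCC/Clang; the macro names here are invented):

#include <pthread.h>

static void unlock_cleanup(pthread_mutex_t **m)
{
	pthread_mutex_unlock(*m);
}

/* Two-level paste so that __LINE__ expands into a unique variable name. */
#define GUARD_CAT_(a, b)	a##b
#define GUARD_CAT(a, b)		GUARD_CAT_(a, b)
#define guard_mutex(m)						\
	pthread_mutex_t *GUARD_CAT(guard_, __LINE__)		\
		__attribute__((cleanup(unlock_cleanup))) = (m);	\
	pthread_mutex_lock(m)

static pthread_mutex_t refresh_lock = PTHREAD_MUTEX_INITIALIZER;

static int refresh(void)
{
	guard_mutex(&refresh_lock);
	/* any return path from here on unlocks refresh_lock automatically */
	return 0;
}
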
-EXPORT_SYMBOL_GPL(kvm_gpc_refresh);
-void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm,
- struct kvm_vcpu *vcpu, enum pfn_cache_usage usage)
+void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm)
{
- WARN_ON_ONCE(!usage || (usage & KVM_GUEST_AND_HOST_USE_PFN) != usage);
- WARN_ON_ONCE((usage & KVM_GUEST_USES_PFN) && !vcpu);
-
rwlock_init(&gpc->lock);
mutex_init(&gpc->refresh_lock);
gpc->kvm = kvm;
- gpc->vcpu = vcpu;
- gpc->usage = usage;
gpc->pfn = KVM_PFN_ERR_FAULT;
+ gpc->gpa = INVALID_GPA;
gpc->uhva = KVM_HVA_ERR_BAD;
+ gpc->active = gpc->valid = false;
}
-EXPORT_SYMBOL_GPL(kvm_gpc_init);
-int kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len)
+static int __kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long uhva,
+ unsigned long len)
{
struct kvm *kvm = gpc->kvm;
+ guard(mutex)(&gpc->refresh_lock);
+
if (!gpc->active) {
if (KVM_BUG_ON(gpc->valid, kvm))
return -EIO;
@@ -375,9 +400,18 @@ int kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len)
gpc->active = true;
write_unlock_irq(&gpc->lock);
}
- return __kvm_gpc_refresh(gpc, gpa, len);
+ return __kvm_gpc_refresh(gpc, gpa, uhva, len);
+}
+
+int kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len)
+{
+ return __kvm_gpc_activate(gpc, gpa, KVM_HVA_ERR_BAD, len);
+}
+
+int kvm_gpc_activate_hva(struct gfn_to_pfn_cache *gpc, unsigned long uhva, unsigned long len)
+{
+ return __kvm_gpc_activate(gpc, INVALID_GPA, uhva, len);
}
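
The two wrappers let a caller pick the binding at activation time: kvm_gpc_activate() keys the cache by GPA and recomputes the HVA from the memslots on refresh, while kvm_gpc_activate_hva() pins a fixed userspace address and leaves gpc->gpa as INVALID_GPA. A hypothetical caller, for illustration only (the surrounding function and its parameters are invented; the gpc API names are from this patch):

#include <linux/kvm_host.h>

static int example_setup(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
			 gpa_t gpa, unsigned long uhva, bool by_hva)
{
	kvm_gpc_init(gpc, kvm);

	if (by_hva)
		/* HVA-based: no memslot lookup, generation checks skipped */
		return kvm_gpc_activate_hva(gpc, uhva, sizeof(u64));

	/* GPA-based: uhva is derived from the memslots at refresh time */
	return kvm_gpc_activate(gpc, gpa, sizeof(u64));
}
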
-EXPORT_SYMBOL_GPL(kvm_gpc_activate);
void kvm_gpc_deactivate(struct gfn_to_pfn_cache *gpc)
{
@@ -385,6 +419,8 @@ void kvm_gpc_deactivate(struct gfn_to_pfn_cache *gpc)
kvm_pfn_t old_pfn;
void *old_khva;
+ guard(mutex)(&gpc->refresh_lock);
+
if (gpc->active) {
/*
* Deactivate the cache before removing it from the list, KVM
@@ -412,7 +448,6 @@ void kvm_gpc_deactivate(struct gfn_to_pfn_cache *gpc)
list_del(&gpc->list);
spin_unlock(&kvm->gpc_lock);
- gpc_unmap_khva(old_pfn, old_khva);
+ gpc_unmap(old_pfn, old_khva);
}
}
-EXPORT_SYMBOL_GPL(kvm_gpc_deactivate);