Diffstat (limited to 'virt/kvm')
-rw-r--r--  virt/kvm/kvm_main.c  | 116
-rw-r--r--  virt/kvm/kvm_mm.h    |   6
-rw-r--r--  virt/kvm/pfncache.c  |  50
-rw-r--r--  virt/kvm/vfio.c      |   2
4 files changed, 77 insertions(+), 97 deletions(-)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index fb49c2a60200..14841acb8b95 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -311,8 +311,7 @@ bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
return called;
}
-bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req,
- struct kvm_vcpu *except)
+bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
{
struct kvm_vcpu *vcpu;
struct cpumask *cpus;
@@ -325,22 +324,14 @@ bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req,
cpus = this_cpu_cpumask_var_ptr(cpu_kick_mask);
cpumask_clear(cpus);
- kvm_for_each_vcpu(i, vcpu, kvm) {
- if (vcpu == except)
- continue;
+ kvm_for_each_vcpu(i, vcpu, kvm)
kvm_make_vcpu_request(vcpu, req, cpus, me);
- }
called = kvm_kick_many_cpus(cpus, !!(req & KVM_REQUEST_WAIT));
put_cpu();
return called;
}
-
-bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
-{
- return kvm_make_all_cpus_request_except(kvm, req, NULL);
-}
EXPORT_SYMBOL_GPL(kvm_make_all_cpus_request);
void kvm_flush_remote_tlbs(struct kvm *kvm)
@@ -401,12 +392,17 @@ static void kvm_flush_shadow_all(struct kvm *kvm)
static inline void *mmu_memory_cache_alloc_obj(struct kvm_mmu_memory_cache *mc,
gfp_t gfp_flags)
{
+ void *page;
+
gfp_flags |= mc->gfp_zero;
if (mc->kmem_cache)
return kmem_cache_alloc(mc->kmem_cache, gfp_flags);
- else
- return (void *)__get_free_page(gfp_flags);
+
+ page = (void *)__get_free_page(gfp_flags);
+ if (page && mc->init_value)
+ memset64(page, mc->init_value, PAGE_SIZE / sizeof(u64));
+ return page;
}
int __kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int capacity, int min)
@@ -421,6 +417,13 @@ int __kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int capacity,
if (WARN_ON_ONCE(!capacity))
return -EIO;
+ /*
+ * Custom init values can be used only for page allocations,
+ * and obviously conflict with __GFP_ZERO.
+ */
+ if (WARN_ON_ONCE(mc->init_value && (mc->kmem_cache || mc->gfp_zero)))
+ return -EIO;
+
mc->objects = kvmalloc_array(capacity, sizeof(void *), gfp);
if (!mc->objects)
return -ENOMEM;
@@ -583,8 +586,6 @@ static void kvm_null_fn(void)
}
#define IS_KVM_NULL_FN(fn) ((fn) == (void *)kvm_null_fn)
-static const union kvm_mmu_notifier_arg KVM_MMU_NOTIFIER_NO_ARG;
-
/* Iterate over each memslot intersecting [start, last] (inclusive) range */
#define kvm_for_each_memslot_in_hva_range(node, slots, start, last) \
for (node = interval_tree_iter_first(&slots->hva_tree, start, last); \
@@ -670,14 +671,12 @@ static __always_inline kvm_mn_ret_t __kvm_handle_hva_range(struct kvm *kvm,
static __always_inline int kvm_handle_hva_range(struct mmu_notifier *mn,
unsigned long start,
unsigned long end,
- union kvm_mmu_notifier_arg arg,
gfn_handler_t handler)
{
struct kvm *kvm = mmu_notifier_to_kvm(mn);
const struct kvm_mmu_notifier_range range = {
.start = start,
.end = end,
- .arg = arg,
.handler = handler,
.on_lock = (void *)kvm_null_fn,
.flush_on_ret = true,
@@ -705,48 +704,6 @@ static __always_inline int kvm_handle_hva_range_no_flush(struct mmu_notifier *mn
return __kvm_handle_hva_range(kvm, &range).ret;
}
-static bool kvm_change_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
-{
- /*
- * Skipping invalid memslots is correct if and only change_pte() is
- * surrounded by invalidate_range_{start,end}(), which is currently
- * guaranteed by the primary MMU. If that ever changes, KVM needs to
- * unmap the memslot instead of skipping the memslot to ensure that KVM
- * doesn't hold references to the old PFN.
- */
- WARN_ON_ONCE(!READ_ONCE(kvm->mn_active_invalidate_count));
-
- if (range->slot->flags & KVM_MEMSLOT_INVALID)
- return false;
-
- return kvm_set_spte_gfn(kvm, range);
-}
-
-static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
- struct mm_struct *mm,
- unsigned long address,
- pte_t pte)
-{
- struct kvm *kvm = mmu_notifier_to_kvm(mn);
- const union kvm_mmu_notifier_arg arg = { .pte = pte };
-
- trace_kvm_set_spte_hva(address);
-
- /*
- * .change_pte() must be surrounded by .invalidate_range_{start,end}().
- * If mmu_invalidate_in_progress is zero, then no in-progress
- * invalidations, including this one, found a relevant memslot at
- * start(); rechecking memslots here is unnecessary. Note, a false
- * positive (count elevated by a different invalidation) is sub-optimal
- * but functionally ok.
- */
- WARN_ON_ONCE(!READ_ONCE(kvm->mn_active_invalidate_count));
- if (!READ_ONCE(kvm->mmu_invalidate_in_progress))
- return;
-
- kvm_handle_hva_range(mn, address, address + 1, arg, kvm_change_spte_gfn);
-}
-
void kvm_mmu_invalidate_begin(struct kvm *kvm)
{
lockdep_assert_held_write(&kvm->mmu_lock);
@@ -832,8 +789,7 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
* mn_active_invalidate_count (see above) instead of
* mmu_invalidate_in_progress.
*/
- gfn_to_pfn_cache_invalidate_start(kvm, range->start, range->end,
- hva_range.may_block);
+ gfn_to_pfn_cache_invalidate_start(kvm, range->start, range->end);
/*
* If one or more memslots were found and thus zapped, notify arch code
@@ -910,8 +866,7 @@ static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
{
trace_kvm_age_hva(start, end);
- return kvm_handle_hva_range(mn, start, end, KVM_MMU_NOTIFIER_NO_ARG,
- kvm_age_gfn);
+ return kvm_handle_hva_range(mn, start, end, kvm_age_gfn);
}
static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn,
@@ -964,7 +919,6 @@ static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
.clear_flush_young = kvm_mmu_notifier_clear_flush_young,
.clear_young = kvm_mmu_notifier_clear_young,
.test_young = kvm_mmu_notifier_test_young,
- .change_pte = kvm_mmu_notifier_change_pte,
.release = kvm_mmu_notifier_release,
};
@@ -1020,7 +974,7 @@ static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
if (!memslot->dirty_bitmap)
return;
- kvfree(memslot->dirty_bitmap);
+ vfree(memslot->dirty_bitmap);
memslot->dirty_bitmap = NULL;
}
@@ -1329,6 +1283,12 @@ static void kvm_destroy_devices(struct kvm *kvm)
* We do not need to take the kvm->lock here, because nobody else
* has a reference to the struct kvm at this point and therefore
* cannot access the devices list anyhow.
+ *
+ * The device list is generally managed as an rculist, but list_del()
+ * is used intentionally here. If a bug in KVM introduced a reader that
+ * was not backed by a reference on the kvm struct, the hope is that
+ * it'd consume the poisoned forward pointer instead of suffering a
+ * use-after-free, even though this cannot be guaranteed.
*/
list_for_each_entry_safe(dev, tmp, &kvm->devices, vm_node) {
list_del(&dev->vm_node);
@@ -2902,7 +2862,7 @@ static int hva_to_pfn_remapped(struct vm_area_struct *vma,
spinlock_t *ptl;
int r;
- r = follow_pte(vma->vm_mm, addr, &ptep, &ptl);
+ r = follow_pte(vma, addr, &ptep, &ptl);
if (r) {
/*
* get_user_pages fails for VM_IO and VM_PFNMAP vmas and does
@@ -2917,7 +2877,7 @@ static int hva_to_pfn_remapped(struct vm_area_struct *vma,
if (r)
return r;
- r = follow_pte(vma->vm_mm, addr, &ptep, &ptl);
+ r = follow_pte(vma, addr, &ptep, &ptl);
if (r)
return r;
}
@@ -2963,7 +2923,7 @@ out:
/*
* Pin guest page in memory and return its pfn.
* @addr: host virtual address which maps memory to the guest
- * @atomic: whether this function can sleep
+ * @atomic: whether this function is forbidden from sleeping
* @interruptible: whether the process can be interrupted by non-fatal signals
* @async: whether this function need to wait IO complete if the
* host page is not in the memory
@@ -3035,16 +2995,12 @@ kvm_pfn_t __gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn,
if (hva)
*hva = addr;
- if (addr == KVM_HVA_ERR_RO_BAD) {
- if (writable)
- *writable = false;
- return KVM_PFN_ERR_RO_FAULT;
- }
-
if (kvm_is_error_hva(addr)) {
if (writable)
*writable = false;
- return KVM_PFN_NOSLOT;
+
+ return addr == KVM_HVA_ERR_RO_BAD ? KVM_PFN_ERR_RO_FAULT :
+ KVM_PFN_NOSLOT;
}
/* Do not map writable pfn in the readonly memslot. */
@@ -3308,6 +3264,7 @@ static int next_segment(unsigned long len, int offset)
return len;
}
+/* Copy @len bytes from guest memory at '(@gfn * PAGE_SIZE) + @offset' to @data */
static int __kvm_read_guest_page(struct kvm_memory_slot *slot, gfn_t gfn,
void *data, int offset, int len)
{
@@ -3409,6 +3366,7 @@ int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa,
}
EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_atomic);
+/* Copy @len bytes from @data into guest memory at '(@gfn * PAGE_SIZE) + @offset' */
static int __kvm_write_guest_page(struct kvm *kvm,
struct kvm_memory_slot *memslot, gfn_t gfn,
const void *data, int offset, int len)
@@ -4725,7 +4683,8 @@ static int kvm_device_release(struct inode *inode, struct file *filp)
if (dev->ops->release) {
mutex_lock(&kvm->lock);
- list_del(&dev->vm_node);
+ list_del_rcu(&dev->vm_node);
+ synchronize_rcu();
dev->ops->release(dev);
mutex_unlock(&kvm->lock);
}
@@ -4808,7 +4767,7 @@ static int kvm_ioctl_create_device(struct kvm *kvm,
kfree(dev);
return ret;
}
- list_add(&dev->vm_node, &kvm->devices);
+ list_add_rcu(&dev->vm_node, &kvm->devices);
mutex_unlock(&kvm->lock);
if (ops->init)
@@ -4819,7 +4778,8 @@ static int kvm_ioctl_create_device(struct kvm *kvm,
if (ret < 0) {
kvm_put_kvm_no_destroy(kvm);
mutex_lock(&kvm->lock);
- list_del(&dev->vm_node);
+ list_del_rcu(&dev->vm_node);
+ synchronize_rcu();
if (ops->release)
ops->release(dev);
mutex_unlock(&kvm->lock);
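
The kvm_main.c hunks above convert the per-VM device list to RCU list primitives: list_add_rcu()/list_del_rcu() plus synchronize_rcu() before ops->release(). A minimal sketch of that general pattern follows; the demo_* names are made up and this is not KVM's actual reader, which lives outside this file:

/*
 * Sketch of the RCU-protected list pattern applied to kvm->devices above.
 * demo_device, demo_devices, demo_lock and the reader are hypothetical.
 */
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo_device {
	struct list_head node;
	int id;
};

static LIST_HEAD(demo_devices);
static DEFINE_MUTEX(demo_lock);	/* serializes writers, like kvm->lock */

/* Writer: publish a new entry; lockless readers may see it immediately. */
static void demo_add(struct demo_device *dev)
{
	mutex_lock(&demo_lock);
	list_add_rcu(&dev->node, &demo_devices);
	mutex_unlock(&demo_lock);
}

/* Writer: unlink, wait out readers already in flight, then free. */
static void demo_del(struct demo_device *dev)
{
	mutex_lock(&demo_lock);
	list_del_rcu(&dev->node);
	mutex_unlock(&demo_lock);

	synchronize_rcu();	/* no reader can still hold a reference */
	kfree(dev);
}

/* Lockless reader: safe against concurrent add/del, no demo_lock needed. */
static bool demo_has_id(int id)
{
	struct demo_device *dev;
	bool found = false;

	rcu_read_lock();
	list_for_each_entry_rcu(dev, &demo_devices, node) {
		if (dev->id == id) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();

	return found;
}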
diff --git a/virt/kvm/kvm_mm.h b/virt/kvm/kvm_mm.h
index ecefc7ec51af..715f19669d01 100644
--- a/virt/kvm/kvm_mm.h
+++ b/virt/kvm/kvm_mm.h
@@ -26,13 +26,11 @@ kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool interruptible,
#ifdef CONFIG_HAVE_KVM_PFNCACHE
void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm,
unsigned long start,
- unsigned long end,
- bool may_block);
+ unsigned long end);
#else
static inline void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm,
unsigned long start,
- unsigned long end,
- bool may_block)
+ unsigned long end)
{
}
#endif /* HAVE_KVM_PFNCACHE */
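
Stepping back to the kvm_main.c hunks that teach mmu_memory_cache_alloc_obj() about mc->init_value: a hedged caller-side sketch of how arch code might opt into a non-zero fill pattern. The init_value field is assumed to come from elsewhere in the same series, and DEMO_EMPTY_PTE plus the demo helper are invented for illustration:

/*
 * Hypothetical caller of the init_value support added above. The 0xdead...
 * pattern and demo_topup_page_cache() are illustrative only.
 */
#include <linux/kvm_host.h>

#define DEMO_EMPTY_PTE	0xdead0000dead0000ULL	/* placeholder non-zero init pattern */

static int demo_topup_page_cache(struct kvm_mmu_memory_cache *mc)
{
	/*
	 * Pages handed out by this cache must start out filled with the
	 * non-zero pattern, so set init_value instead of gfp_zero. The new
	 * WARN_ON_ONCE() in __kvm_mmu_topup_memory_cache() rejects mixing
	 * init_value with a kmem_cache or with __GFP_ZERO.
	 */
	mc->init_value = DEMO_EMPTY_PTE;

	/* Keep at least 4 pre-allocated pages on hand (arbitrary minimum). */
	return kvm_mmu_topup_memory_cache(mc, 4);
}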
diff --git a/virt/kvm/pfncache.c b/virt/kvm/pfncache.c
index 4e07112a24c2..e3453e869e92 100644
--- a/virt/kvm/pfncache.c
+++ b/virt/kvm/pfncache.c
@@ -23,7 +23,7 @@
* MMU notifier 'invalidate_range_start' hook.
*/
void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, unsigned long start,
- unsigned long end, bool may_block)
+ unsigned long end)
{
struct gfn_to_pfn_cache *gpc;
@@ -57,6 +57,19 @@ void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, unsigned long start,
spin_unlock(&kvm->gpc_lock);
}
+static bool kvm_gpc_is_valid_len(gpa_t gpa, unsigned long uhva,
+ unsigned long len)
+{
+ unsigned long offset = kvm_is_error_gpa(gpa) ? offset_in_page(uhva) :
+ offset_in_page(gpa);
+
+ /*
+ * The cached access must fit within a single page. The 'len' argument
+ * to activate() and refresh() exists only to enforce that.
+ */
+ return offset + len <= PAGE_SIZE;
+}
+
bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, unsigned long len)
{
struct kvm_memslots *slots = kvm_memslots(gpc->kvm);
@@ -74,7 +87,7 @@ bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, unsigned long len)
if (kvm_is_error_hva(gpc->uhva))
return false;
- if (offset_in_page(gpc->uhva) + len > PAGE_SIZE)
+ if (!kvm_gpc_is_valid_len(gpc->gpa, gpc->uhva, len))
return false;
if (!gpc->valid)
@@ -232,8 +245,7 @@ out_error:
return -EFAULT;
}
-static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long uhva,
- unsigned long len)
+static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long uhva)
{
unsigned long page_offset;
bool unmap_old = false;
@@ -247,15 +259,6 @@ static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned l
if (WARN_ON_ONCE(kvm_is_error_gpa(gpa) == kvm_is_error_hva(uhva)))
return -EINVAL;
- /*
- * The cached acces must fit within a single page. The 'len' argument
- * exists only to enforce that.
- */
- page_offset = kvm_is_error_gpa(gpa) ? offset_in_page(uhva) :
- offset_in_page(gpa);
- if (page_offset + len > PAGE_SIZE)
- return -EINVAL;
-
lockdep_assert_held(&gpc->refresh_lock);
write_lock_irq(&gpc->lock);
@@ -270,6 +273,8 @@ static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned l
old_uhva = PAGE_ALIGN_DOWN(gpc->uhva);
if (kvm_is_error_gpa(gpa)) {
+ page_offset = offset_in_page(uhva);
+
gpc->gpa = INVALID_GPA;
gpc->memslot = NULL;
gpc->uhva = PAGE_ALIGN_DOWN(uhva);
@@ -279,6 +284,8 @@ static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned l
} else {
struct kvm_memslots *slots = kvm_memslots(gpc->kvm);
+ page_offset = offset_in_page(gpa);
+
if (gpc->gpa != gpa || gpc->generation != slots->generation ||
kvm_is_error_hva(gpc->uhva)) {
gfn_t gfn = gpa_to_gfn(gpa);
@@ -354,6 +361,9 @@ int kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, unsigned long len)
guard(mutex)(&gpc->refresh_lock);
+ if (!kvm_gpc_is_valid_len(gpc->gpa, gpc->uhva, len))
+ return -EINVAL;
+
/*
* If the GPA is valid then ignore the HVA, as a cache can be GPA-based
* or HVA-based, not both. For GPA-based caches, the HVA will be
@@ -361,7 +371,7 @@ int kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, unsigned long len)
*/
uhva = kvm_is_error_gpa(gpc->gpa) ? gpc->uhva : KVM_HVA_ERR_BAD;
- return __kvm_gpc_refresh(gpc, gpc->gpa, uhva, len);
+ return __kvm_gpc_refresh(gpc, gpc->gpa, uhva);
}
void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm)
@@ -381,6 +391,9 @@ static int __kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned
{
struct kvm *kvm = gpc->kvm;
+ if (!kvm_gpc_is_valid_len(gpa, uhva, len))
+ return -EINVAL;
+
guard(mutex)(&gpc->refresh_lock);
if (!gpc->active) {
@@ -400,11 +413,18 @@ static int __kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned
gpc->active = true;
write_unlock_irq(&gpc->lock);
}
- return __kvm_gpc_refresh(gpc, gpa, uhva, len);
+ return __kvm_gpc_refresh(gpc, gpa, uhva);
}
int kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len)
{
+ /*
+ * Explicitly disallow INVALID_GPA so that the magic value can be used
+ * by KVM to differentiate between GPA-based and HVA-based caches.
+ */
+ if (WARN_ON_ONCE(kvm_is_error_gpa(gpa)))
+ return -EINVAL;
+
return __kvm_gpc_activate(gpc, gpa, KVM_HVA_ERR_BAD, len);
}
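
The pfncache.c changes move the single-page length check into kvm_gpc_is_valid_len(), enforced at activate(), refresh() and check() time instead of inside __kvm_gpc_refresh(). The sketch below shows a typical consumer loop under that scheme; the demo_* helper and the fixed 8-byte access are illustrative only:

/*
 * Sketch of a gfn_to_pfn_cache consumer under the reworked length checking:
 * 'len' is validated against the page boundary when the cache is activated
 * or refreshed, and again in kvm_gpc_check().
 */
#include <linux/kvm_host.h>

static int demo_write_guest_u64(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
				gpa_t gpa, u64 val)
{
	int ret;

	/* Setup (normally done once); -EINVAL if gpa + 8 bytes crosses a page. */
	kvm_gpc_init(gpc, kvm);
	ret = kvm_gpc_activate(gpc, gpa, sizeof(val));
	if (ret)
		return ret;

	/* Fast path: re-validate under gpc->lock, refresh if the mapping went stale. */
	read_lock_irq(&gpc->lock);
	while (!kvm_gpc_check(gpc, sizeof(val))) {
		read_unlock_irq(&gpc->lock);

		ret = kvm_gpc_refresh(gpc, sizeof(val));
		if (ret)
			return ret;

		read_lock_irq(&gpc->lock);
	}

	*(u64 *)gpc->khva = val;	/* khva already includes the page offset */
	read_unlock_irq(&gpc->lock);

	/* A real user would kvm_gpc_deactivate() once the cache is no longer needed. */
	return 0;
}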
diff --git a/virt/kvm/vfio.c b/virt/kvm/vfio.c
index ca24ce120906..76b7f6085dcd 100644
--- a/virt/kvm/vfio.c
+++ b/virt/kvm/vfio.c
@@ -366,6 +366,8 @@ static int kvm_vfio_create(struct kvm_device *dev, u32 type)
struct kvm_device *tmp;
struct kvm_vfio *kv;
+ lockdep_assert_held(&dev->kvm->lock);
+
/* Only one VFIO "device" per VM */
list_for_each_entry(tmp, &dev->kvm->devices, vm_node)
if (tmp->ops == &kvm_vfio_ops)
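
The lockdep_assert_held() added here documents the other side of the kvm_main.c rculist conversion: a plain list_for_each_entry() walk of kvm->devices is only safe while kvm->lock is held. A hypothetical sketch of such a writer-side walk, with a made-up demo_count_devices() helper:

/*
 * Sketch of the locking contract the vfio.c hunk documents: kvm->devices is
 * now modified with RCU list primitives, so non-RCU walkers must hold
 * kvm->lock, which lockdep_assert_held() checks at runtime when lockdep is
 * enabled. demo_count_devices() is not a real KVM function.
 */
#include <linux/kvm_host.h>
#include <linux/list.h>
#include <linux/lockdep.h>

static int demo_count_devices(struct kvm *kvm)
{
	struct kvm_device *dev;
	int n = 0;

	lockdep_assert_held(&kvm->lock);	/* writer-side walk, no RCU read lock */

	list_for_each_entry(dev, &kvm->devices, vm_node)
		n++;

	return n;
}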