author	Linus Torvalds <torvalds@linux-foundation.org>	2020-06-09 09:54:46 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2020-06-09 09:54:46 -0700
commit	a5ad5742f671de906adbf29fbedf0a04705cebad (patch)
tree	88d1a4c18e2025a5a8335dbbc9dea8bebeba5789 /drivers/vfio/pci/vfio_pci.c
parent	013b2deba9a6b80ca02f4fafd7dedf875e9b4450 (diff)
parent	4fa7252338a56fbc90220e6330f136a379175a7a (diff)
Merge branch 'akpm' (patches from Andrew)
Merge even more updates from Andrew Morton:

 - a kernel-wide sweep of show_stack()

 - pagetable cleanups

 - abstract out accesses to mmap_sem

 - prep for mmap_sem scalability work

 - hch's user access work

Subsystems affected by this patch series: debug, mm/pagemap, mm/maccess,
mm/documentation.

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (93 commits)
  include/linux/cache.h: expand documentation over __read_mostly
  maccess: return -ERANGE when probe_kernel_read() fails
  x86: use non-set_fs based maccess routines
  maccess: allow architectures to provide kernel probing directly
  maccess: move user access routines together
  maccess: always use strict semantics for probe_kernel_read
  maccess: remove strncpy_from_unsafe
  tracing/kprobes: handle mixed kernel/userspace probes better
  bpf: rework the compat kernel probe handling
  bpf: bpf_seq_printf(): handle potentially unsafe format string better
  bpf: handle the compat string in bpf_trace_copy_string better
  bpf: factor out a bpf_trace_copy_string helper
  maccess: unify the probe kernel arch hooks
  maccess: remove probe_read_common and probe_write_common
  maccess: rename strnlen_unsafe_user to strnlen_user_nofault
  maccess: rename strncpy_from_unsafe_strict to strncpy_from_kernel_nofault
  maccess: rename strncpy_from_unsafe_user to strncpy_from_user_nofault
  maccess: update the top of file comment
  maccess: clarify kerneldoc comments
  maccess: remove duplicate kerneldoc comments
  ...
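The "abstract out accesses to mmap_sem" series referenced above replaces
open-coded down_read(&mm->mmap_sem) call sites with wrapper helpers (see
include/linux/mmap_lock.h), which is what the conversion in the diff below
uses. A minimal sketch of the wrapper style, assuming the v5.8 helper
names; the bodies are illustrative, not copied from mainline:

/* Illustrative wrappers in the style of include/linux/mmap_lock.h. */
#include <linux/mm_types.h>
#include <linux/rwsem.h>

static inline void mmap_read_lock(struct mm_struct *mm)
{
	down_read(&mm->mmap_lock);
}

static inline int mmap_read_trylock(struct mm_struct *mm)
{
	return down_read_trylock(&mm->mmap_lock) != 0;
}

static inline void mmap_read_unlock(struct mm_struct *mm)
{
	up_read(&mm->mmap_lock);
}

Hiding the rwsem behind one API is what enables the "prep for mmap_sem
scalability work" item above: call sites no longer assume the lock is a
bare rw_semaphore, so its implementation can change later.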
Diffstat (limited to 'drivers/vfio/pci/vfio_pci.c')
-rw-r--r--	drivers/vfio/pci/vfio_pci.c | 22 +++++++++++-----------
1 file changed, 11 insertions(+), 11 deletions(-)
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index 3fc198f3eeb5..7c0779018b1b 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -1185,7 +1185,7 @@ reset_info_exit:
/*
* We need to get memory_lock for each device, but devices
- * can share mmap_sem, therefore we need to zap and hold
+ * can share mmap_lock, therefore we need to zap and hold
* the vma_lock for each device, and only then get each
* memory_lock.
*/
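The comment in this hunk describes an aggregate ordering across several
devices (e.g. a hot reset affecting a whole bus): zap and hold each
device's vma_lock first, then take each memory_lock. A condensed sketch
of that sequence, assuming the fields visible in this diff; the helper
name and the device-array walk are hypothetical, and unwinding of
already-taken memory_locks on failure is elided:

/* Hypothetical helper illustrating "zap and hold vma_lock, then take
 * memory_lock" for each affected device, per the comment above. */
static int try_zap_and_lock_each(struct vfio_pci_device **devs, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		/* returns non-zero holding devs[i]->vma_lock on success */
		if (!vfio_pci_zap_and_vma_lock(devs[i], true))
			return 0;	/* a trylock failed; caller backs off */
		down_write(&devs[i]->memory_lock);
		mutex_unlock(&devs[i]->vma_lock);
	}
	return 1;
}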
@@ -1375,26 +1375,26 @@ static int vfio_pci_zap_and_vma_lock(struct vfio_pci_device *vdev, bool try)
/*
* Lock ordering:
- * vma_lock is nested under mmap_sem for vm_ops callback paths.
+ * vma_lock is nested under mmap_lock for vm_ops callback paths.
* The memory_lock semaphore is used by both code paths calling
* into this function to zap vmas and the vm_ops.fault callback
* to protect the memory enable state of the device.
*
- * When zapping vmas we need to maintain the mmap_sem => vma_lock
+ * When zapping vmas we need to maintain the mmap_lock => vma_lock
* ordering, which requires using vma_lock to walk vma_list to
- * acquire an mm, then dropping vma_lock to get the mmap_sem and
+ * acquire an mm, then dropping vma_lock to get the mmap_lock and
* reacquiring vma_lock. This logic is derived from similar
* requirements in uverbs_user_mmap_disassociate().
*
- * mmap_sem must always be the top-level lock when it is taken.
+ * mmap_lock must always be the top-level lock when it is taken.
* Therefore we can only hold the memory_lock write lock when
- * vma_list is empty, as we'd need to take mmap_sem to clear
+ * vma_list is empty, as we'd need to take mmap_lock to clear
* entries. vma_list can only be guaranteed empty when holding
* vma_lock, thus memory_lock is nested under vma_lock.
*
* This enables the vm_ops.fault callback to acquire vma_lock,
* followed by memory_lock read lock, while already holding
- * mmap_sem without risk of deadlock.
+ * mmap_lock without risk of deadlock.
*/
while (1) {
struct mm_struct *mm = NULL;
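Read as a whole, the loop in the hunk above enforces the documented
ordering by dropping vma_lock before taking mmap_lock and then
re-nesting it. A condensed sketch of that shape, with the vma_list
bookkeeping and the mmget_still_valid() handling elided:

/* Condensed sketch of the zap loop, assuming the fields shown in this
 * diff (vdev->vma_lock, vdev->vma_list); not a drop-in replacement. */
while (1) {
	struct mm_struct *mm = NULL;

	mutex_lock(&vdev->vma_lock);	/* walk vma_list under vma_lock */
	/* ... take an mm reference from the first vma, if any ... */
	if (!mm)
		return 1;		/* list empty: return holding vma_lock */
	mutex_unlock(&vdev->vma_lock);	/* drop before taking mmap_lock */

	mmap_read_lock(mm);		/* top-level lock comes first */
	mutex_lock(&vdev->vma_lock);	/* re-nest vma_lock under it */
	/* ... zap_vma_ptes() for this mm's vmas, prune vma_list ... */
	mutex_unlock(&vdev->vma_lock);
	mmap_read_unlock(mm);
	mmput(mm);
}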
@@ -1422,17 +1422,17 @@ static int vfio_pci_zap_and_vma_lock(struct vfio_pci_device *vdev, bool try)
mutex_unlock(&vdev->vma_lock);
if (try) {
- if (!down_read_trylock(&mm->mmap_sem)) {
+ if (!mmap_read_trylock(mm)) {
mmput(mm);
return 0;
}
} else {
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
}
if (mmget_still_valid(mm)) {
if (try) {
if (!mutex_trylock(&vdev->vma_lock)) {
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
mmput(mm);
return 0;
}
@@ -1454,7 +1454,7 @@ static int vfio_pci_zap_and_vma_lock(struct vfio_pci_device *vdev, bool try)
}
mutex_unlock(&vdev->vma_lock);
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
mmput(mm);
}
}
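For the other side of the ordering, the comment in the second hunk
describes how the vm_ops.fault callback nests under mmap_lock. A minimal
sketch of that path, assuming the vfio_pci fault-handler shape from this
kernel, with the memory-enable check and vma tracking elided:

/* Sketch of the vm_ops.fault side of the ordering described above.
 * The fault handler is entered with mmap_lock already held, so taking
 * vma_lock and then the memory_lock read lock here follows
 * mmap_lock => vma_lock => memory_lock and cannot deadlock. */
static vm_fault_t vfio_pci_mmap_fault_sketch(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct vfio_pci_device *vdev = vma->vm_private_data;
	vm_fault_t ret = VM_FAULT_NOPAGE;

	mutex_lock(&vdev->vma_lock);	/* nested under mmap_lock */
	down_read(&vdev->memory_lock);	/* nested under vma_lock */

	/* ... verify memory enable state, insert the PFN mapping ... */

	up_read(&vdev->memory_lock);
	mutex_unlock(&vdev->vma_lock);
	return ret;
}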