author	Linus Torvalds <torvalds@linux-foundation.org>	2020-06-09 09:54:46 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2020-06-09 09:54:46 -0700
commit	a5ad5742f671de906adbf29fbedf0a04705cebad (patch)
tree	88d1a4c18e2025a5a8335dbbc9dea8bebeba5789 /mm/mlock.c
parent	013b2deba9a6b80ca02f4fafd7dedf875e9b4450 (diff)
parent	4fa7252338a56fbc90220e6330f136a379175a7a (diff)
Merge branch 'akpm' (patches from Andrew)
Merge even more updates from Andrew Morton:

 - a kernel-wide sweep of show_stack()

 - pagetable cleanups

 - abstract out accesses to mmap_sem - prep for mmap_sem scalability work

 - hch's user access work

Subsystems affected by this patch series: debug, mm/pagemap, mm/maccess,
mm/documentation.

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (93 commits)
  include/linux/cache.h: expand documentation over __read_mostly
  maccess: return -ERANGE when probe_kernel_read() fails
  x86: use non-set_fs based maccess routines
  maccess: allow architectures to provide kernel probing directly
  maccess: move user access routines together
  maccess: always use strict semantics for probe_kernel_read
  maccess: remove strncpy_from_unsafe
  tracing/kprobes: handle mixed kernel/userspace probes better
  bpf: rework the compat kernel probe handling
  bpf: bpf_seq_printf(): handle potentially unsafe format string better
  bpf: handle the compat string in bpf_trace_copy_string better
  bpf: factor out a bpf_trace_copy_string helper
  maccess: unify the probe kernel arch hooks
  maccess: remove probe_read_common and probe_write_common
  maccess: rename strnlen_unsafe_user to strnlen_user_nofault
  maccess: rename strncpy_from_unsafe_strict to strncpy_from_kernel_nofault
  maccess: rename strncpy_from_unsafe_user to strncpy_from_user_nofault
  maccess: update the top of file comment
  maccess: clarify kerneldoc comments
  maccess: remove duplicate kerneldoc comments
  ...
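For context on the conversion visible in the mm/mlock.c hunks below: the series replaces direct use of current->mm->mmap_sem with wrapper calls such as mmap_write_lock_killable() and mmap_write_unlock(). A minimal sketch of what those write-side wrappers look like, assuming the mm_struct field has been renamed to mmap_lock and the helpers are thin static inlines (in the kernel they are provided by include/linux/mmap_lock.h; only the function and field names used in the diff below are taken from the source, the rest is illustrative):

	/* Illustrative sketch of the mmap_lock write-side wrappers.
	 * They simply forward to the existing rw_semaphore primitives
	 * on the renamed mm->mmap_lock field. */
	static inline void mmap_write_lock(struct mm_struct *mm)
	{
		down_write(&mm->mmap_lock);
	}

	static inline int mmap_write_lock_killable(struct mm_struct *mm)
	{
		return down_write_killable(&mm->mmap_lock);
	}

	static inline void mmap_write_unlock(struct mm_struct *mm)
	{
		up_write(&mm->mmap_lock);
	}

Callers thus keep the same semantics (killable write lock, -EINTR on interruption) while the locking primitive itself is abstracted behind one interface, which is the stated prep for the mmap_sem scalability work.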
Diffstat (limited to 'mm/mlock.c')
-rw-r--r--	mm/mlock.c	22
1 file changed, 11 insertions(+), 11 deletions(-)
diff --git a/mm/mlock.c b/mm/mlock.c
index a72c1eeded77..f8736136fad7 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -49,7 +49,7 @@ EXPORT_SYMBOL(can_do_mlock);
* When lazy mlocking via vmscan, it is important to ensure that the
* vma's VM_LOCKED status is not concurrently being modified, otherwise we
* may have mlocked a page that is being munlocked. So lazy mlock must take
- * the mmap_sem for read, and verify that the vma really is locked
+ * the mmap_lock for read, and verify that the vma really is locked
* (see mm/rmap.c).
*/
@@ -381,7 +381,7 @@ static unsigned long __munlock_pagevec_fill(struct pagevec *pvec,
/*
* Initialize pte walk starting at the already pinned page where we
* are sure that there is a pte, as it was pinned under the same
- * mmap_sem write op.
+ * mmap_lock write op.
*/
pte = get_locked_pte(vma->vm_mm, start, &ptl);
/* Make sure we do not cross the page table boundary */
@@ -565,7 +565,7 @@ success:
mm->locked_vm += nr_pages;
/*
- * vm_flags is protected by the mmap_sem held in write mode.
+ * vm_flags is protected by the mmap_lock held in write mode.
* It's okay if try_to_unmap_one unmaps a page just after we
* set VM_LOCKED, populate_vma_page_range will bring it back.
*/
@@ -686,7 +686,7 @@ static __must_check int do_mlock(unsigned long start, size_t len, vm_flags_t fla
lock_limit >>= PAGE_SHIFT;
locked = len >> PAGE_SHIFT;
- if (down_write_killable(&current->mm->mmap_sem))
+ if (mmap_write_lock_killable(current->mm))
return -EINTR;
locked += current->mm->locked_vm;
@@ -705,7 +705,7 @@ static __must_check int do_mlock(unsigned long start, size_t len, vm_flags_t fla
if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
error = apply_vma_lock_flags(start, len, flags);
- up_write(&current->mm->mmap_sem);
+ mmap_write_unlock(current->mm);
if (error)
return error;
@@ -742,10 +742,10 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
len = PAGE_ALIGN(len + (offset_in_page(start)));
start &= PAGE_MASK;
- if (down_write_killable(&current->mm->mmap_sem))
+ if (mmap_write_lock_killable(current->mm))
return -EINTR;
ret = apply_vma_lock_flags(start, len, 0);
- up_write(&current->mm->mmap_sem);
+ mmap_write_unlock(current->mm);
return ret;
}
@@ -811,14 +811,14 @@ SYSCALL_DEFINE1(mlockall, int, flags)
lock_limit = rlimit(RLIMIT_MEMLOCK);
lock_limit >>= PAGE_SHIFT;
- if (down_write_killable(&current->mm->mmap_sem))
+ if (mmap_write_lock_killable(current->mm))
return -EINTR;
ret = -ENOMEM;
if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
capable(CAP_IPC_LOCK))
ret = apply_mlockall_flags(flags);
- up_write(&current->mm->mmap_sem);
+ mmap_write_unlock(current->mm);
if (!ret && (flags & MCL_CURRENT))
mm_populate(0, TASK_SIZE);
@@ -829,10 +829,10 @@ SYSCALL_DEFINE0(munlockall)
{
int ret;
- if (down_write_killable(&current->mm->mmap_sem))
+ if (mmap_write_lock_killable(current->mm))
return -EINTR;
ret = apply_mlockall_flags(0);
- up_write(&current->mm->mmap_sem);
+ mmap_write_unlock(current->mm);
return ret;
}
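The comment updated in the first hunk refers to taking the mmap_lock for read (the lazy-mlock path in vmscan/rmap); the read-side wrappers introduced by the same series follow the identical pattern as the write-side ones sketched above. A small sketch under the same assumptions, not the authoritative header:

	/* Illustrative read-side counterparts of the mmap_lock API. */
	static inline void mmap_read_lock(struct mm_struct *mm)
	{
		down_read(&mm->mmap_lock);
	}

	static inline void mmap_read_unlock(struct mm_struct *mm)
	{
		up_read(&mm->mmap_lock);
	}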