author:    Michal Hocko <mhocko@suse.com>                    2016-05-23 16:25:30 -0700
committer: Linus Torvalds <torvalds@linux-foundation.org>   2016-05-23 17:04:14 -0700
commit:    9fbeb5ab59a2b2a09cca2eb68283e7a090d4b98d
tree:      b4f3ff77c420bdafd5be3aa8293320ad4cb871e4
parent:    dc0ef0df7b6a90892ec41933212ac701152a254c
mm: make vm_mmap killable
All the callers of vm_mmap seem to check for failure already and bail out in one way or another on error, which means that we can change it to use the killable version of vm_mmap_pgoff and return -EINTR if the current task gets killed while waiting for mmap_sem. This also means that vm_mmap_pgoff can be killable by default and drop the additional parameter.

This will help in OOM conditions when the OOM victim might be stuck waiting for mmap_sem for write, which in turn can block the oom_reaper, which relies on mmap_sem for read to make forward progress and reclaim the address space of the victim.

Please note that load_elf_binary ignores the vm_mmap error for the current->personality & MMAP_PAGE_ZERO case, but that shouldn't be a problem because the address is not used anywhere and we never return to userspace if we got killed.

Signed-off-by: Michal Hocko <mhocko@suse.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
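As a minimal sketch of the contract this change establishes: a caller must treat the value returned by vm_mmap() as a possible negative errno, now including -EINTR. The helper example_map_file() and its parameters below are hypothetical, not from this commit; vm_mmap() and IS_ERR_VALUE() are the real kernel interfaces involved.

/*
 * Hypothetical caller, for illustration only (not part of this commit).
 * After this change vm_mmap() can fail with -EINTR when the calling
 * task is killed while waiting for mmap_sem, so the returned address
 * must be checked before use.
 */
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/mman.h>

static int example_map_file(struct file *file, unsigned long len,
			    unsigned long *addr_out)
{
	unsigned long addr;

	addr = vm_mmap(file, 0, len, PROT_READ | PROT_WRITE,
		       MAP_SHARED, 0);
	if (IS_ERR_VALUE(addr))
		return (int)addr;	/* may now be -EINTR */

	*addr_out = addr;
	return 0;
}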
Diffstat (limited to 'mm/util.c')
-rw-r--r--  mm/util.c | 13 ++++---------
1 file changed, 4 insertions(+), 9 deletions(-)
diff --git a/mm/util.c b/mm/util.c
index 03b237746850..917e0e3d0f8e 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -289,7 +289,7 @@ EXPORT_SYMBOL_GPL(get_user_pages_fast);
 
 unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
 	unsigned long len, unsigned long prot,
-	unsigned long flag, unsigned long pgoff, bool killable)
+	unsigned long flag, unsigned long pgoff)
 {
 	unsigned long ret;
 	struct mm_struct *mm = current->mm;
@@ -297,12 +297,8 @@ unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
 
 	ret = security_mmap_file(file, prot, flag);
 	if (!ret) {
-		if (killable) {
-			if (down_write_killable(&mm->mmap_sem))
-				return -EINTR;
-		} else {
-			down_write(&mm->mmap_sem);
-		}
+		if (down_write_killable(&mm->mmap_sem))
+			return -EINTR;
 		ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff,
 				    &populate);
 		up_write(&mm->mmap_sem);
@@ -312,7 +308,6 @@ unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
 	return ret;
 }
 
-/* XXX are all callers checking an error */
 unsigned long vm_mmap(struct file *file, unsigned long addr,
 	unsigned long len, unsigned long prot,
 	unsigned long flag, unsigned long offset)
@@ -322,7 +317,7 @@ unsigned long vm_mmap(struct file *file, unsigned long addr,
 	if (unlikely(offset_in_page(offset)))
 		return -EINVAL;
 
-	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT, false);
+	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
 }
 EXPORT_SYMBOL(vm_mmap);
 