author	Christoph Hellwig <hch@lst.de>	2019-07-11 20:57:21 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2019-07-12 11:05:45 -0700
commit	050a9adc64383aed3429a31432b4f5a7b0cdc8ac (patch)
tree	e8bf6fec0f1a4fd0aadbbad9f9834cb648ff1129 /mm/nommu.c
parent	d3649f68b4336e7ef7aa264cf05ba1265feb0968 (diff)
mm: consolidate the get_user_pages* implementations
Always build mm/gup.c so that we don't have to provide separate nommu stubs. Also merge the get_user_pages_fast and __get_user_pages_fast stubs used when HAVE_FAST_GUP is not set into the main implementations, which will never call the fast path if HAVE_FAST_GUP is not set.

This also ensures the new put_user_pages* helpers are available for nommu, as those are currently missing, which would create a problem as soon as we actually grew users for them.

Link: http://lkml.kernel.org/r/20190625143715.1689-13-hch@lst.de
Signed-off-by: Christoph Hellwig <hch@lst.de>
Cc: Andrey Konovalov <andreyknvl@google.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: David Miller <davem@davemloft.net>
Cc: James Hogan <jhogan@kernel.org>
Cc: Jason Gunthorpe <jgg@mellanox.com>
Cc: Khalid Aziz <khalid.aziz@oracle.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Paul Burton <paul.burton@mips.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Rich Felker <dalias@libc.org>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
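For illustration, a minimal sketch of the shape the merged implementation takes (fast_gup_walk() and slow_gup_fallback() are hypothetical stand-ins, not the verbatim mm/gup.c internals): the fast path is gated on IS_ENABLED(CONFIG_HAVE_FAST_GUP), so configurations without it, including nommu, fall straight through to the slow path and no longer need a separate stub.

/*
 * Sketch only: fast_gup_walk() and slow_gup_fallback() are hypothetical
 * names standing in for the real mm/gup.c internals.
 */
int get_user_pages_fast(unsigned long start, int nr_pages,
			unsigned int gup_flags, struct page **pages)
{
	int nr = 0, ret;

	if (IS_ENABLED(CONFIG_HAVE_FAST_GUP)) {
		/* lockless page-table walk; this whole branch is
		 * discarded by the compiler when HAVE_FAST_GUP is unset */
		nr = fast_gup_walk(start, nr_pages, gup_flags, pages);
		if (nr == nr_pages)
			return nr;
	}

	/* slow path picks up whatever the fast walk did not pin */
	ret = slow_gup_fallback(start + nr * PAGE_SIZE, nr_pages - nr,
				gup_flags, pages + nr);
	return ret < 0 ? (nr ? nr : ret) : nr + ret;
}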
Diffstat (limited to 'mm/nommu.c')
-rw-r--r--	mm/nommu.c	88
1 file changed, 0 insertions(+), 88 deletions(-)
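The put_user_pages* helpers mentioned in the log also live in mm/gup.c, so building that file unconditionally is what makes them visible to nommu configurations. As a rough sketch (not the verbatim code of that era), the batch helper is just a loop dropping one pinned-page reference per entry via put_user_page():

/* Sketch of the batch helper's shape; the real one is in mm/gup.c. */
void put_user_pages(struct page **pages, unsigned long npages)
{
	unsigned long index;

	/* drop one pinned-page reference per array entry */
	for (index = 0; index < npages; index++)
		put_user_page(pages[index]);
}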
diff --git a/mm/nommu.c b/mm/nommu.c
index d8c02fbe03b5..07165ad2e548 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -111,94 +111,6 @@ unsigned int kobjsize(const void *objp)
return PAGE_SIZE << compound_order(page);
}
-static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
- unsigned long start, unsigned long nr_pages,
- unsigned int foll_flags, struct page **pages,
- struct vm_area_struct **vmas, int *nonblocking)
-{
- struct vm_area_struct *vma;
- unsigned long vm_flags;
- int i;
-
- /* calculate required read or write permissions.
- * If FOLL_FORCE is set, we only require the "MAY" flags.
- */
- vm_flags = (foll_flags & FOLL_WRITE) ?
- (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
- vm_flags &= (foll_flags & FOLL_FORCE) ?
- (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
-
- for (i = 0; i < nr_pages; i++) {
- vma = find_vma(mm, start);
- if (!vma)
- goto finish_or_fault;
-
- /* protect what we can, including chardevs */
- if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
- !(vm_flags & vma->vm_flags))
- goto finish_or_fault;
-
- if (pages) {
- pages[i] = virt_to_page(start);
- if (pages[i])
- get_page(pages[i]);
- }
- if (vmas)
- vmas[i] = vma;
- start = (start + PAGE_SIZE) & PAGE_MASK;
- }
-
- return i;
-
-finish_or_fault:
- return i ? : -EFAULT;
-}
-
-/*
- * get a list of pages in an address range belonging to the specified process
- * and indicate the VMA that covers each page
- * - this is potentially dodgy as we may end incrementing the page count of a
- * slab page or a secondary page from a compound page
- * - don't permit access to VMAs that don't support it, such as I/O mappings
- */
-long get_user_pages(unsigned long start, unsigned long nr_pages,
- unsigned int gup_flags, struct page **pages,
- struct vm_area_struct **vmas)
-{
- return __get_user_pages(current, current->mm, start, nr_pages,
- gup_flags, pages, vmas, NULL);
-}
-EXPORT_SYMBOL(get_user_pages);
-
-long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
- unsigned int gup_flags, struct page **pages,
- int *locked)
-{
- return get_user_pages(start, nr_pages, gup_flags, pages, NULL);
-}
-EXPORT_SYMBOL(get_user_pages_locked);
-
-static long __get_user_pages_unlocked(struct task_struct *tsk,
- struct mm_struct *mm, unsigned long start,
- unsigned long nr_pages, struct page **pages,
- unsigned int gup_flags)
-{
- long ret;
- down_read(&mm->mmap_sem);
- ret = __get_user_pages(tsk, mm, start, nr_pages, gup_flags, pages,
- NULL, NULL);
- up_read(&mm->mmap_sem);
- return ret;
-}
-
-long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
- struct page **pages, unsigned int gup_flags)
-{
- return __get_user_pages_unlocked(current, current->mm, start, nr_pages,
- pages, gup_flags);
-}
-EXPORT_SYMBOL(get_user_pages_unlocked);
-
/**
* follow_pfn - look up PFN at a user virtual address
* @vma: memory mapping