author	Mike Kravetz <mike.kravetz@oracle.com>	2017-02-22 15:42:58 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-02-22 16:41:28 -0800
commit	810a56b943e265bbabfcd5a8e54cb8d3b16cd6e4 (patch)
tree	4d95abf9e7d4efb0fa40cb6f5b1c3552e3bf2f77 /mm/memory.c
parent	60d4d2d2b40e44cd36bfb6049e8d9e2055a24f8a (diff)
userfaultfd: hugetlbfs: fix __mcopy_atomic_hugetlb retry/error processing
The new routine copy_huge_page_from_user() uses kmap_atomic() to map PAGE_SIZE pages. However, this prevents page faults in the subsequent call to copy_from_user(). This is fine when the routine is called with mmap_sem held; however, the retry path in __mcopy_atomic_hugetlb calls it after dropping mmap_sem and wants to allow page faults. So, add a new argument, allow_pagefault, to indicate whether the routine should allow page faults.

[dan.carpenter@oracle.com: unmap the correct pointer]
  Link: http://lkml.kernel.org/r/20170113082608.GA3548@mwanda
[akpm@linux-foundation.org: kunmap() takes a page*, per Hugh]
Link: http://lkml.kernel.org/r/20161216144821.5183-20-aarcange@redhat.com
Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
Cc: "Dr. David Alan Gilbert" <dgilbert@redhat.com>
Cc: Hillf Danton <hillf.zj@alibaba-inc.com>
Cc: Michael Rapoport <RAPOPORT@il.ibm.com>
Cc: Mike Rapoport <rppt@linux.vnet.ibm.com>
Cc: Pavel Emelyanov <xemul@parallels.com>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
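For context, here is a sketch of the whole routine after this patch. kmap_atomic() enters a pagefault-disabled atomic context, so a copy_from_user() that would need to fault in the source page instead returns the number of bytes left uncopied; kmap() imposes no such restriction. The loop body below mirrors the hunks that follow, while the loop tail (break on partial copy, cond_resched(), returning the remaining byte count) is reconstructed from memory of the mainline function rather than quoted from this diff:

long copy_huge_page_from_user(struct page *dst_page,
				const void __user *usr_src,
				unsigned int pages_per_huge_page,
				bool allow_pagefault)
{
	void *src = (void *)usr_src;
	void *page_kaddr;
	unsigned long i, rc = 0;
	unsigned long ret_val = pages_per_huge_page * PAGE_SIZE;

	for (i = 0; i < pages_per_huge_page; i++) {
		if (allow_pagefault)
			/* kmap() may sleep; page faults stay enabled */
			page_kaddr = kmap(dst_page + i);
		else
			/* kmap_atomic() disables page faults for the copy */
			page_kaddr = kmap_atomic(dst_page + i);
		rc = copy_from_user(page_kaddr,
				(const void __user *)(src + i * PAGE_SIZE),
				PAGE_SIZE);
		if (allow_pagefault)
			kunmap(dst_page + i);		/* takes the struct page */
		else
			kunmap_atomic(page_kaddr);	/* takes the kernel address */
		/* rc is the number of bytes NOT copied */
		ret_val -= (PAGE_SIZE - rc);
		if (rc)
			break;

		cond_resched();
	}
	return ret_val;
}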
Diffstat (limited to 'mm/memory.c')
-rw-r--r--	mm/memory.c	13
1 file changed, 10 insertions(+), 3 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 4ade940d105c..d7676a68c80a 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4155,7 +4155,8 @@ void copy_user_huge_page(struct page *dst, struct page *src,
 
 long copy_huge_page_from_user(struct page *dst_page,
 				const void __user *usr_src,
-				unsigned int pages_per_huge_page)
+				unsigned int pages_per_huge_page,
+				bool allow_pagefault)
 {
 	void *src = (void *)usr_src;
 	void *page_kaddr;
@@ -4163,11 +4164,17 @@ long copy_huge_page_from_user(struct page *dst_page,
 	unsigned long ret_val = pages_per_huge_page * PAGE_SIZE;
 
 	for (i = 0; i < pages_per_huge_page; i++) {
-		page_kaddr = kmap_atomic(dst_page + i);
+		if (allow_pagefault)
+			page_kaddr = kmap(dst_page + i);
+		else
+			page_kaddr = kmap_atomic(dst_page + i);
 		rc = copy_from_user(page_kaddr,
 				(const void __user *)(src + i * PAGE_SIZE),
 				PAGE_SIZE);
-		kunmap_atomic(page_kaddr);
+		if (allow_pagefault)
+			kunmap(dst_page + i);
+		else
+			kunmap_atomic(page_kaddr);
 		ret_val -= (PAGE_SIZE - rc);
 		if (rc)
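For illustration, this is roughly how the flag ends up being used once the series lands: the retry path in __mcopy_atomic_hugetlb (mm/userfaultfd.c) drops mmap_sem before copying, so it passes allow_pagefault == true. The snippet follows the shape of the mainline caller but is quoted from memory, so treat the exact names and error handling as approximate:

	/*
	 * Retry-path sketch (approximating mm/userfaultfd.c): -ENOENT from
	 * the first copy attempt means "retry the copy outside the lock".
	 */
	if (unlikely(err == -ENOENT)) {
		up_read(&dst_mm->mmap_sem);	/* page faults are now safe */
		BUG_ON(!page);

		err = copy_huge_page_from_user(page,
				(const void __user *)src_addr,
				pages_per_huge_page(h), true);
		if (unlikely(err)) {
			err = -EFAULT;
			goto out;
		}
	}

The first, locked copy attempt (in hugetlb_mcopy_atomic_pte()) keeps allow_pagefault == false, preserving the cheaper kmap_atomic() path there.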