path: root/mm/gup.c
author	Peter Xu <peterx@redhat.com>	2023-11-23 13:02:22 -0500
committer	Andrew Morton <akpm@linux-foundation.org>	2023-12-10 16:51:52 -0800
commit	e9119fb65761f124b31743b598ce04b8f15a6fe3 (patch)
tree	8f9808b829310b9e3c4924fa2a7b13d150c935e5 /mm/gup.c
parent	ac3f3b0a55518056bc80ed32a41931c99e1f7d81 (diff)
mm/gup: fix follow_devmap_p[mu]d() on page==NULL handling
This is a bug found not by any report but only by code observation.

When GUP sees a devpmd/devpud and page==NULL is returned, it means a
fault is probably required.  Falling through with page==NULL here can
cause unexpected behavior.  Fix both cases by catching the page==NULL
cases with no_page_table().

Link: https://lkml.kernel.org/r/20231123180222.1048297-1-peterx@redhat.com
Fixes: 3565fce3a659 ("mm, x86: get_user_pages() for dax mappings")
Fixes: 080dbb618b4b ("mm/follow_page_mask: split follow_page_mask to smaller functions.")
Signed-off-by: Peter Xu <peterx@redhat.com>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: Christoph Hellwig <hch@lst.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
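For context, this is roughly how the fixed devmap branch of follow_pmd_mask() reads with the patch applied. The added return and the surrounding context lines come from the hunk below; the pmd_devmap()/follow_devmap_pmd() lines that open the block are paraphrased from mainline mm/gup.c of this era and may differ slightly from the exact tree:

	if (pmd_devmap(pmdval)) {
		ptl = pmd_lock(mm, pmd);
		page = follow_devmap_pmd(vma, address, pmd, flags, &ctx->pgmap);
		spin_unlock(ptl);
		if (page)
			return page;
		/*
		 * page == NULL from follow_devmap_pmd() means no page is
		 * mapped yet and a fault is probably required.  Do not fall
		 * through to the THP/pte paths below; report "no page" so
		 * the caller can fault it in.
		 */
		return no_page_table(vma, flags);
	}

The follow_pud_mask() hunk applies the same pattern to the pud_devmap() case.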
Diffstat (limited to 'mm/gup.c')
 mm/gup.c | 2 ++
 1 file changed, 2 insertions(+), 0 deletions(-)
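For reference, no_page_table() around this point in history looks roughly like the sketch below (paraphrased from mm/gup.c of this era, not part of this patch). Returning it instead of falling through means GUP reports "no page" cleanly: NULL lets the caller go through handle_mm_fault(), while FOLL_DUMP on a VMA that could never fault gets ERR_PTR(-EFAULT) so core dumps leave a zero-filled hole.

/* Paraphrased sketch, not part of this patch; details may differ per tree. */
static struct page *no_page_table(struct vm_area_struct *vma,
				  unsigned int flags)
{
	/*
	 * Return an error rather than NULL only for FOLL_DUMP on VMAs that
	 * handle_mm_fault() could not possibly fill, so get_dump_page()
	 * leaves a hole instead of allocating pages for the dump.
	 */
	if ((flags & FOLL_DUMP) &&
	    (vma_is_anonymous(vma) || !vma->vm_ops->fault))
		return ERR_PTR(-EFAULT);
	return NULL;
}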
diff --git a/mm/gup.c b/mm/gup.c
index 231711efa390..0a5f0e91bfec 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -710,6 +710,7 @@ static struct page *follow_pmd_mask(struct vm_area_struct *vma,
 		spin_unlock(ptl);
 		if (page)
 			return page;
+		return no_page_table(vma, flags);
 	}
 	if (likely(!pmd_trans_huge(pmdval)))
 		return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
@@ -758,6 +759,7 @@ static struct page *follow_pud_mask(struct vm_area_struct *vma,
 		spin_unlock(ptl);
 		if (page)
 			return page;
+		return no_page_table(vma, flags);
 	}
 	if (unlikely(pud_bad(*pud)))
 		return no_page_table(vma, flags);