author		Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>	2017-07-06 15:38:56 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-07-06 16:24:33 -0700
commit		4dc71451a2078efcad2f66bd6ef130d2296827b1
tree		42f0896ed2bc9e7e59a4943b1b50fde56fb73a6a /mm/gup.c
parent		e22992923f741c951b830121655b58342fce202e
mm/follow_page_mask: add support for hugepage directory entry
Architectures like ppc64 support hugepage sizes that are not mapped to
any of the page table levels. Instead, they add an alternate page
table entry format called a hugepage directory (hugepd). A hugepd
indicates that the page table entry maps to a set of hugetlb pages.
Add support for this in the generic follow_page_mask code. We already
support this format in the generic gup code.
The default implementation prints a warning and returns NULL. We will
add ppc64 support in later patches.
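For reference, the weak default implementation lives in mm/hugetlb.c and
so does not appear in the mm/gup.c diffstat below. It looks roughly like
the following sketch; the signature matches the call sites in this diff,
but the exact WARN wording is an assumption:

	struct page * __weak
	follow_huge_pd(struct vm_area_struct *vma, unsigned long address,
		       hugepd_t hpd, int flags, int pdshift)
	{
		/* No architecture override: warn, then return NULL so the
		 * caller falls back to no_page_table(). */
		WARN(1, "hugepd follow called with no support for hugepage directory format\n");
		return NULL;
	}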
Link: http://lkml.kernel.org/r/1494926612-23928-7-git-send-email-aneesh.kumar@linux.vnet.ibm.com
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: Anshuman Khandual <khandual@linux.vnet.ibm.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Mike Kravetz <kravetz@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/gup.c')
-rw-r--r--	mm/gup.c	33
1 file changed, 33 insertions(+), 0 deletions(-)
@@ -226,6 +226,14 @@ static struct page *follow_pmd_mask(struct vm_area_struct *vma,
 			return page;
 		return no_page_table(vma, flags);
 	}
+	if (is_hugepd(__hugepd(pmd_val(*pmd)))) {
+		page = follow_huge_pd(vma, address,
+				      __hugepd(pmd_val(*pmd)), flags,
+				      PMD_SHIFT);
+		if (page)
+			return page;
+		return no_page_table(vma, flags);
+	}
 	if (pmd_devmap(*pmd)) {
 		ptl = pmd_lock(mm, pmd);
 		page = follow_devmap_pmd(vma, address, pmd, flags);
@@ -292,6 +300,14 @@ static struct page *follow_pud_mask(struct vm_area_struct *vma,
 			return page;
 		return no_page_table(vma, flags);
 	}
+	if (is_hugepd(__hugepd(pud_val(*pud)))) {
+		page = follow_huge_pd(vma, address,
+				      __hugepd(pud_val(*pud)), flags,
+				      PUD_SHIFT);
+		if (page)
+			return page;
+		return no_page_table(vma, flags);
+	}
 	if (pud_devmap(*pud)) {
 		ptl = pud_lock(mm, pud);
 		page = follow_devmap_pud(vma, address, pud, flags);
@@ -311,6 +327,7 @@ static struct page *follow_p4d_mask(struct vm_area_struct *vma,
 				    unsigned int flags, unsigned int *page_mask)
 {
 	p4d_t *p4d;
+	struct page *page;
 
 	p4d = p4d_offset(pgdp, address);
 	if (p4d_none(*p4d))
@@ -319,6 +336,14 @@ static struct page *follow_p4d_mask(struct vm_area_struct *vma,
 	if (unlikely(p4d_bad(*p4d)))
 		return no_page_table(vma, flags);
 
+	if (is_hugepd(__hugepd(p4d_val(*p4d)))) {
+		page = follow_huge_pd(vma, address,
+				      __hugepd(p4d_val(*p4d)), flags,
+				      P4D_SHIFT);
+		if (page)
+			return page;
+		return no_page_table(vma, flags);
+	}
 	return follow_pud_mask(vma, address, p4d, flags, page_mask);
 }
@@ -363,6 +388,14 @@ struct page *follow_page_mask(struct vm_area_struct *vma,
 			return page;
 		return no_page_table(vma, flags);
 	}
+	if (is_hugepd(__hugepd(pgd_val(*pgd)))) {
+		page = follow_huge_pd(vma, address,
+				      __hugepd(pgd_val(*pgd)), flags,
+				      PGDIR_SHIFT);
+		if (page)
+			return page;
+		return no_page_table(vma, flags);
+	}
 	return follow_p4d_mask(vma, address, pgd, flags, page_mask);
 }
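On architectures that do not provide a hugepage directory format, the new
is_hugepd() branches cost nothing: the generic fallback in
include/linux/hugetlb.h stubs out the type and the predicate, so the
compiler eliminates the branches entirely. A sketch of that fallback (not
part of this diff):

	/* Generic fallback when the architecture defines no hugepd format:
	 * is_hugepd() is constant 0, so the branches above compile away. */
	typedef struct { unsigned long pd; } hugepd_t;
	#define is_hugepd(hugepd) (0)
	#define __hugepd(x) ((hugepd_t) { (x) })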