author     Kirill A. Shutemov <kirill.shutemov@linux.intel.com>   2016-07-26 15:26:35 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>         2016-07-26 16:19:19 -0700
commit     e496cf3d782135c1cca0d154d4b924517ff58de0 (patch)
tree       884ae0082c0b344dd9200b28ca087a3cc0973fed /mm/khugepaged.c
parent     f3f0e1d2150b2b99da2cbdfaad000089efe9bf30 (diff)
thp: introduce CONFIG_TRANSPARENT_HUGE_PAGECACHE
For file mappings, we don't deposit page tables on THP allocation
because it's not strictly required to implement split_huge_pmd(): we can
just clear the pmd and let subsequent page faults reconstruct the page
table.
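
A minimal sketch of that idea, assuming kernel-style C (the helper and its body are illustrative, not the actual split_huge_pmd() implementation): since file-backed pages stay in the page cache, splitting a huge pmd can simply clear and flush it, and later faults rebuild the pte-level mappings on demand.

```c
/*
 * Illustrative sketch only, not the kernel's implementation.  For a
 * file-backed (non-anonymous) VMA there is no deposited page table to
 * withdraw, so "splitting" the huge pmd amounts to clearing it: the
 * pages remain in the page cache, and subsequent faults repopulate
 * the range with regular pte mappings.
 */
static void split_file_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
				unsigned long haddr)
{
	VM_BUG_ON_VMA(vma_is_anonymous(vma), vma);

	/* Clear the pmd and flush the TLB for the covered range. */
	pmdp_huge_clear_flush_notify(vma, haddr, pmd);
}
```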
But Power makes use of the deposited page table to address an MMU quirk.
Let's hide THP page cache, including huge tmpfs, under a separate config
option, so it can be forbidden on Power.
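
The Kconfig side of the change is outside this diffstat (which is limited to mm/khugepaged.c); a plausible sketch of such an option, assuming it defaults to on wherever THP is enabled and is simply excluded on Power:

```
# Sketch only: the real hunk lives in mm/Kconfig and is not shown
# here.  The !PPC dependency is the assumed mechanism for forbidding
# the feature on Power.
config TRANSPARENT_HUGE_PAGECACHE
	def_bool y
	depends on TRANSPARENT_HUGEPAGE && !PPC
```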
We can revert the patch later, once a solution for Power is found.
Link: http://lkml.kernel.org/r/1466021202-61880-36-git-send-email-kirill.shutemov@linux.intel.com
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/khugepaged.c')
-rw-r--r--   mm/khugepaged.c   11
1 file changed, 7 insertions, 4 deletions
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 573e4366d3b9..93d5f87c00d5 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -819,6 +819,8 @@ static bool hugepage_vma_check(struct vm_area_struct *vma)
 	    (vma->vm_flags & VM_NOHUGEPAGE))
 		return false;
 	if (shmem_file(vma->vm_file)) {
+		if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE))
+			return false;
 		return IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
 				HPAGE_PMD_NR);
 	}
@@ -1222,7 +1224,7 @@ static void collect_mm_slot(struct mm_slot *mm_slot)
 	}
 }

-#ifdef CONFIG_SHMEM
+#if defined(CONFIG_SHMEM) && defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE)
 static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
 {
 	struct vm_area_struct *vma;
@@ -1681,8 +1683,6 @@ skip:
 		if (khugepaged_scan.address < hstart)
 			khugepaged_scan.address = hstart;
 		VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
-		if (shmem_file(vma->vm_file) && !shmem_huge_enabled(vma))
-			goto skip;

 		while (khugepaged_scan.address < hend) {
 			int ret;
@@ -1694,9 +1694,12 @@ skip:
 				  khugepaged_scan.address + HPAGE_PMD_SIZE >
 				  hend);
 			if (shmem_file(vma->vm_file)) {
-				struct file *file = get_file(vma->vm_file);
+				struct file *file;
 				pgoff_t pgoff = linear_page_index(vma,
 						khugepaged_scan.address);
+				if (!shmem_huge_enabled(vma))
+					goto skip;
+				file = get_file(vma->vm_file);
 				up_read(&mm->mmap_sem);
 				ret = 1;
 				khugepaged_scan_shmem(mm, file->f_mapping,
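
Two details of the hunks above are worth noting. First, hugepage_vma_check() tests the option with IS_ENABLED() rather than an #ifdef, so the shmem branch is parsed and type-checked in every configuration while the compiler folds the test to a constant. A contrived standalone illustration of the pattern (the function name is made up; the config symbol is the real one):

```c
#include <linux/kconfig.h>	/* IS_ENABLED() */
#include <linux/types.h>	/* bool */

/*
 * Contrived example: IS_ENABLED(CONFIG_...) expands to a compile-time
 * 0 or 1, so when the option is off the early return below is folded
 * away as dead code, yet the whole function still compiles.
 */
static bool may_collapse_shmem(bool aligned)
{
	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE))
		return false;
	return aligned;
}
```

Second, the shmem_huge_enabled() check moves from the top of the per-VMA loop into the shmem branch, right before get_file(), so a shmem VMA that cannot receive huge pages is now skipped without taking a reference on its file.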