diff options
author | Konstantin Khlebnikov <khlebnikov@yandex-team.ru> | 2015-09-08 15:00:04 -0700 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2015-09-08 15:35:28 -0700 |
commit | 356515e7b64c2629f686109d426baaf868cdf7e8 (patch) | |
tree | 369d85857cd3eb403ebb0d07125b5e38e333c3f8 /fs/proc | |
parent | deb945441b9408d6cd15751f5232eeca9f50a5a1 (diff) | |
download | linux-356515e7b64c2629f686109d426baaf868cdf7e8.tar.gz linux-356515e7b64c2629f686109d426baaf868cdf7e8.tar.bz2 linux-356515e7b64c2629f686109d426baaf868cdf7e8.zip |
pagemap: rework hugetlb and thp report
This patch moves pmd dissection out of the reporting loop: huge pages are
reported as a bunch of normal pages with contiguous PFNs.
Add missing "FILE" bit in hugetlb vmas.
Signed-off-by: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
Reviewed-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Reviewed-by: Mark Williamson <mwilliamson@undo-software.com>
Tested-by: Mark Williamson <mwilliamson@undo-software.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'fs/proc')
-rw-r--r-- | fs/proc/task_mmu.c | 100 |
1 file changed, 44 insertions(+), 56 deletions(-)
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 41c0a0a500f7..98ba9ea96b19 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -1040,33 +1040,7 @@ static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm, return make_pme(frame, flags); } -#ifdef CONFIG_TRANSPARENT_HUGEPAGE -static pagemap_entry_t thp_pmd_to_pagemap_entry(struct pagemapread *pm, - pmd_t pmd, int offset, u64 flags) -{ - u64 frame = 0; - - /* - * Currently pmd for thp is always present because thp can not be - * swapped-out, migrated, or HWPOISONed (split in such cases instead.) - * This if-check is just to prepare for future implementation. - */ - if (pmd_present(pmd)) { - frame = pmd_pfn(pmd) + offset; - flags |= PM_PRESENT; - } - - return make_pme(frame, flags); -} -#else -static pagemap_entry_t thp_pmd_to_pagemap_entry(struct pagemapread *pm, - pmd_t pmd, int offset, u64 flags) -{ - return make_pme(0, 0); -} -#endif - -static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, +static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end, struct mm_walk *walk) { struct vm_area_struct *vma = walk->vma; @@ -1075,35 +1049,48 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, pte_t *pte, *orig_pte; int err = 0; - if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) { - u64 flags = 0; +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + if (pmd_trans_huge_lock(pmdp, vma, &ptl) == 1) { + u64 flags = 0, frame = 0; + pmd_t pmd = *pmdp; - if ((vma->vm_flags & VM_SOFTDIRTY) || pmd_soft_dirty(*pmd)) + if ((vma->vm_flags & VM_SOFTDIRTY) || pmd_soft_dirty(pmd)) flags |= PM_SOFT_DIRTY; + /* + * Currently pmd for thp is always present because thp + * can not be swapped-out, migrated, or HWPOISONed + * (split in such cases instead.) + * This if-check is just to prepare for future implementation. 
+ */ + if (pmd_present(pmd)) { + flags |= PM_PRESENT; + frame = pmd_pfn(pmd) + + ((addr & ~PMD_MASK) >> PAGE_SHIFT); + } + for (; addr != end; addr += PAGE_SIZE) { - unsigned long offset; - pagemap_entry_t pme; + pagemap_entry_t pme = make_pme(frame, flags); - offset = (addr & ~PAGEMAP_WALK_MASK) >> - PAGE_SHIFT; - pme = thp_pmd_to_pagemap_entry(pm, *pmd, offset, flags); err = add_to_pagemap(addr, &pme, pm); if (err) break; + if (flags & PM_PRESENT) + frame++; } spin_unlock(ptl); return err; } - if (pmd_trans_unstable(pmd)) + if (pmd_trans_unstable(pmdp)) return 0; +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ /* * We can assume that @vma always points to a valid one and @end never * goes beyond vma->vm_end. */ - orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); + orig_pte = pte = pte_offset_map_lock(walk->mm, pmdp, addr, &ptl); for (; addr < end; pte++, addr += PAGE_SIZE) { pagemap_entry_t pme; @@ -1120,39 +1107,40 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, } #ifdef CONFIG_HUGETLB_PAGE -static pagemap_entry_t huge_pte_to_pagemap_entry(struct pagemapread *pm, - pte_t pte, int offset, u64 flags) -{ - u64 frame = 0; - - if (pte_present(pte)) { - frame = pte_pfn(pte) + offset; - flags |= PM_PRESENT; - } - - return make_pme(frame, flags); -} - /* This function walks within one hugetlb entry in the single call */ -static int pagemap_hugetlb_range(pte_t *pte, unsigned long hmask, +static int pagemap_hugetlb_range(pte_t *ptep, unsigned long hmask, unsigned long addr, unsigned long end, struct mm_walk *walk) { struct pagemapread *pm = walk->private; struct vm_area_struct *vma = walk->vma; + u64 flags = 0, frame = 0; int err = 0; - u64 flags = 0; - pagemap_entry_t pme; + pte_t pte; if (vma->vm_flags & VM_SOFTDIRTY) flags |= PM_SOFT_DIRTY; + pte = huge_ptep_get(ptep); + if (pte_present(pte)) { + struct page *page = pte_page(pte); + + if (!PageAnon(page)) + flags |= PM_FILE; + + flags |= PM_PRESENT; + frame = pte_pfn(pte) + + 
((addr & ~hmask) >> PAGE_SHIFT); + } + for (; addr != end; addr += PAGE_SIZE) { - int offset = (addr & ~hmask) >> PAGE_SHIFT; - pme = huge_pte_to_pagemap_entry(pm, *pte, offset, flags); + pagemap_entry_t pme = make_pme(frame, flags); + err = add_to_pagemap(addr, &pme, pm); if (err) return err; + if (flags & PM_PRESENT) + frame++; } cond_resched(); @@ -1216,7 +1204,7 @@ static ssize_t pagemap_read(struct file *file, char __user *buf, if (!pm.buffer) goto out_mm; - pagemap_walk.pmd_entry = pagemap_pte_range; + pagemap_walk.pmd_entry = pagemap_pmd_range; pagemap_walk.pte_hole = pagemap_pte_hole; #ifdef CONFIG_HUGETLB_PAGE pagemap_walk.hugetlb_entry = pagemap_hugetlb_range; |