path: root/fs/proc/task_mmu.c
author	Peter Feiner <pfeiner@google.com>	2014-10-09 15:28:32 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-10-09 22:25:58 -0400
commit	81d0fa623c5b8dbd5279d9713094b0f9b0a00fb4 (patch)
tree	840455ffeafaef31a53b94dc74df80d2561c713e /fs/proc/task_mmu.c
parent	3193913ce62c63056bc67a6ae378beaf494afa66 (diff)
download	linux-stable-81d0fa623c5b8dbd5279d9713094b0f9b0a00fb4.tar.gz
		linux-stable-81d0fa623c5b8dbd5279d9713094b0f9b0a00fb4.tar.bz2
		linux-stable-81d0fa623c5b8dbd5279d9713094b0f9b0a00fb4.zip
mm: softdirty: unmapped addresses between VMAs are clean
If a /proc/pid/pagemap read spans a [VMA, an unmapped region, then a
VM_SOFTDIRTY VMA], the virtual pages in the unmapped region are reported
as softdirty.  Here's a program to demonstrate the bug:

	#include <assert.h>
	#include <fcntl.h>
	#include <stdint.h>
	#include <sys/mman.h>
	#include <unistd.h>

	int main() {
		const uint64_t PAGEMAP_SOFTDIRTY = 1ul << 55;
		uint64_t pme[3];
		int fd = open("/proc/self/pagemap", O_RDONLY);
		char *m = mmap(NULL, 3 * getpagesize(), PROT_READ,
			       MAP_ANONYMOUS | MAP_SHARED, -1, 0);

		munmap(m + getpagesize(), getpagesize());
		pread(fd, pme, 24, (unsigned long) m / getpagesize() * 8);
		assert(pme[0] & PAGEMAP_SOFTDIRTY);    /* passes */
		assert(!(pme[1] & PAGEMAP_SOFTDIRTY)); /* fails */
		assert(pme[2] & PAGEMAP_SOFTDIRTY);    /* passes */
		return 0;
	}

(Note that all pages in new VMAs are softdirty until cleared.)

Tested:
	Used the program given above.  I'm going to include this code in
	a selftest in the future.

[n-horiguchi@ah.jp.nec.com: prevent pagemap_pte_range() from overrunning]
Signed-off-by: Peter Feiner <pfeiner@google.com>
Cc: "Kirill A. Shutemov" <kirill@shutemov.name>
Cc: Cyrill Gorcunov <gorcunov@openvz.org>
Cc: Pavel Emelyanov <xemul@parallels.com>
Cc: Jamie Liu <jamieliu@google.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
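The "until cleared" in the note above refers to the soft-dirty interface:
writing the value 4 to /proc/pid/clear_refs clears the soft-dirty bit on
every page of the task, so a later pagemap read reports only pages written
since then; checkpoint/restore tools use this clear-then-read cycle to
track dirtied pages. A minimal sketch of that reset step (the helper name
clear_soft_dirty is ours, not a kernel or libc interface):

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/types.h>
	#include <unistd.h>

	/* Write "4" to /proc/<pid>/clear_refs, which clears the
	 * soft-dirty bit for every page mapped by the task. */
	static int clear_soft_dirty(pid_t pid)
	{
		char path[64];
		int fd, ret;

		snprintf(path, sizeof(path), "/proc/%d/clear_refs", (int) pid);
		fd = open(path, O_WRONLY);
		if (fd < 0)
			return -1;
		ret = (write(fd, "4", 1) == 1) ? 0 : -1;
		close(fd);
		return ret;
	}

	int main(void)
	{
		/* Clear our own soft-dirty bits, then a pagemap read would
		 * show only pages written after this point. */
		return clear_soft_dirty(getpid()) ? 1 : 0;
	}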
Diffstat (limited to 'fs/proc/task_mmu.c')
-rw-r--r--	fs/proc/task_mmu.c | 61
1 file changed, 40 insertions(+), 21 deletions(-)
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 1acec26a3758..b7a7dc963a35 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -1027,7 +1027,6 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 	spinlock_t *ptl;
 	pte_t *pte;
 	int err = 0;
-	pagemap_entry_t pme = make_pme(PM_NOT_PRESENT(pm->v2));
 
 	/* find the first VMA at or above 'addr' */
 	vma = find_vma(walk->mm, addr);
@@ -1041,6 +1040,7 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 
 		for (; addr != end; addr += PAGE_SIZE) {
 			unsigned long offset;
+			pagemap_entry_t pme;
 
 			offset = (addr & ~PAGEMAP_WALK_MASK) >>
 					PAGE_SHIFT;
@@ -1055,32 +1055,51 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 	if (pmd_trans_unstable(pmd))
 		return 0;
-	for (; addr != end; addr += PAGE_SIZE) {
-		int flags2;
-
-		/* check to see if we've left 'vma' behind
-		 * and need a new, higher one */
-		if (vma && (addr >= vma->vm_end)) {
-			vma = find_vma(walk->mm, addr);
-			if (vma && (vma->vm_flags & VM_SOFTDIRTY))
-				flags2 = __PM_SOFT_DIRTY;
-			else
-				flags2 = 0;
-			pme = make_pme(PM_NOT_PRESENT(pm->v2) | PM_STATUS2(pm->v2, flags2));
+
+	while (1) {
+		/* End of address space hole, which we mark as non-present. */
+		unsigned long hole_end;
+
+		if (vma)
+			hole_end = min(end, vma->vm_start);
+		else
+			hole_end = end;
+
+		for (; addr < hole_end; addr += PAGE_SIZE) {
+			pagemap_entry_t pme = make_pme(PM_NOT_PRESENT(pm->v2));
+
+			err = add_to_pagemap(addr, &pme, pm);
+			if (err)
+				return err;
 		}
-		/* check that 'vma' actually covers this address,
-		 * and that it isn't a huge page vma */
-		if (vma && (vma->vm_start <= addr) &&
-		    !is_vm_hugetlb_page(vma)) {
+		if (!vma || vma->vm_start >= end)
+			break;
+		/*
+		 * We can't possibly be in a hugetlb VMA. In general,
+		 * for a mm_walk with a pmd_entry and a hugetlb_entry,
+		 * the pmd_entry can only be called on addresses in a
+		 * hugetlb if the walk starts in a non-hugetlb VMA and
+		 * spans a hugepage VMA. Since pagemap_read walks are
+		 * PMD-sized and PMD-aligned, this will never be true.
+		 */
+		BUG_ON(is_vm_hugetlb_page(vma));
+
+		/* Addresses in the VMA. */
+		for (; addr < min(end, vma->vm_end); addr += PAGE_SIZE) {
+			pagemap_entry_t pme;
 			pte = pte_offset_map(pmd, addr);
 			pte_to_pagemap_entry(&pme, pm, vma, addr, *pte);
-			/* unmap before userspace copy */
 			pte_unmap(pte);
+			err = add_to_pagemap(addr, &pme, pm);
+			if (err)
+				return err;
 		}
-		err = add_to_pagemap(addr, &pme, pm);
-		if (err)
-			return err;
+
+		if (addr == end)
+			break;
+
+		vma = find_vma(walk->mm, addr);
 	}
 	cond_resched();
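The rewritten loop alternates between two phases: pages in the hole before
the next VMA are emitted as plain non-present entries (so they can never
inherit VM_SOFTDIRTY from a neighbouring mapping), and only pages inside a
VMA consult the PTEs. A rough user-space model of that control flow, as a
sketch only: vma_t, find_first_vma(), and the printf() reporting are our
stand-ins for the kernel's vm_area_struct, find_vma(), and
add_to_pagemap(), not kernel code:

	#include <stddef.h>
	#include <stdio.h>

	#define PAGE_SIZE 4096UL

	static unsigned long min_ul(unsigned long a, unsigned long b)
	{
		return a < b ? a : b;
	}

	/* Stand-in for vm_area_struct: sorted, non-overlapping [start, end). */
	typedef struct { unsigned long start, end; } vma_t;

	/* Stand-in for find_vma(): first range whose end lies above addr. */
	static const vma_t *find_first_vma(const vma_t *v, size_t n,
					   unsigned long addr)
	{
		for (size_t i = 0; i < n; i++)
			if (addr < v[i].end)
				return &v[i];
		return NULL;
	}

	static void walk(const vma_t *vmas, size_t n,
			 unsigned long addr, unsigned long end)
	{
		const vma_t *vma = find_first_vma(vmas, n, addr);

		while (1) {
			/* Pages in the hole before the next VMA: never soft-dirty. */
			unsigned long hole_end = vma ? min_ul(end, vma->start) : end;

			for (; addr < hole_end; addr += PAGE_SIZE)
				printf("%#lx: not present, clean\n", addr);

			if (!vma || vma->start >= end)
				break;

			/* Pages inside the VMA: state comes from the mapping itself. */
			for (; addr < min_ul(end, vma->end); addr += PAGE_SIZE)
				printf("%#lx: in VMA [%#lx, %#lx)\n",
				       addr, vma->start, vma->end);

			if (addr == end)
				break;
			vma = find_first_vma(vmas, n, addr);
		}
	}

	int main(void)
	{
		/* Layout from the demo program: VMA, one-page hole, VMA. */
		const vma_t vmas[] = {
			{ 0x100000,                 0x100000 + PAGE_SIZE },
			{ 0x100000 + 2 * PAGE_SIZE, 0x100000 + 3 * PAGE_SIZE },
		};

		walk(vmas, sizeof(vmas) / sizeof(vmas[0]),
		     0x100000, 0x100000 + 3 * PAGE_SIZE);
		return 0;
	}

Run on the demo layout, the model reports the middle page as "not present,
clean" regardless of the flags of the VMA that follows it, which is exactly
the property the failing assert in the commit message checks for.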