author     Hugh Dickins <hugh@veritas.com>        2005-10-29 18:16:27 -0700
committer  Linus Torvalds <torvalds@g5.osdl.org>  2005-10-29 21:40:40 -0700
commit  705e87c0c3c38424f7f30556c85bc20e808d2f59 (patch)
tree    7a237e6266f4801385e1226cc497b47e3a2458bd /fs/proc
parent  8f4e2101fd7df9031a754eedb82e2060b51f8c45 (diff)
[PATCH] mm: pte_offset_map_lock loops
Convert those common loops using page_table_lock on the outside and pte_offset_map within to use just pte_offset_map_lock within instead.

These all hold mmap_sem (some exclusively, some not), so at no level can a page table be whipped away from beneath them. But whereas pte_alloc loops tested with the "atomic" pmd_present, these loops are testing with pmd_none, which on i386 PAE tests both lower and upper halves.

That's now unsafe, so add a cast into pmd_none to test only the vital lower half: we lose a little sensitivity to a corrupt middle directory, but not enough to worry about. It appears that i386 and UML were the only architectures vulnerable in this way, and pgd and pud no problem.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
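Boiled down, the conversion this patch applies at each site looks like the sketch below (same 2.6-era interfaces as the smaps walker in the diff that follows; examine_pte() is a hypothetical stand-in for each loop's real body):

	/* Before: the whole walk is serialized on the per-mm page_table_lock. */
	spin_lock(&mm->page_table_lock);
	pte = pte_offset_map(pmd, addr);
	do {
		examine_pte(*pte);
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap(pte - 1);
	spin_unlock(&mm->page_table_lock);

	/* After: pte_offset_map_lock() maps the pte page and takes the
	 * page table lock in one step; pte_unmap_unlock() undoes both.
	 * mmap_sem, held by all these paths, keeps the page table itself
	 * from being freed underneath the walk. */
	spinlock_t *ptl;
	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	do {
		examine_pte(*pte);
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(pte - 1, ptl);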
Diffstat (limited to 'fs/proc')
-rw-r--r--  fs/proc/task_mmu.c | 17 ++++++-----------
1 file changed, 6 insertions(+), 11 deletions(-)
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 7c89b4549049..7e5e7ec2e36d 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -203,13 +203,14 @@ static void smaps_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 				struct mem_size_stats *mss)
 {
 	pte_t *pte, ptent;
+	spinlock_t *ptl;
 	unsigned long pfn;
 	struct page *page;
 
-	pte = pte_offset_map(pmd, addr);
+	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 	do {
 		ptent = *pte;
-		if (pte_none(ptent) || !pte_present(ptent))
+		if (!pte_present(ptent))
 			continue;
 
 		mss->resident += PAGE_SIZE;
@@ -230,8 +231,8 @@ static void smaps_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 			mss->private_clean += PAGE_SIZE;
 		}
 	} while (pte++, addr += PAGE_SIZE, addr != end);
-	pte_unmap(pte - 1);
-	cond_resched_lock(&vma->vm_mm->page_table_lock);
+	pte_unmap_unlock(pte - 1, ptl);
+	cond_resched();
 }
 
 static inline void smaps_pmd_range(struct vm_area_struct *vma, pud_t *pud,
@@ -285,17 +286,11 @@ static inline void smaps_pgd_range(struct vm_area_struct *vma,
 static int show_smap(struct seq_file *m, void *v)
 {
 	struct vm_area_struct *vma = v;
-	struct mm_struct *mm = vma->vm_mm;
 	struct mem_size_stats mss;
 
 	memset(&mss, 0, sizeof mss);
-
-	if (mm) {
-		spin_lock(&mm->page_table_lock);
+	if (vma->vm_mm)
 		smaps_pgd_range(vma, vma->vm_start, vma->vm_end, &mss);
-		spin_unlock(&mm->page_table_lock);
-	}
-
 	return show_map_internal(m, v, &mss);
 }
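For reference, the fs/proc hunks above only stop taking page_table_lock; what makes the now-unlocked pmd_none() test safe is the i386 PAE change described in the commit message, which lives outside this diffstat. A sketch, assuming the 2.6-era include/asm-i386/pgtable-3level.h definitions:

	/* Under PAE, pmd_val() is 64-bit, so the old !pmd_val(x) read the
	 * lower and upper halves non-atomically.  Casting to unsigned long
	 * tests only the vital lower half: a little sensitivity to a
	 * corrupt middle directory is lost, but no lock is needed. */
	#define pmd_none(x)	(!(unsigned long)pmd_val(x))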