author		Hugh Dickins <hugh@veritas.com>	2005-10-29 18:16:05 -0700
committer	Linus Torvalds <torvalds@g5.osdl.org>	2005-10-29 21:40:38 -0700
commit		4294621f41a85497019fae64341aa5351a1921b7
tree		fdeb7eb44384a99d0679ffa6de5019bab0ea2166 /mm
parent		404351e67a9facb475abf1492245374a28d13e90
[PATCH] mm: rss = file_rss + anon_rss
I was lazy when we added anon_rss, and chose to change as few places as
possible. So currently each anonymous page has to be counted twice, in rss
and in anon_rss, which won't be so good if those are atomic counts in some
configurations.
Change that around: keep file_rss and anon_rss separately, and add them
together (with the get_mm_rss macro) when the total is needed; reading two
atomics is much cheaper than updating two atomics. And update anon_rss
upfront, typically in memory.c, not tucked away in page_add_anon_rmap.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
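[Note: the get_mm_rss macro itself is added in include/linux/sched.h, which
falls outside the mm/-limited diffstat below. As a rough sketch of the
per-mm counter interface this patch builds on (an approximation for
illustration, not the verbatim header):

/*
 * Per-mm counter helpers, roughly as in include/linux/sched.h at this
 * point: plain unsigned long fields behind accessor macros. The commit
 * message anticipates that some configurations may later make these
 * atomic, which is why double-updating per anonymous page would hurt.
 */
#define set_mm_counter(mm, member, value) ((mm)->_##member = (value))
#define get_mm_counter(mm, member)        ((mm)->_##member)
#define add_mm_counter(mm, member, value) ((mm)->_##member += (value))
#define inc_mm_counter(mm, member)        ((mm)->_##member++)
#define dec_mm_counter(mm, member)        ((mm)->_##member--)

/*
 * The point of the patch: total rss is derived on read by summing the
 * two counters, so each update site touches exactly one of them.
 */
#define get_mm_rss(mm) \
	(get_mm_counter(mm, file_rss) + get_mm_counter(mm, anon_rss))

Each hunk below then picks either file_rss or anon_rss at the update site.]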
Diffstat (limited to 'mm')
-rw-r--r--	mm/fremap.c	|  4
-rw-r--r--	mm/hugetlb.c	|  6
-rw-r--r--	mm/memory.c	| 31
-rw-r--r--	mm/nommu.c	|  2
-rw-r--r--	mm/rmap.c	|  8
-rw-r--r--	mm/swapfile.c	|  2
6 files changed, 27 insertions, 26 deletions
diff --git a/mm/fremap.c b/mm/fremap.c
index ab23a0673c35..fd7f2a17ff3e 100644
--- a/mm/fremap.c
+++ b/mm/fremap.c
@@ -39,7 +39,7 @@ static inline void zap_pte(struct mm_struct *mm, struct vm_area_struct *vma,
 					set_page_dirty(page);
 				page_remove_rmap(page);
 				page_cache_release(page);
-				dec_mm_counter(mm, rss);
+				dec_mm_counter(mm, file_rss);
 			}
 		}
 	} else {
@@ -95,7 +95,7 @@ int install_page(struct mm_struct *mm, struct vm_area_struct *vma,
 
 	zap_pte(mm, vma, addr, pte);
 
-	inc_mm_counter(mm,rss);
+	inc_mm_counter(mm, file_rss);
 	flush_icache_page(vma, page);
 	set_pte_at(mm, addr, pte, mk_pte(page, prot));
 	page_add_file_rmap(page);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 61d380678030..094455bcbbf7 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -286,7 +286,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 		entry = *src_pte;
 		ptepage = pte_page(entry);
 		get_page(ptepage);
-		add_mm_counter(dst, rss, HPAGE_SIZE / PAGE_SIZE);
+		add_mm_counter(dst, file_rss, HPAGE_SIZE / PAGE_SIZE);
 		set_huge_pte_at(dst, addr, dst_pte, entry);
 	}
 	spin_unlock(&src->page_table_lock);
@@ -324,7 +324,7 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 
 		page = pte_page(pte);
 		put_page(page);
-		add_mm_counter(mm, rss, - (HPAGE_SIZE / PAGE_SIZE));
+		add_mm_counter(mm, file_rss, (int) -(HPAGE_SIZE / PAGE_SIZE));
 	}
 	flush_tlb_range(vma, start, end);
 }
@@ -386,7 +386,7 @@ int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
 				goto out;
 			}
 		}
-		add_mm_counter(mm, rss, HPAGE_SIZE / PAGE_SIZE);
+		add_mm_counter(mm, file_rss, HPAGE_SIZE / PAGE_SIZE);
 		set_huge_pte_at(mm, addr, pte, make_huge_pte(vma, page));
 	}
 out:
diff --git a/mm/memory.c b/mm/memory.c
index 51eb38574830..59d42e50fa53 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -397,9 +397,10 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 		pte = pte_mkclean(pte);
 	pte = pte_mkold(pte);
 	get_page(page);
-	inc_mm_counter(dst_mm, rss);
 	if (PageAnon(page))
 		inc_mm_counter(dst_mm, anon_rss);
+	else
+		inc_mm_counter(dst_mm, file_rss);
 	set_pte_at(dst_mm, addr, dst_pte, pte);
 	page_dup_rmap(page);
 }
@@ -581,8 +582,8 @@ static void zap_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
 					set_page_dirty(page);
 				if (pte_young(ptent))
 					mark_page_accessed(page);
+				dec_mm_counter(tlb->mm, file_rss);
 			}
-			dec_mm_counter(tlb->mm, rss);
 			page_remove_rmap(page);
 			tlb_remove_page(tlb, page);
 			continue;
@@ -1290,13 +1291,15 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	spin_lock(&mm->page_table_lock);
 	page_table = pte_offset_map(pmd, address);
 	if (likely(pte_same(*page_table, orig_pte))) {
-		if (PageAnon(old_page))
-			dec_mm_counter(mm, anon_rss);
 		if (PageReserved(old_page))
-			inc_mm_counter(mm, rss);
-		else
+			inc_mm_counter(mm, anon_rss);
+		else {
 			page_remove_rmap(old_page);
-
+			if (!PageAnon(old_page)) {
+				inc_mm_counter(mm, anon_rss);
+				dec_mm_counter(mm, file_rss);
+			}
+		}
 		flush_cache_page(vma, address, pfn);
 		entry = mk_pte(new_page, vma->vm_page_prot);
 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
@@ -1701,7 +1704,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 
 	/* The page isn't present yet, go ahead with the fault. */
 
-	inc_mm_counter(mm, rss);
+	inc_mm_counter(mm, anon_rss);
 	pte = mk_pte(page, vma->vm_page_prot);
 	if (write_access && can_share_swap_page(page)) {
 		pte = maybe_mkwrite(pte_mkdirty(pte), vma);
@@ -1774,7 +1777,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			page_cache_release(page);
 			goto unlock;
 		}
-		inc_mm_counter(mm, rss);
+		inc_mm_counter(mm, anon_rss);
 		entry = mk_pte(page, vma->vm_page_prot);
 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
 		lru_cache_add_active(page);
@@ -1887,19 +1890,19 @@ retry:
 	 */
 	/* Only go through if we didn't race with anybody else... */
 	if (pte_none(*page_table)) {
-		if (!PageReserved(new_page))
-			inc_mm_counter(mm, rss);
-
 		flush_icache_page(vma, new_page);
 		entry = mk_pte(new_page, vma->vm_page_prot);
 		if (write_access)
 			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
 		set_pte_at(mm, address, page_table, entry);
 		if (anon) {
+			inc_mm_counter(mm, anon_rss);
 			lru_cache_add_active(new_page);
 			page_add_anon_rmap(new_page, vma, address);
-		} else
+		} else if (!PageReserved(new_page)) {
+			inc_mm_counter(mm, file_rss);
 			page_add_file_rmap(new_page);
+		}
 	} else {
 		/* One of our sibling threads was faster, back out. */
 		page_cache_release(new_page);
@@ -2192,7 +2195,7 @@ EXPORT_SYMBOL(vmalloc_to_pfn);
 void update_mem_hiwater(struct task_struct *tsk)
 {
 	if (tsk->mm) {
-		unsigned long rss = get_mm_counter(tsk->mm, rss);
+		unsigned long rss = get_mm_rss(tsk->mm);
 
 		if (tsk->mm->hiwater_rss < rss)
 			tsk->mm->hiwater_rss = rss;
diff --git a/mm/nommu.c b/mm/nommu.c
index 0ef241ae3763..599924886eb5 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -1083,7 +1083,7 @@ void update_mem_hiwater(struct task_struct *tsk)
 	unsigned long rss;
 
 	if (likely(tsk->mm)) {
-		rss = get_mm_counter(tsk->mm, rss);
+		rss = get_mm_rss(tsk->mm);
 		if (tsk->mm->hiwater_rss < rss)
 			tsk->mm->hiwater_rss = rss;
 		if (tsk->mm->hiwater_vm < tsk->mm->total_vm)
diff --git a/mm/rmap.c b/mm/rmap.c
index 1fc559e09ca8..504757624cce 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -445,8 +445,6 @@ void page_add_anon_rmap(struct page *page,
 {
 	BUG_ON(PageReserved(page));
 
-	inc_mm_counter(vma->vm_mm, anon_rss);
-
 	if (atomic_inc_and_test(&page->_mapcount)) {
 		struct anon_vma *anon_vma = vma->anon_vma;
 
@@ -561,9 +559,9 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma)
 		set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
 		BUG_ON(pte_file(*pte));
 		dec_mm_counter(mm, anon_rss);
-	}
+	} else
+		dec_mm_counter(mm, file_rss);
 
-	dec_mm_counter(mm, rss);
 	page_remove_rmap(page);
 	page_cache_release(page);
 
@@ -667,7 +665,7 @@ static void try_to_unmap_cluster(unsigned long cursor,
 
 		page_remove_rmap(page);
 		page_cache_release(page);
-		dec_mm_counter(mm, rss);
+		dec_mm_counter(mm, file_rss);
 		(*mapcount)--;
 	}
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 05c851291241..296e0bbf7836 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -407,7 +407,7 @@ void free_swap_and_cache(swp_entry_t entry)
 static void unuse_pte(struct vm_area_struct *vma, pte_t *pte,
 	unsigned long addr, swp_entry_t entry, struct page *page)
 {
-	inc_mm_counter(vma->vm_mm, rss);
+	inc_mm_counter(vma->vm_mm, anon_rss);
 	get_page(page);
 	set_pte_at(vma->vm_mm, addr, pte,
 		   pte_mkold(mk_pte(page, vma->vm_page_prot)));