author     Nick Piggin <npiggin@suse.de>                      2008-10-18 20:26:51 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>     2008-10-20 08:52:31 -0700
commit     5344b7e648980cc2ca613ec03a56a8222ff48820
tree       f9f8773ae8e38fb91aec52ca9ad2bd81f039b565 /mm/internal.h
parent     ba470de43188cdbff795b5da43a1474523c6c2fb
vmstat: mlocked pages statistics
Add NR_MLOCK zone page state, which provides a (conservative) count of
mlocked pages (actually, the number of mlocked pages moved off the LRU).
Reworked by lts to fit in with the modified mlock page support in the
Reclaim Scalability series.
[kosaki.motohiro@jp.fujitsu.com: fix incorrect Mlocked field of /proc/meminfo]
[lee.schermerhorn@hp.com: mlocked-pages: add event counting with statistics]
Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Signed-off-by: Rik van Riel <riel@redhat.com>
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
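The new counter feeds the "Mlocked:" field of /proc/meminfo mentioned above. Below is a minimal user-space sketch, not part of this patch, that locks a buffer and reads that field before and after; the buffer size and the explicit fault-in step are assumptions made for the example, and the reported value is conservative because only pages actually moved off the LRU are accounted.

/* Illustrative only: observe the Mlocked field of /proc/meminfo move
 * when a buffer is locked with mlock().
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>

static long read_mlocked_kb(void)
{
	char line[128];
	long kb = -1;
	FILE *f = fopen("/proc/meminfo", "r");

	if (!f)
		return -1;
	while (fgets(line, sizeof(line), f)) {
		if (sscanf(line, "Mlocked: %ld kB", &kb) == 1)
			break;
	}
	fclose(f);
	return kb;
}

int main(void)
{
	const size_t len = 4 << 20;	/* 4 MiB, arbitrary example size */
	void *buf = malloc(len);

	if (!buf)
		return 1;
	memset(buf, 0, len);		/* touch the pages; mlock() also populates them */

	printf("Mlocked before: %ld kB\n", read_mlocked_kb());
	if (mlock(buf, len) != 0) {
		perror("mlock");
		return 1;
	}
	printf("Mlocked after:  %ld kB\n", read_mlocked_kb());

	munlock(buf, len);
	free(buf);
	return 0;
}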
Diffstat (limited to 'mm/internal.h')
-rw-r--r--   mm/internal.h | 16 +++++++++++++---
1 file changed, 13 insertions(+), 3 deletions(-)
diff --git a/mm/internal.h b/mm/internal.h
index 48e32f790571..1cfbf2e2bc9e 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -101,7 +101,10 @@ static inline int is_mlocked_vma(struct vm_area_struct *vma, struct page *page)
 	if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED))
 		return 0;
 
-	SetPageMlocked(page);
+	if (!TestSetPageMlocked(page)) {
+		inc_zone_page_state(page, NR_MLOCK);
+		count_vm_event(UNEVICTABLE_PGMLOCKED);
+	}
 	return 1;
 }
 
@@ -128,12 +131,19 @@ static inline void clear_page_mlock(struct page *page)
 
 /*
  * mlock_migrate_page - called only from migrate_page_copy() to
- * migrate the Mlocked page flag
+ * migrate the Mlocked page flag; update statistics.
  */
 static inline void mlock_migrate_page(struct page *newpage, struct page *page)
 {
-	if (TestClearPageMlocked(page))
+	if (TestClearPageMlocked(page)) {
+		unsigned long flags;
+
+		local_irq_save(flags);
+		__dec_zone_page_state(page, NR_MLOCK);
 		SetPageMlocked(newpage);
+		__inc_zone_page_state(newpage, NR_MLOCK);
+		local_irq_restore(flags);
+	}
 }
 
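A note on the migration hunk above: the double-underscore zone counter helpers expect the caller to have interrupts disabled, so both updates sit inside a single local_irq_save()/local_irq_restore() pair. Purely as an illustrative sketch, not part of the patch, the same logic written with the interrupt-safe helpers would behave the same while toggling interrupts twice; the function name here is hypothetical.

#include <linux/mm.h>
#include <linux/page-flags.h>
#include <linux/vmstat.h>

/* Hypothetical variant: irq-safe counter helpers instead of one
 * explicit irq-off section around the __-prefixed ones.
 */
static inline void mlock_migrate_page_irqsafe(struct page *newpage,
					      struct page *page)
{
	if (TestClearPageMlocked(page)) {
		dec_zone_page_state(page, NR_MLOCK);
		SetPageMlocked(newpage);
		inc_zone_page_state(newpage, NR_MLOCK);
	}
}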