author     Hugh Dickins <hugh@veritas.com>                        2008-02-04 22:28:49 -0800
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>   2008-02-05 09:44:15 -0800
commit     bb63be0a091c512fb566ee235eb8320d5831b6e2 (patch)
tree       3fc58d190902417429228c3066b6093a848c5974 /mm/swap_state.c
parent     818db35992c249dc32c1d86daf7d533fb0952f5d (diff)
tmpfs: move swap_state stats update
Both unionfs and memcgroups pose challenges to tmpfs and shmem. To help fix these,
it's best to move the swap swizzling functions from swap_state.c to shmem.c.
As a preliminary to that, move swap stats updating down into
__add_to_swap_cache, which will remain internal to swap_state.c.
Well, actually, just move down the incrementation of add_total: remove
noent_race and exist_race completely; they are relics of my 2.4.11 testing.
Alt-SysRq-m users will be thrilled if 2.6.25 is at last free of "race M+N"s.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/swap_state.c')
-rw-r--r--   mm/swap_state.c | 22 ++++++----------------
1 file changed, 6 insertions(+), 16 deletions(-)
diff --git a/mm/swap_state.c b/mm/swap_state.c
index e7875642e2cf..18fce3613e5a 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -52,16 +52,13 @@ static struct {
 	unsigned long del_total;
 	unsigned long find_success;
 	unsigned long find_total;
-	unsigned long noent_race;
-	unsigned long exist_race;
 } swap_cache_info;
 
 void show_swap_cache_info(void)
 {
-	printk("Swap cache: add %lu, delete %lu, find %lu/%lu, race %lu+%lu\n",
+	printk("Swap cache: add %lu, delete %lu, find %lu/%lu\n",
 		swap_cache_info.add_total, swap_cache_info.del_total,
-		swap_cache_info.find_success, swap_cache_info.find_total,
-		swap_cache_info.noent_race, swap_cache_info.exist_race);
+		swap_cache_info.find_success, swap_cache_info.find_total);
 	printk("Free swap  = %lukB\n", nr_swap_pages << (PAGE_SHIFT - 10));
 	printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
 }
@@ -89,6 +86,7 @@ static int __add_to_swap_cache(struct page *page, swp_entry_t entry,
 			set_page_private(page, entry.val);
 			total_swapcache_pages++;
 			__inc_zone_page_state(page, NR_FILE_PAGES);
+			INC_CACHE_INFO(add_total);
 		}
 		write_unlock_irq(&swapper_space.tree_lock);
 		radix_tree_preload_end();
@@ -102,10 +100,9 @@ static int add_to_swap_cache(struct page *page, swp_entry_t entry,
 	int error;
 
 	BUG_ON(PageLocked(page));
-	if (!swap_duplicate(entry)) {
-		INC_CACHE_INFO(noent_race);
+	if (!swap_duplicate(entry))
 		return -ENOENT;
-	}
+
 	SetPageLocked(page);
 	error = __add_to_swap_cache(page, entry, gfp_mask & GFP_KERNEL);
 	/*
@@ -114,11 +111,8 @@ static int add_to_swap_cache(struct page *page, swp_entry_t entry,
 	if (error) {
 		ClearPageLocked(page);
 		swap_free(entry);
-		if (error == -EEXIST)
-			INC_CACHE_INFO(exist_race);
 		return error;
 	}
-	INC_CACHE_INFO(add_total);
 	return 0;
 }
 
@@ -178,11 +172,9 @@ int add_to_swap(struct page * page, gfp_t gfp_mask)
 		case 0:				/* Success */
 			SetPageUptodate(page);
 			SetPageDirty(page);
-			INC_CACHE_INFO(add_total);
 			return 1;
 		case -EEXIST:
 			/* Raced with "speculative" read_swap_cache_async */
-			INC_CACHE_INFO(exist_race);
 			swap_free(entry);
 			continue;
 		default:
@@ -225,9 +217,7 @@ int move_to_swap_cache(struct page *page, swp_entry_t entry)
 		if (!swap_duplicate(entry))
 			BUG();
 		SetPageDirty(page);
-		INC_CACHE_INFO(add_total);
-	} else if (err == -EEXIST)
-		INC_CACHE_INFO(exist_race);
+	}
 	return err;
 }
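
For orientation, here is roughly how add_to_swap_cache reads once the hunks above
are applied. This is a sketch reassembled from the diff and its surrounding
2.6.24-era context lines, not a verbatim copy of the tree; the point it
illustrates is that add_total is now bumped only inside __add_to_swap_cache,
under swapper_space.tree_lock and only on a successful radix-tree insert, so
callers no longer do any stats bookkeeping of their own.

static int add_to_swap_cache(struct page *page, swp_entry_t entry,
				gfp_t gfp_mask)
{
	int error;

	BUG_ON(PageLocked(page));
	/* Failing to duplicate the swap entry is no longer counted: noent_race is gone. */
	if (!swap_duplicate(entry))
		return -ENOENT;

	SetPageLocked(page);
	error = __add_to_swap_cache(page, entry, gfp_mask & GFP_KERNEL);
	/*
	 * Anon pages are already on the LRU, we don't run lru_cache_add here.
	 */
	if (error) {
		ClearPageLocked(page);
		swap_free(entry);
		/* -EEXIST races are likewise no longer counted: exist_race is gone. */
		return error;
	}
	/* add_total was already incremented inside __add_to_swap_cache on success. */
	return 0;
}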