author     Johannes Weiner <hannes@cmpxchg.org>            2012-07-31 16:45:50 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-07-31 18:42:49 -0700
commit     bdf4f4d2161a795b9323855a81a047bd68f16202 (patch)
tree       3aa457660c69f2945cb9ab477079e7441f220b27 /mm
parent     90deb78839faedd194b65d419dbd9cba981e1922 (diff)
mm: memcg: only check anon swapin page charges for swap cache
shmem knows for sure that the page is in swap cache when attempting to
charge a page, because the cache charge entry function has a check for
it.  Only anon pages may be removed from swap cache already when trying
to charge their swapin.

Adjust the comment, though: '4969c11 mm: fix swapin race condition'
added a stable PageSwapCache check under the page lock in the
do_swap_page() before calling the memory controller, so it's
unuse_pte()'s pte_same() that may fail.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Michal Hocko <mhocko@suse.cz>
Cc: David Rientjes <rientjes@google.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Wanpeng Li <liwp.linux@gmail.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
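Note: the "cache charge entry function" in the first sentence is the memcg
cache charge path.  The sketch below is a from-memory, heavily condensed
illustration of how that entry point dispatched swap-cache-backed shmem pages
around this release; the _sketch name is a placeholder and the real function
handles more cases, so read it as context for the commit message, not as the
actual source.

/*
 * Illustration only (not the actual source): condensed from memory from
 * the v3.6-era mem_cgroup_cache_charge().  The explicit PageSwapCache()
 * test is the point: shmem pages coming back from swap reach
 * __mem_cgroup_try_charge_swapin() only when they are known to still be
 * in swap cache, which is why that helper no longer needs its own check.
 */
int mem_cgroup_cache_charge_sketch(struct page *page, struct mm_struct *mm,
                                   gfp_t gfp_mask)
{
        enum charge_type type = MEM_CGROUP_CHARGE_TYPE_CACHE;
        struct mem_cgroup *memcg = NULL;
        int ret;

        if (mem_cgroup_disabled())
                return 0;

        if (!PageSwapCache(page))
                return mem_cgroup_charge_common(page, mm, gfp_mask, type);

        /* swap-cache-backed shmem page: charge it as a swapin */
        ret = __mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &memcg);
        if (!ret)
                __mem_cgroup_commit_charge_swapin(page, memcg, type);
        return ret;
}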
Diffstat (limited to 'mm')
-rw-r--r--  mm/memcontrol.c  22
1 file changed, 14 insertions(+), 8 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 1a2020d72ca0..b06833f2b89e 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2818,14 +2818,6 @@ static int __mem_cgroup_try_charge_swapin(struct mm_struct *mm,
return 0;
if (!do_swap_account)
goto charge_cur_mm;
- /*
- * A racing thread's fault, or swapoff, may have already updated
- * the pte, and even removed page from swap cache: in those cases
- * do_swap_page()'s pte_same() test will fail; but there's also a
- * KSM case which does need to charge the page.
- */
- if (!PageSwapCache(page))
- goto charge_cur_mm;
memcg = try_get_mem_cgroup_from_page(page);
if (!memcg)
goto charge_cur_mm;
@@ -2848,6 +2840,20 @@ int mem_cgroup_try_charge_swapin(struct mm_struct *mm, struct page *page,
*memcgp = NULL;
if (mem_cgroup_disabled())
return 0;
+ /*
+ * A racing thread's fault, or swapoff, may have already
+ * updated the pte, and even removed page from swap cache: in
+ * those cases unuse_pte()'s pte_same() test will fail; but
+ * there's also a KSM case which does need to charge the page.
+ */
+ if (!PageSwapCache(page)) {
+ int ret;
+
+ ret = __mem_cgroup_try_charge(mm, gfp_mask, 1, memcgp, true);
+ if (ret == -EINTR)
+ ret = 0;
+ return ret;
+ }
return __mem_cgroup_try_charge_swapin(mm, page, gfp_mask, memcgp);
}
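
Note: the relocated comment points at the swapoff path.  Below is a rough,
from-memory sketch of the unuse_pte() flow in mm/swapfile.c around this
release; the _sketch suffix marks it as an illustration, and locking, rmap
and error handling are omitted or simplified.

/*
 * Illustration only: heavily condensed from the v3.6-era unuse_pte().
 * The charge is attempted before the pte lock is taken, so a racing
 * fault may already have updated the pte and removed the page from swap
 * cache -- this is the pte_same() test that may fail, as the relocated
 * comment now says.
 */
static int unuse_pte_sketch(struct vm_area_struct *vma, pmd_t *pmd,
                            unsigned long addr, swp_entry_t entry,
                            struct page *page)
{
        struct mem_cgroup *memcg;
        spinlock_t *ptl;
        pte_t *pte;
        int ret = 1;

        if (mem_cgroup_try_charge_swapin(vma->vm_mm, page, GFP_KERNEL, &memcg))
                return -ENOMEM;

        pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
        if (unlikely(!pte_same(*pte, swp_entry_to_pte(entry)))) {
                /* lost the race: another thread already faulted this in */
                mem_cgroup_cancel_charge_swapin(memcg);
                ret = 0;
                goto out;
        }
        /* ... install the new pte, commit the charge, add rmap ... */
out:
        pte_unmap_unlock(pte, ptl);
        return ret;
}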