author     Johannes Weiner <hannes@cmpxchg.org>            2014-12-10 15:43:57 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-12-10 17:41:07 -0800
commit     18eca2e636f921e6350dc31b5b450bb4102d664f (patch)
tree       c2087da91edc48f98a01b32c7213300f7786b097 /mm/memcontrol.c
parent     7bdd143c37e591c254d0991ac398a53f3f9ef1af (diff)
mm: memcontrol: remove unnecessary PCG_MEMSW memory+swap charge flag
Now that mem_cgroup_swapout() fully uncharges the page, every page that is still in use when reaching mem_cgroup_uncharge() is known to carry both the memory and the memory+swap charge. Simplify the uncharge path and remove the PCG_MEMSW page flag accordingly.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Hugh Dickins <hughd@google.com>
Acked-by: Michal Hocko <mhocko@suse.cz>
Reviewed-by: Vladimir Davydov <vdavydov@parallels.com>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
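As context for the diff below, here is a minimal userspace sketch (not kernel code) of the invariant the patch relies on: because charging moves the memory and memory+swap counters together, and swapped-out pages are now uncharged up front in mem_cgroup_swapout(), the uncharge path can derive both uncharges from a single page count instead of a per-page PCG_MEMSW flag. The names below (struct counters, charge, the simplified uncharge_batch signature) are illustrative, not the kernel's actual definitions.

/*
 * Illustrative model only: memory and memory+swap charges are always
 * acquired together, so releasing them needs only the page count.
 */
#include <stdbool.h>
#include <stdio.h>

struct counters {
	unsigned long memory;	/* pages charged to memory */
	unsigned long memsw;	/* pages charged to memory+swap */
};

static bool do_swap_account = true;

/* Charge: memory and memory+swap always move in lockstep. */
static void charge(struct counters *c, unsigned long nr_pages)
{
	c->memory += nr_pages;
	if (do_swap_account)
		c->memsw += nr_pages;
}

/*
 * Uncharge: the batch only needs the total page count; it no longer
 * has to carry separate nr_mem/nr_memsw tallies derived from a flag.
 */
static void uncharge_batch(struct counters *c, unsigned long nr_pages)
{
	c->memory -= nr_pages;
	if (do_swap_account)
		c->memsw -= nr_pages;
}

int main(void)
{
	struct counters c = { 0, 0 };

	charge(&c, 32);
	uncharge_batch(&c, 32);
	printf("memory=%lu memsw=%lu\n", c.memory, c.memsw); /* both 0 */
	return 0;
}

The same symmetry is why the patch can replace max(nr_mem, nr_memsw) with nr_pages when dropping css references in uncharge_batch().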
Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r--  mm/memcontrol.c  34
1 file changed, 12 insertions(+), 22 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 266a440c89f9..baf3b535b180 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2614,7 +2614,7 @@ static void commit_charge(struct page *page, struct mem_cgroup *memcg,
* have the page locked
*/
pc->mem_cgroup = memcg;
- pc->flags = PCG_USED | PCG_MEM | (do_swap_account ? PCG_MEMSW : 0);
+ pc->flags = PCG_USED | PCG_MEM;
if (lrucare)
unlock_page_lru(page, isolated);
@@ -5793,7 +5793,6 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
if (!PageCgroupUsed(pc))
return;
- VM_BUG_ON_PAGE(!(pc->flags & PCG_MEMSW), page);
memcg = pc->mem_cgroup;
oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg));
@@ -5989,17 +5988,16 @@ void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg)
}
static void uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout,
- unsigned long nr_mem, unsigned long nr_memsw,
unsigned long nr_anon, unsigned long nr_file,
unsigned long nr_huge, struct page *dummy_page)
{
+ unsigned long nr_pages = nr_anon + nr_file;
unsigned long flags;
if (!mem_cgroup_is_root(memcg)) {
- if (nr_mem)
- page_counter_uncharge(&memcg->memory, nr_mem);
- if (nr_memsw)
- page_counter_uncharge(&memcg->memsw, nr_memsw);
+ page_counter_uncharge(&memcg->memory, nr_pages);
+ if (do_swap_account)
+ page_counter_uncharge(&memcg->memsw, nr_pages);
memcg_oom_recover(memcg);
}
@@ -6008,23 +6006,21 @@ static void uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout,
__this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_CACHE], nr_file);
__this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE], nr_huge);
__this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT], pgpgout);
- __this_cpu_add(memcg->stat->nr_page_events, nr_anon + nr_file);
+ __this_cpu_add(memcg->stat->nr_page_events, nr_pages);
memcg_check_events(memcg, dummy_page);
local_irq_restore(flags);
if (!mem_cgroup_is_root(memcg))
- css_put_many(&memcg->css, max(nr_mem, nr_memsw));
+ css_put_many(&memcg->css, nr_pages);
}
static void uncharge_list(struct list_head *page_list)
{
struct mem_cgroup *memcg = NULL;
- unsigned long nr_memsw = 0;
unsigned long nr_anon = 0;
unsigned long nr_file = 0;
unsigned long nr_huge = 0;
unsigned long pgpgout = 0;
- unsigned long nr_mem = 0;
struct list_head *next;
struct page *page;
@@ -6051,10 +6047,9 @@ static void uncharge_list(struct list_head *page_list)
if (memcg != pc->mem_cgroup) {
if (memcg) {
- uncharge_batch(memcg, pgpgout, nr_mem, nr_memsw,
- nr_anon, nr_file, nr_huge, page);
- pgpgout = nr_mem = nr_memsw = 0;
- nr_anon = nr_file = nr_huge = 0;
+ uncharge_batch(memcg, pgpgout, nr_anon, nr_file,
+ nr_huge, page);
+ pgpgout = nr_anon = nr_file = nr_huge = 0;
}
memcg = pc->mem_cgroup;
}
@@ -6070,18 +6065,14 @@ static void uncharge_list(struct list_head *page_list)
else
nr_file += nr_pages;
- if (pc->flags & PCG_MEM)
- nr_mem += nr_pages;
- if (pc->flags & PCG_MEMSW)
- nr_memsw += nr_pages;
pc->flags = 0;
pgpgout++;
} while (next != page_list);
if (memcg)
- uncharge_batch(memcg, pgpgout, nr_mem, nr_memsw,
- nr_anon, nr_file, nr_huge, page);
+ uncharge_batch(memcg, pgpgout, nr_anon, nr_file,
+ nr_huge, page);
}
/**
@@ -6166,7 +6157,6 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage,
return;
VM_BUG_ON_PAGE(!(pc->flags & PCG_MEM), oldpage);
- VM_BUG_ON_PAGE(do_swap_account && !(pc->flags & PCG_MEMSW), oldpage);
if (lrucare)
lock_page_lru(oldpage, &isolated);