author     Johannes Weiner <hannes@cmpxchg.org>            2016-03-15 14:57:22 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-03-15 16:55:16 -0700
commit     62cccb8c8e7a3ca233f49d5e7dcb1557d25465cd (patch)
tree       43a902faf461c65393a4efebf9ff9622017b92b1 /mm/workingset.c
parent     6a93ca8fde3cfce0f00f02281139a377c83e8d8c (diff)
download   linux-stable-62cccb8c8e7a3ca233f49d5e7dcb1557d25465cd.tar.gz
           linux-stable-62cccb8c8e7a3ca233f49d5e7dcb1557d25465cd.tar.bz2
           linux-stable-62cccb8c8e7a3ca233f49d5e7dcb1557d25465cd.zip
mm: simplify lock_page_memcg()
Now that migration doesn't clear page->mem_cgroup of live pages anymore,
it's safe to make lock_page_memcg() and the memcg stat functions take
pages, and spare the callers from memcg objects.

[akpm@linux-foundation.org: fix warnings]
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Suggested-by: Vladimir Davydov <vdavydov@virtuozzo.com>
Acked-by: Vladimir Davydov <vdavydov@virtuozzo.com>
Cc: Michal Hocko <mhocko@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
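The caller-side simplification this patch enables looks roughly like the sketch below; the stat helper name and the idx argument stand in for "the memcg stat functions" mentioned in the commit message and are illustrative, not exact signatures:

    /*
     * Before this change: lock_page_memcg() returned the page's memcg,
     * and callers had to thread that pointer through to the stat helpers
     * and back to unlock_page_memcg().
     */
    struct mem_cgroup *memcg;

    memcg = lock_page_memcg(page);
    mem_cgroup_inc_page_stat(memcg, idx);   /* illustrative stat helper */
    unlock_page_memcg(memcg);

    /*
     * After: passing the page is enough; the memcg is looked up from
     * page->mem_cgroup internally, so no local memcg variable is needed.
     */
    lock_page_memcg(page);
    mem_cgroup_inc_page_stat(page, idx);
    unlock_page_memcg(page);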
Diffstat (limited to 'mm/workingset.c')
-rw-r--r--  mm/workingset.c | 9
1 file changed, 4 insertions(+), 5 deletions(-)
diff --git a/mm/workingset.c b/mm/workingset.c
index 14bc23a7779b..6130ba0b2641 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -305,10 +305,9 @@ bool workingset_refault(void *shadow)
  */
 void workingset_activation(struct page *page)
 {
-        struct mem_cgroup *memcg;
         struct lruvec *lruvec;
 
-        memcg = lock_page_memcg(page);
+        lock_page_memcg(page);
         /*
          * Filter non-memcg pages here, e.g. unmap can call
          * mark_page_accessed() on VDSO pages.
@@ -316,12 +315,12 @@ void workingset_activation(struct page *page)
          * XXX: See workingset_refault() - this should return
          * root_mem_cgroup even for !CONFIG_MEMCG.
          */
-        if (!mem_cgroup_disabled() && !memcg)
+        if (!mem_cgroup_disabled() && !page_memcg(page))
                 goto out;
-        lruvec = mem_cgroup_zone_lruvec(page_zone(page), memcg);
+        lruvec = mem_cgroup_zone_lruvec(page_zone(page), page_memcg(page));
         atomic_long_inc(&lruvec->inactive_age);
 out:
-        unlock_page_memcg(memcg);
+        unlock_page_memcg(page);
 }
 
 /*