author    Johannes Weiner <hannes@cmpxchg.org>  2012-07-31 16:45:40 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2012-07-31 18:42:48 -0700
commit    24467cacc03cf8b156d4c6786697e5e5d13c9321 (patch)
tree      d62344582c4b980226a65faa6ceefabb6d106204  /mm/memcontrol.c
parent    62ba7442c8e286b6dd68fe82a3cc85c9414d909b (diff)
mm: memcg: remove needless !mm fixup to init_mm when charging
It does not matter to __mem_cgroup_try_charge() if the passed mm is NULL
or init_mm, it will charge the root memcg in either case.

Also fix up the comment in __mem_cgroup_try_charge() that claimed the
init_mm would be charged when no mm was passed. It's not really
incorrect, but confusing. Clarify that the root memcg is charged in
this case.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Michal Hocko <mhocko@suse.cz>
Cc: David Rientjes <rientjes@google.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Wanpeng Li <liwp.linux@gmail.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
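As a quick illustration of the behavior the message relies on, here is a minimal
standalone C sketch of the fallback: when no mm is supplied, the charge path picks
the root memcg directly, so callers no longer need to substitute init_mm themselves.
The types and the charge_target() helper below are simplified stand-ins invented for
this illustration, not the actual kernel definitions.

	#include <stdio.h>

	/* Simplified stand-ins; the real kernel types carry far more state. */
	struct mem_cgroup { const char *name; };
	struct mm_struct  { struct mem_cgroup *memcg; };

	static struct mem_cgroup root_mem_cgroup = { "root" };

	/*
	 * Pick the memcg to charge: an explicit target wins; a NULL mm falls
	 * back to the root memcg (e.g. for pagecache usage); otherwise the
	 * mm's own memcg is used.  No init_mm fixup is needed by callers.
	 */
	static struct mem_cgroup *charge_target(struct mem_cgroup *ptr,
						struct mm_struct *mm)
	{
		if (ptr)
			return ptr;
		if (!mm)
			return &root_mem_cgroup;
		return mm->memcg;
	}

	int main(void)
	{
		struct mem_cgroup task_memcg = { "task" };
		struct mm_struct mm = { &task_memcg };

		printf("NULL mm -> %s\n", charge_target(NULL, NULL)->name); /* root */
		printf("task mm -> %s\n", charge_target(NULL, &mm)->name);  /* task */
		return 0;
	}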
Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r--	mm/memcontrol.c	7
1 file changed, 1 insertion, 6 deletions
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 17a79e5ed688..10b370345a5b 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2333,7 +2333,7 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
 	 * We always charge the cgroup the mm_struct belongs to.
 	 * The mm_struct's mem_cgroup changes on task migration if the
 	 * thread group leader migrates. It's possible that mm is not
-	 * set, if so charge the init_mm (happens for pagecache usage).
+	 * set, if so charge the root memcg (happens for pagecache usage).
 	 */
 	if (!*ptr && !mm)
 		*ptr = root_mem_cgroup;
@@ -2833,8 +2833,6 @@ int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
 		ret = 0;
 	return ret;
 charge_cur_mm:
-	if (unlikely(!mm))
-		mm = &init_mm;
 	ret = __mem_cgroup_try_charge(mm, mask, 1, memcgp, true);
 	if (ret == -EINTR)
 		ret = 0;
@@ -2899,9 +2897,6 @@ int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
 	if (PageCompound(page))
 		return 0;
 
-	if (unlikely(!mm))
-		mm = &init_mm;
-
 	if (!PageSwapCache(page))
 		ret = mem_cgroup_charge_common(page, mm, gfp_mask, type);
 	else { /* page is swapcache/shmem */