| author | Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp> | 2009-01-07 18:08:12 -0800 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-01-08 08:31:07 -0800 |
| commit | b85a96c0b6cb79c67e7b01b66368f2e31579d7c5 (patch) | |
| tree | 178005176284faef0f8a47293eaddd2885062f53 /mm | |
| parent | f9717d28d673468883df8ac34b47268719ac5a3d (diff) | |
memcg: memory swap controller: fix limit check
There are scattered calls of res_counter_check_under_limit(), and most of
them don't take mem+swap accounting into account.
Define mem_cgroup_check_under_limit() and avoid direct use of
res_counter_check_under_limit().
Reported-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Signed-off-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Balbir Singh <balbir@in.ibm.com>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
| -rw-r--r-- | mm/memcontrol.c | 26 |
1 file changed, 17 insertions, 9 deletions
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index b83790083087..6ad309e9825f 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -571,6 +571,18 @@ done:
 	return ret;
 }
 
+static bool mem_cgroup_check_under_limit(struct mem_cgroup *mem)
+{
+	if (do_swap_account) {
+		if (res_counter_check_under_limit(&mem->res) &&
+			res_counter_check_under_limit(&mem->memsw))
+			return true;
+	} else
+		if (res_counter_check_under_limit(&mem->res))
+			return true;
+	return false;
+}
+
 /*
  * Dance down the hierarchy if needed to reclaim memory. We remember the
  * last child we reclaimed from, so that we don't end up penalizing
@@ -592,7 +604,7 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
 	 * have left.
 	 */
 	ret = try_to_free_mem_cgroup_pages(root_mem, gfp_mask, noswap);
-	if (res_counter_check_under_limit(&root_mem->res))
+	if (mem_cgroup_check_under_limit(root_mem))
 		return 0;
 
 	next_mem = mem_cgroup_get_first_node(root_mem);
@@ -606,7 +618,7 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
 			continue;
 		}
 		ret = try_to_free_mem_cgroup_pages(next_mem, gfp_mask, noswap);
-		if (res_counter_check_under_limit(&root_mem->res))
+		if (mem_cgroup_check_under_limit(root_mem))
 			return 0;
 		cgroup_lock();
 		next_mem = mem_cgroup_get_next_node(next_mem, root_mem);
@@ -709,12 +721,8 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
 		 * current usage of the cgroup before giving up
 		 *
 		 */
-		if (do_swap_account) {
-			if (res_counter_check_under_limit(&mem_over_limit->res) &&
-				res_counter_check_under_limit(&mem_over_limit->memsw))
-				continue;
-		} else if (res_counter_check_under_limit(&mem_over_limit->res))
-			continue;
+		if (mem_cgroup_check_under_limit(mem_over_limit))
+			continue;
 
 		if (!nr_retries--) {
 			if (oom) {
@@ -1334,7 +1342,7 @@ int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask)
 
 	do {
 		progress = try_to_free_mem_cgroup_pages(mem, gfp_mask, true);
-		progress += res_counter_check_under_limit(&mem->res);
+		progress += mem_cgroup_check_under_limit(mem);
 	} while (!progress && --retry);
 
 	css_put(&mem->css);
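For illustration only, here is a stand-alone user-space sketch of the check that the new helper centralizes. The res_counter_model and mem_cgroup_model structs, check_under_limit(), and mem_cgroup_check_under_limit_model() are stand-ins invented for this example, not kernel APIs; only the branching on do_swap_account mirrors mem_cgroup_check_under_limit() from the diff above.

```c
/*
 * Sketch of the consolidated limit check (not kernel code).
 * res_counter is modeled as a plain usage/limit pair; in the kernel,
 * res_counter_check_under_limit() does this comparison under the
 * counter's lock.
 */
#include <stdbool.h>
#include <stdio.h>

struct res_counter_model {
	unsigned long long usage;
	unsigned long long limit;
};

struct mem_cgroup_model {
	struct res_counter_model res;   /* memory usage/limit */
	struct res_counter_model memsw; /* memory+swap usage/limit */
};

static bool do_swap_account = true;

static bool check_under_limit(const struct res_counter_model *cnt)
{
	return cnt->usage < cnt->limit;
}

/*
 * Mirrors the new helper: with swap accounting enabled, the cgroup is
 * "under limit" only if both the memory counter and the mem+swap
 * counter are below their limits; otherwise the memory counter alone
 * decides.
 */
static bool mem_cgroup_check_under_limit_model(const struct mem_cgroup_model *mem)
{
	if (do_swap_account)
		return check_under_limit(&mem->res) &&
		       check_under_limit(&mem->memsw);
	return check_under_limit(&mem->res);
}

int main(void)
{
	struct mem_cgroup_model memcg = {
		.res   = { .usage = 100, .limit = 200 }, /* under memory limit */
		.memsw = { .usage = 300, .limit = 250 }, /* over mem+swap limit */
	};

	/* With swap accounting on, the mem+swap overage must be reported. */
	printf("under limit: %s\n",
	       mem_cgroup_check_under_limit_model(&memcg) ? "yes" : "no");
	return 0;
}
```

Running this prints "under limit: no", which is exactly the case the patch addresses: a call site that consulted only the memory counter (&mem->res) would wrongly conclude the cgroup is under its limit even though the mem+swap counter is already over.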