author	Johannes Weiner <hannes@cmpxchg.org>	2014-08-06 16:05:55 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-08-06 18:01:17 -0700
commit	9476db974d9e18885123fcebc09f4596bb922e5f (patch)
tree	70701f82e65c98a0ee5314d0eb3da9863e0f2091 /mm/memcontrol.c
parent	0029e19ebf84dcd70b226820daa7747b28d5956d (diff)
mm: memcontrol: simplify move precharge function
The move precharge function does some baroque things: it tries raw res_counter charging of the entire amount first, and then falls back to a loop of one-by-one charges, with checks for pending signals and cond_resched() batching.

Just use mem_cgroup_try_charge() without __GFP_WAIT for the first bulk charge attempt.

In the one-by-one loop, remove the signal check (this is already checked in try_charge), and simply call cond_resched() after every charge - it's not that expensive.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Michal Hocko <mhocko@suse.cz>
Cc: Hugh Dickins <hughd@google.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Vladimir Davydov <vdavydov@parallels.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
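For orientation, the precharge path after this change boils down to the following (a condensed sketch assembled from the hunks below, with paraphrased comments; it is not a verbatim copy of mm/memcontrol.c):

static int mem_cgroup_do_precharge(unsigned long count)
{
	int ret = 0;

	if (mem_cgroup_is_root(mc.to)) {
		mc.precharge += count;	/* the root group is never charged */
		return ret;
	}

	/* one bulk attempt with __GFP_WAIT cleared, i.e. without reclaim */
	ret = mem_cgroup_try_charge(mc.to, GFP_KERNEL & ~__GFP_WAIT, count);
	if (!ret) {
		mc.precharge += count;
		return ret;
	}

	/* fall back to page-by-page charges that are allowed to reclaim */
	while (count--) {
		ret = mem_cgroup_try_charge(mc.to,
					    GFP_KERNEL & ~__GFP_NORETRY, 1);
		if (ret)
			return ret;	/* mem_cgroup_clear_mc() unwinds leftovers */
		mc.precharge++;
		cond_resched();		/* cheap enough to do every iteration */
	}
	return 0;
}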
Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r--	mm/memcontrol.c	48
1 file changed, 15 insertions(+), 33 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 8aaca8267dfe..8a4159efa3c0 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -6385,56 +6385,38 @@ static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
 
 #ifdef CONFIG_MMU
 /* Handlers for move charge at task migration. */
-#define PRECHARGE_COUNT_AT_ONCE	256
 static int mem_cgroup_do_precharge(unsigned long count)
 {
 	int ret = 0;
-	int batch_count = PRECHARGE_COUNT_AT_ONCE;
-	struct mem_cgroup *memcg = mc.to;
 
-	if (mem_cgroup_is_root(memcg)) {
+	if (mem_cgroup_is_root(mc.to)) {
 		mc.precharge += count;
 		/* we don't need css_get for root */
 		return ret;
 	}
-	/* try to charge at once */
-	if (count > 1) {
-		struct res_counter *dummy;
-		/*
-		 * "memcg" cannot be under rmdir() because we've already checked
-		 * by cgroup_lock_live_cgroup() that it is not removed and we
-		 * are still under the same cgroup_mutex. So we can postpone
-		 * css_get().
-		 */
-		if (res_counter_charge(&memcg->res, PAGE_SIZE * count, &dummy))
-			goto one_by_one;
-		if (do_swap_account && res_counter_charge(&memcg->memsw,
-						PAGE_SIZE * count, &dummy)) {
-			res_counter_uncharge(&memcg->res, PAGE_SIZE * count);
-			goto one_by_one;
-		}
+
+	/* Try a single bulk charge without reclaim first */
+	ret = mem_cgroup_try_charge(mc.to, GFP_KERNEL & ~__GFP_WAIT, count);
+	if (!ret) {
 		mc.precharge += count;
 		return ret;
 	}
-one_by_one:
-	/* fall back to one by one charge */
+
+	/* Try charges one by one with reclaim */
 	while (count--) {
-		if (signal_pending(current)) {
-			ret = -EINTR;
-			break;
-		}
-		if (!batch_count--) {
-			batch_count = PRECHARGE_COUNT_AT_ONCE;
-			cond_resched();
-		}
-		ret = mem_cgroup_try_charge(memcg,
+		ret = mem_cgroup_try_charge(mc.to,
 					    GFP_KERNEL & ~__GFP_NORETRY, 1);
+		/*
+		 * In case of failure, any residual charges against
+		 * mc.to will be dropped by mem_cgroup_clear_mc()
+		 * later on.
+		 */
 		if (ret)
-			/* mem_cgroup_clear_mc() will do uncharge later */
 			return ret;
 		mc.precharge++;
+		cond_resched();
 	}
-	return ret;
+	return 0;
 }
 
 /**