| author | Johannes Weiner <hannes@cmpxchg.org> | 2015-02-11 15:26:09 -0800 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2015-02-11 17:06:02 -0800 |
| commit | 1dfab5abcdd404fd04597c063d9f61a5b3247552 (patch) | |
| tree | d8b69a2260b839a6db419ee39d520d14eda34f74 /mm/memcontrol.c | |
| parent | 241994ed8649f7300667be8b13a9e04ae04e05a1 (diff) | |
mm: memcontrol: fold move_anon() and move_file()
Turn the move type enum into flags and give the flags field a shorter
name. Once that is done, move_anon() and move_file() are simple enough to
just fold them into the callsites.
[akpm@linux-foundation.org: tweak MOVE_MASK definition, per Michal]
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Michal Hocko <mhocko@suse.cz>
Reviewed-by: Vladimir Davydov <vdavydov@parallels.com>
Cc: Greg Thelen <gthelen@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
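For readers who want the two idioms side by side outside the kernel, here is a minimal standalone C sketch. The `demo_*` names are illustrative only and do not exist in mm/memcontrol.c; the sketch contrasts the old bit-position enum plus predicate helper with the plain flag masks this commit switches to, where the test is short enough to live at the call site.

```c
#include <stdbool.h>
#include <stdio.h>

/* Old style: bit positions named by an enum, tested through small helpers. */
enum demo_move_type {
	DEMO_MOVE_CHARGE_TYPE_ANON,	/* bit 0 */
	DEMO_MOVE_CHARGE_TYPE_FILE,	/* bit 1 */
	DEMO_NR_MOVE_TYPE,
};

static unsigned long demo_immigrate_flags;

static bool demo_move_anon(void)
{
	return demo_immigrate_flags & (1UL << DEMO_MOVE_CHARGE_TYPE_ANON);
}

/* New style: the flags are the values themselves, tested inline at call sites. */
#define DEMO_MOVE_ANON	0x1U
#define DEMO_MOVE_FILE	0x2U
#define DEMO_MOVE_MASK	(DEMO_MOVE_ANON | DEMO_MOVE_FILE)

static unsigned long demo_flags;

int main(void)
{
	/* Both representations encode "move anonymous pages" the same way. */
	demo_immigrate_flags = 1UL << DEMO_MOVE_CHARGE_TYPE_ANON;
	demo_flags = DEMO_MOVE_ANON;

	printf("old helper: anon=%d\n", demo_move_anon());
	printf("new inline: anon=%d\n", !!(demo_flags & DEMO_MOVE_ANON));
	printf("new inline: file=%d\n", !!(demo_flags & DEMO_MOVE_FILE));
	return 0;
}
```

Either form accepts the same user-visible values (0x1 for anon, 0x2 for file); the change is in how the kernel names and tests them, not in the ABI.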
Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r-- | mm/memcontrol.c | 49 |
1 file changed, 18 insertions(+), 31 deletions(-)
```diff
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index ee97c9ac62c0..11c9e6a1dad5 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -369,21 +369,18 @@ static bool memcg_kmem_is_active(struct mem_cgroup *memcg)
 
 /* Stuffs for move charges at task migration. */
 /*
- * Types of charges to be moved. "move_charge_at_immitgrate" and
- * "immigrate_flags" are treated as a left-shifted bitmap of these types.
+ * Types of charges to be moved.
  */
-enum move_type {
-	MOVE_CHARGE_TYPE_ANON,	/* private anonymous page and swap of it */
-	MOVE_CHARGE_TYPE_FILE,	/* file page(including tmpfs) and swap of it */
-	NR_MOVE_TYPE,
-};
+#define MOVE_ANON	0x1U
+#define MOVE_FILE	0x2U
+#define MOVE_MASK	(MOVE_ANON | MOVE_FILE)
 
 /* "mc" and its members are protected by cgroup_mutex */
 static struct move_charge_struct {
 	spinlock_t	  lock; /* for from, to */
 	struct mem_cgroup *from;
 	struct mem_cgroup *to;
-	unsigned long immigrate_flags;
+	unsigned long flags;
 	unsigned long precharge;
 	unsigned long moved_charge;
 	unsigned long moved_swap;
@@ -394,16 +391,6 @@ static struct move_charge_struct {
 	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
 };
 
-static bool move_anon(void)
-{
-	return test_bit(MOVE_CHARGE_TYPE_ANON, &mc.immigrate_flags);
-}
-
-static bool move_file(void)
-{
-	return test_bit(MOVE_CHARGE_TYPE_FILE, &mc.immigrate_flags);
-}
-
 /*
  * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
  * limit reclaim to prevent infinite loops, if they ever occur.
@@ -3500,7 +3487,7 @@ static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
 {
 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
 
-	if (val >= (1 << NR_MOVE_TYPE))
+	if (val & ~MOVE_MASK)
 		return -EINVAL;
 
 	/*
@@ -4773,12 +4760,12 @@ static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
 	if (!page || !page_mapped(page))
 		return NULL;
 	if (PageAnon(page)) {
-		/* we don't move shared anon */
-		if (!move_anon())
+		if (!(mc.flags & MOVE_ANON))
 			return NULL;
-	} else if (!move_file())
-		/* we ignore mapcount for file pages */
-		return NULL;
+	} else {
+		if (!(mc.flags & MOVE_FILE))
+			return NULL;
+	}
 	if (!get_page_unless_zero(page))
 		return NULL;
 
@@ -4792,7 +4779,7 @@ static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
 	struct page *page = NULL;
 	swp_entry_t ent = pte_to_swp_entry(ptent);
 
-	if (!move_anon() || non_swap_entry(ent))
+	if (!(mc.flags & MOVE_ANON) || non_swap_entry(ent))
 		return NULL;
 	/*
 	 * Because lookup_swap_cache() updates some statistics counter,
@@ -4821,7 +4808,7 @@ static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
 
 	if (!vma->vm_file) /* anonymous vma */
 		return NULL;
-	if (!move_file())
+	if (!(mc.flags & MOVE_FILE))
 		return NULL;
 
 	mapping = vma->vm_file->f_mapping;
@@ -4900,7 +4887,7 @@ static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
 
 	page = pmd_page(pmd);
 	VM_BUG_ON_PAGE(!page || !PageHead(page), page);
-	if (!move_anon())
+	if (!(mc.flags & MOVE_ANON))
 		return ret;
 	if (page->mem_cgroup == mc.from) {
 		ret = MC_TARGET_PAGE;
@@ -5042,15 +5029,15 @@ static int mem_cgroup_can_attach(struct cgroup_subsys_state *css,
 	struct task_struct *p = cgroup_taskset_first(tset);
 	int ret = 0;
 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
-	unsigned long move_charge_at_immigrate;
+	unsigned long move_flags;
 
 	/*
 	 * We are now commited to this value whatever it is. Changes in this
	 * tunable will only affect upcoming migrations, not the current one.
 	 * So we need to save it, and keep it going.
 	 */
-	move_charge_at_immigrate = memcg->move_charge_at_immigrate;
-	if (move_charge_at_immigrate) {
+	move_flags = ACCESS_ONCE(memcg->move_charge_at_immigrate);
+	if (move_flags) {
 		struct mm_struct *mm;
 		struct mem_cgroup *from = mem_cgroup_from_task(p);
 
@@ -5070,7 +5057,7 @@ static int mem_cgroup_can_attach(struct cgroup_subsys_state *css,
 		spin_lock(&mc.lock);
 		mc.from = from;
 		mc.to = memcg;
-		mc.immigrate_flags = move_charge_at_immigrate;
+		mc.flags = move_flags;
 		spin_unlock(&mc.lock);
 		/* We set mc.moving_task later */
```
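The other small simplification visible in the diff is the validation in mem_cgroup_move_charge_write(): instead of comparing the written value against `1 << NR_MOVE_TYPE`, any value with bits outside MOVE_MASK is rejected. Below is a minimal userspace sketch of that check; `demo_validate()` and the `DEMO_*` macros are hypothetical stand-ins, not the kernel function or its flags.

```c
#include <stdio.h>

#define DEMO_MOVE_ANON	0x1U
#define DEMO_MOVE_FILE	0x2U
#define DEMO_MOVE_MASK	(DEMO_MOVE_ANON | DEMO_MOVE_FILE)

/* Mirrors the new check: any bit outside the mask makes the value invalid. */
static int demo_validate(unsigned int val)
{
	if (val & ~DEMO_MOVE_MASK)
		return -1;	/* the kernel returns -EINVAL here */
	return 0;
}

int main(void)
{
	unsigned int tests[] = { 0x0, 0x1, 0x2, 0x3, 0x4, 0x7 };

	for (unsigned int i = 0; i < sizeof(tests) / sizeof(tests[0]); i++)
		printf("val=0x%x -> %s\n", tests[i],
		       demo_validate(tests[i]) ? "rejected" : "accepted");
	return 0;
}
```

With the mask form, adding a future move type only requires extending the mask; the bounds check no longer depends on keeping an enum counter like NR_MOVE_TYPE in sync.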