-rw-r--r--  mm/memcontrol.c  12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index eb1004f207b3..c200875072f7 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1332,8 +1332,8 @@ static void mem_cgroup_end_move(struct mem_cgroup *memcg)
/*
* 2 routines for checking "mem" is under move_account() or not.
*
- * mem_cgroup_stealed() - checking a cgroup is mc.from or not. This is used
- * for avoiding race in accounting. If true,
+ * mem_cgroup_stolen() - checking whether a cgroup is mc.from or not. This
+ * is used for avoiding races in accounting. If true,
* pc->mem_cgroup may be overwritten.
*
* mem_cgroup_under_move() - checking a cgroup is mc.from or mc.to or
@@ -1341,7 +1341,7 @@ static void mem_cgroup_end_move(struct mem_cgroup *memcg)
* waiting at high-memory pressure caused by "move".
*/
-static bool mem_cgroup_stealed(struct mem_cgroup *memcg)
+static bool mem_cgroup_stolen(struct mem_cgroup *memcg)
{
VM_BUG_ON(!rcu_read_lock_held());
return atomic_read(&memcg->moving_account) > 0;
@@ -1389,7 +1389,7 @@ static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
* Take this lock when
* - a code tries to modify page's memcg while it's USED.
* - a code tries to modify page state accounting in a memcg.
- * see mem_cgroup_stealed(), too.
+ * see mem_cgroup_stolen(), too.
*/
static void move_lock_mem_cgroup(struct mem_cgroup *memcg,
unsigned long *flags)
@@ -1932,9 +1932,9 @@ again:
* If this memory cgroup is not under account moving, we don't
* need to take move_lock_page_cgroup(). Because we already hold
* rcu_read_lock(), any calls to move_account will be delayed until
- * rcu_read_unlock() if mem_cgroup_stealed() == true.
+ * rcu_read_unlock() if mem_cgroup_stolen() == true.
*/
- if (!mem_cgroup_stealed(memcg))
+ if (!mem_cgroup_stolen(memcg))
return;
move_lock_mem_cgroup(memcg, flags);
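
For reference, below is a minimal userspace sketch of the locking convention the renamed helper supports: a stat updater checks whether the memcg is currently a move source (what mem_cgroup_stolen() reports) and only then takes the per-memcg move lock, relying on RCU to delay move_account() on the fast path. The struct, the helper names, and the C11/pthread primitives are illustrative stand-ins chosen for this sketch, not the kernel API, and the RCU side is omitted.

/*
 * Userspace model of the pattern in the patch above, built with C11
 * atomics and a pthread mutex. Names only mirror the kernel code for
 * illustration; this is not the memcg implementation itself.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <pthread.h>
#include <stdio.h>

struct memcg_model {
	atomic_int moving_account;	/* > 0 while a charge move is in flight */
	pthread_mutex_t move_lock;	/* stands in for move_lock_mem_cgroup() */
	long page_stat;			/* the state we want to keep consistent */
};

/* Analogue of mem_cgroup_stolen(): is this group a move source right now? */
static bool stolen(struct memcg_model *m)
{
	return atomic_load(&m->moving_account) > 0;
}

/* Mover side: bracket the move so stat updaters know to take the lock. */
static void begin_move(struct memcg_model *m)
{
	atomic_fetch_add(&m->moving_account, 1);
	/* the kernel also synchronizes with RCU readers here; omitted */
}

static void end_move(struct memcg_model *m)
{
	atomic_fetch_sub(&m->moving_account, 1);
}

/* Updater side: only pay for the lock when a move might race with us. */
static void update_stat(struct memcg_model *m, long delta)
{
	bool locked = false;

	if (stolen(m)) {		/* the stolen() check before taking move_lock */
		pthread_mutex_lock(&m->move_lock);
		locked = true;
	}
	m->page_stat += delta;
	if (locked)
		pthread_mutex_unlock(&m->move_lock);
}

int main(void)
{
	struct memcg_model m = {
		.moving_account = 0,
		.move_lock = PTHREAD_MUTEX_INITIALIZER,
		.page_stat = 0,
	};

	update_stat(&m, 1);		/* fast path: no move in progress, no lock */
	begin_move(&m);
	update_stat(&m, 1);		/* slow path: lock taken because stolen() is true */
	end_move(&m);
	printf("page_stat = %ld\n", m.page_stat);
	return 0;
}

Usage note: the fast path matches the hunk at mem_cgroup.c line 1932 above, where the caller returns without move_lock_mem_cgroup() when mem_cgroup_stolen() is false, because rcu_read_lock() already delays any concurrent move_account() until rcu_read_unlock().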