path: root/include/linux/memcontrol.h
author	Johannes Weiner <hannes@cmpxchg.org>	2020-12-29 02:35:58 +0000
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2021-01-09 13:37:37 +0100
commit955f8bc9eb69b3be9a7785015c726f7004ec36b9 (patch)
tree	17ef68c1578f564252730ea2471000fd6365cf4b /include/linux/memcontrol.h
parent	fdcda71d87acfe8f645f3ce5e9303688cad8b8f1 (diff)
mm: memcontrol: implement lruvec stat functions on top of each other
commit 284542656e22c43fdada8c8cc0ca9ede8453eed7 upstream.

The implementation of the lruvec stat functions and their variants for accounting through a page, or accounting from a preemptible context, are mostly identical and needlessly repetitive. Implement the lruvec_page functions by looking up the page's lruvec and then using the lruvec function. Implement the functions for preemptible contexts by disabling preemption before calling the atomic context functions.

Link: http://lkml.kernel.org/r/20171103153336.24044-2-hannes@cmpxchg.org
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Vladimir Davydov <vdavydov.dev@gmail.com>
Cc: Michal Hocko <mhocko@suse.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Cc: stable@vger.kernel.org
Signed-off-by: Shaoying Xu <shaoyi@amazon.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
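Note: the layering described above (a preemption-safe wrapper built on top of the atomic-context primitive instead of duplicating its body) can be sketched as a standalone userspace program. All names below (mod_stat, __mod_stat, disable_preemption, enable_preemption) are illustrative stand-ins, not the kernel API shown in the diff.

/*
 * Minimal sketch of the wrapper-on-top-of-primitive pattern.
 * The stand-in preemption helpers are no-ops in userspace.
 */
#include <stdio.h>

static long counter;

/* Stand-ins for preempt_disable()/preempt_enable(). */
static void disable_preemption(void) { }
static void enable_preemption(void) { }

/* Core update; assumed to run in a non-preemptible context. */
static void __mod_stat(int val)
{
	counter += val;
}

/* Preemptible-context variant: wrap the core rather than repeat it. */
static void mod_stat(int val)
{
	disable_preemption();
	__mod_stat(val);
	enable_preemption();
}

int main(void)
{
	mod_stat(3);
	mod_stat(-1);
	printf("counter = %ld\n", counter);
	return 0;
}

The same idea drives the page-based helpers in the patch below: look up the page's lruvec once, then defer to the lruvec function, so each piece of logic exists in exactly one place.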
Diffstat (limited to 'include/linux/memcontrol.h')
-rw-r--r--	include/linux/memcontrol.h	44
1 file changed, 22 insertions(+), 22 deletions(-)
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 2c80b69dd266..1ffc54ac4cc9 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -569,51 +569,51 @@ static inline void __mod_lruvec_state(struct lruvec *lruvec,
 {
 	struct mem_cgroup_per_node *pn;
 
+	/* Update node */
 	__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
+
 	if (mem_cgroup_disabled())
 		return;
+
 	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
+
+	/* Update memcg */
 	__mod_memcg_state(pn->memcg, idx, val);
+
+	/* Update lruvec */
 	__this_cpu_add(pn->lruvec_stat->count[idx], val);
 }
 
 static inline void mod_lruvec_state(struct lruvec *lruvec,
 				    enum node_stat_item idx, int val)
 {
-	struct mem_cgroup_per_node *pn;
-
-	mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
-	if (mem_cgroup_disabled())
-		return;
-	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
-	mod_memcg_state(pn->memcg, idx, val);
-	this_cpu_add(pn->lruvec_stat->count[idx], val);
+	preempt_disable();
+	__mod_lruvec_state(lruvec, idx, val);
+	preempt_enable();
 }
 
 static inline void __mod_lruvec_page_state(struct page *page,
 					   enum node_stat_item idx, int val)
 {
-	struct mem_cgroup_per_node *pn;
+	pg_data_t *pgdat = page_pgdat(page);
+	struct lruvec *lruvec;
 
-	__mod_node_page_state(page_pgdat(page), idx, val);
-	if (mem_cgroup_disabled() || !page->mem_cgroup)
+	/* Untracked pages have no memcg, no lruvec. Update only the node */
+	if (!page->mem_cgroup) {
+		__mod_node_page_state(pgdat, idx, val);
 		return;
-	__mod_memcg_state(page->mem_cgroup, idx, val);
-	pn = page->mem_cgroup->nodeinfo[page_to_nid(page)];
-	__this_cpu_add(pn->lruvec_stat->count[idx], val);
+	}
+
+	lruvec = mem_cgroup_lruvec(pgdat, page->mem_cgroup);
+	__mod_lruvec_state(lruvec, idx, val);
 }
 
 static inline void mod_lruvec_page_state(struct page *page,
 					 enum node_stat_item idx, int val)
 {
-	struct mem_cgroup_per_node *pn;
-
-	mod_node_page_state(page_pgdat(page), idx, val);
-	if (mem_cgroup_disabled() || !page->mem_cgroup)
-		return;
-	mod_memcg_state(page->mem_cgroup, idx, val);
-	pn = page->mem_cgroup->nodeinfo[page_to_nid(page)];
-	this_cpu_add(pn->lruvec_stat->count[idx], val);
+	preempt_disable();
+	__mod_lruvec_page_state(page, idx, val);
+	preempt_enable();
 }
 
 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,