author    Muchun Song <songmuchun@bytedance.com>  2021-02-24 12:04:26 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>  2021-02-24 13:38:30 -0800
commit 96403bfe50c344b587ea53894954a9d152af1c9d (patch)
tree   6a40d44c33e32f4b69496d0238b3a000816866fd /mm/slub.c
parent 1685bde6b9af55923180a76152036c7fb7176db0 (diff)
mm: memcontrol: fix slub memory accounting
SLUB currently accounts kmalloc() and kmalloc_node() allocations larger than an order-1 page on a per-node basis, but it forgets to update the per-memcg vmstats. This leads to inaccurate "slab_unreclaimable" statistics in memory.stat. Fix it by using mod_lruvec_page_state() instead of mod_node_page_state().

Link: https://lkml.kernel.org/r/20210223092423.42420-1-songmuchun@bytedance.com
Fixes: 6a486c0ad4dc ("mm, sl[ou]b: improve memory accounting")
Signed-off-by: Muchun Song <songmuchun@bytedance.com>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Reviewed-by: Roman Gushchin <guro@fb.com>
Reviewed-by: Michal Koutný <mkoutny@suse.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
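Why the one-line substitution is enough: mod_lruvec_page_state() updates the per-memcg counter in addition to the per-node one that mod_node_page_state() already handled. A simplified sketch of the distinction, using a hypothetical helper name; the real kernel implementation instead goes through __mod_lruvec_state() with per-CPU batching:

	/*
	 * Simplified sketch only, not the kernel's actual code.
	 * mod_node_page_state() bumps just the per-node vmstat, so the
	 * owning cgroup's memory.stat never sees large kmalloc pages.
	 * mod_lruvec_page_state() also charges the memcg of the page.
	 */
	static void mod_lruvec_page_state_sketch(struct page *page,
						 enum node_stat_item idx,
						 int val)
	{
		struct mem_cgroup *memcg = page_memcg(page);

		/* Per-node counter, same as the old code path. */
		mod_node_page_state(page_pgdat(page), idx, val);

		/* Per-memcg counter, the part the old code path missed. */
		if (memcg)
			mod_memcg_state(memcg, idx, val);
	}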
Diffstat (limited to 'mm/slub.c')
-rw-r--r--  mm/slub.c  8
1 file changed, 4 insertions, 4 deletions
diff --git a/mm/slub.c b/mm/slub.c
index ca566361561e..046d7194acaf 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -4042,8 +4042,8 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
 	page = alloc_pages_node(node, flags, order);
 	if (page) {
 		ptr = page_address(page);
-		mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE_B,
-				    PAGE_SIZE << order);
+		mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
+				      PAGE_SIZE << order);
 	}
 
 	return kmalloc_large_node_hook(ptr, size, flags);
@@ -4174,8 +4174,8 @@ void kfree(const void *x)
 
 		BUG_ON(!PageCompound(page));
 		kfree_hook(object);
-		mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE_B,
-				    -(PAGE_SIZE << order));
+		mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
+				      -(PAGE_SIZE << order));
 		__free_pages(page, order);
 		return;
 	}