author    Chris Down <chris@chrisdown.name>    2019-03-05 15:48:09 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>    2019-03-05 21:07:19 -0800
commit    1ff9e6e1798c7670ea6a7680a1ad5582df2fa914 (patch)
tree      6050ceb4029e34bd694454533bb81d02b5afa9c9 /mm/huge_memory.c
parent    2bb0f34fe3c1f04196cbcf8aa86b0a9371f6938d (diff)
mm: memcontrol: expose THP events on a per-memcg basis
Currently THP allocation events data is fairly opaque, since you can only get it system-wide. This patch makes it easier to reason about transparent hugepage behaviour on a per-memcg basis.

For anonymous THP-backed pages, we already have MEMCG_RSS_HUGE in v1, which is used for v1's rss_huge [sic]. This is reused here as it's fairly involved to untangle NR_ANON_THPS right now to make it per-memcg, since right now some of this is delegated to rmap before we have any memcg actually assigned to the page. It's a good idea to rework that, but let's leave untangling THP allocation for a future patch.

[akpm@linux-foundation.org: fix build]
[chris@chrisdown.name: fix memcontrol build when THP is disabled]
Link: http://lkml.kernel.org/r/20190131160802.GA5777@chrisdown.name
Link: http://lkml.kernel.org/r/20190129205852.GA7310@chrisdown.name
Signed-off-by: Chris Down <chris@chrisdown.name>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: Roman Gushchin <guro@fb.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
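For orientation only, here is a minimal userspace model of the counting scheme the patch leans on: each memcg keeps per-CPU event counters indexed by the event item, the hot path bumps the counter for the current CPU, and a stat read folds the per-CPU values together. Everything in this sketch except the names count_memcg_events(), THP_FAULT_ALLOC and THP_COLLAPSE_ALLOC is hypothetical; the real implementation lives in mm/memcontrol.c and include/linux/memcontrol.h, not here.

/*
 * Illustrative userspace model only -- not the kernel's memcontrol code.
 * It mimics the shape of count_memcg_events(memcg, THP_FAULT_ALLOC, 1)
 * used in the diff below: bump a per-CPU, per-event counter on the hot
 * path, and fold the per-CPU values together when the stats are read.
 */
#include <stdio.h>

#define NR_CPUS 4

enum vm_event { THP_FAULT_ALLOC, THP_COLLAPSE_ALLOC, NR_EVENTS };

struct mem_cgroup_model {
	/* one counter array per CPU keeps the increment contention-free */
	unsigned long events[NR_CPUS][NR_EVENTS];
};

/* hot path: analogous in spirit to count_memcg_events() */
static void count_memcg_events_model(struct mem_cgroup_model *memcg, int cpu,
				     enum vm_event idx, unsigned long count)
{
	memcg->events[cpu][idx] += count;
}

/* stat read: what reporting the event per-memcg boils down to */
static unsigned long memcg_events_sum(const struct mem_cgroup_model *memcg,
				      enum vm_event idx)
{
	unsigned long sum = 0;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		sum += memcg->events[cpu][idx];
	return sum;
}

int main(void)
{
	struct mem_cgroup_model memcg = { { { 0 } } };

	/* pretend two THP faults were served on different CPUs */
	count_memcg_events_model(&memcg, 0, THP_FAULT_ALLOC, 1);
	count_memcg_events_model(&memcg, 3, THP_FAULT_ALLOC, 1);

	printf("thp_fault_alloc %lu\n",
	       memcg_events_sum(&memcg, THP_FAULT_ALLOC));
	return 0;
}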
Diffstat (limited to 'mm/huge_memory.c')
-rw-r--r--  mm/huge_memory.c  |  2 ++
1 file changed, 2 insertions(+), 0 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index af07527cd971..d4847026d4b1 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -617,6 +617,7 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
 		mm_inc_nr_ptes(vma->vm_mm);
 		spin_unlock(vmf->ptl);
 		count_vm_event(THP_FAULT_ALLOC);
+		count_memcg_events(memcg, THP_FAULT_ALLOC, 1);
 	}
 
 	return 0;
@@ -1338,6 +1339,7 @@ alloc:
 	}
 
 	count_vm_event(THP_FAULT_ALLOC);
+	count_memcg_events(memcg, THP_FAULT_ALLOC, 1);
 
 	if (!page)
 		clear_huge_page(new_page, vmf->address, HPAGE_PMD_NR);
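Once the wider series is in place, these per-memcg counts are meant to surface in the cgroup's memory.stat file; the snippet below is a hedged usage sketch that looks for a "thp_fault_alloc" line there. The default cgroup v2 path and the exact field name are assumptions, not something this diff (limited to mm/huge_memory.c) shows.

/*
 * Usage sketch only: read a per-memcg THP event counter from a cgroup's
 * memory.stat file.  The default path and the "thp_fault_alloc" key are
 * assumptions about how the series exposes the event; pass the real
 * memory.stat path of your cgroup as argv[1].
 */
#include <stdio.h>
#include <string.h>

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : "/sys/fs/cgroup/memory.stat";
	char key[64];
	unsigned long val;
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return 1;
	}
	/* memory.stat is a flat list of "name value" lines */
	while (fscanf(f, "%63s %lu", key, &val) == 2) {
		if (strcmp(key, "thp_fault_alloc") == 0)
			printf("thp_fault_alloc: %lu\n", val);
	}
	fclose(f);
	return 0;
}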