author     Roman Gushchin <guro@fb.com>                      2020-08-06 23:21:17 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>    2020-08-07 11:33:25 -0700
commit     272911a4ad18c48f8bc449a5db945a54987dd687 (patch)
tree       ff000a4a362687f849c8ee584c817c26bf950796 /mm
parent     d797b7d05405c519f7b62ea69a75cea1883863b2 (diff)
mm: memcg/slab: remove memcg_kmem_get_cache()
The memcg_kmem_get_cache() function became really trivial, so let's just inline it into its single call point: memcg_slab_pre_alloc_hook(). This makes the code less bulky and can also help the compiler generate better code.

Signed-off-by: Roman Gushchin <guro@fb.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Tejun Heo <tj@kernel.org>
Link: http://lkml.kernel.org/r/20200623174037.3951353-15-guro@fb.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
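For context, here is a sketch of what the allocation fast path looks like after this change, assembled from the hunks below. The full parameter list and the obj_cgroup lookup/charging tail of memcg_slab_pre_alloc_hook() are elided, since they are unchanged by this patch and only partially visible in the diff:

static inline struct kmem_cache *memcg_slab_pre_alloc_hook(struct kmem_cache *s,
							    /* ... unchanged parameters ... */)
{
	struct kmem_cache *cachep;

	if (memcg_kmem_bypass())
		return s;

	/* Formerly memcg_kmem_get_cache(), now open-coded at its only call site. */
	cachep = READ_ONCE(s->memcg_params.memcg_cache);
	if (unlikely(!cachep)) {
		/*
		 * No memcg cache yet: kick off its asynchronous creation and
		 * let this allocation go through with the root cache.
		 */
		queue_work(system_wq, &s->memcg_params.work);
		return s;
	}

	/* ... obj_cgroup lookup and charging continue here, unchanged ... */
	return cachep;
}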
Diffstat (limited to 'mm')
-rw-r--r--  mm/memcontrol.c   25
-rw-r--r--  mm/slab.h         11
-rw-r--r--  mm/slab_common.c   2
3 files changed, 11 insertions, 27 deletions
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index c713867e496d..a8113b77b23a 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -393,7 +393,7 @@ void memcg_put_cache_ids(void)
 
 /*
  * A lot of the calls to the cache allocation functions are expected to be
- * inlined by the compiler. Since the calls to memcg_kmem_get_cache are
+ * inlined by the compiler. Since the calls to memcg_slab_pre_alloc_hook() are
  * conditional to this static branch, we'll have to allow modules that does
  * kmem_cache_alloc and the such to see this symbol as well
  */
@@ -2901,29 +2901,6 @@ static void memcg_free_cache_id(int id)
 }
 
 /**
- * memcg_kmem_get_cache: select memcg or root cache for allocation
- * @cachep: the original global kmem cache
- *
- * Return the kmem_cache we're supposed to use for a slab allocation.
- *
- * If the cache does not exist yet, if we are the first user of it, we
- * create it asynchronously in a workqueue and let the current allocation
- * go through with the original cache.
- */
-struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep)
-{
-	struct kmem_cache *memcg_cachep;
-
-	memcg_cachep = READ_ONCE(cachep->memcg_params.memcg_cache);
-	if (unlikely(!memcg_cachep)) {
-		queue_work(system_wq, &cachep->memcg_params.work);
-		return cachep;
-	}
-
-	return memcg_cachep;
-}
-
-/**
  * __memcg_kmem_charge: charge a number of kernel pages to a memcg
  * @memcg: memory cgroup to charge
  * @gfp: reclaim mode
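The "static branch" mentioned in the comment in the first memcontrol.c hunk above is the kmem-accounting static key that gates these hooks. A minimal sketch of that pattern follows; the exact key and helper names are an assumption about the surrounding kernel code and are not part of this patch:

/* Sketch: a static key lets the memcg-off case compile down to a no-op jump. */
DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key);

static inline bool memcg_kmem_enabled(void)
{
	return static_branch_likely(&memcg_kmem_enabled_key);
}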
diff --git a/mm/slab.h b/mm/slab.h
index fd9fcdfb3789..342eac852967 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -365,9 +365,16 @@ static inline struct kmem_cache *memcg_slab_pre_alloc_hook(struct kmem_cache *s,
 	if (memcg_kmem_bypass())
 		return s;
 
-	cachep = memcg_kmem_get_cache(s);
-	if (is_root_cache(cachep))
+	cachep = READ_ONCE(s->memcg_params.memcg_cache);
+	if (unlikely(!cachep)) {
+		/*
+		 * If memcg cache does not exist yet, we schedule it's
+		 * asynchronous creation and let the current allocation
+		 * go through with the root cache.
+		 */
+		queue_work(system_wq, &s->memcg_params.work);
 		return s;
+	}
 
 	objcg = get_obj_cgroup_from_current();
 	if (!objcg)
diff --git a/mm/slab_common.c b/mm/slab_common.c
index b898698f6c8a..de0a46cf974a 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -570,7 +570,7 @@ void memcg_create_kmem_cache(struct kmem_cache *root_cache)
 	}
 
 	/*
-	 * Since readers won't lock (see memcg_kmem_get_cache()), we need a
+	 * Since readers won't lock (see memcg_slab_pre_alloc_hook()), we need a
 	 * barrier here to ensure nobody will see the kmem_cache partially
 	 * initialized.
 	 */
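The reader this comment refers to is the lockless READ_ONCE() in memcg_slab_pre_alloc_hook() above. A minimal sketch of the publish/read pairing being described; the writer-side primitive shown here (smp_store_release()) is an assumption about the surrounding code, which is not part of this hunk:

/* Writer, in memcg_create_kmem_cache(): publish the cache only after it is fully set up. */
smp_store_release(&root_cache->memcg_params.memcg_cache, s);

/* Reader, in memcg_slab_pre_alloc_hook(): no locking, just a single load of the pointer. */
cachep = READ_ONCE(s->memcg_params.memcg_cache);
if (unlikely(!cachep)) {
	queue_work(system_wq, &s->memcg_params.work);
	return s;
}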