author		Vlastimil Babka <vbabka@suse.cz>	2021-11-10 14:12:45 +0100
committer	Vlastimil Babka <vbabka@suse.cz>	2022-01-06 12:26:53 +0100
commit		9c01e9af171f13cf6573f404ecaf96dfa48233ab (patch)
tree		1af8f4560e4f707a0587cd9febe0ac34605f3abd /mm/slub.c
parent		662188c3a20eba75babc5a910a5f1b4278069f85 (diff)
mm/slub: Define struct slab fields for CONFIG_SLUB_CPU_PARTIAL only when enabled
The fields 'next' and 'slabs' are only used when CONFIG_SLUB_CPU_PARTIAL
is enabled. We can put their definitions under #ifdef to prevent accidental
use when disabled.
Currently show_slab_objects() and slabs_cpu_partial_show() contain code
that accesses the slabs field through the wrappers slub_percpu_partial()
and slub_percpu_partial_read_once(). This code is effectively dead with
CONFIG_SLUB_CPU_PARTIAL=n, but to prevent a compile error it all has to be
hidden behind #ifdef.
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Reviewed-by: Roman Gushchin <guro@fb.com>
Tested-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
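The struct slab definition itself lives in mm/slab.h and is outside this diffstat (which is limited to mm/slub.c). As an illustration only, a minimal sketch of what the subject line describes; the surrounding field layout is assumed here, not copied from the tree:

/* Sketch only: layout of fields around 'next' and 'slabs' is assumed. */
struct slab {
	unsigned long __page_flags;
	struct kmem_cache *slab_cache;
	union {
		struct list_head slab_list;
#ifdef CONFIG_SLUB_CPU_PARTIAL
		struct {
			struct slab *next;	/* next slab on the per-cpu partial list */
			int slabs;		/* nr of slabs left on that list */
		};
#endif
	};
	/* ... remaining fields omitted ... */
};

With the fields compiled out entirely when CONFIG_SLUB_CPU_PARTIAL=n, any stray access outside the wrappers fails to build instead of silently reading unused memory.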
Diffstat (limited to 'mm/slub.c')
-rw-r--r--	mm/slub.c	8
1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index d08ba1025aae..261474092e43 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -5258,6 +5258,7 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 			total += x;
 			nodes[node] += x;
 
+#ifdef CONFIG_SLUB_CPU_PARTIAL
 			slab = slub_percpu_partial_read_once(c);
 			if (slab) {
 				node = slab_nid(slab);
@@ -5270,6 +5271,7 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 				total += x;
 				nodes[node] += x;
 			}
+#endif
 		}
 	}
 
@@ -5469,9 +5471,10 @@ static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
 {
 	int objects = 0;
 	int slabs = 0;
-	int cpu;
+	int cpu __maybe_unused;
 	int len = 0;
 
+#ifdef CONFIG_SLUB_CPU_PARTIAL
 	for_each_online_cpu(cpu) {
 		struct slab *slab;
 
@@ -5480,12 +5483,13 @@ static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
 		if (slab)
 			slabs += slab->slabs;
 	}
+#endif
 
 	/* Approximate half-full slabs, see slub_set_cpu_partial() */
 	objects = (slabs * oo_objects(s->oo)) / 2;
 	len += sysfs_emit_at(buf, len, "%d(%d)", objects, slabs);
 
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SLUB_CPU_PARTIAL) && defined(CONFIG_SMP)
 	for_each_online_cpu(cpu) {
 		struct slab *slab;
 
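The `int cpu __maybe_unused;` change is needed because, with CONFIG_SLUB_CPU_PARTIAL=n, every for_each_online_cpu() loop that reads 'cpu' in slabs_cpu_partial_show() is compiled out, which would otherwise trigger an unused-variable warning (an error with -Werror). A standalone userspace sketch of the same pattern, with the macro definition and loop standing in for the kernel's versions:

/* Standalone sketch (not kernel code): shows why 'cpu' is marked
 * __maybe_unused -- when the only code using it is compiled out, the
 * compiler would otherwise emit -Wunused-variable. */
#include <stdio.h>

#define __maybe_unused __attribute__((__unused__))

/* Mimics CONFIG_SLUB_CPU_PARTIAL=y; comment out to build the "=n" case. */
#define CONFIG_SLUB_CPU_PARTIAL 1

int main(void)
{
	int slabs = 0;
	int cpu __maybe_unused;	/* stays warning-free even if the loop below vanishes */

#ifdef CONFIG_SLUB_CPU_PARTIAL
	for (cpu = 0; cpu < 4; cpu++)	/* stands in for for_each_online_cpu() */
		slabs += cpu;
#endif

	printf("slabs=%d\n", slabs);
	return 0;
}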