author		Sergey Senozhatsky <senozhatsky@chromium.org>	2024-07-01 12:11:37 +0900
committer	Andrew Morton <akpm@linux-foundation.org>	2024-07-12 15:52:13 -0700
commit		791abe1e420c3dad6ddbd0a6c40467e9e24059b7 (patch)
tree		58912fb44ba580e2c6d0dd086c4d1713ad1852ae /mm
parent		9b89e018990de47c72ef8b2ca29204f88fda8f05 (diff)
zsmalloc: rename class stat mutators
A cosmetic change.
o Rename class_stat_inc() and class_stat_dec() to class_stat_add()
  and class_stat_sub() respectively. inc/dec are usually associated
  with +1/-1 modifications, while zsmalloc can modify stats by up
  to ->objs_per_zspage. Use add/sub (this follows atomics naming).
o Rename zs_stat_get() to class_stat_read()
  get() is usually associated with ref-counting and is paired with put().
  zs_stat_get() simply reads a class stat, so rename it to reflect that.
  (This also follows atomics naming.)
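For illustration only (not part of the patch): a minimal, self-contained
userspace sketch of the new naming, using simplified stand-ins for
struct size_class and the ZS_* stat indexes, showing that add/sub take
arbitrary counts such as ->objs_per_zspage while read is a plain accessor.

/*
 * Userspace sketch, NOT the kernel code: the struct and enum below are
 * simplified stand-ins for struct size_class and the ZS_* enums in
 * mm/zsmalloc.c.
 */
#include <stdio.h>

enum { ZS_OBJS_ALLOCATED, ZS_OBJS_INUSE, NR_CLASS_STAT_TYPES };

struct size_class {
	unsigned long stats[NR_CLASS_STAT_TYPES];
	unsigned int objs_per_zspage;
};

/* add/sub: callers may pass any count, e.g. class->objs_per_zspage */
static inline void class_stat_add(struct size_class *class, int type,
				  unsigned long cnt)
{
	class->stats[type] += cnt;
}

static inline void class_stat_sub(struct size_class *class, int type,
				  unsigned long cnt)
{
	class->stats[type] -= cnt;
}

/* read: a plain accessor, nothing is acquired or reference-counted */
static inline unsigned long class_stat_read(struct size_class *class, int type)
{
	return class->stats[type];
}

int main(void)
{
	struct size_class class = { .objs_per_zspage = 7 };

	/* allocating the first object of a new zspage bumps the
	 * allocated counter by a whole zspage worth of objects */
	class_stat_add(&class, ZS_OBJS_ALLOCATED, class.objs_per_zspage);
	class_stat_add(&class, ZS_OBJS_INUSE, 1);

	/* freeing that object */
	class_stat_sub(&class, ZS_OBJS_INUSE, 1);

	printf("allocated=%lu inuse=%lu\n",
	       class_stat_read(&class, ZS_OBJS_ALLOCATED),
	       class_stat_read(&class, ZS_OBJS_INUSE));
	return 0;
}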
Link: https://lkml.kernel.org/r/20240701031140.3756345-1-senozhatsky@chromium.org
Signed-off-by: Sergey Senozhatsky <senozhatsky@chromium.org>
Reviewed-by: Chengming Zhou <chengming.zhou@linux.dev>
Cc: Minchan Kim <minchan@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--	mm/zsmalloc.c	38
1 file changed, 19 insertions, 19 deletions
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index c2f4e62ffb46..5d6581ab7c07 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -512,19 +512,19 @@ static int get_size_class_index(int size)
 	return min_t(int, ZS_SIZE_CLASSES - 1, idx);
 }
 
-static inline void class_stat_inc(struct size_class *class,
-				int type, unsigned long cnt)
+static inline void class_stat_add(struct size_class *class, int type,
+				  unsigned long cnt)
 {
 	class->stats.objs[type] += cnt;
 }
 
-static inline void class_stat_dec(struct size_class *class,
-				int type, unsigned long cnt)
+static inline void class_stat_sub(struct size_class *class, int type,
+				  unsigned long cnt)
 {
 	class->stats.objs[type] -= cnt;
 }
 
-static inline unsigned long zs_stat_get(struct size_class *class, int type)
+static inline unsigned long class_stat_read(struct size_class *class, int type)
 {
 	return class->stats.objs[type];
 }
@@ -576,12 +576,12 @@ static int zs_stats_size_show(struct seq_file *s, void *v)
 		seq_printf(s, " %5u %5u ", i, class->size);
 
 		for (fg = ZS_INUSE_RATIO_10; fg < NR_FULLNESS_GROUPS; fg++) {
-			inuse_totals[fg] += zs_stat_get(class, fg);
-			seq_printf(s, "%9lu ", zs_stat_get(class, fg));
+			inuse_totals[fg] += class_stat_read(class, fg);
+			seq_printf(s, "%9lu ", class_stat_read(class, fg));
 		}
 
-		obj_allocated = zs_stat_get(class, ZS_OBJS_ALLOCATED);
-		obj_used = zs_stat_get(class, ZS_OBJS_INUSE);
+		obj_allocated = class_stat_read(class, ZS_OBJS_ALLOCATED);
+		obj_used = class_stat_read(class, ZS_OBJS_INUSE);
 		freeable = zs_can_compact(class);
 		spin_unlock(&class->lock);
 
@@ -686,7 +686,7 @@ static void insert_zspage(struct size_class *class,
 				struct zspage *zspage,
 				int fullness)
 {
-	class_stat_inc(class, fullness, 1);
+	class_stat_add(class, fullness, 1);
 	list_add(&zspage->list, &class->fullness_list[fullness]);
 	zspage->fullness = fullness;
 }
@@ -702,7 +702,7 @@ static void remove_zspage(struct size_class *class, struct zspage *zspage)
 	VM_BUG_ON(list_empty(&class->fullness_list[fullness]));
 
 	list_del_init(&zspage->list);
-	class_stat_dec(class, fullness, 1);
+	class_stat_sub(class, fullness, 1);
 }
 
 /*
@@ -858,7 +858,7 @@ static void __free_zspage(struct zs_pool *pool, struct size_class *class,
 
 	cache_free_zspage(pool, zspage);
 
-	class_stat_dec(class, ZS_OBJS_ALLOCATED, class->objs_per_zspage);
+	class_stat_sub(class, ZS_OBJS_ALLOCATED, class->objs_per_zspage);
 	atomic_long_sub(class->pages_per_zspage, &pool->pages_allocated);
 }
 
@@ -1374,7 +1374,7 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
 		obj_malloc(pool, zspage, handle);
 		/* Now move the zspage to another fullness group, if required */
 		fix_fullness_group(class, zspage);
-		class_stat_inc(class, ZS_OBJS_INUSE, 1);
+		class_stat_add(class, ZS_OBJS_INUSE, 1);
 
 		goto out;
 	}
@@ -1392,8 +1392,8 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
 	newfg = get_fullness_group(class, zspage);
 	insert_zspage(class, zspage, newfg);
 	atomic_long_add(class->pages_per_zspage, &pool->pages_allocated);
-	class_stat_inc(class, ZS_OBJS_ALLOCATED, class->objs_per_zspage);
-	class_stat_inc(class, ZS_OBJS_INUSE, 1);
+	class_stat_add(class, ZS_OBJS_ALLOCATED, class->objs_per_zspage);
+	class_stat_add(class, ZS_OBJS_INUSE, 1);
 
 	/* We completely set up zspage so mark them as movable */
 	SetZsPageMovable(pool, zspage);
@@ -1454,7 +1454,7 @@ void zs_free(struct zs_pool *pool, unsigned long handle)
 	spin_lock(&class->lock);
 	read_unlock(&pool->migrate_lock);
 
-	class_stat_dec(class, ZS_OBJS_INUSE, 1);
+	class_stat_sub(class, ZS_OBJS_INUSE, 1);
 	obj_free(class->size, obj);
 
 	fullness = fix_fullness_group(class, zspage);
@@ -1880,7 +1880,7 @@ static void async_free_zspage(struct work_struct *work)
 
 		class = zspage_class(pool, zspage);
 		spin_lock(&class->lock);
-		class_stat_dec(class, ZS_INUSE_RATIO_0, 1);
+		class_stat_sub(class, ZS_INUSE_RATIO_0, 1);
 		__free_zspage(pool, class, zspage);
 		spin_unlock(&class->lock);
 	}
@@ -1923,8 +1923,8 @@ static inline void zs_flush_migration(struct zs_pool *pool) { }
 static unsigned long zs_can_compact(struct size_class *class)
 {
 	unsigned long obj_wasted;
-	unsigned long obj_allocated = zs_stat_get(class, ZS_OBJS_ALLOCATED);
-	unsigned long obj_used = zs_stat_get(class, ZS_OBJS_INUSE);
+	unsigned long obj_allocated = class_stat_read(class, ZS_OBJS_ALLOCATED);
+	unsigned long obj_used = class_stat_read(class, ZS_OBJS_INUSE);
 
 	if (obj_allocated <= obj_used)
 		return 0;