author		Ming Lei <ming.lei@redhat.com>	2024-05-15 09:31:56 +0800
committer	Jens Axboe <axboe@kernel.dk>	2024-05-15 20:14:20 -0600
commit		6da6680632792709cecf2b006f2fe3ca7857e791 (patch)
tree		dc7324c953f3bfa9a9b02d1f48d5370e7f7baaa3 /block
parent		efb905aeb44b0e99c0e6b07865b1885ae0471ebf (diff)
blk-cgroup: fix list corruption from resetting io stat
Since commit 3b8cc6298724 ("blk-cgroup: Optimize blkcg_rstat_flush()"),
each iostat instance is linked into a blkcg per-cpu llist, so
blkcg_reset_stats() can't reset a stat instance with memset(): zeroing the
whole struct would also wipe the llist_node that links it into the list,
corrupting the llist.
Fix the issue by resetting only the counter part.
Cc: Tejun Heo <tj@kernel.org>
Cc: Waiman Long <longman@redhat.com>
Cc: Jay Shin <jaeshin@redhat.com>
Fixes: 3b8cc6298724 ("blk-cgroup: Optimize blkcg_rstat_flush()")
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Acked-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Waiman Long <longman@redhat.com>
Link: https://lore.kernel.org/r/20240515013157.443672-2-ming.lei@redhat.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
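
To make the failure mode concrete, here is a minimal user-space sketch (not the kernel code; the names stat_set, node, list_add and clear_counters are illustrative stand-ins for blkg_iostat_set, llist_node and friends): when the stat struct embeds the node that links it into a lockless-style list, memset() on the whole struct also wipes the link and silently breaks the list, while clearing only the counters leaves the list intact.

/*
 * Sketch only: why memset() on a stat struct that sits on a linked
 * list corrupts the list, while resetting just the counters is safe.
 */
#include <stdio.h>
#include <string.h>

struct node {			/* stand-in for struct llist_node */
	struct node *next;
};

struct stat_set {		/* stand-in for struct blkg_iostat_set */
	unsigned long long bytes;
	unsigned long long ios;
	struct node lnode;	/* links this instance into a per-cpu list */
};

static struct node *list_head;	/* stand-in for the per-cpu llist head */

static void list_add(struct node *n)
{
	n->next = list_head;
	list_head = n;
}

static void clear_counters(struct stat_set *s)
{
	/* Reset only the counter part; the list linkage is untouched. */
	s->bytes = 0;
	s->ios = 0;
}

static int list_len(void)
{
	struct node *n;
	int len = 0;

	for (n = list_head; n; n = n->next)
		len++;
	return len;
}

int main(void)
{
	struct stat_set a = { .bytes = 100, .ios = 1 };
	struct stat_set b = { .bytes = 200, .ios = 2 };

	list_add(&a.lnode);
	list_add(&b.lnode);	/* list: b -> a */

	/* Safe reset: both entries remain reachable afterwards. */
	clear_counters(&a);
	clear_counters(&b);
	printf("after counter reset, list length = %d\n", list_len()); /* 2 */

	/*
	 * Unsafe reset: memset() also zeroes b.lnode.next, so the walk
	 * now stops at b and node a is lost from the list.
	 */
	memset(&b, 0, sizeof(b));
	printf("after memset reset, list length = %d\n", list_len()); /* 1 */

	return 0;
}

The actual fix below takes the same approach in kernel terms: __blkg_clear_stat() zeroes only the cur and last counters inside a u64_stats update section and leaves the rest of blkg_iostat_set, including its list linkage, untouched.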
Diffstat (limited to 'block')
-rw-r--r--	block/blk-cgroup.c	58
1 file changed, 35 insertions, 23 deletions
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 4b1a35ab0ea4..8699f193cf31 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -618,12 +618,45 @@ restart:
 	spin_unlock_irq(&q->queue_lock);
 }
 
+static void blkg_iostat_set(struct blkg_iostat *dst, struct blkg_iostat *src)
+{
+	int i;
+
+	for (i = 0; i < BLKG_IOSTAT_NR; i++) {
+		dst->bytes[i] = src->bytes[i];
+		dst->ios[i] = src->ios[i];
+	}
+}
+
+static void __blkg_clear_stat(struct blkg_iostat_set *bis)
+{
+	struct blkg_iostat cur = {0};
+	unsigned long flags;
+
+	flags = u64_stats_update_begin_irqsave(&bis->sync);
+	blkg_iostat_set(&bis->cur, &cur);
+	blkg_iostat_set(&bis->last, &cur);
+	u64_stats_update_end_irqrestore(&bis->sync, flags);
+}
+
+static void blkg_clear_stat(struct blkcg_gq *blkg)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu) {
+		struct blkg_iostat_set *s = per_cpu_ptr(blkg->iostat_cpu, cpu);
+
+		__blkg_clear_stat(s);
+	}
+	__blkg_clear_stat(&blkg->iostat);
+}
+
 static int blkcg_reset_stats(struct cgroup_subsys_state *css,
 			     struct cftype *cftype, u64 val)
 {
 	struct blkcg *blkcg = css_to_blkcg(css);
 	struct blkcg_gq *blkg;
-	int i, cpu;
+	int i;
 
 	mutex_lock(&blkcg_pol_mutex);
 	spin_lock_irq(&blkcg->lock);
@@ -634,18 +667,7 @@ static int blkcg_reset_stats(struct cgroup_subsys_state *css,
 	 * anyway.  If you get hit by a race, retry.
 	 */
 	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
-		for_each_possible_cpu(cpu) {
-			struct blkg_iostat_set *bis =
-				per_cpu_ptr(blkg->iostat_cpu, cpu);
-			memset(bis, 0, sizeof(*bis));
-
-			/* Re-initialize the cleared blkg_iostat_set */
-			u64_stats_init(&bis->sync);
-			bis->blkg = blkg;
-		}
-		memset(&blkg->iostat, 0, sizeof(blkg->iostat));
-		u64_stats_init(&blkg->iostat.sync);
-
+		blkg_clear_stat(blkg);
 		for (i = 0; i < BLKCG_MAX_POLS; i++) {
 			struct blkcg_policy *pol = blkcg_policy[i];
 
@@ -948,16 +970,6 @@ void blkg_conf_exit(struct blkg_conf_ctx *ctx)
 }
 EXPORT_SYMBOL_GPL(blkg_conf_exit);
 
-static void blkg_iostat_set(struct blkg_iostat *dst, struct blkg_iostat *src)
-{
-	int i;
-
-	for (i = 0; i < BLKG_IOSTAT_NR; i++) {
-		dst->bytes[i] = src->bytes[i];
-		dst->ios[i] = src->ios[i];
-	}
-}
-
 static void blkg_iostat_add(struct blkg_iostat *dst, struct blkg_iostat *src)
 {
 	int i;