author    David Jeffery <djeffery@redhat.com>    2023-10-02 14:32:29 -0400
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>    2023-10-10 22:02:56 +0200
commit    bb39b7c7869d17cd19d7c62e778613f92282561b (patch)
tree      d180243fce4104188af7960bf77d472ab2cbba7e
parent    3f3164ce6396138747984ee9e61158e248246300 (diff)
md/raid5: release batch_last before waiting for another stripe_head
commit 2fd7b0f6d5ad655b1d947d3acdd82f687c31465e upstream.

When raid5_get_active_stripe is called with a ctx containing a stripe_head
in its batch_last pointer, it can cause a deadlock if the task sleeps
waiting on another stripe_head to become available. The stripe_head held by
batch_last can be blocking the advancement of other stripe_heads, leading
to no stripe_heads being released so raid5_get_active_stripe waits forever.

Like with the quiesce state handling earlier in the function, batch_last
needs to be released by raid5_get_active_stripe before it waits for another
stripe_head.

Fixes: 3312e6c887fe ("md/raid5: Keep a reference to last stripe_head for batch")
Cc: stable@vger.kernel.org # v6.0+
Signed-off-by: David Jeffery <djeffery@redhat.com>
Reviewed-by: Logan Gunthorpe <logang@deltatee.com>
Signed-off-by: Song Liu <song@kernel.org>
Link: https://lore.kernel.org/r/20231002183422.13047-1-djeffery@redhat.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-rw-r--r--  drivers/md/raid5.c | 7
1 file changed, 7 insertions(+), 0 deletions(-)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 85b3004594e0..e6ac4d0d94d8 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -854,6 +854,13 @@ struct stripe_head *raid5_get_active_stripe(struct r5conf *conf,
 
 		set_bit(R5_INACTIVE_BLOCKED, &conf->cache_state);
 		r5l_wake_reclaim(conf->log, 0);
+
+		/* release batch_last before wait to avoid risk of deadlock */
+		if (ctx && ctx->batch_last) {
+			raid5_release_stripe(ctx->batch_last);
+			ctx->batch_last = NULL;
+		}
+
 		wait_event_lock_irq(conf->wait_for_stripe,
 				    is_inactive_blocked(conf, hash),
 				    *(conf->hash_locks + hash));
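
The hunk above applies a general rule: a task must not sleep waiting for a pooled resource while it still holds another resource whose release is needed to replenish that pool. Below is a minimal userspace sketch of that rule, assuming plain pthreads rather than kernel primitives; resource_pool, pool_get, pool_put and free_count are illustrative names only and do not exist in raid5.c.

/*
 * Minimal userspace analogue (pthreads) of the release-before-wait rule
 * used by this patch.  All names here are illustrative, not kernel code.
 */
#include <pthread.h>
#include <stdbool.h>

struct resource_pool {
	pthread_mutex_t lock;
	pthread_cond_t  available;
	int             free_count;	/* resources currently free */
};

/* Return a resource to the pool and wake one waiter. */
static void pool_put(struct resource_pool *p)
{
	pthread_mutex_lock(&p->lock);
	p->free_count++;
	pthread_cond_signal(&p->available);
	pthread_mutex_unlock(&p->lock);
}

/*
 * Take a resource.  If the caller already holds one (*held) and none are
 * free, give the held one back before sleeping -- the analogue of
 * releasing ctx->batch_last before wait_event_lock_irq() in the patch.
 * Without this, a caller holding the pool's last resource could sleep
 * forever waiting for a free_count that can never rise.
 */
static void pool_get(struct resource_pool *p, bool *held)
{
	pthread_mutex_lock(&p->lock);
	if (*held && p->free_count == 0) {
		p->free_count++;		/* release before waiting */
		*held = false;
		pthread_cond_signal(&p->available);
	}
	while (p->free_count == 0)
		pthread_cond_wait(&p->available, &p->lock);
	p->free_count--;
	*held = true;
	pthread_mutex_unlock(&p->lock);
}

A pool in this sketch would be initialized with PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER and a positive free_count; the only step that matters for the analogy is the release-before-wait branch, which plays the role of the new batch_last release ahead of wait_event_lock_irq() in the hunk above.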