author    NeilBrown <neilb@suse.de>    2012-03-19 12:46:38 +1100
committer NeilBrown <neilb@suse.de>    2012-03-19 12:46:38 +1100
commit    d6b42dcb995e6acd7cc276774e751ffc9f0ef4bf (patch)
tree      a9112351e8ddd2866afd8687b645a1c5bf574ee7 /drivers
parent    4474ca42e2577563a919fd3ed782e2ec55bf11a2 (diff)
download  linux-d6b42dcb995e6acd7cc276774e751ffc9f0ef4bf.tar.gz
          linux-d6b42dcb995e6acd7cc276774e751ffc9f0ef4bf.tar.bz2
          linux-d6b42dcb995e6acd7cc276774e751ffc9f0ef4bf.zip
md/raid1,raid10: avoid deadlock during resync/recovery.
If RAID1 or RAID10 is used under LVM or some other stacking block device,
it is possible to enter a deadlock during resync or recovery.

This can happen if the upper-level block device creates two requests to
the RAID1 or RAID10.  The first request gets processed, blocks recovery,
and queues requests for the underlying devices on current->bio_list.  A
resync request then starts; it will wait for those requests and block
new IO.

But then the second request to the RAID1/10 is attempted, and it cannot
progress until the resync request completes, which cannot progress until
the underlying device requests complete, which are on a queue behind
that second request.

So allow that second request to proceed even though there is a resync
request about to start.

This is suitable for any -stable kernel.

Cc: stable@vger.kernel.org
Reported-by: Ray Morris <support@bettercgi.com>
Tested-by: Ray Morris <support@bettercgi.com>
Signed-off-by: NeilBrown <neilb@suse.de>
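For context on why the first request blocks recovery: the resync path raises a
barrier that cannot complete while normal IO is pending.  A simplified sketch of
what raise_barrier() in drivers/md/raid1.c looked like around this kernel
(paraphrased from memory for illustration, not part of this patch; the raid10.c
version is analogous):

static void raise_barrier(struct r1conf *conf)
{
	spin_lock_irq(&conf->resync_lock);

	/* Wait until no normal IO is waiting on the barrier. */
	wait_event_lock_irq(conf->wait_barrier, !conf->nr_waiting,
			    conf->resync_lock, );

	/* Block any new IO from starting. */
	conf->barrier++;

	/* Wait for all pending IO to complete.  This is the step that
	 * can never finish if the bios that would drop nr_pending are
	 * still parked on the submitting task's current->bio_list.
	 */
	wait_event_lock_irq(conf->wait_barrier,
			    !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
			    conf->resync_lock, );

	spin_unlock_irq(&conf->resync_lock);
}

With conf->barrier raised, the unpatched wait_barrier() below blocks every new
request unconditionally, which is what traps the second upper-level request.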
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/md/raid1.c  | 17
-rw-r--r--  drivers/md/raid10.c | 17
2 files changed, 30 insertions(+), 4 deletions(-)
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index a0b225eb4ac4..118e0f69f224 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -737,9 +737,22 @@ static void wait_barrier(struct r1conf *conf)
 	spin_lock_irq(&conf->resync_lock);
 	if (conf->barrier) {
 		conf->nr_waiting++;
-		wait_event_lock_irq(conf->wait_barrier, !conf->barrier,
+		/* Wait for the barrier to drop.
+		 * However if there are already pending
+		 * requests (preventing the barrier from
+		 * rising completely), and the
+		 * pre-process bio queue isn't empty,
+		 * then don't wait, as we need to empty
+		 * that queue to get the nr_pending
+		 * count down.
+		 */
+		wait_event_lock_irq(conf->wait_barrier,
+				    !conf->barrier ||
+				    (conf->nr_pending &&
+				     current->bio_list &&
+				     !bio_list_empty(current->bio_list)),
 				    conf->resync_lock,
-				    );
+			);
 		conf->nr_waiting--;
 	}
 	conf->nr_pending++;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index f4f3edcdaf8d..2ae7021320e1 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -863,9 +863,22 @@ static void wait_barrier(struct r10conf *conf)
 	spin_lock_irq(&conf->resync_lock);
 	if (conf->barrier) {
 		conf->nr_waiting++;
-		wait_event_lock_irq(conf->wait_barrier, !conf->barrier,
+		/* Wait for the barrier to drop.
+		 * However if there are already pending
+		 * requests (preventing the barrier from
+		 * rising completely), and the
+		 * pre-process bio queue isn't empty,
+		 * then don't wait, as we need to empty
+		 * that queue to get the nr_pending
+		 * count down.
+		 */
+		wait_event_lock_irq(conf->wait_barrier,
+				    !conf->barrier ||
+				    (conf->nr_pending &&
+				     current->bio_list &&
+				     !bio_list_empty(current->bio_list)),
 				    conf->resync_lock,
-				    );
+			);
 		conf->nr_waiting--;
 	}
 	conf->nr_pending++;
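The other half of the deadlock is the recursion-avoidance queue in the block
layer: when a stacked driver submits bios while a make_request function is
already active on the same task, they are parked on current->bio_list and only
dispatched after the current request returns.  A simplified sketch of
generic_make_request() as it looked in this era (paraphrased for illustration;
not part of this patch):

void generic_make_request(struct bio *bio)
{
	struct bio_list bio_list_on_stack;

	if (current->bio_list) {
		/* A make_request_fn is already active on this task:
		 * park the bio and let the outer loop submit it later.
		 * Bios parked here are what keep nr_pending elevated.
		 */
		bio_list_add(current->bio_list, bio);
		return;
	}

	/* Top-level submission: process this bio, then anything the
	 * drivers parked on current->bio_list while handling it.
	 */
	bio_list_init(&bio_list_on_stack);
	current->bio_list = &bio_list_on_stack;
	do {
		struct request_queue *q = bdev_get_queue(bio->bi_bdev);

		q->make_request_fn(q, bio);

		bio = bio_list_pop(current->bio_list);
	} while (bio);
	current->bio_list = NULL;
}

With the patched wait_barrier(), the second RAID1/RAID10 request no longer
sleeps while nr_pending is non-zero and current->bio_list is non-empty, so this
loop can drain the parked bios, nr_pending drops to zero, and the resync barrier
can finally rise.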