author		Omar Sandoval <osandov@fb.com>		2017-01-18 11:55:22 -0800
committer	Ben Hutchings <ben@decadent.org.uk>	2017-06-05 21:16:55 +0100
commit		85edb87c082d9e0f42d874610c68e7c3003c9870 (patch)
tree		e3da5f4ace23d836086e703c66bb33d8a0e50b00 /block
parent		eb7175dd1c6f0713d4312e745103354bb0abaed5 (diff)
sbitmap: fix wakeup hang after sbq resize
commit 6c0ca7ae292adea09b8bdd33a524bb9326c3e989 upstream.

When we resize a struct sbitmap_queue, we update the wakeup batch size,
but we don't update the wait count in the struct sbq_wait_states. If we
resized down from a size which could use a bigger batch size, these
counts could be too large and cause us to miss necessary wakeups. To fix
this, update the wait counts when we resize (ensuring some careful
memory ordering so that it's safe w.r.t. concurrent clears).

This also fixes a theoretical issue where two threads could end up
bumping the wait count up by the batch size, which could also
potentially lead to hangs.

Reported-by: Martin Raiber <martin@urbackup.org>
Fixes: e3a2b3f931f5 ("blk-mq: allow changing of queue depth through sysfs")
Fixes: 2971c35f3588 ("blk-mq: bitmap tag: fix race on blk_mq_bitmap_tags::wake_cnt")
Signed-off-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
[bwh: Backported to 3.16:
 - Adjust filename
 - Rename almost everything
 - Use ACCESS_ONCE() instead of {READ,WRITE}_ONCE()]
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
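The logic of the fixed wakeup path can be sketched outside the kernel. Below
is a minimal userspace model, assuming C11 <stdatomic.h> as a stand-in for
the kernel's atomic_t API; wait_cnt, wake_cnt, and on_clear() are
illustrative names, not the kernel's, and the default sequentially
consistent atomics subsume the explicit smp_mb__before_atomic() in the
patch.

#include <stdatomic.h>
#include <stdio.h>

static atomic_int wait_cnt = 4;  /* clears remaining before the next wakeup */
static atomic_int wake_cnt = 4;  /* current batch size; a resize may change it */

/* One cleared tag, mirroring the shape of the fixed bt_clear_tag(). */
static void on_clear(void)
{
        /* atomic_fetch_sub() returns the old value; subtracting 1 gives the
         * new one, matching the kernel's atomic_dec_return(). */
        int cnt = atomic_fetch_sub(&wait_cnt, 1) - 1;

        if (cnt <= 0) {
                /* Read the batch before re-arming the count; C11's default
                 * seq_cst ordering plays the role of smp_mb__before_atomic(). */
                int batch = atomic_load(&wake_cnt);

                /*
                 * Of all racing clearers that saw the count reach zero or
                 * below, only one can win this compare-and-swap, so the count
                 * is bumped by the batch exactly once. If a concurrent resize
                 * already reset the count, the CAS harmlessly fails.
                 */
                atomic_compare_exchange_strong(&wait_cnt, &cnt, cnt + batch);
                printf("wake up a waiter (batch %d)\n", batch);
        }
}

int main(void)
{
        for (int i = 0; i < 8; i++)
                on_clear();      /* prints two wakeups with a batch of 4 */
        return 0;
}

The key point is the compare-and-swap: unlike the old unconditional
atomic_add(), it cannot be applied twice by two racing clearers, and it
cannot clobber a count that a concurrent resize has just reset.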
Diffstat (limited to 'block')
-rw-r--r--	block/blk-mq-tag.c	41
1 file changed, 32 insertions(+), 9 deletions(-)
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 05d8bc76d025..b37125e2c7dc 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -340,6 +340,7 @@ static void bt_clear_tag(struct blk_mq_bitmap_tags *bt, unsigned int tag)
 {
         const int index = TAG_TO_INDEX(bt, tag);
         struct bt_wait_state *bs;
+        unsigned int wake_batch;
         int wait_cnt;
 
         clear_bit(TAG_TO_BIT(bt, tag), &bt->map[index].word);
@@ -352,10 +353,22 @@ static void bt_clear_tag(struct blk_mq_bitmap_tags *bt, unsigned int tag)
                 return;
 
         wait_cnt = atomic_dec_return(&bs->wait_cnt);
-        if (unlikely(wait_cnt < 0))
-                wait_cnt = atomic_inc_return(&bs->wait_cnt);
-        if (wait_cnt == 0) {
-                atomic_add(bt->wake_cnt, &bs->wait_cnt);
+        if (wait_cnt <= 0) {
+                wake_batch = ACCESS_ONCE(bt->wake_cnt);
+                /*
+                 * Pairs with the memory barrier in bt_update_count() to
+                 * ensure that we see the batch size update before the wait
+                 * count is reset.
+                 */
+                smp_mb__before_atomic();
+                /*
+                 * If there are concurrent callers to bt_clear_tag(), the last
+                 * one to decrement the wait count below zero will bump it back
+                 * up. If there is a concurrent resize, the count reset will
+                 * either cause the cmpxchg to fail or overwrite after the
+                 * cmpxchg.
+                 */
+                atomic_cmpxchg(&bs->wait_cnt, wait_cnt, wait_cnt + wake_batch);
                 bt_index_atomic_inc(&bt->wake_index);
                 wake_up(&bs->wait);
         }
@@ -450,20 +463,30 @@ static void bt_update_count(struct blk_mq_bitmap_tags *bt,
                             unsigned int depth)
 {
         unsigned int tags_per_word = 1U << bt->bits_per_word;
         unsigned int map_depth = depth;
+        unsigned int wake_batch;
+        int i;
 
         if (depth) {
-                int i;
-
                 for (i = 0; i < bt->map_nr; i++) {
                         bt->map[i].depth = min(map_depth, tags_per_word);
                         map_depth -= bt->map[i].depth;
                 }
         }
 
-        bt->wake_cnt = BT_WAIT_BATCH;
-        if (bt->wake_cnt > depth / BT_WAIT_QUEUES)
-                bt->wake_cnt = max(1U, depth / BT_WAIT_QUEUES);
+        wake_batch = BT_WAIT_BATCH;
+        if (wake_batch > depth / BT_WAIT_QUEUES)
+                wake_batch = max(1U, depth / BT_WAIT_QUEUES);
+        if (bt->wake_cnt != wake_batch) {
+                ACCESS_ONCE(bt->wake_cnt) = wake_batch;
+                /*
+                 * Pairs with the memory barrier in bt_clear_tag() to ensure
+                 * that the batch size is updated before the wait counts.
+                 */
+                smp_mb__before_atomic();
+                for (i = 0; i < BT_WAIT_QUEUES; i++)
+                        atomic_set(&bt->bs[i].wait_cnt, 1);
+        }
 
         bt->depth = depth;
 }
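For completeness, a companion sketch of the resize side, under the same
assumptions as above (NR_WAIT_QUEUES, WAIT_BATCH, and calc_wake_batch() are
illustrative stand-ins for BT_WAIT_QUEUES, BT_WAIT_BATCH, and the open-coded
clamp in bt_update_count()): the new batch size is published before the
per-queue wait counts are reset, so a clearer that observes a reset count
also observes the new batch.

#include <stdatomic.h>

#define NR_WAIT_QUEUES 8         /* illustrative stand-in for BT_WAIT_QUEUES */
#define WAIT_BATCH     8         /* illustrative stand-in for BT_WAIT_BATCH */

static atomic_int wake_cnt = WAIT_BATCH;
static atomic_int wait_cnts[NR_WAIT_QUEUES];

/* Clamp the batch so that shallow queues still generate wakeups, like the
 * max(1U, depth / BT_WAIT_QUEUES) expression in bt_update_count(). */
static unsigned int calc_wake_batch(unsigned int depth)
{
        unsigned int batch = WAIT_BATCH;

        if (batch > depth / NR_WAIT_QUEUES) {
                batch = depth / NR_WAIT_QUEUES;
                if (batch == 0)
                        batch = 1;
        }
        return batch;
}

static void on_resize(unsigned int depth)
{
        unsigned int batch = calc_wake_batch(depth);

        if ((unsigned int)atomic_load(&wake_cnt) != batch) {
                /*
                 * Publish the new batch size first; seq_cst stores provide
                 * the ordering the kernel gets from smp_mb__before_atomic().
                 * A clearer that sees a reset count below therefore also
                 * sees the new batch size stored above.
                 */
                atomic_store(&wake_cnt, (int)batch);
                for (int i = 0; i < NR_WAIT_QUEUES; i++)
                        atomic_store(&wait_cnts[i], 1);
        }
}

Resetting each count to 1 rather than to the new batch means the next clear
on every wait queue takes the cmpxchg path above and re-arms the count with
the current batch size, which is what prevents the stale, too-large counts
that caused the missed wakeups.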