author | Jens Axboe <axboe@kernel.dk> | 2018-06-02 14:04:07 -0600 |
---|---|---|
committer | Jens Axboe <axboe@kernel.dk> | 2018-06-02 20:35:00 -0600 |
commit | cd4a4ae4683dc2e09380118e205e057896dcda2b | |
tree | 7991620de08c70556b880e5fc4917382d48e477d /block | |
parent | d00a11df691466772435ec02471292eae07885e5 | |
block: don't use blocking queue entered for recursive bio submits
If we end up splitting a bio and the queue goes away between
the initial submission and the later split submission, then we
can block forever in blk_queue_enter(), waiting for the queue
reference count to drop to zero. It never will, since we ourselves
still hold a reference.
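As a rough userspace analogue of the hang (not the kernel code; the
q_usage name and the bounded loop are invented for illustration),
waiting for a reference count to reach zero while the waiter itself
holds one of the references can never finish:

/* Build with: cc -std=c11 -o hang hang.c */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int q_usage = 1;  /* the reference the original submission took */

int main(void)
{
        /*
         * The recursive submit waits for q_usage to drop to zero before
         * it may proceed.  The loop is bounded here so the demo
         * terminates; the kernel-side wait has no such bound.
         */
        for (int tries = 0; tries < 5; tries++) {
                if (atomic_load(&q_usage) == 0) {
                        puts("queue drained, resubmission may proceed");
                        return 0;
                }
                printf("waiting, q_usage = %d (that ref is our own)\n",
                       atomic_load(&q_usage));
        }
        puts("never drains: the waiter itself holds the last reference");
        return 0;
}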
Mark a split bio as already having entered the queue, so we can
just use the live non-blocking queue enter variant.
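A minimal sketch of that pattern outside the kernel, with made-up
names standing in for BIO_QUEUE_ENTERED, blk_queue_enter() and
blk_queue_enter_live(): the split bio is flagged before it is
resubmitted, and the flagged path only takes another reference
instead of waiting on a queue that is going away:

/* Build with: cc -std=c11 -o enter enter.c */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int q_usage = 1;          /* ref held by the original submission */
static atomic_bool q_frozen = true;     /* queue freeze/teardown in progress */

struct fake_bio {
        bool queue_entered;     /* stand-in for the BIO_QUEUE_ENTERED flag */
};

/* Stand-in for blk_queue_enter_live(): just take another reference. */
static void queue_enter_live(void)
{
        atomic_fetch_add(&q_usage, 1);
}

/* Stand-in for the blocking blk_queue_enter(): fail here instead of waiting. */
static int queue_enter(void)
{
        return atomic_load(&q_frozen) ? -1 : 0;
}

static void submit(struct fake_bio *bio)
{
        if (bio->queue_entered)
                queue_enter_live();     /* recursive resubmit: never blocks */
        else if (queue_enter() < 0)
                puts("would have to wait for the queue");
        else
                puts("entered the queue normally");
}

int main(void)
{
        struct fake_bio split = { .queue_entered = true };

        submit(&split);         /* takes the non-blocking path */
        printf("q_usage is now %d\n", atomic_load(&q_usage));
        return 0;
}

In the real patch the flag is set in blk_queue_split() right before
generic_make_request() recurses, which is what the diff below adds.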
Thanks to Tetsuo Handa for the analysis.
Reported-by: syzbot+c4f9cebf9d651f6e54de@syzkaller.appspotmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block')
-rw-r--r-- | block/blk-core.c | 4 |
-rw-r--r-- | block/blk-merge.c | 10 |
2 files changed, 13 insertions, 1 deletion
diff --git a/block/blk-core.c b/block/blk-core.c
index cd573a33a6f3..3f56be15f17e 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -2377,7 +2377,9 @@ blk_qc_t generic_make_request(struct bio *bio)
 
         if (bio->bi_opf & REQ_NOWAIT)
                 flags = BLK_MQ_REQ_NOWAIT;
-        if (blk_queue_enter(q, flags) < 0) {
+        if (bio_flagged(bio, BIO_QUEUE_ENTERED))
+                blk_queue_enter_live(q);
+        else if (blk_queue_enter(q, flags) < 0) {
                 if (!blk_queue_dying(q) && (bio->bi_opf & REQ_NOWAIT))
                         bio_wouldblock_error(bio);
                 else
diff --git a/block/blk-merge.c b/block/blk-merge.c
index d70ab08820e5..aaec38cc37b8 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -210,6 +210,16 @@ void blk_queue_split(struct request_queue *q, struct bio **bio)
                 /* there isn't chance to merge the splitted bio */
                 split->bi_opf |= REQ_NOMERGE;
 
+                /*
+                 * Since we're recursing into make_request here, ensure
+                 * that we mark this bio as already having entered the queue.
+                 * If not, and the queue is going away, we can get stuck
+                 * forever on waiting for the queue reference to drop. But
+                 * that will never happen, as we're already holding a
+                 * reference to it.
+                 */
+                bio_set_flag(*bio, BIO_QUEUE_ENTERED);
+
                 bio_chain(split, *bio);
                 trace_block_split(q, split, (*bio)->bi_iter.bi_sector);
                 generic_make_request(*bio);