author	Jens Axboe <axboe@fb.com>	2017-02-17 11:38:36 -0700
committer	Jens Axboe <axboe@fb.com>	2017-02-17 12:35:47 -0700
commit	0c2a6fe4dc3e8c24bc67cd5d0a36092834027cf0 (patch)
tree	098511f98d5c70f2bae4e10cac0391d3e5ee56f1 /block
parent	c7a571b45055dba740156013fef4a7fdbe3262d9 (diff)
blk-mq: don't special case flush inserts for blk-mq-sched
The current request insertion machinery works just fine for directly
inserting flushes, so no need to special case this anymore.

Signed-off-by: Jens Axboe <axboe@fb.com>
Reviewed-by: Omar Sandoval <osandov@fb.com>
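As a reading aid, here is a minimal user-space C sketch of the insert path this patch leaves behind: a flush/FUA request on a queue with an I/O scheduler now jumps to the common elevator insert, and only schedulerless queues take the direct blk_insert_flush() route. The struct and helpers below are print stubs standing in for the real blk-mq APIs, and make_request() is a simplified model of the control flow, not the kernel function.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the kernel type; only the field the model needs. */
struct request_queue { void *elevator; };

/* Print stubs tracing which path was taken; not the real blk-mq APIs. */
static void blk_mq_bio_to_request(void)       { puts("bio -> request"); }
static void blk_insert_flush(void)            { puts("direct flush insert"); }
static void blk_mq_sched_insert_request(void) { puts("scheduler insert"); }
static void blk_mq_run_hw_queue(bool async)   { printf("run hw queue (async=%d)\n", async); }

/* Model of the flush path after this patch: flush/FUA on a queue with an
 * I/O scheduler falls into the ordinary elevator insert instead of the
 * old special-cased direct insert. */
static void make_request(struct request_queue *q, bool is_flush_fua, bool is_sync)
{
	if (is_flush_fua) {
		if (q->elevator)
			goto elv_insert;
		blk_mq_bio_to_request();
		blk_insert_flush();
		goto run_queue;
	}

	if (q->elevator) {
elv_insert:
		blk_mq_bio_to_request();
		blk_mq_sched_insert_request();
		return;
	}

run_queue:
	blk_mq_run_hw_queue(!is_sync || is_flush_fua);
}

int main(void)
{
	struct request_queue with_sched = { .elevator = &with_sched };
	struct request_queue no_sched   = { .elevator = NULL };

	make_request(&with_sched, true, true);	/* flush + scheduler: elevator insert */
	make_request(&no_sched, true, true);	/* flush, no scheduler: direct insert + run */
	return 0;
}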
Diffstat (limited to 'block')
-rw-r--r--	block/blk-mq.c	18
1 file changed, 10 insertions(+), 8 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 6baa0c9fc06d..ee8c6f9f1d4d 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1434,12 +1434,11 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 	cookie = request_to_qc_t(data.hctx, rq);
 
 	if (unlikely(is_flush_fua)) {
-		blk_mq_put_ctx(data.ctx);
+		if (q->elevator)
+			goto elv_insert;
 		blk_mq_bio_to_request(rq, bio);
-		blk_mq_get_driver_tag(rq, NULL, true);
 		blk_insert_flush(rq);
-		blk_mq_run_hw_queue(data.hctx, true);
-		goto done;
+		goto run_queue;
 	}
 
 	plug = current->plug;
@@ -1489,6 +1488,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 	}
 
 	if (q->elevator) {
+elv_insert:
 		blk_mq_put_ctx(data.ctx);
 		blk_mq_bio_to_request(rq, bio);
 		blk_mq_sched_insert_request(rq, false, true,
@@ -1502,6 +1502,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 		 * latter allows for merging opportunities and more efficient
 		 * dispatching.
 		 */
+run_queue:
 		blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
 	}
 	blk_mq_put_ctx(data.ctx);
@@ -1557,12 +1558,11 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
 	cookie = request_to_qc_t(data.hctx, rq);
 
 	if (unlikely(is_flush_fua)) {
-		blk_mq_put_ctx(data.ctx);
+		if (q->elevator)
+			goto elv_insert;
 		blk_mq_bio_to_request(rq, bio);
-		blk_mq_get_driver_tag(rq, NULL, true);
 		blk_insert_flush(rq);
-		blk_mq_run_hw_queue(data.hctx, true);
-		goto done;
+		goto run_queue;
 	}
 
 	/*
@@ -1600,6 +1600,7 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
 	}
 
 	if (q->elevator) {
+elv_insert:
 		blk_mq_put_ctx(data.ctx);
 		blk_mq_bio_to_request(rq, bio);
 		blk_mq_sched_insert_request(rq, false, true,
@@ -1613,6 +1614,7 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
 		 * latter allows for merging opportunities and more efficient
 		 * dispatching.
 		 */
+run_queue:
 		blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
 	}