author    Ming Lei <ming.lei@redhat.com>  2017-10-14 17:22:25 +0800
committer Jens Axboe <axboe@kernel.dk>    2017-11-01 08:20:02 -0600
commit    5e3d02bbafad38975099b5848f5ebadedcf7bb7e (patch)
tree      165fcba950a2f17a88639cf3251e660c3844f889 /block/blk-mq-sched.c
parent    0e7d3a8d4ec3f662faa7ef7f2957459b33f2481d (diff)
blk-mq-sched: dispatch from scheduler IFF progress is made in ->dispatch
When the hw queue is busy, we shouldn't take requests from the scheduler
queue any more; otherwise it is difficult to do IO merge.

This patch fixes the awful IO performance on some SCSI devices (lpfc,
qla2xxx, ...) when mq-deadline/kyber is used, by not taking requests from
the scheduler if the hw queue is busy.

Reviewed-by: Omar Sandoval <osandov@fb.com>
Reviewed-by: Bart Van Assche <bart.vanassche@wdc.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
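The gist of the change can be modelled outside the kernel: blk_mq_dispatch_rq_list()
returns whether the hardware queue actually accepted the requests, and the patch
reuses that return value to decide whether to pull more requests out of the I/O
scheduler. The stand-alone C sketch below is only an illustration under that
assumption; the helpers hw_queue_busy, blk_mq_dispatch_rq_list_stub() and
sched_dispatch_requests() are hypothetical stubs, not the real blk-mq API.

	/* dispatch_gate_sketch.c -- illustrative only, not kernel code. */
	#include <stdbool.h>
	#include <stdio.h>

	/* Stub standing in for whether the hardware queue can accept work. */
	static bool hw_queue_busy;

	/*
	 * Stub standing in for blk_mq_dispatch_rq_list(): returns true when
	 * the requests on the dispatch list were actually sent to hardware.
	 */
	static bool blk_mq_dispatch_rq_list_stub(void)
	{
		return !hw_queue_busy;
	}

	static void sched_dispatch_requests(bool dispatch_list_empty,
					    bool has_sched_dispatch)
	{
		/* Mirrors the patched logic: default to asking the scheduler. */
		bool do_sched_dispatch = true;

		if (!dispatch_list_empty)
			do_sched_dispatch = blk_mq_dispatch_rq_list_stub();

		/*
		 * Only take requests from the scheduler queue when ->dispatch
		 * made progress (or was empty); a busy hw queue leaves requests
		 * in the scheduler so they can still be merged.
		 */
		if (do_sched_dispatch && has_sched_dispatch)
			printf("dispatch from scheduler\n");
		else
			printf("keep requests in scheduler queue for merging\n");
	}

	int main(void)
	{
		hw_queue_busy = true;
		sched_dispatch_requests(false, true);	/* busy: do not drain scheduler */

		hw_queue_busy = false;
		sched_dispatch_requests(false, true);	/* progress: dispatch from scheduler */
		return 0;
	}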
Diffstat (limited to 'block/blk-mq-sched.c')
-rw-r--r--	block/blk-mq-sched.c	12
1 file changed, 6 insertions, 6 deletions
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 4ab69435708c..eca011fdfa0e 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -94,7 +94,7 @@ void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
 	struct request_queue *q = hctx->queue;
 	struct elevator_queue *e = q->elevator;
 	const bool has_sched_dispatch = e && e->type->ops.mq.dispatch_request;
-	bool did_work = false;
+	bool do_sched_dispatch = true;
 	LIST_HEAD(rq_list);
 
 	/* RCU or SRCU read lock is needed before checking quiesced flag */
@@ -125,18 +125,18 @@ void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
 	 */
 	if (!list_empty(&rq_list)) {
 		blk_mq_sched_mark_restart_hctx(hctx);
-		did_work = blk_mq_dispatch_rq_list(q, &rq_list);
+		do_sched_dispatch = blk_mq_dispatch_rq_list(q, &rq_list);
 	} else if (!has_sched_dispatch) {
 		blk_mq_flush_busy_ctxs(hctx, &rq_list);
 		blk_mq_dispatch_rq_list(q, &rq_list);
 	}
 
 	/*
-	 * We want to dispatch from the scheduler if we had no work left
-	 * on the dispatch list, OR if we did have work but weren't able
-	 * to make progress.
+	 * We want to dispatch from the scheduler if there was nothing
+	 * on the dispatch list or we were able to dispatch from the
+	 * dispatch list.
 	 */
-	if (!did_work && has_sched_dispatch) {
+	if (do_sched_dispatch && has_sched_dispatch) {
 		do {
 			struct request *rq;