author     Jens Axboe <axboe@fb.com>    2017-01-26 12:40:07 -0700
committer  Jens Axboe <axboe@fb.com>    2017-01-27 08:20:35 -0700
commit     c13660a08c8b3bb49def4374bfd414aaaa564662 (patch)
tree       c7a61741aba75e320838865f176092c07af37f95 /block/blk-mq-sched.c
parent     50e1dab86aa2c10cbca2f754aae9542169403141 (diff)
blk-mq-sched: change ->dispatch_requests() to ->dispatch_request()
When we invoke dispatch_requests(), the scheduler empties everything
into the passed-in list. This isn't always a good thing, since it
means that we remove requests that future I/O could otherwise have
been merged with.
Change the function to dispatch a single request at a time. If
we do that, we can back off exactly at the point where the device
can't consume more IO, and leave the rest with the scheduler for
better merging and future dispatch decision making.
Signed-off-by: Jens Axboe <axboe@fb.com>
Reviewed-by: Omar Sandoval <osandov@fb.com>
Tested-by: Hannes Reinecke <hare@suse.com>
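For reference, the hook-prototype change this patch makes, reconstructed from the calls visible in the hunk below (the real struct elevator_mq_ops lives in include/linux/elevator.h and is not part of this diff, so treat this as a sketch rather than the exact header text):

/*
 * Sketch only: the two member signatures are inferred from the calls
 * e->type->ops.mq.dispatch_requests(hctx, &rq_list) and
 * e->type->ops.mq.dispatch_request(hctx) in the hunk below.
 */
struct blk_mq_hw_ctx;
struct request;
struct list_head;

struct elevator_mq_ops_sketch {
	/* before: drain every pending request into *list in one go */
	void (*dispatch_requests)(struct blk_mq_hw_ctx *, struct list_head *);
	/* after: return one request per call, or NULL if nothing is ready */
	struct request *(*dispatch_request)(struct blk_mq_hw_ctx *);
};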
Diffstat (limited to 'block/blk-mq-sched.c')
-rw-r--r--  block/blk-mq-sched.c | 23 +++++++++++++++--------
1 file changed, 15 insertions(+), 8 deletions(-)
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index fcc0e893d687..c27613de80c5 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -201,15 +201,22 @@ void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
	 * leave them there for as long as we can. Mark the hw queue as
	 * needing a restart in that case.
	 */
-	if (list_empty(&rq_list)) {
-		if (e && e->type->ops.mq.dispatch_requests)
-			e->type->ops.mq.dispatch_requests(hctx, &rq_list);
-		else
-			blk_mq_flush_busy_ctxs(hctx, &rq_list);
-	} else
+	if (!list_empty(&rq_list)) {
 		blk_mq_sched_mark_restart(hctx);
-
-	blk_mq_dispatch_rq_list(hctx, &rq_list);
+		blk_mq_dispatch_rq_list(hctx, &rq_list);
+	} else if (!e || !e->type->ops.mq.dispatch_request) {
+		blk_mq_flush_busy_ctxs(hctx, &rq_list);
+		blk_mq_dispatch_rq_list(hctx, &rq_list);
+	} else {
+		do {
+			struct request *rq;
+
+			rq = e->type->ops.mq.dispatch_request(hctx);
+			if (!rq)
+				break;
+			list_add(&rq->queuelist, &rq_list);
+		} while (blk_mq_dispatch_rq_list(hctx, &rq_list));
+	}
 }
 
 void blk_mq_sched_move_to_dispatch(struct blk_mq_hw_ctx *hctx,
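To show the scheduler side of the new contract, here is a minimal, purely illustrative ->dispatch_request() implementation. It is not code from this patch or from any in-tree scheduler: sketch_data, its fifo list, its lock, and sketch_dispatch_request are made-up names, while the hctx->queue->elevator->elevator_data access path follows the usual blk-mq elevator plumbing.

#include <linux/blk-mq.h>
#include <linux/elevator.h>
#include <linux/list.h>
#include <linux/spinlock.h>

/* hypothetical per-queue scheduler state, for illustration only */
struct sketch_data {
	spinlock_t lock;
	struct list_head fifo;
};

static struct request *sketch_dispatch_request(struct blk_mq_hw_ctx *hctx)
{
	struct sketch_data *sd = hctx->queue->elevator->elevator_data;
	struct request *rq = NULL;

	spin_lock(&sd->lock);
	if (!list_empty(&sd->fifo)) {
		rq = list_first_entry(&sd->fifo, struct request, queuelist);
		list_del_init(&rq->queuelist);
	}
	spin_unlock(&sd->lock);

	/*
	 * Returning NULL ends the do/while loop in
	 * blk_mq_sched_dispatch_requests(); anything still on sd->fifo
	 * stays with the scheduler and remains a merge candidate.
	 */
	return rq;
}

The caller-side loop in the hunk above only asks for the next request while blk_mq_dispatch_rq_list() reports that the device is still consuming IO, so the moment the device can't take more, dispatch backs off and the remaining work stays queued in the scheduler.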