author    | Ming Lei <ming.lei@canonical.com> | 2015-10-20 23:13:57 +0800
committer | Jens Axboe <axboe@fb.com>         | 2015-10-21 15:00:58 -0600
commit    | cfd0c552a8272d691691f40073654d775836e23a (patch)
tree      | 8c5ee7e38512bde8df889e70b17e855b5d96faf9 /block/blk-mq.c
parent    | 676d06077f964f06af52c19e59f0409a8880612f (diff)
blk-mq: mark ctx as pending at batch in flush plug path
Most of the time, flushing the plug list is the hottest I/O path, so
mark the ctx as pending once for the whole batch, after all requests
in the list have been inserted.
Reviewed-by: Jeff Moyer <jmoyer@redhat.com>
Signed-off-by: Ming Lei <ming.lei@canonical.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'block/blk-mq.c')
-rw-r--r-- | block/blk-mq.c | 18
1 file changed, 13 insertions, 5 deletions
```diff
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 24c528f182ea..159e69bd2c3c 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -990,18 +990,25 @@ void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
 }
 EXPORT_SYMBOL(blk_mq_delay_queue);
 
-static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
-		struct request *rq, bool at_head)
+static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
+					    struct blk_mq_ctx *ctx,
+					    struct request *rq,
+					    bool at_head)
 {
-	struct blk_mq_ctx *ctx = rq->mq_ctx;
-
 	trace_block_rq_insert(hctx->queue, rq);
 
 	if (at_head)
 		list_add(&rq->queuelist, &ctx->rq_list);
 	else
 		list_add_tail(&rq->queuelist, &ctx->rq_list);
+}
+
+static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
+				    struct request *rq, bool at_head)
+{
+	struct blk_mq_ctx *ctx = rq->mq_ctx;
+
+	__blk_mq_insert_req_list(hctx, ctx, rq, at_head);
 	blk_mq_hctx_mark_pending(hctx, ctx);
 }
 
@@ -1057,8 +1064,9 @@ static void blk_mq_insert_requests(struct request_queue *q,
 		rq = list_first_entry(list, struct request, queuelist);
 		list_del_init(&rq->queuelist);
 		rq->mq_ctx = ctx;
-		__blk_mq_insert_request(hctx, rq, false);
+		__blk_mq_insert_req_list(hctx, ctx, rq, false);
 	}
+	blk_mq_hctx_mark_pending(hctx, ctx);
 	spin_unlock(&ctx->lock);
 
 	blk_mq_run_hw_queue(hctx, from_schedule);
```
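To make the batching idea concrete outside the kernel, here is a minimal user-space C sketch of the pattern the patch applies in the flush-plug path: append a whole batch of requests to a software context's list first, then mark that context as pending exactly once at the end, rather than on every insertion. All names in the sketch (toy_ctx, toy_request, toy_insert_req_list, toy_insert_requests, and so on) are invented for illustration and are not the blk-mq API; in the real code the mark is blk_mq_hctx_mark_pending(), which sets the ctx's bit in the hardware context's pending bitmap, so doing it once per batch avoids repeated bitmap updates.

```c
/*
 * Simplified, user-space sketch of the pattern in this patch: when a plug
 * list is flushed, requests are appended to the software context's list one
 * by one, but the "this ctx has pending work" marker only needs to be set
 * once for the whole batch.  Every identifier here is made up for the
 * illustration; none of this is the kernel's API.
 */
#include <stdbool.h>
#include <stdio.h>

#define MAX_REQS 8

struct toy_request {
	int tag;
	struct toy_request *next;
};

struct toy_ctx {
	struct toy_request *head, *tail;	/* per-ctx request list */
	bool pending;				/* "ctx has work" marker */
};

/* Rough analogue of __blk_mq_insert_req_list(): list manipulation only. */
static void toy_insert_req_list(struct toy_ctx *ctx, struct toy_request *rq)
{
	rq->next = NULL;
	if (ctx->tail)
		ctx->tail->next = rq;
	else
		ctx->head = rq;
	ctx->tail = rq;
}

/* Rough analogue of blk_mq_hctx_mark_pending(): set the pending marker. */
static void toy_mark_pending(struct toy_ctx *ctx)
{
	ctx->pending = true;
}

/*
 * Batch insert in the spirit of blk_mq_insert_requests() after the patch:
 * the pending mark is hoisted out of the per-request loop.
 */
static void toy_insert_requests(struct toy_ctx *ctx,
				struct toy_request *reqs, int nr)
{
	for (int i = 0; i < nr; i++)
		toy_insert_req_list(ctx, &reqs[i]);
	toy_mark_pending(ctx);
}

int main(void)
{
	struct toy_ctx ctx = { 0 };
	struct toy_request reqs[MAX_REQS];

	for (int i = 0; i < MAX_REQS; i++)
		reqs[i].tag = i;

	toy_insert_requests(&ctx, reqs, MAX_REQS);

	printf("ctx pending=%d, requests:", ctx.pending);
	for (struct toy_request *rq = ctx.head; rq; rq = rq->next)
		printf(" %d", rq->tag);
	printf("\n");
	return 0;
}
```

Note that the single-request path is unchanged in behaviour: __blk_mq_insert_request() still marks the ctx on every call, which the patch preserves by keeping the mark in that wrapper while the batch path calls the new list helper directly and marks once after the loop.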