author    Jens Axboe <axboe@kernel.dk>    2021-12-03 06:48:53 -0700
committer Jens Axboe <axboe@kernel.dk>    2021-12-16 08:49:17 -0700
commit    3c67d44de787dff288d7f2a51c372b22f7356db6 (patch)
tree      98a6604327a08c080282c6ca40d5e12783a8eb7d /block
parent    fcade2ce06ffebee5c2f6629ddbf2086c0f5ba5a (diff)
block: add mq_ops->queue_rqs hook
If we have a list of requests in our plug list, send it to the driver in
one go, if possible. The driver must set mq_ops->queue_rqs() to support
this; if not, the usual one-by-one path is used.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
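For context: a driver opts into the batch path by setting .queue_rqs in its
blk_mq_ops (the companion include/linux/blk-mq.h change, not shown in this
block-limited diffstat, declares the hook as
void (*queue_rqs)(struct request **rqlist)). Below is a minimal, hypothetical
sketch of what such an implementation might look like -- the mydrv_* names are
invented stand-ins, not part of this patch. The contract follows from the
caller: dispatch what you can, and leave anything else on *rqlist so it falls
back to the usual one-by-one ->queue_rq() path.

#include <linux/blk-mq.h>

/* Hypothetical hardware helpers -- trivial stubs for illustration only. */
static bool mydrv_hw_can_queue(struct request *rq) { return true; }
static void mydrv_submit_hw(struct request *rq) { }

static blk_status_t mydrv_queue_rq(struct blk_mq_hw_ctx *hctx,
                                   const struct blk_mq_queue_data *bd)
{
        if (!mydrv_hw_can_queue(bd->rq))
                return BLK_STS_RESOURCE;
        blk_mq_start_request(bd->rq);
        mydrv_submit_hw(bd->rq);
        return BLK_STS_OK;
}

/*
 * Hypothetical ->queue_rqs(): drain the whole plug list in one go.
 * Requests we cannot take are put back on *rqlist; the block layer then
 * issues them through the regular ->queue_rq() path. Note that this
 * simple sketch reverses the order of the leftover requests.
 */
static void mydrv_queue_rqs(struct request **rqlist)
{
        struct request *rq, *requeue_list = NULL;

        while ((rq = rq_list_pop(rqlist))) {
                if (!mydrv_hw_can_queue(rq)) {
                        rq_list_add(&requeue_list, rq);
                        continue;
                }
                blk_mq_start_request(rq);
                mydrv_submit_hw(rq);
        }
        *rqlist = requeue_list;
}

static const struct blk_mq_ops mydrv_mq_ops = {
        .queue_rq       = mydrv_queue_rq,
        .queue_rqs      = mydrv_queue_rqs,      /* optional batch hook */
};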
Diffstat (limited to 'block')
-rw-r--r--  block/blk-mq.c | 26 +++++++++++++++++++++++---
1 file changed, 23 insertions(+), 3 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 75154cc788db..51991232824a 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2553,6 +2553,7 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 {
         struct blk_mq_hw_ctx *this_hctx;
         struct blk_mq_ctx *this_ctx;
+        struct request *rq;
         unsigned int depth;
         LIST_HEAD(list);
 
@@ -2561,7 +2562,28 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
         plug->rq_count = 0;
 
         if (!plug->multiple_queues && !plug->has_elevator && !from_schedule) {
-                struct request_queue *q = rq_list_peek(&plug->mq_list)->q;
+                struct request_queue *q;
+
+                rq = rq_list_peek(&plug->mq_list);
+                q = rq->q;
+
+                /*
+                 * Peek first request and see if we have a ->queue_rqs() hook.
+                 * If we do, we can dispatch the whole plug list in one go. We
+                 * already know at this point that all requests belong to the
+                 * same queue, caller must ensure that's the case.
+                 *
+                 * Since we pass off the full list to the driver at this point,
+                 * we do not increment the active request count for the queue.
+                 * Bypass shared tags for now because of that.
+                 */
+                if (q->mq_ops->queue_rqs &&
+                    !(rq->mq_hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) {
+                        blk_mq_run_dispatch_ops(q,
+                                q->mq_ops->queue_rqs(&plug->mq_list));
+                        if (rq_list_empty(plug->mq_list))
+                                return;
+                }
 
                 blk_mq_run_dispatch_ops(q,
                                 blk_mq_plug_issue_direct(plug, false));
@@ -2573,8 +2595,6 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
         this_ctx = NULL;
         depth = 0;
         do {
-                struct request *rq;
-
                 rq = rq_list_pop(&plug->mq_list);
 
                 if (!this_hctx) {
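Putting the hunks together, the fast path in blk_mq_flush_plug_list() now
tries the batch hook first and only issues one-by-one whatever the driver
left behind. Reconstructed from the diff above for readability (the
shared-tags check exists because handing the whole list to the driver
bypasses the per-queue active request accounting):

        if (!plug->multiple_queues && !plug->has_elevator && !from_schedule) {
                struct request_queue *q;

                rq = rq_list_peek(&plug->mq_list);
                q = rq->q;

                /*
                 * Batch dispatch when the driver has ->queue_rqs() and the
                 * hctx does not use shared tags (no active-count tracking
                 * happens on this path).
                 */
                if (q->mq_ops->queue_rqs &&
                    !(rq->mq_hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) {
                        blk_mq_run_dispatch_ops(q,
                                q->mq_ops->queue_rqs(&plug->mq_list));
                        if (rq_list_empty(plug->mq_list))
                                return;         /* driver took everything */
                }

                /* Issue the remainder (or the whole list) one-by-one. */
                blk_mq_run_dispatch_ops(q,
                                blk_mq_plug_issue_direct(plug, false));
                if (rq_list_empty(plug->mq_list))
                        return;
        }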