author	Christoph Hellwig <hch@lst.de>	2020-05-29 15:53:13 +0200
committer	Jens Axboe <axboe@kernel.dk>	2020-05-29 10:23:25 -0600
commit	600c3b0cea784aaba77df3ed4a6b4f2ebfa935ce (patch)
tree	c0596a8c7bccf342e1df3dc020fdaf46f5d8d102 /block
parent	766473681c131f2da81d62472864c8c97e021373 (diff)
blk-mq: open code __blk_mq_alloc_request in blk_mq_alloc_request_hctx
blk_mq_alloc_request_hctx is only used for NVMeoF connect commands, so
tailor it to the specific requirements, and don't bother the general
fast path code with its special twinkles.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Reviewed-by: Daniel Wagner <dwagner@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
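For context: the only users of blk_mq_alloc_request_hctx() are the NVMe over
Fabrics connect paths, which must issue the connect command on the hardware
queue they are setting up. A minimal, hypothetical caller sketch follows; the
helper name and the qid-to-hctx mapping are illustrative, not part of this
patch:

/*
 * Hypothetical sketch: allocate a fabrics connect command on a
 * specific hardware queue.  Connect commands use reserved tags and
 * must not sleep waiting for a tag, so the allocation cannot migrate
 * to a different hctx behind the caller's back.
 */
static struct request *connect_cmd_alloc(struct request_queue *q,
		unsigned int qid)
{
	return blk_mq_alloc_request_hctx(q, REQ_OP_DRV_OUT,
			BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED,
			qid - 1);	/* I/O queue qid uses hctx qid - 1 */
}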
Diffstat (limited to 'block')
-rw-r--r--	block/blk-mq.c	44
1 file changed, 23 insertions(+), 21 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index dcc52859a92c..560ef5df8993 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -351,21 +351,13 @@ static struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data)
 {
 	struct request_queue *q = data->q;
 	struct elevator_queue *e = q->elevator;
-	unsigned int tag;
-	bool clear_ctx_on_error = false;
 	u64 alloc_time_ns = 0;
+	unsigned int tag;
 
 	/* alloc_time includes depth and tag waits */
 	if (blk_queue_rq_alloc_time(q))
 		alloc_time_ns = ktime_get_ns();
 
-	if (likely(!data->ctx)) {
-		data->ctx = blk_mq_get_ctx(q);
-		clear_ctx_on_error = true;
-	}
-	if (likely(!data->hctx))
-		data->hctx = blk_mq_map_queue(q, data->cmd_flags,
-						data->ctx);
 	if (data->cmd_flags & REQ_NOWAIT)
 		data->flags |= BLK_MQ_REQ_NOWAIT;
@@ -381,17 +373,16 @@ static struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data)
 		    e->type->ops.limit_depth &&
 		    !(data->flags & BLK_MQ_REQ_RESERVED))
 			e->type->ops.limit_depth(data->cmd_flags, data);
-	} else {
-		blk_mq_tag_busy(data->hctx);
 	}
 
+	data->ctx = blk_mq_get_ctx(q);
+	data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx);
+	if (!(data->flags & BLK_MQ_REQ_INTERNAL))
+		blk_mq_tag_busy(data->hctx);
+
 	tag = blk_mq_get_tag(data);
-	if (tag == BLK_MQ_NO_TAG) {
-		if (clear_ctx_on_error)
-			data->ctx = NULL;
+	if (tag == BLK_MQ_NO_TAG)
 		return NULL;
-	}
-
 	return blk_mq_rq_ctx_init(data, tag, alloc_time_ns);
 }
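Taken together, the two hunks above leave the fast path reading roughly as
follows. This is reassembled from the diff context shown here; the elevator
branch that the hunks do not show in full is elided:

static struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data)
{
	struct request_queue *q = data->q;
	struct elevator_queue *e = q->elevator;
	u64 alloc_time_ns = 0;
	unsigned int tag;

	/* alloc_time includes depth and tag waits */
	if (blk_queue_rq_alloc_time(q))
		alloc_time_ns = ktime_get_ns();

	if (data->cmd_flags & REQ_NOWAIT)
		data->flags |= BLK_MQ_REQ_NOWAIT;

	if (e) {
		/* ... BLK_MQ_REQ_INTERNAL and ops.limit_depth handling,
		 * unchanged by this patch, elided ... */
	}

	/* ctx and hctx are now assigned unconditionally ... */
	data->ctx = blk_mq_get_ctx(q);
	data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx);
	if (!(data->flags & BLK_MQ_REQ_INTERNAL))
		blk_mq_tag_busy(data->hctx);

	tag = blk_mq_get_tag(data);
	if (tag == BLK_MQ_NO_TAG)	/* ... so nothing to reset on failure */
		return NULL;
	return blk_mq_rq_ctx_init(data, tag, alloc_time_ns);
}

With the pre-assigned ctx/hctx case gone, the clear_ctx_on_error bookkeeping
disappears as well.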
@@ -431,17 +422,22 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
 		.flags		= flags,
 		.cmd_flags	= op,
 	};
-	struct request *rq;
+	u64 alloc_time_ns = 0;
 	unsigned int cpu;
+	unsigned int tag;
 	int ret;
 
+	/* alloc_time includes depth and tag waits */
+	if (blk_queue_rq_alloc_time(q))
+		alloc_time_ns = ktime_get_ns();
+
 	/*
	 * If the tag allocator sleeps we could get an allocation for a
	 * different hardware context.  No need to complicate the low level
	 * allocator for this for the rare use case of a command tied to
	 * a specific queue.
	 */
-	if (WARN_ON_ONCE(!(flags & BLK_MQ_REQ_NOWAIT)))
+	if (WARN_ON_ONCE(!(flags & (BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED))))
 		return ERR_PTR(-EINVAL);
 
 	if (hctx_idx >= q->nr_hw_queues)
@@ -462,11 +458,17 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
 	cpu = cpumask_first_and(data.hctx->cpumask, cpu_online_mask);
 	data.ctx = __blk_mq_get_ctx(q, cpu);
 
+	if (q->elevator)
+		data.flags |= BLK_MQ_REQ_INTERNAL;
+	else
+		blk_mq_tag_busy(data.hctx);
+
 	ret = -EWOULDBLOCK;
-	rq = __blk_mq_alloc_request(&data);
-	if (!rq)
+	tag = blk_mq_get_tag(&data);
+	if (tag == BLK_MQ_NO_TAG)
 		goto out_queue_exit;
-	return rq;
+	return blk_mq_rq_ctx_init(&data, tag, alloc_time_ns);
+
 out_queue_exit:
 	blk_queue_exit(q);
 	return ERR_PTR(ret);
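After the patch, the open-coded blk_mq_alloc_request_hctx() ends up roughly as
below, allocating the tag directly so the caller-chosen hctx is never
remapped. The parts between the hunks (blk_queue_enter() and the data.hctx
lookup) and the initializer's .q member are not visible above and are filled
in from the surrounding kernel source of that era, so treat this as a sketch:

struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
		unsigned int op, blk_mq_req_flags_t flags,
		unsigned int hctx_idx)
{
	struct blk_mq_alloc_data data = {
		.q		= q,
		.flags		= flags,
		.cmd_flags	= op,
	};
	u64 alloc_time_ns = 0;
	unsigned int cpu;
	unsigned int tag;
	int ret;

	/* alloc_time includes depth and tag waits */
	if (blk_queue_rq_alloc_time(q))
		alloc_time_ns = ktime_get_ns();

	/* a sleeping tag allocation could end up on another hctx */
	if (WARN_ON_ONCE(!(flags & (BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED))))
		return ERR_PTR(-EINVAL);

	/* ... blk_queue_enter() and data.hctx lookup elided ... */

	cpu = cpumask_first_and(data.hctx->cpumask, cpu_online_mask);
	data.ctx = __blk_mq_get_ctx(q, cpu);

	/* take the scheduler tag path, or account for shared tags */
	if (q->elevator)
		data.flags |= BLK_MQ_REQ_INTERNAL;
	else
		blk_mq_tag_busy(data.hctx);

	ret = -EWOULDBLOCK;
	tag = blk_mq_get_tag(&data);
	if (tag == BLK_MQ_NO_TAG)
		goto out_queue_exit;
	return blk_mq_rq_ctx_init(&data, tag, alloc_time_ns);

out_queue_exit:
	blk_queue_exit(q);
	return ERR_PTR(ret);
}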