author    | Ming Lin <ming.l@ssi.samsung.com> | 2016-06-13 16:45:21 +0200
committer | Jens Axboe <axboe@fb.com>         | 2016-07-05 11:28:07 -0600
commit    | 1f5bd336b9150560458b03460cbcfcfbcf8995b1
tree      | 92fd38e4125cb0d032acce9f7abfb2d6b0538ab7 /block/blk-mq.c
parent    | 9e2d23f19ef6e8a08348c8f59f79805c6cb25fe6
blk-mq: add blk_mq_alloc_request_hctx
For some protocols like NVMe over Fabrics we need to be able to send
initialization commands to a specific queue.
Based on an earlier patch from Christoph Hellwig <hch@lst.de>.
Signed-off-by: Ming Lin <ming.l@ssi.samsung.com>
[hch: disallow sleeping allocation, req_op fixes]
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'block/blk-mq.c')
-rw-r--r-- | block/blk-mq.c | 39
1 file changed, 39 insertions, 0 deletions
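The diffstat is limited to block/blk-mq.c, so the header side of the change is not visible here. For the exported helper to be callable from drivers, a matching prototype is needed; the sketch below simply mirrors the definition in the diff, and its placement in include/linux/blk-mq.h is an assumption based on where the rest of the blk-mq API is declared.

struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int rw,
		unsigned int flags, unsigned int hctx_idx);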
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 13f460368759..7aa60c4f56fd 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -267,6 +267,45 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
 }
 EXPORT_SYMBOL(blk_mq_alloc_request);
 
+struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int rw,
+		unsigned int flags, unsigned int hctx_idx)
+{
+	struct blk_mq_hw_ctx *hctx;
+	struct blk_mq_ctx *ctx;
+	struct request *rq;
+	struct blk_mq_alloc_data alloc_data;
+	int ret;
+
+	/*
+	 * If the tag allocator sleeps we could get an allocation for a
+	 * different hardware context.  No need to complicate the low level
+	 * allocator for this for the rare use case of a command tied to
+	 * a specific queue.
+	 */
+	if (WARN_ON_ONCE(!(flags & BLK_MQ_REQ_NOWAIT)))
+		return ERR_PTR(-EINVAL);
+
+	if (hctx_idx >= q->nr_hw_queues)
+		return ERR_PTR(-EIO);
+
+	ret = blk_queue_enter(q, true);
+	if (ret)
+		return ERR_PTR(ret);
+
+	hctx = q->queue_hw_ctx[hctx_idx];
+	ctx = __blk_mq_get_ctx(q, cpumask_first(hctx->cpumask));
+
+	blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx);
+	rq = __blk_mq_alloc_request(&alloc_data, rw, 0);
+	if (!rq) {
+		blk_queue_exit(q);
+		return ERR_PTR(-EWOULDBLOCK);
+	}
+
+	return rq;
+}
+EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);
+
 static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
 		struct blk_mq_ctx *ctx, struct request *rq)
 {
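For context, here is a minimal caller-side sketch of how a fabrics-style driver might use the new helper to send an initialization command on a specific hardware queue. It is not part of this patch: the function name example_send_init_cmd, the choice of WRITE as the data direction, and the elided command setup are illustrative assumptions.

/*
 * Hypothetical caller sketch: allocate a request on hardware queue
 * hctx_idx and issue it synchronously.  'q' is an already initialized
 * blk-mq request_queue; the protocol-specific command payload is elided.
 */
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/fs.h>
#include <linux/err.h>

static int example_send_init_cmd(struct request_queue *q, unsigned int hctx_idx)
{
	struct request *rq;
	int ret;

	/* The allocation must not sleep, so BLK_MQ_REQ_NOWAIT is required. */
	rq = blk_mq_alloc_request_hctx(q, WRITE, BLK_MQ_REQ_NOWAIT, hctx_idx);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	rq->cmd_type = REQ_TYPE_DRV_PRIV;	/* driver-private (passthrough) command */

	/* ... fill in the protocol-specific command here ... */

	ret = blk_execute_rq(q, NULL, rq, 0);	/* issue and wait for completion */
	blk_mq_free_request(rq);
	return ret;
}

Because a sleeping tag allocation could end up on a different hardware context, as the comment in the diff explains, the helper insists on BLK_MQ_REQ_NOWAIT; a caller tied to a specific queue must therefore be prepared to handle -EWOULDBLOCK.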