author | Christoph Hellwig <hch@lst.de> | 2017-05-01 10:19:08 -0600
committer | Jens Axboe <axboe@fb.com> | 2017-05-02 07:52:08 -0600
commit | d6296d39e90c9075bc2fc15f1e86dac44930d4b5 (patch)
tree | 46c56a31e8b72182c4f9f5d7937f790e9a111f68 /block
parent | a800ce8ba53da88571872cbccb0e2fff8e374752 (diff)
blk-mq: update ->init_request and ->exit_request prototypes
Remove the request_idx parameter, which can't be used safely now that we
support I/O schedulers with blk-mq. Except for a superfluous check in
mtip32xx, it was unused anyway.
Also pass the tag_set instead of just the driver data - this allows drivers
to avoid some code duplication in a follow-on cleanup.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
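For reference, the before/after callback shapes implied by the call sites in the diff below look roughly like this; the old_/new_ names are illustrative labels, not identifiers from the tree, and the parameter names are descriptive rather than copied from include/linux/blk-mq.h:

```c
#include <linux/blk-mq.h>

/* Before: only the driver data and a per-set request index were passed. */
typedef int  old_init_request_fn(void *driver_data, struct request *rq,
				 unsigned int hctx_idx, unsigned int request_idx,
				 unsigned int numa_node);
typedef void old_exit_request_fn(void *driver_data, struct request *rq,
				 unsigned int hctx_idx, unsigned int request_idx);

/* After: the whole tag_set is passed and the request index is dropped;
 * the driver data is still reachable as set->driver_data. */
typedef int  new_init_request_fn(struct blk_mq_tag_set *set, struct request *rq,
				 unsigned int hctx_idx, unsigned int numa_node);
typedef void new_exit_request_fn(struct blk_mq_tag_set *set, struct request *rq,
				 unsigned int hctx_idx);
```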
Diffstat (limited to 'block')
-rw-r--r-- | block/blk-mq.c | 18
1 file changed, 5 insertions(+), 13 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index bf90684a007a..b81e4a7cd7f2 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1655,8 +1655,7 @@ void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
 
 			if (!rq)
 				continue;
-			set->ops->exit_request(set->driver_data, rq,
-						hctx_idx, i);
+			set->ops->exit_request(set, rq, hctx_idx);
 			tags->static_rqs[i] = NULL;
 		}
 	}
@@ -1787,8 +1786,7 @@ int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
 
 		tags->static_rqs[i] = rq;
 		if (set->ops->init_request) {
-			if (set->ops->init_request(set->driver_data,
-					rq, hctx_idx, i,
+			if (set->ops->init_request(set, rq, hctx_idx,
 					node)) {
 				tags->static_rqs[i] = NULL;
 				goto fail;
@@ -1849,14 +1847,10 @@ static void blk_mq_exit_hctx(struct request_queue *q,
 		struct blk_mq_tag_set *set,
 		struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
 {
-	unsigned flush_start_tag = set->queue_depth;
-
 	blk_mq_tag_idle(hctx);
 
 	if (set->ops->exit_request)
-		set->ops->exit_request(set->driver_data,
-				       hctx->fq->flush_rq, hctx_idx,
-				       flush_start_tag + hctx_idx);
+		set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx);
 
 	blk_mq_sched_exit_hctx(q, hctx, hctx_idx);
 
@@ -1889,7 +1883,6 @@ static int blk_mq_init_hctx(struct request_queue *q,
 		struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
 {
 	int node;
-	unsigned flush_start_tag = set->queue_depth;
 
 	node = hctx->numa_node;
 	if (node == NUMA_NO_NODE)
@@ -1933,9 +1926,8 @@ static int blk_mq_init_hctx(struct request_queue *q,
 		goto sched_exit_hctx;
 
 	if (set->ops->init_request &&
-	    set->ops->init_request(set->driver_data,
-				   hctx->fq->flush_rq, hctx_idx,
-				   flush_start_tag + hctx_idx, node))
+	    set->ops->init_request(set, hctx->fq->flush_rq, hctx_idx,
+				   node))
 		goto free_fq;
 
 	if (hctx->flags & BLK_MQ_F_BLOCKING)
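On the driver side, a conversion to the new prototypes looks roughly like the sketch below; every "mydrv" name is hypothetical, and the point is that the driver-private pointer formerly passed as the first argument is still reachable via set->driver_data:

```c
#include <linux/blk-mq.h>

/* Hypothetical per-device and per-request driver state, for illustration. */
struct mydrv_device { int dummy; };
struct mydrv_cmd    { struct mydrv_device *mdev; };

/* New-style ->init_request: tag_set instead of driver data, no request_idx. */
static int mydrv_init_request(struct blk_mq_tag_set *set, struct request *rq,
			      unsigned int hctx_idx, unsigned int numa_node)
{
	struct mydrv_device *mdev = set->driver_data;	/* was the first argument */
	struct mydrv_cmd *cmd = blk_mq_rq_to_pdu(rq);	/* per-request PDU */

	cmd->mdev = mdev;
	return 0;
}

/* New-style ->exit_request: same idea, and the old request index is simply gone. */
static void mydrv_exit_request(struct blk_mq_tag_set *set, struct request *rq,
			       unsigned int hctx_idx)
{
	/* nothing per-index to tear down in this sketch */
}
```

Wiring these into a driver's struct blk_mq_ops (.init_request / .exit_request) is unchanged; only the callback signatures differ.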