| author | Christoph Hellwig <hch@infradead.org> | 2014-02-11 08:27:14 -0800 |
|---|---|---|
| committer | Jens Axboe <axboe@fb.com> | 2014-02-11 09:34:08 -0700 |
| commit | 49f5baa5109897b8cee491e8a7c4d74052b6bc1e | |
| tree | 9e0cf5285be207bde015af948e8fd7b89760ffcd /block | |
| parent | 1e93b8c274268038c93763dca65a73b42a081e10 | |
blk-mq: pair blk_mq_start_request / blk_mq_requeue_request
Make sure we have a proper pairing between starting and requeueing
requests. Move the dma drain and REQ_END setup into blk_mq_start_request,
and make sure blk_mq_requeue_request properly undoes them, giving us
a pair of functions to prepare and unprepare a request without leaving
side effects.
Together this ensures we always clean up properly after
->queue_rq returns BLK_MQ_RQ_QUEUE_BUSY.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
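To make the pairing concrete, here is a simplified sketch, in the spirit of __blk_mq_run_hw_queue after this patch rather than the verbatim kernel code, of how the dispatch loop is expected to use the two functions: every request is prepared exactly once, and a BUSY return from ->queue_rq is undone so neither the extra drain segment nor REQ_END leaks into the next dispatch attempt.

```c
/*
 * Simplified sketch of the dispatch loop after this patch; names follow
 * the diff below, and error handling is reduced to the BUSY case.
 */
while (!list_empty(&rq_list)) {
	struct request *rq;
	int ret;

	rq = list_first_entry(&rq_list, struct request, queuelist);
	list_del_init(&rq->queuelist);

	/* prepare: drain segment, timeout, REQ_END on the last request */
	blk_mq_start_request(rq, list_empty(&rq_list));

	ret = q->mq_ops->queue_rq(hctx, rq);
	if (ret == BLK_MQ_RQ_QUEUE_BUSY) {
		/* unprepare: drop REQ_END and the drain segment again */
		blk_mq_requeue_request(rq);
		list_add(&rq->queuelist, &rq_list);
		break;	/* retry the remaining requests later */
	}
}
```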
Diffstat (limited to 'block')
-rw-r--r-- | block/blk-mq.c | 49 |
1 file changed, 26 insertions, 23 deletions
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 0480710a8b45..1fa9dd153fde 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -377,7 +377,7 @@ void blk_mq_complete_request(struct request *rq)
 }
 EXPORT_SYMBOL(blk_mq_complete_request);
 
-static void blk_mq_start_request(struct request *rq)
+static void blk_mq_start_request(struct request *rq, bool last)
 {
 	struct request_queue *q = rq->q;
 
@@ -390,6 +390,25 @@ static void blk_mq_start_request(struct request *rq)
 	 */
 	rq->deadline = jiffies + q->rq_timeout;
 	set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
+
+	if (q->dma_drain_size && blk_rq_bytes(rq)) {
+		/*
+		 * Make sure space for the drain appears. We know we can do
+		 * this because max_hw_segments has been adjusted to be one
+		 * fewer than the device can handle.
+		 */
+		rq->nr_phys_segments++;
+	}
+
+	/*
+	 * Flag the last request in the series so that drivers know when IO
+	 * should be kicked off, if they don't do it on a per-request basis.
+	 *
+	 * Note: the flag isn't the only condition drivers should do kick off.
+	 * If drive is busy, the last request might not have the bit set.
+	 */
+	if (last)
+		rq->cmd_flags |= REQ_END;
 }
 
 static void blk_mq_requeue_request(struct request *rq)
@@ -398,6 +417,11 @@ static void blk_mq_requeue_request(struct request *rq)
 
 	trace_block_rq_requeue(q, rq);
 	clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
+
+	rq->cmd_flags &= ~REQ_END;
+
+	if (q->dma_drain_size && blk_rq_bytes(rq))
+		rq->nr_phys_segments--;
 }
 
 struct blk_mq_timeout_data {
@@ -565,29 +589,8 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
 		rq = list_first_entry(&rq_list, struct request, queuelist);
 		list_del_init(&rq->queuelist);
 
-		blk_mq_start_request(rq);
-		if (q->dma_drain_size && blk_rq_bytes(rq)) {
-			/*
-			 * make sure space for the drain appears we
-			 * know we can do this because max_hw_segments
-			 * has been adjusted to be one fewer than the
-			 * device can handle
-			 */
-			rq->nr_phys_segments++;
-		}
-
-		/*
-		 * Last request in the series. Flag it as such, this
-		 * enables drivers to know when IO should be kicked off,
-		 * if they don't do it on a per-request basis.
-		 *
-		 * Note: the flag isn't the only condition drivers
-		 * should do kick off. If drive is busy, the last
-		 * request might not have the bit set.
-		 */
-		if (list_empty(&rq_list))
-			rq->cmd_flags |= REQ_END;
+		blk_mq_start_request(rq, list_empty(&rq_list));
 
 		ret = q->mq_ops->queue_rq(hctx, rq);
 		switch (ret) {
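For context on the REQ_END handling that now lives in blk_mq_start_request(), the sketch below shows how a blk-mq driver of this era might consume the flag from its ->queue_rq hook. The names my_hw, my_hw_submit() and my_hw_kick() are hypothetical, not an existing driver API; only the hctx/request types, driver_data, cmd_flags, REQ_END and BLK_MQ_RQ_QUEUE_OK come from the kernel code touched here.

```c
/*
 * Hypothetical driver-side view of REQ_END (illustration only).
 * The block layer sets the flag on the last request of a dispatch
 * series; a driver that batches doorbell writes can use it to decide
 * when to actually kick the hardware.
 */
static int my_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq)
{
	struct my_hw *hw = hctx->driver_data;	/* hypothetical device state */

	my_hw_submit(hw, rq);			/* post the command descriptor */

	/*
	 * REQ_END is a hint, not a guarantee: if the device was busy the
	 * last request may not carry it, so real drivers also kick on
	 * other conditions (timers, queue restarts, ...).
	 */
	if (rq->cmd_flags & REQ_END)
		my_hw_kick(hw);

	return BLK_MQ_RQ_QUEUE_OK;
}
```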