commit 634f9e4631a88025d3b90c1884e9a1b6a13d01d2
tree 181882ff0dc0d5b0844ede83e2cfc954e28df69f
parent 358f70da49d77c43f2ca11b5da584213b2add29c
author Tejun Heo <tj@kernel.org>	2018-01-09 08:29:51 -0800
committer Jens Axboe <axboe@kernel.dk>	2018-01-09 09:31:15 -0700
blk-mq: remove REQ_ATOM_COMPLETE usages from blk-mq
After the recent updates to use generation number and state based
synchronization, blk-mq no longer depends on REQ_ATOM_COMPLETE except
to avoid firing the same timeout multiple times.

Remove all REQ_ATOM_COMPLETE usages and use a new rq_flags flag
RQF_MQ_TIMEOUT_EXPIRED to avoid firing the same timeout multiple
times.  This removes atomic bitops from hot paths too.

v2: Removed blk_clear_rq_complete() from blk_mq_rq_timed_out().

v3: Added RQF_MQ_TIMEOUT_EXPIRED flag.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: "jianchao.wang" <jianchao.w.wang@oracle.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
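To make the synchronization shift concrete, here is a minimal user-space sketch (not kernel code) of the pattern the patch adopts: the completion path decides ownership purely from the generation number, and a plain rq_flags bit written only by the timeout path replaces the REQ_ATOM_COMPLETE atomic test-and-set. The struct layout and the main() driver below are invented for illustration; only the field and flag names mirror the patch.

/*
 * Illustrative sketch only -- the request layout and driver code are
 * invented; the field and flag names follow the patch.
 */
#include <stdio.h>

#define RQF_MQ_TIMEOUT_EXPIRED	(1u << 0)	/* plain bit, no atomics */

struct request {
	unsigned int rq_flags;		/* written only by the timeout path */
	unsigned long gstate;		/* generation number + state */
	unsigned long aborted_gstate;	/* snapshot taken when timing out */
};

/* Timeout handler: marks the request so it cannot be timed out twice. */
static void rq_timed_out(struct request *rq)
{
	rq->rq_flags |= RQF_MQ_TIMEOUT_EXPIRED;
	printf("timeout fired\n");
}

/* Timeout path: the flag check replaces !blk_mark_rq_complete(). */
static void terminate_expired(struct request *rq)
{
	if (!(rq->rq_flags & RQF_MQ_TIMEOUT_EXPIRED) &&
	    rq->gstate == rq->aborted_gstate)
		rq_timed_out(rq);
}

/* Completion path: the generation check alone decides ownership. */
static void complete_request(struct request *rq)
{
	if (rq->aborted_gstate != rq->gstate)
		printf("completed normally\n");
}

int main(void)
{
	struct request rq = { .gstate = 4, .aborted_gstate = 4 };

	terminate_expired(&rq);	/* fires once */
	terminate_expired(&rq);	/* flag already set: skipped */
	complete_request(&rq);	/* timeout owns the request: skipped */
	return 0;
}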
Diffstat (limited to 'block')
 block/blk-mq.c      | 15 +++++++--------
 block/blk-timeout.c |  1 +
 2 files changed, 8 insertions(+), 8 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 90f6910a83f6..d1000c6cbec6 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -634,8 +634,7 @@ void blk_mq_complete_request(struct request *rq)
 	 * hctx_lock() covers both issue and completion paths.
 	 */
 	hctx_lock(hctx, &srcu_idx);
-	if (blk_mq_rq_aborted_gstate(rq) != rq->gstate &&
-	    !blk_mark_rq_complete(rq))
+	if (blk_mq_rq_aborted_gstate(rq) != rq->gstate)
 		__blk_mq_complete_request(rq);
 	hctx_unlock(hctx, srcu_idx);
 }
@@ -685,8 +684,6 @@ void blk_mq_start_request(struct request *rq)
 	preempt_enable();
 
 	set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
-	if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags))
-		clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
 
 	if (q->dma_drain_size && blk_rq_bytes(rq)) {
 		/*
@@ -837,6 +834,8 @@ static void blk_mq_rq_timed_out(struct request *req, bool reserved)
 	if (!test_bit(REQ_ATOM_STARTED, &req->atomic_flags))
 		return;
 
+	req->rq_flags |= RQF_MQ_TIMEOUT_EXPIRED;
+
 	if (ops->timeout)
 		ret = ops->timeout(req, reserved);
 
@@ -852,7 +851,6 @@ static void blk_mq_rq_timed_out(struct request *req, bool reserved)
 		 */
 		blk_mq_rq_update_aborted_gstate(req, 0);
 		blk_add_timer(req);
-		blk_clear_rq_complete(req);
 		break;
 	case BLK_EH_NOT_HANDLED:
 		break;
@@ -871,7 +869,8 @@ static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
 
 	might_sleep();
 
-	if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
+	if ((rq->rq_flags & RQF_MQ_TIMEOUT_EXPIRED) ||
+	    !test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
 		return;
 
 	/* read coherent snapshots of @rq->state_gen and @rq->deadline */
@@ -906,8 +905,8 @@ static void blk_mq_terminate_expired(struct blk_mq_hw_ctx *hctx,
 	 * now guaranteed to see @rq->aborted_gstate and yield.  If
 	 * @rq->aborted_gstate still matches @rq->gstate, @rq is ours.
 	 */
-	if (READ_ONCE(rq->gstate) == rq->aborted_gstate &&
-	    !blk_mark_rq_complete(rq))
+	if (!(rq->rq_flags & RQF_MQ_TIMEOUT_EXPIRED) &&
+	    READ_ONCE(rq->gstate) == rq->aborted_gstate)
 		blk_mq_rq_timed_out(rq, reserved);
 }
 
diff --git a/block/blk-timeout.c b/block/blk-timeout.c
index 4f04cd1e0b74..ebe99963386c 100644
--- a/block/blk-timeout.c
+++ b/block/blk-timeout.c
@@ -214,6 +214,7 @@ void blk_add_timer(struct request *req)
 		req->timeout = q->rq_timeout;
 
 	req->deadline = jiffies + req->timeout;
+	req->rq_flags &= ~RQF_MQ_TIMEOUT_EXPIRED;
 
 	/*
 	 * Only the non-mq case needs to add the request to a protected list.
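Taken together, the two hunks define the flag's lifetime: blk_mq_rq_timed_out() sets RQF_MQ_TIMEOUT_EXPIRED before the driver's ->timeout() runs, and blk_add_timer() clears it whenever the timer is (re)armed, so a request re-armed via BLK_EH_RESET_TIMER can still time out again in a later window while duplicate firings within one window are suppressed.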