author		Jens Axboe <axboe@kernel.dk>	2017-06-22 21:55:24 -0600
committer	Jens Axboe <axboe@kernel.dk>	2017-06-22 21:55:24 -0600
commit		f95a0d6a95b12a79b7492da7ab687ae4cd741124 (patch)
tree		503ad30af40e94bbc090fc7de45a051030646141 /block/blk-mq-sched.c
parent		a9590fe148c03cb4157b56255357419cb4e14124 (diff)
parent		8e8320c9315c47a6a090188720ccff32a6a6ba18 (diff)
Merge commit '8e8320c9315c' into for-4.13/block
Pull in the fix for shared tags, as it conflicts with the pending changes in
for-4.13/block. We already pulled in v4.12-rc5 to resolve other conflicts and
pick up fixes that went into 4.12, so there are not a lot of changes in this
merge.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
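For readers skimming the diff below: the shared-tags fix pairs the per-hctx
BLK_MQ_S_SCHED_RESTART bit with a per-request-queue atomic counter, so that the
restart path can skip the round-robin walk over all hardware queues sharing a
tag set when none of them is marked. The following is a minimal, self-contained
C sketch of that counting-flag pattern, illustrative only; the struct names,
the mark_restart()/clear_restart()/restart_all() helpers and the use of C11
userspace atomics are inventions of this note, not kernel API.

/* counting_flag.c - sketch of the "per-object flag plus shared counter" pattern */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct queue {
	atomic_int restart_count;	/* how many hw queues currently have the flag set */
};

struct hw_queue {
	struct queue *q;		/* shared "request queue" all hw queues point at */
	atomic_bool need_restart;	/* per-hw-queue restart flag */
};

/* Bump the shared counter only on an actual false -> true transition. */
static void mark_restart(struct hw_queue *h)
{
	if (!atomic_exchange(&h->need_restart, true))
		atomic_fetch_add(&h->q->restart_count, 1);
}

/* Drop the counter only if this caller is the one that cleared the flag. */
static bool clear_restart(struct hw_queue *h)
{
	if (!atomic_exchange(&h->need_restart, false))
		return false;
	atomic_fetch_sub(&h->q->restart_count, 1);
	return true;
}

/* Hot path: a single atomic load lets us skip the walk when nothing is marked. */
static void restart_all(struct queue *q, struct hw_queue *hws, int n)
{
	if (!atomic_load(&q->restart_count))
		return;
	for (int i = 0; i < n; i++)
		clear_restart(&hws[i]);
}

int main(void)
{
	struct queue q;
	struct hw_queue hws[2];

	atomic_init(&q.restart_count, 0);
	for (int i = 0; i < 2; i++) {
		hws[i].q = &q;
		atomic_init(&hws[i].need_restart, false);
	}

	mark_restart(&hws[1]);
	restart_all(&q, hws, 2);	/* counter is 1: walks and clears */
	restart_all(&q, hws, 2);	/* counter is 0: returns immediately */
	printf("restart_count = %d\n", atomic_load(&q.restart_count));
	return 0;
}

Because the counter only moves on real flag transitions, concurrent markers
cannot over-count, and the common case (no queue marked) costs one atomic load
instead of a walk over every hardware queue.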
Diffstat (limited to 'block/blk-mq-sched.c')
-rw-r--r--	block/blk-mq-sched.c	58
1 file changed, 46 insertions(+), 12 deletions(-)
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 191bf82d185e..7f0dc48ffb40 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -50,6 +50,45 @@ void blk_mq_sched_assign_ioc(struct request *rq, struct bio *bio)
 	rq->elv.icq = icq;
 }
 
+/*
+ * Mark a hardware queue as needing a restart. For shared queues, maintain
+ * a count of how many hardware queues are marked for restart.
+ */
+static void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
+{
+	if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
+		return;
+
+	if (hctx->flags & BLK_MQ_F_TAG_SHARED) {
+		struct request_queue *q = hctx->queue;
+
+		if (!test_and_set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
+			atomic_inc(&q->shared_hctx_restart);
+	} else
+		set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
+}
+
+static bool blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx)
+{
+	if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
+		return false;
+
+	if (hctx->flags & BLK_MQ_F_TAG_SHARED) {
+		struct request_queue *q = hctx->queue;
+
+		if (test_and_clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
+			atomic_dec(&q->shared_hctx_restart);
+	} else
+		clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
+
+	if (blk_mq_hctx_has_pending(hctx)) {
+		blk_mq_run_hw_queue(hctx, true);
+		return true;
+	}
+
+	return false;
+}
+
 void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
 {
 	struct request_queue *q = hctx->queue;
@@ -238,18 +277,6 @@ static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
 	return true;
 }
 
-static bool blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx)
-{
-	if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) {
-		clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
-		if (blk_mq_hctx_has_pending(hctx)) {
-			blk_mq_run_hw_queue(hctx, true);
-			return true;
-		}
-	}
-	return false;
-}
-
 /**
  * list_for_each_entry_rcu_rr - iterate in a round-robin fashion over rcu list
  * @pos: loop cursor.
@@ -281,6 +308,13 @@ void blk_mq_sched_restart(struct blk_mq_hw_ctx *const hctx)
 	unsigned int i, j;
 
 	if (set->flags & BLK_MQ_F_TAG_SHARED) {
+		/*
+		 * If this is 0, then we know that no hardware queues
+		 * have RESTART marked. We're done.
+		 */
+		if (!atomic_read(&queue->shared_hctx_restart))
+			return;
+
 		rcu_read_lock();
 		list_for_each_entry_rcu_rr(q, queue, &set->tag_list,
 					   tag_set_list) {