author	Jens Axboe <axboe@fb.com>	2017-01-25 08:11:38 -0700
committer	Jens Axboe <axboe@fb.com>	2017-01-25 08:11:38 -0700
commit	200e86b3372b51e136a382e007b6b904b1dac7e4 (patch)
tree	f4d92877e39e0ea679fae27568724c898d333dd7
parent	1cf417530375b475d4a8a9f18dc0867f91e52d78 (diff)
blk-mq: only apply active queue tag throttling for driver tags
If we have a scheduler attached, we have two sets of tags. We don't want to apply our active queue throttling to the scheduler side of tags; that only applies to driver tags, since those are the resource we need to dispatch an IO.

Signed-off-by: Jens Axboe <axboe@fb.com>
-rw-r--r--	block/blk-mq-tag.c	12
-rw-r--r--	block/blk-mq.c	13
2 files changed, 15 insertions, 10 deletions
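The core of the change is visible in __blk_mq_get_tag(): allocations flagged BLK_MQ_REQ_INTERNAL (scheduler tags) now skip the active-queue throttle entirely. A minimal sketch, condensed from the patch below (not a verbatim copy of the surrounding kernel source):

	/* Condensed from this patch: throttle only driver-tag allocations. */
	static int __blk_mq_get_tag(struct blk_mq_alloc_data *data,
				    struct sbitmap_queue *bt)
	{
		/*
		 * BLK_MQ_REQ_INTERNAL marks a scheduler-tag allocation; it
		 * takes no dispatch resource, so hctx_may_queue() does not
		 * apply and only the bitmap itself can refuse the tag.
		 */
		if (!(data->flags & BLK_MQ_REQ_INTERNAL) &&
		    !hctx_may_queue(data->hctx, bt))
			return -1;

		return __sbitmap_queue_get(bt);
	}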
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index a49ec77c415a..1b156ca79af6 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -90,9 +90,11 @@ static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
 	return atomic_read(&hctx->nr_active) < depth;
 }
 
-static int __blk_mq_get_tag(struct blk_mq_hw_ctx *hctx, struct sbitmap_queue *bt)
+static int __blk_mq_get_tag(struct blk_mq_alloc_data *data,
+			    struct sbitmap_queue *bt)
 {
-	if (!hctx_may_queue(hctx, bt))
+	if (!(data->flags & BLK_MQ_REQ_INTERNAL) &&
+	    !hctx_may_queue(data->hctx, bt))
 		return -1;
 	return __sbitmap_queue_get(bt);
 }
@@ -118,7 +120,7 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
 		tag_offset = tags->nr_reserved_tags;
 	}
 
-	tag = __blk_mq_get_tag(data->hctx, bt);
+	tag = __blk_mq_get_tag(data, bt);
 	if (tag != -1)
 		goto found_tag;
 
@@ -129,7 +131,7 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
 	do {
 		prepare_to_wait(&ws->wait, &wait, TASK_UNINTERRUPTIBLE);
 
-		tag = __blk_mq_get_tag(data->hctx, bt);
+		tag = __blk_mq_get_tag(data, bt);
 		if (tag != -1)
 			break;
 
@@ -144,7 +146,7 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
 		 * Retry tag allocation after running the hardware queue,
 		 * as running the queue may also have found completions.
 		 */
-		tag = __blk_mq_get_tag(data->hctx, bt);
+		tag = __blk_mq_get_tag(data, bt);
 		if (tag != -1)
 			break;
 
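For context, hctx_may_queue() is the throttle being bypassed here: on shared tag sets it divides the bitmap depth among the queues currently marked active and refuses allocation once this hctx holds its share. A simplified sketch of that logic, paraphrased from kernels of this era rather than quoted from the exact source:

	/*
	 * Simplified sketch (paraphrased, not the exact kernel source):
	 * split the shared tag depth evenly across active queues.
	 */
	static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
					  struct sbitmap_queue *bt)
	{
		unsigned int depth, users;

		/* Throttling only matters when the tag set is shared. */
		if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_SHARED))
			return true;

		users = atomic_read(&hctx->tags->active_queues);
		if (!users)
			return true;

		/* Give each active queue a roughly equal slice, with a floor. */
		depth = max((bt->sb.depth + users - 1) / users, 4U);
		return atomic_read(&hctx->nr_active) < depth;
	}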
diff --git a/block/blk-mq.c b/block/blk-mq.c
index ee69e5e89769..dcb567642db7 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -230,15 +230,14 @@ struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data,
 		rq = tags->static_rqs[tag];
 
-		if (blk_mq_tag_busy(data->hctx)) {
-			rq->rq_flags = RQF_MQ_INFLIGHT;
-			atomic_inc(&data->hctx->nr_active);
-		}
-
 		if (data->flags & BLK_MQ_REQ_INTERNAL) {
 			rq->tag = -1;
 			rq->internal_tag = tag;
 		} else {
+			if (blk_mq_tag_busy(data->hctx)) {
+				rq->rq_flags = RQF_MQ_INFLIGHT;
+				atomic_inc(&data->hctx->nr_active);
+			}
 			rq->tag = tag;
 			rq->internal_tag = -1;
 		}
 
@@ -869,6 +868,10 @@ done:
 	rq->tag = blk_mq_get_tag(&data);
 	if (rq->tag >= 0) {
+		if (blk_mq_tag_busy(data.hctx)) {
+			rq->rq_flags |= RQF_MQ_INFLIGHT;
+			atomic_inc(&data.hctx->nr_active);
+		}
 		data.hctx->tags->rqs[rq->tag] = rq;
 		goto done;
 	}
 
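The net effect on accounting: RQF_MQ_INFLIGHT and the nr_active counter now track driver tags only, and both paths that hand out a driver tag (direct allocation without a scheduler, and late driver-tag assignment for scheduler requests) perform the same bookkeeping. A condensed sketch of that shared pattern, using a hypothetical helper name; the patch itself open-codes this in both places:

	/*
	 * Hypothetical helper (not from the patch) illustrating the
	 * bookkeeping both driver-tag paths above now perform.
	 */
	static void blk_mq_account_driver_tag(struct blk_mq_hw_ctx *hctx,
					      struct request *rq)
	{
		/* Only counts when the tag set is shared and marked busy. */
		if (blk_mq_tag_busy(hctx)) {
			rq->rq_flags |= RQF_MQ_INFLIGHT;
			atomic_inc(&hctx->nr_active);
		}
	}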