author	Omar Sandoval <osandov@fb.com>	2017-04-05 12:01:31 -0700
committer	Jens Axboe <axboe@fb.com>	2017-04-07 08:56:46 -0600
commit	93252632e828da3e90241a1c0e766556abf71598 (patch)
tree	388756bb6862ea8943c87240644a33feb8c5c402
parent	6917ff0b5bd4139e08a3f3146529dcb3b95ba7a6 (diff)
blk-mq-sched: set up scheduler tags when bringing up new queues
If a new hardware queue is added at runtime, we don't allocate scheduler
tags for it, leading to a crash. This hooks up the scheduler framework to
blk_mq_{init,exit}_hctx() to make sure everything gets properly
initialized/freed.

Signed-off-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
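For context, a minimal sketch of the runtime scenario the commit message describes, assuming a hypothetical driver (example_dev and example_grow_hw_queues are illustrative names, not part of this patch): blk_mq_update_nr_hw_queues() is the existing blk-mq entry point for changing the hardware queue count at runtime, and the newly added queues are brought up through blk_mq_init_hctx(), which before this patch never allocated scheduler tags.

	#include <linux/blk-mq.h>

	/* Hypothetical driver state; only the tag_set matters here. */
	struct example_dev {
		struct blk_mq_tag_set tag_set;
	};

	/*
	 * Hypothetical callback run when the hardware reports more queues,
	 * e.g. after a controller reset. The new hctxs are initialized via
	 * blk_mq_init_hctx(); with this patch that path also calls
	 * blk_mq_sched_init_hctx() so the scheduler tags exist before any
	 * I/O is dispatched to the new queue.
	 */
	static void example_grow_hw_queues(struct example_dev *dev, int nr_hw_queues)
	{
		blk_mq_update_nr_hw_queues(&dev->tag_set, nr_hw_queues);
	}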
-rw-r--r--	block/blk-mq-sched.c	22
-rw-r--r--	block/blk-mq-sched.h	5
-rw-r--r--	block/blk-mq.c	9
3 files changed, 35 insertions(+), 1 deletion(-)
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 6bd1758ea29b..0bb13bb51daa 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -461,6 +461,28 @@ void blk_mq_sched_teardown(struct request_queue *q)
blk_mq_sched_free_tags(set, hctx, i);
}
+int blk_mq_sched_init_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
+ unsigned int hctx_idx)
+{
+ struct elevator_queue *e = q->elevator;
+
+ if (!e)
+ return 0;
+
+ return blk_mq_sched_alloc_tags(q, hctx, hctx_idx);
+}
+
+void blk_mq_sched_exit_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
+ unsigned int hctx_idx)
+{
+ struct elevator_queue *e = q->elevator;
+
+ if (!e)
+ return;
+
+ blk_mq_sched_free_tags(q->tag_set, hctx, hctx_idx);
+}
+
int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
{
struct blk_mq_hw_ctx *hctx;
diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
index 873f9af5a35b..19db25e0c95a 100644
--- a/block/blk-mq-sched.h
+++ b/block/blk-mq-sched.h
@@ -35,6 +35,11 @@ void blk_mq_sched_move_to_dispatch(struct blk_mq_hw_ctx *hctx,
int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e);
void blk_mq_sched_teardown(struct request_queue *q);
+int blk_mq_sched_init_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
+ unsigned int hctx_idx);
+void blk_mq_sched_exit_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
+ unsigned int hctx_idx);
+
int blk_mq_sched_init(struct request_queue *q);
static inline bool
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 828f4bd193f2..72e744cd638c 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1924,6 +1924,8 @@ static void blk_mq_exit_hctx(struct request_queue *q,
hctx->fq->flush_rq, hctx_idx,
flush_start_tag + hctx_idx);
+ blk_mq_sched_exit_hctx(q, hctx, hctx_idx);
+
if (set->ops->exit_hctx)
set->ops->exit_hctx(hctx, hctx_idx);
@@ -1990,9 +1992,12 @@ static int blk_mq_init_hctx(struct request_queue *q,
set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
goto free_bitmap;
+ if (blk_mq_sched_init_hctx(q, hctx, hctx_idx))
+ goto exit_hctx;
+
hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size);
if (!hctx->fq)
- goto exit_hctx;
+ goto sched_exit_hctx;
if (set->ops->init_request &&
set->ops->init_request(set->driver_data,
@@ -2007,6 +2012,8 @@ static int blk_mq_init_hctx(struct request_queue *q,
free_fq:
kfree(hctx->fq);
+ sched_exit_hctx:
+ blk_mq_sched_exit_hctx(q, hctx, hctx_idx);
exit_hctx:
if (set->ops->exit_hctx)
set->ops->exit_hctx(hctx, hctx_idx);