author	Jens Axboe <axboe@kernel.dk>	2018-10-29 13:06:14 -0600
committer	Jens Axboe <axboe@kernel.dk>	2018-11-07 13:44:59 -0700
commit	ed76e329d74a4b15ac0f5fd3adbd52ec0178a134 (patch)
tree	628ce9d3ac0997faef04f2d9e4f4d169594fe366 /block/blk-mq.c
parent	a8908939af569ce2419f43fd56eeaf003bc3d85d (diff)
blk-mq: abstract out queue map
This is in preparation for allowing multiple sets of maps per
queue, if so desired.
Reviewed-by: Hannes Reinecke <hare@suse.com>
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Reviewed-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
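
For context, the abstraction this patch introduces can be sketched as follows. The member names below (mq_map, nr_queues) are inferred from the set->map[0].mq_map and set->map[0].nr_queues accesses in the diff; the actual struct blk_mq_queue_map definition lives in include/linux/blk-mq.h, which is outside this diffstat.

	/*
	 * Sketch of the queue map wrapper, inferred from its uses in the
	 * diff below. Wrapping the raw per-CPU table in a struct lets a
	 * tag set later carry an array of such maps, one per map type.
	 */
	struct blk_mq_queue_map {
		unsigned int *mq_map;	/* mq_map[cpu] = hw queue index */
		unsigned int nr_queues;	/* hw queues covered by this map */
	};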
Diffstat (limited to 'block/blk-mq.c')
-rw-r--r--	block/blk-mq.c	| 34 ++++++++++++++++++----------------
1 file changed, 18 insertions(+), 16 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 766facfa1f08..fac88d16988b 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1975,7 +1975,7 @@ struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
 	struct blk_mq_tags *tags;
 	int node;
 
-	node = blk_mq_hw_queue_to_node(set->mq_map, hctx_idx);
+	node = blk_mq_hw_queue_to_node(&set->map[0], hctx_idx);
 	if (node == NUMA_NO_NODE)
 		node = set->numa_node;
 
@@ -2031,7 +2031,7 @@ int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
 	size_t rq_size, left;
 	int node;
 
-	node = blk_mq_hw_queue_to_node(set->mq_map, hctx_idx);
+	node = blk_mq_hw_queue_to_node(&set->map[0], hctx_idx);
 	if (node == NUMA_NO_NODE)
 		node = set->numa_node;
 
@@ -2322,7 +2322,7 @@ static void blk_mq_map_swqueue(struct request_queue *q)
 	 * If the cpu isn't present, the cpu is mapped to first hctx.
 	 */
 	for_each_possible_cpu(i) {
-		hctx_idx = set->mq_map[i];
+		hctx_idx = set->map[0].mq_map[i];
 		/* unmapped hw queue can be remapped after CPU topo changed */
 		if (!set->tags[hctx_idx] &&
 		    !__blk_mq_alloc_rq_map(set, hctx_idx)) {
@@ -2332,7 +2332,7 @@ static void blk_mq_map_swqueue(struct request_queue *q)
 			 * case, remap the current ctx to hctx[0] which
 			 * is guaranteed to always have tags allocated
 			 */
-			set->mq_map[i] = 0;
+			set->map[0].mq_map[i] = 0;
 		}
 
 		ctx = per_cpu_ptr(q->queue_ctx, i);
@@ -2585,7 +2585,7 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
 		int node;
 		struct blk_mq_hw_ctx *hctx;
 
-		node = blk_mq_hw_queue_to_node(set->mq_map, i);
+		node = blk_mq_hw_queue_to_node(&set->map[0], i);
 		/*
 		 * If the hw queue has been mapped to another numa node,
 		 * we need to realloc the hctx. If allocation fails, fallback
@@ -2791,18 +2791,18 @@ static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
 		 * for (queue = 0; queue < set->nr_hw_queues; queue++) {
 		 * 	mask = get_cpu_mask(queue)
 		 * 	for_each_cpu(cpu, mask)
-		 * 		set->mq_map[cpu] = queue;
+		 * 		set->map.mq_map[cpu] = queue;
 		 * }
 		 *
 		 * When we need to remap, the table has to be cleared for
 		 * killing stale mapping since one CPU may not be mapped
 		 * to any hw queue.
 		 */
-		blk_mq_clear_mq_map(set);
+		blk_mq_clear_mq_map(&set->map[0]);
 
 		return set->ops->map_queues(set);
 	} else
-		return blk_mq_map_queues(set);
+		return blk_mq_map_queues(&set->map[0]);
 }
 
 /*
@@ -2857,10 +2857,12 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
 		return -ENOMEM;
 
 	ret = -ENOMEM;
-	set->mq_map = kcalloc_node(nr_cpu_ids, sizeof(*set->mq_map),
-				   GFP_KERNEL, set->numa_node);
-	if (!set->mq_map)
+	set->map[0].mq_map = kcalloc_node(nr_cpu_ids,
+					  sizeof(*set->map[0].mq_map),
+					  GFP_KERNEL, set->numa_node);
+	if (!set->map[0].mq_map)
 		goto out_free_tags;
+	set->map[0].nr_queues = set->nr_hw_queues;
 
 	ret = blk_mq_update_queue_map(set);
 	if (ret)
@@ -2876,8 +2878,8 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
 	return 0;
 
 out_free_mq_map:
-	kfree(set->mq_map);
-	set->mq_map = NULL;
+	kfree(set->map[0].mq_map);
+	set->map[0].mq_map = NULL;
 out_free_tags:
 	kfree(set->tags);
 	set->tags = NULL;
@@ -2892,8 +2894,8 @@ void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
 	for (i = 0; i < nr_cpu_ids; i++)
 		blk_mq_free_map_and_requests(set, i);
 
-	kfree(set->mq_map);
-	set->mq_map = NULL;
+	kfree(set->map[0].mq_map);
+	set->map[0].mq_map = NULL;
 
 	kfree(set->tags);
 	set->tags = NULL;
@@ -3054,7 +3056,7 @@ fallback:
 			pr_warn("Increasing nr_hw_queues to %d fails, fallback to %d\n",
 					nr_hw_queues, prev_nr_hw_queues);
 			set->nr_hw_queues = prev_nr_hw_queues;
-			blk_mq_map_queues(set);
+			blk_mq_map_queues(&set->map[0]);
 			goto fallback;
 		}
 		blk_mq_map_swqueue(q);
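
As the hunks above show, helpers that previously took the tag set or its raw mq_map array (blk_mq_hw_queue_to_node(), blk_mq_map_queues(), blk_mq_clear_mq_map()) now take a queue map pointer, and callers pass &set->map[0] until additional maps exist. The following is a minimal standalone sketch (not kernel code) of the cpu-to-queue lookup such a map supports; it returns the first CPU mapped to a given hw queue rather than that CPU's NUMA node as blk_mq_hw_queue_to_node() does, and all names and the example table are illustrative only.

	/* Standalone sketch of a queue map lookup; names are hypothetical. */
	#include <stdio.h>

	struct queue_map {
		unsigned int *mq_map;	/* mq_map[cpu] = hw queue index */
		unsigned int nr_queues;	/* hw queues covered by this map */
	};

	/* Return the first CPU mapped to hw queue 'index', or -1 if none. */
	static int first_cpu_for_queue(const struct queue_map *qmap,
				       unsigned int ncpus, unsigned int index)
	{
		for (unsigned int cpu = 0; cpu < ncpus; cpu++)
			if (qmap->mq_map[cpu] == index)
				return (int)cpu;
		return -1;
	}

	int main(void)
	{
		/* 4 CPUs over 2 hw queues: CPUs 0,1 -> queue 0; 2,3 -> queue 1 */
		unsigned int table[4] = { 0, 0, 1, 1 };
		struct queue_map qmap = { .mq_map = table, .nr_queues = 2 };

		for (unsigned int q = 0; q < qmap.nr_queues; q++)
			printf("hw queue %u: first mapped cpu = %d\n",
			       q, first_cpu_for_queue(&qmap, 4, q));
		return 0;
	}

Keeping nr_queues alongside the table is what makes several independent maps per tag set possible later: each map can cover a different number of hardware queues without the callers having to consult the tag set's global nr_hw_queues.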