author     Ming Lei <ming.lei@redhat.com>    2024-02-28 12:08:57 +0800
committer  Jens Axboe <axboe@kernel.dk>      2024-02-28 07:22:06 -0700
commit     ec30b461f3d067bd322a6c3a6c5105746ed9bf14 (patch)
tree       f6e65bf1c333892dca5d08e61c943067fcc1d873 /block
parent     fb5d1d389c9e78d68f1f71f926d6251017579f5b (diff)
blk-mq: don't change nr_hw_queues and nr_maps for kdump kernel
For most ARCHs, 'nr_cpus=1' is passed to the kdump kernel, so nr_hw_queues for each mapping is supposed to be 1 already. More importantly, overriding it may cause trouble for the driver, because blk-mq and the driver then see different queue mappings: the driver has to set up its hardware queue configuration before calling into blk-mq tag set allocation. So do not override nr_hw_queues and nr_maps for the kdump kernel.

Cc: Wen Xiong <wenxiong@us.ibm.com>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20240228040857.306483-1-ming.lei@redhat.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
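To make the queue-mapping mismatch concrete, below is a minimal driver-side sketch. It is an illustration only, not code from this patch or from any in-tree driver: the mydrv_* names, the vector-derived queue count and the queue_depth value are assumptions.

/*
 * Hypothetical driver-side sketch (mydrv_* is made up for illustration).
 */
#include <linux/blk-mq.h>
#include <linux/numa.h>

struct mydrv {
	unsigned int nr_hw_queues;	/* the driver's own view of its queue count */
	struct blk_mq_tag_set tag_set;	/* assumed zero-initialized (kzalloc'd driver state) */
};

static blk_status_t mydrv_queue_rq(struct blk_mq_hw_ctx *hctx,
				   const struct blk_mq_queue_data *bd)
{
	/* submit bd->rq to the hardware queue backing hctx (omitted) */
	return BLK_STS_OK;
}

static const struct blk_mq_ops mydrv_mq_ops = {
	.queue_rq = mydrv_queue_rq,
};

/* nr_vecs: e.g. the number of MSI-X vectors the driver obtained (assumption) */
static int mydrv_setup_tag_set(struct mydrv *drv, unsigned int nr_vecs)
{
	/*
	 * The driver fixes its hardware queue count and programs the device
	 * accordingly *before* blk-mq is involved.
	 */
	drv->nr_hw_queues = nr_vecs;

	drv->tag_set.ops = &mydrv_mq_ops;
	drv->tag_set.nr_hw_queues = drv->nr_hw_queues;
	drv->tag_set.nr_maps = 1;
	drv->tag_set.queue_depth = 128;		/* illustrative value */
	drv->tag_set.numa_node = NUMA_NO_NODE;

	/*
	 * Before this change, a kdump kernel would rewrite nr_hw_queues and
	 * nr_maps to 1 inside blk_mq_alloc_tag_set(), while drv->nr_hw_queues
	 * (and the hardware setup derived from it) still said nr_vecs, which
	 * is the mismatch described above. Now only queue_depth is capped.
	 */
	return blk_mq_alloc_tag_set(&drv->tag_set);
}

With the override gone, what blk-mq allocates always matches what the driver configured; in a kdump kernel the usual 'nr_cpus=1' boot parameter already keeps each map at a single queue.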
Diffstat (limited to 'block')
-rw-r--r--   block/blk-mq.c   14
1 file changed, 6 insertions(+), 8 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 7111bd4180e7..b7ba91d53c41 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -4379,7 +4379,7 @@ static void blk_mq_update_queue_map(struct blk_mq_tag_set *set)
 	if (set->nr_maps == 1)
 		set->map[HCTX_TYPE_DEFAULT].nr_queues = set->nr_hw_queues;
 
-	if (set->ops->map_queues && !is_kdump_kernel()) {
+	if (set->ops->map_queues) {
 		int i;
 
 		/*
@@ -4478,14 +4478,12 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
 
 	/*
 	 * If a crashdump is active, then we are potentially in a very
-	 * memory constrained environment. Limit us to 1 queue and
-	 * 64 tags to prevent using too much memory.
+	 * memory constrained environment. Limit us to 64 tags to prevent
+	 * using too much memory.
 	 */
-	if (is_kdump_kernel()) {
-		set->nr_hw_queues = 1;
-		set->nr_maps = 1;
+	if (is_kdump_kernel())
 		set->queue_depth = min(64U, set->queue_depth);
-	}
+
 	/*
 	 * There is no use for more h/w queues than cpus if we just have
 	 * a single map
@@ -4515,7 +4513,7 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
 						 GFP_KERNEL, set->numa_node);
 		if (!set->map[i].mq_map)
 			goto out_free_mq_map;
-		set->map[i].nr_queues = is_kdump_kernel() ? 1 : set->nr_hw_queues;
+		set->map[i].nr_queues = set->nr_hw_queues;
 	}
 
 	blk_mq_update_queue_map(set);
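With the change applied, the kdump-specific handling in blk_mq_alloc_tag_set() reduces to the depth cap shown below. This is a reconstruction assembled from the hunks above plus the surrounding upstream code (the nr_cpu_ids clamp is pre-existing context, not part of this diff), not a verbatim quote. Because the kdump kernel is normally booted with 'nr_cpus=1', nr_cpu_ids is 1 and the existing clamp already limits a single-map driver to one hardware queue, which is why the explicit override was unnecessary.

	/*
	 * If a crashdump is active, then we are potentially in a very
	 * memory constrained environment. Limit us to 64 tags to prevent
	 * using too much memory.
	 */
	if (is_kdump_kernel())
		set->queue_depth = min(64U, set->queue_depth);

	/*
	 * There is no use for more h/w queues than cpus if we just have
	 * a single map
	 */
	if (set->nr_maps == 1 && set->nr_hw_queues > nr_cpu_ids)
		set->nr_hw_queues = nr_cpu_ids;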