author     Ming Lei <ming.lei@redhat.com>  2017-07-03 20:37:14 +0800
committer  Jens Axboe <axboe@kernel.dk>    2017-07-03 16:54:09 -0600
commit     32825c45ff8f4cce937ab85b030dc693ceb1aa0a (patch)
tree       80f95c39f30fdec2d3d996723ca61594bd2562aa /block
parent     431b17f9d5453533cba7d73e7e40428e4f90b35d (diff)
blk-mq-sched: fix performance regression of mq-deadline
When mq-deadline is used, IOPS for sequential read and sequential write is observed to drop by more than 20% on SATA (scsi-mq) devices compared with the 'none' scheduler. The reason is that the default scheduler nr_requests is too big for devices with a small queue depth, which increases latency considerably.

Since the principle of taking 256 requests for the mq scheduler is based on a queue depth of 128, change the default to double of min(hw queue_depth, 128).

Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
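For illustration, a minimal userspace sketch (not kernel code) of the sizing rule this patch introduces; sched_nr_requests is a hypothetical helper, and BLKDEV_MAX_RQ is assumed to be 128, its value in include/linux/blkdev.h at the time:

#include <stdio.h>

#define BLKDEV_MAX_RQ 128	/* assumed: matches blkdev.h of this era */

/* nr_requests = 2 * min(hw queue depth, BLKDEV_MAX_RQ) */
static unsigned int sched_nr_requests(unsigned int hw_queue_depth)
{
	unsigned int depth = hw_queue_depth < BLKDEV_MAX_RQ ?
			     hw_queue_depth : BLKDEV_MAX_RQ;
	return 2 * depth;
}

int main(void)
{
	/* shallow SATA/AHCI NCQ queue vs. a deep queue */
	printf("queue_depth  31 -> nr_requests %u\n", sched_nr_requests(31));
	printf("queue_depth 256 -> nr_requests %u\n", sched_nr_requests(256));
	return 0;
}

For a SATA disk with NCQ depth 31 this yields 62 scheduler requests instead of the previous fixed 256, bounding per-request queueing latency; devices with a queue depth of 128 or more keep the old 256.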
Diffstat (limited to 'block')
-rw-r--r--  block/blk-mq-sched.c  8
1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 7f0dc48ffb40..4ab69435708c 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -515,10 +515,12 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
 	}
 
 	/*
-	 * Default to 256, since we don't split into sync/async like the
-	 * old code did. Additionally, this is a per-hw queue depth.
+	 * Default to double of smaller one between hw queue_depth and 128,
+	 * since we don't split into sync/async like the old code did.
+	 * Additionally, this is a per-hw queue depth.
 	 */
-	q->nr_requests = 2 * BLKDEV_MAX_RQ;
+	q->nr_requests = 2 * min_t(unsigned int, q->tag_set->queue_depth,
+				   BLKDEV_MAX_RQ);
 
 	queue_for_each_hw_ctx(q, hctx, i) {
 		ret = blk_mq_sched_alloc_tags(q, hctx, i);
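A note on the idiom: the patch uses min_t(unsigned int, ...) rather than plain min() because q->tag_set->queue_depth is an unsigned int while BLKDEV_MAX_RQ is a plain integer constant, and the kernel's min() warns on mixed-type comparisons. A simplified rendering of the kernel macro, for reference (the real definition lives in include/linux/kernel.h):

/* cast both operands to the named type, then compare */
#define min_t(type, x, y) ({		\
	type __min1 = (x);		\
	type __min2 = (y);		\
	__min1 < __min2 ? __min1 : __min2; })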