author		Ming Lei <ming.lei@redhat.com>	2018-09-28 16:42:20 +0800
committer	Jens Axboe <axboe@kernel.dk>	2018-10-08 10:50:43 -0600
commit		36e765392e48e0322222347c4d21078c0b94758c (patch)
tree		2400722b5eafca827323b315ac00d116dcbd998e /block
parent		3a646fd77684dd5fbe20748bb04e12077bbecddc (diff)
blk-mq: complete req in softirq context in case of single queue
Many controllers have only one irq vector for completing IO requests, and
the affinity of that single vector is usually set to all possible CPUs.
On most architectures, however, the interrupt is actually handled on one
specific CPU.
So if all IOs are completed in hardirq context, IO performance inevitably
degrades because of increased irq latency.
This patch tries to address the issue by allowing requests to be completed
in softirq context, like the legacy IO path.
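As a quick illustration, here is a minimal sketch of the dispatch decision
this patch adds, condensed from the blk-mq.c hunk in the diff below; it is
not the literal kernel function, only the shape of the new branch:

	#include <linux/blkdev.h>

	/* Sketch only: condensed from __blk_mq_complete_request() in the diff below. */
	static void complete_request_sketch(struct request *rq)
	{
		struct request_queue *q = rq->q;

		/* Single hw queue: defer completion to BLOCK_SOFTIRQ so the hardirq stays short. */
		if (q->nr_hw_queues == 1) {
			__blk_complete_request(rq);
			return;
		}

		/* Multi-queue: complete here via the driver's softirq_done_fn
		 * (the real code may also steer completion to the submitting CPU,
		 * see the QUEUE_FLAG_SAME_COMP check in the diff). */
		q->softirq_done_fn(rq);
	}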
An IOPS improvement of roughly 13% is observed in the following randread
test on raid0 over virtio-scsi.
mdadm --create --verbose /dev/md0 --level=0 --chunk=1024 --raid-devices=8 /dev/sdb /dev/sdc /dev/sdd /dev/sde /dev/sdf /dev/sdg /dev/sdh /dev/sdi
fio --time_based --name=benchmark --runtime=30 --filename=/dev/md0 --nrfiles=1 --ioengine=libaio --iodepth=32 --direct=1 --invalidate=1 --verify=0 --verify_fatal=0 --numjobs=32 --rw=randread --blocksize=4k
Cc: Dongli Zhang <dongli.zhang@oracle.com>
Cc: Zach Marano <zmarano@google.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Bart Van Assche <bvanassche@acm.org>
Cc: Jianchao Wang <jianchao.w.wang@oracle.com>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block')
-rw-r--r--	block/blk-mq.c		14
-rw-r--r--	block/blk-softirq.c	 5
2 files changed, 16 insertions(+), 3 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 1dc157c85a83..89bd9cb9defc 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -568,6 +568,20 @@ static void __blk_mq_complete_request(struct request *rq)
 	if (!blk_mq_mark_complete(rq))
 		return;
 
+	/*
+	 * Most of single queue controllers, there is only one irq vector
+	 * for handling IO completion, and the only irq's affinity is set
+	 * as all possible CPUs. On most of ARCHs, this affinity means the
+	 * irq is handled on one specific CPU.
+	 *
+	 * So complete IO reqeust in softirq context in case of single queue
+	 * for not degrading IO performance by irqsoff latency.
+	 */
+	if (rq->q->nr_hw_queues == 1) {
+		__blk_complete_request(rq);
+		return;
+	}
+
 	if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) {
 		rq->q->softirq_done_fn(rq);
 		return;
diff --git a/block/blk-softirq.c b/block/blk-softirq.c
index 15c1f5e12eb8..e47a2f751884 100644
--- a/block/blk-softirq.c
+++ b/block/blk-softirq.c
@@ -97,8 +97,8 @@ static int blk_softirq_cpu_dead(unsigned int cpu)
 
 void __blk_complete_request(struct request *req)
 {
-	int ccpu, cpu;
 	struct request_queue *q = req->q;
+	int cpu, ccpu = q->mq_ops ? req->mq_ctx->cpu : req->cpu;
 	unsigned long flags;
 	bool shared = false;
 
@@ -110,8 +110,7 @@ void __blk_complete_request(struct request *req)
 	/*
 	 * Select completion CPU
 	 */
-	if (req->cpu != -1) {
-		ccpu = req->cpu;
+	if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) && ccpu != -1) {
 		if (!test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags))
 			shared = cpus_share_cache(cpu, ccpu);
 	} else
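For readers unfamiliar with the legacy completion path being reused here, the
following is a heavily simplified sketch of the BLOCK_SOFTIRQ pattern that
__blk_complete_request() builds on: completed requests are queued on a per-CPU
list and the driver's softirq_done_fn() runs later in softirq context. The real
block/blk-softirq.c also supports steering completion to a remote CPU via IPI
(the QUEUE_FLAG_SAME_COMP / cpus_share_cache() logic touched by this patch),
which is omitted from the sketch, and its handler is registered with
open_softirq() at block-layer init.

	#include <linux/blkdev.h>
	#include <linux/interrupt.h>
	#include <linux/list.h>
	#include <linux/percpu.h>

	/* Sketch only: per-CPU completion list drained by BLOCK_SOFTIRQ. */
	static DEFINE_PER_CPU(struct list_head, done_list_sketch);

	/* Softirq handler: runs each queued request's softirq_done_fn(). */
	static void blk_done_softirq_sketch(struct softirq_action *h)
	{
		struct list_head *cpu_list, local_list;

		/* Detach this CPU's list with irqs off, then process with irqs on. */
		local_irq_disable();
		cpu_list = this_cpu_ptr(&done_list_sketch);
		list_replace_init(cpu_list, &local_list);
		local_irq_enable();

		while (!list_empty(&local_list)) {
			struct request *rq = list_entry(local_list.next,
							struct request, ipi_list);

			list_del_init(&rq->ipi_list);
			rq->q->softirq_done_fn(rq);	/* driver completion in softirq */
		}
	}

	/* Called from hardirq context: queue locally and let BLOCK_SOFTIRQ finish it. */
	static void complete_in_softirq_sketch(struct request *req)
	{
		unsigned long flags;

		local_irq_save(flags);
		list_add_tail(&req->ipi_list, this_cpu_ptr(&done_list_sketch));
		raise_softirq_irqoff(BLOCK_SOFTIRQ);
		local_irq_restore(flags);
	}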