authorMing Lei <ming.lei@redhat.com>2018-07-02 17:35:59 +0800
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>2018-09-26 08:39:43 +0200
commit864e90ed4a8f4a37b20effa5c06b7b5902ebcab9 (patch)
tree4f6a158e6cc219380f718e4b37e50a2d4f47ed66
parent83459da8e319f5625ad4a089c926b5dc5bafb6fe (diff)
downloadlinux-stable-864e90ed4a8f4a37b20effa5c06b7b5902ebcab9.tar.gz
linux-stable-864e90ed4a8f4a37b20effa5c06b7b5902ebcab9.tar.bz2
linux-stable-864e90ed4a8f4a37b20effa5c06b7b5902ebcab9.zip
blk-mq: only attempt to merge bio if there is rq in sw queue
[ Upstream commit b04f50ab8a74129b3041a2836c33c916be3c6667 ]

Only attempt to merge a bio if ctx->rq_list isn't empty, because:

1) for high-performance SSDs, dispatch usually succeeds, so there is
often nothing left in ctx->rq_list; by not trying to merge over an
empty sw queue we save one acquisition of ctx->lock.

2) we can't expect good merge performance on the per-cpu sw queue
anyway, and missing one merge there is not a big deal since tasks can
be scheduled from one CPU to another.

Cc: Laurence Oberman <loberman@redhat.com>
Cc: Omar Sandoval <osandov@fb.com>
Cc: Bart Van Assche <bart.vanassche@wdc.com>
Tested-by: Kashyap Desai <kashyap.desai@broadcom.com>
Reported-by: Kashyap Desai <kashyap.desai@broadcom.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Sasha Levin <alexander.levin@microsoft.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
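To make the locking argument concrete, here is a small, self-contained C sketch of the pattern the patch applies, using hypothetical names and a pthread mutex in place of the kernel's ctx->lock spinlock: do a lockless emptiness check on the per-CPU software queue first, and only take the lock and attempt the merge when something is actually queued.

#include <pthread.h>
#include <stdbool.h>

/* Hypothetical stand-ins for struct list_head and struct blk_mq_ctx. */
struct list_node {
	struct list_node *next, *prev;
};

struct sw_queue {
	pthread_mutex_t lock;        /* plays the role of ctx->lock */
	struct list_node rq_list;    /* sentinel; empty when it points to itself */
};

/* Lockless check in the spirit of list_empty_careful(): read both
 * pointers so a half-finished insertion is less likely to look empty.
 * The answer is only a hint, which is all the optimization needs. */
static bool rq_list_looks_empty(const struct sw_queue *q)
{
	const struct list_node *next = q->rq_list.next;

	return next == &q->rq_list && next == q->rq_list.prev;
}

/* Hypothetical stand-in for blk_mq_attempt_merge(); must be called
 * with q->lock held. */
static bool attempt_merge_locked(struct sw_queue *q)
{
	return q->rq_list.next != &q->rq_list;  /* placeholder for real merge work */
}

static bool sw_queue_bio_merge(struct sw_queue *q)
{
	bool ret = false;

	/* The patch's optimization: if the software queue looks empty,
	 * skip the merge attempt entirely and save one lock acquisition. */
	if (rq_list_looks_empty(q))
		return false;

	pthread_mutex_lock(&q->lock);
	ret = attempt_merge_locked(q);
	pthread_mutex_unlock(&q->lock);

	return ret;
}

int main(void)
{
	struct sw_queue q = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.rq_list = { &q.rq_list, &q.rq_list },  /* starts empty */
	};

	/* Empty queue: the cheap check short-circuits, no lock is taken. */
	return sw_queue_bio_merge(&q) ? 1 : 0;
}

On a fast SSD, where dispatch usually drains the software queue, the cheap unlocked check short-circuits the common case and the lock is only touched when there is real work to do.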
-rw-r--r--  block/blk-mq-sched.c  3
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 56c493c6cd90..f5745acc2d98 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -339,7 +339,8 @@ bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
 		return e->type->ops.mq.bio_merge(hctx, bio);
 	}
 
-	if (hctx->flags & BLK_MQ_F_SHOULD_MERGE) {
+	if ((hctx->flags & BLK_MQ_F_SHOULD_MERGE) &&
+			!list_empty_careful(&ctx->rq_list)) {
 		/* default per sw-queue merge */
 		spin_lock(&ctx->lock);
 		ret = blk_mq_attempt_merge(q, ctx, bio);
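For reference, the lockless test used in the new condition, list_empty_careful(), comes from include/linux/list.h; a minimal stand-alone sketch of it (with a stand-in struct list_head, not the verbatim kernel header) looks roughly like this:

/* Minimal stand-in for the kernel's struct list_head. */
struct list_head {
	struct list_head *next, *prev;
};

/*
 * Modeled on the kernel's list_empty_careful(): it tests both
 * head->next and head->prev, so a list that another CPU is in the
 * middle of updating (only one pointer changed so far) is less likely
 * to be misread as empty. Called without ctx->lock held the result is
 * only a hint, which is exactly how the patch uses it: a stale
 * "non-empty" answer just means we take the lock and check properly.
 */
static inline int list_empty_careful(const struct list_head *head)
{
	struct list_head *next = head->next;

	return (next == head) && (next == head->prev);
}

The kernel's version is the same in spirit; see the header for the exact code and its caveats about concurrent modification of the list head itself.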