author | Omar Sandoval <osandov@fb.com> | 2016-06-01 22:18:48 -0700
committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2017-02-26 11:07:49 +0100
commit | e8330cb5ae475f59308297e2ac9a496d4192912e (patch)
tree | 5978520f07b3283bf2132aa15cf2632509755c8a /block
parent | f0414c1f8bb7a4e69064296f460773170c5435ac (diff)
download | linux-stable-e8330cb5ae475f59308297e2ac9a496d4192912e.tar.gz, linux-stable-e8330cb5ae475f59308297e2ac9a496d4192912e.tar.bz2, linux-stable-e8330cb5ae475f59308297e2ac9a496d4192912e.zip
blk-mq: really fix plug list flushing for nomerge queues
commit 87c279e613f848c691111b29d49de8df3f4f56da upstream.
Commit 0809e3ac6231 ("block: fix plug list flushing for nomerge queues")
updated blk_mq_make_request() to set request_count even when
blk_queue_nomerges() returns true. However, blk_mq_make_request() only
does limited plugging and doesn't use request_count;
blk_sq_make_request() is the one that should have been fixed. Do that
and get rid of the unnecessary work in the mq version.
Fixes: 0809e3ac6231 ("block: fix plug list flushing for nomerge queues")
Signed-off-by: Omar Sandoval <osandov@fb.com>
Reviewed-by: Ming Lei <tom.leiming@gmail.com>
Reviewed-by: Jeff Moyer <jmoyer@redhat.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
Cc: Sumit Semwal <sumit.semwal@linaro.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'block')
-rw-r--r-- | block/blk-mq.c | 17
1 file changed, 8 insertions, 9 deletions
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 6cfc6b200366..d8d63c38bf29 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1259,12 +1259,9 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 
 	blk_queue_split(q, &bio, q->bio_split);
 
-	if (!is_flush_fua && !blk_queue_nomerges(q)) {
-		if (blk_attempt_plug_merge(q, bio, &request_count,
-					   &same_queue_rq))
-			return BLK_QC_T_NONE;
-	} else
-		request_count = blk_plug_queued_count(q);
+	if (!is_flush_fua && !blk_queue_nomerges(q) &&
+	    blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
+		return BLK_QC_T_NONE;
 
 	rq = blk_mq_map_request(q, bio, &data);
 	if (unlikely(!rq))
@@ -1355,9 +1352,11 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
 
 	blk_queue_split(q, &bio, q->bio_split);
 
-	if (!is_flush_fua && !blk_queue_nomerges(q) &&
-	    blk_attempt_plug_merge(q, bio, &request_count, NULL))
-		return BLK_QC_T_NONE;
+	if (!is_flush_fua && !blk_queue_nomerges(q)) {
+		if (blk_attempt_plug_merge(q, bio, &request_count, NULL))
+			return BLK_QC_T_NONE;
+	} else
+		request_count = blk_plug_queued_count(q);
 
 	rq = blk_mq_map_request(q, bio, &data);
 	if (unlikely(!rq))
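For readers who want the control flow at a glance, below is a small, self-contained C model of the two paths as they look after this patch. It is only an illustrative sketch, not kernel code: attempt_plug_merge() and plug_queued_count() are hypothetical stubs standing in for blk_attempt_plug_merge() and blk_plug_queued_count(), and all plug-list handling, locking, and error paths are omitted. The point it mirrors is that the single-queue path still tracks request_count for nomerge queues (that count feeds the later decision to flush an oversized plug list), while the multi-queue path never consults request_count and can skip the bookkeeping entirely.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stubs standing in for the real kernel helpers. */
static bool attempt_plug_merge(unsigned int *request_count)
{
	*request_count += 1;	/* side effect the single-queue path relies on */
	return false;		/* pretend no merge happened */
}

static unsigned int plug_queued_count(void)
{
	return 3;		/* pretend three requests are already plugged */
}

/* Multi-queue path after the patch: only attempt a merge; request_count
 * is never used afterwards, so nomerge queues skip all of this. */
static void mq_path(bool is_flush_fua, bool nomerges)
{
	unsigned int request_count = 0;

	if (!is_flush_fua && !nomerges && attempt_plug_merge(&request_count))
		return;		/* bio merged into a plugged request */

	printf("mq: dispatch new request\n");
}

/* Single-queue path after the patch: nomerge queues still need an
 * accurate request_count so the plug list is flushed once it grows
 * too large. */
static void sq_path(bool is_flush_fua, bool nomerges)
{
	unsigned int request_count = 0;

	if (!is_flush_fua && !nomerges) {
		if (attempt_plug_merge(&request_count))
			return;	/* bio merged into a plugged request */
	} else {
		request_count = plug_queued_count();
	}

	printf("sq: dispatch, %u request(s) already plugged\n", request_count);
}

int main(void)
{
	mq_path(false, true);	/* nomerge queue, mq: no bookkeeping needed */
	sq_path(false, true);	/* nomerge queue, sq: count is still kept */
	return 0;
}

Compiling and running this model (for example, cc model.c && ./a.out) prints one line per path for a nomerge queue, showing that only the single-queue path keeps a nonzero request_count.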