summaryrefslogtreecommitdiffstats
path: root/block/blk-core.c
diff options
context:
space:
mode:
author: Jens Axboe <axboe@fb.com> 2017-02-03 09:48:28 -0700
committer: Jens Axboe <axboe@fb.com> 2017-02-03 09:48:28 -0700
commit: e4d750c97794ea2bab793d4c518b1f4006364588 (patch)
tree: 8b351d08b1d81986402964a243268e70bb31a6a9 /block/blk-core.c
parent: b973cb7e89fe3dcc2bc72c5b3aa7a3bfd9d0e6d5 (diff)
downloadlinux-e4d750c97794ea2bab793d4c518b1f4006364588.tar.gz
linux-e4d750c97794ea2bab793d4c518b1f4006364588.tar.bz2
linux-e4d750c97794ea2bab793d4c518b1f4006364588.zip
block: free merged request in the caller
If we end up doing a request-to-request merge when we have completed a bio-to-request merge, we free the request from deep down in that path. For blk-mq-sched, the merge path has to hold the appropriate lock, but we don't need it for freeing the request. And in fact holding the lock is problematic, since we are now calling the mq sched put_rq_private() hook with the lock held. Other call paths do not hold this lock. Fix this inconsistency by ensuring that the caller frees a merged request. Then we can do it outside of the lock, making it both more efficient and fixing the blk-mq-sched problem of invoking parts of the scheduler with an unknown lock state. Reported-by: Paolo Valente <paolo.valente@linaro.org> Signed-off-by: Jens Axboe <axboe@fb.com> Reviewed-by: Omar Sandoval <osandov@fb.com>
Diffstat (limited to 'block/blk-core.c')
-rw-r--r-- block/blk-core.c | 12 lines
1 file changed, 9 insertions(+), 3 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 76c42f559df8..1d263311353a 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1596,7 +1596,7 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
{
struct blk_plug *plug;
int el_ret, where = ELEVATOR_INSERT_SORT;
- struct request *req;
+ struct request *req, *free;
unsigned int request_count = 0;
unsigned int wb_acct;
@@ -1637,15 +1637,21 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
if (el_ret == ELEVATOR_BACK_MERGE) {
if (bio_attempt_back_merge(q, req, bio)) {
elv_bio_merged(q, req, bio);
- if (!attempt_back_merge(q, req))
+ free = attempt_back_merge(q, req);
+ if (!free)
elv_merged_request(q, req, el_ret);
+ else
+ __blk_put_request(q, free);
goto out_unlock;
}
} else if (el_ret == ELEVATOR_FRONT_MERGE) {
if (bio_attempt_front_merge(q, req, bio)) {
elv_bio_merged(q, req, bio);
- if (!attempt_front_merge(q, req))
+ free = attempt_front_merge(q, req);
+ if (!free)
elv_merged_request(q, req, el_ret);
+ else
+ __blk_put_request(q, free);
goto out_unlock;
}
}