path: root/block/blk-merge.c
author     Jens Axboe <axboe@fb.com>    2017-02-02 08:54:40 -0700
committer  Jens Axboe <axboe@fb.com>    2017-02-03 09:47:32 -0700
commit     b973cb7e89fe3dcc2bc72c5b3aa7a3bfd9d0e6d5 (patch)
tree       8949e898cd4e87d63b6752ae8dfa36e1b461da77 /block/blk-merge.c
parent     9b54d816e00425c3a517514e0d677bb3cec49258 (diff)
blk-merge: return the merged request
When we attempt a request-to-request merge, we currently return 0/1 to indicate whether we merged or not. Change that to return the pointer to the request that we freed. We will use this to move the freeing of that request out of the merge logic, so that callers can drop locks before freeing the request.

There should be no functional changes in this patch.

Signed-off-by: Jens Axboe <axboe@fb.com>
Reviewed-by: Omar Sandoval <osandov@fb.com>
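To illustrate the caller pattern this change is preparing for, here is a minimal, hypothetical sketch (not part of this patch). It assumes a follow-up change moves the actual __blk_put_request() call out of attempt_merge(); free_merged_request() is a placeholder name for whatever ends up releasing the merged request once the lock has been dropped.

#include "blk.h"	/* block-layer internal header declaring attempt_back_merge() */

/*
 * Hypothetical caller sketch: take the queue lock only for the merge
 * itself, then release the merged request after dropping the lock.
 */
static void example_back_merge(struct request_queue *q, struct request *rq)
{
	struct request *free;

	spin_lock_irq(q->queue_lock);
	free = attempt_back_merge(q, rq);	/* now returns the merged request or NULL */
	spin_unlock_irq(q->queue_lock);

	if (free)
		free_merged_request(q, free);	/* hypothetical helper, called without the lock */
}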
Diffstat (limited to 'block/blk-merge.c')
-rw-r--r--   block/blk-merge.c   |   31
1 file changed, 16 insertions(+), 15 deletions(-)
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 6aa43dec5af4..3826fc32b72c 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -659,31 +659,32 @@ static void blk_account_io_merge(struct request *req)
}
/*
- * Has to be called with the request spinlock acquired
+ * For non-mq, this has to be called with the request spinlock acquired.
+ * For mq with scheduling, the appropriate queue wide lock should be held.
*/
-static int attempt_merge(struct request_queue *q, struct request *req,
- struct request *next)
+static struct request *attempt_merge(struct request_queue *q,
+ struct request *req, struct request *next)
{
if (!rq_mergeable(req) || !rq_mergeable(next))
- return 0;
+ return NULL;
if (req_op(req) != req_op(next))
- return 0;
+ return NULL;
/*
* not contiguous
*/
if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
- return 0;
+ return NULL;
if (rq_data_dir(req) != rq_data_dir(next)
|| req->rq_disk != next->rq_disk
|| req_no_special_merge(next))
- return 0;
+ return NULL;
if (req_op(req) == REQ_OP_WRITE_SAME &&
!blk_write_same_mergeable(req->bio, next->bio))
- return 0;
+ return NULL;
/*
* If we are allowed to merge, then append bio list
@@ -692,7 +693,7 @@ static int attempt_merge(struct request_queue *q, struct request *req,
* counts here.
*/
if (!ll_merge_requests_fn(q, req, next))
- return 0;
+ return NULL;
/*
* If failfast settings disagree or any of the two is already
@@ -735,27 +736,27 @@ static int attempt_merge(struct request_queue *q, struct request *req,
/* owner-ship of bio passed from next to req */
next->bio = NULL;
__blk_put_request(q, next);
- return 1;
+ return next;
}
-int attempt_back_merge(struct request_queue *q, struct request *rq)
+struct request *attempt_back_merge(struct request_queue *q, struct request *rq)
{
struct request *next = elv_latter_request(q, rq);
if (next)
return attempt_merge(q, rq, next);
- return 0;
+ return NULL;
}
-int attempt_front_merge(struct request_queue *q, struct request *rq)
+struct request *attempt_front_merge(struct request_queue *q, struct request *rq)
{
struct request *prev = elv_former_request(q, rq);
if (prev)
return attempt_merge(q, prev, rq);
- return 0;
+ return NULL;
}
int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
@@ -767,7 +768,7 @@ int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
if (!e->type->ops.sq.elevator_allow_rq_merge_fn(q, rq, next))
return 0;
- return attempt_merge(q, rq, next);
+ return attempt_merge(q, rq, next) != NULL;
}
bool blk_rq_merge_ok(struct request *rq, struct bio *bio)