author    Jens Axboe <axboe@fb.com>    2015-11-05 10:41:16 -0700
committer Jens Axboe <axboe@fb.com>    2015-11-07 10:40:46 -0700
commit    dece16353ef47d8d33f5302bc158072a9d65e26f (patch)
tree      75438980d0f76c155bab11ddb5f1f1600fcaee13 /block
parent    8e483ed1342a4ea45b70f0f33ac54eff7a33d918 (diff)
block: change ->make_request_fn() and users to return a queue cookie
No functional changes in this patch, but it prepares us for returning a more useful cookie related to the IO that was queued up.

Signed-off-by: Jens Axboe <axboe@fb.com>
Acked-by: Christoph Hellwig <hch@lst.de>
Acked-by: Keith Busch <keith.busch@intel.com>
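For context, the new contract is that every ->make_request_fn() (and generic_make_request()/submit_bio() on top of it) returns a blk_qc_t cookie instead of void; at this stage of the series all paths simply return BLK_QC_T_NONE. A minimal sketch of a bio-based driver's entry point under the new signature (the function name here is hypothetical, not part of this patch):

    #include <linux/blkdev.h>
    #include <linux/bio.h>

    static blk_qc_t example_make_request(struct request_queue *q, struct bio *bio)
    {
            /* process or queue the bio exactly as before this change ... */
            bio_endio(bio);

            /*
             * No meaningful cookie yet; later patches return a value that
             * identifies the IO that was queued up.
             */
            return BLK_QC_T_NONE;
    }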
Diffstat (limited to 'block')
-rw-r--r--  block/blk-core.c  26
-rw-r--r--  block/blk-mq.c    26
2 files changed, 30 insertions(+), 22 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 89eec7965870..e93df6d386a0 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -809,7 +809,7 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
}
EXPORT_SYMBOL(blk_init_queue_node);
-static void blk_queue_bio(struct request_queue *q, struct bio *bio);
+static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio);
struct request_queue *
blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
@@ -1678,7 +1678,7 @@ void init_request_from_bio(struct request *req, struct bio *bio)
blk_rq_bio_prep(req->q, req, bio);
}
-static void blk_queue_bio(struct request_queue *q, struct bio *bio)
+static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
{
const bool sync = !!(bio->bi_rw & REQ_SYNC);
struct blk_plug *plug;
@@ -1698,7 +1698,7 @@ static void blk_queue_bio(struct request_queue *q, struct bio *bio)
if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
bio->bi_error = -EIO;
bio_endio(bio);
- return;
+ return BLK_QC_T_NONE;
}
if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) {
@@ -1713,7 +1713,7 @@ static void blk_queue_bio(struct request_queue *q, struct bio *bio)
*/
if (!blk_queue_nomerges(q)) {
if (blk_attempt_plug_merge(q, bio, &request_count, NULL))
- return;
+ return BLK_QC_T_NONE;
} else
request_count = blk_plug_queued_count(q);
@@ -1791,6 +1791,8 @@ get_rq:
out_unlock:
spin_unlock_irq(q->queue_lock);
}
+
+ return BLK_QC_T_NONE;
}
/*
@@ -1996,12 +1998,13 @@ end_io:
* a lower device by calling into generic_make_request recursively, which
* means the bio should NOT be touched after the call to ->make_request_fn.
*/
-void generic_make_request(struct bio *bio)
+blk_qc_t generic_make_request(struct bio *bio)
{
struct bio_list bio_list_on_stack;
+ blk_qc_t ret = BLK_QC_T_NONE;
if (!generic_make_request_checks(bio))
- return;
+ goto out;
/*
* We only want one ->make_request_fn to be active at a time, else
@@ -2015,7 +2018,7 @@ void generic_make_request(struct bio *bio)
*/
if (current->bio_list) {
bio_list_add(current->bio_list, bio);
- return;
+ goto out;
}
/* following loop may be a bit non-obvious, and so deserves some
@@ -2040,7 +2043,7 @@ void generic_make_request(struct bio *bio)
if (likely(blk_queue_enter(q, __GFP_WAIT) == 0)) {
- q->make_request_fn(q, bio);
+ ret = q->make_request_fn(q, bio);
blk_queue_exit(q);
@@ -2053,6 +2056,9 @@ void generic_make_request(struct bio *bio)
}
} while (bio);
current->bio_list = NULL; /* deactivate */
+
+out:
+ return ret;
}
EXPORT_SYMBOL(generic_make_request);
@@ -2066,7 +2072,7 @@ EXPORT_SYMBOL(generic_make_request);
* interfaces; @bio must be presetup and ready for I/O.
*
*/
-void submit_bio(int rw, struct bio *bio)
+blk_qc_t submit_bio(int rw, struct bio *bio)
{
bio->bi_rw |= rw;
@@ -2100,7 +2106,7 @@ void submit_bio(int rw, struct bio *bio)
}
}
- generic_make_request(bio);
+ return generic_make_request(bio);
}
EXPORT_SYMBOL(submit_bio);
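Since submit_bio() and generic_make_request() now propagate the cookie from ->make_request_fn(), a submitter can capture it. A purely illustrative sketch, assuming a prepared bio (at this point in the series the cookie is always BLK_QC_T_NONE):

    blk_qc_t cookie;

    cookie = submit_bio(READ, bio);  /* rw flags and bio as in this kernel version */
    if (cookie == BLK_QC_T_NONE) {
            /* nothing useful to refer back to yet */
    }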
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 1c27b3eaef64..65f43bd696a0 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1235,7 +1235,7 @@ static int blk_mq_direct_issue_request(struct request *rq)
* but will attempt to bypass the hctx queueing if we can go straight to
* hardware for SYNC IO.
*/
-static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
+static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
{
const int is_sync = rw_is_sync(bio->bi_rw);
const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA);
@@ -1249,7 +1249,7 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
bio_io_error(bio);
- return;
+ return BLK_QC_T_NONE;
}
blk_queue_split(q, &bio, q->bio_split);
@@ -1257,13 +1257,13 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
if (!is_flush_fua && !blk_queue_nomerges(q)) {
if (blk_attempt_plug_merge(q, bio, &request_count,
&same_queue_rq))
- return;
+ return BLK_QC_T_NONE;
} else
request_count = blk_plug_queued_count(q);
rq = blk_mq_map_request(q, bio, &data);
if (unlikely(!rq))
- return;
+ return BLK_QC_T_NONE;
if (unlikely(is_flush_fua)) {
blk_mq_bio_to_request(rq, bio);
@@ -1302,11 +1302,11 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
old_rq = rq;
blk_mq_put_ctx(data.ctx);
if (!old_rq)
- return;
+ return BLK_QC_T_NONE;
if (!blk_mq_direct_issue_request(old_rq))
- return;
+ return BLK_QC_T_NONE;
blk_mq_insert_request(old_rq, false, true, true);
- return;
+ return BLK_QC_T_NONE;
}
if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
@@ -1320,13 +1320,14 @@ run_queue:
blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
}
blk_mq_put_ctx(data.ctx);
+ return BLK_QC_T_NONE;
}
/*
* Single hardware queue variant. This will attempt to use any per-process
* plug for merging and IO deferral.
*/
-static void blk_sq_make_request(struct request_queue *q, struct bio *bio)
+static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
{
const int is_sync = rw_is_sync(bio->bi_rw);
const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA);
@@ -1339,18 +1340,18 @@ static void blk_sq_make_request(struct request_queue *q, struct bio *bio)
if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
bio_io_error(bio);
- return;
+ return BLK_QC_T_NONE;
}
blk_queue_split(q, &bio, q->bio_split);
if (!is_flush_fua && !blk_queue_nomerges(q) &&
blk_attempt_plug_merge(q, bio, &request_count, NULL))
- return;
+ return BLK_QC_T_NONE;
rq = blk_mq_map_request(q, bio, &data);
if (unlikely(!rq))
- return;
+ return BLK_QC_T_NONE;
if (unlikely(is_flush_fua)) {
blk_mq_bio_to_request(rq, bio);
@@ -1374,7 +1375,7 @@ static void blk_sq_make_request(struct request_queue *q, struct bio *bio)
}
list_add_tail(&rq->queuelist, &plug->mq_list);
blk_mq_put_ctx(data.ctx);
- return;
+ return BLK_QC_T_NONE;
}
if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
@@ -1389,6 +1390,7 @@ run_queue:
}
blk_mq_put_ctx(data.ctx);
+ return BLK_QC_T_NONE;
}
/*