author		Jens Axboe <axboe@kernel.dk>	2022-09-21 15:19:54 -0600
committer	Jens Axboe <axboe@kernel.dk>	2022-09-30 07:49:09 -0600
commit		de671d6116b5210097cd6fbb877bac92536f265b (patch)
tree		c230b310a4475736c46dd83cb8207acd7dc90427 /block
parent		4b6a5d9cea911424e84107df8c4eb8317938d2cd (diff)
block: change request end_io handler to pass back a return value
In preparation for allowing the end_io handler to pass ownership of the
request back to the block layer, rather than always retaining it, change
the handler to return a value. For now, every handler is simply converted
to return RQ_END_IO_NONE, so there should be no functional change with
this patch.

Reviewed-by: Keith Busch <kbusch@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block')
-rw-r--r--	block/blk-flush.c	10
-rw-r--r--	block/blk-mq.c		14
2 files changed, 16 insertions, 8 deletions
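The new return type and handler signature live in include/linux/blk-mq.h, which is outside this diffstat (it is limited to 'block'). For reference, the header side of the change presumably amounts to the sketch below; the names RQ_END_IO_NONE and RQ_END_IO_FREE are taken from the hunks that follow, the rest is inferred:

/* Sketch of the end_io contract the hunks below are converted to. */
enum rq_end_io_ret {
	RQ_END_IO_NONE,		/* caller keeps ownership of the request */
	RQ_END_IO_FREE,		/* block layer should free the request */
};

typedef enum rq_end_io_ret (rq_end_io_fn)(struct request *, blk_status_t);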
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 27705fc584a0..53202eff545e 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -217,7 +217,8 @@ static void blk_flush_complete_seq(struct request *rq,
 	blk_kick_flush(q, fq, cmd_flags);
 }
 
-static void flush_end_io(struct request *flush_rq, blk_status_t error)
+static enum rq_end_io_ret flush_end_io(struct request *flush_rq,
+				       blk_status_t error)
 {
 	struct request_queue *q = flush_rq->q;
 	struct list_head *running;
@@ -231,7 +232,7 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
 	if (!req_ref_put_and_test(flush_rq)) {
 		fq->rq_status = error;
 		spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
-		return;
+		return RQ_END_IO_NONE;
 	}
 
 	blk_account_io_flush(flush_rq);
@@ -268,6 +269,7 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
 	}
 
 	spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
+	return RQ_END_IO_NONE;
 }
 
 bool is_flush_rq(struct request *rq)
@@ -353,7 +355,8 @@ static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
 	blk_flush_queue_rq(flush_rq, false);
 }
 
-static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
+static enum rq_end_io_ret mq_flush_data_end_io(struct request *rq,
+					       blk_status_t error)
 {
 	struct request_queue *q = rq->q;
 	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
@@ -375,6 +378,7 @@ static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
 	spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
 
 	blk_mq_sched_restart(hctx);
+	return RQ_END_IO_NONE;
 }
 
 /**
diff --git a/block/blk-mq.c b/block/blk-mq.c
index b32f70f38c6e..a21631de45b3 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1001,7 +1001,8 @@ inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
 
 	if (rq->end_io) {
 		rq_qos_done(rq->q, rq);
-		rq->end_io(rq, error);
+		if (rq->end_io(rq, error) == RQ_END_IO_FREE)
+			blk_mq_free_request(rq);
 	} else {
 		blk_mq_free_request(rq);
 	}
@@ -1295,12 +1296,13 @@ struct blk_rq_wait {
 	blk_status_t ret;
 };
 
-static void blk_end_sync_rq(struct request *rq, blk_status_t ret)
+static enum rq_end_io_ret blk_end_sync_rq(struct request *rq, blk_status_t ret)
 {
 	struct blk_rq_wait *wait = rq->end_io_data;
 
 	wait->ret = ret;
 	complete(&wait->done);
+	return RQ_END_IO_NONE;
 }
 
 bool blk_rq_is_poll(struct request *rq)
@@ -1534,10 +1536,12 @@ static bool blk_mq_req_expired(struct request *rq, unsigned long *next)
 
 void blk_mq_put_rq_ref(struct request *rq)
 {
-	if (is_flush_rq(rq))
-		rq->end_io(rq, 0);
-	else if (req_ref_put_and_test(rq))
+	if (is_flush_rq(rq)) {
+		if (rq->end_io(rq, 0) == RQ_END_IO_FREE)
+			blk_mq_free_request(rq);
+	} else if (req_ref_put_and_test(rq)) {
 		__blk_mq_free_request(rq);
+	}
 }
 
 static bool blk_mq_check_expired(struct request *rq, void *priv)
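For illustration only (not part of this patch): under the new contract, an end_io handler would look roughly like the sketch below. The handler name and the use of end_io_data are hypothetical; this patch converts every existing handler to return RQ_END_IO_NONE, and only later changes actually start returning RQ_END_IO_FREE to hand the request back to the block layer, as __blk_mq_end_request() and blk_mq_put_rq_ref() above are now prepared to handle.

/* Hypothetical driver-side handler under the new end_io contract. */
static enum rq_end_io_ret my_drv_end_io(struct request *rq,
					blk_status_t error)
{
	struct completion *waiting = rq->end_io_data;	/* hypothetical use */

	complete(waiting);

	/*
	 * RQ_END_IO_NONE keeps today's behaviour: the handler (or its
	 * caller) remains responsible for the request.  Returning
	 * RQ_END_IO_FREE instead would ask blk-mq to call
	 * blk_mq_free_request() on the handler's behalf.
	 */
	return RQ_END_IO_NONE;
}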