From 337e89feb7c29043dacd851b6ac28542a9a8aacf Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Wed, 24 Jan 2024 10:26:57 +0100
Subject: blk-mq: introduce a blk_mq_peek_cached_request helper

Add a new helper to check if there is a suitable cached request in
blk_mq_submit_bio. This removes open-coded logic in blk_mq_submit_bio
and moves some checks that were so far done in blk_mq_use_cached_rq
so that they are performed earlier. This avoids the case where we
first check against the cached request but then later end up
allocating a new one anyway and need to grab a queue reference.

Signed-off-by: Christoph Hellwig
Reviewed-by: Damien Le Moal
Tested-by: Damien Le Moal
Link: https://lore.kernel.org/r/20240124092658.2258309-3-hch@lst.de
Signed-off-by: Jens Axboe
---
 block/blk-mq.c | 63 ++++++++++++++++++++++++++++++----------------------------
 1 file changed, 33 insertions(+), 30 deletions(-)

diff --git a/block/blk-mq.c b/block/blk-mq.c
index bc032d06858e..ad57a259e975 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2909,22 +2909,31 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
 }
 
 /*
- * Check if we can use the passed on request for submitting the passed in bio,
- * and remove it from the request list if it can be used.
+ * Check if there is a suitable cached request and return it.
  */
-static bool blk_mq_use_cached_rq(struct request *rq, struct blk_plug *plug,
-		struct bio *bio)
+static struct request *blk_mq_peek_cached_request(struct blk_plug *plug,
+		struct request_queue *q, blk_opf_t opf)
 {
-	enum hctx_type type = blk_mq_get_hctx_type(bio->bi_opf);
-	enum hctx_type hctx_type = rq->mq_hctx->type;
+	enum hctx_type type = blk_mq_get_hctx_type(opf);
+	struct request *rq;
 
-	WARN_ON_ONCE(rq_list_peek(&plug->cached_rq) != rq);
+	if (!plug)
+		return NULL;
+	rq = rq_list_peek(&plug->cached_rq);
+	if (!rq || rq->q != q)
+		return NULL;
+	if (type != rq->mq_hctx->type &&
+	    (type != HCTX_TYPE_READ || rq->mq_hctx->type != HCTX_TYPE_DEFAULT))
+		return NULL;
+	if (op_is_flush(rq->cmd_flags) != op_is_flush(opf))
+		return NULL;
+	return rq;
+}
 
-	if (type != hctx_type &&
-	    !(type == HCTX_TYPE_READ && hctx_type == HCTX_TYPE_DEFAULT))
-		return false;
-	if (op_is_flush(rq->cmd_flags) != op_is_flush(bio->bi_opf))
-		return false;
+static void blk_mq_use_cached_rq(struct request *rq, struct blk_plug *plug,
+		struct bio *bio)
+{
+	WARN_ON_ONCE(rq_list_peek(&plug->cached_rq) != rq);
 
 	/*
 	 * If any qos ->throttle() end up blocking, we will have flushed the
@@ -2937,7 +2946,6 @@ static bool blk_mq_use_cached_rq(struct request *rq, struct blk_plug *plug,
 	blk_mq_rq_time_init(rq, 0);
 	rq->cmd_flags = bio->bi_opf;
 	INIT_LIST_HEAD(&rq->queuelist);
-	return true;
 }
 
 /**
@@ -2965,11 +2973,7 @@ void blk_mq_submit_bio(struct bio *bio)
 
 	bio = blk_queue_bounce(bio, q);
 
-	if (plug) {
-		rq = rq_list_peek(&plug->cached_rq);
-		if (rq && rq->q != q)
-			rq = NULL;
-	}
+	rq = blk_mq_peek_cached_request(plug, q, bio->bi_opf);
 	if (rq) {
 		if (unlikely(bio_may_exceed_limits(bio, &q->limits))) {
 			bio = __bio_split_to_limits(bio, &q->limits, &nr_segs);
@@ -2980,20 +2984,19 @@ void blk_mq_submit_bio(struct bio *bio)
 			return;
 		if (blk_mq_attempt_bio_merge(q, bio, nr_segs))
 			return;
-		if (blk_mq_use_cached_rq(rq, plug, bio))
-			goto done;
-		percpu_ref_get(&q->q_usage_counter);
-	} else {
-		if (unlikely(bio_queue_enter(bio)))
-			return;
-		if (unlikely(bio_may_exceed_limits(bio, &q->limits))) {
-			bio = __bio_split_to_limits(bio, &q->limits, &nr_segs);
-			if (!bio)
-				goto queue_exit;
-		}
-		if (!bio_integrity_prep(bio))
+		blk_mq_use_cached_rq(rq, plug, bio);
+		goto done;
+	}
+
+	if (unlikely(bio_queue_enter(bio)))
+		return;
+	if (unlikely(bio_may_exceed_limits(bio, &q->limits))) {
+		bio = __bio_split_to_limits(bio, &q->limits, &nr_segs);
+		if (!bio)
 			goto queue_exit;
 	}
+	if (!bio_integrity_prep(bio))
+		goto queue_exit;
 
 	if (blk_mq_attempt_bio_merge(q, bio, nr_segs))
 		goto queue_exit;
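
For readers who want to experiment with the compatibility rules the new
helper encodes, here is a minimal, self-contained userspace sketch. The
struct and enum definitions are simplified stand-ins invented for
illustration, not the kernel's real types; only the decision logic
mirrors blk_mq_peek_cached_request above.

/*
 * Userspace model of the checks blk_mq_peek_cached_request performs.
 * All types here are mock stand-ins, not the kernel's definitions.
 */
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

enum hctx_type { HCTX_TYPE_DEFAULT, HCTX_TYPE_READ, HCTX_TYPE_POLL };

struct mock_queue { int id; };

struct mock_rq {
	struct mock_queue *q;	/* owning queue */
	enum hctx_type type;	/* hctx type the request was allocated for */
	bool is_flush;		/* stands in for op_is_flush(rq->cmd_flags) */
};

struct mock_plug {
	struct mock_rq *cached_rq;	/* head of the cached request list */
};

/*
 * Return the cached request only if it is usable for a bio with the
 * given hctx type and flush semantics; NULL otherwise.
 */
static struct mock_rq *peek_cached_request(struct mock_plug *plug,
		struct mock_queue *q, enum hctx_type type, bool is_flush)
{
	struct mock_rq *rq;

	if (!plug)
		return NULL;
	rq = plug->cached_rq;
	if (!rq || rq->q != q)
		return NULL;
	/* A READ bio may fall back to a DEFAULT request; nothing else may. */
	if (type != rq->type &&
	    (type != HCTX_TYPE_READ || rq->type != HCTX_TYPE_DEFAULT))
		return NULL;
	/* Flush and non-flush requests must not be mixed. */
	if (rq->is_flush != is_flush)
		return NULL;
	return rq;
}

int main(void)
{
	struct mock_queue q = { .id = 1 }, other = { .id = 2 };
	struct mock_rq rq = { .q = &q, .type = HCTX_TYPE_DEFAULT };
	struct mock_plug plug = { .cached_rq = &rq };

	/* A READ bio can reuse a cached DEFAULT request... */
	assert(peek_cached_request(&plug, &q, HCTX_TYPE_READ, false) == &rq);
	/* ...and so can a DEFAULT bio, */
	assert(peek_cached_request(&plug, &q, HCTX_TYPE_DEFAULT, false) == &rq);
	/* but a POLL bio cannot, */
	assert(!peek_cached_request(&plug, &q, HCTX_TYPE_POLL, false));
	/* nor a bio aimed at a different queue, */
	assert(!peek_cached_request(&plug, &other, HCTX_TYPE_READ, false));
	/* nor one with mismatched flush semantics. */
	assert(!peek_cached_request(&plug, &q, HCTX_TYPE_READ, true));
	return 0;
}

The asymmetry in the type check reflects a design point visible in the
patch itself: a read bio may ride on a request allocated for the
default hctx, while the reverse pairing is never allowed.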