author     Christoph Hellwig <hch@lst.de>   2023-05-18 07:31:01 +0200
committer  Jens Axboe <axboe@kernel.dk>     2023-05-18 19:42:54 -0600
commit     dd6216bb16e83e349d5d987227328031b0b0d30d (patch)
tree       a0f848cac75d999c7fa488d387cfecbc67f3f5b1 /include
parent     fdcab6cddef24a26b86d798814b3c25057e53c21 (diff)
blk-mq: make sure elevator callbacks aren't called for passthrough requests
When a queue has an I/O scheduler attached (q->elevator is set), passthrough
requests can still be marked RQF_ELV, so some elevator callbacks are called
for them.
Fix this by splitting RQF_ELV into RQF_SCHED_TAGS, which is set for all
requests issued on a queue that uses an I/O scheduler, and RQF_USE_SCHED,
which is only set for non-flush, non-passthrough requests on such a queue.
Roughly based on two different patches from
Ming Lei <ming.lei@redhat.com>.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Link: https://lore.kernel.org/r/20230518053101.760632-4-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
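For context, the allocation-side half of the split lives in block/blk-mq.c and
is not part of the 'include' diff shown below. The following is a simplified
sketch of that logic, not the literal patched code: the helper name
blk_mq_set_sched_flags is made up for illustration, while struct
blk_mq_alloc_data, op_is_flush() and blk_op_is_passthrough() are existing
block-layer names.

/*
 * Sketch (hypothetical helper) of how the two flags are assigned when a
 * request is allocated on a queue with an I/O scheduler attached; the real
 * logic sits in the allocation path in block/blk-mq.c.
 */
static void blk_mq_set_sched_flags(struct blk_mq_alloc_data *data)
{
	if (!data->q->elevator)
		return;

	/* every request on such a queue allocates from hctx->sched_tags */
	data->rq_flags |= RQF_SCHED_TAGS;

	/*
	 * Only non-flush, non-passthrough requests go through the elevator,
	 * so only they get RQF_USE_SCHED and thus see elevator callbacks.
	 */
	if (!op_is_flush(data->cmd_flags) &&
	    !blk_op_is_passthrough(data->cmd_flags))
		data->rq_flags |= RQF_USE_SCHED;
}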
Diffstat (limited to 'include')
-rw-r--r--   include/linux/blk-mq.h   12
1 file changed, 7 insertions(+), 5 deletions(-)
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 5529e7d28ae6..e4a211957db6 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -38,6 +38,10 @@ typedef __u32 __bitwise req_flags_t;
 #define RQF_MQ_INFLIGHT		((__force req_flags_t)(1 << 6))
 /* don't call prep for this one */
 #define RQF_DONTPREP		((__force req_flags_t)(1 << 7))
+/* use hctx->sched_tags */
+#define RQF_SCHED_TAGS		((__force req_flags_t)(1 << 8))
+/* use an I/O scheduler for this request */
+#define RQF_USE_SCHED		((__force req_flags_t)(1 << 9))
 /* vaguely specified driver internal error. Ignored by the block layer */
 #define RQF_FAILED		((__force req_flags_t)(1 << 10))
 /* don't warn about errors */
@@ -57,9 +61,7 @@ typedef __u32 __bitwise req_flags_t;
 #define RQF_ZONE_WRITE_LOCKED	((__force req_flags_t)(1 << 19))
 /* ->timeout has been called, don't expire again */
 #define RQF_TIMED_OUT		((__force req_flags_t)(1 << 21))
-/* queue has elevator attached */
-#define RQF_ELV			((__force req_flags_t)(1 << 22))
-#define RQF_RESV		((__force req_flags_t)(1 << 23))
+#define RQF_RESV		((__force req_flags_t)(1 << 23))
 
 /* flags that prevent us from merging requests: */
 #define RQF_NOMERGE_FLAGS \
@@ -842,7 +844,7 @@ void blk_mq_end_request_batch(struct io_comp_batch *ib);
  */
 static inline bool blk_mq_need_time_stamp(struct request *rq)
 {
-	return (rq->rq_flags & (RQF_IO_STAT | RQF_STATS | RQF_ELV));
+	return (rq->rq_flags & (RQF_IO_STAT | RQF_STATS | RQF_USE_SCHED));
 }
 
 static inline bool blk_mq_is_reserved_rq(struct request *rq)
@@ -858,7 +860,7 @@ static inline bool blk_mq_add_to_batch(struct request *req,
 				       struct io_comp_batch *iob, int ioerror,
 				       void (*complete)(struct io_comp_batch *))
 {
-	if (!iob || (req->rq_flags & RQF_ELV) || ioerror ||
+	if (!iob || (req->rq_flags & RQF_USE_SCHED) || ioerror ||
 	    (req->end_io && !blk_rq_is_passthrough(req)))
 		return false;
 
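Consumers of the flags split along the same line. The snippet below is a
sketch with hypothetical helper names (rq_tag_set, rq_finish_with_elevator),
not code from this series: tag bookkeeping tests RQF_SCHED_TAGS, while code
that calls into the elevator tests RQF_USE_SCHED, so a passthrough request on
an elevator queue still uses and frees a scheduler tag but is never handed to
the scheduler's callbacks.

/* Hypothetical helpers illustrating which flag each kind of caller checks. */
static inline struct blk_mq_tags *rq_tag_set(struct request *rq,
					     struct blk_mq_hw_ctx *hctx)
{
	/* tag bookkeeping: true for every request on an elevator queue */
	if (rq->rq_flags & RQF_SCHED_TAGS)
		return hctx->sched_tags;
	return hctx->tags;
}

static inline void rq_finish_with_elevator(struct request *rq)
{
	struct elevator_queue *e = rq->q->elevator;

	/* elevator callbacks: never invoked for flush/passthrough requests */
	if ((rq->rq_flags & RQF_USE_SCHED) && e->type->ops.finish_request)
		e->type->ops.finish_request(rq);
}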