author     Jan Kara <jack@suse.cz>          2022-06-23 09:48:32 +0200
committer  Jens Axboe <axboe@kernel.dk>     2022-06-27 06:29:12 -0600
commit     82b74cac28493fb40ea74fb2fe648b5fc7ea0c1c (patch)
tree       4965afde9f626a0c326ad99297436b258fd93d5c
parent     f25865447294bf2468c2587dd98f8fa999260893 (diff)
blk-ioprio: Convert from rqos policy to direct call
Convert blk-ioprio handling from an rqos policy to a direct call from blk_mq_submit_bio(). Firstly, blk-ioprio is not much of an rqos policy anyway; it just needs a hook in the bio submission path to set the bio's IO priority. Secondly, the rqos .track hook actually gets called too late for blk-ioprio's purposes, and introducing a special rqos hook just for blk-ioprio would look even weirder.

Reviewed-by: Damien Le Moal <damien.lemoal@opensource.wdc.com>
Tested-by: Damien Le Moal <damien.lemoal@opensource.wdc.com>
Signed-off-by: Jan Kara <jack@suse.cz>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20220623074840.5960-7-jack@suse.cz
Signed-off-by: Jens Axboe <axboe@kernel.dk>
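For readers less familiar with rq-qos, the refactor boils down to replacing an indirect call through a registered ops table with a plain function call in the submission path. The stand-alone user-space C sketch below illustrates that pattern only; the names (struct rq_qos_ops_demo, submit_bio_old/submit_bio_new, blkcg_set_ioprio_demo) are illustrative stand-ins, not the kernel's actual API, which is what the diff further down changes.

#include <stdio.h>

/* Toy stand-in for the kernel's struct bio; not the real definition. */
struct bio { int bi_ioprio; };

/* Old shape: an ops table with a .track callback registered on the queue. */
struct rq_qos_ops_demo {
	void (*track)(struct bio *bio);
};

static void track_set_ioprio(struct bio *bio)
{
	bio->bi_ioprio = 4;	/* pretend the cgroup policy picked priority 4 */
}

static struct rq_qos_ops_demo ioprio_ops_demo = {
	.track = track_set_ioprio,
};

/* New shape: a plain function the submission path calls directly. */
static void blkcg_set_ioprio_demo(struct bio *bio)
{
	bio->bi_ioprio = 4;
}

static void submit_bio_old(struct bio *bio)
{
	/* indirection through the registered callback, late in the path */
	ioprio_ops_demo.track(bio);
}

static void submit_bio_new(struct bio *bio)
{
	/* direct call, no rqos registration or per-queue allocation needed */
	blkcg_set_ioprio_demo(bio);
}

int main(void)
{
	struct bio a = { 0 }, b = { 0 };

	submit_bio_old(&a);
	submit_bio_new(&b);
	printf("old path ioprio=%d, new path ioprio=%d\n", a.bi_ioprio, b.bi_ioprio);
	return 0;
}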
-rw-r--r--  block/blk-cgroup.c |  1
-rw-r--r--  block/blk-ioprio.c | 50
-rw-r--r--  block/blk-ioprio.h |  9
-rw-r--r--  block/blk-mq.c     |  8
4 files changed, 23 insertions(+), 45 deletions(-)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 764e740b0c0f..6906981563f8 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -1299,6 +1299,7 @@ int blkcg_init_queue(struct request_queue *q)
ret = blk_iolatency_init(q);
if (ret) {
blk_throtl_exit(q);
+ blk_ioprio_exit(q);
goto err_destroy_all;
}
diff --git a/block/blk-ioprio.c b/block/blk-ioprio.c
index 3f605583598b..c00060a02c6e 100644
--- a/block/blk-ioprio.c
+++ b/block/blk-ioprio.c
@@ -181,17 +181,12 @@ static struct blkcg_policy ioprio_policy = {
.pd_free_fn = ioprio_free_pd,
};
-struct blk_ioprio {
- struct rq_qos rqos;
-};
-
-static void blkcg_ioprio_track(struct rq_qos *rqos, struct request *rq,
- struct bio *bio)
+void blkcg_set_ioprio(struct bio *bio)
{
struct ioprio_blkcg *blkcg = ioprio_blkcg_from_bio(bio);
u16 prio;
- if (blkcg->prio_policy == POLICY_NO_CHANGE)
+ if (!blkcg || blkcg->prio_policy == POLICY_NO_CHANGE)
return;
/*
@@ -207,49 +202,14 @@ static void blkcg_ioprio_track(struct rq_qos *rqos, struct request *rq,
bio->bi_ioprio = prio;
}
-static void blkcg_ioprio_exit(struct rq_qos *rqos)
+void blk_ioprio_exit(struct request_queue *q)
{
- struct blk_ioprio *blkioprio_blkg =
- container_of(rqos, typeof(*blkioprio_blkg), rqos);
-
- blkcg_deactivate_policy(rqos->q, &ioprio_policy);
- kfree(blkioprio_blkg);
+ blkcg_deactivate_policy(q, &ioprio_policy);
}
-static struct rq_qos_ops blkcg_ioprio_ops = {
- .track = blkcg_ioprio_track,
- .exit = blkcg_ioprio_exit,
-};
-
int blk_ioprio_init(struct request_queue *q)
{
- struct blk_ioprio *blkioprio_blkg;
- struct rq_qos *rqos;
- int ret;
-
- blkioprio_blkg = kzalloc(sizeof(*blkioprio_blkg), GFP_KERNEL);
- if (!blkioprio_blkg)
- return -ENOMEM;
-
- ret = blkcg_activate_policy(q, &ioprio_policy);
- if (ret) {
- kfree(blkioprio_blkg);
- return ret;
- }
-
- rqos = &blkioprio_blkg->rqos;
- rqos->id = RQ_QOS_IOPRIO;
- rqos->ops = &blkcg_ioprio_ops;
- rqos->q = q;
-
- /*
- * Registering the rq-qos policy after activating the blk-cgroup
- * policy guarantees that ioprio_blkcg_from_bio(bio) != NULL in the
- * rq-qos callbacks.
- */
- rq_qos_add(q, rqos);
-
- return 0;
+ return blkcg_activate_policy(q, &ioprio_policy);
}
static int __init ioprio_init(void)
diff --git a/block/blk-ioprio.h b/block/blk-ioprio.h
index a7785c2f1aea..5a1eb550e178 100644
--- a/block/blk-ioprio.h
+++ b/block/blk-ioprio.h
@@ -6,14 +6,23 @@
#include <linux/kconfig.h>
struct request_queue;
+struct bio;
#ifdef CONFIG_BLK_CGROUP_IOPRIO
int blk_ioprio_init(struct request_queue *q);
+void blk_ioprio_exit(struct request_queue *q);
+void blkcg_set_ioprio(struct bio *bio);
#else
static inline int blk_ioprio_init(struct request_queue *q)
{
return 0;
}
+static inline void blk_ioprio_exit(struct request_queue *q)
+{
+}
+static inline void blkcg_set_ioprio(struct bio *bio)
+{
+}
#endif
#endif /* _BLK_IOPRIO_H_ */
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 62b7025d6854..4c4944b6f520 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -42,6 +42,7 @@
#include "blk-stat.h"
#include "blk-mq-sched.h"
#include "blk-rq-qos.h"
+#include "blk-ioprio.h"
static DEFINE_PER_CPU(struct llist_head, blk_cpu_done);
@@ -2779,6 +2780,11 @@ static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
return rq;
}
+static void bio_set_ioprio(struct bio *bio)
+{
+ blkcg_set_ioprio(bio);
+}
+
/**
* blk_mq_submit_bio - Create and send a request to block device.
* @bio: Bio pointer.
@@ -2819,6 +2825,8 @@ void blk_mq_submit_bio(struct bio *bio)
trace_block_getrq(bio);
+ bio_set_ioprio(bio);
+
rq_qos_track(q, rq, bio);
blk_mq_bio_to_request(rq, bio, nr_segs);