author | Tejun Heo <tj@kernel.org> | 2012-03-05 13:15:19 -0800 |
---|---|---|
committer | Jens Axboe <axboe@kernel.dk> | 2012-03-06 21:27:23 +0100 |
commit | 03aa264ac15637b6f98374270bcdf31400965505 (patch) | |
tree | 6fa9ca54d3f775fba19123790f6655158034a1d8 /block/blk-cgroup.h | |
parent | 4eef3049986e8397d5003916aed8cad6567a5e02 (diff) | |
download | linux-03aa264ac15637b6f98374270bcdf31400965505.tar.gz linux-03aa264ac15637b6f98374270bcdf31400965505.tar.bz2 linux-03aa264ac15637b6f98374270bcdf31400965505.zip |
blkcg: let blkcg core manage per-queue blkg list and counter
With the previous patch that moved the blkg list heads and counters to
request_queue and blkg, the logic to manage them in both policies is
almost identical and can be moved to the blkcg core.
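For illustration only, a minimal userspace sketch of the shared per-queue
list/counter pattern described above; the struct and field names are
simplified stand-ins, not the kernel's actual definitions:

#include <stdio.h>
#include <stdlib.h>

struct blkg {                       /* stand-in for struct blkio_group */
        struct blkg *q_next;        /* node on the owning queue's blkg list */
        int plid;                   /* id of the policy that owns this group */
};

struct queue {                      /* stand-in for struct request_queue */
        struct blkg *blkg_list;     /* per-queue list head, owned by blkcg core */
        int nr_blkgs;               /* per-queue group counter */
};

/* one copy of the link logic instead of one copy per policy */
static void blkg_link(struct queue *q, struct blkg *blkg)
{
        blkg->q_next = q->blkg_list;
        q->blkg_list = blkg;
        q->nr_blkgs++;
}

int main(void)
{
        struct queue q = { NULL, 0 };
        struct blkg *g = calloc(1, sizeof(*g));

        blkg_link(&q, g);
        printf("nr_blkgs=%d\n", q.nr_blkgs);
        free(g);
        return 0;
}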
This patch moves the blkg link logic into blkg_lookup_create(), implements
common blkg unlink code in blkg_destroy(), and updates
blkg_destroy_all() so that it is policy-specific and can skip the root
group. The updated blkg_destroy_all() is now used both to clear the queue
for bypassing and elevator switching, and to release all blkgs on queue
exit. A sketch of the new semantics follows below.
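As promised above, a hedged sketch of the per-policy destroy-all semantics
with an option to keep the root group; again a simplified userspace model
using the same stand-in types, not the kernel implementation:

#include <stdbool.h>
#include <stdlib.h>

struct blkg {
        struct blkg *q_next;        /* node on the queue's blkg list */
        int plid;                   /* owning policy id */
        bool is_root;               /* group belonging to the root cgroup */
};

struct queue {
        struct blkg *blkg_list;
        int nr_blkgs;
};

/* models blkg_destroy_all(q, plid, destroy_root) */
static void destroy_all(struct queue *q, int plid, bool destroy_root)
{
        struct blkg **pp = &q->blkg_list;

        while (*pp) {
                struct blkg *blkg = *pp;

                /* skip other policies' groups, and the root group unless asked */
                if (blkg->plid != plid || (blkg->is_root && !destroy_root)) {
                        pp = &blkg->q_next;
                        continue;
                }

                *pp = blkg->q_next; /* the common unlink step (blkg_destroy()) */
                q->nr_blkgs--;
                free(blkg);
        }
}

int main(void)
{
        struct queue q = { NULL, 0 };
        destroy_all(&q, 0, false);  /* e.g. clear one policy's groups, keep root */
        return 0;
}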
This patch introduces a race window where policy [de]registration may
race against queue blkg clearing. This can only be a problem on cfq
unload and shouldn't be a real problem in practice (and we have many
other places where this race already exists). Future patches will
remove these unlikely races.
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block/blk-cgroup.h')
-rw-r--r-- | block/blk-cgroup.h | 15 |
1 files changed, 5 insertions, 10 deletions
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index ae96f196d469..83ce5fa0a604 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -196,11 +196,6 @@ struct blkio_group {
 };
 
 typedef void (blkio_init_group_fn)(struct blkio_group *blkg);
-typedef void (blkio_link_group_fn)(struct request_queue *q,
-			struct blkio_group *blkg);
-typedef void (blkio_unlink_group_fn)(struct request_queue *q,
-			struct blkio_group *blkg);
-typedef bool (blkio_clear_queue_fn)(struct request_queue *q);
 typedef void (blkio_update_group_weight_fn)(struct request_queue *q,
 			struct blkio_group *blkg, unsigned int weight);
 typedef void (blkio_update_group_read_bps_fn)(struct request_queue *q,
@@ -214,9 +209,6 @@ typedef void (blkio_update_group_write_iops_fn)(struct request_queue *q,
 
 struct blkio_policy_ops {
 	blkio_init_group_fn *blkio_init_group_fn;
-	blkio_link_group_fn *blkio_link_group_fn;
-	blkio_unlink_group_fn *blkio_unlink_group_fn;
-	blkio_clear_queue_fn *blkio_clear_queue_fn;
 	blkio_update_group_weight_fn *blkio_update_group_weight_fn;
 	blkio_update_group_read_bps_fn *blkio_update_group_read_bps_fn;
 	blkio_update_group_write_bps_fn *blkio_update_group_write_bps_fn;
@@ -238,7 +230,8 @@ extern void blkcg_exit_queue(struct request_queue *q);
 /* Blkio controller policy registration */
 extern void blkio_policy_register(struct blkio_policy_type *);
 extern void blkio_policy_unregister(struct blkio_policy_type *);
-extern void blkg_destroy_all(struct request_queue *q);
+extern void blkg_destroy_all(struct request_queue *q,
+			     enum blkio_policy_id plid, bool destroy_root);
 
 /**
  * blkg_to_pdata - get policy private data
@@ -319,7 +312,9 @@ static inline void blkcg_drain_queue(struct request_queue *q) { }
 static inline void blkcg_exit_queue(struct request_queue *q) { }
 static inline void blkio_policy_register(struct blkio_policy_type *blkiop) { }
 static inline void blkio_policy_unregister(struct blkio_policy_type *blkiop) { }
-static inline void blkg_destroy_all(struct request_queue *q) { }
+static inline void blkg_destroy_all(struct request_queue *q,
+				    enum blkio_policy_id plid,
+				    bool destory_root) { }
 static inline void *blkg_to_pdata(struct blkio_group *blkg,
 			      struct blkio_policy_type *pol) { return NULL; }