author      Tejun Heo <tj@kernel.org>       2012-04-13 13:11:34 -0700
committer   Jens Axboe <axboe@kernel.dk>    2012-04-20 10:06:06 +0200
commit      3c96cb32d318f323c1bf972a4c66821f8499e34d
tree        e76d0437f487405d8a22a727b8085696b949179d /block
parent      a2b1693bac45ea3fe3ba612fd22c45f17449f610
blkcg: drop stuff unused after per-queue policy activation update
* All_q_list is unused. Drop all_q_{mutex|list}.

* @for_root of blkg_lookup_create() is always %false when called from
  outside blk-cgroup.c proper. Factor out __blkg_lookup_create() so
  that it doesn't check whether @q is bypassing and use the
  underscored version for the @for_root callsite.

* blkg_destroy_all() is used only from blkcg proper and @destroy_root
  is always %true. Make it static and drop @destroy_root.
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
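The second and third bullets describe a common factoring pattern: keep the safety check in the public entry point, and give the one internal caller that must skip it a static, underscored helper instead of a boolean flag. Below is a minimal, self-contained sketch of that pattern, assuming simplified stand-in types and names (struct queue, lookup_create(), __lookup_create(), activate_policy()); it is not the kernel's actual blkcg code.

/*
 * Sketch of the "public wrapper + unchecked __helper" factoring.
 * Everything here is a simplified stand-in for illustration only.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct queue {
        bool bypassing;         /* stand-in for blk_queue_bypass() */
        bool dead;              /* stand-in for blk_queue_dead() */
        int  group;             /* stand-in for the per-queue group */
};

/* Internal helper: performs no bypass check, like __blkg_lookup_create(). */
static int __lookup_create(struct queue *q)
{
        if (!q->group)
                q->group = 1;   /* "allocate" the group on first use */
        return 0;
}

/* Public entry point: rejects bypassing queues, like blkg_lookup_create(). */
static int lookup_create(struct queue *q)
{
        if (q->bypassing)
                return q->dead ? -EINVAL : -EBUSY;
        return __lookup_create(q);
}

/*
 * The policy-activation path runs while the queue is deliberately
 * bypassing, so it calls the underscored helper directly instead of
 * threading a @for_root flag through the public function.
 */
static int activate_policy(struct queue *q)
{
        return __lookup_create(q);
}

int main(void)
{
        struct queue q = { .bypassing = true };

        printf("external caller on bypassing queue: %d\n", lookup_create(&q));
        printf("activation path on bypassing queue: %d\n", activate_policy(&q));
        return 0;
}

The external caller is refused with -EBUSY while the activation path succeeds, which mirrors why the bypass check can live solely in the wrapper once the only @for_root user calls the helper directly.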
Diffstat (limited to 'block')
-rw-r--r--  block/blk-cgroup.c   | 61
-rw-r--r--  block/blk-cgroup.h   |  6
-rw-r--r--  block/blk-throttle.c |  2
-rw-r--r--  block/cfq-iosched.c  |  2
4 files changed, 23 insertions, 48 deletions
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index d6d59ad105b4..10f0d2fc0b23 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -25,8 +25,6 @@
 #define MAX_KEY_LEN 100
 
 static DEFINE_MUTEX(blkcg_pol_mutex);
-static DEFINE_MUTEX(all_q_mutex);
-static LIST_HEAD(all_q_list);
 
 struct blkio_cgroup blkio_root_cgroup = { .cfq_weight = 2 * CFQ_WEIGHT_DEFAULT };
 EXPORT_SYMBOL_GPL(blkio_root_cgroup);
@@ -179,9 +177,8 @@ struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg,
 }
 EXPORT_SYMBOL_GPL(blkg_lookup);
 
-struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
-                                       struct request_queue *q,
-                                       bool for_root)
+static struct blkio_group *__blkg_lookup_create(struct blkio_cgroup *blkcg,
+                                                struct request_queue *q)
         __releases(q->queue_lock) __acquires(q->queue_lock)
 {
         struct blkio_group *blkg;
@@ -189,13 +186,6 @@ struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
         WARN_ON_ONCE(!rcu_read_lock_held());
         lockdep_assert_held(q->queue_lock);
 
-        /*
-         * This could be the first entry point of blkcg implementation and
-         * we shouldn't allow anything to go through for a bypassing queue.
-         */
-        if (unlikely(blk_queue_bypass(q)) && !for_root)
-                return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);
-
         blkg = __blkg_lookup(blkcg, q);
         if (blkg)
                 return blkg;
@@ -223,6 +213,18 @@ struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
 out:
         return blkg;
 }
+
+struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
+                                       struct request_queue *q)
+{
+        /*
+         * This could be the first entry point of blkcg implementation and
+         * we shouldn't allow anything to go through for a bypassing queue.
+         */
+        if (unlikely(blk_queue_bypass(q)))
+                return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);
+        return __blkg_lookup_create(blkcg, q);
+}
 EXPORT_SYMBOL_GPL(blkg_lookup_create);
 
 static void blkg_destroy(struct blkio_group *blkg)
@@ -249,12 +251,10 @@ static void blkg_destroy(struct blkio_group *blkg)
 /**
  * blkg_destroy_all - destroy all blkgs associated with a request_queue
  * @q: request_queue of interest
- * @destroy_root: whether to destroy root blkg or not
  *
- * Destroy blkgs associated with @q.  If @destroy_root is %true, all are
- * destroyed; otherwise, root blkg is left alone.
+ * Destroy all blkgs associated with @q.
  */
-void blkg_destroy_all(struct request_queue *q, bool destroy_root)
+static void blkg_destroy_all(struct request_queue *q)
 {
         struct blkio_group *blkg, *n;
 
@@ -263,10 +263,6 @@ void blkg_destroy_all(struct request_queue *q, bool destroy_root)
         list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
                 struct blkio_cgroup *blkcg = blkg->blkcg;
 
-                /* skip root? */
-                if (!destroy_root && blkg->blkcg == &blkio_root_cgroup)
-                        continue;
-
                 spin_lock(&blkcg->lock);
                 blkg_destroy(blkg);
                 spin_unlock(&blkcg->lock);
@@ -274,7 +270,6 @@ void blkg_destroy_all(struct request_queue *q, bool destroy_root)
 
         spin_unlock_irq(q->queue_lock);
 }
-EXPORT_SYMBOL_GPL(blkg_destroy_all);
 
 static void blkg_rcu_free(struct rcu_head *rcu_head)
 {
@@ -492,7 +487,7 @@ int blkg_conf_prep(struct blkio_cgroup *blkcg,
         spin_lock_irq(disk->queue->queue_lock);
 
         if (blkcg_policy_enabled(disk->queue, pol))
-                blkg = blkg_lookup_create(blkcg, disk->queue, false);
+                blkg = blkg_lookup_create(blkcg, disk->queue);
         else
                 blkg = ERR_PTR(-EINVAL);
 
@@ -625,20 +620,9 @@ done:
  */
 int blkcg_init_queue(struct request_queue *q)
 {
-        int ret;
-
         might_sleep();
 
-        ret = blk_throtl_init(q);
-        if (ret)
-                return ret;
-
-        mutex_lock(&all_q_mutex);
-        INIT_LIST_HEAD(&q->all_q_node);
-        list_add_tail(&q->all_q_node, &all_q_list);
-        mutex_unlock(&all_q_mutex);
-
-        return 0;
+        return blk_throtl_init(q);
 }
 
 /**
@@ -662,12 +646,7 @@ void blkcg_drain_queue(struct request_queue *q)
  */
 void blkcg_exit_queue(struct request_queue *q)
 {
-        mutex_lock(&all_q_mutex);
-        list_del_init(&q->all_q_node);
-        mutex_unlock(&all_q_mutex);
-
-        blkg_destroy_all(q, true);
-
+        blkg_destroy_all(q);
         blk_throtl_exit(q);
 }
 
@@ -741,7 +720,7 @@ int blkcg_activate_policy(struct request_queue *q,
         spin_lock_irq(q->queue_lock);
 
         rcu_read_lock();
-        blkg = blkg_lookup_create(&blkio_root_cgroup, q, true);
+        blkg = __blkg_lookup_create(&blkio_root_cgroup, q);
         rcu_read_unlock();
 
         if (IS_ERR(blkg)) {
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index 66253a7c8ff4..222063d36355 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -115,7 +115,6 @@ extern int blkcg_activate_policy(struct request_queue *q,
                                  const struct blkio_policy_type *pol);
 extern void blkcg_deactivate_policy(struct request_queue *q,
                                     const struct blkio_policy_type *pol);
-extern void blkg_destroy_all(struct request_queue *q, bool destroy_root);
 
 void blkcg_print_blkgs(struct seq_file *sf, struct blkio_cgroup *blkcg,
                        u64 (*prfill)(struct seq_file *, void *, int),
@@ -334,8 +333,6 @@ static inline int blkcg_activate_policy(struct request_queue *q,
                                         const struct blkio_policy_type *pol) { return 0; }
 static inline void blkcg_deactivate_policy(struct request_queue *q,
                                            const struct blkio_policy_type *pol) { }
-static inline void blkg_destroy_all(struct request_queue *q,
-                                    bool destory_root) { }
 
 static inline void *blkg_to_pdata(struct blkio_group *blkg,
                                   struct blkio_policy_type *pol) { return NULL; }
@@ -354,8 +351,7 @@ extern struct blkio_cgroup *bio_blkio_cgroup(struct bio *bio);
 extern struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg,
                                        struct request_queue *q);
 struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
-                                       struct request_queue *q,
-                                       bool for_root);
+                                       struct request_queue *q);
 #else
 struct cgroup;
 static inline struct blkio_cgroup *
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 2fc964e06ea4..e2aaf27e1f10 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -285,7 +285,7 @@ static struct throtl_grp *throtl_lookup_create_tg(struct throtl_data *td,
         } else {
                 struct blkio_group *blkg;
 
-                blkg = blkg_lookup_create(blkcg, q, false);
+                blkg = blkg_lookup_create(blkcg, q);
 
                 /* if %NULL and @q is alive, fall back to root_tg */
                 if (!IS_ERR(blkg))
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 0203652e1f34..eb07eb64e85b 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1348,7 +1348,7 @@ static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd,
         } else {
                 struct blkio_group *blkg;
 
-                blkg = blkg_lookup_create(blkcg, q, false);
+                blkg = blkg_lookup_create(blkcg, q);
                 if (!IS_ERR(blkg))
                         cfqg = blkg_to_cfqg(blkg);
         }