author     Tejun Heo <tj@kernel.org>       2012-04-16 13:57:26 -0700
committer  Jens Axboe <axboe@kernel.dk>    2012-04-20 10:06:17 +0200
commit     f95a04afa80c0f4ddd645ef6a84ed118b5d1ad46 (patch)
tree       2428dacf9bab1eb643628c872aed3092704f7fb2 /block/cfq-iosched.c
parent     3c798398e393e5f9502dbab2b51e6c25e2e8f2ac (diff)
blkcg: embed struct blkg_policy_data in policy specific data
Currently blkg_policy_data carries policy specific data as a char flex
array instead of being embedded in the policy specific data. This was
forced by oddities around blkg allocation which are all gone now.

This patch makes blkg_policy_data embedded in the policy specific data -
throtl_grp and cfq_group - so that it's more conventional and consistent
with how io_cq is handled.

* blkcg_policy->pdata_size is renamed to ->pd_size.

* Functions which used to take void *pdata now take struct
  blkg_policy_data *pd.

* blkg_to_pdata()/pdata_to_blkg() are updated to blkg_to_pd()/pd_to_blkg().

* A dummy struct blkg_policy_data definition is added. The dummy
  pdata_to_blkg() definition was unused and inconsistent with the
  non-dummy version - a correct dummy pd_to_blkg() is added.

* throtl and cfq are updated accordingly.

* As dummy blkg_to_pd()/pd_to_blkg() are provided,
  blkg_to_cfqg()/cfqg_to_blkg() no longer need to be ifdef'd and are
  moved outside the ifdef block.

This patch doesn't introduce any functional change.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
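[Editorial sketch, not part of the patch] The embedding convention the series
adopts is easiest to see with a hypothetical policy "foo"; the foo_grp,
pd_to_foog() and blkcg_policy_foo names below are illustrative only, while
struct blkg_policy_data, blkg_to_pd() and pd_to_blkg() are the interfaces
this series actually provides:

    struct foo_grp {
            /* must be the first member: the blkcg core hands out the area
             * as a bare struct blkg_policy_data, and container_of()
             * recovers the enclosing policy struct from it */
            struct blkg_policy_data pd;

            unsigned int weight;            /* policy-private fields follow */
    };

    static inline struct foo_grp *pd_to_foog(struct blkg_policy_data *pd)
    {
            return pd ? container_of(pd, struct foo_grp, pd) : NULL;
    }

    static inline struct foo_grp *blkg_to_foog(struct blkcg_gq *blkg)
    {
            return pd_to_foog(blkg_to_pd(blkg, &blkcg_policy_foo));
    }

    static inline struct blkcg_gq *foog_to_blkg(struct foo_grp *foog)
    {
            return pd_to_blkg(&foog->pd);
    }

This mirrors what the hunks below do for cfq_group: embed pd first, then
convert in both directions through the embedded member instead of casting a
void *pdata.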
Diffstat (limited to 'block/cfq-iosched.c')
-rw-r--r--  block/cfq-iosched.c  46
1 file changed, 27 insertions, 19 deletions
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 792218281d91..7865cc38ea77 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -217,6 +217,9 @@ struct cfqg_stats {
/* This is per cgroup per device grouping structure */
struct cfq_group {
+ /* must be the first member */
+ struct blkg_policy_data pd;
+
/* group service_tree member */
struct rb_node rb_node;
@@ -409,6 +412,21 @@ CFQ_CFQQ_FNS(deep);
CFQ_CFQQ_FNS(wait_busy);
#undef CFQ_CFQQ_FNS
+static inline struct cfq_group *pd_to_cfqg(struct blkg_policy_data *pd)
+{
+ return pd ? container_of(pd, struct cfq_group, pd) : NULL;
+}
+
+static inline struct cfq_group *blkg_to_cfqg(struct blkcg_gq *blkg)
+{
+ return pd_to_cfqg(blkg_to_pd(blkg, &blkcg_policy_cfq));
+}
+
+static inline struct blkcg_gq *cfqg_to_blkg(struct cfq_group *cfqg)
+{
+ return pd_to_blkg(&cfqg->pd);
+}
+
#if defined(CONFIG_CFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
/* cfqg stats flags */
@@ -553,16 +571,6 @@ static inline void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg) { }
#ifdef CONFIG_CFQ_GROUP_IOSCHED
-static inline struct cfq_group *blkg_to_cfqg(struct blkcg_gq *blkg)
-{
- return blkg_to_pdata(blkg, &blkcg_policy_cfq);
-}
-
-static inline struct blkcg_gq *cfqg_to_blkg(struct cfq_group *cfqg)
-{
- return pdata_to_blkg(cfqg);
-}
-
static inline void cfqg_get(struct cfq_group *cfqg)
{
return blkg_get(cfqg_to_blkg(cfqg));
@@ -662,8 +670,6 @@ static void cfq_pd_reset_stats(struct blkcg_gq *blkg)
#else /* CONFIG_CFQ_GROUP_IOSCHED */
-static inline struct cfq_group *blkg_to_cfqg(struct blkcg_gq *blkg) { return NULL; }
-static inline struct blkcg_gq *cfqg_to_blkg(struct cfq_group *cfqg) { return NULL; }
static inline void cfqg_get(struct cfq_group *cfqg) { }
static inline void cfqg_put(struct cfq_group *cfqg) { }
@@ -1374,13 +1380,14 @@ static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
cfqg_get(cfqg);
}
-static u64 cfqg_prfill_weight_device(struct seq_file *sf, void *pdata, int off)
+static u64 cfqg_prfill_weight_device(struct seq_file *sf,
+ struct blkg_policy_data *pd, int off)
{
- struct cfq_group *cfqg = pdata;
+ struct cfq_group *cfqg = pd_to_cfqg(pd);
if (!cfqg->dev_weight)
return 0;
- return __blkg_prfill_u64(sf, pdata, cfqg->dev_weight);
+ return __blkg_prfill_u64(sf, pd, cfqg->dev_weight);
}
static int cfqg_print_weight_device(struct cgroup *cgrp, struct cftype *cft,
@@ -1467,9 +1474,10 @@ static int cfqg_print_rwstat(struct cgroup *cgrp, struct cftype *cft,
}
#ifdef CONFIG_DEBUG_BLK_CGROUP
-static u64 cfqg_prfill_avg_queue_size(struct seq_file *sf, void *pdata, int off)
+static u64 cfqg_prfill_avg_queue_size(struct seq_file *sf,
+ struct blkg_policy_data *pd, int off)
{
- struct cfq_group *cfqg = pdata;
+ struct cfq_group *cfqg = pd_to_cfqg(pd);
u64 samples = blkg_stat_read(&cfqg->stats.avg_queue_size_samples);
u64 v = 0;
@@ -1477,7 +1485,7 @@ static u64 cfqg_prfill_avg_queue_size(struct seq_file *sf, void *pdata, int off)
v = blkg_stat_read(&cfqg->stats.avg_queue_size_sum);
do_div(v, samples);
}
- __blkg_prfill_u64(sf, pdata, v);
+ __blkg_prfill_u64(sf, pd, v);
return 0;
}
@@ -4161,7 +4169,7 @@ static struct blkcg_policy blkcg_policy_cfq = {
.pd_init_fn = cfq_pd_init,
.pd_reset_stats_fn = cfq_pd_reset_stats,
},
- .pdata_size = sizeof(struct cfq_group),
+ .pd_size = sizeof(struct cfq_group),
.cftypes = cfq_blkcg_files,
};
#endif
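[Editorial sketch, not part of the patch] For context on why ->pd_size is
enough for the core: with blkg_policy_data embedded as the first member, a
single allocation of pd_size bytes covers both the generic and the
policy-private fields. A rough sketch of the core-side pattern, simplified
from blk-cgroup.c (error handling, locking and per-policy enablement checks
omitted; the helper name is hypothetical):

    static void blkg_alloc_pds_sketch(struct blkcg_gq *blkg,
                                      struct request_queue *q)
    {
            int i;

            for (i = 0; i < BLKCG_MAX_POLS; i++) {
                    struct blkcg_policy *pol = blkcg_policy[i];
                    struct blkg_policy_data *pd;

                    if (!pol)
                            continue;

                    /* one allocation covers struct blkg_policy_data plus the
                     * policy's private fields, e.g. all of struct cfq_group */
                    pd = kzalloc_node(pol->pd_size, GFP_ATOMIC, q->node);
                    if (!pd)
                            continue;   /* real code frees the blkg and bails */

                    blkg->pd[i] = pd;   /* generic pointer kept by the blkg */
                    pd->blkg = blkg;    /* back-pointer used by pd_to_blkg() */
            }
    }

The back-pointer is what lets the non-ifdef'd cfqg_to_blkg() above stay a
trivial one-liner regardless of whether CFQ group scheduling is built in.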