author | Glauber Costa <glommer@parallels.com> | 2011-11-28 14:45:19 -0200
committer | Ingo Molnar <mingo@elte.hu> | 2011-12-06 20:51:21 +0100
commit | 54c707e98de9ca899e6552a47c797c62c45885ee (patch)
tree | 61ec9be62b2b3db0201aca7c8eddb5e49239baf5 /kernel/sched/sched.h
parent | b39e66eaf9c573f38133e894256caeaf9fd2a528 (diff)
sched/accounting: Re-use scheduler statistics for the root cgroup
Right now, after we collect tick statistics for user and system time and store
them in a well known location, we keep the same statistics again for cpuacct.

Since cpuacct is hierarchical, the numbers for the root cgroup are necessarily
equal to the system-wide numbers.

So it would be better to just reuse them: this patch changes cpuacct accounting
so that the cpustat statistics are kept in a percpu struct kernel_cpustat
array. In the root cgroup case, we just point it to the main array. The rest of
the hierarchy walk can later be disabled entirely with a static branch - but I
am not doing that here.
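To make the aliasing idea concrete, here is an editor's sketch, not code from
this patch: the single-CPU simplification, the struct layouts and the function
name cpuacct_account_field below are illustrative assumptions. Pointing the
root group's cpustat at the system-wide array means the root numbers and the
system-wide numbers are the same storage by construction:

/*
 * sketch.c - editor's illustration of "root cgroup stats alias the
 * system-wide stats". Names and the single-CPU simplification are
 * assumptions; the real kernel uses percpu allocations and the
 * cpuacct cgroup subsystem.
 */
#include <stdio.h>

enum cpu_usage_stat { CPUTIME_USER, CPUTIME_SYSTEM, NR_STATS };

struct kernel_cpustat {
	unsigned long long cpustat[NR_STATS];
};

/* system-wide tick statistics (the "well known location") */
static struct kernel_cpustat kernel_cpustat;

/* per-group accounting; the root group owns no storage of its own */
struct cpuacct {
	struct cpuacct *parent;
	struct kernel_cpustat *cpustat;
};

static struct cpuacct root_cpuacct = {
	.parent  = NULL,
	.cpustat = &kernel_cpustat,	/* root == system-wide, by construction */
};

/* charge 'val' ticks of kind 'idx' to a group and all of its ancestors */
static void cpuacct_account_field(struct cpuacct *ca, enum cpu_usage_stat idx,
				  unsigned long long val)
{
	for (; ca; ca = ca->parent)
		ca->cpustat->cpustat[idx] += val;
}

int main(void)
{
	struct kernel_cpustat child_stats = { { 0 } };
	struct cpuacct child = { .parent = &root_cpuacct, .cpustat = &child_stats };

	cpuacct_account_field(&child, CPUTIME_USER, 3);
	cpuacct_account_field(&root_cpuacct, CPUTIME_SYSTEM, 5);

	/* the root group's numbers and the system-wide numbers share one array */
	printf("system-wide user=%llu system=%llu\n",
	       kernel_cpustat.cpustat[CPUTIME_USER],
	       kernel_cpustat.cpustat[CPUTIME_SYSTEM]);
	return 0;
}

With this layout, charging the root group and updating the system-wide
statistics are literally the same store, which is what allows the duplicate
bookkeeping for the root cgroup to be dropped.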
Signed-off-by: Glauber Costa <glommer@parallels.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Turner <pjt@google.com>
Link: http://lkml.kernel.org/r/1322498719-2255-4-git-send-email-glommer@parallels.com
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched/sched.h')
-rw-r--r-- | kernel/sched/sched.h | 34
1 file changed, 30 insertions(+), 4 deletions(-)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index d88545c667e3..c24801636219 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -830,13 +830,39 @@ extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime
 extern void update_cpu_load(struct rq *this_rq);
 
 #ifdef CONFIG_CGROUP_CPUACCT
+#include <linux/cgroup.h>
+/* track cpu usage of a group of tasks and its child groups */
+struct cpuacct {
+	struct cgroup_subsys_state css;
+	/* cpuusage holds pointer to a u64-type object on every cpu */
+	u64 __percpu *cpuusage;
+	struct kernel_cpustat __percpu *cpustat;
+};
+
+/* return cpu accounting group corresponding to this container */
+static inline struct cpuacct *cgroup_ca(struct cgroup *cgrp)
+{
+	return container_of(cgroup_subsys_state(cgrp, cpuacct_subsys_id),
+			    struct cpuacct, css);
+}
+
+/* return cpu accounting group to which this task belongs */
+static inline struct cpuacct *task_ca(struct task_struct *tsk)
+{
+	return container_of(task_subsys_state(tsk, cpuacct_subsys_id),
+			    struct cpuacct, css);
+}
+
+static inline struct cpuacct *parent_ca(struct cpuacct *ca)
+{
+	if (!ca || !ca->css.cgroup->parent)
+		return NULL;
+	return cgroup_ca(ca->css.cgroup->parent);
+}
+
 extern void cpuacct_charge(struct task_struct *tsk, u64 cputime);
-extern void cpuacct_update_stats(struct task_struct *tsk,
-		enum cpuacct_stat_index idx, cputime_t val);
 #else
 static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
-static inline void cpuacct_update_stats(struct task_struct *tsk,
-		enum cpuacct_stat_index idx, cputime_t val) {}
 #endif
 
 static inline void inc_nr_running(struct rq *rq)
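As a usage note, here is an editor's sketch of the kind of caller these
helpers enable; it is not the code this patch adds to kernel/sched/core.c, and
the function name, the percpu accessors used, and the choice to stop the walk
before the root are assumptions for illustration. The system-wide array is
updated once, and because the root group's ->cpustat aliases that array, only
the non-root ancestors still need an explicit walk:

/*
 * Editor's sketch of a tick-accounting caller using the helpers above.
 */
static void task_group_account_field(struct task_struct *p, int index, u64 val)
{
	struct cpuacct *ca;

	/* root cgroup and system-wide statistics share this array */
	__get_cpu_var(kernel_cpustat).cpustat[index] += val;

	/* walk the remaining (non-root) ancestors; parent_ca() is NULL at root */
	rcu_read_lock();
	for (ca = task_ca(p); ca && parent_ca(ca); ca = parent_ca(ca))
		__this_cpu_add(ca->cpustat->cpustat[index], val);
	rcu_read_unlock();
}

Calling parent_ca() in both the loop condition and the increment is just to
keep the sketch short; a real implementation would more likely compare against
the root group directly, and could skip the whole walk with a static branch
when only the root cgroup exists.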