author | Mark Brown <broonie@kernel.org> | 2018-01-08 15:54:50 +0000 |
---|---|---|
committer | Mark Brown <broonie@kernel.org> | 2018-01-08 15:54:50 +0000 |
commit | 498495dba268b20e8eadd7fe93c140c68b6cc9d2 (patch) | |
tree | 00d1562049d8bc2194fddd9ba0cbbe0812ad6f68 /kernel/sched/debug.c | |
parent | d5cc0a1fcbb5ddbef9fdd4c4a978da3254ddbf37 (diff) | |
parent | 5c256045b87b8aa8e5bc9d2e2fdc0802351c1f99 (diff) | |
download | linux-stable-498495dba268b20e8eadd7fe93c140c68b6cc9d2.tar.gz linux-stable-498495dba268b20e8eadd7fe93c140c68b6cc9d2.tar.bz2 linux-stable-498495dba268b20e8eadd7fe93c140c68b6cc9d2.zip |
Merge branch 'fix/intel' of https://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound into asoc-intel
Diffstat (limited to 'kernel/sched/debug.c')
-rw-r--r-- | kernel/sched/debug.c | 20 |
1 file changed, 13 insertions(+), 7 deletions(-)
```diff
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 01217fb5a5de..1ca0130ed4f9 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -441,9 +441,11 @@ static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group
 		P_SCHEDSTAT(se->statistics.wait_count);
 	}
 	P(se->load.weight);
+	P(se->runnable_weight);
 #ifdef CONFIG_SMP
 	P(se->avg.load_avg);
 	P(se->avg.util_avg);
+	P(se->avg.runnable_load_avg);
 #endif
 
 #undef PN_SCHEDSTAT
@@ -466,8 +468,6 @@ static char *task_group_path(struct task_group *tg)
 }
 #endif
 
-static const char stat_nam[] = TASK_STATE_TO_CHAR_STR;
-
 static void
 print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
 {
@@ -560,16 +560,19 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 	SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
 	SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
 #ifdef CONFIG_SMP
+	SEQ_printf(m, "  .%-30s: %ld\n", "runnable_weight", cfs_rq->runnable_weight);
 	SEQ_printf(m, "  .%-30s: %lu\n", "load_avg",
 			cfs_rq->avg.load_avg);
 	SEQ_printf(m, "  .%-30s: %lu\n", "runnable_load_avg",
-			cfs_rq->runnable_load_avg);
+			cfs_rq->avg.runnable_load_avg);
 	SEQ_printf(m, "  .%-30s: %lu\n", "util_avg",
 			cfs_rq->avg.util_avg);
-	SEQ_printf(m, "  .%-30s: %ld\n", "removed_load_avg",
-			atomic_long_read(&cfs_rq->removed_load_avg));
-	SEQ_printf(m, "  .%-30s: %ld\n", "removed_util_avg",
-			atomic_long_read(&cfs_rq->removed_util_avg));
+	SEQ_printf(m, "  .%-30s: %ld\n", "removed.load_avg",
+			cfs_rq->removed.load_avg);
+	SEQ_printf(m, "  .%-30s: %ld\n", "removed.util_avg",
+			cfs_rq->removed.util_avg);
+	SEQ_printf(m, "  .%-30s: %ld\n", "removed.runnable_sum",
+			cfs_rq->removed.runnable_sum);
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	SEQ_printf(m, "  .%-30s: %lu\n", "tg_load_avg_contrib",
 			cfs_rq->tg_load_avg_contrib);
@@ -1006,10 +1009,13 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
 		   "nr_involuntary_switches", (long long)p->nivcsw);
 
 	P(se.load.weight);
+	P(se.runnable_weight);
 #ifdef CONFIG_SMP
 	P(se.avg.load_sum);
+	P(se.avg.runnable_load_sum);
 	P(se.avg.util_sum);
 	P(se.avg.load_avg);
+	P(se.avg.runnable_load_avg);
 	P(se.avg.util_avg);
 	P(se.avg.last_update_time);
 #endif
```
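The added fields are printed with the same `"  .%-30s: ..."` format string as the existing per-cfs_rq statistics, so they line up with the rest of the /proc/sched_debug output. The snippet below is a minimal user-space sketch, not kernel code: plain `printf` stands in for `SEQ_printf`, and the sample values are made up, purely to illustrate how the new `runnable_weight`, `runnable_load_avg`, and `removed.*` lines would be rendered.

```c
/*
 * User-space approximation of the debug output added by this patch.
 * The kernel prints live cfs_rq state via SEQ_printf; here we use
 * printf with hypothetical sample values just to show the alignment.
 */
#include <stdio.h>

int main(void)
{
	/* Hypothetical sample values; real ones come from the scheduler. */
	long runnable_weight = 1048576;
	unsigned long runnable_load_avg = 1023;
	long removed_load_avg = 0;
	long removed_util_avg = 0;
	long removed_runnable_sum = 0;

	printf("  .%-30s: %ld\n", "runnable_weight", runnable_weight);
	printf("  .%-30s: %lu\n", "runnable_load_avg", runnable_load_avg);
	printf("  .%-30s: %ld\n", "removed.load_avg", removed_load_avg);
	printf("  .%-30s: %ld\n", "removed.util_avg", removed_util_avg);
	printf("  .%-30s: %ld\n", "removed.runnable_sum", removed_runnable_sum);
	return 0;
}
```

Built with any C compiler, this prints five aligned lines in the same style as the debug file; in the kernel the values reflect the current cfs_rq averages rather than constants.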