author     Peter Zijlstra <a.p.zijlstra@chello.nl>  2008-06-27 13:41:26 +0200
committer  Ingo Molnar <mingo@elte.hu>  2008-06-27 14:31:39 +0200
commit     a8a51d5e59561aa5b4d66e19eca819b537783e8f (patch)
tree       b400bc77244a742e737bb9deb94a6911a769e082 /kernel
parent     039a1c41b3a489e34593ea1e1687f6fdad6b13ab (diff)
sched: persistent average load per task
Remove the fall-back to SCHED_LOAD_SCALE by remembering the previous value of
cpu_avg_load_per_task() - this is useful because of the hierarchical group
model in which task weight can be much smaller.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
Cc: Mike Galbraith <efault@gmx.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
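The new helper only refreshes the stored average while tasks are actually queued, so an otherwise idle CPU keeps reporting its last observed per-task weight instead of jumping to SCHED_LOAD_SCALE, which can be far larger than real task weights under the hierarchical group model. The stand-alone user-space sketch below illustrates that difference; the struct rq fields and helper names here are simplified stand-ins for the kernel structures, not the scheduler code itself.

/* Sketch only: models the old fall-back vs. the new persistent average. */
#include <stdio.h>

#define SCHED_LOAD_SCALE 1024UL

struct rq {
	unsigned long nr_running;        /* tasks on the runqueue */
	unsigned long load_weight;       /* summed task weight (rq->load.weight) */
	unsigned long avg_load_per_task; /* remembered between calls */
};

/* Old behaviour: an empty runqueue falls back to SCHED_LOAD_SCALE. */
static unsigned long old_avg_load_per_task(const struct rq *rq)
{
	return rq->nr_running ? rq->load_weight / rq->nr_running
			      : SCHED_LOAD_SCALE;
}

/* New behaviour: update only while tasks run, otherwise return the last value. */
static unsigned long new_avg_load_per_task(struct rq *rq)
{
	if (rq->nr_running)
		rq->avg_load_per_task = rq->load_weight / rq->nr_running;

	return rq->avg_load_per_task;
}

int main(void)
{
	/* Two group-scheduled tasks carrying a small combined weight. */
	struct rq rq = { .nr_running = 2, .load_weight = 128, .avg_load_per_task = 0 };

	printf("busy:  old=%lu new=%lu\n",
	       old_avg_load_per_task(&rq), new_avg_load_per_task(&rq));

	/* The runqueue drains: old jumps to 1024, new keeps reporting 64. */
	rq.nr_running = 0;
	rq.load_weight = 0;
	printf("empty: old=%lu new=%lu\n",
	       old_avg_load_per_task(&rq), new_avg_load_per_task(&rq));
	return 0;
}

With the old variant, balancing heuristics that consult the average on an idle CPU see the inflated fall-back value; with the persistent value they keep seeing roughly the weight the group's tasks actually carried.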
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched.c | 25
1 file changed, 12 insertions(+), 13 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 39d5495540d2..6a6b0139eb32 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -554,6 +554,8 @@ struct rq {
 	int cpu;
 	int online;
 
+	unsigned long avg_load_per_task;
+
 	struct task_struct *migration_thread;
 	struct list_head migration_queue;
 #endif
@@ -1427,9 +1429,18 @@ static inline void dec_cpu_load(struct rq *rq, unsigned long load)
 #ifdef CONFIG_SMP
 static unsigned long source_load(int cpu, int type);
 static unsigned long target_load(int cpu, int type);
-static unsigned long cpu_avg_load_per_task(int cpu);
 static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);
 
+static unsigned long cpu_avg_load_per_task(int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+
+	if (rq->nr_running)
+		rq->avg_load_per_task = rq->load.weight / rq->nr_running;
+
+	return rq->avg_load_per_task;
+}
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
 
 typedef void (*tg_visitor)(struct task_group *, int, struct sched_domain *);
@@ -2011,18 +2022,6 @@ static unsigned long target_load(int cpu, int type)
 }
 
 /*
- * Return the average load per task on the cpu's run queue
- */
-static unsigned long cpu_avg_load_per_task(int cpu)
-{
-	struct rq *rq = cpu_rq(cpu);
-	unsigned long total = weighted_cpuload(cpu);
-	unsigned long n = rq->nr_running;
-
-	return n ? total / n : SCHED_LOAD_SCALE;
-}
-
-/*
  * find_idlest_group finds and returns the least busy CPU group within the
  * domain.
  */