authorSrivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>2008-01-25 21:07:59 +0100
committerIngo Molnar <mingo@elte.hu>2008-01-25 21:07:59 +0100
commit93f992ccc008dd4030381caeebb252e85e66684b (patch)
tree34c177cc9de4eee560aee07c08a1fde59b37ed37 /kernel
parent86faf39d0fc04272b05fab1db6d683f3ac7199d1 (diff)
downloadlinux-93f992ccc008dd4030381caeebb252e85e66684b.tar.gz
linux-93f992ccc008dd4030381caeebb252e85e66684b.tar.bz2
linux-93f992ccc008dd4030381caeebb252e85e66684b.zip
sched: group scheduling code cleanup
Minor cleanups:

- Fix coding style
- remove obsolete comment

Signed-off-by: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r--   kernel/sched.c   21
1 file changed, 3 insertions(+), 18 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index e76b11ca6df3..7f827b70ae02 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -191,12 +191,12 @@ struct task_group init_task_group = {
};
#ifdef CONFIG_FAIR_USER_SCHED
-# define INIT_TASK_GRP_LOAD 2*NICE_0_LOAD
+# define INIT_TASK_GROUP_LOAD 2*NICE_0_LOAD
#else
-# define INIT_TASK_GRP_LOAD NICE_0_LOAD
+# define INIT_TASK_GROUP_LOAD NICE_0_LOAD
#endif
-static int init_task_group_load = INIT_TASK_GRP_LOAD;
+static int init_task_group_load = INIT_TASK_GROUP_LOAD;
/* return group to which a task belongs */
static inline struct task_group *task_group(struct task_struct *p)
@@ -881,21 +881,6 @@ static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
#define sched_class_highest (&rt_sched_class)
-/*
- * Update delta_exec, delta_fair fields for rq.
- *
- * delta_fair clock advances at a rate inversely proportional to
- * total load (rq->load.weight) on the runqueue, while
- * delta_exec advances at the same rate as wall-clock (provided
- * cpu is not idle).
- *
- * delta_exec / delta_fair is a measure of the (smoothened) load on this
- * runqueue over any given interval. This (smoothened) load is used
- * during load balance.
- *
- * This function is called /before/ updating rq->load
- * and when switching tasks.
- */
static inline void inc_load(struct rq *rq, const struct task_struct *p)
{
update_load_add(&rq->load, p->se.load.weight);
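
For context on the hunk above: inc_load() folds a task's load weight into the runqueue's aggregate load, and update_load_add() is the low-level helper that performs the addition and invalidates the cached inverse weight. Below is a minimal standalone sketch of that accounting, using simplified stand-ins for struct load_weight, struct rq, update_load_add() and an assumed update_load_sub() counterpart; the value 1024 for a nice-0 task's weight (NICE_0_LOAD) is likewise an assumption of this sketch, not something taken from the patch.

    #include <stdio.h>

    /* Simplified stand-ins for the kernel structures (illustrative only). */
    struct load_weight {
            unsigned long weight;     /* aggregate load weight */
            unsigned long inv_weight; /* cached inverse, invalidated on change */
    };

    struct rq {
            struct load_weight load;  /* total load of runnable tasks on this runqueue */
    };

    /* Mirrors the add/sub bookkeeping that inc_load() relies on in the patch. */
    static inline void update_load_add(struct load_weight *lw, unsigned long inc)
    {
            lw->weight += inc;
            lw->inv_weight = 0;       /* force recomputation of the cached inverse */
    }

    static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
    {
            lw->weight -= dec;
            lw->inv_weight = 0;
    }

    int main(void)
    {
            struct rq rq = { .load = { .weight = 0, .inv_weight = 0 } };
            unsigned long nice_0_load = 1024;  /* assumed NICE_0_LOAD for a nice-0 task */

            update_load_add(&rq.load, nice_0_load);  /* task enqueued */
            printf("rq load after enqueue: %lu\n", rq.load.weight);

            update_load_sub(&rq.load, nice_0_load);  /* task dequeued */
            printf("rq load after dequeue: %lu\n", rq.load.weight);
            return 0;
    }

Zeroing inv_weight on every change matters because the scheduler caches a reciprocal of the weight for fast division; the sketch only mirrors that bookkeeping, not the full calculation.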