summary | refs | log | tree | commit | diff | stats
path: root/kernel/sched_fair.c
diff options
context:
space:
mode:
authorDmitry Adamushko <dmitry.adamushko@gmail.com>2007-10-15 17:00:08 +0200
committerIngo Molnar <mingo@elte.hu>2007-10-15 17:00:08 +0200
commitf6b53205e17c8ca481c69ed579a35a650a4b481a (patch)
treed7820281757e3a2c7cfff7e38275261dcd077eab /kernel/sched_fair.c
parent4530d7ab0fb8d5056b68c376949e2d5c4db7817e (diff)
downloadlinux-f6b53205e17c8ca481c69ed579a35a650a4b481a.tar.gz
linux-f6b53205e17c8ca481c69ed579a35a650a4b481a.tar.bz2
linux-f6b53205e17c8ca481c69ed579a35a650a4b481a.zip
sched: rework enqueue/dequeue_entity() to get rid of set_curr_task()
Rework enqueue/dequeue_entity() to get rid of sched_class::set_curr_task(). This simplifies sched_setscheduler(), rt_mutex_setprio() and sched_move_tasks().

   text    data  bss  dec    hex   filename
   24330   2734  20   27084  69cc  sched.o.before
   24233   2730  20   26983  6967  sched.o.after

Signed-off-by: Dmitry Adamushko <dmitry.adamushko@gmail.com>
Signed-off-by: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--kernel/sched_fair.c55
1 file changed, 25 insertions, 30 deletions
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 4dd256d46853..568e922255c6 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -472,9 +472,20 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
}
static void
-enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
+enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
+ int wakeup, int set_curr)
{
/*
+ * In case of the 'current'.
+ */
+ if (unlikely(set_curr)) {
+ update_stats_curr_start(cfs_rq, se);
+ cfs_rq->curr = se;
+ account_entity_enqueue(cfs_rq, se);
+ return;
+ }
+
+ /*
* Update the fair clock.
*/
update_curr(cfs_rq);
@@ -485,8 +496,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
}
update_stats_enqueue(cfs_rq, se);
- if (se != cfs_rq->curr)
- __enqueue_entity(cfs_rq, se);
+ __enqueue_entity(cfs_rq, se);
account_entity_enqueue(cfs_rq, se);
}
@@ -506,8 +516,12 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
}
}
#endif
- if (se != cfs_rq->curr)
+ if (likely(se != cfs_rq->curr))
__dequeue_entity(cfs_rq, se);
+ else {
+ update_stats_curr_end(cfs_rq, se);
+ cfs_rq->curr = NULL;
+ }
account_entity_dequeue(cfs_rq, se);
}
@@ -689,12 +703,17 @@ static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup)
{
struct cfs_rq *cfs_rq;
struct sched_entity *se = &p->se;
+ int set_curr = 0;
+
+ /* Are we enqueuing the current task? */
+ if (unlikely(task_running(rq, p)))
+ set_curr = 1;
for_each_sched_entity(se) {
if (se->on_rq)
break;
cfs_rq = cfs_rq_of(se);
- enqueue_entity(cfs_rq, se, wakeup);
+ enqueue_entity(cfs_rq, se, wakeup, set_curr);
}
}
@@ -742,7 +761,7 @@ static void yield_task_fair(struct rq *rq)
* position within the tree:
*/
dequeue_entity(cfs_rq, se, 0);
- enqueue_entity(cfs_rq, se, 0);
+ enqueue_entity(cfs_rq, se, 0, 1);
return;
}
@@ -985,29 +1004,6 @@ static void task_new_fair(struct rq *rq, struct task_struct *p)
resched_task(rq->curr);
}
-#ifdef CONFIG_FAIR_GROUP_SCHED
-/* Account for a task changing its policy or group.
- *
- * This routine is mostly called to set cfs_rq->curr field when a task
- * migrates between groups/classes.
- */
-static void set_curr_task_fair(struct rq *rq)
-{
- struct sched_entity *se = &rq->curr->se;
-
- for_each_sched_entity(se)
- set_next_entity(cfs_rq_of(se), se);
-}
-#else
-static void set_curr_task_fair(struct rq *rq)
-{
- struct sched_entity *se = &rq->curr->se;
- struct cfs_rq *cfs_rq = cfs_rq_of(se);
-
- cfs_rq->curr = se;
-}
-#endif
-
/*
* All the scheduling class methods:
*/
@@ -1023,7 +1019,6 @@ struct sched_class fair_sched_class __read_mostly = {
.load_balance = load_balance_fair,
- .set_curr_task = set_curr_task_fair,
.task_tick = task_tick_fair,
.task_new = task_new_fair,
};