path: root/kernel/sched_rt.c
author     Ingo Molnar <mingo@elte.hu>  2008-07-06 14:23:39 +0200
committer  Ingo Molnar <mingo@elte.hu>  2008-07-06 14:23:39 +0200
commit     68083e05d72d94f347293d8cc0067050ba904bfa (patch)
tree       842e71365bd90866be7add181661a4039d891564 /kernel/sched_rt.c
parent     7baac8b91f9871ba8cb09af84de4ae1d86d07812 (diff)
parent     b7279469d66b55119784b8b9529c99c1955fe747 (diff)
download   linux-68083e05d72d94f347293d8cc0067050ba904bfa.tar.gz
           linux-68083e05d72d94f347293d8cc0067050ba904bfa.tar.bz2
           linux-68083e05d72d94f347293d8cc0067050ba904bfa.zip
Merge commit 'v2.6.26-rc9' into cpus4096
Diffstat (limited to 'kernel/sched_rt.c')
-rw-r--r--  kernel/sched_rt.c  70
1 file changed, 38 insertions, 32 deletions
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index d73386c6e361..e757f370eb1b 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -250,7 +250,8 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
if (rt_rq->rt_time || rt_rq->rt_nr_running)
idle = 0;
spin_unlock(&rt_rq->rt_runtime_lock);
- }
+ } else if (rt_rq->rt_nr_running)
+ idle = 0;
if (enqueue)
sched_rt_rq_enqueue(rt_rq);
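
The hunk above fixes the period-timer idle decision: a group that accrued no rt_time but still has tasks queued must keep the timer running. A simplified paraphrase of the decision after this change (the surrounding per-rt_rq loop of do_sched_rt_period_timer() and the runtime replenishment are elided; not verbatim from this tree):

	if (rt_rq->rt_time) {
		/* locked replenishment path, see the hunk above */
		if (rt_rq->rt_time || rt_rq->rt_nr_running)
			idle = 0;
	} else if (rt_rq->rt_nr_running)
		idle = 0;	/* tasks still queued: the timer must not stop */
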
@@ -449,13 +450,19 @@ void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
#endif
}
-static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
+static void __enqueue_rt_entity(struct sched_rt_entity *rt_se)
{
struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
struct rt_prio_array *array = &rt_rq->active;
struct rt_rq *group_rq = group_rt_rq(rt_se);
- if (group_rq && rt_rq_throttled(group_rq))
+ /*
+ * Don't enqueue the group if it's throttled, or when empty.
+ * The latter is a consequence of the former when a child group
+ * gets throttled and the current group doesn't have any other
+ * active members.
+ */
+ if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
return;
list_add_tail(&rt_se->run_list, array->queue + rt_se_prio(rt_se));
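
The new test in __enqueue_rt_entity() above keeps a group off its parent's queue while it is throttled or empty; the empty case arises when the group's only runnable member is a child group that just got throttled. A hypothetical helper spelling out the same condition (illustration only; the real check is open-coded in the hunk above):

	static inline int group_rq_enqueueable(struct rt_rq *group_rq)
	{
		/* worth enqueueing only if not throttled and something can run */
		return !rt_rq_throttled(group_rq) && group_rq->rt_nr_running;
	}
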
@@ -464,7 +471,7 @@ static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
inc_rt_tasks(rt_se, rt_rq);
}
-static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
+static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
{
struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
struct rt_prio_array *array = &rt_rq->active;
@@ -480,11 +487,10 @@ static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
* Because the prio of an upper entry depends on the lower
* entries, we must remove entries top - down.
*/
-static void dequeue_rt_stack(struct task_struct *p)
+static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
{
- struct sched_rt_entity *rt_se, *back = NULL;
+ struct sched_rt_entity *back = NULL;
- rt_se = &p->rt;
for_each_sched_rt_entity(rt_se) {
rt_se->back = back;
back = rt_se;
@@ -492,7 +498,26 @@ static void dequeue_rt_stack(struct task_struct *p)
for (rt_se = back; rt_se; rt_se = rt_se->back) {
if (on_rt_rq(rt_se))
- dequeue_rt_entity(rt_se);
+ __dequeue_rt_entity(rt_se);
+ }
+}
+
+static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
+{
+ dequeue_rt_stack(rt_se);
+ for_each_sched_rt_entity(rt_se)
+ __enqueue_rt_entity(rt_se);
+}
+
+static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
+{
+ dequeue_rt_stack(rt_se);
+
+ for_each_sched_rt_entity(rt_se) {
+ struct rt_rq *rt_rq = group_rt_rq(rt_se);
+
+ if (rt_rq && rt_rq->rt_nr_running)
+ __enqueue_rt_entity(rt_se);
}
}
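
dequeue_rt_stack() has to run top-down because a queued parent entity's priority depends on its children, yet for_each_sched_rt_entity() walks from the task's entity up toward the root. The back pointers recorded in the first loop let the second loop replay the chain in reverse. A self-contained toy version of that pattern (hypothetical types and names, not the kernel's):

	#include <stdio.h>

	struct entity {
		const char *name;
		struct entity *parent;	/* hierarchy is walked child -> parent */
		struct entity *back;	/* temporary link for the reverse pass */
	};

	/*
	 * Same shape as dequeue_rt_stack(): record the chain while walking
	 * bottom-up, then follow the back pointers so parents are visited
	 * before their children.
	 */
	static void visit_top_down(struct entity *se, void (*fn)(struct entity *))
	{
		struct entity *back = NULL;

		for (; se; se = se->parent) {	/* stand-in for for_each_sched_rt_entity() */
			se->back = back;
			back = se;
		}

		for (se = back; se; se = se->back)
			fn(se);
	}

	static void show(struct entity *se)
	{
		printf("dequeue %s\n", se->name);
	}

	int main(void)
	{
		struct entity root  = { "root",  NULL,   NULL };
		struct entity group = { "group", &root,  NULL };
		struct entity task  = { "task",  &group, NULL };

		visit_top_down(&task, show);	/* prints: root, group, task */
		return 0;
	}
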
@@ -506,36 +531,15 @@ static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
if (wakeup)
rt_se->timeout = 0;
- dequeue_rt_stack(p);
-
- /*
- * enqueue everybody, bottom - up.
- */
- for_each_sched_rt_entity(rt_se)
- enqueue_rt_entity(rt_se);
-
- inc_cpu_load(rq, p->se.load.weight);
+ enqueue_rt_entity(rt_se);
}
static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
{
struct sched_rt_entity *rt_se = &p->rt;
- struct rt_rq *rt_rq;
update_curr_rt(rq);
-
- dequeue_rt_stack(p);
-
- /*
- * re-enqueue all non-empty rt_rq entities.
- */
- for_each_sched_rt_entity(rt_se) {
- rt_rq = group_rt_rq(rt_se);
- if (rt_rq && rt_rq->rt_nr_running)
- enqueue_rt_entity(rt_se);
- }
-
- dec_cpu_load(rq, p->se.load.weight);
+ dequeue_rt_entity(rt_se);
}
/*
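
Taken together, the hunks above reduce the per-task class hooks to thin wrappers around the new entity helpers. Roughly, the resulting call flow (a summary of the code above, not an excerpt):

	/*
	 * enqueue_task_rt(rq, p, wakeup)
	 *   -> enqueue_rt_entity(&p->rt)
	 *        -> dequeue_rt_stack(&p->rt)   take the whole hierarchy off, top-down
	 *        -> __enqueue_rt_entity(...)   put every level back, bottom-up
	 *
	 * dequeue_task_rt(rq, p, sleep)
	 *   -> dequeue_rt_entity(&p->rt)
	 *        -> dequeue_rt_stack(&p->rt)
	 *        -> __enqueue_rt_entity(...)   only for group levels that still
	 *                                      have runnable entities
	 */
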
@@ -546,8 +550,10 @@ static
void requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
{
struct rt_prio_array *array = &rt_rq->active;
+ struct list_head *queue = array->queue + rt_se_prio(rt_se);
- list_move_tail(&rt_se->run_list, array->queue + rt_se_prio(rt_se));
+ if (on_rt_rq(rt_se))
+ list_move_tail(&rt_se->run_list, queue);
}
static void requeue_task_rt(struct rq *rq, struct task_struct *p)
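
The on_rt_rq() guard added above matters because requeue_task_rt() (cut off here) applies requeue_rt_entity() to every level of the task's hierarchy, and after the earlier hunks a group entity may legitimately be off the run queue (throttled or empty); moving an unqueued entity's run_list would corrupt the priority array. A paraphrased sketch of that caller, assuming it simply walks the hierarchy (hypothetical name, may differ in detail from the tree):

	static void requeue_task_rt_sketch(struct rq *rq, struct task_struct *p)
	{
		struct sched_rt_entity *rt_se = &p->rt;

		/* rq kept only to mirror the real hook's signature;
		 * unqueued levels are now skipped inside requeue_rt_entity() */
		for_each_sched_rt_entity(rt_se)
			requeue_rt_entity(rt_rq_of_se(rt_se), rt_se);
	}
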