author     Ingo Molnar <mingo@elte.hu>    2008-01-25 21:08:17 +0100
committer  Ingo Molnar <mingo@elte.hu>    2008-01-25 21:08:17 +0100
commit     80bf3171dcdf0f5d236e2e48afe2a95c7ce23879
tree       91cc16d8b91fd669ef234ab231895779907c1a31  /kernel/sched_rt.c
parent     00597c3ed78e424bdafff123565c078d8b6088cf
sched: clean up pull_rt_task()
Clean up pull_rt_task(): consolidate the local variable declarations, add braces around the if/else branches that carry multi-line comments, merge the two adjacent comment blocks, and rename the bail label to out. No change in functionality.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched_rt.c')
-rw-r--r--  kernel/sched_rt.c | 22 ++++++++++------------
1 file changed, 10 insertions(+), 12 deletions(-)
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index cc38521c5723..05ada7d44800 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -576,12 +576,9 @@ static void push_rt_tasks(struct rq *rq)
 
 static int pull_rt_task(struct rq *this_rq)
 {
-	struct task_struct *next;
-	struct task_struct *p;
+	int this_cpu = this_rq->cpu, ret = 0, cpu;
+	struct task_struct *p, *next;
 	struct rq *src_rq;
-	int this_cpu = this_rq->cpu;
-	int cpu;
-	int ret = 0;
 
 	/*
 	 * If cpusets are used, and we have overlapping
@@ -608,23 +605,25 @@ static int pull_rt_task(struct rq *this_rq)
 			if (double_lock_balance(this_rq, src_rq)) {
 				/* unlocked our runqueue lock */
 				struct task_struct *old_next = next;
+
 				next = pick_next_task_rt(this_rq);
 				if (next != old_next)
 					ret = 1;
 			}
-			if (likely(src_rq->rt.rt_nr_running <= 1))
+			if (likely(src_rq->rt.rt_nr_running <= 1)) {
 				/*
 				 * Small chance that this_rq->curr changed
 				 * but it's really harmless here.
 				 */
 				rt_clear_overload(this_rq);
-			else
+			} else {
 				/*
 				 * Heh, the src_rq is now overloaded, since
 				 * we already have the src_rq lock, go straight
 				 * to pulling tasks from it.
 				 */
 				goto try_pulling;
+			}
 			spin_unlock(&src_rq->lock);
 			continue;
 		}
@@ -638,6 +637,7 @@ static int pull_rt_task(struct rq *this_rq)
 		 */
 		if (double_lock_balance(this_rq, src_rq)) {
 			struct task_struct *old_next = next;
+
 			next = pick_next_task_rt(this_rq);
 			if (next != old_next)
 				ret = 1;
@@ -674,7 +674,7 @@ static int pull_rt_task(struct rq *this_rq)
 			 */
 			if (p->prio < src_rq->curr->prio ||
 			    (next && next->prio < src_rq->curr->prio))
-				goto bail;
+				goto out;
 
 			ret = 1;
 
@@ -686,9 +686,7 @@ static int pull_rt_task(struct rq *this_rq)
 			 * case there's an even higher prio task
 			 * in another runqueue. (low likelyhood
 			 * but possible)
-			 */
-
-			/*
+			 *
 			 * Update next so that we won't pick a task
 			 * on another cpu with a priority lower (or equal)
 			 * than the one we just picked.
@@ -696,7 +694,7 @@ static int pull_rt_task(struct rq *this_rq)
 			next = p;
 		}
 
- bail:
+ out:
 		spin_unlock(&src_rq->lock);
 	}
 
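For readers skimming the diff, the one structural (rather than purely textual) change is adding braces around if/else branches whose bodies are a block comment plus a single statement. Below is a minimal, self-contained sketch of that style in plain C, not the scheduler code; balance(), clear_overload(), pull_tasks() and nr_running are made-up stand-ins, not kernel APIs.

/*
 * Sketch of the brace style the patch applies: when an if/else branch
 * is a block comment plus one statement, both branches get braces so
 * the branch boundaries stay obvious.
 */
#include <stdio.h>

static void clear_overload(void)
{
        printf("overload cleared\n");
}

static void pull_tasks(void)
{
        printf("pulling tasks\n");
}

static void balance(int nr_running)
{
        if (nr_running <= 1) {
                /*
                 * Nothing left to pull from this runqueue;
                 * just drop the overload marker.
                 */
                clear_overload();
        } else {
                /*
                 * Still overloaded: go straight to pulling tasks
                 * (the real code does this with goto try_pulling).
                 */
                pull_tasks();
        }
}

int main(void)
{
        balance(1);
        balance(3);
        return 0;
}

Without the braces the original code still compiles, since the comments are not statements; the change is purely about readability.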