author     Gregory Haskins <ghaskins@novell.com>   2008-01-25 21:08:10 +0100
committer  Ingo Molnar <mingo@elte.hu>             2008-01-25 21:08:10 +0100
commit     07b4032c9e505e2a1fbe7703aff64a153c3249be
tree       8b797b9cf80bc4f683a75557ff85ff385544cd74 /kernel/sched_rt.c
parent     e7693a362ec84bb5b6fd441d8a8b4b9d568a7a0c
sched: break out search for RT tasks
Isolate the search logic into a function so that it can be used later
in places other than find_lock_lowest_rq().
Signed-off-by: Gregory Haskins <ghaskins@novell.com>
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched_rt.c')
-rw-r--r--  kernel/sched_rt.c  66
1 files changed, 39 insertions, 27 deletions
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 5de1aebdbd1b..ffd02720b58f 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -263,54 +263,66 @@ static struct task_struct *pick_next_highest_task_rt(struct rq *rq,
 
 static DEFINE_PER_CPU(cpumask_t, local_cpu_mask);
 
-/* Will lock the rq it finds */
-static struct rq *find_lock_lowest_rq(struct task_struct *task,
-                                      struct rq *this_rq)
+static int find_lowest_rq(struct task_struct *task)
 {
-        struct rq *lowest_rq = NULL;
         int cpu;
-        int tries;
         cpumask_t *cpu_mask = &__get_cpu_var(local_cpu_mask);
+        struct rq *lowest_rq = NULL;
 
         cpus_and(*cpu_mask, cpu_online_map, task->cpus_allowed);
 
-        for (tries = 0; tries < RT_MAX_TRIES; tries++) {
-                /*
-                 * Scan each rq for the lowest prio.
-                 */
-                for_each_cpu_mask(cpu, *cpu_mask) {
-                        struct rq *rq = &per_cpu(runqueues, cpu);
+        /*
+         * Scan each rq for the lowest prio.
+         */
+        for_each_cpu_mask(cpu, *cpu_mask) {
+                struct rq *rq = cpu_rq(cpu);
 
-                        if (cpu == this_rq->cpu)
-                                continue;
+                if (cpu == rq->cpu)
+                        continue;
 
-                        /* We look for lowest RT prio or non-rt CPU */
-                        if (rq->rt.highest_prio >= MAX_RT_PRIO) {
-                                lowest_rq = rq;
-                                break;
-                        }
+                /* We look for lowest RT prio or non-rt CPU */
+                if (rq->rt.highest_prio >= MAX_RT_PRIO) {
+                        lowest_rq = rq;
+                        break;
+                }
 
-                        /* no locking for now */
-                        if (rq->rt.highest_prio > task->prio &&
-                            (!lowest_rq || rq->rt.highest_prio > lowest_rq->rt.highest_prio)) {
-                                lowest_rq = rq;
-                        }
+                /* no locking for now */
+                if (rq->rt.highest_prio > task->prio &&
+                    (!lowest_rq || rq->rt.highest_prio > lowest_rq->rt.highest_prio)) {
+                        lowest_rq = rq;
                 }
+        }
+
+        return lowest_rq ? lowest_rq->cpu : -1;
+}
+
+/* Will lock the rq it finds */
+static struct rq *find_lock_lowest_rq(struct task_struct *task,
+                                      struct rq *rq)
+{
+        struct rq *lowest_rq = NULL;
+        int cpu;
+        int tries;
 
-                if (!lowest_rq)
+        for (tries = 0; tries < RT_MAX_TRIES; tries++) {
+                cpu = find_lowest_rq(task);
+
+                if (cpu == -1)
                         break;
 
+                lowest_rq = cpu_rq(cpu);
+
                 /* if the prio of this runqueue changed, try again */
-                if (double_lock_balance(this_rq, lowest_rq)) {
+                if (double_lock_balance(rq, lowest_rq)) {
                         /*
                          * We had to unlock the run queue. In
                          * the mean time, task could have
                          * migrated already or had its affinity changed.
                          * Also make sure that it wasn't scheduled on its rq.
                          */
-                        if (unlikely(task_rq(task) != this_rq ||
+                        if (unlikely(task_rq(task) != rq ||
                                      !cpu_isset(lowest_rq->cpu, task->cpus_allowed) ||
-                                     task_running(this_rq, task) ||
+                                     task_running(rq, task) ||
                                      !task->se.on_rq)) {
                                 spin_unlock(&lowest_rq->lock);
                                 lowest_rq = NULL;
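
The gain from the split, in miniature: once the unlocked scan is a standalone helper, a caller that only needs a candidate CPU (for example a wakeup or push decision) can reuse it without taking any remote runqueue lock, while find_lock_lowest_rq() keeps the double_lock_balance()/retry policy around it. The fragment below is an illustrative sketch only and is not part of this patch; select_cpu_for_push() is a made-up name, whereas find_lowest_rq() and task_cpu() are the helpers visible above.

/*
 * Illustrative sketch only -- not from this patch.  A hypothetical
 * caller that just wants a candidate CPU reuses the unlocked search
 * by itself; no remote rq->lock is taken here.
 */
static int select_cpu_for_push(struct task_struct *p)
{
        int cpu = find_lowest_rq(p);    /* -1 means no suitable rq found */

        if (cpu == -1)
                cpu = task_cpu(p);      /* fall back to the task's current CPU */

        return cpu;
}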