author    Steven Rostedt <srostedt@redhat.com>  2008-01-25 21:08:13 +0100
committer Ingo Molnar <mingo@elte.hu>           2008-01-25 21:08:13 +0100
commit    610bf05645a7ac6ea104a474e328eeaaea148870 (patch)
tree      fb0a1b2969e0cdfb466f362d0a3bd636b0ec5ba0 /kernel
parent    06f90dbd7610d51549004ea9c2ada337831eb292 (diff)
sched: RT-balance, optimize cpu search
This patch removes several cpumask operations by keeping track of the first of the CPUs that is of the lowest priority. When the search for the lowest-priority runqueue is completed, all the bits up to the first CPU with the lowest-priority runqueue are cleared.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
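To make the idea concrete, here is a minimal user-space sketch of the same single-pass search; it is illustrative only, not the kernel code: a uint64_t stands in for cpumask_t, a prio[] array stands in for rq->rt.highest_prio, and the non-RT early-return path is omitted. All names here are assumptions of the sketch, not kernel APIs.

/*
 * Rather than clearing and rebuilding the mask every time a new low
 * is found, remember the first CPU holding the current lowest
 * priority and strip the stale low-order bits in one pass at the end.
 */
#include <stdint.h>
#include <stdio.h>

#define NCPUS 8

static int find_lowest_cpus(const int *prio, uint64_t *lowest_mask)
{
	int lowest_prio = -1;
	int lowest_cpu  = -1;
	int count       = 0;
	int cpu;

	for (cpu = 0; cpu < NCPUS; cpu++) {
		if (!(*lowest_mask & (1ULL << cpu)))
			continue;		/* CPU not in the allowed mask */

		if (prio[cpu] > lowest_prio) {
			/* new low: remember where it starts, restart count */
			lowest_prio = prio[cpu];
			lowest_cpu  = cpu;
			count = 1;
		} else if (prio[cpu] == lowest_prio) {
			count++;
		} else {
			/* higher prio than the current low: drop it now */
			*lowest_mask &= ~(1ULL << cpu);
		}
	}

	/* clear the leftover bits below the first lowest-prio CPU */
	if (lowest_cpu > 0)
		*lowest_mask &= ~((1ULL << lowest_cpu) - 1);

	return count;
}

int main(void)
{
	/* larger number == lower RT priority, as with rq->rt.highest_prio */
	int prio[NCPUS] = { 10, 30, 20, 30, 5, 30, 20, 10 };
	uint64_t mask = 0xff;		/* all eight CPUs allowed */

	printf("count=%d ", find_lowest_cpus(prio, &mask));
	printf("mask=%#llx\n", (unsigned long long)mask);
	/* prints count=3 mask=0x2a: CPUs 1, 3 and 5 share the low prio,
	 * and the stale bit for CPU 0 is stripped by the final pass */
	return 0;
}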
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched_rt.c | 49
1 file changed, 36 insertions(+), 13 deletions(-)
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 52d88f193afc..61d198845f00 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -296,29 +296,36 @@ static struct task_struct *pick_next_highest_task_rt(struct rq *rq,
 }
 
 static DEFINE_PER_CPU(cpumask_t, local_cpu_mask);
-static DEFINE_PER_CPU(cpumask_t, valid_cpu_mask);
 
 static int find_lowest_cpus(struct task_struct *task, cpumask_t *lowest_mask)
 {
-	int       cpu;
-	cpumask_t *valid_mask = &__get_cpu_var(valid_cpu_mask);
 	int       lowest_prio = -1;
+	int       lowest_cpu  = -1;
 	int       count       = 0;
+	int       cpu;
 
-	cpus_clear(*lowest_mask);
-	cpus_and(*valid_mask, cpu_online_map, task->cpus_allowed);
+	cpus_and(*lowest_mask, cpu_online_map, task->cpus_allowed);
 
 	/*
 	 * Scan each rq for the lowest prio.
 	 */
-	for_each_cpu_mask(cpu, *valid_mask) {
+	for_each_cpu_mask(cpu, *lowest_mask) {
 		struct rq *rq = cpu_rq(cpu);
 
 		/* We look for lowest RT prio or non-rt CPU */
 		if (rq->rt.highest_prio >= MAX_RT_PRIO) {
-			if (count)
+			/*
+			 * if we already found a low RT queue
+			 * and now we found this non-rt queue
+			 * clear the mask and set our bit.
+			 * Otherwise just return the queue as is
+			 * and the count==1 will cause the algorithm
+			 * to use the first bit found.
+			 */
+			if (lowest_cpu != -1) {
 				cpus_clear(*lowest_mask);
-			cpu_set(rq->cpu, *lowest_mask);
+				cpu_set(rq->cpu, *lowest_mask);
+			}
 			return 1;
 		}
@@ -328,13 +335,29 @@ static int find_lowest_cpus(struct task_struct *task, cpumask_t *lowest_mask)
 			if (rq->rt.highest_prio > lowest_prio) {
 				/* new low - clear old data */
 				lowest_prio = rq->rt.highest_prio;
-				if (count) {
-					cpus_clear(*lowest_mask);
-					count = 0;
-				}
+				lowest_cpu = cpu;
+				count = 0;
 			}
-			cpu_set(rq->cpu, *lowest_mask);
 			count++;
-		}
+		} else
+			cpu_clear(cpu, *lowest_mask);
 	}
+
+	/*
+	 * Clear out all the set bits that represent
+	 * runqueues that were of higher prio than
+	 * the lowest_prio.
+	 */
+	if (lowest_cpu > 0) {
+		/*
+		 * Perhaps we could add another cpumask op to
+		 * zero out bits. Like cpu_zero_bits(cpumask, nrbits);
+		 * Then that could be optimized to use memset and such.
+		 */
+		for_each_cpu_mask(cpu, *lowest_mask) {
+			if (cpu >= lowest_cpu)
+				break;
+			cpu_clear(cpu, *lowest_mask);
+		}
+	}
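As an aside on the cpu_zero_bits() idea floated in the comment inside that last hunk: no such op existed at the time, but a rough sketch, assuming a mask stored as an array of unsigned longs the way kernel bitmaps are, might look like the following. The helper and its name are hypothetical, not a real kernel API.

/*
 * Hypothetical helper along the lines the comment suggests.  Clears
 * bits [0, nrbits) of a bitmap stored as an array of unsigned longs:
 * full words via memset, then a mask for the partial word at the
 * boundary.
 */
#include <string.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))

static void cpu_zero_bits(unsigned long *map, unsigned int nrbits)
{
	unsigned int whole = nrbits / BITS_PER_LONG;	/* full words to zero */
	unsigned int rem   = nrbits % BITS_PER_LONG;	/* bits in last word */

	memset(map, 0, whole * sizeof(unsigned long));
	if (rem)
		map[whole] &= ~0UL << rem;	/* keep bits >= nrbits */
}

Under that assumption, the cleanup loop above could collapse into a single call, e.g. cpu_zero_bits(lowest_mask->bits, lowest_cpu), at the cost of adding a new cpumask primitive.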