author     Gregory Haskins <ghaskins@novell.com>    2008-01-25 21:08:12 +0100
committer  Ingo Molnar <mingo@elte.hu>              2008-01-25 21:08:12 +0100
commit     a22d7fc187ed996b66d8439db27b2303f79a8e7b (patch)
tree       44845eaac2aa44b185d0663d689fea29d94ea5ff /kernel
parent     6e1254d2c41215da27025add8900ed187bca121d (diff)
sched: wake-balance fixes
We have logic to detect whether the system has migratable tasks, but we are not using it when deciding whether to push tasks away. Teach the push paths to consult that information, so a push is only attempted when it can actually succeed.

Signed-off-by: Gregory Haskins <ghaskins@novell.com>
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
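In effect, the patch introduces a per-runqueue flag that records whether a push could possibly succeed, and gates every push attempt on it. The standalone model below illustrates the pattern; the struct layouts and the push body are simplified stand-ins, not the kernel's actual definitions:

#include <stdio.h>

struct rt_rq {
        unsigned long rt_nr_migratory;  /* queued RT tasks allowed on other CPUs */
        unsigned long rt_nr_running;    /* queued RT tasks in total */
        int overloaded;                 /* 1 when a push could succeed */
};

struct rq {
        struct rt_rq rt;
};

/*
 * Models the new early-out in push_rt_task(): with no migratable
 * surplus there is nothing worth pushing, so skip the task scan.
 */
static int push_rt_task(struct rq *rq)
{
        if (!rq->rt.overloaded)
                return 0;
        /* ... pick the next-highest RT task and migrate it ... */
        return 1;
}

int main(void)
{
        /*
         * Two RT tasks queued, but neither may migrate: the old
         * rt_nr_running > 1 heuristic would still have attempted a
         * push; the overloaded flag avoids the wasted work.
         */
        struct rq rq = { .rt = { .rt_nr_migratory = 0,
                                 .rt_nr_running = 2,
                                 .overloaded = 0 } };

        printf("pushed: %d\n", push_rt_task(&rq));
        return 0;
}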
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched.c     |  2 ++
-rw-r--r--  kernel/sched_rt.c  | 10 ++++++++--
2 files changed, 10 insertions(+), 2 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 3344ba776b97..c591abd9ca38 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -346,6 +346,7 @@ struct rt_rq {
         unsigned long rt_nr_migratory;
         /* highest queued rt task prio */
         int highest_prio;
+        int overloaded;
 };
 
 /*
@@ -6770,6 +6771,7 @@ void __init sched_init(void)
                 rq->migration_thread = NULL;
                 INIT_LIST_HEAD(&rq->migration_queue);
                 rq->rt.highest_prio = MAX_RT_PRIO;
+                rq->rt.overloaded = 0;
 #endif
                 atomic_set(&rq->nr_iowait, 0);
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index a9d7d4408160..87d7b3ff3861 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -16,6 +16,7 @@ static inline cpumask_t *rt_overload(void)
 }
 static inline void rt_set_overload(struct rq *rq)
 {
+        rq->rt.overloaded = 1;
         cpu_set(rq->cpu, rt_overload_mask);
         /*
          * Make sure the mask is visible before we set
@@ -32,6 +33,7 @@ static inline void rt_clear_overload(struct rq *rq)
         /* the order here really doesn't matter */
         atomic_dec(&rto_count);
         cpu_clear(rq->cpu, rt_overload_mask);
+        rq->rt.overloaded = 0;
 }
 
 static void update_rt_migration(struct rq *rq)
@@ -448,6 +450,9 @@ static int push_rt_task(struct rq *rq)
         assert_spin_locked(&rq->lock);
 
+        if (!rq->rt.overloaded)
+                return 0;
+
         next_task = pick_next_highest_task_rt(rq, -1);
         if (!next_task)
                 return 0;
@@ -675,7 +680,7 @@ static void schedule_tail_balance_rt(struct rq *rq)
          * the lock was owned by prev, we need to release it
          * first via finish_lock_switch and then reacquire it here.
          */
-        if (unlikely(rq->rt.rt_nr_running > 1)) {
+        if (unlikely(rq->rt.overloaded)) {
                 spin_lock_irq(&rq->lock);
                 push_rt_tasks(rq);
                 spin_unlock_irq(&rq->lock);
@@ -687,7 +692,8 @@ static void wakeup_balance_rt(struct rq *rq, struct task_struct *p)
 {
         if (unlikely(rt_task(p)) &&
             !task_running(rq, p) &&
-            (p->prio >= rq->curr->prio))
+            (p->prio >= rq->rt.highest_prio) &&
+            rq->rt.overloaded)
                 push_rt_tasks(rq);
 }