author     Gregory Haskins <ghaskins@novell.com>   2008-04-23 07:13:29 -0400
committer  Ingo Molnar <mingo@elte.hu>             2008-05-05 23:56:18 +0200
commit     8ae121ac8666b0421aa20fd80d4597ec66fa54bc (patch)
tree       1db411965b1b12029d501ea10ab90fbc8aa4ec62
parent     983ed7a66bcec9dc307d89dc7af47cdf209e56af (diff)
sched: fix RT task-wakeup logic
Dmitry Adamushko pointed out a logic error in task_wake_up_rt() where the
check will always evaluate to "true".  You can find the thread here:

http://lkml.org/lkml/2008/4/22/296

In reality, we only want to try to push tasks away when a wake-up request is
not going to preempt the current task.  So let's fix it.

Note: we introduce test_tsk_need_resched() instead of open-coding the flag
check so that the merge conflict with -rt should help remind us that we may
need to support NEEDS_RESCHED_DELAYED in the future, too.

Signed-off-by: Gregory Haskins <ghaskins@novell.com>
CC: Dmitry Adamushko <dmitry.adamushko@gmail.com>
CC: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
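For context, a minimal userspace sketch of the problem described above, using made-up toy types rather than the kernel's: by the time task_wake_up_rt() runs, the woken task p has already been enqueued, so rq->rt.highest_prio already accounts for p, and the old check (p->prio >= rq->rt.highest_prio) can never reject a wakeup. That reading of the linked thread is an interpretation; the sketch only demonstrates the comparison itself.

/*
 * Toy model (hypothetical types, not the kernel's) of the old wake-up check.
 * Lower numeric prio means higher RT priority.
 */
#include <assert.h>
#include <stdio.h>

struct toy_task  { int prio; };
struct toy_rt_rq { int highest_prio; };	/* numerically lowest prio queued */

/* Enqueue p: the tracked highest_prio is updated before the wake-up hook runs. */
static void toy_enqueue(struct toy_rt_rq *rt, struct toy_task *p)
{
	if (p->prio < rt->highest_prio)
		rt->highest_prio = p->prio;
}

int main(void)
{
	struct toy_rt_rq rt   = { .highest_prio = 50 };	/* e.g. the running task's prio */
	struct toy_task  low  = { .prio = 80 };		/* would not preempt */
	struct toy_task  high = { .prio = 10 };		/* would preempt */

	toy_enqueue(&rt, &low);
	assert(low.prio >= rt.highest_prio);	/* 80 >= 50: old check passes */

	toy_enqueue(&rt, &high);
	assert(high.prio >= rt.highest_prio);	/* 10 >= 10: old check still passes */

	printf("the old condition held in both cases, so it gated nothing\n");
	return 0;
}

The patch below therefore asks whether the wakeup has already marked rq->curr for rescheduling, via the new test_tsk_need_resched(rq->curr), and only pushes tasks away when it has not.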
-rw-r--r--  include/linux/sched.h  7
-rw-r--r--  kernel/sched_rt.c      7
2 files changed, 11 insertions, 3 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 03c238088aee..698b5a4d25a7 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1977,6 +1977,11 @@ static inline void clear_tsk_need_resched(struct task_struct *tsk)
 	clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
 }
 
+static inline int test_tsk_need_resched(struct task_struct *tsk)
+{
+	return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
+}
+
 static inline int signal_pending(struct task_struct *p)
 {
 	return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
@@ -1991,7 +1996,7 @@ static inline int fatal_signal_pending(struct task_struct *p)
 
 static inline int need_resched(void)
 {
-	return unlikely(test_thread_flag(TIF_NEED_RESCHED));
+	return unlikely(test_tsk_need_resched(current));
 }
 
 /*
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index dcd649588593..060e87b0cb1c 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -1098,11 +1098,14 @@ static void post_schedule_rt(struct rq *rq)
 	}
 }
 
-
+/*
+ * If we are not running and we are not going to reschedule soon, we should
+ * try to push tasks away now
+ */
 static void task_wake_up_rt(struct rq *rq, struct task_struct *p)
 {
 	if (!task_running(rq, p) &&
-	    (p->prio >= rq->rt.highest_prio) &&
+	    !test_tsk_need_resched(rq->curr) &&
 	    rq->rt.overloaded)
 		push_rt_tasks(rq);
 }