author     Peter Zijlstra <peterz@infradead.org>    2013-01-22 13:09:13 +0530
committer  Gleb Natapov <gleb@redhat.com>           2013-01-29 15:38:37 +0200
commit     7b270f609982f68f2433442bf167f735e7364b06
tree       07430a425125c1938fad3befc6debec667310ec4 /kernel
parent     c7c9c56ca26f7b9458711b2d78b60b60e0d38ba7
sched: Bail out of yield_to when source and target runqueues have one task
In undercommitted scenarios, especially with large guests, yield_to overhead is significantly high. When the run queue length of both source and target is one, take the opportunity to bail out early and return -ESRCH. This return condition can be further exploited to quickly come out of the PLE handler.

(History: Raghavendra initially worked on breaking out of the KVM PLE handler upon seeing source run queue length = 1, but that approach had to export the rq length. Peter came up with the elegant idea of returning -ESRCH from the scheduler core.)

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
[Raghavendra: added the check on the target vcpu's rq length (thanks Avi)]
Reviewed-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Signed-off-by: Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com>
Acked-by: Andrew Jones <drjones@redhat.com>
Tested-by: Chegu Vinod <chegu_vinod@hp.com>
Signed-off-by: Gleb Natapov <gleb@redhat.com>
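The -ESRCH return is what lets a pause-loop-exit (PLE) style handler stop scanning for boost candidates early instead of walking every vcpu. Below is a minimal caller-side sketch of that pattern; it is illustrative only, not the actual KVM code: candidate_task(), nr_candidates and PLE_TRIES are hypothetical names, and it assumes the tri-state result reaches the caller as an int (yield_to() itself is still declared bool in this patch).

/*
 * Illustrative sketch of a PLE-style caller consuming the new
 * tri-state contract. candidate_task(), nr_candidates and
 * PLE_TRIES are hypothetical, not actual KVM symbols.
 */
static void ple_handler_sketch(void)
{
	int tries = PLE_TRIES;
	int i;

	for (i = 0; i < nr_candidates; i++) {
		int ret = yield_to(candidate_task(i), true);

		if (ret > 0)
			break;	/* boosted a target: done */
		if (ret < 0 && !--tries)
			break;	/* -ESRCH: likely undercommitted, stop scanning */
		/* ret == 0: could not boost this target, try the next one */
	}
}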
Diffstat (limited to 'kernel')
 kernel/sched/core.c | 25 +++++++++++++++++++------
 1 file changed, 19 insertions(+), 6 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 0533496b6228..01edad9b5d71 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4316,7 +4316,10 @@ EXPORT_SYMBOL(yield);
  * It's the caller's job to ensure that the target task struct
  * can't go away on us before we can do any checks.
  *
- * Returns true if we indeed boosted the target task.
+ * Returns:
+ *	true (>0) if we indeed boosted the target task.
+ *	false (0) if we failed to boost the target.
+ *	-ESRCH if there's no task to yield to.
  */
 bool __sched yield_to(struct task_struct *p, bool preempt)
 {
@@ -4330,6 +4333,15 @@ bool __sched yield_to(struct task_struct *p, bool preempt)
 again:
 	p_rq = task_rq(p);
 
+	/*
+	 * If we're the only runnable task on the rq and target rq also
+	 * has only one task, there's absolutely no point in yielding.
+	 */
+	if (rq->nr_running == 1 && p_rq->nr_running == 1) {
+		yielded = -ESRCH;
+		goto out_irq;
+	}
+
 	double_rq_lock(rq, p_rq);
 	while (task_rq(p) != p_rq) {
 		double_rq_unlock(rq, p_rq);
@@ -4337,13 +4349,13 @@ again:
 	}
 
 	if (!curr->sched_class->yield_to_task)
-		goto out;
+		goto out_unlock;
 
 	if (curr->sched_class != p->sched_class)
-		goto out;
+		goto out_unlock;
 
 	if (task_running(p_rq, p) || p->state)
-		goto out;
+		goto out_unlock;
 
 	yielded = curr->sched_class->yield_to_task(rq, p, preempt);
 	if (yielded) {
@@ -4356,11 +4368,12 @@ again:
 		resched_task(p_rq->curr);
 	}
 
-out:
+out_unlock:
 	double_rq_unlock(rq, p_rq);
+out_irq:
 	local_irq_restore(flags);
 
-	if (yielded)
+	if (yielded > 0)
 		schedule();
 
 	return yielded;
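A note on the placement of the new check: it runs before double_rq_lock(), so the nr_running reads are not serialized against either run queue. That is deliberate. A stale read can only produce a missed bail-out (we fall through to the locked path as before) or a spurious -ESRCH (the caller simply stops boosting a little early); neither affects correctness. In the undercommitted single-task case the bail-out skips the double run-queue lock entirely, which is where the yield_to overhead described in the commit message is saved.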