author     Vincent Guittot <vincent.guittot@linaro.org>   2018-06-26 15:53:22 +0200
committer  Ingo Molnar <mingo@kernel.org>                 2018-07-03 09:17:28 +0200
commit     296b2ffe7fa9ed756c41415c6b1512bc4ad687b1 (patch)
tree       11267161f1a90159ac9a47dcf48dbadfd44ef9cc
parent     d9c0ffcabd6aae7ff1e34e8078354c13bb9f1183 (diff)
sched/rt: Fix call to cpufreq_update_util()
With commit:

  8f111bc357aa ("cpufreq/schedutil: Rewrite CPUFREQ_RT support")

the schedutil governor uses rq->rt.rt_nr_running to detect whether an
RT task is currently running on the CPU and to set the frequency to max
if necessary.

cpufreq_update_util() is called in enqueue/dequeue_top_rt_rq(), but
rq->rt.rt_nr_running has not been updated yet when dequeue_top_rt_rq()
is called, so schedutil still considers that an RT task is running when
the last task is dequeued. The update of rq->rt.rt_nr_running happens
later, in dequeue_rt_stack().

In fact, we can take advantage of the fact that RT entities are dequeued
and then re-enqueued whenever an RT task is enqueued or dequeued. As a
result, enqueue_top_rt_rq() is always called when a task is enqueued or
dequeued, and also when groups are throttled or unthrottled. The only
place that does not use enqueue_top_rt_rq() is when the root rt_rq is
throttled.

Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: efault@gmx.de
Cc: juri.lelli@redhat.com
Cc: patrick.bellasi@arm.com
Cc: viresh.kumar@linaro.org
Fixes: 8f111bc357aa ("cpufreq/schedutil: Rewrite CPUFREQ_RT support")
Link: http://lkml.kernel.org/r/1530021202-21695-1-git-send-email-vincent.guittot@linaro.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
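To make the ordering problem concrete, here is a minimal userspace model
(all names are illustrative, not kernel code) of the pre-patch behaviour:
the governor hook fires from dequeue_top_rt_rq() while the RT task count
is still non-zero, because the counter only drops later in
dequeue_rt_stack():

#include <stdio.h>

/* Toy model of the pre-patch ordering; names are hypothetical. */
static unsigned int rt_nr_running = 1;  /* one RT task on the rq */

static void governor_update(void)
{
        /* Pre-patch schedutil check: any RT task => max frequency. */
        if (rt_nr_running)
                printf("governor: RT task seen, frequency pinned to max\n");
        else
                printf("governor: no RT task, frequency from utilization\n");
}

static void dequeue_last_rt_task(void)
{
        governor_update();      /* hook fires from dequeue_top_rt_rq()... */
        rt_nr_running--;        /* ...but the counter only drops later,
                                 * in dequeue_rt_stack() */
}

int main(void)
{
        dequeue_last_rt_task(); /* governor wrongly keeps max frequency */
        return 0;
}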
-rw-r--r--   kernel/sched/cpufreq_schedutil.c   2
-rw-r--r--   kernel/sched/rt.c                  16
-rw-r--r--   kernel/sched/sched.h               5
3 files changed, 16 insertions(+), 7 deletions(-)
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 3cde46483f0a..c907fde01eaa 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -192,7 +192,7 @@ static unsigned long sugov_aggregate_util(struct sugov_cpu *sg_cpu)
 {
        struct rq *rq = cpu_rq(sg_cpu->cpu);
 
-       if (rq->rt.rt_nr_running)
+       if (rt_rq_is_runnable(&rq->rt))
                return sg_cpu->max;
 
        /*
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 47556b0c9a95..572567078b60 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -508,8 +508,11 @@ static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
 
        rt_se = rt_rq->tg->rt_se[cpu];
 
-       if (!rt_se)
+       if (!rt_se) {
                dequeue_top_rt_rq(rt_rq);
+               /* Kick cpufreq (see the comment in kernel/sched/sched.h). */
+               cpufreq_update_util(rq_of_rt_rq(rt_rq), 0);
+       }
        else if (on_rt_rq(rt_se))
                dequeue_rt_entity(rt_se, 0);
 }
@@ -1001,8 +1004,6 @@ dequeue_top_rt_rq(struct rt_rq *rt_rq)
        sub_nr_running(rq, rt_rq->rt_nr_running);
        rt_rq->rt_queued = 0;
 
-       /* Kick cpufreq (see the comment in kernel/sched/sched.h). */
-       cpufreq_update_util(rq, 0);
 }
 
 static void
@@ -1014,11 +1015,14 @@ enqueue_top_rt_rq(struct rt_rq *rt_rq)
 
        if (rt_rq->rt_queued)
                return;
-       if (rt_rq_throttled(rt_rq) || !rt_rq->rt_nr_running)
+
+       if (rt_rq_throttled(rt_rq))
                return;
 
-       add_nr_running(rq, rt_rq->rt_nr_running);
-       rt_rq->rt_queued = 1;
+       if (rt_rq->rt_nr_running) {
+               add_nr_running(rq, rt_rq->rt_nr_running);
+               rt_rq->rt_queued = 1;
+       }
 
        /* Kick cpufreq (see the comment in kernel/sched/sched.h). */
        cpufreq_update_util(rq, 0);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 6601baf2361c..27ddec334601 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -609,6 +609,11 @@ struct rt_rq {
 #endif
 };
 
+static inline bool rt_rq_is_runnable(struct rt_rq *rt_rq)
+{
+       return rt_rq->rt_queued && rt_rq->rt_nr_running;
+}
+
 /* Deadline class' related fields in a runqueue */
 struct dl_rq {
        /* runqueue is an rbtree, ordered by deadline */