author     Frederic Weisbecker <fweisbec@gmail.com>  2013-04-20 15:15:35 +0200
committer  Frederic Weisbecker <fweisbec@gmail.com>  2013-04-22 20:08:04 +0200
commit     ce831b38ca4920739a7a5b0c73b921da41f03718
tree       492802655004af21c748daaf08f55e14068ceb13 /kernel/sched
parent     9f3660c2c1a221c886474587103c69f6034d3e4f
sched: New helper to prevent stopping the tick in full dynticks
Provide a new helper to be called from the full dynticks engine before stopping the tick, to make sure the tick is not stopped while more than one task is running on the CPU. This keeps the tick alive to maintain fairness between runnable tasks.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Chris Metcalf <cmetcalf@tilera.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Geoff Levand <geoff@infradead.org>
Cc: Gilad Ben Yossef <gilad@benyossef.com>
Cc: Hakan Akkan <hakanakkan@gmail.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Kevin Hilman <khilman@linaro.org>
Cc: Li Zhong <zhong@linux.vnet.ibm.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Paul Gortmaker <paul.gortmaker@windriver.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
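For context, a rough sketch of how a full dynticks caller might consult this helper before shutting the tick down. The caller name can_stop_full_tick() and the other checks are illustrative assumptions, not part of this patch:

extern bool sched_can_stop_tick(void);

/*
 * Illustrative sketch only: one plausible shape for the full dynticks
 * engine's pre-stop check. Everything except sched_can_stop_tick()
 * is assumed for the example.
 */
static bool can_stop_full_tick(void)
{
	/* Keep the tick when the scheduler still needs it for fairness */
	if (!sched_can_stop_tick())
		return false;

	/* ... further subsystem checks (e.g. perf, RCU) would go here ... */

	return true;
}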
Diffstat (limited to 'kernel/sched')
-rw-r--r--  kernel/sched/core.c | 18
1 file changed, 18 insertions(+), 0 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 0f0a5b3fd62c..69f71335984f 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -650,6 +650,24 @@ static inline bool got_nohz_idle_kick(void)
#endif /* CONFIG_NO_HZ_COMMON */
+#ifdef CONFIG_NO_HZ_FULL
+bool sched_can_stop_tick(void)
+{
+	struct rq *rq;
+
+	rq = this_rq();
+
+	/* Make sure rq->nr_running update is visible after the IPI */
+	smp_rmb();
+
+	/* More than one running task need preemption */
+	if (rq->nr_running > 1)
+		return false;
+
+	return true;
+}
+#endif /* CONFIG_NO_HZ_FULL */
+
void sched_avg_update(struct rq *rq)
{
s64 period = sched_avg_period();
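
A note on the smp_rmb() in the new helper: the read barrier is meant to pair with the ordering on the enqueue side, where rq->nr_running is updated before the full dynticks CPU is kicked with an IPI, so the receiving CPU is guaranteed to observe the new task count. The enqueue-side sequence below is an assumed sketch for illustration, not code from this patch:

/*
 * Assumed ordering sketch (enqueue side is illustrative):
 *
 *   CPU A (enqueues a task)            CPU B (full dynticks)
 *   -----------------------            ---------------------
 *   rq->nr_running++;                  receives the IPI
 *   <ordering implied by the IPI>      smp_rmb();
 *   smp_send_reschedule(B);            sees rq->nr_running > 1,
 *                                      keeps the tick running
 */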