path: root/kernel/time
author    Marcelo Tosatti <mtosatti@redhat.com>    2021-05-13 01:29:22 +0200
committer Ingo Molnar <mingo@kernel.org>    2021-05-13 14:21:22 +0200
commit    a1dfb6311c7739e21e160bc4c5575a1b21b48c87 (patch)
tree      131ee3864f47b6ddd8976ece6a0911c5c9d5b06c /kernel/time
parent    1e4ca26d367ae71743e25068e5cd8750ef3f5f7d (diff)
tick/nohz: Kick only _queued_ task whose tick dependency is updated
When the tick dependency of a task is updated, we want it to acknowledge the new state and restart the tick if needed. If the task is not running, we don't need to kick it because it will observe the new dependency upon scheduling in. But if the task is running, we may need to send an IPI to it so that it gets notified.

Unfortunately we don't have the means to check if a task is running in a race-free way. Checking p->on_cpu in a way that is synchronized against p->tick_dep_mask would imply adding a full barrier between prepare_task_switch() and tick_nohz_task_switch(), which we want to avoid in this fast path. Therefore we blindly fire an IPI to the task's CPU.

Meanwhile we can check if the task is queued on the CPU's rq, because p->on_rq is always set to TASK_ON_RQ_QUEUED _before_ schedule() and its full barrier that precedes tick_nohz_task_switch(). And if the task is queued on a nohz_full CPU, it also has a fair chance of being running, as the isolation constraints prescribe running a single task on full dynticks CPUs.

So use this as a trick to check whether we can spare an IPI toward a non-running task.

NOTE: For the ordering to be correct, it is assumed that we never deactivate a task while it is running, the only exception being the task deactivating itself while scheduling out.

Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Link: https://lore.kernel.org/r/20210512232924.150322-9-frederic@kernel.org
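To make the ordering argument above concrete, here is a minimal userspace sketch using C11 atomics (illustrative names only, not the kernel code): the dependency-setter side relies on atomic_fetch_or() acting as a full barrier before it loads on_rq, while the schedule-in side stores on_rq and executes a full barrier before it loads the dependency mask. With both sides fully ordered, the store-buffering outcome where neither side observes the other's store is forbidden, so a task can never end up both un-kicked and unaware of the new dependency.

/*
 * Userspace analogue of the pairing described in the changelog.
 * tick_dep_mask / on_rq mimic p->tick_dep_mask / p->on_rq; the kernel
 * primitives (smp_mb__after_spin_lock(), the IPI) are only stand-ins.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_uint tick_dep_mask;       /* stands in for p->tick_dep_mask */
static atomic_int  on_rq;               /* stands in for p->on_rq */

static bool kick_needed;        /* setter decided an "IPI" is required */
static bool dep_seen;           /* "task" observed the new dependency  */

static void *dep_setter(void *arg)
{
        /* Publish the new dependency bit; atomic_fetch_or() is a full barrier. */
        atomic_fetch_or(&tick_dep_mask, 1);
        /* LOAD p->on_rq: only kick (send an IPI) if the task is queued. */
        kick_needed = atomic_load(&on_rq) != 0;
        return NULL;
}

static void *schedule_in(void *arg)
{
        /* STORE p->on_rq, as activate_task() does before schedule(). */
        atomic_store(&on_rq, 1);
        /* Full barrier, mirroring smp_mb__after_spin_lock() in __schedule(). */
        atomic_thread_fence(memory_order_seq_cst);
        /* LOAD p->tick_dep_mask, as tick_nohz_task_switch() does. */
        dep_seen = atomic_load(&tick_dep_mask) != 0;
        return NULL;
}

int main(void)
{
        pthread_t a, b;

        pthread_create(&a, NULL, dep_setter, NULL);
        pthread_create(&b, NULL, schedule_in, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);

        /* The outcome kick_needed == 0 && dep_seen == 0 cannot happen. */
        printf("kick_needed=%d dep_seen=%d\n", kick_needed, dep_seen);
        return 0;
}

The remaining cost of this scheme is a possibly spurious IPI when the queued task is not actually running, which the changelog accepts as cheaper than adding a barrier to the context-switch path.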
Diffstat (limited to 'kernel/time')
-rw-r--r--    kernel/time/tick-sched.c    19
1 file changed, 17 insertions(+), 2 deletions(-)
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index acbe6722cf75..197a3bd882ad 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -324,14 +324,28 @@ void tick_nohz_full_kick_cpu(int cpu)
 static void tick_nohz_kick_task(struct task_struct *tsk)
 {
-        int cpu = task_cpu(tsk);
+        int cpu;
+
+        /*
+         * If the task is not running, run_posix_cpu_timers()
+         * has nothing to elapse, IPI can then be spared.
+         *
+         * activate_task()                      STORE p->tick_dep_mask
+         *   STORE p->on_rq
+         * __schedule() (switch to task 'p')    smp_mb() (atomic_fetch_or())
+         *   LOCK rq->lock                      LOAD p->on_rq
+         *   smp_mb__after_spin_lock()
+         *   tick_nohz_task_switch()
+         *     LOAD p->tick_dep_mask
+         */
+        if (!sched_task_on_rq(tsk))
+                return;
         /*
          * If the task concurrently migrates to another CPU,
          * we guarantee it sees the new tick dependency upon
          * schedule.
          *
-         *
          * set_task_cpu(p, cpu);
          *   STORE p->cpu = @cpu
          * __schedule() (switch to task 'p')
@@ -340,6 +354,7 @@ static void tick_nohz_kick_task(struct task_struct *tsk)
          * tick_nohz_task_switch()            smp_mb() (atomic_fetch_or())
          *    LOAD p->tick_dep_mask           LOAD p->cpu
          */
+        cpu = task_cpu(tsk);
         preempt_disable();
         if (cpu_online(cpu))