author | Peter Zijlstra <peterz@infradead.org> | 2021-09-22 10:14:15 +0200
---|---|---
committer | Peter Zijlstra <peterz@infradead.org> | 2021-10-07 13:51:15 +0200
commit | f6ac18fafcf6cc5e41c26766d12ad335ed81012e (patch) |
tree | 96579fe0d912dab753b914131e9a21b5c0b03c28 /kernel/sched/core.c |
parent | 769fdf83df57b373660343ef4270b3ada91ef434 (diff) |
sched: Improve try_invoke_on_locked_down_task()
Clarify and tighten try_invoke_on_locked_down_task().
Basically the function calls @func under task_rq_lock(), except it
avoids taking rq->lock when possible.
This makes calling @func unconditional (the function will get renamed
in a later patch to remove the try).
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Vasily Gorbik <gor@linux.ibm.com>
Tested-by: Vasily Gorbik <gor@linux.ibm.com> # on s390
Link: https://lkml.kernel.org/r/20210929152428.589323576@infradead.org
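For readers unfamiliar with the helper, here is a minimal caller sketch (illustrative only, not part of this patch: the callback and caller names are hypothetical, loosely modeled on how the RCU stall-warning code uses this helper, and it assumes the declaration lives in include/linux/wait.h):

```c
#include <linux/sched.h>
#include <linux/printk.h>
#include <linux/wait.h>	/* assumed home of try_invoke_on_locked_down_task() */

/*
 * Hypothetical callback: runs while @t is pinned, under t->pi_lock and
 * possibly rq->lock, so it must be cheap and must not sleep.
 */
static bool get_task_state_cb(struct task_struct *t, void *arg)
{
	unsigned int *state = arg;

	*state = READ_ONCE(t->__state);
	return true;	/* after this patch: passed back to the caller as-is */
}

/* Hypothetical caller: snapshot a task's state while it cannot change. */
static void report_task_state(struct task_struct *p)
{
	unsigned int state;

	if (try_invoke_on_locked_down_task(p, get_task_state_cb, &state))
		pr_info("%s: state %#x\n", p->comm, state);
}
```

With this patch the callback is always invoked, so the return value is simply whatever @func returns rather than a lock-acquisition status.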
Diffstat (limited to 'kernel/sched/core.c')
-rw-r--r-- | kernel/sched/core.c | 63 |
1 file changed, 39 insertions(+), 24 deletions(-)
```diff
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index e47d7e55c46a..8a9aeac3469f 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4115,41 +4115,56 @@ out:
  * @func: Function to invoke.
  * @arg: Argument to function.
  *
- * If the specified task can be quickly locked into a definite state
- * (either sleeping or on a given runqueue), arrange to keep it in that
- * state while invoking @func(@arg). This function can use ->on_rq and
- * task_curr() to work out what the state is, if required. Given that
- * @func can be invoked with a runqueue lock held, it had better be quite
- * lightweight.
+ * Fix the task in it's current state by avoiding wakeups and or rq operations
+ * and call @func(@arg) on it. This function can use ->on_rq and task_curr()
+ * to work out what the state is, if required. Given that @func can be invoked
+ * with a runqueue lock held, it had better be quite lightweight.
  *
  * Returns:
- *	@false if the task slipped out from under the locks.
- *	@true if the task was locked onto a runqueue or is sleeping.
- *	However, @func can override this by returning @false.
+ *	Whatever @func returns
  */
 bool try_invoke_on_locked_down_task(struct task_struct *p, bool (*func)(struct task_struct *t, void *arg), void *arg)
 {
+	struct rq *rq = NULL;
+	unsigned int state;
 	struct rq_flags rf;
 	bool ret = false;
-	struct rq *rq;
 
 	raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
-	if (p->on_rq) {
+
+	state = READ_ONCE(p->__state);
+
+	/*
+	 * Ensure we load p->on_rq after p->__state, otherwise it would be
+	 * possible to, falsely, observe p->on_rq == 0.
+	 *
+	 * See try_to_wake_up() for a longer comment.
+	 */
+	smp_rmb();
+
+	/*
+	 * Since pi->lock blocks try_to_wake_up(), we don't need rq->lock when
+	 * the task is blocked. Make sure to check @state since ttwu() can drop
+	 * locks at the end, see ttwu_queue_wakelist().
+	 */
+	if (state == TASK_RUNNING || state == TASK_WAKING || p->on_rq)
 		rq = __task_rq_lock(p, &rf);
-		if (task_rq(p) == rq)
-			ret = func(p, arg);
+
+	/*
+	 * At this point the task is pinned; either:
+	 *  - blocked and we're holding off wakeups (pi->lock)
+	 *  - woken, and we're holding off enqueue (rq->lock)
+	 *  - queued, and we're holding off schedule (rq->lock)
+	 *  - running, and we're holding off de-schedule (rq->lock)
+	 *
+	 * The called function (@func) can use: task_curr(), p->on_rq and
+	 * p->__state to differentiate between these states.
+	 */
+	ret = func(p, arg);
+
+	if (rq)
 		rq_unlock(rq, &rf);
-	} else {
-		switch (READ_ONCE(p->__state)) {
-		case TASK_RUNNING:
-		case TASK_WAKING:
-			break;
-		default:
-			smp_rmb(); // See smp_rmb() comment in try_to_wake_up().
-			if (!p->on_rq)
-				ret = func(p, arg);
-		}
-	}
+
 	raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags);
 	return ret;
 }
```
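As a hedged illustration of the "task is pinned" comment in the new code (the callback name is hypothetical, not from the patch): a @func passed to this helper can combine task_curr(), p->on_rq and p->__state to tell the pinned cases apart, for example:

```c
#include <linux/sched.h>

/*
 * Hypothetical sketch: classify the pinned task from inside @func,
 * using only the three signals the pinning comment names.
 */
static bool classify_pinned_cb(struct task_struct *t, void *arg)
{
	const char **desc = arg;

	if (task_curr(t))
		*desc = "running";	/* de-schedule held off by rq->lock */
	else if (t->on_rq)
		*desc = "queued";	/* schedule held off by rq->lock */
	else if (READ_ONCE(t->__state) == TASK_WAKING)
		*desc = "woken";	/* enqueue held off by rq->lock */
	else
		*desc = "blocked";	/* wakeups held off by t->pi_lock */

	return true;
}
```

Because the task cannot change state while @func runs, these checks are stable for the duration of the call; the same snapshot would be racy if taken without the pi_lock/rq->lock pinning the helper provides.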