author		Uros Bizjak <ubizjak@gmail.com>			2022-08-23 17:26:32 +0200
committer	Andrew Morton <akpm@linux-foundation.org>	2022-09-11 21:55:10 -0700
commit		5fdfa161b2043001f82cbce49e87e8e9f581d510 (patch)
tree		e3326441233ff70650de4244502dd08ea438cdd1 /kernel/task_work.c
parent		977bbf4385fc64986f22b024858071a35c481a8a (diff)
download	linux-5fdfa161b2043001f82cbce49e87e8e9f581d510.tar.gz
		linux-5fdfa161b2043001f82cbce49e87e8e9f581d510.tar.bz2
		linux-5fdfa161b2043001f82cbce49e87e8e9f581d510.zip
task_work: use try_cmpxchg in task_work_add, task_work_cancel_match and task_work_run
Use try_cmpxchg() instead of the cmpxchg(*ptr, old, new) == old pattern in
task_work_add(), task_work_cancel_match() and task_work_run().  The x86
CMPXCHG instruction returns success in the ZF flag, so this change saves a
compare after cmpxchg (and the related move instruction in front of
cmpxchg).  In addition, try_cmpxchg() implicitly assigns the old *ptr value
to "old" when cmpxchg fails, which enables further simplification: the
patch avoids an extra memory read when cmpxchg fails.

Link: https://lkml.kernel.org/r/20220823152632.4517-1-ubizjak@gmail.com
Signed-off-by: Uros Bizjak <ubizjak@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
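[Editor's note] To make the conversion concrete, the same before/after pattern can be
sketched in user space with C11 atomics: atomic_compare_exchange_strong() has
try_cmpxchg()-style semantics (it returns a boolean and, on failure, writes the
observed value back into "expected").  The helpers below are illustrative only,
not kernel API:

#include <stdatomic.h>

/* cmpxchg()-style helper: return the value seen in *ptr before the attempt. */
static long cmpxchg_style(_Atomic long *ptr, long old, long new)
{
	atomic_compare_exchange_strong(ptr, &old, new);
	return old;			/* on failure, holds the observed value */
}

/* Before: compare the returned value, re-read *ptr on every retry. */
static void store_old_style(_Atomic long *ptr, long new)
{
	long old;

	do {
		old = atomic_load(ptr);
	} while (cmpxchg_style(ptr, old, new) != old);
}

/* After: a failed exchange already refreshed "old", so just retry. */
static void store_new_style(_Atomic long *ptr, long new)
{
	long old = atomic_load(ptr);

	while (!atomic_compare_exchange_strong(ptr, &old, new))
		;			/* "old" now holds the current value */
}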
Diffstat (limited to 'kernel/task_work.c')
-rw-r--r--	kernel/task_work.c	16
1 file changed, 9 insertions(+), 7 deletions(-)
diff --git a/kernel/task_work.c b/kernel/task_work.c
index dff75bcde151..065e1ef8fc8d 100644
--- a/kernel/task_work.c
+++ b/kernel/task_work.c
@@ -47,12 +47,12 @@ int task_work_add(struct task_struct *task, struct callback_head *work,
 	/* record the work call stack in order to print it in KASAN reports */
 	kasan_record_aux_stack(work);
 
+	head = READ_ONCE(task->task_works);
 	do {
-		head = READ_ONCE(task->task_works);
 		if (unlikely(head == &work_exited))
 			return -ESRCH;
 		work->next = head;
-	} while (cmpxchg(&task->task_works, head, work) != head);
+	} while (!try_cmpxchg(&task->task_works, &head, work));
 
 	switch (notify) {
 	case TWA_NONE:
@@ -100,10 +100,12 @@ task_work_cancel_match(struct task_struct *task,
 	 * we raced with task_work_run(), *pprev == NULL/exited.
 	 */
 	raw_spin_lock_irqsave(&task->pi_lock, flags);
-	while ((work = READ_ONCE(*pprev))) {
-		if (!match(work, data))
+	work = READ_ONCE(*pprev);
+	while (work) {
+		if (!match(work, data)) {
 			pprev = &work->next;
-		else if (cmpxchg(pprev, work, work->next) == work)
+			work = READ_ONCE(*pprev);
+		} else if (try_cmpxchg(pprev, &work, work->next))
 			break;
 	}
 	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
@@ -151,16 +153,16 @@ void task_work_run(void)
 		 * work->func() can do task_work_add(), do not set
 		 * work_exited unless the list is empty.
 		 */
+		work = READ_ONCE(task->task_works);
 		do {
 			head = NULL;
-			work = READ_ONCE(task->task_works);
 			if (!work) {
 				if (task->flags & PF_EXITING)
 					head = &work_exited;
 				else
 					break;
 			}
-		} while (cmpxchg(&task->task_works, work, head) != work);
+		} while (!try_cmpxchg(&task->task_works, &work, head));
 
 		if (!work)
 			break;
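[Editor's note] For reference, the task_work_add() retry loop above is the standard
lock-free list push.  A minimal user-space sketch of the same shape, with C11
atomics standing in for READ_ONCE()/try_cmpxchg(); the node type and function
name are made up for illustration:

#include <stdatomic.h>

struct node {
	struct node *next;
};

/*
 * Push "work" onto the singly linked list headed by *list: read the head
 * once up front, link the new node in front of it, and rely on the failed
 * compare-exchange to refresh "head" before the next attempt, so there is
 * no extra read of *list per iteration.
 */
static void push(_Atomic(struct node *) *list, struct node *work)
{
	struct node *head = atomic_load(list);

	do {
		work->next = head;
	} while (!atomic_compare_exchange_strong(list, &head, work));
}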