path: root/kernel/task_work.c
author    Oleg Nesterov <oleg@redhat.com>    2020-02-18 16:50:18 +0100
committer Jens Axboe <axboe@kernel.dk>       2020-03-02 14:06:33 -0700
commit    6fb614920b38bbf3c1c7fcd944c6d9b5d746103d (patch)
tree      07e72f16b7e06d68a6cdfc915dcf4289e188f2bf /kernel/task_work.c
parent    3684f24653534c71c7dc9f44d7281a838f4e4979 (diff)
task_work_run: don't take ->pi_lock unconditionally
As Peter pointed out, task_work_run() can avoid ->pi_lock and cmpxchg() if task->task_works == NULL && !PF_EXITING.

In fact, the only reason task_work_run() needs ->pi_lock is the possible race with task_work_cancel(); we can optimize this code and make the locking clearer.

Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
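For reference, here is a minimal user-space sketch of the pattern the patch below leaves behind. It is not kernel code: C11 atomics stand in for READ_ONCE()/cmpxchg(), a pthread mutex stands in for ->pi_lock, and every identifier (run_works, pending_works, cancel_lock, and so on) is hypothetical and chosen only for this illustration. The runner claims the whole list with a compare-exchange, touches the lock only after it has actually dequeued work, and skips both the compare-exchange and the lock entirely when the list is empty and the task is not exiting.

/*
 * Minimal user-space model of the locking pattern in this patch; NOT
 * kernel code.  All identifiers below are hypothetical.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stddef.h>

struct callback {
	struct callback *next;
	void (*func)(struct callback *);
};

static struct callback work_exited;             /* sentinel: no new work accepted */
static _Atomic(struct callback *) pending_works;
static pthread_mutex_t cancel_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_bool exiting;

static void run_works(void)
{
	struct callback *work, *head, *next;

	for (;;) {
		/*
		 * Claim the whole list with a compare-exchange.  Install the
		 * work_exited sentinel only when the list is empty and we are
		 * exiting; if it is empty and we are not exiting, return
		 * without doing the compare-exchange or taking the lock.
		 */
		do {
			head = NULL;
			work = atomic_load(&pending_works);
			if (!work) {
				if (atomic_load(&exiting))
					head = &work_exited;
				else
					return;         /* fast path: no CAS, no lock */
			}
		} while (!atomic_compare_exchange_weak(&pending_works, &work, head));

		if (!work)
			return;
		/*
		 * Synchronize with a canceller: it cannot remove the first
		 * entry (its compare-exchange on the list head fails once we
		 * have swapped the list out), but it may still be unlinking a
		 * later ->next entry under the lock, so wait for it to finish.
		 */
		pthread_mutex_lock(&cancel_lock);
		pthread_mutex_unlock(&cancel_lock);

		do {
			next = work->next;
			work->func(work);
			work = next;
		} while (work);
	}
}

Taking and immediately dropping cancel_lock mirrors the empty raw_spin_lock_irq()/raw_spin_unlock_irq() pair the patch adds: the runner never needs to hold the lock while executing callbacks, it only has to wait out a canceller that may still be unlinking an entry from the list it just claimed.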
Diffstat (limited to 'kernel/task_work.c')
-rw-r--r--  kernel/task_work.c  18
1 file changed, 14 insertions(+), 4 deletions(-)
diff --git a/kernel/task_work.c b/kernel/task_work.c
index 0fef395662a6..825f28259a19 100644
--- a/kernel/task_work.c
+++ b/kernel/task_work.c
@@ -97,16 +97,26 @@ void task_work_run(void)
		 * work->func() can do task_work_add(), do not set
		 * work_exited unless the list is empty.
		 */
-		raw_spin_lock_irq(&task->pi_lock);
		do {
+			head = NULL;
			work = READ_ONCE(task->task_works);
-			head = !work && (task->flags & PF_EXITING) ?
-				&work_exited : NULL;
+			if (!work) {
+				if (task->flags & PF_EXITING)
+					head = &work_exited;
+				else
+					break;
+			}
		} while (cmpxchg(&task->task_works, work, head) != work);
-		raw_spin_unlock_irq(&task->pi_lock);
		if (!work)
			break;
+		/*
+		 * Synchronize with task_work_cancel(). It can not remove
+		 * the first entry == work, cmpxchg(task_works) must fail.
+		 * But it can remove another entry from the ->next list.
+		 */
+		raw_spin_lock_irq(&task->pi_lock);
+		raw_spin_unlock_irq(&task->pi_lock);
		do {
			next = work->next;