author     Frederic Weisbecker <frederic@kernel.org>  2019-11-08 17:08:56 +0100
committer  Ingo Molnar <mingo@kernel.org>             2019-11-11 09:03:31 +0100
commit     25269871db1ad0cbbaafd5098cbdb40c8db4ccb9 (patch)
tree       4c8022165bdc48b42d3c62991640a84b3a4ff8f4 /kernel
parent     153bedbac2ebd475e1c7c2d2fa0c042f5525927d (diff)
irq_work: Fix irq_work_claim() memory ordering
When irq_work_claim() finds the IRQ_WORK_PENDING flag already set, we just
return and don't raise a new IPI. We expect the destination to see and handle
our latest updates thanks to the pairing atomic_xchg() in irq_work_run_list().
But cmpxchg() doesn't guarantee a full memory barrier upon failure, so it's
possible that the destination misses our latest updates.

So use atomic_fetch_or() instead: it is unconditionally fully ordered, does
exactly what we want here, and simplifies the code.

Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: https://lkml.kernel.org/r/20191108160858.31665-3-frederic@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
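For readers less familiar with the ordering rules involved: a failed cmpxchg()
only gives the (weaker) ordering of its failure case, whereas a fetch-or is an
unconditional read-modify-write and is therefore fully ordered on every path,
including the early "already pending" return. Below is a minimal user-space
sketch of the two schemes using C11 atomics rather than the kernel's atomic_t
API; the function names are made up for illustration and the flag values only
mirror the kernel's layout, so treat this as a sketch, not the actual patch.

#include <stdatomic.h>
#include <stdbool.h>

#define IRQ_WORK_PENDING	(1 << 0)
#define IRQ_WORK_BUSY		(1 << 1)
#define IRQ_WORK_CLAIMED	(IRQ_WORK_PENDING | IRQ_WORK_BUSY)

/*
 * Old scheme: a compare-and-exchange loop.  When the CAS fails we only get
 * the failure ordering (relaxed here, standing in for "no ordering on
 * failure"), so the early "already pending" return is not preceded by a
 * full barrier.
 */
static bool claim_with_cmpxchg(_Atomic int *flags)
{
	int oflags = atomic_load_explicit(flags, memory_order_relaxed)
		     & ~IRQ_WORK_PENDING;

	for (;;) {
		int nflags = oflags | IRQ_WORK_CLAIMED;

		/* on failure, 'oflags' is updated to the value actually seen */
		if (atomic_compare_exchange_strong_explicit(flags, &oflags, nflags,
				memory_order_seq_cst,	/* success ordering */
				memory_order_relaxed))	/* failure ordering: no barrier */
			return true;
		if (oflags & IRQ_WORK_PENDING)
			return false;	/* reached without full ordering */
	}
}

/*
 * New scheme: an unconditional fetch-or.  The read-modify-write happens on
 * every path, so the "already pending" return is fully ordered as well.
 */
static bool claim_with_fetch_or(_Atomic int *flags)
{
	int oflags = atomic_fetch_or_explicit(flags, IRQ_WORK_CLAIMED,
					      memory_order_seq_cst);

	return !(oflags & IRQ_WORK_PENDING);
}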
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/irq_work.c  22
1 file changed, 7 insertions, 15 deletions
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index df0dbf4d859b..255454a48346 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -29,24 +29,16 @@ static DEFINE_PER_CPU(struct llist_head, lazy_list);
  */
 static bool irq_work_claim(struct irq_work *work)
 {
-	int flags, oflags, nflags;
+	int oflags;
 
+	oflags = atomic_fetch_or(IRQ_WORK_CLAIMED, &work->flags);
 	/*
-	 * Start with our best wish as a premise but only trust any
-	 * flag value after cmpxchg() result.
+	 * If the work is already pending, no need to raise the IPI.
+	 * The pairing atomic_xchg() in irq_work_run() makes sure
+	 * everything we did before is visible.
 	 */
-	flags = atomic_read(&work->flags) & ~IRQ_WORK_PENDING;
-	for (;;) {
-		nflags = flags | IRQ_WORK_CLAIMED;
-		oflags = atomic_cmpxchg(&work->flags, flags, nflags);
-		if (oflags == flags)
-			break;
-		if (oflags & IRQ_WORK_PENDING)
-			return false;
-		flags = oflags;
-		cpu_relax();
-	}
-
+	if (oflags & IRQ_WORK_PENDING)
+		return false;
 	return true;
 }
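The new comment points at the other half of the pairing: the consumer clears
IRQ_WORK_PENDING with a fully ordered xchg before running the callback, and
that is what makes the claimer's earlier stores visible. A hedged sketch of
that producer/consumer pairing, again with user-space C11 atomics; the struct,
the queue_work()/run_work() names and the payload field are invented for
illustration and are not the kernel's irq_work API.

#include <stdatomic.h>

#define IRQ_WORK_PENDING	(1 << 0)
#define IRQ_WORK_BUSY		(1 << 1)
#define IRQ_WORK_CLAIMED	(IRQ_WORK_PENDING | IRQ_WORK_BUSY)

struct work_item {
	_Atomic int flags;
	void (*func)(struct work_item *);
	int payload;		/* data the claimer wants the runner to see */
};

/*
 * Claimer: publish the data, then claim.  Because the fetch-or is fully
 * ordered, the payload store cannot be reordered past it; if the work was
 * already pending, the runner's later exchange is guaranteed to make the
 * new payload visible before the callback runs.
 */
static void queue_work(struct work_item *w, int data)
{
	w->payload = data;
	if (atomic_fetch_or_explicit(&w->flags, IRQ_WORK_CLAIMED,
				     memory_order_seq_cst) & IRQ_WORK_PENDING)
		return;		/* already queued; no new IPI needed */
	/* ...otherwise enqueue the work and raise the IPI here... */
}

/*
 * Runner: clear PENDING with a fully ordered exchange before calling the
 * callback, mirroring the kernel's pattern in irq_work_run_list().  The
 * exchange pairs with the claimer's fetch-or, so everything written before
 * the claim is visible here.
 */
static void run_work(struct work_item *w)
{
	int flags = atomic_load_explicit(&w->flags, memory_order_relaxed)
		    & ~IRQ_WORK_PENDING;

	atomic_exchange_explicit(&w->flags, flags, memory_order_seq_cst);
	w->func(w);		/* may now rely on seeing the latest payload */
}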