From d89e588ca4081615216cc25f2489b0281ac0bfe9 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Mon, 5 Sep 2016 11:37:53 +0200
Subject: locking: Introduce smp_mb__after_spinlock()

Since its inception, our understanding of ACQUIRE, esp. as applied to
spinlocks, has changed somewhat. Also, I wonder if, with a simple change,
we cannot make it provide more.

The problem with the comment is that the STORE done by spin_lock isn't
itself ordered by the ACQUIRE, and therefore a later LOAD can pass over it
and cross with any prior STORE, rendering the default WMB insufficient
(pointed out by Alan).

Now, this is only really a problem on PowerPC and ARM64, both of which
already defined smp_mb__before_spinlock() as an smp_mb().

At the same time, we can get a much stronger construct if we place that
same barrier _inside_ the spin_lock(). In that case we upgrade the RCpc
spinlock to an RCsc one. That would make all schedule() calls fully
transitive against one another.

Signed-off-by: Peter Zijlstra (Intel)
Acked-by: Will Deacon
Cc: Alan Stern
Cc: Benjamin Herrenschmidt
Cc: Linus Torvalds
Cc: Michael Ellerman
Cc: Nicholas Piggin
Cc: Oleg Nesterov
Cc: Paul McKenney
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Signed-off-by: Ingo Molnar
---
 include/linux/spinlock.h | 36 ++++++++++++++++++++++++++++++++++++
 1 file changed, 36 insertions(+)

(limited to 'include/linux/spinlock.h')

diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index d9510e8522d4..840281095933 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -130,6 +130,42 @@ do { \
 #define smp_mb__before_spinlock()	smp_wmb()
 #endif
 
+/*
+ * This barrier must provide two things:
+ *
+ *   - it must guarantee a STORE before the spin_lock() is ordered against a
+ *     LOAD after it, see the comments at its two usage sites.
+ *
+ *   - it must ensure the critical section is RCsc.
+ *
+ * The latter is important for cases where we observe values written by other
+ * CPUs in spin-loops, without barriers, while being subject to scheduling.
+ *
+ * CPU0                 CPU1                    CPU2
+ *
+ *                      for (;;) {
+ *                        if (READ_ONCE(X))
+ *                          break;
+ *                      }
+ * X=1
+ *                      <sched-out>
+ *                                              <sched-in>
+ *                                              r = X;
+ *
+ * without transitivity it could be that CPU1 observes X!=0 breaks the loop,
+ * we get migrated and CPU2 sees X==0.
+ *
+ * Since most load-store architectures implement ACQUIRE with an smp_mb() after
+ * the LL/SC loop, they need no further barriers. Similarly all our TSO
+ * architectures imply an smp_mb() for each atomic instruction and equally don't
+ * need more.
+ *
+ * Architectures that can implement ACQUIRE better need to take care.
+ */
+#ifndef smp_mb__after_spinlock
+#define smp_mb__after_spinlock()	do { } while (0)
+#endif
+
 /**
  * raw_spin_unlock_wait - wait until the spinlock gets unlocked
  * @lock: the spinlock in question.
--
cgit v1.2.3
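To make the first requirement above concrete, here is a minimal store-buffering
sketch of the intended usage, assuming the usual kernel primitives
(WRITE_ONCE/READ_ONCE, smp_mb()). The variables X and Y, the lock s and the two
per-CPU functions are invented for illustration and are not taken from the patch:

	#include <linux/compiler.h>
	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(s);
	static int X, Y;

	/* Runs on one CPU: STORE to X before the lock, LOAD of Y inside it. */
	static int cpu0(void)
	{
		int r1;

		WRITE_ONCE(X, 1);
		spin_lock(&s);
		smp_mb__after_spinlock();	/* order the prior STORE against the later LOAD */
		r1 = READ_ONCE(Y);
		spin_unlock(&s);

		return r1;
	}

	/* Runs on another CPU: STORE to Y, full barrier, LOAD of X. */
	static int cpu1(void)
	{
		int r0;

		WRITE_ONCE(Y, 1);
		smp_mb();
		r0 = READ_ONCE(X);

		return r0;
	}

The ACQUIRE from spin_lock() alone would not forbid the LOAD of Y in cpu0() from
being satisfied before the STORE to X becomes visible; with
smp_mb__after_spinlock() upgrading the lock to a full barrier, the outcome
r0 == 0 && r1 == 0 is forbidden.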
From a9668cd6ee288c4838bc668880ac085be551cac2 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Wed, 7 Jun 2017 17:51:27 +0200
Subject: locking: Remove smp_mb__before_spinlock()

Now that there are no users of smp_mb__before_spinlock() left, remove
it entirely.

Signed-off-by: Peter Zijlstra (Intel)
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Signed-off-by: Ingo Molnar
---
 include/linux/spinlock.h | 13 -------------
 1 file changed, 13 deletions(-)

(limited to 'include/linux/spinlock.h')

diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 840281095933..4e8cce19b507 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -117,19 +117,6 @@ do { \
 #endif /*arch_spin_is_contended*/
 #endif
 
-/*
- * Despite its name it doesn't necessarily has to be a full barrier.
- * It should only guarantee that a STORE before the critical section
- * can not be reordered with LOADs and STOREs inside this section.
- * spin_lock() is the one-way barrier, this LOAD can not escape out
- * of the region. So the default implementation simply ensures that
- * a STORE can not move into the critical section, smp_wmb() should
- * serialize it with another STORE done by spin_lock().
- */
-#ifndef smp_mb__before_spinlock
-#define smp_mb__before_spinlock()	smp_wmb()
-#endif
-
 /*
  * This barrier must provide two things:
  *
--
cgit v1.2.3
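For reference, the shape of the conversions that left smp_mb__before_spinlock()
without users follows from the first patch: the barrier moves from in front of
the lock acquisition to just after it. The sketch below is illustrative only;
the lock name and the critical-section placeholder are invented, and the
old-style function no longer builds once this removal is applied:

	#include <linux/spinlock.h>

	static DEFINE_RAW_SPINLOCK(lock);

	/* Old pattern, before this series: a write barrier ahead of the ACQUIRE. */
	static void old_pattern(void)
	{
		smp_mb__before_spinlock();	/* defaulted to smp_wmb() */
		raw_spin_lock(&lock);
		/* ... critical section ... */
		raw_spin_unlock(&lock);
	}

	/* New pattern: a full barrier inside the lock, making the section RCsc. */
	static void new_pattern(void)
	{
		raw_spin_lock(&lock);
		smp_mb__after_spinlock();
		/* ... critical section ... */
		raw_spin_unlock(&lock);
	}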