author     Palmer Dabbelt <palmer@sifive.com>    2017-12-05 17:48:11 -0800
committer  Palmer Dabbelt <palmer@dabbelt.com>   2017-12-11 07:51:07 -0800
commit     3cfa5008081db845c6c53d531ec34e9c84a9fd99 (patch)
tree       805a705c5ab4544fe0bb8402a21433805a502f7e /arch/riscv
parent     86ad5c97ce5ccdda1459d35370fd5e105721bb8d (diff)
RISC-V: Resurrect smp_mb__after_spinlock()
I removed this last week because of an incorrect comment: smp_mb__after_spinlock()
is actually still used, and is necessary on RISC-V. It's been resurrected, with a
comment that describes what it actually does this time.

Thanks to Andrea for finding the bug!

Fixes: 3343eb6806f3 ("RISC-V: Remove smb_mb__{before,after}_spinlock()")
CC: Andrea Parri <parri.andrea@gmail.com>
Signed-off-by: Palmer Dabbelt <palmer@sifive.com>
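For context, here is a minimal sketch of the scheduler pattern the commit message
alludes to: the lock acquisition alone only gives RCpc ordering on RISC-V, and the
barrier upgrades the critical section to RCsc. This is illustrative only, modeled
on the ordering documented in include/linux/spinlock.h, not copied from the
scheduler; the function name is hypothetical.

	/* Illustrative sketch only: the use of p->pi_lock mirrors the
	 * wakeup-path pattern, but this is not the actual kernel code. */
	static void example_wakeup_path(struct task_struct *p)
	{
		unsigned long flags;

		raw_spin_lock_irqsave(&p->pi_lock, flags);
		smp_mb__after_spinlock();	/* full fence; critical section is RCsc */

		/* ... critical section: fully ordered against accesses
		 * performed before the lock was taken ... */

		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
	}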
Diffstat (limited to 'arch/riscv')
 arch/riscv/include/asm/barrier.h | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+), 0 deletions(-)
diff --git a/arch/riscv/include/asm/barrier.h b/arch/riscv/include/asm/barrier.h
index 773c4e039cd7..c0319cbf1eec 100644
--- a/arch/riscv/include/asm/barrier.h
+++ b/arch/riscv/include/asm/barrier.h
@@ -38,6 +38,25 @@
 #define smp_rmb()	RISCV_FENCE(r,r)
 #define smp_wmb()	RISCV_FENCE(w,w)
 
+/*
+ * This is a very specific barrier: it's currently only used in two places in
+ * the kernel, both in the scheduler. See include/linux/spinlock.h for the two
+ * orderings it guarantees, but the "critical section is RCsc" guarantee
+ * mandates a barrier on RISC-V. The sequence looks like:
+ *
+ *   lr.aq lock
+ *   sc    lock <= LOCKED
+ *   smp_mb__after_spinlock()
+ *   // critical section
+ *   lr    lock
+ *   sc.rl lock <= UNLOCKED
+ *
+ * The AQ/RL pair provides an RCpc critical section, but there's not really any
+ * way we can take advantage of that here because the ordering is only enforced
+ * on that one lock. Thus, we're just doing a full fence.
+ */
+#define smp_mb__after_spinlock() RISCV_FENCE(rw,rw)
+
 #include <asm-generic/barrier.h>
 
 #endif /* __ASSEMBLY__ */
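For reference, the RISCV_FENCE helper used by the new macro is defined earlier in
this same header, roughly as sketched below (a sketch assuming the barrier.h of
this era, not quoted verbatim from the patch):

	/* Sketch of the helper defined earlier in barrier.h: it stringizes its
	 * arguments into a single FENCE instruction with a compiler barrier. */
	#define RISCV_FENCE(p, s) \
		__asm__ __volatile__ ("fence " #p "," #s : : : "memory")

	/* So smp_mb__after_spinlock() emits "fence rw,rw": all prior loads and
	 * stores are ordered before all subsequent loads and stores. */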