author		John David Anglin <dave.anglin@bell.net>	2018-08-12 16:31:17 -0400
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2018-08-24 13:07:15 +0200
commit		5c9fc580b1bc23eb8a23fddbd67e82c29d2aadfe (patch)
tree		e5b28a909d5faaf82bd1397d3bd44de3431ea6c7
parent		4027d547d60121602404873c92c673d18ce7af7d (diff)
parisc: Remove unnecessary barriers from spinlock.h
commit 3b885ac1dc35b87a39ee176a6c7e2af9c789d8b8 upstream.

Now that mb() is an instruction barrier, it will slow performance if we
issue unnecessary barriers.

The spinlock defines have a number of unnecessary barriers.  The
__ldcw() define is both a hardware and compiler barrier.  The mb()
barriers in the routines using __ldcw() serve no purpose.

The only barrier needed is the one in arch_spin_unlock().  We need to
ensure all accesses are complete prior to releasing the lock.

Signed-off-by: John David Anglin <dave.anglin@bell.net>
Cc: stable@vger.kernel.org # 4.0+
Signed-off-by: Helge Deller <deller@gmx.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
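The claim that __ldcw() is both a hardware and compiler barrier follows from its inline-assembly definition. Below is a minimal, simplified sketch of what such a define looks like; the real define (in arch/parisc/include/asm/ldcw.h) differs in detail, e.g. it also handles 16-byte alignment of the lock word and uses the ",co" completer on PA 2.0 CPUs.

/*
 * Illustrative sketch only -- not the exact kernel source.
 * ldcw ("load and clear word") atomically loads the lock word and
 * writes zero to it, so a non-zero return value means the lock was
 * acquired.  The asm volatile together with the "memory" clobber
 * already acts as a compiler barrier, and the instruction itself
 * orders memory on the CPU, which is why the extra mb() calls around
 * it serve no purpose.
 */
#define __ldcw(a) ({						\
	unsigned int __ret;					\
	__asm__ __volatile__("ldcw 0(%1),%0"			\
		: "=r" (__ret) : "r" (a) : "memory");		\
	__ret;							\
})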
-rw-r--r--	arch/parisc/include/asm/spinlock.h	8
1 file changed, 2 insertions(+), 6 deletions(-)
diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h
index 6f84b6acc86e..8a63515f03bf 100644
--- a/arch/parisc/include/asm/spinlock.h
+++ b/arch/parisc/include/asm/spinlock.h
@@ -20,7 +20,6 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *x,
 {
 	volatile unsigned int *a;
 
-	mb();
 	a = __ldcw_align(x);
 	while (__ldcw(a) == 0)
 		while (*a == 0)
@@ -30,17 +29,16 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *x,
 				local_irq_disable();
 		} else
 			cpu_relax();
-	mb();
 }
 #define arch_spin_lock_flags arch_spin_lock_flags
 
 static inline void arch_spin_unlock(arch_spinlock_t *x)
 {
 	volatile unsigned int *a;
-	mb();
+
 	a = __ldcw_align(x);
-	*a = 1;
 	mb();
+	*a = 1;
 }
 
 static inline int arch_spin_trylock(arch_spinlock_t *x)
@@ -48,10 +46,8 @@ static inline int arch_spin_trylock(arch_spinlock_t *x)
 	volatile unsigned int *a;
 	int ret;
 
-	mb();
 	a = __ldcw_align(x);
 	ret = __ldcw(a) != 0;
-	mb();
 
 	return ret;
 }
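Reconstructed from the hunks above, arch_spin_unlock() after this change reads as follows; the single remaining mb() is the release barrier the commit message refers to, ensuring all accesses in the critical section complete before the lock word is set back to 1.

static inline void arch_spin_unlock(arch_spinlock_t *x)
{
	volatile unsigned int *a;

	a = __ldcw_align(x);
	mb();		/* order critical-section accesses before the release store */
	*a = 1;		/* non-zero lock word marks the lock as free */
}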