author | John David Anglin <dave.anglin@bell.net> | 2018-08-12 16:31:17 -0400
committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2018-08-24 13:27:01 +0200
commit | 400db6fe74317d64c920025ed4de2de7b3522230 (patch)
tree | ac19c09f42573b90c79bf70c0e4e35d0767e62d7 /arch/parisc/include
parent | 6d124ea608ac800f46100741f7ccd79791c061c8 (diff)
parisc: Remove unnecessary barriers from spinlock.h
commit 3b885ac1dc35b87a39ee176a6c7e2af9c789d8b8 upstream.
Now that mb() is an instruction barrier, issuing unnecessary barriers will
slow performance.
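For context, mb() on parisc now expands to a real serializing instruction plus a compiler barrier, roughly as in the sketch below (illustrative only; the actual definition lives in arch/parisc/include/asm/barrier.h, and the exact instruction shown here is an assumption):

/*
 * Illustrative sketch, not the kernel's exact macro: mb() now emits a
 * serializing instruction together with a compiler barrier, so every
 * redundant mb() has a real runtime cost.
 */
#define mb()	__asm__ __volatile__("sync" : : : "memory")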
The spinlock defines have a number of unnecessary barriers. The __ldcw()
define is both a hardware and compiler barrier. The mb() barriers in the
routines using __ldcw() serve no purpose.
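For reference, __ldcw() is an inline-assembly macro with a "memory" clobber, along the lines of the sketch below (the real macro lives in arch/parisc/include/asm/ldcw.h; details such as the ldcw completer used on PA 2.0 are elided). The ldcw instruction performs the atomic load-and-clear in hardware, and the "memory" clobber keeps the compiler from moving memory accesses across it, which is why callers need no extra mb():

/*
 * Sketch of __ldcw(); see arch/parisc/include/asm/ldcw.h for the real
 * definition.  ldcw atomically loads the word at 'a' and clears it to 0;
 * the "memory" clobber also makes the asm a compiler barrier.
 */
#define __ldcw(a) ({					\
	unsigned int __ret;				\
	__asm__ __volatile__("ldcw 0(%1),%0"		\
		: "=r" (__ret) : "r" (a) : "memory");	\
	__ret;						\
})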
The only barrier needed is the one in arch_spin_unlock(). We need to ensure
all accesses are complete prior to releasing the lock.
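Concretely, with this patch applied the unlock path keeps a single mb() in front of the store that releases the lock (a sketch of the resulting routine, matching the diff below):

static inline void arch_spin_unlock(arch_spinlock_t *x)
{
	volatile unsigned int *a;

	a = __ldcw_align(x);
	mb();		/* make critical-section accesses visible first */
	*a = 1;		/* releasing store: a non-zero lock word means unlocked */
}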
Signed-off-by: John David Anglin <dave.anglin@bell.net>
Cc: stable@vger.kernel.org # 4.0+
Signed-off-by: Helge Deller <deller@gmx.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'arch/parisc/include')
-rw-r--r-- | arch/parisc/include/asm/spinlock.h | 8
1 file changed, 2 insertions, 6 deletions
diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h
index 64f2992e439f..617efa845054 100644
--- a/arch/parisc/include/asm/spinlock.h
+++ b/arch/parisc/include/asm/spinlock.h
@@ -21,7 +21,6 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *x,
 {
 	volatile unsigned int *a;
 
-	mb();
 	a = __ldcw_align(x);
 	while (__ldcw(a) == 0)
 		while (*a == 0)
@@ -31,16 +30,15 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *x,
 				local_irq_disable();
 			} else
 				cpu_relax();
-	mb();
 }
 
 static inline void arch_spin_unlock(arch_spinlock_t *x)
 {
 	volatile unsigned int *a;
-	mb();
+
 	a = __ldcw_align(x);
-	*a = 1;
 	mb();
+	*a = 1;
 }
 
 static inline int arch_spin_trylock(arch_spinlock_t *x)
@@ -48,10 +46,8 @@ static inline int arch_spin_trylock(arch_spinlock_t *x)
 	volatile unsigned int *a;
 	int ret;
 
-	mb();
 	a = __ldcw_align(x);
 	ret = __ldcw(a) != 0;
-	mb();
 
 	return ret;
 }