author      Michael Ellerman <mpe@ellerman.id.au>              2014-01-15 18:14:28 +1100
committer   Benjamin Herrenschmidt <benh@kernel.crashing.org>  2014-01-28 14:45:43 +1100
commit      3405d230b374b6923878b21b8d708d7db1f734ef (patch)
tree        789bd76b9c2b5a0f00bbe1f410927cf23debf96d /arch
parent      f7d98d18a01ece2863984d4fb5ae949b18b02715 (diff)
download    linux-3405d230b374b6923878b21b8d708d7db1f734ef.tar.gz
            linux-3405d230b374b6923878b21b8d708d7db1f734ef.tar.bz2
            linux-3405d230b374b6923878b21b8d708d7db1f734ef.zip
powerpc: Add support for the optimised lockref implementation
This commit adds the architecture support required to enable the
optimised implementation of lockrefs.
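For background, a lockref packs a spinlock and a reference count into a single
64-bit word, so both can be updated by one cmpxchg64(). Roughly, and slightly
simplified from include/linux/lockref.h of this era (shown for context, not
part of this patch):

/*
 * Approximate shape of struct lockref: the spinlock and the count share
 * one 64-bit word when the architecture opts in.
 */
struct lockref {
	union {
#ifdef CONFIG_ARCH_USE_CMPXCHG_LOCKREF	/* real check also requires 64-bit */
		aligned_u64 lock_count;		/* lock + count as one u64 */
#endif
		struct {
			spinlock_t lock;
			unsigned int count;
		};
	};
};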
That's as simple as defining arch_spin_value_unlocked() and selecting
the ARCH_USE_CMPXCHG_LOCKREF Kconfig option.
We also define cmpxchg64_relaxed(), because the lockref code does not
need the cmpxchg to have barrier semantics.
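For reference, the generic code in lib/lockref.c uses exactly these two hooks
in its lockless fast path. Below is a simplified, hand-expanded sketch of that
path; the real code lives in the CMPXCHG_LOOP() macro, and the helper name
lockref_inc_fast() is invented here for illustration:

#include <linux/lockref.h>
#include <linux/spinlock.h>

static bool lockref_inc_fast(struct lockref *lockref)
{
	struct lockref old;

	/* Snapshot the spinlock and the count as one 64-bit word. */
	old.lock_count = ACCESS_ONCE(lockref->lock_count);

	/* Only try the lockless update while the lock *value* is unlocked. */
	while (arch_spin_value_unlocked(old.lock.rlock.raw_lock)) {
		struct lockref new = old, prev = old;

		new.count++;

		/*
		 * Replace the whole lock+count word iff nobody locked it or
		 * changed the count in the meantime.  No ordering against
		 * surrounding memory accesses is needed, hence _relaxed.
		 */
		old.lock_count = cmpxchg64_relaxed(&lockref->lock_count,
						   old.lock_count,
						   new.lock_count);
		if (old.lock_count == prev.lock_count)
			return true;		/* lockless increment won */
		cpu_relax();
	}
	return false;	/* lock is held: caller falls back to spin_lock() */
}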
Using Linus' test case[1] on one system I see a 4x improvement for the
basic enablement, and a further 1.3x for cmpxchg64_relaxed(), for a
total of 5.3x vs the baseline.
On another system I see more like 2x improvement.
[1]: http://marc.info/?l=linux-fsdevel&m=137782380714721&w=4
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Diffstat (limited to 'arch')
-rw-r--r--  arch/powerpc/Kconfig                 | 1 +
-rw-r--r--  arch/powerpc/include/asm/cmpxchg.h   | 1 +
-rw-r--r--  arch/powerpc/include/asm/spinlock.h  | 5 +++++
3 files changed, 7 insertions(+), 0 deletions(-)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index fa395179ddd6..6ca5d5cabeb1 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -139,6 +139,7 @@ config PPC
 	select OLD_SIGACTION			if PPC32
 	select HAVE_DEBUG_STACKOVERFLOW
 	select HAVE_IRQ_EXIT_ON_IRQ_STACK
+	select ARCH_USE_CMPXCHG_LOCKREF		if PPC64
 
 config GENERIC_CSUM
 	def_bool CPU_LITTLE_ENDIAN
diff --git a/arch/powerpc/include/asm/cmpxchg.h b/arch/powerpc/include/asm/cmpxchg.h
index e245aab7f191..d463c68fe7f0 100644
--- a/arch/powerpc/include/asm/cmpxchg.h
+++ b/arch/powerpc/include/asm/cmpxchg.h
@@ -300,6 +300,7 @@ __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
 	BUILD_BUG_ON(sizeof(*(ptr)) != 8);			\
 	cmpxchg_local((ptr), (o), (n));				\
   })
+#define cmpxchg64_relaxed	cmpxchg64_local
 #else
 #include <asm-generic/cmpxchg-local.h>
 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
index 5f54a744dcc5..5162f8cd18c0 100644
--- a/arch/powerpc/include/asm/spinlock.h
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -54,6 +54,11 @@
 #define SYNC_IO
 #endif
 
+static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
+{
+	return lock.slock == 0;
+}
+
 /*
  * This returns the old value in the lock, so we succeeded
  * in getting the lock if the return value is 0.
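The main beneficiaries are callers that keep a reference count next to a
spinlock in a struct lockref, such as the dcache. A usage sketch follows;
lockref_get() is the real interface from <linux/lockref.h>, while struct
my_obj and my_obj_get() are illustrative stand-ins only:

#include <linux/lockref.h>

struct my_obj {
	struct lockref ref;
};

static void my_obj_get(struct my_obj *obj)
{
	/*
	 * Previously equivalent to:
	 *	spin_lock(&obj->ref.lock);
	 *	obj->ref.count++;
	 *	spin_unlock(&obj->ref.lock);
	 *
	 * With ARCH_USE_CMPXCHG_LOCKREF selected, lockref_get() first
	 * attempts a single cmpxchg64_relaxed() on the combined
	 * lock+count word and only takes the spinlock if that fails.
	 */
	lockref_get(&obj->ref);
}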