author | Nicolas Pitre <nico@cam.org> | 2006-02-08 21:19:38 +0000
committer | Russell King <rmk+kernel@arm.linux.org.uk> | 2006-02-08 21:19:38 +0000
commit | 365bf8ac6f5b3d3187cb39444fa87a5b38683ff4 (patch)
tree | 6b0e8234ca75d6b5b155c652028a0506875099a9
parent | 5964eae835c3b98c69d338950651f7f414f96477 (diff)
download | linux-stable-365bf8ac6f5b3d3187cb39444fa87a5b38683ff4.tar.gz, linux-stable-365bf8ac6f5b3d3187cb39444fa87a5b38683ff4.tar.bz2, linux-stable-365bf8ac6f5b3d3187cb39444fa87a5b38683ff4.zip
[ARM] 3311/1: clean up include/asm-arm/mutex.h
Patch from Nicolas Pitre
Since:

    if (unlikely(__res || __ex_flag))

produces worse code on ARM than:

    if (unlikely(__res | __ex_flag))

I therefore made it more explicit:

    __res |= __ex_flag;
    if (unlikely(__res != 0))

so it is not seen as a typo again.
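
As a compilable sketch of the codegen point (hypothetical helper names; `unlikely()` as provided by the kernel's `<linux/compiler.h>`): the logical `||` short-circuits, so the compiler may test and branch on each operand separately, while the bitwise form folds both flags into one ORR and tests once.

```c
#include <linux/compiler.h>	/* unlikely() */

/* Hypothetical helpers, not part of the patch: both checks are equivalent
 * for these flag values, but they compile differently on ARM. */
static inline void check_branchy(int __res, int __ex_flag,
				 void (*fail_fn)(void))
{
	/* Logical ||: short-circuit evaluation, so the compiler may emit
	 * a separate test and conditional branch per operand. */
	if (unlikely(__res || __ex_flag))
		fail_fn();
}

static inline void check_explicit(int __res, int __ex_flag,
				  void (*fail_fn)(void))
{
	/* Bitwise OR first: a single ORR plus one test, and the intent is
	 * explicit enough that nobody "fixes" the | back into ||. */
	__res |= __ex_flag;
	if (unlikely(__res != 0))
		fail_fn();
}
```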
Also converted everything from macros to static inline functions for better readability (both produce the same code after all).
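
A condensed, userspace-compilable sketch of that conversion (stand-in `atomic_t` and a simplified `typecheck()`, not the kernel's headers; GCC extensions assumed): the macro must bolt type safety on with typecheck helpers, while the inline function gets the same checking from its prototype.

```c
typedef struct { int counter; } atomic_t;	/* stand-in for the kernel type */

/* Simplified stand-in for the kernel's typecheck() idiom. */
#define typecheck(type, x) \
	({ type __dummy; typeof(x) __dummy2; (void)(&__dummy == &__dummy2); 1; })

/* Macro form: the body is pasted textually, so types are only checked
 * where typecheck() is spelled out by hand. */
#define fastpath_dec_macro(count, fail_fn)	\
do {						\
	typecheck(atomic_t *, count);		\
	if (--(count)->counter < 0)		\
		fail_fn(count);			\
} while (0)

/* Inline form: the prototype enforces the types, and at -O2 the compiler
 * inlines it to the same code as the macro. */
static inline void fastpath_dec_inline(atomic_t *count,
				       void (*fail_fn)(atomic_t *))
{
	if (--count->counter < 0)
		fail_fn(count);
}
```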
And finally, added the missing \t characters to the multi-line assembly code.
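
For background on that last point: GCC pastes each string literal of an inline asm verbatim into its assembly output, so multi-line asm conventionally ends each line with `\n\t` to keep every instruction on its own, properly indented line of the generated `.s` file. A minimal sketch (hypothetical function, ARM GCC assumed):

```c
/* Each literal is emitted as-is; "\n\t" is a newline plus the leading
 * tab the assembler listing expects. The last line needs no "\n". */
static inline int add_three(int x)
{
	int res;

	__asm__ ("add	%0, %1, #1	\n\t"
		 "add	%0, %0, #1	\n\t"
		 "add	%0, %0, #1	"
		 : "=&r" (res)
		 : "r" (x));
	return res;
}
```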
Signed-off-by: Nicolas Pitre <nico@cam.org>
Acked-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
-rw-r--r-- | include/asm-arm/mutex.h | 131
1 file changed, 65 insertions(+), 66 deletions(-)
```diff
diff --git a/include/asm-arm/mutex.h b/include/asm-arm/mutex.h
index 6caa59f1f595..cb29d84e690d 100644
--- a/include/asm-arm/mutex.h
+++ b/include/asm-arm/mutex.h
@@ -23,72 +23,71 @@
  * simply bail out immediately through the slow path where the lock will be
  * reattempted until it succeeds.
  */
-#define __mutex_fastpath_lock(count, fail_fn)				\
-do {									\
-	int __ex_flag, __res;						\
-									\
-	typecheck(atomic_t *, count);					\
-	typecheck_fn(fastcall void (*)(atomic_t *), fail_fn);		\
-									\
-	__asm__ (							\
-		"ldrex	%0, [%2]	\n"				\
-		"sub	%0, %0, #1	\n"				\
-		"strex	%1, %0, [%2]	\n"				\
-									\
-		: "=&r" (__res), "=&r" (__ex_flag)			\
-		: "r" (&(count)->counter)				\
-		: "cc","memory" );					\
-									\
-	if (unlikely(__res || __ex_flag))				\
-		fail_fn(count);						\
-} while (0)
-
-#define __mutex_fastpath_lock_retval(count, fail_fn)			\
-({									\
-	int __ex_flag, __res;						\
-									\
-	typecheck(atomic_t *, count);					\
-	typecheck_fn(fastcall int (*)(atomic_t *), fail_fn);		\
-									\
-	__asm__ (							\
-		"ldrex	%0, [%2]	\n"				\
-		"sub	%0, %0, #1	\n"				\
-		"strex	%1, %0, [%2]	\n"				\
-									\
-		: "=&r" (__res), "=&r" (__ex_flag)			\
-		: "r" (&(count)->counter)				\
-		: "cc","memory" );					\
-									\
-	__res |= __ex_flag;						\
-	if (unlikely(__res != 0))					\
-		__res = fail_fn(count);					\
-	__res;								\
-})
+static inline void
+__mutex_fastpath_lock(atomic_t *count, fastcall void (*fail_fn)(atomic_t *))
+{
+	int __ex_flag, __res;
+
+	__asm__ (
+
+		"ldrex	%0, [%2]	\n\t"
+		"sub	%0, %0, #1	\n\t"
+		"strex	%1, %0, [%2]	"
+
+		: "=&r" (__res), "=&r" (__ex_flag)
+		: "r" (&(count)->counter)
+		: "cc","memory" );
+
+	__res |= __ex_flag;
+	if (unlikely(__res != 0))
+		fail_fn(count);
+}
+
+static inline int
+__mutex_fastpath_lock_retval(atomic_t *count, fastcall int (*fail_fn)(atomic_t *))
+{
+	int __ex_flag, __res;
+
+	__asm__ (
+
+		"ldrex	%0, [%2]	\n\t"
+		"sub	%0, %0, #1	\n\t"
+		"strex	%1, %0, [%2]	"
+
+		: "=&r" (__res), "=&r" (__ex_flag)
+		: "r" (&(count)->counter)
+		: "cc","memory" );
+
+	__res |= __ex_flag;
+	if (unlikely(__res != 0))
+		__res = fail_fn(count);
+	return __res;
+}
 
 /*
  * Same trick is used for the unlock fast path. However the original value,
  * rather than the result, is used to test for success in order to have
  * better generated assembly.
  */
-#define __mutex_fastpath_unlock(count, fail_fn)				\
-do {									\
-	int __ex_flag, __res, __orig;					\
-									\
-	typecheck(atomic_t *, count);					\
-	typecheck_fn(fastcall void (*)(atomic_t *), fail_fn);		\
-									\
-	__asm__ (							\
-		"ldrex	%0, [%3]	\n"				\
-		"add	%1, %0, #1	\n"				\
-		"strex	%2, %1, [%3]	\n"				\
-									\
-		: "=&r" (__orig), "=&r" (__res), "=&r" (__ex_flag)	\
-		: "r" (&(count)->counter)				\
-		: "cc","memory" );					\
-									\
-	if (unlikely(__orig || __ex_flag))				\
-		fail_fn(count);						\
-} while (0)
+static inline void
+__mutex_fastpath_unlock(atomic_t *count, fastcall void (*fail_fn)(atomic_t *))
+{
+	int __ex_flag, __res, __orig;
+
+	__asm__ (
+
+		"ldrex	%0, [%3]	\n\t"
+		"add	%1, %0, #1	\n\t"
+		"strex	%2, %1, [%3]	"
+
+		: "=&r" (__orig), "=&r" (__res), "=&r" (__ex_flag)
+		: "r" (&(count)->counter)
+		: "cc","memory" );
+
+	__orig |= __ex_flag;
+	if (unlikely(__orig != 0))
+		fail_fn(count);
+}
 
 /*
  * If the unlock was done on a contended lock, or if the unlock simply fails
@@ -110,12 +109,12 @@ __mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
 
 	__asm__ (
 
-		"1: ldrex	%0, [%3]	\n"
-		"subs	%1, %0, #1	\n"
-		"strexeq	%2, %1, [%3]	\n"
-		"movlt	%0, #0	\n"
-		"cmpeq	%2, #0	\n"
-		"bgt	1b	\n"
+		"1: ldrex	%0, [%3]	\n\t"
+		"subs	%1, %0, #1	\n\t"
+		"strexeq	%2, %1, [%3]	\n\t"
+		"movlt	%0, #0	\n\t"
+		"cmpeq	%2, #0	\n\t"
+		"bgt	1b	"
 
 		: "=&r" (__orig), "=&r" (__res), "=&r" (__ex_flag)
 		: "r" (&count->counter)
```
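
Restated outside the diff, a standalone sketch of the ldrex/strex fastpath pattern the file relies on (ARMv6+ only; stand-in `atomic_t`, hypothetical function name, `unlikely()` omitted for self-containment): the exclusive store reports failure in a status register instead of retrying, so any contention or disturbance simply falls through to the slow path.

```c
typedef struct { int counter; } atomic_t;	/* stand-in for the kernel type */

/* Decrement count->counter with an exclusive load/store pair; strex
 * writes 0 to %1 on success and 1 if the exclusive monitor was
 * disturbed between the ldrex and the strex. */
static inline void fastpath_lock_sketch(atomic_t *count,
					void (*fail_fn)(atomic_t *))
{
	int __ex_flag, __res;

	__asm__ (
		"ldrex	%0, [%2]	\n\t"	/* exclusive load of counter */
		"sub	%0, %0, #1	\n\t"	/* new value = old - 1 */
		"strex	%1, %0, [%2]	"	/* try to store; %1 = status */
		: "=&r" (__res), "=&r" (__ex_flag)
		: "r" (&count->counter)
		: "cc", "memory");

	__res |= __ex_flag;	/* nonzero: lock contended or store failed */
	if (__res != 0)
		fail_fn(count);	/* bail out to the slow path */
}
```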