author | Peter Zijlstra <peterz@infradead.org> | 2014-04-23 19:35:00 +0200 |
---|---|---|
committer | Thomas Gleixner <tglx@linutronix.de> | 2015-07-27 14:06:23 +0200 |
commit | 2a3ed90f428cab65567c5421ebc0f6c8d02c1216 (patch) | |
tree | d85187cd28a785f6807d39757a5fcc4c009e7e90 /arch/xtensa | |
parent | 304a0d699a3c6103b61d5ea18d56820e7d8e3116 (diff) | |
download | linux-2a3ed90f428cab65567c5421ebc0f6c8d02c1216.tar.gz linux-2a3ed90f428cab65567c5421ebc0f6c8d02c1216.tar.bz2 linux-2a3ed90f428cab65567c5421ebc0f6c8d02c1216.zip |
xtensa: Provide atomic_{or,xor,and}
Implement atomic logic ops -- atomic_{or,xor,and}.
These will replace the atomic_{set,clear}_mask functions that are
available on some archs.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
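To make the intent of the conversion concrete, here is a minimal sketch of how a caller would migrate from the deprecated mask helpers to the new logic ops. The `example_flags` variable and the flag bit values are hypothetical, invented purely for illustration:

```c
#include <linux/atomic.h>

/* hypothetical flag bits, for illustration only */
#define EXAMPLE_BUSY	0x1
#define EXAMPLE_DIRTY	0x2

static atomic_t example_flags = ATOMIC_INIT(0);

static void example_update_flags(void)
{
	/* before: atomic_set_mask(EXAMPLE_BUSY, &example_flags); */
	atomic_or(EXAMPLE_BUSY, &example_flags);	/* set the bits in the mask */

	/* before: atomic_clear_mask(EXAMPLE_DIRTY, &example_flags); */
	atomic_and(~EXAMPLE_DIRTY, &example_flags);	/* clear the bits in the mask */

	/* the third new op has no old equivalent: */
	atomic_xor(EXAMPLE_BUSY, &example_flags);	/* toggle the bits in the mask */
}
```

The `__deprecated` wrappers added in the patch below keep existing callers building during the transition.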
Diffstat (limited to 'arch/xtensa')
-rw-r--r-- | arch/xtensa/include/asm/atomic.h | 85 |
1 file changed, 16 insertions(+), 69 deletions(-)
diff --git a/arch/xtensa/include/asm/atomic.h b/arch/xtensa/include/asm/atomic.h
index 00b7d46b35b8..4dd2450300a6 100644
--- a/arch/xtensa/include/asm/atomic.h
+++ b/arch/xtensa/include/asm/atomic.h
@@ -145,10 +145,26 @@ static inline int atomic_##op##_return(int i, atomic_t * v) \
 ATOMIC_OPS(add)
 ATOMIC_OPS(sub)
 
+#define CONFIG_ARCH_HAS_ATOMIC_OR
+
+ATOMIC_OP(and)
+ATOMIC_OP(or)
+ATOMIC_OP(xor)
+
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
 
+static inline __deprecated void atomic_set_mask(unsigned int mask, atomic_t *v)
+{
+	atomic_or(mask, v);
+}
+
+static inline __deprecated void atomic_clear_mask(unsigned int mask, atomic_t *v)
+{
+	atomic_and(~mask, v);
+}
+
 /**
  * atomic_sub_and_test - subtract value from variable and test result
  * @i: integer value to subtract
@@ -250,75 +266,6 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
 	return c;
 }
 
-
-static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
-{
-#if XCHAL_HAVE_S32C1I
-	unsigned long tmp;
-	int result;
-
-	__asm__ __volatile__(
-			"1:     l32i    %1, %3, 0\n"
-			"       wsr     %1, scompare1\n"
-			"       and     %0, %1, %2\n"
-			"       s32c1i  %0, %3, 0\n"
-			"       bne     %0, %1, 1b\n"
-			: "=&a" (result), "=&a" (tmp)
-			: "a" (~mask), "a" (v)
-			: "memory"
-			);
-#else
-	unsigned int all_f = -1;
-	unsigned int vval;
-
-	__asm__ __volatile__(
-			"       rsil    a15,"__stringify(LOCKLEVEL)"\n"
-			"       l32i    %0, %2, 0\n"
-			"       xor     %1, %4, %3\n"
-			"       and     %0, %0, %4\n"
-			"       s32i    %0, %2, 0\n"
-			"       wsr     a15, ps\n"
-			"       rsync\n"
-			: "=&a" (vval), "=a" (mask)
-			: "a" (v), "a" (all_f), "1" (mask)
-			: "a15", "memory"
-			);
-#endif
-}
-
-static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
-{
-#if XCHAL_HAVE_S32C1I
-	unsigned long tmp;
-	int result;
-
-	__asm__ __volatile__(
-			"1:     l32i    %1, %3, 0\n"
-			"       wsr     %1, scompare1\n"
-			"       or      %0, %1, %2\n"
-			"       s32c1i  %0, %3, 0\n"
-			"       bne     %0, %1, 1b\n"
-			: "=&a" (result), "=&a" (tmp)
-			: "a" (mask), "a" (v)
-			: "memory"
-			);
-#else
-	unsigned int vval;
-
-	__asm__ __volatile__(
-			"       rsil    a15,"__stringify(LOCKLEVEL)"\n"
-			"       l32i    %0, %2, 0\n"
-			"       or      %0, %0, %1\n"
-			"       s32i    %0, %2, 0\n"
-			"       wsr     a15, ps\n"
-			"       rsync\n"
-			: "=&a" (vval)
-			: "a" (mask), "a" (v)
-			: "a15", "memory"
-			);
-#endif
-}
-
 #endif /* __KERNEL__ */
 
 #endif /* _XTENSA_ATOMIC_H */
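For reference, a sketch of roughly what `ATOMIC_OP(or)` generates on a core with S32C1I: the compare-and-swap loop mirrors the removed `atomic_set_mask()` above, with the caller-supplied value in place of the hard-wired mask. This is a paraphrase under that assumption, not the literal expansion of the macro in the header:

```c
/* Sketch only: paraphrased from the removed XCHAL_HAVE_S32C1I path of
 * atomic_set_mask() above, not quoted from the real ATOMIC_OP() macro. */
static inline void atomic_or(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	__asm__ __volatile__(
			"1:     l32i    %1, %3, 0\n"	/* load current v->counter */
			"       wsr     %1, scompare1\n"	/* expected value for s32c1i */
			"       or      %0, %1, %2\n"	/* new value = old | i */
			"       s32c1i  %0, %3, 0\n"	/* store iff counter unchanged */
			"       bne     %0, %1, 1b\n"	/* lost the race: retry */
			: "=&a" (result), "=&a" (tmp)
			: "a" (i), "a" (v)
			: "memory"
			);
}
```

On cores without S32C1I, the macro presumably falls back to the interrupt-disable sequence (`rsil`/`wsr`/`rsync` around a plain load-modify-store) seen in the removed `!XCHAL_HAVE_S32C1I` paths above.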