path: root/arch/s390
author	Peter Zijlstra <peterz@infradead.org>	2014-04-23 20:15:48 +0200
committer	Thomas Gleixner <tglx@linutronix.de>	2015-07-27 14:06:23 +0200
commit	ae8c35c85be92b79c545c57c2a14c2f8136d3353 (patch)
tree	2103a425e45c3678c63fd8c1bbd58ca67c0cc884 /arch/s390
parent	2a3ed90f428cab65567c5421ebc0f6c8d02c1216 (diff)
download	linux-ae8c35c85be92b79c545c57c2a14c2f8136d3353.tar.gz
linux-ae8c35c85be92b79c545c57c2a14c2f8136d3353.tar.bz2
linux-ae8c35c85be92b79c545c57c2a14c2f8136d3353.zip
s390: Provide atomic_{or,xor,and}
Implement atomic logic ops -- atomic_{or,xor,and}. These will replace the
atomic_{set,clear}_mask functions that are available on some archs.

Acked-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
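For callers of the old helpers, the conversion implied by this patch is mechanical and follows directly from the wrappers below: atomic_set_mask(mask, v) becomes atomic_or(mask, v), and atomic_clear_mask(mask, v) becomes atomic_and(~mask, v). A minimal sketch of such a conversion -- the flag bits and the enable_feature()/disable_feature()/toggle_feature() helpers are hypothetical and only illustrate the mapping, they are not part of this patch:

	#include <linux/atomic.h>	/* pulls in the arch-provided atomic_or()/atomic_and()/atomic_xor() */

	#define FLAG_RUNNING	0x01	/* hypothetical flag bits */
	#define FLAG_PENDING	0x02

	static atomic_t flags = ATOMIC_INIT(0);

	static void enable_feature(unsigned int bit)
	{
		/* was: atomic_set_mask(bit, &flags); */
		atomic_or(bit, &flags);
	}

	static void disable_feature(unsigned int bit)
	{
		/* was: atomic_clear_mask(bit, &flags); */
		atomic_and(~bit, &flags);
	}

	static void toggle_feature(unsigned int bit)
	{
		/* no old equivalent; atomic_xor() is new with this series */
		atomic_xor(bit, &flags);
	}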
Diffstat (limited to 'arch/s390')
-rw-r--r--	arch/s390/include/asm/atomic.h	|	47
1 file changed, 33 insertions, 14 deletions
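As a reading aid for the hunks below: the new ATOMIC_OP(op, OP) and ATOMIC64_OP(op, OP) helper macros paste the lowercase argument into the function name and the uppercase argument into the instruction selector. For example, ATOMIC_OP(or, OR) expands to roughly the following hand-written sketch; __ATOMIC_LOOP and __ATOMIC_NO_BARRIER already exist in this header and are not changed by the patch:

	static inline void atomic_or(int i, atomic_t *v)
	{
		/*
		 * Emits the "lao" instruction on machines with the interlocked-access
		 * facility, otherwise falls back to a compare-and-swap retry loop
		 * built around "or" (see the two __ATOMIC_OR definitions below).
		 */
		__ATOMIC_LOOP(v, i, __ATOMIC_OR, __ATOMIC_NO_BARRIER);
	}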
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index adbe3802e377..b3859d8e001f 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -27,6 +27,7 @@
#define __ATOMIC_OR "lao"
#define __ATOMIC_AND "lan"
#define __ATOMIC_ADD "laa"
+#define __ATOMIC_XOR "lax"
#define __ATOMIC_BARRIER "bcr 14,0\n"
#define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier) \
@@ -49,6 +50,7 @@
#define __ATOMIC_OR "or"
#define __ATOMIC_AND "nr"
#define __ATOMIC_ADD "ar"
+#define __ATOMIC_XOR "xr"
#define __ATOMIC_BARRIER "\n"
#define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier) \
@@ -118,14 +120,26 @@ static inline void atomic_add(int i, atomic_t *v)
#define atomic_dec_return(_v) atomic_sub_return(1, _v)
#define atomic_dec_and_test(_v) (atomic_sub_return(1, _v) == 0)
-static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
+#define ATOMIC_OP(op, OP) \
+static inline void atomic_##op(int i, atomic_t *v) \
+{ \
+ __ATOMIC_LOOP(v, i, __ATOMIC_##OP, __ATOMIC_NO_BARRIER); \
+}
+
+ATOMIC_OP(and, AND)
+ATOMIC_OP(or, OR)
+ATOMIC_OP(xor, XOR)
+
+#undef ATOMIC_OP
+
+static inline __deprecated void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
- __ATOMIC_LOOP(v, ~mask, __ATOMIC_AND, __ATOMIC_NO_BARRIER);
+ atomic_and(~mask, v);
}
-static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
+static inline __deprecated void atomic_set_mask(unsigned int mask, atomic_t *v)
{
- __ATOMIC_LOOP(v, mask, __ATOMIC_OR, __ATOMIC_NO_BARRIER);
+ atomic_or(mask, v);
}
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
@@ -167,6 +181,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
#define __ATOMIC64_OR "laog"
#define __ATOMIC64_AND "lang"
#define __ATOMIC64_ADD "laag"
+#define __ATOMIC64_XOR "laxg"
#define __ATOMIC64_BARRIER "bcr 14,0\n"
#define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier) \
@@ -189,6 +204,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
#define __ATOMIC64_OR "ogr"
#define __ATOMIC64_AND "ngr"
#define __ATOMIC64_ADD "agr"
+#define __ATOMIC64_XOR "xgr"
#define __ATOMIC64_BARRIER "\n"
#define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier) \
@@ -247,16 +263,6 @@ static inline void atomic64_add(long long i, atomic64_t *v)
__ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_NO_BARRIER);
}
-static inline void atomic64_clear_mask(unsigned long mask, atomic64_t *v)
-{
- __ATOMIC64_LOOP(v, ~mask, __ATOMIC64_AND, __ATOMIC64_NO_BARRIER);
-}
-
-static inline void atomic64_set_mask(unsigned long mask, atomic64_t *v)
-{
- __ATOMIC64_LOOP(v, mask, __ATOMIC64_OR, __ATOMIC64_NO_BARRIER);
-}
-
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
static inline long long atomic64_cmpxchg(atomic64_t *v,
@@ -270,6 +276,19 @@ static inline long long atomic64_cmpxchg(atomic64_t *v,
return old;
}
+#define ATOMIC64_OP(op, OP) \
+static inline void atomic64_##op(long i, atomic64_t *v) \
+{ \
+ __ATOMIC64_LOOP(v, i, __ATOMIC64_##OP, __ATOMIC64_NO_BARRIER); \
+}
+
+#define CONFIG_ARCH_HAS_ATOMIC_OR
+
+ATOMIC64_OP(and, AND)
+ATOMIC64_OP(or, OR)
+ATOMIC64_OP(xor, XOR)
+
+#undef ATOMIC64_OP
#undef __ATOMIC64_LOOP
static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)