path: root/arch/x86/include/asm/percpu.h
author     Brian Gerst <brgerst@gmail.com>        2020-07-20 13:49:23 -0700
committer  Thomas Gleixner <tglx@linutronix.de>   2020-07-23 11:46:42 +0200
commit  c94055fe93c8d00bfa23fa2cb9af080f7fc53aa0 (patch)
tree    0f6a19240424d290b45137e196886ac111034205 /arch/x86/include/asm/percpu.h
parent  ebcd580bed4a357ea894e6878d9099b3919f727f (diff)
x86/percpu: Clean up percpu_stable_op()
Use __pcpu_size_call_return() to simplify this_cpu_read_stable(). Also
remove __bad_percpu_size() which is now unused.

Signed-off-by: Brian Gerst <brgerst@gmail.com>
Signed-off-by: Nick Desaulniers <ndesaulniers@google.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Nick Desaulniers <ndesaulniers@google.com>
Tested-by: Sedat Dilek <sedat.dilek@gmail.com>
Reviewed-by: Nick Desaulniers <ndesaulniers@google.com>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Dennis Zhou <dennis@kernel.org>
Link: https://lkml.kernel.org/r/20200720204925.3654302-10-ndesaulniers@google.com
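For context, __pcpu_size_call_return() is the generic size-dispatch helper from include/linux/percpu-defs.h that the reworked this_cpu_read_stable() leans on. A simplified sketch of that helper (not the verbatim kernel definition) looks roughly like this:

/*
 * Dispatch on sizeof(variable) to the size-suffixed accessor, e.g.
 * __pcpu_size_call_return(this_cpu_read_stable_, current_task)
 * expands to this_cpu_read_stable_8(current_task) on x86-64.
 */
#define __pcpu_size_call_return(stem, variable)				\
({									\
	typeof(variable) pscr_ret__;					\
	__verify_pcpu_ptr(&(variable));					\
	switch (sizeof(variable)) {					\
	case 1: pscr_ret__ = stem##1(variable); break;			\
	case 2: pscr_ret__ = stem##2(variable); break;			\
	case 4: pscr_ret__ = stem##4(variable); break;			\
	case 8: pscr_ret__ = stem##8(variable); break;			\
	default:							\
		__bad_size_call_parameter(); break;			\
	}								\
	pscr_ret__;							\
})

This is the same pattern the existing raw_cpu_read_1/2/4/8() ops already follow, so the open-coded sizeof() switch in percpu_stable_op() and its __bad_percpu_size() link-error stub become redundant. A typical caller is get_current() in arch/x86/include/asm/current.h, which at this point in time read the per-cpu current_task pointer via this_cpu_read_stable().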
Diffstat (limited to 'arch/x86/include/asm/percpu.h')
-rw-r--r--  arch/x86/include/asm/percpu.h | 41 ++++++++++++-----------------------------
1 file changed, 12 insertions(+), 29 deletions(-)
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 7efc0b5c4ff0..cf2b9c2a241e 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -85,7 +85,6 @@
/* For arch-specific code, we can use direct single-insn ops (they
* don't give an lvalue though). */
-extern void __bad_percpu_size(void);
#define __pcpu_type_1 u8
#define __pcpu_type_2 u16
@@ -167,33 +166,13 @@ do { \
(typeof(_var))(unsigned long) pfo_val__; \
})
-#define percpu_stable_op(op, var) \
-({ \
- typeof(var) pfo_ret__; \
- switch (sizeof(var)) { \
- case 1: \
- asm(op "b "__percpu_arg(P1)",%0" \
- : "=q" (pfo_ret__) \
- : "p" (&(var))); \
- break; \
- case 2: \
- asm(op "w "__percpu_arg(P1)",%0" \
- : "=r" (pfo_ret__) \
- : "p" (&(var))); \
- break; \
- case 4: \
- asm(op "l "__percpu_arg(P1)",%0" \
- : "=r" (pfo_ret__) \
- : "p" (&(var))); \
- break; \
- case 8: \
- asm(op "q "__percpu_arg(P1)",%0" \
- : "=r" (pfo_ret__) \
- : "p" (&(var))); \
- break; \
- default: __bad_percpu_size(); \
- } \
- pfo_ret__; \
+#define percpu_stable_op(size, op, _var) \
+({ \
+ __pcpu_type_##size pfo_val__; \
+ asm(__pcpu_op2_##size(op, __percpu_arg(P[var]), "%[val]") \
+ : [val] __pcpu_reg_##size("=", pfo_val__) \
+ : [var] "p" (&(_var))); \
+ (typeof(_var))(unsigned long) pfo_val__; \
})
/*
@@ -258,7 +237,11 @@ do { \
* per-thread variables implemented as per-cpu variables and thus
* stable for the duration of the respective task.
*/
-#define this_cpu_read_stable(var) percpu_stable_op("mov", var)
+#define this_cpu_read_stable_1(pcp) percpu_stable_op(1, "mov", pcp)
+#define this_cpu_read_stable_2(pcp) percpu_stable_op(2, "mov", pcp)
+#define this_cpu_read_stable_4(pcp) percpu_stable_op(4, "mov", pcp)
+#define this_cpu_read_stable_8(pcp) percpu_stable_op(8, "mov", pcp)
+#define this_cpu_read_stable(pcp) __pcpu_size_call_return(this_cpu_read_stable_, pcp)
#define raw_cpu_read_1(pcp) percpu_from_op(1, , "mov", pcp)
#define raw_cpu_read_2(pcp) percpu_from_op(2, , "mov", pcp)