Diffstat (limited to 'include')
 include/asm-generic/percpu.h    | 410
 include/linux/percpu-defs.h     | 380
 include/linux/percpu-refcount.h |  64
 include/linux/percpu.h          | 673
4 files changed, 761 insertions(+), 766 deletions(-)
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h index 0703aa75b5e8..4d9f233c4ba8 100644 --- a/include/asm-generic/percpu.h +++ b/include/asm-generic/percpu.h @@ -36,93 +36,385 @@ extern unsigned long __per_cpu_offset[NR_CPUS]; #endif /* - * Add a offset to a pointer but keep the pointer as is. - * - * Only S390 provides its own means of moving the pointer. + * Arch may define arch_raw_cpu_ptr() to provide more efficient address + * translations for raw_cpu_ptr(). */ -#ifndef SHIFT_PERCPU_PTR -/* Weird cast keeps both GCC and sparse happy. */ -#define SHIFT_PERCPU_PTR(__p, __offset) ({ \ - __verify_pcpu_ptr((__p)); \ - RELOC_HIDE((typeof(*(__p)) __kernel __force *)(__p), (__offset)); \ -}) +#ifndef arch_raw_cpu_ptr +#define arch_raw_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, __my_cpu_offset) #endif -/* - * A percpu variable may point to a discarded regions. The following are - * established ways to produce a usable pointer from the percpu variable - * offset. - */ -#define per_cpu(var, cpu) \ - (*SHIFT_PERCPU_PTR(&(var), per_cpu_offset(cpu))) - -#ifndef raw_cpu_ptr -#define raw_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, __my_cpu_offset) +#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA +extern void setup_per_cpu_areas(void); #endif -#ifdef CONFIG_DEBUG_PREEMPT -#define this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, my_cpu_offset) + +#endif /* SMP */ + +#ifndef PER_CPU_BASE_SECTION +#ifdef CONFIG_SMP +#define PER_CPU_BASE_SECTION ".data..percpu" #else -#define this_cpu_ptr(ptr) raw_cpu_ptr(ptr) +#define PER_CPU_BASE_SECTION ".data" +#endif #endif -#define __get_cpu_var(var) (*this_cpu_ptr(&(var))) -#define __raw_get_cpu_var(var) (*raw_cpu_ptr(&(var))) +#ifndef PER_CPU_ATTRIBUTES +#define PER_CPU_ATTRIBUTES +#endif -#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA -extern void setup_per_cpu_areas(void); +#ifndef PER_CPU_DEF_ATTRIBUTES +#define PER_CPU_DEF_ATTRIBUTES #endif -#else /* ! 
SMP */ +#define raw_cpu_generic_to_op(pcp, val, op) \ +do { \ + *raw_cpu_ptr(&(pcp)) op val; \ +} while (0) -#define VERIFY_PERCPU_PTR(__p) ({ \ - __verify_pcpu_ptr((__p)); \ - (typeof(*(__p)) __kernel __force *)(__p); \ +#define raw_cpu_generic_add_return(pcp, val) \ +({ \ + raw_cpu_add(pcp, val); \ + raw_cpu_read(pcp); \ }) -#define per_cpu(var, cpu) (*((void)(cpu), VERIFY_PERCPU_PTR(&(var)))) -#define __get_cpu_var(var) (*VERIFY_PERCPU_PTR(&(var))) -#define __raw_get_cpu_var(var) (*VERIFY_PERCPU_PTR(&(var))) -#define this_cpu_ptr(ptr) per_cpu_ptr(ptr, 0) -#define raw_cpu_ptr(ptr) this_cpu_ptr(ptr) +#define raw_cpu_generic_xchg(pcp, nval) \ +({ \ + typeof(pcp) __ret; \ + __ret = raw_cpu_read(pcp); \ + raw_cpu_write(pcp, nval); \ + __ret; \ +}) -#endif /* SMP */ +#define raw_cpu_generic_cmpxchg(pcp, oval, nval) \ +({ \ + typeof(pcp) __ret; \ + __ret = raw_cpu_read(pcp); \ + if (__ret == (oval)) \ + raw_cpu_write(pcp, nval); \ + __ret; \ +}) -#ifndef PER_CPU_BASE_SECTION -#ifdef CONFIG_SMP -#define PER_CPU_BASE_SECTION ".data..percpu" -#else -#define PER_CPU_BASE_SECTION ".data" +#define raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \ +({ \ + int __ret = 0; \ + if (raw_cpu_read(pcp1) == (oval1) && \ + raw_cpu_read(pcp2) == (oval2)) { \ + raw_cpu_write(pcp1, nval1); \ + raw_cpu_write(pcp2, nval2); \ + __ret = 1; \ + } \ + (__ret); \ +}) + +#define this_cpu_generic_read(pcp) \ +({ \ + typeof(pcp) __ret; \ + preempt_disable(); \ + __ret = *this_cpu_ptr(&(pcp)); \ + preempt_enable(); \ + __ret; \ +}) + +#define this_cpu_generic_to_op(pcp, val, op) \ +do { \ + unsigned long __flags; \ + raw_local_irq_save(__flags); \ + *raw_cpu_ptr(&(pcp)) op val; \ + raw_local_irq_restore(__flags); \ +} while (0) + +#define this_cpu_generic_add_return(pcp, val) \ +({ \ + typeof(pcp) __ret; \ + unsigned long __flags; \ + raw_local_irq_save(__flags); \ + raw_cpu_add(pcp, val); \ + __ret = raw_cpu_read(pcp); \ + raw_local_irq_restore(__flags); \ + __ret; \ +}) + +#define this_cpu_generic_xchg(pcp, nval) \ +({ \ + typeof(pcp) __ret; \ + unsigned long __flags; \ + raw_local_irq_save(__flags); \ + __ret = raw_cpu_read(pcp); \ + raw_cpu_write(pcp, nval); \ + raw_local_irq_restore(__flags); \ + __ret; \ +}) + +#define this_cpu_generic_cmpxchg(pcp, oval, nval) \ +({ \ + typeof(pcp) __ret; \ + unsigned long __flags; \ + raw_local_irq_save(__flags); \ + __ret = raw_cpu_read(pcp); \ + if (__ret == (oval)) \ + raw_cpu_write(pcp, nval); \ + raw_local_irq_restore(__flags); \ + __ret; \ +}) + +#define this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \ +({ \ + int __ret; \ + unsigned long __flags; \ + raw_local_irq_save(__flags); \ + __ret = raw_cpu_generic_cmpxchg_double(pcp1, pcp2, \ + oval1, oval2, nval1, nval2); \ + raw_local_irq_restore(__flags); \ + __ret; \ +}) + +#ifndef raw_cpu_read_1 +#define raw_cpu_read_1(pcp) (*raw_cpu_ptr(&(pcp))) #endif +#ifndef raw_cpu_read_2 +#define raw_cpu_read_2(pcp) (*raw_cpu_ptr(&(pcp))) +#endif +#ifndef raw_cpu_read_4 +#define raw_cpu_read_4(pcp) (*raw_cpu_ptr(&(pcp))) +#endif +#ifndef raw_cpu_read_8 +#define raw_cpu_read_8(pcp) (*raw_cpu_ptr(&(pcp))) #endif -#ifdef CONFIG_SMP +#ifndef raw_cpu_write_1 +#define raw_cpu_write_1(pcp, val) raw_cpu_generic_to_op(pcp, val, =) +#endif +#ifndef raw_cpu_write_2 +#define raw_cpu_write_2(pcp, val) raw_cpu_generic_to_op(pcp, val, =) +#endif +#ifndef raw_cpu_write_4 +#define raw_cpu_write_4(pcp, val) raw_cpu_generic_to_op(pcp, val, =) +#endif +#ifndef raw_cpu_write_8 +#define raw_cpu_write_8(pcp, val) 
raw_cpu_generic_to_op(pcp, val, =) +#endif -#ifdef MODULE -#define PER_CPU_SHARED_ALIGNED_SECTION "" -#define PER_CPU_ALIGNED_SECTION "" -#else -#define PER_CPU_SHARED_ALIGNED_SECTION "..shared_aligned" -#define PER_CPU_ALIGNED_SECTION "..shared_aligned" +#ifndef raw_cpu_add_1 +#define raw_cpu_add_1(pcp, val) raw_cpu_generic_to_op(pcp, val, +=) +#endif +#ifndef raw_cpu_add_2 +#define raw_cpu_add_2(pcp, val) raw_cpu_generic_to_op(pcp, val, +=) +#endif +#ifndef raw_cpu_add_4 +#define raw_cpu_add_4(pcp, val) raw_cpu_generic_to_op(pcp, val, +=) +#endif +#ifndef raw_cpu_add_8 +#define raw_cpu_add_8(pcp, val) raw_cpu_generic_to_op(pcp, val, +=) #endif -#define PER_CPU_FIRST_SECTION "..first" -#else +#ifndef raw_cpu_and_1 +#define raw_cpu_and_1(pcp, val) raw_cpu_generic_to_op(pcp, val, &=) +#endif +#ifndef raw_cpu_and_2 +#define raw_cpu_and_2(pcp, val) raw_cpu_generic_to_op(pcp, val, &=) +#endif +#ifndef raw_cpu_and_4 +#define raw_cpu_and_4(pcp, val) raw_cpu_generic_to_op(pcp, val, &=) +#endif +#ifndef raw_cpu_and_8 +#define raw_cpu_and_8(pcp, val) raw_cpu_generic_to_op(pcp, val, &=) +#endif + +#ifndef raw_cpu_or_1 +#define raw_cpu_or_1(pcp, val) raw_cpu_generic_to_op(pcp, val, |=) +#endif +#ifndef raw_cpu_or_2 +#define raw_cpu_or_2(pcp, val) raw_cpu_generic_to_op(pcp, val, |=) +#endif +#ifndef raw_cpu_or_4 +#define raw_cpu_or_4(pcp, val) raw_cpu_generic_to_op(pcp, val, |=) +#endif +#ifndef raw_cpu_or_8 +#define raw_cpu_or_8(pcp, val) raw_cpu_generic_to_op(pcp, val, |=) +#endif -#define PER_CPU_SHARED_ALIGNED_SECTION "" -#define PER_CPU_ALIGNED_SECTION "..shared_aligned" -#define PER_CPU_FIRST_SECTION "" +#ifndef raw_cpu_add_return_1 +#define raw_cpu_add_return_1(pcp, val) raw_cpu_generic_add_return(pcp, val) +#endif +#ifndef raw_cpu_add_return_2 +#define raw_cpu_add_return_2(pcp, val) raw_cpu_generic_add_return(pcp, val) +#endif +#ifndef raw_cpu_add_return_4 +#define raw_cpu_add_return_4(pcp, val) raw_cpu_generic_add_return(pcp, val) +#endif +#ifndef raw_cpu_add_return_8 +#define raw_cpu_add_return_8(pcp, val) raw_cpu_generic_add_return(pcp, val) +#endif +#ifndef raw_cpu_xchg_1 +#define raw_cpu_xchg_1(pcp, nval) raw_cpu_generic_xchg(pcp, nval) +#endif +#ifndef raw_cpu_xchg_2 +#define raw_cpu_xchg_2(pcp, nval) raw_cpu_generic_xchg(pcp, nval) +#endif +#ifndef raw_cpu_xchg_4 +#define raw_cpu_xchg_4(pcp, nval) raw_cpu_generic_xchg(pcp, nval) +#endif +#ifndef raw_cpu_xchg_8 +#define raw_cpu_xchg_8(pcp, nval) raw_cpu_generic_xchg(pcp, nval) #endif -#ifndef PER_CPU_ATTRIBUTES -#define PER_CPU_ATTRIBUTES +#ifndef raw_cpu_cmpxchg_1 +#define raw_cpu_cmpxchg_1(pcp, oval, nval) \ + raw_cpu_generic_cmpxchg(pcp, oval, nval) +#endif +#ifndef raw_cpu_cmpxchg_2 +#define raw_cpu_cmpxchg_2(pcp, oval, nval) \ + raw_cpu_generic_cmpxchg(pcp, oval, nval) +#endif +#ifndef raw_cpu_cmpxchg_4 +#define raw_cpu_cmpxchg_4(pcp, oval, nval) \ + raw_cpu_generic_cmpxchg(pcp, oval, nval) +#endif +#ifndef raw_cpu_cmpxchg_8 +#define raw_cpu_cmpxchg_8(pcp, oval, nval) \ + raw_cpu_generic_cmpxchg(pcp, oval, nval) #endif -#ifndef PER_CPU_DEF_ATTRIBUTES -#define PER_CPU_DEF_ATTRIBUTES +#ifndef raw_cpu_cmpxchg_double_1 +#define raw_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2) \ + raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) +#endif +#ifndef raw_cpu_cmpxchg_double_2 +#define raw_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2) \ + raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) +#endif +#ifndef raw_cpu_cmpxchg_double_4 +#define raw_cpu_cmpxchg_double_4(pcp1, 
pcp2, oval1, oval2, nval1, nval2) \ + raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) +#endif +#ifndef raw_cpu_cmpxchg_double_8 +#define raw_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2) \ + raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) +#endif + +#ifndef this_cpu_read_1 +#define this_cpu_read_1(pcp) this_cpu_generic_read(pcp) +#endif +#ifndef this_cpu_read_2 +#define this_cpu_read_2(pcp) this_cpu_generic_read(pcp) +#endif +#ifndef this_cpu_read_4 +#define this_cpu_read_4(pcp) this_cpu_generic_read(pcp) +#endif +#ifndef this_cpu_read_8 +#define this_cpu_read_8(pcp) this_cpu_generic_read(pcp) #endif -/* Keep until we have removed all uses of __this_cpu_ptr */ -#define __this_cpu_ptr raw_cpu_ptr +#ifndef this_cpu_write_1 +#define this_cpu_write_1(pcp, val) this_cpu_generic_to_op(pcp, val, =) +#endif +#ifndef this_cpu_write_2 +#define this_cpu_write_2(pcp, val) this_cpu_generic_to_op(pcp, val, =) +#endif +#ifndef this_cpu_write_4 +#define this_cpu_write_4(pcp, val) this_cpu_generic_to_op(pcp, val, =) +#endif +#ifndef this_cpu_write_8 +#define this_cpu_write_8(pcp, val) this_cpu_generic_to_op(pcp, val, =) +#endif + +#ifndef this_cpu_add_1 +#define this_cpu_add_1(pcp, val) this_cpu_generic_to_op(pcp, val, +=) +#endif +#ifndef this_cpu_add_2 +#define this_cpu_add_2(pcp, val) this_cpu_generic_to_op(pcp, val, +=) +#endif +#ifndef this_cpu_add_4 +#define this_cpu_add_4(pcp, val) this_cpu_generic_to_op(pcp, val, +=) +#endif +#ifndef this_cpu_add_8 +#define this_cpu_add_8(pcp, val) this_cpu_generic_to_op(pcp, val, +=) +#endif + +#ifndef this_cpu_and_1 +#define this_cpu_and_1(pcp, val) this_cpu_generic_to_op(pcp, val, &=) +#endif +#ifndef this_cpu_and_2 +#define this_cpu_and_2(pcp, val) this_cpu_generic_to_op(pcp, val, &=) +#endif +#ifndef this_cpu_and_4 +#define this_cpu_and_4(pcp, val) this_cpu_generic_to_op(pcp, val, &=) +#endif +#ifndef this_cpu_and_8 +#define this_cpu_and_8(pcp, val) this_cpu_generic_to_op(pcp, val, &=) +#endif + +#ifndef this_cpu_or_1 +#define this_cpu_or_1(pcp, val) this_cpu_generic_to_op(pcp, val, |=) +#endif +#ifndef this_cpu_or_2 +#define this_cpu_or_2(pcp, val) this_cpu_generic_to_op(pcp, val, |=) +#endif +#ifndef this_cpu_or_4 +#define this_cpu_or_4(pcp, val) this_cpu_generic_to_op(pcp, val, |=) +#endif +#ifndef this_cpu_or_8 +#define this_cpu_or_8(pcp, val) this_cpu_generic_to_op(pcp, val, |=) +#endif + +#ifndef this_cpu_add_return_1 +#define this_cpu_add_return_1(pcp, val) this_cpu_generic_add_return(pcp, val) +#endif +#ifndef this_cpu_add_return_2 +#define this_cpu_add_return_2(pcp, val) this_cpu_generic_add_return(pcp, val) +#endif +#ifndef this_cpu_add_return_4 +#define this_cpu_add_return_4(pcp, val) this_cpu_generic_add_return(pcp, val) +#endif +#ifndef this_cpu_add_return_8 +#define this_cpu_add_return_8(pcp, val) this_cpu_generic_add_return(pcp, val) +#endif + +#ifndef this_cpu_xchg_1 +#define this_cpu_xchg_1(pcp, nval) this_cpu_generic_xchg(pcp, nval) +#endif +#ifndef this_cpu_xchg_2 +#define this_cpu_xchg_2(pcp, nval) this_cpu_generic_xchg(pcp, nval) +#endif +#ifndef this_cpu_xchg_4 +#define this_cpu_xchg_4(pcp, nval) this_cpu_generic_xchg(pcp, nval) +#endif +#ifndef this_cpu_xchg_8 +#define this_cpu_xchg_8(pcp, nval) this_cpu_generic_xchg(pcp, nval) +#endif + +#ifndef this_cpu_cmpxchg_1 +#define this_cpu_cmpxchg_1(pcp, oval, nval) \ + this_cpu_generic_cmpxchg(pcp, oval, nval) +#endif +#ifndef this_cpu_cmpxchg_2 +#define this_cpu_cmpxchg_2(pcp, oval, nval) \ + this_cpu_generic_cmpxchg(pcp, oval, 
nval) +#endif +#ifndef this_cpu_cmpxchg_4 +#define this_cpu_cmpxchg_4(pcp, oval, nval) \ + this_cpu_generic_cmpxchg(pcp, oval, nval) +#endif +#ifndef this_cpu_cmpxchg_8 +#define this_cpu_cmpxchg_8(pcp, oval, nval) \ + this_cpu_generic_cmpxchg(pcp, oval, nval) +#endif + +#ifndef this_cpu_cmpxchg_double_1 +#define this_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2) \ + this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) +#endif +#ifndef this_cpu_cmpxchg_double_2 +#define this_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2) \ + this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) +#endif +#ifndef this_cpu_cmpxchg_double_4 +#define this_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2) \ + this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) +#endif +#ifndef this_cpu_cmpxchg_double_8 +#define this_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2) \ + this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) +#endif #endif /* _ASM_GENERIC_PERCPU_H_ */ diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h index a5fc7d01aad6..c93fff16776c 100644 --- a/include/linux/percpu-defs.h +++ b/include/linux/percpu-defs.h @@ -1,6 +1,40 @@ +/* + * linux/percpu-defs.h - basic definitions for percpu areas + * + * DO NOT INCLUDE DIRECTLY OUTSIDE PERCPU IMPLEMENTATION PROPER. + * + * This file is separate from linux/percpu.h to avoid cyclic inclusion + * dependency from arch header files. Only to be included from + * asm/percpu.h. + * + * This file includes macros necessary to declare percpu sections and + * variables, and definitions of percpu accessors and operations. It + * should provide enough percpu features to arch header files even when + * they can only include asm/percpu.h to avoid cyclic inclusion dependency. + */ + #ifndef _LINUX_PERCPU_DEFS_H #define _LINUX_PERCPU_DEFS_H +#ifdef CONFIG_SMP + +#ifdef MODULE +#define PER_CPU_SHARED_ALIGNED_SECTION "" +#define PER_CPU_ALIGNED_SECTION "" +#else +#define PER_CPU_SHARED_ALIGNED_SECTION "..shared_aligned" +#define PER_CPU_ALIGNED_SECTION "..shared_aligned" +#endif +#define PER_CPU_FIRST_SECTION "..first" + +#else + +#define PER_CPU_SHARED_ALIGNED_SECTION "" +#define PER_CPU_ALIGNED_SECTION "..shared_aligned" +#define PER_CPU_FIRST_SECTION "" + +#endif + /* * Base implementations of per-CPU variable declarations and definitions, where * the section in which the variable is to be placed is provided by the @@ -19,19 +53,6 @@ __attribute__((section(".discard"), unused)) /* - * Macro which verifies @ptr is a percpu pointer without evaluating - * @ptr. This is to be used in percpu accessors to verify that the - * input parameter is a percpu pointer. - * - * + 0 is required in order to convert the pointer type from a - * potential array type to a pointer to a single item of the array. - */ -#define __verify_pcpu_ptr(ptr) do { \ - const void __percpu *__vpp_verify = (typeof((ptr) + 0))NULL; \ - (void)__vpp_verify; \ -} while (0) - -/* * s390 and alpha modules require percpu variables to be defined as * weak to force the compiler to generate GOT based external * references for them. This is necessary because percpu sections @@ -164,4 +185,337 @@ #define EXPORT_PER_CPU_SYMBOL_GPL(var) #endif +/* + * Accessors and operations. + */ +#ifndef __ASSEMBLY__ + +/* + * __verify_pcpu_ptr() verifies @ptr is a percpu pointer without evaluating + * @ptr and is invoked once before a percpu area is accessed by all + * accessors and operations. 
This is performed in the generic part of + * percpu and arch overrides don't need to worry about it; however, if an + * arch wants to implement an arch-specific percpu accessor or operation, + * it may use __verify_pcpu_ptr() to verify the parameters. + * + * + 0 is required in order to convert the pointer type from a + * potential array type to a pointer to a single item of the array. + */ +#define __verify_pcpu_ptr(ptr) \ +do { \ + const void __percpu *__vpp_verify = (typeof((ptr) + 0))NULL; \ + (void)__vpp_verify; \ +} while (0) + +#ifdef CONFIG_SMP + +/* + * Add an offset to a pointer but keep the pointer as-is. Use RELOC_HIDE() + * to prevent the compiler from making incorrect assumptions about the + * pointer value. The weird cast keeps both GCC and sparse happy. + */ +#define SHIFT_PERCPU_PTR(__p, __offset) \ + RELOC_HIDE((typeof(*(__p)) __kernel __force *)(__p), (__offset)) + +#define per_cpu_ptr(ptr, cpu) \ +({ \ + __verify_pcpu_ptr(ptr); \ + SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu))); \ +}) + +#define raw_cpu_ptr(ptr) \ +({ \ + __verify_pcpu_ptr(ptr); \ + arch_raw_cpu_ptr(ptr); \ +}) + +#ifdef CONFIG_DEBUG_PREEMPT +#define this_cpu_ptr(ptr) \ +({ \ + __verify_pcpu_ptr(ptr); \ + SHIFT_PERCPU_PTR(ptr, my_cpu_offset); \ +}) +#else +#define this_cpu_ptr(ptr) raw_cpu_ptr(ptr) +#endif + +#else /* CONFIG_SMP */ + +#define VERIFY_PERCPU_PTR(__p) \ +({ \ + __verify_pcpu_ptr(__p); \ + (typeof(*(__p)) __kernel __force *)(__p); \ +}) + +#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); VERIFY_PERCPU_PTR(ptr); }) +#define raw_cpu_ptr(ptr) per_cpu_ptr(ptr, 0) +#define this_cpu_ptr(ptr) raw_cpu_ptr(ptr) + +#endif /* CONFIG_SMP */ + +#define per_cpu(var, cpu) (*per_cpu_ptr(&(var), cpu)) +#define __raw_get_cpu_var(var) (*raw_cpu_ptr(&(var))) +#define __get_cpu_var(var) (*this_cpu_ptr(&(var))) + +/* keep until we have removed all uses of __this_cpu_ptr */ +#define __this_cpu_ptr(ptr) raw_cpu_ptr(ptr) + +/* + * Must be an lvalue. Since @var must be a simple identifier, + * we force a syntax error here if it isn't. + */ +#define get_cpu_var(var) \ +(*({ \ + preempt_disable(); \ + this_cpu_ptr(&var); \ +})) + +/* + * The weird & is necessary because sparse considers (void)(var) to be + * a direct dereference of percpu variable (var). + */ +#define put_cpu_var(var) \ +do { \ + (void)&(var); \ + preempt_enable(); \ +} while (0) + +#define get_cpu_ptr(var) \ +({ \ + preempt_disable(); \ + this_cpu_ptr(var); \ +}) + +#define put_cpu_ptr(var) \ +do { \ + (void)(var); \ + preempt_enable(); \ +} while (0) + +/* + * Branching function to split up a function into a set of functions that + * are called for different scalar sizes of the objects handled. + */ + +extern void __bad_size_call_parameter(void); + +#ifdef CONFIG_DEBUG_PREEMPT +extern void __this_cpu_preempt_check(const char *op); +#else +static inline void __this_cpu_preempt_check(const char *op) { } +#endif + +#define __pcpu_size_call_return(stem, variable) \ +({ \ + typeof(variable) pscr_ret__; \ + __verify_pcpu_ptr(&(variable)); \ + switch(sizeof(variable)) { \ + case 1: pscr_ret__ = stem##1(variable); break; \ + case 2: pscr_ret__ = stem##2(variable); break; \ + case 4: pscr_ret__ = stem##4(variable); break; \ + case 8: pscr_ret__ = stem##8(variable); break; \ + default: \ + __bad_size_call_parameter(); break; \ + } \ + pscr_ret__; \ +}) + +#define __pcpu_size_call_return2(stem, variable, ...) 
\ +({ \ + typeof(variable) pscr2_ret__; \ + __verify_pcpu_ptr(&(variable)); \ + switch(sizeof(variable)) { \ + case 1: pscr2_ret__ = stem##1(variable, __VA_ARGS__); break; \ + case 2: pscr2_ret__ = stem##2(variable, __VA_ARGS__); break; \ + case 4: pscr2_ret__ = stem##4(variable, __VA_ARGS__); break; \ + case 8: pscr2_ret__ = stem##8(variable, __VA_ARGS__); break; \ + default: \ + __bad_size_call_parameter(); break; \ + } \ + pscr2_ret__; \ +}) + +/* + * Special handling for cmpxchg_double. cmpxchg_double is passed two + * percpu variables. The first has to be aligned to a double word + * boundary and the second has to follow directly thereafter. + * We enforce this on all architectures even if they don't support + * a double cmpxchg instruction, since it's a cheap requirement, and it + * avoids breaking the requirement for architectures with the instruction. + */ +#define __pcpu_double_call_return_bool(stem, pcp1, pcp2, ...) \ +({ \ + bool pdcrb_ret__; \ + __verify_pcpu_ptr(&(pcp1)); \ + BUILD_BUG_ON(sizeof(pcp1) != sizeof(pcp2)); \ + VM_BUG_ON((unsigned long)(&(pcp1)) % (2 * sizeof(pcp1))); \ + VM_BUG_ON((unsigned long)(&(pcp2)) != \ + (unsigned long)(&(pcp1)) + sizeof(pcp1)); \ + switch(sizeof(pcp1)) { \ + case 1: pdcrb_ret__ = stem##1(pcp1, pcp2, __VA_ARGS__); break; \ + case 2: pdcrb_ret__ = stem##2(pcp1, pcp2, __VA_ARGS__); break; \ + case 4: pdcrb_ret__ = stem##4(pcp1, pcp2, __VA_ARGS__); break; \ + case 8: pdcrb_ret__ = stem##8(pcp1, pcp2, __VA_ARGS__); break; \ + default: \ + __bad_size_call_parameter(); break; \ + } \ + pdcrb_ret__; \ +}) + +#define __pcpu_size_call(stem, variable, ...) \ +do { \ + __verify_pcpu_ptr(&(variable)); \ + switch(sizeof(variable)) { \ + case 1: stem##1(variable, __VA_ARGS__);break; \ + case 2: stem##2(variable, __VA_ARGS__);break; \ + case 4: stem##4(variable, __VA_ARGS__);break; \ + case 8: stem##8(variable, __VA_ARGS__);break; \ + default: \ + __bad_size_call_parameter();break; \ + } \ +} while (0) + +/* + * this_cpu operations (C) 2008-2013 Christoph Lameter <cl@linux.com> + * + * Optimized manipulation for memory allocated through the per cpu + * allocator or for addresses of per cpu variables. + * + * These operation guarantee exclusivity of access for other operations + * on the *same* processor. The assumption is that per cpu data is only + * accessed by a single processor instance (the current one). + * + * The arch code can provide optimized implementation by defining macros + * for certain scalar sizes. F.e. provide this_cpu_add_2() to provide per + * cpu atomic operations for 2 byte sized RMW actions. If arch code does + * not provide operations for a scalar size then the fallback in the + * generic code will be used. + * + * cmpxchg_double replaces two adjacent scalars at once. The first two + * parameters are per cpu variables which have to be of the same size. A + * truth value is returned to indicate success or failure (since a double + * register result is difficult to handle). There is very limited hardware + * support for these operations, so only certain sizes may work. + */ + +/* + * Operations for contexts where we do not want to do any checks for + * preemptions. Unless strictly necessary, always use [__]this_cpu_*() + * instead. 
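The __pcpu_size_call*() helpers above are what let a single this_cpu_read()/this_cpu_add() call cover 1, 2, 4 and 8 byte objects: sizeof() selects the sized op, and the generic fallbacks now living in asm-generic/percpu.h supply it whenever the architecture does not. As a rough illustration (hypothetical counter name, 64-bit build, expansion simplified), this_cpu_add(nr_foo, 1) ends up as:

    DEFINE_PER_CPU(unsigned long, nr_foo);

    /* __pcpu_size_call() picks this_cpu_add_8() from sizeof(nr_foo); with the
     * generic this_cpu_generic_to_op() fallback that is roughly: */
    unsigned long flags;

    raw_local_irq_save(flags);
    *raw_cpu_ptr(&nr_foo) += 1;
    raw_local_irq_restore(flags);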
+ * + * If there is no other protection through preempt disable and/or disabling + * interupts then one of these RMW operations can show unexpected behavior + * because the execution thread was rescheduled on another processor or an + * interrupt occurred and the same percpu variable was modified from the + * interrupt context. + */ +#define raw_cpu_read(pcp) __pcpu_size_call_return(raw_cpu_read_, pcp) +#define raw_cpu_write(pcp, val) __pcpu_size_call(raw_cpu_write_, pcp, val) +#define raw_cpu_add(pcp, val) __pcpu_size_call(raw_cpu_add_, pcp, val) +#define raw_cpu_and(pcp, val) __pcpu_size_call(raw_cpu_and_, pcp, val) +#define raw_cpu_or(pcp, val) __pcpu_size_call(raw_cpu_or_, pcp, val) +#define raw_cpu_add_return(pcp, val) __pcpu_size_call_return2(raw_cpu_add_return_, pcp, val) +#define raw_cpu_xchg(pcp, nval) __pcpu_size_call_return2(raw_cpu_xchg_, pcp, nval) +#define raw_cpu_cmpxchg(pcp, oval, nval) \ + __pcpu_size_call_return2(raw_cpu_cmpxchg_, pcp, oval, nval) +#define raw_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \ + __pcpu_double_call_return_bool(raw_cpu_cmpxchg_double_, pcp1, pcp2, oval1, oval2, nval1, nval2) + +#define raw_cpu_sub(pcp, val) raw_cpu_add(pcp, -(val)) +#define raw_cpu_inc(pcp) raw_cpu_add(pcp, 1) +#define raw_cpu_dec(pcp) raw_cpu_sub(pcp, 1) +#define raw_cpu_sub_return(pcp, val) raw_cpu_add_return(pcp, -(typeof(pcp))(val)) +#define raw_cpu_inc_return(pcp) raw_cpu_add_return(pcp, 1) +#define raw_cpu_dec_return(pcp) raw_cpu_add_return(pcp, -1) + +/* + * Operations for contexts that are safe from preemption/interrupts. These + * operations verify that preemption is disabled. + */ +#define __this_cpu_read(pcp) \ +({ \ + __this_cpu_preempt_check("read"); \ + raw_cpu_read(pcp); \ +}) + +#define __this_cpu_write(pcp, val) \ +({ \ + __this_cpu_preempt_check("write"); \ + raw_cpu_write(pcp, val); \ +}) + +#define __this_cpu_add(pcp, val) \ +({ \ + __this_cpu_preempt_check("add"); \ + raw_cpu_add(pcp, val); \ +}) + +#define __this_cpu_and(pcp, val) \ +({ \ + __this_cpu_preempt_check("and"); \ + raw_cpu_and(pcp, val); \ +}) + +#define __this_cpu_or(pcp, val) \ +({ \ + __this_cpu_preempt_check("or"); \ + raw_cpu_or(pcp, val); \ +}) + +#define __this_cpu_add_return(pcp, val) \ +({ \ + __this_cpu_preempt_check("add_return"); \ + raw_cpu_add_return(pcp, val); \ +}) + +#define __this_cpu_xchg(pcp, nval) \ +({ \ + __this_cpu_preempt_check("xchg"); \ + raw_cpu_xchg(pcp, nval); \ +}) + +#define __this_cpu_cmpxchg(pcp, oval, nval) \ +({ \ + __this_cpu_preempt_check("cmpxchg"); \ + raw_cpu_cmpxchg(pcp, oval, nval); \ +}) + +#define __this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \ +({ __this_cpu_preempt_check("cmpxchg_double"); \ + raw_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2); \ +}) + +#define __this_cpu_sub(pcp, val) __this_cpu_add(pcp, -(typeof(pcp))(val)) +#define __this_cpu_inc(pcp) __this_cpu_add(pcp, 1) +#define __this_cpu_dec(pcp) __this_cpu_sub(pcp, 1) +#define __this_cpu_sub_return(pcp, val) __this_cpu_add_return(pcp, -(typeof(pcp))(val)) +#define __this_cpu_inc_return(pcp) __this_cpu_add_return(pcp, 1) +#define __this_cpu_dec_return(pcp) __this_cpu_add_return(pcp, -1) + +/* + * Operations with implied preemption protection. These operations can be + * used without worrying about preemption. Note that interrupts may still + * occur while an operation is in progress and if the interrupt modifies + * the variable too then RMW actions may not be reliable. 
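Side by side, the three access families in this hunk differ only in how much checking and protection the generic fallbacks wrap around the same read-modify-write (reusing the hypothetical nr_foo counter from above):

    raw_cpu_inc(nr_foo);    /* no checks: caller must already be pinned to this CPU  */
    __this_cpu_inc(nr_foo); /* raw op plus __this_cpu_preempt_check() when
                             * CONFIG_DEBUG_PREEMPT is enabled                        */
    this_cpu_inc(nr_foo);   /* generic fallback masks local interrupts around the
                             * RMW, so it is safe against preemption and interrupts   */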
+ */ +#define this_cpu_read(pcp) __pcpu_size_call_return(this_cpu_read_, pcp) +#define this_cpu_write(pcp, val) __pcpu_size_call(this_cpu_write_, pcp, val) +#define this_cpu_add(pcp, val) __pcpu_size_call(this_cpu_add_, pcp, val) +#define this_cpu_and(pcp, val) __pcpu_size_call(this_cpu_and_, pcp, val) +#define this_cpu_or(pcp, val) __pcpu_size_call(this_cpu_or_, pcp, val) +#define this_cpu_add_return(pcp, val) __pcpu_size_call_return2(this_cpu_add_return_, pcp, val) +#define this_cpu_xchg(pcp, nval) __pcpu_size_call_return2(this_cpu_xchg_, pcp, nval) +#define this_cpu_cmpxchg(pcp, oval, nval) \ + __pcpu_size_call_return2(this_cpu_cmpxchg_, pcp, oval, nval) +#define this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \ + __pcpu_double_call_return_bool(this_cpu_cmpxchg_double_, pcp1, pcp2, oval1, oval2, nval1, nval2) + +#define this_cpu_sub(pcp, val) this_cpu_add(pcp, -(typeof(pcp))(val)) +#define this_cpu_inc(pcp) this_cpu_add(pcp, 1) +#define this_cpu_dec(pcp) this_cpu_sub(pcp, 1) +#define this_cpu_sub_return(pcp, val) this_cpu_add_return(pcp, -(typeof(pcp))(val)) +#define this_cpu_inc_return(pcp) this_cpu_add_return(pcp, 1) +#define this_cpu_dec_return(pcp) this_cpu_add_return(pcp, -1) + +#endif /* __ASSEMBLY__ */ #endif /* _LINUX_PERCPU_DEFS_H */ diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h index 5d8920e23073..3dfbf237cd8f 100644 --- a/include/linux/percpu-refcount.h +++ b/include/linux/percpu-refcount.h @@ -57,11 +57,9 @@ struct percpu_ref { atomic_t count; /* * The low bit of the pointer indicates whether the ref is in percpu - * mode; if set, then get/put will manipulate the atomic_t (this is a - * hack because we need to keep the pointer around for - * percpu_ref_kill_rcu()) + * mode; if set, then get/put will manipulate the atomic_t. */ - unsigned __percpu *pcpu_count; + unsigned long pcpu_count_ptr; percpu_ref_func_t *release; percpu_ref_func_t *confirm_kill; struct rcu_head rcu; @@ -69,7 +67,8 @@ struct percpu_ref { int __must_check percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release); -void percpu_ref_cancel_init(struct percpu_ref *ref); +void percpu_ref_reinit(struct percpu_ref *ref); +void percpu_ref_exit(struct percpu_ref *ref); void percpu_ref_kill_and_confirm(struct percpu_ref *ref, percpu_ref_func_t *confirm_kill); @@ -88,12 +87,28 @@ static inline void percpu_ref_kill(struct percpu_ref *ref) return percpu_ref_kill_and_confirm(ref, NULL); } -#define PCPU_STATUS_BITS 2 -#define PCPU_STATUS_MASK ((1 << PCPU_STATUS_BITS) - 1) -#define PCPU_REF_PTR 0 #define PCPU_REF_DEAD 1 -#define REF_STATUS(count) (((unsigned long) count) & PCPU_STATUS_MASK) +/* + * Internal helper. Don't use outside percpu-refcount proper. The + * function doesn't return the pointer and let the caller test it for NULL + * because doing so forces the compiler to generate two conditional + * branches as it can't assume that @ref->pcpu_count is not NULL. 
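The pcpu_count_ptr rework above is plain pointer tagging: percpu allocations are at least word aligned, so bit 0 is free to carry PCPU_REF_DEAD. A minimal userspace-style model of the test (ignoring the RCU protection and read barrier the real helper relies on):

    #include <stdbool.h>
    #include <stdint.h>

    #define PCPU_REF_DEAD 1UL

    static bool ref_alive(uintptr_t pcpu_count_ptr, unsigned int **countp)
    {
            if (pcpu_count_ptr & PCPU_REF_DEAD)       /* ref switched to atomic mode */
                    return false;
            *countp = (unsigned int *)pcpu_count_ptr; /* still a live percpu pointer */
            return true;
    }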
+ */ +static inline bool __pcpu_ref_alive(struct percpu_ref *ref, + unsigned __percpu **pcpu_countp) +{ + unsigned long pcpu_ptr = ACCESS_ONCE(ref->pcpu_count_ptr); + + /* paired with smp_store_release() in percpu_ref_reinit() */ + smp_read_barrier_depends(); + + if (unlikely(pcpu_ptr & PCPU_REF_DEAD)) + return false; + + *pcpu_countp = (unsigned __percpu *)pcpu_ptr; + return true; +} /** * percpu_ref_get - increment a percpu refcount @@ -107,9 +122,7 @@ static inline void percpu_ref_get(struct percpu_ref *ref) rcu_read_lock_sched(); - pcpu_count = ACCESS_ONCE(ref->pcpu_count); - - if (likely(REF_STATUS(pcpu_count) == PCPU_REF_PTR)) + if (__pcpu_ref_alive(ref, &pcpu_count)) this_cpu_inc(*pcpu_count); else atomic_inc(&ref->count); @@ -133,9 +146,7 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref) rcu_read_lock_sched(); - pcpu_count = ACCESS_ONCE(ref->pcpu_count); - - if (likely(REF_STATUS(pcpu_count) == PCPU_REF_PTR)) { + if (__pcpu_ref_alive(ref, &pcpu_count)) { this_cpu_inc(*pcpu_count); ret = true; } else { @@ -168,9 +179,7 @@ static inline bool percpu_ref_tryget_live(struct percpu_ref *ref) rcu_read_lock_sched(); - pcpu_count = ACCESS_ONCE(ref->pcpu_count); - - if (likely(REF_STATUS(pcpu_count) == PCPU_REF_PTR)) { + if (__pcpu_ref_alive(ref, &pcpu_count)) { this_cpu_inc(*pcpu_count); ret = true; } @@ -193,9 +202,7 @@ static inline void percpu_ref_put(struct percpu_ref *ref) rcu_read_lock_sched(); - pcpu_count = ACCESS_ONCE(ref->pcpu_count); - - if (likely(REF_STATUS(pcpu_count) == PCPU_REF_PTR)) + if (__pcpu_ref_alive(ref, &pcpu_count)) this_cpu_dec(*pcpu_count); else if (unlikely(atomic_dec_and_test(&ref->count))) ref->release(ref); @@ -203,4 +210,19 @@ static inline void percpu_ref_put(struct percpu_ref *ref) rcu_read_unlock_sched(); } +/** + * percpu_ref_is_zero - test whether a percpu refcount reached zero + * @ref: percpu_ref to test + * + * Returns %true if @ref reached zero. + */ +static inline bool percpu_ref_is_zero(struct percpu_ref *ref) +{ + unsigned __percpu *pcpu_count; + + if (__pcpu_ref_alive(ref, &pcpu_count)) + return false; + return !atomic_read(&ref->count); +} + #endif diff --git a/include/linux/percpu.h b/include/linux/percpu.h index 8419053d0f2e..6f61b61b7996 100644 --- a/include/linux/percpu.h +++ b/include/linux/percpu.h @@ -23,32 +23,6 @@ PERCPU_MODULE_RESERVE) #endif -/* - * Must be an lvalue. Since @var must be a simple identifier, - * we force a syntax error here if it isn't. - */ -#define get_cpu_var(var) (*({ \ - preempt_disable(); \ - this_cpu_ptr(&var); })) - -/* - * The weird & is necessary because sparse considers (void)(var) to be - * a direct dereference of percpu variable (var). - */ -#define put_cpu_var(var) do { \ - (void)&(var); \ - preempt_enable(); \ -} while (0) - -#define get_cpu_ptr(var) ({ \ - preempt_disable(); \ - this_cpu_ptr(var); }) - -#define put_cpu_ptr(var) do { \ - (void)(var); \ - preempt_enable(); \ -} while (0) - /* minimum unit size, also is the maximum supported allocation size */ #define PCPU_MIN_UNIT_SIZE PFN_ALIGN(32 << 10) @@ -140,17 +114,6 @@ extern int __init pcpu_page_first_chunk(size_t reserved_size, pcpu_fc_populate_pte_fn_t populate_pte_fn); #endif -/* - * Use this to get to a cpu's version of the per-cpu object - * dynamically allocated. Non-atomic access to the current CPU's - * version should probably be combined with get_cpu()/put_cpu(). 
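Although the comment above is being dropped from percpu.h, it still describes the intended use of per_cpu_ptr() with dynamically allocated percpu memory; a typical (hypothetical, error handling omitted) counter pattern is:

    unsigned long __percpu *hits = alloc_percpu(unsigned long);
    unsigned long total = 0;
    int cpu;

    this_cpu_inc(*hits);                    /* fast path: touch only this CPU's slot  */

    for_each_possible_cpu(cpu)              /* slow path: fold every slot into a sum  */
            total += *per_cpu_ptr(hits, cpu);

    free_percpu(hits);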
- */ -#ifdef CONFIG_SMP -#define per_cpu_ptr(ptr, cpu) SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu))) -#else -#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); VERIFY_PERCPU_PTR((ptr)); }) -#endif - extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align); extern bool is_kernel_percpu_address(unsigned long addr); @@ -166,640 +129,4 @@ extern phys_addr_t per_cpu_ptr_to_phys(void *addr); #define alloc_percpu(type) \ (typeof(type) __percpu *)__alloc_percpu(sizeof(type), __alignof__(type)) -/* - * Branching function to split up a function into a set of functions that - * are called for different scalar sizes of the objects handled. - */ - -extern void __bad_size_call_parameter(void); - -#ifdef CONFIG_DEBUG_PREEMPT -extern void __this_cpu_preempt_check(const char *op); -#else -static inline void __this_cpu_preempt_check(const char *op) { } -#endif - -#define __pcpu_size_call_return(stem, variable) \ -({ typeof(variable) pscr_ret__; \ - __verify_pcpu_ptr(&(variable)); \ - switch(sizeof(variable)) { \ - case 1: pscr_ret__ = stem##1(variable);break; \ - case 2: pscr_ret__ = stem##2(variable);break; \ - case 4: pscr_ret__ = stem##4(variable);break; \ - case 8: pscr_ret__ = stem##8(variable);break; \ - default: \ - __bad_size_call_parameter();break; \ - } \ - pscr_ret__; \ -}) - -#define __pcpu_size_call_return2(stem, variable, ...) \ -({ \ - typeof(variable) pscr2_ret__; \ - __verify_pcpu_ptr(&(variable)); \ - switch(sizeof(variable)) { \ - case 1: pscr2_ret__ = stem##1(variable, __VA_ARGS__); break; \ - case 2: pscr2_ret__ = stem##2(variable, __VA_ARGS__); break; \ - case 4: pscr2_ret__ = stem##4(variable, __VA_ARGS__); break; \ - case 8: pscr2_ret__ = stem##8(variable, __VA_ARGS__); break; \ - default: \ - __bad_size_call_parameter(); break; \ - } \ - pscr2_ret__; \ -}) - -/* - * Special handling for cmpxchg_double. cmpxchg_double is passed two - * percpu variables. The first has to be aligned to a double word - * boundary and the second has to follow directly thereafter. - * We enforce this on all architectures even if they don't support - * a double cmpxchg instruction, since it's a cheap requirement, and it - * avoids breaking the requirement for architectures with the instruction. - */ -#define __pcpu_double_call_return_bool(stem, pcp1, pcp2, ...) \ -({ \ - bool pdcrb_ret__; \ - __verify_pcpu_ptr(&pcp1); \ - BUILD_BUG_ON(sizeof(pcp1) != sizeof(pcp2)); \ - VM_BUG_ON((unsigned long)(&pcp1) % (2 * sizeof(pcp1))); \ - VM_BUG_ON((unsigned long)(&pcp2) != \ - (unsigned long)(&pcp1) + sizeof(pcp1)); \ - switch(sizeof(pcp1)) { \ - case 1: pdcrb_ret__ = stem##1(pcp1, pcp2, __VA_ARGS__); break; \ - case 2: pdcrb_ret__ = stem##2(pcp1, pcp2, __VA_ARGS__); break; \ - case 4: pdcrb_ret__ = stem##4(pcp1, pcp2, __VA_ARGS__); break; \ - case 8: pdcrb_ret__ = stem##8(pcp1, pcp2, __VA_ARGS__); break; \ - default: \ - __bad_size_call_parameter(); break; \ - } \ - pdcrb_ret__; \ -}) - -#define __pcpu_size_call(stem, variable, ...) \ -do { \ - __verify_pcpu_ptr(&(variable)); \ - switch(sizeof(variable)) { \ - case 1: stem##1(variable, __VA_ARGS__);break; \ - case 2: stem##2(variable, __VA_ARGS__);break; \ - case 4: stem##4(variable, __VA_ARGS__);break; \ - case 8: stem##8(variable, __VA_ARGS__);break; \ - default: \ - __bad_size_call_parameter();break; \ - } \ -} while (0) - -/* - * this_cpu operations (C) 2008-2013 Christoph Lameter <cl@linux.com> - * - * Optimized manipulation for memory allocated through the per cpu - * allocator or for addresses of per cpu variables. 
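For statically defined percpu variables the consolidated per_cpu() wrapper behaves the same as the definitions removed here; for example (hypothetical variable and helper):

    static DEFINE_PER_CPU(int, nr_events);

    static int sum_events(void)
    {
            int cpu, sum = 0;

            for_each_online_cpu(cpu)
                    sum += per_cpu(nr_events, cpu);
            return sum;
    }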
- * - * These operation guarantee exclusivity of access for other operations - * on the *same* processor. The assumption is that per cpu data is only - * accessed by a single processor instance (the current one). - * - * The first group is used for accesses that must be done in a - * preemption safe way since we know that the context is not preempt - * safe. Interrupts may occur. If the interrupt modifies the variable - * too then RMW actions will not be reliable. - * - * The arch code can provide optimized functions in two ways: - * - * 1. Override the function completely. F.e. define this_cpu_add(). - * The arch must then ensure that the various scalar format passed - * are handled correctly. - * - * 2. Provide functions for certain scalar sizes. F.e. provide - * this_cpu_add_2() to provide per cpu atomic operations for 2 byte - * sized RMW actions. If arch code does not provide operations for - * a scalar size then the fallback in the generic code will be - * used. - */ - -#define _this_cpu_generic_read(pcp) \ -({ typeof(pcp) ret__; \ - preempt_disable(); \ - ret__ = *this_cpu_ptr(&(pcp)); \ - preempt_enable(); \ - ret__; \ -}) - -#ifndef this_cpu_read -# ifndef this_cpu_read_1 -# define this_cpu_read_1(pcp) _this_cpu_generic_read(pcp) -# endif -# ifndef this_cpu_read_2 -# define this_cpu_read_2(pcp) _this_cpu_generic_read(pcp) -# endif -# ifndef this_cpu_read_4 -# define this_cpu_read_4(pcp) _this_cpu_generic_read(pcp) -# endif -# ifndef this_cpu_read_8 -# define this_cpu_read_8(pcp) _this_cpu_generic_read(pcp) -# endif -# define this_cpu_read(pcp) __pcpu_size_call_return(this_cpu_read_, (pcp)) -#endif - -#define _this_cpu_generic_to_op(pcp, val, op) \ -do { \ - unsigned long flags; \ - raw_local_irq_save(flags); \ - *raw_cpu_ptr(&(pcp)) op val; \ - raw_local_irq_restore(flags); \ -} while (0) - -#ifndef this_cpu_write -# ifndef this_cpu_write_1 -# define this_cpu_write_1(pcp, val) _this_cpu_generic_to_op((pcp), (val), =) -# endif -# ifndef this_cpu_write_2 -# define this_cpu_write_2(pcp, val) _this_cpu_generic_to_op((pcp), (val), =) -# endif -# ifndef this_cpu_write_4 -# define this_cpu_write_4(pcp, val) _this_cpu_generic_to_op((pcp), (val), =) -# endif -# ifndef this_cpu_write_8 -# define this_cpu_write_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), =) -# endif -# define this_cpu_write(pcp, val) __pcpu_size_call(this_cpu_write_, (pcp), (val)) -#endif - -#ifndef this_cpu_add -# ifndef this_cpu_add_1 -# define this_cpu_add_1(pcp, val) _this_cpu_generic_to_op((pcp), (val), +=) -# endif -# ifndef this_cpu_add_2 -# define this_cpu_add_2(pcp, val) _this_cpu_generic_to_op((pcp), (val), +=) -# endif -# ifndef this_cpu_add_4 -# define this_cpu_add_4(pcp, val) _this_cpu_generic_to_op((pcp), (val), +=) -# endif -# ifndef this_cpu_add_8 -# define this_cpu_add_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), +=) -# endif -# define this_cpu_add(pcp, val) __pcpu_size_call(this_cpu_add_, (pcp), (val)) -#endif - -#ifndef this_cpu_sub -# define this_cpu_sub(pcp, val) this_cpu_add((pcp), -(typeof(pcp))(val)) -#endif - -#ifndef this_cpu_inc -# define this_cpu_inc(pcp) this_cpu_add((pcp), 1) -#endif - -#ifndef this_cpu_dec -# define this_cpu_dec(pcp) this_cpu_sub((pcp), 1) -#endif - -#ifndef this_cpu_and -# ifndef this_cpu_and_1 -# define this_cpu_and_1(pcp, val) _this_cpu_generic_to_op((pcp), (val), &=) -# endif -# ifndef this_cpu_and_2 -# define this_cpu_and_2(pcp, val) _this_cpu_generic_to_op((pcp), (val), &=) -# endif -# ifndef this_cpu_and_4 -# define this_cpu_and_4(pcp, val) 
_this_cpu_generic_to_op((pcp), (val), &=) -# endif -# ifndef this_cpu_and_8 -# define this_cpu_and_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), &=) -# endif -# define this_cpu_and(pcp, val) __pcpu_size_call(this_cpu_and_, (pcp), (val)) -#endif - -#ifndef this_cpu_or -# ifndef this_cpu_or_1 -# define this_cpu_or_1(pcp, val) _this_cpu_generic_to_op((pcp), (val), |=) -# endif -# ifndef this_cpu_or_2 -# define this_cpu_or_2(pcp, val) _this_cpu_generic_to_op((pcp), (val), |=) -# endif -# ifndef this_cpu_or_4 -# define this_cpu_or_4(pcp, val) _this_cpu_generic_to_op((pcp), (val), |=) -# endif -# ifndef this_cpu_or_8 -# define this_cpu_or_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), |=) -# endif -# define this_cpu_or(pcp, val) __pcpu_size_call(this_cpu_or_, (pcp), (val)) -#endif - -#define _this_cpu_generic_add_return(pcp, val) \ -({ \ - typeof(pcp) ret__; \ - unsigned long flags; \ - raw_local_irq_save(flags); \ - raw_cpu_add(pcp, val); \ - ret__ = raw_cpu_read(pcp); \ - raw_local_irq_restore(flags); \ - ret__; \ -}) - -#ifndef this_cpu_add_return -# ifndef this_cpu_add_return_1 -# define this_cpu_add_return_1(pcp, val) _this_cpu_generic_add_return(pcp, val) -# endif -# ifndef this_cpu_add_return_2 -# define this_cpu_add_return_2(pcp, val) _this_cpu_generic_add_return(pcp, val) -# endif -# ifndef this_cpu_add_return_4 -# define this_cpu_add_return_4(pcp, val) _this_cpu_generic_add_return(pcp, val) -# endif -# ifndef this_cpu_add_return_8 -# define this_cpu_add_return_8(pcp, val) _this_cpu_generic_add_return(pcp, val) -# endif -# define this_cpu_add_return(pcp, val) __pcpu_size_call_return2(this_cpu_add_return_, pcp, val) -#endif - -#define this_cpu_sub_return(pcp, val) this_cpu_add_return(pcp, -(typeof(pcp))(val)) -#define this_cpu_inc_return(pcp) this_cpu_add_return(pcp, 1) -#define this_cpu_dec_return(pcp) this_cpu_add_return(pcp, -1) - -#define _this_cpu_generic_xchg(pcp, nval) \ -({ typeof(pcp) ret__; \ - unsigned long flags; \ - raw_local_irq_save(flags); \ - ret__ = raw_cpu_read(pcp); \ - raw_cpu_write(pcp, nval); \ - raw_local_irq_restore(flags); \ - ret__; \ -}) - -#ifndef this_cpu_xchg -# ifndef this_cpu_xchg_1 -# define this_cpu_xchg_1(pcp, nval) _this_cpu_generic_xchg(pcp, nval) -# endif -# ifndef this_cpu_xchg_2 -# define this_cpu_xchg_2(pcp, nval) _this_cpu_generic_xchg(pcp, nval) -# endif -# ifndef this_cpu_xchg_4 -# define this_cpu_xchg_4(pcp, nval) _this_cpu_generic_xchg(pcp, nval) -# endif -# ifndef this_cpu_xchg_8 -# define this_cpu_xchg_8(pcp, nval) _this_cpu_generic_xchg(pcp, nval) -# endif -# define this_cpu_xchg(pcp, nval) \ - __pcpu_size_call_return2(this_cpu_xchg_, (pcp), nval) -#endif - -#define _this_cpu_generic_cmpxchg(pcp, oval, nval) \ -({ \ - typeof(pcp) ret__; \ - unsigned long flags; \ - raw_local_irq_save(flags); \ - ret__ = raw_cpu_read(pcp); \ - if (ret__ == (oval)) \ - raw_cpu_write(pcp, nval); \ - raw_local_irq_restore(flags); \ - ret__; \ -}) - -#ifndef this_cpu_cmpxchg -# ifndef this_cpu_cmpxchg_1 -# define this_cpu_cmpxchg_1(pcp, oval, nval) _this_cpu_generic_cmpxchg(pcp, oval, nval) -# endif -# ifndef this_cpu_cmpxchg_2 -# define this_cpu_cmpxchg_2(pcp, oval, nval) _this_cpu_generic_cmpxchg(pcp, oval, nval) -# endif -# ifndef this_cpu_cmpxchg_4 -# define this_cpu_cmpxchg_4(pcp, oval, nval) _this_cpu_generic_cmpxchg(pcp, oval, nval) -# endif -# ifndef this_cpu_cmpxchg_8 -# define this_cpu_cmpxchg_8(pcp, oval, nval) _this_cpu_generic_cmpxchg(pcp, oval, nval) -# endif -# define this_cpu_cmpxchg(pcp, oval, nval) \ - 
__pcpu_size_call_return2(this_cpu_cmpxchg_, pcp, oval, nval) -#endif - -/* - * cmpxchg_double replaces two adjacent scalars at once. The first - * two parameters are per cpu variables which have to be of the same - * size. A truth value is returned to indicate success or failure - * (since a double register result is difficult to handle). There is - * very limited hardware support for these operations, so only certain - * sizes may work. - */ -#define _this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \ -({ \ - int ret__; \ - unsigned long flags; \ - raw_local_irq_save(flags); \ - ret__ = raw_cpu_generic_cmpxchg_double(pcp1, pcp2, \ - oval1, oval2, nval1, nval2); \ - raw_local_irq_restore(flags); \ - ret__; \ -}) - -#ifndef this_cpu_cmpxchg_double -# ifndef this_cpu_cmpxchg_double_1 -# define this_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2) \ - _this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) -# endif -# ifndef this_cpu_cmpxchg_double_2 -# define this_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2) \ - _this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) -# endif -# ifndef this_cpu_cmpxchg_double_4 -# define this_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2) \ - _this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) -# endif -# ifndef this_cpu_cmpxchg_double_8 -# define this_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2) \ - _this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) -# endif -# define this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \ - __pcpu_double_call_return_bool(this_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2)) -#endif - -/* - * Generic percpu operations for contexts where we do not want to do - * any checks for preemptiosn. - * - * If there is no other protection through preempt disable and/or - * disabling interupts then one of these RMW operations can show unexpected - * behavior because the execution thread was rescheduled on another processor - * or an interrupt occurred and the same percpu variable was modified from - * the interrupt context. 
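The layout rule in the cmpxchg_double comment (first scalar aligned to a double word, second immediately after) is usually met by packing both into one suitably aligned percpu struct. A hypothetical sketch, with the field and helper names made up for illustration:

    struct pcpu_pair {
            void            *ptr;   /* must start on a 2 * sizeof(long) boundary */
            unsigned long   seq;    /* must follow immediately after             */
    } __aligned(2 * sizeof(void *));

    static DEFINE_PER_CPU(struct pcpu_pair, pair);

    /* updates both fields only if both old values still match; true on success */
    static bool try_update(void *old_ptr, unsigned long old_seq,
                           void *new_ptr, unsigned long new_seq)
    {
            return this_cpu_cmpxchg_double(pair.ptr, pair.seq,
                                           old_ptr, old_seq, new_ptr, new_seq);
    }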
- */ -#ifndef raw_cpu_read -# ifndef raw_cpu_read_1 -# define raw_cpu_read_1(pcp) (*raw_cpu_ptr(&(pcp))) -# endif -# ifndef raw_cpu_read_2 -# define raw_cpu_read_2(pcp) (*raw_cpu_ptr(&(pcp))) -# endif -# ifndef raw_cpu_read_4 -# define raw_cpu_read_4(pcp) (*raw_cpu_ptr(&(pcp))) -# endif -# ifndef raw_cpu_read_8 -# define raw_cpu_read_8(pcp) (*raw_cpu_ptr(&(pcp))) -# endif -# define raw_cpu_read(pcp) __pcpu_size_call_return(raw_cpu_read_, (pcp)) -#endif - -#define raw_cpu_generic_to_op(pcp, val, op) \ -do { \ - *raw_cpu_ptr(&(pcp)) op val; \ -} while (0) - - -#ifndef raw_cpu_write -# ifndef raw_cpu_write_1 -# define raw_cpu_write_1(pcp, val) raw_cpu_generic_to_op((pcp), (val), =) -# endif -# ifndef raw_cpu_write_2 -# define raw_cpu_write_2(pcp, val) raw_cpu_generic_to_op((pcp), (val), =) -# endif -# ifndef raw_cpu_write_4 -# define raw_cpu_write_4(pcp, val) raw_cpu_generic_to_op((pcp), (val), =) -# endif -# ifndef raw_cpu_write_8 -# define raw_cpu_write_8(pcp, val) raw_cpu_generic_to_op((pcp), (val), =) -# endif -# define raw_cpu_write(pcp, val) __pcpu_size_call(raw_cpu_write_, (pcp), (val)) -#endif - -#ifndef raw_cpu_add -# ifndef raw_cpu_add_1 -# define raw_cpu_add_1(pcp, val) raw_cpu_generic_to_op((pcp), (val), +=) -# endif -# ifndef raw_cpu_add_2 -# define raw_cpu_add_2(pcp, val) raw_cpu_generic_to_op((pcp), (val), +=) -# endif -# ifndef raw_cpu_add_4 -# define raw_cpu_add_4(pcp, val) raw_cpu_generic_to_op((pcp), (val), +=) -# endif -# ifndef raw_cpu_add_8 -# define raw_cpu_add_8(pcp, val) raw_cpu_generic_to_op((pcp), (val), +=) -# endif -# define raw_cpu_add(pcp, val) __pcpu_size_call(raw_cpu_add_, (pcp), (val)) -#endif - -#ifndef raw_cpu_sub -# define raw_cpu_sub(pcp, val) raw_cpu_add((pcp), -(val)) -#endif - -#ifndef raw_cpu_inc -# define raw_cpu_inc(pcp) raw_cpu_add((pcp), 1) -#endif - -#ifndef raw_cpu_dec -# define raw_cpu_dec(pcp) raw_cpu_sub((pcp), 1) -#endif - -#ifndef raw_cpu_and -# ifndef raw_cpu_and_1 -# define raw_cpu_and_1(pcp, val) raw_cpu_generic_to_op((pcp), (val), &=) -# endif -# ifndef raw_cpu_and_2 -# define raw_cpu_and_2(pcp, val) raw_cpu_generic_to_op((pcp), (val), &=) -# endif -# ifndef raw_cpu_and_4 -# define raw_cpu_and_4(pcp, val) raw_cpu_generic_to_op((pcp), (val), &=) -# endif -# ifndef raw_cpu_and_8 -# define raw_cpu_and_8(pcp, val) raw_cpu_generic_to_op((pcp), (val), &=) -# endif -# define raw_cpu_and(pcp, val) __pcpu_size_call(raw_cpu_and_, (pcp), (val)) -#endif - -#ifndef raw_cpu_or -# ifndef raw_cpu_or_1 -# define raw_cpu_or_1(pcp, val) raw_cpu_generic_to_op((pcp), (val), |=) -# endif -# ifndef raw_cpu_or_2 -# define raw_cpu_or_2(pcp, val) raw_cpu_generic_to_op((pcp), (val), |=) -# endif -# ifndef raw_cpu_or_4 -# define raw_cpu_or_4(pcp, val) raw_cpu_generic_to_op((pcp), (val), |=) -# endif -# ifndef raw_cpu_or_8 -# define raw_cpu_or_8(pcp, val) raw_cpu_generic_to_op((pcp), (val), |=) -# endif -# define raw_cpu_or(pcp, val) __pcpu_size_call(raw_cpu_or_, (pcp), (val)) -#endif - -#define raw_cpu_generic_add_return(pcp, val) \ -({ \ - raw_cpu_add(pcp, val); \ - raw_cpu_read(pcp); \ -}) - -#ifndef raw_cpu_add_return -# ifndef raw_cpu_add_return_1 -# define raw_cpu_add_return_1(pcp, val) raw_cpu_generic_add_return(pcp, val) -# endif -# ifndef raw_cpu_add_return_2 -# define raw_cpu_add_return_2(pcp, val) raw_cpu_generic_add_return(pcp, val) -# endif -# ifndef raw_cpu_add_return_4 -# define raw_cpu_add_return_4(pcp, val) raw_cpu_generic_add_return(pcp, val) -# endif -# ifndef raw_cpu_add_return_8 -# define raw_cpu_add_return_8(pcp, val) 
raw_cpu_generic_add_return(pcp, val) -# endif -# define raw_cpu_add_return(pcp, val) \ - __pcpu_size_call_return2(raw_cpu_add_return_, pcp, val) -#endif - -#define raw_cpu_sub_return(pcp, val) raw_cpu_add_return(pcp, -(typeof(pcp))(val)) -#define raw_cpu_inc_return(pcp) raw_cpu_add_return(pcp, 1) -#define raw_cpu_dec_return(pcp) raw_cpu_add_return(pcp, -1) - -#define raw_cpu_generic_xchg(pcp, nval) \ -({ typeof(pcp) ret__; \ - ret__ = raw_cpu_read(pcp); \ - raw_cpu_write(pcp, nval); \ - ret__; \ -}) - -#ifndef raw_cpu_xchg -# ifndef raw_cpu_xchg_1 -# define raw_cpu_xchg_1(pcp, nval) raw_cpu_generic_xchg(pcp, nval) -# endif -# ifndef raw_cpu_xchg_2 -# define raw_cpu_xchg_2(pcp, nval) raw_cpu_generic_xchg(pcp, nval) -# endif -# ifndef raw_cpu_xchg_4 -# define raw_cpu_xchg_4(pcp, nval) raw_cpu_generic_xchg(pcp, nval) -# endif -# ifndef raw_cpu_xchg_8 -# define raw_cpu_xchg_8(pcp, nval) raw_cpu_generic_xchg(pcp, nval) -# endif -# define raw_cpu_xchg(pcp, nval) \ - __pcpu_size_call_return2(raw_cpu_xchg_, (pcp), nval) -#endif - -#define raw_cpu_generic_cmpxchg(pcp, oval, nval) \ -({ \ - typeof(pcp) ret__; \ - ret__ = raw_cpu_read(pcp); \ - if (ret__ == (oval)) \ - raw_cpu_write(pcp, nval); \ - ret__; \ -}) - -#ifndef raw_cpu_cmpxchg -# ifndef raw_cpu_cmpxchg_1 -# define raw_cpu_cmpxchg_1(pcp, oval, nval) raw_cpu_generic_cmpxchg(pcp, oval, nval) -# endif -# ifndef raw_cpu_cmpxchg_2 -# define raw_cpu_cmpxchg_2(pcp, oval, nval) raw_cpu_generic_cmpxchg(pcp, oval, nval) -# endif -# ifndef raw_cpu_cmpxchg_4 -# define raw_cpu_cmpxchg_4(pcp, oval, nval) raw_cpu_generic_cmpxchg(pcp, oval, nval) -# endif -# ifndef raw_cpu_cmpxchg_8 -# define raw_cpu_cmpxchg_8(pcp, oval, nval) raw_cpu_generic_cmpxchg(pcp, oval, nval) -# endif -# define raw_cpu_cmpxchg(pcp, oval, nval) \ - __pcpu_size_call_return2(raw_cpu_cmpxchg_, pcp, oval, nval) -#endif - -#define raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \ -({ \ - int __ret = 0; \ - if (raw_cpu_read(pcp1) == (oval1) && \ - raw_cpu_read(pcp2) == (oval2)) { \ - raw_cpu_write(pcp1, (nval1)); \ - raw_cpu_write(pcp2, (nval2)); \ - __ret = 1; \ - } \ - (__ret); \ -}) - -#ifndef raw_cpu_cmpxchg_double -# ifndef raw_cpu_cmpxchg_double_1 -# define raw_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2) \ - raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) -# endif -# ifndef raw_cpu_cmpxchg_double_2 -# define raw_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2) \ - raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) -# endif -# ifndef raw_cpu_cmpxchg_double_4 -# define raw_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2) \ - raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) -# endif -# ifndef raw_cpu_cmpxchg_double_8 -# define raw_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2) \ - raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) -# endif -# define raw_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \ - __pcpu_double_call_return_bool(raw_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2)) -#endif - -/* - * Generic percpu operations for context that are safe from preemption/interrupts. 
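Restated: the __this_cpu_*() forms assume the caller has already shut off preemption; the usual (hypothetical) pattern, reusing nr_foo from above, is:

    preempt_disable();
    /* no migration possible between these two ops; with CONFIG_DEBUG_PREEMPT
     * the __this_cpu_preempt_check() calls would warn otherwise */
    __this_cpu_write(nr_foo, 0);
    __this_cpu_inc(nr_foo);
    preempt_enable();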
- */ -#ifndef __this_cpu_read -# define __this_cpu_read(pcp) \ - (__this_cpu_preempt_check("read"),__pcpu_size_call_return(raw_cpu_read_, (pcp))) -#endif - -#ifndef __this_cpu_write -# define __this_cpu_write(pcp, val) \ -do { __this_cpu_preempt_check("write"); \ - __pcpu_size_call(raw_cpu_write_, (pcp), (val)); \ -} while (0) -#endif - -#ifndef __this_cpu_add -# define __this_cpu_add(pcp, val) \ -do { __this_cpu_preempt_check("add"); \ - __pcpu_size_call(raw_cpu_add_, (pcp), (val)); \ -} while (0) -#endif - -#ifndef __this_cpu_sub -# define __this_cpu_sub(pcp, val) __this_cpu_add((pcp), -(typeof(pcp))(val)) -#endif - -#ifndef __this_cpu_inc -# define __this_cpu_inc(pcp) __this_cpu_add((pcp), 1) -#endif - -#ifndef __this_cpu_dec -# define __this_cpu_dec(pcp) __this_cpu_sub((pcp), 1) -#endif - -#ifndef __this_cpu_and -# define __this_cpu_and(pcp, val) \ -do { __this_cpu_preempt_check("and"); \ - __pcpu_size_call(raw_cpu_and_, (pcp), (val)); \ -} while (0) - -#endif - -#ifndef __this_cpu_or -# define __this_cpu_or(pcp, val) \ -do { __this_cpu_preempt_check("or"); \ - __pcpu_size_call(raw_cpu_or_, (pcp), (val)); \ -} while (0) -#endif - -#ifndef __this_cpu_add_return -# define __this_cpu_add_return(pcp, val) \ - (__this_cpu_preempt_check("add_return"),__pcpu_size_call_return2(raw_cpu_add_return_, pcp, val)) -#endif - -#define __this_cpu_sub_return(pcp, val) __this_cpu_add_return(pcp, -(typeof(pcp))(val)) -#define __this_cpu_inc_return(pcp) __this_cpu_add_return(pcp, 1) -#define __this_cpu_dec_return(pcp) __this_cpu_add_return(pcp, -1) - -#ifndef __this_cpu_xchg -# define __this_cpu_xchg(pcp, nval) \ - (__this_cpu_preempt_check("xchg"),__pcpu_size_call_return2(raw_cpu_xchg_, (pcp), nval)) -#endif - -#ifndef __this_cpu_cmpxchg -# define __this_cpu_cmpxchg(pcp, oval, nval) \ - (__this_cpu_preempt_check("cmpxchg"),__pcpu_size_call_return2(raw_cpu_cmpxchg_, pcp, oval, nval)) -#endif - -#ifndef __this_cpu_cmpxchg_double -# define __this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \ - (__this_cpu_preempt_check("cmpxchg_double"),__pcpu_double_call_return_bool(raw_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))) -#endif - #endif /* __LINUX_PERCPU_H */ |