Diffstat (limited to 'include/linux')
53 files changed, 524 insertions, 182 deletions
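
Several of the hunks below share a theme: zero-length trailing arrays ("member[0]") are converted to C99 flexible array members ("member[]"), and include/linux/overflow.h gains a flex_array_size() helper next to the existing struct_size(). The following sketch is a hypothetical illustration only (frame_buf and frame_buf_alloc are made-up names, not part of this patch); it shows how such a structure is typically allocated with struct_size() and how the new flex_array_size() could be used for the copy, assuming both helpers behave as defined in the overflow.h hunk further down.

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

struct frame_buf {
	unsigned int	count;
	u32		frames[];	/* flexible array member, formerly "frames[0]" */
};

static struct frame_buf *frame_buf_alloc(const u32 *src, unsigned int count)
{
	struct frame_buf *fb;

	/* struct_size() saturates to SIZE_MAX on overflow, so kzalloc() fails safely */
	fb = kzalloc(struct_size(fb, frames, count), GFP_KERNEL);
	if (!fb)
		return NULL;

	fb->count = count;
	/* flex_array_size() computes count * sizeof(fb->frames[0]) with the same saturation */
	memcpy(fb->frames, src, flex_array_size(fb, frames, count));

	return fb;
}
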
diff --git a/include/linux/atomic-fallback.h b/include/linux/atomic-fallback.h index 2c4927bf7b8d..fd525c71d676 100644 --- a/include/linux/atomic-fallback.h +++ b/include/linux/atomic-fallback.h @@ -77,6 +77,9 @@ #endif /* cmpxchg64_relaxed */ +#define arch_atomic_read atomic_read +#define arch_atomic_read_acquire atomic_read_acquire + #ifndef atomic_read_acquire static __always_inline int atomic_read_acquire(const atomic_t *v) @@ -86,6 +89,9 @@ atomic_read_acquire(const atomic_t *v) #define atomic_read_acquire atomic_read_acquire #endif +#define arch_atomic_set atomic_set +#define arch_atomic_set_release atomic_set_release + #ifndef atomic_set_release static __always_inline void atomic_set_release(atomic_t *v, int i) @@ -95,6 +101,13 @@ atomic_set_release(atomic_t *v, int i) #define atomic_set_release atomic_set_release #endif +#define arch_atomic_add atomic_add + +#define arch_atomic_add_return atomic_add_return +#define arch_atomic_add_return_acquire atomic_add_return_acquire +#define arch_atomic_add_return_release atomic_add_return_release +#define arch_atomic_add_return_relaxed atomic_add_return_relaxed + #ifndef atomic_add_return_relaxed #define atomic_add_return_acquire atomic_add_return #define atomic_add_return_release atomic_add_return @@ -137,6 +150,11 @@ atomic_add_return(int i, atomic_t *v) #endif /* atomic_add_return_relaxed */ +#define arch_atomic_fetch_add atomic_fetch_add +#define arch_atomic_fetch_add_acquire atomic_fetch_add_acquire +#define arch_atomic_fetch_add_release atomic_fetch_add_release +#define arch_atomic_fetch_add_relaxed atomic_fetch_add_relaxed + #ifndef atomic_fetch_add_relaxed #define atomic_fetch_add_acquire atomic_fetch_add #define atomic_fetch_add_release atomic_fetch_add @@ -179,6 +197,13 @@ atomic_fetch_add(int i, atomic_t *v) #endif /* atomic_fetch_add_relaxed */ +#define arch_atomic_sub atomic_sub + +#define arch_atomic_sub_return atomic_sub_return +#define arch_atomic_sub_return_acquire atomic_sub_return_acquire +#define arch_atomic_sub_return_release atomic_sub_return_release +#define arch_atomic_sub_return_relaxed atomic_sub_return_relaxed + #ifndef atomic_sub_return_relaxed #define atomic_sub_return_acquire atomic_sub_return #define atomic_sub_return_release atomic_sub_return @@ -221,6 +246,11 @@ atomic_sub_return(int i, atomic_t *v) #endif /* atomic_sub_return_relaxed */ +#define arch_atomic_fetch_sub atomic_fetch_sub +#define arch_atomic_fetch_sub_acquire atomic_fetch_sub_acquire +#define arch_atomic_fetch_sub_release atomic_fetch_sub_release +#define arch_atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed + #ifndef atomic_fetch_sub_relaxed #define atomic_fetch_sub_acquire atomic_fetch_sub #define atomic_fetch_sub_release atomic_fetch_sub @@ -263,6 +293,8 @@ atomic_fetch_sub(int i, atomic_t *v) #endif /* atomic_fetch_sub_relaxed */ +#define arch_atomic_inc atomic_inc + #ifndef atomic_inc static __always_inline void atomic_inc(atomic_t *v) @@ -272,6 +304,11 @@ atomic_inc(atomic_t *v) #define atomic_inc atomic_inc #endif +#define arch_atomic_inc_return atomic_inc_return +#define arch_atomic_inc_return_acquire atomic_inc_return_acquire +#define arch_atomic_inc_return_release atomic_inc_return_release +#define arch_atomic_inc_return_relaxed atomic_inc_return_relaxed + #ifndef atomic_inc_return_relaxed #ifdef atomic_inc_return #define atomic_inc_return_acquire atomic_inc_return @@ -353,6 +390,11 @@ atomic_inc_return(atomic_t *v) #endif /* atomic_inc_return_relaxed */ +#define arch_atomic_fetch_inc atomic_fetch_inc +#define 
arch_atomic_fetch_inc_acquire atomic_fetch_inc_acquire +#define arch_atomic_fetch_inc_release atomic_fetch_inc_release +#define arch_atomic_fetch_inc_relaxed atomic_fetch_inc_relaxed + #ifndef atomic_fetch_inc_relaxed #ifdef atomic_fetch_inc #define atomic_fetch_inc_acquire atomic_fetch_inc @@ -434,6 +476,8 @@ atomic_fetch_inc(atomic_t *v) #endif /* atomic_fetch_inc_relaxed */ +#define arch_atomic_dec atomic_dec + #ifndef atomic_dec static __always_inline void atomic_dec(atomic_t *v) @@ -443,6 +487,11 @@ atomic_dec(atomic_t *v) #define atomic_dec atomic_dec #endif +#define arch_atomic_dec_return atomic_dec_return +#define arch_atomic_dec_return_acquire atomic_dec_return_acquire +#define arch_atomic_dec_return_release atomic_dec_return_release +#define arch_atomic_dec_return_relaxed atomic_dec_return_relaxed + #ifndef atomic_dec_return_relaxed #ifdef atomic_dec_return #define atomic_dec_return_acquire atomic_dec_return @@ -524,6 +573,11 @@ atomic_dec_return(atomic_t *v) #endif /* atomic_dec_return_relaxed */ +#define arch_atomic_fetch_dec atomic_fetch_dec +#define arch_atomic_fetch_dec_acquire atomic_fetch_dec_acquire +#define arch_atomic_fetch_dec_release atomic_fetch_dec_release +#define arch_atomic_fetch_dec_relaxed atomic_fetch_dec_relaxed + #ifndef atomic_fetch_dec_relaxed #ifdef atomic_fetch_dec #define atomic_fetch_dec_acquire atomic_fetch_dec @@ -605,6 +659,13 @@ atomic_fetch_dec(atomic_t *v) #endif /* atomic_fetch_dec_relaxed */ +#define arch_atomic_and atomic_and + +#define arch_atomic_fetch_and atomic_fetch_and +#define arch_atomic_fetch_and_acquire atomic_fetch_and_acquire +#define arch_atomic_fetch_and_release atomic_fetch_and_release +#define arch_atomic_fetch_and_relaxed atomic_fetch_and_relaxed + #ifndef atomic_fetch_and_relaxed #define atomic_fetch_and_acquire atomic_fetch_and #define atomic_fetch_and_release atomic_fetch_and @@ -647,6 +708,8 @@ atomic_fetch_and(int i, atomic_t *v) #endif /* atomic_fetch_and_relaxed */ +#define arch_atomic_andnot atomic_andnot + #ifndef atomic_andnot static __always_inline void atomic_andnot(int i, atomic_t *v) @@ -656,6 +719,11 @@ atomic_andnot(int i, atomic_t *v) #define atomic_andnot atomic_andnot #endif +#define arch_atomic_fetch_andnot atomic_fetch_andnot +#define arch_atomic_fetch_andnot_acquire atomic_fetch_andnot_acquire +#define arch_atomic_fetch_andnot_release atomic_fetch_andnot_release +#define arch_atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed + #ifndef atomic_fetch_andnot_relaxed #ifdef atomic_fetch_andnot #define atomic_fetch_andnot_acquire atomic_fetch_andnot @@ -737,6 +805,13 @@ atomic_fetch_andnot(int i, atomic_t *v) #endif /* atomic_fetch_andnot_relaxed */ +#define arch_atomic_or atomic_or + +#define arch_atomic_fetch_or atomic_fetch_or +#define arch_atomic_fetch_or_acquire atomic_fetch_or_acquire +#define arch_atomic_fetch_or_release atomic_fetch_or_release +#define arch_atomic_fetch_or_relaxed atomic_fetch_or_relaxed + #ifndef atomic_fetch_or_relaxed #define atomic_fetch_or_acquire atomic_fetch_or #define atomic_fetch_or_release atomic_fetch_or @@ -779,6 +854,13 @@ atomic_fetch_or(int i, atomic_t *v) #endif /* atomic_fetch_or_relaxed */ +#define arch_atomic_xor atomic_xor + +#define arch_atomic_fetch_xor atomic_fetch_xor +#define arch_atomic_fetch_xor_acquire atomic_fetch_xor_acquire +#define arch_atomic_fetch_xor_release atomic_fetch_xor_release +#define arch_atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed + #ifndef atomic_fetch_xor_relaxed #define atomic_fetch_xor_acquire atomic_fetch_xor #define 
atomic_fetch_xor_release atomic_fetch_xor @@ -821,6 +903,11 @@ atomic_fetch_xor(int i, atomic_t *v) #endif /* atomic_fetch_xor_relaxed */ +#define arch_atomic_xchg atomic_xchg +#define arch_atomic_xchg_acquire atomic_xchg_acquire +#define arch_atomic_xchg_release atomic_xchg_release +#define arch_atomic_xchg_relaxed atomic_xchg_relaxed + #ifndef atomic_xchg_relaxed #define atomic_xchg_acquire atomic_xchg #define atomic_xchg_release atomic_xchg @@ -863,6 +950,11 @@ atomic_xchg(atomic_t *v, int i) #endif /* atomic_xchg_relaxed */ +#define arch_atomic_cmpxchg atomic_cmpxchg +#define arch_atomic_cmpxchg_acquire atomic_cmpxchg_acquire +#define arch_atomic_cmpxchg_release atomic_cmpxchg_release +#define arch_atomic_cmpxchg_relaxed atomic_cmpxchg_relaxed + #ifndef atomic_cmpxchg_relaxed #define atomic_cmpxchg_acquire atomic_cmpxchg #define atomic_cmpxchg_release atomic_cmpxchg @@ -905,6 +997,11 @@ atomic_cmpxchg(atomic_t *v, int old, int new) #endif /* atomic_cmpxchg_relaxed */ +#define arch_atomic_try_cmpxchg atomic_try_cmpxchg +#define arch_atomic_try_cmpxchg_acquire atomic_try_cmpxchg_acquire +#define arch_atomic_try_cmpxchg_release atomic_try_cmpxchg_release +#define arch_atomic_try_cmpxchg_relaxed atomic_try_cmpxchg_relaxed + #ifndef atomic_try_cmpxchg_relaxed #ifdef atomic_try_cmpxchg #define atomic_try_cmpxchg_acquire atomic_try_cmpxchg @@ -1002,6 +1099,8 @@ atomic_try_cmpxchg(atomic_t *v, int *old, int new) #endif /* atomic_try_cmpxchg_relaxed */ +#define arch_atomic_sub_and_test atomic_sub_and_test + #ifndef atomic_sub_and_test /** * atomic_sub_and_test - subtract value from variable and test result @@ -1020,6 +1119,8 @@ atomic_sub_and_test(int i, atomic_t *v) #define atomic_sub_and_test atomic_sub_and_test #endif +#define arch_atomic_dec_and_test atomic_dec_and_test + #ifndef atomic_dec_and_test /** * atomic_dec_and_test - decrement and test @@ -1037,6 +1138,8 @@ atomic_dec_and_test(atomic_t *v) #define atomic_dec_and_test atomic_dec_and_test #endif +#define arch_atomic_inc_and_test atomic_inc_and_test + #ifndef atomic_inc_and_test /** * atomic_inc_and_test - increment and test @@ -1054,6 +1157,8 @@ atomic_inc_and_test(atomic_t *v) #define atomic_inc_and_test atomic_inc_and_test #endif +#define arch_atomic_add_negative atomic_add_negative + #ifndef atomic_add_negative /** * atomic_add_negative - add and test if negative @@ -1072,6 +1177,8 @@ atomic_add_negative(int i, atomic_t *v) #define atomic_add_negative atomic_add_negative #endif +#define arch_atomic_fetch_add_unless atomic_fetch_add_unless + #ifndef atomic_fetch_add_unless /** * atomic_fetch_add_unless - add unless the number is already a given value @@ -1097,6 +1204,8 @@ atomic_fetch_add_unless(atomic_t *v, int a, int u) #define atomic_fetch_add_unless atomic_fetch_add_unless #endif +#define arch_atomic_add_unless atomic_add_unless + #ifndef atomic_add_unless /** * atomic_add_unless - add unless the number is already a given value @@ -1115,6 +1224,8 @@ atomic_add_unless(atomic_t *v, int a, int u) #define atomic_add_unless atomic_add_unless #endif +#define arch_atomic_inc_not_zero atomic_inc_not_zero + #ifndef atomic_inc_not_zero /** * atomic_inc_not_zero - increment unless the number is zero @@ -1131,6 +1242,8 @@ atomic_inc_not_zero(atomic_t *v) #define atomic_inc_not_zero atomic_inc_not_zero #endif +#define arch_atomic_inc_unless_negative atomic_inc_unless_negative + #ifndef atomic_inc_unless_negative static __always_inline bool atomic_inc_unless_negative(atomic_t *v) @@ -1147,6 +1260,8 @@ atomic_inc_unless_negative(atomic_t *v) 
#define atomic_inc_unless_negative atomic_inc_unless_negative #endif +#define arch_atomic_dec_unless_positive atomic_dec_unless_positive + #ifndef atomic_dec_unless_positive static __always_inline bool atomic_dec_unless_positive(atomic_t *v) @@ -1163,6 +1278,8 @@ atomic_dec_unless_positive(atomic_t *v) #define atomic_dec_unless_positive atomic_dec_unless_positive #endif +#define arch_atomic_dec_if_positive atomic_dec_if_positive + #ifndef atomic_dec_if_positive static __always_inline int atomic_dec_if_positive(atomic_t *v) @@ -1184,6 +1301,9 @@ atomic_dec_if_positive(atomic_t *v) #include <asm-generic/atomic64.h> #endif +#define arch_atomic64_read atomic64_read +#define arch_atomic64_read_acquire atomic64_read_acquire + #ifndef atomic64_read_acquire static __always_inline s64 atomic64_read_acquire(const atomic64_t *v) @@ -1193,6 +1313,9 @@ atomic64_read_acquire(const atomic64_t *v) #define atomic64_read_acquire atomic64_read_acquire #endif +#define arch_atomic64_set atomic64_set +#define arch_atomic64_set_release atomic64_set_release + #ifndef atomic64_set_release static __always_inline void atomic64_set_release(atomic64_t *v, s64 i) @@ -1202,6 +1325,13 @@ atomic64_set_release(atomic64_t *v, s64 i) #define atomic64_set_release atomic64_set_release #endif +#define arch_atomic64_add atomic64_add + +#define arch_atomic64_add_return atomic64_add_return +#define arch_atomic64_add_return_acquire atomic64_add_return_acquire +#define arch_atomic64_add_return_release atomic64_add_return_release +#define arch_atomic64_add_return_relaxed atomic64_add_return_relaxed + #ifndef atomic64_add_return_relaxed #define atomic64_add_return_acquire atomic64_add_return #define atomic64_add_return_release atomic64_add_return @@ -1244,6 +1374,11 @@ atomic64_add_return(s64 i, atomic64_t *v) #endif /* atomic64_add_return_relaxed */ +#define arch_atomic64_fetch_add atomic64_fetch_add +#define arch_atomic64_fetch_add_acquire atomic64_fetch_add_acquire +#define arch_atomic64_fetch_add_release atomic64_fetch_add_release +#define arch_atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed + #ifndef atomic64_fetch_add_relaxed #define atomic64_fetch_add_acquire atomic64_fetch_add #define atomic64_fetch_add_release atomic64_fetch_add @@ -1286,6 +1421,13 @@ atomic64_fetch_add(s64 i, atomic64_t *v) #endif /* atomic64_fetch_add_relaxed */ +#define arch_atomic64_sub atomic64_sub + +#define arch_atomic64_sub_return atomic64_sub_return +#define arch_atomic64_sub_return_acquire atomic64_sub_return_acquire +#define arch_atomic64_sub_return_release atomic64_sub_return_release +#define arch_atomic64_sub_return_relaxed atomic64_sub_return_relaxed + #ifndef atomic64_sub_return_relaxed #define atomic64_sub_return_acquire atomic64_sub_return #define atomic64_sub_return_release atomic64_sub_return @@ -1328,6 +1470,11 @@ atomic64_sub_return(s64 i, atomic64_t *v) #endif /* atomic64_sub_return_relaxed */ +#define arch_atomic64_fetch_sub atomic64_fetch_sub +#define arch_atomic64_fetch_sub_acquire atomic64_fetch_sub_acquire +#define arch_atomic64_fetch_sub_release atomic64_fetch_sub_release +#define arch_atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed + #ifndef atomic64_fetch_sub_relaxed #define atomic64_fetch_sub_acquire atomic64_fetch_sub #define atomic64_fetch_sub_release atomic64_fetch_sub @@ -1370,6 +1517,8 @@ atomic64_fetch_sub(s64 i, atomic64_t *v) #endif /* atomic64_fetch_sub_relaxed */ +#define arch_atomic64_inc atomic64_inc + #ifndef atomic64_inc static __always_inline void atomic64_inc(atomic64_t *v) @@ -1379,6 +1528,11 @@ 
atomic64_inc(atomic64_t *v) #define atomic64_inc atomic64_inc #endif +#define arch_atomic64_inc_return atomic64_inc_return +#define arch_atomic64_inc_return_acquire atomic64_inc_return_acquire +#define arch_atomic64_inc_return_release atomic64_inc_return_release +#define arch_atomic64_inc_return_relaxed atomic64_inc_return_relaxed + #ifndef atomic64_inc_return_relaxed #ifdef atomic64_inc_return #define atomic64_inc_return_acquire atomic64_inc_return @@ -1460,6 +1614,11 @@ atomic64_inc_return(atomic64_t *v) #endif /* atomic64_inc_return_relaxed */ +#define arch_atomic64_fetch_inc atomic64_fetch_inc +#define arch_atomic64_fetch_inc_acquire atomic64_fetch_inc_acquire +#define arch_atomic64_fetch_inc_release atomic64_fetch_inc_release +#define arch_atomic64_fetch_inc_relaxed atomic64_fetch_inc_relaxed + #ifndef atomic64_fetch_inc_relaxed #ifdef atomic64_fetch_inc #define atomic64_fetch_inc_acquire atomic64_fetch_inc @@ -1541,6 +1700,8 @@ atomic64_fetch_inc(atomic64_t *v) #endif /* atomic64_fetch_inc_relaxed */ +#define arch_atomic64_dec atomic64_dec + #ifndef atomic64_dec static __always_inline void atomic64_dec(atomic64_t *v) @@ -1550,6 +1711,11 @@ atomic64_dec(atomic64_t *v) #define atomic64_dec atomic64_dec #endif +#define arch_atomic64_dec_return atomic64_dec_return +#define arch_atomic64_dec_return_acquire atomic64_dec_return_acquire +#define arch_atomic64_dec_return_release atomic64_dec_return_release +#define arch_atomic64_dec_return_relaxed atomic64_dec_return_relaxed + #ifndef atomic64_dec_return_relaxed #ifdef atomic64_dec_return #define atomic64_dec_return_acquire atomic64_dec_return @@ -1631,6 +1797,11 @@ atomic64_dec_return(atomic64_t *v) #endif /* atomic64_dec_return_relaxed */ +#define arch_atomic64_fetch_dec atomic64_fetch_dec +#define arch_atomic64_fetch_dec_acquire atomic64_fetch_dec_acquire +#define arch_atomic64_fetch_dec_release atomic64_fetch_dec_release +#define arch_atomic64_fetch_dec_relaxed atomic64_fetch_dec_relaxed + #ifndef atomic64_fetch_dec_relaxed #ifdef atomic64_fetch_dec #define atomic64_fetch_dec_acquire atomic64_fetch_dec @@ -1712,6 +1883,13 @@ atomic64_fetch_dec(atomic64_t *v) #endif /* atomic64_fetch_dec_relaxed */ +#define arch_atomic64_and atomic64_and + +#define arch_atomic64_fetch_and atomic64_fetch_and +#define arch_atomic64_fetch_and_acquire atomic64_fetch_and_acquire +#define arch_atomic64_fetch_and_release atomic64_fetch_and_release +#define arch_atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed + #ifndef atomic64_fetch_and_relaxed #define atomic64_fetch_and_acquire atomic64_fetch_and #define atomic64_fetch_and_release atomic64_fetch_and @@ -1754,6 +1932,8 @@ atomic64_fetch_and(s64 i, atomic64_t *v) #endif /* atomic64_fetch_and_relaxed */ +#define arch_atomic64_andnot atomic64_andnot + #ifndef atomic64_andnot static __always_inline void atomic64_andnot(s64 i, atomic64_t *v) @@ -1763,6 +1943,11 @@ atomic64_andnot(s64 i, atomic64_t *v) #define atomic64_andnot atomic64_andnot #endif +#define arch_atomic64_fetch_andnot atomic64_fetch_andnot +#define arch_atomic64_fetch_andnot_acquire atomic64_fetch_andnot_acquire +#define arch_atomic64_fetch_andnot_release atomic64_fetch_andnot_release +#define arch_atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed + #ifndef atomic64_fetch_andnot_relaxed #ifdef atomic64_fetch_andnot #define atomic64_fetch_andnot_acquire atomic64_fetch_andnot @@ -1844,6 +2029,13 @@ atomic64_fetch_andnot(s64 i, atomic64_t *v) #endif /* atomic64_fetch_andnot_relaxed */ +#define arch_atomic64_or atomic64_or + +#define 
arch_atomic64_fetch_or atomic64_fetch_or +#define arch_atomic64_fetch_or_acquire atomic64_fetch_or_acquire +#define arch_atomic64_fetch_or_release atomic64_fetch_or_release +#define arch_atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed + #ifndef atomic64_fetch_or_relaxed #define atomic64_fetch_or_acquire atomic64_fetch_or #define atomic64_fetch_or_release atomic64_fetch_or @@ -1886,6 +2078,13 @@ atomic64_fetch_or(s64 i, atomic64_t *v) #endif /* atomic64_fetch_or_relaxed */ +#define arch_atomic64_xor atomic64_xor + +#define arch_atomic64_fetch_xor atomic64_fetch_xor +#define arch_atomic64_fetch_xor_acquire atomic64_fetch_xor_acquire +#define arch_atomic64_fetch_xor_release atomic64_fetch_xor_release +#define arch_atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed + #ifndef atomic64_fetch_xor_relaxed #define atomic64_fetch_xor_acquire atomic64_fetch_xor #define atomic64_fetch_xor_release atomic64_fetch_xor @@ -1928,6 +2127,11 @@ atomic64_fetch_xor(s64 i, atomic64_t *v) #endif /* atomic64_fetch_xor_relaxed */ +#define arch_atomic64_xchg atomic64_xchg +#define arch_atomic64_xchg_acquire atomic64_xchg_acquire +#define arch_atomic64_xchg_release atomic64_xchg_release +#define arch_atomic64_xchg_relaxed atomic64_xchg_relaxed + #ifndef atomic64_xchg_relaxed #define atomic64_xchg_acquire atomic64_xchg #define atomic64_xchg_release atomic64_xchg @@ -1970,6 +2174,11 @@ atomic64_xchg(atomic64_t *v, s64 i) #endif /* atomic64_xchg_relaxed */ +#define arch_atomic64_cmpxchg atomic64_cmpxchg +#define arch_atomic64_cmpxchg_acquire atomic64_cmpxchg_acquire +#define arch_atomic64_cmpxchg_release atomic64_cmpxchg_release +#define arch_atomic64_cmpxchg_relaxed atomic64_cmpxchg_relaxed + #ifndef atomic64_cmpxchg_relaxed #define atomic64_cmpxchg_acquire atomic64_cmpxchg #define atomic64_cmpxchg_release atomic64_cmpxchg @@ -2012,6 +2221,11 @@ atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new) #endif /* atomic64_cmpxchg_relaxed */ +#define arch_atomic64_try_cmpxchg atomic64_try_cmpxchg +#define arch_atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg_acquire +#define arch_atomic64_try_cmpxchg_release atomic64_try_cmpxchg_release +#define arch_atomic64_try_cmpxchg_relaxed atomic64_try_cmpxchg_relaxed + #ifndef atomic64_try_cmpxchg_relaxed #ifdef atomic64_try_cmpxchg #define atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg @@ -2109,6 +2323,8 @@ atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new) #endif /* atomic64_try_cmpxchg_relaxed */ +#define arch_atomic64_sub_and_test atomic64_sub_and_test + #ifndef atomic64_sub_and_test /** * atomic64_sub_and_test - subtract value from variable and test result @@ -2127,6 +2343,8 @@ atomic64_sub_and_test(s64 i, atomic64_t *v) #define atomic64_sub_and_test atomic64_sub_and_test #endif +#define arch_atomic64_dec_and_test atomic64_dec_and_test + #ifndef atomic64_dec_and_test /** * atomic64_dec_and_test - decrement and test @@ -2144,6 +2362,8 @@ atomic64_dec_and_test(atomic64_t *v) #define atomic64_dec_and_test atomic64_dec_and_test #endif +#define arch_atomic64_inc_and_test atomic64_inc_and_test + #ifndef atomic64_inc_and_test /** * atomic64_inc_and_test - increment and test @@ -2161,6 +2381,8 @@ atomic64_inc_and_test(atomic64_t *v) #define atomic64_inc_and_test atomic64_inc_and_test #endif +#define arch_atomic64_add_negative atomic64_add_negative + #ifndef atomic64_add_negative /** * atomic64_add_negative - add and test if negative @@ -2179,6 +2401,8 @@ atomic64_add_negative(s64 i, atomic64_t *v) #define atomic64_add_negative atomic64_add_negative #endif +#define 
arch_atomic64_fetch_add_unless atomic64_fetch_add_unless + #ifndef atomic64_fetch_add_unless /** * atomic64_fetch_add_unless - add unless the number is already a given value @@ -2204,6 +2428,8 @@ atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u) #define atomic64_fetch_add_unless atomic64_fetch_add_unless #endif +#define arch_atomic64_add_unless atomic64_add_unless + #ifndef atomic64_add_unless /** * atomic64_add_unless - add unless the number is already a given value @@ -2222,6 +2448,8 @@ atomic64_add_unless(atomic64_t *v, s64 a, s64 u) #define atomic64_add_unless atomic64_add_unless #endif +#define arch_atomic64_inc_not_zero atomic64_inc_not_zero + #ifndef atomic64_inc_not_zero /** * atomic64_inc_not_zero - increment unless the number is zero @@ -2238,6 +2466,8 @@ atomic64_inc_not_zero(atomic64_t *v) #define atomic64_inc_not_zero atomic64_inc_not_zero #endif +#define arch_atomic64_inc_unless_negative atomic64_inc_unless_negative + #ifndef atomic64_inc_unless_negative static __always_inline bool atomic64_inc_unless_negative(atomic64_t *v) @@ -2254,6 +2484,8 @@ atomic64_inc_unless_negative(atomic64_t *v) #define atomic64_inc_unless_negative atomic64_inc_unless_negative #endif +#define arch_atomic64_dec_unless_positive atomic64_dec_unless_positive + #ifndef atomic64_dec_unless_positive static __always_inline bool atomic64_dec_unless_positive(atomic64_t *v) @@ -2270,6 +2502,8 @@ atomic64_dec_unless_positive(atomic64_t *v) #define atomic64_dec_unless_positive atomic64_dec_unless_positive #endif +#define arch_atomic64_dec_if_positive atomic64_dec_if_positive + #ifndef atomic64_dec_if_positive static __always_inline s64 atomic64_dec_if_positive(atomic64_t *v) @@ -2288,4 +2522,4 @@ atomic64_dec_if_positive(atomic64_t *v) #endif #endif /* _LINUX_ATOMIC_FALLBACK_H */ -// 1fac0941c79bf0ae100723cc2ac9b94061f0b67a +// 9d95b56f98d82a2a26c7b79ccdd0c47572d50a6f diff --git a/include/linux/bits.h b/include/linux/bits.h index 4671fbf28842..7f475d59a097 100644 --- a/include/linux/bits.h +++ b/include/linux/bits.h @@ -18,8 +18,7 @@ * position @h. For example * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000. 
*/ -#if !defined(__ASSEMBLY__) && \ - (!defined(CONFIG_CC_IS_GCC) || CONFIG_GCC_VERSION >= 49000) +#if !defined(__ASSEMBLY__) #include <linux/build_bug.h> #define GENMASK_INPUT_CHECK(h, l) \ (BUILD_BUG_ON_ZERO(__builtin_choose_expr( \ diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 8fd900998b4e..57241417ff2f 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -590,6 +590,7 @@ struct request_queue { u64 write_hints[BLK_MAX_WRITE_HINTS]; }; +/* Keep blk_queue_flag_name[] in sync with the definitions below */ #define QUEUE_FLAG_STOPPED 0 /* queue is stopped */ #define QUEUE_FLAG_DYING 1 /* queue being torn down */ #define QUEUE_FLAG_NOMERGES 3 /* disable merge attempts */ diff --git a/include/linux/can/skb.h b/include/linux/can/skb.h index a954def26c0d..900b9f4e0605 100644 --- a/include/linux/can/skb.h +++ b/include/linux/can/skb.h @@ -34,7 +34,7 @@ struct can_skb_priv { int ifindex; int skbcnt; - struct can_frame cf[0]; + struct can_frame cf[]; }; static inline struct can_skb_priv *can_skb_prv(struct sk_buff *skb) diff --git a/include/linux/cb710.h b/include/linux/cb710.h index 60de3fedd3a7..405657a9a0d5 100644 --- a/include/linux/cb710.h +++ b/include/linux/cb710.h @@ -36,7 +36,7 @@ struct cb710_chip { unsigned slot_mask; unsigned slots; spinlock_t irq_lock; - struct cb710_slot slot[0]; + struct cb710_slot slot[]; }; /* NOTE: cb710_chip.slots is modified only during device init/exit and diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h index 2247e71beb83..e5ed1c541e7f 100644 --- a/include/linux/ceph/libceph.h +++ b/include/linux/ceph/libceph.h @@ -52,8 +52,7 @@ struct ceph_options { unsigned long osd_idle_ttl; /* jiffies */ unsigned long osd_keepalive_timeout; /* jiffies */ unsigned long osd_request_timeout; /* jiffies */ - - u32 osd_req_flags; /* CEPH_OSD_FLAG_*, applied to each OSD request */ + u32 read_from_replica; /* CEPH_OSD_FLAG_BALANCE/LOCALIZE_READS */ /* * any type that can't be simply compared or doesn't need @@ -76,6 +75,7 @@ struct ceph_options { #define CEPH_OSD_KEEPALIVE_DEFAULT msecs_to_jiffies(5 * 1000) #define CEPH_OSD_IDLE_TTL_DEFAULT msecs_to_jiffies(60 * 1000) #define CEPH_OSD_REQUEST_TIMEOUT_DEFAULT 0 /* no timeout */ +#define CEPH_READ_FROM_REPLICA_DEFAULT 0 /* read from primary */ #define CEPH_MONC_HUNT_INTERVAL msecs_to_jiffies(3 * 1000) #define CEPH_MONC_PING_INTERVAL msecs_to_jiffies(10 * 1000) diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h index ee37256ec8bd..5e55302e3bf6 100644 --- a/include/linux/compiler-clang.h +++ b/include/linux/compiler-clang.h @@ -33,6 +33,14 @@ #define __no_sanitize_thread #endif +#if __has_feature(undefined_behavior_sanitizer) +/* GCC does not have __SANITIZE_UNDEFINED__ */ +#define __no_sanitize_undefined \ + __attribute__((no_sanitize("undefined"))) +#else +#define __no_sanitize_undefined +#endif + /* * Not all versions of clang implement the the type-generic versions * of the builtin overflow checkers. Fortunately, clang implements diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index 7dd4e0349ef3..0b1dc61f3955 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -11,7 +11,7 @@ + __GNUC_PATCHLEVEL__) /* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145 */ -#if GCC_VERSION < 40800 +#if GCC_VERSION < 40900 # error Sorry, your compiler is too old - please upgrade it. 
#endif @@ -150,6 +150,12 @@ #define __no_sanitize_thread #endif +#if __has_attribute(__no_sanitize_undefined__) +#define __no_sanitize_undefined __attribute__((no_sanitize_undefined)) +#else +#define __no_sanitize_undefined +#endif + #if GCC_VERSION >= 50100 #define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1 #endif diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 30827f82ad62..204e76856435 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -123,7 +123,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val, #ifdef CONFIG_DEBUG_ENTRY /* Begin/end of an instrumentation safe region */ #define instrumentation_begin() ({ \ - asm volatile("%c0:\n\t" \ + asm volatile("%c0: nop\n\t" \ ".pushsection .discard.instr_begin\n\t" \ ".long %c0b - .\n\t" \ ".popsection\n\t" : : "i" (__COUNTER__)); \ diff --git a/include/linux/compiler_attributes.h b/include/linux/compiler_attributes.h index cdf016596659..c8f03d2969df 100644 --- a/include/linux/compiler_attributes.h +++ b/include/linux/compiler_attributes.h @@ -40,6 +40,7 @@ # define __GCC4_has_attribute___noclone__ 1 # define __GCC4_has_attribute___nonstring__ 0 # define __GCC4_has_attribute___no_sanitize_address__ (__GNUC_MINOR__ >= 8) +# define __GCC4_has_attribute___no_sanitize_undefined__ (__GNUC_MINOR__ >= 9) # define __GCC4_has_attribute___fallthrough__ 0 #endif diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h index 21aed0981edf..01dd58c74d80 100644 --- a/include/linux/compiler_types.h +++ b/include/linux/compiler_types.h @@ -5,20 +5,20 @@ #ifndef __ASSEMBLY__ #ifdef __CHECKER__ -# define __user __attribute__((noderef, address_space(1))) # define __kernel __attribute__((address_space(0))) +# define __user __attribute__((noderef, address_space(__user))) # define __safe __attribute__((safe)) # define __force __attribute__((force)) # define __nocast __attribute__((nocast)) -# define __iomem __attribute__((noderef, address_space(2))) +# define __iomem __attribute__((noderef, address_space(__iomem))) # define __must_hold(x) __attribute__((context(x,1,1))) # define __acquires(x) __attribute__((context(x,0,1))) # define __releases(x) __attribute__((context(x,1,0))) # define __acquire(x) __context__(x,1) # define __release(x) __context__(x,-1) # define __cond_lock(x,c) ((c) ? 
({ __acquire(x); 1; }) : 0) -# define __percpu __attribute__((noderef, address_space(3))) -# define __rcu __attribute__((noderef, address_space(4))) +# define __percpu __attribute__((noderef, address_space(__percpu))) +# define __rcu __attribute__((noderef, address_space(__rcu))) # define __private __attribute__((noderef)) extern void __chk_user_ptr(const volatile void __user *); extern void __chk_io_ptr(const volatile void __iomem *); @@ -118,10 +118,6 @@ struct ftrace_likely_data { #define notrace __attribute__((__no_instrument_function__)) #endif -/* Section for code which can't be instrumented at all */ -#define noinstr \ - noinline notrace __attribute((__section__(".noinstr.text"))) - /* * it doesn't make sense on ARM (currently the only user of __naked) * to trace naked functions because then mcount is called without @@ -193,16 +189,18 @@ struct ftrace_likely_data { #define __no_kcsan __no_sanitize_thread #ifdef __SANITIZE_THREAD__ -# define __no_kcsan_or_inline __no_kcsan notrace __maybe_unused -# define __no_sanitize_or_inline __no_kcsan_or_inline -#else -# define __no_kcsan_or_inline __always_inline +# define __no_sanitize_or_inline __no_kcsan notrace __maybe_unused #endif #ifndef __no_sanitize_or_inline #define __no_sanitize_or_inline __always_inline #endif +/* Section for code which can't be instrumented at all */ +#define noinstr \ + noinline notrace __attribute((__section__(".noinstr.text"))) \ + __no_kcsan __no_sanitize_address + #endif /* __KERNEL__ */ #endif /* __ASSEMBLY__ */ @@ -254,32 +252,8 @@ struct ftrace_likely_data { * __unqual_scalar_typeof(x) - Declare an unqualified scalar type, leaving * non-scalar types unchanged. */ -#if (defined(CONFIG_CC_IS_GCC) && CONFIG_GCC_VERSION < 40900) || defined(__CHECKER__) /* - * We build this out of a couple of helper macros in a vain attempt to - * help you keep your lunch down while reading it. - */ -#define __pick_scalar_type(x, type, otherwise) \ - __builtin_choose_expr(__same_type(x, type), (type)0, otherwise) - -/* - * 'char' is not type-compatible with either 'signed char' or 'unsigned char', - * so we include the naked type here as well as the signed/unsigned variants. - */ -#define __pick_integer_type(x, type, otherwise) \ - __pick_scalar_type(x, type, \ - __pick_scalar_type(x, unsigned type, \ - __pick_scalar_type(x, signed type, otherwise))) - -#define __unqual_scalar_typeof(x) typeof( \ - __pick_integer_type(x, char, \ - __pick_integer_type(x, short, \ - __pick_integer_type(x, int, \ - __pick_integer_type(x, long, \ - __pick_integer_type(x, long long, x)))))) -#else -/* - * If supported, prefer C11 _Generic for better compile-times. As above, 'char' + * Prefer C11 _Generic for better compile-times and simpler code. Note: 'char' * is not type-compatible with 'signed char', and we define a separate case. 
*/ #define __scalar_type_to_expr_cases(type) \ @@ -295,7 +269,6 @@ struct ftrace_likely_data { __scalar_type_to_expr_cases(long), \ __scalar_type_to_expr_cases(long long), \ default: (x))) -#endif /* Is this type a native word size -- useful for atomic operations */ #define __native_word(t) \ diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h index 136f984df0d9..cdfa400f89b3 100644 --- a/include/linux/dma-direct.h +++ b/include/linux/dma-direct.h @@ -77,8 +77,6 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs); void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs); -struct page *__dma_direct_alloc_pages(struct device *dev, size_t size, - gfp_t gfp, unsigned long attrs); int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt, void *cpu_addr, dma_addr_t dma_addr, size_t size, unsigned long attrs); diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index e1c03339918f..6283917edd90 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -153,7 +153,7 @@ struct dma_interleaved_template { bool dst_sgl; size_t numf; size_t frame_size; - struct data_chunk sgl[0]; + struct data_chunk sgl[]; }; /** @@ -535,7 +535,7 @@ struct dmaengine_unmap_data { struct device *dev; struct kref kref; size_t len; - dma_addr_t addr[0]; + dma_addr_t addr[]; }; struct dma_async_tx_descriptor; diff --git a/include/linux/efi.h b/include/linux/efi.h index 2c6495f72f79..bb35f3305e55 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -350,6 +350,7 @@ void efi_native_runtime_setup(void); * associated with ConOut */ #define LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID EFI_GUID(0xe03fc20a, 0x85dc, 0x406e, 0xb9, 0x0e, 0x4a, 0xb5, 0x02, 0x37, 0x1d, 0x95) +#define LINUX_EFI_ARM_CPU_STATE_TABLE_GUID EFI_GUID(0xef79e4aa, 0x3c3d, 0x4989, 0xb9, 0x02, 0x07, 0xa9, 0x43, 0xe5, 0x50, 0xd2) #define LINUX_EFI_LOADER_ENTRY_GUID EFI_GUID(0x4a67b082, 0x0a4c, 0x41cf, 0xb6, 0xc7, 0x44, 0x0b, 0x29, 0xbb, 0x8c, 0x4f) #define LINUX_EFI_RANDOM_SEED_TABLE_GUID EFI_GUID(0x1ce1e5bc, 0x7ceb, 0x42f2, 0x81, 0xe5, 0x8a, 0xad, 0xf1, 0x80, 0xf5, 0x7b) #define LINUX_EFI_TPM_EVENT_LOG_GUID EFI_GUID(0xb7799cb0, 0xeca2, 0x4943, 0x96, 0x67, 0x1f, 0xae, 0x07, 0xb7, 0x47, 0xfa) @@ -1236,14 +1237,11 @@ struct linux_efi_memreserve { struct { phys_addr_t base; phys_addr_t size; - } entry[0]; + } entry[]; }; -#define EFI_MEMRESERVE_SIZE(count) (sizeof(struct linux_efi_memreserve) + \ - (count) * sizeof(((struct linux_efi_memreserve *)0)->entry[0])) - #define EFI_MEMRESERVE_COUNT(size) (((size) - sizeof(struct linux_efi_memreserve)) \ - / sizeof(((struct linux_efi_memreserve *)0)->entry[0])) + / sizeof_field(struct linux_efi_memreserve, entry[0])) void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size); diff --git a/include/linux/filter.h b/include/linux/filter.h index 259377723603..0b0144752d78 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -884,12 +884,12 @@ void bpf_jit_compile(struct bpf_prog *prog); bool bpf_jit_needs_zext(void); bool bpf_helper_changes_pkt_data(void *func); -static inline bool bpf_dump_raw_ok(void) +static inline bool bpf_dump_raw_ok(const struct cred *cred) { /* Reconstruction of call-sites is dependent on kallsyms, * thus make dump the same restriction. 
*/ - return kallsyms_show_value() == 1; + return kallsyms_show_value(cred); } struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off, diff --git a/include/linux/fs.h b/include/linux/fs.h index 6c4ab4dc1cd7..3f881a892ea7 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2592,7 +2592,6 @@ extern void bdput(struct block_device *); extern void invalidate_bdev(struct block_device *); extern void iterate_bdevs(void (*)(struct block_device *, void *), void *); extern int sync_blockdev(struct block_device *bdev); -extern void kill_bdev(struct block_device *); extern struct super_block *freeze_bdev(struct block_device *); extern void emergency_thaw_all(void); extern void emergency_thaw_bdev(struct super_block *sb); @@ -2608,7 +2607,6 @@ static inline bool sb_is_blkdev_sb(struct super_block *sb) #else static inline void bd_forget(struct inode *inode) {} static inline int sync_blockdev(struct block_device *bdev) { return 0; } -static inline void kill_bdev(struct block_device *bdev) {} static inline void invalidate_bdev(struct block_device *bdev) {} static inline struct super_block *freeze_bdev(struct block_device *sb) diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h index ce0b5fbf239d..3f0b19dcfae7 100644 --- a/include/linux/fscache-cache.h +++ b/include/linux/fscache-cache.h @@ -46,7 +46,7 @@ struct fscache_cache_tag { unsigned long flags; #define FSCACHE_TAG_RESERVED 0 /* T if tag is reserved for a cache */ atomic_t usage; - char name[0]; /* tag name */ + char name[]; /* tag name */ }; /* diff --git a/include/linux/host1x.h b/include/linux/host1x.h index c230b4e70d75..a3a568bf9686 100644 --- a/include/linux/host1x.h +++ b/include/linux/host1x.h @@ -48,6 +48,9 @@ struct host1x_client_ops { * @channel: host1x channel associated with this client * @syncpts: array of syncpoints requested for this client * @num_syncpts: number of syncpoints requested for this client + * @parent: pointer to parent structure + * @usecount: reference count for this structure + * @lock: mutex for mutually exclusive concurrency */ struct host1x_client { struct list_head list; diff --git a/include/linux/i2c.h b/include/linux/i2c.h index c10617bb980a..b8b8963f8bb9 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -408,7 +408,7 @@ static inline bool i2c_detect_slave_mode(struct device *dev) { return false; } * that are present. This information is used to grow the driver model tree. * For mainboards this is done statically using i2c_register_board_info(); * bus numbers identify adapters that aren't yet available. For add-on boards, - * i2c_new_device() does this dynamically with the adapter already known. + * i2c_new_client_device() does this dynamically with the adapter already known. */ struct i2c_board_info { char type[I2C_NAME_SIZE]; @@ -439,14 +439,12 @@ struct i2c_board_info { #if IS_ENABLED(CONFIG_I2C) -/* Add-on boards should register/unregister their devices; e.g. a board +/* + * Add-on boards should register/unregister their devices; e.g. a board * with integrated I2C, a config eeprom, sensors, and a codec that's * used in conjunction with the primary hardware. 
*/ struct i2c_client * -i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info); - -struct i2c_client * i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *info); /* If you don't know the exact address of an I2C device, use this variant diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index 4100bd224f5c..3e8fa1c7a1e6 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h @@ -41,6 +41,7 @@ #define DMA_PTE_SNP BIT_ULL(11) #define DMA_FL_PTE_PRESENT BIT_ULL(0) +#define DMA_FL_PTE_US BIT_ULL(2) #define DMA_FL_PTE_XD BIT_ULL(63) #define ADDR_WIDTH_5LEVEL (57) diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h index 2735da5f839e..30823780c192 100644 --- a/include/linux/irq_work.h +++ b/include/linux/irq_work.h @@ -2,7 +2,7 @@ #ifndef _LINUX_IRQ_WORK_H #define _LINUX_IRQ_WORK_H -#include <linux/llist.h> +#include <linux/smp_types.h> /* * An entry can be in one of four states: @@ -13,24 +13,14 @@ * busy NULL, 2 -> {free, claimed} : callback in progress, can be claimed */ -/* flags share CSD_FLAG_ space */ - -#define IRQ_WORK_PENDING BIT(0) -#define IRQ_WORK_BUSY BIT(1) - -/* Doesn't want IPI, wait for tick: */ -#define IRQ_WORK_LAZY BIT(2) -/* Run hard IRQ context, even on RT */ -#define IRQ_WORK_HARD_IRQ BIT(3) - -#define IRQ_WORK_CLAIMED (IRQ_WORK_PENDING | IRQ_WORK_BUSY) - -/* - * structure shares layout with single_call_data_t. - */ struct irq_work { - struct llist_node llnode; - atomic_t flags; + union { + struct __call_single_node node; + struct { + struct llist_node llnode; + atomic_t flags; + }; + }; void (*func)(struct irq_work *); }; diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index f613d8529863..d56128df2aff 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -766,6 +766,11 @@ struct journal_s int j_errno; /** + * @j_abort_mutex: Lock the whole aborting procedure. + */ + struct mutex j_abort_mutex; + + /** * @j_sb_buffer: The first part of the superblock buffer. */ struct buffer_head *j_sb_buffer; @@ -1247,7 +1252,6 @@ JBD2_FEATURE_INCOMPAT_FUNCS(csum3, CSUM_V3) #define JBD2_ABORT_ON_SYNCDATA_ERR 0x040 /* Abort the journal on file * data write error in ordered * mode */ -#define JBD2_REC_ERR 0x080 /* The errno in the sb has been recorded */ /* * Function declarations for the journaling transaction and buffer diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h index 98338dc6b5d2..481273f0c72d 100644 --- a/include/linux/kallsyms.h +++ b/include/linux/kallsyms.h @@ -18,6 +18,7 @@ #define KSYM_SYMBOL_LEN (sizeof("%s+%#lx/%#lx [%s]") + (KSYM_NAME_LEN - 1) + \ 2*(BITS_PER_LONG*3/10) + (MODULE_NAME_LEN - 1) + 1) +struct cred; struct module; static inline int is_kernel_inittext(unsigned long addr) @@ -98,7 +99,7 @@ int lookup_symbol_name(unsigned long addr, char *symname); int lookup_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name); /* How and when do we show kallsyms values? 
*/ -extern int kallsyms_show_value(void); +extern bool kallsyms_show_value(const struct cred *cred); #else /* !CONFIG_KALLSYMS */ @@ -158,7 +159,7 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u return -ERANGE; } -static inline int kallsyms_show_value(void) +static inline bool kallsyms_show_value(const struct cred *cred) { return false; } diff --git a/include/linux/kexec.h b/include/linux/kexec.h index 1776eb2e43a4..ea67910ae6b7 100644 --- a/include/linux/kexec.h +++ b/include/linux/kexec.h @@ -208,7 +208,7 @@ struct crash_mem_range { struct crash_mem { unsigned int max_nr_ranges; unsigned int nr_ranges; - struct crash_mem_range ranges[0]; + struct crash_mem_range ranges[]; }; extern int crash_exclude_mem_range(struct crash_mem *mem, diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h index c62d76478adc..529116b0cabe 100644 --- a/include/linux/kgdb.h +++ b/include/linux/kgdb.h @@ -276,8 +276,7 @@ struct kgdb_arch { * the I/O driver. * @post_exception: Pointer to a function that will do any cleanup work * for the I/O driver. - * @is_console: 1 if the end device is a console 0 if the I/O device is - * not a console + * @cons: valid if the I/O device is a console; else NULL. */ struct kgdb_io { const char *name; @@ -288,7 +287,7 @@ struct kgdb_io { void (*deinit) (void); void (*pre_exception) (void); void (*post_exception) (void); - int is_console; + struct console *cons; }; extern const struct kgdb_arch arch_kgdb_ops; diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index 594265bfd390..6adf90f248d7 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h @@ -161,7 +161,7 @@ struct kretprobe_instance { kprobe_opcode_t *ret_addr; struct task_struct *task; void *fp; - char data[0]; + char data[]; }; struct kretprobe_blackpoint { @@ -350,6 +350,10 @@ static inline struct kprobe_ctlblk *get_kprobe_ctlblk(void) return this_cpu_ptr(&kprobe_ctlblk); } +extern struct kprobe kprobe_busy; +void kprobe_busy_begin(void); +void kprobe_busy_end(void); + kprobe_opcode_t *kprobe_lookup_name(const char *name, unsigned int offset); int register_kprobe(struct kprobe *p); void unregister_kprobe(struct kprobe *p); diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 62ec926c78a0..d564855243d8 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -409,7 +409,7 @@ struct kvm_irq_routing_table { * Array indexed by gsi. Each entry contains list of irq chips * the gsi is connected to. */ - struct hlist_head map[0]; + struct hlist_head map[]; }; #endif diff --git a/include/linux/libata.h b/include/linux/libata.h index af832852e620..77ccf040a128 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -22,6 +22,7 @@ #include <linux/acpi.h> #include <linux/cdrom.h> #include <linux/sched.h> +#include <linux/async.h> /* * Define if arch has non-standard setup. 
This is a _PCI_ standard @@ -609,7 +610,7 @@ struct ata_host { struct task_struct *eh_owner; struct ata_port *simplex_claimed; /* channel owning the DMA */ - struct ata_port *ports[0]; + struct ata_port *ports[]; }; struct ata_queued_cmd { @@ -872,6 +873,8 @@ struct ata_port { struct timer_list fastdrain_timer; unsigned long fastdrain_cnt; + async_cookie_t cookie; + int em_message_type; void *private_data; @@ -1092,7 +1095,11 @@ extern int ata_scsi_ioctl(struct scsi_device *dev, unsigned int cmd, #define ATA_SCSI_COMPAT_IOCTL /* empty */ #endif extern int ata_scsi_queuecmd(struct Scsi_Host *h, struct scsi_cmnd *cmd); +#if IS_REACHABLE(CONFIG_ATA) bool ata_scsi_dma_need_drain(struct request *rq); +#else +#define ata_scsi_dma_need_drain NULL +#endif extern int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *dev, unsigned int cmd, void __user *arg); extern bool ata_link_online(struct ata_link *link); diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h index 6791813cd439..af998f93d256 100644 --- a/include/linux/lsm_hook_defs.h +++ b/include/linux/lsm_hook_defs.h @@ -150,7 +150,7 @@ LSM_HOOK(int, 0, inode_listsecurity, struct inode *inode, char *buffer, size_t buffer_size) LSM_HOOK(void, LSM_RET_VOID, inode_getsecid, struct inode *inode, u32 *secid) LSM_HOOK(int, 0, inode_copy_up, struct dentry *src, struct cred **new) -LSM_HOOK(int, 0, inode_copy_up_xattr, const char *name) +LSM_HOOK(int, -EOPNOTSUPP, inode_copy_up_xattr, const char *name) LSM_HOOK(int, 0, kernfs_init_security, struct kernfs_node *kn_dir, struct kernfs_node *kn) LSM_HOOK(int, 0, file_permission, struct file *file, int mask) @@ -360,7 +360,7 @@ LSM_HOOK(int, 0, key_alloc, struct key *key, const struct cred *cred, unsigned long flags) LSM_HOOK(void, LSM_RET_VOID, key_free, struct key *key) LSM_HOOK(int, 0, key_permission, key_ref_t key_ref, const struct cred *cred, - unsigned perm) + enum key_need_perm need_perm) LSM_HOOK(int, 0, key_getsecurity, struct key *key, char **_buffer) #endif /* CONFIG_KEYS */ diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 116bd9bb347f..ca1887dd0423 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -4283,7 +4283,8 @@ struct mlx5_ifc_rst2init_qp_out_bits { u8 syndrome[0x20]; - u8 reserved_at_40[0x40]; + u8 reserved_at_40[0x20]; + u8 ece[0x20]; }; struct mlx5_ifc_rst2init_qp_in_bits { @@ -4300,7 +4301,7 @@ struct mlx5_ifc_rst2init_qp_in_bits { u8 opt_param_mask[0x20]; - u8 reserved_at_a0[0x20]; + u8 ece[0x20]; struct mlx5_ifc_qpc_bits qpc; @@ -6619,7 +6620,8 @@ struct mlx5_ifc_init2init_qp_out_bits { u8 syndrome[0x20]; - u8 reserved_at_40[0x40]; + u8 reserved_at_40[0x20]; + u8 ece[0x20]; }; struct mlx5_ifc_init2init_qp_in_bits { @@ -6636,7 +6638,7 @@ struct mlx5_ifc_init2init_qp_in_bits { u8 opt_param_mask[0x20]; - u8 reserved_at_a0[0x20]; + u8 ece[0x20]; struct mlx5_ifc_qpc_bits qpc; diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index c4c37fd12104..f6f884970511 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -257,8 +257,8 @@ struct lruvec { */ unsigned long anon_cost; unsigned long file_cost; - /* Evictions & activations on the inactive file list */ - atomic_long_t inactive_age; + /* Non-resident age, driven by LRU movement */ + atomic_long_t nonresident_age; /* Refaults at the time of last reclaim cycle */ unsigned long refaults; /* Various lruvec state flags (enum lruvec_flags) */ diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 
6fc613ed8eae..39e28e11863c 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -3157,7 +3157,7 @@ static inline int dev_recursion_level(void) return this_cpu_read(softnet_data.xmit.recursion); } -#define XMIT_RECURSION_LIMIT 10 +#define XMIT_RECURSION_LIMIT 8 static inline bool dev_xmit_recursion(void) { return unlikely(__this_cpu_read(softnet_data.xmit.recursion) > diff --git a/include/linux/netfilter_ipv4/ip_tables.h b/include/linux/netfilter_ipv4/ip_tables.h index b394bd4f68a3..c4676d6feeff 100644 --- a/include/linux/netfilter_ipv4/ip_tables.h +++ b/include/linux/netfilter_ipv4/ip_tables.h @@ -25,6 +25,12 @@ int ipt_register_table(struct net *net, const struct xt_table *table, const struct ipt_replace *repl, const struct nf_hook_ops *ops, struct xt_table **res); + +void ipt_unregister_table_pre_exit(struct net *net, struct xt_table *table, + const struct nf_hook_ops *ops); + +void ipt_unregister_table_exit(struct net *net, struct xt_table *table); + void ipt_unregister_table(struct net *net, struct xt_table *table, const struct nf_hook_ops *ops); diff --git a/include/linux/netfilter_ipv6/ip6_tables.h b/include/linux/netfilter_ipv6/ip6_tables.h index 8225f7821a29..1547d5f9ae06 100644 --- a/include/linux/netfilter_ipv6/ip6_tables.h +++ b/include/linux/netfilter_ipv6/ip6_tables.h @@ -29,6 +29,9 @@ int ip6t_register_table(struct net *net, const struct xt_table *table, const struct nf_hook_ops *ops, struct xt_table **res); void ip6t_unregister_table(struct net *net, struct xt_table *table, const struct nf_hook_ops *ops); +void ip6t_unregister_table_pre_exit(struct net *net, struct xt_table *table, + const struct nf_hook_ops *ops); +void ip6t_unregister_table_exit(struct net *net, struct xt_table *table); extern unsigned int ip6t_do_table(struct sk_buff *skb, const struct nf_hook_state *state, struct xt_table *table); diff --git a/include/linux/overflow.h b/include/linux/overflow.h index 659045046468..93fcef105061 100644 --- a/include/linux/overflow.h +++ b/include/linux/overflow.h @@ -304,16 +304,33 @@ static inline __must_check size_t __ab_c_size(size_t a, size_t b, size_t c) * struct_size() - Calculate size of structure with trailing array. * @p: Pointer to the structure. * @member: Name of the array member. - * @n: Number of elements in the array. + * @count: Number of elements in the array. * * Calculates size of memory needed for structure @p followed by an - * array of @n @member elements. + * array of @count number of @member elements. * * Return: number of bytes needed or SIZE_MAX on overflow. */ -#define struct_size(p, member, n) \ - __ab_c_size(n, \ +#define struct_size(p, member, count) \ + __ab_c_size(count, \ sizeof(*(p)->member) + __must_be_array((p)->member),\ sizeof(*(p))) +/** + * flex_array_size() - Calculate size of a flexible array member + * within an enclosing structure. + * + * @p: Pointer to the structure. + * @member: Name of the flexible array member. + * @count: Number of elements in the array. + * + * Calculates size of a flexible array of @count number of @member + * elements, at the end of structure @p. + * + * Return: number of bytes needed or SIZE_MAX on overflow. 
+ */ +#define flex_array_size(p, member, count) \ + array_size(count, \ + sizeof(*(p)->member) + __must_be_array((p)->member)) + #endif /* __LINUX_OVERFLOW_H */ diff --git a/include/linux/pci.h b/include/linux/pci.h index c79d83304e52..34c1c4f45288 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -2169,12 +2169,11 @@ static inline int pci_pcie_type(const struct pci_dev *dev) */ static inline struct pci_dev *pcie_find_root_port(struct pci_dev *dev) { - struct pci_dev *bridge = pci_upstream_bridge(dev); - - while (bridge) { - if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT) - return bridge; - bridge = pci_upstream_bridge(bridge); + while (dev) { + if (pci_is_pcie(dev) && + pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) + return dev; + dev = pci_upstream_bridge(dev); } return NULL; diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h index 32b6c52d41b9..56c1e8eb7bb0 100644 --- a/include/linux/pgtable.h +++ b/include/linux/pgtable.h @@ -249,6 +249,13 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, } #endif +#ifndef __HAVE_ARCH_PTEP_GET +static inline pte_t ptep_get(pte_t *ptep) +{ + return READ_ONCE(*ptep); +} +#endif + #ifdef CONFIG_TRANSPARENT_HUGEPAGE #ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, diff --git a/include/linux/phy.h b/include/linux/phy.h index 8c05d0fb5c00..b693b609b2f5 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -1416,6 +1416,7 @@ int phy_ethtool_ksettings_set(struct phy_device *phydev, int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd); int phy_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); int phy_do_ioctl_running(struct net_device *dev, struct ifreq *ifr, int cmd); +int phy_disable_interrupts(struct phy_device *phydev); void phy_request_interrupt(struct phy_device *phydev); void phy_free_interrupt(struct phy_device *phydev); void phy_print_status(struct phy_device *phydev); diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h index 7fbc8679145c..49d155cd2dfe 100644 --- a/include/linux/psp-sev.h +++ b/include/linux/psp-sev.h @@ -597,7 +597,7 @@ int sev_guest_df_flush(int *error); */ int sev_guest_decommission(struct sev_data_decommission *data, int *error); -void *psp_copy_user_blob(u64 __user uaddr, u32 len); +void *psp_copy_user_blob(u64 uaddr, u32 len); #else /* !CONFIG_CRYPTO_DEV_SP_PSP */ diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h index 733fad7dfbed..6d15040c642c 100644 --- a/include/linux/qed/qed_chain.h +++ b/include/linux/qed/qed_chain.h @@ -207,28 +207,34 @@ static inline u32 qed_chain_get_cons_idx_u32(struct qed_chain *p_chain) static inline u16 qed_chain_get_elem_left(struct qed_chain *p_chain) { + u16 elem_per_page = p_chain->elem_per_page; + u32 prod = p_chain->u.chain16.prod_idx; + u32 cons = p_chain->u.chain16.cons_idx; u16 used; - used = (u16) (((u32)0x10000 + - (u32)p_chain->u.chain16.prod_idx) - - (u32)p_chain->u.chain16.cons_idx); + if (prod < cons) + prod += (u32)U16_MAX + 1; + + used = (u16)(prod - cons); if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR) - used -= p_chain->u.chain16.prod_idx / p_chain->elem_per_page - - p_chain->u.chain16.cons_idx / p_chain->elem_per_page; + used -= prod / elem_per_page - cons / elem_per_page; return (u16)(p_chain->capacity - used); } static inline u32 qed_chain_get_elem_left_u32(struct qed_chain *p_chain) { + u16 elem_per_page = p_chain->elem_per_page; + u64 prod = p_chain->u.chain32.prod_idx; + u64 cons = 
p_chain->u.chain32.cons_idx; u32 used; - used = (u32) (((u64)0x100000000ULL + - (u64)p_chain->u.chain32.prod_idx) - - (u64)p_chain->u.chain32.cons_idx); + if (prod < cons) + prod += (u64)U32_MAX + 1; + + used = (u32)(prod - cons); if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR) - used -= p_chain->u.chain32.prod_idx / p_chain->elem_per_page - - p_chain->u.chain32.cons_idx / p_chain->elem_per_page; + used -= (u32)(prod / elem_per_page - cons / elem_per_page); return p_chain->capacity - used; } diff --git a/include/linux/sched.h b/include/linux/sched.h index b62e6aaf28f0..692e327d7455 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -654,9 +654,8 @@ struct task_struct { unsigned int ptrace; #ifdef CONFIG_SMP - struct llist_node wake_entry; - unsigned int wake_entry_type; int on_cpu; + struct __call_single_node wake_entry; #ifdef CONFIG_THREAD_INFO_IN_TASK /* Current CPU: */ unsigned int cpu; diff --git a/include/linux/sched/jobctl.h b/include/linux/sched/jobctl.h index fa067de9f1a9..d2b4204ba4d3 100644 --- a/include/linux/sched/jobctl.h +++ b/include/linux/sched/jobctl.h @@ -19,6 +19,7 @@ struct task_struct; #define JOBCTL_TRAPPING_BIT 21 /* switching to TRACED */ #define JOBCTL_LISTENING_BIT 22 /* ptracer is listening for events */ #define JOBCTL_TRAP_FREEZE_BIT 23 /* trap for cgroup freezer */ +#define JOBCTL_TASK_WORK_BIT 24 /* set by TWA_SIGNAL */ #define JOBCTL_STOP_DEQUEUED (1UL << JOBCTL_STOP_DEQUEUED_BIT) #define JOBCTL_STOP_PENDING (1UL << JOBCTL_STOP_PENDING_BIT) @@ -28,9 +29,10 @@ struct task_struct; #define JOBCTL_TRAPPING (1UL << JOBCTL_TRAPPING_BIT) #define JOBCTL_LISTENING (1UL << JOBCTL_LISTENING_BIT) #define JOBCTL_TRAP_FREEZE (1UL << JOBCTL_TRAP_FREEZE_BIT) +#define JOBCTL_TASK_WORK (1UL << JOBCTL_TASK_WORK_BIT) #define JOBCTL_TRAP_MASK (JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY) -#define JOBCTL_PENDING_MASK (JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK) +#define JOBCTL_PENDING_MASK (JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK | JOBCTL_TASK_WORK) extern bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask); extern void task_clear_jobctl_trapping(struct task_struct *task); diff --git a/include/linux/sctp.h b/include/linux/sctp.h index 8ccd82105de8..76731230bbc5 100644 --- a/include/linux/sctp.h +++ b/include/linux/sctp.h @@ -221,7 +221,7 @@ struct sctp_datahdr { __be16 stream; __be16 ssn; __u32 ppid; - __u8 payload[0]; + __u8 payload[]; }; struct sctp_data_chunk { @@ -269,7 +269,7 @@ struct sctp_inithdr { __be16 num_outbound_streams; __be16 num_inbound_streams; __be32 initial_tsn; - __u8 params[0]; + __u8 params[]; }; struct sctp_init_chunk { @@ -299,13 +299,13 @@ struct sctp_cookie_preserve_param { /* Section 3.3.2.1 Host Name Address (11) */ struct sctp_hostname_param { struct sctp_paramhdr param_hdr; - uint8_t hostname[0]; + uint8_t hostname[]; }; /* Section 3.3.2.1 Supported Address Types (12) */ struct sctp_supported_addrs_param { struct sctp_paramhdr param_hdr; - __be16 types[0]; + __be16 types[]; }; /* ADDIP Section 3.2.6 Adaptation Layer Indication */ @@ -317,25 +317,25 @@ struct sctp_adaptation_ind_param { /* ADDIP Section 4.2.7 Supported Extensions Parameter */ struct sctp_supported_ext_param { struct sctp_paramhdr param_hdr; - __u8 chunks[0]; + __u8 chunks[]; }; /* AUTH Section 3.1 Random */ struct sctp_random_param { struct sctp_paramhdr param_hdr; - __u8 random_val[0]; + __u8 random_val[]; }; /* AUTH Section 3.2 Chunk List */ struct sctp_chunks_param { struct sctp_paramhdr param_hdr; - __u8 chunks[0]; + __u8 chunks[]; }; /* AUTH Section 3.3 
HMAC Algorithm */ struct sctp_hmac_algo_param { struct sctp_paramhdr param_hdr; - __be16 hmac_ids[0]; + __be16 hmac_ids[]; }; /* RFC 2960. Section 3.3.3 Initiation Acknowledgement (INIT ACK) (2): @@ -350,7 +350,7 @@ struct sctp_initack_chunk { /* Section 3.3.3.1 State Cookie (7) */ struct sctp_cookie_param { struct sctp_paramhdr p; - __u8 body[0]; + __u8 body[]; }; /* Section 3.3.3.1 Unrecognized Parameters (8) */ @@ -384,7 +384,7 @@ struct sctp_sackhdr { __be32 a_rwnd; __be16 num_gap_ack_blocks; __be16 num_dup_tsns; - union sctp_sack_variable variable[0]; + union sctp_sack_variable variable[]; }; struct sctp_sack_chunk { @@ -436,7 +436,7 @@ struct sctp_shutdown_chunk { struct sctp_errhdr { __be16 cause; __be16 length; - __u8 variable[0]; + __u8 variable[]; }; struct sctp_operr_chunk { @@ -594,7 +594,7 @@ struct sctp_fwdtsn_skip { struct sctp_fwdtsn_hdr { __be32 new_cum_tsn; - struct sctp_fwdtsn_skip skip[0]; + struct sctp_fwdtsn_skip skip[]; }; struct sctp_fwdtsn_chunk { @@ -611,7 +611,7 @@ struct sctp_ifwdtsn_skip { struct sctp_ifwdtsn_hdr { __be32 new_cum_tsn; - struct sctp_ifwdtsn_skip skip[0]; + struct sctp_ifwdtsn_skip skip[]; }; struct sctp_ifwdtsn_chunk { @@ -658,7 +658,7 @@ struct sctp_addip_param { struct sctp_addiphdr { __be32 serial; - __u8 params[0]; + __u8 params[]; }; struct sctp_addip_chunk { @@ -718,7 +718,7 @@ struct sctp_addip_chunk { struct sctp_authhdr { __be16 shkey_id; __be16 hmac_id; - __u8 hmac[0]; + __u8 hmac[]; }; struct sctp_auth_chunk { @@ -733,7 +733,7 @@ struct sctp_infox { struct sctp_reconf_chunk { struct sctp_chunkhdr chunk_hdr; - __u8 params[0]; + __u8 params[]; }; struct sctp_strreset_outreq { @@ -741,13 +741,13 @@ struct sctp_strreset_outreq { __be32 request_seq; __be32 response_seq; __be32 send_reset_at_tsn; - __be16 list_of_streams[0]; + __be16 list_of_streams[]; }; struct sctp_strreset_inreq { struct sctp_paramhdr param_hdr; __be32 request_seq; - __be16 list_of_streams[0]; + __be16 list_of_streams[]; }; struct sctp_strreset_tsnreq { diff --git a/include/linux/smp.h b/include/linux/smp.h index 7ee202ad21a6..80d557ef8a11 100644 --- a/include/linux/smp.h +++ b/include/linux/smp.h @@ -12,29 +12,22 @@ #include <linux/list.h> #include <linux/cpumask.h> #include <linux/init.h> -#include <linux/llist.h> +#include <linux/smp_types.h> typedef void (*smp_call_func_t)(void *info); typedef bool (*smp_cond_func_t)(int cpu, void *info); -enum { - CSD_FLAG_LOCK = 0x01, - - /* IRQ_WORK_flags */ - - CSD_TYPE_ASYNC = 0x00, - CSD_TYPE_SYNC = 0x10, - CSD_TYPE_IRQ_WORK = 0x20, - CSD_TYPE_TTWU = 0x30, - CSD_FLAG_TYPE_MASK = 0xF0, -}; - /* * structure shares (partial) layout with struct irq_work */ struct __call_single_data { - struct llist_node llist; - unsigned int flags; + union { + struct __call_single_node node; + struct { + struct llist_node llist; + unsigned int flags; + }; + }; smp_call_func_t func; void *info; }; diff --git a/include/linux/smp_types.h b/include/linux/smp_types.h new file mode 100644 index 000000000000..364b3ae3e41d --- /dev/null +++ b/include/linux/smp_types.h @@ -0,0 +1,66 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_SMP_TYPES_H +#define __LINUX_SMP_TYPES_H + +#include <linux/llist.h> + +enum { + CSD_FLAG_LOCK = 0x01, + + IRQ_WORK_PENDING = 0x01, + IRQ_WORK_BUSY = 0x02, + IRQ_WORK_LAZY = 0x04, /* No IPI, wait for tick */ + IRQ_WORK_HARD_IRQ = 0x08, /* IRQ context on PREEMPT_RT */ + + IRQ_WORK_CLAIMED = (IRQ_WORK_PENDING | IRQ_WORK_BUSY), + + CSD_TYPE_ASYNC = 0x00, + CSD_TYPE_SYNC = 0x10, + CSD_TYPE_IRQ_WORK = 0x20, + CSD_TYPE_TTWU = 
0x30, + + CSD_FLAG_TYPE_MASK = 0xF0, +}; + +/* + * struct __call_single_node is the primary type on + * smp.c:call_single_queue. + * + * flush_smp_call_function_queue() only reads the type from + * __call_single_node::u_flags as a regular load, the above + * (anonymous) enum defines all the bits of this word. + * + * Other bits are not modified until the type is known. + * + * CSD_TYPE_SYNC/ASYNC: + * struct { + * struct llist_node node; + * unsigned int flags; + * smp_call_func_t func; + * void *info; + * }; + * + * CSD_TYPE_IRQ_WORK: + * struct { + * struct llist_node node; + * atomic_t flags; + * void (*func)(struct irq_work *); + * }; + * + * CSD_TYPE_TTWU: + * struct { + * struct llist_node node; + * unsigned int flags; + * }; + * + */ + +struct __call_single_node { + struct llist_node llist; + union { + unsigned int u_flags; + atomic_t a_flags; + }; +}; + +#endif /* __LINUX_SMP_TYPES_H */ diff --git a/include/linux/swap.h b/include/linux/swap.h index 4c5974bb9ba9..5b3216ba39a9 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -313,6 +313,7 @@ struct vma_swap_readahead { }; /* linux/mm/workingset.c */ +void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages); void *workingset_eviction(struct page *page, struct mem_cgroup *target_memcg); void workingset_refault(struct page *page, void *shadow); void workingset_activation(struct page *page); diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 7c354c2955f5..b951a87da987 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -1360,7 +1360,7 @@ static inline long ksys_lchown(const char __user *filename, uid_t user, extern long do_sys_ftruncate(unsigned int fd, loff_t length, int small); -static inline long ksys_ftruncate(unsigned int fd, unsigned long length) +static inline long ksys_ftruncate(unsigned int fd, loff_t length) { return do_sys_ftruncate(fd, length, 1); } diff --git a/include/linux/task_work.h b/include/linux/task_work.h index bd9a6a91c097..0fb93aafa478 100644 --- a/include/linux/task_work.h +++ b/include/linux/task_work.h @@ -13,7 +13,10 @@ init_task_work(struct callback_head *twork, task_work_func_t func) twork->func = func; } -int task_work_add(struct task_struct *task, struct callback_head *twork, bool); +#define TWA_RESUME 1 +#define TWA_SIGNAL 2 +int task_work_add(struct task_struct *task, struct callback_head *twork, int); + struct callback_head *task_work_cancel(struct task_struct *, task_work_func_t); void task_work_run(void); diff --git a/include/linux/tifm.h b/include/linux/tifm.h index 299cbb8c63bb..44073d06710f 100644 --- a/include/linux/tifm.h +++ b/include/linux/tifm.h @@ -124,7 +124,7 @@ struct tifm_adapter { int (*has_ms_pif)(struct tifm_adapter *fm, struct tifm_dev *sock); - struct tifm_dev *sockets[0]; + struct tifm_dev *sockets[]; }; struct tifm_adapter *tifm_alloc_adapter(unsigned int num_sockets, diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h index b27e2ffa96c1..d5471d6fa778 100644 --- a/include/linux/timekeeping.h +++ b/include/linux/timekeeping.h @@ -222,9 +222,9 @@ extern bool timekeeping_rtc_skipresume(void); extern void timekeeping_inject_sleeptime64(const struct timespec64 *delta); -/* +/** * struct system_time_snapshot - simultaneous raw/real time capture with - * counter value + * counter value * @cycles: Clocksource counter value to produce the system times * @real: Realtime system time * @raw: Monotonic raw system time @@ -239,9 +239,9 @@ struct system_time_snapshot { u8 cs_was_changed_seq; }; -/* 
+/** * struct system_device_crosststamp - system/device cross-timestamp - * (syncronized capture) + * (synchronized capture) * @device: Device time * @sys_realtime: Realtime simultaneous with device time * @sys_monoraw: Monotonic raw simultaneous with device time @@ -252,12 +252,12 @@ struct system_device_crosststamp { ktime_t sys_monoraw; }; -/* +/** * struct system_counterval_t - system counter value with the pointer to the - * corresponding clocksource + * corresponding clocksource * @cycles: System counter value * @cs: Clocksource corresponding to system counter value. Used by - * timekeeping code to verify comparibility of two cycle values + * timekeeping code to verify comparibility of two cycle values */ struct system_counterval_t { u64 cycles; diff --git a/include/linux/tpm_eventlog.h b/include/linux/tpm_eventlog.h index 4f8c90c93c29..64356b199e94 100644 --- a/include/linux/tpm_eventlog.h +++ b/include/linux/tpm_eventlog.h @@ -81,6 +81,8 @@ struct tcg_efi_specid_event_algs { u16 digest_size; } __packed; +#define TCG_SPECID_SIG "Spec ID Event03" + struct tcg_efi_specid_event_head { u8 signature[16]; u32 platform_class; @@ -171,6 +173,7 @@ static inline int __calc_tpm2_event_size(struct tcg_pcr_event2_head *event, int i; int j; u32 count, event_type; + const u8 zero_digest[sizeof(event_header->digest)] = {0}; marker = event; marker_start = marker; @@ -198,10 +201,19 @@ static inline int __calc_tpm2_event_size(struct tcg_pcr_event2_head *event, count = READ_ONCE(event->count); event_type = READ_ONCE(event->event_type); + /* Verify that it's the log header */ + if (event_header->pcr_idx != 0 || + event_header->event_type != NO_ACTION || + memcmp(event_header->digest, zero_digest, sizeof(zero_digest))) { + size = 0; + goto out; + } + efispecid = (struct tcg_efi_specid_event_head *)event_header->event; /* Check if event is malformed. 
*/ - if (count > efispecid->num_algs) { + if (memcmp(efispecid->signature, TCG_SPECID_SIG, + sizeof(TCG_SPECID_SIG)) || count > efispecid->num_algs) { size = 0; goto out; } diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index 7bcadca22100..0a76ddc07d59 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h @@ -301,13 +301,14 @@ copy_struct_from_user(void *dst, size_t ksize, const void __user *src, return 0; } -bool probe_kernel_read_allowed(const void *unsafe_src, size_t size); +bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size); -extern long probe_kernel_read(void *dst, const void *src, size_t size); -extern long probe_user_read(void *dst, const void __user *src, size_t size); +long copy_from_kernel_nofault(void *dst, const void *src, size_t size); +long notrace copy_to_kernel_nofault(void *dst, const void *src, size_t size); -extern long notrace probe_kernel_write(void *dst, const void *src, size_t size); -extern long notrace probe_user_write(void __user *dst, const void *src, size_t size); +long copy_from_user_nofault(void *dst, const void __user *src, size_t size); +long notrace copy_to_user_nofault(void __user *dst, const void *src, + size_t size); long strncpy_from_kernel_nofault(char *dst, const void *unsafe_addr, long count); @@ -317,14 +318,16 @@ long strncpy_from_user_nofault(char *dst, const void __user *unsafe_addr, long strnlen_user_nofault(const void __user *unsafe_addr, long count); /** - * probe_kernel_address(): safely attempt to read from a location - * @addr: address to read from - * @retval: read into this variable + * get_kernel_nofault(): safely attempt to read from a location + * @val: read into this variable + * @ptr: address to read from * * Returns 0 on success, or -EFAULT. */ -#define probe_kernel_address(addr, retval) \ - probe_kernel_read(&retval, addr, sizeof(retval)) +#define get_kernel_nofault(val, ptr) ({ \ + const typeof(val) *__gk_ptr = (ptr); \ + copy_from_kernel_nofault(&(val), __gk_ptr, sizeof(val));\ +}) #ifndef user_access_begin #define user_access_begin(ptr,len) access_ok(ptr, len) diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index 48bb681e6c2a..0221f852a7e1 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -106,7 +106,6 @@ extern void *vzalloc(unsigned long size); extern void *vmalloc_user(unsigned long size); extern void *vmalloc_node(unsigned long size, int node); extern void *vzalloc_node(unsigned long size, int node); -extern void *vmalloc_exec(unsigned long size); extern void *vmalloc_32(unsigned long size); extern void *vmalloc_32_user(unsigned long size); extern void *__vmalloc(unsigned long size, gfp_t gfp_mask); |
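
To make the new helpers above easier to follow, a few hedged sketches. First, the overflow.h hunk at the top of this range adds flex_array_size(), which pairs naturally with the many [0] -> [] flexible-array conversions in sctp.h and tifm.h. Below is a minimal, hypothetical illustration of that pairing; struct blob and fill_blob() are invented names and are not part of the diff.

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

struct blob {
	u32 count;
	u8 data[];		/* flexible array member, as in the sctp.h changes */
};

static struct blob *fill_blob(const u8 *src, u32 count)
{
	/*
	 * struct_size()/flex_array_size() saturate to SIZE_MAX on overflow,
	 * so a wrapping size computation makes the allocation fail instead
	 * of producing an undersized buffer.
	 */
	struct blob *b = kzalloc(struct_size(b, data, count), GFP_KERNEL);

	if (!b)
		return NULL;

	b->count = count;
	memcpy(b->data, src, flex_array_size(b, data, count));
	return b;
}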
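
The qed_chain.h hunks rework qed_chain_get_elem_left{,_u32}() so the used-element count stays correct after the 16-bit (or 32-bit) producer index wraps past the consumer index. Here is a standalone sketch of the same arithmetic, written as ordinary userspace C purely to show the wraparound handling; used_elems() is not a kernel function.

#include <stdint.h>
#include <stdio.h>

static uint16_t used_elems(uint16_t prod_idx, uint16_t cons_idx)
{
	uint32_t prod = prod_idx;
	uint32_t cons = cons_idx;

	/* Widen before subtracting; undo the 16-bit wrap if needed. */
	if (prod < cons)
		prod += (uint32_t)UINT16_MAX + 1;

	return (uint16_t)(prod - cons);
}

int main(void)
{
	/* Producer wrapped past 0xffff while the consumer has not. */
	printf("%u\n", (unsigned int)used_elems(5, 65530));	/* prints 11 */
	return 0;
}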
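
task_work.h above turns the last argument of task_work_add() into an int and introduces TWA_RESUME/TWA_SIGNAL, with TWA_SIGNAL backed by the new JOBCTL_TASK_WORK bit from sched/jobctl.h. A rough, hypothetical caller sketch follows; my_cb, my_work and queue_signal_work() are invented names and error handling is minimal.

#include <linux/sched.h>
#include <linux/task_work.h>

static void my_cb(struct callback_head *head)
{
	/* Runs from the target task's own context. */
}

static struct callback_head my_work;

static int queue_signal_work(struct task_struct *task)
{
	init_task_work(&my_work, my_cb);

	/*
	 * TWA_SIGNAL kicks the task like a signal would, via the new
	 * JOBCTL_TASK_WORK bit, instead of waiting for the next return
	 * to user mode as TWA_RESUME does. Returns 0 on success.
	 */
	return task_work_add(task, &my_work, TWA_SIGNAL);
}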
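
Finally, uaccess.h renames the probe_kernel_*()/probe_user_*() helpers to the *_nofault() family and replaces probe_kernel_address() with get_kernel_nofault(). A hypothetical caller might look like the sketch below; peek_word() and maybe_bad are invented for illustration.

#include <linux/printk.h>
#include <linux/uaccess.h>

static int peek_word(const unsigned long *maybe_bad)
{
	unsigned long val;

	/*
	 * get_kernel_nofault() returns 0 on success or -EFAULT if the
	 * address could not be read without faulting.
	 */
	if (get_kernel_nofault(val, maybe_bad))
		return -EFAULT;

	pr_info("read %#lx\n", val);
	return 0;
}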