author     Thomas Gleixner <tglx@linutronix.de>   2017-11-14 10:01:49 +0100
committer  Thomas Gleixner <tglx@linutronix.de>   2017-11-14 10:01:49 +0100
commit     d4bfeabe9ff7967f4b8c24aabf2de1ce3a909cd9 (patch)
tree       6b419b8497c7d57ddec20a3558697ef36ea37b11 /include
parent     8a7a8e1eab929eb3a5b735a788a23b9731139046 (diff)
parent     b29c6ef7bb1257853c1e31616d84f55e561cf631 (diff)
Merge branch 'linus' into timers/urgent
Get upstream changes so dependent patches can be applied.
Diffstat (limited to 'include')
102 files changed, 1601 insertions, 1184 deletions
diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h index 49be4bba1e96..34a028a7bcc5 100644 --- a/include/asm-generic/atomic-long.h +++ b/include/asm-generic/atomic-long.h @@ -244,4 +244,7 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u) #define atomic_long_inc_not_zero(l) \ ATOMIC_LONG_PFX(_inc_not_zero)((ATOMIC_LONG_PFX(_t) *)(l)) +#define atomic_long_cond_read_acquire(v, c) \ + ATOMIC_LONG_PFX(_cond_read_acquire)((ATOMIC_LONG_PFX(_t) *)(v), (c)) + #endif /* _ASM_GENERIC_ATOMIC_LONG_H */ diff --git a/include/asm-generic/div64.h b/include/asm-generic/div64.h index d2013064dc69..dc9726fdac8f 100644 --- a/include/asm-generic/div64.h +++ b/include/asm-generic/div64.h @@ -26,6 +26,20 @@ #if BITS_PER_LONG == 64 +/** + * do_div - returns 2 values: calculate remainder and update new dividend + * @n: pointer to uint64_t dividend (will be updated) + * @base: uint32_t divisor + * + * Summary: + * ``uint32_t remainder = *n % base;`` + * ``*n = *n / base;`` + * + * Return: (uint32_t)remainder + * + * NOTE: macro parameter @n is evaluated multiple times, + * beware of side effects! + */ # define do_div(n,base) ({ \ uint32_t __base = (base); \ uint32_t __rem; \ diff --git a/include/asm-generic/qrwlock.h b/include/asm-generic/qrwlock.h index 7d026bf27713..0f7062bd55e5 100644 --- a/include/asm-generic/qrwlock.h +++ b/include/asm-generic/qrwlock.h @@ -26,51 +26,20 @@ /* * Writer states & reader shift and bias. - * - * | +0 | +1 | +2 | +3 | - * ----+----+----+----+----+ - * LE | 78 | 56 | 34 | 12 | 0x12345678 - * ----+----+----+----+----+ - * | wr | rd | - * +----+----+----+----+ - * - * ----+----+----+----+----+ - * BE | 12 | 34 | 56 | 78 | 0x12345678 - * ----+----+----+----+----+ - * | rd | wr | - * +----+----+----+----+ */ -#define _QW_WAITING 1 /* A writer is waiting */ -#define _QW_LOCKED 0xff /* A writer holds the lock */ -#define _QW_WMASK 0xff /* Writer mask */ -#define _QR_SHIFT 8 /* Reader count shift */ +#define _QW_WAITING 0x100 /* A writer is waiting */ +#define _QW_LOCKED 0x0ff /* A writer holds the lock */ +#define _QW_WMASK 0x1ff /* Writer mask */ +#define _QR_SHIFT 9 /* Reader count shift */ #define _QR_BIAS (1U << _QR_SHIFT) /* * External function declarations */ -extern void queued_read_lock_slowpath(struct qrwlock *lock, u32 cnts); +extern void queued_read_lock_slowpath(struct qrwlock *lock); extern void queued_write_lock_slowpath(struct qrwlock *lock); /** - * queued_read_can_lock- would read_trylock() succeed? - * @lock: Pointer to queue rwlock structure - */ -static inline int queued_read_can_lock(struct qrwlock *lock) -{ - return !(atomic_read(&lock->cnts) & _QW_WMASK); -} - -/** - * queued_write_can_lock- would write_trylock() succeed? - * @lock: Pointer to queue rwlock structure - */ -static inline int queued_write_can_lock(struct qrwlock *lock) -{ - return !atomic_read(&lock->cnts); -} - -/** * queued_read_trylock - try to acquire read lock of a queue rwlock * @lock : Pointer to queue rwlock structure * Return: 1 if lock acquired, 0 if failed @@ -118,7 +87,7 @@ static inline void queued_read_lock(struct qrwlock *lock) return; /* The slowpath will decrement the reader count, if necessary. 
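
The do_div() kernel-doc added above benefits from a concrete illustration; a minimal usage sketch (my own example, not part of the patch):

    /* do_div(n, base) divides n in place and returns the remainder:
     * here ns becomes 1000 and rem becomes 123. Never pass an
     * expression with side effects as n; the macro evaluates it
     * more than once. */
    u64 ns = 1000000123;
    u32 rem = do_div(ns, 1000000);
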
*/ - queued_read_lock_slowpath(lock, cnts); + queued_read_lock_slowpath(lock); } /** @@ -147,30 +116,18 @@ static inline void queued_read_unlock(struct qrwlock *lock) } /** - * __qrwlock_write_byte - retrieve the write byte address of a queue rwlock - * @lock : Pointer to queue rwlock structure - * Return: the write byte address of a queue rwlock - */ -static inline u8 *__qrwlock_write_byte(struct qrwlock *lock) -{ - return (u8 *)lock + 3 * IS_BUILTIN(CONFIG_CPU_BIG_ENDIAN); -} - -/** * queued_write_unlock - release write lock of a queue rwlock * @lock : Pointer to queue rwlock structure */ static inline void queued_write_unlock(struct qrwlock *lock) { - smp_store_release(__qrwlock_write_byte(lock), 0); + smp_store_release(&lock->wlocked, 0); } /* * Remapping rwlock architecture specific functions to the corresponding * queue rwlock functions. */ -#define arch_read_can_lock(l) queued_read_can_lock(l) -#define arch_write_can_lock(l) queued_write_can_lock(l) #define arch_read_lock(l) queued_read_lock(l) #define arch_write_lock(l) queued_write_lock(l) #define arch_read_trylock(l) queued_read_trylock(l) diff --git a/include/asm-generic/qrwlock_types.h b/include/asm-generic/qrwlock_types.h index d93573eff162..137ecdd16daa 100644 --- a/include/asm-generic/qrwlock_types.h +++ b/include/asm-generic/qrwlock_types.h @@ -10,12 +10,23 @@ */ typedef struct qrwlock { - atomic_t cnts; + union { + atomic_t cnts; + struct { +#ifdef __LITTLE_ENDIAN + u8 wlocked; /* Locked for write? */ + u8 __lstate[3]; +#else + u8 __lstate[3]; + u8 wlocked; /* Locked for write? */ +#endif + }; + }; arch_spinlock_t wait_lock; } arch_rwlock_t; #define __ARCH_RW_LOCK_UNLOCKED { \ - .cnts = ATOMIC_INIT(0), \ + { .cnts = ATOMIC_INIT(0), }, \ .wait_lock = __ARCH_SPIN_LOCK_UNLOCKED, \ } diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h index 66260777d644..b37b4ad7eb94 100644 --- a/include/asm-generic/qspinlock.h +++ b/include/asm-generic/qspinlock.h @@ -121,6 +121,5 @@ static __always_inline bool virt_spin_lock(struct qspinlock *lock) #define arch_spin_lock(l) queued_spin_lock(l) #define arch_spin_trylock(l) queued_spin_trylock(l) #define arch_spin_unlock(l) queued_spin_unlock(l) -#define arch_spin_lock_flags(l, f) queued_spin_lock(l) #endif /* __ASM_GENERIC_QSPINLOCK_H */ diff --git a/include/asm-generic/rwsem.h b/include/asm-generic/rwsem.h index bdbe43bac230..93e67a055a4d 100644 --- a/include/asm-generic/rwsem.h +++ b/include/asm-generic/rwsem.h @@ -38,6 +38,16 @@ static inline void __down_read(struct rw_semaphore *sem) rwsem_down_read_failed(sem); } +static inline int __down_read_killable(struct rw_semaphore *sem) +{ + if (unlikely(atomic_long_inc_return_acquire(&sem->count) <= 0)) { + if (IS_ERR(rwsem_down_read_failed_killable(sem))) + return -EINTR; + } + + return 0; +} + static inline int __down_read_trylock(struct rw_semaphore *sem) { long tmp; diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 8acfc1e099e1..bdcd1caae092 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -459,6 +459,7 @@ #define TEXT_TEXT \ ALIGN_FUNCTION(); \ *(.text.hot TEXT_MAIN .text.fixup .text.unlikely) \ + *(.text..refcount) \ *(.ref.text) \ MEM_KEEP(init.text) \ MEM_KEEP(exit.text) \ @@ -687,7 +688,7 @@ #define BUG_TABLE #endif -#ifdef CONFIG_ORC_UNWINDER +#ifdef CONFIG_UNWINDER_ORC #define ORC_UNWIND_TABLE \ . 
= ALIGN(4); \ .orc_unwind_ip : AT(ADDR(.orc_unwind_ip) - LOAD_OFFSET) { \ @@ -778,6 +779,24 @@ #endif /* + * Memory encryption operates on a page basis. Since we need to clear + * the memory encryption mask for this section, it needs to be aligned + * on a page boundary and be a page-size multiple in length. + * + * Note: We use a separate section so that only this section gets + * decrypted to avoid exposing more than we wish. + */ +#ifdef CONFIG_AMD_MEM_ENCRYPT +#define PERCPU_DECRYPTED_SECTION \ + . = ALIGN(PAGE_SIZE); \ + *(.data..percpu..decrypted) \ + . = ALIGN(PAGE_SIZE); +#else +#define PERCPU_DECRYPTED_SECTION +#endif + + +/* * Default discarded sections. * * Some archs want to discard exit text/data at runtime rather than @@ -815,6 +834,7 @@ . = ALIGN(cacheline); \ *(.data..percpu) \ *(.data..percpu..shared_aligned) \ + PERCPU_DECRYPTED_SECTION \ VMLINUX_SYMBOL(__per_cpu_end) = .; /** diff --git a/include/linux/atomic.h b/include/linux/atomic.h index cd18203d6ff3..8b276fd9a127 100644 --- a/include/linux/atomic.h +++ b/include/linux/atomic.h @@ -654,6 +654,8 @@ static inline int atomic_dec_if_positive(atomic_t *v) } #endif +#define atomic_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c)) + #ifdef CONFIG_GENERIC_ATOMIC64 #include <asm-generic/atomic64.h> #endif @@ -1073,6 +1075,8 @@ static inline long long atomic64_fetch_andnot_release(long long i, atomic64_t *v } #endif +#define atomic64_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c)) + #include <asm-generic/atomic-long.h> #endif /* _LINUX_ATOMIC_H */ diff --git a/include/linux/average.h b/include/linux/average.h index 1b6f5560c264..a1a8f09631ce 100644 --- a/include/linux/average.h +++ b/include/linux/average.h @@ -2,6 +2,10 @@ #ifndef _LINUX_AVERAGE_H #define _LINUX_AVERAGE_H +#include <linux/bug.h> +#include <linux/compiler.h> +#include <linux/log2.h> + /* * Exponentially weighted moving average (EWMA) * @@ -49,7 +53,7 @@ static inline void ewma_##name##_add(struct ewma_##name *e, \ unsigned long val) \ { \ - unsigned long internal = ACCESS_ONCE(e->internal); \ + unsigned long internal = READ_ONCE(e->internal); \ unsigned long weight_rcp = ilog2(_weight_rcp); \ unsigned long precision = _precision; \ \ @@ -58,10 +62,10 @@ BUILD_BUG_ON((_precision) > 30); \ BUILD_BUG_ON_NOT_POWER_OF_2(_weight_rcp); \ \ - ACCESS_ONCE(e->internal) = internal ? \ + WRITE_ONCE(e->internal, internal ? \ (((internal << weight_rcp) - internal) + \ (val << precision)) >> weight_rcp : \ - (val << precision); \ + (val << precision)); \ } #endif /* _LINUX_AVERAGE_H */ diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h index 19748a5b0e77..3489253e38fc 100644 --- a/include/linux/bitmap.h +++ b/include/linux/bitmap.h @@ -22,65 +22,74 @@ * See lib/bitmap.c for more details. */ -/* +/** + * DOC: bitmap overview + * * The available bitmap operations and their rough meaning in the * case that the bitmap is a single unsigned long are thus: * * Note that nbits should be always a compile time evaluable constant. * Otherwise many inlines will generate horrible code. 
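
The ewma_##name##_add() hunk above is one instance of this merge's tree-wide ACCESS_ONCE() to READ_ONCE()/WRITE_ONCE() conversion; the mapping is mechanical (sketch, with a hypothetical variable named shared):

    /* Before: ACCESS_ONCE() served as both load and store. */
    val = ACCESS_ONCE(shared);
    ACCESS_ONCE(shared) = val + 1;

    /* After: the direction is explicit, and READ_ONCE()/WRITE_ONCE()
     * also handle non-scalar types, which ACCESS_ONCE() could not. */
    val = READ_ONCE(shared);
    WRITE_ONCE(shared, val + 1);
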
* - * bitmap_zero(dst, nbits) *dst = 0UL - * bitmap_fill(dst, nbits) *dst = ~0UL - * bitmap_copy(dst, src, nbits) *dst = *src - * bitmap_and(dst, src1, src2, nbits) *dst = *src1 & *src2 - * bitmap_or(dst, src1, src2, nbits) *dst = *src1 | *src2 - * bitmap_xor(dst, src1, src2, nbits) *dst = *src1 ^ *src2 - * bitmap_andnot(dst, src1, src2, nbits) *dst = *src1 & ~(*src2) - * bitmap_complement(dst, src, nbits) *dst = ~(*src) - * bitmap_equal(src1, src2, nbits) Are *src1 and *src2 equal? - * bitmap_intersects(src1, src2, nbits) Do *src1 and *src2 overlap? - * bitmap_subset(src1, src2, nbits) Is *src1 a subset of *src2? - * bitmap_empty(src, nbits) Are all bits zero in *src? - * bitmap_full(src, nbits) Are all bits set in *src? - * bitmap_weight(src, nbits) Hamming Weight: number set bits - * bitmap_set(dst, pos, nbits) Set specified bit area - * bitmap_clear(dst, pos, nbits) Clear specified bit area - * bitmap_find_next_zero_area(buf, len, pos, n, mask) Find bit free area - * bitmap_find_next_zero_area_off(buf, len, pos, n, mask) as above - * bitmap_shift_right(dst, src, n, nbits) *dst = *src >> n - * bitmap_shift_left(dst, src, n, nbits) *dst = *src << n - * bitmap_remap(dst, src, old, new, nbits) *dst = map(old, new)(src) - * bitmap_bitremap(oldbit, old, new, nbits) newbit = map(old, new)(oldbit) - * bitmap_onto(dst, orig, relmap, nbits) *dst = orig relative to relmap - * bitmap_fold(dst, orig, sz, nbits) dst bits = orig bits mod sz - * bitmap_parse(buf, buflen, dst, nbits) Parse bitmap dst from kernel buf - * bitmap_parse_user(ubuf, ulen, dst, nbits) Parse bitmap dst from user buf - * bitmap_parselist(buf, dst, nbits) Parse bitmap dst from kernel buf - * bitmap_parselist_user(buf, dst, nbits) Parse bitmap dst from user buf - * bitmap_find_free_region(bitmap, bits, order) Find and allocate bit region - * bitmap_release_region(bitmap, pos, order) Free specified bit region - * bitmap_allocate_region(bitmap, pos, order) Allocate specified bit region - * bitmap_from_u32array(dst, nbits, buf, nwords) *dst = *buf (nwords 32b words) - * bitmap_to_u32array(buf, nwords, src, nbits) *buf = *dst (nwords 32b words) + * :: + * + * bitmap_zero(dst, nbits) *dst = 0UL + * bitmap_fill(dst, nbits) *dst = ~0UL + * bitmap_copy(dst, src, nbits) *dst = *src + * bitmap_and(dst, src1, src2, nbits) *dst = *src1 & *src2 + * bitmap_or(dst, src1, src2, nbits) *dst = *src1 | *src2 + * bitmap_xor(dst, src1, src2, nbits) *dst = *src1 ^ *src2 + * bitmap_andnot(dst, src1, src2, nbits) *dst = *src1 & ~(*src2) + * bitmap_complement(dst, src, nbits) *dst = ~(*src) + * bitmap_equal(src1, src2, nbits) Are *src1 and *src2 equal? + * bitmap_intersects(src1, src2, nbits) Do *src1 and *src2 overlap? + * bitmap_subset(src1, src2, nbits) Is *src1 a subset of *src2? + * bitmap_empty(src, nbits) Are all bits zero in *src? + * bitmap_full(src, nbits) Are all bits set in *src? 
+ * bitmap_weight(src, nbits) Hamming Weight: number set bits + * bitmap_set(dst, pos, nbits) Set specified bit area + * bitmap_clear(dst, pos, nbits) Clear specified bit area + * bitmap_find_next_zero_area(buf, len, pos, n, mask) Find bit free area + * bitmap_find_next_zero_area_off(buf, len, pos, n, mask) as above + * bitmap_shift_right(dst, src, n, nbits) *dst = *src >> n + * bitmap_shift_left(dst, src, n, nbits) *dst = *src << n + * bitmap_remap(dst, src, old, new, nbits) *dst = map(old, new)(src) + * bitmap_bitremap(oldbit, old, new, nbits) newbit = map(old, new)(oldbit) + * bitmap_onto(dst, orig, relmap, nbits) *dst = orig relative to relmap + * bitmap_fold(dst, orig, sz, nbits) dst bits = orig bits mod sz + * bitmap_parse(buf, buflen, dst, nbits) Parse bitmap dst from kernel buf + * bitmap_parse_user(ubuf, ulen, dst, nbits) Parse bitmap dst from user buf + * bitmap_parselist(buf, dst, nbits) Parse bitmap dst from kernel buf + * bitmap_parselist_user(buf, dst, nbits) Parse bitmap dst from user buf + * bitmap_find_free_region(bitmap, bits, order) Find and allocate bit region + * bitmap_release_region(bitmap, pos, order) Free specified bit region + * bitmap_allocate_region(bitmap, pos, order) Allocate specified bit region + * bitmap_from_u32array(dst, nbits, buf, nwords) *dst = *buf (nwords 32b words) + * bitmap_to_u32array(buf, nwords, src, nbits) *buf = *dst (nwords 32b words) + * */ -/* - * Also the following operations in asm/bitops.h apply to bitmaps. +/** + * DOC: bitmap bitops + * + * Also the following operations in asm/bitops.h apply to bitmaps.:: + * + * set_bit(bit, addr) *addr |= bit + * clear_bit(bit, addr) *addr &= ~bit + * change_bit(bit, addr) *addr ^= bit + * test_bit(bit, addr) Is bit set in *addr? + * test_and_set_bit(bit, addr) Set bit and return old value + * test_and_clear_bit(bit, addr) Clear bit and return old value + * test_and_change_bit(bit, addr) Change bit and return old value + * find_first_zero_bit(addr, nbits) Position first zero bit in *addr + * find_first_bit(addr, nbits) Position first set bit in *addr + * find_next_zero_bit(addr, nbits, bit) Position next zero bit in *addr >= bit + * find_next_bit(addr, nbits, bit) Position next set bit in *addr >= bit * - * set_bit(bit, addr) *addr |= bit - * clear_bit(bit, addr) *addr &= ~bit - * change_bit(bit, addr) *addr ^= bit - * test_bit(bit, addr) Is bit set in *addr? - * test_and_set_bit(bit, addr) Set bit and return old value - * test_and_clear_bit(bit, addr) Clear bit and return old value - * test_and_change_bit(bit, addr) Change bit and return old value - * find_first_zero_bit(addr, nbits) Position first zero bit in *addr - * find_first_bit(addr, nbits) Position first set bit in *addr - * find_next_zero_bit(addr, nbits, bit) Position next zero bit in *addr >= bit - * find_next_bit(addr, nbits, bit) Position next set bit in *addr >= bit */ -/* +/** + * DOC: declare bitmap * The DECLARE_BITMAP(name,bits) macro, in linux/types.h, can be used * to declare an array named 'name' of just enough unsigned longs to * contain all bit positions from 0 to 'bits' - 1. @@ -361,8 +370,9 @@ static inline int bitmap_parse(const char *buf, unsigned int buflen, return __bitmap_parse(buf, buflen, 0, maskp, nmaskbits); } -/* +/** * BITMAP_FROM_U64() - Represent u64 value in the format suitable for bitmap. + * @n: u64 value * * Linux bitmaps are internally arrays of unsigned longs, i.e. 32-bit * integers in 32-bit environment, and 64-bit integers in 64-bit one. 
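
A note on the BITMAP_FROM_U64() kernel-doc fixed up below: the macro expands to one initializer on 64-bit kernels and to two correctly ordered 32-bit words otherwise, so its natural home is an array initializer (hypothetical mask, my example):

    /* One long on 64-bit kernels, two longs (low word first) on
     * 32-bit kernels; either way bit 0 of the u64 is bit 0 of the
     * bitmap. */
    static const unsigned long my_mask[] = {
            BITMAP_FROM_U64(0x00000000ffffffffULL)
    };
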
@@ -393,14 +403,14 @@ static inline int bitmap_parse(const char *buf, unsigned int buflen, ((unsigned long) ((u64)(n) >> 32)) #endif -/* +/** * bitmap_from_u64 - Check and swap words within u64. * @mask: source bitmap * @dst: destination bitmap * - * In 32-bit Big Endian kernel, when using (u32 *)(&val)[*] + * In 32-bit Big Endian kernel, when using ``(u32 *)(&val)[*]`` * to read u64 mask, we will get the wrong word. - * That is "(u32 *)(&val)[0]" gets the upper 32 bits, + * That is ``(u32 *)(&val)[0]`` gets the upper 32 bits, * but we expect the lower 32-bits of u64. */ static inline void bitmap_from_u64(unsigned long *dst, u64 mask) diff --git a/include/linux/bitops.h b/include/linux/bitops.h index d03c5dd6185d..c537ac7435ad 100644 --- a/include/linux/bitops.h +++ b/include/linux/bitops.h @@ -237,7 +237,7 @@ static inline unsigned long __ffs64(u64 word) typeof(*ptr) old, new; \ \ do { \ - old = ACCESS_ONCE(*ptr); \ + old = READ_ONCE(*ptr); \ new = (old & ~mask) | bits; \ } while (cmpxchg(ptr, old, new) != old); \ \ @@ -252,7 +252,7 @@ static inline unsigned long __ffs64(u64 word) typeof(*ptr) old, new; \ \ do { \ - old = ACCESS_ONCE(*ptr); \ + old = READ_ONCE(*ptr); \ new = old & ~clear; \ } while (!(old & test) && \ cmpxchg(ptr, old, new) != old); \ diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h index 54dfef70a072..a06583e41f80 100644 --- a/include/linux/compiler-clang.h +++ b/include/linux/compiler-clang.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __LINUX_COMPILER_H +#ifndef __LINUX_COMPILER_TYPES_H #error "Please don't include <linux/compiler-clang.h> directly, include <linux/compiler.h> instead." #endif diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index bb78e5bdff26..2272ded07496 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __LINUX_COMPILER_H +#ifndef __LINUX_COMPILER_TYPES_H #error "Please don't include <linux/compiler-gcc.h> directly, include <linux/compiler.h> instead." #endif diff --git a/include/linux/compiler-intel.h b/include/linux/compiler-intel.h index 523d1b74550f..bfa08160db3a 100644 --- a/include/linux/compiler-intel.h +++ b/include/linux/compiler-intel.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __LINUX_COMPILER_H +#ifndef __LINUX_COMPILER_TYPES_H #error "Please don't include <linux/compiler-intel.h> directly, include <linux/compiler.h> instead." #endif diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 202710420d6d..3672353a0acd 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -2,111 +2,12 @@ #ifndef __LINUX_COMPILER_H #define __LINUX_COMPILER_H -#ifndef __ASSEMBLY__ +#include <linux/compiler_types.h> -#ifdef __CHECKER__ -# define __user __attribute__((noderef, address_space(1))) -# define __kernel __attribute__((address_space(0))) -# define __safe __attribute__((safe)) -# define __force __attribute__((force)) -# define __nocast __attribute__((nocast)) -# define __iomem __attribute__((noderef, address_space(2))) -# define __must_hold(x) __attribute__((context(x,1,1))) -# define __acquires(x) __attribute__((context(x,0,1))) -# define __releases(x) __attribute__((context(x,1,0))) -# define __acquire(x) __context__(x,1) -# define __release(x) __context__(x,-1) -# define __cond_lock(x,c) ((c) ? 
({ __acquire(x); 1; }) : 0) -# define __percpu __attribute__((noderef, address_space(3))) -# define __rcu __attribute__((noderef, address_space(4))) -# define __private __attribute__((noderef)) -extern void __chk_user_ptr(const volatile void __user *); -extern void __chk_io_ptr(const volatile void __iomem *); -# define ACCESS_PRIVATE(p, member) (*((typeof((p)->member) __force *) &(p)->member)) -#else /* __CHECKER__ */ -# ifdef STRUCTLEAK_PLUGIN -# define __user __attribute__((user)) -# else -# define __user -# endif -# define __kernel -# define __safe -# define __force -# define __nocast -# define __iomem -# define __chk_user_ptr(x) (void)0 -# define __chk_io_ptr(x) (void)0 -# define __builtin_warning(x, y...) (1) -# define __must_hold(x) -# define __acquires(x) -# define __releases(x) -# define __acquire(x) (void)0 -# define __release(x) (void)0 -# define __cond_lock(x,c) (c) -# define __percpu -# define __rcu -# define __private -# define ACCESS_PRIVATE(p, member) ((p)->member) -#endif /* __CHECKER__ */ - -/* Indirect macros required for expanded argument pasting, eg. __LINE__. */ -#define ___PASTE(a,b) a##b -#define __PASTE(a,b) ___PASTE(a,b) +#ifndef __ASSEMBLY__ #ifdef __KERNEL__ -#ifdef __GNUC__ -#include <linux/compiler-gcc.h> -#endif - -#if defined(CC_USING_HOTPATCH) && !defined(__CHECKER__) -#define notrace __attribute__((hotpatch(0,0))) -#else -#define notrace __attribute__((no_instrument_function)) -#endif - -/* Intel compiler defines __GNUC__. So we will overwrite implementations - * coming from above header files here - */ -#ifdef __INTEL_COMPILER -# include <linux/compiler-intel.h> -#endif - -/* Clang compiler defines __GNUC__. So we will overwrite implementations - * coming from above header files here - */ -#ifdef __clang__ -#include <linux/compiler-clang.h> -#endif - -/* - * Generic compiler-dependent macros required for kernel - * build go below this comment. Actual compiler/compiler version - * specific implementations come from the above header files - */ - -struct ftrace_branch_data { - const char *func; - const char *file; - unsigned line; - union { - struct { - unsigned long correct; - unsigned long incorrect; - }; - struct { - unsigned long miss; - unsigned long hit; - }; - unsigned long miss_hit[2]; - }; -}; - -struct ftrace_likely_data { - struct ftrace_branch_data data; - unsigned long constant; -}; - /* * Note: DISABLE_BRANCH_PROFILING can be used by special lowlevel code * to disable branch tracing on a per file basis. @@ -333,6 +234,7 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s * with an explicit memory barrier or atomic instruction that provides the * required ordering. */ +#include <asm/barrier.h> #define __READ_ONCE(x, check) \ ({ \ @@ -341,6 +243,7 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s __read_once_size(&(x), __u.__c, sizeof(x)); \ else \ __read_once_size_nocheck(&(x), __u.__c, sizeof(x)); \ + smp_read_barrier_depends(); /* Enforce dependency ordering from x */ \ __u.__val; \ }) #define READ_ONCE(x) __READ_ONCE(x, 1) @@ -363,167 +266,6 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s #endif /* __ASSEMBLY__ */ -#ifdef __KERNEL__ -/* - * Allow us to mark functions as 'deprecated' and have gcc emit a nice - * warning for each use, in hopes of speeding the functions removal. 
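
The #include <asm/barrier.h> and smp_read_barrier_depends() folded into __READ_ONCE() above are what let this merge delete lockless_dereference() further down: the old idiom now maps onto plain READ_ONCE() (sketch, with a hypothetical shared pointer):

    extern struct foo *shared_ptr;  /* hypothetical */

    /* Old: lockless_dereference() = READ_ONCE() + dependency barrier. */
    struct foo *f = lockless_dereference(shared_ptr);

    /* New: READ_ONCE() already orders the dependent access; the
     * barrier is a no-op on everything except Alpha. */
    struct foo *g = READ_ONCE(shared_ptr);
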
- * Usage is: - * int __deprecated foo(void) - */ -#ifndef __deprecated -# define __deprecated /* unimplemented */ -#endif - -#ifdef MODULE -#define __deprecated_for_modules __deprecated -#else -#define __deprecated_for_modules -#endif - -#ifndef __must_check -#define __must_check -#endif - -#ifndef CONFIG_ENABLE_MUST_CHECK -#undef __must_check -#define __must_check -#endif -#ifndef CONFIG_ENABLE_WARN_DEPRECATED -#undef __deprecated -#undef __deprecated_for_modules -#define __deprecated -#define __deprecated_for_modules -#endif - -#ifndef __malloc -#define __malloc -#endif - -/* - * Allow us to avoid 'defined but not used' warnings on functions and data, - * as well as force them to be emitted to the assembly file. - * - * As of gcc 3.4, static functions that are not marked with attribute((used)) - * may be elided from the assembly file. As of gcc 3.4, static data not so - * marked will not be elided, but this may change in a future gcc version. - * - * NOTE: Because distributions shipped with a backported unit-at-a-time - * compiler in gcc 3.3, we must define __used to be __attribute__((used)) - * for gcc >=3.3 instead of 3.4. - * - * In prior versions of gcc, such functions and data would be emitted, but - * would be warned about except with attribute((unused)). - * - * Mark functions that are referenced only in inline assembly as __used so - * the code is emitted even though it appears to be unreferenced. - */ -#ifndef __used -# define __used /* unimplemented */ -#endif - -#ifndef __maybe_unused -# define __maybe_unused /* unimplemented */ -#endif - -#ifndef __always_unused -# define __always_unused /* unimplemented */ -#endif - -#ifndef noinline -#define noinline -#endif - -/* - * Rather then using noinline to prevent stack consumption, use - * noinline_for_stack instead. For documentation reasons. - */ -#define noinline_for_stack noinline - -#ifndef __always_inline -#define __always_inline inline -#endif - -#endif /* __KERNEL__ */ - -/* - * From the GCC manual: - * - * Many functions do not examine any values except their arguments, - * and have no effects except the return value. Basically this is - * just slightly more strict class than the `pure' attribute above, - * since function is not allowed to read global memory. - * - * Note that a function that has pointer arguments and examines the - * data pointed to must _not_ be declared `const'. Likewise, a - * function that calls a non-`const' function usually must not be - * `const'. It does not make sense for a `const' function to return - * `void'. - */ -#ifndef __attribute_const__ -# define __attribute_const__ /* unimplemented */ -#endif - -#ifndef __designated_init -# define __designated_init -#endif - -#ifndef __latent_entropy -# define __latent_entropy -#endif - -#ifndef __randomize_layout -# define __randomize_layout __designated_init -#endif - -#ifndef __no_randomize_layout -# define __no_randomize_layout -#endif - -#ifndef randomized_struct_fields_start -# define randomized_struct_fields_start -# define randomized_struct_fields_end -#endif - -/* - * Tell gcc if a function is cold. The compiler will assume any path - * directly leading to the call is unlikely. - */ - -#ifndef __cold -#define __cold -#endif - -/* Simple shorthand for a section definition */ -#ifndef __section -# define __section(S) __attribute__ ((__section__(#S))) -#endif - -#ifndef __visible -#define __visible -#endif - -#ifndef __nostackprotector -# define __nostackprotector -#endif - -/* - * Assume alignment of return value. 
- */ -#ifndef __assume_aligned -#define __assume_aligned(a, ...) -#endif - - -/* Are two types/vars the same type (ignoring qualifiers)? */ -#ifndef __same_type -# define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b)) -#endif - -/* Is this type a native word size -- useful for atomic operations */ -#ifndef __native_word -# define __native_word(t) (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long)) -#endif - /* Compile time object size, -1 for unknown */ #ifndef __compiletime_object_size # define __compiletime_object_size(obj) -1 @@ -605,24 +347,4 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s (volatile typeof(x) *)&(x); }) #define ACCESS_ONCE(x) (*__ACCESS_ONCE(x)) -/** - * lockless_dereference() - safely load a pointer for later dereference - * @p: The pointer to load - * - * Similar to rcu_dereference(), but for situations where the pointed-to - * object's lifetime is managed by something other than RCU. That - * "something other" might be reference counting or simple immortality. - * - * The seemingly unused variable ___typecheck_p validates that @p is - * indeed a pointer type by using a pointer to typeof(*p) as the type. - * Taking a pointer to typeof(*p) again is needed in case p is void *. - */ -#define lockless_dereference(p) \ -({ \ - typeof(p) _________p1 = READ_ONCE(p); \ - typeof(*(p)) *___typecheck_p __maybe_unused; \ - smp_read_barrier_depends(); /* Dependency order vs. p above. */ \ - (_________p1); \ -}) - #endif /* __LINUX_COMPILER_H */ diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h new file mode 100644 index 000000000000..6b79a9bba9a7 --- /dev/null +++ b/include/linux/compiler_types.h @@ -0,0 +1,274 @@ +#ifndef __LINUX_COMPILER_TYPES_H +#define __LINUX_COMPILER_TYPES_H + +#ifndef __ASSEMBLY__ + +#ifdef __CHECKER__ +# define __user __attribute__((noderef, address_space(1))) +# define __kernel __attribute__((address_space(0))) +# define __safe __attribute__((safe)) +# define __force __attribute__((force)) +# define __nocast __attribute__((nocast)) +# define __iomem __attribute__((noderef, address_space(2))) +# define __must_hold(x) __attribute__((context(x,1,1))) +# define __acquires(x) __attribute__((context(x,0,1))) +# define __releases(x) __attribute__((context(x,1,0))) +# define __acquire(x) __context__(x,1) +# define __release(x) __context__(x,-1) +# define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0) +# define __percpu __attribute__((noderef, address_space(3))) +# define __rcu __attribute__((noderef, address_space(4))) +# define __private __attribute__((noderef)) +extern void __chk_user_ptr(const volatile void __user *); +extern void __chk_io_ptr(const volatile void __iomem *); +# define ACCESS_PRIVATE(p, member) (*((typeof((p)->member) __force *) &(p)->member)) +#else /* __CHECKER__ */ +# ifdef STRUCTLEAK_PLUGIN +# define __user __attribute__((user)) +# else +# define __user +# endif +# define __kernel +# define __safe +# define __force +# define __nocast +# define __iomem +# define __chk_user_ptr(x) (void)0 +# define __chk_io_ptr(x) (void)0 +# define __builtin_warning(x, y...) 
(1) +# define __must_hold(x) +# define __acquires(x) +# define __releases(x) +# define __acquire(x) (void)0 +# define __release(x) (void)0 +# define __cond_lock(x,c) (c) +# define __percpu +# define __rcu +# define __private +# define ACCESS_PRIVATE(p, member) ((p)->member) +#endif /* __CHECKER__ */ + +/* Indirect macros required for expanded argument pasting, eg. __LINE__. */ +#define ___PASTE(a,b) a##b +#define __PASTE(a,b) ___PASTE(a,b) + +#ifdef __KERNEL__ + +#ifdef __GNUC__ +#include <linux/compiler-gcc.h> +#endif + +#if defined(CC_USING_HOTPATCH) && !defined(__CHECKER__) +#define notrace __attribute__((hotpatch(0,0))) +#else +#define notrace __attribute__((no_instrument_function)) +#endif + +/* Intel compiler defines __GNUC__. So we will overwrite implementations + * coming from above header files here + */ +#ifdef __INTEL_COMPILER +# include <linux/compiler-intel.h> +#endif + +/* Clang compiler defines __GNUC__. So we will overwrite implementations + * coming from above header files here + */ +#ifdef __clang__ +#include <linux/compiler-clang.h> +#endif + +/* + * Generic compiler-dependent macros required for kernel + * build go below this comment. Actual compiler/compiler version + * specific implementations come from the above header files + */ + +struct ftrace_branch_data { + const char *func; + const char *file; + unsigned line; + union { + struct { + unsigned long correct; + unsigned long incorrect; + }; + struct { + unsigned long miss; + unsigned long hit; + }; + unsigned long miss_hit[2]; + }; +}; + +struct ftrace_likely_data { + struct ftrace_branch_data data; + unsigned long constant; +}; + +#endif /* __KERNEL__ */ + +#endif /* __ASSEMBLY__ */ + +#ifdef __KERNEL__ +/* + * Allow us to mark functions as 'deprecated' and have gcc emit a nice + * warning for each use, in hopes of speeding the functions removal. + * Usage is: + * int __deprecated foo(void) + */ +#ifndef __deprecated +# define __deprecated /* unimplemented */ +#endif + +#ifdef MODULE +#define __deprecated_for_modules __deprecated +#else +#define __deprecated_for_modules +#endif + +#ifndef __must_check +#define __must_check +#endif + +#ifndef CONFIG_ENABLE_MUST_CHECK +#undef __must_check +#define __must_check +#endif +#ifndef CONFIG_ENABLE_WARN_DEPRECATED +#undef __deprecated +#undef __deprecated_for_modules +#define __deprecated +#define __deprecated_for_modules +#endif + +#ifndef __malloc +#define __malloc +#endif + +/* + * Allow us to avoid 'defined but not used' warnings on functions and data, + * as well as force them to be emitted to the assembly file. + * + * As of gcc 3.4, static functions that are not marked with attribute((used)) + * may be elided from the assembly file. As of gcc 3.4, static data not so + * marked will not be elided, but this may change in a future gcc version. + * + * NOTE: Because distributions shipped with a backported unit-at-a-time + * compiler in gcc 3.3, we must define __used to be __attribute__((used)) + * for gcc >=3.3 instead of 3.4. + * + * In prior versions of gcc, such functions and data would be emitted, but + * would be warned about except with attribute((unused)). + * + * Mark functions that are referenced only in inline assembly as __used so + * the code is emitted even though it appears to be unreferenced. 
+ */ +#ifndef __used +# define __used /* unimplemented */ +#endif + +#ifndef __maybe_unused +# define __maybe_unused /* unimplemented */ +#endif + +#ifndef __always_unused +# define __always_unused /* unimplemented */ +#endif + +#ifndef noinline +#define noinline +#endif + +/* + * Rather then using noinline to prevent stack consumption, use + * noinline_for_stack instead. For documentation reasons. + */ +#define noinline_for_stack noinline + +#ifndef __always_inline +#define __always_inline inline +#endif + +#endif /* __KERNEL__ */ + +/* + * From the GCC manual: + * + * Many functions do not examine any values except their arguments, + * and have no effects except the return value. Basically this is + * just slightly more strict class than the `pure' attribute above, + * since function is not allowed to read global memory. + * + * Note that a function that has pointer arguments and examines the + * data pointed to must _not_ be declared `const'. Likewise, a + * function that calls a non-`const' function usually must not be + * `const'. It does not make sense for a `const' function to return + * `void'. + */ +#ifndef __attribute_const__ +# define __attribute_const__ /* unimplemented */ +#endif + +#ifndef __designated_init +# define __designated_init +#endif + +#ifndef __latent_entropy +# define __latent_entropy +#endif + +#ifndef __randomize_layout +# define __randomize_layout __designated_init +#endif + +#ifndef __no_randomize_layout +# define __no_randomize_layout +#endif + +#ifndef randomized_struct_fields_start +# define randomized_struct_fields_start +# define randomized_struct_fields_end +#endif + +/* + * Tell gcc if a function is cold. The compiler will assume any path + * directly leading to the call is unlikely. + */ + +#ifndef __cold +#define __cold +#endif + +/* Simple shorthand for a section definition */ +#ifndef __section +# define __section(S) __attribute__ ((__section__(#S))) +#endif + +#ifndef __visible +#define __visible +#endif + +#ifndef __nostackprotector +# define __nostackprotector +#endif + +/* + * Assume alignment of return value. + */ +#ifndef __assume_aligned +#define __assume_aligned(a, ...) +#endif + + +/* Are two types/vars the same type (ignoring qualifiers)? 
*/ +#ifndef __same_type +# define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b)) +#endif + +/* Is this type a native word size -- useful for atomic operations */ +#ifndef __native_word +# define __native_word(t) (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long)) +#endif + +#endif /* __LINUX_COMPILER_TYPES_H */ diff --git a/include/linux/completion.h b/include/linux/completion.h index 7828451e161a..0662a417febe 100644 --- a/include/linux/completion.h +++ b/include/linux/completion.h @@ -50,15 +50,23 @@ static inline void complete_release_commit(struct completion *x) lock_commit_crosslock((struct lockdep_map *)&x->map); } +#define init_completion_map(x, m) \ +do { \ + lockdep_init_map_crosslock((struct lockdep_map *)&(x)->map, \ + (m)->name, (m)->key, 0); \ + __init_completion(x); \ +} while (0) + #define init_completion(x) \ do { \ static struct lock_class_key __key; \ lockdep_init_map_crosslock((struct lockdep_map *)&(x)->map, \ - "(complete)" #x, \ + "(completion)" #x, \ &__key, 0); \ __init_completion(x); \ } while (0) #else +#define init_completion_map(x, m) __init_completion(x) #define init_completion(x) __init_completion(x) static inline void complete_acquire(struct completion *x) {} static inline void complete_release(struct completion *x) {} @@ -68,12 +76,15 @@ static inline void complete_release_commit(struct completion *x) {} #ifdef CONFIG_LOCKDEP_COMPLETIONS #define COMPLETION_INITIALIZER(work) \ { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait), \ - STATIC_CROSS_LOCKDEP_MAP_INIT("(complete)" #work, &(work)) } + STATIC_CROSS_LOCKDEP_MAP_INIT("(completion)" #work, &(work)) } #else #define COMPLETION_INITIALIZER(work) \ { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) } #endif +#define COMPLETION_INITIALIZER_ONSTACK_MAP(work, map) \ + (*({ init_completion_map(&(work), &(map)); &(work); })) + #define COMPLETION_INITIALIZER_ONSTACK(work) \ (*({ init_completion(&work); &work; })) @@ -103,8 +114,11 @@ static inline void complete_release_commit(struct completion *x) {} #ifdef CONFIG_LOCKDEP # define DECLARE_COMPLETION_ONSTACK(work) \ struct completion work = COMPLETION_INITIALIZER_ONSTACK(work) +# define DECLARE_COMPLETION_ONSTACK_MAP(work, map) \ + struct completion work = COMPLETION_INITIALIZER_ONSTACK_MAP(work, map) #else # define DECLARE_COMPLETION_ONSTACK(work) DECLARE_COMPLETION(work) +# define DECLARE_COMPLETION_ONSTACK_MAP(work, map) DECLARE_COMPLETION(work) #endif /** diff --git a/include/linux/cpu.h b/include/linux/cpu.h index 938ea8ae0ba4..a04ef7c15c6a 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -56,27 +56,17 @@ extern void unregister_cpu(struct cpu *cpu); extern ssize_t arch_cpu_probe(const char *, size_t); extern ssize_t arch_cpu_release(const char *, size_t); #endif -struct notifier_block; - -#define CPU_ONLINE 0x0002 /* CPU (unsigned)v is up */ -#define CPU_UP_PREPARE 0x0003 /* CPU (unsigned)v coming up */ -#define CPU_DEAD 0x0007 /* CPU (unsigned)v dead */ -#define CPU_POST_DEAD 0x0009 /* CPU (unsigned)v dead, cpu_hotplug - * lock is dropped */ -#define CPU_BROKEN 0x000B /* CPU (unsigned)v did not die properly, - * perhaps due to preemption. */ - -/* Used for CPU hotplug events occurring while tasks are frozen due to a suspend - * operation in progress + +/* + * These states are not related to the core CPU hotplug mechanism. 
They are + * used by various (sub)architectures to track internal state */ -#define CPU_TASKS_FROZEN 0x0010 - -#define CPU_ONLINE_FROZEN (CPU_ONLINE | CPU_TASKS_FROZEN) -#define CPU_UP_PREPARE_FROZEN (CPU_UP_PREPARE | CPU_TASKS_FROZEN) -#define CPU_UP_CANCELED_FROZEN (CPU_UP_CANCELED | CPU_TASKS_FROZEN) -#define CPU_DOWN_PREPARE_FROZEN (CPU_DOWN_PREPARE | CPU_TASKS_FROZEN) -#define CPU_DOWN_FAILED_FROZEN (CPU_DOWN_FAILED | CPU_TASKS_FROZEN) -#define CPU_DEAD_FROZEN (CPU_DEAD | CPU_TASKS_FROZEN) +#define CPU_ONLINE 0x0002 /* CPU is up */ +#define CPU_UP_PREPARE 0x0003 /* CPU coming up */ +#define CPU_DEAD 0x0007 /* CPU dead */ +#define CPU_DEAD_FROZEN 0x0008 /* CPU timed out on unplug */ +#define CPU_POST_DEAD 0x0009 /* CPU successfully unplugged */ +#define CPU_BROKEN 0x000B /* CPU did not die properly */ #ifdef CONFIG_SMP extern bool cpuhp_tasks_frozen; diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 2477a5cb5bd5..ec32c4c5eb30 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -99,6 +99,7 @@ enum cpuhp_state { CPUHP_AP_IRQ_HIP04_STARTING, CPUHP_AP_IRQ_ARMADA_XP_STARTING, CPUHP_AP_IRQ_BCM2836_STARTING, + CPUHP_AP_IRQ_MIPS_GIC_STARTING, CPUHP_AP_ARM_MVEBU_COHERENCY, CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING, CPUHP_AP_PERF_X86_STARTING, diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index 8d3125c493b2..75b565194437 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -131,6 +131,11 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp) return 0; } +static inline unsigned int cpumask_last(const struct cpumask *srcp) +{ + return 0; +} + /* Valid inputs for n are -1 and 0. */ static inline unsigned int cpumask_next(int n, const struct cpumask *srcp) { @@ -179,6 +184,17 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp) return find_first_bit(cpumask_bits(srcp), nr_cpumask_bits); } +/** + * cpumask_last - get the last CPU in a cpumask + * @srcp: - the cpumask pointer + * + * Returns >= nr_cpumask_bits if no CPUs set. + */ +static inline unsigned int cpumask_last(const struct cpumask *srcp) +{ + return find_last_bit(cpumask_bits(srcp), nr_cpumask_bits); +} + unsigned int cpumask_next(int n, const struct cpumask *srcp); /** diff --git a/include/linux/dcache.h b/include/linux/dcache.h index f05a659cdf34..65cd8ab60b7a 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -520,7 +520,7 @@ static inline struct inode *d_inode(const struct dentry *dentry) } /** - * d_inode_rcu - Get the actual inode of this dentry with ACCESS_ONCE() + * d_inode_rcu - Get the actual inode of this dentry with READ_ONCE() * @dentry: The dentry to query * * This is the helper normal filesystems should use to get at their own inodes @@ -528,7 +528,7 @@ static inline struct inode *d_inode(const struct dentry *dentry) */ static inline struct inode *d_inode_rcu(const struct dentry *dentry) { - return ACCESS_ONCE(dentry->d_inode); + return READ_ONCE(dentry->d_inode); } /** diff --git a/include/linux/dynamic_queue_limits.h b/include/linux/dynamic_queue_limits.h index 34c0a5464c74..023eae69398c 100644 --- a/include/linux/dynamic_queue_limits.h +++ b/include/linux/dynamic_queue_limits.h @@ -89,7 +89,7 @@ static inline void dql_queued(struct dql *dql, unsigned int count) /* Returns how many objects can be queued, < 0 indicates over limit. 
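
The new cpumask_last() above completes the cpumask_first() pairing; usage is symmetric (my example):

    /* Like cpumask_first(), the sentinel for an empty mask is a
     * value >= nr_cpumask_bits. */
    unsigned int cpu = cpumask_last(cpu_online_mask);
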
*/ static inline int dql_avail(const struct dql *dql) { - return ACCESS_ONCE(dql->adj_limit) - ACCESS_ONCE(dql->num_queued); + return READ_ONCE(dql->adj_limit) - READ_ONCE(dql->num_queued); } /* Record number of completed objects and recalculate the limit. */ diff --git a/include/linux/fs.h b/include/linux/fs.h index 885266aae2d7..e1f75a3b4af5 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2793,6 +2793,7 @@ extern int do_pipe_flags(int *, int); id(KEXEC_IMAGE, kexec-image) \ id(KEXEC_INITRAMFS, kexec-initramfs) \ id(POLICY, security-policy) \ + id(X509_CERTIFICATE, x509-certificate) \ id(MAX_ID, ) #define __fid_enumify(ENUM, dummy) READING_ ## ENUM, diff --git a/include/linux/genetlink.h b/include/linux/genetlink.h index b96dd4e1e663..ecc2928e8046 100644 --- a/include/linux/genetlink.h +++ b/include/linux/genetlink.h @@ -31,7 +31,7 @@ extern wait_queue_head_t genl_sk_destructing_waitq; * @p: The pointer to read, prior to dereferencing * * Return the value of the specified RCU-protected pointer, but omit - * both the smp_read_barrier_depends() and the ACCESS_ONCE(), because + * both the smp_read_barrier_depends() and the READ_ONCE(), because * caller holds genl mutex. */ #define genl_dereference(p) \ diff --git a/include/linux/genhd.h b/include/linux/genhd.h index 44790523057f..eaefb7a62f83 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h @@ -207,6 +207,7 @@ struct gendisk { #endif /* CONFIG_BLK_DEV_INTEGRITY */ int node_id; struct badblocks *bb; + struct lockdep_map lockdep_map; }; static inline struct gendisk *part_to_disk(struct hd_struct *part) @@ -591,8 +592,7 @@ extern void __delete_partition(struct percpu_ref *); extern void delete_partition(struct gendisk *, int); extern void printk_all_partitions(void); -extern struct gendisk *alloc_disk_node(int minors, int node_id); -extern struct gendisk *alloc_disk(int minors); +extern struct gendisk *__alloc_disk_node(int minors, int node_id); extern struct kobject *get_disk(struct gendisk *disk); extern void put_disk(struct gendisk *disk); extern void blk_register_region(dev_t devt, unsigned long range, @@ -616,6 +616,24 @@ extern ssize_t part_fail_store(struct device *dev, const char *buf, size_t count); #endif /* CONFIG_FAIL_MAKE_REQUEST */ +#define alloc_disk_node(minors, node_id) \ +({ \ + static struct lock_class_key __key; \ + const char *__name; \ + struct gendisk *__disk; \ + \ + __name = "(gendisk_completion)"#minors"("#node_id")"; \ + \ + __disk = __alloc_disk_node(minors, node_id); \ + \ + if (__disk) \ + lockdep_init_map(&__disk->lockdep_map, __name, &__key, 0); \ + \ + __disk; \ +}) + +#define alloc_disk(minors) alloc_disk_node(minors, NUMA_NO_NODE) + static inline int hd_ref_init(struct hd_struct *part) { if (percpu_ref_init(&part->ref, __delete_partition, 0, diff --git a/include/linux/gpio-fan.h b/include/linux/gpio-fan.h deleted file mode 100644 index 096659169215..000000000000 --- a/include/linux/gpio-fan.h +++ /dev/null @@ -1,36 +0,0 @@ -/* - * include/linux/gpio-fan.h - * - * Platform data structure for GPIO fan driver - * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without any - * warranty of any kind, whether express or implied. - */ - -#ifndef __LINUX_GPIO_FAN_H -#define __LINUX_GPIO_FAN_H - -struct gpio_fan_alarm { - unsigned gpio; - unsigned active_low; -}; - -struct gpio_fan_speed { - int rpm; - int ctrl_val; -}; - -struct gpio_fan_platform_data { - int num_ctrl; - unsigned *ctrl; /* fan control GPIOs. 
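
The alloc_disk_node() rework above relies on a standard lockdep idiom: a static struct lock_class_key declared inside a macro is instantiated once per expansion site, giving every caller its own lock class. Reduced to a skeleton (hypothetical init_thing_map, not a kernel API):

    #define init_thing_map(t)                                        \
    do {                                                             \
            /* One key instance per expansion site. */               \
            static struct lock_class_key __key;                      \
            lockdep_init_map(&(t)->lockdep_map, "thing", &__key, 0); \
    } while (0)
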
*/ - struct gpio_fan_alarm *alarm; /* fan alarm GPIO. */ - /* - * Speed conversion array: rpm from/to GPIO bit field. - * This array _must_ be sorted in ascending rpm order. - */ - int num_speed; - struct gpio_fan_speed *speed; -}; - -#endif /* __LINUX_GPIO_FAN_H */ diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 87067d23a48b..a8a126259bc4 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -222,7 +222,7 @@ extern struct page *huge_zero_page; static inline bool is_huge_zero_page(struct page *page) { - return ACCESS_ONCE(huge_zero_page) == page; + return READ_ONCE(huge_zero_page) == page; } static inline bool is_huge_zero_pmd(pmd_t pmd) diff --git a/include/linux/hypervisor.h b/include/linux/hypervisor.h index b4054fd5b6f6..b19563f9a8eb 100644 --- a/include/linux/hypervisor.h +++ b/include/linux/hypervisor.h @@ -7,8 +7,12 @@ * Juergen Gross <jgross@suse.com> */ -#ifdef CONFIG_HYPERVISOR_GUEST -#include <asm/hypervisor.h> +#ifdef CONFIG_X86 +#include <asm/x86_init.h> +static inline void hypervisor_pin_vcpu(int cpu) +{ + x86_platform.hyper.pin_vcpu(cpu); +} #else static inline void hypervisor_pin_vcpu(int cpu) { diff --git a/include/linux/ide.h b/include/linux/ide.h index 70db3af04541..771989d25ef8 100644 --- a/include/linux/ide.h +++ b/include/linux/ide.h @@ -1212,7 +1212,7 @@ extern int ide_wait_not_busy(ide_hwif_t *hwif, unsigned long timeout); extern void ide_stall_queue(ide_drive_t *drive, unsigned long timeout); -extern void ide_timer_expiry(unsigned long); +extern void ide_timer_expiry(struct timer_list *t); extern irqreturn_t ide_intr(int irq, void *dev_id); extern void do_ide_request(struct request_queue *); extern void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq); diff --git a/include/linux/if_team.h b/include/linux/if_team.h index 30294603526f..d95cae09dea0 100644 --- a/include/linux/if_team.h +++ b/include/linux/if_team.h @@ -247,7 +247,7 @@ static inline struct team_port *team_get_port_by_index(struct team *team, static inline int team_num_to_port_index(struct team *team, unsigned int num) { - int en_port_count = ACCESS_ONCE(team->en_port_count); + int en_port_count = READ_ONCE(team->en_port_count); if (unlikely(!en_port_count)) return 0; diff --git a/include/linux/ioport.h b/include/linux/ioport.h index 83c8d6530f0f..93b4183cf53d 100644 --- a/include/linux/ioport.h +++ b/include/linux/ioport.h @@ -271,11 +271,14 @@ extern int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages, void *arg, int (*func)(unsigned long, unsigned long, void *)); extern int +walk_mem_res(u64 start, u64 end, void *arg, + int (*func)(struct resource *, void *)); +extern int walk_system_ram_res(u64 start, u64 end, void *arg, - int (*func)(u64, u64, void *)); + int (*func)(struct resource *, void *)); extern int walk_iomem_res_desc(unsigned long desc, unsigned long flags, u64 start, u64 end, - void *arg, int (*func)(u64, u64, void *)); + void *arg, int (*func)(struct resource *, void *)); /* True if any part of r1 overlaps r2 */ static inline bool resource_overlaps(struct resource *r1, struct resource *r2) diff --git a/include/linux/ioprio.h b/include/linux/ioprio.h index 2cdd74809899..627efac73e6d 100644 --- a/include/linux/ioprio.h +++ b/include/linux/ioprio.h @@ -3,6 +3,7 @@ #define IOPRIO_H #include <linux/sched.h> +#include <linux/sched/rt.h> #include <linux/iocontext.h> /* @@ -63,7 +64,7 @@ static inline int task_nice_ioclass(struct task_struct *task) { if (task->policy == SCHED_IDLE) return IOPRIO_CLASS_IDLE; - else if 
(task->policy == SCHED_FIFO || task->policy == SCHED_RR) + else if (task_is_realtime(task)) return IOPRIO_CLASS_RT; else return IOPRIO_CLASS_BE; diff --git a/include/linux/irq.h b/include/linux/irq.h index 4536286cc4d2..b01d06db9101 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -1114,6 +1114,28 @@ static inline u32 irq_reg_readl(struct irq_chip_generic *gc, return readl(gc->reg_base + reg_offset); } +struct irq_matrix; +struct irq_matrix *irq_alloc_matrix(unsigned int matrix_bits, + unsigned int alloc_start, + unsigned int alloc_end); +void irq_matrix_online(struct irq_matrix *m); +void irq_matrix_offline(struct irq_matrix *m); +void irq_matrix_assign_system(struct irq_matrix *m, unsigned int bit, bool replace); +int irq_matrix_reserve_managed(struct irq_matrix *m, const struct cpumask *msk); +void irq_matrix_remove_managed(struct irq_matrix *m, const struct cpumask *msk); +int irq_matrix_alloc_managed(struct irq_matrix *m, unsigned int cpu); +void irq_matrix_reserve(struct irq_matrix *m); +void irq_matrix_remove_reserved(struct irq_matrix *m); +int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk, + bool reserved, unsigned int *mapped_cpu); +void irq_matrix_free(struct irq_matrix *m, unsigned int cpu, + unsigned int bit, bool managed); +void irq_matrix_assign(struct irq_matrix *m, unsigned int bit); +unsigned int irq_matrix_available(struct irq_matrix *m, bool cpudown); +unsigned int irq_matrix_allocated(struct irq_matrix *m); +unsigned int irq_matrix_reserved(struct irq_matrix *m); +void irq_matrix_debug_show(struct seq_file *sf, struct irq_matrix *m, int ind); + /* Contrary to Linux irqs, for hardware irqs the irq number 0 is valid */ #define INVALID_HWIRQ (~0UL) irq_hw_number_t ipi_get_hwirq(unsigned int irq, unsigned int cpu); diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h index 9270d73ea682..0e81035b678f 100644 --- a/include/linux/irq_work.h +++ b/include/linux/irq_work.h @@ -34,10 +34,7 @@ void init_irq_work(struct irq_work *work, void (*func)(struct irq_work *)) #define DEFINE_IRQ_WORK(name, _f) struct irq_work name = { .func = (_f), } bool irq_work_queue(struct irq_work *work); - -#ifdef CONFIG_SMP bool irq_work_queue_on(struct irq_work *work, int cpu); -#endif void irq_work_tick(void); void irq_work_sync(struct irq_work *work); diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h index 14b74f22d43c..c00c4c33e432 100644 --- a/include/linux/irqchip/arm-gic-v3.h +++ b/include/linux/irqchip/arm-gic-v3.h @@ -68,6 +68,7 @@ #define GICD_CTLR_ENABLE_SS_G1 (1U << 1) #define GICD_CTLR_ENABLE_SS_G0 (1U << 0) +#define GICD_TYPER_RSS (1U << 26) #define GICD_TYPER_LPIS (1U << 17) #define GICD_TYPER_MBIS (1U << 16) @@ -461,6 +462,7 @@ #define ICC_CTLR_EL1_SEIS_MASK (0x1 << ICC_CTLR_EL1_SEIS_SHIFT) #define ICC_CTLR_EL1_A3V_SHIFT 15 #define ICC_CTLR_EL1_A3V_MASK (0x1 << ICC_CTLR_EL1_A3V_SHIFT) +#define ICC_CTLR_EL1_RSS (0x1 << 18) #define ICC_PMR_EL1_SHIFT 0 #define ICC_PMR_EL1_MASK (0xff << ICC_PMR_EL1_SHIFT) #define ICC_BPR0_EL1_SHIFT 0 @@ -549,6 +551,8 @@ #define ICC_SGI1R_AFFINITY_2_SHIFT 32 #define ICC_SGI1R_AFFINITY_2_MASK (0xffULL << ICC_SGI1R_AFFINITY_2_SHIFT) #define ICC_SGI1R_IRQ_ROUTING_MODE_BIT 40 +#define ICC_SGI1R_RS_SHIFT 44 +#define ICC_SGI1R_RS_MASK (0xfULL << ICC_SGI1R_RS_SHIFT) #define ICC_SGI1R_AFFINITY_3_SHIFT 48 #define ICC_SGI1R_AFFINITY_3_MASK (0xffULL << ICC_SGI1R_AFFINITY_3_SHIFT) diff --git a/include/linux/irqchip/arm-gic-v4.h b/include/linux/irqchip/arm-gic-v4.h index 
58a4d89aa82c..447da8ca2156 100644 --- a/include/linux/irqchip/arm-gic-v4.h +++ b/include/linux/irqchip/arm-gic-v4.h @@ -20,6 +20,12 @@ struct its_vpe; +/* + * Maximum number of ITTs when GITS_TYPER.VMOVP == 0, using the + * ITSList mechanism to perform inter-ITS synchronization. + */ +#define GICv4_ITS_LIST_MAX 16 + /* Embedded in kvm.arch */ struct its_vm { struct fwnode_handle *fwnode; @@ -30,6 +36,7 @@ struct its_vm { irq_hw_number_t db_lpi_base; unsigned long *db_bitmap; int nr_db_lpis; + u32 vlpi_count[GICv4_ITS_LIST_MAX]; }; /* Embedded in kvm_vcpu.arch */ @@ -64,12 +71,14 @@ struct its_vpe { * @vm: Pointer to the GICv4 notion of a VM * @vpe: Pointer to the GICv4 notion of a virtual CPU (VPE) * @vintid: Virtual LPI number + * @properties: Priority and enable bits (as written in the prop table) * @db_enabled: Is the VPE doorbell to be generated? */ struct its_vlpi_map { struct its_vm *vm; struct its_vpe *vpe; u32 vintid; + u8 properties; bool db_enabled; }; diff --git a/include/linux/irqchip/irq-omap-intc.h b/include/linux/irqchip/irq-omap-intc.h index 2e3d1afeb674..f19ccee7749f 100644 --- a/include/linux/irqchip/irq-omap-intc.h +++ b/include/linux/irqchip/irq-omap-intc.h @@ -18,8 +18,6 @@ #ifndef __INCLUDE_LINUX_IRQCHIP_IRQ_OMAP_INTC_H #define __INCLUDE_LINUX_IRQCHIP_IRQ_OMAP_INTC_H -void omap3_init_irq(void); - int omap_irq_pending(void); void omap_intc_save_context(void); void omap_intc_restore_context(void); diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h index b6084898d330..60e3100b0809 100644 --- a/include/linux/irqdesc.h +++ b/include/linux/irqdesc.h @@ -94,6 +94,7 @@ struct irq_desc { #endif #ifdef CONFIG_GENERIC_IRQ_DEBUGFS struct dentry *debugfs_file; + const char *dev_name; #endif #ifdef CONFIG_SPARSE_IRQ struct rcu_head rcu; diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h index b1037dfc47e4..a34355d19546 100644 --- a/include/linux/irqdomain.h +++ b/include/linux/irqdomain.h @@ -33,6 +33,7 @@ #include <linux/types.h> #include <linux/irqhandler.h> #include <linux/of.h> +#include <linux/mutex.h> #include <linux/radix-tree.h> struct device_node; @@ -41,6 +42,7 @@ struct of_device_id; struct irq_chip; struct irq_data; struct cpumask; +struct seq_file; /* Number of irqs reserved for a legacy isa controller */ #define NUM_ISA_INTERRUPTS 16 @@ -105,18 +107,21 @@ struct irq_domain_ops { int (*xlate)(struct irq_domain *d, struct device_node *node, const u32 *intspec, unsigned int intsize, unsigned long *out_hwirq, unsigned int *out_type); - #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY /* extended V2 interfaces to support hierarchy irq_domains */ int (*alloc)(struct irq_domain *d, unsigned int virq, unsigned int nr_irqs, void *arg); void (*free)(struct irq_domain *d, unsigned int virq, unsigned int nr_irqs); - void (*activate)(struct irq_domain *d, struct irq_data *irq_data); + int (*activate)(struct irq_domain *d, struct irq_data *irqd, bool early); void (*deactivate)(struct irq_domain *d, struct irq_data *irq_data); int (*translate)(struct irq_domain *d, struct irq_fwspec *fwspec, unsigned long *out_hwirq, unsigned int *out_type); #endif +#ifdef CONFIG_GENERIC_IRQ_DEBUGFS + void (*debug_show)(struct seq_file *m, struct irq_domain *d, + struct irq_data *irqd, int ind); +#endif }; extern struct irq_domain_ops irq_generic_chip_ops; @@ -134,8 +139,8 @@ struct irq_domain_chip_generic; * @mapcount: The number of mapped interrupts * * Optional elements - * @of_node: Pointer to device tree nodes associated with the irq_domain. 
Used - * when decoding device tree interrupt specifiers. + * @fwnode: Pointer to firmware node associated with the irq_domain. Pretty easy + * to swap it for the of_node via the irq_domain_get_of_node accessor * @gc: Pointer to a list of generic chips. There is a helper function for * setting up one or more generic chips for interrupt controllers * drivers using the generic chip library which uses this pointer. @@ -173,6 +178,7 @@ struct irq_domain { unsigned int revmap_direct_max_irq; unsigned int revmap_size; struct radix_tree_root revmap_tree; + struct mutex revmap_tree_mutex; unsigned int linear_revmap[]; }; @@ -438,7 +444,7 @@ extern int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base, unsigned int nr_irqs, int node, void *arg, bool realloc, const struct cpumask *affinity); extern void irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs); -extern void irq_domain_activate_irq(struct irq_data *irq_data); +extern int irq_domain_activate_irq(struct irq_data *irq_data, bool early); extern void irq_domain_deactivate_irq(struct irq_data *irq_data); static inline int irq_domain_alloc_irqs(struct irq_domain *domain, @@ -508,8 +514,6 @@ static inline bool irq_domain_is_msi_remap(struct irq_domain *domain) extern bool irq_domain_hierarchical_is_msi_remap(struct irq_domain *domain); #else /* CONFIG_IRQ_DOMAIN_HIERARCHY */ -static inline void irq_domain_activate_irq(struct irq_data *data) { } -static inline void irq_domain_deactivate_irq(struct irq_data *data) { } static inline int irq_domain_alloc_irqs(struct irq_domain *domain, unsigned int nr_irqs, int node, void *arg) { @@ -558,8 +562,6 @@ irq_domain_hierarchical_is_msi_remap(struct irq_domain *domain) #else /* CONFIG_IRQ_DOMAIN */ static inline void irq_dispose_mapping(unsigned int virq) { } -static inline void irq_domain_activate_irq(struct irq_data *data) { } -static inline void irq_domain_deactivate_irq(struct irq_data *data) { } static inline struct irq_domain *irq_find_matching_fwnode( struct fwnode_handle *fwnode, enum irq_domain_bus_token bus_token) { diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h index 3b7675bcca64..c7b368c734af 100644 --- a/include/linux/jump_label.h +++ b/include/linux/jump_label.h @@ -82,9 +82,9 @@ extern bool static_key_initialized; -#define STATIC_KEY_CHECK_USE() WARN(!static_key_initialized, \ - "%s used before call to jump_label_init", \ - __func__) +#define STATIC_KEY_CHECK_USE(key) WARN(!static_key_initialized, \ + "%s(): static key '%pS' used before call to jump_label_init()", \ + __func__, (key)) #ifdef HAVE_JUMP_LABEL @@ -212,13 +212,13 @@ static __always_inline bool static_key_true(struct static_key *key) static inline void static_key_slow_inc(struct static_key *key) { - STATIC_KEY_CHECK_USE(); + STATIC_KEY_CHECK_USE(key); atomic_inc(&key->enabled); } static inline void static_key_slow_dec(struct static_key *key) { - STATIC_KEY_CHECK_USE(); + STATIC_KEY_CHECK_USE(key); atomic_dec(&key->enabled); } @@ -237,7 +237,7 @@ static inline int jump_label_apply_nops(struct module *mod) static inline void static_key_enable(struct static_key *key) { - STATIC_KEY_CHECK_USE(); + STATIC_KEY_CHECK_USE(key); if (atomic_read(&key->enabled) != 0) { WARN_ON_ONCE(atomic_read(&key->enabled) != 1); @@ -248,7 +248,7 @@ static inline void static_key_enable(struct static_key *key) static inline void static_key_disable(struct static_key *key) { - STATIC_KEY_CHECK_USE(); + STATIC_KEY_CHECK_USE(key); if (atomic_read(&key->enabled) != 1) { WARN_ON_ONCE(atomic_read(&key->enabled) != 0); 
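
The STATIC_KEY_CHECK_USE(key) rework in the jump-label headers above turns a bare function-name warning into one that names the offending key via %pS; roughly (hypothetical key, my example):

    static DEFINE_STATIC_KEY_FALSE(my_key);

    /* If this runs before jump_label_init(), the WARN now reads
     * approximately:
     *   static_key_enable(): static key 'my_key+0x0/0x10' used
     *   before call to jump_label_init()
     * instead of naming only the calling function. */
    static_branch_enable(&my_key);
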
diff --git a/include/linux/jump_label_ratelimit.h b/include/linux/jump_label_ratelimit.h index fc13ff289903..baa8eabbaa56 100644 --- a/include/linux/jump_label_ratelimit.h +++ b/include/linux/jump_label_ratelimit.h @@ -25,18 +25,18 @@ struct static_key_deferred { }; static inline void static_key_slow_dec_deferred(struct static_key_deferred *key) { - STATIC_KEY_CHECK_USE(); + STATIC_KEY_CHECK_USE(key); static_key_slow_dec(&key->key); } static inline void static_key_deferred_flush(struct static_key_deferred *key) { - STATIC_KEY_CHECK_USE(); + STATIC_KEY_CHECK_USE(key); } static inline void jump_label_rate_limit(struct static_key_deferred *key, unsigned long rl) { - STATIC_KEY_CHECK_USE(); + STATIC_KEY_CHECK_USE(key); } #endif /* HAVE_JUMP_LABEL */ #endif /* _LINUX_JUMP_LABEL_RATELIMIT_H */ diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h index 0a777c5216b1..708f337d780b 100644 --- a/include/linux/kallsyms.h +++ b/include/linux/kallsyms.h @@ -14,8 +14,6 @@ #define KSYM_SYMBOL_LEN (sizeof("%s+%#lx/%#lx [%s]") + (KSYM_NAME_LEN - 1) + \ 2*(BITS_PER_LONG*3/10) + (MODULE_NAME_LEN - 1) + 1) -/* How and when do we show kallsyms values? */ -extern int kallsyms_show_value(void); #ifndef CONFIG_64BIT # define KALLSYM_FMT "%08lx" #else @@ -54,6 +52,9 @@ extern void __print_symbol(const char *fmt, unsigned long address); int lookup_symbol_name(unsigned long addr, char *symname); int lookup_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name); +/* How and when do we show kallsyms values? */ +extern int kallsyms_show_value(void); + #else /* !CONFIG_KALLSYMS */ static inline unsigned long kallsyms_lookup_name(const char *name) @@ -112,6 +113,11 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u return -ERANGE; } +static inline int kallsyms_show_value(void) +{ + return false; +} + /* Stupid that this does nothing, but I didn't create this mess. 
*/ #define __print_symbol(fmt, addr) #endif /*CONFIG_KALLSYMS*/ diff --git a/include/linux/kexec.h b/include/linux/kexec.h index 1c08c925cefb..f16f6ceb3875 100644 --- a/include/linux/kexec.h +++ b/include/linux/kexec.h @@ -160,7 +160,7 @@ struct kexec_buf { }; int __weak arch_kexec_walk_mem(struct kexec_buf *kbuf, - int (*func)(u64, u64, void *)); + int (*func)(struct resource *, void *)); extern int kexec_add_buffer(struct kexec_buf *kbuf); int kexec_locate_mem_hole(struct kexec_buf *kbuf); #endif /* CONFIG_KEXEC_FILE */ diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index bd2684700b74..9440a2fc8893 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h @@ -391,10 +391,6 @@ int register_kprobes(struct kprobe **kps, int num); void unregister_kprobes(struct kprobe **kps, int num); int setjmp_pre_handler(struct kprobe *, struct pt_regs *); int longjmp_break_handler(struct kprobe *, struct pt_regs *); -int register_jprobe(struct jprobe *p); -void unregister_jprobe(struct jprobe *p); -int register_jprobes(struct jprobe **jps, int num); -void unregister_jprobes(struct jprobe **jps, int num); void jprobe_return(void); unsigned long arch_deref_entry_point(void *); @@ -443,20 +439,6 @@ static inline void unregister_kprobe(struct kprobe *p) static inline void unregister_kprobes(struct kprobe **kps, int num) { } -static inline int register_jprobe(struct jprobe *p) -{ - return -ENOSYS; -} -static inline int register_jprobes(struct jprobe **jps, int num) -{ - return -ENOSYS; -} -static inline void unregister_jprobe(struct jprobe *p) -{ -} -static inline void unregister_jprobes(struct jprobe **jps, int num) -{ -} static inline void jprobe_return(void) { } @@ -486,6 +468,20 @@ static inline int enable_kprobe(struct kprobe *kp) return -ENOSYS; } #endif /* CONFIG_KPROBES */ +static inline int register_jprobe(struct jprobe *p) +{ + return -ENOSYS; +} +static inline int register_jprobes(struct jprobe **jps, int num) +{ + return -ENOSYS; +} +static inline void unregister_jprobe(struct jprobe *p) +{ +} +static inline void unregister_jprobes(struct jprobe **jps, int num) +{ +} static inline int disable_kretprobe(struct kretprobe *rp) { return disable_kprobe(&rp->kp); @@ -496,11 +492,11 @@ static inline int enable_kretprobe(struct kretprobe *rp) } static inline int disable_jprobe(struct jprobe *jp) { - return disable_kprobe(&jp->kp); + return -ENOSYS; } static inline int enable_jprobe(struct jprobe *jp) { - return enable_kprobe(&jp->kp); + return -ENOSYS; } #ifndef CONFIG_KPROBES diff --git a/include/linux/kthread.h b/include/linux/kthread.h index 4e26609c77d4..86d53a3cb497 100644 --- a/include/linux/kthread.h +++ b/include/linux/kthread.h @@ -76,7 +76,7 @@ extern int tsk_fork_get_node(struct task_struct *tsk); */ struct kthread_work; typedef void (*kthread_work_func_t)(struct kthread_work *work); -void kthread_delayed_work_timer_fn(unsigned long __data); +void kthread_delayed_work_timer_fn(struct timer_list *t); enum { KTW_FREEZABLE = 1 << 0, /* freeze during suspend */ @@ -117,8 +117,8 @@ struct kthread_delayed_work { #define KTHREAD_DELAYED_WORK_INIT(dwork, fn) { \ .work = KTHREAD_WORK_INIT((dwork).work, (fn)), \ - .timer = __TIMER_INITIALIZER(kthread_delayed_work_timer_fn, \ - 0, (unsigned long)&(dwork), \ + .timer = __TIMER_INITIALIZER((TIMER_FUNC_TYPE)kthread_delayed_work_timer_fn,\ + (TIMER_DATA_TYPE)&(dwork.timer), \ TIMER_IRQSAFE), \ } @@ -165,8 +165,8 @@ extern void __kthread_init_worker(struct kthread_worker *worker, do { \ kthread_init_work(&(dwork)->work, (fn)); \ 
__setup_timer(&(dwork)->timer, \ - kthread_delayed_work_timer_fn, \ - (unsigned long)(dwork), \ + (TIMER_FUNC_TYPE)kthread_delayed_work_timer_fn,\ + (TIMER_DATA_TYPE)&(dwork)->timer, \ TIMER_IRQSAFE); \ } while (0) diff --git a/include/linux/ktime.h b/include/linux/ktime.h index 0c8bd45c8206..5b9fddbaac41 100644 --- a/include/linux/ktime.h +++ b/include/linux/ktime.h @@ -270,5 +270,6 @@ static inline ktime_t ms_to_ktime(u64 ms) } # include <linux/timekeeping.h> +# include <linux/timekeeping32.h> #endif diff --git a/include/linux/linkage.h b/include/linux/linkage.h index 2e6f90bd52aa..f68db9e450eb 100644 --- a/include/linux/linkage.h +++ b/include/linux/linkage.h @@ -2,7 +2,7 @@ #ifndef _LINUX_LINKAGE_H #define _LINUX_LINKAGE_H -#include <linux/compiler.h> +#include <linux/compiler_types.h> #include <linux/stringify.h> #include <linux/export.h> #include <asm/linkage.h> diff --git a/include/linux/llist.h b/include/linux/llist.h index 1957635e6d5f..85abc2915e8d 100644 --- a/include/linux/llist.h +++ b/include/linux/llist.h @@ -198,7 +198,7 @@ static inline void init_llist_head(struct llist_head *list) */ static inline bool llist_empty(const struct llist_head *head) { - return ACCESS_ONCE(head->first) == NULL; + return READ_ONCE(head->first) == NULL; } static inline struct llist_node *llist_next(struct llist_node *node) diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index f301d31b473c..a842551fe044 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -528,6 +528,11 @@ static inline void lockdep_on(void) */ struct lock_class_key { }; +/* + * The lockdep_map takes no space if lockdep is disabled: + */ +struct lockdep_map { }; + #define lockdep_depth(tsk) (0) #define lockdep_is_held_type(l, r) (1) @@ -720,9 +725,24 @@ do { \ lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_); \ lock_release(&(lock)->dep_map, 0, _THIS_IP_); \ } while (0) + +#define lockdep_assert_irqs_enabled() do { \ + WARN_ONCE(debug_locks && !current->lockdep_recursion && \ + !current->hardirqs_enabled, \ + "IRQs not enabled as expected\n"); \ + } while (0) + +#define lockdep_assert_irqs_disabled() do { \ + WARN_ONCE(debug_locks && !current->lockdep_recursion && \ + current->hardirqs_enabled, \ + "IRQs not disabled as expected\n"); \ + } while (0) + #else # define might_lock(lock) do { } while (0) # define might_lock_read(lock) do { } while (0) +# define lockdep_assert_irqs_enabled() do { } while (0) +# define lockdep_assert_irqs_disabled() do { } while (0) #endif #ifdef CONFIG_LOCKDEP diff --git a/include/linux/log2.h b/include/linux/log2.h index c373295f359f..41a1ae010993 100644 --- a/include/linux/log2.h +++ b/include/linux/log2.h @@ -37,19 +37,23 @@ int __ilog2_u64(u64 n) } #endif -/* - * Determine whether some value is a power of two, where zero is +/** + * is_power_of_2() - check if a value is a power of two + * @n: the value to check + * + * Determine whether some value is a power of two, where zero is * *not* considered a power of two. + * Return: true if @n is a power of 2, otherwise false. 
*/ - static inline __attribute__((const)) bool is_power_of_2(unsigned long n) { return (n != 0 && ((n & (n - 1)) == 0)); } -/* - * round up to nearest power of two +/** + * __roundup_pow_of_two() - round up to nearest power of two + * @n: value to round up */ static inline __attribute__((const)) unsigned long __roundup_pow_of_two(unsigned long n) @@ -57,8 +61,9 @@ unsigned long __roundup_pow_of_two(unsigned long n) return 1UL << fls_long(n - 1); } -/* - * round down to nearest power of two +/** + * __rounddown_pow_of_two() - round down to nearest power of two + * @n: value to round down */ static inline __attribute__((const)) unsigned long __rounddown_pow_of_two(unsigned long n) @@ -67,12 +72,12 @@ unsigned long __rounddown_pow_of_two(unsigned long n) } /** - * ilog2 - log of base 2 of 32-bit or a 64-bit unsigned value - * @n - parameter + * ilog2 - log base 2 of 32-bit or a 64-bit unsigned value + * @n: parameter * * constant-capable log of base 2 calculation * - this can be used to initialise global variables from constant data, hence - * the massive ternary operator construction + * the massive ternary operator construction * * selects the appropriately-sized optimised version depending on sizeof(n) */ @@ -150,7 +155,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n) /** * roundup_pow_of_two - round the given value up to nearest power of two - * @n - parameter + * @n: parameter * * round the given value up to the nearest power of two * - the result is undefined when n == 0 @@ -167,7 +172,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n) /** * rounddown_pow_of_two - round the given value down to nearest power of two - * @n - parameter + * @n: parameter * * round the given value down to the nearest power of two * - the result is undefined when n == 0 @@ -180,6 +185,12 @@ unsigned long __rounddown_pow_of_two(unsigned long n) __rounddown_pow_of_two(n) \ ) +static inline __attribute_const__ +int __order_base_2(unsigned long n) +{ + return n > 1 ? ilog2(n - 1) + 1 : 0; +} + /** * order_base_2 - calculate the (rounded up) base 2 order of the argument * @n: parameter @@ -193,13 +204,6 @@ unsigned long __rounddown_pow_of_two(unsigned long n) * ob2(5) = 3 * ... and so on. */ - -static inline __attribute_const__ -int __order_base_2(unsigned long n) -{ - return n > 1 ? ilog2(n - 1) + 1 : 0; -} - #define order_base_2(n) \ ( \ __builtin_constant_p(n) ? ( \ diff --git a/include/linux/math64.h b/include/linux/math64.h index 082de345b73c..837f2f2d1d34 100644 --- a/include/linux/math64.h +++ b/include/linux/math64.h @@ -12,6 +12,11 @@ /** * div_u64_rem - unsigned 64bit divide with 32bit divisor with remainder + * @dividend: unsigned 64bit dividend + * @divisor: unsigned 32bit divisor + * @remainder: pointer to unsigned 32bit remainder + * + * Return: sets ``*remainder``, then returns dividend / divisor * * This is commonly provided by 32bit archs to provide an optimized 64bit * divide. 
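As the kernel-doc just added spells out, div_u64_rem() returns the quotient and stores the remainder through the pointer argument. A minimal usage sketch; the helper name and input value are illustrative only:

#include <linux/math64.h>
#include <linux/time64.h>	/* NSEC_PER_SEC */

/* Hypothetical helper: split a nanosecond count into seconds + leftover ns. */
static inline u64 ns_split(u64 ns, u32 *rem)
{
	/* e.g. ns == 3500000000 yields 3, with *rem == 500000000 */
	return div_u64_rem(ns, NSEC_PER_SEC, rem);
}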
@@ -24,6 +29,11 @@ static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder) /** * div_s64_rem - signed 64bit divide with 32bit divisor with remainder + * @dividend: signed 64bit dividend + * @divisor: signed 32bit divisor + * @remainder: pointer to signed 32bit remainder + * + * Return: sets ``*remainder``, then returns dividend / divisor */ static inline s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder) { @@ -33,6 +43,11 @@ static inline s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder) /** * div64_u64_rem - unsigned 64bit divide with 64bit divisor and remainder + * @dividend: unsigned 64bit dividend + * @divisor: unsigned 64bit divisor + * @remainder: pointer to unsigned 64bit remainder + * + * Return: sets ``*remainder``, then returns dividend / divisor */ static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder) { @@ -42,6 +57,10 @@ static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder) /** * div64_u64 - unsigned 64bit divide with 64bit divisor + * @dividend: unsigned 64bit dividend + * @divisor: unsigned 64bit divisor + * + * Return: dividend / divisor */ static inline u64 div64_u64(u64 dividend, u64 divisor) { @@ -50,6 +69,10 @@ static inline u64 div64_u64(u64 dividend, u64 divisor) /** * div64_s64 - signed 64bit divide with 64bit divisor + * @dividend: signed 64bit dividend + * @divisor: signed 64bit divisor + * + * Return: dividend / divisor */ static inline s64 div64_s64(s64 dividend, s64 divisor) { @@ -89,6 +112,8 @@ extern s64 div64_s64(s64 dividend, s64 divisor); /** * div_u64 - unsigned 64bit divide with 32bit divisor + * @dividend: unsigned 64bit dividend + * @divisor: unsigned 32bit divisor * * This is the most common 64bit divide and should be used if possible, * as many 32bit archs can optimize this variant better than a full 64bit @@ -104,6 +129,8 @@ static inline u64 div_u64(u64 dividend, u32 divisor) /** * div_s64 - signed 64bit divide with 32bit divisor + * @dividend: signed 64bit dividend + * @divisor: signed 32bit divisor */ #ifndef div_s64 static inline s64 div_s64(s64 dividend, s32 divisor) diff --git a/include/linux/mem_encrypt.h b/include/linux/mem_encrypt.h index 265a9cd21cb4..b310a9c18113 100644 --- a/include/linux/mem_encrypt.h +++ b/include/linux/mem_encrypt.h @@ -23,11 +23,14 @@ #define sme_me_mask 0ULL +static inline bool sme_active(void) { return false; } +static inline bool sev_active(void) { return false; } + #endif /* CONFIG_ARCH_HAS_MEM_ENCRYPT */ -static inline bool sme_active(void) +static inline bool mem_encrypt_active(void) { - return !!sme_me_mask; + return sme_me_mask; } static inline u64 sme_get_me_mask(void) diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h index e9c908c4fba8..78dc85365c4f 100644 --- a/include/linux/mfd/axp20x.h +++ b/include/linux/mfd/axp20x.h @@ -131,6 +131,9 @@ enum axp20x_variants { #define AXP803_DCDC6_V_OUT 0x25 #define AXP803_DCDC_FREQ_CTRL 0x3b +/* Other DCDC regulator control registers are the same as AXP803 */ +#define AXP813_DCDC7_V_OUT 0x26 + /* Interrupt */ #define AXP152_IRQ1_EN 0x40 #define AXP152_IRQ2_EN 0x41 diff --git a/include/linux/mfd/rtsx_pci.h b/include/linux/mfd/rtsx_pci.h index 116816fb9110..7815d8db7eca 100644 --- a/include/linux/mfd/rtsx_pci.h +++ b/include/linux/mfd/rtsx_pci.h @@ -334,6 +334,7 @@ #define DCM_DRP_RD_DATA_H 0xFC29 #define SD_VPCLK0_CTL 0xFC2A #define SD_VPCLK1_CTL 0xFC2B +#define PHASE_SELECT_MASK 0x1F #define SD_DCMPS0_CTL 0xFC2C #define SD_DCMPS1_CTL 0xFC2D #define SD_VPTX_CTL SD_VPCLK0_CTL diff 
--git a/include/linux/mfd/tps65218.h b/include/linux/mfd/tps65218.h index bccd2d68b1e3..f069c518c0ed 100644 --- a/include/linux/mfd/tps65218.h +++ b/include/linux/mfd/tps65218.h @@ -246,24 +246,6 @@ enum tps65218_irqs { }; /** - * struct tps_info - packages regulator constraints - * @id: Id of the regulator - * @name: Voltage regulator name - * @min_uV: minimum micro volts - * @max_uV: minimum micro volts - * @strobe: sequencing strobe value for the regulator - * - * This data is used to check the regualtor voltage limits while setting. - */ -struct tps_info { - int id; - const char *name; - int min_uV; - int max_uV; - int strobe; -}; - -/** * struct tps65218 - tps65218 sub-driver chip access routines * * Device data may be used to access the TPS65218 chip @@ -280,7 +262,6 @@ struct tps65218 { u32 irq_mask; struct regmap_irq_chip_data *irq_data; struct regulator_desc desc[TPS65218_NUM_REGULATOR]; - struct tps_info *info[TPS65218_NUM_REGULATOR]; struct regmap *regmap; u8 *strobes; }; diff --git a/include/linux/mm.h b/include/linux/mm.h index 43edf659453b..91b46f99b4d2 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2496,7 +2496,7 @@ void vmemmap_populate_print_last(void); void vmemmap_free(unsigned long start, unsigned long end); #endif void register_page_bootmem_memmap(unsigned long section_nr, struct page *map, - unsigned long size); + unsigned long nr_pages); enum mf_flags { MF_COUNT_INCREASED = 1 << 0, diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index 9a43763a68ad..e7743eca1021 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -255,6 +255,10 @@ struct mmc_supply { struct regulator *vqmmc; /* Optional Vccq supply */ }; +struct mmc_ctx { + struct task_struct *task; +}; + struct mmc_host { struct device *parent; struct device class_dev; @@ -350,6 +354,8 @@ struct mmc_host { #define MMC_CAP2_CQE (1 << 23) /* Has eMMC command queue engine */ #define MMC_CAP2_CQE_DCMD (1 << 24) /* CQE can issue a direct command */ + int fixed_drv_type; /* fixed driver type for non-removable media */ + mmc_pm_flag_t pm_caps; /* supported pm features */ /* host specific block data */ @@ -388,8 +394,9 @@ struct mmc_host { struct mmc_card *card; /* device attached to this host */ wait_queue_head_t wq; - struct task_struct *claimer; /* task that has host claimed */ + struct mmc_ctx *claimer; /* context that has host claimed */ int claim_cnt; /* "claim" nesting count */ + struct mmc_ctx default_ctx; /* default context */ struct delayed_work detect; int detect_change; /* card detect flag */ @@ -469,6 +476,8 @@ void mmc_detect_change(struct mmc_host *, unsigned long delay); void mmc_request_done(struct mmc_host *, struct mmc_request *); void mmc_command_done(struct mmc_host *host, struct mmc_request *mrq); +void mmc_cqe_request_done(struct mmc_host *host, struct mmc_request *mrq); + static inline void mmc_signal_sdio_irq(struct mmc_host *host) { host->ops->enable_sdio_irq(host, 0); diff --git a/include/linux/mmc/sdhci-pci-data.h b/include/linux/mmc/sdhci-pci-data.h index 36f986d4a59a..1d42872d22f3 100644 --- a/include/linux/mmc/sdhci-pci-data.h +++ b/include/linux/mmc/sdhci-pci-data.h @@ -15,7 +15,4 @@ struct sdhci_pci_data { extern struct sdhci_pci_data *(*sdhci_pci_get_data)(struct pci_dev *pdev, int slotno); - -extern int sdhci_pci_spt_drive_strength; - #endif diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index c9c4a81b9767..a507f43ad221 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -1151,13 +1151,17 @@ struct mem_section 
{ #define SECTION_ROOT_MASK (SECTIONS_PER_ROOT - 1) #ifdef CONFIG_SPARSEMEM_EXTREME -extern struct mem_section *mem_section[NR_SECTION_ROOTS]; +extern struct mem_section **mem_section; #else extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]; #endif static inline struct mem_section *__nr_to_section(unsigned long nr) { +#ifdef CONFIG_SPARSEMEM_EXTREME + if (!mem_section) + return NULL; +#endif if (!mem_section[SECTION_NR_TO_ROOT(nr)]) return NULL; return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK]; diff --git a/include/linux/module.h b/include/linux/module.h index fe5aa3736707..c69b49abe877 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -639,6 +639,8 @@ static inline bool is_livepatch_module(struct module *mod) } #endif /* CONFIG_LIVEPATCH */ +bool is_module_sig_enforced(void); + #else /* !CONFIG_MODULES... */ static inline struct module *__module_address(unsigned long addr) @@ -753,6 +755,11 @@ static inline bool module_requested_async_probing(struct module *module) return false; } +static inline bool is_module_sig_enforced(void) +{ + return false; +} + #endif /* CONFIG_MODULES */ #ifdef CONFIG_SYSFS diff --git a/include/linux/msi.h b/include/linux/msi.h index cdd069cf9ed8..1f1bbb5b4679 100644 --- a/include/linux/msi.h +++ b/include/linux/msi.h @@ -284,6 +284,11 @@ enum { MSI_FLAG_PCI_MSIX = (1 << 3), /* Needs early activate, required for PCI */ MSI_FLAG_ACTIVATE_EARLY = (1 << 4), + /* + * Must reactivate when irq is started even when + * MSI_FLAG_ACTIVATE_EARLY has been set. + */ + MSI_FLAG_MUST_REACTIVATE = (1 << 5), }; int msi_domain_set_affinity(struct irq_data *data, const struct cpumask *mask, diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h index 414a5e769fde..495ba4dd9da5 100644 --- a/include/linux/netfilter/nfnetlink.h +++ b/include/linux/netfilter/nfnetlink.h @@ -67,7 +67,7 @@ static inline bool lockdep_nfnl_is_held(__u8 subsys_id) * @ss: The nfnetlink subsystem ID * * Return the value of the specified RCU-protected pointer, but omit - * both the smp_read_barrier_depends() and the ACCESS_ONCE(), because + * both the smp_read_barrier_depends() and the READ_ONCE(), because * caller holds the NFNL subsystem mutex. */ #define nfnl_dereference(p, ss) \ diff --git a/include/linux/parport.h b/include/linux/parport.h index 58e3c64c6b49..397607a0c0eb 100644 --- a/include/linux/parport.h +++ b/include/linux/parport.h @@ -225,6 +225,7 @@ struct parport { struct pardevice *waittail; struct list_head list; + struct timer_list timer; unsigned int flags; void *sysctl_table; diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h index 8f16299ca068..2d2096ba1cfe 100644 --- a/include/linux/percpu-defs.h +++ b/include/linux/percpu-defs.h @@ -173,6 +173,21 @@ DEFINE_PER_CPU_SECTION(type, name, "..read_mostly") /* + * Declaration/definition used for per-CPU variables that should be accessed + * as decrypted when memory encryption is enabled in the guest. + */ +#if defined(CONFIG_VIRTUALIZATION) && defined(CONFIG_AMD_MEM_ENCRYPT) + +#define DECLARE_PER_CPU_DECRYPTED(type, name) \ + DECLARE_PER_CPU_SECTION(type, name, "..decrypted") + +#define DEFINE_PER_CPU_DECRYPTED(type, name) \ + DEFINE_PER_CPU_SECTION(type, name, "..decrypted") +#else +#define DEFINE_PER_CPU_DECRYPTED(type, name) DEFINE_PER_CPU(type, name) +#endif + +/* * Intermodule exports for per-CPU variables. sparse forgets about * address space across EXPORT_SYMBOL(), change EXPORT_SYMBOL() to * noop if __CHECKER__. 
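A short sketch of the new DEFINE_PER_CPU_DECRYPTED() helper in use: per-CPU data that the hypervisor must be able to read has to stay unencrypted when memory encryption is active in the guest, so it is placed in the ..decrypted section. The structure and variable names here are hypothetical:

#include <linux/types.h>
#include <linux/percpu-defs.h>

struct hv_shared {			/* hypothetical area shared with the hypervisor */
	u64 sequence;
	u64 payload;
};

/* Lands in ..decrypted when CONFIG_VIRTUALIZATION && CONFIG_AMD_MEM_ENCRYPT. */
static DEFINE_PER_CPU_DECRYPTED(struct hv_shared, hv_shared_page);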
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 8e22f24ded6a..874b71a70058 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -485,9 +485,9 @@ struct perf_addr_filters_head { }; /** - * enum perf_event_active_state - the states of a event + * enum perf_event_state - the states of a event */ -enum perf_event_active_state { +enum perf_event_state { PERF_EVENT_STATE_DEAD = -4, PERF_EVENT_STATE_EXIT = -3, PERF_EVENT_STATE_ERROR = -2, @@ -578,7 +578,7 @@ struct perf_event { struct pmu *pmu; void *pmu_private; - enum perf_event_active_state state; + enum perf_event_state state; unsigned int attach_state; local64_t count; atomic64_t child_count; @@ -588,26 +588,10 @@ struct perf_event { * has been enabled (i.e. eligible to run, and the task has * been scheduled in, if this is a per-task event) * and running (scheduled onto the CPU), respectively. - * - * They are computed from tstamp_enabled, tstamp_running and - * tstamp_stopped when the event is in INACTIVE or ACTIVE state. */ u64 total_time_enabled; u64 total_time_running; - - /* - * These are timestamps used for computing total_time_enabled - * and total_time_running when the event is in INACTIVE or - * ACTIVE state, measured in nanoseconds from an arbitrary point - * in time. - * tstamp_enabled: the notional time when the event was enabled - * tstamp_running: the notional time when the event was scheduled on - * tstamp_stopped: in INACTIVE state, the notional time when the - * event was scheduled off. - */ - u64 tstamp_enabled; - u64 tstamp_running; - u64 tstamp_stopped; + u64 tstamp; /* * timestamp shadows the actual context timing but it can @@ -699,7 +683,6 @@ struct perf_event { #ifdef CONFIG_CGROUP_PERF struct perf_cgroup *cgrp; /* cgroup event is attach to */ - int cgrp_defer_enabled; #endif struct list_head sb_list; @@ -806,6 +789,7 @@ struct perf_output_handle { struct bpf_perf_event_data_kern { struct pt_regs *regs; struct perf_sample_data *data; + struct perf_event *event; }; #ifdef CONFIG_CGROUP_PERF @@ -884,7 +868,8 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, void *context); extern void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu); -int perf_event_read_local(struct perf_event *event, u64 *value); +int perf_event_read_local(struct perf_event *event, u64 *value, + u64 *enabled, u64 *running); extern u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running); @@ -1286,7 +1271,8 @@ static inline const struct perf_event_attr *perf_event_attrs(struct perf_event * { return ERR_PTR(-EINVAL); } -static inline int perf_event_read_local(struct perf_event *event, u64 *value) +static inline int perf_event_read_local(struct perf_event *event, u64 *value, + u64 *enabled, u64 *running) { return -EINVAL; } diff --git a/include/linux/platform_data/sht15.h b/include/linux/platform_data/sht15.h deleted file mode 100644 index 12289c1e9413..000000000000 --- a/include/linux/platform_data/sht15.h +++ /dev/null @@ -1,38 +0,0 @@ -/* - * sht15.h - support for the SHT15 Temperature and Humidity Sensor - * - * Copyright (c) 2009 Jonathan Cameron - * - * Copyright (c) 2007 Wouter Horre - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * For further information, see the Documentation/hwmon/sht15 file. 
- */ - -#ifndef _PDATA_SHT15_H -#define _PDATA_SHT15_H - -/** - * struct sht15_platform_data - sht15 connectivity info - * @gpio_data: no. of gpio to which bidirectional data line is - * connected. - * @gpio_sck: no. of gpio to which the data clock is connected. - * @supply_mv: supply voltage in mv. Overridden by regulator if - * available. - * @checksum: flag to indicate the checksum should be validated. - * @no_otp_reload: flag to indicate no reload from OTP. - * @low_resolution: flag to indicate the temp/humidity resolution to use. - */ -struct sht15_platform_data { - int gpio_data; - int gpio_sck; - int supply_mv; - bool checksum; - bool no_otp_reload; - bool low_resolution; -}; - -#endif /* _PDATA_SHT15_H */ diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h index 2efb08a60e63..f0fc4700b6ff 100644 --- a/include/linux/pm_runtime.h +++ b/include/linux/pm_runtime.h @@ -105,7 +105,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev) static inline void pm_runtime_mark_last_busy(struct device *dev) { - ACCESS_ONCE(dev->power.last_busy) = jiffies; + WRITE_ONCE(dev->power.last_busy, jiffies); } static inline bool pm_runtime_is_irq_safe(struct device *dev) diff --git a/include/linux/printk.h b/include/linux/printk.h index 335926039adc..905bba92f015 100644 --- a/include/linux/printk.h +++ b/include/linux/printk.h @@ -189,7 +189,6 @@ extern bool printk_timed_ratelimit(unsigned long *caller_jiffies, extern int printk_delay_msec; extern int dmesg_restrict; -extern int kptr_restrict; extern int devkmsg_sysctl_set_loglvl(struct ctl_table *table, int write, void __user *buf, @@ -280,6 +279,8 @@ static inline void printk_safe_flush_on_panic(void) } #endif +extern int kptr_restrict; + extern asmlinkage void dump_stack(void) __cold; #ifndef pr_fmt diff --git a/include/linux/rculist.h b/include/linux/rculist.h index c2cdd45a880a..127f534fec94 100644 --- a/include/linux/rculist.h +++ b/include/linux/rculist.h @@ -275,7 +275,7 @@ static inline void list_splice_tail_init_rcu(struct list_head *list, * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock(). */ #define list_entry_rcu(ptr, type, member) \ - container_of(lockless_dereference(ptr), type, member) + container_of(READ_ONCE(ptr), type, member) /* * Where are list_empty_rcu() and list_first_entry_rcu()? @@ -368,7 +368,7 @@ static inline void list_splice_tail_init_rcu(struct list_head *list, * example is when items are added to the list, but never deleted. */ #define list_entry_lockless(ptr, type, member) \ - container_of((typeof(ptr))lockless_dereference(ptr), type, member) + container_of((typeof(ptr))READ_ONCE(ptr), type, member) /** * list_for_each_entry_lockless - iterate over rcu list of given type diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 1a9f70d44af9..a6ddc42f87a5 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -346,7 +346,7 @@ static inline void rcu_preempt_sleep_check(void) { } #define __rcu_dereference_check(p, c, space) \ ({ \ /* Dependency order vs. p above. */ \ - typeof(*p) *________p1 = (typeof(*p) *__force)lockless_dereference(p); \ + typeof(*p) *________p1 = (typeof(*p) *__force)READ_ONCE(p); \ RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_check() usage"); \ rcu_dereference_sparse(p, space); \ ((typeof(*p) __force __kernel *)(________p1)); \ @@ -360,7 +360,7 @@ static inline void rcu_preempt_sleep_check(void) { } #define rcu_dereference_raw(p) \ ({ \ /* Dependency order vs. p above. 
*/ \ - typeof(p) ________p1 = lockless_dereference(p); \ + typeof(p) ________p1 = READ_ONCE(p); \ ((typeof(*p) __force __kernel *)(________p1)); \ }) diff --git a/include/linux/regmap.h b/include/linux/regmap.h index 978abfbac617..15eddc1353ba 100644 --- a/include/linux/regmap.h +++ b/include/linux/regmap.h @@ -120,21 +120,65 @@ struct reg_sequence { */ #define regmap_read_poll_timeout(map, addr, val, cond, sleep_us, timeout_us) \ ({ \ - ktime_t timeout = ktime_add_us(ktime_get(), timeout_us); \ + u64 __timeout_us = (timeout_us); \ + unsigned long __sleep_us = (sleep_us); \ + ktime_t __timeout = ktime_add_us(ktime_get(), __timeout_us); \ + int __ret; \ + might_sleep_if(__sleep_us); \ + for (;;) { \ + __ret = regmap_read((map), (addr), &(val)); \ + if (__ret) \ + break; \ + if (cond) \ + break; \ + if ((__timeout_us) && \ + ktime_compare(ktime_get(), __timeout) > 0) { \ + __ret = regmap_read((map), (addr), &(val)); \ + break; \ + } \ + if (__sleep_us) \ + usleep_range((__sleep_us >> 2) + 1, __sleep_us); \ + } \ + __ret ?: ((cond) ? 0 : -ETIMEDOUT); \ +}) + +/** + * regmap_field_read_poll_timeout - Poll until a condition is met or timeout + * + * @field: Regmap field to read from + * @val: Unsigned integer variable to read the value into + * @cond: Break condition (usually involving @val) + * @sleep_us: Maximum time to sleep between reads in us (0 + * tight-loops). Should be less than ~20ms since usleep_range + * is used (see Documentation/timers/timers-howto.txt). + * @timeout_us: Timeout in us, 0 means never timeout + * + * Returns 0 on success and -ETIMEDOUT upon a timeout or the regmap_field_read + * error return value in case of a error read. In the two former cases, + * the last read value at @addr is stored in @val. Must not be called + * from atomic context if sleep_us or timeout_us are used. + * + * This is modelled after the readx_poll_timeout macros in linux/iopoll.h. + */ +#define regmap_field_read_poll_timeout(field, val, cond, sleep_us, timeout_us) \ +({ \ + u64 __timeout_us = (timeout_us); \ + unsigned long __sleep_us = (sleep_us); \ + ktime_t timeout = ktime_add_us(ktime_get(), __timeout_us); \ int pollret; \ - might_sleep_if(sleep_us); \ + might_sleep_if(__sleep_us); \ for (;;) { \ - pollret = regmap_read((map), (addr), &(val)); \ + pollret = regmap_field_read((field), &(val)); \ if (pollret) \ break; \ if (cond) \ break; \ - if (timeout_us && ktime_compare(ktime_get(), timeout) > 0) { \ - pollret = regmap_read((map), (addr), &(val)); \ + if (__timeout_us && ktime_compare(ktime_get(), timeout) > 0) { \ + pollret = regmap_field_read((field), &(val)); \ break; \ } \ - if (sleep_us) \ - usleep_range((sleep_us >> 2) + 1, sleep_us); \ + if (__sleep_us) \ + usleep_range((__sleep_us >> 2) + 1, __sleep_us); \ } \ pollret ?: ((cond) ? 0 : -ETIMEDOUT); \ }) @@ -273,6 +317,9 @@ typedef void (*regmap_unlock)(void *); * * @ranges: Array of configuration entries for virtual address ranges. * @num_ranges: Number of range configuration entries. + * @hwlock_id: Specify the hardware spinlock id. + * @hwlock_mode: The hardware spinlock mode, should be HWLOCK_IRQSTATE, + * HWLOCK_IRQ or 0. 
*/ struct regmap_config { const char *name; @@ -317,6 +364,9 @@ struct regmap_config { const struct regmap_range_cfg *ranges; unsigned int num_ranges; + + unsigned int hwlock_id; + unsigned int hwlock_mode; }; /** diff --git a/include/linux/regulator/da9211.h b/include/linux/regulator/da9211.h index 80cb40b7c88d..f2fd2d3bf58f 100644 --- a/include/linux/regulator/da9211.h +++ b/include/linux/regulator/da9211.h @@ -1,6 +1,6 @@ /* * da9211.h - Regulator device driver for DA9211/DA9212 - * /DA9213/DA9214/DA9215 + * /DA9213/DA9223/DA9214/DA9224/DA9215/DA9225 * Copyright (C) 2015 Dialog Semiconductor Ltd. * * This program is free software; you can redistribute it and/or @@ -25,8 +25,11 @@ enum da9211_chip_id { DA9211, DA9212, DA9213, + DA9223, DA9214, + DA9224, DA9215, + DA9225, }; struct da9211_pdata { diff --git a/include/linux/rtc.h b/include/linux/rtc.h index f6d7ee98d93c..41319a2e409b 100644 --- a/include/linux/rtc.h +++ b/include/linux/rtc.h @@ -136,6 +136,14 @@ struct rtc_device { /* Some hardware can't support UIE mode */ int uie_unsupported; + /* Number of nsec it takes to set the RTC clock. This influences when + * the set ops are called. An offset: + * - of 0.5 s will call RTC set for wall clock time 10.0 s at 9.5 s + * - of 1.5 s will call RTC set for wall clock time 10.0 s at 8.5 s + * - of -0.5 s will call RTC set for wall clock time 10.0 s at 10.5 s + */ + long set_offset_nsec; + bool registered; struct nvmem_config *nvmem_config; @@ -173,7 +181,7 @@ extern void devm_rtc_device_unregister(struct device *dev, extern int rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm); extern int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm); -extern int rtc_set_ntp_time(struct timespec64 now); +extern int rtc_set_ntp_time(struct timespec64 now, unsigned long *target_nsec); int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm); extern int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alrm); @@ -222,6 +230,39 @@ static inline bool is_leap_year(unsigned int year) return (!(year % 4) && (year % 100)) || !(year % 400); } +/* Determine if we can call the driver to set the time. Drivers can only be + * called to set a second aligned time value, and the field set_offset_nsec + * specifies how far away from the second aligned time to call the driver. + * + * This also computes 'to_set' which is the time we are trying to set, and has + * a zero in tv_nsecs, such that: + * to_set - set_delay_nsec == now +/- FUZZ + * + */ +static inline bool rtc_tv_nsec_ok(s64 set_offset_nsec, + struct timespec64 *to_set, + const struct timespec64 *now) +{ + /* Allowed error in tv_nsec, arbitrarily set to 5 jiffies in ns.
*/ + const unsigned long TIME_SET_NSEC_FUZZ = TICK_NSEC * 5; + struct timespec64 delay = {.tv_sec = 0, + .tv_nsec = set_offset_nsec}; + + *to_set = timespec64_add(*now, delay); + + if (to_set->tv_nsec < TIME_SET_NSEC_FUZZ) { + to_set->tv_nsec = 0; + return true; + } + + if (to_set->tv_nsec > NSEC_PER_SEC - TIME_SET_NSEC_FUZZ) { + to_set->tv_sec++; + to_set->tv_nsec = 0; + return true; + } + return false; +} + #define rtc_register_device(device) \ __rtc_register_device(THIS_MODULE, device) diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h index ff3dd2ec44b4..54bcd970bfd3 100644 --- a/include/linux/rtnetlink.h +++ b/include/linux/rtnetlink.h @@ -68,7 +68,7 @@ static inline bool lockdep_rtnl_is_held(void) * @p: The pointer to read, prior to dereferencing * * Return the value of the specified RCU-protected pointer, but omit - * both the smp_read_barrier_depends() and the ACCESS_ONCE(), because + * both the smp_read_barrier_depends() and the READ_ONCE(), because * caller holds RTNL. */ #define rtnl_dereference(p) \ diff --git a/include/linux/rwlock.h b/include/linux/rwlock.h index bc2994ed66e1..3dcd617e65ae 100644 --- a/include/linux/rwlock.h +++ b/include/linux/rwlock.h @@ -38,6 +38,15 @@ do { \ extern int do_raw_write_trylock(rwlock_t *lock); extern void do_raw_write_unlock(rwlock_t *lock) __releases(lock); #else + +#ifndef arch_read_lock_flags +# define arch_read_lock_flags(lock, flags) arch_read_lock(lock) +#endif + +#ifndef arch_write_lock_flags +# define arch_write_lock_flags(lock, flags) arch_write_lock(lock) +#endif + # define do_raw_read_lock(rwlock) do {__acquire(lock); arch_read_lock(&(rwlock)->raw_lock); } while (0) # define do_raw_read_lock_flags(lock, flags) \ do {__acquire(lock); arch_read_lock_flags(&(lock)->raw_lock, *(flags)); } while (0) @@ -50,9 +59,6 @@ do { \ # define do_raw_write_unlock(rwlock) do {arch_write_unlock(&(rwlock)->raw_lock); __release(lock); } while (0) #endif -#define read_can_lock(rwlock) arch_read_can_lock(&(rwlock)->raw_lock) -#define write_can_lock(rwlock) arch_write_can_lock(&(rwlock)->raw_lock) - /* * Define the various rw_lock methods. Note we define these * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. 
The various diff --git a/include/linux/rwlock_api_smp.h b/include/linux/rwlock_api_smp.h index 5b9b84b20407..86ebb4bf9c6e 100644 --- a/include/linux/rwlock_api_smp.h +++ b/include/linux/rwlock_api_smp.h @@ -211,7 +211,7 @@ static inline void __raw_write_lock(rwlock_t *lock) LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock); } -#endif /* CONFIG_PREEMPT */ +#endif /* !CONFIG_GENERIC_LOCKBREAK || CONFIG_DEBUG_LOCK_ALLOC */ static inline void __raw_write_unlock(rwlock_t *lock) { diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h index dfa34d803439..56707d5ff6ad 100644 --- a/include/linux/rwsem.h +++ b/include/linux/rwsem.h @@ -112,6 +112,7 @@ static inline int rwsem_is_contended(struct rw_semaphore *sem) * lock for reading */ extern void down_read(struct rw_semaphore *sem); +extern int __must_check down_read_killable(struct rw_semaphore *sem); /* * trylock for reading -- returns 1 if successful, 0 if contention diff --git a/include/linux/sched.h b/include/linux/sched.h index fdf74f27acf1..a5dc7c98b0a2 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -166,8 +166,6 @@ struct task_group; /* Task command name length: */ #define TASK_COMM_LEN 16 -extern cpumask_var_t cpu_isolated_map; - extern void scheduler_tick(void); #define MAX_SCHEDULE_TIMEOUT LONG_MAX @@ -332,9 +330,11 @@ struct load_weight { struct sched_avg { u64 last_update_time; u64 load_sum; + u64 runnable_load_sum; u32 util_sum; u32 period_contrib; unsigned long load_avg; + unsigned long runnable_load_avg; unsigned long util_avg; }; @@ -377,6 +377,7 @@ struct sched_statistics { struct sched_entity { /* For load-balancing: */ struct load_weight load; + unsigned long runnable_weight; struct rb_node run_node; struct list_head group_node; unsigned int on_rq; @@ -472,10 +473,10 @@ struct sched_dl_entity { * conditions between the inactive timer handler and the wakeup * code. */ - int dl_throttled; - int dl_boosted; - int dl_yielded; - int dl_non_contending; + int dl_throttled : 1; + int dl_boosted : 1; + int dl_yielded : 1; + int dl_non_contending : 1; /* * Bandwidth enforcement timer. 
Each -deadline task has its @@ -1246,7 +1247,7 @@ static inline pid_t task_pgrp_nr(struct task_struct *tsk) #define TASK_REPORT_IDLE (TASK_REPORT + 1) #define TASK_REPORT_MAX (TASK_REPORT_IDLE << 1) -static inline unsigned int __get_task_state(struct task_struct *tsk) +static inline unsigned int task_state_index(struct task_struct *tsk) { unsigned int tsk_state = READ_ONCE(tsk->state); unsigned int state = (tsk_state | tsk->exit_state) & TASK_REPORT; @@ -1259,7 +1260,7 @@ static inline unsigned int __get_task_state(struct task_struct *tsk) return fls(state); } -static inline char __task_state_to_char(unsigned int state) +static inline char task_index_to_char(unsigned int state) { static const char state_char[] = "RSDTtXZPI"; @@ -1270,7 +1271,7 @@ static inline char __task_state_to_char(unsigned int state) static inline char task_state_to_char(struct task_struct *tsk) { - return __task_state_to_char(__get_task_state(tsk)); + return task_index_to_char(task_state_index(tsk)); } /** diff --git a/include/linux/sched/isolation.h b/include/linux/sched/isolation.h new file mode 100644 index 000000000000..d849431c8060 --- /dev/null +++ b/include/linux/sched/isolation.h @@ -0,0 +1,51 @@ +#ifndef _LINUX_SCHED_ISOLATION_H +#define _LINUX_SCHED_ISOLATION_H + +#include <linux/cpumask.h> +#include <linux/init.h> +#include <linux/tick.h> + +enum hk_flags { + HK_FLAG_TIMER = 1, + HK_FLAG_RCU = (1 << 1), + HK_FLAG_MISC = (1 << 2), + HK_FLAG_SCHED = (1 << 3), + HK_FLAG_TICK = (1 << 4), + HK_FLAG_DOMAIN = (1 << 5), +}; + +#ifdef CONFIG_CPU_ISOLATION +DECLARE_STATIC_KEY_FALSE(housekeeping_overriden); +extern int housekeeping_any_cpu(enum hk_flags flags); +extern const struct cpumask *housekeeping_cpumask(enum hk_flags flags); +extern void housekeeping_affine(struct task_struct *t, enum hk_flags flags); +extern bool housekeeping_test_cpu(int cpu, enum hk_flags flags); +extern void __init housekeeping_init(void); + +#else + +static inline int housekeeping_any_cpu(enum hk_flags flags) +{ + return smp_processor_id(); +} + +static inline const struct cpumask *housekeeping_cpumask(enum hk_flags flags) +{ + return cpu_possible_mask; +} + +static inline void housekeeping_affine(struct task_struct *t, + enum hk_flags flags) { } +static inline void housekeeping_init(void) { } +#endif /* CONFIG_CPU_ISOLATION */ + +static inline bool housekeeping_cpu(int cpu, enum hk_flags flags) +{ +#ifdef CONFIG_CPU_ISOLATION + if (static_branch_unlikely(&housekeeping_overriden)) + return housekeeping_test_cpu(cpu, flags); +#endif + return true; +} + +#endif /* _LINUX_SCHED_ISOLATION_H */ diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h index db865ed25ef3..e5af028c08b4 100644 --- a/include/linux/sched/rt.h +++ b/include/linux/sched/rt.h @@ -18,6 +18,17 @@ static inline int rt_task(struct task_struct *p) return rt_prio(p->prio); } +static inline bool task_is_realtime(struct task_struct *tsk) +{ + int policy = tsk->policy; + + if (policy == SCHED_FIFO || policy == SCHED_RR) + return true; + if (policy == SCHED_DEADLINE) + return true; + return false; +} + #ifdef CONFIG_RT_MUTEXES /* * Must hold either p->pi_lock or task_rq(p)->lock. 
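The housekeeping API introduced above replaces the old open-coded cpumask checks with flag-scoped helpers. A sketch of how a subsystem might keep unbound maintenance work off isolated CPUs; the helper and its caller are hypothetical:

#include <linux/sched.h>
#include <linux/sched/isolation.h>

/* Hypothetical: move a maintenance task onto the housekeeping set. */
static void keep_off_isolated_cpus(struct task_struct *t)
{
	/* housekeeping_cpu() is always true when no CPUs are isolated. */
	if (!housekeeping_cpu(task_cpu(t), HK_FLAG_MISC))
		housekeeping_affine(t, HK_FLAG_MISC);
}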
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h index d6a18a3839cc..1c1a1512ec55 100644 --- a/include/linux/sched/sysctl.h +++ b/include/linux/sched/sysctl.h @@ -38,9 +38,9 @@ extern unsigned int sysctl_numa_balancing_scan_period_max; extern unsigned int sysctl_numa_balancing_scan_size; #ifdef CONFIG_SCHED_DEBUG -extern unsigned int sysctl_sched_migration_cost; -extern unsigned int sysctl_sched_nr_migrate; -extern unsigned int sysctl_sched_time_avg; +extern __read_mostly unsigned int sysctl_sched_migration_cost; +extern __read_mostly unsigned int sysctl_sched_nr_migrate; +extern __read_mostly unsigned int sysctl_sched_time_avg; int sched_proc_update_handler(struct ctl_table *table, int write, void __user *buffer, size_t *length, diff --git a/include/linux/spi/spi-fsl-dspi.h b/include/linux/spi/spi-fsl-dspi.h new file mode 100644 index 000000000000..74c9bae20bf2 --- /dev/null +++ b/include/linux/spi/spi-fsl-dspi.h @@ -0,0 +1,31 @@ +/* + * Freescale DSPI controller driver + * + * Copyright (c) 2017 Angelo Dureghello <angelo@sysam.it> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef SPI_FSL_DSPI_HEADER_H +#define SPI_FSL_DSPI_HEADER_H + +/** + * struct fsl_dspi_platform_data - platform data for the Freescale DSPI driver + * @bus_num: board specific identifier for this DSPI driver. + * @cs_num: number of chip selects supported by this DSPI driver. + */ +struct fsl_dspi_platform_data { + u32 cs_num; + u32 bus_num; + u32 sck_cs_delay; + u32 cs_sck_delay; +}; + +#endif /* SPI_FSL_DSPI_HEADER_H */ diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index 341e1a12bfc7..a39186194cd6 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h @@ -166,6 +166,10 @@ static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock) arch_spin_lock(&lock->raw_lock); } +#ifndef arch_spin_lock_flags +#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) +#endif + static inline void do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock) { @@ -279,12 +283,6 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock) 1 : ({ local_irq_restore(flags); 0; }); \ }) -/** - * raw_spin_can_lock - would raw_spin_trylock() succeed? - * @lock: the spinlock in question. 
- */ -#define raw_spin_can_lock(lock) (!raw_spin_is_locked(lock)) - /* Include rwlock functions */ #include <linux/rwlock.h> @@ -397,11 +395,6 @@ static __always_inline int spin_is_contended(spinlock_t *lock) return raw_spin_is_contended(&lock->rlock); } -static __always_inline int spin_can_lock(spinlock_t *lock) -{ - return raw_spin_can_lock(&lock->rlock); -} - #define assert_spin_locked(lock) assert_raw_spin_locked(&(lock)->rlock) /* diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h index 612fb530af41..0ac9112c1bbe 100644 --- a/include/linux/spinlock_up.h +++ b/include/linux/spinlock_up.h @@ -32,14 +32,6 @@ static inline void arch_spin_lock(arch_spinlock_t *lock) barrier(); } -static inline void -arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) -{ - local_irq_save(flags); - lock->slock = 0; - barrier(); -} - static inline int arch_spin_trylock(arch_spinlock_t *lock) { char oldval = lock->slock; @@ -77,7 +69,4 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock) #define arch_spin_is_contended(lock) (((void)(lock), 0)) -#define arch_read_can_lock(lock) (((void)(lock), 1)) -#define arch_write_can_lock(lock) (((void)(lock), 1)) - #endif /* __LINUX_SPINLOCK_UP_H */ diff --git a/include/linux/tick.h b/include/linux/tick.h index cf413b344ddb..f442d1a42025 100644 --- a/include/linux/tick.h +++ b/include/linux/tick.h @@ -138,7 +138,6 @@ static inline u64 get_cpu_iowait_time_us(int cpu, u64 *unused) { return -1; } #ifdef CONFIG_NO_HZ_FULL extern bool tick_nohz_full_running; extern cpumask_var_t tick_nohz_full_mask; -extern cpumask_var_t housekeeping_mask; static inline bool tick_nohz_full_enabled(void) { @@ -162,11 +161,6 @@ static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask) cpumask_or(mask, mask, tick_nohz_full_mask); } -static inline int housekeeping_any_cpu(void) -{ - return cpumask_any_and(housekeeping_mask, cpu_online_mask); -} - extern void tick_nohz_dep_set(enum tick_dep_bits bit); extern void tick_nohz_dep_clear(enum tick_dep_bits bit); extern void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit); @@ -235,11 +229,8 @@ static inline void tick_dep_clear_signal(struct signal_struct *signal, extern void tick_nohz_full_kick_cpu(int cpu); extern void __tick_nohz_task_switch(void); +extern void __init tick_nohz_full_setup(cpumask_var_t cpumask); #else -static inline int housekeeping_any_cpu(void) -{ - return smp_processor_id(); -} static inline bool tick_nohz_full_enabled(void) { return false; } static inline bool tick_nohz_full_cpu(int cpu) { return false; } static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask) { } @@ -259,35 +250,9 @@ static inline void tick_dep_clear_signal(struct signal_struct *signal, static inline void tick_nohz_full_kick_cpu(int cpu) { } static inline void __tick_nohz_task_switch(void) { } +static inline void tick_nohz_full_setup(cpumask_var_t cpumask) { } #endif -static inline const struct cpumask *housekeeping_cpumask(void) -{ -#ifdef CONFIG_NO_HZ_FULL - if (tick_nohz_full_enabled()) - return housekeeping_mask; -#endif - return cpu_possible_mask; -} - -static inline bool is_housekeeping_cpu(int cpu) -{ -#ifdef CONFIG_NO_HZ_FULL - if (tick_nohz_full_enabled()) - return cpumask_test_cpu(cpu, housekeeping_mask); -#endif - return true; -} - -static inline void housekeeping_affine(struct task_struct *t) -{ -#ifdef CONFIG_NO_HZ_FULL - if (tick_nohz_full_enabled()) - set_cpus_allowed_ptr(t, housekeeping_mask); - -#endif -} - static inline void tick_nohz_task_switch(void) { if 
(tick_nohz_full_enabled()) diff --git a/include/linux/time.h b/include/linux/time.h index 87c36cf1cec2..4b62a2c0a661 100644 --- a/include/linux/time.h +++ b/include/linux/time.h @@ -18,149 +18,10 @@ int get_itimerspec64(struct itimerspec64 *it, int put_itimerspec64(const struct itimerspec64 *it, struct itimerspec __user *uit); -#define TIME_T_MAX (time_t)((1UL << ((sizeof(time_t) << 3) - 1)) - 1) - -static inline int timespec_equal(const struct timespec *a, - const struct timespec *b) -{ - return (a->tv_sec == b->tv_sec) && (a->tv_nsec == b->tv_nsec); -} - -/* - * lhs < rhs: return <0 - * lhs == rhs: return 0 - * lhs > rhs: return >0 - */ -static inline int timespec_compare(const struct timespec *lhs, const struct timespec *rhs) -{ - if (lhs->tv_sec < rhs->tv_sec) - return -1; - if (lhs->tv_sec > rhs->tv_sec) - return 1; - return lhs->tv_nsec - rhs->tv_nsec; -} - -static inline int timeval_compare(const struct timeval *lhs, const struct timeval *rhs) -{ - if (lhs->tv_sec < rhs->tv_sec) - return -1; - if (lhs->tv_sec > rhs->tv_sec) - return 1; - return lhs->tv_usec - rhs->tv_usec; -} - extern time64_t mktime64(const unsigned int year, const unsigned int mon, const unsigned int day, const unsigned int hour, const unsigned int min, const unsigned int sec); -/** - * Deprecated. Use mktime64(). - */ -static inline unsigned long mktime(const unsigned int year, - const unsigned int mon, const unsigned int day, - const unsigned int hour, const unsigned int min, - const unsigned int sec) -{ - return mktime64(year, mon, day, hour, min, sec); -} - -extern void set_normalized_timespec(struct timespec *ts, time_t sec, s64 nsec); - -/* - * timespec_add_safe assumes both values are positive and checks - * for overflow. It will return TIME_T_MAX if the reutrn would be - * smaller then either of the arguments. - */ -extern struct timespec timespec_add_safe(const struct timespec lhs, - const struct timespec rhs); - - -static inline struct timespec timespec_add(struct timespec lhs, - struct timespec rhs) -{ - struct timespec ts_delta; - set_normalized_timespec(&ts_delta, lhs.tv_sec + rhs.tv_sec, - lhs.tv_nsec + rhs.tv_nsec); - return ts_delta; -} - -/* - * sub = lhs - rhs, in normalized form - */ -static inline struct timespec timespec_sub(struct timespec lhs, - struct timespec rhs) -{ - struct timespec ts_delta; - set_normalized_timespec(&ts_delta, lhs.tv_sec - rhs.tv_sec, - lhs.tv_nsec - rhs.tv_nsec); - return ts_delta; -} - -/* - * Returns true if the timespec is norm, false if denorm: - */ -static inline bool timespec_valid(const struct timespec *ts) -{ - /* Dates before 1970 are bogus */ - if (ts->tv_sec < 0) - return false; - /* Can't have more nanoseconds then a second */ - if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC) - return false; - return true; -} - -static inline bool timespec_valid_strict(const struct timespec *ts) -{ - if (!timespec_valid(ts)) - return false; - /* Disallow values that could overflow ktime_t */ - if ((unsigned long long)ts->tv_sec >= KTIME_SEC_MAX) - return false; - return true; -} - -static inline bool timeval_valid(const struct timeval *tv) -{ - /* Dates before 1970 are bogus */ - if (tv->tv_sec < 0) - return false; - - /* Can't have more microseconds then a second */ - if (tv->tv_usec < 0 || tv->tv_usec >= USEC_PER_SEC) - return false; - - return true; -} - -extern struct timespec timespec_trunc(struct timespec t, unsigned gran); - -/* - * Validates if a timespec/timeval used to inject a time offset is valid. - * Offsets can be postive or negative. 
The value of the timeval/timespec - * is the sum of its fields, but *NOTE*: the field tv_usec/tv_nsec must - * always be non-negative. - */ -static inline bool timeval_inject_offset_valid(const struct timeval *tv) -{ - /* We don't check the tv_sec as it can be positive or negative */ - - /* Can't have more microseconds then a second */ - if (tv->tv_usec < 0 || tv->tv_usec >= USEC_PER_SEC) - return false; - return true; -} - -static inline bool timespec_inject_offset_valid(const struct timespec *ts) -{ - /* We don't check the tv_sec as it can be positive or negative */ - - /* Can't have more nanoseconds then a second */ - if (ts->tv_nsec < 0 || ts->tv_nsec >= NSEC_PER_SEC) - return false; - return true; -} - /* Some architectures do not supply their own clocksource. * This is mainly the case in architectures that get their * inter-tick times by reading the counter on their interval @@ -209,73 +70,7 @@ struct tm { void time64_to_tm(time64_t totalsecs, int offset, struct tm *result); -/** - * time_to_tm - converts the calendar time to local broken-down time - * - * @totalsecs the number of seconds elapsed since 00:00:00 on January 1, 1970, - * Coordinated Universal Time (UTC). - * @offset offset seconds adding to totalsecs. - * @result pointer to struct tm variable to receive broken-down time - */ -static inline void time_to_tm(time_t totalsecs, int offset, struct tm *result) -{ - time64_to_tm(totalsecs, offset, result); -} - -/** - * timespec_to_ns - Convert timespec to nanoseconds - * @ts: pointer to the timespec variable to be converted - * - * Returns the scalar nanosecond representation of the timespec - * parameter. - */ -static inline s64 timespec_to_ns(const struct timespec *ts) -{ - return ((s64) ts->tv_sec * NSEC_PER_SEC) + ts->tv_nsec; -} - -/** - * timeval_to_ns - Convert timeval to nanoseconds - * @ts: pointer to the timeval variable to be converted - * - * Returns the scalar nanosecond representation of the timeval - * parameter. - */ -static inline s64 timeval_to_ns(const struct timeval *tv) -{ - return ((s64) tv->tv_sec * NSEC_PER_SEC) + - tv->tv_usec * NSEC_PER_USEC; -} - -/** - * ns_to_timespec - Convert nanoseconds to timespec - * @nsec: the nanoseconds value to be converted - * - * Returns the timespec representation of the nsec parameter. - */ -extern struct timespec ns_to_timespec(const s64 nsec); - -/** - * ns_to_timeval - Convert nanoseconds to timeval - * @nsec: the nanoseconds value to be converted - * - * Returns the timeval representation of the nsec parameter. - */ -extern struct timeval ns_to_timeval(const s64 nsec); - -/** - * timespec_add_ns - Adds nanoseconds to a timespec - * @a: pointer to timespec to be incremented - * @ns: unsigned nanoseconds value to be added - * - * This must always be inlined because its used from the x86-64 vdso, - * which cannot call other kernel functions. - */ -static __always_inline void timespec_add_ns(struct timespec *a, u64 ns) -{ - a->tv_sec += __iter_div_u64_rem(a->tv_nsec + ns, NSEC_PER_SEC, &ns); - a->tv_nsec = ns; -} +# include <linux/time32.h> static inline bool itimerspec64_valid(const struct itimerspec64 *its) { diff --git a/include/linux/time32.h b/include/linux/time32.h new file mode 100644 index 000000000000..65b1de25198d --- /dev/null +++ b/include/linux/time32.h @@ -0,0 +1,221 @@ +#ifndef _LINUX_TIME32_H +#define _LINUX_TIME32_H +/* + * These are all interfaces based on the old time_t definition + * that overflows in 2038 on 32-bit architectures. 
New code + * should use the replacements based on time64_t and timespec64. + * + * Any interfaces in here that become unused as we migrate + * code to time64_t should get removed. + */ + +#include <linux/time64.h> + +#define TIME_T_MAX (time_t)((1UL << ((sizeof(time_t) << 3) - 1)) - 1) + +#if __BITS_PER_LONG == 64 + +/* timespec64 is defined as timespec here */ +static inline struct timespec timespec64_to_timespec(const struct timespec64 ts64) +{ + return ts64; +} + +static inline struct timespec64 timespec_to_timespec64(const struct timespec ts) +{ + return ts; +} + +# define timespec_equal timespec64_equal +# define timespec_compare timespec64_compare +# define set_normalized_timespec set_normalized_timespec64 +# define timespec_add timespec64_add +# define timespec_sub timespec64_sub +# define timespec_valid timespec64_valid +# define timespec_valid_strict timespec64_valid_strict +# define timespec_to_ns timespec64_to_ns +# define ns_to_timespec ns_to_timespec64 +# define timespec_add_ns timespec64_add_ns + +#else +static inline struct timespec timespec64_to_timespec(const struct timespec64 ts64) +{ + struct timespec ret; + + ret.tv_sec = (time_t)ts64.tv_sec; + ret.tv_nsec = ts64.tv_nsec; + return ret; +} + +static inline struct timespec64 timespec_to_timespec64(const struct timespec ts) +{ + struct timespec64 ret; + + ret.tv_sec = ts.tv_sec; + ret.tv_nsec = ts.tv_nsec; + return ret; +} + +static inline int timespec_equal(const struct timespec *a, + const struct timespec *b) +{ + return (a->tv_sec == b->tv_sec) && (a->tv_nsec == b->tv_nsec); +} + +/* + * lhs < rhs: return <0 + * lhs == rhs: return 0 + * lhs > rhs: return >0 + */ +static inline int timespec_compare(const struct timespec *lhs, const struct timespec *rhs) +{ + if (lhs->tv_sec < rhs->tv_sec) + return -1; + if (lhs->tv_sec > rhs->tv_sec) + return 1; + return lhs->tv_nsec - rhs->tv_nsec; +} + +extern void set_normalized_timespec(struct timespec *ts, time_t sec, s64 nsec); + +static inline struct timespec timespec_add(struct timespec lhs, + struct timespec rhs) +{ + struct timespec ts_delta; + + set_normalized_timespec(&ts_delta, lhs.tv_sec + rhs.tv_sec, + lhs.tv_nsec + rhs.tv_nsec); + return ts_delta; +} + +/* + * sub = lhs - rhs, in normalized form + */ +static inline struct timespec timespec_sub(struct timespec lhs, + struct timespec rhs) +{ + struct timespec ts_delta; + + set_normalized_timespec(&ts_delta, lhs.tv_sec - rhs.tv_sec, + lhs.tv_nsec - rhs.tv_nsec); + return ts_delta; +} + +/* + * Returns true if the timespec is norm, false if denorm: + */ +static inline bool timespec_valid(const struct timespec *ts) +{ + /* Dates before 1970 are bogus */ + if (ts->tv_sec < 0) + return false; + /* Can't have more nanoseconds then a second */ + if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC) + return false; + return true; +} + +static inline bool timespec_valid_strict(const struct timespec *ts) +{ + if (!timespec_valid(ts)) + return false; + /* Disallow values that could overflow ktime_t */ + if ((unsigned long long)ts->tv_sec >= KTIME_SEC_MAX) + return false; + return true; +} + +/** + * timespec_to_ns - Convert timespec to nanoseconds + * @ts: pointer to the timespec variable to be converted + * + * Returns the scalar nanosecond representation of the timespec + * parameter. 
+ */
+static inline s64 timespec_to_ns(const struct timespec *ts)
+{
+ return ((s64) ts->tv_sec * NSEC_PER_SEC) + ts->tv_nsec;
+}
+
+/**
+ * ns_to_timespec - Convert nanoseconds to timespec
+ * @nsec: the nanoseconds value to be converted
+ *
+ * Returns the timespec representation of the nsec parameter.
+ */
+extern struct timespec ns_to_timespec(const s64 nsec);
+
+/**
+ * timespec_add_ns - Adds nanoseconds to a timespec
+ * @a: pointer to timespec to be incremented
+ * @ns: unsigned nanoseconds value to be added
+ *
+ * This must always be inlined because it's used from the x86-64 vdso,
+ * which cannot call other kernel functions.
+ */
+static __always_inline void timespec_add_ns(struct timespec *a, u64 ns)
+{
+ a->tv_sec += __iter_div_u64_rem(a->tv_nsec + ns, NSEC_PER_SEC, &ns);
+ a->tv_nsec = ns;
+}
+
+#endif
+
+/**
+ * time_to_tm - converts the calendar time to local broken-down time
+ *
+ * @totalsecs: the number of seconds elapsed since 00:00:00 on January 1, 1970,
+ * Coordinated Universal Time (UTC).
+ * @offset: offset seconds to add to totalsecs.
+ * @result: pointer to struct tm variable to receive broken-down time
+ */
+static inline void time_to_tm(time_t totalsecs, int offset, struct tm *result)
+{
+ time64_to_tm(totalsecs, offset, result);
+}
+
+static inline unsigned long mktime(const unsigned int year,
+ const unsigned int mon, const unsigned int day,
+ const unsigned int hour, const unsigned int min,
+ const unsigned int sec)
+{
+ return mktime64(year, mon, day, hour, min, sec);
+}
+
+static inline bool timeval_valid(const struct timeval *tv)
+{
+ /* Dates before 1970 are bogus */
+ if (tv->tv_sec < 0)
+ return false;
+
+ /* Can't have more microseconds than a second */
+ if (tv->tv_usec < 0 || tv->tv_usec >= USEC_PER_SEC)
+ return false;
+
+ return true;
+}
+
+extern struct timespec timespec_trunc(struct timespec t, unsigned int gran);
+
+/**
+ * timeval_to_ns - Convert timeval to nanoseconds
+ * @tv: pointer to the timeval variable to be converted
+ *
+ * Returns the scalar nanosecond representation of the timeval
+ * parameter.
+ */
+static inline s64 timeval_to_ns(const struct timeval *tv)
+{
+ return ((s64) tv->tv_sec * NSEC_PER_SEC) +
+ tv->tv_usec * NSEC_PER_USEC;
+}
+
+/**
+ * ns_to_timeval - Convert nanoseconds to timeval
+ * @nsec: the nanoseconds value to be converted
+ *
+ * Returns the timeval representation of the nsec parameter.
+ */
+extern struct timeval ns_to_timeval(const s64 nsec);
+
+#endif
diff --git a/include/linux/time64.h b/include/linux/time64.h
index ad33260618f7..93d39499838e 100644
--- a/include/linux/time64.h
+++ b/include/linux/time64.h
@@ -8,11 +8,8 @@ typedef __s64 time64_t;
 typedef __u64 timeu64_t;
 
-/*
- * This wants to go into uapi/linux/time.h once we agreed about the
- * userspace interfaces.
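
timespec_add_ns() above leans on __iter_div_u64_rem(), which divides by repeated subtraction on the assumption that the added delta is almost always less than one second. A minimal userspace re-creation of the idiom (simplified helper and hypothetical xtimespec type, not the kernel implementation):

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

/* Iterative div/rem: cheap when the quotient is 0 or 1, which is the
 * common case for sub-second deltas. */
static uint64_t iter_div_u64_rem(uint64_t dividend, uint32_t divisor,
				 uint64_t *remainder)
{
	uint64_t ret = 0;

	while (dividend >= divisor) {
		dividend -= divisor;
		ret++;
	}
	*remainder = dividend;
	return ret;
}

struct xtimespec { int64_t tv_sec; long tv_nsec; };

static void xtimespec_add_ns(struct xtimespec *a, uint64_t ns)
{
	/* Carry whole seconds into tv_sec, keep the remainder normalized. */
	a->tv_sec += iter_div_u64_rem(a->tv_nsec + ns, NSEC_PER_SEC, &ns);
	a->tv_nsec = ns;
}

int main(void)
{
	struct xtimespec ts = { .tv_sec = 1, .tv_nsec = 900000000L };

	xtimespec_add_ns(&ts, 250000000ULL);	/* 1.9s + 0.25s = 2.15s */
	printf("%lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
	return 0;
}

The loop is O(seconds added), so it only wins when callers add sub-second amounts, which is the vdso's case.
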
- */
 #if __BITS_PER_LONG == 64
+/* this trick allows us to optimize out timespec64_to_timespec */
 # define timespec64 timespec
 #define itimerspec64 itimerspec
 #else
@@ -42,77 +39,6 @@ struct itimerspec64 {
 #define KTIME_MAX ((s64)~((u64)1 << 63))
 #define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC)
 
-#if __BITS_PER_LONG == 64
-
-static inline struct timespec timespec64_to_timespec(const struct timespec64 ts64)
-{
- return ts64;
-}
-
-static inline struct timespec64 timespec_to_timespec64(const struct timespec ts)
-{
- return ts;
-}
-
-static inline struct itimerspec itimerspec64_to_itimerspec(struct itimerspec64 *its64)
-{
- return *its64;
-}
-
-static inline struct itimerspec64 itimerspec_to_itimerspec64(struct itimerspec *its)
-{
- return *its;
-}
-
-# define timespec64_equal timespec_equal
-# define timespec64_compare timespec_compare
-# define set_normalized_timespec64 set_normalized_timespec
-# define timespec64_add timespec_add
-# define timespec64_sub timespec_sub
-# define timespec64_valid timespec_valid
-# define timespec64_valid_strict timespec_valid_strict
-# define timespec64_to_ns timespec_to_ns
-# define ns_to_timespec64 ns_to_timespec
-# define timespec64_add_ns timespec_add_ns
-
-#else
-
-static inline struct timespec timespec64_to_timespec(const struct timespec64 ts64)
-{
- struct timespec ret;
-
- ret.tv_sec = (time_t)ts64.tv_sec;
- ret.tv_nsec = ts64.tv_nsec;
- return ret;
-}
-
-static inline struct timespec64 timespec_to_timespec64(const struct timespec ts)
-{
- struct timespec64 ret;
-
- ret.tv_sec = ts.tv_sec;
- ret.tv_nsec = ts.tv_nsec;
- return ret;
-}
-
-static inline struct itimerspec itimerspec64_to_itimerspec(struct itimerspec64 *its64)
-{
- struct itimerspec ret;
-
- ret.it_interval = timespec64_to_timespec(its64->it_interval);
- ret.it_value = timespec64_to_timespec(its64->it_value);
- return ret;
-}
-
-static inline struct itimerspec64 itimerspec_to_itimerspec64(struct itimerspec *its)
-{
- struct itimerspec64 ret;
-
- ret.it_interval = timespec_to_timespec64(its->it_interval);
- ret.it_value = timespec_to_timespec64(its->it_value);
- return ret;
-}
-
 static inline int timespec64_equal(const struct timespec64 *a,
 const struct timespec64 *b)
 {
@@ -214,8 +140,6 @@ static __always_inline void timespec64_add_ns(struct timespec64 *a, u64 ns)
 a->tv_nsec = ns;
 }
 
-#endif
-
 /*
 * timespec64_add_safe assumes both values are positive and checks for
 * overflow. It will return TIME64_MAX in case of overflow.
diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h
index 97154c61e5d2..7e9011101cb0 100644
--- a/include/linux/timekeeper_internal.h
+++ b/include/linux/timekeeper_internal.h
@@ -14,19 +14,22 @@
 /**
 * struct tk_read_base - base structure for timekeeping readout
 * @clock: Current clocksource used for timekeeping.
- * @read: Read function of @clock
 * @mask: Bitmask for two's complement subtraction of non 64bit clocks
 * @cycle_last: @clock cycle value at last update
 * @mult: (NTP adjusted) multiplier for scaled math conversion
 * @shift: Shift value for scaled math conversion
 * @xtime_nsec: Shifted (fractional) nanoseconds offset for readout
 * @base: ktime_t (nanoseconds) base time for readout
+ * @base_real: Nanoseconds base value for clock REALTIME readout
 *
 * This struct has size 56 bytes on 64 bit. Together with a seqcount it
 * occupies a single 64-byte cache line.
 *
 * The struct is separate from struct timekeeper as it is also used
 * for the fast NMI safe accessors.
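
The "# define timespec64 timespec" line at the top of this time64.h hunk is worth pausing on: when the two structs are layout-identical, aliasing one name to the other makes the conversion helper a plain copy the compiler deletes. A standalone sketch with renamed stand-in types (assuming an LP64 vs ILP32 split, not the kernel's actual configuration machinery):

#include <stdio.h>

struct xtimespec { long tv_sec; long tv_nsec; };

#if __SIZEOF_LONG__ == 8
# define xtimespec64 xtimespec		/* alias: conversion compiles away */
#else
struct xtimespec64 { long long tv_sec; long tv_nsec; };
#endif

static inline struct xtimespec
xtimespec64_to_xtimespec(const struct xtimespec64 ts64)
{
#if __SIZEOF_LONG__ == 8
	return ts64;			/* same type, no work at all */
#else
	struct xtimespec ret = { (long)ts64.tv_sec, ts64.tv_nsec };
	return ret;
#endif
}

int main(void)
{
	struct xtimespec64 t64 = { 1500000000, 42 };
	struct xtimespec t = xtimespec64_to_xtimespec(t64);

	printf("%ld.%09ld\n", t.tv_sec, t.tv_nsec);
	return 0;
}
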
+ * + * @base_real is for the fast NMI safe accessor to allow reading clock + * realtime from any context. */ struct tk_read_base { struct clocksource *clock; @@ -36,6 +39,7 @@ struct tk_read_base { u32 shift; u64 xtime_nsec; ktime_t base; + u64 base_real; }; /** diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h index 51293e1aa4da..b17bcce58bc4 100644 --- a/include/linux/timekeeping.h +++ b/include/linux/timekeeping.h @@ -16,27 +16,16 @@ extern void xtime_update(unsigned long ticks); /* * Get and set timeofday */ -extern void do_gettimeofday(struct timeval *tv); extern int do_settimeofday64(const struct timespec64 *ts); extern int do_sys_settimeofday64(const struct timespec64 *tv, const struct timezone *tz); /* * Kernel time accessors */ -unsigned long get_seconds(void); struct timespec64 current_kernel_time64(void); -/* does not take xtime_lock */ -struct timespec __current_kernel_time(void); - -static inline struct timespec current_kernel_time(void) -{ - struct timespec64 now = current_kernel_time64(); - - return timespec64_to_timespec(now); -} /* - * timespec based interfaces + * timespec64 based interfaces */ struct timespec64 get_monotonic_coarse64(void); extern void getrawmonotonic64(struct timespec64 *ts); @@ -48,116 +37,6 @@ extern int __getnstimeofday64(struct timespec64 *tv); extern void getnstimeofday64(struct timespec64 *tv); extern void getboottime64(struct timespec64 *ts); -#if BITS_PER_LONG == 64 -/** - * Deprecated. Use do_settimeofday64(). - */ -static inline int do_settimeofday(const struct timespec *ts) -{ - return do_settimeofday64(ts); -} - -static inline int __getnstimeofday(struct timespec *ts) -{ - return __getnstimeofday64(ts); -} - -static inline void getnstimeofday(struct timespec *ts) -{ - getnstimeofday64(ts); -} - -static inline void ktime_get_ts(struct timespec *ts) -{ - ktime_get_ts64(ts); -} - -static inline void ktime_get_real_ts(struct timespec *ts) -{ - getnstimeofday64(ts); -} - -static inline void getrawmonotonic(struct timespec *ts) -{ - getrawmonotonic64(ts); -} - -static inline struct timespec get_monotonic_coarse(void) -{ - return get_monotonic_coarse64(); -} - -static inline void getboottime(struct timespec *ts) -{ - return getboottime64(ts); -} -#else -/** - * Deprecated. Use do_settimeofday64(). 
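
The @base_real addition above exists because the fast accessors cannot take locks: they rely on a seqcount, where the writer makes the sequence odd while updating and readers retry until they observe a stable even value. A much-simplified userspace sketch of that retry loop (hypothetical fast_tk type; the kernel's memory-ordering rules are stricter than shown here):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct fast_tk {
	atomic_uint seq;	/* odd while an update is in flight */
	_Atomic uint64_t base;	/* stand-in for tk_read_base.base_real */
};

static void writer_update(struct fast_tk *tk, uint64_t new_base)
{
	atomic_fetch_add(&tk->seq, 1);		/* seq goes odd */
	atomic_store_explicit(&tk->base, new_base, memory_order_relaxed);
	atomic_fetch_add(&tk->seq, 1);		/* seq even again */
}

static uint64_t reader_get(struct fast_tk *tk)
{
	unsigned int seq;
	uint64_t val;

	/* Retry if we raced with a writer: odd seq, or seq changed. */
	do {
		seq = atomic_load(&tk->seq);
		val = atomic_load_explicit(&tk->base, memory_order_relaxed);
	} while ((seq & 1) || seq != atomic_load(&tk->seq));

	return val;
}

int main(void)
{
	struct fast_tk tk = { 0, 0 };

	writer_update(&tk, 1234567890ULL);
	printf("base_real = %llu ns\n", (unsigned long long)reader_get(&tk));
	return 0;
}

Keeping the read side lock-free is what makes the accessor safe to call from NMI context.
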
- */ -static inline int do_settimeofday(const struct timespec *ts) -{ - struct timespec64 ts64; - - ts64 = timespec_to_timespec64(*ts); - return do_settimeofday64(&ts64); -} - -static inline int __getnstimeofday(struct timespec *ts) -{ - struct timespec64 ts64; - int ret = __getnstimeofday64(&ts64); - - *ts = timespec64_to_timespec(ts64); - return ret; -} - -static inline void getnstimeofday(struct timespec *ts) -{ - struct timespec64 ts64; - - getnstimeofday64(&ts64); - *ts = timespec64_to_timespec(ts64); -} - -static inline void ktime_get_ts(struct timespec *ts) -{ - struct timespec64 ts64; - - ktime_get_ts64(&ts64); - *ts = timespec64_to_timespec(ts64); -} - -static inline void ktime_get_real_ts(struct timespec *ts) -{ - struct timespec64 ts64; - - getnstimeofday64(&ts64); - *ts = timespec64_to_timespec(ts64); -} - -static inline void getrawmonotonic(struct timespec *ts) -{ - struct timespec64 ts64; - - getrawmonotonic64(&ts64); - *ts = timespec64_to_timespec(ts64); -} - -static inline struct timespec get_monotonic_coarse(void) -{ - return timespec64_to_timespec(get_monotonic_coarse64()); -} - -static inline void getboottime(struct timespec *ts) -{ - struct timespec64 ts64; - - getboottime64(&ts64); - *ts = timespec64_to_timespec(ts64); -} -#endif - #define ktime_get_real_ts64(ts) getnstimeofday64(ts) /* @@ -240,25 +119,16 @@ static inline u64 ktime_get_raw_ns(void) extern u64 ktime_get_mono_fast_ns(void); extern u64 ktime_get_raw_fast_ns(void); extern u64 ktime_get_boot_fast_ns(void); +extern u64 ktime_get_real_fast_ns(void); /* - * Timespec interfaces utilizing the ktime based ones + * timespec64 interfaces utilizing the ktime based ones */ -static inline void get_monotonic_boottime(struct timespec *ts) -{ - *ts = ktime_to_timespec(ktime_get_boottime()); -} - static inline void get_monotonic_boottime64(struct timespec64 *ts) { *ts = ktime_to_timespec64(ktime_get_boottime()); } -static inline void timekeeping_clocktai(struct timespec *ts) -{ - *ts = ktime_to_timespec(ktime_get_clocktai()); -} - static inline void timekeeping_clocktai64(struct timespec64 *ts) { *ts = ktime_to_timespec64(ktime_get_clocktai()); @@ -335,10 +205,8 @@ extern void ktime_get_snapshot(struct system_time_snapshot *systime_snapshot); */ extern int persistent_clock_is_local; -extern void read_persistent_clock(struct timespec *ts); extern void read_persistent_clock64(struct timespec64 *ts); extern void read_boot_clock64(struct timespec64 *ts); -extern int update_persistent_clock(struct timespec now); extern int update_persistent_clock64(struct timespec64 now); diff --git a/include/linux/timekeeping32.h b/include/linux/timekeeping32.h new file mode 100644 index 000000000000..af4114d5dc17 --- /dev/null +++ b/include/linux/timekeeping32.h @@ -0,0 +1,151 @@ +#ifndef _LINUX_TIMEKEEPING32_H +#define _LINUX_TIMEKEEPING32_H +/* + * These interfaces are all based on the old timespec type + * and should get replaced with the timespec64 based versions + * over time so we can remove the file here. + */ + +extern void do_gettimeofday(struct timeval *tv); +unsigned long get_seconds(void); + +/* does not take xtime_lock */ +struct timespec __current_kernel_time(void); + +static inline struct timespec current_kernel_time(void) +{ + struct timespec64 now = current_kernel_time64(); + + return timespec64_to_timespec(now); +} + +#if BITS_PER_LONG == 64 +/** + * Deprecated. Use do_settimeofday64(). 
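
The wrappers being collected into timekeeping32.h all follow one shim pattern: the real implementation is timespec64-only, and the deprecated entry point merely converts at the boundary. A standalone sketch of that shape (hypothetical get_time32/get_time64 names, with POSIX clock_gettime standing in for the real backend):

#include <stdint.h>
#include <stdio.h>
#include <time.h>

struct ts64 { int64_t tv_sec; long tv_nsec; };
struct ts32 { int32_t tv_sec; long tv_nsec; };

static void get_time64(struct ts64 *ts)		/* the "new" API */
{
	struct timespec now;

	clock_gettime(CLOCK_REALTIME, &now);
	ts->tv_sec = now.tv_sec;
	ts->tv_nsec = now.tv_nsec;
}

static void get_time32(struct ts32 *ts)		/* deprecated shim */
{
	struct ts64 ts64;

	get_time64(&ts64);
	ts->tv_sec = (int32_t)ts64.tv_sec;	/* lossy after 2038 */
	ts->tv_nsec = ts64.tv_nsec;
}

int main(void)
{
	struct ts32 ts;

	get_time32(&ts);
	printf("%d.%09ld\n", (int)ts.tv_sec, ts.tv_nsec);
	return 0;
}

Quarantining the shims in one header makes each remaining caller easy to find and convert.
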
+ */ +static inline int do_settimeofday(const struct timespec *ts) +{ + return do_settimeofday64(ts); +} + +static inline int __getnstimeofday(struct timespec *ts) +{ + return __getnstimeofday64(ts); +} + +static inline void getnstimeofday(struct timespec *ts) +{ + getnstimeofday64(ts); +} + +static inline void ktime_get_ts(struct timespec *ts) +{ + ktime_get_ts64(ts); +} + +static inline void ktime_get_real_ts(struct timespec *ts) +{ + getnstimeofday64(ts); +} + +static inline void getrawmonotonic(struct timespec *ts) +{ + getrawmonotonic64(ts); +} + +static inline struct timespec get_monotonic_coarse(void) +{ + return get_monotonic_coarse64(); +} + +static inline void getboottime(struct timespec *ts) +{ + return getboottime64(ts); +} +#else +/** + * Deprecated. Use do_settimeofday64(). + */ +static inline int do_settimeofday(const struct timespec *ts) +{ + struct timespec64 ts64; + + ts64 = timespec_to_timespec64(*ts); + return do_settimeofday64(&ts64); +} + +static inline int __getnstimeofday(struct timespec *ts) +{ + struct timespec64 ts64; + int ret = __getnstimeofday64(&ts64); + + *ts = timespec64_to_timespec(ts64); + return ret; +} + +static inline void getnstimeofday(struct timespec *ts) +{ + struct timespec64 ts64; + + getnstimeofday64(&ts64); + *ts = timespec64_to_timespec(ts64); +} + +static inline void ktime_get_ts(struct timespec *ts) +{ + struct timespec64 ts64; + + ktime_get_ts64(&ts64); + *ts = timespec64_to_timespec(ts64); +} + +static inline void ktime_get_real_ts(struct timespec *ts) +{ + struct timespec64 ts64; + + getnstimeofday64(&ts64); + *ts = timespec64_to_timespec(ts64); +} + +static inline void getrawmonotonic(struct timespec *ts) +{ + struct timespec64 ts64; + + getrawmonotonic64(&ts64); + *ts = timespec64_to_timespec(ts64); +} + +static inline struct timespec get_monotonic_coarse(void) +{ + return timespec64_to_timespec(get_monotonic_coarse64()); +} + +static inline void getboottime(struct timespec *ts) +{ + struct timespec64 ts64; + + getboottime64(&ts64); + *ts = timespec64_to_timespec(ts64); +} +#endif + +/* + * Timespec interfaces utilizing the ktime based ones + */ +static inline void get_monotonic_boottime(struct timespec *ts) +{ + *ts = ktime_to_timespec(ktime_get_boottime()); +} + +static inline void timekeeping_clocktai(struct timespec *ts) +{ + *ts = ktime_to_timespec(ktime_get_clocktai()); +} + +/* + * Persistent clock related interfaces + */ +extern void read_persistent_clock(struct timespec *ts); +extern int update_persistent_clock(struct timespec now); + +#endif diff --git a/include/linux/timer.h b/include/linux/timer.h index ac66f29c6916..bf781acfc6d8 100644 --- a/include/linux/timer.h +++ b/include/linux/timer.h @@ -64,31 +64,21 @@ struct timer_list { #define TIMER_TRACE_FLAGMASK (TIMER_MIGRATING | TIMER_DEFERRABLE | TIMER_PINNED | TIMER_IRQSAFE) -#define __TIMER_INITIALIZER(_function, _expires, _data, _flags) { \ +#define TIMER_DATA_TYPE unsigned long +#define TIMER_FUNC_TYPE void (*)(TIMER_DATA_TYPE) + +#define __TIMER_INITIALIZER(_function, _data, _flags) { \ .entry = { .next = TIMER_ENTRY_STATIC }, \ .function = (_function), \ - .expires = (_expires), \ .data = (_data), \ .flags = (_flags), \ __TIMER_LOCKDEP_MAP_INITIALIZER( \ __FILE__ ":" __stringify(__LINE__)) \ } -#define TIMER_INITIALIZER(_function, _expires, _data) \ - __TIMER_INITIALIZER((_function), (_expires), (_data), 0) - -#define TIMER_PINNED_INITIALIZER(_function, _expires, _data) \ - __TIMER_INITIALIZER((_function), (_expires), (_data), TIMER_PINNED) - -#define 
TIMER_DEFERRED_INITIALIZER(_function, _expires, _data) \ - __TIMER_INITIALIZER((_function), (_expires), (_data), TIMER_DEFERRABLE) - -#define TIMER_PINNED_DEFERRED_INITIALIZER(_function, _expires, _data) \ - __TIMER_INITIALIZER((_function), (_expires), (_data), TIMER_DEFERRABLE | TIMER_PINNED) - -#define DEFINE_TIMER(_name, _function, _expires, _data) \ +#define DEFINE_TIMER(_name, _function) \ struct timer_list _name = \ - TIMER_INITIALIZER(_function, _expires, _data) + __TIMER_INITIALIZER((TIMER_FUNC_TYPE)_function, 0, 0) void init_timer_key(struct timer_list *timer, unsigned int flags, const char *name, struct lock_class_key *key); @@ -129,14 +119,6 @@ static inline void init_timer_on_stack_key(struct timer_list *timer, #define init_timer(timer) \ __init_timer((timer), 0) -#define init_timer_pinned(timer) \ - __init_timer((timer), TIMER_PINNED) -#define init_timer_deferrable(timer) \ - __init_timer((timer), TIMER_DEFERRABLE) -#define init_timer_pinned_deferrable(timer) \ - __init_timer((timer), TIMER_DEFERRABLE | TIMER_PINNED) -#define init_timer_on_stack(timer) \ - __init_timer_on_stack((timer), 0) #define __setup_timer(_timer, _fn, _data, _flags) \ do { \ @@ -169,9 +151,7 @@ static inline void init_timer_on_stack_key(struct timer_list *timer, #define setup_pinned_deferrable_timer_on_stack(timer, fn, data) \ __setup_timer_on_stack((timer), (fn), (data), TIMER_DEFERRABLE | TIMER_PINNED) -#define TIMER_DATA_TYPE unsigned long -#define TIMER_FUNC_TYPE void (*)(TIMER_DATA_TYPE) - +#ifndef CONFIG_LOCKDEP static inline void timer_setup(struct timer_list *timer, void (*callback)(struct timer_list *), unsigned int flags) @@ -180,6 +160,28 @@ static inline void timer_setup(struct timer_list *timer, (TIMER_DATA_TYPE)timer, flags); } +static inline void timer_setup_on_stack(struct timer_list *timer, + void (*callback)(struct timer_list *), + unsigned int flags) +{ + __setup_timer_on_stack(timer, (TIMER_FUNC_TYPE)callback, + (TIMER_DATA_TYPE)timer, flags); +} +#else +/* + * Under LOCKDEP, the timer lock_class_key (set up in __init_timer) needs + * to be tied to the caller's context, so an inline (above) won't work. We + * do want to keep the inline for argument type checking, though. 
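
The timer_setup()/from_timer() pair above replaces the old unsigned-long data cookie: the callback now receives the timer pointer itself and recovers its enclosing object with container_of(). A userspace sketch of the pattern (hypothetical xtimer and my_device types, not the kernel implementation; typeof is a GCC/Clang extension, as in the kernel):

#include <stddef.h>
#include <stdio.h>

/* Recover the enclosing struct from a pointer to one of its members. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct xtimer {
	void (*function)(struct xtimer *t);
};

struct my_device {
	const char *name;
	struct xtimer timeout;		/* embedded timer */
};

#define from_xtimer(var, t, field) container_of(t, typeof(*var), field)

static void my_timeout_fn(struct xtimer *t)
{
	struct my_device *dev = from_xtimer(dev, t, timeout);

	printf("timeout on %s\n", dev->name);
}

int main(void)
{
	struct my_device dev = { .name = "dev0" };

	dev.timeout.function = my_timeout_fn;
	dev.timeout.function(&dev.timeout);	/* simulate an expiry */
	return 0;
}

Because the cookie is gone, the callback signature is type-checked and the casts live in one place instead of in every timer user.
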
+ */ +# define timer_setup(timer, callback, flags) \ + __setup_timer((timer), (TIMER_FUNC_TYPE)(callback), \ + (TIMER_DATA_TYPE)(timer), (flags)) +# define timer_setup_on_stack(timer, callback, flags) \ + __setup_timer_on_stack((timer), \ + (TIMER_FUNC_TYPE)(callback), \ + (TIMER_DATA_TYPE)(timer), (flags)) +#endif + #define from_timer(var, callback_timer, timer_fieldname) \ container_of(callback_timer, typeof(*var), timer_fieldname) @@ -202,6 +204,7 @@ extern void add_timer_on(struct timer_list *timer, int cpu); extern int del_timer(struct timer_list * timer); extern int mod_timer(struct timer_list *timer, unsigned long expires); extern int mod_timer_pending(struct timer_list *timer, unsigned long expires); +extern int timer_reduce(struct timer_list *timer, unsigned long expires); /* * The jiffies value which is added to now, when there is no timer diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 0eae11fc7a23..01a050fc6650 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -18,7 +18,7 @@ struct workqueue_struct; struct work_struct; typedef void (*work_func_t)(struct work_struct *work); -void delayed_work_timer_fn(unsigned long __data); +void delayed_work_timer_fn(struct timer_list *t); /* * The first word is the work queue pointer and the flags rolled into @@ -176,8 +176,8 @@ struct execute_work { #define __DELAYED_WORK_INITIALIZER(n, f, tflags) { \ .work = __WORK_INITIALIZER((n).work, (f)), \ - .timer = __TIMER_INITIALIZER(delayed_work_timer_fn, \ - 0, (unsigned long)&(n), \ + .timer = __TIMER_INITIALIZER((TIMER_FUNC_TYPE)delayed_work_timer_fn,\ + (TIMER_DATA_TYPE)&(n.timer), \ (tflags) | TIMER_IRQSAFE), \ } @@ -219,7 +219,7 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; } \ __init_work((_work), _onstack); \ (_work)->data = (atomic_long_t) WORK_DATA_INIT(); \ - lockdep_init_map(&(_work)->lockdep_map, #_work, &__key, 0); \ + lockdep_init_map(&(_work)->lockdep_map, "(work_completion)"#_work, &__key, 0); \ INIT_LIST_HEAD(&(_work)->entry); \ (_work)->func = (_func); \ } while (0) @@ -242,8 +242,9 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; } #define __INIT_DELAYED_WORK(_work, _func, _tflags) \ do { \ INIT_WORK(&(_work)->work, (_func)); \ - __setup_timer(&(_work)->timer, delayed_work_timer_fn, \ - (unsigned long)(_work), \ + __setup_timer(&(_work)->timer, \ + (TIMER_FUNC_TYPE)delayed_work_timer_fn, \ + (TIMER_DATA_TYPE)&(_work)->timer, \ (_tflags) | TIMER_IRQSAFE); \ } while (0) @@ -251,8 +252,8 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; } do { \ INIT_WORK_ONSTACK(&(_work)->work, (_func)); \ __setup_timer_on_stack(&(_work)->timer, \ - delayed_work_timer_fn, \ - (unsigned long)(_work), \ + (TIMER_FUNC_TYPE)delayed_work_timer_fn,\ + (TIMER_DATA_TYPE)&(_work)->timer,\ (_tflags) | TIMER_IRQSAFE); \ } while (0) @@ -399,7 +400,7 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active, static struct lock_class_key __key; \ const char *__lock_name; \ \ - __lock_name = #fmt#args; \ + __lock_name = "(wq_completion)"#fmt#args; \ \ __alloc_workqueue_key((fmt), (flags), (max_active), \ &__key, __lock_name, ##args); \ diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h index 5d08c1950e7d..ff68cf288f9b 100644 --- a/include/net/ip_vs.h +++ b/include/net/ip_vs.h @@ -984,12 +984,12 @@ static inline int sysctl_sync_threshold(struct netns_ipvs *ipvs) static inline int sysctl_sync_period(struct netns_ipvs *ipvs) { - return 
ACCESS_ONCE(ipvs->sysctl_sync_threshold[1]); + return READ_ONCE(ipvs->sysctl_sync_threshold[1]); } static inline unsigned int sysctl_sync_refresh_period(struct netns_ipvs *ipvs) { - return ACCESS_ONCE(ipvs->sysctl_sync_refresh_period); + return READ_ONCE(ipvs->sysctl_sync_refresh_period); } static inline int sysctl_sync_retries(struct netns_ipvs *ipvs) @@ -1014,7 +1014,7 @@ static inline int sysctl_sloppy_sctp(struct netns_ipvs *ipvs) static inline int sysctl_sync_ports(struct netns_ipvs *ipvs) { - return ACCESS_ONCE(ipvs->sysctl_sync_ports); + return READ_ONCE(ipvs->sysctl_sync_ports); } static inline int sysctl_sync_persist_mode(struct netns_ipvs *ipvs) diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index 079c69cae2f6..470c1c71e7f4 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -1165,8 +1165,8 @@ static inline u8 nft_genmask_next(const struct net *net) static inline u8 nft_genmask_cur(const struct net *net) { - /* Use ACCESS_ONCE() to prevent refetching the value for atomicity */ - return 1 << ACCESS_ONCE(net->nft.gencursor); + /* Use READ_ONCE() to prevent refetching the value for atomicity */ + return 1 << READ_ONCE(net->nft.gencursor); } #define NFT_GENMASK_ANY ((1 << 0) | (1 << 1)) diff --git a/include/scsi/libfcoe.h b/include/scsi/libfcoe.h index 722d3264d3bf..cb8a273732cf 100644 --- a/include/scsi/libfcoe.h +++ b/include/scsi/libfcoe.h @@ -382,7 +382,7 @@ static inline struct net_device *fcoe_get_netdev(const struct fc_lport *lport) void fcoe_clean_pending_queue(struct fc_lport *); void fcoe_check_wait_queue(struct fc_lport *lport, struct sk_buff *skb); -void fcoe_queue_timer(ulong lport); +void fcoe_queue_timer(struct timer_list *t); int fcoe_get_paged_crc_eof(struct sk_buff *skb, int tlen, struct fcoe_percpu_s *fps); diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h index 6c0dc6155ee7..388aaf72b480 100644 --- a/include/scsi/libsas.h +++ b/include/scsi/libsas.h @@ -629,6 +629,7 @@ struct sas_task_slow { */ struct timer_list timer; struct completion completion; + struct sas_task *task; }; #define SAS_TASK_STATE_PENDING 1 diff --git a/include/trace/events/irq_matrix.h b/include/trace/events/irq_matrix.h new file mode 100644 index 000000000000..267d4cbbf360 --- /dev/null +++ b/include/trace/events/irq_matrix.h @@ -0,0 +1,201 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM irq_matrix + +#if !defined(_TRACE_IRQ_MATRIX_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_IRQ_MATRIX_H + +#include <linux/tracepoint.h> + +struct irq_matrix; +struct cpumap; + +DECLARE_EVENT_CLASS(irq_matrix_global, + + TP_PROTO(struct irq_matrix *matrix), + + TP_ARGS(matrix), + + TP_STRUCT__entry( + __field( unsigned int, online_maps ) + __field( unsigned int, global_available ) + __field( unsigned int, global_reserved ) + __field( unsigned int, total_allocated ) + ), + + TP_fast_assign( + __entry->online_maps = matrix->online_maps; + __entry->global_available = matrix->global_available; + __entry->global_reserved = matrix->global_reserved; + __entry->total_allocated = matrix->total_allocated; + ), + + TP_printk("online_maps=%d global_avl=%u, global_rsvd=%u, total_alloc=%u", + __entry->online_maps, __entry->global_available, + __entry->global_reserved, __entry->total_allocated) +); + +DECLARE_EVENT_CLASS(irq_matrix_global_update, + + TP_PROTO(int bit, struct irq_matrix *matrix), + + TP_ARGS(bit, matrix), + + TP_STRUCT__entry( + __field( int, bit ) + __field( unsigned int, online_maps ) + __field( unsigned int, 
global_available ) + __field( unsigned int, global_reserved ) + __field( unsigned int, total_allocated ) + ), + + TP_fast_assign( + __entry->bit = bit; + __entry->online_maps = matrix->online_maps; + __entry->global_available = matrix->global_available; + __entry->global_reserved = matrix->global_reserved; + __entry->total_allocated = matrix->total_allocated; + ), + + TP_printk("bit=%d online_maps=%d global_avl=%u, global_rsvd=%u, total_alloc=%u", + __entry->bit, __entry->online_maps, + __entry->global_available, __entry->global_reserved, + __entry->total_allocated) +); + +DECLARE_EVENT_CLASS(irq_matrix_cpu, + + TP_PROTO(int bit, unsigned int cpu, struct irq_matrix *matrix, + struct cpumap *cmap), + + TP_ARGS(bit, cpu, matrix, cmap), + + TP_STRUCT__entry( + __field( int, bit ) + __field( unsigned int, cpu ) + __field( bool, online ) + __field( unsigned int, available ) + __field( unsigned int, allocated ) + __field( unsigned int, managed ) + __field( unsigned int, online_maps ) + __field( unsigned int, global_available ) + __field( unsigned int, global_reserved ) + __field( unsigned int, total_allocated ) + ), + + TP_fast_assign( + __entry->bit = bit; + __entry->cpu = cpu; + __entry->online = cmap->online; + __entry->available = cmap->available; + __entry->allocated = cmap->allocated; + __entry->managed = cmap->managed; + __entry->online_maps = matrix->online_maps; + __entry->global_available = matrix->global_available; + __entry->global_reserved = matrix->global_reserved; + __entry->total_allocated = matrix->total_allocated; + ), + + TP_printk("bit=%d cpu=%u online=%d avl=%u alloc=%u managed=%u online_maps=%u global_avl=%u, global_rsvd=%u, total_alloc=%u", + __entry->bit, __entry->cpu, __entry->online, + __entry->available, __entry->allocated, + __entry->managed, __entry->online_maps, + __entry->global_available, __entry->global_reserved, + __entry->total_allocated) +); + +DEFINE_EVENT(irq_matrix_global, irq_matrix_online, + + TP_PROTO(struct irq_matrix *matrix), + + TP_ARGS(matrix) +); + +DEFINE_EVENT(irq_matrix_global, irq_matrix_offline, + + TP_PROTO(struct irq_matrix *matrix), + + TP_ARGS(matrix) +); + +DEFINE_EVENT(irq_matrix_global, irq_matrix_reserve, + + TP_PROTO(struct irq_matrix *matrix), + + TP_ARGS(matrix) +); + +DEFINE_EVENT(irq_matrix_global, irq_matrix_remove_reserved, + + TP_PROTO(struct irq_matrix *matrix), + + TP_ARGS(matrix) +); + +DEFINE_EVENT(irq_matrix_global_update, irq_matrix_assign_system, + + TP_PROTO(int bit, struct irq_matrix *matrix), + + TP_ARGS(bit, matrix) +); + +DEFINE_EVENT(irq_matrix_cpu, irq_matrix_alloc_reserved, + + TP_PROTO(int bit, unsigned int cpu, + struct irq_matrix *matrix, struct cpumap *cmap), + + TP_ARGS(bit, cpu, matrix, cmap) +); + +DEFINE_EVENT(irq_matrix_cpu, irq_matrix_reserve_managed, + + TP_PROTO(int bit, unsigned int cpu, + struct irq_matrix *matrix, struct cpumap *cmap), + + TP_ARGS(bit, cpu, matrix, cmap) +); + +DEFINE_EVENT(irq_matrix_cpu, irq_matrix_remove_managed, + + TP_PROTO(int bit, unsigned int cpu, + struct irq_matrix *matrix, struct cpumap *cmap), + + TP_ARGS(bit, cpu, matrix, cmap) +); + +DEFINE_EVENT(irq_matrix_cpu, irq_matrix_alloc_managed, + + TP_PROTO(int bit, unsigned int cpu, + struct irq_matrix *matrix, struct cpumap *cmap), + + TP_ARGS(bit, cpu, matrix, cmap) +); + +DEFINE_EVENT(irq_matrix_cpu, irq_matrix_assign, + + TP_PROTO(int bit, unsigned int cpu, + struct irq_matrix *matrix, struct cpumap *cmap), + + TP_ARGS(bit, cpu, matrix, cmap) +); + +DEFINE_EVENT(irq_matrix_cpu, irq_matrix_alloc, + + TP_PROTO(int bit, 
unsigned int cpu,
+ struct irq_matrix *matrix, struct cpumap *cmap),
+
+ TP_ARGS(bit, cpu, matrix, cmap)
+);
+
+DEFINE_EVENT(irq_matrix_cpu, irq_matrix_free,
+
+ TP_PROTO(int bit, unsigned int cpu,
+ struct irq_matrix *matrix, struct cpumap *cmap),
+
+ TP_ARGS(bit, cpu, matrix, cmap)
+);
+
+
+#endif /* _TRACE_IRQ_MATRIX_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index da10aa21bebc..306b31de5194 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -118,7 +118,7 @@ static inline long __trace_sched_switch_state(bool preempt, struct task_struct *
 if (preempt)
 return TASK_STATE_MAX;
- return __get_task_state(p);
+ return task_state_index(p);
 }
 #endif /* CREATE_TRACE_POINTS */
diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
index c58627c0d6fb..e09a4f963dc0 100644
--- a/include/uapi/linux/elf.h
+++ b/include/uapi/linux/elf.h
@@ -412,6 +412,7 @@ typedef struct elf64_shdr {
 #define NT_S390_VXRS_HIGH 0x30a /* s390 vector registers 16-31 */
 #define NT_S390_GS_CB 0x30b /* s390 guarded storage registers */
 #define NT_S390_GS_BC 0x30c /* s390 guarded storage broadcast control block */
+#define NT_S390_RI_CB 0x30d /* s390 runtime instrumentation */
 #define NT_ARM_VFP 0x400 /* ARM VFP/NEON registers */
 #define NT_ARM_TLS 0x401 /* ARM TLS register */
 #define NT_ARM_HW_BREAK 0x402 /* ARM hardware breakpoint registers */
diff --git a/include/uapi/linux/stddef.h b/include/uapi/linux/stddef.h
index f65b92e0e1f9..ee8220f8dcf5 100644
--- a/include/uapi/linux/stddef.h
+++ b/include/uapi/linux/stddef.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#include <linux/compiler.h>
+#include <linux/compiler_types.h>
 
 #ifndef __always_inline
 #define __always_inline inline
diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h
index a92be0f492a9..c1395b5bd432 100644
--- a/include/uapi/linux/xattr.h
+++ b/include/uapi/linux/xattr.h
@@ -66,6 +66,9 @@
 #define XATTR_NAME_SMACKTRANSMUTE XATTR_SECURITY_PREFIX XATTR_SMACK_TRANSMUTE
 #define XATTR_NAME_SMACKMMAP XATTR_SECURITY_PREFIX XATTR_SMACK_MMAP
 
+#define XATTR_APPARMOR_SUFFIX "apparmor"
+#define XATTR_NAME_APPARMOR XATTR_SECURITY_PREFIX XATTR_APPARMOR_SUFFIX
+
 #define XATTR_CAPS_SUFFIX "capability"
 #define XATTR_NAME_CAPS XATTR_SECURITY_PREFIX XATTR_CAPS_SUFFIX
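
Finally, the new XATTR_NAME_APPARMOR above is pure preprocessor string pasting: adjacent string literals concatenate at compile time. The same mechanism, reproduced standalone with the macro shapes from the hunk:

#include <stdio.h>

#define XATTR_SECURITY_PREFIX "security."
#define XATTR_APPARMOR_SUFFIX "apparmor"
#define XATTR_NAME_APPARMOR XATTR_SECURITY_PREFIX XATTR_APPARMOR_SUFFIX

int main(void)
{
	/* This is the name userspace would pass to getxattr(2). */
	printf("%s\n", XATTR_NAME_APPARMOR);	/* prints: security.apparmor */
	return 0;
}
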