author    Thomas Gleixner <tglx@linutronix.de>	2020-06-11 20:02:46 +0200
committer Thomas Gleixner <tglx@linutronix.de>	2020-06-11 20:02:46 +0200
commit    37d1a04b13a6d2fec91a6813fc034947a27db034 (patch)
tree      c6a8d7d0df96a6eb1ddb53a12885761cb72e0e92 /include
parent    37f8173dd84936ea78000ed1cad24f8b18d48ebb (diff)
parent    97a9474aeb789183a1d0712e66a4283860279ac9 (diff)
Rebase locking/kcsan to locking/urgent
Merge the state of the locking kcsan branch before the read/write_once()
and the atomics modifications got merged.

Squash the fallout of the rebase on top of the read/write once and
atomic fallback work into the merge. The history of the original branch
is preserved in tag locking-kcsan-2020-06-02.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
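For reference, the kasan_check_*() calls removed throughout the diff are replaced by the new instrument_atomic_*() helpers from include/linux/instrumented.h (one of the files added by this merge), which fan out to both the KASAN check and the new KCSAN check. A minimal sketch of the shape these helpers take, simplified from that header:

#include <linux/kasan-checks.h>
#include <linux/kcsan-checks.h>

/* Sketch, simplified: instrument an atomic read of @size bytes at @v. */
static __always_inline void instrument_atomic_read(const volatile void *v, size_t size)
{
	kasan_check_read(v, size);		/* KASAN: address validity */
	kcsan_check_atomic_read(v, size);	/* KCSAN: treat as atomic for race detection */
}

/* Sketch, simplified: instrument an atomic write of @size bytes at @v. */
static __always_inline void instrument_atomic_write(const volatile void *v, size_t size)
{
	kasan_check_write(v, size);
	kcsan_check_atomic_write(v, size);
}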
Diffstat (limited to 'include')
-rw-r--r--  include/asm-generic/atomic-instrumented.h            | 711
-rw-r--r--  include/asm-generic/atomic-long.h                    | 331
-rw-r--r--  include/asm-generic/bitops/instrumented-atomic.h     |  14
-rw-r--r--  include/asm-generic/bitops/instrumented-lock.h       |  10
-rw-r--r--  include/asm-generic/bitops/instrumented-non-atomic.h |  16
-rw-r--r--  include/linux/compiler-clang.h                       |  11
-rw-r--r--  include/linux/compiler-gcc.h                         |   6
-rw-r--r--  include/linux/compiler.h                             |  71
-rw-r--r--  include/linux/instrumented.h                         | 109
-rw-r--r--  include/linux/kcsan-checks.h                         | 430
-rw-r--r--  include/linux/kcsan.h                                |  59
-rw-r--r--  include/linux/sched.h                                |   4
-rw-r--r--  include/linux/seqlock.h                              |  51
-rw-r--r--  include/linux/uaccess.h                              |  14
14 files changed, 1274 insertions(+), 563 deletions(-)
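The diff below makes the same two mechanical changes to every generated wrapper: static inline becomes static __always_inline, so the wrappers are guaranteed to disappear into their callers and instrumentation is attributed to the real call site rather than to a shared out-of-line copy, and the bare kasan_check_*() calls become the combined instrument_atomic_*() helpers shown above. Taking atomic_read() from the first hunk as the representative case:

/* Before: KASAN only, and "inline" is merely a hint the compiler may ignore. */
static inline int atomic_read(const atomic_t *v)
{
	kasan_check_read(v, sizeof(*v));
	return arch_atomic_read(v);
}

/* After: KASAN + KCSAN via the common wrapper; __always_inline ensures no
 * out-of-line instance of the wrapper survives in instrumented builds.
 */
static __always_inline int atomic_read(const atomic_t *v)
{
	instrument_atomic_read(v, sizeof(*v));
	return arch_atomic_read(v);
}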
diff --git a/include/asm-generic/atomic-instrumented.h b/include/asm-generic/atomic-instrumented.h
index e8730c6b9fe2..379986e40159 100644
--- a/include/asm-generic/atomic-instrumented.h
+++ b/include/asm-generic/atomic-instrumented.h
@@ -18,1623 +18,1624 @@
#define _ASM_GENERIC_ATOMIC_INSTRUMENTED_H
#include <linux/build_bug.h>
-#include <linux/kasan-checks.h>
+#include <linux/compiler.h>
+#include <linux/instrumented.h>
-static inline int
+static __always_inline int
atomic_read(const atomic_t *v)
{
- kasan_check_read(v, sizeof(*v));
+ instrument_atomic_read(v, sizeof(*v));
return arch_atomic_read(v);
}
#define atomic_read atomic_read
#if defined(arch_atomic_read_acquire)
-static inline int
+static __always_inline int
atomic_read_acquire(const atomic_t *v)
{
- kasan_check_read(v, sizeof(*v));
+ instrument_atomic_read(v, sizeof(*v));
return arch_atomic_read_acquire(v);
}
#define atomic_read_acquire atomic_read_acquire
#endif
-static inline void
+static __always_inline void
atomic_set(atomic_t *v, int i)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
arch_atomic_set(v, i);
}
#define atomic_set atomic_set
#if defined(arch_atomic_set_release)
-static inline void
+static __always_inline void
atomic_set_release(atomic_t *v, int i)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
arch_atomic_set_release(v, i);
}
#define atomic_set_release atomic_set_release
#endif
-static inline void
+static __always_inline void
atomic_add(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
arch_atomic_add(i, v);
}
#define atomic_add atomic_add
#if !defined(arch_atomic_add_return_relaxed) || defined(arch_atomic_add_return)
-static inline int
+static __always_inline int
atomic_add_return(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_add_return(i, v);
}
#define atomic_add_return atomic_add_return
#endif
#if defined(arch_atomic_add_return_acquire)
-static inline int
+static __always_inline int
atomic_add_return_acquire(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_add_return_acquire(i, v);
}
#define atomic_add_return_acquire atomic_add_return_acquire
#endif
#if defined(arch_atomic_add_return_release)
-static inline int
+static __always_inline int
atomic_add_return_release(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_add_return_release(i, v);
}
#define atomic_add_return_release atomic_add_return_release
#endif
#if defined(arch_atomic_add_return_relaxed)
-static inline int
+static __always_inline int
atomic_add_return_relaxed(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_add_return_relaxed(i, v);
}
#define atomic_add_return_relaxed atomic_add_return_relaxed
#endif
#if !defined(arch_atomic_fetch_add_relaxed) || defined(arch_atomic_fetch_add)
-static inline int
+static __always_inline int
atomic_fetch_add(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_add(i, v);
}
#define atomic_fetch_add atomic_fetch_add
#endif
#if defined(arch_atomic_fetch_add_acquire)
-static inline int
+static __always_inline int
atomic_fetch_add_acquire(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_add_acquire(i, v);
}
#define atomic_fetch_add_acquire atomic_fetch_add_acquire
#endif
#if defined(arch_atomic_fetch_add_release)
-static inline int
+static __always_inline int
atomic_fetch_add_release(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_add_release(i, v);
}
#define atomic_fetch_add_release atomic_fetch_add_release
#endif
#if defined(arch_atomic_fetch_add_relaxed)
-static inline int
+static __always_inline int
atomic_fetch_add_relaxed(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_add_relaxed(i, v);
}
#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
#endif
-static inline void
+static __always_inline void
atomic_sub(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
arch_atomic_sub(i, v);
}
#define atomic_sub atomic_sub
#if !defined(arch_atomic_sub_return_relaxed) || defined(arch_atomic_sub_return)
-static inline int
+static __always_inline int
atomic_sub_return(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_sub_return(i, v);
}
#define atomic_sub_return atomic_sub_return
#endif
#if defined(arch_atomic_sub_return_acquire)
-static inline int
+static __always_inline int
atomic_sub_return_acquire(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_sub_return_acquire(i, v);
}
#define atomic_sub_return_acquire atomic_sub_return_acquire
#endif
#if defined(arch_atomic_sub_return_release)
-static inline int
+static __always_inline int
atomic_sub_return_release(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_sub_return_release(i, v);
}
#define atomic_sub_return_release atomic_sub_return_release
#endif
#if defined(arch_atomic_sub_return_relaxed)
-static inline int
+static __always_inline int
atomic_sub_return_relaxed(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_sub_return_relaxed(i, v);
}
#define atomic_sub_return_relaxed atomic_sub_return_relaxed
#endif
#if !defined(arch_atomic_fetch_sub_relaxed) || defined(arch_atomic_fetch_sub)
-static inline int
+static __always_inline int
atomic_fetch_sub(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_sub(i, v);
}
#define atomic_fetch_sub atomic_fetch_sub
#endif
#if defined(arch_atomic_fetch_sub_acquire)
-static inline int
+static __always_inline int
atomic_fetch_sub_acquire(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_sub_acquire(i, v);
}
#define atomic_fetch_sub_acquire atomic_fetch_sub_acquire
#endif
#if defined(arch_atomic_fetch_sub_release)
-static inline int
+static __always_inline int
atomic_fetch_sub_release(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_sub_release(i, v);
}
#define atomic_fetch_sub_release atomic_fetch_sub_release
#endif
#if defined(arch_atomic_fetch_sub_relaxed)
-static inline int
+static __always_inline int
atomic_fetch_sub_relaxed(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_sub_relaxed(i, v);
}
#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
#endif
#if defined(arch_atomic_inc)
-static inline void
+static __always_inline void
atomic_inc(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
arch_atomic_inc(v);
}
#define atomic_inc atomic_inc
#endif
#if defined(arch_atomic_inc_return)
-static inline int
+static __always_inline int
atomic_inc_return(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_inc_return(v);
}
#define atomic_inc_return atomic_inc_return
#endif
#if defined(arch_atomic_inc_return_acquire)
-static inline int
+static __always_inline int
atomic_inc_return_acquire(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_inc_return_acquire(v);
}
#define atomic_inc_return_acquire atomic_inc_return_acquire
#endif
#if defined(arch_atomic_inc_return_release)
-static inline int
+static __always_inline int
atomic_inc_return_release(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_inc_return_release(v);
}
#define atomic_inc_return_release atomic_inc_return_release
#endif
#if defined(arch_atomic_inc_return_relaxed)
-static inline int
+static __always_inline int
atomic_inc_return_relaxed(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_inc_return_relaxed(v);
}
#define atomic_inc_return_relaxed atomic_inc_return_relaxed
#endif
#if defined(arch_atomic_fetch_inc)
-static inline int
+static __always_inline int
atomic_fetch_inc(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_inc(v);
}
#define atomic_fetch_inc atomic_fetch_inc
#endif
#if defined(arch_atomic_fetch_inc_acquire)
-static inline int
+static __always_inline int
atomic_fetch_inc_acquire(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_inc_acquire(v);
}
#define atomic_fetch_inc_acquire atomic_fetch_inc_acquire
#endif
#if defined(arch_atomic_fetch_inc_release)
-static inline int
+static __always_inline int
atomic_fetch_inc_release(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_inc_release(v);
}
#define atomic_fetch_inc_release atomic_fetch_inc_release
#endif
#if defined(arch_atomic_fetch_inc_relaxed)
-static inline int
+static __always_inline int
atomic_fetch_inc_relaxed(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_inc_relaxed(v);
}
#define atomic_fetch_inc_relaxed atomic_fetch_inc_relaxed
#endif
#if defined(arch_atomic_dec)
-static inline void
+static __always_inline void
atomic_dec(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
arch_atomic_dec(v);
}
#define atomic_dec atomic_dec
#endif
#if defined(arch_atomic_dec_return)
-static inline int
+static __always_inline int
atomic_dec_return(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_dec_return(v);
}
#define atomic_dec_return atomic_dec_return
#endif
#if defined(arch_atomic_dec_return_acquire)
-static inline int
+static __always_inline int
atomic_dec_return_acquire(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_dec_return_acquire(v);
}
#define atomic_dec_return_acquire atomic_dec_return_acquire
#endif
#if defined(arch_atomic_dec_return_release)
-static inline int
+static __always_inline int
atomic_dec_return_release(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_dec_return_release(v);
}
#define atomic_dec_return_release atomic_dec_return_release
#endif
#if defined(arch_atomic_dec_return_relaxed)
-static inline int
+static __always_inline int
atomic_dec_return_relaxed(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_dec_return_relaxed(v);
}
#define atomic_dec_return_relaxed atomic_dec_return_relaxed
#endif
#if defined(arch_atomic_fetch_dec)
-static inline int
+static __always_inline int
atomic_fetch_dec(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_dec(v);
}
#define atomic_fetch_dec atomic_fetch_dec
#endif
#if defined(arch_atomic_fetch_dec_acquire)
-static inline int
+static __always_inline int
atomic_fetch_dec_acquire(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_dec_acquire(v);
}
#define atomic_fetch_dec_acquire atomic_fetch_dec_acquire
#endif
#if defined(arch_atomic_fetch_dec_release)
-static inline int
+static __always_inline int
atomic_fetch_dec_release(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_dec_release(v);
}
#define atomic_fetch_dec_release atomic_fetch_dec_release
#endif
#if defined(arch_atomic_fetch_dec_relaxed)
-static inline int
+static __always_inline int
atomic_fetch_dec_relaxed(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_dec_relaxed(v);
}
#define atomic_fetch_dec_relaxed atomic_fetch_dec_relaxed
#endif
-static inline void
+static __always_inline void
atomic_and(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
arch_atomic_and(i, v);
}
#define atomic_and atomic_and
#if !defined(arch_atomic_fetch_and_relaxed) || defined(arch_atomic_fetch_and)
-static inline int
+static __always_inline int
atomic_fetch_and(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_and(i, v);
}
#define atomic_fetch_and atomic_fetch_and
#endif
#if defined(arch_atomic_fetch_and_acquire)
-static inline int
+static __always_inline int
atomic_fetch_and_acquire(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_and_acquire(i, v);
}
#define atomic_fetch_and_acquire atomic_fetch_and_acquire
#endif
#if defined(arch_atomic_fetch_and_release)
-static inline int
+static __always_inline int
atomic_fetch_and_release(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_and_release(i, v);
}
#define atomic_fetch_and_release atomic_fetch_and_release
#endif
#if defined(arch_atomic_fetch_and_relaxed)
-static inline int
+static __always_inline int
atomic_fetch_and_relaxed(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_and_relaxed(i, v);
}
#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
#endif
#if defined(arch_atomic_andnot)
-static inline void
+static __always_inline void
atomic_andnot(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
arch_atomic_andnot(i, v);
}
#define atomic_andnot atomic_andnot
#endif
#if defined(arch_atomic_fetch_andnot)
-static inline int
+static __always_inline int
atomic_fetch_andnot(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_andnot(i, v);
}
#define atomic_fetch_andnot atomic_fetch_andnot
#endif
#if defined(arch_atomic_fetch_andnot_acquire)
-static inline int
+static __always_inline int
atomic_fetch_andnot_acquire(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_andnot_acquire(i, v);
}
#define atomic_fetch_andnot_acquire atomic_fetch_andnot_acquire
#endif
#if defined(arch_atomic_fetch_andnot_release)
-static inline int
+static __always_inline int
atomic_fetch_andnot_release(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_andnot_release(i, v);
}
#define atomic_fetch_andnot_release atomic_fetch_andnot_release
#endif
#if defined(arch_atomic_fetch_andnot_relaxed)
-static inline int
+static __always_inline int
atomic_fetch_andnot_relaxed(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_andnot_relaxed(i, v);
}
#define atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed
#endif
-static inline void
+static __always_inline void
atomic_or(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
arch_atomic_or(i, v);
}
#define atomic_or atomic_or
#if !defined(arch_atomic_fetch_or_relaxed) || defined(arch_atomic_fetch_or)
-static inline int
+static __always_inline int
atomic_fetch_or(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_or(i, v);
}
#define atomic_fetch_or atomic_fetch_or
#endif
#if defined(arch_atomic_fetch_or_acquire)
-static inline int
+static __always_inline int
atomic_fetch_or_acquire(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_or_acquire(i, v);
}
#define atomic_fetch_or_acquire atomic_fetch_or_acquire
#endif
#if defined(arch_atomic_fetch_or_release)
-static inline int
+static __always_inline int
atomic_fetch_or_release(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_or_release(i, v);
}
#define atomic_fetch_or_release atomic_fetch_or_release
#endif
#if defined(arch_atomic_fetch_or_relaxed)
-static inline int
+static __always_inline int
atomic_fetch_or_relaxed(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_or_relaxed(i, v);
}
#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
#endif
-static inline void
+static __always_inline void
atomic_xor(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
arch_atomic_xor(i, v);
}
#define atomic_xor atomic_xor
#if !defined(arch_atomic_fetch_xor_relaxed) || defined(arch_atomic_fetch_xor)
-static inline int
+static __always_inline int
atomic_fetch_xor(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_xor(i, v);
}
#define atomic_fetch_xor atomic_fetch_xor
#endif
#if defined(arch_atomic_fetch_xor_acquire)
-static inline int
+static __always_inline int
atomic_fetch_xor_acquire(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_xor_acquire(i, v);
}
#define atomic_fetch_xor_acquire atomic_fetch_xor_acquire
#endif
#if defined(arch_atomic_fetch_xor_release)
-static inline int
+static __always_inline int
atomic_fetch_xor_release(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_xor_release(i, v);
}
#define atomic_fetch_xor_release atomic_fetch_xor_release
#endif
#if defined(arch_atomic_fetch_xor_relaxed)
-static inline int
+static __always_inline int
atomic_fetch_xor_relaxed(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_xor_relaxed(i, v);
}
#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed
#endif
#if !defined(arch_atomic_xchg_relaxed) || defined(arch_atomic_xchg)
-static inline int
+static __always_inline int
atomic_xchg(atomic_t *v, int i)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_xchg(v, i);
}
#define atomic_xchg atomic_xchg
#endif
#if defined(arch_atomic_xchg_acquire)
-static inline int
+static __always_inline int
atomic_xchg_acquire(atomic_t *v, int i)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_xchg_acquire(v, i);
}
#define atomic_xchg_acquire atomic_xchg_acquire
#endif
#if defined(arch_atomic_xchg_release)
-static inline int
+static __always_inline int
atomic_xchg_release(atomic_t *v, int i)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_xchg_release(v, i);
}
#define atomic_xchg_release atomic_xchg_release
#endif
#if defined(arch_atomic_xchg_relaxed)
-static inline int
+static __always_inline int
atomic_xchg_relaxed(atomic_t *v, int i)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_xchg_relaxed(v, i);
}
#define atomic_xchg_relaxed atomic_xchg_relaxed
#endif
#if !defined(arch_atomic_cmpxchg_relaxed) || defined(arch_atomic_cmpxchg)
-static inline int
+static __always_inline int
atomic_cmpxchg(atomic_t *v, int old, int new)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_cmpxchg(v, old, new);
}
#define atomic_cmpxchg atomic_cmpxchg
#endif
#if defined(arch_atomic_cmpxchg_acquire)
-static inline int
+static __always_inline int
atomic_cmpxchg_acquire(atomic_t *v, int old, int new)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_cmpxchg_acquire(v, old, new);
}
#define atomic_cmpxchg_acquire atomic_cmpxchg_acquire
#endif
#if defined(arch_atomic_cmpxchg_release)
-static inline int
+static __always_inline int
atomic_cmpxchg_release(atomic_t *v, int old, int new)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_cmpxchg_release(v, old, new);
}
#define atomic_cmpxchg_release atomic_cmpxchg_release
#endif
#if defined(arch_atomic_cmpxchg_relaxed)
-static inline int
+static __always_inline int
atomic_cmpxchg_relaxed(atomic_t *v, int old, int new)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_cmpxchg_relaxed(v, old, new);
}
#define atomic_cmpxchg_relaxed atomic_cmpxchg_relaxed
#endif
#if defined(arch_atomic_try_cmpxchg)
-static inline bool
+static __always_inline bool
atomic_try_cmpxchg(atomic_t *v, int *old, int new)
{
- kasan_check_write(v, sizeof(*v));
- kasan_check_write(old, sizeof(*old));
+ instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_write(old, sizeof(*old));
return arch_atomic_try_cmpxchg(v, old, new);
}
#define atomic_try_cmpxchg atomic_try_cmpxchg
#endif
#if defined(arch_atomic_try_cmpxchg_acquire)
-static inline bool
+static __always_inline bool
atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
{
- kasan_check_write(v, sizeof(*v));
- kasan_check_write(old, sizeof(*old));
+ instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_write(old, sizeof(*old));
return arch_atomic_try_cmpxchg_acquire(v, old, new);
}
#define atomic_try_cmpxchg_acquire atomic_try_cmpxchg_acquire
#endif
#if defined(arch_atomic_try_cmpxchg_release)
-static inline bool
+static __always_inline bool
atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
{
- kasan_check_write(v, sizeof(*v));
- kasan_check_write(old, sizeof(*old));
+ instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_write(old, sizeof(*old));
return arch_atomic_try_cmpxchg_release(v, old, new);
}
#define atomic_try_cmpxchg_release atomic_try_cmpxchg_release
#endif
#if defined(arch_atomic_try_cmpxchg_relaxed)
-static inline bool
+static __always_inline bool
atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
{
- kasan_check_write(v, sizeof(*v));
- kasan_check_write(old, sizeof(*old));
+ instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_write(old, sizeof(*old));
return arch_atomic_try_cmpxchg_relaxed(v, old, new);
}
#define atomic_try_cmpxchg_relaxed atomic_try_cmpxchg_relaxed
#endif
#if defined(arch_atomic_sub_and_test)
-static inline bool
+static __always_inline bool
atomic_sub_and_test(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_sub_and_test(i, v);
}
#define atomic_sub_and_test atomic_sub_and_test
#endif
#if defined(arch_atomic_dec_and_test)
-static inline bool
+static __always_inline bool
atomic_dec_and_test(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_dec_and_test(v);
}
#define atomic_dec_and_test atomic_dec_and_test
#endif
#if defined(arch_atomic_inc_and_test)
-static inline bool
+static __always_inline bool
atomic_inc_and_test(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_inc_and_test(v);
}
#define atomic_inc_and_test atomic_inc_and_test
#endif
#if defined(arch_atomic_add_negative)
-static inline bool
+static __always_inline bool
atomic_add_negative(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_add_negative(i, v);
}
#define atomic_add_negative atomic_add_negative
#endif
#if defined(arch_atomic_fetch_add_unless)
-static inline int
+static __always_inline int
atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_add_unless(v, a, u);
}
#define atomic_fetch_add_unless atomic_fetch_add_unless
#endif
#if defined(arch_atomic_add_unless)
-static inline bool
+static __always_inline bool
atomic_add_unless(atomic_t *v, int a, int u)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_add_unless(v, a, u);
}
#define atomic_add_unless atomic_add_unless
#endif
#if defined(arch_atomic_inc_not_zero)
-static inline bool
+static __always_inline bool
atomic_inc_not_zero(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_inc_not_zero(v);
}
#define atomic_inc_not_zero atomic_inc_not_zero
#endif
#if defined(arch_atomic_inc_unless_negative)
-static inline bool
+static __always_inline bool
atomic_inc_unless_negative(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_inc_unless_negative(v);
}
#define atomic_inc_unless_negative atomic_inc_unless_negative
#endif
#if defined(arch_atomic_dec_unless_positive)
-static inline bool
+static __always_inline bool
atomic_dec_unless_positive(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_dec_unless_positive(v);
}
#define atomic_dec_unless_positive atomic_dec_unless_positive
#endif
#if defined(arch_atomic_dec_if_positive)
-static inline int
+static __always_inline int
atomic_dec_if_positive(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_dec_if_positive(v);
}
#define atomic_dec_if_positive atomic_dec_if_positive
#endif
-static inline s64
+static __always_inline s64
atomic64_read(const atomic64_t *v)
{
- kasan_check_read(v, sizeof(*v));
+ instrument_atomic_read(v, sizeof(*v));
return arch_atomic64_read(v);
}
#define atomic64_read atomic64_read
#if defined(arch_atomic64_read_acquire)
-static inline s64
+static __always_inline s64
atomic64_read_acquire(const atomic64_t *v)
{
- kasan_check_read(v, sizeof(*v));
+ instrument_atomic_read(v, sizeof(*v));
return arch_atomic64_read_acquire(v);
}
#define atomic64_read_acquire atomic64_read_acquire
#endif
-static inline void
+static __always_inline void
atomic64_set(atomic64_t *v, s64 i)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
arch_atomic64_set(v, i);
}
#define atomic64_set atomic64_set
#if defined(arch_atomic64_set_release)
-static inline void
+static __always_inline void
atomic64_set_release(atomic64_t *v, s64 i)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
arch_atomic64_set_release(v, i);
}
#define atomic64_set_release atomic64_set_release
#endif
-static inline void
+static __always_inline void
atomic64_add(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
arch_atomic64_add(i, v);
}
#define atomic64_add atomic64_add
#if !defined(arch_atomic64_add_return_relaxed) || defined(arch_atomic64_add_return)
-static inline s64
+static __always_inline s64
atomic64_add_return(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_add_return(i, v);
}
#define atomic64_add_return atomic64_add_return
#endif
#if defined(arch_atomic64_add_return_acquire)
-static inline s64
+static __always_inline s64
atomic64_add_return_acquire(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_add_return_acquire(i, v);
}
#define atomic64_add_return_acquire atomic64_add_return_acquire
#endif
#if defined(arch_atomic64_add_return_release)
-static inline s64
+static __always_inline s64
atomic64_add_return_release(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_add_return_release(i, v);
}
#define atomic64_add_return_release atomic64_add_return_release
#endif
#if defined(arch_atomic64_add_return_relaxed)
-static inline s64
+static __always_inline s64
atomic64_add_return_relaxed(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_add_return_relaxed(i, v);
}
#define atomic64_add_return_relaxed atomic64_add_return_relaxed
#endif
#if !defined(arch_atomic64_fetch_add_relaxed) || defined(arch_atomic64_fetch_add)
-static inline s64
+static __always_inline s64
atomic64_fetch_add(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_add(i, v);
}
#define atomic64_fetch_add atomic64_fetch_add
#endif
#if defined(arch_atomic64_fetch_add_acquire)
-static inline s64
+static __always_inline s64
atomic64_fetch_add_acquire(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_add_acquire(i, v);
}
#define atomic64_fetch_add_acquire atomic64_fetch_add_acquire
#endif
#if defined(arch_atomic64_fetch_add_release)
-static inline s64
+static __always_inline s64
atomic64_fetch_add_release(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_add_release(i, v);
}
#define atomic64_fetch_add_release atomic64_fetch_add_release
#endif
#if defined(arch_atomic64_fetch_add_relaxed)
-static inline s64
+static __always_inline s64
atomic64_fetch_add_relaxed(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_add_relaxed(i, v);
}
#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
#endif
-static inline void
+static __always_inline void
atomic64_sub(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
arch_atomic64_sub(i, v);
}
#define atomic64_sub atomic64_sub
#if !defined(arch_atomic64_sub_return_relaxed) || defined(arch_atomic64_sub_return)
-static inline s64
+static __always_inline s64
atomic64_sub_return(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_sub_return(i, v);
}
#define atomic64_sub_return atomic64_sub_return
#endif
#if defined(arch_atomic64_sub_return_acquire)
-static inline s64
+static __always_inline s64
atomic64_sub_return_acquire(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_sub_return_acquire(i, v);
}
#define atomic64_sub_return_acquire atomic64_sub_return_acquire
#endif
#if defined(arch_atomic64_sub_return_release)
-static inline s64
+static __always_inline s64
atomic64_sub_return_release(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_sub_return_release(i, v);
}
#define atomic64_sub_return_release atomic64_sub_return_release
#endif
#if defined(arch_atomic64_sub_return_relaxed)
-static inline s64
+static __always_inline s64
atomic64_sub_return_relaxed(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_sub_return_relaxed(i, v);
}
#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
#endif
#if !defined(arch_atomic64_fetch_sub_relaxed) || defined(arch_atomic64_fetch_sub)
-static inline s64
+static __always_inline s64
atomic64_fetch_sub(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_sub(i, v);
}
#define atomic64_fetch_sub atomic64_fetch_sub
#endif
#if defined(arch_atomic64_fetch_sub_acquire)
-static inline s64
+static __always_inline s64
atomic64_fetch_sub_acquire(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_sub_acquire(i, v);
}
#define atomic64_fetch_sub_acquire atomic64_fetch_sub_acquire
#endif
#if defined(arch_atomic64_fetch_sub_release)
-static inline s64
+static __always_inline s64
atomic64_fetch_sub_release(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_sub_release(i, v);
}
#define atomic64_fetch_sub_release atomic64_fetch_sub_release
#endif
#if defined(arch_atomic64_fetch_sub_relaxed)
-static inline s64
+static __always_inline s64
atomic64_fetch_sub_relaxed(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_sub_relaxed(i, v);
}
#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
#endif
#if defined(arch_atomic64_inc)
-static inline void
+static __always_inline void
atomic64_inc(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
arch_atomic64_inc(v);
}
#define atomic64_inc atomic64_inc
#endif
#if defined(arch_atomic64_inc_return)
-static inline s64
+static __always_inline s64
atomic64_inc_return(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_inc_return(v);
}
#define atomic64_inc_return atomic64_inc_return
#endif
#if defined(arch_atomic64_inc_return_acquire)
-static inline s64
+static __always_inline s64
atomic64_inc_return_acquire(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_inc_return_acquire(v);
}
#define atomic64_inc_return_acquire atomic64_inc_return_acquire
#endif
#if defined(arch_atomic64_inc_return_release)
-static inline s64
+static __always_inline s64
atomic64_inc_return_release(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_inc_return_release(v);
}
#define atomic64_inc_return_release atomic64_inc_return_release
#endif
#if defined(arch_atomic64_inc_return_relaxed)
-static inline s64
+static __always_inline s64
atomic64_inc_return_relaxed(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_inc_return_relaxed(v);
}
#define atomic64_inc_return_relaxed atomic64_inc_return_relaxed
#endif
#if defined(arch_atomic64_fetch_inc)
-static inline s64
+static __always_inline s64
atomic64_fetch_inc(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_inc(v);
}
#define atomic64_fetch_inc atomic64_fetch_inc
#endif
#if defined(arch_atomic64_fetch_inc_acquire)
-static inline s64
+static __always_inline s64
atomic64_fetch_inc_acquire(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_inc_acquire(v);
}
#define atomic64_fetch_inc_acquire atomic64_fetch_inc_acquire
#endif
#if defined(arch_atomic64_fetch_inc_release)
-static inline s64
+static __always_inline s64
atomic64_fetch_inc_release(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_inc_release(v);
}
#define atomic64_fetch_inc_release atomic64_fetch_inc_release
#endif
#if defined(arch_atomic64_fetch_inc_relaxed)
-static inline s64
+static __always_inline s64
atomic64_fetch_inc_relaxed(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_inc_relaxed(v);
}
#define atomic64_fetch_inc_relaxed atomic64_fetch_inc_relaxed
#endif
#if defined(arch_atomic64_dec)
-static inline void
+static __always_inline void
atomic64_dec(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
arch_atomic64_dec(v);
}
#define atomic64_dec atomic64_dec
#endif
#if defined(arch_atomic64_dec_return)
-static inline s64
+static __always_inline s64
atomic64_dec_return(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_dec_return(v);
}
#define atomic64_dec_return atomic64_dec_return
#endif
#if defined(arch_atomic64_dec_return_acquire)
-static inline s64
+static __always_inline s64
atomic64_dec_return_acquire(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_dec_return_acquire(v);
}
#define atomic64_dec_return_acquire atomic64_dec_return_acquire
#endif
#if defined(arch_atomic64_dec_return_release)
-static inline s64
+static __always_inline s64
atomic64_dec_return_release(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_dec_return_release(v);
}
#define atomic64_dec_return_release atomic64_dec_return_release
#endif
#if defined(arch_atomic64_dec_return_relaxed)
-static inline s64
+static __always_inline s64
atomic64_dec_return_relaxed(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_dec_return_relaxed(v);
}
#define atomic64_dec_return_relaxed atomic64_dec_return_relaxed
#endif
#if defined(arch_atomic64_fetch_dec)
-static inline s64
+static __always_inline s64
atomic64_fetch_dec(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_dec(v);
}
#define atomic64_fetch_dec atomic64_fetch_dec
#endif
#if defined(arch_atomic64_fetch_dec_acquire)
-static inline s64
+static __always_inline s64
atomic64_fetch_dec_acquire(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_dec_acquire(v);
}
#define atomic64_fetch_dec_acquire atomic64_fetch_dec_acquire
#endif
#if defined(arch_atomic64_fetch_dec_release)
-static inline s64
+static __always_inline s64
atomic64_fetch_dec_release(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_dec_release(v);
}
#define atomic64_fetch_dec_release atomic64_fetch_dec_release
#endif
#if defined(arch_atomic64_fetch_dec_relaxed)
-static inline s64
+static __always_inline s64
atomic64_fetch_dec_relaxed(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_dec_relaxed(v);
}
#define atomic64_fetch_dec_relaxed atomic64_fetch_dec_relaxed
#endif
-static inline void
+static __always_inline void
atomic64_and(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
arch_atomic64_and(i, v);
}
#define atomic64_and atomic64_and
#if !defined(arch_atomic64_fetch_and_relaxed) || defined(arch_atomic64_fetch_and)
-static inline s64
+static __always_inline s64
atomic64_fetch_and(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_and(i, v);
}
#define atomic64_fetch_and atomic64_fetch_and
#endif
#if defined(arch_atomic64_fetch_and_acquire)
-static inline s64
+static __always_inline s64
atomic64_fetch_and_acquire(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_and_acquire(i, v);
}
#define atomic64_fetch_and_acquire atomic64_fetch_and_acquire
#endif
#if defined(arch_atomic64_fetch_and_release)
-static inline s64
+static __always_inline s64
atomic64_fetch_and_release(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_and_release(i, v);
}
#define atomic64_fetch_and_release atomic64_fetch_and_release
#endif
#if defined(arch_atomic64_fetch_and_relaxed)
-static inline s64
+static __always_inline s64
atomic64_fetch_and_relaxed(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_and_relaxed(i, v);
}
#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
#endif
#if defined(arch_atomic64_andnot)
-static inline void
+static __always_inline void
atomic64_andnot(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
arch_atomic64_andnot(i, v);
}
#define atomic64_andnot atomic64_andnot
#endif
#if defined(arch_atomic64_fetch_andnot)
-static inline s64
+static __always_inline s64
atomic64_fetch_andnot(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_andnot(i, v);
}
#define atomic64_fetch_andnot atomic64_fetch_andnot
#endif
#if defined(arch_atomic64_fetch_andnot_acquire)
-static inline s64
+static __always_inline s64
atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_andnot_acquire(i, v);
}
#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot_acquire
#endif
#if defined(arch_atomic64_fetch_andnot_release)
-static inline s64
+static __always_inline s64
atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_andnot_release(i, v);
}
#define atomic64_fetch_andnot_release atomic64_fetch_andnot_release
#endif
#if defined(arch_atomic64_fetch_andnot_relaxed)
-static inline s64
+static __always_inline s64
atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_andnot_relaxed(i, v);
}
#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed
#endif
-static inline void
+static __always_inline void
atomic64_or(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
arch_atomic64_or(i, v);
}
#define atomic64_or atomic64_or
#if !defined(arch_atomic64_fetch_or_relaxed) || defined(arch_atomic64_fetch_or)
-static inline s64
+static __always_inline s64
atomic64_fetch_or(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_or(i, v);
}
#define atomic64_fetch_or atomic64_fetch_or
#endif
#if defined(arch_atomic64_fetch_or_acquire)
-static inline s64
+static __always_inline s64
atomic64_fetch_or_acquire(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_or_acquire(i, v);
}
#define atomic64_fetch_or_acquire atomic64_fetch_or_acquire
#endif
#if defined(arch_atomic64_fetch_or_release)
-static inline s64
+static __always_inline s64
atomic64_fetch_or_release(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_or_release(i, v);
}
#define atomic64_fetch_or_release atomic64_fetch_or_release
#endif
#if defined(arch_atomic64_fetch_or_relaxed)
-static inline s64
+static __always_inline s64
atomic64_fetch_or_relaxed(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_or_relaxed(i, v);
}
#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
#endif
-static inline void
+static __always_inline void
atomic64_xor(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
arch_atomic64_xor(i, v);
}
#define atomic64_xor atomic64_xor
#if !defined(arch_atomic64_fetch_xor_relaxed) || defined(arch_atomic64_fetch_xor)
-static inline s64
+static __always_inline s64
atomic64_fetch_xor(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_xor(i, v);
}
#define atomic64_fetch_xor atomic64_fetch_xor
#endif
#if defined(arch_atomic64_fetch_xor_acquire)
-static inline s64
+static __always_inline s64
atomic64_fetch_xor_acquire(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_xor_acquire(i, v);
}
#define atomic64_fetch_xor_acquire atomic64_fetch_xor_acquire
#endif
#if defined(arch_atomic64_fetch_xor_release)
-static inline s64
+static __always_inline s64
atomic64_fetch_xor_release(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_xor_release(i, v);
}
#define atomic64_fetch_xor_release atomic64_fetch_xor_release
#endif
#if defined(arch_atomic64_fetch_xor_relaxed)
-static inline s64
+static __always_inline s64
atomic64_fetch_xor_relaxed(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_xor_relaxed(i, v);
}
#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
#endif
#if !defined(arch_atomic64_xchg_relaxed) || defined(arch_atomic64_xchg)
-static inline s64
+static __always_inline s64
atomic64_xchg(atomic64_t *v, s64 i)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_xchg(v, i);
}
#define atomic64_xchg atomic64_xchg
#endif
#if defined(arch_atomic64_xchg_acquire)
-static inline s64
+static __always_inline s64
atomic64_xchg_acquire(atomic64_t *v, s64 i)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_xchg_acquire(v, i);
}
#define atomic64_xchg_acquire atomic64_xchg_acquire
#endif
#if defined(arch_atomic64_xchg_release)
-static inline s64
+static __always_inline s64
atomic64_xchg_release(atomic64_t *v, s64 i)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_xchg_release(v, i);
}
#define atomic64_xchg_release atomic64_xchg_release
#endif
#if defined(arch_atomic64_xchg_relaxed)
-static inline s64
+static __always_inline s64
atomic64_xchg_relaxed(atomic64_t *v, s64 i)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_xchg_relaxed(v, i);
}
#define atomic64_xchg_relaxed atomic64_xchg_relaxed
#endif
#if !defined(arch_atomic64_cmpxchg_relaxed) || defined(arch_atomic64_cmpxchg)
-static inline s64
+static __always_inline s64
atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_cmpxchg(v, old, new);
}
#define atomic64_cmpxchg atomic64_cmpxchg
#endif
#if defined(arch_atomic64_cmpxchg_acquire)
-static inline s64
+static __always_inline s64
atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_cmpxchg_acquire(v, old, new);
}
#define atomic64_cmpxchg_acquire atomic64_cmpxchg_acquire
#endif
#if defined(arch_atomic64_cmpxchg_release)
-static inline s64
+static __always_inline s64
atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_cmpxchg_release(v, old, new);
}
#define atomic64_cmpxchg_release atomic64_cmpxchg_release
#endif
#if defined(arch_atomic64_cmpxchg_relaxed)
-static inline s64
+static __always_inline s64
atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_cmpxchg_relaxed(v, old, new);
}
#define atomic64_cmpxchg_relaxed atomic64_cmpxchg_relaxed
#endif
#if defined(arch_atomic64_try_cmpxchg)
-static inline bool
+static __always_inline bool
atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
{
- kasan_check_write(v, sizeof(*v));
- kasan_check_write(old, sizeof(*old));
+ instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_write(old, sizeof(*old));
return arch_atomic64_try_cmpxchg(v, old, new);
}
#define atomic64_try_cmpxchg atomic64_try_cmpxchg
#endif
#if defined(arch_atomic64_try_cmpxchg_acquire)
-static inline bool
+static __always_inline bool
atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
{
- kasan_check_write(v, sizeof(*v));
- kasan_check_write(old, sizeof(*old));
+ instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_write(old, sizeof(*old));
return arch_atomic64_try_cmpxchg_acquire(v, old, new);
}
#define atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg_acquire
#endif
#if defined(arch_atomic64_try_cmpxchg_release)
-static inline bool
+static __always_inline bool
atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
{
- kasan_check_write(v, sizeof(*v));
- kasan_check_write(old, sizeof(*old));
+ instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_write(old, sizeof(*old));
return arch_atomic64_try_cmpxchg_release(v, old, new);
}
#define atomic64_try_cmpxchg_release atomic64_try_cmpxchg_release
#endif
#if defined(arch_atomic64_try_cmpxchg_relaxed)
-static inline bool
+static __always_inline bool
atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
{
- kasan_check_write(v, sizeof(*v));
- kasan_check_write(old, sizeof(*old));
+ instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_write(old, sizeof(*old));
return arch_atomic64_try_cmpxchg_relaxed(v, old, new);
}
#define atomic64_try_cmpxchg_relaxed atomic64_try_cmpxchg_relaxed
#endif
#if defined(arch_atomic64_sub_and_test)
-static inline bool
+static __always_inline bool
atomic64_sub_and_test(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_sub_and_test(i, v);
}
#define atomic64_sub_and_test atomic64_sub_and_test
#endif
#if defined(arch_atomic64_dec_and_test)
-static inline bool
+static __always_inline bool
atomic64_dec_and_test(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_dec_and_test(v);
}
#define atomic64_dec_and_test atomic64_dec_and_test
#endif
#if defined(arch_atomic64_inc_and_test)
-static inline bool
+static __always_inline bool
atomic64_inc_and_test(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_inc_and_test(v);
}
#define atomic64_inc_and_test atomic64_inc_and_test
#endif
#if defined(arch_atomic64_add_negative)
-static inline bool
+static __always_inline bool
atomic64_add_negative(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_add_negative(i, v);
}
#define atomic64_add_negative atomic64_add_negative
#endif
#if defined(arch_atomic64_fetch_add_unless)
-static inline s64
+static __always_inline s64
atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_add_unless(v, a, u);
}
#define atomic64_fetch_add_unless atomic64_fetch_add_unless
#endif
#if defined(arch_atomic64_add_unless)
-static inline bool
+static __always_inline bool
atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_add_unless(v, a, u);
}
#define atomic64_add_unless atomic64_add_unless
#endif
#if defined(arch_atomic64_inc_not_zero)
-static inline bool
+static __always_inline bool
atomic64_inc_not_zero(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_inc_not_zero(v);
}
#define atomic64_inc_not_zero atomic64_inc_not_zero
#endif
#if defined(arch_atomic64_inc_unless_negative)
-static inline bool
+static __always_inline bool
atomic64_inc_unless_negative(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_inc_unless_negative(v);
}
#define atomic64_inc_unless_negative atomic64_inc_unless_negative
#endif
#if defined(arch_atomic64_dec_unless_positive)
-static inline bool
+static __always_inline bool
atomic64_dec_unless_positive(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_dec_unless_positive(v);
}
#define atomic64_dec_unless_positive atomic64_dec_unless_positive
#endif
#if defined(arch_atomic64_dec_if_positive)
-static inline s64
+static __always_inline s64
atomic64_dec_if_positive(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_dec_if_positive(v);
}
#define atomic64_dec_if_positive atomic64_dec_if_positive
@@ -1644,7 +1645,7 @@ atomic64_dec_if_positive(atomic64_t *v)
#define xchg(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
arch_xchg(__ai_ptr, __VA_ARGS__); \
})
#endif
@@ -1653,7 +1654,7 @@ atomic64_dec_if_positive(atomic64_t *v)
#define xchg_acquire(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
arch_xchg_acquire(__ai_ptr, __VA_ARGS__); \
})
#endif
@@ -1662,7 +1663,7 @@ atomic64_dec_if_positive(atomic64_t *v)
#define xchg_release(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
arch_xchg_release(__ai_ptr, __VA_ARGS__); \
})
#endif
@@ -1671,7 +1672,7 @@ atomic64_dec_if_positive(atomic64_t *v)
#define xchg_relaxed(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
arch_xchg_relaxed(__ai_ptr, __VA_ARGS__); \
})
#endif
@@ -1680,7 +1681,7 @@ atomic64_dec_if_positive(atomic64_t *v)
#define cmpxchg(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
arch_cmpxchg(__ai_ptr, __VA_ARGS__); \
})
#endif
@@ -1689,7 +1690,7 @@ atomic64_dec_if_positive(atomic64_t *v)
#define cmpxchg_acquire(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
arch_cmpxchg_acquire(__ai_ptr, __VA_ARGS__); \
})
#endif
@@ -1698,7 +1699,7 @@ atomic64_dec_if_positive(atomic64_t *v)
#define cmpxchg_release(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
arch_cmpxchg_release(__ai_ptr, __VA_ARGS__); \
})
#endif
@@ -1707,7 +1708,7 @@ atomic64_dec_if_positive(atomic64_t *v)
#define cmpxchg_relaxed(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
arch_cmpxchg_relaxed(__ai_ptr, __VA_ARGS__); \
})
#endif
@@ -1716,7 +1717,7 @@ atomic64_dec_if_positive(atomic64_t *v)
#define cmpxchg64(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
arch_cmpxchg64(__ai_ptr, __VA_ARGS__); \
})
#endif
@@ -1725,7 +1726,7 @@ atomic64_dec_if_positive(atomic64_t *v)
#define cmpxchg64_acquire(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
arch_cmpxchg64_acquire(__ai_ptr, __VA_ARGS__); \
})
#endif
@@ -1734,7 +1735,7 @@ atomic64_dec_if_positive(atomic64_t *v)
#define cmpxchg64_release(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
arch_cmpxchg64_release(__ai_ptr, __VA_ARGS__); \
})
#endif
@@ -1743,7 +1744,7 @@ atomic64_dec_if_positive(atomic64_t *v)
#define cmpxchg64_relaxed(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
arch_cmpxchg64_relaxed(__ai_ptr, __VA_ARGS__); \
})
#endif
@@ -1751,28 +1752,28 @@ atomic64_dec_if_positive(atomic64_t *v)
#define cmpxchg_local(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
arch_cmpxchg_local(__ai_ptr, __VA_ARGS__); \
})
#define cmpxchg64_local(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
arch_cmpxchg64_local(__ai_ptr, __VA_ARGS__); \
})
#define sync_cmpxchg(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
arch_sync_cmpxchg(__ai_ptr, __VA_ARGS__); \
})
#define cmpxchg_double(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, 2 * sizeof(*__ai_ptr)); \
+ instrument_atomic_write(__ai_ptr, 2 * sizeof(*__ai_ptr)); \
arch_cmpxchg_double(__ai_ptr, __VA_ARGS__); \
})
@@ -1780,9 +1781,9 @@ atomic64_dec_if_positive(atomic64_t *v)
#define cmpxchg_double_local(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, 2 * sizeof(*__ai_ptr)); \
+ instrument_atomic_write(__ai_ptr, 2 * sizeof(*__ai_ptr)); \
arch_cmpxchg_double_local(__ai_ptr, __VA_ARGS__); \
})
#endif /* _ASM_GENERIC_ATOMIC_INSTRUMENTED_H */
-// b29b625d5de9280f680e42c7be859b55b15e5f6a
+// 89bf97f3a7509b740845e51ddf31055b48a81f40
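
For orientation, a minimal sketch of how the instrumented wrappers above are exercised; the helper below is hypothetical and not part of the patch. Every wrapper first runs the KASAN/KCSAN hook on the affected address, then falls through to the arch_*() primitive.

/* Hypothetical caller; shows where the instrumentation hooks fire. */
static int add_capped(atomic_t *v, int delta, int cap)
{
	int old = atomic_read(v);	/* instrument_atomic_read(v, sizeof(*v)) */
	int new;

	do {
		new = old + delta;
		if (new > cap)
			new = cap;
	} while (!atomic_try_cmpxchg(v, &old, new));	/* hook, then arch op */

	return new;
}
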
diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
index 881c7e27af28..073cf40f431b 100644
--- a/include/asm-generic/atomic-long.h
+++ b/include/asm-generic/atomic-long.h
@@ -6,6 +6,7 @@
#ifndef _ASM_GENERIC_ATOMIC_LONG_H
#define _ASM_GENERIC_ATOMIC_LONG_H
+#include <linux/compiler.h>
#include <asm/types.h>
#ifdef CONFIG_64BIT
@@ -22,493 +23,493 @@ typedef atomic_t atomic_long_t;
#ifdef CONFIG_64BIT
-static inline long
+static __always_inline long
atomic_long_read(const atomic_long_t *v)
{
return atomic64_read(v);
}
-static inline long
+static __always_inline long
atomic_long_read_acquire(const atomic_long_t *v)
{
return atomic64_read_acquire(v);
}
-static inline void
+static __always_inline void
atomic_long_set(atomic_long_t *v, long i)
{
atomic64_set(v, i);
}
-static inline void
+static __always_inline void
atomic_long_set_release(atomic_long_t *v, long i)
{
atomic64_set_release(v, i);
}
-static inline void
+static __always_inline void
atomic_long_add(long i, atomic_long_t *v)
{
atomic64_add(i, v);
}
-static inline long
+static __always_inline long
atomic_long_add_return(long i, atomic_long_t *v)
{
return atomic64_add_return(i, v);
}
-static inline long
+static __always_inline long
atomic_long_add_return_acquire(long i, atomic_long_t *v)
{
return atomic64_add_return_acquire(i, v);
}
-static inline long
+static __always_inline long
atomic_long_add_return_release(long i, atomic_long_t *v)
{
return atomic64_add_return_release(i, v);
}
-static inline long
+static __always_inline long
atomic_long_add_return_relaxed(long i, atomic_long_t *v)
{
return atomic64_add_return_relaxed(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_add(long i, atomic_long_t *v)
{
return atomic64_fetch_add(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_add_acquire(long i, atomic_long_t *v)
{
return atomic64_fetch_add_acquire(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_add_release(long i, atomic_long_t *v)
{
return atomic64_fetch_add_release(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_add_relaxed(long i, atomic_long_t *v)
{
return atomic64_fetch_add_relaxed(i, v);
}
-static inline void
+static __always_inline void
atomic_long_sub(long i, atomic_long_t *v)
{
atomic64_sub(i, v);
}
-static inline long
+static __always_inline long
atomic_long_sub_return(long i, atomic_long_t *v)
{
return atomic64_sub_return(i, v);
}
-static inline long
+static __always_inline long
atomic_long_sub_return_acquire(long i, atomic_long_t *v)
{
return atomic64_sub_return_acquire(i, v);
}
-static inline long
+static __always_inline long
atomic_long_sub_return_release(long i, atomic_long_t *v)
{
return atomic64_sub_return_release(i, v);
}
-static inline long
+static __always_inline long
atomic_long_sub_return_relaxed(long i, atomic_long_t *v)
{
return atomic64_sub_return_relaxed(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_sub(long i, atomic_long_t *v)
{
return atomic64_fetch_sub(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_sub_acquire(long i, atomic_long_t *v)
{
return atomic64_fetch_sub_acquire(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_sub_release(long i, atomic_long_t *v)
{
return atomic64_fetch_sub_release(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v)
{
return atomic64_fetch_sub_relaxed(i, v);
}
-static inline void
+static __always_inline void
atomic_long_inc(atomic_long_t *v)
{
atomic64_inc(v);
}
-static inline long
+static __always_inline long
atomic_long_inc_return(atomic_long_t *v)
{
return atomic64_inc_return(v);
}
-static inline long
+static __always_inline long
atomic_long_inc_return_acquire(atomic_long_t *v)
{
return atomic64_inc_return_acquire(v);
}
-static inline long
+static __always_inline long
atomic_long_inc_return_release(atomic_long_t *v)
{
return atomic64_inc_return_release(v);
}
-static inline long
+static __always_inline long
atomic_long_inc_return_relaxed(atomic_long_t *v)
{
return atomic64_inc_return_relaxed(v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_inc(atomic_long_t *v)
{
return atomic64_fetch_inc(v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_inc_acquire(atomic_long_t *v)
{
return atomic64_fetch_inc_acquire(v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_inc_release(atomic_long_t *v)
{
return atomic64_fetch_inc_release(v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_inc_relaxed(atomic_long_t *v)
{
return atomic64_fetch_inc_relaxed(v);
}
-static inline void
+static __always_inline void
atomic_long_dec(atomic_long_t *v)
{
atomic64_dec(v);
}
-static inline long
+static __always_inline long
atomic_long_dec_return(atomic_long_t *v)
{
return atomic64_dec_return(v);
}
-static inline long
+static __always_inline long
atomic_long_dec_return_acquire(atomic_long_t *v)
{
return atomic64_dec_return_acquire(v);
}
-static inline long
+static __always_inline long
atomic_long_dec_return_release(atomic_long_t *v)
{
return atomic64_dec_return_release(v);
}
-static inline long
+static __always_inline long
atomic_long_dec_return_relaxed(atomic_long_t *v)
{
return atomic64_dec_return_relaxed(v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_dec(atomic_long_t *v)
{
return atomic64_fetch_dec(v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_dec_acquire(atomic_long_t *v)
{
return atomic64_fetch_dec_acquire(v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_dec_release(atomic_long_t *v)
{
return atomic64_fetch_dec_release(v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_dec_relaxed(atomic_long_t *v)
{
return atomic64_fetch_dec_relaxed(v);
}
-static inline void
+static __always_inline void
atomic_long_and(long i, atomic_long_t *v)
{
atomic64_and(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_and(long i, atomic_long_t *v)
{
return atomic64_fetch_and(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_and_acquire(long i, atomic_long_t *v)
{
return atomic64_fetch_and_acquire(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_and_release(long i, atomic_long_t *v)
{
return atomic64_fetch_and_release(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_and_relaxed(long i, atomic_long_t *v)
{
return atomic64_fetch_and_relaxed(i, v);
}
-static inline void
+static __always_inline void
atomic_long_andnot(long i, atomic_long_t *v)
{
atomic64_andnot(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_andnot(long i, atomic_long_t *v)
{
return atomic64_fetch_andnot(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v)
{
return atomic64_fetch_andnot_acquire(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_andnot_release(long i, atomic_long_t *v)
{
return atomic64_fetch_andnot_release(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v)
{
return atomic64_fetch_andnot_relaxed(i, v);
}
-static inline void
+static __always_inline void
atomic_long_or(long i, atomic_long_t *v)
{
atomic64_or(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_or(long i, atomic_long_t *v)
{
return atomic64_fetch_or(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_or_acquire(long i, atomic_long_t *v)
{
return atomic64_fetch_or_acquire(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_or_release(long i, atomic_long_t *v)
{
return atomic64_fetch_or_release(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_or_relaxed(long i, atomic_long_t *v)
{
return atomic64_fetch_or_relaxed(i, v);
}
-static inline void
+static __always_inline void
atomic_long_xor(long i, atomic_long_t *v)
{
atomic64_xor(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_xor(long i, atomic_long_t *v)
{
return atomic64_fetch_xor(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_xor_acquire(long i, atomic_long_t *v)
{
return atomic64_fetch_xor_acquire(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_xor_release(long i, atomic_long_t *v)
{
return atomic64_fetch_xor_release(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v)
{
return atomic64_fetch_xor_relaxed(i, v);
}
-static inline long
+static __always_inline long
atomic_long_xchg(atomic_long_t *v, long i)
{
return atomic64_xchg(v, i);
}
-static inline long
+static __always_inline long
atomic_long_xchg_acquire(atomic_long_t *v, long i)
{
return atomic64_xchg_acquire(v, i);
}
-static inline long
+static __always_inline long
atomic_long_xchg_release(atomic_long_t *v, long i)
{
return atomic64_xchg_release(v, i);
}
-static inline long
+static __always_inline long
atomic_long_xchg_relaxed(atomic_long_t *v, long i)
{
return atomic64_xchg_relaxed(v, i);
}
-static inline long
+static __always_inline long
atomic_long_cmpxchg(atomic_long_t *v, long old, long new)
{
return atomic64_cmpxchg(v, old, new);
}
-static inline long
+static __always_inline long
atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new)
{
return atomic64_cmpxchg_acquire(v, old, new);
}
-static inline long
+static __always_inline long
atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new)
{
return atomic64_cmpxchg_release(v, old, new);
}
-static inline long
+static __always_inline long
atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new)
{
return atomic64_cmpxchg_relaxed(v, old, new);
}
-static inline bool
+static __always_inline bool
atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new)
{
return atomic64_try_cmpxchg(v, (s64 *)old, new);
}
-static inline bool
+static __always_inline bool
atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new)
{
return atomic64_try_cmpxchg_acquire(v, (s64 *)old, new);
}
-static inline bool
+static __always_inline bool
atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new)
{
return atomic64_try_cmpxchg_release(v, (s64 *)old, new);
}
-static inline bool
+static __always_inline bool
atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new)
{
return atomic64_try_cmpxchg_relaxed(v, (s64 *)old, new);
}
-static inline bool
+static __always_inline bool
atomic_long_sub_and_test(long i, atomic_long_t *v)
{
return atomic64_sub_and_test(i, v);
}
-static inline bool
+static __always_inline bool
atomic_long_dec_and_test(atomic_long_t *v)
{
return atomic64_dec_and_test(v);
}
-static inline bool
+static __always_inline bool
atomic_long_inc_and_test(atomic_long_t *v)
{
return atomic64_inc_and_test(v);
}
-static inline bool
+static __always_inline bool
atomic_long_add_negative(long i, atomic_long_t *v)
{
return atomic64_add_negative(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u)
{
return atomic64_fetch_add_unless(v, a, u);
}
-static inline bool
+static __always_inline bool
atomic_long_add_unless(atomic_long_t *v, long a, long u)
{
return atomic64_add_unless(v, a, u);
}
-static inline bool
+static __always_inline bool
atomic_long_inc_not_zero(atomic_long_t *v)
{
return atomic64_inc_not_zero(v);
}
-static inline bool
+static __always_inline bool
atomic_long_inc_unless_negative(atomic_long_t *v)
{
return atomic64_inc_unless_negative(v);
}
-static inline bool
+static __always_inline bool
atomic_long_dec_unless_positive(atomic_long_t *v)
{
return atomic64_dec_unless_positive(v);
}
-static inline long
+static __always_inline long
atomic_long_dec_if_positive(atomic_long_t *v)
{
return atomic64_dec_if_positive(v);
@@ -516,493 +517,493 @@ atomic_long_dec_if_positive(atomic_long_t *v)
#else /* CONFIG_64BIT */
-static inline long
+static __always_inline long
atomic_long_read(const atomic_long_t *v)
{
return atomic_read(v);
}
-static inline long
+static __always_inline long
atomic_long_read_acquire(const atomic_long_t *v)
{
return atomic_read_acquire(v);
}
-static inline void
+static __always_inline void
atomic_long_set(atomic_long_t *v, long i)
{
atomic_set(v, i);
}
-static inline void
+static __always_inline void
atomic_long_set_release(atomic_long_t *v, long i)
{
atomic_set_release(v, i);
}
-static inline void
+static __always_inline void
atomic_long_add(long i, atomic_long_t *v)
{
atomic_add(i, v);
}
-static inline long
+static __always_inline long
atomic_long_add_return(long i, atomic_long_t *v)
{
return atomic_add_return(i, v);
}
-static inline long
+static __always_inline long
atomic_long_add_return_acquire(long i, atomic_long_t *v)
{
return atomic_add_return_acquire(i, v);
}
-static inline long
+static __always_inline long
atomic_long_add_return_release(long i, atomic_long_t *v)
{
return atomic_add_return_release(i, v);
}
-static inline long
+static __always_inline long
atomic_long_add_return_relaxed(long i, atomic_long_t *v)
{
return atomic_add_return_relaxed(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_add(long i, atomic_long_t *v)
{
return atomic_fetch_add(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_add_acquire(long i, atomic_long_t *v)
{
return atomic_fetch_add_acquire(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_add_release(long i, atomic_long_t *v)
{
return atomic_fetch_add_release(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_add_relaxed(long i, atomic_long_t *v)
{
return atomic_fetch_add_relaxed(i, v);
}
-static inline void
+static __always_inline void
atomic_long_sub(long i, atomic_long_t *v)
{
atomic_sub(i, v);
}
-static inline long
+static __always_inline long
atomic_long_sub_return(long i, atomic_long_t *v)
{
return atomic_sub_return(i, v);
}
-static inline long
+static __always_inline long
atomic_long_sub_return_acquire(long i, atomic_long_t *v)
{
return atomic_sub_return_acquire(i, v);
}
-static inline long
+static __always_inline long
atomic_long_sub_return_release(long i, atomic_long_t *v)
{
return atomic_sub_return_release(i, v);
}
-static inline long
+static __always_inline long
atomic_long_sub_return_relaxed(long i, atomic_long_t *v)
{
return atomic_sub_return_relaxed(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_sub(long i, atomic_long_t *v)
{
return atomic_fetch_sub(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_sub_acquire(long i, atomic_long_t *v)
{
return atomic_fetch_sub_acquire(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_sub_release(long i, atomic_long_t *v)
{
return atomic_fetch_sub_release(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v)
{
return atomic_fetch_sub_relaxed(i, v);
}
-static inline void
+static __always_inline void
atomic_long_inc(atomic_long_t *v)
{
atomic_inc(v);
}
-static inline long
+static __always_inline long
atomic_long_inc_return(atomic_long_t *v)
{
return atomic_inc_return(v);
}
-static inline long
+static __always_inline long
atomic_long_inc_return_acquire(atomic_long_t *v)
{
return atomic_inc_return_acquire(v);
}
-static inline long
+static __always_inline long
atomic_long_inc_return_release(atomic_long_t *v)
{
return atomic_inc_return_release(v);
}
-static inline long
+static __always_inline long
atomic_long_inc_return_relaxed(atomic_long_t *v)
{
return atomic_inc_return_relaxed(v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_inc(atomic_long_t *v)
{
return atomic_fetch_inc(v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_inc_acquire(atomic_long_t *v)
{
return atomic_fetch_inc_acquire(v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_inc_release(atomic_long_t *v)
{
return atomic_fetch_inc_release(v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_inc_relaxed(atomic_long_t *v)
{
return atomic_fetch_inc_relaxed(v);
}
-static inline void
+static __always_inline void
atomic_long_dec(atomic_long_t *v)
{
atomic_dec(v);
}
-static inline long
+static __always_inline long
atomic_long_dec_return(atomic_long_t *v)
{
return atomic_dec_return(v);
}
-static inline long
+static __always_inline long
atomic_long_dec_return_acquire(atomic_long_t *v)
{
return atomic_dec_return_acquire(v);
}
-static inline long
+static __always_inline long
atomic_long_dec_return_release(atomic_long_t *v)
{
return atomic_dec_return_release(v);
}
-static inline long
+static __always_inline long
atomic_long_dec_return_relaxed(atomic_long_t *v)
{
return atomic_dec_return_relaxed(v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_dec(atomic_long_t *v)
{
return atomic_fetch_dec(v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_dec_acquire(atomic_long_t *v)
{
return atomic_fetch_dec_acquire(v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_dec_release(atomic_long_t *v)
{
return atomic_fetch_dec_release(v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_dec_relaxed(atomic_long_t *v)
{
return atomic_fetch_dec_relaxed(v);
}
-static inline void
+static __always_inline void
atomic_long_and(long i, atomic_long_t *v)
{
atomic_and(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_and(long i, atomic_long_t *v)
{
return atomic_fetch_and(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_and_acquire(long i, atomic_long_t *v)
{
return atomic_fetch_and_acquire(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_and_release(long i, atomic_long_t *v)
{
return atomic_fetch_and_release(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_and_relaxed(long i, atomic_long_t *v)
{
return atomic_fetch_and_relaxed(i, v);
}
-static inline void
+static __always_inline void
atomic_long_andnot(long i, atomic_long_t *v)
{
atomic_andnot(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_andnot(long i, atomic_long_t *v)
{
return atomic_fetch_andnot(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v)
{
return atomic_fetch_andnot_acquire(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_andnot_release(long i, atomic_long_t *v)
{
return atomic_fetch_andnot_release(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v)
{
return atomic_fetch_andnot_relaxed(i, v);
}
-static inline void
+static __always_inline void
atomic_long_or(long i, atomic_long_t *v)
{
atomic_or(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_or(long i, atomic_long_t *v)
{
return atomic_fetch_or(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_or_acquire(long i, atomic_long_t *v)
{
return atomic_fetch_or_acquire(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_or_release(long i, atomic_long_t *v)
{
return atomic_fetch_or_release(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_or_relaxed(long i, atomic_long_t *v)
{
return atomic_fetch_or_relaxed(i, v);
}
-static inline void
+static __always_inline void
atomic_long_xor(long i, atomic_long_t *v)
{
atomic_xor(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_xor(long i, atomic_long_t *v)
{
return atomic_fetch_xor(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_xor_acquire(long i, atomic_long_t *v)
{
return atomic_fetch_xor_acquire(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_xor_release(long i, atomic_long_t *v)
{
return atomic_fetch_xor_release(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v)
{
return atomic_fetch_xor_relaxed(i, v);
}
-static inline long
+static __always_inline long
atomic_long_xchg(atomic_long_t *v, long i)
{
return atomic_xchg(v, i);
}
-static inline long
+static __always_inline long
atomic_long_xchg_acquire(atomic_long_t *v, long i)
{
return atomic_xchg_acquire(v, i);
}
-static inline long
+static __always_inline long
atomic_long_xchg_release(atomic_long_t *v, long i)
{
return atomic_xchg_release(v, i);
}
-static inline long
+static __always_inline long
atomic_long_xchg_relaxed(atomic_long_t *v, long i)
{
return atomic_xchg_relaxed(v, i);
}
-static inline long
+static __always_inline long
atomic_long_cmpxchg(atomic_long_t *v, long old, long new)
{
return atomic_cmpxchg(v, old, new);
}
-static inline long
+static __always_inline long
atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new)
{
return atomic_cmpxchg_acquire(v, old, new);
}
-static inline long
+static __always_inline long
atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new)
{
return atomic_cmpxchg_release(v, old, new);
}
-static inline long
+static __always_inline long
atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new)
{
return atomic_cmpxchg_relaxed(v, old, new);
}
-static inline bool
+static __always_inline bool
atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new)
{
return atomic_try_cmpxchg(v, (int *)old, new);
}
-static inline bool
+static __always_inline bool
atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new)
{
return atomic_try_cmpxchg_acquire(v, (int *)old, new);
}
-static inline bool
+static __always_inline bool
atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new)
{
return atomic_try_cmpxchg_release(v, (int *)old, new);
}
-static inline bool
+static __always_inline bool
atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new)
{
return atomic_try_cmpxchg_relaxed(v, (int *)old, new);
}
-static inline bool
+static __always_inline bool
atomic_long_sub_and_test(long i, atomic_long_t *v)
{
return atomic_sub_and_test(i, v);
}
-static inline bool
+static __always_inline bool
atomic_long_dec_and_test(atomic_long_t *v)
{
return atomic_dec_and_test(v);
}
-static inline bool
+static __always_inline bool
atomic_long_inc_and_test(atomic_long_t *v)
{
return atomic_inc_and_test(v);
}
-static inline bool
+static __always_inline bool
atomic_long_add_negative(long i, atomic_long_t *v)
{
return atomic_add_negative(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u)
{
return atomic_fetch_add_unless(v, a, u);
}
-static inline bool
+static __always_inline bool
atomic_long_add_unless(atomic_long_t *v, long a, long u)
{
return atomic_add_unless(v, a, u);
}
-static inline bool
+static __always_inline bool
atomic_long_inc_not_zero(atomic_long_t *v)
{
return atomic_inc_not_zero(v);
}
-static inline bool
+static __always_inline bool
atomic_long_inc_unless_negative(atomic_long_t *v)
{
return atomic_inc_unless_negative(v);
}
-static inline bool
+static __always_inline bool
atomic_long_dec_unless_positive(atomic_long_t *v)
{
return atomic_dec_unless_positive(v);
}
-static inline long
+static __always_inline long
atomic_long_dec_if_positive(atomic_long_t *v)
{
return atomic_dec_if_positive(v);
@@ -1010,4 +1011,4 @@ atomic_long_dec_if_positive(atomic_long_t *v)
#endif /* CONFIG_64BIT */
#endif /* _ASM_GENERIC_ATOMIC_LONG_H */
-// 77558968132ce4f911ad53f6f52ce423006f6268
+// a624200981f552b2c6be4f32fe44da8289f30d87
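
atomic_long_t is pure delegation: under CONFIG_64BIT each call maps to its atomic64_*() counterpart, otherwise to atomic_*(), so instrumentation is inherited from whichever family sits underneath. A brief usage sketch; the counter is illustrative only.

/* Illustrative counter; works unchanged on 32-bit and 64-bit kernels. */
static atomic_long_t nr_bytes_accounted = ATOMIC_LONG_INIT(0);

static void account_bytes(long bytes)
{
	/* Expands to atomic64_add() under CONFIG_64BIT, atomic_add() otherwise. */
	atomic_long_add(bytes, &nr_bytes_accounted);
}
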
diff --git a/include/asm-generic/bitops/instrumented-atomic.h b/include/asm-generic/bitops/instrumented-atomic.h
index 18ce3c9e8eec..fb2cb33a4013 100644
--- a/include/asm-generic/bitops/instrumented-atomic.h
+++ b/include/asm-generic/bitops/instrumented-atomic.h
@@ -11,7 +11,7 @@
#ifndef _ASM_GENERIC_BITOPS_INSTRUMENTED_ATOMIC_H
#define _ASM_GENERIC_BITOPS_INSTRUMENTED_ATOMIC_H
-#include <linux/kasan-checks.h>
+#include <linux/instrumented.h>
/**
* set_bit - Atomically set a bit in memory
@@ -25,7 +25,7 @@
*/
static inline void set_bit(long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
arch_set_bit(nr, addr);
}
@@ -38,7 +38,7 @@ static inline void set_bit(long nr, volatile unsigned long *addr)
*/
static inline void clear_bit(long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
arch_clear_bit(nr, addr);
}
@@ -54,7 +54,7 @@ static inline void clear_bit(long nr, volatile unsigned long *addr)
*/
static inline void change_bit(long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
arch_change_bit(nr, addr);
}
@@ -67,7 +67,7 @@ static inline void change_bit(long nr, volatile unsigned long *addr)
*/
static inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
return arch_test_and_set_bit(nr, addr);
}
@@ -80,7 +80,7 @@ static inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
*/
static inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
return arch_test_and_clear_bit(nr, addr);
}
@@ -93,7 +93,7 @@ static inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
*/
static inline bool test_and_change_bit(long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
return arch_test_and_change_bit(nr, addr);
}
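
Note that each atomic bitop instruments the whole long word containing the bit (addr + BIT_WORD(nr), sizeof(long)), not just the single bit. A sketch of a typical caller; the flag word and helper name are illustrative.

/* Illustrative: claim a one-shot flag; races on the word are checked. */
static unsigned long init_done;

static bool claim_init(void)
{
	/* Instruments the containing word, then runs arch_test_and_set_bit(). */
	return !test_and_set_bit(0, &init_done);
}
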
diff --git a/include/asm-generic/bitops/instrumented-lock.h b/include/asm-generic/bitops/instrumented-lock.h
index ec53fdeea9ec..b9bec468ae03 100644
--- a/include/asm-generic/bitops/instrumented-lock.h
+++ b/include/asm-generic/bitops/instrumented-lock.h
@@ -11,7 +11,7 @@
#ifndef _ASM_GENERIC_BITOPS_INSTRUMENTED_LOCK_H
#define _ASM_GENERIC_BITOPS_INSTRUMENTED_LOCK_H
-#include <linux/kasan-checks.h>
+#include <linux/instrumented.h>
/**
* clear_bit_unlock - Clear a bit in memory, for unlock
@@ -22,7 +22,7 @@
*/
static inline void clear_bit_unlock(long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
arch_clear_bit_unlock(nr, addr);
}
@@ -37,7 +37,7 @@ static inline void clear_bit_unlock(long nr, volatile unsigned long *addr)
*/
static inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ instrument_write(addr + BIT_WORD(nr), sizeof(long));
arch___clear_bit_unlock(nr, addr);
}
@@ -52,7 +52,7 @@ static inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
*/
static inline bool test_and_set_bit_lock(long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
return arch_test_and_set_bit_lock(nr, addr);
}
@@ -71,7 +71,7 @@ static inline bool test_and_set_bit_lock(long nr, volatile unsigned long *addr)
static inline bool
clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
return arch_clear_bit_unlock_is_negative_byte(nr, addr);
}
/* Let everybody know we have it. */
diff --git a/include/asm-generic/bitops/instrumented-non-atomic.h b/include/asm-generic/bitops/instrumented-non-atomic.h
index 95ff28d128a1..20f788a25ef9 100644
--- a/include/asm-generic/bitops/instrumented-non-atomic.h
+++ b/include/asm-generic/bitops/instrumented-non-atomic.h
@@ -11,7 +11,7 @@
#ifndef _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H
#define _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H
-#include <linux/kasan-checks.h>
+#include <linux/instrumented.h>
/**
* __set_bit - Set a bit in memory
@@ -24,7 +24,7 @@
*/
static inline void __set_bit(long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ instrument_write(addr + BIT_WORD(nr), sizeof(long));
arch___set_bit(nr, addr);
}
@@ -39,7 +39,7 @@ static inline void __set_bit(long nr, volatile unsigned long *addr)
*/
static inline void __clear_bit(long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ instrument_write(addr + BIT_WORD(nr), sizeof(long));
arch___clear_bit(nr, addr);
}
@@ -54,7 +54,7 @@ static inline void __clear_bit(long nr, volatile unsigned long *addr)
*/
static inline void __change_bit(long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ instrument_write(addr + BIT_WORD(nr), sizeof(long));
arch___change_bit(nr, addr);
}
@@ -68,7 +68,7 @@ static inline void __change_bit(long nr, volatile unsigned long *addr)
*/
static inline bool __test_and_set_bit(long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ instrument_write(addr + BIT_WORD(nr), sizeof(long));
return arch___test_and_set_bit(nr, addr);
}
@@ -82,7 +82,7 @@ static inline bool __test_and_set_bit(long nr, volatile unsigned long *addr)
*/
static inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ instrument_write(addr + BIT_WORD(nr), sizeof(long));
return arch___test_and_clear_bit(nr, addr);
}
@@ -96,7 +96,7 @@ static inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr)
*/
static inline bool __test_and_change_bit(long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ instrument_write(addr + BIT_WORD(nr), sizeof(long));
return arch___test_and_change_bit(nr, addr);
}
@@ -107,7 +107,7 @@ static inline bool __test_and_change_bit(long nr, volatile unsigned long *addr)
*/
static inline bool test_bit(long nr, const volatile unsigned long *addr)
{
- kasan_check_read(addr + BIT_WORD(nr), sizeof(long));
+ instrument_atomic_read(addr + BIT_WORD(nr), sizeof(long));
return arch_test_bit(nr, addr);
}
diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
index 790c0c6b8552..ee37256ec8bd 100644
--- a/include/linux/compiler-clang.h
+++ b/include/linux/compiler-clang.h
@@ -16,7 +16,7 @@
#define KASAN_ABI_VERSION 5
#if __has_feature(address_sanitizer) || __has_feature(hwaddress_sanitizer)
-/* emulate gcc's __SANITIZE_ADDRESS__ flag */
+/* Emulate GCC's __SANITIZE_ADDRESS__ flag */
#define __SANITIZE_ADDRESS__
#define __no_sanitize_address \
__attribute__((no_sanitize("address", "hwaddress")))
@@ -24,6 +24,15 @@
#define __no_sanitize_address
#endif
+#if __has_feature(thread_sanitizer)
+/* Emulate GCC's __SANITIZE_THREAD__ flag */
+#define __SANITIZE_THREAD__
+#define __no_sanitize_thread \
+ __attribute__((no_sanitize("thread")))
+#else
+#define __no_sanitize_thread
+#endif
+
/*
* Not all versions of clang implement the type-generic versions
* of the builtin overflow checkers. Fortunately, clang implements
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index e2f725273261..7dd4e0349ef3 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -144,6 +144,12 @@
#define __no_sanitize_address
#endif
+#if defined(__SANITIZE_THREAD__) && __has_attribute(__no_sanitize_thread__)
+#define __no_sanitize_thread __attribute__((no_sanitize_thread))
+#else
+#define __no_sanitize_thread
+#endif
+
#if GCC_VERSION >= 50100
#define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1
#endif
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 33d3a2e5abab..f09ebbf16562 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -250,6 +250,27 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
*/
#include <asm/barrier.h>
#include <linux/kasan-checks.h>
+#include <linux/kcsan-checks.h>
+
+/**
+ * data_race - mark an expression as containing intentional data races
+ *
+ * This data_race() macro is useful for situations in which data races
+ * should be forgiven. One example is diagnostic code that accesses
+ * shared variables but is not a part of the core synchronization design.
+ *
+ * This macro *does not* affect normal code generation, but is a hint
+ * to tooling that data races here are to be ignored.
+ */
+#define data_race(expr) \
+({ \
+ __kcsan_disable_current(); \
+ ({ \
+ __unqual_scalar_typeof(({ expr; })) __v = ({ expr; }); \
+ __kcsan_enable_current(); \
+ __v; \
+ }); \
+})
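
A usage sketch, assuming purely diagnostic code; the stats structure and threshold are hypothetical. The read may race with a writer by design, and data_race() tells KCSAN to forgive it without changing code generation.

	/* Sketch: stats->len is updated concurrently; a stale value is fine. */
	if (data_race(stats->len) > len_warn_threshold)
		pr_warn("unexpectedly long queue\n");
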
/*
* Use __READ_ONCE() instead of READ_ONCE() if you do not require any
@@ -260,7 +281,9 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
#define __READ_ONCE_SCALAR(x) \
({ \
- __unqual_scalar_typeof(x) __x = __READ_ONCE(x); \
+ typeof(x) *__xp = &(x); \
+ __unqual_scalar_typeof(x) __x = data_race(__READ_ONCE(*__xp)); \
+ kcsan_check_atomic_read(__xp, sizeof(*__xp)); \
smp_read_barrier_depends(); \
(typeof(x))__x; \
})
@@ -271,15 +294,22 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
__READ_ONCE_SCALAR(x); \
})
-#define __WRITE_ONCE(x, val) \
-do { \
- *(volatile typeof(x) *)&(x) = (val); \
+#define __WRITE_ONCE(x, val) \
+do { \
+ *(volatile typeof(x) *)&(x) = (val); \
+} while (0)
+
+#define __WRITE_ONCE_SCALAR(x, val) \
+do { \
+ typeof(x) *__xp = &(x); \
+ kcsan_check_atomic_write(__xp, sizeof(*__xp)); \
+ data_race(({ __WRITE_ONCE(*__xp, val); 0; })); \
} while (0)
-#define WRITE_ONCE(x, val) \
-do { \
- compiletime_assert_rwonce_type(x); \
- __WRITE_ONCE(x, val); \
+#define WRITE_ONCE(x, val) \
+do { \
+ compiletime_assert_rwonce_type(x); \
+ __WRITE_ONCE_SCALAR(x, val); \
} while (0)
#ifdef CONFIG_KASAN
@@ -290,11 +320,30 @@ do { \
* '__maybe_unused' allows us to avoid defined-but-not-used warnings.
*/
# define __no_kasan_or_inline __no_sanitize_address notrace __maybe_unused
+# define __no_sanitize_or_inline __no_kasan_or_inline
#else
# define __no_kasan_or_inline __always_inline
#endif
-static __no_kasan_or_inline
+#define __no_kcsan __no_sanitize_thread
+#ifdef __SANITIZE_THREAD__
+/*
+ * Rely on __SANITIZE_THREAD__ instead of CONFIG_KCSAN, so that inlining is not
+ * needlessly disabled in compilation units where instrumentation is disabled.
+ * The attribute 'noinline' is required for older compilers, where implicit
+ * inlining of very small functions renders __no_sanitize_thread ineffective.
+ */
+# define __no_kcsan_or_inline __no_kcsan noinline notrace __maybe_unused
+# define __no_sanitize_or_inline __no_kcsan_or_inline
+#else
+# define __no_kcsan_or_inline __always_inline
+#endif
+
+#ifndef __no_sanitize_or_inline
+#define __no_sanitize_or_inline __always_inline
+#endif
+
+static __no_sanitize_or_inline
unsigned long __read_once_word_nocheck(const void *addr)
{
return __READ_ONCE(*(unsigned long *)addr);
@@ -302,8 +351,8 @@ unsigned long __read_once_word_nocheck(const void *addr)
/*
* Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need to load a
- * word from memory atomically but without telling KASAN. This is usually
- * used by unwinding code when walking the stack of a running process.
+ * word from memory atomically but without telling KASAN/KCSAN. This is
+ * usually used by unwinding code when walking the stack of a running process.
*/
#define READ_ONCE_NOCHECK(x) \
({ \
diff --git a/include/linux/instrumented.h b/include/linux/instrumented.h
new file mode 100644
index 000000000000..43e6ea591975
--- /dev/null
+++ b/include/linux/instrumented.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * This header provides generic wrappers for memory access instrumentation that
+ * the compiler cannot emit for: KASAN, KCSAN.
+ */
+#ifndef _LINUX_INSTRUMENTED_H
+#define _LINUX_INSTRUMENTED_H
+
+#include <linux/compiler.h>
+#include <linux/kasan-checks.h>
+#include <linux/kcsan-checks.h>
+#include <linux/types.h>
+
+/**
+ * instrument_read - instrument regular read access
+ *
+ * Instrument a regular read access. The instrumentation should be inserted
+ * before the actual read happens.
+ *
+ * @v address of access
+ * @size size of access
+ */
+static __always_inline void instrument_read(const volatile void *v, size_t size)
+{
+ kasan_check_read(v, size);
+ kcsan_check_read(v, size);
+}
+
+/**
+ * instrument_write - instrument regular write access
+ *
+ * Instrument a regular write access. The instrumentation should be inserted
+ * before the actual write happens.
+ *
+ * @v address of access
+ * @size size of access
+ */
+static __always_inline void instrument_write(const volatile void *v, size_t size)
+{
+ kasan_check_write(v, size);
+ kcsan_check_write(v, size);
+}
+
+/**
+ * instrument_atomic_read - instrument atomic read access
+ *
+ * Instrument an atomic read access. The instrumentation should be inserted
+ * before the actual read happens.
+ *
+ * @v address of access
+ * @size size of access
+ */
+static __always_inline void instrument_atomic_read(const volatile void *v, size_t size)
+{
+ kasan_check_read(v, size);
+ kcsan_check_atomic_read(v, size);
+}
+
+/**
+ * instrument_atomic_write - instrument atomic write access
+ *
+ * Instrument an atomic write access. The instrumentation should be inserted
+ * before the actual write happens.
+ *
+ * @v address of access
+ * @size size of access
+ */
+static __always_inline void instrument_atomic_write(const volatile void *v, size_t size)
+{
+ kasan_check_write(v, size);
+ kcsan_check_atomic_write(v, size);
+}
+
+/**
+ * instrument_copy_to_user - instrument reads of copy_to_user
+ *
+ * Instrument reads from kernel memory that are due to copy_to_user() (and
+ * variants). The instrumentation must be inserted before the accesses.
+ *
+ * @to destination address
+ * @from source address
+ * @n number of bytes to copy
+ */
+static __always_inline void
+instrument_copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+ kasan_check_read(from, n);
+ kcsan_check_read(from, n);
+}
+
+/**
+ * instrument_copy_from_user - instrument writes of copy_from_user
+ *
+ * Instrument writes to kernel memory that are due to copy_from_user() (and
+ * variants). The instrumentation should be inserted before the accesses.
+ *
+ * @to destination address
+ * @from source address
+ * @n number of bytes to copy
+ */
+static __always_inline void
+instrument_copy_from_user(const void *to, const void __user *from, unsigned long n)
+{
+ kasan_check_write(to, n);
+ kcsan_check_write(to, n);
+}
+
+#endif /* _LINUX_INSTRUMENTED_H */
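
A sketch of the intended call placement for the usercopy hooks, simplified from the real wrappers in include/linux/uaccess.h: the hook runs on the kernel-side buffer before the copy takes place.

/* Simplified illustration only, not the actual uaccess implementation. */
static inline unsigned long
example_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	instrument_copy_to_user(to, from, n);	/* check kernel-side read first */
	return raw_copy_to_user(to, from, n);	/* then perform the copy */
}
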
diff --git a/include/linux/kcsan-checks.h b/include/linux/kcsan-checks.h
new file mode 100644
index 000000000000..7b0b9c44f5f3
--- /dev/null
+++ b/include/linux/kcsan-checks.h
@@ -0,0 +1,430 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _LINUX_KCSAN_CHECKS_H
+#define _LINUX_KCSAN_CHECKS_H
+
+/* Note: Only include what is already included by compiler.h. */
+#include <linux/compiler_attributes.h>
+#include <linux/types.h>
+
+/*
+ * ACCESS TYPE MODIFIERS
+ *
+ * <none>: normal read access;
+ * WRITE : write access;
+ * ATOMIC: access is atomic;
+ * ASSERT: access is not a regular access, but an assertion;
+ * SCOPED: access is a scoped access;
+ */
+#define KCSAN_ACCESS_WRITE 0x1
+#define KCSAN_ACCESS_ATOMIC 0x2
+#define KCSAN_ACCESS_ASSERT 0x4
+#define KCSAN_ACCESS_SCOPED 0x8
+
+/*
+ * __kcsan_*: Always calls into the runtime when KCSAN is enabled. This may be used
+ * even in compilation units that selectively disable KCSAN, but must use KCSAN
+ * to validate access to an address. Never use these in header files!
+ */
+#ifdef CONFIG_KCSAN
+/**
+ * __kcsan_check_access - check generic access for races
+ *
+ * @ptr: address of access
+ * @size: size of access
+ * @type: access type modifier
+ */
+void __kcsan_check_access(const volatile void *ptr, size_t size, int type);
+
+/**
+ * kcsan_disable_current - disable KCSAN for the current context
+ *
+ * Supports nesting.
+ */
+void kcsan_disable_current(void);
+
+/**
+ * kcsan_enable_current - re-enable KCSAN for the current context
+ *
+ * Supports nesting.
+ */
+void kcsan_enable_current(void);
+void kcsan_enable_current_nowarn(void); /* Safe in uaccess regions. */
+
+/**
+ * kcsan_nestable_atomic_begin - begin nestable atomic region
+ *
+ * Accesses within the atomic region may appear to race with other accesses but
+ * should be considered atomic.
+ */
+void kcsan_nestable_atomic_begin(void);
+
+/**
+ * kcsan_nestable_atomic_end - end nestable atomic region
+ */
+void kcsan_nestable_atomic_end(void);
+
+/**
+ * kcsan_flat_atomic_begin - begin flat atomic region
+ *
+ * Accesses within the atomic region may appear to race with other accesses but
+ * should be considered atomic.
+ */
+void kcsan_flat_atomic_begin(void);
+
+/**
+ * kcsan_flat_atomic_end - end flat atomic region
+ */
+void kcsan_flat_atomic_end(void);
+
+/**
+ * kcsan_atomic_next - consider following accesses as atomic
+ *
+ * Force treating the next n memory accesses for the current context as atomic
+ * operations.
+ *
+ * @n: number of following memory accesses to treat as atomic.
+ */
+void kcsan_atomic_next(int n);
+
+/**
+ * kcsan_set_access_mask - set access mask
+ *
+ * Set the access mask for all accesses for the current context if non-zero.
+ * Only value changes to bits set in the mask will be reported.
+ *
+ * @mask: bitmask
+ */
+void kcsan_set_access_mask(unsigned long mask);
+
+/* Scoped access information. */
+struct kcsan_scoped_access {
+ struct list_head list;
+ const volatile void *ptr;
+ size_t size;
+ int type;
+};
+/*
+ * Automatically call kcsan_end_scoped_access() when kcsan_scoped_access goes
+ * out of scope; relies on attribute "cleanup", which is supported by all
+ * compilers that support KCSAN.
+ */
+#define __kcsan_cleanup_scoped \
+ __maybe_unused __attribute__((__cleanup__(kcsan_end_scoped_access)))
+
+/**
+ * kcsan_begin_scoped_access - begin scoped access
+ *
+ * Begin scoped access and initialize @sa, which will cause KCSAN to
+ * continuously check the memory range in the current thread until
+ * kcsan_end_scoped_access() is called for @sa.
+ *
+ * Scoped accesses are implemented by appending @sa to an internal list for the
+ * current execution context, and then checked on every call into the KCSAN
+ * runtime.
+ *
+ * @ptr: address of access
+ * @size: size of access
+ * @type: access type modifier
+ * @sa: struct kcsan_scoped_access to use for the scope of the access
+ */
+struct kcsan_scoped_access *
+kcsan_begin_scoped_access(const volatile void *ptr, size_t size, int type,
+ struct kcsan_scoped_access *sa);
+
+/**
+ * kcsan_end_scoped_access - end scoped access
+ *
+ * End a scoped access, which will stop KCSAN checking the memory range.
+ * Requires that kcsan_begin_scoped_access() was previously called once for @sa.
+ *
+ * @sa: a previously initialized struct kcsan_scoped_access
+ */
+void kcsan_end_scoped_access(struct kcsan_scoped_access *sa);
+
+
+#else /* CONFIG_KCSAN */
+
+static inline void __kcsan_check_access(const volatile void *ptr, size_t size,
+ int type) { }
+
+static inline void kcsan_disable_current(void) { }
+static inline void kcsan_enable_current(void) { }
+static inline void kcsan_enable_current_nowarn(void) { }
+static inline void kcsan_nestable_atomic_begin(void) { }
+static inline void kcsan_nestable_atomic_end(void) { }
+static inline void kcsan_flat_atomic_begin(void) { }
+static inline void kcsan_flat_atomic_end(void) { }
+static inline void kcsan_atomic_next(int n) { }
+static inline void kcsan_set_access_mask(unsigned long mask) { }
+
+struct kcsan_scoped_access { };
+#define __kcsan_cleanup_scoped __maybe_unused
+static inline struct kcsan_scoped_access *
+kcsan_begin_scoped_access(const volatile void *ptr, size_t size, int type,
+ struct kcsan_scoped_access *sa) { return sa; }
+static inline void kcsan_end_scoped_access(struct kcsan_scoped_access *sa) { }
+
+#endif /* CONFIG_KCSAN */
+
+#ifdef __SANITIZE_THREAD__
+/*
+ * Only calls into the runtime when the particular compilation unit has KCSAN
+ * instrumentation enabled. May be used in header files.
+ */
+#define kcsan_check_access __kcsan_check_access
+
+/*
+ * Only use these to disable KCSAN for accesses in the current compilation unit;
+ * calls into libraries may still perform KCSAN checks.
+ */
+#define __kcsan_disable_current kcsan_disable_current
+#define __kcsan_enable_current kcsan_enable_current_nowarn
+#else
+static inline void kcsan_check_access(const volatile void *ptr, size_t size,
+ int type) { }
+static inline void __kcsan_enable_current(void) { }
+static inline void __kcsan_disable_current(void) { }
+#endif
+
+/**
+ * __kcsan_check_read - check regular read access for races
+ *
+ * @ptr: address of access
+ * @size: size of access
+ */
+#define __kcsan_check_read(ptr, size) __kcsan_check_access(ptr, size, 0)
+
+/**
+ * __kcsan_check_write - check regular write access for races
+ *
+ * @ptr: address of access
+ * @size: size of access
+ */
+#define __kcsan_check_write(ptr, size) \
+ __kcsan_check_access(ptr, size, KCSAN_ACCESS_WRITE)
+
+/**
+ * kcsan_check_read - check regular read access for races
+ *
+ * @ptr: address of access
+ * @size: size of access
+ */
+#define kcsan_check_read(ptr, size) kcsan_check_access(ptr, size, 0)
+
+/**
+ * kcsan_check_write - check regular write access for races
+ *
+ * @ptr: address of access
+ * @size: size of access
+ */
+#define kcsan_check_write(ptr, size) \
+ kcsan_check_access(ptr, size, KCSAN_ACCESS_WRITE)
+
+/*
+ * Check for atomic accesses: if atomic accesses are not ignored, this simply
+ * aliases to kcsan_check_access(), otherwise becomes a no-op.
+ */
+#ifdef CONFIG_KCSAN_IGNORE_ATOMICS
+#define kcsan_check_atomic_read(...) do { } while (0)
+#define kcsan_check_atomic_write(...) do { } while (0)
+#else
+#define kcsan_check_atomic_read(ptr, size) \
+ kcsan_check_access(ptr, size, KCSAN_ACCESS_ATOMIC)
+#define kcsan_check_atomic_write(ptr, size) \
+ kcsan_check_access(ptr, size, KCSAN_ACCESS_ATOMIC | KCSAN_ACCESS_WRITE)
+#endif
+
+/**
+ * ASSERT_EXCLUSIVE_WRITER - assert no concurrent writes to @var
+ *
+ * Assert that there are no concurrent writes to @var; other readers are
+ * allowed. This assertion can be used to specify properties of concurrent code,
+ * where violation cannot be detected as a normal data race.
+ *
+ * For example, if we only have a single writer, but multiple concurrent
+ * readers, to avoid data races, all these accesses must be marked; even
+ * concurrent marked writes racing with the single writer are bugs.
+ * Unfortunately, due to being marked, they are no longer data races. For cases
+ * like these, we can use the macro as follows:
+ *
+ * .. code-block:: c
+ *
+ * void writer(void) {
+ * spin_lock(&update_foo_lock);
+ * ASSERT_EXCLUSIVE_WRITER(shared_foo);
+ * WRITE_ONCE(shared_foo, ...);
+ * spin_unlock(&update_foo_lock);
+ * }
+ * void reader(void) {
+ * // update_foo_lock does not need to be held!
+ * ... = READ_ONCE(shared_foo);
+ * }
+ *
+ * Note: ASSERT_EXCLUSIVE_WRITER_SCOPED(), if applicable, performs more thorough
+ * checking when there is a clear scope in which no concurrent writes are expected.
+ *
+ * @var: variable to assert on
+ */
+#define ASSERT_EXCLUSIVE_WRITER(var) \
+ __kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_ASSERT)
+
+/*
+ * Helper macros for the implementation of ASSERT_EXCLUSIVE_*_SCOPED(). @id is
+ * expected to be unique for the scope in which instances of kcsan_scoped_access
+ * are declared.
+ */
+#define __kcsan_scoped_name(c, suffix) __kcsan_scoped_##c##suffix
+#define __ASSERT_EXCLUSIVE_SCOPED(var, type, id) \
+ struct kcsan_scoped_access __kcsan_scoped_name(id, _) \
+ __kcsan_cleanup_scoped; \
+ struct kcsan_scoped_access *__kcsan_scoped_name(id, _dummy_p) \
+ __maybe_unused = kcsan_begin_scoped_access( \
+ &(var), sizeof(var), KCSAN_ACCESS_SCOPED | (type), \
+ &__kcsan_scoped_name(id, _))
+
+/**
+ * ASSERT_EXCLUSIVE_WRITER_SCOPED - assert no concurrent writes to @var in scope
+ *
+ * Scoped variant of ASSERT_EXCLUSIVE_WRITER().
+ *
+ * Assert that there are no concurrent writes to @var for the duration of the
+ * scope in which it is introduced. This provides a better way to fully cover
+ * the enclosing scope, compared to multiple ASSERT_EXCLUSIVE_WRITER(), and
+ * increases the likelihood for KCSAN to detect racing accesses.
+ *
+ * For example, it allows finding race-condition bugs that only occur due to
+ * state changes within the scope itself:
+ *
+ * .. code-block:: c
+ *
+ * void writer(void) {
+ * spin_lock(&update_foo_lock);
+ * {
+ * ASSERT_EXCLUSIVE_WRITER_SCOPED(shared_foo);
+ * WRITE_ONCE(shared_foo, 42);
+ * ...
+ * // shared_foo should still be 42 here!
+ * }
+ * spin_unlock(&update_foo_lock);
+ * }
+ * void buggy(void) {
+ * if (READ_ONCE(shared_foo) == 42)
+ * WRITE_ONCE(shared_foo, 1); // bug!
+ * }
+ *
+ * @var: variable to assert on
+ */
+#define ASSERT_EXCLUSIVE_WRITER_SCOPED(var) \
+ __ASSERT_EXCLUSIVE_SCOPED(var, KCSAN_ACCESS_ASSERT, __COUNTER__)
+
+/**
+ * ASSERT_EXCLUSIVE_ACCESS - assert no concurrent accesses to @var
+ *
+ * Assert that there are no concurrent accesses to @var (no readers nor
+ * writers). This assertion can be used to specify properties of concurrent
+ * code, where violation cannot be detected as a normal data race.
+ *
+ * For example, where exclusive access is expected after determining no other
+ * users of an object are left, but the object is not actually freed. We can
+ * check that this property actually holds as follows:
+ *
+ * .. code-block:: c
+ *
+ * if (refcount_dec_and_test(&obj->refcnt)) {
+ * ASSERT_EXCLUSIVE_ACCESS(*obj);
+ * do_some_cleanup(obj);
+ * release_for_reuse(obj);
+ * }
+ *
+ * Note: ASSERT_EXCLUSIVE_ACCESS_SCOPED(), if applicable, performs more thorough
+ * checking when there is a clear scope in which no concurrent accesses are expected.
+ *
+ * Note: For cases where the object is freed, `KASAN <kasan.html>`_ is a better
+ * fit to detect use-after-free bugs.
+ *
+ * @var: variable to assert on
+ */
+#define ASSERT_EXCLUSIVE_ACCESS(var) \
+ __kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT)
+
+/**
+ * ASSERT_EXCLUSIVE_ACCESS_SCOPED - assert no concurrent accesses to @var in scope
+ *
+ * Scoped variant of ASSERT_EXCLUSIVE_ACCESS().
+ *
+ * Assert that there are no concurrent accesses to @var (no readers nor writers)
+ * for the entire duration of the scope in which it is introduced. This provides
+ * a better way to fully cover the enclosing scope, compared to multiple
+ * ASSERT_EXCLUSIVE_ACCESS(), and increases the likelihood for KCSAN to detect
+ * racing accesses.
+ *
+ * @var: variable to assert on
+ */
+#define ASSERT_EXCLUSIVE_ACCESS_SCOPED(var) \
+ __ASSERT_EXCLUSIVE_SCOPED(var, KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT, __COUNTER__)
+
+/**
+ * ASSERT_EXCLUSIVE_BITS - assert no concurrent writes to subset of bits in @var
+ *
+ * Bit-granular variant of ASSERT_EXCLUSIVE_WRITER().
+ *
+ * Assert that there are no concurrent writes to a subset of bits in @var;
+ * concurrent readers are permitted. This assertion captures more detailed
+ * bit-level properties, compared to the other (word granularity) assertions.
+ * Only the bits set in @mask are checked for concurrent modifications, while
+ * ignoring the remaining bits, i.e. concurrent writes (or reads) to ~mask bits
+ * are ignored.
+ *
+ * Use this for variables, where some bits must not be modified concurrently,
+ * yet other bits are expected to be modified concurrently.
+ *
+ * For example, variables where, after initialization, some bits are read-only,
+ * but other bits may still be modified concurrently. A reader may wish to
+ * assert that this is true as follows:
+ *
+ * .. code-block:: c
+ *
+ * ASSERT_EXCLUSIVE_BITS(flags, READ_ONLY_MASK);
+ * foo = (READ_ONCE(flags) & READ_ONLY_MASK) >> READ_ONLY_SHIFT;
+ *
+ * Note: The access that immediately follows ASSERT_EXCLUSIVE_BITS() is assumed
+ * to access the masked bits only, and KCSAN optimistically assumes it is
+ * therefore safe, even in the presence of data races, and marking it with
+ * READ_ONCE() is optional from KCSAN's point-of-view. We caution, however, that
+ * it may still be advisable to do so, since we cannot reason about all compiler
+ * optimizations when it comes to bit manipulations (on the reader and writer
+ * side). If you are sure nothing can go wrong, we can write the above simply
+ * as:
+ *
+ * .. code-block:: c
+ *
+ * ASSERT_EXCLUSIVE_BITS(flags, READ_ONLY_MASK);
+ * foo = (flags & READ_ONLY_MASK) >> READ_ONLY_SHIFT;
+ *
+ * Another example, where this may be used, is when certain bits of @var may
+ * only be modified when holding the appropriate lock, but other bits may still
+ * be modified concurrently. Writers, where other bits may change concurrently,
+ * could use the assertion as follows:
+ *
+ * .. code-block:: c
+ *
+ * spin_lock(&foo_lock);
+ * ASSERT_EXCLUSIVE_BITS(flags, FOO_MASK);
+ * old_flags = flags;
+ * new_flags = (old_flags & ~FOO_MASK) | (new_foo << FOO_SHIFT);
+ * if (cmpxchg(&flags, old_flags, new_flags) != old_flags) { ... }
+ * spin_unlock(&foo_lock);
+ *
+ * @var: variable to assert on
+ * @mask: only check for modifications to bits set in @mask
+ */
+#define ASSERT_EXCLUSIVE_BITS(var, mask) \
+ do { \
+ kcsan_set_access_mask(mask); \
+ __kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_ASSERT);\
+ kcsan_set_access_mask(0); \
+ kcsan_atomic_next(1); \
+ } while (0)
+
+#endif /* _LINUX_KCSAN_CHECKS_H */
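
The split between __kcsan_check_*() (always calls the runtime when KCSAN is enabled) and kcsan_check_*() (a no-op in uninstrumented translation units) matters mostly for hand-written primitives. A sketch under that assumption; the helper is hypothetical.

/* Hypothetical primitive whose store KCSAN cannot observe by itself. */
static inline unsigned long example_xchg(unsigned long *p, unsigned long val)
{
	/* Checked even if this file is built without KCSAN instrumentation. */
	__kcsan_check_access(p, sizeof(*p),
			     KCSAN_ACCESS_ATOMIC | KCSAN_ACCESS_WRITE);
	return arch_xchg(p, val);
}
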
diff --git a/include/linux/kcsan.h b/include/linux/kcsan.h
new file mode 100644
index 000000000000..53340d8789f9
--- /dev/null
+++ b/include/linux/kcsan.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _LINUX_KCSAN_H
+#define _LINUX_KCSAN_H
+
+#include <linux/kcsan-checks.h>
+#include <linux/types.h>
+
+#ifdef CONFIG_KCSAN
+
+/*
+ * Context for each thread of execution: for tasks, this is stored in
+ * task_struct, and interrupts access internal per-CPU storage.
+ */
+struct kcsan_ctx {
+ int disable_count; /* disable counter */
+ int atomic_next; /* number of following atomic ops */
+
+ /*
+ * We distinguish between: (a) nestable atomic regions that may contain
+ * other nestable regions; and (b) flat atomic regions that do not keep
+ * track of nesting. Both (a) and (b) are entirely independent of each
+ * other, and a flat region may be started in a nestable region or
+ * vice-versa.
+ *
+ * This is required because, for example, the annotations for seqlocks
+ * declare seqlock writer critical sections as (a) nestable atomic
+ * regions and reader critical sections as (b) flat atomic regions;
+ * however, we have encountered cases where seqlock reader critical
+ * sections are contained within writer critical sections (the opposite
+ * may be possible, too).
+ *
+ * To support these cases, we independently track the depth of nesting
+ * for (a), and whether the leaf level is flat for (b).
+ */
+ int atomic_nest_count;
+ bool in_flat_atomic;
+
+ /*
+ * Access mask for all accesses if non-zero.
+ */
+ unsigned long access_mask;
+
+ /* List of scoped accesses. */
+ struct list_head scoped_accesses;
+};
+
+/**
+ * kcsan_init - initialize KCSAN runtime
+ */
+void kcsan_init(void);
+
+#else /* CONFIG_KCSAN */
+
+static inline void kcsan_init(void) { }
+
+#endif /* CONFIG_KCSAN */
+
+#endif /* _LINUX_KCSAN_H */
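To illustrate how atomic_nest_count and in_flat_atomic evolve, consider the following hypothetical annotation sequence; the function is invented, but the kcsan_*_atomic_*() calls are the ones declared in kcsan-checks.h:

	#include <linux/kcsan-checks.h>

	static void nesting_example(void)
	{
		kcsan_nestable_atomic_begin();	/* atomic_nest_count = 1 */
		kcsan_nestable_atomic_begin();	/* atomic_nest_count = 2 */
		kcsan_flat_atomic_begin();	/* in_flat_atomic = true  */
		/* ... accesses here are treated as atomic by KCSAN ... */
		kcsan_flat_atomic_end();	/* in_flat_atomic = false */
		kcsan_nestable_atomic_end();	/* atomic_nest_count = 1 */
		kcsan_nestable_atomic_end();	/* atomic_nest_count = 0 */
	}

Because in_flat_atomic is a single bool, flat regions do not nest; this is presumably why read_seqretry() below may call kcsan_flat_atomic_end() multiple times without unbalancing anything, as its comment notes.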
diff --git a/include/linux/sched.h b/include/linux/sched.h
index c5d96e3e7fff..4ea612e9ad27 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -31,6 +31,7 @@
#include <linux/task_io_accounting.h>
#include <linux/posix-timers.h>
#include <linux/rseq.h>
+#include <linux/kcsan.h>
/* task_struct member predeclarations (sorted alphabetically): */
struct audit_context;
@@ -1197,6 +1198,9 @@ struct task_struct {
#ifdef CONFIG_KASAN
unsigned int kasan_depth;
#endif
+#ifdef CONFIG_KCSAN
+ struct kcsan_ctx kcsan_ctx;
+#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* Index of current stored address in ret_stack: */
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 0491d963d47e..8b97204f35a7 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -37,9 +37,25 @@
#include <linux/preempt.h>
#include <linux/lockdep.h>
#include <linux/compiler.h>
+#include <linux/kcsan-checks.h>
#include <asm/processor.h>
/*
+ * The seqlock interface does not prescribe a precise sequence of read
+ * begin/retry/end. Readers typically call read_seqcount_begin() followed by
+ * read_seqcount_retry(); however, there are more esoteric cases that do not
+ * follow this pattern.
+ *
+ * As a consequence, we take the following best-effort approach for raw usage
+ * via seqcount_t under KCSAN: upon beginning a seq-reader critical section,
+ * pessimistically mark the next KCSAN_SEQLOCK_REGION_MAX memory accesses as
+ * atomics; once a matching read_seqcount_retry() call is seen, no following
+ * memory operations are considered atomic. Usage of seqlocks via the
+ * seqlock_t interface is not affected.
+ */
+#define KCSAN_SEQLOCK_REGION_MAX 1000
+
+/*
* Version using sequence counter only.
* This can be used when code has its own mutex protecting the
* updating, starting before the write_seqcount_begin() and ending
@@ -115,6 +131,7 @@ repeat:
cpu_relax();
goto repeat;
}
+ kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);
return ret;
}
@@ -131,6 +148,7 @@ static inline unsigned raw_read_seqcount(const seqcount_t *s)
{
unsigned ret = READ_ONCE(s->sequence);
smp_rmb();
+ kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);
return ret;
}
@@ -183,6 +201,7 @@ static inline unsigned raw_seqcount_begin(const seqcount_t *s)
{
unsigned ret = READ_ONCE(s->sequence);
smp_rmb();
+ kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);
return ret & ~1;
}
@@ -202,7 +221,8 @@ static inline unsigned raw_seqcount_begin(const seqcount_t *s)
*/
static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start)
{
- return unlikely(s->sequence != start);
+ kcsan_atomic_next(0);
+ return unlikely(READ_ONCE(s->sequence) != start);
}
/**
@@ -225,6 +245,7 @@ static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
static inline void raw_write_seqcount_begin(seqcount_t *s)
{
+ kcsan_nestable_atomic_begin();
s->sequence++;
smp_wmb();
}
@@ -233,6 +254,7 @@ static inline void raw_write_seqcount_end(seqcount_t *s)
{
smp_wmb();
s->sequence++;
+ kcsan_nestable_atomic_end();
}
/**
@@ -243,6 +265,13 @@ static inline void raw_write_seqcount_end(seqcount_t *s)
* usual consistency guarantee. It is one wmb cheaper, because we can
* collapse the two back-to-back wmb()s.
*
+ * Note that writes surrounding the barrier should be declared atomic (e.g.
+ * via WRITE_ONCE): a) to ensure the writes become visible to other threads
+ * atomically, preventing harmful compiler optimizations; b) to document which
+ * writes are meant to propagate to the reader critical section. This is
+ * necessary because neither the writes before nor those after the barrier are
+ * enclosed in a seq-writer critical section that would ensure readers are
+ * aware of ongoing writes.
+ *
* seqcount_t seq;
* bool X = true, Y = false;
*
@@ -262,18 +291,20 @@ static inline void raw_write_seqcount_end(seqcount_t *s)
*
* void write(void)
* {
- * Y = true;
+ * WRITE_ONCE(Y, true);
*
* raw_write_seqcount_barrier(seq);
*
- * X = false;
+ * WRITE_ONCE(X, false);
* }
*/
static inline void raw_write_seqcount_barrier(seqcount_t *s)
{
+ kcsan_nestable_atomic_begin();
s->sequence++;
smp_wmb();
s->sequence++;
+ kcsan_nestable_atomic_end();
}
static inline int raw_read_seqcount_latch(seqcount_t *s)
@@ -398,7 +429,9 @@ static inline void write_seqcount_end(seqcount_t *s)
static inline void write_seqcount_invalidate(seqcount_t *s)
{
smp_wmb();
+ kcsan_nestable_atomic_begin();
s->sequence+=2;
+ kcsan_nestable_atomic_end();
}
typedef struct {
@@ -430,11 +463,21 @@ typedef struct {
*/
static inline unsigned read_seqbegin(const seqlock_t *sl)
{
- return read_seqcount_begin(&sl->seqcount);
+ unsigned ret = read_seqcount_begin(&sl->seqcount);
+
+ kcsan_atomic_next(0); /* non-raw usage, assume closing read_seqretry() */
+ kcsan_flat_atomic_begin();
+ return ret;
}
static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
{
+ /*
+ * Assume not nested: read_seqretry() may be called multiple times when
+ * completing a read critical section.
+ */
+ kcsan_flat_atomic_end();
+
return read_seqcount_retry(&sl->seqcount, start);
}
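Taken together, these hooks mean an ordinary seqcount read loop needs no KCSAN-specific annotations of its own. A hypothetical reader sketch (data_seq, data_x and data_y are invented for illustration):

	#include <linux/seqlock.h>

	static seqcount_t data_seq = SEQCNT_ZERO(data_seq);
	static int data_x, data_y;

	static void read_pair(int *x, int *y)
	{
		unsigned int seq;

		do {
			/* Marks the next KCSAN_SEQLOCK_REGION_MAX accesses
			 * as atomic via kcsan_atomic_next(). */
			seq = read_seqcount_begin(&data_seq);
			*x = data_x;	/* plain loads, not flagged by KCSAN */
			*y = data_y;
			/* Calls kcsan_atomic_next(0), ending the region. */
		} while (read_seqcount_retry(&data_seq, seq));
	}

A reader that performs more than KCSAN_SEQLOCK_REGION_MAX accesses before retrying would see its later plain accesses become eligible for data-race reports again; the constant is a pessimistic best-effort bound, as the comment above notes.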
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index dac1db05bf7e..7bcadca22100 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -2,9 +2,9 @@
#ifndef __LINUX_UACCESS_H__
#define __LINUX_UACCESS_H__
+#include <linux/instrumented.h>
#include <linux/sched.h>
#include <linux/thread_info.h>
-#include <linux/kasan-checks.h>
#define uaccess_kernel() segment_eq(get_fs(), KERNEL_DS)
@@ -58,7 +58,7 @@
static __always_inline __must_check unsigned long
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
- kasan_check_write(to, n);
+ instrument_copy_from_user(to, from, n);
check_object_size(to, n, false);
return raw_copy_from_user(to, from, n);
}
@@ -67,7 +67,7 @@ static __always_inline __must_check unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
might_fault();
- kasan_check_write(to, n);
+ instrument_copy_from_user(to, from, n);
check_object_size(to, n, false);
return raw_copy_from_user(to, from, n);
}
@@ -88,7 +88,7 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
static __always_inline __must_check unsigned long
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
{
- kasan_check_read(from, n);
+ instrument_copy_to_user(to, from, n);
check_object_size(from, n, true);
return raw_copy_to_user(to, from, n);
}
@@ -97,7 +97,7 @@ static __always_inline __must_check unsigned long
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
might_fault();
- kasan_check_read(from, n);
+ instrument_copy_to_user(to, from, n);
check_object_size(from, n, true);
return raw_copy_to_user(to, from, n);
}
@@ -109,7 +109,7 @@ _copy_from_user(void *to, const void __user *from, unsigned long n)
unsigned long res = n;
might_fault();
if (likely(access_ok(from, n))) {
- kasan_check_write(to, n);
+ instrument_copy_from_user(to, from, n);
res = raw_copy_from_user(to, from, n);
}
if (unlikely(res))
@@ -127,7 +127,7 @@ _copy_to_user(void __user *to, const void *from, unsigned long n)
{
might_fault();
if (access_ok(to, n)) {
- kasan_check_read(from, n);
+ instrument_copy_to_user(to, from, n);
n = raw_copy_to_user(to, from, n);
}
return n;
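The instrument_copy_from_user()/instrument_copy_to_user() helpers referenced here are added by this patch in include/linux/instrumented.h (see the diffstat). A sketch of their presumed shape, assuming they simply pair the KASAN check they replace with the corresponding KCSAN check:

	#include <linux/kasan-checks.h>
	#include <linux/kcsan-checks.h>

	static __always_inline void
	instrument_copy_from_user(const void *to, const void __user *from,
				  unsigned long n)
	{
		/* The kernel destination buffer is written. */
		kasan_check_write(to, n);
		kcsan_check_write((void *)to, n);
	}

	static __always_inline void
	instrument_copy_to_user(void __user *to, const void *from,
				unsigned long n)
	{
		/* The kernel source buffer is read. */
		kasan_check_read(from, n);
		kcsan_check_read(from, n);
	}

Funneling the sanitizer hooks through one helper keeps the call sites' access_ok() and check_object_size() logic unchanged, and means any future instrumentation only needs to touch instrumented.h rather than every uaccess path.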