/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_COMPILER_TYPES_H
#define __LINUX_COMPILER_TYPES_H
/*
* __has_builtin is supported on gcc >= 10, clang >= 3 and icc >= 21.
* In the meantime, to support gcc < 10, we implement __has_builtin
* by hand.
*/
#ifndef __has_builtin
#define __has_builtin(x) (0)
#endif
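/*
 * Illustrative sketch (hypothetical my_likely() helper, not part of this
 * header): with the fallback above, builtin feature tests degrade
 * gracefully on gcc < 10:
 *
 *	#if __has_builtin(__builtin_expect)
 *	# define my_likely(x)	__builtin_expect(!!(x), 1)
 *	#else
 *	# define my_likely(x)	(x)
 *	#endif
 *
 * On compilers without __has_builtin the fallback evaluates to 0, so the
 * plain definition is selected and the code still compiles.
 */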
#ifndef __ASSEMBLY__
/*
* Skipped when running bindgen due to a libclang issue;
* see https://github.com/rust-lang/rust-bindgen/issues/2244.
*/
#if defined(CONFIG_DEBUG_INFO_BTF) && defined(CONFIG_PAHOLE_HAS_BTF_TAG) && \
__has_attribute(btf_type_tag) && !defined(__BINDGEN__)
# define BTF_TYPE_TAG(value) __attribute__((btf_type_tag(#value)))
#else
# define BTF_TYPE_TAG(value) /* nothing */
#endif
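/*
 * Illustrative expansion: with the tag enabled, a declaration such as
 *
 *	void BTF_TYPE_TAG(user) *uptr;
 *
 * becomes 'void __attribute__((btf_type_tag("user"))) *uptr;', recording
 * the address space in BTF debug info; otherwise the tag expands to
 * nothing and the declaration is unchanged.
 */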
/* sparse defines __CHECKER__; see Documentation/dev-tools/sparse.rst */
#ifdef __CHECKER__
/* address spaces */
# define __kernel __attribute__((address_space(0)))
# define __user __attribute__((noderef, address_space(__user)))
# define __iomem __attribute__((noderef, address_space(__iomem)))
# define __percpu __attribute__((noderef, address_space(__percpu)))
# define __rcu __attribute__((noderef, address_space(__rcu)))
static inline void __chk_user_ptr(const volatile void __user *ptr) { }
static inline void __chk_io_ptr(const volatile void __iomem *ptr) { }
/* context/locking */
# define __must_hold(x) __attribute__((context(x,1,1)))
# define __acquires(x) __attribute__((context(x,0,1)))
# define __cond_acquires(x) __attribute__((context(x,0,-1)))
# define __releases(x) __attribute__((context(x,1,0)))
# define __acquire(x) __context__(x,1)
# define __release(x) __context__(x,-1)
# define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
/* other */
# define __force __attribute__((force))
# define __nocast __attribute__((nocast))
# define __safe __attribute__((safe))
# define __private __attribute__((noderef))
# define ACCESS_PRIVATE(p, member) (*((typeof((p)->member) __force *) &(p)->member))
#else /* __CHECKER__ */
/* address spaces */
# define __kernel
# ifdef STRUCTLEAK_PLUGIN
# define __user __attribute__((user))
# else
# define __user BTF_TYPE_TAG(user)
# endif
# define __iomem
# define __percpu BTF_TYPE_TAG(percpu)
# define __rcu BTF_TYPE_TAG(rcu)
# define __chk_user_ptr(x) (void)0
# define __chk_io_ptr(x) (void)0
/* context/locking */
# define __must_hold(x)
# define __acquires(x)
# define __cond_acquires(x)
# define __releases(x)
# define __acquire(x) (void)0
# define __release(x) (void)0
# define __cond_lock(x,c) (c)
/* other */
# define __force
# define __nocast
# define __safe
# define __private
# define ACCESS_PRIVATE(p, member) ((p)->member)
# define __builtin_warning(x, y...) (1)
#endif /* __CHECKER__ */
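/*
 * Illustrative sketch (hypothetical struct/function names): the context
 * annotations let sparse check lock balance across function boundaries:
 *
 *	void take(struct foo *f) __acquires(&f->lock);
 *	void drop(struct foo *f) __releases(&f->lock);
 *	void poke(struct foo *f) __must_hold(&f->lock);
 *
 * sparse then warns if e.g. poke() is reached on a path that never called
 * take(), or if a function exits with an unbalanced lock context.
 */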
/* Indirect macros required for expanded argument pasting, eg. __LINE__. */
#define ___PASTE(a,b) a##b
#define __PASTE(a,b) ___PASTE(a,b)
#ifdef __KERNEL__
/* Attributes */
#include <linux/compiler_attributes.h>
#if CONFIG_FUNCTION_ALIGNMENT > 0
#define __function_aligned __aligned(CONFIG_FUNCTION_ALIGNMENT)
#else
#define __function_aligned
#endif
/*
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-cold-function-attribute
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Label-Attributes.html#index-cold-label-attribute
*
* When -falign-functions=N is in use, we must avoid the cold attribute as
* GCC drops the alignment for cold functions. Worse, GCC can implicitly mark
* callees of cold functions as cold themselves, so it's not sufficient to add
* __function_aligned here as that will not ensure that callees are correctly
* aligned.
*
* See:
*
* https://lore.kernel.org/lkml/Y77%2FqVgvaJidFpYt@FVFF77S0Q05N
* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=88345#c9
*/
#if defined(CONFIG_CC_HAS_SANE_FUNCTION_ALIGNMENT) || (CONFIG_FUNCTION_ALIGNMENT == 0)
#define __cold __attribute__((__cold__))
#else
#define __cold
#endif
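/*
 * Illustrative use (hypothetical function): an unlikely error path can be
 * marked __cold so the compiler moves it out of the hot text:
 *
 *	static void __cold report_corruption(const char *what);
 */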
/*
* On x86-64 and arm64 targets, __preserve_most changes the calling convention
* of a function to make the code in the caller as unintrusive as possible. This
* convention behaves identically to the C calling convention on how arguments
* and return values are passed, but uses a different set of caller- and callee-
* saved registers.
*
 * The purpose is to alleviate the burden of saving and restoring a large
* register set before and after the call in the caller. This is beneficial for
* rarely taken slow paths, such as error-reporting functions that may be called
* from hot paths.
*
* Note: This may conflict with instrumentation inserted on function entry which
* does not use __preserve_most or equivalent convention (if in assembly). Since
* function tracing assumes the normal C calling convention, where the attribute
* is supported, __preserve_most implies notrace. It is recommended to restrict
* use of the attribute to functions that should or already disable tracing.
*
* Optional: not supported by gcc.
*
* clang: https://clang.llvm.org/docs/AttributeReference.html#preserve-most
*/
#if __has_attribute(__preserve_most__) && (defined(CONFIG_X86_64) || defined(CONFIG_ARM64))
# define __preserve_most notrace __attribute__((__preserve_most__))
#else
# define __preserve_most
#endif
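/*
 * Illustrative use (hypothetical function): a rarely taken slow path
 * called from hot code keeps the caller's register pressure low:
 *
 *	void __preserve_most log_rare_failure(int err);
 *
 * Where the attribute is unsupported, __preserve_most expands to nothing
 * and the function simply uses the normal C calling convention.
 */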
/*
* Annotating a function/variable with __retain tells the compiler to place
* the object in its own section and set the flag SHF_GNU_RETAIN. This flag
 * instructs the linker to retain the object during garbage collection
 * (e.g. --gc-sections) or LTO phases.
*
* Note that the __used macro is also used to prevent functions or data
* being optimized out, but operates at the compiler/IR-level and may still
* allow unintended removal of objects during linking.
*
* Optional: only supported since gcc >= 11, clang >= 13
*
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-retain-function-attribute
* clang: https://clang.llvm.org/docs/AttributeReference.html#retain
*/
#if __has_attribute(__retain__) && \
(defined(CONFIG_LD_DEAD_CODE_DATA_ELIMINATION) || \
defined(CONFIG_LTO_CLANG))
# define __retain __attribute__((__retain__))
#else
# define __retain
#endif
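/*
 * Illustrative use (hypothetical symbol): keep an object alive even when
 * nothing in the program references it directly:
 *
 *	static const int my_probe_id __used __retain = 42;
 *
 * __used stops the compiler from dropping it; __retain additionally stops
 * the linker from garbage-collecting its section.
 */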
/* Compiler specific macros. */
#ifdef __clang__
#include <linux/compiler-clang.h>
#elif defined(__GNUC__)
/* The above compilers also define __GNUC__, so order is important here. */
#include <linux/compiler-gcc.h>
#else
#error "Unknown compiler"
#endif
/*
* Some architectures need to provide custom definitions of macros provided
* by linux/compiler-*.h, and can do so using asm/compiler.h. We include that
* conditionally rather than using an asm-generic wrapper in order to avoid
* build failures if any C compilation, which will include this file via an
* -include argument in c_flags, occurs prior to the asm-generic wrappers being
* generated.
*/
#ifdef CONFIG_HAVE_ARCH_COMPILER_H
#include <asm/compiler.h>
#endif
struct ftrace_branch_data {
const char *func;
const char *file;
unsigned line;
union {
struct {
unsigned long correct;
unsigned long incorrect;
};
struct {
unsigned long miss;
unsigned long hit;
};
unsigned long miss_hit[2];
};
};
struct ftrace_likely_data {
struct ftrace_branch_data data;
unsigned long constant;
};
#if defined(CC_USING_HOTPATCH)
#define notrace __attribute__((hotpatch(0, 0)))
#elif defined(CC_USING_PATCHABLE_FUNCTION_ENTRY)
#define notrace __attribute__((patchable_function_entry(0, 0)))
#else
#define notrace __attribute__((__no_instrument_function__))
#endif
/*
* it doesn't make sense on ARM (currently the only user of __naked)
* to trace naked functions because then mcount is called without
* stack and frame pointer being set up and there is no chance to
* restore the lr register to the value before mcount was called.
*/
#define __naked __attribute__((__naked__)) notrace
/*
* Prefer gnu_inline, so that extern inline functions do not emit an
* externally visible function. This makes extern inline behave as per gnu89
* semantics rather than c99. This prevents multiple symbol definition errors
* of extern inline functions at link time.
* A lot of inline functions can cause havoc with function tracing.
*/
#define inline inline __gnu_inline __inline_maybe_unused notrace
/*
* gcc provides both __inline__ and __inline as alternate spellings of
* the inline keyword, though the latter is undocumented. New kernel
* code should only use the inline spelling, but some existing code
* uses __inline__. Since we #define inline above, to ensure
* __inline__ has the same semantics, we need this #define.
*
* However, the spelling __inline is strictly reserved for referring
* to the bare keyword.
*/
#define __inline__ inline
/*
* GCC does not warn about unused static inline functions for -Wunused-function.
* Suppress the warning in clang as well by using __maybe_unused, but enable it
 * for W=1 builds. This will allow clang to find unused functions. Remove the
* __inline_maybe_unused entirely after fixing most of -Wunused-function warnings.
*/
#ifdef KBUILD_EXTRA_WARN1
#define __inline_maybe_unused
#else
#define __inline_maybe_unused __maybe_unused
#endif
/*
 * Rather than using noinline to prevent stack consumption, use
 * noinline_for_stack instead, for documentation reasons.
*/
#define noinline_for_stack noinline
/*
* Sanitizer helper attributes: Because using __always_inline and
* __no_sanitize_* conflict, provide helper attributes that will either expand
* to __no_sanitize_* in compilation units where instrumentation is enabled
* (__SANITIZE_*__), or __always_inline in compilation units without
* instrumentation (__SANITIZE_*__ undefined).
*/
#ifdef __SANITIZE_ADDRESS__
/*
 * We can't declare the function 'inline' because __no_sanitize_address
 * conflicts with inlining; attempting to inline it may cause a build failure:
* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
* '__maybe_unused' allows us to avoid defined-but-not-used warnings.
*/
# define __no_kasan_or_inline __no_sanitize_address notrace __maybe_unused
# define __no_sanitize_or_inline __no_kasan_or_inline
#else
# define __no_kasan_or_inline __always_inline
#endif
#ifdef __SANITIZE_THREAD__
/*
* Clang still emits instrumentation for __tsan_func_{entry,exit}() and builtin
* atomics even with __no_sanitize_thread (to avoid false positives in userspace
* ThreadSanitizer). The kernel's requirements are stricter and we really do not
* want any instrumentation with __no_kcsan.
*
* Therefore we add __disable_sanitizer_instrumentation where available to
* disable all instrumentation. See Kconfig.kcsan where this is mandatory.
*/
# define __no_kcsan __no_sanitize_thread __disable_sanitizer_instrumentation
/*
* Type qualifier to mark variables where all data-racy accesses should be
* ignored by KCSAN. Note, the implementation simply marks these variables as
* volatile, since KCSAN will treat such accesses as "marked".
*/
# define __data_racy volatile
# define __no_sanitize_or_inline __no_kcsan notrace __maybe_unused
#else
# define __no_kcsan
# define __data_racy
#endif
#ifdef __SANITIZE_MEMORY__
/*
* Similarly to KASAN and KCSAN, KMSAN loses function attributes of inlined
* functions, therefore disabling KMSAN checks also requires disabling inlining.
*
* __no_sanitize_or_inline effectively prevents KMSAN from reporting errors
* within the function and marks all its outputs as initialized.
*/
# define __no_sanitize_or_inline __no_kmsan_checks notrace __maybe_unused
#endif
#ifndef __no_sanitize_or_inline
#define __no_sanitize_or_inline __always_inline
#endif
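/*
 * Illustrative use (hypothetical helper): a raw access that must never be
 * instrumented, yet should inline away in non-sanitizer builds:
 *
 *	static __no_sanitize_or_inline
 *	unsigned long raw_load(const unsigned long *p)
 *	{
 *		return *p;
 *	}
 *
 * Under KASAN/KCSAN/KMSAN this becomes an uninlined, uninstrumented
 * function; otherwise it is __always_inline with no overhead.
 */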
/*
 * Apply __counted_by() when the endianness matches, to increase test coverage.
*/
#ifdef __LITTLE_ENDIAN
#define __counted_by_le(member) __counted_by(member)
#define __counted_by_be(member)
#else
#define __counted_by_le(member)
#define __counted_by_be(member) __counted_by(member)
#endif
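/*
 * Illustrative sketch (hypothetical wire-format struct): a little-endian
 * on-the-wire count only bounds the array on little-endian builds:
 *
 *	struct pkt {
 *		__le16 count;
 *		u8 data[] __counted_by_le(count);
 *	};
 */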
/* Do not trap wrapping arithmetic within an annotated function. */
#ifdef CONFIG_UBSAN_SIGNED_WRAP
# define __signed_wrap __attribute__((no_sanitize("signed-integer-overflow")))
#else
# define __signed_wrap
#endif
/* Section for code which can't be instrumented at all */
#define __noinstr_section(section) \
noinline notrace __attribute((__section__(section))) \
__no_kcsan __no_sanitize_address __no_profile __no_sanitize_coverage \
__no_sanitize_memory __signed_wrap
#define noinstr __noinstr_section(".noinstr.text")
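/*
 * Illustrative use (hypothetical function): early entry code runs before
 * instrumentation is safe, so it must live in the noinstr section:
 *
 *	noinstr void handle_early_exception(struct pt_regs *regs)
 *	{
 *		...
 *	}
 */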
/*
* The __cpuidle section is used twofold:
*
* 1) the original use -- identifying if a CPU is 'stuck' in idle state based
 * on its instruction pointer. See cpu_in_idle().
*
 * 2) suppressing instrumentation around where cpuidle disables RCU; where the
* function isn't strictly required for #1, this is interchangeable with
* noinstr.
*/
#define __cpuidle __noinstr_section(".cpuidle.text")
#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */
/*
* The below symbols may be defined for one or more, but not ALL, of the above
* compilers. We don't consider that to be an error, so set them to nothing.
* For example, some of them are for compiler specific plugins.
*/
#ifndef __latent_entropy
# define __latent_entropy
#endif
#if defined(RANDSTRUCT) && !defined(__CHECKER__)
# define __randomize_layout __designated_init __attribute__((randomize_layout))
# define __no_randomize_layout __attribute__((no_randomize_layout))
/* This anon struct can add padding, so only enable it under randstruct. */
# define randomized_struct_fields_start struct {
# define randomized_struct_fields_end } __randomize_layout;
#else
# define __randomize_layout __designated_init
# define __no_randomize_layout
# define randomized_struct_fields_start
# define randomized_struct_fields_end
#endif
#ifndef __noscs
# define __noscs
#endif
#ifndef __nocfi
# define __nocfi
#endif
/*
* Any place that could be marked with the "alloc_size" attribute is also
* a place to be marked with the "malloc" attribute, except those that may
* be performing a _reallocation_, as that may alias the existing pointer.
* For these, use __realloc_size().
*/
#ifdef __alloc_size__
# define __alloc_size(x, ...) __alloc_size__(x, ## __VA_ARGS__) __malloc
# define __realloc_size(x, ...) __alloc_size__(x, ## __VA_ARGS__)
#else
# define __alloc_size(x, ...) __malloc
# define __realloc_size(x, ...)
#endif
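/*
 * Illustrative use (hypothetical allocator): the parameter index names
 * which argument carries the allocation size:
 *
 *	void *my_alloc(size_t size) __alloc_size(1);
 *	void *my_resize(void *p, size_t new_size) __realloc_size(2);
 *
 * my_resize() must not get __malloc (implied by __alloc_size) because its
 * return value may alias the pointer passed in.
 */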
/*
* When the size of an allocated object is needed, use the best available
* mechanism to find it. (For cases where sizeof() cannot be used.)
*/
#if __has_builtin(__builtin_dynamic_object_size)
#define __struct_size(p) __builtin_dynamic_object_size(p, 0)
#define __member_size(p) __builtin_dynamic_object_size(p, 1)
#else
#define __struct_size(p) __builtin_object_size(p, 0)
#define __member_size(p) __builtin_object_size(p, 1)
#endif
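/*
 * Illustrative sketch (hypothetical struct): given
 *
 *	struct blob { int len; char data[8]; } *b;
 *
 * __struct_size(b) reports the size of the whole object *b points to,
 * while __member_size(b->data) is bounded to the data[] member alone.
 */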
/* Determine if an attribute has been applied to a variable. */
#if __has_builtin(__builtin_has_attribute)
#define __annotated(var, attr) __builtin_has_attribute(var, attr)
#else
#define __annotated(var, attr) (false)
#endif
/*
* Some versions of gcc do not mark 'asm goto' volatile:
*
* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=103979
*
* We do it here by hand, because it doesn't hurt.
*/
#ifndef asm_goto_output
#define asm_goto_output(x...) asm volatile goto(x)
#endif
/*
* Clang has trouble with constraints with multiple
* alternative behaviors (mainly "g" and "rm").
*/
#ifndef ASM_INPUT_G
#define ASM_INPUT_G "g"
#define ASM_INPUT_RM "rm"
#endif
#ifdef CONFIG_CC_HAS_ASM_INLINE
#define asm_inline asm __inline
#else
#define asm_inline asm
#endif
/* Are two types/vars the same type (ignoring qualifiers)? */
#define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
/*
* __unqual_scalar_typeof(x) - Declare an unqualified scalar type, leaving
* non-scalar types unchanged.
*/
/*
* Prefer C11 _Generic for better compile-times and simpler code. Note: 'char'
* is not type-compatible with 'signed char', and we define a separate case.
*/
#define __scalar_type_to_expr_cases(type) \
unsigned type: (unsigned type)0, \
signed type: (signed type)0
#define __unqual_scalar_typeof(x) typeof( \
_Generic((x), \
char: (char)0, \
__scalar_type_to_expr_cases(char), \
__scalar_type_to_expr_cases(short), \
__scalar_type_to_expr_cases(int), \
__scalar_type_to_expr_cases(long), \
__scalar_type_to_expr_cases(long long), \
default: (x)))
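/*
 * Illustrative sketch: qualifiers on scalars are dropped, so a marked
 * access can be copied into an ordinary local:
 *
 *	volatile int v;
 *	__unqual_scalar_typeof(v) tmp = v;
 *
 * Here tmp has plain type 'int'; non-scalar types fall through to the
 * 'default:' arm and keep their qualifiers.
 */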
/* Is this type a native word size -- useful for atomic operations */
#define __native_word(t) \
(sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || \
sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
#ifdef __OPTIMIZE__
# define __compiletime_assert(condition, msg, prefix, suffix) \
do { \
/* \
* __noreturn is needed to give the compiler enough \
* information to avoid certain possibly-uninitialized \
* warnings (regardless of the build failing). \
*/ \
__noreturn extern void prefix ## suffix(void) \
__compiletime_error(msg); \
if (!(condition)) \
prefix ## suffix(); \
} while (0)
#else
# define __compiletime_assert(condition, msg, prefix, suffix) do { } while (0)
#endif
#define _compiletime_assert(condition, msg, prefix, suffix) \
__compiletime_assert(condition, msg, prefix, suffix)
/**
* compiletime_assert - break build and emit msg if condition is false
* @condition: a compile-time constant condition to check
* @msg: a message to emit if condition is false
*
 * In the tradition of POSIX assert(), this macro breaks the build if the
 * supplied condition is *false*, emitting the supplied error message if the
 * compiler has support to do so.
*/
#define compiletime_assert(condition, msg) \
_compiletime_assert(condition, msg, __compiletime_assert_, __COUNTER__)
#define compiletime_assert_atomic_type(t) \
compiletime_assert(__native_word(t), \
"Need native word sized stores/loads for atomicity.")
/* Helpers for emitting diagnostics in pragmas. */
#ifndef __diag
#define __diag(string)
#endif
#ifndef __diag_GCC
#define __diag_GCC(version, severity, string)
#endif
#define __diag_push() __diag(push)
#define __diag_pop() __diag(pop)
#define __diag_ignore(compiler, version, option, comment) \
__diag_ ## compiler(version, ignore, option)
#define __diag_warn(compiler, version, option, comment) \
__diag_ ## compiler(version, warn, option)
#define __diag_error(compiler, version, option, comment) \
__diag_ ## compiler(version, error, option)
#ifndef __diag_ignore_all
#define __diag_ignore_all(option, comment)
#endif
#endif /* __LINUX_COMPILER_TYPES_H */