#ifndef __LINUX_COMPILER_H
#define __LINUX_COMPILER_H

#ifdef __clang__
# define __user
# define __force
#else

#ifndef __ASSEMBLY__

#ifdef __CHECKER__
# define __user __attribute__((noderef, address_space(1)))
# define __kernel __attribute__((address_space(0)))
# define __safe __attribute__((safe))
# define __force __attribute__((force))
# define __nocast __attribute__((nocast))
# define __iomem __attribute__((noderef, address_space(2)))
# define __must_hold(x) __attribute__((context(x,1,1)))
# define __acquires(x) __attribute__((context(x,0,1)))
# define __releases(x) __attribute__((context(x,1,0)))
# define __acquire(x) __context__(x,1)
# define __release(x) __context__(x,-1)
# define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
# define __percpu __attribute__((noderef, address_space(3)))
# define __pmem __attribute__((noderef, address_space(5)))
#ifdef CONFIG_SPARSE_RCU_POINTER
# define __rcu __attribute__((noderef, address_space(4)))
#else
# define __rcu
#endif
extern void __chk_user_ptr(const volatile void __user *);
extern void __chk_io_ptr(const volatile void __iomem *);
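/*
 * Illustrative sketch (not part of the original header, hypothetical
 * names): the context annotations above are typically attached to a
 * lock API so sparse can track acquire/release balance:
 *
 *	void my_lock(spinlock_t *l) __acquires(l);
 *	void my_unlock(spinlock_t *l) __releases(l);
 *
 * sparse then warns about any path that acquires 'l' but fails to
 * release it (or vice versa).
 */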
#else
# define __user
# define __kernel
# define __safe
# define __force
# define __nocast
# define __iomem
# define __chk_user_ptr(x) (void)0
# define __chk_io_ptr(x) (void)0
# define __builtin_warning(x, y...) (1)
# define __must_hold(x)
# define __acquires(x)
# define __releases(x)
# define __acquire(x) (void)0
# define __release(x) (void)0
# define __cond_lock(x,c) (c)
# define __percpu
# define __rcu
# define __pmem
#endif

/* Indirect macros required for expanded argument pasting, e.g. __LINE__. */
#define ___PASTE(a,b) a##b
#define __PASTE(a,b) ___PASTE(a,b)
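/*
 * Illustrative expansion: __PASTE(foo_, __LINE__) expands __LINE__
 * first and then pastes, so on line 42 it yields foo_42.  Using
 * ___PASTE(foo_, __LINE__) directly would paste the unexpanded token
 * and yield foo___LINE__ instead.
 */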

#ifdef __KERNEL__

/*
 * Minimal backport of compiler_attributes.h to add support for __copy
 * to v4.9.y so that we can use it in init/exit_module to avoid
 * -Werror=missing-attributes errors on GCC 9.
 */
#ifndef __has_attribute
# define __has_attribute(x) __GCC4_has_attribute_##x
# define __GCC4_has_attribute___copy__ 0
#endif

#if __has_attribute(__copy__)
# define __copy(symbol) __attribute__((__copy__(symbol)))
#else
# define __copy(symbol)
#endif
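/*
 * Illustrative sketch (hypothetical names): __copy is used when one
 * symbol aliases another, so the alias inherits the target's
 * attributes and GCC 9's -Werror=missing-attributes stays quiet:
 *
 *	static int __init my_init(void) { return 0; }
 *	int init_module(void) __copy(my_init)
 *		__attribute__((alias("my_init")));
 */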

#ifdef __GNUC__
#include <linux/compiler-gcc.h>
#endif

#if defined(CC_USING_HOTPATCH) && !defined(__CHECKER__)
#define notrace __attribute__((hotpatch(0,0)))
#else
#define notrace __attribute__((no_instrument_function))
#endif

/*
 * The Intel compiler also defines __GNUC__, so here we override any
 * implementations pulled in by the header files above.
 */
#ifdef __INTEL_COMPILER
# include <linux/compiler-intel.h>
#endif

/*
 * The Clang compiler also defines __GNUC__, so here we override any
 * implementations pulled in by the header files above.
 */
#ifdef __clang__
#include <linux/compiler-clang.h>
#endif

/*
 * Generic compiler-dependent macros required for the kernel build go
 * below this comment. Actual compiler/compiler-version-specific
 * implementations come from the header files above.
 */

struct ftrace_branch_data {
	const char *func;
	const char *file;
	unsigned line;
	union {
		struct {
			unsigned long correct;
			unsigned long incorrect;
		};
		struct {
			unsigned long miss;
			unsigned long hit;
		};
		unsigned long miss_hit[2];
	};
};

/*
 * Note: DISABLE_BRANCH_PROFILING can be used by special lowlevel code
 * to disable branch tracing on a per file basis.
 */
#if defined(CONFIG_TRACE_BRANCH_PROFILING) \
    && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);

#define likely_notrace(x)	__builtin_expect(!!(x), 1)
#define unlikely_notrace(x)	__builtin_expect(!!(x), 0)

#define __branch_check__(x, expect) ({					\
			long ______r;					\
			static struct ftrace_branch_data		\
				__attribute__((__aligned__(4)))		\
				__attribute__((section("_ftrace_annotated_branch"))) \
				______f = {				\
				.func = __func__,			\
				.file = __FILE__,			\
				.line = __LINE__,			\
			};						\
			______r = likely_notrace(x);			\
			ftrace_likely_update(&______f, ______r, expect); \
			______r;					\
		})

/*
 * Using __builtin_constant_p(x) to ignore cases where the return
 * value is always the same. This idea is taken from a similar patch
 * written by Daniel Walker.
 */
# ifndef likely
#  define likely(x)	(__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 1))
# endif
# ifndef unlikely
#  define unlikely(x)	(__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 0))
# endif

#ifdef CONFIG_PROFILE_ALL_BRANCHES
/*
 * "Define 'is'", Bill Clinton
 * "Define 'if'", Steven Rostedt
 */
#define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
#define __trace_if(cond) \
	if (__builtin_constant_p(!!(cond)) ? !!(cond) :			\
	({								\
		int ______r;						\
		static struct ftrace_branch_data			\
			__attribute__((__aligned__(4)))			\
			__attribute__((section("_ftrace_branch")))	\
			______f = {					\
			.func = __func__,				\
			.file = __FILE__,				\
			.line = __LINE__,				\
		};							\
		______r = !!(cond);					\
		______f.miss_hit[______r]++;				\
		______r;						\
	}))
#endif /* CONFIG_PROFILE_ALL_BRANCHES */

#else
# define likely(x)	__builtin_expect(!!(x), 1)
# define unlikely(x)	__builtin_expect(!!(x), 0)
#endif
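/*
 * Illustrative use (hypothetical caller): annotate the expected outcome
 * of a branch so the compiler lays out the hot path first.  The hint
 * never changes semantics, only code placement:
 *
 *	if (unlikely(!buf))
 *		return -ENOMEM;
 */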

/* Optimization barrier */
#ifndef barrier
# define barrier() __memory_barrier()
#endif

#ifndef barrier_data
# define barrier_data(ptr) barrier()
#endif

/* workaround for GCC PR82365 if needed */
#ifndef barrier_before_unreachable
# define barrier_before_unreachable() do { } while (0)
#endif

/* Unreachable code */
#ifndef unreachable
# define unreachable() do { } while (1)
#endif

#ifndef RELOC_HIDE
# define RELOC_HIDE(ptr, off)					\
  ({ unsigned long __ptr;					\
     __ptr = (unsigned long) (ptr);				\
    (typeof(ptr)) (__ptr + (off)); })
#endif
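/*
 * Illustrative sketch (hypothetical symbols): RELOC_HIDE launders a
 * pointer through an unsigned long so the compiler cannot assume the
 * result still points into the original object, e.g. when applying a
 * per-CPU style offset:
 *
 *	struct my_data *p = RELOC_HIDE(&base_copy, cpu_offset);
 */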

#ifndef OPTIMIZER_HIDE_VAR
#define OPTIMIZER_HIDE_VAR(var) barrier()
#endif

/* Not-quite-unique ID. */
#ifndef __UNIQUE_ID
# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
#endif
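/*
 * For example, __UNIQUE_ID(foo) on line 42 expands to __UNIQUE_ID_foo42.
 * It is only "not quite" unique because two expansions on the same line
 * number (e.g. in different files) can still collide.
 */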

#include <uapi/linux/types.h>

#define __READ_ONCE_SIZE						\
({									\
	switch (size) {							\
	case 1: *(__u8 *)res = *(volatile __u8 *)p; break;		\
	case 2: *(__u16 *)res = *(volatile __u16 *)p; break;		\
	case 4: *(__u32 *)res = *(volatile __u32 *)p; break;		\
	case 8: *(__u64 *)res = *(volatile __u64 *)p; break;		\
	default:							\
		barrier();						\
		__builtin_memcpy((void *)res, (const void *)p, size);	\
		barrier();						\
	}								\
})

static __always_inline
void __read_once_size(const volatile void *p, void *res, int size)
{
	__READ_ONCE_SIZE;
}

#ifdef CONFIG_KASAN
/*
 * This function is not 'inline' because __no_sanitize_address conflicts
 * with inlining. Attempting to inline it may cause a build failure.
 * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
 * '__maybe_unused' allows us to avoid defined-but-not-used warnings.
 */
static __no_sanitize_address __maybe_unused
void __read_once_size_nocheck(const volatile void *p, void *res, int size)
{
	__READ_ONCE_SIZE;
}
#else
static __always_inline
void __read_once_size_nocheck(const volatile void *p, void *res, int size)
{
	__READ_ONCE_SIZE;
}
#endif

static __always_inline void __write_once_size(volatile void *p, void *res, int size)
{
	switch (size) {
	case 1:
		*(volatile __u8 *)p = *(__u8 *)res;
		break;
	case 2:
		*(volatile __u16 *)p = *(__u16 *)res;
		break;
	case 4:
		*(volatile __u32 *)p = *(__u32 *)res;
		break;
	case 8:
		*(volatile __u64 *)p = *(__u64 *)res;
		break;
	default:
		barrier();
		__builtin_memcpy((void *)p, (const void *)res, size);
		barrier();
	}
}

/*
 * Prevent the compiler from merging or refetching reads or writes. The
 * compiler is also forbidden from reordering successive instances of
 * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the
 * compiler is aware of some particular ordering. One way to make the
 * compiler aware of ordering is to put the two invocations of READ_ONCE,
 * WRITE_ONCE or ACCESS_ONCE() in different C statements.
 *
 * In contrast to ACCESS_ONCE these two macros will also work on aggregate
 * data types like structs or unions. If the size of the accessed data
 * type exceeds the word size of the machine (e.g., 32 bits or 64 bits)
 * READ_ONCE() and WRITE_ONCE() will fall back to memcpy and print a
 * compile-time warning.
 *
 * Their two major use cases are: (1) Mediating communication between
 * process-level code and irq/NMI handlers, all running on the same CPU,
 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
 * mutilate accesses that either do not require ordering or that interact
 * with an explicit memory barrier or atomic instruction that provides the
 * required ordering.
 */
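/*
 * Illustrative sketch (hypothetical variables): a flag shared between an
 * interrupt handler and process context.  Each access goes through
 * READ_ONCE()/WRITE_ONCE() so the compiler can neither cache the load in
 * a register nor tear or duplicate the store:
 *
 *	// interrupt handler
 *	WRITE_ONCE(data_ready, 1);
 *
 *	// process context
 *	while (!READ_ONCE(data_ready))
 *		cpu_relax();
 */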

#define __READ_ONCE(x, check)						\
({									\
	union { typeof(x) __val; char __c[1]; } __u;			\
	if (check)							\
		__read_once_size(&(x), __u.__c, sizeof(x));		\
	else								\
		__read_once_size_nocheck(&(x), __u.__c, sizeof(x));	\
	__u.__val;							\
})
#define READ_ONCE(x) __READ_ONCE(x, 1)

/*
 * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need
 * to hide memory access from KASAN.
 */
#define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0)

#define WRITE_ONCE(x, val) \
({							\
	union { typeof(x) __val; char __c[1]; } __u =	\
		{ .__val = (__force typeof(x)) (val) }; \
	__write_once_size(&(x), __u.__c, sizeof(x));	\
	__u.__val;					\
})

#endif /* __KERNEL__ */

#endif /* __ASSEMBLY__ */

#ifdef __KERNEL__
/*
 * Allow us to mark functions as 'deprecated' and have gcc emit a nice
 * warning for each use, in hopes of speeding the function's removal.
 * Usage is:
 *		int __deprecated foo(void)
 */
#ifndef __deprecated
# define __deprecated /* unimplemented */
#endif

#ifdef MODULE
#define __deprecated_for_modules __deprecated
#else
#define __deprecated_for_modules
#endif

#ifndef __must_check
#define __must_check
#endif

#ifndef CONFIG_ENABLE_MUST_CHECK
#undef __must_check
#define __must_check
#endif
#ifndef CONFIG_ENABLE_WARN_DEPRECATED
#undef __deprecated
#undef __deprecated_for_modules
#define __deprecated
#define __deprecated_for_modules
#endif

/*
 * Allow us to avoid 'defined but not used' warnings on functions and data,
 * as well as force them to be emitted to the assembly file.
 *
 * As of gcc 3.4, static functions that are not marked with attribute((used))
 * may be elided from the assembly file. As of gcc 3.4, static data not so
 * marked will not be elided, but this may change in a future gcc version.
 *
 * NOTE: Because distributions shipped with a backported unit-at-a-time
 * compiler in gcc 3.3, we must define __used to be __attribute__((used))
 * for gcc >=3.3 instead of 3.4.
 *
 * In prior versions of gcc, such functions and data would be emitted, but
 * would be warned about except with attribute((unused)).
 *
 * Mark functions that are referenced only in inline assembly as __used so
 * the code is emitted even though it appears to be unreferenced.
 */
#ifndef __used
# define __used /* unimplemented */
#endif

#ifndef __maybe_unused
# define __maybe_unused /* unimplemented */
#endif

#ifndef __always_unused
# define __always_unused /* unimplemented */
#endif

#ifndef noinline
#define noinline
#endif

/*
 * Rather than using noinline to prevent stack consumption, use
 * noinline_for_stack instead, for documentation reasons.
 */
#define noinline_for_stack noinline

#ifndef __always_inline
#define __always_inline inline
#endif

#endif /* __KERNEL__ */

/*
 * From the GCC manual:
 *
 * Many functions do not examine any values except their arguments,
 * and have no effects except the return value.  Basically this is
 * just slightly more strict class than the `pure' attribute above,
 * since function is not allowed to read global memory.
 *
 * Note that a function that has pointer arguments and examines the
 * data pointed to must _not_ be declared `const'.  Likewise, a
 * function that calls a non-`const' function usually must not be
 * `const'.  It does not make sense for a `const' function to return
 * `void'.
 */
#ifndef __attribute_const__
# define __attribute_const__ /* unimplemented */
#endif

/*
 * Tell gcc if a function is cold. The compiler will assume any path
 * directly leading to the call is unlikely.
 */

#ifndef __cold
#define __cold
#endif

/* Simple shorthand for a section definition */
#ifndef __section
# define __section(S) __attribute__ ((__section__(#S)))
#endif

#ifndef __visible
#define __visible
#endif

/*
 * Assume alignment of return value.
 */
#ifndef __assume_aligned
#define __assume_aligned(a, ...)
#endif


/* Are two types/vars the same type (ignoring qualifiers)? */
#ifndef __same_type
# define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
#endif
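/*
 * Illustrative use of __same_type (hypothetical macro name): reject a
 * pointer where a true array is expected, the trick behind the kernel's
 * ARRAY_SIZE():
 *
 *	#define must_be_array(a) \
 *		BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
 */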

/* Is this type a native word size -- useful for atomic operations */
#ifndef __native_word
# define __native_word(t) (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
#endif

/* Compile time object size, -1 for unknown */
#ifndef __compiletime_object_size
# define __compiletime_object_size(obj) -1
#endif
#ifndef __compiletime_warning
# define __compiletime_warning(message)
#endif
#ifndef __compiletime_error
# define __compiletime_error(message)
/*
 * Sparse complains of variable sized arrays due to the temporary variable in
 * __compiletime_assert. Unfortunately we can't just expand it out to make
 * sparse see a constant array size without breaking compiletime_assert on old
 * versions of GCC (e.g. 4.2.4), so hide the array from sparse altogether.
 */
# ifndef __CHECKER__
#  define __compiletime_error_fallback(condition) \
	do { ((void)sizeof(char[1 - 2 * condition])); } while (0)
# endif
#endif
#ifndef __compiletime_error_fallback
# define __compiletime_error_fallback(condition) do { } while (0)
#endif
#define __compiletime_assert(condition, msg, prefix, suffix)		\
	do {								\
		bool __cond = !(condition);				\
		extern void prefix ## suffix(void) __compiletime_error(msg); \
		if (__cond)						\
			prefix ## suffix();				\
		__compiletime_error_fallback(__cond);			\
	} while (0)

#define _compiletime_assert(condition, msg, prefix, suffix) \
	__compiletime_assert(condition, msg, prefix, suffix)

/**
 * compiletime_assert - break build and emit msg if condition is false
 * @condition: a compile-time constant condition to check
 * @msg: a message to emit if condition is false
 *
 * In the tradition of the POSIX assert, this macro will break the build
 * if the supplied condition is *false*, emitting the supplied error
 * message if the compiler has support to do so.
 */
#define compiletime_assert(condition, msg) \
	_compiletime_assert(condition, msg, __compiletime_assert_, __LINE__)

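/*
 * Illustrative use (hypothetical struct): inside a function, guard an
 * ABI-critical layout assumption at compile time instead of at runtime:
 *
 *	compiletime_assert(sizeof(struct my_hdr) == 64,
 *			   "struct my_hdr must stay 64 bytes");
 */
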
#define compiletime_assert_atomic_type(t)				\
	compiletime_assert(__native_word(t),				\
		"Need native word sized stores/loads for atomicity.")

/*
 * Prevent the compiler from merging or refetching accesses.  The compiler
 * is also forbidden from reordering successive instances of ACCESS_ONCE(),
 * but only when the compiler is aware of some particular ordering.  One way
 * to make the compiler aware of ordering is to put the two invocations of
 * ACCESS_ONCE() in different C statements.
 *
 * ACCESS_ONCE will only work on scalar types. For union types, ACCESS_ONCE
 * on a union member will work as long as the size of the member matches the
 * size of the union and the size is smaller than word size.
 *
 * The major use cases of ACCESS_ONCE used to be (1) Mediating communication
 * between process-level code and irq/NMI handlers, all running on the same CPU,
 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
 * mutilate accesses that either do not require ordering or that interact
 * with an explicit memory barrier or atomic instruction that provides the
 * required ordering.
 *
 * If possible use READ_ONCE()/WRITE_ONCE() instead.
 */
#define __ACCESS_ONCE(x) ({ \
	 __maybe_unused typeof(x) __var = (__force typeof(x)) 0; \
	(volatile typeof(x) *)&(x); })
#define ACCESS_ONCE(x) (*__ACCESS_ONCE(x))
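/*
 * Illustrative use (hypothetical flag) of ACCESS_ONCE on a scalar; new
 * code should prefer READ_ONCE()/WRITE_ONCE() as noted above:
 *
 *	while (!ACCESS_ONCE(stop_requested))
 *		cpu_relax();
 */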

/**
 * lockless_dereference() - safely load a pointer for later dereference
 * @p: The pointer to load
 *
 * Similar to rcu_dereference(), but for situations where the pointed-to
 * object's lifetime is managed by something other than RCU.  That
 * "something other" might be reference counting or simple immortality.
 */
#define lockless_dereference(p) \
({ \
	typeof(p) _________p1 = READ_ONCE(p); \
	smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
	(_________p1); \
})
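/*
 * Illustrative sketch (hypothetical globals): load a refcounted pointer
 * once, with dependency ordering, before dereferencing it:
 *
 *	struct foo *f = lockless_dereference(global_foo);
 *	if (f)
 *		use(f->field);
 */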

/* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */
#ifdef CONFIG_KPROBES
# define __kprobes __attribute__((__section__(".kprobes.text")))
# define nokprobe_inline __always_inline
#else
# define __kprobes
# define nokprobe_inline inline
#endif

#endif /* __clang__ */
#endif /* __LINUX_COMPILER_H */