xref: /optee_os/lib/libutils/ext/include/compiler.h (revision 764994e40843a9d734bf7df504d0f038fbff7be9)
1 /* SPDX-License-Identifier: BSD-2-Clause */
2 /*
3  * Copyright (c) 2014, STMicroelectronics International N.V.
4  */
5 
6 #ifndef COMPILER_H
7 #define COMPILER_H
8 
9 /*
10  * Macros that should be used instead of using __attribute__ directly to
11  * ease portability and make the code easier to read.
12  *
13  * Some of the defines below is known to sometimes cause conflicts when
14  * this file is included from xtest in normal world. It is assumed that
15  * the conflicting defines has the same meaning in that environment.
16  * Surrounding the troublesome defines with #ifndef should be enough.
17  */
18 
#ifndef __has_attribute
/* Fallback for compilers without __has_attribute(): report "not supported" */
#define __has_attribute(x) 0
#endif

/* Any use of the symbol triggers a compile-time deprecation warning */
#define __deprecated	__attribute__((deprecated))
#ifndef __packed
/* Remove padding between struct/union members */
#define __packed	__attribute__((packed))
#endif
/* Weak symbol: may be overridden by a non-weak definition elsewhere */
#define __weak		__attribute__((weak))
/* Make this symbol an alias for the symbol named by the string x */
#define __alias(x)	__attribute__((alias(x)))
#ifndef __noreturn
/* The function never returns to its caller */
#define __noreturn	__attribute__((__noreturn__))
#endif
32 
#ifndef __no_stack_protector
#if __has_attribute(no_stack_protector)
/* Disable stack-protector instrumentation for the annotated function */
#define __no_stack_protector __attribute__((no_stack_protector))
#else
/* Attribute not supported by this compiler: expand to nothing */
#define __no_stack_protector
#endif
#endif
40 
/* No side effects; result depends only on arguments and global memory */
#define __pure		__attribute__((pure))
/* Minimum alignment of the symbol, in bytes */
#define __aligned(x)	__attribute__((aligned(x)))
/* Type-check variadic arguments against a printf-style format string:
 * a = 1-based index of the format string, b = index of the first argument */
#define __printf(a, b)	__attribute__((format(printf, a, b)))
/* Never inline this function */
#define __noinline	__attribute__((noinline))
/* Stricter than __pure: result depends on the argument values only */
#define __attr_const	__attribute__((__const__))
#ifndef __unused
/* Suppress "unused" warnings for the symbol */
#define __unused	__attribute__((unused))
#endif
/* Same expansion as __unused; name documents conditional use (e.g. #ifdef) */
#define __maybe_unused	__attribute__((unused))
#ifndef __used
/* Force the symbol to be emitted even if it appears unreferenced */
#define __used		__attribute__((__used__))
#endif
/* Warn when a caller discards the return value */
#define __must_check	__attribute__((warn_unused_result))
/* Rarely executed: optimized for size, may be placed in a cold section */
#define __cold		__attribute__((__cold__))
/* Place the symbol in the named linker section */
#define __section(x)	__attribute__((section(x)))
#define __data		__section(".data")
#define __bss		__section(".bss")
#ifdef __clang__
/*
 * NOTE(review): no flags override for clang — presumably clang emits
 * acceptable section flags on its own; confirm against the build.
 */
#define __SECTION_FLAGS_RODATA
#else
/*
 * Override sections flags/type generated by the C compiler to make sure they
 * are: "a",%progbits (thus creating an allocatable, non-writeable, non-
 * executable data section).
 * The trailing COMMENT_CHAR comments out the flags generated by the compiler.
 * This avoids a harmless warning with GCC.
 */
#if defined(__aarch64__) || defined(__arm__)
/* Arm assemblers use "//" for line comments, most others use "#" */
#define COMMENT_CHAR "//"
#else
#define COMMENT_CHAR "#"
#endif
#define __SECTION_FLAGS_RODATA ",\"a\",%progbits " COMMENT_CHAR
#endif
/* Place read-only data in .rodata or named sub-sections of it */
#define __rodata	__section(".rodata" __SECTION_FLAGS_RODATA)
#define __rodata_dummy	__section(".rodata.dummy" __SECTION_FLAGS_RODATA)
#define __rodata_unpaged(x) \
	__section(".rodata.__unpaged." x __SECTION_FLAGS_RODATA)
#ifdef CFG_CORE_ASLR
/*
 * With ASLR the data presumably needs run-time relocation, so it goes in
 * .data.rel.ro (writable during relocation, read-only afterwards) —
 * TODO confirm against the linker script.
 */
#define __relrodata_unpaged(x) __section(".data.rel.ro.__unpaged." x)
#else
#define __relrodata_unpaged(x) __rodata_unpaged(x)
#endif
#ifdef CFG_NS_VIRTUALIZATION
/*
 * NOTE(review): "nex" sections appear to hold data shared across
 * partitions when NS virtualization is enabled — verify exact semantics
 * against the linker script and virtualization code.
 */
#define __nex_bss		__section(".nex_bss")
#define __nex_data		__section(".nex_data")
#else  /* CFG_NS_VIRTUALIZATION */
/* Without virtualization, nex data is ordinary .bss/.data */
#define __nex_bss
#define __nex_data
#endif	/* CFG_NS_VIRTUALIZATION */
/*
 * Instrumentation control: keep the annotated function out of profiling
 * and stack-checking hooks (both map to no_instrument_function).
 */
#define __noprof	__attribute__((no_instrument_function))
#define __nostackcheck	__attribute__((no_instrument_function))

/* Byte-order swap helpers, mapped straight onto the compiler builtins */
#define __compiler_bswap16(x)	__builtin_bswap16((x))
#define __compiler_bswap32(x)	__builtin_bswap32((x))
#define __compiler_bswap64(x)	__builtin_bswap64((x))
97 
/* Composite GCC version number, e.g. GCC 8.3.0 -> 80300 */
#define __GCC_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + \
		       __GNUC_PATCHLEVEL__)

/*
 * GCC >= 5.1 provides the __builtin_*_overflow() family. __CHECKER__ is
 * defined when running the sparse static checker, which lacks them.
 */
#if __GCC_VERSION >= 50100 && !defined(__CHECKER__)
#define __HAVE_BUILTIN_OVERFLOW 1
#endif

/*
 * Clang (and newer GCC) can be probed directly via __has_builtin.
 * Redefining __HAVE_BUILTIN_OVERFLOW with an identical expansion is
 * legal, so no #ifndef is needed here.
 */
#ifdef __has_builtin
#if __has_builtin(__builtin_add_overflow) && \
    __has_builtin(__builtin_sub_overflow) && \
    __has_builtin(__builtin_mul_overflow)
#define __HAVE_BUILTIN_OVERFLOW 1
#endif
#endif

/* GCC >= 9.1 accepts _Static_assert() with no message argument */
#if __GCC_VERSION >= 90100 && !defined(__CHECKER__)
#define __HAVE_SINGLE_ARGUMENT_STATIC_ASSERT 1
#endif
116 
#ifdef __HAVE_BUILTIN_OVERFLOW
/*
 * Checked arithmetic via the compiler builtins: compute a op b, store
 * the result through res and evaluate to non-zero on overflow.
 */
#define __compiler_add_overflow(a, b, res) \
	__builtin_add_overflow((a), (b), (res))

#define __compiler_sub_overflow(a, b, res) \
	__builtin_sub_overflow((a), (b), (res))

#define __compiler_mul_overflow(a, b, res) \
	__builtin_mul_overflow((a), (b), (res))
#else /*!__HAVE_BUILTIN_OVERFLOW*/
127 
128 /*
129  * Copied/inspired from https://www.fefe.de/intof.html
130  */
131 
132 #define __INTOF_ASSIGN(dest, src) (__extension__({ \
133 	typeof(src) __intof_x = (src); \
134 	typeof(dest) __intof_y = __intof_x; \
135 	(((uintmax_t)__intof_x == (uintmax_t)__intof_y) && \
136 	 ((__intof_x < 1) == (__intof_y < 1)) ? \
137 		(void)((dest) = __intof_y) , 0 : 1); \
138 }))
139 
/*
 * Add a and b, storing the sum in c. Evaluates to 0 on success and 1 on
 * overflow. Each of the four sign combinations (with "x < 1" grouping
 * zero with the negatives) gets its own branch, widening the operands to
 * intmax_t / uintmax_t before the range check:
 * - both non-positive: range-check against INTMAX_MIN, add as signed
 * - mixed signs: the mathematical sum always fits in the wide types;
 *   the representation (signed vs unsigned) is chosen by comparing the
 *   magnitudes of the operands
 * - both positive: range-check against UINTMAX_MAX, add as unsigned
 * In every branch __INTOF_ASSIGN() finally verifies the wide result
 * fits in typeof(c).
 */
#define __INTOF_ADD(c, a, b) (__extension__({ \
	typeof(a) __intofa_a = (a); \
	typeof(b) __intofa_b = (b); \
	intmax_t __intofa_a_signed = __intofa_a; \
	uintmax_t __intofa_a_unsigned = __intofa_a; \
	intmax_t __intofa_b_signed = __intofa_b; \
	uintmax_t __intofa_b_unsigned = __intofa_b; \
	\
	__intofa_b < 1 ? \
		__intofa_a < 1 ? \
			((INTMAX_MIN - __intofa_b_signed <= \
			  __intofa_a_signed)) ? \
				__INTOF_ASSIGN((c), __intofa_a_signed + \
						    __intofa_b_signed) : 1 \
		: \
			((__intofa_a_unsigned >= (uintmax_t)-__intofa_b) ? \
				__INTOF_ASSIGN((c), __intofa_a_unsigned + \
						    __intofa_b_signed) \
			: \
				__INTOF_ASSIGN((c), \
					(intmax_t)(__intofa_a_unsigned + \
						   __intofa_b_signed))) \
	: \
		__intofa_a < 1 ? \
			((__intofa_b_unsigned >= (uintmax_t)-__intofa_a) ? \
				__INTOF_ASSIGN((c), __intofa_a_signed + \
						    __intofa_b_unsigned) \
			: \
				__INTOF_ASSIGN((c), \
					(intmax_t)(__intofa_a_signed + \
						   __intofa_b_unsigned))) \
		: \
			((UINTMAX_MAX - __intofa_b_unsigned >= \
			  __intofa_a_unsigned) ? \
				__INTOF_ASSIGN((c), __intofa_a_unsigned + \
						    __intofa_b_unsigned) : 1); \
}))
177 
/*
 * Subtract b from a, storing the difference in c. Evaluates to 0 on
 * success and 1 on overflow. Mirrors __INTOF_ADD(): each sign
 * combination has its own branch with the operands widened to
 * intmax_t / uintmax_t before the range check, and __INTOF_ASSIGN()
 * finally verifies that the wide result fits in typeof(c).
 * NOTE(review): expressions such as UINTMAX_MAX + __intofs_b_signed
 * rely on the signed operand converting to uintmax_t with well-defined
 * modular wrap-around (unsigned arithmetic never overflows in C).
 */
#define __INTOF_SUB(c, a, b) (__extension__({ \
	typeof(a) __intofs_a = a; \
	typeof(b) __intofs_b = b; \
	intmax_t __intofs_a_signed = __intofs_a; \
	uintmax_t __intofs_a_unsigned = __intofs_a; \
	intmax_t __intofs_b_signed = __intofs_b; \
	uintmax_t __intofs_b_unsigned = __intofs_b; \
	\
	__intofs_b < 1 ? \
		__intofs_a < 1 ? \
			((INTMAX_MAX + __intofs_b_signed >= \
			  __intofs_a_signed) ? \
				__INTOF_ASSIGN((c), __intofs_a_signed - \
						    __intofs_b_signed) : 1) \
		: \
			(((uintmax_t)(UINTMAX_MAX + __intofs_b_signed) >= \
			  __intofs_a_unsigned) ? \
				__INTOF_ASSIGN((c), __intofs_a - \
						    __intofs_b) : 1) \
	: \
		__intofs_a < 1 ? \
			(((intmax_t)(INTMAX_MIN + __intofs_b) <= \
			  __intofs_a_signed) ? \
				__INTOF_ASSIGN((c), \
					(intmax_t)(__intofs_a_signed - \
						   __intofs_b_unsigned)) : 1) \
		: \
			((__intofs_b_unsigned <= __intofs_a_unsigned) ? \
				__INTOF_ASSIGN((c), __intofs_a_unsigned - \
						    __intofs_b_unsigned) \
			: \
				__INTOF_ASSIGN((c), \
					(intmax_t)(__intofs_a_unsigned - \
						   __intofs_b_unsigned))); \
}))
213 
214 /*
215  * Dealing with detecting overflow in multiplication of integers.
216  *
217  * First step is to remove two corner cases with the minum signed integer
218  * which can't be represented as a positive integer + sign.
219  * Multiply with 0 or 1 can't overflow, no checking needed of the operation,
220  * only if it can be assigned to the result.
221  *
222  * After the corner cases are eliminated we convert the two factors to
223  * positive unsigned values, keeping track of the original in another
224  * variable which is used at the end to determine the sign of the product.
225  *
226  * The two terms (a and b) are divided into upper and lower half (x1 upper
227  * and x0 lower), so the product is:
228  * ((a1 << hshift) + a0) * ((b1 << hshift) + b0)
229  * which also is:
230  * ((a1 * b1) << (hshift * 2)) +				(T1)
231  * ((a1 * b0 + a0 * b1) << hshift) +				(T2)
232  * (a0 * b0)							(T3)
233  *
234  * From this we can tell and (a1 * b1) has to be 0 or we'll overflow, that
235  * is, at least one of a1 or b1 has to be 0. Once this has been checked the
236  * addition: ((a1 * b0) << hshift) + ((a0 * b1) << hshift)
237  * isn't an addition as one of the terms will be 0.
238  *
239  * Since each factor in: (a0 * b0)
240  * only uses half the capicity of the underlaying type it can't overflow
241  *
242  * The addition of T2 and T3 can overflow so we use __INTOF_ADD() to
243  * perform that addition. If the addition succeeds without overflow the
244  * result is assigned the required sign and checked for overflow again.
245  */
246 
247 #define __intof_mul_negate	((__intof_oa < 1) != (__intof_ob < 1))
248 #define __intof_mul_hshift	(sizeof(uintmax_t) * 8 / 2)
249 #define __intof_mul_hmask	(UINTMAX_MAX >> __intof_mul_hshift)
250 #define __intof_mul_a0		((uintmax_t)(__intof_a) >> __intof_mul_hshift)
251 #define __intof_mul_b0		((uintmax_t)(__intof_b) >> __intof_mul_hshift)
252 #define __intof_mul_a1		((uintmax_t)(__intof_a) & __intof_mul_hmask)
253 #define __intof_mul_b1		((uintmax_t)(__intof_b) & __intof_mul_hmask)
254 #define __intof_mul_t		(__intof_mul_a1 * __intof_mul_b0 + \
255 				 __intof_mul_a0 * __intof_mul_b1)
256 
/*
 * Multiply a and b, storing the product in c. Evaluates to 0 on success
 * and 1 on overflow. Factors of 0 or 1 are short-circuited (only the
 * final assignment can then overflow); otherwise the factors are made
 * positive, split into halves and combined via the __intof_mul_*
 * helpers per the algorithm described above, with the sign restored at
 * the end from __intof_mul_negate.
 * NOTE(review): -__intof_oa is evaluated in the initializer, i.e.
 * before the 0/1 short-circuit; for the minimum value of a signed type
 * this negation overflows (UB in standard C, wraps on two's-complement
 * targets) — confirm that callers never pass such values or that the
 * wrap-around is acceptable here.
 */
#define __INTOF_MUL(c, a, b) (__extension__({ \
	typeof(a) __intof_oa = (a); \
	typeof(a) __intof_a = __intof_oa < 1 ? -__intof_oa : __intof_oa; \
	typeof(b) __intof_ob = (b); \
	typeof(b) __intof_b = __intof_ob < 1 ? -__intof_ob : __intof_ob; \
	typeof(c) __intof_c; \
	\
	__intof_oa == 0 || __intof_ob == 0 || \
	__intof_oa == 1 || __intof_ob == 1 ? \
		__INTOF_ASSIGN((c), __intof_oa * __intof_ob) : \
	(__intof_mul_a0 && __intof_mul_b0) || \
	 __intof_mul_t > __intof_mul_hmask ?  1 : \
	__INTOF_ADD((__intof_c), __intof_mul_t << __intof_mul_hshift, \
				 __intof_mul_a1 * __intof_mul_b1) ? 1 : \
	__intof_mul_negate ? __INTOF_ASSIGN((c), -__intof_c) : \
			     __INTOF_ASSIGN((c), __intof_c); \
}))
274 
/*
 * Fallback checked-arithmetic API: evaluates to 1 on overflow (in which
 * case *(res) is left unmodified) and to 0 on success with the result
 * stored through res.
 */
#define __compiler_add_overflow(a, b, res) __INTOF_ADD(*(res), (a), (b))
#define __compiler_sub_overflow(a, b, res) __INTOF_SUB(*(res), (a), (b))
#define __compiler_mul_overflow(a, b, res) __INTOF_MUL(*(res), (a), (b))

#endif /*!__HAVE_BUILTIN_OVERFLOW*/
280 
/*
 * Atomically compare *(p) with *(oval) and, if equal, store nval and
 * evaluate to true. On failure the current value of *(p) is written back
 * to *(oval) and false results. Acquire ordering on success, relaxed on
 * failure; the exchange is "weak" (4th argument true), so it may fail
 * spuriously and callers should retry in a loop.
 * Note: the original definition ended with a stray line-continuation
 * backslash that silently absorbed the following source line into the
 * macro; it has been removed.
 */
#define __compiler_compare_and_swap(p, oval, nval) \
	__atomic_compare_exchange_n((p), (oval), (nval), true, \
				    __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)

/* Relaxed atomic load/store: atomicity only, no ordering guarantees */
#define __compiler_atomic_load(p) __atomic_load_n((p), __ATOMIC_RELAXED)
#define __compiler_atomic_store(p, val) \
	__atomic_store_n((p), (val), __ATOMIC_RELAXED)

/* Compiler-only barrier: prevents compile-time reordering of accesses */
#define barrier() asm volatile ("" : : : "memory")
290 
/*
 * Statement-level annotation for intentional switch-case fall-through,
 * silencing -Wimplicit-fallthrough where supported. The duplicated
 * "#ifndef __has_attribute" fallback that used to sit here was dead
 * code: the fallback is already established near the top of this file.
 */
#if __has_attribute(__fallthrough__)
#define fallthrough __attribute__((__fallthrough__))
#else
/* No attribute support: a no-op statement plus a marker comment */
#define fallthrough do {} while (0) /* fallthrough */
#endif
300 
#ifndef __clang__
/*
 * GCC only: disable stack-protector instrumentation for one function
 * via the optimize attribute (clang does not support optimize("...")).
 */
#define __no_stackprot __attribute__((__optimize__ ("-fno-stack-protector")))
#else
/* Clang: expands to nothing */
#define __no_stackprot
#endif
306 
/*
 * Prevent GCC from recognizing a loop as a memcpy()/memset()-like
 * pattern and replacing it with a library call — needed e.g. when
 * implementing those library functions themselves, where the
 * replacement would recurse.
 */
#define __inhibit_loop_to_libcall \
	__attribute__ ((__optimize__ ("-fno-tree-loop-distribute-patterns")))
#endif /*COMPILER_H*/
310