xref: /optee_os/lib/libutils/ext/include/util.h (revision 8132f3be81a7a09e989ae47b21134ded25c916e6)
/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */
#ifndef UTIL_H
#define UTIL_H

#include <compiler.h>
#include <inttypes.h>

#ifndef __ASSEMBLER__
#include <assert.h>
#include <stddef.h>
#endif

#define SIZE_4K	UINTPTR_C(0x1000)
#define SIZE_1M	UINTPTR_C(0x100000)
#define SIZE_2M	UINTPTR_C(0x200000)
#define SIZE_4M	UINTPTR_C(0x400000)
#define SIZE_8M	UINTPTR_C(0x800000)
#define SIZE_2G	UINTPTR_C(0x80000000)

#ifndef MAX
#ifndef __ASSEMBLER__
#define MAX(a, b) \
	(__extension__({ __typeof__(a) _a = (a); \
	   __typeof__(b) _b = (b); \
	 _a > _b ? _a : _b; }))

#define MIN(a, b) \
	(__extension__({ __typeof__(a) _a = (a); \
	   __typeof__(b) _b = (b); \
	 _a < _b ? _a : _b; }))
#else
#define MAX(a, b)	(((a) > (b)) ? (a) : (b))
#define MIN(a, b)	(((a) < (b)) ? (a) : (b))
#endif
#endif

/*
 * Under some conditions the MAX and MIN macros above fail to build from
 * C source, e.g. where a constant expression is required, since
 * statement expressions are not constant expressions. In such cases use
 * MAX_UNSAFE/MIN_UNSAFE instead.
 */
#define MAX_UNSAFE(a, b)	(((a) > (b)) ? (a) : (b))
#define MIN_UNSAFE(a, b)	(((a) < (b)) ? (a) : (b))
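
/*
 * Illustrative sketch, not part of this header: the statement-expression
 * form of MAX()/MIN() above is not a constant expression, so a construct
 * like the hypothetical buffer below only builds with the _UNSAFE
 * variant:
 *
 *	#define BUF_A_LEN	64
 *	#define BUF_B_LEN	96
 *	static uint8_t buf[MAX_UNSAFE(BUF_A_LEN, BUF_B_LEN)];
 *
 * The trade-off is that the _UNSAFE variants evaluate their arguments
 * twice, so arguments with side effects must be avoided.
 */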

#ifndef ARRAY_SIZE
#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#endif

#ifndef __ASSEMBLER__
/* Round up to the next even multiple of size */
#define ROUNDUP(x, y) \
	((((x) + (__typeof__(x))(y) - 1) / (__typeof__(x))(y)) * \
	 (__typeof__(x))(y))

/* Round up to the next even multiple of size, size has to be a power of 2 */
#define ROUNDUP2(v, size) \
	(__extension__({ \
		assert(IS_POWER_OF_TWO(size)); \
		(((v) + ((__typeof__(v))(size) - 1)) & \
		 ~((__typeof__(v))(size) - 1)); \
	}))
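
/*
 * Usage sketch with illustrative values, not from this header: align a
 * byte count up to the next 4 KiB boundary.
 *
 *	size_t len = 0x1234;
 *	size_t aligned = ROUNDUP2(len, SIZE_4K);	yields 0x2000
 *
 * ROUNDUP2() trades the division in ROUNDUP() for a mask, but asserts
 * that the size is a power of two; ROUNDUP() works for any non-zero
 * multiple.
 */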

/*
 * ROUNDUP_OVERFLOW(v, size, res)
 *
 * @v: Input value to round
 * @size: Rounding operand
 * @res: Pointer where the rounded value is stored
 * @return: boolean overflow status (0/false or 1/true)
 *
 * Round up value @v to the next even multiple of @size and return
 * whether the result overflows the range of the output type. The
 * rounded value is stored at the address pointed to by @res.
 */
#define ROUNDUP_OVERFLOW(v, size, res) \
	(__extension__({ \
		typeof(v) __roundup_mod = 0; \
		typeof(v) __roundup_add = 0; \
		\
		__roundup_mod = (v) % (typeof(v))(size); \
		if (__roundup_mod) \
			__roundup_add = (typeof(v))(size) - __roundup_mod; \
		ADD_OVERFLOW((v), __roundup_add, (res)); \
	}))
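
/*
 * Usage sketch with a hypothetical caller: reject a length that would
 * wrap when rounded instead of silently truncating. user_len is an
 * assumed variable.
 *
 *	size_t rounded = 0;
 *
 *	if (ROUNDUP_OVERFLOW(user_len, 16, &rounded))
 *		return TEE_ERROR_OVERFLOW;
 *	use rounded, now a multiple of 16, from here on
 */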

/*
 * ROUNDUP2_OVERFLOW(v, size, res)
 *
 * @v: Input value to round
 * @size: Rounding operand, must be a power of 2
 * @res: Pointer where the rounded value is stored
 * @return: boolean overflow status (0/false or 1/true)
 *
 * Round up value @v to the next even multiple of @size and return
 * whether the result overflows the range of the output type. The
 * rounded value is stored at the address pointed to by @res.
 */
#define ROUNDUP2_OVERFLOW(v, size, res) \
	(__extension__({ \
		typeof(*(res)) __roundup_tmp = 0; \
		typeof(v) __roundup_mask = (typeof(v))(size) - 1; \
		\
		assert(IS_POWER_OF_TWO(size)); \
		ADD_OVERFLOW((v), __roundup_mask, &__roundup_tmp) ? 1 : \
			((void)(*(res) = __roundup_tmp & ~__roundup_mask), 0); \
	}))
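
/*
 * Usage sketch: same contract as ROUNDUP_OVERFLOW() but mask-based, so
 * @size must be a power of two. va is an assumed 32-bit virtual address.
 *
 *	uint32_t va_end = 0;
 *
 *	if (ROUNDUP2_OVERFLOW(va, SIZE_4K, &va_end))
 *		panic();	rounding would wrap past UINT32_MAX
 */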

/*
 * ROUNDUP2_DIV(x, y)
 *
 * Rounds up to the nearest multiple of y and then divides by y. Safe
 * against overflow, y has to be a power of 2.
 *
 * This macro is intended to be used to convert from "number of bytes" to
 * "number of pages" or similar units. Example:
 * num_pages = ROUNDUP2_DIV(num_bytes, SMALL_PAGE_SIZE);
 */
#define ROUNDUP2_DIV(x, y) \
	(__extension__({ \
		typeof(x) __roundup_x = (x); \
		typeof(y) __roundup_mask = (typeof(x))(y) - 1; \
		\
		assert(IS_POWER_OF_TWO(y)); \
		(__roundup_x / (y)) + (__roundup_x & __roundup_mask ? 1 : 0); \
	}))

/*
 * ROUNDUP_DIV(x, y)
 *
 * Rounds up to the nearest multiple of y and then divides by y. Safe
 * against overflow.
 */
#define ROUNDUP_DIV(x, y) (ROUNDUP((x), (y)) / (__typeof__(x))(y))

/* Round down to an even multiple of size, size has to be a power of 2 */
#define ROUNDDOWN(v, size) ((v) & ~((__typeof__(v))(size) - 1))

/*
 * Round up the result of x / y to the nearest upper integer if the
 * result is not already an integer.
 */
#define DIV_ROUND_UP(x, y) (((x) + (y) - 1) / (y))

/* Unsigned integer division with rounding to nearest */
#define UDIV_ROUND_NEAREST(x, y) \
	(__extension__ ({ __typeof__(x) _x = (x); \
	  __typeof__(y) _y = (y); \
	  (_x + (_y / 2)) / _y; }))
#else
#define ROUNDUP(x, y)			((((x) + (y) - 1) / (y)) * (y))
#define ROUNDDOWN(x, y)			(((x) / (y)) * (y))
#define UDIV_ROUND_NEAREST(x, y)	(((x) + ((y) / 2)) / (y))
#endif
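
/*
 * Quick comparison of the rounding-division helpers above, with
 * illustrative values:
 *
 *	ROUNDUP_DIV(10, 4)		yields 3
 *	DIV_ROUND_UP(10, 4)		yields 3 (can wrap if x + y - 1 overflows)
 *	UDIV_ROUND_NEAREST(10, 4)	yields 3 (2.5 rounds up)
 *	UDIV_ROUND_NEAREST(9, 4)	yields 2 (2.25 rounds down)
 */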

/* x has to be of an unsigned type */
#define IS_POWER_OF_TWO(x) (((x) != 0) && (((x) & (~(x) + 1)) == (x)))

#define IS_ALIGNED(x, a)		(((x) & ((a) - 1)) == 0)
#define IS_ALIGNED_WITH_TYPE(x, type) \
	(__extension__({ \
		type __is_aligned_y; \
		IS_ALIGNED((uintptr_t)(x), __alignof__(__is_aligned_y)); \
	}))
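
/*
 * Illustrative values and a hypothetical pointer check:
 *
 *	IS_POWER_OF_TWO(64)			true
 *	IS_ALIGNED(0x1008, 8)			true
 *	IS_ALIGNED_WITH_TYPE(buf, uint32_t)	true if buf is 4-byte aligned
 */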

#define TO_STR(x) _TO_STR(x)
#define _TO_STR(x) #x

#define CONCAT(x, y) _CONCAT(x, y)
#define _CONCAT(x, y) x##y

#define container_of(ptr, type, member) \
	(__extension__({ \
		const typeof(((type *)0)->member) *__ptr = (ptr); \
		(type *)((unsigned long)(__ptr) - offsetof(type, member)); \
	}))

#define MEMBER_SIZE(type, member) sizeof(((type *)0)->member)
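
/*
 * container_of() sketch with a hypothetical struct: recover the outer
 * object from a pointer to one of its members.
 *
 *	struct item {
 *		int key;
 *		struct node link;
 *	};
 *
 * Given struct node *n pointing at some item's link member:
 *
 *	struct item *it = container_of(n, struct item, link);
 */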

#ifdef __ASSEMBLER__
#define BIT32(nr)		(1 << (nr))
#define BIT64(nr)		(1 << (nr))
#define SHIFT_U32(v, shift)	((v) << (shift))
#define SHIFT_U64(v, shift)	((v) << (shift))
#else
#define BIT32(nr)		(UINT32_C(1) << (nr))
#define BIT64(nr)		(UINT64_C(1) << (nr))
#define SHIFT_U32(v, shift)	((uint32_t)(v) << (shift))
#define SHIFT_U64(v, shift)	((uint64_t)(v) << (shift))
#endif
#define BIT(nr)			BIT32(nr)

/*
 * Create a contiguous bitmask starting at bit position @l and ending at
 * position @h. For example, GENMASK_64(39, 21) gives the 64-bit value
 * 0x000000ffffe00000.
 */
#define GENMASK_32(h, l) \
	((UINT32_C(0xffffffff) << (l)) & \
	 (UINT32_C(0xffffffff) >> (32 - 1 - (h))))

#define GENMASK_64(h, l) \
	(((~UINT64_C(0)) << (l)) & (~UINT64_C(0) >> (64 - 1 - (h))))
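
/*
 * Further illustrative values:
 *
 *	GENMASK_32(7, 0)	yields 0x000000ff
 *	GENMASK_32(31, 28)	yields 0xf0000000
 *	GENMASK_64(63, 32)	yields 0xffffffff00000000
 */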

/*
 * Checking overflow for addition, subtraction and multiplication. The
 * result of the operation is stored in *res, which is a pointer to some
 * kind of integer.
 *
 * The macros return true if an overflow occurred, in which case *res is
 * undefined.
 */
#define ADD_OVERFLOW(a, b, res) __compiler_add_overflow((a), (b), (res))
#define SUB_OVERFLOW(a, b, res) __compiler_sub_overflow((a), (b), (res))
#define MUL_OVERFLOW(a, b, res) __compiler_mul_overflow((a), (b), (res))
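
/*
 * Usage sketch with hypothetical sizes: validate a buffer size
 * computation before allocating. nmemb, elem_size and header_size are
 * assumed variables.
 *
 *	size_t total = 0;
 *
 *	if (MUL_OVERFLOW(nmemb, elem_size, &total) ||
 *	    ADD_OVERFLOW(total, header_size, &total))
 *		return TEE_ERROR_OVERFLOW;
 */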

/* Return a signed +1, 0 or -1 value based on data comparison */
#define CMP_TRILEAN(a, b) \
	(__extension__({ \
		__typeof__(a) _a = (a); \
		__typeof__(b) _b = (b); \
		\
		_a > _b ? 1 : _a < _b ? -1 : 0; \
	}))
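
/*
 * Usage sketch: CMP_TRILEAN() maps naturally onto qsort()-style
 * comparators and avoids the overflow of the naive (a - b) idiom.
 * Hypothetical comparator:
 *
 *	static int cmp_int(const void *a, const void *b)
 *	{
 *		return CMP_TRILEAN(*(const int *)a, *(const int *)b);
 *	}
 */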

#ifndef __ASSEMBLER__
static inline uint64_t reg_pair_to_64(uint32_t reg0, uint32_t reg1)
{
	return (uint64_t)reg0 << 32 | reg1;
}

static inline uint32_t high32_from_64(uint64_t val)
{
	return val >> 32;
}

static inline uint32_t low32_from_64(uint64_t val)
{
	return val;
}

static inline void reg_pair_from_64(uint64_t val, uint32_t *reg0,
				    uint32_t *reg1)
{
	*reg0 = high32_from_64(val);
	*reg1 = low32_from_64(val);
}
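
/*
 * Usage sketch with illustrative values: splitting and recombining a
 * 64-bit value across a pair of 32-bit registers.
 *
 *	uint32_t hi = 0, lo = 0;
 *
 *	reg_pair_from_64(0x1122334455667788, &hi, &lo);
 *	hi is now 0x11223344 and lo 0x55667788;
 *	reg_pair_to_64(hi, lo) reconstructs 0x1122334455667788.
 */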

/* Get and set bit fields */
static inline uint32_t get_field_u32(uint32_t reg, uint32_t mask)
{
	return (reg & mask) / (mask & ~(mask - 1));
}

static inline uint32_t set_field_u32(uint32_t reg, uint32_t mask, uint32_t val)
{
	return (reg & ~mask) | (val * (mask & ~(mask - 1)));
}

static inline uint64_t get_field_u64(uint64_t reg, uint64_t mask)
{
	return (reg & mask) / (mask & ~(mask - 1));
}

static inline uint64_t set_field_u64(uint64_t reg, uint64_t mask, uint64_t val)
{
	return (reg & ~mask) | (val * (mask & ~(mask - 1)));
}
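
/*
 * Sketch of how the field helpers work: @mask selects the field and
 * (mask & ~(mask - 1)) isolates its lowest set bit, so the divide and
 * multiply shift the field into and out of position. Illustrative
 * values:
 *
 *	get_field_u32(0x00005a00, GENMASK_32(15, 8))	yields 0x5a
 *	set_field_u32(0, GENMASK_32(15, 8), 0x5a)	yields 0x00005a00
 */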

/* Helper functions for qsort() with standard integer types */
void qsort_int(int *aa, size_t n);
void qsort_uint(unsigned int *aa, size_t n);
void qsort_long(long int *aa, size_t n);
void qsort_ul(unsigned long int *aa, size_t n);
void qsort_ll(long long int *aa, size_t n);
void qsort_ull(unsigned long long int *aa, size_t n);
void qsort_s8(int8_t *aa, size_t n);
void qsort_u8(uint8_t *aa, size_t n);
void qsort_s16(int16_t *aa, size_t n);
void qsort_u16(uint16_t *aa, size_t n);
void qsort_s32(int32_t *aa, size_t n);
void qsort_u32(uint32_t *aa, size_t n);
void qsort_s64(int64_t *aa, size_t n);
void qsort_u64(uint64_t *aa, size_t n);
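
/*
 * Usage sketch with an illustrative array; each helper sorts the @n
 * elements of @aa in place (ascending order assumed here):
 *
 *	uint32_t vals[] = { 3, 1, 2 };
 *
 *	qsort_u32(vals, ARRAY_SIZE(vals));
 */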
#endif

#endif /*UTIL_H*/