xref: /optee_os/lib/libutils/ext/include/util.h (revision fd0b115a9599e6dd69b8a665c0571ddbfd41792e)
1 /* SPDX-License-Identifier: BSD-2-Clause */
2 /*
3  * Copyright (c) 2014, STMicroelectronics International N.V.
4  */
5 #ifndef UTIL_H
6 #define UTIL_H
7 
8 #include <compiler.h>
9 #include <inttypes.h>
10 
11 #ifndef __ASSEMBLER__
12 #include <stddef.h>
13 #endif
14 
/* Common power-of-two size constants */
#define SIZE_4K	UINTPTR_C(0x1000)
#define SIZE_1M	UINTPTR_C(0x100000)
#define SIZE_2M	UINTPTR_C(0x200000)
#define SIZE_4M	UINTPTR_C(0x400000)
#define SIZE_8M	UINTPTR_C(0x800000)
#define SIZE_2G	UINTPTR_C(0x80000000)
21 
#ifndef MAX
#ifndef __ASSEMBLER__
/*
 * Type-generic MAX()/MIN() built on GCC statement expressions: each
 * argument is evaluated exactly once, so arguments with side effects
 * are safe here (unlike the plain assembler variants below).
 */
#define MAX(a, b) \
	(__extension__({ __typeof__(a) _a = (a); \
	   __typeof__(b) _b = (b); \
	 _a > _b ? _a : _b; }))

#define MIN(a, b) \
	(__extension__({ __typeof__(a) _a = (a); \
	   __typeof__(b) _b = (b); \
	 _a < _b ? _a : _b; }))
#else
/* Assembler variants: note that both arguments are evaluated twice */
#define MAX(a, b)	(((a) > (b)) ? (a) : (b))
#define MIN(a, b)	(((a) < (b)) ? (a) : (b))
#endif
#endif
38 
/*
 * In some particular conditions MAX and MIN macros fail to
 * build from a C source file implementation. In such a case one
 * needs to use MAX_UNSAFE/MIN_UNSAFE instead.
 *
 * "UNSAFE" because both arguments are evaluated twice: do not pass
 * expressions with side effects.
 */
#define MAX_UNSAFE(a, b)	(((a) > (b)) ? (a) : (b))
#define MIN_UNSAFE(a, b)	(((a) < (b)) ? (a) : (b))
46 
#ifndef ARRAY_SIZE
/*
 * Number of elements in array x. Only valid on true arrays: a pointer
 * (including an array function parameter, which decays to a pointer)
 * yields a meaningless result.
 */
#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#endif
50 
#ifndef __ASSEMBLER__
/*
 * Round up v to the next even multiple of size, size has to be a power
 * of 2. Note that size is converted to the type of v and is evaluated
 * twice.
 */
#define ROUNDUP(v, size) (((v) + ((__typeof__(v))(size) - 1)) & \
			  ~((__typeof__(v))(size) - 1))

/*
 * Round up the even multiple of size and return if result overflow
 * output value range. Size has to be a power of 2.
 * Evaluates to 1 (true) on overflow, in which case *res is not written;
 * evaluates to 0 on success with the rounded value stored in *res.
 */
#define ROUNDUP_OVERFLOW(v, size, res) (__extension__({ \
	typeof(*(res)) __roundup_tmp = 0; \
	typeof(v) __roundup_mask = (typeof(v))(size) - 1; \
	\
	ADD_OVERFLOW((v), __roundup_mask, &__roundup_tmp) ? 1 : \
		((void)(*(res) = __roundup_tmp & ~__roundup_mask), 0); \
}))

/*
 * Rounds up to the nearest multiple of y and then divides by y. Safe
 * against overflow, y has to be a power of 2.
 *
 * This macro is intended to be used to convert from "number of bytes" to
 * "number of pages" or similar units. Example:
 * num_pages = ROUNDUP_DIV(num_bytes, SMALL_PAGE_SIZE);
 */
#define ROUNDUP_DIV(x, y) (__extension__({ \
	typeof(x) __roundup_x = (x); \
	typeof(y) __roundup_mask = (typeof(x))(y) - 1; \
	\
	(__roundup_x / (y)) + (__roundup_x & __roundup_mask ? 1 : 0); \
}))

/* Round down the even multiple of size, size has to be a power of 2 */
#define ROUNDDOWN(v, size) ((v) & ~((__typeof__(v))(size) - 1))

/*
 * Round up the result of x / y to the nearest upper integer if result is not
 * already an integer. Beware that (x) + (y) - 1 may overflow; prefer
 * ROUNDUP_DIV() when x can be near the top of its type's range.
 */
#define DIV_ROUND_UP(x, y) (((x) + (y) - 1) / (y))

/* Unsigned integer division with nearest rounding variant */
#define UDIV_ROUND_NEAREST(x, y) \
	(__extension__ ({ __typeof__(x) _x = (x); \
	  __typeof__(y) _y = (y); \
	  (_x + (_y / 2)) / _y; }))
#else
/*
 * Assembler variants, expressed with division so no typeof()/statement
 * expressions are needed. Arguments may be evaluated more than once.
 */
#define ROUNDUP(x, y)			((((x) + (y) - 1) / (y)) * (y))
#define ROUNDDOWN(x, y)		(((x) / (y)) * (y))
#define UDIV_ROUND_NEAREST(x, y)	(((x) + ((y) / 2)) / (y))
#endif
102 
/* x has to be of an unsigned type */
#define IS_POWER_OF_TWO(x) (((x) != 0) && (((x) & (~(x) + 1)) == (x)))

/* True if x is a multiple of a; a has to be a power of 2 */
#define IS_ALIGNED(x, a)		(((x) & ((a) - 1)) == 0)
/* True if x (cast to an address-sized integer) meets type's alignment */
#define IS_ALIGNED_WITH_TYPE(x, type) \
        (__extension__({ \
                type __is_aligned_y; \
                IS_ALIGNED((uintptr_t)(x), __alignof__(__is_aligned_y)); \
        }))
112 
/* Stringify x after macro-expanding it (the extra level forces expansion) */
#define TO_STR(x) _TO_STR(x)
#define _TO_STR(x) #x

/* Token-paste x and y after macro-expanding them */
#define CONCAT(x, y) _CONCAT(x, y)
#define _CONCAT(x, y) x##y
118 
/*
 * Given ptr, a pointer to the field member inside a struct of the given
 * type, return a pointer to the enclosing struct. The temporary __ptr
 * lets the compiler type-check that ptr really points to that member type.
 */
#define container_of(ptr, type, member) \
	(__extension__({ \
		const typeof(((type *)0)->member) *__ptr = (ptr); \
		(type *)((unsigned long)(__ptr) - offsetof(type, member)); \
	}))

/* Size in bytes of a struct member, without needing an instance */
#define MEMBER_SIZE(type, member) sizeof(((type *)0)->member)
126 
#ifdef __ASSEMBLER__
/*
 * Assembler variants use plain shifts, presumably because the C casts
 * and UINT*_C() suffixes below are not accepted by the assembler —
 * NOTE(review): confirm against the build setup.
 */
#define BIT32(nr)		(1 << (nr))
#define BIT64(nr)		(1 << (nr))
#define SHIFT_U32(v, shift)	((v) << (shift))
#define SHIFT_U64(v, shift)	((v) << (shift))
#else
/* Single-bit masks and shifts with an explicit unsigned result width */
#define BIT32(nr)		(UINT32_C(1) << (nr))
#define BIT64(nr)		(UINT64_C(1) << (nr))
#define SHIFT_U32(v, shift)	((uint32_t)(v) << (shift))
#define SHIFT_U64(v, shift)	((uint64_t)(v) << (shift))
#endif
/* Default bit macro is the 32-bit variant */
#define BIT(nr)			BIT32(nr)
139 
/*
 * Create a contiguous bitmask starting at bit position @l and ending at
 * position @h, both bounds inclusive. For example
 * GENMASK_64(39, 21) gives us the 64bit vector 0x000000ffffe00000.
 */
#define GENMASK_32(h, l) \
	((UINT32_C(0xffffffff) << (l)) & \
	 (UINT32_C(0xffffffff) >> (32 - 1 - (h))))

#define GENMASK_64(h, l) \
	(((~UINT64_C(0)) << (l)) & (~UINT64_C(0) >> (64 - 1 - (h))))
151 
/*
 * Checking overflow for addition, subtraction and multiplication. Result
 * of operation is stored in res which is a pointer to some kind of
 * integer.
 *
 * The macros return true if an overflow occurred and *res is undefined.
 * Each argument is evaluated once (the check is delegated to the
 * compiler built-in wrappers from <compiler.h>).
 */
#define ADD_OVERFLOW(a, b, res) __compiler_add_overflow((a), (b), (res))
#define SUB_OVERFLOW(a, b, res) __compiler_sub_overflow((a), (b), (res))
#define MUL_OVERFLOW(a, b, res) __compiler_mul_overflow((a), (b), (res))
162 
/*
 * Return a signed +1, 0 or -1 value based on data comparison:
 * +1 if a > b, -1 if a < b, 0 if equal. Each argument is evaluated
 * exactly once (statement expression), so side effects are safe.
 */
#define CMP_TRILEAN(a, b) \
	(__extension__({ \
		__typeof__(a) _a = (a); \
		__typeof__(b) _b = (b); \
		\
		_a > _b ? 1 : _a < _b ? -1 : 0; \
	}))
171 
172 #ifndef __ASSEMBLER__
/*
 * Combine two 32-bit register values into one 64-bit value: reg0
 * supplies the high 32 bits, reg1 the low 32 bits.
 */
static inline uint64_t reg_pair_to_64(uint32_t reg0, uint32_t reg1)
{
	uint64_t high_part = reg0;
	uint64_t low_part = reg1;

	return (high_part << 32) | low_part;
}
177 
/* Return the 32 most significant bits of val */
static inline uint32_t high32_from_64(uint64_t val)
{
	return (uint32_t)(val >> 32);
}
182 
/*
 * Return the 32 least significant bits of val.
 * The cast makes the intentional truncation explicit, avoiding implicit
 * narrowing-conversion warnings under strict build flags; the returned
 * value is unchanged.
 */
static inline uint32_t low32_from_64(uint64_t val)
{
	return (uint32_t)val;
}
187 
/*
 * Split the 64-bit val into a 32-bit register pair: *reg0 receives the
 * high 32 bits, *reg1 the low 32 bits (inverse of reg_pair_to_64()).
 */
static inline void reg_pair_from_64(uint64_t val, uint32_t *reg0,
				    uint32_t *reg1)
{
	*reg1 = (uint32_t)val;
	*reg0 = (uint32_t)(val >> 32);
}
194 
195 /* Get and set bit fields  */
/*
 * Extract the bit field selected by mask from reg, shifted down so the
 * field's least significant bit lands at bit 0. mask must be non-zero.
 */
static inline uint32_t get_field_u32(uint32_t reg, uint32_t mask)
{
	uint32_t field_lsb = mask & ~(mask - 1); /* lowest set bit of mask */

	return (reg & mask) / field_lsb;
}
200 
/*
 * Return reg with the bit field selected by mask replaced by val, where
 * val is given relative to the field's least significant bit. Bits of
 * val beyond the field width must be zero. mask must be non-zero.
 */
static inline uint32_t set_field_u32(uint32_t reg, uint32_t mask, uint32_t val)
{
	uint32_t field_lsb = mask & ~(mask - 1); /* lowest set bit of mask */
	uint32_t cleared = reg & ~mask;

	return cleared | (val * field_lsb);
}
205 
/*
 * 64-bit variant of get_field_u32(): extract the bit field selected by
 * mask from reg, shifted down to bit 0. mask must be non-zero.
 */
static inline uint64_t get_field_u64(uint64_t reg, uint64_t mask)
{
	uint64_t field_lsb = mask & ~(mask - 1); /* lowest set bit of mask */

	return (reg & mask) / field_lsb;
}
210 
/*
 * 64-bit variant of set_field_u32(): return reg with the bit field
 * selected by mask replaced by val (val relative to the field's least
 * significant bit). mask must be non-zero.
 */
static inline uint64_t set_field_u64(uint64_t reg, uint64_t mask, uint64_t val)
{
	uint64_t field_lsb = mask & ~(mask - 1); /* lowest set bit of mask */
	uint64_t cleared = reg & ~mask;

	return cleared | (val * field_lsb);
}
215 
/*
 * Helper functions for qsort with standard types.
 * Each sorts the n-element array aa in place (implementations live in
 * libutils; ordering presumed ascending — confirm against the .c file).
 */
void qsort_int(int *aa, size_t n);
void qsort_uint(unsigned int *aa, size_t n);
void qsort_long(long int *aa, size_t n);
void qsort_ul(unsigned long int *aa, size_t n);
void qsort_ll(long long int *aa, size_t n);
void qsort_ull(unsigned long long int *aa, size_t n);
void qsort_s8(int8_t *aa, size_t n);
void qsort_u8(uint8_t *aa, size_t n);
void qsort_s16(int16_t *aa, size_t n);
void qsort_u16(uint16_t *aa, size_t n);
void qsort_s32(int32_t *aa, size_t n);
void qsort_u32(uint32_t *aa, size_t n);
void qsort_s64(int64_t *aa, size_t n);
void qsort_u64(uint64_t *aa, size_t n);
231 #endif
232 
233 #endif /*UTIL_H*/
234