xref: /rk3399_ARM-atf/include/lib/utils_def.h (revision 2d05494012a51467a1984649e194c63ca154606b)
/*
 * Copyright (c) 2016-2025, Arm Limited and Contributors. All rights reserved.
 * Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#ifndef UTILS_DEF_H
#define UTILS_DEF_H

#include <export/lib/utils_def_exp.h>

/* Compute the number of elements in the given array */
#define ARRAY_SIZE(a)				\
	(sizeof(a) / sizeof((a)[0]))

#define IS_POWER_OF_TWO(x)			\
	(((x) & ((x) - 1)) == 0)

#define SIZE_FROM_LOG2_WORDS(n)		(U(4) << (n))
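/*
 * Illustrative usage, with a hypothetical array:
 *   static const uint32_t freqs[] = {400U, 800U, 1600U};
 * ARRAY_SIZE(freqs) evaluates to 3. IS_POWER_OF_TWO(64) evaluates to 1
 * (as does IS_POWER_OF_TWO(0), so callers must treat zero separately).
 * SIZE_FROM_LOG2_WORDS(2) evaluates to 16: 2^2 = 4 words of 4 bytes each.
 */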

#if defined(__LINKER__) || defined(__ASSEMBLER__)
#define BIT_32(nr)			(U(1) << (nr))
#define BIT_64(nr)			(ULL(1) << (nr))
#else
#define BIT_32(nr)			(((uint32_t)(1U)) << (nr))
#define BIT_64(nr)			(((uint64_t)(1ULL)) << (nr))
#endif

#ifdef __aarch64__
#define BIT				BIT_64
#else
#define BIT				BIT_32
#endif
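/*
 * Illustrative values: BIT_32(5) is 0x20 and BIT_64(40) is 0x10000000000.
 * The unsuffixed BIT() maps to BIT_64() on AArch64 builds and to BIT_32()
 * otherwise.
 */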

/*
 * Create a contiguous bitmask starting at bit position @low and ending at
 * position @high. For example,
 * GENMASK_64(39, 21) gives us the 64-bit vector 0x000000ffffe00000.
 */
#if defined(__LINKER__) || defined(__ASSEMBLER__)
#define GENMASK_32(high, low) \
	(((0xFFFFFFFF) << (low)) & (0xFFFFFFFF >> (32 - 1 - (high))))

#define GENMASK_64(high, low) \
	((~0 << (low)) & (~0 >> (64 - 1 - (high))))
#else
#define GENMASK_32(high, low) \
	((~UINT32_C(0) >> (32U - 1U - (high))) ^ ((BIT_32(low) - 1U)))

#define GENMASK_64(high, low) \
	((~UINT64_C(0) >> (64U - 1U - (high))) ^ ((BIT_64(low) - 1U)))
#endif

#ifdef __aarch64__
#define GENMASK				GENMASK_64
#else
#define GENMASK				GENMASK_32
#endif
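/*
 * Further illustrative values: GENMASK_32(7, 4) is 0xf0 and GENMASK_64(15, 8)
 * is 0xff00. As with BIT(), the unsuffixed GENMASK() picks the variant that
 * matches the architecture's register width.
 */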

/*
 * Similar to GENMASK_64 but uses a named register field to compute the mask.
 * For a register field REG_FIELD, the macros REG_FIELD_WIDTH and
 * REG_FIELD_SHIFT must be defined.
 */
#define MASK(regfield)							\
	((~0ULL >> (64ULL - (regfield##_WIDTH))) << (regfield##_SHIFT))

#if defined(__LINKER__) || defined(__ASSEMBLER__)
#define HI(addr)			(addr >> 32)
#define LO(addr)			(addr & 0xffffffff)
#else
#define HI(addr)			(addr >> 32)
#define LO(addr)			(uint32_t)((addr) & (0xffffffffU))
#endif

#define HI_64(addr)			(addr >> 64)
#define LO_64(addr)			(addr & 0xffffffffffffffff)
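/*
 * Illustrative split of a 64-bit address: for addr == 0x0000000123456789ULL,
 * HI(addr) is 0x1 and LO(addr) is 0x23456789. HI_64()/LO_64() are intended
 * for the equivalent split of 128-bit values.
 */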

/**
 * EXTRACT - Extracts a specific bit field from a value.
 *
 * @reg:      The input value containing the field.
 *
 * @regfield: The name of the register field. For a register field REG_FIELD,
 *            the macros REG_FIELD_WIDTH and REG_FIELD_SHIFT must be defined.
 *
 * The result of this macro is the contents of the field right shifted to the
 * least significant bit positions, with the rest being zero.
 */
#define EXTRACT(regfield, reg) \
	(((reg) & MASK(regfield)) >> (regfield##_SHIFT))

#define UPDATE_REG_FIELD(regfield, reg, val) \
	do { \
		(reg) &= ~(MASK(regfield)); \
		(reg) |= ((uint64_t)(val) << (regfield##_SHIFT)); \
	} while (0)
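/*
 * Illustrative sketch with a hypothetical register field CTRL_MODE, where
 * CTRL_MODE_SHIFT is 4 and CTRL_MODE_WIDTH is 2: MASK(CTRL_MODE) is 0x30,
 * EXTRACT(CTRL_MODE, 0x25) is 0x2, and UPDATE_REG_FIELD(CTRL_MODE, reg, 1)
 * clears bits [5:4] of reg and then sets the field to 1 (i.e. ORs in 0x10).
 */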

/*
 * This variant of div_round_up can be used in macro definitions but should not
 * be used in C code as the `div` parameter is evaluated twice.
 */
#define DIV_ROUND_UP_2EVAL(n, d)	(((n) + (d) - 1) / (d))

/* Round `n` up to a multiple of `r`; note that `r` is evaluated more than once. */
#define ROUND_UP_2EVAL(n, r)		((((n) + (r) - 1) / (r)) * (r))

#define div_round_up(val, div) __extension__ ({	\
	__typeof__(div) _div = (div);		\
	((val) + _div - (__typeof__(div)) 1) / _div;		\
})
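/*
 * Illustrative values: div_round_up(10, 3) and DIV_ROUND_UP_2EVAL(10, 3) both
 * evaluate to 4, and ROUND_UP_2EVAL(10, 8) evaluates to 16. The 2EVAL variants
 * exist because the statement-expression form of div_round_up above cannot be
 * used where a constant expression is required, e.g. in other macro
 * definitions or in initializers.
 */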

#define MIN(x, y) __extension__ ({	\
	__typeof__(x) _x = (x);		\
	__typeof__(y) _y = (y);		\
	(void)(&_x == &_y);		\
	(_x < _y) ? _x : _y;		\
})

#define MAX(x, y) __extension__ ({	\
	__typeof__(x) _x = (x);		\
	__typeof__(y) _y = (y);		\
	(void)(&_x == &_y);		\
	(_x > _y) ? _x : _y;		\
})

#define CLAMP(x, min, max) __extension__ ({ \
	__typeof__(x) _x = (x); \
	__typeof__(min) _min = (min); \
	__typeof__(max) _max = (max); \
	(void)(&_x == &_min); \
	(void)(&_x == &_max); \
	((_x > _max) ? _max : ((_x < _min) ? _min : _x)); \
})
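/*
 * The (void)(&_x == &_y) comparisons above are a type-check trick: comparing
 * pointers to different types makes the compiler emit a warning when MIN(),
 * MAX() or CLAMP() mix argument types. Illustrative values: MIN(3, 7) is 3,
 * MAX(3, 7) is 7, and CLAMP(9, 1, 5) is 5.
 */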

/*
 * The round_up() macro rounds up a value to the given boundary in a
 * type-agnostic yet type-safe manner. The boundary must be a power of two.
 * In other words, it computes the smallest multiple of boundary which is
 * greater than or equal to value.
 *
 * round_down() is similar but rounds the value down instead.
 */
#define round_boundary(value, boundary)		\
	((__typeof__(value))((boundary) - ((__typeof__(value))1U)))

#define round_up(value, boundary)		\
	((((value) - ((__typeof__(value))1U)) | round_boundary(value, boundary)) + ((__typeof__(value))1U))

#define round_down(value, boundary)		\
	((value) & ~round_boundary(value, boundary))
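/*
 * Illustrative values, assuming a 4 KiB boundary: round_up(0x1234, 0x1000) is
 * 0x2000, round_down(0x1234, 0x1000) is 0x1000, and a value that is already
 * aligned (e.g. 0x2000) is returned unchanged by both.
 */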

/*
 * Add operation together with a check of whether the operation overflowed.
 * The result is stored in '*res'.
 * Returns 0 on success and 1 on overflow.
 */
#define add_overflow(a, b, res) __builtin_add_overflow((a), (b), (res))

/*
 * Round up a value to align with a given size and
 * check whether overflow happens.
 * The rounded-up value is stored in '*res'.
 * Returns 0 on success and 1 on overflow.
 */
#define round_up_overflow(v, size, res) (__extension__({ \
	typeof(res) __res = res; \
	typeof(*(__res)) __roundup_tmp = 0; \
	typeof(v) __roundup_mask = (typeof(v))(size) - 1; \
	\
	add_overflow((v), __roundup_mask, &__roundup_tmp) ? 1 : \
		((void)(*(__res) = __roundup_tmp & ~__roundup_mask), 0); \
}))

/*
 * Add a and b, then round up the result to align with a given size and
 * check whether overflow happens.
 * The rounded-up value is stored in '*res'.
 * Returns 0 on success and 1 on overflow.
 */
#define add_with_round_up_overflow(a, b, size, res) (__extension__({ \
	typeof(a) __a = (a); \
	typeof(__a) __add_res = 0; \
	\
	add_overflow((__a), (b), &__add_res) ? 1 : \
		round_up_overflow(__add_res, (size), (res)) ? 1 : 0; \
}))
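/*
 * Illustrative usage, assuming uint32_t operands:
 *   uint32_t out;
 *   round_up_overflow(100U, 8U, &out)                -> 0, out == 104
 *   round_up_overflow(UINT32_MAX - 2U, 8U, &out)     -> 1 (adding the mask wraps)
 *   add_with_round_up_overflow(100U, 20U, 8U, &out)  -> 0, out == 120
 */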

/**
 * Helper macro to check that a value lies on a given boundary.
 */
#define is_aligned(value, boundary)			\
	(round_up((uintptr_t) value, boundary) ==	\
	 round_down((uintptr_t) value, boundary))
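/*
 * Illustrative values: is_aligned(0x3000, 0x1000) is true, while
 * is_aligned(0x3004, 0x1000) is false. The boundary must be a power of two,
 * as for round_up()/round_down().
 */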

/*
 * Evaluates to 1 if (ptr + inc) overflows, 0 otherwise.
 * Both arguments must be unsigned pointer values (i.e. uintptr_t).
 */
#define check_uptr_overflow(_ptr, _inc)		\
	((_ptr) > (UINTPTR_MAX - (_inc)))

/*
 * Evaluates to 1 if (u32 + inc) overflows, 0 otherwise.
 * Both arguments must be 32-bit unsigned integers (i.e. effectively uint32_t).
 */
#define check_u32_overflow(_u32, _inc) \
	((_u32) > (UINT32_MAX - (_inc)))
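/*
 * Illustrative values: check_uptr_overflow(UINTPTR_MAX - 4U, 8U) evaluates to
 * 1, while check_uptr_overflow(UINTPTR_MAX - 8U, 8U) evaluates to 0.
 */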

/* Register size of the current architecture. */
#ifdef __aarch64__
#define REGSZ		U(8)
#else
#define REGSZ		U(4)
#endif

/*
 * Import an assembly or linker symbol as a C expression with the specified
 * type.
 */
#define IMPORT_SYM(type, sym, name) \
	extern char sym[];\
	static const __attribute__((unused)) type name = (type) sym;
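/*
 * Illustrative usage, with a hypothetical linker symbol __DATA_START__:
 *   IMPORT_SYM(uintptr_t, __DATA_START__, DATA_START);
 * makes the symbol's address available to C code as the constant DATA_START.
 */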

/*
 * When the symbol is used to hold a pointer, its alignment can be asserted
 * with this macro. For example, if there is a linker symbol that is going to
 * be used as a 64-bit pointer, the value of the linker symbol must also be
 * aligned to 64 bits. This macro makes sure this is the case.
 */
#define ASSERT_SYM_PTR_ALIGN(sym) assert(((size_t)(sym) % __alignof__(*(sym))) == 0)

#define COMPILER_BARRIER() __asm__ volatile ("" ::: "memory")

/* Compiler built-in available in GCC >= 9 and planned for LLVM */
#ifdef __HAVE_SPECULATION_SAFE_VALUE
# define SPECULATION_SAFE_VALUE(var) __builtin_speculation_safe_value(var)
#else
# define SPECULATION_SAFE_VALUE(var) var
#endif
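/*
 * Illustrative usage: after bounds-checking an untrusted index, writing
 * "idx = SPECULATION_SAFE_VALUE(idx);" before using it to access an array
 * limits speculative out-of-bounds reads on toolchains that provide the
 * built-in; otherwise the value is passed through unchanged.
 */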

/*
 * Ticks elapsed in one second with a signal of 1 MHz
 */
#define MHZ_TICKS_PER_SEC	U(1000000)

/*
 * Ticks elapsed in one second with a signal of 1 kHz
 */
#define KHZ_TICKS_PER_SEC	U(1000)
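/*
 * Illustrative conversion: a counter running at freq Hz advances
 * (freq / MHZ_TICKS_PER_SEC) ticks per microsecond, so a delay of "us"
 * microseconds corresponds to roughly us * (freq / MHZ_TICKS_PER_SEC) ticks.
 * KHZ_TICKS_PER_SEC plays the same role for millisecond conversions.
 */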

#endif /* UTILS_DEF_H */