xref: /rk3399_ARM-atf/include/lib/utils_def.h (revision 06f3c7058c42a9f1a9f7df75ea2de71a000855e8)
1 /*
2  * Copyright (c) 2016-2025, Arm Limited and Contributors. All rights reserved.
3  * Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
4  *
5  * SPDX-License-Identifier: BSD-3-Clause
6  */
7 
8 #ifndef UTILS_DEF_H
9 #define UTILS_DEF_H
10 
11 #include <export/lib/utils_def_exp.h>
12 
/* Compute the number of elements in the given array */
#define ARRAY_SIZE(a)				\
	(sizeof(a) / sizeof((a)[0]))

/*
 * Evaluate to a non-zero value when x is a power of two.
 * NOTE(review): also evaluates true for x == 0, and x is evaluated twice —
 * do not pass expressions with side effects.
 */
#define IS_POWER_OF_TWO(x)			\
	(((x) & ((x) - 1)) == 0)

/* Size in bytes occupied by 2^n 32-bit (4-byte) words. */
#define SIZE_FROM_LOG2_WORDS(n)		(U(4) << (n))
21 
/*
 * Single-bit masks. The linker/assembler variants use the U()/ULL() helper
 * macros; the C variants cast explicitly so the result has a fixed-width
 * unsigned type.
 */
#if defined(__LINKER__) || defined(__ASSEMBLER__)
#define BIT_32(nr)			(U(1) << (nr))
#define BIT_64(nr)			(ULL(1) << (nr))
#else
#define BIT_32(nr)			(((uint32_t)(1U)) << (nr))
#define BIT_64(nr)			(((uint64_t)(1ULL)) << (nr))
#endif

/* BIT() selects the variant matching the current architecture's register width. */
#ifdef __aarch64__
#define BIT				BIT_64
#else
#define BIT				BIT_32
#endif
35 
/*
 * Create a contiguous bitmask starting at bit position @low and ending at
 * position @high. For example
 * GENMASK_64(39, 21) gives us the 64bit vector 0x000000ffffe00000.
 */
#if defined(__LINKER__) || defined(__ASSEMBLER__)
#define GENMASK_32(high, low) \
	(((0xFFFFFFFF) << (low)) & (0xFFFFFFFF >> (32 - 1 - (high))))

#define GENMASK_64(high, low) \
	((~0 << (low)) & (~0 >> (64 - 1 - (high))))
#else
/*
 * C variants: (all-ones >> (width - 1 - high)) keeps bits [high:0];
 * XOR-ing with (BIT(low) - 1) then clears bits [low-1:0].
 */
#define GENMASK_32(high, low) \
	((~UINT32_C(0) >> (32U - 1U - (high))) ^ ((BIT_32(low) - 1U)))

#define GENMASK_64(high, low) \
	((~UINT64_C(0) >> (64U - 1U - (high))) ^ ((BIT_64(low) - 1U)))
#endif

/* GENMASK selects the variant matching the current architecture's register width. */
#ifdef __aarch64__
#define GENMASK				GENMASK_64
#else
#define GENMASK				GENMASK_32
#endif
60 
/*
 * Similar to GENMASK_64 but uses a named register field to compute the mask.
 * For a register field REG_FIELD, the macros REG_FIELD_WIDTH and
 * REG_FIELD_SHIFT must be defined.
 * NOTE(review): REG_FIELD_WIDTH must be >= 1 — a zero-width field would
 * shift a 64-bit value by 64, which is undefined behaviour.
 */
#define MASK(regfield)							\
	((~0ULL >> (64ULL - (regfield##_WIDTH))) << (regfield##_SHIFT))
68 
/*
 * Split a 64-bit value into its high and low 32-bit halves.
 * The argument is parenthesised so that expressions built from operators
 * with lower precedence than `>>`/`&` (e.g. HI(a ? b : c)) expand correctly.
 */
#define HI(addr)			((addr) >> 32)
#define LO(addr)			((addr) & 0xffffffff)

/*
 * Split a wider-than-64-bit value into its high and low 64-bit halves.
 * NOTE(review): assumes addr has a type wider than 64 bits (e.g.
 * unsigned __int128) — shifting a 64-bit operand by 64 is undefined
 * behaviour; confirm at call sites.
 */
#define HI_64(addr)			((addr) >> 64)
#define LO_64(addr)			((addr) & 0xffffffffffffffff)
74 
/**
 * EXTRACT - Extracts a specific bit field from a value.
 *
 * @regfield: Name of the register field. For a register field REG_FIELD,
 *            the macros REG_FIELD_WIDTH and REG_FIELD_SHIFT must be defined
 *            (they are used via MASK()).
 *
 * @reg:      The input value containing the field.
 *
 * The result of this macro is the contents of the field right shifted to the
 * least significant bit positions, with the rest being zero.
 */
#define EXTRACT(regfield, reg) \
	(((reg) & MASK(regfield)) >> (regfield##_SHIFT))
88 
/*
 * This variant of div_round_up can be used in macro definition but should not
 * be used in C code as the `div` parameter is evaluated twice.
 */
#define DIV_ROUND_UP_2EVAL(n, d)	(((n) + (d) - 1) / (d))

/*
 * Integer division rounding the quotient up. Statement-expression form:
 * `div` is evaluated exactly once, so it is safe for use in C code.
 */
#define div_round_up(val, div) __extension__ ({	\
	__typeof__(div) _div = (div);		\
	((val) + _div - (__typeof__(div)) 1) / _div;		\
})
99 
/*
 * Minimum of two values. Each argument is evaluated exactly once. The
 * `(void)(&_x == &_y)` line produces a compile-time warning when the two
 * arguments have incompatible pointer-to types, i.e. mismatched types.
 */
#define MIN(x, y) __extension__ ({	\
	__typeof__(x) _x = (x);		\
	__typeof__(y) _y = (y);		\
	(void)(&_x == &_y);		\
	(_x < _y) ? _x : _y;		\
})

/*
 * Maximum of two values. Same single-evaluation and type-check properties
 * as MIN().
 */
#define MAX(x, y) __extension__ ({	\
	__typeof__(x) _x = (x);		\
	__typeof__(y) _y = (y);		\
	(void)(&_x == &_y);		\
	(_x > _y) ? _x : _y;		\
})
113 
/*
 * Clamp x to the inclusive range [min, max]. Each argument is evaluated
 * exactly once; the `(void)(&_x == &_min/_max)` lines warn at compile time
 * when the argument types mismatch. Behaviour is unspecified if min > max.
 */
#define CLAMP(x, min, max) __extension__ ({ \
	__typeof__(x) _x = (x); \
	__typeof__(min) _min = (min); \
	__typeof__(max) _max = (max); \
	(void)(&_x == &_min); \
	(void)(&_x == &_max); \
	((_x > _max) ? _max : ((_x < _min) ? _min : _x)); \
})
122 
/*
 * The round_up() macro rounds up a value to the given boundary in a
 * type-agnostic yet type-safe manner. The boundary must be a power of two.
 * In other words, it computes the smallest multiple of boundary which is
 * greater than or equal to value.
 *
 * round_down() is similar but rounds the value down instead.
 */

/*
 * Internal helper: (boundary - 1) converted to value's type — the mask of
 * low-order bits that must be clear in an aligned value.
 */
#define round_boundary(value, boundary)		\
	((__typeof__(value))((boundary) - 1))

#define round_up(value, boundary)		\
	((((value) - 1) | round_boundary(value, boundary)) + 1)

#define round_down(value, boundary)		\
	((value) & ~round_boundary(value, boundary))
139 
/*
 * Add operation together with a check of whether the addition overflowed.
 * The sum is stored in '*res'.
 * Returns 0 on success and 1 on overflow (thin wrapper over the GCC/Clang
 * builtin, which returns true when the result wrapped).
 */
#define add_overflow(a, b, res) __builtin_add_overflow((a), (b), (res))
145 
/*
 * Round up a value to align with a given size and
 * check whether overflow happens.
 * The rounded-up value is '*res',
 * return 0 on success and 1 on overflow.
 * NOTE: `size` must be a power of two — the mask arithmetic
 * (v + (size-1)) & ~(size-1) is only an alignment round-up in that case.
 */
#define round_up_overflow(v, size, res) (__extension__({ \
	typeof(res) __res = res; \
	typeof(*(__res)) __roundup_tmp = 0; \
	typeof(v) __roundup_mask = (typeof(v))(size) - 1; \
	\
	add_overflow((v), __roundup_mask, &__roundup_tmp) ? 1 : \
		(void)(*(__res) = __roundup_tmp & ~__roundup_mask), 0; \
}))
160 
/*
 * Add a with b, then round up the result to align with a given size and
 * check whether overflow happens (in either the addition or the round-up).
 * The rounded-up value is '*res',
 * return 0 on success and 1 on overflow.
 * NOTE: `size` must be a power of two (see round_up_overflow()).
 */
#define add_with_round_up_overflow(a, b, size, res) (__extension__({ \
	typeof(a) __a = (a); \
	typeof(__a) __add_res = 0; \
	\
	add_overflow((__a), (b), &__add_res) ? 1 : \
		round_up_overflow(__add_res, (size), (res)) ? 1 : 0; \
}))
174 
/**
 * Helper macro to ensure a value lies on a given boundary.
 * The boundary must be a power of two (requirement inherited from
 * round_up()/round_down()).
 *
 * @value is parenthesised before the cast so that an expression argument
 * such as is_aligned(p + 1, 8) converts the whole expression to uintptr_t;
 * without the parentheses the cast would bind to `p` alone and the `+ 1`
 * would be integer (unscaled) arithmetic instead of pointer arithmetic.
 */
#define is_aligned(value, boundary)			\
	(round_up((uintptr_t)(value), (boundary)) ==	\
	 round_down((uintptr_t)(value), (boundary)))
181 
/*
 * Evaluates to 1 if (ptr + inc) overflows, 0 otherwise.
 * Both arguments must be unsigned pointer values (i.e. uintptr_t).
 */
#define check_uptr_overflow(_ptr, _inc)		\
	((_ptr) > (UINTPTR_MAX - (_inc)))

/*
 * Evaluates to 1 if (u32 + inc) overflows, 0 otherwise.
 * Both arguments must be 32-bit unsigned integers (i.e. effectively uint32_t).
 */
#define check_u32_overflow(_u32, _inc) \
	((_u32) > (UINT32_MAX - (_inc)))
195 
/* Register size (in bytes) of the current architecture. */
#ifdef __aarch64__
#define REGSZ		U(8)
#else
#define REGSZ		U(4)
#endif

/*
 * Test for the current architecture version to be at least the version
 * expected. Compares the (ARM_ARCH_MAJOR, ARM_ARCH_MINOR) pair
 * lexicographically against (_maj, _min).
 */
#define ARM_ARCH_AT_LEAST(_maj, _min) \
	((ARM_ARCH_MAJOR > (_maj)) || \
	 ((ARM_ARCH_MAJOR == (_maj)) && (ARM_ARCH_MINOR >= (_min))))
210 
/*
 * Import an assembly or linker symbol as a C expression with the specified
 * type. `name` becomes a constant holding the symbol's *address* cast to
 * `type` (the symbol is declared as a char array so `sym` decays to its
 * address). Marked unused to silence warnings in translation units that do
 * not reference it.
 */
#define IMPORT_SYM(type, sym, name) \
	extern char sym[];\
	static const __attribute__((unused)) type name = (type) sym;
218 
/*
 * When the symbol is used to hold a pointer, its alignment can be asserted
 * with this macro. For example, if there is a linker symbol that is going to
 * be used as a 64-bit pointer, the value of the linker symbol must also be
 * aligned to 64 bit. This macro makes sure this is the case.
 * (Runtime assert: the symbol's address must be a multiple of the alignment
 * of the pointed-to type.)
 */
#define ASSERT_SYM_PTR_ALIGN(sym) assert(((size_t)(sym) % __alignof__(*(sym))) == 0)
226 
/*
 * Compiler-only barrier: the "memory" clobber stops the compiler from
 * reordering memory accesses across this point (no hardware barrier issued).
 */
#define COMPILER_BARRIER() __asm__ volatile ("" ::: "memory")

/* Compiler builtin of GCC >= 9 and planned in llvm; falls back to the bare
 * value when the builtin is unavailable. */
#ifdef __HAVE_SPECULATION_SAFE_VALUE
# define SPECULATION_SAFE_VALUE(var) __builtin_speculation_safe_value(var)
#else
# define SPECULATION_SAFE_VALUE(var) var
#endif
235 
/*
 * Ticks elapsed in one second with a signal of 1 MHz
 */
#define MHZ_TICKS_PER_SEC	U(1000000)

/*
 * Ticks elapsed in one second with a signal of 1 KHz
 */
#define KHZ_TICKS_PER_SEC U(1000)
245 
246 #endif /* UTILS_DEF_H */
247