/*
 * Copyright (c) 2016-2023, Arm Limited and Contributors. All rights reserved.
 * Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#ifndef UTILS_DEF_H
#define UTILS_DEF_H

#include <export/lib/utils_def_exp.h>

/* Compute the number of elements in the given array */
#define ARRAY_SIZE(a)				\
	(sizeof(a) / sizeof((a)[0]))

#define IS_POWER_OF_TWO(x)			\
	(((x) & ((x) - 1)) == 0)

#define SIZE_FROM_LOG2_WORDS(n)		(U(4) << (n))

#define BIT_32(nr)			(U(1) << (nr))
#define BIT_64(nr)			(ULL(1) << (nr))

#ifdef __aarch64__
#define BIT				BIT_64
#else
#define BIT				BIT_32
#endif
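
/*
 * Illustrative values (not part of the original header): BIT_32(5)
 * evaluates to U(0x20) and BIT_32(31) to U(0x80000000); on AArch64,
 * BIT(40) expands to BIT_64(40) and evaluates to ULL(0x10000000000).
 */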

/*
 * Create a contiguous bitmask starting at bit position @l and ending at
 * position @h. For example,
 * GENMASK_64(39, 21) gives us the 64-bit vector 0x000000ffffe00000.
 */
#if defined(__LINKER__) || defined(__ASSEMBLER__)
#define GENMASK_32(h, l) \
	(((0xFFFFFFFF) << (l)) & (0xFFFFFFFF >> (32 - 1 - (h))))

#define GENMASK_64(h, l) \
	((~0 << (l)) & (~0 >> (64 - 1 - (h))))
#else
#define GENMASK_32(h, l) \
	(((~UINT32_C(0)) << (l)) & (~UINT32_C(0) >> (32 - 1 - (h))))

#define GENMASK_64(h, l) \
	(((~UINT64_C(0)) << (l)) & (~UINT64_C(0) >> (64 - 1 - (h))))
#endif

#ifdef __aarch64__
#define GENMASK				GENMASK_64
#else
#define GENMASK				GENMASK_32
#endif

/*
 * This variant of div_round_up can be used in macro definitions but should not
 * be used in C code as the `d` parameter is evaluated twice.
 */
#define DIV_ROUND_UP_2EVAL(n, d)	(((n) + (d) - 1) / (d))

#define div_round_up(val, div) __extension__ ({	\
	__typeof__(div) _div = (div);		\
	((val) + _div - (__typeof__(div)) 1) / _div;		\
})
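
/*
 * Illustrative behaviour (not part of the original header): unlike
 * DIV_ROUND_UP_2EVAL, div_round_up evaluates its divisor only once, so
 * it is safe to pass expressions with side effects. For example,
 * div_round_up(10U, 4U) evaluates to 3U and div_round_up(8U, 4U) to 2U.
 */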

#define MIN(x, y) __extension__ ({	\
	__typeof__(x) _x = (x);		\
	__typeof__(y) _y = (y);		\
	(void)(&_x == &_y);		\
	(_x < _y) ? _x : _y;		\
})

#define MAX(x, y) __extension__ ({	\
	__typeof__(x) _x = (x);		\
	__typeof__(y) _y = (y);		\
	(void)(&_x == &_y);		\
	(_x > _y) ? _x : _y;		\
})

#define CLAMP(x, min, max) __extension__ ({ \
	__typeof__(x) _x = (x); \
	__typeof__(min) _min = (min); \
	__typeof__(max) _max = (max); \
	(void)(&_x == &_min); \
	(void)(&_x == &_max); \
	((_x > _max) ? _max : ((_x < _min) ? _min : _x)); \
})
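
/*
 * Illustrative values (not part of the original header): MIN(3U, 5U)
 * evaluates to 3U, MAX(3U, 5U) to 5U and CLAMP(12, 0, 10) to 10. The
 * (void)(&_x == &_y) style comparisons make the compiler warn when the
 * arguments have mismatched types.
 */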

/*
 * The round_up() macro rounds up a value to the given boundary in a
 * type-agnostic yet type-safe manner. The boundary must be a power of two.
 * In other words, it computes the smallest multiple of boundary which is
 * greater than or equal to value.
 *
 * round_down() is similar but rounds the value down instead.
 */
#define round_boundary(value, boundary)		\
	((__typeof__(value))((boundary) - 1))

#define round_up(value, boundary)		\
	((((value) - 1) | round_boundary(value, boundary)) + 1)

#define round_down(value, boundary)		\
	((value) & ~round_boundary(value, boundary))
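
/*
 * Illustrative values (not part of the original header), with a
 * power-of-two boundary of 0x1000:
 *	round_up(0x1234, 0x1000)   evaluates to 0x2000
 *	round_up(0x2000, 0x1000)   evaluates to 0x2000 (already aligned)
 *	round_down(0x1234, 0x1000) evaluates to 0x1000
 */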

/*
 * Add two values and check whether the addition overflowed.
 * The result is stored in '*res'.
 * Returns 0 on success and 1 on overflow.
 */
#define add_overflow(a, b, res) __builtin_add_overflow((a), (b), (res))
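
/*
 * A minimal usage sketch (assumption, not part of the original header);
 * the names 'base', 'len' and 'sum' are hypothetical:
 *
 *	uint32_t sum;
 *
 *	if (add_overflow(base, len, &sum))
 *		return 1;
 *
 * The macro maps directly onto the GCC/Clang __builtin_add_overflow()
 * builtin, so the result type is taken from the pointer argument 'res'.
 */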

/*
 * Round up a value to align with a given size and
 * check whether overflow happens.
 * The rounded-up value is stored in '*res'.
 * Returns 0 on success and 1 on overflow.
 */
#define round_up_overflow(v, size, res) (__extension__({ \
	typeof(res) __res = res; \
	typeof(*(__res)) __roundup_tmp = 0; \
	typeof(v) __roundup_mask = (typeof(v))(size) - 1; \
	\
	add_overflow((v), __roundup_mask, &__roundup_tmp) ? 1 : \
		(void)(*(__res) = __roundup_tmp & ~__roundup_mask), 0; \
}))

/*
 * Add 'a' and 'b', then round up the result to align with a given size,
 * checking whether overflow happens at either step.
 * The rounded-up value is stored in '*res'.
 * Returns 0 on success and 1 on overflow.
 */
#define add_with_round_up_overflow(a, b, size, res) (__extension__({ \
	typeof(a) __a = (a); \
	typeof(__a) __add_res = 0; \
	\
	add_overflow((__a), (b), &__add_res) ? 1 : \
		round_up_overflow(__add_res, (size), (res)) ? 1 : 0; \
}))
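
/*
 * Illustrative behaviour (not part of the original header), where 'out'
 * is a hypothetical uint32_t variable and 'size' is a power of two:
 * round_up_overflow(0x1003U, 8U, &out) stores 0x1008U in 'out' and
 * evaluates to 0, whereas round_up_overflow(UINT32_MAX - 2U, 8U, &out)
 * evaluates to 1 because adding the alignment mask wraps around.
 */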

/**
 * Helper macro to check whether a value is aligned to a given boundary.
 */
#define is_aligned(value, boundary)			\
	(round_up((uintptr_t) value, boundary) ==	\
	 round_down((uintptr_t) value, boundary))

/*
 * Evaluates to 1 if (ptr + inc) overflows, 0 otherwise.
 * Both arguments must be unsigned pointer values (i.e. uintptr_t).
 */
#define check_uptr_overflow(_ptr, _inc)		\
	((_ptr) > (UINTPTR_MAX - (_inc)))
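
/*
 * A brief usage sketch (assumption, not part of the original header);
 * 'cursor' and 'chunk_size' are hypothetical uintptr_t values:
 *
 *	if (check_uptr_overflow(cursor, chunk_size))
 *		return 1;
 *
 *	cursor += chunk_size;
 */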

/*
 * Evaluates to 1 if (u32 + inc) overflows, 0 otherwise.
 * Both arguments must be 32-bit unsigned integers (i.e. effectively uint32_t).
 */
#define check_u32_overflow(_u32, _inc) \
	((_u32) > (UINT32_MAX - (_inc)))

/* Register size of the current architecture. */
#ifdef __aarch64__
#define REGSZ		U(8)
#else
#define REGSZ		U(4)
#endif

/*
 * Test whether the current architecture version is at least the expected
 * version.
 */
#define ARM_ARCH_AT_LEAST(_maj, _min) \
	((ARM_ARCH_MAJOR > (_maj)) || \
	 ((ARM_ARCH_MAJOR == (_maj)) && (ARM_ARCH_MINOR >= (_min))))
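
/*
 * Illustrative usage (not part of the original header), assuming the
 * build system provides ARM_ARCH_MAJOR and ARM_ARCH_MINOR:
 *
 *	#if ARM_ARCH_AT_LEAST(8, 2)
 *	...
 *	#endif
 */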

/*
 * Import an assembly or linker symbol as a C expression with the specified
 * type
 */
#define IMPORT_SYM(type, sym, name) \
	extern char sym[];\
	static const __attribute__((unused)) type name = (type) sym;
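
/*
 * An illustrative usage sketch (assumption, not part of the original
 * header); '__EXAMPLE_START__' is a hypothetical linker symbol:
 *
 *	IMPORT_SYM(uintptr_t, __EXAMPLE_START__, EXAMPLE_START);
 *
 * EXAMPLE_START can then be used as an ordinary C constant holding the
 * address of the linker symbol.
 */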

/*
 * When the symbol is used to hold a pointer, its alignment can be asserted
 * with this macro. For example, if there is a linker symbol that is going to
 * be used as a 64-bit pointer, the value of the linker symbol must also be
 * aligned to 64 bits. This macro makes sure this is the case.
 */
#define ASSERT_SYM_PTR_ALIGN(sym) assert(((size_t)(sym) % __alignof__(*(sym))) == 0)

#define COMPILER_BARRIER() __asm__ volatile ("" ::: "memory")

/* Compiler builtin available in GCC >= 9 and planned for LLVM */
#ifdef __HAVE_SPECULATION_SAFE_VALUE
# define SPECULATION_SAFE_VALUE(var) __builtin_speculation_safe_value(var)
#else
# define SPECULATION_SAFE_VALUE(var) var
#endif

/*
 * Ticks elapsed in one second with a signal of 1 MHz
 */
#define MHZ_TICKS_PER_SEC	U(1000000)

/*
 * Ticks elapsed in one second with a signal of 1 kHz
 */
#define KHZ_TICKS_PER_SEC U(1000)

#endif /* UTILS_DEF_H */