xref: /optee_os/lib/libutils/ext/include/util.h (revision 817466cb476de705a8e3dabe1ef165fe27a18c2f)
/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */
#ifndef UTIL_H
#define UTIL_H

#include <compiler.h>
#include <stdint.h>

#define SIZE_4K	UINTPTR_C(0x1000)
#define SIZE_1M	UINTPTR_C(0x100000)
#define SIZE_2M	UINTPTR_C(0x200000)
#define SIZE_4M	UINTPTR_C(0x400000)
#define SIZE_8M	UINTPTR_C(0x800000)
#define SIZE_2G	UINTPTR_C(0x80000000)

#ifndef MAX
#ifndef ASM
#define MAX(a, b) \
	(__extension__({ __typeof__(a) _a = (a); \
	   __typeof__(b) _b = (b); \
	 _a > _b ? _a : _b; }))

#define MIN(a, b) \
	(__extension__({ __typeof__(a) _a = (a); \
	   __typeof__(b) _b = (b); \
	 _a < _b ? _a : _b; }))
#else
#define MAX(a, b)	(((a) > (b)) ? (a) : (b))
#define MIN(a, b)	(((a) < (b)) ? (a) : (b))
#endif
#endif
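/*
 * Example: MAX(3, 5) == 5 and MIN(3, 5) == 3. The statement-expression
 * variants above evaluate each argument exactly once, so side effects in
 * the arguments are safe; the plain ASM variants expand their arguments
 * more than once.
 */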

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
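/*
 * Example: given 'static uint8_t buf[16];', ARRAY_SIZE(buf) == 16. Note
 * that this only works on real arrays, not on pointers.
 */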

#ifndef ASM
/* Round up v to a multiple of size, size has to be a power of 2 */
#define ROUNDUP(v, size) (((v) + ((__typeof__(v))(size) - 1)) & \
			  ~((__typeof__(v))(size) - 1))

/* Round down v to a multiple of size, size has to be a power of 2 */
#define ROUNDDOWN(v, size) ((v) & ~((__typeof__(v))(size) - 1))
#else
#define ROUNDUP(x, y)			((((x) + (y) - 1) / (y)) * (y))
#define ROUNDDOWN(x, y)		(((x) / (y)) * (y))
#endif
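/*
 * Example: ROUNDUP(0x1001, 0x1000) == 0x2000 and
 * ROUNDDOWN(0x1fff, 0x1000) == 0x1000.
 */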

/* x has to be of an unsigned type */
#define IS_POWER_OF_TWO(x) (((x) != 0) && (((x) & (~(x) + 1)) == (x)))
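/* Example: IS_POWER_OF_TWO(64U) is true, IS_POWER_OF_TWO(65U) is false. */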

#define ALIGNMENT_IS_OK(p, type) \
	(((uintptr_t)(p) & (__alignof__(type) - 1)) == 0)
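/*
 * Example: on targets where __alignof__(uint32_t) == 4,
 * ALIGNMENT_IS_OK((void *)0x1000, uint32_t) is true and
 * ALIGNMENT_IS_OK((void *)0x1002, uint32_t) is false.
 */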

#define TO_STR(x) _TO_STR(x)
#define _TO_STR(x) #x
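/*
 * Example: given a hypothetical '#define VER 3', TO_STR(VER) expands to
 * the string literal "3" (the argument is macro-expanded first), while
 * _TO_STR(VER) yields "VER".
 */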

#define CONCAT(x, y) _CONCAT(x, y)
#define _CONCAT(x, y) x##y
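/*
 * Example: with a hypothetical '#define UNIT 32', CONCAT(uint, UNIT)
 * forms the token uint32, while _CONCAT(uint, UNIT) forms uintUNIT.
 */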

#define container_of(ptr, type, member) \
	(__extension__({ \
		const typeof(((type *)0)->member) *__ptr = (ptr); \
		(type *)((unsigned long)(__ptr) - offsetof(type, member)); \
	}))
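/*
 * Example with a hypothetical type:
 *
 *	struct item { int key; struct node link; };
 *
 * Given 'struct node *n' pointing at the 'link' member of a struct item,
 * container_of(n, struct item, link) returns a pointer to the enclosing
 * struct item.
 */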

#define MEMBER_SIZE(type, member) sizeof(((type *)0)->member)
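/*
 * Example: for a hypothetical 'struct hdr { uint32_t magic; uint8_t pad[12]; };',
 * MEMBER_SIZE(struct hdr, pad) == 12.
 */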

#ifdef ASM
#define BIT32(nr)		(1 << (nr))
#define BIT64(nr)		(1 << (nr))
#define SHIFT_U32(v, shift)	((v) << (shift))
#define SHIFT_U64(v, shift)	((v) << (shift))
#else
#define BIT32(nr)		(UINT32_C(1) << (nr))
#define BIT64(nr)		(UINT64_C(1) << (nr))
#define SHIFT_U32(v, shift)	((uint32_t)(v) << (shift))
#define SHIFT_U64(v, shift)	((uint64_t)(v) << (shift))
#endif
#define BIT(nr)			BIT32(nr)
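/*
 * Example: BIT32(31) == UINT32_C(0x80000000), BIT64(40) == UINT64_C(0x10000000000)
 * and SHIFT_U64(0xff, 32) == UINT64_C(0xff00000000). The typed C variants avoid
 * the undefined behaviour of shifting a plain int into or past its sign bit.
 */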

/*
 * Create a contiguous bitmask starting at bit position @l and ending at
 * position @h. For example, GENMASK_64(39, 21) gives the 64-bit mask
 * 0x000000ffffe00000.
 */
#define GENMASK_32(h, l) \
	(((~UINT32_C(0)) << (l)) & (~UINT32_C(0) >> (32 - 1 - (h))))

#define GENMASK_64(h, l) \
	(((~UINT64_C(0)) << (l)) & (~UINT64_C(0) >> (64 - 1 - (h))))
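/* Example: GENMASK_32(7, 4) == 0xf0 and GENMASK_32(31, 0) == UINT32_MAX. */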

/*
 * Check for overflow in addition, subtraction and multiplication. The
 * result of the operation is stored in *res, where res is a pointer to
 * some kind of integer.
 *
 * The macros return true if an overflow occurred, in which case the
 * value in *res is undefined.
 */
#define ADD_OVERFLOW(a, b, res) __compiler_add_overflow((a), (b), (res))
#define SUB_OVERFLOW(a, b, res) __compiler_sub_overflow((a), (b), (res))
#define MUL_OVERFLOW(a, b, res) __compiler_mul_overflow((a), (b), (res))
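/*
 * Usage sketch with hypothetical size_t variables hdr_len and payload_len
 * (the error handling shown is illustrative):
 *
 *	size_t total = 0;
 *
 *	if (ADD_OVERFLOW(hdr_len, payload_len, &total))
 *		return TEE_ERROR_OVERFLOW;
 */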

/* Return a signed +1, 0 or -1 value based on data comparison */
#define CMP_TRILEAN(a, b) \
	(__extension__({ \
		__typeof__(a) _a = (a); \
		__typeof__(b) _b = (b); \
		\
		_a > _b ? 1 : _a < _b ? -1 : 0; \
	}))
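/*
 * Example: CMP_TRILEAN(2, 7) == -1, CMP_TRILEAN(7, 7) == 0 and
 * CMP_TRILEAN(9, 7) == 1, suitable as a qsort()-style comparison result.
 */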

#endif /*UTIL_H*/