/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */
#ifndef UTIL_H
#define UTIL_H

#include <compiler.h>
#include <inttypes.h>

#ifndef __ASSEMBLER__
#include <assert.h>
#include <stddef.h>
#endif

/* Common size constants, usable from both C and assembly */
#define SIZE_4K	UINTPTR_C(0x1000)
#define SIZE_1M	UINTPTR_C(0x100000)
#define SIZE_2M	UINTPTR_C(0x200000)
#define SIZE_4M	UINTPTR_C(0x400000)
#define SIZE_8M	UINTPTR_C(0x800000)
#define SIZE_2G	UINTPTR_C(0x80000000)

#ifndef MAX
#ifndef __ASSEMBLER__
/*
 * Statement-expression variants: each argument is evaluated exactly once
 * into a local temporary, so arguments with side effects are safe.
 */
#define MAX(a, b) \
	(__extension__({ __typeof__(a) _a = (a); \
			 __typeof__(b) _b = (b); \
			 _a > _b ? _a : _b; }))

#define MIN(a, b) \
	(__extension__({ __typeof__(a) _a = (a); \
			 __typeof__(b) _b = (b); \
			 _a < _b ? _a : _b; }))
#else
/*
 * Assembly cannot use statement expressions; note these forms evaluate
 * each argument twice.
 */
#define MAX(a, b)	(((a) > (b)) ? (a) : (b))
#define MIN(a, b)	(((a) < (b)) ? (a) : (b))
#endif
#endif

/*
 * In some particular conditions MAX and MIN macros fail to
 * build from C source file implementation. In such cases one
 * needs to use MAX_UNSAFE/MIN_UNSAFE instead. "Unsafe" because the
 * arguments are evaluated twice, so they must be free of side effects.
 */
#define MAX_UNSAFE(a, b)	(((a) > (b)) ? (a) : (b))
#define MIN_UNSAFE(a, b)	(((a) < (b)) ? (a) : (b))

/* Number of elements in array @x; only valid on true arrays, not pointers */
#ifndef ARRAY_SIZE
#define ARRAY_SIZE(x)	(sizeof(x) / sizeof((x)[0]))
#endif

#ifndef __ASSEMBLER__
/* Round up the even multiple of size, size has to be a power of 2 */
#define ROUNDUP(v, size)	(((v) + ((__typeof__(v))(size) - 1)) & \
				 ~((__typeof__(v))(size) - 1))

/*
 * Round up the even multiple of size and return if result overflow
 * output value range. Size has to be a power of 2.
 *
 * Evaluates to 1 if adding the rounding mask to @v overflows the type of
 * *@res; otherwise stores the rounded-up value in *@res and evaluates to 0.
 */
#define ROUNDUP_OVERFLOW(v, size, res) (__extension__({ \
	typeof(*(res)) __roundup_tmp = 0; \
	typeof(v) __roundup_mask = (typeof(v))(size) - 1; \
	\
	ADD_OVERFLOW((v), __roundup_mask, &__roundup_tmp) ? 1 : \
		((void)(*(res) = __roundup_tmp & ~__roundup_mask), 0); \
}))

/*
 * ROUNDUP2_DIV(x, y)
 *
 * Rounds up to the nearest multiple of y and then divides by y. Safe
 * against overflow, y has to be a power of 2.
 *
 * Overflow-safe because it divides first and then adds 1 when there is a
 * remainder, instead of forming the (possibly overflowing) rounded value.
 *
 * This macro is intended to be used to convert from "number of bytes" to
 * "number of pages" or similar units. Example:
 *   num_pages = ROUNDUP2_DIV(num_bytes, SMALL_PAGE_SIZE);
 */
#define ROUNDUP2_DIV(x, y) \
	(__extension__({ \
		typeof(x) __roundup_x = (x); \
		typeof(y) __roundup_mask = (typeof(x))(y) - 1; \
		\
		assert(IS_POWER_OF_TWO(y)); \
		(__roundup_x / (y)) + (__roundup_x & __roundup_mask ? 1 : 0); \
	}))

/*
 * ROUNDUP_DIV(x, y)
 *
 * Rounds up to the nearest multiple of y and then divides by y. Safe
 * against overflow.
 */
#define ROUNDUP_DIV(x, y)	(ROUNDUP((x), (y)) / (__typeof__(x))(y))

/* Round down the even multiple of size, size has to be a power of 2 */
#define ROUNDDOWN(v, size)	((v) & ~((__typeof__(v))(size) - 1))

/*
 * Round up the result of x / y to the nearest upper integer if result is not
 * already an integer.
 *
 * Note: the (x) + (y) - 1 sum may overflow for values of x close to the
 * type's maximum; use ROUNDUP2_DIV() when overflow safety is needed.
 */
#define DIV_ROUND_UP(x, y)	(((x) + (y) - 1) / (y))

/* Unsigned integer division with nearest rounding variant */
#define UDIV_ROUND_NEAREST(x, y) \
	(__extension__ ({ __typeof__(x) _x = (x); \
			  __typeof__(y) _y = (y); \
			  (_x + (_y / 2)) / _y; }))
#else /* __ASSEMBLER__ */
/* Assembly fallbacks: no statement expressions, arguments evaluated twice */
#define ROUNDUP(x, y)			((((x) + (y) - 1) / (y)) * (y))
#define ROUNDDOWN(x, y)			(((x) / (y)) * (y))
#define UDIV_ROUND_NEAREST(x, y)	(((x) + ((y) / 2)) / (y))
#endif

/*
 * x has to be of an unsigned type.
 * (~(x) + 1) is the two's-complement negation of x, so x & -x == x holds
 * exactly when at most one bit is set; the x != 0 test rules out zero.
 */
#define IS_POWER_OF_TWO(x)	(((x) != 0) && (((x) & (~(x) + 1)) == (x)))

/* Alignment tests; the mask trick is only meaningful when @a is a power of 2 */
#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)
#define IS_ALIGNED_WITH_TYPE(x, type) \
	(__extension__({ \
		type __is_aligned_y; \
		IS_ALIGNED((uintptr_t)(x), __alignof__(__is_aligned_y)); \
	}))

/*
 * Two-level expansion so that a macro passed as @x is expanded before it
 * is stringified or pasted.
 */
#define TO_STR(x)	_TO_STR(x)
#define _TO_STR(x)	#x

#define CONCAT(x, y)	_CONCAT(x, y)
#define _CONCAT(x, y)	x##y

/*
 * Given a pointer to a member of a struct, derive a pointer to the
 * containing struct of type @type.
 */
#define container_of(ptr, type, member) \
	(__extension__({ \
		const typeof(((type *)0)->member) *__ptr = (ptr); \
		(type *)((unsigned long)(__ptr) - offsetof(type, member)); \
	}))

/* Size in bytes of a struct member, without needing an instance */
#define MEMBER_SIZE(type, member)	sizeof(((type *)0)->member)

#ifdef __ASSEMBLER__
/* Assembler expressions have no C integer-constant suffixes */
#define BIT32(nr)		(1 << (nr))
#define BIT64(nr)		(1 << (nr))
#define SHIFT_U32(v, shift)	((v) << (shift))
#define SHIFT_U64(v, shift)	((v) << (shift))
#else
/* Typed variants: operand is widened before the shift to avoid UB */
#define BIT32(nr)		(UINT32_C(1) << (nr))
#define BIT64(nr)		(UINT64_C(1) << (nr))
#define SHIFT_U32(v, shift)	((uint32_t)(v) << (shift))
#define SHIFT_U64(v, shift)	((uint64_t)(v) << (shift))
#endif
#define BIT(nr)			BIT32(nr)

/*
 * Create a contiguous bitmask starting at bit position @l and ending at
 * position @h. For example
 * GENMASK_64(39, 21) gives us the 64bit vector 0x000000ffffe00000.
 */
#define GENMASK_32(h, l) \
	((UINT32_C(0xffffffff) << (l)) & \
	 (UINT32_C(0xffffffff) >> (32 - 1 - (h))))

#define GENMASK_64(h, l) \
	(((~UINT64_C(0)) << (l)) & (~UINT64_C(0) >> (64 - 1 - (h))))

/*
 * Checking overflow for addition, subtraction and multiplication. Result
 * of operation is stored in res which is a pointer to some kind of
 * integer.
 *
 * The macros return true if an overflow occurred and *res is undefined.
 */
#define ADD_OVERFLOW(a, b, res)	__compiler_add_overflow((a), (b), (res))
#define SUB_OVERFLOW(a, b, res)	__compiler_sub_overflow((a), (b), (res))
#define MUL_OVERFLOW(a, b, res)	__compiler_mul_overflow((a), (b), (res))

/* Return a signed +1, 0 or -1 value based on data comparison */
#define CMP_TRILEAN(a, b) \
	(__extension__({ \
		__typeof__(a) _a = (a); \
		__typeof__(b) _b = (b); \
		\
		_a > _b ? 1 : _a < _b ? -1 : 0; \
	}))

#ifndef __ASSEMBLER__
/* Combine two 32-bit values into a 64-bit one; @reg0 supplies the high word */
static inline uint64_t reg_pair_to_64(uint32_t reg0, uint32_t reg1)
{
	return (uint64_t)reg0 << 32 | reg1;
}

/* Upper 32 bits of @val */
static inline uint32_t high32_from_64(uint64_t val)
{
	return val >> 32;
}

/* Lower 32 bits of @val (truncating conversion) */
static inline uint32_t low32_from_64(uint64_t val)
{
	return val;
}

/* Split @val into two 32-bit halves; *@reg0 receives the high word */
static inline void reg_pair_from_64(uint64_t val, uint32_t *reg0,
				    uint32_t *reg1)
{
	*reg0 = high32_from_64(val);
	*reg1 = low32_from_64(val);
}

/*
 * Get and set bit fields.
 *
 * (mask & ~(mask - 1)) isolates the lowest set bit of @mask, so dividing
 * (or multiplying) by it shifts the field down to (or up from) bit 0
 * without needing an explicit shift count. Intended for masks with
 * contiguous set bits.
 */
static inline uint32_t get_field_u32(uint32_t reg, uint32_t mask)
{
	return (reg & mask) / (mask & ~(mask - 1));
}

static inline uint32_t set_field_u32(uint32_t reg, uint32_t mask, uint32_t val)
{
	return (reg & ~mask) | (val * (mask & ~(mask - 1)));
}

static inline uint64_t get_field_u64(uint64_t reg, uint64_t mask)
{
	return (reg & mask) / (mask & ~(mask - 1));
}

static inline uint64_t set_field_u64(uint64_t reg, uint64_t mask,
				     uint64_t val)
{
	return (reg & ~mask) | (val * (mask & ~(mask - 1)));
}

/* Helper functions for qsort with standard types; sort @aa[0..n-1] in place */
void qsort_int(int *aa, size_t n);
void qsort_uint(unsigned int *aa, size_t n);
void qsort_long(long int *aa, size_t n);
void qsort_ul(unsigned long int *aa, size_t n);
void qsort_ll(long long int *aa, size_t n);
void qsort_ull(unsigned long long int *aa, size_t n);
void qsort_s8(int8_t *aa, size_t n);
void qsort_u8(uint8_t *aa, size_t n);
void qsort_s16(int16_t *aa, size_t n);
void qsort_u16(uint16_t *aa, size_t n);
void qsort_s32(int32_t *aa, size_t n);
void qsort_u32(uint32_t *aa, size_t n);
void qsort_s64(int64_t *aa, size_t n);
void qsort_u64(uint64_t *aa, size_t n);
#endif

#endif /*UTIL_H*/