/*
 * Copyright (c) 2016-2025, Arm Limited and Contributors. All rights reserved.
 * Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#ifndef UTILS_DEF_H
#define UTILS_DEF_H

#include <export/lib/utils_def_exp.h>

/* Compute the number of elements in the given array */
#define ARRAY_SIZE(a) \
	(sizeof(a) / sizeof((a)[0]))

#define IS_POWER_OF_TWO(x) \
	(((x) & ((x) - 1)) == 0)

#define SIZE_FROM_LOG2_WORDS(n) (U(4) << (n))

#if defined(__LINKER__) || defined(__ASSEMBLER__)
#define BIT_32(nr) (U(1) << (nr))
#define BIT_64(nr) (ULL(1) << (nr))
#else
#define BIT_32(nr) (((uint32_t)(1U)) << (nr))
#define BIT_64(nr) (((uint64_t)(1ULL)) << (nr))
#endif

#ifdef __aarch64__
#define BIT BIT_64
#else
#define BIT BIT_32
#endif

/*
 * Create a contiguous bitmask starting at bit position @low and ending at
 * position @high. For example,
 * GENMASK_64(39, 21) gives us the 64-bit mask 0x000000ffffe00000.
 */
#if defined(__LINKER__) || defined(__ASSEMBLER__)
#define GENMASK_32(high, low) \
	(((0xFFFFFFFF) << (low)) & (0xFFFFFFFF >> (32 - 1 - (high))))

#define GENMASK_64(high, low) \
	((~0 << (low)) & (~0 >> (64 - 1 - (high))))
#else
#define GENMASK_32(high, low) \
	((~UINT32_C(0) >> (32U - 1U - (high))) ^ ((BIT_32(low) - 1U)))

#define GENMASK_64(high, low) \
	((~UINT64_C(0) >> (64U - 1U - (high))) ^ ((BIT_64(low) - 1U)))
#endif

#ifdef __aarch64__
#define GENMASK GENMASK_64
#else
#define GENMASK GENMASK_32
#endif

/*
 * Similar to GENMASK_64 but uses a named register field to compute the mask.
 * For a register field REG_FIELD, the macros REG_FIELD_WIDTH and
 * REG_FIELD_SHIFT must be defined.
 */
#define MASK(regfield) \
	((~0ULL >> (64ULL - (regfield##_WIDTH))) << (regfield##_SHIFT))

#define HI(addr) ((addr) >> 32)
#define LO(addr) ((addr) & 0xffffffff)

#define HI_64(addr) ((addr) >> 64)
#define LO_64(addr) ((addr) & 0xffffffffffffffff)

/**
 * EXTRACT - Extracts a specific bit field from a value.
 *
 * @regfield: The name of the register field. For a register field REG_FIELD,
 * the macros REG_FIELD_WIDTH and REG_FIELD_SHIFT must be defined.
 *
 * @reg: The input value containing the field.
 *
 * The result of this macro is the contents of the field, right-shifted to
 * the least significant bit positions, with all other bits zero.
 */
#define EXTRACT(regfield, reg) \
	(((reg) & MASK(regfield)) >> (regfield##_SHIFT))

#define UPDATE_REG_FIELD(regfield, reg, val) \
	do { \
		(reg) &= ~(MASK(regfield)); \
		(reg) |= ((uint64_t)(val) << (regfield##_SHIFT)); \
	} while (0)
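/*
 * Example usage of the field macros above (FOO_BAR is a hypothetical field
 * used only for illustration; it is not defined by this header). Given:
 *
 *	#define FOO_BAR_SHIFT	U(4)
 *	#define FOO_BAR_WIDTH	U(2)
 *
 * MASK(FOO_BAR) evaluates to 0x30, EXTRACT(FOO_BAR, 0x75) yields 0x3, and
 * UPDATE_REG_FIELD(FOO_BAR, reg, 0x2) clears bits [5:4] of 'reg' and then
 * ORs in the value 0x2 at bit position 4.
 */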
/*
 * This variant of div_round_up can be used in macro definitions but should
 * not be used in C code, as the `div` parameter is evaluated twice.
 */
#define DIV_ROUND_UP_2EVAL(n, d) (((n) + (d) - 1) / (d))

#define div_round_up(val, div) __extension__ ({ \
	__typeof__(div) _div = (div); \
	((val) + _div - (__typeof__(div)) 1) / _div; \
})

#define MIN(x, y) __extension__ ({ \
	__typeof__(x) _x = (x); \
	__typeof__(y) _y = (y); \
	(void)(&_x == &_y); \
	(_x < _y) ? _x : _y; \
})

#define MAX(x, y) __extension__ ({ \
	__typeof__(x) _x = (x); \
	__typeof__(y) _y = (y); \
	(void)(&_x == &_y); \
	(_x > _y) ? _x : _y; \
})

#define CLAMP(x, min, max) __extension__ ({ \
	__typeof__(x) _x = (x); \
	__typeof__(min) _min = (min); \
	__typeof__(max) _max = (max); \
	(void)(&_x == &_min); \
	(void)(&_x == &_max); \
	((_x > _max) ? _max : ((_x < _min) ? _min : _x)); \
})

/*
 * The round_up() macro rounds up a value to the given boundary in a
 * type-agnostic yet type-safe manner. The boundary must be a power of two.
 * In other words, it computes the smallest multiple of boundary which is
 * greater than or equal to value.
 *
 * round_down() is similar but rounds the value down instead.
 */
#define round_boundary(value, boundary) \
	((__typeof__(value))((boundary) - 1))

#define round_up(value, boundary) \
	((((value) - 1) | round_boundary(value, boundary)) + 1)

#define round_down(value, boundary) \
	((value) & ~round_boundary(value, boundary))
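/*
 * For example, with a power-of-two boundary of 0x1000:
 * round_up(0x1234, 0x1000) evaluates to 0x2000 and
 * round_down(0x1234, 0x1000) evaluates to 0x1000. A value that already
 * lies on the boundary is returned unchanged by both macros.
 */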
/*
 * Add two values and check whether the operation overflowed.
 * The result is stored in '*res'.
 * Returns 0 on success and 1 on overflow.
 */
#define add_overflow(a, b, res) __builtin_add_overflow((a), (b), (res))

/*
 * Round up a value to align with a given size and check whether overflow
 * happens.
 * The rounded-up value is stored in '*res'.
 * Returns 0 on success and 1 on overflow.
 */
#define round_up_overflow(v, size, res) (__extension__({ \
	typeof(res) __res = res; \
	typeof(*(__res)) __roundup_tmp = 0; \
	typeof(v) __roundup_mask = (typeof(v))(size) - 1; \
	\
	add_overflow((v), __roundup_mask, &__roundup_tmp) ? 1 : \
		((void)(*(__res) = __roundup_tmp & ~__roundup_mask), 0); \
}))

/*
 * Add a to b, then round up the result to align with a given size and
 * check whether overflow happens.
 * The rounded-up value is stored in '*res'.
 * Returns 0 on success and 1 on overflow.
 */
#define add_with_round_up_overflow(a, b, size, res) (__extension__({ \
	typeof(a) __a = (a); \
	typeof(__a) __add_res = 0; \
	\
	add_overflow((__a), (b), &__add_res) ? 1 : \
		round_up_overflow(__add_res, (size), (res)) ? 1 : 0; \
}))

/**
 * Helper macro to check that a value lies on a given boundary.
 */
#define is_aligned(value, boundary) \
	(round_up((uintptr_t)(value), boundary) == \
	 round_down((uintptr_t)(value), boundary))

/*
 * Evaluates to 1 if (ptr + inc) overflows, 0 otherwise.
 * Both arguments must be unsigned pointer values (i.e. uintptr_t).
 */
#define check_uptr_overflow(_ptr, _inc) \
	((_ptr) > (UINTPTR_MAX - (_inc)))

/*
 * Evaluates to 1 if (u32 + inc) overflows, 0 otherwise.
 * Both arguments must be 32-bit unsigned integers (i.e. effectively uint32_t).
 */
#define check_u32_overflow(_u32, _inc) \
	((_u32) > (UINT32_MAX - (_inc)))

/* Register size of the current architecture. */
#ifdef __aarch64__
#define REGSZ U(8)
#else
#define REGSZ U(4)
#endif

/*
 * Test whether the current architecture version is at least the version
 * expected.
 */
#define ARM_ARCH_AT_LEAST(_maj, _min) \
	((ARM_ARCH_MAJOR > (_maj)) || \
	 ((ARM_ARCH_MAJOR == (_maj)) && (ARM_ARCH_MINOR >= (_min))))

/*
 * Import an assembly or linker symbol as a C expression with the specified
 * type.
 */
#define IMPORT_SYM(type, sym, name) \
	extern char sym[]; \
	static const __attribute__((unused)) type name = (type) sym;

/*
 * When the symbol is used to hold a pointer, its alignment can be asserted
 * with this macro. For example, if there is a linker symbol that is going to
 * be used as a 64-bit pointer, the value of the linker symbol must also be
 * aligned to 64 bits. This macro makes sure this is the case.
 */
#define ASSERT_SYM_PTR_ALIGN(sym) assert(((size_t)(sym) % __alignof__(*(sym))) == 0)

#define COMPILER_BARRIER() __asm__ volatile ("" ::: "memory")

/* Compiler builtin available in GCC >= 9 and planned for LLVM */
#ifdef __HAVE_SPECULATION_SAFE_VALUE
# define SPECULATION_SAFE_VALUE(var) __builtin_speculation_safe_value(var)
#else
# define SPECULATION_SAFE_VALUE(var) var
#endif

/*
 * Ticks elapsed in one second with a signal of 1 MHz.
 */
#define MHZ_TICKS_PER_SEC U(1000000)

/*
 * Ticks elapsed in one second with a signal of 1 kHz.
 */
#define KHZ_TICKS_PER_SEC U(1000)

#endif /* UTILS_DEF_H */