/*
 * Copyright (c) 2016-2025, Arm Limited and Contributors. All rights reserved.
 * Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#ifndef UTILS_DEF_H
#define UTILS_DEF_H

#include <export/lib/utils_def_exp.h>

/* Compute the number of elements in the given array */
#define ARRAY_SIZE(a) \
	(sizeof(a) / sizeof((a)[0]))

#define IS_POWER_OF_TWO(x) \
	(((x) & ((x) - 1)) == 0)

#define SIZE_FROM_LOG2_WORDS(n)	(U(4) << (n))

#if defined(__LINKER__) || defined(__ASSEMBLER__)
#define BIT_32(nr)	(U(1) << (nr))
#define BIT_64(nr)	(ULL(1) << (nr))
#else
#define BIT_32(nr)	(((uint32_t)(1U)) << (nr))
#define BIT_64(nr)	(((uint64_t)(1ULL)) << (nr))
#endif

#ifdef __aarch64__
#define BIT	BIT_64
#else
#define BIT	BIT_32
#endif

/*
 * Create a contiguous bitmask starting at bit position @low and ending at
 * position @high. For example,
 * GENMASK_64(39, 21) gives us the 64-bit mask 0x000000ffffe00000.
 */
#if defined(__LINKER__) || defined(__ASSEMBLER__)
#define GENMASK_32(high, low) \
	(((0xFFFFFFFF) << (low)) & (0xFFFFFFFF >> (32 - 1 - (high))))

#define GENMASK_64(high, low) \
	((~0 << (low)) & (~0 >> (64 - 1 - (high))))
#else
#define GENMASK_32(high, low) \
	((~UINT32_C(0) >> (32U - 1U - (high))) ^ ((BIT_32(low) - 1U)))

#define GENMASK_64(high, low) \
	((~UINT64_C(0) >> (64U - 1U - (high))) ^ ((BIT_64(low) - 1U)))
#endif

#ifdef __aarch64__
#define GENMASK	GENMASK_64
#else
#define GENMASK	GENMASK_32
#endif

/* High/low 32 bits of a 64-bit value */
#define HI(addr)	(addr >> 32)
#define LO(addr)	(addr & 0xffffffff)

/* High/low 64 bits of a 128-bit value */
#define HI_64(addr)	(addr >> 64)
#define LO_64(addr)	(addr & 0xffffffffffffffff)

/*
 * This variant of div_round_up() can be used in macro definitions but should
 * not be used in C code, as the divisor parameter `d` is evaluated twice.
 */
#define DIV_ROUND_UP_2EVAL(n, d)	(((n) + (d) - 1) / (d))

#define div_round_up(val, div) __extension__ ({ \
	__typeof__(div) _div = (div); \
	((val) + _div - (__typeof__(div)) 1) / _div; \
})

#define MIN(x, y) __extension__ ({ \
	__typeof__(x) _x = (x); \
	__typeof__(y) _y = (y); \
	(void)(&_x == &_y); \
	(_x < _y) ? _x : _y; \
})

#define MAX(x, y) __extension__ ({ \
	__typeof__(x) _x = (x); \
	__typeof__(y) _y = (y); \
	(void)(&_x == &_y); \
	(_x > _y) ? _x : _y; \
})

#define CLAMP(x, min, max) __extension__ ({ \
	__typeof__(x) _x = (x); \
	__typeof__(min) _min = (min); \
	__typeof__(max) _max = (max); \
	(void)(&_x == &_min); \
	(void)(&_x == &_max); \
	((_x > _max) ? _max : ((_x < _min) ? _min : _x)); \
})
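
/*
 * Illustrative examples (not part of the API):
 *
 *   GENMASK_32(7, 4)   evaluates to 0x000000f0
 *   GENMASK_64(39, 21) evaluates to 0x000000ffffe00000
 *   MIN(3U, 5U)        evaluates to 3U
 *   CLAMP(9, 0, 4)     evaluates to 4
 *
 * MIN, MAX and CLAMP evaluate each argument exactly once, so arguments with
 * side effects are safe. The dummy pointer comparison (void)(&_x == &_y)
 * exists only to make the compiler emit a "comparison of distinct pointer
 * types" warning when the two arguments have different types.
 */
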
/*
 * The round_up() macro rounds up a value to the given boundary in a
 * type-agnostic yet type-safe manner. The boundary must be a power of two.
 * In other words, it computes the smallest multiple of the boundary which is
 * greater than or equal to the value.
 *
 * round_down() is similar but rounds the value down instead.
 */
#define round_boundary(value, boundary) \
	((__typeof__(value))((boundary) - 1))

#define round_up(value, boundary) \
	((((value) - 1) | round_boundary(value, boundary)) + 1)

#define round_down(value, boundary) \
	((value) & ~round_boundary(value, boundary))

/*
 * Add two values and check whether the addition overflowed.
 * The result is stored in '*res'.
 * Returns 0 on success and 1 on overflow.
 */
#define add_overflow(a, b, res) __builtin_add_overflow((a), (b), (res))

/*
 * Round up a value to align with a given size and check whether overflow
 * happens. The rounded-up value is stored in '*res'.
 * Returns 0 on success and 1 on overflow.
 */
#define round_up_overflow(v, size, res) (__extension__({ \
	typeof(res) __res = res; \
	typeof(*(__res)) __roundup_tmp = 0; \
	typeof(v) __roundup_mask = (typeof(v))(size) - 1; \
	\
	add_overflow((v), __roundup_mask, &__roundup_tmp) ? 1 : \
		((void)(*(__res) = __roundup_tmp & ~__roundup_mask), 0); \
}))

/*
 * Add 'a' and 'b', then round up the result to align with a given size and
 * check whether overflow happens. The rounded-up value is stored in '*res'.
 * Returns 0 on success and 1 on overflow.
 */
#define add_with_round_up_overflow(a, b, size, res) (__extension__({ \
	typeof(a) __a = (a); \
	typeof(__a) __add_res = 0; \
	\
	add_overflow((__a), (b), &__add_res) ? 1 : \
		round_up_overflow(__add_res, (size), (res)) ? 1 : 0; \
}))

/**
 * Helper macro to check whether a value lies on a given boundary.
 */
#define is_aligned(value, boundary) \
	(round_up((uintptr_t) value, boundary) == \
	round_down((uintptr_t) value, boundary))

/*
 * Evaluates to 1 if (ptr + inc) overflows, 0 otherwise.
 * Both arguments must be unsigned pointer values (i.e. uintptr_t).
 */
#define check_uptr_overflow(_ptr, _inc) \
	((_ptr) > (UINTPTR_MAX - (_inc)))

/*
 * Evaluates to 1 if (u32 + inc) overflows, 0 otherwise.
 * Both arguments must be 32-bit unsigned integers (i.e. effectively uint32_t).
 */
#define check_u32_overflow(_u32, _inc) \
	((_u32) > (UINT32_MAX - (_inc)))

/* Register size of the current architecture. */
#ifdef __aarch64__
#define REGSZ	U(8)
#else
#define REGSZ	U(4)
#endif

/*
 * Test whether the current architecture version is at least the expected
 * version.
 */
#define ARM_ARCH_AT_LEAST(_maj, _min) \
	((ARM_ARCH_MAJOR > (_maj)) || \
	 ((ARM_ARCH_MAJOR == (_maj)) && (ARM_ARCH_MINOR >= (_min))))

/*
 * Import an assembly or linker symbol as a C expression with the specified
 * type.
 */
#define IMPORT_SYM(type, sym, name) \
	extern char sym[]; \
	static const __attribute__((unused)) type name = (type) sym;

/*
 * When a symbol is used to hold a pointer, its alignment can be asserted
 * with this macro. For example, if a linker symbol is going to be used as a
 * 64-bit pointer, the value of the linker symbol must also be aligned to
 * 64 bits. This macro makes sure this is the case.
 */
#define ASSERT_SYM_PTR_ALIGN(sym) assert(((size_t)(sym) % __alignof__(*(sym))) == 0)
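
/*
 * Usage sketch (illustrative only): __RO_START__ stands in for a linker
 * symbol and PAGE_SIZE for a power-of-two boundary; neither is defined by
 * this header. IMPORT_SYM() makes the symbol's address usable as a C
 * constant, and round_up_overflow() reports overflow instead of silently
 * wrapping:
 *
 *   IMPORT_SYM(uintptr_t, __RO_START__, BL_RO_BASE);
 *
 *   assert(is_aligned(BL_RO_BASE, PAGE_SIZE));
 *
 *   size_t total;
 *   if (round_up_overflow(len, PAGE_SIZE, &total) != 0) {
 *           return 1;
 *   }
 */
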
#define COMPILER_BARRIER() __asm__ volatile ("" ::: "memory")

/* Compiler builtin provided by GCC >= 9; an LLVM equivalent is planned */
#ifdef __HAVE_SPECULATION_SAFE_VALUE
# define SPECULATION_SAFE_VALUE(var) __builtin_speculation_safe_value(var)
#else
# define SPECULATION_SAFE_VALUE(var) var
#endif

/*
 * Ticks elapsed in one second with a signal of 1 MHz.
 */
#define MHZ_TICKS_PER_SEC	U(1000000)

/*
 * Ticks elapsed in one second with a signal of 1 kHz.
 */
#define KHZ_TICKS_PER_SEC	U(1000)

/**
 * EXTRACT_FIELD - Extract a specific bit field from a value.
 *
 * @val: The input value containing the field.
 * @mask: A bitmask representing the maximum value of the field.
 * @shift: The starting bit position of the field.
 *
 * This macro shifts the input value (@val) to the right by @shift bits,
 * aligning the target field with the least significant bits (LSB).
 * It then applies @mask to extract only the relevant bits.
 */
#define EXTRACT_FIELD(val, mask, shift)	(((val) >> (shift)) & (mask))

#endif /* UTILS_DEF_H */