/*
 * Copyright (c) 2016-2019, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#ifndef UTILS_DEF_H
#define UTILS_DEF_H

#include <export/lib/utils_def_exp.h>

/* Compute the number of elements in the given array */
#define ARRAY_SIZE(a)	\
	(sizeof(a) / sizeof((a)[0]))

#define IS_POWER_OF_TWO(x)	\
	(((x) & ((x) - 1)) == 0)

#define SIZE_FROM_LOG2_WORDS(n)	(4 << (n))

#define BIT_32(nr)	(U(1) << (nr))
#define BIT_64(nr)	(ULL(1) << (nr))

#ifdef __aarch64__
#define BIT	BIT_64
#else
#define BIT	BIT_32
#endif

/*
 * Create a contiguous bitmask starting at bit position @l and ending at
 * position @h. For example,
 * GENMASK_64(39, 21) gives us the 64-bit vector 0x000000ffffe00000.
 */
#if defined(__LINKER__) || defined(__ASSEMBLER__)
#define GENMASK_32(h, l) \
	(((0xFFFFFFFF) << (l)) & (0xFFFFFFFF >> (32 - 1 - (h))))

#define GENMASK_64(h, l) \
	((~0 << (l)) & (~0 >> (64 - 1 - (h))))
#else
#define GENMASK_32(h, l) \
	(((~UINT32_C(0)) << (l)) & (~UINT32_C(0) >> (32 - 1 - (h))))

#define GENMASK_64(h, l) \
	(((~UINT64_C(0)) << (l)) & (~UINT64_C(0) >> (64 - 1 - (h))))
#endif

#ifdef __aarch64__
#define GENMASK	GENMASK_64
#else
#define GENMASK	GENMASK_32
#endif

/*
 * This variant of div_round_up can be used in macro definitions but should
 * not be used in C code as the `div` parameter is evaluated twice.
 */
#define DIV_ROUND_UP_2EVAL(n, d)	(((n) + (d) - 1) / (d))

#define div_round_up(val, div) __extension__ ({ \
	__typeof__(div) _div = (div); \
	((val) + _div - (__typeof__(div)) 1) / _div; \
})

#define MIN(x, y) __extension__ ({ \
	__typeof__(x) _x = (x); \
	__typeof__(y) _y = (y); \
	(void)(&_x == &_y); \
	_x < _y ? _x : _y; \
})

#define MAX(x, y) __extension__ ({ \
	__typeof__(x) _x = (x); \
	__typeof__(y) _y = (y); \
	(void)(&_x == &_y); \
	_x > _y ? _x : _y; \
})

/*
 * The round_up() macro rounds up a value to the given boundary in a
 * type-agnostic yet type-safe manner. The boundary must be a power of two.
 * In other words, it computes the smallest multiple of boundary which is
 * greater than or equal to value.
 *
 * round_down() is similar but rounds the value down instead.
 */
#define round_boundary(value, boundary)	\
	((__typeof__(value))((boundary) - 1))

#define round_up(value, boundary)	\
	((((value) - 1) | round_boundary(value, boundary)) + 1)

#define round_down(value, boundary)	\
	((value) & ~round_boundary(value, boundary))

/*
 * Evaluates to 1 if (ptr + inc) overflows, 0 otherwise.
 * Both arguments must be unsigned pointer values (i.e. uintptr_t).
 */
#define check_uptr_overflow(_ptr, _inc)	\
	((_ptr) > (UINTPTR_MAX - (_inc)))

/*
 * Evaluates to 1 if (u32 + inc) overflows, 0 otherwise.
 * Both arguments must be 32-bit unsigned integers (i.e. effectively uint32_t).
 */
#define check_u32_overflow(_u32, _inc) \
	((_u32) > (UINT32_MAX - (_inc)))
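
/*
 * Illustrative expansions of some of the helpers above. These are editorial
 * examples, not part of the original header, and assume the C-language
 * (non-assembler) definitions of the macros:
 *
 *   GENMASK_32(7, 4)                    == 0x000000F0
 *   GENMASK_64(39, 21)                  == 0x000000FFFFE00000
 *   DIV_ROUND_UP_2EVAL(10, 4)           == 3
 *   round_up(0x1234U, 0x1000U)          == 0x2000U
 *   round_down(0x1234U, 0x1000U)        == 0x1000U
 *   check_uptr_overflow(UINTPTR_MAX, 1) == 1
 */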

/* Register size of the current architecture. */
#ifdef __aarch64__
#define REGSZ	U(8)
#else
#define REGSZ	U(4)
#endif

/*
 * Test whether the current architecture version is at least the version
 * expected.
 */
#define ARM_ARCH_AT_LEAST(_maj, _min) \
	((ARM_ARCH_MAJOR > (_maj)) || \
	 ((ARM_ARCH_MAJOR == (_maj)) && (ARM_ARCH_MINOR >= (_min))))

/*
 * Import an assembly or linker symbol as a C expression with the specified
 * type.
 */
#define IMPORT_SYM(type, sym, name) \
	extern char sym[]; \
	static const __attribute__((unused)) type name = (type) sym;

/*
 * When the symbol is used to hold a pointer, its alignment can be asserted
 * with this macro. For example, if there is a linker symbol that is going to
 * be used as a 64-bit pointer, the value of the linker symbol must also be
 * aligned to 64 bits. This macro makes sure this is the case.
 */
#define ASSERT_SYM_PTR_ALIGN(sym) assert(((size_t)(sym) % __alignof__(*(sym))) == 0)

#define COMPILER_BARRIER() __asm__ volatile ("" ::: "memory")

/* Compiler builtin of GCC >= 9 and planned in LLVM */
#ifdef __HAVE_SPECULATION_SAFE_VALUE
# define SPECULATION_SAFE_VALUE(var) __builtin_speculation_safe_value(var)
#else
# define SPECULATION_SAFE_VALUE(var) var
#endif

#endif /* UTILS_DEF_H */
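
/*
 * Illustrative usage sketch for IMPORT_SYM and ASSERT_SYM_PTR_ALIGN. This is
 * an editorial example, not part of the original header; the linker symbol
 * __FOO_START__ and the name FOO_BASE are hypothetical. Assuming the linker
 * script exports __FOO_START__ as the start of an 8-byte aligned region:
 *
 *   IMPORT_SYM(uint64_t *, __FOO_START__, FOO_BASE);
 *
 * FOO_BASE can then be used as a C pointer of type uint64_t *, and its
 * alignment can be checked at run time with:
 *
 *   ASSERT_SYM_PTR_ALIGN(FOO_BASE);
 *
 * which fails the assertion unless the symbol's address is a multiple of
 * __alignof__(uint64_t).
 */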