/*
 * Copyright (c) 2017-2024, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#include <arch.h>
#include <arch_features.h>
#include <arch_helpers.h>
#include <lib/cassert.h>
#include <lib/utils_def.h>
#include <lib/xlat_tables/xlat_tables_v2.h>

#include "../xlat_tables_private.h"

/*
 * Returns true if the provided granule size is supported, false otherwise.
 */
bool xlat_arch_is_granule_size_supported(size_t size)
{
	if (size == PAGE_SIZE_4KB) {
		/* MSB of TGRAN4 field will be '1' for unsupported feature */
		return is_feat_tgran4K_present();
	} else if (size == PAGE_SIZE_16KB) {
		return is_feat_tgran16K_present();
	} else if (size == PAGE_SIZE_64KB) {
		/* MSB of TGRAN64 field will be '1' for unsupported feature */
		return is_feat_tgran64K_present();
	} else {
		return false;
	}
}

/*
 * Returns the largest translation granule size supported by the CPU.
 */
size_t xlat_arch_get_max_supported_granule_size(void)
{
	if (xlat_arch_is_granule_size_supported(PAGE_SIZE_64KB)) {
		return PAGE_SIZE_64KB;
	} else if (xlat_arch_is_granule_size_supported(PAGE_SIZE_16KB)) {
		return PAGE_SIZE_16KB;
	} else {
		assert(xlat_arch_is_granule_size_supported(PAGE_SIZE_4KB));
		return PAGE_SIZE_4KB;
	}
}

/*
 * Determine the physical address space encoded in the 'attr' parameter.
 *
 * The physical address will fall into one of four spaces (secure, nonsecure,
 * root or realm) if RME is enabled, or one of two spaces (secure and
 * nonsecure) otherwise.
 */
uint32_t xlat_arch_get_pas(uint32_t attr)
{
	uint32_t pas = MT_PAS(attr);

	switch (pas) {
#if ENABLE_RME
	/* TTD.NSE = 1 and TTD.NS = 1 for Realm PAS */
	case MT_REALM:
		return LOWER_ATTRS(EL3_S1_NSE | NS);
	/* TTD.NSE = 1 and TTD.NS = 0 for Root PAS */
	case MT_ROOT:
		return LOWER_ATTRS(EL3_S1_NSE);
#endif
	case MT_NS:
		return LOWER_ATTRS(NS);
	default: /* MT_SECURE */
		return 0U;
	}
}
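
/*
 * Return the TCR physical address size (PS/IPS) field encoding for the
 * smallest supported physical address range that covers 'max_addr'.
 */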
unsigned long long tcr_physical_addr_size_bits(unsigned long long max_addr)
{
	/* Physical address can't exceed 48 bits */
	assert((max_addr & ADDR_MASK_48_TO_63) == 0U);

	/* 48 bits address */
	if ((max_addr & ADDR_MASK_44_TO_47) != 0U)
		return TCR_PS_BITS_256TB;

	/* 44 bits address */
	if ((max_addr & ADDR_MASK_42_TO_43) != 0U)
		return TCR_PS_BITS_16TB;

	/* 42 bits address */
	if ((max_addr & ADDR_MASK_40_TO_41) != 0U)
		return TCR_PS_BITS_4TB;

	/* 40 bits address */
	if ((max_addr & ADDR_MASK_36_TO_39) != 0U)
		return TCR_PS_BITS_1TB;

	/* 36 bits address */
	if ((max_addr & ADDR_MASK_32_TO_35) != 0U)
		return TCR_PS_BITS_64GB;

	return TCR_PS_BITS_4GB;
}

#if ENABLE_ASSERTIONS
/*
 * Physical Address ranges supported in the AArch64 Memory Model. Value 0b110
 * is supported from Armv8.2 onwards.
 */
static const unsigned int pa_range_bits_arr[] = {
	PARANGE_0000, PARANGE_0001, PARANGE_0010, PARANGE_0011, PARANGE_0100,
	PARANGE_0101, PARANGE_0110
};

/*
 * Return the maximum physical address supported by the PE, as reported by
 * ID_AA64MMFR0_EL1.PARange.
 */
unsigned long long xlat_arch_get_max_supported_pa(void)
{
	u_register_t pa_range = read_id_aa64mmfr0_el1() &
					ID_AA64MMFR0_EL1_PARANGE_MASK;

	/* All other values are reserved */
	assert(pa_range < ARRAY_SIZE(pa_range_bits_arr));

	return (1ULL << pa_range_bits_arr[pa_range]) - 1ULL;
}

/*
 * Return the minimum virtual address space size supported by the architecture.
 */
uintptr_t xlat_get_min_virt_addr_space_size(void)
{
	uintptr_t ret;

	if (is_feat_ttst_present())
		ret = MIN_VIRT_ADDR_SPACE_SIZE_TTST;
	else
		ret = MIN_VIRT_ADDR_SPACE_SIZE;

	return ret;
}
#endif /* ENABLE_ASSERTIONS */

/*
 * Return true if the MMU is enabled for the translation regime of the given
 * context.
 */
bool is_mmu_enabled_ctx(const xlat_ctx_t *ctx)
{
	if (ctx->xlat_regime == EL1_EL0_REGIME) {
		assert(xlat_arch_current_el() >= 1U);
		return (read_sctlr_el1() & SCTLR_M_BIT) != 0U;
	} else if (ctx->xlat_regime == EL2_REGIME) {
		assert(xlat_arch_current_el() >= 2U);
		return (read_sctlr_el2() & SCTLR_M_BIT) != 0U;
	} else {
		assert(ctx->xlat_regime == EL3_REGIME);
		assert(xlat_arch_current_el() >= 3U);
		return (read_sctlr_el3() & SCTLR_M_BIT) != 0U;
	}
}

/*
 * Return true if the data cache is enabled at the current exception level.
 */
bool is_dcache_enabled(void)
{
	unsigned int el = get_current_el_maybe_constant();

	if (el == 1U) {
		return (read_sctlr_el1() & SCTLR_C_BIT) != 0U;
	} else if (el == 2U) {
		return (read_sctlr_el2() & SCTLR_C_BIT) != 0U;
	} else {
		return (read_sctlr_el3() & SCTLR_C_BIT) != 0U;
	}
}

/*
 * Return the execute-never descriptor attributes for the given translation
 * regime: UXN and PXN for EL1&0, XN for EL2 and EL3.
 */
uint64_t xlat_arch_regime_get_xn_desc(int xlat_regime)
{
	if (xlat_regime == EL1_EL0_REGIME) {
		return UPPER_ATTRS(UXN) | UPPER_ATTRS(PXN);
	} else {
		assert((xlat_regime == EL2_REGIME) ||
		       (xlat_regime == EL3_REGIME));
		return UPPER_ATTRS(XN);
	}
}

void xlat_arch_tlbi_va(uintptr_t va, int xlat_regime)
{
	/*
	 * Ensure the translation table write has drained into memory before
	 * invalidating the TLB entry.
	 */
	dsbishst();

	/*
	 * This function only supports invalidation of TLB entries for the
	 * EL3, EL2 and EL1&0 translation regimes.
	 *
	 * Also, it is architecturally UNDEFINED to invalidate TLBs of a higher
	 * exception level (see section D4.9.2 of the ARM ARM rev B.a).
	 */
	if (xlat_regime == EL1_EL0_REGIME) {
		assert(xlat_arch_current_el() >= 1U);
		tlbivaae1is(TLBI_ADDR(va));
	} else if (xlat_regime == EL2_REGIME) {
		assert(xlat_arch_current_el() >= 2U);
		tlbivae2is(TLBI_ADDR(va));
	} else {
		assert(xlat_regime == EL3_REGIME);
		assert(xlat_arch_current_el() >= 3U);
		tlbivae3is(TLBI_ADDR(va));
	}
}
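
/*
 * Wait for a previously issued TLB invalidation to complete and for its
 * effects to become visible to this PE.
 */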
void xlat_arch_tlbi_va_sync(void)
{
	/*
	 * A TLB maintenance instruction can complete at any time after
	 * it is issued, but is only guaranteed to be complete after the
	 * execution of DSB by the PE that executed the TLB maintenance
	 * instruction. After the TLB invalidate instruction is
	 * complete, no new memory accesses using the invalidated TLB
	 * entries will be observed by any observer of the system
	 * domain. See section D4.8.2 of the ARMv8 (issue k), paragraph
	 * "Ordering and completion of TLB maintenance instructions".
	 */
	dsbish();

	/*
	 * The effects of a completed TLB maintenance instruction are
	 * only guaranteed to be visible on the PE that executed the
	 * instruction after the execution of an ISB instruction by the
	 * PE that executed the TLB maintenance instruction.
	 */
	isb();
}

/*
 * Return the exception level the CPU is currently executing at.
 */
unsigned int xlat_arch_current_el(void)
{
	unsigned int el = (unsigned int)GET_EL(read_CurrentEl());

	assert(el > 0U);

	return el;
}

/*
 * Compute the MAIR, TCR and TTBR0 values needed to enable the MMU for the
 * given translation regime and store them in the 'params' array.
 */
void setup_mmu_cfg(uint64_t *params, unsigned int flags,
		   const uint64_t *base_table, unsigned long long max_pa,
		   uintptr_t max_va, int xlat_regime)
{
	uint64_t mair, ttbr0, tcr;
	uintptr_t virtual_addr_space_size;

	/* Set attributes in the right indices of the MAIR. */
	mair = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
	mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR, ATTR_IWBWA_OWBWA_NTR_INDEX);
	mair |= MAIR_ATTR_SET(ATTR_NON_CACHEABLE, ATTR_NON_CACHEABLE_INDEX);

	/*
	 * Limit the input address ranges and memory region sizes translated
	 * using TTBR0 to the given virtual address space size.
	 */
	assert(max_va < ((uint64_t)UINTPTR_MAX));

	virtual_addr_space_size = (uintptr_t)max_va + 1U;

	assert(virtual_addr_space_size >=
		xlat_get_min_virt_addr_space_size());
	assert(virtual_addr_space_size <= MAX_VIRT_ADDR_SPACE_SIZE);
	assert(IS_POWER_OF_TWO(virtual_addr_space_size));

	/*
	 * __builtin_ctzll(0) is undefined but here we are guaranteed that
	 * virtual_addr_space_size is in the range [1,UINTPTR_MAX].
	 */
	int t0sz = 64 - __builtin_ctzll(virtual_addr_space_size);

	tcr = (uint64_t)t0sz << TCR_T0SZ_SHIFT;

	/*
	 * Set the cacheability and shareability attributes for memory
	 * associated with translation table walks.
	 */
	if ((flags & XLAT_TABLE_NC) != 0U) {
		/* Inner & outer non-cacheable non-shareable. */
		tcr |= TCR_SH_NON_SHAREABLE |
			TCR_RGN_OUTER_NC | TCR_RGN_INNER_NC;
	} else {
		/* Inner & outer WBWA & shareable. */
		tcr |= TCR_SH_INNER_SHAREABLE |
			TCR_RGN_OUTER_WBA | TCR_RGN_INNER_WBA;
	}

	/*
	 * It is safer to restrict the max physical address accessible by the
	 * hardware as much as possible.
	 */
	unsigned long long tcr_ps_bits = tcr_physical_addr_size_bits(max_pa);

	if (xlat_regime == EL1_EL0_REGIME) {
		/*
		 * TCR_EL1.EPD1: Disable translation table walk for addresses
		 * that are translated using TTBR1_EL1.
		 */
		tcr |= TCR_EPD1_BIT | (tcr_ps_bits << TCR_EL1_IPS_SHIFT);
	} else if (xlat_regime == EL2_REGIME) {
		tcr |= TCR_EL2_RES1 | (tcr_ps_bits << TCR_EL2_PS_SHIFT);
	} else {
		assert(xlat_regime == EL3_REGIME);
		tcr |= TCR_EL3_RES1 | (tcr_ps_bits << TCR_EL3_PS_SHIFT);
	}

	/* Set TTBR bits as well */
	ttbr0 = (uint64_t) base_table;

	if (is_feat_ttcnp_present()) {
		/* Enable CnP bit so as to share page tables with all PEs. */
		ttbr0 |= TTBR_CNP_BIT;
	}

	params[MMU_CFG_MAIR] = mair;
	params[MMU_CFG_TCR] = tcr;
	params[MMU_CFG_TTBR0] = ttbr0;
}