/*
 * Copyright (c) 2017-2024, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <stdbool.h>

#include <platform_def.h>

#include <arch.h>
#include <arch_features.h>
#include <arch_helpers.h>
#include <lib/cassert.h>
#include <lib/utils_def.h>
#include <lib/xlat_tables/xlat_tables_v2.h>

#include "../xlat_tables_private.h"

#if (ARM_ARCH_MAJOR == 7) && !defined(ARMV7_SUPPORTS_LARGE_PAGE_ADDRESSING)
#error ARMv7 target does not support LPAE MMU descriptors
#endif

/*
 * Returns true if the provided granule size is supported, false otherwise.
 */
bool xlat_arch_is_granule_size_supported(size_t size)
{
	/*
	 * The library uses the long descriptor translation table format,
	 * which supports 4 KiB pages only.
	 */
	return size == PAGE_SIZE_4KB;
}

size_t xlat_arch_get_max_supported_granule_size(void)
{
	return PAGE_SIZE_4KB;
}

/*
 * Determine the physical address space encoded in the 'attr' parameter.
 *
 * The physical address will fall into one of two spaces: secure or
 * non-secure.
 */
uint32_t xlat_arch_get_pas(uint32_t attr)
{
	uint32_t pas = MT_PAS(attr);

	if (pas == MT_NS) {
		return LOWER_ATTRS(NS);
	} else { /* MT_SECURE */
		return 0U;
	}
}

#if ENABLE_ASSERTIONS
unsigned long long xlat_arch_get_max_supported_pa(void)
{
	/* Physical address space size for long descriptor format. */
	return (1ULL << 40) - 1ULL;
}

/*
 * Return the minimum virtual address space size supported by the
 * architecture.
 */
uintptr_t xlat_get_min_virt_addr_space_size(void)
{
	return MIN_VIRT_ADDR_SPACE_SIZE;
}
#endif /* ENABLE_ASSERTIONS */

bool is_mmu_enabled_ctx(const xlat_ctx_t *ctx)
{
	if (ctx->xlat_regime == EL1_EL0_REGIME) {
		assert(xlat_arch_current_el() == 1U);
		return (read_sctlr() & SCTLR_M_BIT) != 0U;
	} else {
		assert(ctx->xlat_regime == EL2_REGIME);
		assert(xlat_arch_current_el() == 2U);
		return (read_hsctlr() & HSCTLR_M_BIT) != 0U;
	}
}

bool is_dcache_enabled(void)
{
	if (IS_IN_EL2()) {
		return (read_hsctlr() & HSCTLR_C_BIT) != 0U;
	} else {
		return (read_sctlr() & SCTLR_C_BIT) != 0U;
	}
}

uint64_t xlat_arch_regime_get_xn_desc(int xlat_regime)
{
	if (xlat_regime == EL1_EL0_REGIME) {
		return UPPER_ATTRS(XN) | UPPER_ATTRS(PXN);
	} else {
		assert(xlat_regime == EL2_REGIME);
		return UPPER_ATTRS(XN);
	}
}

void xlat_arch_tlbi_va(uintptr_t va, int xlat_regime)
{
	/*
	 * Ensure the translation table write has drained into memory before
	 * invalidating the TLB entry.
	 */
	dsbishst();

	if (xlat_regime == EL1_EL0_REGIME) {
		tlbimvaais(TLBI_ADDR(va));
	} else {
		assert(xlat_regime == EL2_REGIME);
		tlbimvahis(TLBI_ADDR(va));
	}
}

void xlat_arch_tlbi_va_sync(void)
{
	/* Invalidate all entries from branch predictors. */
	bpiallis();

	/*
	 * A TLB maintenance instruction can complete at any time after
	 * it is issued, but is only guaranteed to be complete after the
	 * execution of DSB by the PE that executed the TLB maintenance
	 * instruction. After the TLB invalidate instruction is
	 * complete, no new memory accesses using the invalidated TLB
	 * entries will be observed by any observer of the system
	 * domain. See section D4.8.2 of the Armv8-A Architecture
	 * Reference Manual (issue k), paragraph "Ordering and
	 * completion of TLB maintenance instructions".
	 */
	dsbish();

	/*
	 * The effects of a completed TLB maintenance instruction are
	 * only guaranteed to be visible on the PE that executed the
	 * instruction after the execution of an ISB instruction by the
	 * PE that executed the TLB maintenance instruction.
	 */
	isb();
}
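
/*
 * Illustrative sketch (deliberately not compiled): one plausible way the two
 * TLB helpers above pair up when a live translation table entry is changed.
 * The entry/new_desc names are hypothetical, not library API.
 */
#if 0
static void example_remap_page(uint64_t *entry, uint64_t new_desc,
			       uintptr_t va, int xlat_regime)
{
	/* Update the descriptor in memory. */
	*entry = new_desc;

	/* DSB to drain the write, then TLBI by VA for the given regime. */
	xlat_arch_tlbi_va(va, xlat_regime);

	/* BPIALLIS + DSB + ISB: the invalidation completes and becomes
	 * visible before any use of the remapped page. */
	xlat_arch_tlbi_va_sync();
}
#endif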

unsigned int xlat_arch_current_el(void)
{
	if (IS_IN_HYP()) {
		return 2U;
	} else {
		assert(IS_IN_SVC() || IS_IN_MON());
		/*
		 * If EL3 is in AArch32 mode, all secure PL1 modes (Monitor,
		 * System, SVC, Abort, UND, IRQ and FIQ modes) execute at EL3.
		 *
		 * The PL1&0 translation regime in AArch32 behaves like the
		 * EL1&0 regime in AArch64 except for the XN bits, but we set
		 * and unset them at the same time, so there is no difference
		 * in practice.
		 */
		return 1U;
	}
}

/*******************************************************************************
 * Function for enabling the MMU in PL1 or PL2, assuming that the page tables
 * have already been created.
 ******************************************************************************/
void setup_mmu_cfg(uint64_t *params, unsigned int flags,
		   const uint64_t *base_table, unsigned long long max_pa,
		   uintptr_t max_va, int xlat_regime)
{
	uint64_t mair, ttbr0;
	uint32_t ttbcr;

	/* Set attributes in the right indices of the MAIR. */
	mair = MAIR0_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
	mair |= MAIR0_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,
			ATTR_IWBWA_OWBWA_NTR_INDEX);
	mair |= MAIR0_ATTR_SET(ATTR_NON_CACHEABLE,
			ATTR_NON_CACHEABLE_INDEX);

	/*
	 * Configure the control register for stage 1 of the PL1&0 or EL2
	 * translation regimes.
	 */

	/* Use the Long-descriptor translation table format. */
	ttbcr = TTBCR_EAE_BIT;

	if (xlat_regime == EL1_EL0_REGIME) {
		assert(IS_IN_SVC() || IS_IN_MON());
		/*
		 * Disable translation table walks for addresses that are
		 * translated using TTBR1, so that only TTBR0 is used.
		 */
		ttbcr |= TTBCR_EPD1_BIT;
	} else {
		assert(xlat_regime == EL2_REGIME);
		assert(IS_IN_HYP());

		/*
		 * Set HTCR bits as well. Set HTTBR table properties
		 * as Inner & outer WBWA & shareable.
		 */
		ttbcr |= HTCR_RES1 |
			 HTCR_SH0_INNER_SHAREABLE | HTCR_RGN0_OUTER_WBA |
			 HTCR_RGN0_INNER_WBA;
	}

	/*
	 * Limit the input address ranges and memory region sizes translated
	 * using TTBR0 to the given virtual address space size, if it is
	 * smaller than 32 bits.
	 */
	if (max_va != UINT32_MAX) {
		uintptr_t virtual_addr_space_size = max_va + 1U;

		assert(virtual_addr_space_size >=
			xlat_get_min_virt_addr_space_size());
		assert(IS_POWER_OF_TWO(virtual_addr_space_size));

		/*
		 * __builtin_ctzll(0) is undefined, but here we are
		 * guaranteed that virtual_addr_space_size is in the range
		 * [1, UINT32_MAX].
		 */
		int t0sz = 32 - __builtin_ctzll(virtual_addr_space_size);

		ttbcr |= (uint32_t) t0sz;
	}
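
	/*
	 * Worked example for the T0SZ computation above (illustrative): a
	 * 1 GiB virtual address space gives virtual_addr_space_size = 2^30,
	 * __builtin_ctzll() returns 30 and T0SZ = 32 - 30 = 2, i.e. TTBR0
	 * translates VA[29:0]. For the full 4 GiB space the block above is
	 * skipped and T0SZ stays 0.
	 */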

	/*
	 * Set the cacheability and shareability attributes for memory
	 * associated with translation table walks using TTBR0.
	 */
	if ((flags & XLAT_TABLE_NC) != 0U) {
		/* Inner & outer non-cacheable, non-shareable. */
		ttbcr |= TTBCR_SH0_NON_SHAREABLE | TTBCR_RGN0_OUTER_NC |
			 TTBCR_RGN0_INNER_NC;
	} else {
		/* Inner & outer WBWA & shareable. */
		ttbcr |= TTBCR_SH0_INNER_SHAREABLE | TTBCR_RGN0_OUTER_WBA |
			 TTBCR_RGN0_INNER_WBA;
	}

	/* Set TTBR0 bits as well. */
	ttbr0 = (uint64_t)(uintptr_t) base_table;

	if (is_feat_ttcnp_present()) {
		/* Enable the CnP bit so as to share page tables with all
		 * PEs. */
		ttbr0 |= TTBR_CNP_BIT;
	}

	/* Now populate the MMU configuration. */
	params[MMU_CFG_MAIR] = mair;
	params[MMU_CFG_TCR] = (uint64_t) ttbcr;
	params[MMU_CFG_TTBR0] = ttbr0;
}
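
/*
 * Illustrative caller sketch (deliberately not compiled): one plausible way a
 * Secure SVC/Monitor image could consume setup_mmu_cfg() once its tables are
 * built. mmu_cfg_params is assumed to be the global array declared in
 * <lib/xlat_tables/xlat_mmu_helpers.h>, and PLAT_PHY_ADDR_SPACE_SIZE /
 * PLAT_VIRT_ADDR_SPACE_SIZE to come from platform_def.h; example_base_table
 * is a hypothetical name for the context's base translation table.
 */
#if 0
static void example_enable_mmu_svc_mon(const uint64_t *example_base_table,
				       unsigned int flags)
{
	/* Fill in the MAIR0, TTBCR and TTBR0 values for the PL1&0 regime. */
	setup_mmu_cfg(mmu_cfg_params, flags, example_base_table,
		      PLAT_PHY_ADDR_SPACE_SIZE - 1ULL,
		      PLAT_VIRT_ADDR_SPACE_SIZE - 1U, EL1_EL0_REGIME);

	/* Program the registers from mmu_cfg_params and set SCTLR.M. */
	enable_mmu_direct_svc_mon(flags);
}
#endif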