/*
 * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <stdbool.h>

#include <platform_def.h>

#include <arch.h>
#include <arch_helpers.h>
#include <lib/cassert.h>
#include <lib/utils_def.h>
#include <lib/xlat_tables/xlat_tables_v2.h>

#include "../xlat_tables_private.h"

#if (ARM_ARCH_MAJOR == 7) && !defined(ARMV7_SUPPORTS_LARGE_PAGE_ADDRESSING)
#error ARMv7 target does not support LPAE MMU descriptors
#endif

/*
 * Returns true if the provided granule size is supported, false otherwise.
 */
bool xlat_arch_is_granule_size_supported(size_t size)
{
	/*
	 * The library uses the long descriptor translation table format,
	 * which supports 4 KiB pages only.
	 */
	return size == PAGE_SIZE_4KB;
}

size_t xlat_arch_get_max_supported_granule_size(void)
{
	return PAGE_SIZE_4KB;
}

#if ENABLE_ASSERTIONS
unsigned long long xlat_arch_get_max_supported_pa(void)
{
	/* 40-bit physical address space size for long descriptor format. */
	return (1ULL << 40) - 1ULL;
}
#endif /* ENABLE_ASSERTIONS */

bool is_mmu_enabled_ctx(const xlat_ctx_t *ctx)
{
	if (ctx->xlat_regime == EL1_EL0_REGIME) {
		assert(xlat_arch_current_el() == 1U);
		return (read_sctlr() & SCTLR_M_BIT) != 0U;
	} else {
		assert(ctx->xlat_regime == EL2_REGIME);
		assert(xlat_arch_current_el() == 2U);
		return (read_hsctlr() & HSCTLR_M_BIT) != 0U;
	}
}

bool is_dcache_enabled(void)
{
	if (IS_IN_EL2()) {
		return (read_hsctlr() & HSCTLR_C_BIT) != 0U;
	} else {
		return (read_sctlr() & SCTLR_C_BIT) != 0U;
	}
}

uint64_t xlat_arch_regime_get_xn_desc(int xlat_regime)
{
	if (xlat_regime == EL1_EL0_REGIME) {
		return UPPER_ATTRS(XN) | UPPER_ATTRS(PXN);
	} else {
		assert(xlat_regime == EL2_REGIME);
		return UPPER_ATTRS(XN);
	}
}

void xlat_arch_tlbi_va(uintptr_t va, int xlat_regime)
{
	/*
	 * Ensure the translation table write has drained into memory before
	 * invalidating the TLB entry.
	 */
	dsbishst();

	if (xlat_regime == EL1_EL0_REGIME) {
		tlbimvaais(TLBI_ADDR(va));
	} else {
		assert(xlat_regime == EL2_REGIME);
		tlbimvahis(TLBI_ADDR(va));
	}
}

void xlat_arch_tlbi_va_sync(void)
{
	/* Invalidate all entries from branch predictors. */
	bpiallis();

	/*
	 * A TLB maintenance instruction can complete at any time after
	 * it is issued, but is only guaranteed to be complete after the
	 * execution of DSB by the PE that executed the TLB maintenance
	 * instruction. After the TLB invalidate instruction is
	 * complete, no new memory accesses using the invalidated TLB
	 * entries will be observed by any observer of the system
	 * domain. See section D4.8.2 of the ARMv8 ARM (issue k),
	 * paragraph "Ordering and completion of TLB maintenance
	 * instructions".
	 */
	dsbish();

	/*
	 * The effects of a completed TLB maintenance instruction are
	 * only guaranteed to be visible on the PE that executed the
	 * instruction after the execution of an ISB instruction by the
	 * PE that executed the TLB maintenance instruction.
	 */
	isb();
}

unsigned int xlat_arch_current_el(void)
{
	if (IS_IN_HYP()) {
		return 2U;
	} else {
		assert(IS_IN_SVC() || IS_IN_MON());
		/*
		 * If EL3 is in AArch32 mode, all secure PL1 modes (Monitor,
		 * System, SVC, Abort, UND, IRQ and FIQ modes) execute at EL3.
		 *
		 * The PL1&0 translation regime in AArch32 behaves like the
		 * EL1&0 regime in AArch64 except for the XN bits, but we set
		 * and unset them at the same time, so there's no difference
		 * in practice.
		 */
		return 1U;
	}
}
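
/*
 * Illustrative sketch, not part of the library: xlat_arch_tlbi_va() and
 * xlat_arch_tlbi_va_sync() above are designed to be used as a pair, so that
 * several per-VA invalidations can be batched under a single DSB/ISB
 * synchronization. The hypothetical helper below (a made-up name, compiled
 * out) shows the intended calling pattern after translation table entries
 * have been modified.
 */
#if 0
static void example_remap_sync(uintptr_t va, size_t size, int xlat_regime)
{
	uintptr_t offset;

	/* ... descriptors covering [va, va + size) were updated here ... */

	/* Invalidate the stale TLB entries, one page at a time. */
	for (offset = 0U; offset < size; offset += PAGE_SIZE) {
		xlat_arch_tlbi_va(va + offset, xlat_regime);
	}

	/* One synchronization completes all the invalidations above. */
	xlat_arch_tlbi_va_sync();
}
#endif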

/*******************************************************************************
 * Function that prepares the MMU configuration needed to enable the MMU in
 * PL1 or PL2, assuming that the translation tables have already been created.
 * The resulting MAIR, TTBCR and TTBR0 values are stored in 'params'; the MMU
 * itself is enabled separately using these values.
 ******************************************************************************/
void setup_mmu_cfg(uint64_t *params, unsigned int flags,
		   const uint64_t *base_table, unsigned long long max_pa,
		   uintptr_t max_va, int xlat_regime)
{
	uint64_t mair, ttbr0;
	uint32_t ttbcr;

	/* Set attributes in the right indices of the MAIR. */
	mair = MAIR0_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
	mair |= MAIR0_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,
			       ATTR_IWBWA_OWBWA_NTR_INDEX);
	mair |= MAIR0_ATTR_SET(ATTR_NON_CACHEABLE,
			       ATTR_NON_CACHEABLE_INDEX);

	/*
	 * Configure the control register for stage 1 of the PL1&0 or EL2
	 * translation regimes.
	 */

	/* Use the Long-descriptor translation table format. */
	ttbcr = TTBCR_EAE_BIT;

	if (xlat_regime == EL1_EL0_REGIME) {
		assert(IS_IN_SVC() || IS_IN_MON());
		/*
		 * Disable translation table walks for addresses that are
		 * translated using TTBR1. Therefore, only TTBR0 is used.
		 */
		ttbcr |= TTBCR_EPD1_BIT;
	} else {
		assert(xlat_regime == EL2_REGIME);
		assert(IS_IN_HYP());

		/*
		 * Set HTCR bits as well. Set HTTBR table properties
		 * as Inner & outer WBWA & shareable.
		 */
		ttbcr |= HTCR_RES1 |
			 HTCR_SH0_INNER_SHAREABLE | HTCR_RGN0_OUTER_WBA |
			 HTCR_RGN0_INNER_WBA;
	}

	/*
	 * Limit the input address ranges and memory region sizes translated
	 * using TTBR0 to the given virtual address space size, if it is
	 * smaller than 32 bits.
	 */
	if (max_va != UINT32_MAX) {
		uintptr_t virtual_addr_space_size = max_va + 1U;

		assert(CHECK_VIRT_ADDR_SPACE_SIZE(virtual_addr_space_size));
		/*
		 * __builtin_ctzll(0) is undefined, but here we are guaranteed
		 * that virtual_addr_space_size is in the range
		 * [1, UINT32_MAX].
		 */
		int t0sz = 32 - __builtin_ctzll(virtual_addr_space_size);

		ttbcr |= (uint32_t) t0sz;
	}

	/*
	 * Set the cacheability and shareability attributes for memory
	 * associated with translation table walks using TTBR0.
	 */
	if ((flags & XLAT_TABLE_NC) != 0U) {
		/* Inner & outer non-cacheable, non-shareable. */
		ttbcr |= TTBCR_SH0_NON_SHAREABLE | TTBCR_RGN0_OUTER_NC |
			 TTBCR_RGN0_INNER_NC;
	} else {
		/* Inner & outer WBWA & shareable. */
		ttbcr |= TTBCR_SH0_INNER_SHAREABLE | TTBCR_RGN0_OUTER_WBA |
			 TTBCR_RGN0_INNER_WBA;
	}

	/* Set TTBR0 bits as well. */
	ttbr0 = (uint64_t)(uintptr_t) base_table;

#if ARM_ARCH_AT_LEAST(8, 2)
	/*
	 * Enable the CnP bit so as to share page tables with all PEs. This
	 * is mandatory for ARMv8.2 implementations.
	 */
	ttbr0 |= TTBR_CNP_BIT;
#endif

	/* Now populate the MMU configuration. */
	params[MMU_CFG_MAIR] = mair;
	params[MMU_CFG_TCR] = (uint64_t) ttbcr;
	params[MMU_CFG_TTBR0] = ttbr0;
}
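
/*
 * Illustrative sketch, not part of the library: a hypothetical caller that
 * fills the parameter array consumed by the MMU enable code. The
 * 'mmu_params' array, the helper name and the 1 GiB address space size are
 * assumptions made for the example. With a 2^30 byte virtual address space,
 * T0SZ = 32 - 30 = 2 ends up in the TTBCR value computed above.
 */
#if 0
static uint64_t mmu_params[MMU_CFG_PARAM_MAX];

static void example_setup_pl1_mmu(const uint64_t *base_table)
{
	/* 1 GiB virtual and physical spaces, cacheable table walks. */
	setup_mmu_cfg(mmu_params, 0U, base_table,
		      (1ULL << 30) - 1ULL,	/* max_pa */
		      (1UL << 30) - 1UL,	/* max_va */
		      EL1_EL0_REGIME);

	/* mmu_params[] now holds the MAIR, TTBCR and TTBR0 values. */
}
#endif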