/*
 * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#include <arch.h>
#include <arch_helpers.h>
#include <lib/cassert.h>
#include <lib/utils_def.h>
#include <lib/xlat_tables/xlat_tables_v2.h>

#include "../xlat_tables_private.h"

/*
 * Returns true if the provided granule size is supported, false otherwise.
 */
bool xlat_arch_is_granule_size_supported(size_t size)
{
	u_register_t id_aa64mmfr0_el1 = read_id_aa64mmfr0_el1();

	if (size == PAGE_SIZE_4KB) {
		return ((id_aa64mmfr0_el1 >> ID_AA64MMFR0_EL1_TGRAN4_SHIFT) &
			ID_AA64MMFR0_EL1_TGRAN4_MASK) ==
			ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED;
	} else if (size == PAGE_SIZE_16KB) {
		return ((id_aa64mmfr0_el1 >> ID_AA64MMFR0_EL1_TGRAN16_SHIFT) &
			ID_AA64MMFR0_EL1_TGRAN16_MASK) ==
			ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED;
	} else if (size == PAGE_SIZE_64KB) {
		return ((id_aa64mmfr0_el1 >> ID_AA64MMFR0_EL1_TGRAN64_SHIFT) &
			ID_AA64MMFR0_EL1_TGRAN64_MASK) ==
			ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED;
	} else {
		return false;
	}
}

size_t xlat_arch_get_max_supported_granule_size(void)
{
	if (xlat_arch_is_granule_size_supported(PAGE_SIZE_64KB)) {
		return PAGE_SIZE_64KB;
	} else if (xlat_arch_is_granule_size_supported(PAGE_SIZE_16KB)) {
		return PAGE_SIZE_16KB;
	} else {
		assert(xlat_arch_is_granule_size_supported(PAGE_SIZE_4KB));
		return PAGE_SIZE_4KB;
	}
}

unsigned long long tcr_physical_addr_size_bits(unsigned long long max_addr)
{
	/* Physical address can't exceed 48 bits */
	assert((max_addr & ADDR_MASK_48_TO_63) == 0U);

	/* 48 bits address */
	if ((max_addr & ADDR_MASK_44_TO_47) != 0U)
		return TCR_PS_BITS_256TB;

	/* 44 bits address */
	if ((max_addr & ADDR_MASK_42_TO_43) != 0U)
		return TCR_PS_BITS_16TB;

	/* 42 bits address */
	if ((max_addr & ADDR_MASK_40_TO_41) != 0U)
		return TCR_PS_BITS_4TB;

	/* 40 bits address */
	if ((max_addr & ADDR_MASK_36_TO_39) != 0U)
		return TCR_PS_BITS_1TB;

	/* 36 bits address */
	if ((max_addr & ADDR_MASK_32_TO_35) != 0U)
		return TCR_PS_BITS_64GB;

	return TCR_PS_BITS_4GB;
}
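
/*
 * Worked example for the function above (values chosen for illustration
 * only): a platform whose highest physical address is 0xFFFFFFFFFF (a
 * 40-bit address, bits [39:0] set) has bits set within ADDR_MASK_36_TO_39
 * and none above it, so tcr_physical_addr_size_bits() returns
 * TCR_PS_BITS_1TB. A platform limited to 0xFFFFFFFF (a 32-bit address)
 * matches none of the masks and gets the smallest encoding,
 * TCR_PS_BITS_4GB.
 */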

#if ENABLE_ASSERTIONS
/*
 * Physical Address ranges supported in the AArch64 Memory Model. Value 0b110
 * is supported from ARMv8.2 onwards.
 */
static const unsigned int pa_range_bits_arr[] = {
	PARANGE_0000, PARANGE_0001, PARANGE_0010, PARANGE_0011, PARANGE_0100,
	PARANGE_0101, PARANGE_0110
};

unsigned long long xlat_arch_get_max_supported_pa(void)
{
	u_register_t pa_range = read_id_aa64mmfr0_el1() &
						ID_AA64MMFR0_EL1_PARANGE_MASK;

	/* All other values are reserved */
	assert(pa_range < ARRAY_SIZE(pa_range_bits_arr));

	return (1ULL << pa_range_bits_arr[pa_range]) - 1ULL;
}
#endif /* ENABLE_ASSERTIONS */

bool is_mmu_enabled_ctx(const xlat_ctx_t *ctx)
{
	if (ctx->xlat_regime == EL1_EL0_REGIME) {
		assert(xlat_arch_current_el() >= 1U);
		return (read_sctlr_el1() & SCTLR_M_BIT) != 0U;
	} else if (ctx->xlat_regime == EL2_REGIME) {
		assert(xlat_arch_current_el() >= 2U);
		return (read_sctlr_el2() & SCTLR_M_BIT) != 0U;
	} else {
		assert(ctx->xlat_regime == EL3_REGIME);
		assert(xlat_arch_current_el() >= 3U);
		return (read_sctlr_el3() & SCTLR_M_BIT) != 0U;
	}
}

bool is_dcache_enabled(void)
{
	unsigned int el = (unsigned int)GET_EL(read_CurrentEl());

	if (el == 1U) {
		return (read_sctlr_el1() & SCTLR_C_BIT) != 0U;
	} else if (el == 2U) {
		return (read_sctlr_el2() & SCTLR_C_BIT) != 0U;
	} else {
		return (read_sctlr_el3() & SCTLR_C_BIT) != 0U;
	}
}

uint64_t xlat_arch_regime_get_xn_desc(int xlat_regime)
{
	if (xlat_regime == EL1_EL0_REGIME) {
		return UPPER_ATTRS(UXN) | UPPER_ATTRS(PXN);
	} else {
		assert((xlat_regime == EL2_REGIME) ||
		       (xlat_regime == EL3_REGIME));
		return UPPER_ATTRS(XN);
	}
}

void xlat_arch_tlbi_va(uintptr_t va, int xlat_regime)
{
	/*
	 * Ensure the translation table write has drained into memory before
	 * invalidating the TLB entry.
	 */
	dsbishst();

	/*
	 * This function only supports invalidation of TLB entries for the
	 * EL3, EL2 and EL1&0 translation regimes.
	 *
	 * Also, it is architecturally UNDEFINED to invalidate TLBs of a
	 * higher exception level than the current one (see section D4.9.2
	 * of the ARM ARM rev B.a).
	 */
	if (xlat_regime == EL1_EL0_REGIME) {
		assert(xlat_arch_current_el() >= 1U);
		tlbivaae1is(TLBI_ADDR(va));
	} else if (xlat_regime == EL2_REGIME) {
		assert(xlat_arch_current_el() >= 2U);
		tlbivae2is(TLBI_ADDR(va));
	} else {
		assert(xlat_regime == EL3_REGIME);
		assert(xlat_arch_current_el() >= 3U);
		tlbivae3is(TLBI_ADDR(va));
	}
}
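
/*
 * Typical call pattern (illustrative only, not a formal contract): callers
 * that modify live translation tables issue one xlat_arch_tlbi_va() per
 * affected page and then a single xlat_arch_tlbi_va_sync() (below), so that
 * the DSB/ISB sequence completes the invalidation before the updated
 * mappings are relied upon, e.g.:
 *
 *     xlat_arch_tlbi_va(va, EL3_REGIME);
 *     xlat_arch_tlbi_va_sync();
 */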

void xlat_arch_tlbi_va_sync(void)
{
	/*
	 * A TLB maintenance instruction can complete at any time after
	 * it is issued, but is only guaranteed to be complete after the
	 * execution of DSB by the PE that executed the TLB maintenance
	 * instruction. After the TLB invalidate instruction is
	 * complete, no new memory accesses using the invalidated TLB
	 * entries will be observed by any observer of the system
	 * domain. See section D4.8.2 of the ARMv8 ARM (issue k),
	 * paragraph "Ordering and completion of TLB maintenance
	 * instructions".
	 */
	dsbish();

	/*
	 * The effects of a completed TLB maintenance instruction are
	 * only guaranteed to be visible on the PE that executed the
	 * instruction after the execution of an ISB instruction by the
	 * PE that executed the TLB maintenance instruction.
	 */
	isb();
}

unsigned int xlat_arch_current_el(void)
{
	unsigned int el = (unsigned int)GET_EL(read_CurrentEl());

	assert(el > 0U);

	return el;
}

void setup_mmu_cfg(uint64_t *params, unsigned int flags,
		   const uint64_t *base_table, unsigned long long max_pa,
		   uintptr_t max_va, int xlat_regime)
{
	uint64_t mair, ttbr0, tcr;
	uintptr_t virtual_addr_space_size;

	/* Set attributes in the right indices of the MAIR. */
	mair = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
	mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR, ATTR_IWBWA_OWBWA_NTR_INDEX);
	mair |= MAIR_ATTR_SET(ATTR_NON_CACHEABLE, ATTR_NON_CACHEABLE_INDEX);

	/*
	 * Limit the input address ranges and memory region sizes translated
	 * using TTBR0 to the given virtual address space size.
	 */
	assert(max_va < ((uint64_t)UINTPTR_MAX));

	virtual_addr_space_size = (uintptr_t)max_va + 1U;
	assert(CHECK_VIRT_ADDR_SPACE_SIZE(virtual_addr_space_size));

	/*
	 * __builtin_ctzll(0) is undefined but here we are guaranteed that
	 * virtual_addr_space_size is in the range [1, UINTPTR_MAX].
	 */
	int t0sz = 64 - __builtin_ctzll(virtual_addr_space_size);

	tcr = (uint64_t) t0sz;

	/*
	 * Set the cacheability and shareability attributes for memory
	 * associated with translation table walks.
	 */
	if ((flags & XLAT_TABLE_NC) != 0U) {
		/* Inner & outer non-cacheable non-shareable. */
		tcr |= TCR_SH_NON_SHAREABLE |
			TCR_RGN_OUTER_NC | TCR_RGN_INNER_NC;
	} else {
		/* Inner & outer WBWA & shareable. */
		tcr |= TCR_SH_INNER_SHAREABLE |
			TCR_RGN_OUTER_WBA | TCR_RGN_INNER_WBA;
	}

	/*
	 * It is safer to restrict the max physical address accessible by the
	 * hardware as much as possible.
	 */
	unsigned long long tcr_ps_bits = tcr_physical_addr_size_bits(max_pa);

	if (xlat_regime == EL1_EL0_REGIME) {
		/*
		 * TCR_EL1.EPD1: Disable translation table walk for addresses
		 * that are translated using TTBR1_EL1.
		 */
		tcr |= TCR_EPD1_BIT | (tcr_ps_bits << TCR_EL1_IPS_SHIFT);
	} else if (xlat_regime == EL2_REGIME) {
		tcr |= TCR_EL2_RES1 | (tcr_ps_bits << TCR_EL2_PS_SHIFT);
	} else {
		assert(xlat_regime == EL3_REGIME);
		tcr |= TCR_EL3_RES1 | (tcr_ps_bits << TCR_EL3_PS_SHIFT);
	}

	/* Set TTBR bits as well */
	ttbr0 = (uint64_t) base_table;

#if ARM_ARCH_AT_LEAST(8, 2)
	/*
	 * Enable CnP bit so as to share page tables with all PEs. This is
	 * mandatory for ARMv8.2 implementations.
	 */
	ttbr0 |= TTBR_CNP_BIT;
#endif

	params[MMU_CFG_MAIR] = mair;
	params[MMU_CFG_TCR] = tcr;
	params[MMU_CFG_TTBR0] = ttbr0;
}
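
/*
 * Usage sketch for setup_mmu_cfg() (documentation only; apart from the
 * MMU_CFG_* indices and setup_mmu_cfg() itself, the identifiers below are
 * assumed to be provided by the caller): a caller passes a uint64_t array
 * indexed by the MMU_CFG_* constants together with its base translation
 * table and address limits, e.g.:
 *
 *     setup_mmu_cfg(params, 0U, base_table, max_pa, max_va, EL3_REGIME);
 *
 * After the call, params[MMU_CFG_MAIR], params[MMU_CFG_TCR] and
 * params[MMU_CFG_TTBR0] hold the values to be programmed into MAIR_EL3,
 * TCR_EL3 and TTBR0_EL3 (for the EL3 regime) before the MMU is enabled.
 */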