/*
 * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
#include <cassert.h>
#include <common_def.h>
#include <platform_def.h>
#include <sys/types.h>
#include <utils.h>
#include <xlat_tables_v2.h>
#include "../xlat_tables_private.h"

#if defined(IMAGE_BL1) || defined(IMAGE_BL31)
# define IMAGE_EL	3
#else
# define IMAGE_EL	1
#endif

static unsigned long long tcr_ps_bits;

static unsigned long long calc_physical_addr_size_bits(
					unsigned long long max_addr)
{
	/* Physical address can't exceed 48 bits */
	assert((max_addr & ADDR_MASK_48_TO_63) == 0);

	/* 48 bits address */
	if (max_addr & ADDR_MASK_44_TO_47)
		return TCR_PS_BITS_256TB;

	/* 44 bits address */
	if (max_addr & ADDR_MASK_42_TO_43)
		return TCR_PS_BITS_16TB;

	/* 42 bits address */
	if (max_addr & ADDR_MASK_40_TO_41)
		return TCR_PS_BITS_4TB;

	/* 40 bits address */
	if (max_addr & ADDR_MASK_36_TO_39)
		return TCR_PS_BITS_1TB;

	/* 36 bits address */
	if (max_addr & ADDR_MASK_32_TO_35)
		return TCR_PS_BITS_64GB;

	return TCR_PS_BITS_4GB;
}
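
/*
 * Worked example (illustrative value, not taken from any particular
 * platform): a max_addr of 0x3FFFFFFFFULL is a 34-bit address, so only
 * ADDR_MASK_32_TO_35 matches in the checks above and the range is rounded
 * up to the next architecturally supported size, TCR_PS_BITS_64GB
 * (a 36-bit, 64GB physical range).
 */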

#if ENABLE_ASSERTIONS
/* Physical Address ranges supported in the AArch64 Memory Model */
static const unsigned int pa_range_bits_arr[] = {
	PARANGE_0000, PARANGE_0001, PARANGE_0010, PARANGE_0011, PARANGE_0100,
	PARANGE_0101
};

unsigned long long xlat_arch_get_max_supported_pa(void)
{
	u_register_t pa_range = read_id_aa64mmfr0_el1() &
						ID_AA64MMFR0_EL1_PARANGE_MASK;

	/* All other values are reserved */
	assert(pa_range < ARRAY_SIZE(pa_range_bits_arr));

	return (1ull << pa_range_bits_arr[pa_range]) - 1ull;
}
#endif /* ENABLE_ASSERTIONS */

int is_mmu_enabled(void)
{
#if IMAGE_EL == 1
	assert(IS_IN_EL(1));
	return (read_sctlr_el1() & SCTLR_M_BIT) != 0;
#elif IMAGE_EL == 3
	assert(IS_IN_EL(3));
	return (read_sctlr_el3() & SCTLR_M_BIT) != 0;
#endif
}

#if PLAT_XLAT_TABLES_DYNAMIC

void xlat_arch_tlbi_va(uintptr_t va)
{
	/*
	 * Ensure the translation table write has drained into memory before
	 * invalidating the TLB entry.
	 */
	dsbishst();

#if IMAGE_EL == 1
	assert(IS_IN_EL(1));
	tlbivaae1is(TLBI_ADDR(va));
#elif IMAGE_EL == 3
	assert(IS_IN_EL(3));
	tlbivae3is(TLBI_ADDR(va));
#endif
}

void xlat_arch_tlbi_va_sync(void)
{
	/*
	 * A TLB maintenance instruction can complete at any time after
	 * it is issued, but is only guaranteed to be complete after the
	 * execution of DSB by the PE that executed the TLB maintenance
	 * instruction. After the TLB invalidate instruction is
	 * complete, no new memory accesses using the invalidated TLB
	 * entries will be observed by any observer of the system
	 * domain. See section D4.8.2 of the ARMv8 ARM (issue k),
	 * paragraph "Ordering and completion of TLB maintenance
	 * instructions".
	 */
	dsbish();

	/*
	 * The effects of a completed TLB maintenance instruction are
	 * only guaranteed to be visible on the PE that executed the
	 * instruction after the execution of an ISB instruction by the
	 * PE that executed the TLB maintenance instruction.
	 */
	isb();
}

#endif /* PLAT_XLAT_TABLES_DYNAMIC */

int xlat_arch_current_el(void)
{
	int el = GET_EL(read_CurrentEl());

	assert(el > 0);

	return el;
}

uint64_t xlat_arch_get_xn_desc(int el)
{
	if (el == 3) {
		return UPPER_ATTRS(XN);
	} else {
		assert(el == 1);
		return UPPER_ATTRS(PXN);
	}
}

void init_xlat_tables_arch(unsigned long long max_pa)
{
	assert((PLAT_PHY_ADDR_SPACE_SIZE - 1) <=
	       xlat_arch_get_max_supported_pa());

	/*
	 * If dynamic allocation of new regions is enabled the code can't make
	 * assumptions about the max physical address because it could change
	 * after adding new regions. If this functionality is disabled it is
	 * safer to restrict the max physical address as much as possible.
	 */
#ifdef PLAT_XLAT_TABLES_DYNAMIC
	tcr_ps_bits = calc_physical_addr_size_bits(PLAT_PHY_ADDR_SPACE_SIZE);
#else
	tcr_ps_bits = calc_physical_addr_size_bits(max_pa);
#endif
}

/*******************************************************************************
 * Macro generating the code for the function enabling the MMU in the given
 * exception level, assuming that the pagetables have already been created.
 *
 *   _el:		Exception level at which the function will run
 *   _tcr_extra:	Extra bits to set in the TCR register. This mask will
 *			be OR'ed with the default TCR value.
 *   _tlbi_fct:		Function to invalidate the TLBs at the current
 *			exception level
 ******************************************************************************/
#define DEFINE_ENABLE_MMU_EL(_el, _tcr_extra, _tlbi_fct)		\
	void enable_mmu_internal_el##_el(unsigned int flags,		\
					 uint64_t *base_table)		\
	{								\
		uint64_t mair, tcr, ttbr;				\
		uint32_t sctlr;						\
									\
		assert(IS_IN_EL(_el));					\
		assert((read_sctlr_el##_el() & SCTLR_M_BIT) == 0);	\
									\
		/* Invalidate TLBs at the current exception level */	\
		_tlbi_fct();						\
									\
		/* Set attributes in the right indices of the MAIR */	\
		mair = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);	\
		mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,		\
				ATTR_IWBWA_OWBWA_NTR_INDEX);		\
		mair |= MAIR_ATTR_SET(ATTR_NON_CACHEABLE,		\
				ATTR_NON_CACHEABLE_INDEX);		\
		write_mair_el##_el(mair);				\
									\
		/* Set TCR bits as well. */				\
		/* Set T0SZ to (64 - width of virtual address space) */\
		if (flags & XLAT_TABLE_NC) {				\
			/* Inner & outer non-cacheable non-shareable. */\
			tcr = TCR_SH_NON_SHAREABLE |			\
				TCR_RGN_OUTER_NC | TCR_RGN_INNER_NC |	\
				(64 - __builtin_ctzl(PLAT_VIRT_ADDR_SPACE_SIZE));\
		} else {						\
			/* Inner & outer WBWA & shareable. */		\
			tcr = TCR_SH_INNER_SHAREABLE |			\
				TCR_RGN_OUTER_WBA | TCR_RGN_INNER_WBA |	\
				(64 - __builtin_ctzl(PLAT_VIRT_ADDR_SPACE_SIZE));\
		}							\
		tcr |= _tcr_extra;					\
		write_tcr_el##_el(tcr);					\
									\
		/* Set TTBR bits as well */				\
		ttbr = (uint64_t) base_table;				\
		write_ttbr0_el##_el(ttbr);				\
									\
		/* Ensure all translation table writes have drained */	\
		/* into memory, the TLB invalidation is complete, */	\
		/* and translation register writes are committed */	\
		/* before enabling the MMU */				\
		dsbish();						\
		isb();							\
									\
		sctlr = read_sctlr_el##_el();				\
		sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT;			\
									\
		if (flags & DISABLE_DCACHE)				\
			sctlr &= ~SCTLR_C_BIT;				\
		else							\
			sctlr |= SCTLR_C_BIT;				\
									\
		write_sctlr_el##_el(sctlr);				\
									\
		/* Ensure the MMU enable takes effect immediately */	\
		isb();							\
	}

/* Define EL1 and EL3 variants of the function enabling the MMU */
#if IMAGE_EL == 1
DEFINE_ENABLE_MMU_EL(1,
		(tcr_ps_bits << TCR_EL1_IPS_SHIFT),
		tlbivmalle1)
#elif IMAGE_EL == 3
DEFINE_ENABLE_MMU_EL(3,
		TCR_EL3_RES1 | (tcr_ps_bits << TCR_EL3_PS_SHIFT),
		tlbialle3)
#endif
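
/*
 * For example, assuming a build with PLAT_VIRT_ADDR_SPACE_SIZE == (1ULL << 32)
 * and a 4GB physical range (so calc_physical_addr_size_bits() returned
 * TCR_PS_BITS_4GB), the EL3 instantiation above expands to
 * enable_mmu_internal_el3(), which programs TCR_EL3.T0SZ = 64 - 32 = 32,
 * ORs in TCR_EL3_RES1 plus the PS field cached in tcr_ps_bits, and only then
 * sets SCTLR_EL3.M. These values are illustrative, not a requirement of the
 * library.
 */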

void enable_mmu_arch(unsigned int flags, uint64_t *base_table)
{
#if IMAGE_EL == 1
	assert(IS_IN_EL(1));
	enable_mmu_internal_el1(flags, base_table);
#elif IMAGE_EL == 3
	assert(IS_IN_EL(3));
	enable_mmu_internal_el3(flags, base_table);
#endif
}
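
/*
 * Sketch of how a BL31 image typically reaches this code. mmap_add_region(),
 * init_xlat_tables() and enable_mmu_el3() belong to the public xlat_tables_v2
 * API declared in xlat_tables_v2.h, not to this file, and the region mapped
 * below is only an assumed example:
 *
 *	mmap_add_region(BL31_BASE, BL31_BASE, BL31_LIMIT - BL31_BASE,
 *			MT_MEMORY | MT_RW | MT_SECURE);
 *	init_xlat_tables();
 *	enable_mmu_el3(0);
 *
 * enable_mmu_el3() is the wrapper that ends up calling enable_mmu_arch()
 * above with the base translation table of the active context.
 */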