/*
 * Copyright (c) 2014-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

/*
 * AArch64 architecture-specific part of the (v1) translation tables library:
 * builds the base translation table and provides the enable_mmu_el1/el3
 * entry points that program MAIR/TCR/TTBR0/SCTLR.
 */

#include <assert.h>
#include <stdint.h>

#include <platform_def.h>

#include <arch.h>
#include <arch_helpers.h>
#include <common/bl_common.h>
#include <lib/utils.h>
#include <lib/xlat_tables/xlat_tables.h>
#include <lib/xlat_tables/xlat_tables_arch.h>
#include <plat/common/common_def.h>

#include "../xlat_tables_private.h"

/* Initial lookup level implied by the size of the virtual address space */
#define XLAT_TABLE_LEVEL_BASE	\
	GET_XLAT_TABLE_LEVEL_BASE(PLAT_VIRT_ADDR_SPACE_SIZE)

/* Number of entries in the table at that initial lookup level */
#define NUM_BASE_LEVEL_ENTRIES	\
	GET_NUM_BASE_LEVEL_ENTRIES(PLAT_VIRT_ADDR_SPACE_SIZE)

/*
 * Base (top-level) translation table, aligned to its own size so its
 * address can be programmed directly into TTBR0.
 */
static uint64_t base_xlation_table[NUM_BASE_LEVEL_ENTRIES]
		__aligned(NUM_BASE_LEVEL_ENTRIES * sizeof(uint64_t));

/* TCR_ELx.{I}PS encoding of the PA size; computed by init_xlat_tables() */
static unsigned long long tcr_ps_bits;

/*
 * Map the highest physical address in use to the TCR_ELx.{I}PS/PS field
 * encoding of the smallest supported physical address size covering it.
 */
static unsigned long long calc_physical_addr_size_bits(
					unsigned long long max_addr)
{
	/* Physical address can't exceed 48 bits */
	assert((max_addr & ADDR_MASK_48_TO_63) == 0U);

	/* 48 bits address */
	if ((max_addr & ADDR_MASK_44_TO_47) != 0U)
		return TCR_PS_BITS_256TB;

	/* 44 bits address */
	if ((max_addr & ADDR_MASK_42_TO_43) != 0U)
		return TCR_PS_BITS_16TB;

	/* 42 bits address */
	if ((max_addr & ADDR_MASK_40_TO_41) != 0U)
		return TCR_PS_BITS_4TB;

	/* 40 bits address */
	if ((max_addr & ADDR_MASK_36_TO_39) != 0U)
		return TCR_PS_BITS_1TB;

	/* 36 bits address */
	if ((max_addr & ADDR_MASK_32_TO_35) != 0U)
		return TCR_PS_BITS_64GB;

	return TCR_PS_BITS_4GB;
}

#if ENABLE_ASSERTIONS
/*
 * Physical Address ranges supported in the AArch64 Memory Model. Value 0b110 is
 * supported in ARMv8.2 onwards.
 */
static const unsigned int pa_range_bits_arr[] = {
	PARANGE_0000, PARANGE_0001, PARANGE_0010, PARANGE_0011, PARANGE_0100,
	PARANGE_0101, PARANGE_0110
};

/*
 * Return the largest physical address this CPU supports, derived from the
 * ID_AA64MMFR0_EL1.PARange field. Only used to validate the platform's
 * configured PA space in assertions.
 */
static unsigned long long get_max_supported_pa(void)
{
	u_register_t pa_range = read_id_aa64mmfr0_el1() &
						ID_AA64MMFR0_EL1_PARANGE_MASK;

	/* All other values are reserved */
	assert(pa_range < ARRAY_SIZE(pa_range_bits_arr));

	return (1ULL << pa_range_bits_arr[pa_range]) - 1ULL;
}
#endif /* ENABLE_ASSERTIONS */

/* Return the exception level this code runs at; must not be EL0. */
unsigned int xlat_arch_current_el(void)
{
	unsigned int el = (unsigned int)GET_EL(read_CurrentEl());

	assert(el > 0U);

	return el;
}

/*
 * Return the upper-attributes execute-never descriptor bit appropriate for
 * the translation regime of the given EL: XN at EL3, PXN at EL1. Other ELs
 * are not supported (asserted).
 */
uint64_t xlat_arch_get_xn_desc(unsigned int el)
{
	if (el == 3U) {
		return UPPER_ATTRS(XN);
	} else {
		assert(el == 1U);
		return UPPER_ATTRS(PXN);
	}
}

/*
 * Build the translation tables from the platform's mmap regions and cache
 * the TCR PS/IPS encoding needed when the MMU is enabled later. Must be
 * called before enable_mmu_el1/el3().
 */
void init_xlat_tables(void)
{
	unsigned long long max_pa;
	uintptr_t max_va;

	print_mmap();
	init_xlation_table(0U, base_xlation_table, XLAT_TABLE_LEVEL_BASE,
			   &max_va, &max_pa);

	/* The mapped regions must fit the configured VA/PA spaces, and the
	 * PA space must be supported by the hardware. */
	assert(max_va <= (PLAT_VIRT_ADDR_SPACE_SIZE - 1U));
	assert(max_pa <= (PLAT_PHY_ADDR_SPACE_SIZE - 1U));
	assert((PLAT_PHY_ADDR_SPACE_SIZE - 1U) <= get_max_supported_pa());

	tcr_ps_bits = calc_physical_addr_size_bits(max_pa);
}

/*******************************************************************************
 * Macro generating the code for the function enabling the MMU in the given
 * exception level, assuming that the pagetables have already been created.
 *
 *   _el:		Exception level at which the function will run
 *   _tcr_extra:	Extra bits to set in the TCR register. This mask will
 *			be OR'ed with the default TCR value.
 *   _tlbi_fct:		Function to invalidate the TLBs at the current
 *			exception level
 ******************************************************************************/
#define DEFINE_ENABLE_MMU_EL(_el, _tcr_extra, _tlbi_fct)		\
	void enable_mmu_el##_el(unsigned int flags)			\
	{								\
		uint64_t mair, tcr, ttbr;				\
		uint32_t sctlr;						\
									\
		assert(IS_IN_EL(_el));					\
		/* The MMU must still be disabled at this EL */		\
		assert((read_sctlr_el##_el() & SCTLR_M_BIT) == 0U);	\
									\
		/* Set attributes in the right indices of the MAIR */	\
		mair = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);	\
		mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,		\
				ATTR_IWBWA_OWBWA_NTR_INDEX);		\
		mair |= MAIR_ATTR_SET(ATTR_NON_CACHEABLE,		\
				ATTR_NON_CACHEABLE_INDEX);		\
		write_mair_el##_el(mair);				\
									\
		/* Invalidate TLBs at the current exception level */	\
		_tlbi_fct();						\
									\
		/* Set TCR bits as well. */				\
		/* Set T0SZ to (64 - width of virtual address space) */	\
		/* (assumes PLAT_VIRT_ADDR_SPACE_SIZE is a power of 2,	\
		 * so ctzll yields its log2) */				\
		int t0sz = 64 - __builtin_ctzll(PLAT_VIRT_ADDR_SPACE_SIZE);\
									\
		if ((flags & XLAT_TABLE_NC) != 0U) {			\
			/* Inner & outer non-cacheable non-shareable. */\
			tcr = TCR_SH_NON_SHAREABLE |			\
				TCR_RGN_OUTER_NC | TCR_RGN_INNER_NC |	\
				(uint64_t) t0sz;			\
		} else {						\
			/* Inner & outer WBWA & shareable. */		\
			tcr = TCR_SH_INNER_SHAREABLE |			\
				TCR_RGN_OUTER_WBA | TCR_RGN_INNER_WBA |	\
				(uint64_t) t0sz;			\
		}							\
		tcr |= _tcr_extra;					\
		write_tcr_el##_el(tcr);					\
									\
		/* Set TTBR bits as well */				\
		ttbr = (uint64_t) base_xlation_table;			\
		write_ttbr0_el##_el(ttbr);				\
									\
		/* Ensure all translation table writes have drained */	\
		/* into memory, the TLB invalidation is complete, */	\
		/* and translation register writes are committed */	\
		/* before enabling the MMU */				\
		dsbish();						\
		isb();							\
									\
		sctlr = read_sctlr_el##_el();				\
		sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT;			\
									\
		if ((flags & DISABLE_DCACHE) != 0U)			\
			sctlr &= ~SCTLR_C_BIT;				\
		else							\
			sctlr |= SCTLR_C_BIT;				\
									\
		write_sctlr_el##_el(sctlr);				\
									\
		/* Ensure the MMU enable takes effect immediately */	\
		isb();							\
	}								\
									\
	void enable_mmu_direct_el##_el(unsigned int flags)		\
	{								\
		enable_mmu_el##_el(flags);				\
	}

/* Define EL1 and EL3 variants of the function enabling the MMU */
DEFINE_ENABLE_MMU_EL(1,
		/*
		 * TCR_EL1.EPD1: Disable translation table walk for addresses
		 * that are translated using TTBR1_EL1.
		 */
		TCR_EPD1_BIT | (tcr_ps_bits << TCR_EL1_IPS_SHIFT),
		tlbivmalle1)
DEFINE_ENABLE_MMU_EL(3,
		TCR_EL3_RES1 | (tcr_ps_bits << TCR_EL3_PS_SHIFT),
		tlbialle3)