/*
 * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch_helpers.h>
#include <assert.h>

#include <platform_def.h>

#include <common/debug.h>
#include <lib/xlat_tables/xlat_tables_defs.h>
#include <lib/xlat_tables/xlat_tables_v2.h>

#include "xlat_tables_private.h"

/*
 * MMU configuration register values for the active translation context. Used
 * from the MMU assembly helpers.
 */
uint64_t mmu_cfg_params[MMU_CFG_PARAM_MAX];

/*
 * Allocate and initialise the default translation context for the BL image
 * currently executing.
 */
#if PLAT_RO_XLAT_TABLES
REGISTER_XLAT_CONTEXT_RO_BASE_TABLE(tf, MAX_MMAP_REGIONS, MAX_XLAT_TABLES,
                                    PLAT_VIRT_ADDR_SPACE_SIZE,
                                    PLAT_PHY_ADDR_SPACE_SIZE,
                                    EL_REGIME_INVALID, "xlat_table");
#else
REGISTER_XLAT_CONTEXT(tf, MAX_MMAP_REGIONS, MAX_XLAT_TABLES,
                      PLAT_VIRT_ADDR_SPACE_SIZE, PLAT_PHY_ADDR_SPACE_SIZE);
#endif

void mmap_add_region(unsigned long long base_pa, uintptr_t base_va, size_t size,
                     unsigned int attr)
{
        mmap_region_t mm = MAP_REGION(base_pa, base_va, size, attr);

        mmap_add_region_ctx(&tf_xlat_ctx, &mm);
}

void mmap_add(const mmap_region_t *mm)
{
        mmap_add_ctx(&tf_xlat_ctx, mm);
}

void mmap_add_region_alloc_va(unsigned long long base_pa, uintptr_t *base_va,
                              size_t size, unsigned int attr)
{
        mmap_region_t mm = MAP_REGION_ALLOC_VA(base_pa, size, attr);

        mmap_add_region_alloc_va_ctx(&tf_xlat_ctx, &mm);

        *base_va = mm.base_va;
}

void mmap_add_alloc_va(mmap_region_t *mm)
{
        /* The array is expected to be terminated by a zeroed region. */
        while (mm->granularity != 0U) {
                assert(mm->base_va == 0U);
                mmap_add_region_alloc_va_ctx(&tf_xlat_ctx, mm);
                mm++;
        }
}

#if PLAT_XLAT_TABLES_DYNAMIC

int mmap_add_dynamic_region(unsigned long long base_pa, uintptr_t base_va,
                            size_t size, unsigned int attr)
{
        mmap_region_t mm = MAP_REGION(base_pa, base_va, size, attr);

        return mmap_add_dynamic_region_ctx(&tf_xlat_ctx, &mm);
}

int mmap_add_dynamic_region_alloc_va(unsigned long long base_pa,
                                     uintptr_t *base_va, size_t size,
                                     unsigned int attr)
{
        mmap_region_t mm = MAP_REGION_ALLOC_VA(base_pa, size, attr);

        int rc = mmap_add_dynamic_region_alloc_va_ctx(&tf_xlat_ctx, &mm);

        *base_va = mm.base_va;

        return rc;
}

int mmap_remove_dynamic_region(uintptr_t base_va, size_t size)
{
        return mmap_remove_dynamic_region_ctx(&tf_xlat_ctx,
                                              base_va, size);
}

#endif /* PLAT_XLAT_TABLES_DYNAMIC */

void __init init_xlat_tables(void)
{
        assert(tf_xlat_ctx.xlat_regime == EL_REGIME_INVALID);

        unsigned int current_el = xlat_arch_current_el();

        if (current_el == 1U) {
                tf_xlat_ctx.xlat_regime = EL1_EL0_REGIME;
        } else if (current_el == 2U) {
                tf_xlat_ctx.xlat_regime = EL2_REGIME;
        } else {
                assert(current_el == 3U);
                tf_xlat_ctx.xlat_regime = EL3_REGIME;
        }

        init_xlat_tables_ctx(&tf_xlat_ctx);
}

int xlat_get_mem_attributes(uintptr_t base_va, uint32_t *attr)
{
        return xlat_get_mem_attributes_ctx(&tf_xlat_ctx, base_va, attr);
}

int xlat_change_mem_attributes(uintptr_t base_va, size_t size, uint32_t attr)
{
        return xlat_change_mem_attributes_ctx(&tf_xlat_ctx, base_va, size, attr);
}
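
/*
 * Typical usage of the wrappers above, as an illustrative sketch only: the
 * base addresses, sizes and attributes below are hypothetical and would come
 * from platform_def.h on a real platform. Static regions must be registered
 * before init_xlat_tables(); after that, only the dynamic API (when
 * PLAT_XLAT_TABLES_DYNAMIC is enabled) and xlat_change_mem_attributes() can
 * modify the mapping.
 *
 *      mmap_add_region(0x80000000ULL, 0x80000000U, 0x100000U,
 *                      MT_MEMORY | MT_RW | MT_SECURE);
 *      mmap_add_region(0x1c090000ULL, 0x1c090000U, 0x1000U,
 *                      MT_DEVICE | MT_RW | MT_SECURE);
 *      init_xlat_tables();
 *      enable_mmu_el3(0U);  (or enable_mmu_el1()/enable_mmu_svc_mon())
 */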
#if PLAT_RO_XLAT_TABLES
/* Change the memory attributes of the descriptors which resolve the address
 * range that belongs to the translation tables themselves, which are by
 * default mapped as part of read-write data in the BL image's memory.
 *
 * Since the translation tables map themselves via these level 3 (page)
 * descriptors, any change applied to them with the MMU on would introduce a
 * chicken-and-egg problem because of the break-before-make sequence.
 * Eventually, it would reach the descriptor that resolves the very table it
 * belongs to and the invalidation (break step) would cause the subsequent
 * write (make step) to it to generate an MMU fault. Therefore, the MMU is
 * disabled before making the change.
 *
 * No assumption is made about what data this function needs, therefore all the
 * caches are flushed in order to ensure coherency. A future optimization would
 * be to only flush the required data to main memory.
 */
int xlat_make_tables_readonly(void)
{
        assert(tf_xlat_ctx.initialized == true);
#ifdef __aarch64__
        if (tf_xlat_ctx.xlat_regime == EL1_EL0_REGIME) {
                disable_mmu_el1();
        } else if (tf_xlat_ctx.xlat_regime == EL3_REGIME) {
                disable_mmu_el3();
        } else {
                assert(tf_xlat_ctx.xlat_regime == EL2_REGIME);
                return -1;
        }

        /* Flush all caches. */
        dcsw_op_all(DCCISW);
#else /* !__aarch64__ */
        assert(tf_xlat_ctx.xlat_regime == EL1_EL0_REGIME);
        /* On AArch32, we flush the caches before disabling the MMU. The reason
         * for this is that the dcsw_op_all AArch32 function pushes some
         * registers onto the stack under the assumption that it is writing to
         * cacheable memory, which is no longer true once the MMU is off. The
         * stack would become corrupted and a wrong/junk value would be
         * restored into the LR at the end of the routine.
         */
        dcsw_op_all(DC_OP_CISW);
        disable_mmu_secure();
#endif

        int rc = xlat_change_mem_attributes_ctx(&tf_xlat_ctx,
                                (uintptr_t)tf_xlat_ctx.tables,
                                tf_xlat_ctx.tables_num * XLAT_TABLE_SIZE,
                                MT_RO_DATA | MT_SECURE);

#ifdef __aarch64__
        if (tf_xlat_ctx.xlat_regime == EL1_EL0_REGIME) {
                enable_mmu_el1(0U);
        } else {
                assert(tf_xlat_ctx.xlat_regime == EL3_REGIME);
                enable_mmu_el3(0U);
        }
#else /* !__aarch64__ */
        enable_mmu_svc_mon(0U);
#endif

        if (rc == 0) {
                tf_xlat_ctx.readonly_tables = true;
        }

        return rc;
}
#endif /* PLAT_RO_XLAT_TABLES */
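
/*
 * Sketch of how a BL image might call the function above (hedged example;
 * call sites and error handling are platform decisions). The tables can only
 * be made read-only once the context has been initialised and, on AArch64,
 * only for the EL1&0 and EL3 regimes:
 *
 *      init_xlat_tables();
 *      enable_mmu_el3(0U);
 *      ...
 *      if (xlat_make_tables_readonly() != 0) {
 *              ERROR("Failed to make translation tables read-only\n");
 *              panic();
 *      }
 */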

/*
 * If dynamic allocation of new regions is disabled then by the time we call the
 * function enabling the MMU, we'll have registered all the memory regions to
 * map for the system's lifetime. Therefore, at this point we know the maximum
 * physical address that will ever be mapped.
 *
 * If dynamic allocation is enabled then we can't make any such assumption
 * because the maximum physical address could get pushed while adding a new
 * region. Therefore, in this case we have to assume that the whole address
 * space size might be mapped.
 */
#ifdef PLAT_XLAT_TABLES_DYNAMIC
#define MAX_PHYS_ADDR   tf_xlat_ctx.pa_max_address
#else
#define MAX_PHYS_ADDR   tf_xlat_ctx.max_pa
#endif

#ifdef __aarch64__

void enable_mmu_el1(unsigned int flags)
{
        setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
                      tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
                      tf_xlat_ctx.va_max_address, EL1_EL0_REGIME);
        enable_mmu_direct_el1(flags);
}

void enable_mmu_el2(unsigned int flags)
{
        setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
                      tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
                      tf_xlat_ctx.va_max_address, EL2_REGIME);
        enable_mmu_direct_el2(flags);
}

void enable_mmu_el3(unsigned int flags)
{
        setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
                      tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
                      tf_xlat_ctx.va_max_address, EL3_REGIME);
        enable_mmu_direct_el3(flags);
}

#else /* !__aarch64__ */

void enable_mmu_svc_mon(unsigned int flags)
{
        setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
                      tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
                      tf_xlat_ctx.va_max_address, EL1_EL0_REGIME);
        enable_mmu_direct_svc_mon(flags);
}

void enable_mmu_hyp(unsigned int flags)
{
        setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
                      tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
                      tf_xlat_ctx.va_max_address, EL2_REGIME);
        enable_mmu_direct_hyp(flags);
}

#endif /* __aarch64__ */
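
/*
 * The "flags" argument is forwarded both to setup_mmu_cfg() and to the
 * enable_mmu_direct_*() helpers. A sketch of a non-default call, assuming the
 * flag names provided by the xlat library headers (DISABLE_DCACHE leaves the
 * data cache disabled when the MMU is turned on):
 *
 *      enable_mmu_el3(DISABLE_DCACHE);
 *
 * Passing 0U enables the MMU with the default (cacheable) configuration, as
 * done elsewhere in this file.
 */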