/*
 * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>

#include <platform_def.h>

#include <common/debug.h>
#include <lib/xlat_tables/xlat_tables_defs.h>
#include <lib/xlat_tables/xlat_tables_v2.h>

#include "xlat_tables_private.h"

/*
 * MMU configuration register values for the active translation context. Used
 * from the MMU assembly helpers.
 */
uint64_t mmu_cfg_params[MMU_CFG_PARAM_MAX];

/*
 * Allocate and initialise the default translation context for the BL image
 * currently executing.
 */
REGISTER_XLAT_CONTEXT(tf, MAX_MMAP_REGIONS, MAX_XLAT_TABLES,
		      PLAT_VIRT_ADDR_SPACE_SIZE, PLAT_PHY_ADDR_SPACE_SIZE);

/* Add a static region with defined base PA and base VA. */
void mmap_add_region(unsigned long long base_pa, uintptr_t base_va, size_t size,
		     unsigned int attr)
{
	mmap_region_t mm = MAP_REGION(base_pa, base_va, size, attr);

	mmap_add_region_ctx(&tf_xlat_ctx, &mm);
}

/* Add an array of static regions, terminated by a zero-filled entry. */
void mmap_add(const mmap_region_t *mm)
{
	mmap_add_ctx(&tf_xlat_ctx, mm);
}

/* Add a static region, letting the library allocate its base VA. */
void mmap_add_region_alloc_va(unsigned long long base_pa, uintptr_t *base_va,
			      size_t size, unsigned int attr)
{
	mmap_region_t mm = MAP_REGION_ALLOC_VA(base_pa, size, attr);

	mmap_add_region_alloc_va_ctx(&tf_xlat_ctx, &mm);

	*base_va = mm.base_va;
}

/*
 * Add an array of regions whose VAs are allocated by the library. The array
 * must be terminated by a zero-filled entry.
 */
void mmap_add_alloc_va(mmap_region_t *mm)
{
	while (mm->granularity != 0U) {
		assert(mm->base_va == 0U);
		mmap_add_region_alloc_va_ctx(&tf_xlat_ctx, mm);
		mm++;
	}
}

#if PLAT_XLAT_TABLES_DYNAMIC

/* Add a dynamic region with defined base PA and base VA. */
int mmap_add_dynamic_region(unsigned long long base_pa, uintptr_t base_va,
			    size_t size, unsigned int attr)
{
	mmap_region_t mm = MAP_REGION(base_pa, base_va, size, attr);

	return mmap_add_dynamic_region_ctx(&tf_xlat_ctx, &mm);
}

/* Add a dynamic region, letting the library allocate its base VA. */
int mmap_add_dynamic_region_alloc_va(unsigned long long base_pa,
				     uintptr_t *base_va, size_t size,
				     unsigned int attr)
{
	mmap_region_t mm = MAP_REGION_ALLOC_VA(base_pa, size, attr);

	int rc = mmap_add_dynamic_region_alloc_va_ctx(&tf_xlat_ctx, &mm);

	*base_va = mm.base_va;

	return rc;
}

/* Remove a region previously added with one of the helpers above. */
int mmap_remove_dynamic_region(uintptr_t base_va, size_t size)
{
	return mmap_remove_dynamic_region_ctx(&tf_xlat_ctx,
					      base_va, size);
}

#endif /* PLAT_XLAT_TABLES_DYNAMIC */

/*
 * Initialise the translation tables of the default context for the exception
 * level this image runs at.
 */
void __init init_xlat_tables(void)
{
	assert(tf_xlat_ctx.xlat_regime == EL_REGIME_INVALID);

	unsigned int current_el = xlat_arch_current_el();

	if (current_el == 1U) {
		tf_xlat_ctx.xlat_regime = EL1_EL0_REGIME;
	} else if (current_el == 2U) {
		tf_xlat_ctx.xlat_regime = EL2_REGIME;
	} else {
		assert(current_el == 3U);
		tf_xlat_ctx.xlat_regime = EL3_REGIME;
	}

	init_xlat_tables_ctx(&tf_xlat_ctx);
}

int xlat_get_mem_attributes(uintptr_t base_va, uint32_t *attr)
{
	return xlat_get_mem_attributes_ctx(&tf_xlat_ctx, base_va, attr);
}

int xlat_change_mem_attributes(uintptr_t base_va, size_t size, uint32_t attr)
{
	return xlat_change_mem_attributes_ctx(&tf_xlat_ctx, base_va, size, attr);
}
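
/*
 * Illustrative usage of the static mapping API above (a sketch only, not
 * part of this file). "plat_setup_xlat", "DEVICE0_BASE" and "DEVICE0_SIZE"
 * are hypothetical names a platform port might define; the attributes are
 * one plausible choice for a secure device mapping.
 *
 *	static void plat_setup_xlat(void)
 *	{
 *		mmap_add_region(DEVICE0_BASE, DEVICE0_BASE, DEVICE0_SIZE,
 *				MT_DEVICE | MT_RW | MT_SECURE);
 *		init_xlat_tables();
 *	}
 */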
/*
 * If dynamic allocation of new regions is disabled then, by the time we call
 * the function enabling the MMU, we'll have registered all the memory regions
 * to map for the system's lifetime. Therefore, at this point we know the
 * maximum physical address that will ever be mapped.
 *
 * If dynamic allocation is enabled, however, we can't make any such
 * assumption because the maximum physical address could get pushed while
 * adding a new region. Therefore, in this case we have to assume that the
 * whole address space size might be mapped.
 */
#if PLAT_XLAT_TABLES_DYNAMIC
#define MAX_PHYS_ADDR	tf_xlat_ctx.pa_max_address
#else
#define MAX_PHYS_ADDR	tf_xlat_ctx.max_pa
#endif

#ifdef AARCH32

void enable_mmu_svc_mon(unsigned int flags)
{
	setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
		      tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
		      tf_xlat_ctx.va_max_address, EL1_EL0_REGIME);
	enable_mmu_direct_svc_mon(flags);
}

void enable_mmu_hyp(unsigned int flags)
{
	setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
		      tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
		      tf_xlat_ctx.va_max_address, EL2_REGIME);
	enable_mmu_direct_hyp(flags);
}

#else

void enable_mmu_el1(unsigned int flags)
{
	setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
		      tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
		      tf_xlat_ctx.va_max_address, EL1_EL0_REGIME);
	enable_mmu_direct_el1(flags);
}

void enable_mmu_el2(unsigned int flags)
{
	setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
		      tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
		      tf_xlat_ctx.va_max_address, EL2_REGIME);
	enable_mmu_direct_el2(flags);
}

void enable_mmu_el3(unsigned int flags)
{
	setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
		      tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
		      tf_xlat_ctx.va_max_address, EL3_REGIME);
	enable_mmu_direct_el3(flags);
}

#endif /* AARCH32 */
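
/*
 * Illustrative dynamic mapping sequence (a sketch only, assuming the image
 * is built with PLAT_XLAT_TABLES_DYNAMIC enabled): once the MMU is on, a
 * region can be mapped on demand and removed when no longer needed.
 * "buf_pa" and "buf_va" are hypothetical variables, not part of this file.
 *
 *	uintptr_t buf_va;
 *	int ret = mmap_add_dynamic_region_alloc_va(buf_pa, &buf_va, PAGE_SIZE,
 *						   MT_MEMORY | MT_RW | MT_SECURE);
 *	if (ret == 0) {
 *		(access the memory through buf_va, then unmap it)
 *		ret = mmap_remove_dynamic_region(buf_va, PAGE_SIZE);
 *	}
 */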