/*
 * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <debug.h>
#include <platform_def.h>
#include <xlat_tables_defs.h>
#include <xlat_tables_v2.h>

#include "xlat_tables_private.h"

/*
 * MMU configuration register values for the active translation context. Used
 * from the MMU assembly helpers.
 */
uint64_t mmu_cfg_params[MMU_CFG_PARAM_MAX];

/*
 * Each platform can define the size of its physical and virtual address spaces.
 * If the platform hasn't defined one or both of them, default to
 * ADDR_SPACE_SIZE. The latter is deprecated, though.
 */
#if ERROR_DEPRECATED
# ifdef ADDR_SPACE_SIZE
#  error "ADDR_SPACE_SIZE is deprecated. Use PLAT_xxx_ADDR_SPACE_SIZE instead."
# endif
#elif defined(ADDR_SPACE_SIZE)
# ifndef PLAT_PHY_ADDR_SPACE_SIZE
#  define PLAT_PHY_ADDR_SPACE_SIZE	ADDR_SPACE_SIZE
# endif
# ifndef PLAT_VIRT_ADDR_SPACE_SIZE
#  define PLAT_VIRT_ADDR_SPACE_SIZE	ADDR_SPACE_SIZE
# endif
#endif

/*
 * Allocate and initialise the default translation context for the BL image
 * currently executing.
 */
REGISTER_XLAT_CONTEXT(tf, MAX_MMAP_REGIONS, MAX_XLAT_TABLES,
		      PLAT_VIRT_ADDR_SPACE_SIZE, PLAT_PHY_ADDR_SPACE_SIZE);

void mmap_add_region(unsigned long long base_pa, uintptr_t base_va, size_t size,
		     unsigned int attr)
{
	mmap_region_t mm = MAP_REGION(base_pa, base_va, size, attr);

	mmap_add_region_ctx(&tf_xlat_ctx, &mm);
}

void mmap_add(const mmap_region_t *mm)
{
	mmap_add_ctx(&tf_xlat_ctx, mm);
}

#if PLAT_XLAT_TABLES_DYNAMIC

int mmap_add_dynamic_region(unsigned long long base_pa, uintptr_t base_va,
			    size_t size, unsigned int attr)
{
	mmap_region_t mm = MAP_REGION(base_pa, base_va, size, attr);

	return mmap_add_dynamic_region_ctx(&tf_xlat_ctx, &mm);
}

int mmap_remove_dynamic_region(uintptr_t base_va, size_t size)
{
	return mmap_remove_dynamic_region_ctx(&tf_xlat_ctx,
					      base_va, size);
}

#endif /* PLAT_XLAT_TABLES_DYNAMIC */

void init_xlat_tables(void)
{
	assert(tf_xlat_ctx.xlat_regime == EL_REGIME_INVALID);

	unsigned int current_el = xlat_arch_current_el();

	if (current_el == 1U) {
		tf_xlat_ctx.xlat_regime = EL1_EL0_REGIME;
	} else if (current_el == 2U) {
		tf_xlat_ctx.xlat_regime = EL2_REGIME;
	} else {
		assert(current_el == 3U);
		tf_xlat_ctx.xlat_regime = EL3_REGIME;
	}

	init_xlat_tables_ctx(&tf_xlat_ctx);
}
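
/*
 * Illustrative sketch (not part of this file): a typical BL image registers
 * its regions with the helpers above, initialises the tables, and only then
 * enables the MMU for the current exception level. The BL_CODE_*/BL_RO_DATA_*
 * symbols and attribute combinations below stand in for whatever the image
 * actually maps.
 *
 *	mmap_add_region(BL_CODE_BASE, BL_CODE_BASE,
 *			BL_CODE_END - BL_CODE_BASE,
 *			MT_CODE | MT_SECURE);
 *	mmap_add_region(BL_RO_DATA_BASE, BL_RO_DATA_BASE,
 *			BL_RO_DATA_END - BL_RO_DATA_BASE,
 *			MT_RO_DATA | MT_SECURE);
 *	init_xlat_tables();
 *	enable_mmu_el3(0);	(or enable_mmu_svc_mon() on AArch32)
 */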

int xlat_get_mem_attributes(uintptr_t base_va, uint32_t *attr)
{
	return xlat_get_mem_attributes_ctx(&tf_xlat_ctx, base_va, attr);
}

int xlat_change_mem_attributes(uintptr_t base_va, size_t size, uint32_t attr)
{
	return xlat_change_mem_attributes_ctx(&tf_xlat_ctx, base_va, size, attr);
}

/*
 * If dynamic allocation of new regions is disabled then by the time we call the
 * function enabling the MMU, we'll have registered all the memory regions to
 * map for the system's lifetime. Therefore, at this point we know the maximum
 * physical address that will ever be mapped.
 *
 * If dynamic allocation is enabled then we can't make any such assumption
 * because the maximum physical address could get pushed while adding a new
 * region. Therefore, in this case we have to assume that the whole address
 * space size might be mapped.
 */
#ifdef PLAT_XLAT_TABLES_DYNAMIC
#define MAX_PHYS_ADDR	tf_xlat_ctx.pa_max_address
#else
#define MAX_PHYS_ADDR	tf_xlat_ctx.max_pa
#endif

#ifdef AARCH32

#if !ERROR_DEPRECATED
void enable_mmu_secure(unsigned int flags)
{
	enable_mmu_svc_mon(flags);
}

void enable_mmu_direct(unsigned int flags)
{
	enable_mmu_direct_svc_mon(flags);
}
#endif

void enable_mmu_svc_mon(unsigned int flags)
{
	setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
		      tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
		      tf_xlat_ctx.va_max_address, EL1_EL0_REGIME);
	enable_mmu_direct_svc_mon(flags);
}

void enable_mmu_hyp(unsigned int flags)
{
	setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
		      tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
		      tf_xlat_ctx.va_max_address, EL2_REGIME);
	enable_mmu_direct_hyp(flags);
}

#else

void enable_mmu_el1(unsigned int flags)
{
	setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
		      tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
		      tf_xlat_ctx.va_max_address, EL1_EL0_REGIME);
	enable_mmu_direct_el1(flags);
}

void enable_mmu_el2(unsigned int flags)
{
	setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
		      tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
		      tf_xlat_ctx.va_max_address, EL2_REGIME);
	enable_mmu_direct_el2(flags);
}

void enable_mmu_el3(unsigned int flags)
{
	setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
		      tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
		      tf_xlat_ctx.va_max_address, EL3_REGIME);
	enable_mmu_direct_el3(flags);
}

#endif /* AARCH32 */
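
/*
 * Usage note (illustrative sketch): with PLAT_XLAT_TABLES_DYNAMIC enabled,
 * regions can also be mapped and unmapped after the MMU is on. The dev_pa and
 * dev_va values below are hypothetical placeholders.
 *
 *	int ret = mmap_add_dynamic_region(dev_pa, dev_va, PAGE_SIZE,
 *					  MT_DEVICE | MT_RW | MT_SECURE);
 *	if (ret == 0) {
 *		... access the device through dev_va ...
 *		ret = mmap_remove_dynamic_region(dev_va, PAGE_SIZE);
 *	}
 */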