xref: /rk3399_ARM-atf/lib/xlat_tables_v2/xlat_tables_context.c (revision 0a0a7a9ac82cb79af91f098cedc69cc67bca3978)
/*
 * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch_helpers.h>
#include <assert.h>

#include <platform_def.h>

#include <common/debug.h>
#include <lib/xlat_tables/xlat_tables_defs.h>
#include <lib/xlat_tables/xlat_tables_v2.h>

#include "xlat_tables_private.h"

/*
 * MMU configuration register values for the active translation context. Used
 * from the MMU assembly helpers.
 */
uint64_t mmu_cfg_params[MMU_CFG_PARAM_MAX];

/*
 * Allocate and initialise the default translation context for the BL image
 * currently executing.
 */
#if PLAT_RO_XLAT_TABLES
#define BASE_XLAT_TABLE_SECTION		".rodata"
#else
#define BASE_XLAT_TABLE_SECTION		".bss"
#endif

REGISTER_XLAT_CONTEXT(tf, MAX_MMAP_REGIONS, MAX_XLAT_TABLES,
		      PLAT_VIRT_ADDR_SPACE_SIZE, PLAT_PHY_ADDR_SPACE_SIZE,
		      BASE_XLAT_TABLE_SECTION);
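
/*
 * Illustrative usage sketch (not part of the upstream file): a BL image
 * typically registers its memory regions against this default context, builds
 * the translation tables and then enables the MMU. The addresses, sizes and
 * attributes below are made-up examples, and enable_mmu_el3() assumes the
 * image runs at EL3.
 *
 *	mmap_add_region(0x00000000ULL, 0x00000000, 0x40000000,
 *			MT_MEMORY | MT_RW | MT_SECURE);
 *	mmap_add_region(0xF8000000ULL, 0xF8000000, 0x04000000,
 *			MT_DEVICE | MT_RW | MT_SECURE);
 *	init_xlat_tables();
 *	enable_mmu_el3(0U);
 */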

void mmap_add_region(unsigned long long base_pa, uintptr_t base_va, size_t size,
		     unsigned int attr)
{
	mmap_region_t mm = MAP_REGION(base_pa, base_va, size, attr);

	mmap_add_region_ctx(&tf_xlat_ctx, &mm);
}

void mmap_add(const mmap_region_t *mm)
{
	mmap_add_ctx(&tf_xlat_ctx, mm);
}

void mmap_add_region_alloc_va(unsigned long long base_pa, uintptr_t *base_va,
			      size_t size, unsigned int attr)
{
	mmap_region_t mm = MAP_REGION_ALLOC_VA(base_pa, size, attr);

	mmap_add_region_alloc_va_ctx(&tf_xlat_ctx, &mm);

	*base_va = mm.base_va;
}
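
/*
 * Illustrative usage sketch (not part of the upstream file): the library picks
 * a free virtual address for the region and hands it back through the
 * out-parameter. The physical address, size and attributes are made-up
 * examples.
 *
 *	uintptr_t va;
 *
 *	mmap_add_region_alloc_va(0x80000000ULL, &va, PAGE_SIZE,
 *				 MT_MEMORY | MT_RW | MT_SECURE);
 *
 * After the call, 'va' holds the virtual address assigned to the region.
 */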

void mmap_add_alloc_va(mmap_region_t *mm)
{
	while (mm->granularity != 0U) {
		assert(mm->base_va == 0U);
		mmap_add_region_alloc_va_ctx(&tf_xlat_ctx, mm);
		mm++;
	}
}
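
/*
 * Illustrative usage sketch (not part of the upstream file): the array passed
 * to mmap_add_alloc_va() must be terminated by a zero-filled entry, since the
 * loop above stops at the first region whose granularity is 0. The regions
 * below are made-up examples.
 *
 *	static mmap_region_t regions[] = {
 *		MAP_REGION_ALLOC_VA(0x80000000ULL, 0x1000, MT_MEMORY | MT_RW),
 *		MAP_REGION_ALLOC_VA(0x90000000ULL, 0x1000, MT_DEVICE | MT_RW),
 *		{0}
 *	};
 *
 *	mmap_add_alloc_va(regions);
 */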

#if PLAT_XLAT_TABLES_DYNAMIC

int mmap_add_dynamic_region(unsigned long long base_pa, uintptr_t base_va,
			    size_t size, unsigned int attr)
{
	mmap_region_t mm = MAP_REGION(base_pa, base_va, size, attr);

	return mmap_add_dynamic_region_ctx(&tf_xlat_ctx, &mm);
}

int mmap_add_dynamic_region_alloc_va(unsigned long long base_pa,
				     uintptr_t *base_va, size_t size,
				     unsigned int attr)
{
	mmap_region_t mm = MAP_REGION_ALLOC_VA(base_pa, size, attr);

	int rc = mmap_add_dynamic_region_alloc_va_ctx(&tf_xlat_ctx, &mm);

	*base_va = mm.base_va;

	return rc;
}

int mmap_remove_dynamic_region(uintptr_t base_va, size_t size)
{
	return mmap_remove_dynamic_region_ctx(&tf_xlat_ctx,
					base_va, size);
}
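
/*
 * Illustrative usage sketch (not part of the upstream file): dynamic regions
 * can be mapped and unmapped after the tables have been initialised, e.g. to
 * access a buffer temporarily. The address and attributes are made-up
 * examples; real code should check the return values.
 *
 *	int ret;
 *
 *	ret = mmap_add_dynamic_region(0xA0000000ULL, 0xA0000000, PAGE_SIZE,
 *				      MT_MEMORY | MT_RW | MT_NS);
 *	if (ret == 0) {
 *		... access the buffer through the new mapping ...
 *		ret = mmap_remove_dynamic_region(0xA0000000, PAGE_SIZE);
 *	}
 */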

#endif /* PLAT_XLAT_TABLES_DYNAMIC */

void __init init_xlat_tables(void)
{
	assert(tf_xlat_ctx.xlat_regime == EL_REGIME_INVALID);

	unsigned int current_el = xlat_arch_current_el();

	if (current_el == 1U) {
		tf_xlat_ctx.xlat_regime = EL1_EL0_REGIME;
	} else if (current_el == 2U) {
		tf_xlat_ctx.xlat_regime = EL2_REGIME;
	} else {
		assert(current_el == 3U);
		tf_xlat_ctx.xlat_regime = EL3_REGIME;
	}

	init_xlat_tables_ctx(&tf_xlat_ctx);
}

int xlat_get_mem_attributes(uintptr_t base_va, uint32_t *attr)
{
	return xlat_get_mem_attributes_ctx(&tf_xlat_ctx, base_va, attr);
}

int xlat_change_mem_attributes(uintptr_t base_va, size_t size, uint32_t attr)
{
	return xlat_change_mem_attributes_ctx(&tf_xlat_ctx, base_va, size, attr);
}
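
/*
 * Illustrative usage sketch (not part of the upstream file): querying and then
 * tightening the attributes of an already mapped page. The address is a
 * made-up example; both calls return 0 on success.
 *
 *	uint32_t attr;
 *
 *	if (xlat_get_mem_attributes(0x00100000, &attr) == 0) {
 *		(void)xlat_change_mem_attributes(0x00100000, PAGE_SIZE,
 *						 MT_RO_DATA | MT_SECURE);
 *	}
 */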

#if PLAT_RO_XLAT_TABLES
/*
 * Change the memory attributes of the descriptors which resolve the address
 * range that belongs to the translation tables themselves, which are by
 * default mapped as part of read-write data in the BL image's memory.
 *
 * Since the translation tables map themselves via these level 3 (page)
 * descriptors, any change applied to them with the MMU on would introduce a
 * chicken and egg problem because of the break-before-make sequence.
 * Eventually, it would reach the descriptor that resolves the very table it
 * belongs to and the invalidation (break step) would cause the subsequent
 * write (make step) to it to generate an MMU fault. Therefore, the MMU is
 * disabled before making the change.
 *
 * No assumption is made about what data this function needs, therefore all the
 * caches are flushed in order to ensure coherency. A future optimization would
 * be to only flush the required data to main memory.
 */
int xlat_make_tables_readonly(void)
{
	assert(tf_xlat_ctx.initialized == true);
#ifdef __aarch64__
	if (tf_xlat_ctx.xlat_regime == EL1_EL0_REGIME) {
		disable_mmu_el1();
	} else if (tf_xlat_ctx.xlat_regime == EL3_REGIME) {
		disable_mmu_el3();
	} else {
		assert(tf_xlat_ctx.xlat_regime == EL2_REGIME);
		return -1;
	}

	/* Flush all caches. */
	dcsw_op_all(DCCISW);
#else /* !__aarch64__ */
	assert(tf_xlat_ctx.xlat_regime == EL1_EL0_REGIME);
	/*
	 * On AArch32, we flush the caches before disabling the MMU. The reason
	 * for this is that the dcsw_op_all AArch32 function pushes some
	 * registers onto the stack under the assumption that it is writing to
	 * cache, which is not true with the MMU off. This would result in the
	 * stack becoming corrupted and a wrong/junk value for the LR being
	 * restored at the end of the routine.
	 */
	dcsw_op_all(DC_OP_CISW);
	disable_mmu_secure();
#endif

	int rc = xlat_change_mem_attributes_ctx(&tf_xlat_ctx,
				(uintptr_t)tf_xlat_ctx.tables,
				tf_xlat_ctx.tables_num * XLAT_TABLE_SIZE,
				MT_RO_DATA | MT_SECURE);

#ifdef __aarch64__
	if (tf_xlat_ctx.xlat_regime == EL1_EL0_REGIME) {
		enable_mmu_el1(0U);
	} else {
		assert(tf_xlat_ctx.xlat_regime == EL3_REGIME);
		enable_mmu_el3(0U);
	}
#else /* !__aarch64__ */
	enable_mmu_svc_mon(0U);
#endif

	if (rc == 0) {
		tf_xlat_ctx.readonly_tables = true;
	}

	return rc;
}
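
/*
 * Illustrative usage sketch (not part of the upstream file): once a BL image
 * has registered all of its regions, initialised the tables and enabled the
 * MMU, it can lock the tables down. The error handling here is only a sketch.
 *
 *	if (xlat_make_tables_readonly() != 0) {
 *		ERROR("Failed to make the translation tables read-only\n");
 *		panic();
 *	}
 */
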
#endif /* PLAT_RO_XLAT_TABLES */

/*
 * If dynamic allocation of new regions is disabled then by the time we call the
 * function enabling the MMU, we'll have registered all the memory regions to
 * map for the system's lifetime. Therefore, at this point we know the maximum
 * physical address that will ever be mapped.
 *
 * If dynamic allocation is enabled then we can't make any such assumption
 * because the maximum physical address could get pushed while adding a new
 * region. Therefore, in this case we have to assume that the whole address
 * space size might be mapped.
 */
#ifdef PLAT_XLAT_TABLES_DYNAMIC
#define MAX_PHYS_ADDR	tf_xlat_ctx.pa_max_address
#else
#define MAX_PHYS_ADDR	tf_xlat_ctx.max_pa
#endif

#ifdef __aarch64__

void enable_mmu_el1(unsigned int flags)
{
	setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
		      tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
		      tf_xlat_ctx.va_max_address, EL1_EL0_REGIME);
	enable_mmu_direct_el1(flags);
}

void enable_mmu_el2(unsigned int flags)
{
	setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
		      tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
		      tf_xlat_ctx.va_max_address, EL2_REGIME);
	enable_mmu_direct_el2(flags);
}

void enable_mmu_el3(unsigned int flags)
{
	setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
		      tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
		      tf_xlat_ctx.va_max_address, EL3_REGIME);
	enable_mmu_direct_el3(flags);
}

void enable_mmu(unsigned int flags)
{
	switch (get_current_el_maybe_constant()) {
	case 1:
		enable_mmu_el1(flags);
		break;
	case 2:
		enable_mmu_el2(flags);
		break;
	case 3:
		enable_mmu_el3(flags);
		break;
	default:
		panic();
	}
}
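
/*
 * Illustrative usage sketch (not part of the upstream file): common code that
 * may run at EL1, EL2 or EL3 can use the wrapper above instead of selecting
 * the EL-specific variant itself.
 *
 *	init_xlat_tables();
 *	enable_mmu(0U);
 */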

#else /* !__aarch64__ */

void enable_mmu_svc_mon(unsigned int flags)
{
	setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
		      tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
		      tf_xlat_ctx.va_max_address, EL1_EL0_REGIME);
	enable_mmu_direct_svc_mon(flags);
}

void enable_mmu_hyp(unsigned int flags)
{
	setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
		      tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
		      tf_xlat_ctx.va_max_address, EL2_REGIME);
	enable_mmu_direct_hyp(flags);
}

#endif /* __aarch64__ */