xref: /rk3399_ARM-atf/lib/xlat_tables_v2/xlat_tables_context.c (revision 9056f108062c2f893b0b68016016f58e1eb0607d)
1 /*
2  * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
3  *
4  * SPDX-License-Identifier: BSD-3-Clause
5  */
6 
7 #include <assert.h>
8 #include <debug.h>
9 #include <platform_def.h>
10 #include <xlat_tables_defs.h>
11 #include <xlat_tables_v2.h>
12 
13 #include "xlat_tables_private.h"
14 
/*
 * MMU configuration register values for the active translation context. Used
 * from the MMU assembly helpers (filled in by setup_mmu_cfg() before the MMU
 * is enabled; MMU_CFG_PARAM_MAX is the number of parameter slots).
 */
uint64_t mmu_cfg_params[MMU_CFG_PARAM_MAX];
20 
/*
 * Allocate and initialise the default translation context for the BL image
 * currently executing. This instantiates the context object `tf_xlat_ctx`
 * (and its backing mmap array and translation tables) that every wrapper in
 * this file operates on.
 */
REGISTER_XLAT_CONTEXT(tf, MAX_MMAP_REGIONS, MAX_XLAT_TABLES,
		PLAT_VIRT_ADDR_SPACE_SIZE, PLAT_PHY_ADDR_SPACE_SIZE);
27 
28 void mmap_add_region(unsigned long long base_pa, uintptr_t base_va, size_t size,
29 		     unsigned int attr)
30 {
31 	mmap_region_t mm = MAP_REGION(base_pa, base_va, size, attr);
32 
33 	mmap_add_region_ctx(&tf_xlat_ctx, &mm);
34 }
35 
36 void mmap_add(const mmap_region_t *mm)
37 {
38 	mmap_add_ctx(&tf_xlat_ctx, mm);
39 }
40 
41 void mmap_add_region_alloc_va(unsigned long long base_pa, uintptr_t *base_va,
42 			      size_t size, unsigned int attr)
43 {
44 	mmap_region_t mm = MAP_REGION_ALLOC_VA(base_pa, size, attr);
45 
46 	mmap_add_region_alloc_va_ctx(&tf_xlat_ctx, &mm);
47 
48 	*base_va = mm.base_va;
49 }
50 
51 void mmap_add_alloc_va(mmap_region_t *mm)
52 {
53 	while (mm->granularity != 0U) {
54 		assert(mm->base_va == 0U);
55 		mmap_add_region_alloc_va_ctx(&tf_xlat_ctx, mm);
56 		mm++;
57 	}
58 }
59 
60 #if PLAT_XLAT_TABLES_DYNAMIC
61 
62 int mmap_add_dynamic_region(unsigned long long base_pa, uintptr_t base_va,
63 			    size_t size, unsigned int attr)
64 {
65 	mmap_region_t mm = MAP_REGION(base_pa, base_va, size, attr);
66 
67 	return mmap_add_dynamic_region_ctx(&tf_xlat_ctx, &mm);
68 }
69 
70 int mmap_add_dynamic_region_alloc_va(unsigned long long base_pa,
71 				     uintptr_t *base_va, size_t size,
72 				     unsigned int attr)
73 {
74 	mmap_region_t mm = MAP_REGION_ALLOC_VA(base_pa, size, attr);
75 
76 	int rc = mmap_add_dynamic_region_alloc_va_ctx(&tf_xlat_ctx, &mm);
77 
78 	*base_va = mm.base_va;
79 
80 	return rc;
81 }
82 
83 
84 int mmap_remove_dynamic_region(uintptr_t base_va, size_t size)
85 {
86 	return mmap_remove_dynamic_region_ctx(&tf_xlat_ctx,
87 					base_va, size);
88 }
89 
90 #endif /* PLAT_XLAT_TABLES_DYNAMIC */
91 
92 void __init init_xlat_tables(void)
93 {
94 	assert(tf_xlat_ctx.xlat_regime == EL_REGIME_INVALID);
95 
96 	unsigned int current_el = xlat_arch_current_el();
97 
98 	if (current_el == 1U) {
99 		tf_xlat_ctx.xlat_regime = EL1_EL0_REGIME;
100 	} else if (current_el == 2U) {
101 		tf_xlat_ctx.xlat_regime = EL2_REGIME;
102 	} else {
103 		assert(current_el == 3U);
104 		tf_xlat_ctx.xlat_regime = EL3_REGIME;
105 	}
106 
107 	init_xlat_tables_ctx(&tf_xlat_ctx);
108 }
109 
110 int xlat_get_mem_attributes(uintptr_t base_va, uint32_t *attr)
111 {
112 	return xlat_get_mem_attributes_ctx(&tf_xlat_ctx, base_va, attr);
113 }
114 
115 int xlat_change_mem_attributes(uintptr_t base_va, size_t size, uint32_t attr)
116 {
117 	return xlat_change_mem_attributes_ctx(&tf_xlat_ctx, base_va, size, attr);
118 }
119 
/*
 * If dynamic allocation of new regions is disabled then by the time we call the
 * function enabling the MMU, we'll have registered all the memory regions to
 * map for the system's lifetime. Therefore, at this point we know the maximum
 * physical address that will ever be mapped.
 *
 * If dynamic allocation is enabled then we can't make any such assumption
 * because the maximum physical address could get pushed while adding a new
 * region. Therefore, in this case we have to assume that the whole address
 * space size might be mapped.
 *
 * Note this must be a value test (#if), not a definedness test (#ifdef):
 * PLAT_XLAT_TABLES_DYNAMIC is tested with #if earlier in this file, so it may
 * be defined to 0, in which case #ifdef would wrongly select the dynamic
 * branch here.
 */
#if PLAT_XLAT_TABLES_DYNAMIC
#define MAX_PHYS_ADDR	tf_xlat_ctx.pa_max_address
#else
#define MAX_PHYS_ADDR	tf_xlat_ctx.max_pa
#endif
136 
137 #ifdef AARCH32
138 
139 void enable_mmu_svc_mon(unsigned int flags)
140 {
141 	setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
142 		      tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
143 		      tf_xlat_ctx.va_max_address, EL1_EL0_REGIME);
144 	enable_mmu_direct_svc_mon(flags);
145 }
146 
147 void enable_mmu_hyp(unsigned int flags)
148 {
149 	setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
150 		      tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
151 		      tf_xlat_ctx.va_max_address, EL2_REGIME);
152 	enable_mmu_direct_hyp(flags);
153 }
154 
155 #else
156 
157 void enable_mmu_el1(unsigned int flags)
158 {
159 	setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
160 		      tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
161 		      tf_xlat_ctx.va_max_address, EL1_EL0_REGIME);
162 	enable_mmu_direct_el1(flags);
163 }
164 
165 void enable_mmu_el2(unsigned int flags)
166 {
167 	setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
168 		      tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
169 		      tf_xlat_ctx.va_max_address, EL2_REGIME);
170 	enable_mmu_direct_el2(flags);
171 }
172 
173 void enable_mmu_el3(unsigned int flags)
174 {
175 	setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
176 		      tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
177 		      tf_xlat_ctx.va_max_address, EL3_REGIME);
178 	enable_mmu_direct_el3(flags);
179 }
180 
181 #endif /* AARCH32 */
182