/*
 * Copyright (c) 2017-2025, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#include <arch.h>
#include <arch_features.h>
#include <arch_helpers.h>
#include <lib/cassert.h>
#include <lib/utils_def.h>
#include <lib/xlat_tables/xlat_tables_v2.h>

#include "../xlat_tables_private.h"

/*
 * Returns true if the provided granule size is supported, false otherwise.
 */
bool xlat_arch_is_granule_size_supported(size_t size)
{
	if (size == PAGE_SIZE_4KB) {
		/* ID_AA64MMFR0_EL1.TGRAN4: MSB set means 4KB is unsupported */
		return is_feat_tgran4K_present();
	} else if (size == PAGE_SIZE_16KB) {
		/* ID_AA64MMFR0_EL1.TGRAN16: non-zero means 16KB is supported */
		return is_feat_tgran16K_present();
	} else if (size == PAGE_SIZE_64KB) {
		/* ID_AA64MMFR0_EL1.TGRAN64: MSB set means 64KB is unsupported */
		return is_feat_tgran64K_present();
	} else {
		return false;
	}
}

size_t xlat_arch_get_max_supported_granule_size(void)
{
	if (xlat_arch_is_granule_size_supported(PAGE_SIZE_64KB)) {
		return PAGE_SIZE_64KB;
	} else if (xlat_arch_is_granule_size_supported(PAGE_SIZE_16KB)) {
		return PAGE_SIZE_16KB;
	} else {
		assert(xlat_arch_is_granule_size_supported(PAGE_SIZE_4KB));
		return PAGE_SIZE_4KB;
	}
}

/*
 * Determine the physical address space encoded in the 'attr' parameter for the
 * context 'ctx'.
 *
 * The physical address will fall into one of four spaces (Secure, Non-secure,
 * Root or Realm) if RME is enabled, or into one of two spaces (Secure and
 * Non-secure) otherwise.
 */
uint32_t xlat_arch_get_pas(const xlat_ctx_t *ctx, uint32_t attr)
{
	uint32_t pas = MT_PAS(attr);

	(void)ctx;
	assert(ctx != NULL);

	if (is_feat_rme_supported()) {
		/*
		 * When RME is enabled, the xlat library does not support any
		 * translation regime other than EL3 stage 1.
		 */
		assert(ctx->xlat_regime == EL3_REGIME);
		assert((pas == MT_NS) || (pas == MT_SECURE) || (pas == MT_ROOT)
		       || (pas == MT_REALM));

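		/*
		 * Output PA space selection at EL3 stage 1 with RME, as
		 * encoded in the cases below:
		 *
		 *   NSE  NS | PA space
		 *   --------+-----------
		 *    0    0 | Secure
		 *    0    1 | Non-secure
		 *    1    0 | Root
		 *    1    1 | Realm
		 *
		 * The LOWER_ATTRS() value returned here is ORed into the
		 * lower attributes of the block/page descriptor being built.
		 */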
		switch (pas) {
		case MT_REALM:
#if ENABLE_RMM
			/* TTD.NSE = 1 and TTD.NS = 1 for Realm PAS */
			return LOWER_ATTRS(EL3_S1_NSE | NS);
#else
			/* MT_REALM mappings are not supported when ENABLE_RMM=0 */
			panic();
#endif
		case MT_ROOT:
			/* TTD.NSE = 1 and TTD.NS = 0 for Root PAS */
			return LOWER_ATTRS(EL3_S1_NSE);
		case MT_NS:
			/* Non-secure PAS */
			return LOWER_ATTRS(NS);
		default:
			if (is_feat_sel2_supported()) {
				/* Secure PAS */
				return LOWER_ATTRS(0U);
			} else {
				/*
				 * Secure mappings (NSE = 0, NS = 0) are not
				 * supported when SEL2 is not implemented;
				 * creating such a mapping would place the
				 * output PA in the Non-secure space.
				 *
				 * This case should not occur, as FEAT_SEL2 is
				 * a mandatory architectural feature from
				 * Armv8.4 onward.
				 */
				assert(false);
				return LOWER_ATTRS(NS);
			}
		}
	} else {
		/*
		 * To support feature detection of RME, convert MT_ROOT to
		 * MT_SECURE when RME is not present.
		 */
		if ((ctx->xlat_regime == EL3_REGIME) && (pas == MT_ROOT)) {
			pas = MT_SECURE;
		}

		assert((pas == MT_NS) || (pas == MT_SECURE));

		if (pas == MT_NS) {
			return LOWER_ATTRS(NS);
		} else {
			/* Secure PAS */
			return LOWER_ATTRS(0U);
		}
	}
}

unsigned long long tcr_physical_addr_size_bits(unsigned long long max_addr)
{
	/* Physical address can't exceed 48 bits */
	assert((max_addr & ADDR_MASK_48_TO_63) == 0U);

	/* 48 bits address */
	if ((max_addr & ADDR_MASK_44_TO_47) != 0U)
		return TCR_PS_BITS_256TB;

	/* 44 bits address */
	if ((max_addr & ADDR_MASK_42_TO_43) != 0U)
		return TCR_PS_BITS_16TB;

	/* 42 bits address */
	if ((max_addr & ADDR_MASK_40_TO_41) != 0U)
		return TCR_PS_BITS_4TB;

	/* 40 bits address */
	if ((max_addr & ADDR_MASK_36_TO_39) != 0U)
		return TCR_PS_BITS_1TB;

	/* 36 bits address */
	if ((max_addr & ADDR_MASK_32_TO_35) != 0U)
		return TCR_PS_BITS_64GB;

	return TCR_PS_BITS_4GB;
}
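
/*
 * Worked example: max_addr = 0xFF_FFFF_FFFF (a 40-bit address) has bits
 * [39:36] set, so the walk above stops at ADDR_MASK_36_TO_39 and returns
 * TCR_PS_BITS_1TB (1TB = 2^40 bytes). An address of 0xFFFF_FFFF or below
 * falls through every check and yields TCR_PS_BITS_4GB.
 */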

#if ENABLE_ASSERTIONS
/*
 * Physical Address ranges supported in the AArch64 Memory Model. The value
 * 0b110 is supported from Armv8.2 onwards.
 */
static const unsigned int pa_range_bits_arr[] = {
	PARANGE_0000, PARANGE_0001, PARANGE_0010, PARANGE_0011, PARANGE_0100,
	PARANGE_0101, PARANGE_0110, PARANGE_0111
};

unsigned long long xlat_arch_get_max_supported_pa(void)
{
	u_register_t pa_range = read_id_aa64mmfr0_el1() &
				ID_AA64MMFR0_EL1_PARANGE_MASK;

	/* All other values are reserved */
	assert(pa_range < ARRAY_SIZE(pa_range_bits_arr));

	return (1ULL << pa_range_bits_arr[pa_range]) - 1ULL;
}
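
/*
 * Worked example: an ID_AA64MMFR0_EL1.PARange value of 0b0101 selects
 * PARANGE_0101 (48 bits), so the maximum supported PA returned is
 * (1ULL << 48) - 1 = 0xFFFF_FFFF_FFFF.
 */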

/*
 * Return the minimum virtual address space size supported by the
 * architecture. FEAT_TTST raises the maximum TxSZ from 39 to 48, shrinking
 * the minimum address space size from 32MB to 64KB.
 */
uintptr_t xlat_get_min_virt_addr_space_size(void)
{
	uintptr_t ret;

	if (is_feat_ttst_present())
		ret = MIN_VIRT_ADDR_SPACE_SIZE_TTST;
	else
		ret = MIN_VIRT_ADDR_SPACE_SIZE;

	return ret;
}
#endif /* ENABLE_ASSERTIONS */

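/* Return true if the MMU of the given translation regime is enabled. */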
bool is_mmu_enabled_ctx(const xlat_ctx_t *ctx)
{
	if (ctx->xlat_regime == EL1_EL0_REGIME) {
		assert(xlat_arch_current_el() >= 1U);
		return (read_sctlr_el1() & SCTLR_M_BIT) != 0U;
	} else if (ctx->xlat_regime == EL2_REGIME) {
		assert(xlat_arch_current_el() >= 2U);
		return (read_sctlr_el2() & SCTLR_M_BIT) != 0U;
	} else {
		assert(ctx->xlat_regime == EL3_REGIME);
		assert(xlat_arch_current_el() >= 3U);
		return (read_sctlr_el3() & SCTLR_M_BIT) != 0U;
	}
}

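/* Return true if the data cache is enabled at the current exception level. */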
bool is_dcache_enabled(void)
{
	unsigned int el = get_current_el_maybe_constant();

	if (el == 1U) {
		return (read_sctlr_el1() & SCTLR_C_BIT) != 0U;
	} else if (el == 2U) {
		return (read_sctlr_el2() & SCTLR_C_BIT) != 0U;
	} else {
		return (read_sctlr_el3() & SCTLR_C_BIT) != 0U;
	}
}

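/*
 * Return the execute-never descriptor attributes for the given translation
 * regime. The EL1&0 regime provides separate unprivileged (UXN) and
 * privileged (PXN) bits, whereas the single privilege level of the EL2 and
 * EL3 regimes is covered by one XN bit.
 */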
uint64_t xlat_arch_regime_get_xn_desc(int xlat_regime)
{
	if (xlat_regime == EL1_EL0_REGIME) {
		return UPPER_ATTRS(UXN) | UPPER_ATTRS(PXN);
	} else {
		assert((xlat_regime == EL2_REGIME) ||
		       (xlat_regime == EL3_REGIME));
		return UPPER_ATTRS(XN);
	}
}

void xlat_arch_tlbi_va(uintptr_t va, int xlat_regime)
{
	/*
	 * Ensure the translation table write has drained into memory before
	 * invalidating the TLB entry.
	 */
	dsbishst();

	/*
	 * This function only supports invalidation of TLB entries for the
	 * EL3, EL2 and EL1&0 translation regimes.
	 *
	 * Also, it is architecturally UNDEFINED to invalidate TLB entries of
	 * a higher exception level (see section D4.9.2 of the Arm ARM rev
	 * B.a).
	 */
	if (xlat_regime == EL1_EL0_REGIME) {
		assert(xlat_arch_current_el() >= 1U);
		tlbivaae1is(TLBI_ADDR(va));
	} else if (xlat_regime == EL2_REGIME) {
		assert(xlat_arch_current_el() >= 2U);
		tlbivae2is(TLBI_ADDR(va));
	} else {
		assert(xlat_regime == EL3_REGIME);
		assert(xlat_arch_current_el() >= 3U);
		tlbivae3is(TLBI_ADDR(va));
	}
}

void xlat_arch_tlbi_va_sync(void)
{
	/*
	 * A TLB maintenance instruction can complete at any time after
	 * it is issued, but is only guaranteed to be complete after the
	 * execution of DSB by the PE that executed the TLB maintenance
	 * instruction. After the TLB invalidate instruction is
	 * complete, no new memory accesses using the invalidated TLB
	 * entries will be observed by any observer of the system
	 * domain. See section D4.8.2 of the Armv8 ARM (issue k),
	 * paragraph "Ordering and completion of TLB maintenance
	 * instructions".
	 */
	dsbish();

	/*
	 * The effects of a completed TLB maintenance instruction are
	 * only guaranteed to be visible on the PE that executed the
	 * instruction after the execution of an ISB instruction by the
	 * PE that executed the TLB maintenance instruction.
	 */
	isb();
}
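
/*
 * Illustrative unmap sequence using the two helpers above (a sketch, not a
 * verbatim copy of the library's internal flow): invalidate the descriptor,
 * issue a broadcast TLBI per VA, then synchronise once at the end.
 *
 *     table[idx] = INVALID_DESC;
 *     xlat_arch_tlbi_va(va, EL3_REGIME);
 *     ...further entries and TLBIs...
 *     xlat_arch_tlbi_va_sync();
 */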

unsigned int xlat_arch_current_el(void)
{
	unsigned int el = (unsigned int)GET_EL(read_CurrentEl());

	assert(el > 0U);

	return el;
}

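/*
 * Compute the MAIR, TCR and TTBR0 values for the given translation regime
 * and store them in the 'params' array, to be programmed into the
 * corresponding system registers by the MMU enable code for that regime.
 */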
void setup_mmu_cfg(uint64_t *params, unsigned int flags,
		   const uint64_t *base_table, unsigned long long max_pa,
		   uintptr_t max_va, int xlat_regime)
{
	uint64_t mair, ttbr0, tcr;
	uintptr_t virtual_addr_space_size;

	/* Set attributes in the right indices of the MAIR. */
	mair = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
	mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR, ATTR_IWBWA_OWBWA_NTR_INDEX);
	mair |= MAIR_ATTR_SET(ATTR_NON_CACHEABLE, ATTR_NON_CACHEABLE_INDEX);

	/*
	 * Limit the input address ranges and memory region sizes translated
	 * using TTBR0 to the given virtual address space size.
	 */
	assert(max_va < ((uint64_t)UINTPTR_MAX));

	virtual_addr_space_size = (uintptr_t)max_va + 1U;

	assert(virtual_addr_space_size >=
		xlat_get_min_virt_addr_space_size());
	assert(virtual_addr_space_size <= MAX_VIRT_ADDR_SPACE_SIZE);
	assert(IS_POWER_OF_TWO(virtual_addr_space_size));

	/*
	 * __builtin_ctzll(0) is undefined but here we are guaranteed that
	 * virtual_addr_space_size is in the range [1, UINTPTR_MAX].
	 */
	int t0sz = 64 - __builtin_ctzll(virtual_addr_space_size);
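	/*
	 * Worked example: a 4GB (2^32 byte) virtual address space gives
	 * __builtin_ctzll(2^32) = 32, so T0SZ = 64 - 32 = 32, matching the
	 * TCR definition of the region size as 2^(64 - T0SZ) bytes.
	 */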

	tcr = (uint64_t)t0sz << TCR_T0SZ_SHIFT;

	/*
	 * Set the cacheability and shareability attributes for memory
	 * associated with translation table walks.
	 */
	if ((flags & XLAT_TABLE_NC) != 0U) {
		/* Inner & outer non-cacheable non-shareable. */
		tcr |= TCR_SH_NON_SHAREABLE |
			TCR_RGN_OUTER_NC | TCR_RGN_INNER_NC;
	} else {
		/* Inner & outer WBWA & shareable. */
		tcr |= TCR_SH_INNER_SHAREABLE |
			TCR_RGN_OUTER_WBA | TCR_RGN_INNER_WBA;
	}

	/*
	 * It is safer to restrict the max physical address accessible by the
	 * hardware as much as possible.
	 */
	unsigned long long tcr_ps_bits = tcr_physical_addr_size_bits(max_pa);

	if (xlat_regime == EL1_EL0_REGIME) {
		/*
		 * TCR_EL1.EPD1: Disable translation table walk for addresses
		 * that are translated using TTBR1_EL1.
		 */
		tcr |= TCR_EPD1_BIT | (tcr_ps_bits << TCR_EL1_IPS_SHIFT);
	} else if (xlat_regime == EL2_REGIME) {
		tcr |= TCR_EL2_RES1 | (tcr_ps_bits << TCR_EL2_PS_SHIFT);
	} else {
		assert(xlat_regime == EL3_REGIME);
		tcr |= TCR_EL3_RES1 | (tcr_ps_bits << TCR_EL3_PS_SHIFT);
	}

	if (is_feat_morello_supported()) {
		/*
		 * Enable capability loads and stores:
		 * TCR_HWU59_BIT -> CDBM
		 * TCR_HWU60_BIT -> SC
		 * TCR_HWU61_BIT -> LC[0]
		 * TCR_HWU62_BIT -> LC[1]
		 */
		tcr |= (TCR_HPD_BIT | TCR_HWU59_BIT | TCR_HWU60_BIT |
			TCR_HWU61_BIT | TCR_HWU62_BIT);
	}

	/* Set TTBR bits as well */
	ttbr0 = (uint64_t) base_table;

	if (is_feat_ttcnp_present()) {
		/* Enable CnP bit so as to share page tables with all PEs. */
		ttbr0 |= TTBR_CNP_BIT;
	}

	params[MMU_CFG_MAIR] = mair;
	params[MMU_CFG_TCR] = tcr;
	params[MMU_CFG_TTBR0] = ttbr0;
}
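
/*
 * Minimal usage sketch (an illustration, not part of this file's API): the
 * computed parameters are meant to be applied to the corresponding system
 * registers before the MMU is enabled, e.g. for the EL3 regime:
 *
 *     uint64_t params[MMU_CFG_PARAM_MAX];
 *
 *     setup_mmu_cfg(params, 0U, base_xlat_table, max_pa, max_va, EL3_REGIME);
 *     write_mair_el3(params[MMU_CFG_MAIR]);
 *     write_tcr_el3(params[MMU_CFG_TCR]);
 *     write_ttbr0_el3(params[MMU_CFG_TTBR0]);
 *     isb();
 *     write_sctlr_el3(read_sctlr_el3() | SCTLR_M_BIT);
 *     isb();
 *
 * In TF-A the enable_mmu_el*() helpers perform this sequence; the exact
 * write order and the variable names above are illustrative.
 */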