xref: /rk3399_ARM-atf/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c (revision b56dc2a98cab0ea618cce83b3702814b7fcafd7d)
1 /*
2  * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
3  *
4  * SPDX-License-Identifier: BSD-3-Clause
5  */
6 
7 #include <arch.h>
8 #include <arch_helpers.h>
9 #include <assert.h>
10 #include <cassert.h>
11 #include <platform_def.h>
12 #include <utils.h>
13 #include <utils_def.h>
14 #include <xlat_tables_v2.h>
15 #include "../xlat_tables_private.h"
16 
17 #if ARM_ARCH_MAJOR == 7 && !defined(ARMV7_SUPPORTS_LARGE_PAGE_ADDRESSING)
18 #error ARMv7 target does not support LPAE MMU descriptors
19 #endif
20 
/*
 * MMU configuration values (MAIR0, TTBCR, TTBR0 low/high words) filled in
 * by setup_mmu_cfg() below, indexed by the MMU_CFG_* constants.
 */
uint32_t mmu_cfg_params[MMU_CFG_PARAM_MAX];
22 
23 /*
24  * Returns 1 if the provided granule size is supported, 0 otherwise.
25  */
/*
 * Returns 1 if the provided granule size is supported, 0 otherwise.
 */
int xlat_arch_is_granule_size_supported(size_t size)
{
	/*
	 * The long-descriptor translation table format used by the Trusted
	 * Firmware only supports a 4 KiB translation granule.
	 */
	return (size == 4096U) ? 1 : 0;
}
34 
/*
 * Return the largest granule size supported by this architecture backend:
 * 4 KiB, the only granule the long-descriptor format supports here.
 */
size_t xlat_arch_get_max_supported_granule_size(void)
{
	return (size_t)4096U;
}
39 
#if ENABLE_ASSERTIONS
/*
 * Return the highest representable physical address. The long-descriptor
 * format provides a 40-bit physical address space, so this is 2^40 - 1.
 */
unsigned long long xlat_arch_get_max_supported_pa(void)
{
	return 0xFFFFFFFFFFULL; /* (1ULL << 40) - 1 */
}
#endif /* ENABLE_ASSERTIONS */
47 
48 int is_mmu_enabled_ctx(const xlat_ctx_t *ctx __unused)
49 {
50 	return (read_sctlr() & SCTLR_M_BIT) != 0;
51 }
52 
/*
 * Invalidate the TLB entry that maps 'va' in the Inner Shareable domain,
 * across all ASIDs (tlbimvaais helper). Completion of the invalidation is
 * not guaranteed until xlat_arch_tlbi_va_sync() is called.
 */
void xlat_arch_tlbi_va(uintptr_t va)
{
	/*
	 * Ensure the translation table write has drained into memory before
	 * invalidating the TLB entry.
	 */
	dsbishst();

	tlbimvaais(TLBI_ADDR(va));
}
63 
/*
 * Invalidate the TLB entry that maps 'va', for the given translation
 * regime. The regime argument is ignored here: in this AArch32 backend the
 * same inner-shareable, all-ASID invalidation is issued regardless —
 * NOTE(review): presumably only the Secure PL1&0 regime is managed; confirm
 * against callers.
 */
void xlat_arch_tlbi_va_regime(uintptr_t va, int xlat_regime __unused)
{
	/*
	 * Ensure the translation table write has drained into memory before
	 * invalidating the TLB entry.
	 */
	dsbishst();

	tlbimvaais(TLBI_ADDR(va));
}
74 
/*
 * Wait for previously issued TLB invalidations to complete and ensure their
 * effects are visible to this PE: invalidate branch predictors, then DSB to
 * complete the maintenance, then ISB to synchronise the instruction stream.
 */
void xlat_arch_tlbi_va_sync(void)
{
	/* Invalidate all entries from branch predictors. */
	bpiallis();

	/*
	 * A TLB maintenance instruction can complete at any time after
	 * it is issued, but is only guaranteed to be complete after the
	 * execution of DSB by the PE that executed the TLB maintenance
	 * instruction. After the TLB invalidate instruction is
	 * complete, no new memory accesses using the invalidated TLB
	 * entries will be observed by any observer of the system
	 * domain. See section D4.8.2 of the ARMv8 (issue k), paragraph
	 * "Ordering and completion of TLB maintenance instructions".
	 */
	dsbish();

	/*
	 * The effects of a completed TLB maintenance instruction are
	 * only guaranteed to be visible on the PE that executed the
	 * instruction after the execution of an ISB instruction by the
	 * PE that executed the TLB maintenance instruction.
	 */
	isb();
}
100 
/*
 * Return the exception level this library is executing at.
 */
int xlat_arch_current_el(void)
{
	/*
	 * With EL3 in AArch32 mode, every secure PL1 mode (Monitor, System,
	 * SVC, Abort, UND, IRQ and FIQ) executes at EL3, so always report 3.
	 */
	return 3;
}
109 
110 /*******************************************************************************
111  * Function for enabling the MMU in Secure PL1, assuming that the page tables
112  * have already been created.
113  ******************************************************************************/
114 void setup_mmu_cfg(unsigned int flags,
115 		const uint64_t *base_table,
116 		unsigned long long max_pa,
117 		uintptr_t max_va)
118 {
119 	u_register_t mair0, ttbcr;
120 	uint64_t ttbr0;
121 
122 	assert(IS_IN_SECURE());
123 
124 	/* Set attributes in the right indices of the MAIR */
125 	mair0 = MAIR0_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
126 	mair0 |= MAIR0_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,
127 			ATTR_IWBWA_OWBWA_NTR_INDEX);
128 	mair0 |= MAIR0_ATTR_SET(ATTR_NON_CACHEABLE,
129 			ATTR_NON_CACHEABLE_INDEX);
130 
131 	/*
132 	 * Configure the control register for stage 1 of the PL1&0 translation
133 	 * regime.
134 	 */
135 
136 	/* Use the Long-descriptor translation table format. */
137 	ttbcr = TTBCR_EAE_BIT;
138 
139 	/*
140 	 * Disable translation table walk for addresses that are translated
141 	 * using TTBR1. Therefore, only TTBR0 is used.
142 	 */
143 	ttbcr |= TTBCR_EPD1_BIT;
144 
145 	/*
146 	 * Limit the input address ranges and memory region sizes translated
147 	 * using TTBR0 to the given virtual address space size, if smaller than
148 	 * 32 bits.
149 	 */
150 	if (max_va != UINT32_MAX) {
151 		uintptr_t virtual_addr_space_size = max_va + 1;
152 		assert(CHECK_VIRT_ADDR_SPACE_SIZE(virtual_addr_space_size));
153 		/*
154 		 * __builtin_ctzll(0) is undefined but here we are guaranteed
155 		 * that virtual_addr_space_size is in the range [1, UINT32_MAX].
156 		 */
157 		ttbcr |= 32 - __builtin_ctzll(virtual_addr_space_size);
158 	}
159 
160 	/*
161 	 * Set the cacheability and shareability attributes for memory
162 	 * associated with translation table walks using TTBR0.
163 	 */
164 	if (flags & XLAT_TABLE_NC) {
165 		/* Inner & outer non-cacheable non-shareable. */
166 		ttbcr |= TTBCR_SH0_NON_SHAREABLE | TTBCR_RGN0_OUTER_NC |
167 			TTBCR_RGN0_INNER_NC;
168 	} else {
169 		/* Inner & outer WBWA & shareable. */
170 		ttbcr |= TTBCR_SH0_INNER_SHAREABLE | TTBCR_RGN0_OUTER_WBA |
171 			TTBCR_RGN0_INNER_WBA;
172 	}
173 
174 	/* Set TTBR0 bits as well */
175 	ttbr0 = (uint64_t)(uintptr_t) base_table;
176 #if ARM_ARCH_AT_LEAST(8, 2)
177 	/*
178 	 * Enable CnP bit so as to share page tables with all PEs.
179 	 * Mandatory for ARMv8.2 implementations.
180 	 */
181 	ttbr0 |= TTBR_CNP_BIT;
182 #endif
183 
184 	/* Now populate MMU configuration */
185 	mmu_cfg_params[MMU_CFG_MAIR0] = mair0;
186 	mmu_cfg_params[MMU_CFG_TCR] = ttbcr;
187 	mmu_cfg_params[MMU_CFG_TTBR0_LO] = (uint32_t) ttbr0;
188 	mmu_cfg_params[MMU_CFG_TTBR0_HI] = ttbr0 >> 32;
189 }
190