/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2016, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */
#ifndef __MM_CORE_MMU_ARCH_H
#define __MM_CORE_MMU_ARCH_H

#ifndef __ASSEMBLER__
#include <arm.h>
#include <assert.h>
#include <compiler.h>
#include <config.h>
#include <kernel/user_ta.h>
#include <mm/tee_mmu_types.h>
#include <types_ext.h>
#include <util.h>
#endif

#include <platform_config.h>

/*
 * Platforms can define TRUSTED_{S,D}RAM_* or TZ{S,D}RAM_*. To ease the
 * transition to TRUSTED_{S,D}RAM_*, define those here from the legacy
 * defines when they are missing.
 */
#ifdef TZSRAM_BASE
#ifdef TRUSTED_SRAM_BASE
#error TRUSTED_SRAM_BASE is already defined
#endif
#define TRUSTED_SRAM_BASE	TZSRAM_BASE
#define TRUSTED_SRAM_SIZE	TZSRAM_SIZE
#endif

#ifdef TZDRAM_BASE
#ifdef TRUSTED_DRAM_BASE
#error TRUSTED_DRAM_BASE is already defined
#endif
#define TRUSTED_DRAM_BASE	TZDRAM_BASE
#define TRUSTED_DRAM_SIZE	TZDRAM_SIZE
#endif
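
/*
 * Illustrative example (hypothetical values): a legacy platform_config.h
 * providing only
 *
 *	#define TZDRAM_BASE 0x0e100000
 *	#define TZDRAM_SIZE 0x00f00000
 *
 * ends up with TRUSTED_DRAM_BASE/TRUSTED_DRAM_SIZE set to the same
 * values by the fallback above.
 */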

#define SMALL_PAGE_SHIFT	U(12)

#ifdef CFG_WITH_LPAE
#define CORE_MMU_PGDIR_SHIFT	U(21)
#define CORE_MMU_PGDIR_LEVEL	U(3)
#else
#define CORE_MMU_PGDIR_SHIFT	U(20)
#define CORE_MMU_PGDIR_LEVEL	U(2)
#endif
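
/*
 * These shifts translate directly into mapping granularity: a small page
 * covers 4 KiB (1 << 12) and a "pgdir" entry covers 2 MiB (1 << 21) with
 * LPAE or 1 MiB (1 << 20) with the short-descriptor table format.
 */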

#define CORE_MMU_USER_CODE_SHIFT	SMALL_PAGE_SHIFT

#define CORE_MMU_USER_PARAM_SHIFT	SMALL_PAGE_SHIFT

/*
 * Level of base table (i.e. first level of page table),
 * depending on address space
 */
#if !defined(CFG_WITH_LPAE) || (CFG_LPAE_ADDR_SPACE_BITS < 40)
#define CORE_MMU_BASE_TABLE_SHIFT	U(30)
#define CORE_MMU_BASE_TABLE_LEVEL	U(1)
#elif (CFG_LPAE_ADDR_SPACE_BITS <= 48)
#define CORE_MMU_BASE_TABLE_SHIFT	U(39)
#define CORE_MMU_BASE_TABLE_LEVEL	U(0)
#else /* (CFG_LPAE_ADDR_SPACE_BITS > 48) */
#error "CFG_WITH_LPAE with CFG_LPAE_ADDR_SPACE_BITS > 48 isn't supported!"
#endif
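
/*
 * For example (derived from the constants above): with
 * CFG_LPAE_ADDR_SPACE_BITS = 32 the base table is a level 1 table where
 * each entry maps 1 GiB (1 << 30), while a 48-bit address space starts
 * at level 0 with entries each mapping 512 GiB (1 << 39).
 */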

#ifdef CFG_WITH_LPAE
/*
 * CORE_MMU_BASE_TABLE_OFFSET is used when switching to/from the reduced
 * kernel mapping. The actual value depends on internals in
 * core_mmu_lpae.c which we'd rather not expose here. There's a
 * compile-time assertion to check that these magic numbers are correct.
 */
#define CORE_MMU_BASE_TABLE_OFFSET \
	(BIT(CFG_LPAE_ADDR_SPACE_BITS - CORE_MMU_BASE_TABLE_SHIFT) * U(8))
#endif
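
/*
 * A worked example, assuming CFG_LPAE_ADDR_SPACE_BITS = 32:
 * BIT(32 - 30) * 8 = 32 bytes, that is, four 8-byte LPAE descriptors,
 * the size of one such base table.
 */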

#ifndef __ASSEMBLER__

/*
 * Assembly code in enable_mmu() depends on the layout of this struct.
 */
struct core_mmu_config {
#if defined(ARM64)
	uint64_t tcr_el1;
	uint64_t mair_el1;
	uint64_t ttbr0_el1_base;
	uint64_t ttbr0_core_offset;
	uint64_t map_offset;
#elif defined(CFG_WITH_LPAE)
	uint32_t ttbcr;
	uint32_t mair0;
	uint32_t ttbr0_base;
	uint32_t ttbr0_core_offset;
	uint32_t map_offset;
#else
	uint32_t prrr;
	uint32_t nmrr;
	uint32_t dacr;
	uint32_t ttbcr;
	uint32_t ttbr;
	uint32_t map_offset;
#endif
};
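
/*
 * Since enable_mmu() addresses the fields by fixed offsets, note the
 * resulting ARM64 layout: tcr_el1 at offset 0, mair_el1 at 8,
 * ttbr0_el1_base at 16, ttbr0_core_offset at 24 and map_offset at 32,
 * with no padding between the uint64_t members.
 */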

#ifdef CFG_WITH_LPAE
/*
 * struct core_mmu_user_map - current user mapping register state
 * @user_map: physical address of the user map translation table
 * @asid: ASID for the user map
 *
 * Note that this struct should be treated as opaque since its content
 * depends on the descriptor table format.
 */
struct core_mmu_user_map {
	uint64_t user_map;
	uint32_t asid;
};
#else
/*
 * struct core_mmu_user_map - current user mapping register state
 * @ttbr0: content of ttbr0
 * @ctxid: content of contextidr
 *
 * Note that this struct should be treated as opaque since its content
 * depends on the descriptor table format.
 */
struct core_mmu_user_map {
	uint32_t ttbr0;
	uint32_t ctxid;
};
#endif
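
/*
 * A minimal usage sketch, assuming the get/set helpers from the generic
 * core_mmu API (mm/core_mmu.h): save the current user mapping, switch,
 * and restore it later, always treating the struct as opaque:
 *
 *	struct core_mmu_user_map map = { };
 *
 *	core_mmu_get_user_map(&map);
 *	...
 *	core_mmu_set_user_map(&map);
 */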

/* Cache maintenance operation type */
enum cache_op {
	DCACHE_CLEAN,
	DCACHE_AREA_CLEAN,
	DCACHE_INVALIDATE,
	DCACHE_AREA_INVALIDATE,
	ICACHE_INVALIDATE,
	ICACHE_AREA_INVALIDATE,
	DCACHE_CLEAN_INV,
	DCACHE_AREA_CLEAN_INV,
};

/* L1/L2 cache maintenance */
TEE_Result cache_op_inner(enum cache_op op, void *va, size_t len);
#ifdef CFG_PL310
TEE_Result cache_op_outer(enum cache_op op, paddr_t pa, size_t len);
#else
static inline TEE_Result cache_op_outer(enum cache_op op __unused,
					paddr_t pa __unused,
					size_t len __unused)
{
	/* No L2 cache maintenance to do when there's no PL310 */
	return TEE_SUCCESS;
}
#endif
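
/*
 * An illustrative sequence: clean and invalidate a buffer in both cache
 * levels before handing it to a non-coherent device. Inner maintenance
 * works on virtual addresses, outer (PL310) maintenance on physical:
 *
 *	res = cache_op_inner(DCACHE_AREA_CLEAN_INV, va, len);
 *	if (!res)
 *		res = cache_op_outer(DCACHE_AREA_CLEAN_INV, pa, len);
 */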

#if defined(ARM64)
unsigned int core_mmu_arm64_get_pa_width(void);
#endif

static inline bool core_mmu_check_max_pa(paddr_t pa __maybe_unused)
{
#if defined(ARM64)
	return pa <= (BIT64(core_mmu_arm64_get_pa_width()) - 1);
#elif defined(CFG_CORE_LARGE_PHYS_ADDR)
	return pa <= (BIT64(40) - 1);
#else
	COMPILE_TIME_ASSERT(sizeof(paddr_t) == sizeof(uint32_t));
	return true;
#endif
}
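
/*
 * For instance, with CFG_CORE_LARGE_PHYS_ADDR the check above accepts
 * physical addresses up to BIT64(40) - 1 = 0xffffffffff, i.e. a 40-bit
 * physical address space.
 */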

/*
 * Special barrier to make sure all the changes to translation tables are
 * visible before returning.
 */
static inline void core_mmu_table_write_barrier(void)
{
	dsb_ishst();
}
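
/*
 * Typical usage sketch (table and new_desc are hypothetical names):
 * write the new table entries first, issue this barrier, and only then
 * perform TLB maintenance for the updated range:
 *
 *	table[idx] = new_desc;
 *	core_mmu_table_write_barrier();
 */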

static inline bool core_mmu_entry_have_security_bit(uint32_t attr)
{
	return !(attr & TEE_MATTR_TABLE) || !IS_ENABLED(CFG_WITH_LPAE);
}
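
/*
 * In other words: with the short-descriptor format every entry carries a
 * security state, while with LPAE only block and page entries do (table
 * entries fail this test).
 */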

static inline unsigned int core_mmu_get_va_width(void)
{
	if (IS_ENABLED(ARM64)) {
		COMPILE_TIME_ASSERT(CFG_LPAE_ADDR_SPACE_BITS >= 32);
		COMPILE_TIME_ASSERT(CFG_LPAE_ADDR_SPACE_BITS <= 48);
		return CFG_LPAE_ADDR_SPACE_BITS;
	}
	return 32;
}

static inline bool core_mmu_va_is_valid(vaddr_t va)
{
	return va < BIT64(core_mmu_get_va_width());
}
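
/*
 * Example: on ARM64 with CFG_LPAE_ADDR_SPACE_BITS = 40, every VA below
 * BIT64(40) is valid, while 32-bit builds always use a 32-bit VA space.
 */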

static inline bool core_mmu_level_in_range(unsigned int level)
{
#if CORE_MMU_BASE_TABLE_LEVEL == 0
	return level <= CORE_MMU_PGDIR_LEVEL;
#else
	return level >= CORE_MMU_BASE_TABLE_LEVEL &&
	       level <= CORE_MMU_PGDIR_LEVEL;
#endif
}
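
/*
 * Example: an LPAE build with a level 0 base table accepts levels 0..3
 * here, whereas the short-descriptor format only has levels 1..2.
 */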

#endif /*__ASSEMBLER__*/

#endif /* __MM_CORE_MMU_ARCH_H */