/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright 2022-2023 NXP
 */
#ifndef __MM_CORE_MMU_ARCH_H
#define __MM_CORE_MMU_ARCH_H

#ifndef __ASSEMBLER__
#include <assert.h>
#include <compiler.h>
#include <config.h>
#include <kernel/user_ta.h>
#include <mm/tee_mmu_types.h>
#include <riscv.h>
#include <types_ext.h>
#include <util.h>
#endif

#include <platform_config.h>

#ifdef TRUSTED_DRAM_BASE
#error TRUSTED_DRAM_BASE is already defined
#endif
#define TRUSTED_DRAM_BASE TDDRAM_BASE
#define TRUSTED_DRAM_SIZE TDDRAM_SIZE

/* MMU defines */
#ifdef CFG_RISCV_MMU_MODE
#define RISCV_MMU_MODE CFG_RISCV_MMU_MODE
#else
#ifdef RV64
#define RISCV_MMU_MODE U(39)
#else
#define RISCV_MMU_MODE U(32)
#endif
#endif

#if RISCV_MMU_MODE == 57 /* Sv57 */
#define RISCV_SATP_MODE SATP_MODE_SV57
#define RISCV_SATP_MODE_SHIFT U(60)
#define RISCV_SATP_ASID_SHIFT U(44)
#define RISCV_SATP_ASID_WIDTH U(16)
#define RISCV_SATP_ASID_MASK U(0xFFFF)
#define RISCV_MMU_PA_WIDTH U(56)
#define RISCV_MMU_VA_WIDTH U(57)
#elif RISCV_MMU_MODE == 48 /* Sv48 */
#define RISCV_SATP_MODE SATP_MODE_SV48
#define RISCV_SATP_MODE_SHIFT U(60)
#define RISCV_SATP_ASID_SHIFT U(44)
#define RISCV_SATP_ASID_WIDTH U(16)
#define RISCV_SATP_ASID_MASK U(0xFFFF)
#define RISCV_MMU_PA_WIDTH U(56)
#define RISCV_MMU_VA_WIDTH U(48)
#elif RISCV_MMU_MODE == 39 /* Sv39 */
#define RISCV_SATP_MODE SATP_MODE_SV39
#define RISCV_SATP_MODE_SHIFT U(60)
#define RISCV_SATP_ASID_SHIFT U(44)
#define RISCV_SATP_ASID_WIDTH U(16)
#define RISCV_SATP_ASID_MASK U(0xFFFF)
#define RISCV_MMU_PA_WIDTH U(56)
#define RISCV_MMU_VA_WIDTH U(39)
#elif RISCV_MMU_MODE == 32 /* Sv32 */
#define RISCV_SATP_MODE SATP_MODE_SV32
#define RISCV_SATP_MODE_SHIFT U(31)
#define RISCV_SATP_ASID_SHIFT U(22)
#define RISCV_SATP_ASID_WIDTH U(9)
#define RISCV_SATP_ASID_MASK U(0x1FF)
#define RISCV_MMU_PA_WIDTH U(32)
#define RISCV_MMU_VA_WIDTH U(32)
#else
#error Unknown or unsupported MMU mode
#endif
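
#ifndef __ASSEMBLER__
/*
 * Illustrative sketch, not part of the real API: shows how the SATP_*
 * fields defined above combine into a satp value, given the physical
 * page number (PPN) of the root page table and an ASID. The helper
 * name is hypothetical and exists only to document the field layout.
 */
static inline unsigned long core_mmu_satp_compose_example(unsigned long ppn,
							  unsigned long asid)
{
	return ((unsigned long)RISCV_SATP_MODE << RISCV_SATP_MODE_SHIFT) |
	       ((asid & RISCV_SATP_ASID_MASK) << RISCV_SATP_ASID_SHIFT) |
	       ppn;
}
#endif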

#define RISCV_PTES_PER_PT BIT(RISCV_PGLEVEL_BITS)
#define RISCV_PGLEVELS ((RISCV_MMU_VA_WIDTH - RISCV_PGSHIFT) / \
			RISCV_PGLEVEL_BITS)
#define RISCV_MMU_VPN_MASK (BIT(RISCV_PGLEVEL_BITS) - 1)
#define RISCV_MMU_MAX_PGTS 16

#define SMALL_PAGE_SHIFT U(12)

/*
 * RV32:
 * Level 0, shift = 12, 4 KiB pages
 * Level 1, shift = 22, 4 MiB pages
 *
 * RV64:
 * Level 0, shift = 12, 4 KiB pages
 * Level 1, shift = 21, 2 MiB pages
 * Level 2, shift = 30, 1 GiB pages
 * Level 3, shift = 39, 512 GiB pages
 * Level 4, shift = 48, 256 TiB pages
 */
#define CORE_MMU_SHIFT_OF_LEVEL(level) (RISCV_PGLEVEL_BITS * \
					(level) + \
					RISCV_PGSHIFT)

#ifdef RV64
#define CORE_MMU_PAGE_OFFSET_MASK(level) \
	GENMASK_64(CORE_MMU_SHIFT_OF_LEVEL(level) - 1, 0)
#else
#define CORE_MMU_PAGE_OFFSET_MASK(level) \
	GENMASK_32(CORE_MMU_SHIFT_OF_LEVEL(level) - 1, 0)
#endif
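
/*
 * Example (illustrative): with RISCV_PGLEVEL_BITS == 9 on RV64,
 * CORE_MMU_PAGE_OFFSET_MASK(1) expands to GENMASK_64(20, 0), i.e.
 * the offset of an address within a 2 MiB level-1 block.
 */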

#define CORE_MMU_USER_CODE_SHIFT SMALL_PAGE_SHIFT
#define CORE_MMU_USER_PARAM_SHIFT SMALL_PAGE_SHIFT

/*
 * In all MMU modes, CORE_MMU_PGDIR_LEVEL is always 0:
 * Sv32: 4 MiB, 4 KiB
 * +--------------------------------------------+
 * |31        22|21        12|11               0|
 * +--------------------------------------------+
 * |   VPN[1]   |   VPN[0]   |   page offset    |
 * +--------------------------------------------+
 * Sv39: 1 GiB, 2 MiB, 4 KiB
 * +---------------------------------------------------------+
 * |38        30|29        21|20        12|11               0|
 * +---------------------------------------------------------+
 * |   VPN[2]   |   VPN[1]   |   VPN[0]   |   page offset    |
 * +---------------------------------------------------------+
 * Sv48: 512 GiB, 1 GiB, 2 MiB, 4 KiB
 * +----------------------------------------------------------------------+
 * |47        39|38        30|29        21|20        12|11               0|
 * +----------------------------------------------------------------------+
 * |   VPN[3]   |   VPN[2]   |   VPN[1]   |   VPN[0]   |   page offset    |
 * +----------------------------------------------------------------------+
 * Sv57: 256 TiB, 512 GiB, 1 GiB, 2 MiB, 4 KiB
 * +-----------------------------------------------------------------------------------+
 * |56        48|47        39|38        30|29        21|20        12|11               0|
 * +-----------------------------------------------------------------------------------+
 * |   VPN[4]   |   VPN[3]   |   VPN[2]   |   VPN[1]   |   VPN[0]   |   page offset    |
 * +-----------------------------------------------------------------------------------+
 */
#define CORE_MMU_VPN0_LEVEL U(0)
#define CORE_MMU_VPN1_LEVEL U(1)
#define CORE_MMU_VPN2_LEVEL U(2)
#define CORE_MMU_VPN3_LEVEL U(3)
#define CORE_MMU_VPN4_LEVEL U(4)
#define CORE_MMU_VPN0_SHIFT \
	CORE_MMU_SHIFT_OF_LEVEL(CORE_MMU_VPN0_LEVEL)
#define CORE_MMU_VPN1_SHIFT \
	CORE_MMU_SHIFT_OF_LEVEL(CORE_MMU_VPN1_LEVEL)
#define CORE_MMU_VPN2_SHIFT \
	CORE_MMU_SHIFT_OF_LEVEL(CORE_MMU_VPN2_LEVEL)
#define CORE_MMU_VPN3_SHIFT \
	CORE_MMU_SHIFT_OF_LEVEL(CORE_MMU_VPN3_LEVEL)
#define CORE_MMU_VPN4_SHIFT \
	CORE_MMU_SHIFT_OF_LEVEL(CORE_MMU_VPN4_LEVEL)
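
#ifndef __ASSEMBLER__
/*
 * Illustrative sketch, not the actual table-walk code: extracts the
 * VPN[level] index from a virtual address using the shifts defined
 * above, as used when indexing the page table at the given
 * translation level. The helper name is hypothetical.
 */
static inline unsigned long core_mmu_vpn_example(vaddr_t va,
						 unsigned int level)
{
	return (va >> CORE_MMU_SHIFT_OF_LEVEL(level)) & RISCV_MMU_VPN_MASK;
}
#endif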

#define CORE_MMU_PGDIR_LEVEL CORE_MMU_VPN0_LEVEL
#define CORE_MMU_PGDIR_SHIFT \
	CORE_MMU_SHIFT_OF_LEVEL(CORE_MMU_PGDIR_LEVEL + 1)

#define CORE_MMU_BASE_TABLE_LEVEL (RISCV_PGLEVELS - 1)
#define CORE_MMU_BASE_TABLE_SHIFT \
	CORE_MMU_SHIFT_OF_LEVEL(CORE_MMU_BASE_TABLE_LEVEL)

#ifndef __ASSEMBLER__

struct core_mmu_config {
	unsigned long satp[CFG_TEE_CORE_NB_CORE];
	unsigned long map_offset;
};

struct core_mmu_user_map {
	unsigned long user_map;
	uint32_t asid;
};

/* Cache maintenance operation type */
enum cache_op {
	DCACHE_CLEAN,
	DCACHE_AREA_CLEAN,
	DCACHE_INVALIDATE,
	DCACHE_AREA_INVALIDATE,
	ICACHE_INVALIDATE,
	ICACHE_AREA_INVALIDATE,
	DCACHE_CLEAN_INV,
	DCACHE_AREA_CLEAN_INV,
};

static inline void core_mmu_table_write_barrier(void)
{
	/* Invoke memory barrier */
	mb();
}

TEE_Result cache_op_inner(enum cache_op op, void *va, size_t len);

static inline bool core_mmu_check_max_pa(paddr_t pa)
{
	return pa <= (BIT64(RISCV_MMU_PA_WIDTH) - 1);
}

static inline unsigned int core_mmu_get_va_width(void)
{
	return RISCV_MMU_VA_WIDTH;
}

static inline bool core_mmu_va_is_valid(vaddr_t va)
{
#ifdef RV32
	return va < BIT64(core_mmu_get_va_width());
#else
	/*
	 * Check that an RV64 virtual address is canonical.
	 * In each RV64 MMU mode, the bits above the highest valid
	 * VA bit must be copies of that bit (sign extension):
	 * - Sv39: va[63:39] must equal bit 38
	 * - Sv48: va[63:48] must equal bit 47
	 * - Sv57: va[63:57] must equal bit 56
	 * Otherwise, a page-fault exception is raised.
	 */
	vaddr_t mask = GENMASK_64(63, RISCV_MMU_VA_WIDTH);
	uint64_t msb = BIT64(RISCV_MMU_VA_WIDTH - 1);

	if (va & msb)
		return (va & mask) == mask;

	return (va & mask) == 0;
#endif
}
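
/*
 * Illustrative values (assuming Sv39, i.e. RISCV_MMU_VA_WIDTH == 39):
 * - 0xffffffc000000000: bit 38 set, va[63:39] all ones -> canonical
 * - 0x0000003fffffffff: bit 38 clear, va[63:39] all zeros -> canonical
 * - 0x0000004000000000: bit 38 set, va[63:39] all zeros -> rejected
 */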

static inline bool core_mmu_level_in_range(unsigned int level)
{
	return level <= CORE_MMU_BASE_TABLE_LEVEL;
}
#endif /*__ASSEMBLER__*/

#endif /* __MM_CORE_MMU_ARCH_H */