// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2016, 2024 Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#include <arm.h>
#include <assert.h>
#include <keep.h>
#include <kernel/boot.h>
#include <kernel/cache_helpers.h>
#include <kernel/misc.h>
#include <kernel/panic.h>
#include <kernel/thread.h>
#include <kernel/tlb_helpers.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <mm/pgt_cache.h>
#include <mm/phys_mem.h>
#include <platform_config.h>
#include <stdlib.h>
#include <string.h>
#include <trace.h>
#include <util.h>

#ifdef CFG_WITH_LPAE
#error This file is not to be used with LPAE
#endif

#ifdef CFG_NS_VIRTUALIZATION
#error Currently V7 MMU code does not support virtualization
#endif

#ifndef DEBUG_XLAT_TABLE
#define DEBUG_XLAT_TABLE 0
#endif

#if DEBUG_XLAT_TABLE
#define debug_print(...) DMSG_RAW(__VA_ARGS__)
#else
#define debug_print(...) ((void)0)
#endif

/*
 * MMU related values
 */

/* Shareable */
#define TEE_MMU_TTB_S           (1 << 1)

/* Not Outer Shareable */
#define TEE_MMU_TTB_NOS         (1 << 5)

/* Normal memory, Inner Non-cacheable */
#define TEE_MMU_TTB_IRGN_NC     0

/* Normal memory, Inner Write-Back Write-Allocate Cacheable */
#define TEE_MMU_TTB_IRGN_WBWA   (1 << 6)

/* Normal memory, Inner Write-Through Cacheable */
#define TEE_MMU_TTB_IRGN_WT     1

/* Normal memory, Inner Write-Back no Write-Allocate Cacheable */
#define TEE_MMU_TTB_IRGN_WB     (1 | (1 << 6))

/* Normal memory, Outer Write-Back Write-Allocate Cacheable */
#define TEE_MMU_TTB_RNG_WBWA    (1 << 3)

/* Normal memory, Outer Write-Back no Write-Allocate Cacheable */
#define TEE_MMU_TTB_RNG_WB      (3 << 3)

#ifndef CFG_NO_SMP
#define TEE_MMU_DEFAULT_ATTRS \
        (TEE_MMU_TTB_S | TEE_MMU_TTB_NOS | \
         TEE_MMU_TTB_IRGN_WBWA | TEE_MMU_TTB_RNG_WBWA)
#else
#define TEE_MMU_DEFAULT_ATTRS (TEE_MMU_TTB_IRGN_WB | TEE_MMU_TTB_RNG_WB)
#endif
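
/*
 * The TEE_MMU_TTB_* bits above are translation table walk attributes OR'ed
 * into the low bits of TTBR0/TTBR1 (see TEE_MMU_DEFAULT_ATTRS): on SMP
 * builds the walks are shareable, inner shareable and inner/outer
 * write-back write-allocate cacheable, on UP builds inner/outer write-back
 * without write-allocate.
 */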


#define INVALID_DESC            0x0

#define SECTION_SHIFT           20
#define SECTION_MASK            0x000fffff
#define SECTION_SIZE            0x00100000

/* armv7 memory mapping attributes: section mapping */
#define SECTION_SECURE          (0 << 19)
#define SECTION_NOTSECURE       (1 << 19)
#define SECTION_SHARED          (1 << 16)
#define SECTION_NOTGLOBAL       (1 << 17)
#define SECTION_ACCESS_FLAG     (1 << 10)
#define SECTION_UNPRIV          (1 << 11)
#define SECTION_RO              (1 << 15)
#define SECTION_TEXCB(texcb)    ((((texcb) >> 2) << 12) | \
                                 ((((texcb) >> 1) & 0x1) << 3) | \
                                 (((texcb) & 0x1) << 2))
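
/*
 * SECTION_TEXCB() places the 3-bit {TEX[0], C, B} memory attribute index
 * into a section descriptor: index bit 2 goes to TEX[0] (descriptor bit 12),
 * bit 1 to C (bit 3) and bit 0 to B (bit 2). With TEX remap enabled this
 * index selects an entry in PRRR/NMRR rather than encoding the attributes
 * directly.
 */
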
#define SECTION_DEVICE          SECTION_TEXCB(ATTR_DEVICE_INDEX)
#define SECTION_NORMAL          SECTION_TEXCB(ATTR_DEVICE_INDEX)
#define SECTION_NORMAL_CACHED   SECTION_TEXCB(ATTR_NORMAL_CACHED_INDEX)
#define SECTION_STRONG_O        SECTION_TEXCB(ATTR_STRONG_O_INDEX)
#define SECTION_TAGGED_CACHED   SECTION_TEXCB(ATTR_TAGGED_CACHED_INDEX)

#define SECTION_XN              (1 << 4)
#define SECTION_PXN             (1 << 0)
#define SECTION_SECTION         (2 << 0)

#define SECTION_PT_NOTSECURE    (1 << 3)
#define SECTION_PT_PT           (1 << 0)

#define SECTION_PT_ATTR_MASK    ~((1 << 10) - 1)

#define SMALL_PAGE_SMALL_PAGE   (1 << 1)
#define SMALL_PAGE_SHARED       (1 << 10)
#define SMALL_PAGE_NOTGLOBAL    (1 << 11)
#define SMALL_PAGE_TEXCB(texcb) ((((texcb) >> 2) << 6) | \
                                 ((((texcb) >> 1) & 0x1) << 3) | \
                                 (((texcb) & 0x1) << 2))
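
/*
 * Same {TEX[0], C, B} split for small page descriptors, except that TEX[0]
 * sits at descriptor bit 6 instead of bit 12.
 */
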
#define SMALL_PAGE_DEVICE       SMALL_PAGE_TEXCB(ATTR_DEVICE_INDEX)
#define SMALL_PAGE_NORMAL       SMALL_PAGE_TEXCB(ATTR_DEVICE_INDEX)
#define SMALL_PAGE_NORMAL_CACHED \
                                SMALL_PAGE_TEXCB(ATTR_NORMAL_CACHED_INDEX)
#define SMALL_PAGE_STRONG_O     SMALL_PAGE_TEXCB(ATTR_STRONG_O_INDEX)
#define SMALL_PAGE_TAGGED_CACHED \
                                SMALL_PAGE_TEXCB(ATTR_TAGGED_CACHED_INDEX)
#define SMALL_PAGE_ACCESS_FLAG  (1 << 4)
#define SMALL_PAGE_UNPRIV       (1 << 5)
#define SMALL_PAGE_RO           (1 << 9)
#define SMALL_PAGE_XN           (1 << 0)


/* The TEX, C and B bits concatenated */
#define ATTR_DEVICE_INDEX               0x0
#define ATTR_NORMAL_CACHED_INDEX        0x1
#define ATTR_STRONG_O_INDEX             0x2
/* Compat with TEE_MATTR_MEM_TYPE_TAGGED */
#define ATTR_TAGGED_CACHED_INDEX        0x3

#define PRRR_IDX(idx, tr, nos)  (((tr) << (2 * (idx))) | \
                                 ((uint32_t)(nos) << ((idx) + 24)))
#define NMRR_IDX(idx, ir, or)   (((ir) << (2 * (idx))) | \
                                 ((uint32_t)(or) << (2 * (idx) + 16)))
#define PRRR_DS0                (1 << 16)
#define PRRR_DS1                (1 << 17)
#define PRRR_NS0                (1 << 18)
#define PRRR_NS1                (1 << 19)
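
/*
 * With TEX remap each {TEX[0], C, B} index n selects a 2-bit memory type
 * TRn in PRRR (0 = Strongly-ordered, 1 = Device, 2 = Normal) plus a NOSn
 * (not outer shareable) bit, and 2-bit inner/outer cacheability fields
 * IRn/ORn in NMRR (0 = Non-cacheable, 1 = WBWA, 2 = WT, 3 = WB). For
 * instance, the SMP variant of ATTR_NORMAL_CACHED_PRRR below expands to
 * PRRR_IDX(1, 2, 1) == 0x02000008 and ATTR_NORMAL_CACHED_NMRR to
 * NMRR_IDX(1, 1, 1) == 0x00040004.
 */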

#define ATTR_DEVICE_PRRR        PRRR_IDX(ATTR_DEVICE_INDEX, 1, 0)
#define ATTR_DEVICE_NMRR        NMRR_IDX(ATTR_DEVICE_INDEX, 0, 0)

#define ATTR_STRONGLY_O_PRRR    PRRR_IDX(ATTR_STRONG_O_INDEX, 0, 0)
#define ATTR_STRONGLY_O_NMRR    NMRR_IDX(ATTR_STRONG_O_INDEX, 0, 0)

#ifndef CFG_NO_SMP
#define ATTR_NORMAL_CACHED_PRRR PRRR_IDX(ATTR_NORMAL_CACHED_INDEX, 2, 1)
#define ATTR_NORMAL_CACHED_NMRR NMRR_IDX(ATTR_NORMAL_CACHED_INDEX, 1, 1)
#define ATTR_TAGGED_CACHED_PRRR PRRR_IDX(ATTR_TAGGED_CACHED_INDEX, 2, 1)
#define ATTR_TAGGED_CACHED_NMRR NMRR_IDX(ATTR_TAGGED_CACHED_INDEX, 1, 1)
#else
#define ATTR_NORMAL_CACHED_PRRR PRRR_IDX(ATTR_NORMAL_CACHED_INDEX, 2, 0)
#define ATTR_NORMAL_CACHED_NMRR NMRR_IDX(ATTR_NORMAL_CACHED_INDEX, 3, 3)
#define ATTR_TAGGED_CACHED_PRRR PRRR_IDX(ATTR_TAGGED_CACHED_INDEX, 2, 0)
#define ATTR_TAGGED_CACHED_NMRR NMRR_IDX(ATTR_TAGGED_CACHED_INDEX, 3, 3)
#endif

#define NUM_L1_ENTRIES          4096
#define NUM_L2_ENTRIES          256

#define L1_TBL_SIZE             (NUM_L1_ENTRIES * 4)
#define L2_TBL_SIZE             (NUM_L2_ENTRIES * 4)
#define L1_ALIGNMENT            L1_TBL_SIZE
#define L2_ALIGNMENT            L2_TBL_SIZE
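
/*
 * A short-descriptor L1 table has 4096 32-bit entries each covering a 1MB
 * section, i.e. the full 4GB address space, and is 16KB in size (and must
 * be 16KB aligned). An L2 table has 256 small-page entries covering one
 * 1MB section, i.e. 1KB per table.
 */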

/* Defined to the smallest possible secondary L1 MMU table */
#define TTBCR_N_VALUE           7

/* Number of sections in ttbr0 when user mapping activated */
#define NUM_UL1_ENTRIES         (1 << (12 - TTBCR_N_VALUE))
#define UL1_ALIGNMENT           (NUM_UL1_ENTRIES * 4)
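
/*
 * With TTBCR.N = 7, TTBR0 only translates virtual addresses below 32MB
 * (VA[31:25] == 0), so the per-thread user L1 table holds
 * NUM_UL1_ENTRIES = 32 section entries (128 bytes) and is aligned to its
 * own size. Anything at or above 32MB is translated through TTBR1.
 */
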
/* TTB attributes */

/* TTB0 of TTBR0 (depends on TTBCR_N_VALUE) */
#define TTB_UL1_MASK            (~(UL1_ALIGNMENT - 1))
/* TTB1 of TTBR1 */
#define TTB_L1_MASK             (~(L1_ALIGNMENT - 1))

#ifndef MAX_XLAT_TABLES
#ifdef CFG_CORE_ASLR
#       define XLAT_TABLE_ASLR_EXTRA 2
#else
#       define XLAT_TABLE_ASLR_EXTRA 0
#endif
#define MAX_XLAT_TABLES         (6 + XLAT_TABLE_ASLR_EXTRA)
#endif /*!MAX_XLAT_TABLES*/

enum desc_type {
        DESC_TYPE_PAGE_TABLE,
        DESC_TYPE_SECTION,
        DESC_TYPE_SUPER_SECTION,
        DESC_TYPE_LARGE_PAGE,
        DESC_TYPE_SMALL_PAGE,
        DESC_TYPE_INVALID,
};

typedef uint32_t l1_xlat_tbl_t[NUM_L1_ENTRIES];
typedef uint32_t l2_xlat_tbl_t[NUM_L2_ENTRIES];
typedef uint32_t ul1_xlat_tbl_t[NUM_UL1_ENTRIES];

#ifndef CFG_DYN_CONFIG
static l1_xlat_tbl_t main_mmu_l1_ttb
        __aligned(L1_ALIGNMENT) __section(".nozi.mmu.l1");

/* L2 MMU tables */
static l2_xlat_tbl_t main_mmu_l2_ttb[MAX_XLAT_TABLES]
        __aligned(L2_ALIGNMENT) __section(".nozi.mmu.l2");

/* MMU L1 table for TAs, one for each thread */
static ul1_xlat_tbl_t main_mmu_ul1_ttb[CFG_NUM_THREADS]
        __aligned(UL1_ALIGNMENT) __section(".nozi.mmu.ul1");
#endif

/*
 * struct mmu_partition - core virtual memory tables
 * @l1_table: Level 1 translation table base address
 * @l2_tables: Level 2 translation tables base address (CFG_DYN_CONFIG=n)
 * @last_l2_page: Pre-allocated Level 2 table chunk (CFG_DYN_CONFIG=y)
 * @ul1_tables: Level 1 translation tables for EL0 mapping, one per thread
 * @tables_used: Number of level 2 tables already used
 */
struct mmu_partition {
        l1_xlat_tbl_t *l1_table;
        l2_xlat_tbl_t *l2_tables;
        uint8_t *last_l2_page;
        ul1_xlat_tbl_t *ul1_tables;
        uint32_t tables_used;
};

static struct mmu_partition default_partition = {
#ifndef CFG_DYN_CONFIG
        .l1_table = &main_mmu_l1_ttb,
        .l2_tables = main_mmu_l2_ttb,
        .ul1_tables = main_mmu_ul1_ttb,
        .tables_used = 0,
#endif
};

static struct mmu_partition *get_prtn(void)
{
        return &default_partition;
}

static vaddr_t core_mmu_get_main_ttb_va(struct mmu_partition *prtn)
{
        return (vaddr_t)prtn->l1_table;
}

static paddr_t core_mmu_get_main_ttb_pa(struct mmu_partition *prtn)
{
        paddr_t pa = virt_to_phys((void *)core_mmu_get_main_ttb_va(prtn));

        if (pa & ~TTB_L1_MASK)
                panic("invalid core l1 table");
        return pa;
}

static vaddr_t core_mmu_get_ul1_ttb_va(struct mmu_partition *prtn)
{
        return (vaddr_t)prtn->ul1_tables[thread_get_id()];
}

static paddr_t core_mmu_get_ul1_ttb_pa(struct mmu_partition *prtn)
{
        paddr_t pa = virt_to_phys((void *)core_mmu_get_ul1_ttb_va(prtn));

        if (pa & ~TTB_UL1_MASK)
                panic("invalid user l1 table");
        return pa;
}

static uint32_t *alloc_l2_table(struct mmu_partition *prtn)
{
        uint32_t *new_table = NULL;

        /* The CFG_DYN_CONFIG implementation below depends on this */
        static_assert(sizeof(l2_xlat_tbl_t) * 4 == SMALL_PAGE_SIZE);

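        /*
         * Two allocation strategies: with CFG_DYN_CONFIG the L2 tables are
         * carved four at a time out of a 4KB page (from the boot memory
         * allocator before the MMU is enabled, from the core physical
         * memory pool afterwards), tracked via last_l2_page. Without
         * CFG_DYN_CONFIG they come from the static main_mmu_l2_ttb[] array,
         * capped at MAX_XLAT_TABLES.
         */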
        if (IS_ENABLED(CFG_DYN_CONFIG)) {
                void *p = NULL;

                if (prtn->last_l2_page)
                        goto dyn_out;
                if (cpu_mmu_enabled()) {
                        tee_mm_entry_t *mm = NULL;
                        paddr_t pa = 0;

                        mm = phys_mem_core_alloc(SMALL_PAGE_SIZE);
                        if (!mm) {
                                EMSG("Phys mem exhausted");
                                return NULL;
                        }
                        pa = tee_mm_get_smem(mm);

                        p = phys_to_virt(pa, MEM_AREA_SEC_RAM_OVERALL,
                                         SMALL_PAGE_SIZE);
                        assert(p);
                } else {
                        p = boot_mem_alloc(SMALL_PAGE_SIZE, SMALL_PAGE_SIZE);
                        /*
                         * L2 tables are allocated four at a time as a 4k
                         * page. The pointer prtn->last_l2_page keeps track
                         * of that page until all the l2 tables have been
                         * used. That pointer may need to be relocated when
                         * the MMU is enabled so the address of the pointer
                         * is recorded, but it must only be recorded once.
                         *
                         * The already used 4k pages are only referenced
                         * via recorded physical addresses in the l1 table
                         * so those pointers don't need to be updated for
                         * relocation.
                         */
                        if (!prtn->tables_used)
                                boot_mem_add_reloc(&prtn->last_l2_page);
                }
                prtn->last_l2_page = p;
dyn_out:
                new_table = (void *)(prtn->last_l2_page +
                                     (prtn->tables_used % 4) *
                                     sizeof(l2_xlat_tbl_t));
                prtn->tables_used++;
                /*
                 * The current page is now exhausted; next time we need to
                 * allocate a new one.
                 */
                if (!(prtn->tables_used % 4))
                        prtn->last_l2_page = NULL;
                DMSG("L2 tables used %u", prtn->tables_used);
        } else {
                if (prtn->tables_used >= MAX_XLAT_TABLES) {
                        EMSG("%u L2 tables exhausted", MAX_XLAT_TABLES);
                        return NULL;
                }

                new_table = prtn->l2_tables[prtn->tables_used];
                prtn->tables_used++;
                DMSG("L2 table used: %"PRIu32"/%d", prtn->tables_used,
                     MAX_XLAT_TABLES);
        }

        return new_table;
}

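/*
 * Short-descriptor type encoding as decoded below: at level 1, bits [1:0]
 * == 0b01 is a page table pointer and 0b1x a section (bit 18 set means
 * supersection); at level 2, 0b01 is a large page and 0b1x a small page
 * (bit 0 then being XN). 0b00 is invalid at both levels.
 */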
static enum desc_type get_desc_type(unsigned level, uint32_t desc)
{
        assert(level >= 1 && level <= 2);

        if (level == 1) {
                if ((desc & 0x3) == 0x1)
                        return DESC_TYPE_PAGE_TABLE;

                if ((desc & 0x2) == 0x2) {
                        if (desc & (1 << 18))
                                return DESC_TYPE_SUPER_SECTION;
                        return DESC_TYPE_SECTION;
                }
        } else {
                if ((desc & 0x3) == 0x1)
                        return DESC_TYPE_LARGE_PAGE;

                if ((desc & 0x2) == 0x2)
                        return DESC_TYPE_SMALL_PAGE;
        }

        return DESC_TYPE_INVALID;
}

static uint32_t texcb_to_mattr(uint32_t texcb)
{
        COMPILE_TIME_ASSERT(ATTR_DEVICE_INDEX == TEE_MATTR_MEM_TYPE_DEV);
        COMPILE_TIME_ASSERT(ATTR_NORMAL_CACHED_INDEX ==
                            TEE_MATTR_MEM_TYPE_CACHED);
        COMPILE_TIME_ASSERT(ATTR_STRONG_O_INDEX ==
                            TEE_MATTR_MEM_TYPE_STRONGLY_O);
        COMPILE_TIME_ASSERT(ATTR_TAGGED_CACHED_INDEX ==
                            TEE_MATTR_MEM_TYPE_TAGGED);

        return texcb << TEE_MATTR_MEM_TYPE_SHIFT;
}

static uint32_t mattr_to_texcb(uint32_t attr)
{
        /* Keep in sync with core_mmu.c:core_mmu_mattr_is_ok */
        return (attr >> TEE_MATTR_MEM_TYPE_SHIFT) & TEE_MATTR_MEM_TYPE_MASK;
}

static uint32_t desc_to_mattr(unsigned level, uint32_t desc)
{
        uint32_t a;

        switch (get_desc_type(level, desc)) {
        case DESC_TYPE_PAGE_TABLE:
                a = TEE_MATTR_TABLE;
                if (!(desc & SECTION_PT_NOTSECURE))
                        a |= TEE_MATTR_SECURE;
                break;
        case DESC_TYPE_SECTION:
                a = TEE_MATTR_VALID_BLOCK;
                if (desc & SECTION_ACCESS_FLAG)
                        a |= TEE_MATTR_PRX | TEE_MATTR_URX;

                if (!(desc & SECTION_RO))
                        a |= TEE_MATTR_PW | TEE_MATTR_UW;

                if (desc & SECTION_XN)
                        a &= ~(TEE_MATTR_PX | TEE_MATTR_UX);

                if (desc & SECTION_PXN)
                        a &= ~TEE_MATTR_PX;

                a |= texcb_to_mattr(((desc >> 12) & 0x7) | ((desc >> 2) & 0x3));

                if (!(desc & SECTION_NOTGLOBAL))
                        a |= TEE_MATTR_GLOBAL;

                if (!(desc & SECTION_NOTSECURE))
                        a |= TEE_MATTR_SECURE;

                break;
        case DESC_TYPE_SMALL_PAGE:
                a = TEE_MATTR_VALID_BLOCK;
                if (desc & SMALL_PAGE_ACCESS_FLAG)
                        a |= TEE_MATTR_PRX | TEE_MATTR_URX;

                if (!(desc & SMALL_PAGE_RO))
                        a |= TEE_MATTR_PW | TEE_MATTR_UW;

                if (desc & SMALL_PAGE_XN)
                        a &= ~(TEE_MATTR_PX | TEE_MATTR_UX);

                a |= texcb_to_mattr(((desc >> 6) & 0x7) | ((desc >> 2) & 0x3));

                if (!(desc & SMALL_PAGE_NOTGLOBAL))
                        a |= TEE_MATTR_GLOBAL;
                break;
        default:
                return 0;
        }

        return a;
}

static uint32_t mattr_to_desc(unsigned level, uint32_t attr)
{
        uint32_t desc;
        uint32_t a = attr;
        unsigned texcb;

        if (level == 1 && (a & TEE_MATTR_TABLE)) {
                desc = SECTION_PT_PT;
                if (!(a & TEE_MATTR_SECURE))
                        desc |= SECTION_PT_NOTSECURE;
                return desc;
        }

        if (!(a & TEE_MATTR_VALID_BLOCK))
                return INVALID_DESC;

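        /*
         * The short-descriptor AP/APX/XN encoding cannot express write-only
         * or similar asymmetric permissions, so round the requested rights
         * up: executable or writable implies readable, and user access
         * implies the corresponding privileged access.
         */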
        if (a & (TEE_MATTR_PX | TEE_MATTR_PW))
                a |= TEE_MATTR_PR;
        if (a & (TEE_MATTR_UX | TEE_MATTR_UW))
                a |= TEE_MATTR_UR;
        if (a & TEE_MATTR_UR)
                a |= TEE_MATTR_PR;
        if (a & TEE_MATTR_UW)
                a |= TEE_MATTR_PW;

        texcb = mattr_to_texcb(a);

        if (level == 1) { /* Section */
#ifndef CFG_NO_SMP
                desc = SECTION_SECTION | SECTION_SHARED;
#else
                desc = SECTION_SECTION;
#endif

                if (!(a & (TEE_MATTR_PX | TEE_MATTR_UX)))
                        desc |= SECTION_XN;

#ifdef CFG_HWSUPP_MEM_PERM_PXN
                if (!(a & TEE_MATTR_PX))
                        desc |= SECTION_PXN;
#endif

                if (a & TEE_MATTR_UR)
                        desc |= SECTION_UNPRIV;

                if (!(a & TEE_MATTR_PW))
                        desc |= SECTION_RO;

                if (a & (TEE_MATTR_UR | TEE_MATTR_PR))
                        desc |= SECTION_ACCESS_FLAG;

                if (!(a & TEE_MATTR_GLOBAL))
                        desc |= SECTION_NOTGLOBAL;

                if (!(a & TEE_MATTR_SECURE))
                        desc |= SECTION_NOTSECURE;

                desc |= SECTION_TEXCB(texcb);
        } else {
#ifndef CFG_NO_SMP
                desc = SMALL_PAGE_SMALL_PAGE | SMALL_PAGE_SHARED;
#else
                desc = SMALL_PAGE_SMALL_PAGE;
#endif

                if (!(a & (TEE_MATTR_PX | TEE_MATTR_UX)))
                        desc |= SMALL_PAGE_XN;

                if (a & TEE_MATTR_UR)
                        desc |= SMALL_PAGE_UNPRIV;

                if (!(a & TEE_MATTR_PW))
                        desc |= SMALL_PAGE_RO;

                if (a & (TEE_MATTR_UR | TEE_MATTR_PR))
                        desc |= SMALL_PAGE_ACCESS_FLAG;

                if (!(a & TEE_MATTR_GLOBAL))
                        desc |= SMALL_PAGE_NOTGLOBAL;

                desc |= SMALL_PAGE_TEXCB(texcb);
        }

        return desc;
}

void core_mmu_set_info_table(struct core_mmu_table_info *tbl_info,
                             unsigned level, vaddr_t va_base, void *table)
{
        tbl_info->level = level;
        tbl_info->next_level = level + 1;
        tbl_info->table = table;
        tbl_info->va_base = va_base;
        assert(level <= 2);
        if (level == 1) {
                tbl_info->shift = SECTION_SHIFT;
                tbl_info->num_entries = NUM_L1_ENTRIES;
        } else {
                tbl_info->shift = SMALL_PAGE_SHIFT;
                tbl_info->num_entries = NUM_L2_ENTRIES;
        }
}

void core_mmu_get_user_pgdir(struct core_mmu_table_info *pgd_info)
{
        void *tbl = (void *)core_mmu_get_ul1_ttb_va(get_prtn());

        core_mmu_set_info_table(pgd_info, 1, 0, tbl);
        pgd_info->num_entries = NUM_UL1_ENTRIES;
}

void core_mmu_create_user_map(struct user_mode_ctx *uctx,
                              struct core_mmu_user_map *map)
{
        struct core_mmu_table_info dir_info = { };

        COMPILE_TIME_ASSERT(L2_TBL_SIZE == PGT_SIZE);

        core_mmu_get_user_pgdir(&dir_info);
        memset(dir_info.table, 0, dir_info.num_entries * sizeof(uint32_t));
        core_mmu_populate_user_map(&dir_info, uctx);
        map->ttbr0 = core_mmu_get_ul1_ttb_pa(get_prtn()) |
                     TEE_MMU_DEFAULT_ATTRS;
        map->ctxid = uctx->vm_info.asid;
}

bool core_mmu_find_table(struct mmu_partition *prtn, vaddr_t va,
                         unsigned max_level,
                         struct core_mmu_table_info *tbl_info)
{
        uint32_t *tbl;
        unsigned n = va >> SECTION_SHIFT;

        if (!prtn)
                prtn = get_prtn();
        tbl = (uint32_t *)core_mmu_get_main_ttb_va(prtn);

        if (max_level == 1 || (tbl[n] & 0x3) != 0x1) {
                core_mmu_set_info_table(tbl_info, 1, 0, tbl);
        } else {
                paddr_t ntbl = tbl[n] & ~((1 << 10) - 1);
                void *l2tbl = phys_to_virt(ntbl, MEM_AREA_TEE_RAM_RW_DATA,
                                           L2_TBL_SIZE);

                if (!l2tbl)
                        l2tbl = phys_to_virt(ntbl, MEM_AREA_SEC_RAM_OVERALL,
                                             L2_TBL_SIZE);
                if (!l2tbl)
                        return false;

                core_mmu_set_info_table(tbl_info, 2, n << SECTION_SHIFT, l2tbl);
        }
        return true;
}

void core_mmu_set_entry_primitive(void *table, size_t level, size_t idx,
                                  paddr_t pa, uint32_t attr)
{
        uint32_t *tbl = table;
        uint32_t desc = mattr_to_desc(level, attr);

        tbl[idx] = desc | pa;
}

static paddr_t desc_to_pa(unsigned level, uint32_t desc)
{
        unsigned shift_mask;

        switch (get_desc_type(level, desc)) {
        case DESC_TYPE_PAGE_TABLE:
                shift_mask = 10;
                break;
        case DESC_TYPE_SECTION:
                shift_mask = 20;
                break;
        case DESC_TYPE_SUPER_SECTION:
                shift_mask = 24; /* We're ignoring bits 32 and above. */
                break;
        case DESC_TYPE_LARGE_PAGE:
                shift_mask = 16;
                break;
        case DESC_TYPE_SMALL_PAGE:
                shift_mask = 12;
                break;
        default:
                /* Invalid section */
                shift_mask = 4;
        }

        return desc & ~((1 << shift_mask) - 1);
}

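/*
 * Replace a level 1 entry with a page table descriptor so the 1MB section
 * can be mapped at 4KB granularity. If the entry already maps a section,
 * the new L2 table is pre-populated so its 256 small pages reproduce the
 * original mapping and attributes.
 */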
bool core_mmu_entry_to_finer_grained(struct core_mmu_table_info *tbl_info,
                                     unsigned int idx, bool secure)
{
        uint32_t *new_table;
        uint32_t *entry;
        uint32_t new_table_desc;
        uint32_t attr;
        uint32_t desc;
        paddr_t pa;
        int i;

        if (tbl_info->level != 1)
                return false;

        if (idx >= NUM_L1_ENTRIES)
                return false;

        entry = (uint32_t *)tbl_info->table + idx;
        attr = desc_to_mattr(1, *entry);

        if (*entry && get_desc_type(1, *entry) == DESC_TYPE_PAGE_TABLE) {
                /*
                 * If there is a page table already,
                 * check that the secure attribute fits
                 */
                return secure == (bool)(attr & TEE_MATTR_SECURE);
        }

        /* If there is something mapped, check the secure access flag */
        if (attr && secure != (bool)(attr & TEE_MATTR_SECURE))
                return false;

        new_table = alloc_l2_table(get_prtn());
        if (!new_table)
                return false;

        new_table_desc = SECTION_PT_PT | virt_to_phys(new_table);

        if (!secure)
                new_table_desc |= SECTION_PT_NOTSECURE;

        if (*entry) {
                pa = desc_to_pa(1, *entry);
                desc = mattr_to_desc(2, attr);
                for (i = 0; i < NUM_L2_ENTRIES; i++, pa += SMALL_PAGE_SIZE)
                        new_table[i] = desc | pa;
        } else {
                memset(new_table, 0, sizeof(l2_xlat_tbl_t));
        }

        /* Update descriptor at current level */
        *entry = new_table_desc;

        return true;
}

void core_mmu_get_entry_primitive(const void *table, size_t level,
                                  size_t idx, paddr_t *pa, uint32_t *attr)
{
        const uint32_t *tbl = table;

        if (pa)
                *pa = desc_to_pa(level, tbl[idx]);

        if (attr)
                *attr = desc_to_mattr(level, tbl[idx]);
}

void core_mmu_get_user_va_range(vaddr_t *base, size_t *size)
{
        if (base) {
                /* Leaving the first entry unmapped to make NULL unmapped */
                *base = 1 << SECTION_SHIFT;
        }

        if (size)
                *size = (NUM_UL1_ENTRIES - 1) << SECTION_SHIFT;
}

void core_mmu_get_user_map(struct core_mmu_user_map *map)
{
        map->ttbr0 = read_ttbr0();
        map->ctxid = read_contextidr();
}

void core_mmu_set_user_map(struct core_mmu_user_map *map)
{
        uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);

        /*
         * Update the reserved Context ID and TTBR0
         */

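        /*
         * Write a reserved Context ID (0) and synchronize before switching
         * TTBR0 so speculative walks cannot associate the old ASID with the
         * new tables or vice versa. Passing a NULL map disables the user
         * mapping by pointing TTBR0 at the same tables as TTBR1.
         */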
        dsb();  /* ARM erratum 754322 */
        write_contextidr(0);
        isb();

        if (map) {
                write_ttbr0(map->ttbr0);
                isb();
                write_contextidr(map->ctxid);
                isb();
        } else {
                write_ttbr0(read_ttbr1());
                isb();
        }

        tlbi_all();
        icache_inv_all();

        /* Restore interrupts */
        thread_unmask_exceptions(exceptions);
}

bool core_mmu_user_mapping_is_active(void)
{
        bool ret;
        uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);

        ret = read_ttbr0() != read_ttbr1();
        thread_unmask_exceptions(exceptions);

        return ret;
}

bool __noprof core_mmu_user_va_range_is_defined(void)
{
        return true;
}

void core_init_mmu_prtn(struct mmu_partition *prtn, struct memory_map *mem_map)
{
        void *ttb1 = (void *)core_mmu_get_main_ttb_va(prtn);
        size_t n = 0;

        /* reset L1 table */
        memset(ttb1, 0, L1_TBL_SIZE);

        for (n = 0; n < mem_map->count; n++)
                core_mmu_map_region(prtn, mem_map->map + n);
}

void core_init_mmu(struct memory_map *mem_map)
{
        struct mmu_partition *prtn = &default_partition;

        if (IS_ENABLED(CFG_DYN_CONFIG)) {
                prtn->l1_table = boot_mem_alloc(sizeof(l1_xlat_tbl_t),
                                                L1_ALIGNMENT);
                boot_mem_add_reloc(&prtn->l1_table);
                prtn->ul1_tables = boot_mem_alloc(sizeof(ul1_xlat_tbl_t) *
                                                  CFG_NUM_THREADS,
                                                  UL1_ALIGNMENT);
                boot_mem_add_reloc(&prtn->ul1_tables);
        }
        /* Initialize default pagetables */
        core_init_mmu_prtn(prtn, mem_map);
}

void core_init_mmu_regs(struct core_mmu_config *cfg)
{
        cfg->ttbr = core_mmu_get_main_ttb_pa(&default_partition) |
                    TEE_MMU_DEFAULT_ATTRS;

        cfg->prrr = ATTR_DEVICE_PRRR | ATTR_NORMAL_CACHED_PRRR |
                    ATTR_STRONGLY_O_PRRR | ATTR_TAGGED_CACHED_PRRR;
        cfg->nmrr = ATTR_DEVICE_NMRR | ATTR_NORMAL_CACHED_NMRR |
                    ATTR_STRONGLY_O_NMRR | ATTR_TAGGED_CACHED_NMRR;

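        /*
         * PRRR.NS1/DS1 map the descriptor S bit (SECTION_SHARED/
         * SMALL_PAGE_SHARED) to "shareable" for Normal and Device memory
         * respectively.
         */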
        cfg->prrr |= PRRR_NS1 | PRRR_DS1;

        /*
         * Program Domain access control register with two domains:
         * domain 0: teecore
         * domain 1: TA
         */
        cfg->dacr = DACR_DOMAIN(0, DACR_DOMAIN_PERM_CLIENT) |
                    DACR_DOMAIN(1, DACR_DOMAIN_PERM_CLIENT);

        /*
         * Enable lookups using TTBR0 and TTBR1 with the split of addresses
         * defined by TTBCR_N_VALUE.
         */
        cfg->ttbcr = TTBCR_N_VALUE;
}

enum core_mmu_fault core_mmu_get_fault_type(uint32_t fsr)
{
        assert(!(fsr & FSR_LPAE));

        switch (fsr & FSR_FS_MASK) {
        case 0x1: /* DFSR[10,3:0] 0b00001 Alignment fault (DFSR only) */
                return CORE_MMU_FAULT_ALIGNMENT;
        case 0x2: /* DFSR[10,3:0] 0b00010 Debug event */
                return CORE_MMU_FAULT_DEBUG_EVENT;
        case 0x4: /* DFSR[10,3:0] 0b00100 Fault on instr cache maintenance */
        case 0x5: /* DFSR[10,3:0] 0b00101 Translation fault first level */
        case 0x7: /* DFSR[10,3:0] 0b00111 Translation fault second level */
                return CORE_MMU_FAULT_TRANSLATION;
        case 0xd: /* DFSR[10,3:0] 0b01101 Permission fault first level */
        case 0xf: /* DFSR[10,3:0] 0b01111 Permission fault second level */
                if (fsr & FSR_WNR)
                        return CORE_MMU_FAULT_WRITE_PERMISSION;
                else
                        return CORE_MMU_FAULT_READ_PERMISSION;
        case 0x3: /* DFSR[10,3:0] 0b00011 access bit fault on section */
        case 0x6: /* DFSR[10,3:0] 0b00110 access bit fault on page */
                return CORE_MMU_FAULT_ACCESS_BIT;
        case (1 << 10) | 0x6:
                /* DFSR[10,3:0] 0b10110 Async external abort (DFSR only) */
                return CORE_MMU_FAULT_ASYNC_EXTERNAL;
        case 0x8: /* DFSR[10,3:0] 0b01000 Sync external abort, not on table */
        case 0xc: /* DFSR[10,3:0] 0b01100 Sync external abort, on table, L1 */
        case 0xe: /* DFSR[10,3:0] 0b01110 Sync external abort, on table, L2 */
                return CORE_MMU_FAULT_SYNC_EXTERNAL;
        default:
                return CORE_MMU_FAULT_OTHER;
        }
}