1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3 * Copyright (c) 2016-2025 Linaro Limited
4 * Copyright (c) 2014, STMicroelectronics International N.V.
5 * Copyright (c) 2022, Arm Limited and Contributors. All rights reserved.
6 */
7
8 #include <assert.h>
9 #include <config.h>
10 #include <kernel/boot.h>
11 #include <kernel/dt.h>
12 #include <kernel/linker.h>
13 #include <kernel/panic.h>
14 #include <kernel/spinlock.h>
15 #include <kernel/tee_l2cc_mutex.h>
16 #include <kernel/tee_misc.h>
17 #include <kernel/tlb_helpers.h>
18 #include <kernel/user_mode_ctx.h>
19 #include <kernel/virtualization.h>
20 #include <libfdt.h>
21 #include <memtag.h>
22 #include <mm/core_memprot.h>
23 #include <mm/core_mmu.h>
24 #include <mm/mobj.h>
25 #include <mm/pgt_cache.h>
26 #include <mm/phys_mem.h>
27 #include <mm/tee_pager.h>
28 #include <mm/vm.h>
29 #include <platform_config.h>
30 #include <stdalign.h>
31 #include <string.h>
32 #include <trace.h>
33 #include <util.h>
34
35 #ifndef DEBUG_XLAT_TABLE
36 #define DEBUG_XLAT_TABLE 0
37 #endif
38
39 #define SHM_VASPACE_SIZE (1024 * 1024 * 32)
40
41 /* Virtual memory pool for core mappings */
42 tee_mm_pool_t core_virt_mem_pool;
43
44 /* Virtual memory pool for shared memory mappings */
45 tee_mm_pool_t core_virt_shm_pool;
46
47 #ifdef CFG_CORE_PHYS_RELOCATABLE
48 unsigned long core_mmu_tee_load_pa __nex_bss;
49 #else
50 const unsigned long core_mmu_tee_load_pa = TEE_LOAD_ADDR;
51 #endif
52
53 /*
54 * These variables are initialized before .bss is cleared. To avoid
55 * resetting them when .bss is cleared we're storing them in .data instead,
56 * even if they initially are zero.
57 */
58
59 #ifdef CFG_CORE_RESERVED_SHM
60 /* Default NSec shared memory allocated from NSec world */
61 unsigned long default_nsec_shm_size __nex_bss;
62 unsigned long default_nsec_shm_paddr __nex_bss;
63 #endif
64
65 static struct memory_map static_memory_map __nex_bss;
66 void (*memory_map_realloc_func)(struct memory_map *mem_map) __nex_bss;
67
68 /* Offset of the first TEE RAM mapping from start of secure RAM */
69 static size_t tee_ram_initial_offs __nex_bss;
70
71 /* Define the platform's memory layout. */
72 struct memaccess_area {
73 paddr_t paddr;
74 size_t size;
75 };
76
77 #define MEMACCESS_AREA(a, s) { .paddr = a, .size = s }
78
79 static struct memaccess_area secure_only[] __nex_data = {
80 #ifdef CFG_CORE_PHYS_RELOCATABLE
81 MEMACCESS_AREA(0, 0),
82 #else
83 #ifdef TRUSTED_SRAM_BASE
84 MEMACCESS_AREA(TRUSTED_SRAM_BASE, TRUSTED_SRAM_SIZE),
85 #endif
86 MEMACCESS_AREA(TRUSTED_DRAM_BASE, TRUSTED_DRAM_SIZE),
87 #endif
88 };
89
90 static struct memaccess_area nsec_shared[] __nex_data = {
91 #ifdef CFG_CORE_RESERVED_SHM
92 MEMACCESS_AREA(TEE_SHMEM_START, TEE_SHMEM_SIZE),
93 #endif
94 };
95
96 #if defined(CFG_SECURE_DATA_PATH)
97 static const char *tz_sdp_match = "linaro,secure-heap";
98 static struct memaccess_area sec_sdp;
99 #ifdef CFG_TEE_SDP_MEM_BASE
100 register_sdp_mem(CFG_TEE_SDP_MEM_BASE, CFG_TEE_SDP_MEM_SIZE);
101 #endif
102 #ifdef TEE_SDP_TEST_MEM_BASE
103 register_sdp_mem(TEE_SDP_TEST_MEM_BASE, TEE_SDP_TEST_MEM_SIZE);
104 #endif
105 #endif
106
107 #ifdef CFG_CORE_RESERVED_SHM
108 register_phys_mem(MEM_AREA_NSEC_SHM, TEE_SHMEM_START, TEE_SHMEM_SIZE);
109 #endif
110 static unsigned int mmu_spinlock;
111
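/* Take/release mmu_spinlock with exceptions masked */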
static uint32_t mmu_lock(void)
113 {
114 return cpu_spin_lock_xsave(&mmu_spinlock);
115 }
116
static void mmu_unlock(uint32_t exceptions)
118 {
119 cpu_spin_unlock_xrestore(&mmu_spinlock, exceptions);
120 }
121
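/* Grow the memory map by one entry using the nexus heap */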
static void heap_realloc_memory_map(struct memory_map *mem_map)
123 {
124 struct tee_mmap_region *m = NULL;
125 struct tee_mmap_region *old = mem_map->map;
126 size_t old_sz = sizeof(*old) * mem_map->alloc_count;
127 size_t sz = old_sz + sizeof(*m);
128
129 assert(nex_malloc_buffer_is_within_alloced(old, old_sz));
130 m = nex_realloc(old, sz);
131 if (!m)
132 panic();
133 mem_map->map = m;
134 mem_map->alloc_count++;
135 }
136
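/*
 * Grow the memory map using temporary boot memory, doubling the
 * current allocation.
 */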
static void boot_mem_realloc_memory_map(struct memory_map *mem_map)
138 {
139 struct tee_mmap_region *m = NULL;
140 struct tee_mmap_region *old = mem_map->map;
141 size_t old_sz = sizeof(*old) * mem_map->alloc_count;
142 size_t sz = old_sz * 2;
143
144 m = boot_mem_alloc_tmp(sz, alignof(*m));
145 memcpy(m, old, old_sz);
146 mem_map->map = m;
147 mem_map->alloc_count *= 2;
148 }
149
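/* Reserve one more entry in @mem_map, reallocating the array when full */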
static void grow_mem_map(struct memory_map *mem_map)
151 {
152 if (mem_map->count == mem_map->alloc_count) {
153 if (!memory_map_realloc_func) {
154 EMSG("Out of entries (%zu) in mem_map",
155 mem_map->alloc_count);
156 panic();
157 }
158 memory_map_realloc_func(mem_map);
159 }
160 mem_map->count++;
161 }
162
void core_mmu_get_secure_memory(paddr_t *base, paddr_size_t *size)
164 {
165 /*
166 * The first range is always used to cover OP-TEE core memory, but
167 * depending on configuration it may cover more than that.
168 */
169 *base = secure_only[0].paddr;
170 *size = secure_only[0].size;
171 }
172
void core_mmu_set_secure_memory(paddr_t base, size_t size)
174 {
175 #ifdef CFG_CORE_PHYS_RELOCATABLE
176 static_assert(ARRAY_SIZE(secure_only) == 1);
177 #endif
178 runtime_assert(IS_ENABLED(CFG_CORE_PHYS_RELOCATABLE));
179 assert(!secure_only[0].size);
180 assert(base && size);
181
182 DMSG("Physical secure memory base %#"PRIxPA" size %#zx", base, size);
183 secure_only[0].paddr = base;
184 secure_only[0].size = size;
185 }
186
static struct memory_map *get_memory_map(void)
188 {
189 if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
190 struct memory_map *map = virt_get_memory_map();
191
192 if (map)
193 return map;
194 }
195
196 return &static_memory_map;
197 }
198
static bool _pbuf_intersects(struct memaccess_area *a, size_t alen,
			     paddr_t pa, size_t size)
201 {
202 size_t n;
203
204 for (n = 0; n < alen; n++)
205 if (core_is_buffer_intersect(pa, size, a[n].paddr, a[n].size))
206 return true;
207 return false;
208 }
209
210 #define pbuf_intersects(a, pa, size) \
211 _pbuf_intersects((a), ARRAY_SIZE(a), (pa), (size))
212
static bool _pbuf_is_inside(struct memaccess_area *a, size_t alen,
			    paddr_t pa, size_t size)
215 {
216 size_t n;
217
218 for (n = 0; n < alen; n++)
219 if (core_is_buffer_inside(pa, size, a[n].paddr, a[n].size))
220 return true;
221 return false;
222 }
223
224 #define pbuf_is_inside(a, pa, size) \
225 _pbuf_is_inside((a), ARRAY_SIZE(a), (pa), (size))
226
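/* Check if the physical range pa..pa + len - 1 is inside @map */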
static bool pa_is_in_map(struct tee_mmap_region *map, paddr_t pa, size_t len)
228 {
229 paddr_t end_pa = 0;
230
231 if (!map)
232 return false;
233
234 if (SUB_OVERFLOW(len, 1, &end_pa) || ADD_OVERFLOW(pa, end_pa, &end_pa))
235 return false;
236
237 return (pa >= map->pa && end_pa <= map->pa + map->size - 1);
238 }
239
static bool va_is_in_map(struct tee_mmap_region *map, vaddr_t va)
241 {
242 if (!map)
243 return false;
244 return (va >= map->va && va <= (map->va + map->size - 1));
245 }
246
247 /* check if target buffer fits in a core default map area */
static bool pbuf_inside_map_area(unsigned long p, size_t l,
				 struct tee_mmap_region *map)
250 {
251 return core_is_buffer_inside(p, l, map->pa, map->size);
252 }
253
TEE_Result core_mmu_for_each_map(void *ptr,
				 TEE_Result (*fn)(struct tee_mmap_region *map,
						  void *ptr))
257 {
258 struct memory_map *mem_map = get_memory_map();
259 TEE_Result res = TEE_SUCCESS;
260 size_t n = 0;
261
262 for (n = 0; n < mem_map->count; n++) {
263 res = fn(mem_map->map + n, ptr);
264 if (res)
265 return res;
266 }
267
268 return TEE_SUCCESS;
269 }
270
static struct tee_mmap_region *find_map_by_type(enum teecore_memtypes type)
272 {
273 struct memory_map *mem_map = get_memory_map();
274 size_t n = 0;
275
276 for (n = 0; n < mem_map->count; n++) {
277 if (mem_map->map[n].type == type)
278 return mem_map->map + n;
279 }
280 return NULL;
281 }
282
static struct tee_mmap_region *
find_map_by_type_and_pa(enum teecore_memtypes type, paddr_t pa, size_t len)
285 {
286 struct memory_map *mem_map = get_memory_map();
287 size_t n = 0;
288
289 for (n = 0; n < mem_map->count; n++) {
290 if (mem_map->map[n].type != type)
291 continue;
292 if (pa_is_in_map(mem_map->map + n, pa, len))
293 return mem_map->map + n;
294 }
295 return NULL;
296 }
297
static struct tee_mmap_region *find_map_by_va(void *va)
299 {
300 struct memory_map *mem_map = get_memory_map();
301 vaddr_t a = (vaddr_t)va;
302 size_t n = 0;
303
304 for (n = 0; n < mem_map->count; n++) {
305 if (a >= mem_map->map[n].va &&
306 a <= (mem_map->map[n].va - 1 + mem_map->map[n].size))
307 return mem_map->map + n;
308 }
309
310 return NULL;
311 }
312
static struct tee_mmap_region *find_map_by_pa(unsigned long pa)
314 {
315 struct memory_map *mem_map = get_memory_map();
316 size_t n = 0;
317
318 for (n = 0; n < mem_map->count; n++) {
319 /* Skip unmapped regions */
320 if ((mem_map->map[n].attr & TEE_MATTR_VALID_BLOCK) &&
321 pa >= mem_map->map[n].pa &&
322 pa <= (mem_map->map[n].pa - 1 + mem_map->map[n].size))
323 return mem_map->map + n;
324 }
325
326 return NULL;
327 }
328
329 #if defined(CFG_SECURE_DATA_PATH)
static bool dtb_get_sdp_region(void)
331 {
332 void *fdt = NULL;
333 int node = 0;
334 int tmp_node = 0;
335 paddr_t tmp_addr = 0;
336 size_t tmp_size = 0;
337
338 if (!IS_ENABLED(CFG_EMBED_DTB))
339 return false;
340
341 fdt = get_embedded_dt();
342 if (!fdt)
343 panic("No DTB found");
344
345 node = fdt_node_offset_by_compatible(fdt, -1, tz_sdp_match);
346 if (node < 0) {
347 DMSG("No %s compatible node found", tz_sdp_match);
348 return false;
349 }
350 tmp_node = node;
351 while (tmp_node >= 0) {
352 tmp_node = fdt_node_offset_by_compatible(fdt, tmp_node,
353 tz_sdp_match);
354 if (tmp_node >= 0)
355 DMSG("Ignore SDP pool node %s, supports only 1 node",
356 fdt_get_name(fdt, tmp_node, NULL));
357 }
358
359 if (fdt_reg_info(fdt, node, &tmp_addr, &tmp_size)) {
360 EMSG("%s: Unable to get base addr or size from DT",
361 tz_sdp_match);
362 return false;
363 }
364
365 sec_sdp.paddr = tmp_addr;
366 sec_sdp.size = tmp_size;
367
368 return true;
369 }
370 #endif
371
372 #if defined(CFG_CORE_DYN_SHM) || defined(CFG_SECURE_DATA_PATH)
static bool pbuf_is_special_mem(paddr_t pbuf, size_t len,
				const struct core_mmu_phys_mem *start,
				const struct core_mmu_phys_mem *end)
376 {
377 const struct core_mmu_phys_mem *mem;
378
379 for (mem = start; mem < end; mem++) {
380 if (core_is_buffer_inside(pbuf, len, mem->addr, mem->size))
381 return true;
382 }
383
384 return false;
385 }
386 #endif
387
388 #ifdef CFG_CORE_DYN_SHM
static void carve_out_phys_mem(struct core_mmu_phys_mem **mem, size_t *nelems,
			       paddr_t pa, size_t size)
391 {
392 struct core_mmu_phys_mem *m = *mem;
393 size_t n = 0;
394
395 while (n < *nelems) {
396 if (!core_is_buffer_intersect(pa, size, m[n].addr, m[n].size)) {
397 n++;
398 continue;
399 }
400
401 if (core_is_buffer_inside(m[n].addr, m[n].size, pa, size)) {
402 /* m[n] is completely covered by pa:size */
403 rem_array_elem(m, *nelems, sizeof(*m), n);
404 (*nelems)--;
405 m = nex_realloc(m, sizeof(*m) * *nelems);
406 if (!m)
407 panic();
408 *mem = m;
409 continue;
410 }
411
412 if (pa > m[n].addr &&
413 pa + size - 1 < m[n].addr + m[n].size - 1) {
414 /*
415 * pa:size is strictly inside m[n] range so split
416 * m[n] entry.
417 */
418 m = nex_realloc(m, sizeof(*m) * (*nelems + 1));
419 if (!m)
420 panic();
421 *mem = m;
422 (*nelems)++;
423 ins_array_elem(m, *nelems, sizeof(*m), n + 1, NULL);
424 m[n + 1].addr = pa + size;
425 m[n + 1].size = m[n].addr + m[n].size - pa - size;
426 m[n].size = pa - m[n].addr;
427 n++;
428 } else if (pa <= m[n].addr) {
429 /*
430 * pa:size is overlapping (possibly partially) at the
431 * beginning of m[n].
432 */
433 m[n].size = m[n].addr + m[n].size - pa - size;
434 m[n].addr = pa + size;
435 } else {
436 /*
437 * pa:size is overlapping (possibly partially) at
438 * the end of m[n].
439 */
440 m[n].size = pa - m[n].addr;
441 }
442 n++;
443 }
444 }
445
static void check_phys_mem_is_outside(struct core_mmu_phys_mem *start,
				      size_t nelems,
				      struct tee_mmap_region *map)
449 {
450 size_t n;
451
452 for (n = 0; n < nelems; n++) {
453 if (!core_is_buffer_outside(start[n].addr, start[n].size,
454 map->pa, map->size)) {
455 EMSG("Non-sec mem (%#" PRIxPA ":%#" PRIxPASZ
456 ") overlaps map (type %d %#" PRIxPA ":%#zx)",
457 start[n].addr, start[n].size,
458 map->type, map->pa, map->size);
459 panic();
460 }
461 }
462 }
463
464 static const struct core_mmu_phys_mem *discovered_nsec_ddr_start __nex_bss;
465 static size_t discovered_nsec_ddr_nelems __nex_bss;
466
static int cmp_pmem_by_addr(const void *a, const void *b)
468 {
469 const struct core_mmu_phys_mem *pmem_a = a;
470 const struct core_mmu_phys_mem *pmem_b = b;
471
472 return CMP_TRILEAN(pmem_a->addr, pmem_b->addr);
473 }
474
void core_mmu_set_discovered_nsec_ddr(struct core_mmu_phys_mem *start,
				      size_t nelems)
477 {
478 struct core_mmu_phys_mem *m = start;
479 size_t num_elems = nelems;
480 struct memory_map *mem_map = &static_memory_map;
481 const struct core_mmu_phys_mem __maybe_unused *pmem;
482 size_t n = 0;
483
484 assert(!discovered_nsec_ddr_start);
485 assert(m && num_elems);
486
487 qsort(m, num_elems, sizeof(*m), cmp_pmem_by_addr);
488
489 /*
490 * Non-secure shared memory and also secure data
491 * path memory are supposed to reside inside
492 * non-secure memory. Since NSEC_SHM and SDP_MEM
 493 	 * are used for a specific purpose, make holes for
 494 	 * that memory in the normal non-secure memory.
495 *
496 * This has to be done since for instance QEMU
497 * isn't aware of which memory range in the
498 * non-secure memory is used for NSEC_SHM.
499 */
500
501 #ifdef CFG_SECURE_DATA_PATH
502 if (dtb_get_sdp_region())
503 carve_out_phys_mem(&m, &num_elems, sec_sdp.paddr, sec_sdp.size);
504
505 for (pmem = phys_sdp_mem_begin; pmem < phys_sdp_mem_end; pmem++)
506 carve_out_phys_mem(&m, &num_elems, pmem->addr, pmem->size);
507 #endif
508
509 for (n = 0; n < ARRAY_SIZE(secure_only); n++)
510 carve_out_phys_mem(&m, &num_elems, secure_only[n].paddr,
511 secure_only[n].size);
512
513 for (n = 0; n < mem_map->count; n++) {
514 switch (mem_map->map[n].type) {
515 case MEM_AREA_NSEC_SHM:
516 carve_out_phys_mem(&m, &num_elems, mem_map->map[n].pa,
517 mem_map->map[n].size);
518 break;
519 case MEM_AREA_EXT_DT:
520 case MEM_AREA_MANIFEST_DT:
521 case MEM_AREA_RAM_NSEC:
522 case MEM_AREA_RES_VASPACE:
523 case MEM_AREA_SHM_VASPACE:
524 case MEM_AREA_TS_VASPACE:
525 case MEM_AREA_PAGER_VASPACE:
526 case MEM_AREA_NEX_DYN_VASPACE:
527 case MEM_AREA_TEE_DYN_VASPACE:
528 break;
529 default:
530 check_phys_mem_is_outside(m, num_elems,
531 mem_map->map + n);
532 }
533 }
534
535 discovered_nsec_ddr_start = m;
536 discovered_nsec_ddr_nelems = num_elems;
537
538 DMSG("Non-secure RAM:");
539 for (n = 0; n < num_elems; n++)
540 DMSG("%zu: pa %#"PRIxPA"..%#"PRIxPA" sz %#"PRIxPASZ,
541 n, m[n].addr, m[n].addr + m[n].size - 1, m[n].size);
542
543 if (!core_mmu_check_end_pa(m[num_elems - 1].addr,
544 m[num_elems - 1].size))
545 panic();
546 }
547
static bool get_discovered_nsec_ddr(const struct core_mmu_phys_mem **start,
				    const struct core_mmu_phys_mem **end)
550 {
551 if (!discovered_nsec_ddr_start)
552 return false;
553
554 *start = discovered_nsec_ddr_start;
555 *end = discovered_nsec_ddr_start + discovered_nsec_ddr_nelems;
556
557 return true;
558 }
559
static bool pbuf_is_nsec_ddr(paddr_t pbuf, size_t len)
561 {
562 const struct core_mmu_phys_mem *start;
563 const struct core_mmu_phys_mem *end;
564
565 if (!get_discovered_nsec_ddr(&start, &end))
566 return false;
567
568 return pbuf_is_special_mem(pbuf, len, start, end);
569 }
570
bool core_mmu_nsec_ddr_is_defined(void)
572 {
573 const struct core_mmu_phys_mem *start;
574 const struct core_mmu_phys_mem *end;
575
576 if (!get_discovered_nsec_ddr(&start, &end))
577 return false;
578
579 return start != end;
580 }
581 #else
static bool pbuf_is_nsec_ddr(paddr_t pbuf __unused, size_t len __unused)
583 {
584 return false;
585 }
586 #endif /*CFG_CORE_DYN_SHM*/
587
588 #define MSG_MEM_INSTERSECT(pa1, sz1, pa2, sz2) \
589 EMSG("[%" PRIxPA " %" PRIx64 "] intersects [%" PRIxPA " %" PRIx64 "]", \
590 pa1, (uint64_t)pa1 + (sz1), pa2, (uint64_t)pa2 + (sz2))
591
592 #ifdef CFG_SECURE_DATA_PATH
static bool pbuf_is_sdp_mem(paddr_t pbuf, size_t len)
594 {
595 bool is_sdp_mem = false;
596
597 if (sec_sdp.size)
598 is_sdp_mem = core_is_buffer_inside(pbuf, len, sec_sdp.paddr,
599 sec_sdp.size);
600
601 if (!is_sdp_mem)
602 is_sdp_mem = pbuf_is_special_mem(pbuf, len, phys_sdp_mem_begin,
603 phys_sdp_mem_end);
604
605 if (!is_sdp_mem) {
606 struct mobj *m = mobj_protmem_get_by_pa(pbuf, len);
607
608 if (!m)
609 m = mobj_ffa_protmem_get_by_pa(pbuf, len);
610 if (m) {
611 mobj_put(m);
612 is_sdp_mem = true;
613 }
614 }
615
616 return is_sdp_mem;
617 }
618
static struct mobj *core_sdp_mem_alloc_mobj(paddr_t pa, size_t size)
620 {
621 struct mobj *mobj = mobj_phys_alloc(pa, size, TEE_MATTR_MEM_TYPE_CACHED,
622 CORE_MEM_SDP_MEM);
623
624 if (!mobj)
625 panic("can't create SDP physical memory object");
626
627 return mobj;
628 }
629
struct mobj **core_sdp_mem_create_mobjs(void)
631 {
632 const struct core_mmu_phys_mem *mem = NULL;
633 struct mobj **mobj_base = NULL;
634 struct mobj **mobj = NULL;
635 int cnt = phys_sdp_mem_end - phys_sdp_mem_begin;
636
637 if (sec_sdp.size)
638 cnt++;
639
640 /* SDP mobjs table must end with a NULL entry */
641 mobj_base = calloc(cnt + 1, sizeof(struct mobj *));
642 if (!mobj_base)
643 panic("Out of memory");
644
645 mobj = mobj_base;
646
647 for (mem = phys_sdp_mem_begin; mem < phys_sdp_mem_end; mem++, mobj++)
648 *mobj = core_sdp_mem_alloc_mobj(mem->addr, mem->size);
649
650 if (sec_sdp.size)
651 *mobj = core_sdp_mem_alloc_mobj(sec_sdp.paddr, sec_sdp.size);
652
653 return mobj_base;
654 }
655
656 #else /* CFG_SECURE_DATA_PATH */
static bool pbuf_is_sdp_mem(paddr_t pbuf __unused, size_t len __unused)
658 {
659 return false;
660 }
661
662 #endif /* CFG_SECURE_DATA_PATH */
663
664 /* Check special memories comply with registered memories */
static void verify_special_mem_areas(struct memory_map *mem_map,
				     const struct core_mmu_phys_mem *start,
				     const struct core_mmu_phys_mem *end,
				     const char *area_name __maybe_unused)
669 {
670 const struct core_mmu_phys_mem *mem = NULL;
671 const struct core_mmu_phys_mem *mem2 = NULL;
672 size_t n = 0;
673
674 if (start == end) {
675 DMSG("No %s memory area defined", area_name);
676 return;
677 }
678
679 for (mem = start; mem < end; mem++)
680 DMSG("%s memory [%" PRIxPA " %" PRIx64 "]",
681 area_name, mem->addr, (uint64_t)mem->addr + mem->size);
682
683 /* Check memories do not intersect each other */
684 for (mem = start; mem + 1 < end; mem++) {
685 for (mem2 = mem + 1; mem2 < end; mem2++) {
686 if (core_is_buffer_intersect(mem2->addr, mem2->size,
687 mem->addr, mem->size)) {
688 MSG_MEM_INSTERSECT(mem2->addr, mem2->size,
689 mem->addr, mem->size);
690 panic("Special memory intersection");
691 }
692 }
693 }
694
695 /*
696 * Check memories do not intersect any mapped memory.
697 * This is called before reserved VA space is loaded in mem_map.
698 */
699 for (mem = start; mem < end; mem++) {
700 for (n = 0; n < mem_map->count; n++) {
701 #ifdef TEE_SDP_TEST_MEM_BASE
702 /*
703 * Ignore MEM_AREA_SEC_RAM_OVERALL since it covers
704 * TEE_SDP_TEST_MEM too.
705 */
706 if (mem->addr == TEE_SDP_TEST_MEM_BASE &&
707 mem->size == TEE_SDP_TEST_MEM_SIZE &&
708 mem_map->map[n].type == MEM_AREA_SEC_RAM_OVERALL)
709 continue;
710 #endif
711 if (core_is_buffer_intersect(mem->addr, mem->size,
712 mem_map->map[n].pa,
713 mem_map->map[n].size)) {
714 MSG_MEM_INSTERSECT(mem->addr, mem->size,
715 mem_map->map[n].pa,
716 mem_map->map[n].size);
717 panic("Special memory intersection");
718 }
719 }
720 }
721 }
722
static void merge_mmaps(struct tee_mmap_region *dst,
			const struct tee_mmap_region *src)
725 {
726 paddr_t end_pa = MAX(dst->pa + dst->size - 1, src->pa + src->size - 1);
727 paddr_t pa = MIN(dst->pa, src->pa);
728
729 DMSG("Merging %#"PRIxPA"..%#"PRIxPA" and %#"PRIxPA"..%#"PRIxPA,
730 dst->pa, dst->pa + dst->size - 1, src->pa,
731 src->pa + src->size - 1);
732 dst->pa = pa;
733 dst->size = end_pa - pa + 1;
734 }
735
static bool mmaps_are_mergeable(const struct tee_mmap_region *r1,
				const struct tee_mmap_region *r2)
738 {
739 if (r1->type != r2->type)
740 return false;
741
742 if (r1->pa == r2->pa)
743 return true;
744
745 if (r1->pa < r2->pa)
746 return r1->pa + r1->size >= r2->pa;
747 else
748 return r2->pa + r2->size >= r1->pa;
749 }
750
static void add_phys_mem(struct memory_map *mem_map,
			 const char *mem_name __maybe_unused,
			 enum teecore_memtypes mem_type,
			 paddr_t mem_addr, paddr_size_t mem_size)
755 {
756 size_t n = 0;
757 const struct tee_mmap_region m0 = {
758 .type = mem_type,
759 .pa = mem_addr,
760 .size = mem_size,
761 };
762
763 if (!mem_size) /* Discard null size entries */
764 return;
765
766 /*
 767 	 * If some ranges of memory of the same type overlap each
 768 	 * other, they are coalesced into one entry. To help this,
 769 	 * added entries are sorted by increasing physical address.
770 *
771 * Note that it's valid to have the same physical memory as several
772 * different memory types, for instance the same device memory
773 * mapped as both secure and non-secure. This will probably not
774 * happen often in practice.
775 */
776 DMSG("%s type %s 0x%08" PRIxPA " size 0x%08" PRIxPASZ,
777 mem_name, teecore_memtype_name(mem_type), mem_addr, mem_size);
778 for (n = 0; n < mem_map->count; n++) {
779 if (mmaps_are_mergeable(mem_map->map + n, &m0)) {
780 merge_mmaps(mem_map->map + n, &m0);
781 /*
782 * The merged result might be mergeable with the
783 * next or previous entry.
784 */
785 if (n + 1 < mem_map->count &&
786 mmaps_are_mergeable(mem_map->map + n,
787 mem_map->map + n + 1)) {
788 merge_mmaps(mem_map->map + n,
789 mem_map->map + n + 1);
790 rem_array_elem(mem_map->map, mem_map->count,
791 sizeof(*mem_map->map), n + 1);
792 mem_map->count--;
793 }
794 if (n > 0 && mmaps_are_mergeable(mem_map->map + n - 1,
795 mem_map->map + n)) {
796 merge_mmaps(mem_map->map + n - 1,
797 mem_map->map + n);
798 rem_array_elem(mem_map->map, mem_map->count,
799 sizeof(*mem_map->map), n);
800 mem_map->count--;
801 }
802 return;
803 }
804 if (mem_type < mem_map->map[n].type ||
805 (mem_type == mem_map->map[n].type &&
806 mem_addr < mem_map->map[n].pa))
807 break; /* found the spot where to insert this memory */
808 }
809
810 grow_mem_map(mem_map);
811 ins_array_elem(mem_map->map, mem_map->count, sizeof(*mem_map->map),
812 n, &m0);
813 }
814
static void add_va_space(struct memory_map *mem_map,
			 enum teecore_memtypes type, size_t size)
817 {
818 size_t n = 0;
819
820 DMSG("type %s size 0x%08zx", teecore_memtype_name(type), size);
821 for (n = 0; n < mem_map->count; n++) {
822 if (type < mem_map->map[n].type)
823 break;
824 }
825
826 grow_mem_map(mem_map);
827 ins_array_elem(mem_map->map, mem_map->count, sizeof(*mem_map->map),
828 n, NULL);
829 mem_map->map[n] = (struct tee_mmap_region){
830 .type = type,
831 .size = size,
832 };
833 }
834
uint32_t core_mmu_type_to_attr(enum teecore_memtypes t)
836 {
837 const uint32_t attr = TEE_MATTR_VALID_BLOCK;
838 const uint32_t tagged = TEE_MATTR_MEM_TYPE_TAGGED <<
839 TEE_MATTR_MEM_TYPE_SHIFT;
840 const uint32_t cached = TEE_MATTR_MEM_TYPE_CACHED <<
841 TEE_MATTR_MEM_TYPE_SHIFT;
842 const uint32_t noncache = TEE_MATTR_MEM_TYPE_DEV <<
843 TEE_MATTR_MEM_TYPE_SHIFT;
844
845 switch (t) {
846 case MEM_AREA_TEE_RAM:
847 return attr | TEE_MATTR_SECURE | TEE_MATTR_PRWX | tagged;
848 case MEM_AREA_TEE_RAM_RX:
849 case MEM_AREA_INIT_RAM_RX:
850 case MEM_AREA_IDENTITY_MAP_RX:
851 return attr | TEE_MATTR_SECURE | TEE_MATTR_PRX | tagged;
852 case MEM_AREA_TEE_RAM_RO:
853 case MEM_AREA_INIT_RAM_RO:
854 return attr | TEE_MATTR_SECURE | TEE_MATTR_PR | tagged;
855 case MEM_AREA_TEE_RAM_RW:
856 case MEM_AREA_NEX_RAM_RO: /* This has to be r/w during init runtime */
857 case MEM_AREA_NEX_RAM_RW:
858 case MEM_AREA_NEX_DYN_VASPACE:
859 case MEM_AREA_TEE_DYN_VASPACE:
860 case MEM_AREA_TEE_ASAN:
861 return attr | TEE_MATTR_SECURE | TEE_MATTR_PRW | tagged;
862 case MEM_AREA_TEE_COHERENT:
863 return attr | TEE_MATTR_SECURE | TEE_MATTR_PRWX | noncache;
864 case MEM_AREA_NSEC_SHM:
865 case MEM_AREA_NEX_NSEC_SHM:
866 return attr | TEE_MATTR_PRW | cached;
867 case MEM_AREA_MANIFEST_DT:
868 return attr | TEE_MATTR_SECURE | TEE_MATTR_PR | cached;
869 case MEM_AREA_TRANSFER_LIST:
870 return attr | TEE_MATTR_SECURE | TEE_MATTR_PRW | cached;
871 case MEM_AREA_EXT_DT:
872 /*
873 * If CFG_MAP_EXT_DT_SECURE is enabled map the external device
874 * tree as secure non-cached memory, otherwise, fall back to
875 * non-secure mapping.
876 */
877 if (IS_ENABLED(CFG_MAP_EXT_DT_SECURE))
878 return attr | TEE_MATTR_SECURE | TEE_MATTR_PRW |
879 noncache;
880 fallthrough;
881 case MEM_AREA_IO_NSEC:
882 return attr | TEE_MATTR_PRW | noncache;
883 case MEM_AREA_IO_SEC:
884 return attr | TEE_MATTR_SECURE | TEE_MATTR_PRW | noncache;
885 case MEM_AREA_RAM_NSEC:
886 return attr | TEE_MATTR_PRW | cached;
887 case MEM_AREA_RAM_SEC:
888 return attr | TEE_MATTR_SECURE | TEE_MATTR_PRW | cached;
889 case MEM_AREA_SEC_RAM_OVERALL:
890 return attr | TEE_MATTR_SECURE | TEE_MATTR_PRW | tagged;
891 case MEM_AREA_ROM_SEC:
892 return attr | TEE_MATTR_SECURE | TEE_MATTR_PR | cached;
893 case MEM_AREA_RES_VASPACE:
894 case MEM_AREA_SHM_VASPACE:
895 return 0;
896 case MEM_AREA_PAGER_VASPACE:
897 return TEE_MATTR_SECURE;
898 default:
899 panic("invalid type");
900 }
901 }
902
static bool __maybe_unused map_is_tee_ram(const struct tee_mmap_region *mm)
904 {
905 switch (mm->type) {
906 case MEM_AREA_TEE_RAM:
907 case MEM_AREA_TEE_RAM_RX:
908 case MEM_AREA_TEE_RAM_RO:
909 case MEM_AREA_TEE_RAM_RW:
910 case MEM_AREA_INIT_RAM_RX:
911 case MEM_AREA_INIT_RAM_RO:
912 case MEM_AREA_NEX_RAM_RW:
913 case MEM_AREA_NEX_RAM_RO:
914 case MEM_AREA_TEE_ASAN:
915 return true;
916 default:
917 return false;
918 }
919 }
920
static bool __maybe_unused map_is_secure(const struct tee_mmap_region *mm)
922 {
923 return !!(core_mmu_type_to_attr(mm->type) & TEE_MATTR_SECURE);
924 }
925
static bool __maybe_unused map_is_pgdir(const struct tee_mmap_region *mm)
927 {
928 return mm->region_size == CORE_MMU_PGDIR_SIZE;
929 }
930
static int cmp_mmap_by_lower_va(const void *a, const void *b)
932 {
933 const struct tee_mmap_region *mm_a = a;
934 const struct tee_mmap_region *mm_b = b;
935
936 return CMP_TRILEAN(mm_a->va, mm_b->va);
937 }
938
static void dump_mmap_table(struct memory_map *mem_map)
940 {
941 size_t n = 0;
942
943 for (n = 0; n < mem_map->count; n++) {
944 struct tee_mmap_region *map __maybe_unused = mem_map->map + n;
945
946 DMSG("type %-12s va 0x%08" PRIxVA "..0x%08" PRIxVA
947 " pa 0x%08" PRIxPA "..0x%08" PRIxPA " size 0x%08zx (%s)",
948 teecore_memtype_name(map->type), map->va,
949 map->va + map->size - 1, map->pa,
950 (paddr_t)(map->pa + map->size - 1), map->size,
951 map->region_size == SMALL_PAGE_SIZE ? "smallpg" : "pgdir");
952 }
953 }
954
955 #if DEBUG_XLAT_TABLE
956
static void dump_xlat_table(vaddr_t va, unsigned int level)
958 {
959 struct core_mmu_table_info tbl_info;
960 unsigned int idx = 0;
961 paddr_t pa;
962 uint32_t attr;
963
964 core_mmu_find_table(NULL, va, level, &tbl_info);
965 va = tbl_info.va_base;
966 for (idx = 0; idx < tbl_info.num_entries; idx++) {
967 core_mmu_get_entry(&tbl_info, idx, &pa, &attr);
968 if (attr || level > CORE_MMU_BASE_TABLE_LEVEL) {
969 const char *security_bit = "";
970
971 if (core_mmu_entry_have_security_bit(attr)) {
972 if (attr & TEE_MATTR_SECURE)
973 security_bit = "S";
974 else
975 security_bit = "NS";
976 }
977
978 if (attr & TEE_MATTR_TABLE) {
979 DMSG_RAW("%*s [LVL%d] VA:0x%010" PRIxVA
980 " TBL:0x%010" PRIxPA " %s",
981 level * 2, "", level, va, pa,
982 security_bit);
983 dump_xlat_table(va, level + 1);
984 } else if (attr) {
985 DMSG_RAW("%*s [LVL%d] VA:0x%010" PRIxVA
986 " PA:0x%010" PRIxPA " %s-%s-%s-%s",
987 level * 2, "", level, va, pa,
988 mattr_is_cached(attr) ? "MEM" :
989 "DEV",
990 attr & TEE_MATTR_PW ? "RW" : "RO",
991 attr & TEE_MATTR_PX ? "X " : "XN",
992 security_bit);
993 } else {
994 DMSG_RAW("%*s [LVL%d] VA:0x%010" PRIxVA
995 " INVALID\n",
996 level * 2, "", level, va);
997 }
998 }
999 va += BIT64(tbl_info.shift);
1000 }
1001 }
1002
1003 #else
1004
static void dump_xlat_table(vaddr_t va __unused, int level __unused)
1006 {
1007 }
1008
1009 #endif
1010
1011 /*
1012 * Reserves virtual memory space for pager usage.
1013 *
 1014  * Starting from the first memory address used by the link script, a range of
 1015  * TEE_RAM_VA_SIZE should be covered, either with a direct mapping or an empty
 1016  * mapping for pager usage. This adds translation tables as needed for the
1017 * pager to operate.
1018 */
static void add_pager_vaspace(struct memory_map *mem_map)
1020 {
1021 paddr_t begin = 0;
1022 paddr_t end = 0;
1023 size_t size = 0;
1024 size_t pos = 0;
1025 size_t n = 0;
1026
1027
1028 for (n = 0; n < mem_map->count; n++) {
1029 if (map_is_tee_ram(mem_map->map + n)) {
1030 if (!begin)
1031 begin = mem_map->map[n].pa;
1032 pos = n + 1;
1033 }
1034 }
1035
1036 end = mem_map->map[pos - 1].pa + mem_map->map[pos - 1].size;
1037 assert(end - begin < TEE_RAM_VA_SIZE);
1038 size = TEE_RAM_VA_SIZE - (end - begin);
1039
1040 grow_mem_map(mem_map);
1041 ins_array_elem(mem_map->map, mem_map->count, sizeof(*mem_map->map),
1042 n, NULL);
1043 mem_map->map[n] = (struct tee_mmap_region){
1044 .type = MEM_AREA_PAGER_VASPACE,
1045 .size = size,
1046 .region_size = SMALL_PAGE_SIZE,
1047 .attr = core_mmu_type_to_attr(MEM_AREA_PAGER_VASPACE),
1048 };
1049 }
1050
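/* Panic if any secure-only range intersects the non-secure shared memory */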
static void check_sec_nsec_mem_config(void)
1052 {
1053 size_t n = 0;
1054
1055 for (n = 0; n < ARRAY_SIZE(secure_only); n++) {
1056 if (pbuf_intersects(nsec_shared, secure_only[n].paddr,
1057 secure_only[n].size))
1058 panic("Invalid memory access config: sec/nsec");
1059 }
1060 }
1061
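/*
 * Add the device regions described in the SP manifest
 * ("arm,ffa-manifest-device-regions") as MEM_AREA_IO_SEC mappings.
 */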
static void collect_device_mem_ranges(struct memory_map *mem_map)
1063 {
1064 const char *compatible = "arm,ffa-manifest-device-regions";
1065 void *fdt = get_manifest_dt();
1066 const char *name = NULL;
1067 uint64_t page_count = 0;
1068 uint64_t base = 0;
1069 int subnode = 0;
1070 int node = 0;
1071
1072 assert(fdt);
1073
1074 node = fdt_node_offset_by_compatible(fdt, 0, compatible);
1075 if (node < 0)
1076 return;
1077
1078 fdt_for_each_subnode(subnode, fdt, node) {
1079 name = fdt_get_name(fdt, subnode, NULL);
1080 if (!name)
1081 continue;
1082
1083 if (dt_getprop_as_number(fdt, subnode, "base-address",
1084 &base)) {
1085 EMSG("Mandatory field is missing: base-address");
1086 continue;
1087 }
1088
1089 if (base & SMALL_PAGE_MASK) {
1090 EMSG("base-address is not page aligned");
1091 continue;
1092 }
1093
1094 if (dt_getprop_as_number(fdt, subnode, "pages-count",
1095 &page_count)) {
1096 EMSG("Mandatory field is missing: pages-count");
1097 continue;
1098 }
1099
1100 add_phys_mem(mem_map, name, MEM_AREA_IO_SEC,
1101 base, page_count * SMALL_PAGE_SIZE);
1102 }
1103 }
1104
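/*
 * Collect the physical memory ranges to map: OP-TEE core RAM, registered
 * platform memories and the reserved virtual address spaces.
 */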
static void collect_mem_ranges(struct memory_map *mem_map)
1106 {
1107 const struct core_mmu_phys_mem *mem = NULL;
1108 vaddr_t ram_start = secure_only[0].paddr;
1109 size_t n = 0;
1110
1111 #define ADD_PHYS_MEM(_type, _addr, _size) \
1112 add_phys_mem(mem_map, #_addr, (_type), (_addr), (_size))
1113
1114 if (IS_ENABLED(CFG_CORE_RWDATA_NOEXEC)) {
1115 paddr_t next_pa = 0;
1116
1117 /*
1118 * Read-only and read-execute physical memory areas must
1119 * not be mapped by MEM_AREA_SEC_RAM_OVERALL, but all the
 1120 		 * read/write areas should be.
1121 */
1122 ADD_PHYS_MEM(MEM_AREA_SEC_RAM_OVERALL, ram_start,
1123 VCORE_UNPG_RX_PA - ram_start);
1124 assert(VCORE_UNPG_RX_PA >= ram_start);
1125 tee_ram_initial_offs = VCORE_UNPG_RX_PA - ram_start;
1126 DMSG("tee_ram_initial_offs %#zx", tee_ram_initial_offs);
1127 ADD_PHYS_MEM(MEM_AREA_TEE_RAM_RX, VCORE_UNPG_RX_PA,
1128 VCORE_UNPG_RX_SZ);
1129 ADD_PHYS_MEM(MEM_AREA_TEE_RAM_RO, VCORE_UNPG_RO_PA,
1130 VCORE_UNPG_RO_SZ);
1131
1132 if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
1133 ADD_PHYS_MEM(MEM_AREA_NEX_RAM_RO, VCORE_UNPG_RW_PA,
1134 VCORE_UNPG_RW_SZ);
1135 ADD_PHYS_MEM(MEM_AREA_SEC_RAM_OVERALL, VCORE_UNPG_RW_PA,
1136 VCORE_UNPG_RW_SZ);
1137
1138 ADD_PHYS_MEM(MEM_AREA_NEX_RAM_RW, VCORE_NEX_RW_PA,
1139 VCORE_NEX_RW_SZ);
1140 ADD_PHYS_MEM(MEM_AREA_SEC_RAM_OVERALL, VCORE_NEX_RW_PA,
1141 VCORE_NEX_RW_SZ);
1142
1143 ADD_PHYS_MEM(MEM_AREA_NEX_RAM_RW, VCORE_FREE_PA,
1144 VCORE_FREE_SZ);
1145 ADD_PHYS_MEM(MEM_AREA_SEC_RAM_OVERALL, VCORE_FREE_PA,
1146 VCORE_FREE_SZ);
1147 next_pa = VCORE_FREE_PA + VCORE_FREE_SZ;
1148 } else {
1149 ADD_PHYS_MEM(MEM_AREA_TEE_RAM_RW, VCORE_UNPG_RW_PA,
1150 VCORE_UNPG_RW_SZ);
1151 ADD_PHYS_MEM(MEM_AREA_SEC_RAM_OVERALL, VCORE_UNPG_RW_PA,
1152 VCORE_UNPG_RW_SZ);
1153
1154 ADD_PHYS_MEM(MEM_AREA_TEE_RAM_RW, VCORE_FREE_PA,
1155 VCORE_FREE_SZ);
1156 ADD_PHYS_MEM(MEM_AREA_SEC_RAM_OVERALL, VCORE_FREE_PA,
1157 VCORE_FREE_SZ);
1158 next_pa = VCORE_FREE_PA + VCORE_FREE_SZ;
1159 }
1160
1161 if (IS_ENABLED(CFG_WITH_PAGER)) {
1162 paddr_t pa = 0;
1163 size_t sz = 0;
1164
1165 ADD_PHYS_MEM(MEM_AREA_INIT_RAM_RX, VCORE_INIT_RX_PA,
1166 VCORE_INIT_RX_SZ);
1167 ADD_PHYS_MEM(MEM_AREA_INIT_RAM_RO, VCORE_INIT_RO_PA,
1168 VCORE_INIT_RO_SZ);
1169 /*
 1170 			 * Core init mapping shall cover up to the end of the
1171 * physical RAM. This is required since the hash
1172 * table is appended to the binary data after the
1173 * firmware build sequence.
1174 */
1175 pa = VCORE_INIT_RO_PA + VCORE_INIT_RO_SZ;
1176 sz = TEE_RAM_START + TEE_RAM_PH_SIZE - pa;
1177 ADD_PHYS_MEM(MEM_AREA_TEE_RAM, pa, sz);
1178 } else {
1179 ADD_PHYS_MEM(MEM_AREA_SEC_RAM_OVERALL, next_pa,
1180 secure_only[0].paddr +
1181 secure_only[0].size - next_pa);
1182 }
1183 } else {
1184 ADD_PHYS_MEM(MEM_AREA_TEE_RAM, TEE_RAM_START, TEE_RAM_PH_SIZE);
1185 ADD_PHYS_MEM(MEM_AREA_SEC_RAM_OVERALL, secure_only[n].paddr,
1186 secure_only[0].size);
1187 }
1188
1189 for (n = 1; n < ARRAY_SIZE(secure_only); n++)
1190 ADD_PHYS_MEM(MEM_AREA_SEC_RAM_OVERALL, secure_only[n].paddr,
1191 secure_only[n].size);
1192
1193 if (IS_ENABLED(CFG_CORE_SANITIZE_KADDRESS))
1194 ADD_PHYS_MEM(MEM_AREA_TEE_ASAN, ASAN_MAP_PA, ASAN_MAP_SZ);
1195
1196 #undef ADD_PHYS_MEM
1197
1198 /* Collect device memory info from SP manifest */
1199 if (IS_ENABLED(CFG_CORE_SEL2_SPMC))
1200 collect_device_mem_ranges(mem_map);
1201
1202 for (mem = phys_mem_map_begin; mem < phys_mem_map_end; mem++) {
1203 /* Only unmapped virtual range may have a null phys addr */
1204 assert(mem->addr || !core_mmu_type_to_attr(mem->type));
1205
1206 add_phys_mem(mem_map, mem->name, mem->type,
1207 mem->addr, mem->size);
1208 }
1209
1210 if (IS_ENABLED(CFG_SECURE_DATA_PATH))
1211 verify_special_mem_areas(mem_map, phys_sdp_mem_begin,
1212 phys_sdp_mem_end, "SDP");
1213
1214 add_va_space(mem_map, MEM_AREA_RES_VASPACE, CFG_RESERVED_VASPACE_SIZE);
1215 add_va_space(mem_map, MEM_AREA_SHM_VASPACE, SHM_VASPACE_SIZE);
1216 if (IS_ENABLED(CFG_DYN_CONFIG)) {
1217 if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
1218 add_va_space(mem_map, MEM_AREA_NEX_DYN_VASPACE,
1219 ROUNDUP(CFG_NEX_DYN_VASPACE_SIZE,
1220 CORE_MMU_PGDIR_SIZE));
1221 add_va_space(mem_map, MEM_AREA_TEE_DYN_VASPACE,
1222 CFG_TEE_DYN_VASPACE_SIZE);
1223 }
1224 }
1225
static void assign_mem_granularity(struct memory_map *mem_map)
1227 {
1228 size_t n = 0;
1229
1230 /*
1231 * Assign region sizes, note that MEM_AREA_TEE_RAM always uses
1232 * SMALL_PAGE_SIZE.
1233 */
1234 for (n = 0; n < mem_map->count; n++) {
1235 paddr_t mask = mem_map->map[n].pa | mem_map->map[n].size;
1236
1237 if (mask & SMALL_PAGE_MASK)
1238 panic("Impossible memory alignment");
1239
1240 if (map_is_tee_ram(mem_map->map + n))
1241 mem_map->map[n].region_size = SMALL_PAGE_SIZE;
1242 else
1243 mem_map->map[n].region_size = CORE_MMU_PGDIR_SIZE;
1244 }
1245 }
1246
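/*
 * Place TEE RAM at the top of the VA space when its address is in the
 * upper half of the addressable range.
 */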
static bool place_tee_ram_at_top(paddr_t paddr)
1248 {
1249 return paddr > BIT64(core_mmu_get_va_width()) / 2;
1250 }
1251
1252 /*
1253 * MMU arch driver shall override this function if it helps
1254 * optimizing the memory footprint of the address translation tables.
1255 */
bool __weak core_mmu_prefer_tee_ram_at_top(paddr_t paddr)
1257 {
1258 return place_tee_ram_at_top(paddr);
1259 }
1260
static bool assign_mem_va_dir(vaddr_t tee_ram_va, struct memory_map *mem_map,
			      bool tee_ram_at_top)
1263 {
1264 struct tee_mmap_region *map = NULL;
1265 bool va_is_nex_shared = false;
1266 bool va_is_secure = true;
1267 vaddr_t va = 0;
1268 size_t n = 0;
1269
1270 /*
 1271 	 * tee_ram_va might equal 0 when CFG_CORE_ASLR=y.
1272 * 0 is by design an invalid va, so return false directly.
1273 */
1274 if (!tee_ram_va)
1275 return false;
1276
1277 /* Clear eventual previous assignments */
1278 for (n = 0; n < mem_map->count; n++)
1279 mem_map->map[n].va = 0;
1280
1281 /*
1282 * TEE RAM regions are always aligned with region_size.
1283 *
1284 * Note that MEM_AREA_PAGER_VASPACE also counts as TEE RAM here
1285 * since it handles virtual memory which covers the part of the ELF
1286 * that cannot fit directly into memory.
1287 */
1288 va = tee_ram_va + tee_ram_initial_offs;
1289 for (n = 0; n < mem_map->count; n++) {
1290 map = mem_map->map + n;
1291 if (map_is_tee_ram(map) ||
1292 map->type == MEM_AREA_PAGER_VASPACE) {
1293 assert(!(va & (map->region_size - 1)));
1294 assert(!(map->size & (map->region_size - 1)));
1295 map->va = va;
1296 if (ADD_OVERFLOW(va, map->size, &va))
1297 return false;
1298 if (!core_mmu_va_is_valid(va))
1299 return false;
1300 }
1301 }
1302
1303 if (tee_ram_at_top) {
1304 /*
1305 * Map non-tee ram regions at addresses lower than the tee
1306 * ram region.
1307 */
1308 va = tee_ram_va;
1309 for (n = 0; n < mem_map->count; n++) {
1310 map = mem_map->map + n;
1311 map->attr = core_mmu_type_to_attr(map->type);
1312 if (map->va)
1313 continue;
1314
1315 if (!IS_ENABLED(CFG_WITH_LPAE) &&
1316 va_is_secure != map_is_secure(map)) {
1317 va_is_secure = !va_is_secure;
1318 va = ROUNDDOWN(va, CORE_MMU_PGDIR_SIZE);
1319 } else if (va_is_nex_shared !=
1320 core_mmu_type_is_nex_shared(map->type)) {
1321 va_is_nex_shared = !va_is_nex_shared;
1322 va = ROUNDDOWN(va, CORE_MMU_PGDIR_SIZE);
1323 }
1324
1325 if (SUB_OVERFLOW(va, map->size, &va))
1326 return false;
1327 va = ROUNDDOWN2(va, map->region_size);
1328 /*
1329 * Make sure that va is aligned with pa for
1330 * efficient pgdir mapping. Basically pa &
1331 * pgdir_mask should be == va & pgdir_mask
1332 */
1333 if (map->size > 2 * CORE_MMU_PGDIR_SIZE) {
1334 if (SUB_OVERFLOW(va, CORE_MMU_PGDIR_SIZE, &va))
1335 return false;
1336 va += (map->pa - va) & CORE_MMU_PGDIR_MASK;
1337 }
1338 map->va = va;
1339 }
1340 } else {
1341 /*
1342 * Map non-tee ram regions at addresses higher than the tee
1343 * ram region.
1344 */
1345 for (n = 0; n < mem_map->count; n++) {
1346 map = mem_map->map + n;
1347 map->attr = core_mmu_type_to_attr(map->type);
1348 if (map->va)
1349 continue;
1350
1351 if (!IS_ENABLED(CFG_WITH_LPAE) &&
1352 va_is_secure != map_is_secure(map)) {
1353 va_is_secure = !va_is_secure;
1354 if (ROUNDUP_OVERFLOW(va, CORE_MMU_PGDIR_SIZE,
1355 &va))
1356 return false;
1357 } else if (va_is_nex_shared !=
1358 core_mmu_type_is_nex_shared(map->type)) {
1359 va_is_nex_shared = !va_is_nex_shared;
1360 if (ROUNDUP_OVERFLOW(va, CORE_MMU_PGDIR_SIZE,
1361 &va))
1362 return false;
1363 }
1364
1365 if (ROUNDUP2_OVERFLOW(va, map->region_size, &va))
1366 return false;
1367 /*
1368 * Make sure that va is aligned with pa for
1369 * efficient pgdir mapping. Basically pa &
1370 * pgdir_mask should be == va & pgdir_mask
1371 */
1372 if (map->size > 2 * CORE_MMU_PGDIR_SIZE) {
1373 vaddr_t offs = (map->pa - va) &
1374 CORE_MMU_PGDIR_MASK;
1375
1376 if (ADD_OVERFLOW(va, offs, &va))
1377 return false;
1378 }
1379
1380 map->va = va;
1381 if (ADD_OVERFLOW(va, map->size, &va))
1382 return false;
1383 if (!core_mmu_va_is_valid(va))
1384 return false;
1385 }
1386 }
1387
1388 return true;
1389 }
1390
static bool assign_mem_va(vaddr_t tee_ram_va, struct memory_map *mem_map)
1392 {
1393 bool tee_ram_at_top = place_tee_ram_at_top(tee_ram_va);
1394
1395 /*
1396 * Check that we're not overlapping with the user VA range.
1397 */
1398 if (IS_ENABLED(CFG_WITH_LPAE)) {
1399 /*
1400 * User VA range is supposed to be defined after these
1401 * mappings have been established.
1402 */
1403 assert(!core_mmu_user_va_range_is_defined());
1404 } else {
1405 vaddr_t user_va_base = 0;
1406 size_t user_va_size = 0;
1407
1408 assert(core_mmu_user_va_range_is_defined());
1409 core_mmu_get_user_va_range(&user_va_base, &user_va_size);
1410 if (tee_ram_va < (user_va_base + user_va_size))
1411 return false;
1412 }
1413
1414 if (IS_ENABLED(CFG_WITH_PAGER)) {
1415 bool prefered_dir = core_mmu_prefer_tee_ram_at_top(tee_ram_va);
1416
1417 /* Try whole mapping covered by a single base xlat entry */
1418 if (prefered_dir != tee_ram_at_top &&
1419 assign_mem_va_dir(tee_ram_va, mem_map, prefered_dir))
1420 return true;
1421 }
1422
1423 return assign_mem_va_dir(tee_ram_va, mem_map, tee_ram_at_top);
1424 }
1425
static int cmp_init_mem_map(const void *a, const void *b)
1427 {
1428 const struct tee_mmap_region *mm_a = a;
1429 const struct tee_mmap_region *mm_b = b;
1430 int rc = 0;
1431
1432 rc = CMP_TRILEAN(mm_a->region_size, mm_b->region_size);
1433 if (!rc)
1434 rc = CMP_TRILEAN(mm_a->pa, mm_b->pa);
1435 /*
 1436 	 * 32-bit MMU descriptors cannot mix secure and non-secure mappings in
 1437 	 * the same level-2 table. Hence sort secure mappings apart from
 1438 	 * non-secure mappings.
1439 */
1440 if (!rc && !IS_ENABLED(CFG_WITH_LPAE))
1441 rc = CMP_TRILEAN(map_is_secure(mm_a), map_is_secure(mm_b));
1442
1443 /*
1444 * Nexus mappings shared between partitions should not be mixed
1445 * with other mappings in the same translation table. Hence sort
1446 * nexus shared mappings from other mappings.
1447 */
1448 if (!rc)
1449 rc = CMP_TRILEAN(core_mmu_type_is_nex_shared(mm_a->type),
1450 core_mmu_type_is_nex_shared(mm_b->type));
1451
1452 return rc;
1453 }
1454
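/*
 * Add an identity mapping (VA == PA) covering id_map_start..id_map_end,
 * unless it would overlap an already assigned virtual range.
 */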
static bool mem_map_add_id_map(struct memory_map *mem_map,
			       vaddr_t id_map_start, vaddr_t id_map_end)
1457 {
1458 vaddr_t start = ROUNDDOWN(id_map_start, SMALL_PAGE_SIZE);
1459 vaddr_t end = ROUNDUP(id_map_end, SMALL_PAGE_SIZE);
1460 size_t len = end - start;
1461 size_t n = 0;
1462
1463
1464 for (n = 0; n < mem_map->count; n++)
1465 if (core_is_buffer_intersect(mem_map->map[n].va,
1466 mem_map->map[n].size, start, len))
1467 return false;
1468
1469 grow_mem_map(mem_map);
1470 mem_map->map[mem_map->count - 1] = (struct tee_mmap_region){
1471 .type = MEM_AREA_IDENTITY_MAP_RX,
1472 /*
1473 * Could use CORE_MMU_PGDIR_SIZE to potentially save a
1474 * translation table, at the increased risk of clashes with
1475 * the rest of the memory map.
1476 */
1477 .region_size = SMALL_PAGE_SIZE,
1478 .pa = start,
1479 .va = start,
1480 .size = len,
1481 .attr = core_mmu_type_to_attr(MEM_AREA_IDENTITY_MAP_RX),
1482 };
1483
1484 return true;
1485 }
1486
static struct memory_map *init_mem_map(struct memory_map *mem_map,
				       unsigned long seed,
				       unsigned long *ret_offs)
1490 {
1491 /*
1492 * @id_map_start and @id_map_end describes a physical memory range
1493 * that must be mapped Read-Only eXecutable at identical virtual
1494 * addresses.
1495 */
1496 vaddr_t id_map_start = (vaddr_t)__identity_map_init_start;
1497 vaddr_t id_map_end = (vaddr_t)__identity_map_init_end;
1498 vaddr_t start_addr = secure_only[0].paddr;
1499 unsigned long offs = 0;
1500
1501 collect_mem_ranges(mem_map);
1502 assign_mem_granularity(mem_map);
1503
1504 /*
1505 * To ease mapping and lower use of xlat tables, sort mapping
1506 * description moving small-page regions after the pgdir regions.
1507 */
1508 qsort(mem_map->map, mem_map->count, sizeof(struct tee_mmap_region),
1509 cmp_init_mem_map);
1510
1511 if (IS_ENABLED(CFG_WITH_PAGER))
1512 add_pager_vaspace(mem_map);
1513
1514 if (IS_ENABLED(CFG_CORE_ASLR) && seed) {
1515 vaddr_t ba = 0;
1516 size_t n = 0;
1517
1518 for (n = 0; n < 3; n++) {
1519 ba = arch_aslr_base_addr(start_addr, seed, n);
1520 if (assign_mem_va(ba, mem_map) &&
1521 mem_map_add_id_map(mem_map, id_map_start,
1522 id_map_end)) {
1523 offs = ba - start_addr;
1524 DMSG("Mapping core at %#"PRIxVA" offs %#lx",
1525 ba, offs);
1526 goto out;
1527 } else {
1528 DMSG("Failed to map core at %#"PRIxVA, ba);
1529 }
1530 }
1531 EMSG("Failed to map core with seed %#lx", seed);
1532 }
1533
1534 if (!assign_mem_va(start_addr, mem_map))
1535 panic();
1536
1537 out:
1538 qsort(mem_map->map, mem_map->count, sizeof(struct tee_mmap_region),
1539 cmp_mmap_by_lower_va);
1540
1541 dump_mmap_table(mem_map);
1542
1543 *ret_offs = offs;
1544 return mem_map;
1545 }
1546
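/* Check that each mapped region fits inside the expected physical ranges */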
static void check_mem_map(struct memory_map *mem_map)
1548 {
1549 struct tee_mmap_region *m = NULL;
1550 size_t n = 0;
1551
1552 for (n = 0; n < mem_map->count; n++) {
1553 m = mem_map->map + n;
1554 switch (m->type) {
1555 case MEM_AREA_TEE_RAM:
1556 case MEM_AREA_TEE_RAM_RX:
1557 case MEM_AREA_TEE_RAM_RO:
1558 case MEM_AREA_TEE_RAM_RW:
1559 case MEM_AREA_INIT_RAM_RX:
1560 case MEM_AREA_INIT_RAM_RO:
1561 case MEM_AREA_NEX_RAM_RW:
1562 case MEM_AREA_NEX_RAM_RO:
1563 case MEM_AREA_IDENTITY_MAP_RX:
1564 if (!pbuf_is_inside(secure_only, m->pa, m->size))
1565 panic("TEE_RAM can't fit in secure_only");
1566 break;
1567 case MEM_AREA_SEC_RAM_OVERALL:
1568 if (!pbuf_is_inside(secure_only, m->pa, m->size))
1569 panic("SEC_RAM_OVERALL can't fit in secure_only");
1570 break;
1571 case MEM_AREA_NSEC_SHM:
1572 if (!pbuf_is_inside(nsec_shared, m->pa, m->size))
1573 panic("NS_SHM can't fit in nsec_shared");
1574 break;
1575 case MEM_AREA_TEE_COHERENT:
1576 case MEM_AREA_TEE_ASAN:
1577 case MEM_AREA_IO_SEC:
1578 case MEM_AREA_IO_NSEC:
1579 case MEM_AREA_EXT_DT:
1580 case MEM_AREA_MANIFEST_DT:
1581 case MEM_AREA_TRANSFER_LIST:
1582 case MEM_AREA_RAM_SEC:
1583 case MEM_AREA_RAM_NSEC:
1584 case MEM_AREA_ROM_SEC:
1585 case MEM_AREA_RES_VASPACE:
1586 case MEM_AREA_SHM_VASPACE:
1587 case MEM_AREA_PAGER_VASPACE:
1588 case MEM_AREA_NEX_DYN_VASPACE:
1589 case MEM_AREA_TEE_DYN_VASPACE:
1590 break;
1591 default:
 1592 			EMSG("Unhandled memtype %d", m->type);
1593 panic();
1594 }
1595 }
1596 }
1597
1598 /*
1599 * core_init_mmu_map() - init tee core default memory mapping
1600 *
1601 * This routine sets the static default TEE core mapping. If @seed is > 0
 1602  * and CFG_CORE_ASLR is enabled, it will map the TEE core at a location
1603 * based on the seed and return the offset from the link address.
1604 *
 1605  * If an error occurs, core_init_mmu_map() is expected to panic.
1606 *
1607 * Note: this function is weak just to make it possible to exclude it from
1608 * the unpaged area.
1609 */
void __weak core_init_mmu_map(unsigned long seed, struct core_mmu_config *cfg)
1611 {
1612 #ifndef CFG_NS_VIRTUALIZATION
1613 vaddr_t start = ROUNDDOWN((vaddr_t)__nozi_start, SMALL_PAGE_SIZE);
1614 #else
1615 vaddr_t start = ROUNDDOWN((vaddr_t)__vcore_nex_rw_start,
1616 SMALL_PAGE_SIZE);
1617 #endif
1618 #ifdef CFG_DYN_CONFIG
1619 vaddr_t len = ROUNDUP(VCORE_FREE_END_PA, SMALL_PAGE_SIZE) - start;
1620 #else
1621 vaddr_t len = ROUNDUP((vaddr_t)__nozi_end, SMALL_PAGE_SIZE) - start;
1622 #endif
1623 struct tee_mmap_region tmp_mmap_region = { };
1624 struct memory_map mem_map = { };
1625 unsigned long offs = 0;
1626
1627 if (IS_ENABLED(CFG_CORE_PHYS_RELOCATABLE) &&
1628 (core_mmu_tee_load_pa & SMALL_PAGE_MASK))
1629 panic("OP-TEE load address is not page aligned");
1630
1631 check_sec_nsec_mem_config();
1632
1633 mem_map.alloc_count = CFG_MMAP_REGIONS;
1634 mem_map.map = boot_mem_alloc_tmp(mem_map.alloc_count *
1635 sizeof(*mem_map.map),
1636 alignof(*mem_map.map));
1637 memory_map_realloc_func = boot_mem_realloc_memory_map;
1638
1639 static_memory_map = (struct memory_map){
1640 .map = &tmp_mmap_region,
1641 .alloc_count = 1,
1642 .count = 1,
1643 };
1644 /*
 1645 	 * Add an entry covering the translation tables which will be
1646 * involved in some virt_to_phys() and phys_to_virt() conversions.
1647 */
1648 static_memory_map.map[0] = (struct tee_mmap_region){
1649 .type = MEM_AREA_TEE_RAM,
1650 .region_size = SMALL_PAGE_SIZE,
1651 .pa = start,
1652 .va = start,
1653 .size = len,
1654 .attr = core_mmu_type_to_attr(MEM_AREA_IDENTITY_MAP_RX),
1655 };
1656
1657 init_mem_map(&mem_map, seed, &offs);
1658
1659 check_mem_map(&mem_map);
1660 core_init_mmu(&mem_map);
1661 dump_xlat_table(0x0, CORE_MMU_BASE_TABLE_LEVEL);
1662 core_init_mmu_regs(cfg);
1663 cfg->map_offset = offs;
1664 static_memory_map = mem_map;
1665 boot_mem_add_reloc(&static_memory_map.map);
1666 }
1667
void core_mmu_save_mem_map(void)
1669 {
1670 size_t alloc_count = static_memory_map.count + 5;
1671 size_t elem_sz = sizeof(*static_memory_map.map);
1672 void *p = NULL;
1673
1674 p = nex_calloc(alloc_count, elem_sz);
1675 if (!p)
1676 panic();
1677 memcpy(p, static_memory_map.map, static_memory_map.count * elem_sz);
1678 static_memory_map.map = p;
1679 static_memory_map.alloc_count = alloc_count;
1680 memory_map_realloc_func = heap_realloc_memory_map;
1681 }
1682
bool core_mmu_mattr_is_ok(uint32_t mattr)
1684 {
1685 /*
1686 * Keep in sync with core_mmu_lpae.c:mattr_to_desc and
1687 * core_mmu_v7.c:mattr_to_texcb
1688 */
1689
1690 switch ((mattr >> TEE_MATTR_MEM_TYPE_SHIFT) & TEE_MATTR_MEM_TYPE_MASK) {
1691 case TEE_MATTR_MEM_TYPE_DEV:
1692 case TEE_MATTR_MEM_TYPE_STRONGLY_O:
1693 case TEE_MATTR_MEM_TYPE_CACHED:
1694 case TEE_MATTR_MEM_TYPE_TAGGED:
1695 return true;
1696 default:
1697 return false;
1698 }
1699 }
1700
1701 /*
1702 * test attributes of target physical buffer
1703 *
1704 * Flags: pbuf_is(SECURE, NOT_SECURE, RAM, IOMEM, KEYVAULT).
1705 *
1706 */
bool core_pbuf_is(uint32_t attr, paddr_t pbuf, size_t len)
1708 {
1709 struct tee_mmap_region *map;
1710
 1711 	/* Empty buffers comply with anything */
1712 if (len == 0)
1713 return true;
1714
1715 switch (attr) {
1716 case CORE_MEM_SEC:
1717 return pbuf_is_inside(secure_only, pbuf, len);
1718 case CORE_MEM_NON_SEC:
1719 return pbuf_is_inside(nsec_shared, pbuf, len) ||
1720 pbuf_is_nsec_ddr(pbuf, len);
1721 case CORE_MEM_TEE_RAM:
1722 return core_is_buffer_inside(pbuf, len, TEE_RAM_START,
1723 TEE_RAM_PH_SIZE);
1724 #ifdef CFG_CORE_RESERVED_SHM
1725 case CORE_MEM_NSEC_SHM:
1726 return core_is_buffer_inside(pbuf, len, TEE_SHMEM_START,
1727 TEE_SHMEM_SIZE);
1728 #endif
1729 case CORE_MEM_SDP_MEM:
1730 return pbuf_is_sdp_mem(pbuf, len);
1731 case CORE_MEM_CACHED:
1732 map = find_map_by_pa(pbuf);
1733 if (!map || !pbuf_inside_map_area(pbuf, len, map))
1734 return false;
1735 return mattr_is_cached(map->attr);
1736 default:
1737 return false;
1738 }
1739 }
1740
1741 /* test attributes of target virtual buffer (in core mapping) */
bool core_vbuf_is(uint32_t attr, const void *vbuf, size_t len)
1743 {
1744 paddr_t p;
1745
 1746 	/* Empty buffers comply with anything */
1747 if (len == 0)
1748 return true;
1749
1750 p = virt_to_phys((void *)vbuf);
1751 if (!p)
1752 return false;
1753
1754 return core_pbuf_is(attr, p, len);
1755 }
1756
1757 /* core_va2pa - teecore exported service */
static int __maybe_unused core_va2pa_helper(void *va, paddr_t *pa)
1759 {
1760 struct tee_mmap_region *map;
1761
1762 map = find_map_by_va(va);
1763 if (!va_is_in_map(map, (vaddr_t)va))
1764 return -1;
1765
1766 /*
1767 * We can calculate PA for static map. Virtual address ranges
1768 * reserved to core dynamic mapping return a 'match' (return 0;)
1769 * together with an invalid null physical address.
1770 */
1771 if (map->pa)
1772 *pa = map->pa + (vaddr_t)va - map->va;
1773 else
1774 *pa = 0;
1775
1776 return 0;
1777 }
1778
static void *map_pa2va(struct tee_mmap_region *map, paddr_t pa, size_t len)
1780 {
1781 if (!pa_is_in_map(map, pa, len))
1782 return NULL;
1783
1784 return (void *)(vaddr_t)(map->va + pa - map->pa);
1785 }
1786
1787 /*
1788 * teecore gets some memory area definitions
1789 */
void core_mmu_get_mem_by_type(enum teecore_memtypes type, vaddr_t *s,
			      vaddr_t *e)
1792 {
1793 struct tee_mmap_region *map = find_map_by_type(type);
1794
1795 if (map) {
1796 *s = map->va;
1797 *e = map->va + map->size;
1798 } else {
1799 *s = 0;
1800 *e = 0;
1801 }
1802 }
1803
enum teecore_memtypes core_mmu_get_type_by_pa(paddr_t pa)
1805 {
1806 struct tee_mmap_region *map = find_map_by_pa(pa);
1807
1808 /* VA spaces have no valid PAs in the memory map */
1809 if (!map || map->type == MEM_AREA_RES_VASPACE ||
1810 map->type == MEM_AREA_SHM_VASPACE)
1811 return MEM_AREA_MAXTYPE;
1812 return map->type;
1813 }
1814
void core_mmu_set_entry(struct core_mmu_table_info *tbl_info, unsigned int idx,
			paddr_t pa, uint32_t attr)
1817 {
1818 assert(idx < tbl_info->num_entries);
1819 core_mmu_set_entry_primitive(tbl_info->table, tbl_info->level,
1820 idx, pa, attr);
1821 }
1822
void core_mmu_get_entry(struct core_mmu_table_info *tbl_info, unsigned int idx,
			paddr_t *pa, uint32_t *attr)
1825 {
1826 assert(idx < tbl_info->num_entries);
1827 core_mmu_get_entry_primitive(tbl_info->table, tbl_info->level,
1828 idx, pa, attr);
1829 }
1830
1831 static void clear_region(struct core_mmu_table_info *tbl_info,
1832 struct tee_mmap_region *region)
1833 {
1834 unsigned int end = 0;
1835 unsigned int idx = 0;
1836
1837 /* va, len and pa should be block aligned */
1838 assert(!core_mmu_get_block_offset(tbl_info, region->va));
1839 assert(!core_mmu_get_block_offset(tbl_info, region->size));
1840 assert(!core_mmu_get_block_offset(tbl_info, region->pa));
1841
1842 idx = core_mmu_va2idx(tbl_info, region->va);
1843 end = core_mmu_va2idx(tbl_info, region->va + region->size);
1844
1845 while (idx < end) {
1846 core_mmu_set_entry(tbl_info, idx, 0, 0);
1847 idx++;
1848 }
1849 }
1850
1851 static void set_region(struct core_mmu_table_info *tbl_info,
1852 struct tee_mmap_region *region)
1853 {
1854 unsigned int end;
1855 unsigned int idx;
1856 paddr_t pa;
1857
1858 /* va, len and pa should be block aligned */
1859 assert(!core_mmu_get_block_offset(tbl_info, region->va));
1860 assert(!core_mmu_get_block_offset(tbl_info, region->size));
1861 assert(!core_mmu_get_block_offset(tbl_info, region->pa));
1862
1863 idx = core_mmu_va2idx(tbl_info, region->va);
1864 end = core_mmu_va2idx(tbl_info, region->va + region->size);
1865 pa = region->pa;
1866
1867 while (idx < end) {
1868 core_mmu_set_entry(tbl_info, idx, pa, region->attr);
1869 idx++;
1870 pa += BIT64(tbl_info->shift);
1871 }
1872 }
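
/*
 * Worked example (hypothetical values): for a table with shift = 21
 * (2 MiB entries), a region with va = 0x40000000, pa = 0x80000000 and
 * size = 0x600000 covers three consecutive indices, so set_region()
 * writes three entries with pa = 0x80000000, 0x80200000 and 0x80400000,
 * each carrying region->attr.
 */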
1873
1874 static void set_pg_region(struct core_mmu_table_info *dir_info,
1875 struct vm_region *region, struct pgt **pgt,
1876 struct core_mmu_table_info *pg_info)
1877 {
1878 struct tee_mmap_region r = {
1879 .va = region->va,
1880 .size = region->size,
1881 .attr = region->attr,
1882 };
1883 vaddr_t end = r.va + r.size;
1884 uint32_t pgt_attr = (r.attr & TEE_MATTR_SECURE) | TEE_MATTR_TABLE;
1885
1886 while (r.va < end) {
1887 if (!pg_info->table ||
1888 r.va >= (pg_info->va_base + CORE_MMU_PGDIR_SIZE)) {
1889 /*
1890 * We're assigning a new translation table.
1891 */
1892 unsigned int idx;
1893
1894 /* Virtual addresses must grow */
1895 assert(r.va > pg_info->va_base);
1896
1897 idx = core_mmu_va2idx(dir_info, r.va);
1898 pg_info->va_base = core_mmu_idx2va(dir_info, idx);
1899
1900 /*
1901 * Advance pgt to va_base, note that we may need to
1902 * skip multiple page tables if there are large
1903 * holes in the vm map.
1904 */
1905 while ((*pgt)->vabase < pg_info->va_base) {
1906 *pgt = SLIST_NEXT(*pgt, link);
1907 /* We should have allocated enough */
1908 assert(*pgt);
1909 }
1910 assert((*pgt)->vabase == pg_info->va_base);
1911 pg_info->table = (*pgt)->tbl;
1912
1913 core_mmu_set_entry(dir_info, idx,
1914 virt_to_phys(pg_info->table),
1915 pgt_attr);
1916 }
1917
1918 r.size = MIN(CORE_MMU_PGDIR_SIZE - (r.va - pg_info->va_base),
1919 end - r.va);
1920
1921 if (!(*pgt)->populated && !mobj_is_paged(region->mobj)) {
1922 size_t granule = BIT(pg_info->shift);
1923 size_t offset = r.va - region->va + region->offset;
1924
1925 r.size = MIN(r.size,
1926 mobj_get_phys_granule(region->mobj));
1927 r.size = ROUNDUP(r.size, SMALL_PAGE_SIZE);
1928
1929 if (mobj_get_pa(region->mobj, offset, granule,
1930 &r.pa) != TEE_SUCCESS)
1931 panic("Failed to get PA of unpaged mobj");
1932 set_region(pg_info, &r);
1933 }
1934 r.va += r.size;
1935 }
1936 }
1937
1938 static bool can_map_at_level(paddr_t paddr, vaddr_t vaddr,
1939 size_t size_left, paddr_t block_size,
1940 struct tee_mmap_region *mm)
1941 {
1942 /* VA and PA are aligned to block size at current level */
1943 if ((vaddr | paddr) & (block_size - 1))
1944 return false;
1945
1946 /* Remainder fits into block at current level */
1947 if (size_left < block_size)
1948 return false;
1949
1950 /*
1951 * The required block size of the region is compatible with the
1952 * block size of the current level.
1953 */
1954 if (mm->region_size < block_size)
1955 return false;
1956
1957 #ifdef CFG_WITH_PAGER
1958 	/*
1959 	 * If the pager is enabled, TEE RAM and the pager VA space must be
1960 	 * mapped with small pages only.
1961 	 */
1962 if ((map_is_tee_ram(mm) || mm->type == MEM_AREA_PAGER_VASPACE) &&
1963 block_size != SMALL_PAGE_SIZE)
1964 return false;
1965 #endif
1966
1967 return true;
1968 }
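
/*
 * Worked example (hypothetical values): with block_size = 0x200000
 * (a 2 MiB entry), paddr = 0x80200000, vaddr = 0x40200000 and
 * size_left = 0x400000 pass all three checks as long as
 * mm->region_size >= 0x200000, so a block mapping is used. With
 * vaddr = 0x40201000 instead, the alignment check fails and the caller
 * descends to the next, finer-grained level.
 */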
1969
1970 void core_mmu_map_region(struct mmu_partition *prtn, struct tee_mmap_region *mm)
1971 {
1972 struct core_mmu_table_info tbl_info = { };
1973 unsigned int idx = 0;
1974 vaddr_t vaddr = mm->va;
1975 paddr_t paddr = mm->pa;
1976 ssize_t size_left = mm->size;
1977 uint32_t attr = mm->attr;
1978 unsigned int level = 0;
1979 bool table_found = false;
1980 uint32_t old_attr = 0;
1981
1982 assert(!((vaddr | paddr) & SMALL_PAGE_MASK));
1983 if (!paddr)
1984 attr = 0;
1985
1986 while (size_left > 0) {
1987 level = CORE_MMU_BASE_TABLE_LEVEL;
1988
1989 while (true) {
1990 paddr_t block_size = 0;
1991
1992 assert(core_mmu_level_in_range(level));
1993
1994 table_found = core_mmu_find_table(prtn, vaddr, level,
1995 &tbl_info);
1996 if (!table_found)
1997 panic("can't find table for mapping");
1998
1999 block_size = BIT64(tbl_info.shift);
2000
2001 idx = core_mmu_va2idx(&tbl_info, vaddr);
2002 if (!can_map_at_level(paddr, vaddr, size_left,
2003 block_size, mm)) {
2004 bool secure = mm->attr & TEE_MATTR_SECURE;
2005
2006 /*
2007 * This part of the region can't be mapped at
2008 * this level. Need to go deeper.
2009 */
2010 if (!core_mmu_entry_to_finer_grained(&tbl_info,
2011 idx,
2012 secure))
2013 panic("Can't divide MMU entry");
2014 level = tbl_info.next_level;
2015 continue;
2016 }
2017
2018 /* We can map part of the region at current level */
2019 core_mmu_get_entry(&tbl_info, idx, NULL, &old_attr);
2020 if (old_attr)
2021 panic("Page is already mapped");
2022
2023 core_mmu_set_entry(&tbl_info, idx, paddr, attr);
2024 /*
2025 * Dynamic vaspace regions don't have a physical
2026 * address initially but we need to allocate and
2027 * initialize the translation tables now for later
2028 * updates to work properly.
2029 */
2030 if (paddr)
2031 paddr += block_size;
2032 vaddr += block_size;
2033 size_left -= block_size;
2034
2035 break;
2036 }
2037 }
2038 }
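
/*
 * Illustrative walk-through: a region with region_size set to
 * SMALL_PAGE_SIZE makes can_map_at_level() reject every larger block,
 * so the loop above descends level by level, splitting entries with
 * core_mmu_entry_to_finer_grained() where needed, and finally installs
 * one small-page entry per iteration, advancing vaddr (and paddr, when
 * non-zero) by the block size each time.
 */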
2039
2040 TEE_Result core_mmu_map_pages(vaddr_t vstart, paddr_t *pages, size_t num_pages,
2041 enum teecore_memtypes memtype)
2042 {
2043 TEE_Result ret;
2044 struct core_mmu_table_info tbl_info;
2045 struct tee_mmap_region *mm;
2046 unsigned int idx;
2047 uint32_t old_attr;
2048 uint32_t exceptions;
2049 vaddr_t vaddr = vstart;
2050 size_t i;
2051 bool secure;
2052
2053 assert(!(core_mmu_type_to_attr(memtype) & TEE_MATTR_PX));
2054
2055 secure = core_mmu_type_to_attr(memtype) & TEE_MATTR_SECURE;
2056
2057 if (vaddr & SMALL_PAGE_MASK)
2058 return TEE_ERROR_BAD_PARAMETERS;
2059
2060 exceptions = mmu_lock();
2061
2062 mm = find_map_by_va((void *)vaddr);
2063 if (!mm || !va_is_in_map(mm, vaddr + num_pages * SMALL_PAGE_SIZE - 1))
2064 panic("VA does not belong to any known mm region");
2065
2066 if (!core_mmu_is_dynamic_vaspace(mm))
2067 panic("Trying to map into static region");
2068
2069 for (i = 0; i < num_pages; i++) {
2070 if (pages[i] & SMALL_PAGE_MASK) {
2071 ret = TEE_ERROR_BAD_PARAMETERS;
2072 goto err;
2073 }
2074
2075 while (true) {
2076 if (!core_mmu_find_table(NULL, vaddr, UINT_MAX,
2077 &tbl_info))
2078 panic("Can't find pagetable for vaddr ");
2079
2080 idx = core_mmu_va2idx(&tbl_info, vaddr);
2081 if (tbl_info.shift == SMALL_PAGE_SHIFT)
2082 break;
2083
2084 			/* This is a supertable. It needs to be divided. */
2085 if (!core_mmu_entry_to_finer_grained(&tbl_info, idx,
2086 secure))
2087 panic("Failed to spread pgdir on small tables");
2088 }
2089
2090 core_mmu_get_entry(&tbl_info, idx, NULL, &old_attr);
2091 if (old_attr)
2092 panic("Page is already mapped");
2093
2094 core_mmu_set_entry(&tbl_info, idx, pages[i],
2095 core_mmu_type_to_attr(memtype));
2096 vaddr += SMALL_PAGE_SIZE;
2097 }
2098
2099 /*
2100 * Make sure all the changes to translation tables are visible
2101 * before returning. TLB doesn't need to be invalidated as we are
2102 * guaranteed that there's no valid mapping in this range.
2103 */
2104 core_mmu_table_write_barrier();
2105 mmu_unlock(exceptions);
2106
2107 return TEE_SUCCESS;
2108 err:
2109 mmu_unlock(exceptions);
2110
2111 if (i)
2112 core_mmu_unmap_pages(vstart, i);
2113
2114 return ret;
2115 }
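
/*
 * Usage sketch (not compiled, addresses are hypothetical): map two
 * scattered physical pages into a dynamic vaspace region and tear the
 * mapping down again when done.
 *
 *	paddr_t pages[2] = { 0x88001000, 0x88005000 };
 *	vaddr_t va = ...;	// VA inside a dynamic vaspace region,
 *				// e.g. MEM_AREA_RES_VASPACE
 *
 *	if (core_mmu_map_pages(va, pages, ARRAY_SIZE(pages),
 *			       MEM_AREA_RAM_NSEC))
 *		return TEE_ERROR_GENERIC;
 *	...
 *	core_mmu_unmap_pages(va, ARRAY_SIZE(pages));
 */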
2116
2117 TEE_Result core_mmu_map_contiguous_pages(vaddr_t vstart, paddr_t pstart,
2118 size_t num_pages,
2119 enum teecore_memtypes memtype)
2120 {
2121 struct core_mmu_table_info tbl_info = { };
2122 struct tee_mmap_region *mm = NULL;
2123 unsigned int idx = 0;
2124 uint32_t old_attr = 0;
2125 uint32_t exceptions = 0;
2126 vaddr_t vaddr = vstart;
2127 paddr_t paddr = pstart;
2128 size_t i = 0;
2129 bool secure = false;
2130
2131 assert(!(core_mmu_type_to_attr(memtype) & TEE_MATTR_PX));
2132
2133 secure = core_mmu_type_to_attr(memtype) & TEE_MATTR_SECURE;
2134
2135 if ((vaddr | paddr) & SMALL_PAGE_MASK)
2136 return TEE_ERROR_BAD_PARAMETERS;
2137
2138 exceptions = mmu_lock();
2139
2140 mm = find_map_by_va((void *)vaddr);
2141 if (!mm || !va_is_in_map(mm, vaddr + num_pages * SMALL_PAGE_SIZE - 1))
2142 panic("VA does not belong to any known mm region");
2143
2144 if (!core_mmu_is_dynamic_vaspace(mm))
2145 panic("Trying to map into static region");
2146
2147 for (i = 0; i < num_pages; i++) {
2148 while (true) {
2149 if (!core_mmu_find_table(NULL, vaddr, UINT_MAX,
2150 &tbl_info))
2151 panic("Can't find pagetable for vaddr ");
2152
2153 idx = core_mmu_va2idx(&tbl_info, vaddr);
2154 if (tbl_info.shift == SMALL_PAGE_SHIFT)
2155 break;
2156
2157 			/* This is a supertable. It needs to be divided. */
2158 if (!core_mmu_entry_to_finer_grained(&tbl_info, idx,
2159 secure))
2160 panic("Failed to spread pgdir on small tables");
2161 }
2162
2163 core_mmu_get_entry(&tbl_info, idx, NULL, &old_attr);
2164 if (old_attr)
2165 panic("Page is already mapped");
2166
2167 core_mmu_set_entry(&tbl_info, idx, paddr,
2168 core_mmu_type_to_attr(memtype));
2169 paddr += SMALL_PAGE_SIZE;
2170 vaddr += SMALL_PAGE_SIZE;
2171 }
2172
2173 /*
2174 * Make sure all the changes to translation tables are visible
2175 * before returning. TLB doesn't need to be invalidated as we are
2176 * guaranteed that there's no valid mapping in this range.
2177 */
2178 core_mmu_table_write_barrier();
2179 mmu_unlock(exceptions);
2180
2181 return TEE_SUCCESS;
2182 }
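
/*
 * Usage sketch (not compiled, addresses are hypothetical): same contract
 * as core_mmu_map_pages() but for a physically contiguous range, so only
 * the start PA is needed.
 *
 *	if (core_mmu_map_contiguous_pages(va, 0x88100000, 16,
 *					  MEM_AREA_RAM_SEC))
 *		panic();
 */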
2183
2184 static bool mem_range_is_in_vcore_free(vaddr_t vstart, size_t num_pages)
2185 {
2186 return core_is_buffer_inside(vstart, num_pages * SMALL_PAGE_SIZE,
2187 VCORE_FREE_PA, VCORE_FREE_SZ);
2188 }
2189
2190 static void maybe_remove_from_mem_map(vaddr_t vstart, size_t num_pages)
2191 {
2192 struct memory_map *mem_map = NULL;
2193 struct tee_mmap_region *mm = NULL;
2194 size_t idx = 0;
2195 vaddr_t va = 0;
2196
2197 mm = find_map_by_va((void *)vstart);
2198 if (!mm || !va_is_in_map(mm, vstart + num_pages * SMALL_PAGE_SIZE - 1))
2199 panic("VA does not belong to any known mm region");
2200
2201 if (core_mmu_is_dynamic_vaspace(mm))
2202 return;
2203
2204 if (!mem_range_is_in_vcore_free(vstart, num_pages))
2205 panic("Trying to unmap static region");
2206
2207 	/*
2208 	 * We're going to remove memory from the VCORE_FREE memory range.
2209 	 * Depending on where the range is we may need to remove the matching
2210 	 * mm, peel off a bit from the start or end of the mm, or split it
2211 	 * into two with a hole in the middle.
2212 	 */
2213
2214 va = ROUNDDOWN(vstart, SMALL_PAGE_SIZE);
2215 assert(mm->region_size == SMALL_PAGE_SIZE);
2216
2217 if (va == mm->va && mm->size == num_pages * SMALL_PAGE_SIZE) {
2218 mem_map = get_memory_map();
2219 idx = mm - mem_map->map;
2220 assert(idx < mem_map->count);
2221
2222 rem_array_elem(mem_map->map, mem_map->count,
2223 sizeof(*mem_map->map), idx);
2224 mem_map->count--;
2225 } else if (va == mm->va) {
2226 mm->va += num_pages * SMALL_PAGE_SIZE;
2227 mm->pa += num_pages * SMALL_PAGE_SIZE;
2228 mm->size -= num_pages * SMALL_PAGE_SIZE;
2229 } else if (va + num_pages * SMALL_PAGE_SIZE == mm->va + mm->size) {
2230 mm->size -= num_pages * SMALL_PAGE_SIZE;
2231 } else {
2232 struct tee_mmap_region m = *mm;
2233
2234 mem_map = get_memory_map();
2235 idx = mm - mem_map->map;
2236 assert(idx < mem_map->count);
2237
2238 mm->size = va - mm->va;
2239 m.va += mm->size + num_pages * SMALL_PAGE_SIZE;
2240 m.pa += mm->size + num_pages * SMALL_PAGE_SIZE;
2241 m.size -= mm->size + num_pages * SMALL_PAGE_SIZE;
2242 grow_mem_map(mem_map);
2243 ins_array_elem(mem_map->map, mem_map->count,
2244 sizeof(*mem_map->map), idx + 1, &m);
2245 }
2246 }
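
/*
 * Worked example (hypothetical values): assume the VCORE_FREE mm covers
 * va 0x2000000..0x2008000. Unmapping
 * - 0x2000000..0x2008000 removes the whole entry,
 * - 0x2000000..0x2002000 peels off the head (va, pa and size adjusted),
 * - 0x2006000..0x2008000 peels off the tail (size adjusted),
 * - 0x2002000..0x2004000 splits the entry in two around the hole.
 */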
2247
2248 void core_mmu_unmap_pages(vaddr_t vstart, size_t num_pages)
2249 {
2250 struct core_mmu_table_info tbl_info;
2251 size_t i;
2252 unsigned int idx;
2253 uint32_t exceptions;
2254
2255 exceptions = mmu_lock();
2256
2257 maybe_remove_from_mem_map(vstart, num_pages);
2258
2259 for (i = 0; i < num_pages; i++, vstart += SMALL_PAGE_SIZE) {
2260 if (!core_mmu_find_table(NULL, vstart, UINT_MAX, &tbl_info))
2261 panic("Can't find pagetable");
2262
2263 if (tbl_info.shift != SMALL_PAGE_SHIFT)
2264 panic("Invalid pagetable level");
2265
2266 idx = core_mmu_va2idx(&tbl_info, vstart);
2267 core_mmu_set_entry(&tbl_info, idx, 0, 0);
2268 }
2269 tlbi_all();
2270
2271 mmu_unlock(exceptions);
2272 }
2273
2274 void core_mmu_populate_user_map(struct core_mmu_table_info *dir_info,
2275 struct user_mode_ctx *uctx)
2276 {
2277 struct core_mmu_table_info pg_info = { };
2278 struct pgt_cache *pgt_cache = &uctx->pgt_cache;
2279 struct pgt *pgt = NULL;
2280 struct pgt *p = NULL;
2281 struct vm_region *r = NULL;
2282
2283 if (TAILQ_EMPTY(&uctx->vm_info.regions))
2284 return; /* Nothing to map */
2285
2286 /*
2287 * Allocate all page tables in advance.
2288 */
2289 pgt_get_all(uctx);
2290 pgt = SLIST_FIRST(pgt_cache);
2291
2292 core_mmu_set_info_table(&pg_info, dir_info->next_level, 0, NULL);
2293
2294 TAILQ_FOREACH(r, &uctx->vm_info.regions, link)
2295 set_pg_region(dir_info, r, &pgt, &pg_info);
2296 	/* Record that the translation tables are now populated. */
2297 SLIST_FOREACH(p, pgt_cache, link) {
2298 p->populated = true;
2299 if (p == pgt)
2300 break;
2301 }
2302 assert(p == pgt);
2303 }
2304
2305 TEE_Result core_mmu_remove_mapping(enum teecore_memtypes type, void *addr,
2306 size_t len)
2307 {
2308 struct core_mmu_table_info tbl_info = { };
2309 struct tee_mmap_region *res_map = NULL;
2310 struct tee_mmap_region *map = NULL;
2311 paddr_t pa = virt_to_phys(addr);
2312 size_t granule = 0;
2313 ptrdiff_t i = 0;
2314 paddr_t p = 0;
2315 size_t l = 0;
2316
2317 map = find_map_by_type_and_pa(type, pa, len);
2318 if (!map)
2319 return TEE_ERROR_GENERIC;
2320
2321 res_map = find_map_by_type(MEM_AREA_RES_VASPACE);
2322 if (!res_map)
2323 return TEE_ERROR_GENERIC;
2324 if (!core_mmu_find_table(NULL, res_map->va, UINT_MAX, &tbl_info))
2325 return TEE_ERROR_GENERIC;
2326 granule = BIT(tbl_info.shift);
2327
2328 if (map < static_memory_map.map ||
2329 map >= static_memory_map.map + static_memory_map.count)
2330 return TEE_ERROR_GENERIC;
2331 i = map - static_memory_map.map;
2332
2333 /* Check that we have a full match */
2334 p = ROUNDDOWN2(pa, granule);
2335 l = ROUNDUP2(len + pa - p, granule);
2336 if (map->pa != p || map->size != l)
2337 return TEE_ERROR_GENERIC;
2338
2339 clear_region(&tbl_info, map);
2340 tlbi_all();
2341
2342 	/* If possible, remove the va range from res_map */
2343 if (res_map->va - map->size == map->va) {
2344 res_map->va -= map->size;
2345 res_map->size += map->size;
2346 }
2347
2348 /* Remove the entry. */
2349 rem_array_elem(static_memory_map.map, static_memory_map.count,
2350 sizeof(*static_memory_map.map), i);
2351 static_memory_map.count--;
2352
2353 return TEE_SUCCESS;
2354 }
2355
2356 struct tee_mmap_region *
2357 core_mmu_find_mapping_exclusive(enum teecore_memtypes type, size_t len)
2358 {
2359 struct memory_map *mem_map = get_memory_map();
2360 struct tee_mmap_region *map_found = NULL;
2361 size_t n = 0;
2362
2363 if (!len)
2364 return NULL;
2365
2366 for (n = 0; n < mem_map->count; n++) {
2367 if (mem_map->map[n].type != type)
2368 continue;
2369
2370 if (map_found)
2371 return NULL;
2372
2373 map_found = mem_map->map + n;
2374 }
2375
2376 if (!map_found || map_found->size < len)
2377 return NULL;
2378
2379 return map_found;
2380 }
2381
2382 void *core_mmu_add_mapping(enum teecore_memtypes type, paddr_t addr, size_t len)
2383 {
2384 struct memory_map *mem_map = &static_memory_map;
2385 struct core_mmu_table_info tbl_info = { };
2386 struct tee_mmap_region *map = NULL;
2387 size_t granule = 0;
2388 paddr_t p = 0;
2389 size_t l = 0;
2390
2391 if (!len)
2392 return NULL;
2393
2394 if (!core_mmu_check_end_pa(addr, len))
2395 return NULL;
2396
2397 /* Check if the memory is already mapped */
2398 map = find_map_by_type_and_pa(type, addr, len);
2399 if (map && pbuf_inside_map_area(addr, len, map))
2400 return (void *)(vaddr_t)(map->va + addr - map->pa);
2401
2402 /* Find the reserved va space used for late mappings */
2403 map = find_map_by_type(MEM_AREA_RES_VASPACE);
2404 if (!map)
2405 return NULL;
2406
2407 if (!core_mmu_find_table(NULL, map->va, UINT_MAX, &tbl_info))
2408 return NULL;
2409
2410 granule = BIT64(tbl_info.shift);
2411 p = ROUNDDOWN2(addr, granule);
2412 l = ROUNDUP2(len + addr - p, granule);
2413
2414 /* Ban overflowing virtual addresses */
2415 if (map->size < l)
2416 return NULL;
2417
2418 	/*
2419 	 * Something is wrong, we can't fit the va range into the selected
2420 	 * table. The reserved va range is possibly misaligned with the
2421 	 * granule.
2422 	 */
2423 if (core_mmu_va2idx(&tbl_info, map->va + len) >= tbl_info.num_entries)
2424 return NULL;
2425
2426 if (static_memory_map.count >= static_memory_map.alloc_count)
2427 return NULL;
2428
2429 mem_map->map[mem_map->count] = (struct tee_mmap_region){
2430 .va = map->va,
2431 .size = l,
2432 .type = type,
2433 .region_size = granule,
2434 .attr = core_mmu_type_to_attr(type),
2435 .pa = p,
2436 };
2437 map->va += l;
2438 map->size -= l;
2439 map = mem_map->map + mem_map->count;
2440 mem_map->count++;
2441
2442 set_region(&tbl_info, map);
2443
2444 /* Make sure the new entry is visible before continuing. */
2445 core_mmu_table_write_barrier();
2446
2447 return (void *)(vaddr_t)(map->va + addr - map->pa);
2448 }
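
/*
 * Usage sketch (not compiled, the device address is hypothetical): map a
 * device region after boot-time mapping is done and drop it again later.
 *
 *	void *va = core_mmu_add_mapping(MEM_AREA_IO_SEC, 0xf8000000,
 *					0x1000);
 *	if (!va)
 *		return TEE_ERROR_OUT_OF_MEMORY;
 *	...
 *	if (core_mmu_remove_mapping(MEM_AREA_IO_SEC, va, 0x1000))
 *		return TEE_ERROR_GENERIC;
 */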
2449
2450 #ifdef CFG_WITH_PAGER
2451 static vaddr_t get_linear_map_end_va(void)
2452 {
2453 	/* This is kept in sync with the generic linker script kern.ld.S */
2454 return (vaddr_t)__heap2_end;
2455 }
2456
2457 static paddr_t get_linear_map_end_pa(void)
2458 {
2459 return get_linear_map_end_va() - boot_mmu_config.map_offset;
2460 }
2461 #endif
2462
2463 #if defined(CFG_TEE_CORE_DEBUG)
2464 static void check_pa_matches_va(void *va, paddr_t pa)
2465 {
2466 TEE_Result res = TEE_ERROR_GENERIC;
2467 vaddr_t v = (vaddr_t)va;
2468 paddr_t p = 0;
2469 struct core_mmu_table_info ti __maybe_unused = { };
2470
2471 if (core_mmu_user_va_range_is_defined()) {
2472 vaddr_t user_va_base = 0;
2473 size_t user_va_size = 0;
2474
2475 core_mmu_get_user_va_range(&user_va_base, &user_va_size);
2476 if (v >= user_va_base &&
2477 v <= (user_va_base - 1 + user_va_size)) {
2478 if (!core_mmu_user_mapping_is_active()) {
2479 if (pa)
2480 panic("issue in linear address space");
2481 return;
2482 }
2483
2484 res = vm_va2pa(to_user_mode_ctx(thread_get_tsd()->ctx),
2485 va, &p);
2486 if (res == TEE_ERROR_NOT_SUPPORTED)
2487 return;
2488 if (res == TEE_SUCCESS && pa != p)
2489 panic("bad pa");
2490 if (res != TEE_SUCCESS && pa)
2491 panic("false pa");
2492 return;
2493 }
2494 }
2495 #ifdef CFG_WITH_PAGER
2496 if (is_unpaged(va)) {
2497 if (v - boot_mmu_config.map_offset != pa)
2498 panic("issue in linear address space");
2499 return;
2500 }
2501
2502 if (tee_pager_get_table_info(v, &ti)) {
2503 uint32_t a;
2504
2505 		/*
2506 		 * Lookups in the page table managed by the pager are
2507 		 * dangerous for addresses in the paged area as those pages
2508 		 * change all the time. But some ranges are safe, for
2509 		 * instance rw-locked areas while the page is populated.
2510 		 */
2511 core_mmu_get_entry(&ti, core_mmu_va2idx(&ti, v), &p, &a);
2512 if (a & TEE_MATTR_VALID_BLOCK) {
2513 paddr_t mask = BIT64(ti.shift) - 1;
2514
2515 p |= v & mask;
2516 if (pa != p)
2517 panic();
2518 } else {
2519 if (pa)
2520 panic();
2521 }
2522 return;
2523 }
2524 #endif
2525
2526 if (!core_va2pa_helper(va, &p)) {
2527 		/* Verify only the static mapping (i.e., a non-null phys addr) */
2528 if (p && pa != p) {
2529 DMSG("va %p maps 0x%" PRIxPA ", expect 0x%" PRIxPA,
2530 va, p, pa);
2531 panic();
2532 }
2533 } else {
2534 if (pa) {
2535 DMSG("va %p unmapped, expect 0x%" PRIxPA, va, pa);
2536 panic();
2537 }
2538 }
2539 }
2540 #else
2541 static void check_pa_matches_va(void *va __unused, paddr_t pa __unused)
2542 {
2543 }
2544 #endif
2545
2546 paddr_t virt_to_phys(void *va)
2547 {
2548 paddr_t pa = 0;
2549
2550 if (!arch_va2pa_helper(va, &pa))
2551 pa = 0;
2552 check_pa_matches_va(memtag_strip_tag(va), pa);
2553 return pa;
2554 }
2555
2556 /*
2557 * Don't use check_va_matches_pa() for RISC-V, as its callee
2558 * arch_va2pa_helper() will eventually call it back, creating
2559 * indirect recursion that can lead to a stack overflow.
2560 * Moreover, if arch_va2pa_helper() returns true, the va2pa
2561 * mapping already matches, so there is no need to check it again.
2562 */
2563 #if defined(CFG_TEE_CORE_DEBUG) && !defined(__riscv)
2564 static void check_va_matches_pa(paddr_t pa, void *va)
2565 {
2566 paddr_t p = 0;
2567
2568 if (!va)
2569 return;
2570
2571 p = virt_to_phys(va);
2572 if (p != pa) {
2573 DMSG("va %p maps 0x%" PRIxPA " expect 0x%" PRIxPA, va, p, pa);
2574 panic();
2575 }
2576 }
2577 #else
2578 static void check_va_matches_pa(paddr_t pa __unused, void *va __unused)
2579 {
2580 }
2581 #endif
2582
2583 static void *phys_to_virt_ts_vaspace(paddr_t pa, size_t len)
2584 {
2585 if (!core_mmu_user_mapping_is_active())
2586 return NULL;
2587
2588 return vm_pa2va(to_user_mode_ctx(thread_get_tsd()->ctx), pa, len);
2589 }
2590
2591 #ifdef CFG_WITH_PAGER
2592 static void *phys_to_virt_tee_ram(paddr_t pa, size_t len)
2593 {
2594 paddr_t end_pa = 0;
2595
2596 if (SUB_OVERFLOW(len, 1, &end_pa) || ADD_OVERFLOW(pa, end_pa, &end_pa))
2597 return NULL;
2598
2599 if (pa >= TEE_LOAD_ADDR && pa < get_linear_map_end_pa()) {
2600 if (end_pa > get_linear_map_end_pa())
2601 return NULL;
2602 return (void *)(vaddr_t)(pa + boot_mmu_config.map_offset);
2603 }
2604
2605 return tee_pager_phys_to_virt(pa, len);
2606 }
2607 #else
2608 static void *phys_to_virt_tee_ram(paddr_t pa, size_t len)
2609 {
2610 struct tee_mmap_region *mmap = NULL;
2611
2612 mmap = find_map_by_type_and_pa(MEM_AREA_TEE_RAM, pa, len);
2613 if (!mmap)
2614 mmap = find_map_by_type_and_pa(MEM_AREA_NEX_RAM_RW, pa, len);
2615 if (!mmap)
2616 mmap = find_map_by_type_and_pa(MEM_AREA_NEX_RAM_RO, pa, len);
2617 if (!mmap)
2618 mmap = find_map_by_type_and_pa(MEM_AREA_TEE_RAM_RW, pa, len);
2619 if (!mmap)
2620 mmap = find_map_by_type_and_pa(MEM_AREA_TEE_RAM_RO, pa, len);
2621 if (!mmap)
2622 mmap = find_map_by_type_and_pa(MEM_AREA_TEE_RAM_RX, pa, len);
2623
2624 /*
2625 * Note that MEM_AREA_INIT_RAM_RO and MEM_AREA_INIT_RAM_RX are only
2626 * used with pager and not needed here.
2627 */
2628 return map_pa2va(mmap, pa, len);
2629 }
2630 #endif
2631
2632 void *phys_to_virt(paddr_t pa, enum teecore_memtypes m, size_t len)
2633 {
2634 void *va = NULL;
2635
2636 switch (m) {
2637 case MEM_AREA_TS_VASPACE:
2638 va = phys_to_virt_ts_vaspace(pa, len);
2639 break;
2640 case MEM_AREA_TEE_RAM:
2641 case MEM_AREA_TEE_RAM_RX:
2642 case MEM_AREA_TEE_RAM_RO:
2643 case MEM_AREA_TEE_RAM_RW:
2644 case MEM_AREA_NEX_RAM_RO:
2645 case MEM_AREA_NEX_RAM_RW:
2646 va = phys_to_virt_tee_ram(pa, len);
2647 break;
2648 case MEM_AREA_SHM_VASPACE:
2649 case MEM_AREA_NEX_DYN_VASPACE:
2650 case MEM_AREA_TEE_DYN_VASPACE:
2651 		/* Finding a VA from a PA in dynamic SHM is not yet supported */
2652 va = NULL;
2653 break;
2654 default:
2655 va = map_pa2va(find_map_by_type_and_pa(m, pa, len), pa, len);
2656 }
2657 if (m != MEM_AREA_SEC_RAM_OVERALL)
2658 check_va_matches_pa(pa, va);
2659 return va;
2660 }
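
/*
 * Usage sketch (not compiled, the PA is hypothetical): translate a
 * physical address back to its core VA for a known memory type.
 *
 *	uint32_t *p = phys_to_virt(0x0e102000, MEM_AREA_TEE_RAM_RW,
 *				   sizeof(uint32_t));
 *	if (!p)
 *		panic();
 */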
2661
2662 void *phys_to_virt_io(paddr_t pa, size_t len)
2663 {
2664 struct tee_mmap_region *map = NULL;
2665 void *va = NULL;
2666
2667 map = find_map_by_type_and_pa(MEM_AREA_IO_SEC, pa, len);
2668 if (!map)
2669 map = find_map_by_type_and_pa(MEM_AREA_IO_NSEC, pa, len);
2670 if (!map)
2671 return NULL;
2672 va = map_pa2va(map, pa, len);
2673 check_va_matches_pa(pa, va);
2674 return va;
2675 }
2676
2677 vaddr_t core_mmu_get_va(paddr_t pa, enum teecore_memtypes type, size_t len)
2678 {
2679 if (cpu_mmu_enabled())
2680 return (vaddr_t)phys_to_virt(pa, type, len);
2681
2682 return (vaddr_t)pa;
2683 }
2684
2685 #ifdef CFG_WITH_PAGER
2686 bool is_unpaged(const void *va)
2687 {
2688 vaddr_t v = (vaddr_t)va;
2689
2690 return v >= VCORE_START_VA && v < get_linear_map_end_va();
2691 }
2692 #endif
2693
2694 #ifdef CFG_NS_VIRTUALIZATION
2695 bool is_nexus(const void *va)
2696 {
2697 vaddr_t v = (vaddr_t)va;
2698
2699 return v >= VCORE_START_VA && v < VCORE_NEX_RW_PA + VCORE_NEX_RW_SZ;
2700 }
2701 #endif
2702
2703 vaddr_t io_pa_or_va(struct io_pa_va *p, size_t len)
2704 {
2705 assert(p->pa);
2706 if (cpu_mmu_enabled()) {
2707 if (!p->va)
2708 p->va = (vaddr_t)phys_to_virt_io(p->pa, len);
2709 assert(p->va);
2710 return p->va;
2711 }
2712 return p->pa;
2713 }
2714
2715 vaddr_t io_pa_or_va_secure(struct io_pa_va *p, size_t len)
2716 {
2717 assert(p->pa);
2718 if (cpu_mmu_enabled()) {
2719 if (!p->va)
2720 p->va = (vaddr_t)phys_to_virt(p->pa, MEM_AREA_IO_SEC,
2721 len);
2722 assert(p->va);
2723 return p->va;
2724 }
2725 return p->pa;
2726 }
2727
2728 vaddr_t io_pa_or_va_nsec(struct io_pa_va *p, size_t len)
2729 {
2730 assert(p->pa);
2731 if (cpu_mmu_enabled()) {
2732 if (!p->va)
2733 p->va = (vaddr_t)phys_to_virt(p->pa, MEM_AREA_IO_NSEC,
2734 len);
2735 assert(p->va);
2736 return p->va;
2737 }
2738 return p->pa;
2739 }
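
/*
 * Usage sketch (not compiled): a driver typically keeps a struct
 * io_pa_va so the same code works both before and after the MMU is
 * enabled. The UART base address below is hypothetical.
 *
 *	static struct io_pa_va uart_base = { .pa = 0x09000000 };
 *
 *	vaddr_t base = io_pa_or_va_secure(&uart_base, 0x1000);
 *	io_write32(base + 0x00, '\r');
 */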
2740
2741 #ifdef CFG_CORE_RESERVED_SHM
2742 static TEE_Result teecore_init_pub_ram(void)
2743 {
2744 vaddr_t s = 0;
2745 vaddr_t e = 0;
2746
2747 /* get virtual addr/size of NSec shared mem allocated from teecore */
2748 core_mmu_get_mem_by_type(MEM_AREA_NSEC_SHM, &s, &e);
2749
2750 if (s >= e || s & SMALL_PAGE_MASK || e & SMALL_PAGE_MASK)
2751 panic("invalid PUB RAM");
2752
2753 /* extra check: we could rely on core_mmu_get_mem_by_type() */
2754 if (!tee_vbuf_is_non_sec(s, e - s))
2755 panic("PUB RAM is not non-secure");
2756
2757 #ifdef CFG_PL310
2758 	/* Statically allocate the l2cc mutex */
2759 tee_l2cc_store_mutex_boot_pa(virt_to_phys((void *)s));
2760 s += sizeof(uint32_t); /* size of a pl310 mutex */
2761 s = ROUNDUP(s, SMALL_PAGE_SIZE); /* keep required alignment */
2762 #endif
2763
2764 default_nsec_shm_paddr = virt_to_phys((void *)s);
2765 default_nsec_shm_size = e - s;
2766
2767 return TEE_SUCCESS;
2768 }
2769 early_init(teecore_init_pub_ram);
2770 #endif /*CFG_CORE_RESERVED_SHM*/
2771
2772 static void __maybe_unused carve_out_core_mem(paddr_t pa, paddr_t end_pa)
2773 {
2774 tee_mm_entry_t *mm __maybe_unused = NULL;
2775
2776 DMSG("%#"PRIxPA" .. %#"PRIxPA, pa, end_pa);
2777 mm = phys_mem_alloc2(pa, end_pa - pa);
2778 assert(mm);
2779 }
2780
2781 void core_mmu_init_phys_mem(void)
2782 {
2783 if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
2784 paddr_t b1 = 0;
2785 paddr_size_t s1 = 0;
2786
2787 static_assert(ARRAY_SIZE(secure_only) <= 2);
2788
2789 if (ARRAY_SIZE(secure_only) == 2) {
2790 b1 = secure_only[1].paddr;
2791 s1 = secure_only[1].size;
2792 }
2793 virt_init_memory(&static_memory_map, secure_only[0].paddr,
2794 secure_only[0].size, b1, s1);
2795 } else {
2796 #ifdef CFG_WITH_PAGER
2797 /*
2798 * The pager uses all core memory so there's no need to add
2799 * it to the pool.
2800 */
2801 static_assert(ARRAY_SIZE(secure_only) == 2);
2802 phys_mem_init(0, 0, secure_only[1].paddr, secure_only[1].size);
2803 #else /*!CFG_WITH_PAGER*/
2804 size_t align = BIT(CORE_MMU_USER_CODE_SHIFT);
2805 paddr_t end_pa = 0;
2806 size_t size = 0;
2807 paddr_t ps = 0;
2808 paddr_t pa = 0;
2809
2810 static_assert(ARRAY_SIZE(secure_only) <= 2);
2811 if (ARRAY_SIZE(secure_only) == 2) {
2812 ps = secure_only[1].paddr;
2813 size = secure_only[1].size;
2814 }
2815 phys_mem_init(secure_only[0].paddr, secure_only[0].size,
2816 ps, size);
2817
2818 /*
2819 * The VCORE macros are relocatable so we need to translate
2820 * the addresses now that the MMU is enabled.
2821 */
2822 end_pa = vaddr_to_phys(ROUNDUP2(VCORE_FREE_END_PA,
2823 align) - 1) + 1;
2824 /* Carve out the part used by OP-TEE core */
2825 carve_out_core_mem(vaddr_to_phys(VCORE_UNPG_RX_PA), end_pa);
2826 if (IS_ENABLED(CFG_CORE_SANITIZE_KADDRESS)) {
2827 pa = vaddr_to_phys(ROUNDUP2(ASAN_MAP_PA, align));
2828 carve_out_core_mem(pa, pa + ASAN_MAP_SZ);
2829 }
2830
2831 /* Carve out test SDP memory */
2832 #ifdef TEE_SDP_TEST_MEM_BASE
2833 if (TEE_SDP_TEST_MEM_SIZE) {
2834 pa = TEE_SDP_TEST_MEM_BASE;
2835 carve_out_core_mem(pa, pa + TEE_SDP_TEST_MEM_SIZE);
2836 }
2837 #endif
2838 #endif /*!CFG_WITH_PAGER*/
2839 }
2840 }
2841