xref: /optee_os/core/mm/core_mmu.c (revision 93a6acc05603ab4522e52f742df3410ad671a2ee)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2016-2025 Linaro Limited
4  * Copyright (c) 2014, STMicroelectronics International N.V.
5  * Copyright (c) 2022, Arm Limited and Contributors. All rights reserved.
6  */
7 
8 #include <assert.h>
9 #include <config.h>
10 #include <kernel/boot.h>
11 #include <kernel/dt.h>
12 #include <kernel/linker.h>
13 #include <kernel/panic.h>
14 #include <kernel/spinlock.h>
15 #include <kernel/tee_l2cc_mutex.h>
16 #include <kernel/tee_misc.h>
17 #include <kernel/tlb_helpers.h>
18 #include <kernel/user_mode_ctx.h>
19 #include <kernel/virtualization.h>
20 #include <libfdt.h>
21 #include <memtag.h>
22 #include <mm/core_memprot.h>
23 #include <mm/core_mmu.h>
24 #include <mm/mobj.h>
25 #include <mm/pgt_cache.h>
26 #include <mm/phys_mem.h>
27 #include <mm/tee_pager.h>
28 #include <mm/vm.h>
29 #include <platform_config.h>
30 #include <stdalign.h>
31 #include <string.h>
32 #include <trace.h>
33 #include <util.h>
34 
35 #ifndef DEBUG_XLAT_TABLE
36 #define DEBUG_XLAT_TABLE 0
37 #endif
38 
39 #define SHM_VASPACE_SIZE	(1024 * 1024 * 32)
40 
41 /* Virtual memory pool for core mappings */
42 tee_mm_pool_t core_virt_mem_pool;
43 
44 /* Virtual memory pool for shared memory mappings */
45 tee_mm_pool_t core_virt_shm_pool;
46 
47 #ifdef CFG_CORE_PHYS_RELOCATABLE
48 unsigned long core_mmu_tee_load_pa __nex_bss;
49 #else
50 const unsigned long core_mmu_tee_load_pa = TEE_LOAD_ADDR;
51 #endif
52 
53 /*
54  * These variables are initialized before .bss is cleared. To avoid
55  * resetting them when .bss is cleared we're storing them in .data instead,
56  * even if they initially are zero.
57  */
58 
59 #ifdef CFG_CORE_RESERVED_SHM
60 /* Default NSec shared memory allocated from NSec world */
61 unsigned long default_nsec_shm_size __nex_bss;
62 unsigned long default_nsec_shm_paddr __nex_bss;
63 #endif
64 
65 static struct memory_map static_memory_map __nex_bss;
66 void (*memory_map_realloc_func)(struct memory_map *mem_map) __nex_bss;
67 
68 /* Offset of the first TEE RAM mapping from start of secure RAM */
69 static size_t tee_ram_initial_offs __nex_bss;
70 
71 /* Define the platform's memory layout. */
72 struct memaccess_area {
73 	paddr_t paddr;
74 	size_t size;
75 };
76 
77 #define MEMACCESS_AREA(a, s) { .paddr = a, .size = s }
78 
79 static struct memaccess_area secure_only[] __nex_data = {
80 #ifdef CFG_CORE_PHYS_RELOCATABLE
81 	MEMACCESS_AREA(0, 0),
82 #else
83 #ifdef TRUSTED_SRAM_BASE
84 	MEMACCESS_AREA(TRUSTED_SRAM_BASE, TRUSTED_SRAM_SIZE),
85 #endif
86 	MEMACCESS_AREA(TRUSTED_DRAM_BASE, TRUSTED_DRAM_SIZE),
87 #endif
88 };
89 
90 static struct memaccess_area nsec_shared[] __nex_data = {
91 #ifdef CFG_CORE_RESERVED_SHM
92 	MEMACCESS_AREA(TEE_SHMEM_START, TEE_SHMEM_SIZE),
93 #endif
94 };
95 
96 #if defined(CFG_SECURE_DATA_PATH)
97 static const char *tz_sdp_match = "linaro,secure-heap";
98 static struct memaccess_area sec_sdp;
99 #ifdef CFG_TEE_SDP_MEM_BASE
100 register_sdp_mem(CFG_TEE_SDP_MEM_BASE, CFG_TEE_SDP_MEM_SIZE);
101 #endif
102 #ifdef TEE_SDP_TEST_MEM_BASE
103 register_sdp_mem(TEE_SDP_TEST_MEM_BASE, TEE_SDP_TEST_MEM_SIZE);
104 #endif
105 #endif
106 
107 #ifdef CFG_CORE_RESERVED_SHM
108 register_phys_mem(MEM_AREA_NSEC_SHM, TEE_SHMEM_START, TEE_SHMEM_SIZE);
109 #endif
110 static unsigned int mmu_spinlock;
111 
mmu_lock(void)112 static uint32_t mmu_lock(void)
113 {
114 	return cpu_spin_lock_xsave(&mmu_spinlock);
115 }
116 
mmu_unlock(uint32_t exceptions)117 static void mmu_unlock(uint32_t exceptions)
118 {
119 	cpu_spin_unlock_xrestore(&mmu_spinlock, exceptions);
120 }
121 
heap_realloc_memory_map(struct memory_map * mem_map)122 static void heap_realloc_memory_map(struct memory_map *mem_map)
123 {
124 	struct tee_mmap_region *m = NULL;
125 	struct tee_mmap_region *old = mem_map->map;
126 	size_t old_sz = sizeof(*old) * mem_map->alloc_count;
127 	size_t sz = old_sz + sizeof(*m);
128 
129 	assert(nex_malloc_buffer_is_within_alloced(old, old_sz));
130 	m = nex_realloc(old, sz);
131 	if (!m)
132 		panic();
133 	mem_map->map = m;
134 	mem_map->alloc_count++;
135 }
136 
boot_mem_realloc_memory_map(struct memory_map * mem_map)137 static void boot_mem_realloc_memory_map(struct memory_map *mem_map)
138 {
139 	struct tee_mmap_region *m = NULL;
140 	struct tee_mmap_region *old = mem_map->map;
141 	size_t old_sz = sizeof(*old) * mem_map->alloc_count;
142 	size_t sz = old_sz * 2;
143 
144 	m = boot_mem_alloc_tmp(sz, alignof(*m));
145 	memcpy(m, old, old_sz);
146 	mem_map->map = m;
147 	mem_map->alloc_count *= 2;
148 }
149 
grow_mem_map(struct memory_map * mem_map)150 static void grow_mem_map(struct memory_map *mem_map)
151 {
152 	if (mem_map->count == mem_map->alloc_count) {
153 		if (!memory_map_realloc_func) {
154 			EMSG("Out of entries (%zu) in mem_map",
155 			     mem_map->alloc_count);
156 			panic();
157 		}
158 		memory_map_realloc_func(mem_map);
159 	}
160 	mem_map->count++;
161 }
162 
core_mmu_get_secure_memory(paddr_t * base,paddr_size_t * size)163 void core_mmu_get_secure_memory(paddr_t *base, paddr_size_t *size)
164 {
165 	/*
166 	 * The first range is always used to cover OP-TEE core memory, but
167 	 * depending on configuration it may cover more than that.
168 	 */
169 	*base = secure_only[0].paddr;
170 	*size = secure_only[0].size;
171 }
172 
core_mmu_set_secure_memory(paddr_t base,size_t size)173 void core_mmu_set_secure_memory(paddr_t base, size_t size)
174 {
175 #ifdef CFG_CORE_PHYS_RELOCATABLE
176 	static_assert(ARRAY_SIZE(secure_only) == 1);
177 #endif
178 	runtime_assert(IS_ENABLED(CFG_CORE_PHYS_RELOCATABLE));
179 	assert(!secure_only[0].size);
180 	assert(base && size);
181 
182 	DMSG("Physical secure memory base %#"PRIxPA" size %#zx", base, size);
183 	secure_only[0].paddr = base;
184 	secure_only[0].size = size;
185 }
186 
get_memory_map(void)187 static struct memory_map *get_memory_map(void)
188 {
189 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
190 		struct memory_map *map = virt_get_memory_map();
191 
192 		if (map)
193 			return map;
194 	}
195 
196 	return &static_memory_map;
197 }
198 
_pbuf_intersects(struct memaccess_area * a,size_t alen,paddr_t pa,size_t size)199 static bool _pbuf_intersects(struct memaccess_area *a, size_t alen,
200 			     paddr_t pa, size_t size)
201 {
202 	size_t n;
203 
204 	for (n = 0; n < alen; n++)
205 		if (core_is_buffer_intersect(pa, size, a[n].paddr, a[n].size))
206 			return true;
207 	return false;
208 }
209 
210 #define pbuf_intersects(a, pa, size) \
211 	_pbuf_intersects((a), ARRAY_SIZE(a), (pa), (size))
212 
_pbuf_is_inside(struct memaccess_area * a,size_t alen,paddr_t pa,size_t size)213 static bool _pbuf_is_inside(struct memaccess_area *a, size_t alen,
214 			    paddr_t pa, size_t size)
215 {
216 	size_t n;
217 
218 	for (n = 0; n < alen; n++)
219 		if (core_is_buffer_inside(pa, size, a[n].paddr, a[n].size))
220 			return true;
221 	return false;
222 }
223 
224 #define pbuf_is_inside(a, pa, size) \
225 	_pbuf_is_inside((a), ARRAY_SIZE(a), (pa), (size))
226 
pa_is_in_map(struct tee_mmap_region * map,paddr_t pa,size_t len)227 static bool pa_is_in_map(struct tee_mmap_region *map, paddr_t pa, size_t len)
228 {
229 	paddr_t end_pa = 0;
230 
231 	if (!map)
232 		return false;
233 
234 	if (SUB_OVERFLOW(len, 1, &end_pa) || ADD_OVERFLOW(pa, end_pa, &end_pa))
235 		return false;
236 
237 	return (pa >= map->pa && end_pa <= map->pa + map->size - 1);
238 }
239 
va_is_in_map(struct tee_mmap_region * map,vaddr_t va)240 static bool va_is_in_map(struct tee_mmap_region *map, vaddr_t va)
241 {
242 	if (!map)
243 		return false;
244 	return (va >= map->va && va <= (map->va + map->size - 1));
245 }
246 
247 /* check if target buffer fits in a core default map area */
pbuf_inside_map_area(unsigned long p,size_t l,struct tee_mmap_region * map)248 static bool pbuf_inside_map_area(unsigned long p, size_t l,
249 				 struct tee_mmap_region *map)
250 {
251 	return core_is_buffer_inside(p, l, map->pa, map->size);
252 }
253 
core_mmu_for_each_map(void * ptr,TEE_Result (* fn)(struct tee_mmap_region * map,void * ptr))254 TEE_Result core_mmu_for_each_map(void *ptr,
255 				 TEE_Result (*fn)(struct tee_mmap_region *map,
256 						  void *ptr))
257 {
258 	struct memory_map *mem_map = get_memory_map();
259 	TEE_Result res = TEE_SUCCESS;
260 	size_t n = 0;
261 
262 	for (n = 0; n < mem_map->count; n++) {
263 		res = fn(mem_map->map + n, ptr);
264 		if (res)
265 			return res;
266 	}
267 
268 	return TEE_SUCCESS;
269 }
270 
find_map_by_type(enum teecore_memtypes type)271 static struct tee_mmap_region *find_map_by_type(enum teecore_memtypes type)
272 {
273 	struct memory_map *mem_map = get_memory_map();
274 	size_t n = 0;
275 
276 	for (n = 0; n < mem_map->count; n++) {
277 		if (mem_map->map[n].type == type)
278 			return mem_map->map + n;
279 	}
280 	return NULL;
281 }
282 
283 static struct tee_mmap_region *
find_map_by_type_and_pa(enum teecore_memtypes type,paddr_t pa,size_t len)284 find_map_by_type_and_pa(enum teecore_memtypes type, paddr_t pa, size_t len)
285 {
286 	struct memory_map *mem_map = get_memory_map();
287 	size_t n = 0;
288 
289 	for (n = 0; n < mem_map->count; n++) {
290 		if (mem_map->map[n].type != type)
291 			continue;
292 		if (pa_is_in_map(mem_map->map + n, pa, len))
293 			return mem_map->map + n;
294 	}
295 	return NULL;
296 }
297 
find_map_by_va(void * va)298 static struct tee_mmap_region *find_map_by_va(void *va)
299 {
300 	struct memory_map *mem_map = get_memory_map();
301 	vaddr_t a = (vaddr_t)va;
302 	size_t n = 0;
303 
304 	for (n = 0; n < mem_map->count; n++) {
305 		if (a >= mem_map->map[n].va &&
306 		    a <= (mem_map->map[n].va - 1 + mem_map->map[n].size))
307 			return mem_map->map + n;
308 	}
309 
310 	return NULL;
311 }
312 
find_map_by_pa(unsigned long pa)313 static struct tee_mmap_region *find_map_by_pa(unsigned long pa)
314 {
315 	struct memory_map *mem_map = get_memory_map();
316 	size_t n = 0;
317 
318 	for (n = 0; n < mem_map->count; n++) {
319 		/* Skip unmapped regions */
320 		if ((mem_map->map[n].attr & TEE_MATTR_VALID_BLOCK) &&
321 		    pa >= mem_map->map[n].pa &&
322 		    pa <= (mem_map->map[n].pa - 1 + mem_map->map[n].size))
323 			return mem_map->map + n;
324 	}
325 
326 	return NULL;
327 }
328 
329 #if defined(CFG_SECURE_DATA_PATH)
dtb_get_sdp_region(void)330 static bool dtb_get_sdp_region(void)
331 {
332 	void *fdt = NULL;
333 	int node = 0;
334 	int tmp_node = 0;
335 	paddr_t tmp_addr = 0;
336 	size_t tmp_size = 0;
337 
338 	if (!IS_ENABLED(CFG_EMBED_DTB))
339 		return false;
340 
341 	fdt = get_embedded_dt();
342 	if (!fdt)
343 		panic("No DTB found");
344 
345 	node = fdt_node_offset_by_compatible(fdt, -1, tz_sdp_match);
346 	if (node < 0) {
347 		DMSG("No %s compatible node found", tz_sdp_match);
348 		return false;
349 	}
350 	tmp_node = node;
351 	while (tmp_node >= 0) {
352 		tmp_node = fdt_node_offset_by_compatible(fdt, tmp_node,
353 							 tz_sdp_match);
354 		if (tmp_node >= 0)
355 			DMSG("Ignore SDP pool node %s, supports only 1 node",
356 			     fdt_get_name(fdt, tmp_node, NULL));
357 	}
358 
359 	if (fdt_reg_info(fdt, node, &tmp_addr, &tmp_size)) {
360 		EMSG("%s: Unable to get base addr or size from DT",
361 		     tz_sdp_match);
362 		return false;
363 	}
364 
365 	sec_sdp.paddr = tmp_addr;
366 	sec_sdp.size = tmp_size;
367 
368 	return true;
369 }
370 #endif
371 
372 #if defined(CFG_CORE_DYN_SHM) || defined(CFG_SECURE_DATA_PATH)
pbuf_is_special_mem(paddr_t pbuf,size_t len,const struct core_mmu_phys_mem * start,const struct core_mmu_phys_mem * end)373 static bool pbuf_is_special_mem(paddr_t pbuf, size_t len,
374 				const struct core_mmu_phys_mem *start,
375 				const struct core_mmu_phys_mem *end)
376 {
377 	const struct core_mmu_phys_mem *mem;
378 
379 	for (mem = start; mem < end; mem++) {
380 		if (core_is_buffer_inside(pbuf, len, mem->addr, mem->size))
381 			return true;
382 	}
383 
384 	return false;
385 }
386 #endif
387 
388 #ifdef CFG_CORE_DYN_SHM
carve_out_phys_mem(struct core_mmu_phys_mem ** mem,size_t * nelems,paddr_t pa,size_t size)389 static void carve_out_phys_mem(struct core_mmu_phys_mem **mem, size_t *nelems,
390 			       paddr_t pa, size_t size)
391 {
392 	struct core_mmu_phys_mem *m = *mem;
393 	size_t n = 0;
394 
395 	while (n < *nelems) {
396 		if (!core_is_buffer_intersect(pa, size, m[n].addr, m[n].size)) {
397 			n++;
398 			continue;
399 		}
400 
401 		if (core_is_buffer_inside(m[n].addr, m[n].size, pa, size)) {
402 			/* m[n] is completely covered by pa:size */
403 			rem_array_elem(m, *nelems, sizeof(*m), n);
404 			(*nelems)--;
405 			m = nex_realloc(m, sizeof(*m) * *nelems);
406 			if (!m)
407 				panic();
408 			*mem = m;
409 			continue;
410 		}
411 
412 		if (pa > m[n].addr &&
413 		    pa + size - 1 < m[n].addr + m[n].size - 1) {
414 			/*
415 			 * pa:size is strictly inside m[n] range so split
416 			 * m[n] entry.
417 			 */
418 			m = nex_realloc(m, sizeof(*m) * (*nelems + 1));
419 			if (!m)
420 				panic();
421 			*mem = m;
422 			(*nelems)++;
423 			ins_array_elem(m, *nelems, sizeof(*m), n + 1, NULL);
424 			m[n + 1].addr = pa + size;
425 			m[n + 1].size = m[n].addr + m[n].size - pa - size;
426 			m[n].size = pa - m[n].addr;
427 			n++;
428 		} else if (pa <= m[n].addr) {
429 			/*
430 			 * pa:size is overlapping (possibly partially) at the
431 			 * beginning of m[n].
432 			 */
433 			m[n].size = m[n].addr + m[n].size - pa - size;
434 			m[n].addr = pa + size;
435 		} else {
436 			/*
437 			 * pa:size is overlapping (possibly partially) at
438 			 * the end of m[n].
439 			 */
440 			m[n].size = pa - m[n].addr;
441 		}
442 		n++;
443 	}
444 }
445 
check_phys_mem_is_outside(struct core_mmu_phys_mem * start,size_t nelems,struct tee_mmap_region * map)446 static void check_phys_mem_is_outside(struct core_mmu_phys_mem *start,
447 				      size_t nelems,
448 				      struct tee_mmap_region *map)
449 {
450 	size_t n;
451 
452 	for (n = 0; n < nelems; n++) {
453 		if (!core_is_buffer_outside(start[n].addr, start[n].size,
454 					    map->pa, map->size)) {
455 			EMSG("Non-sec mem (%#" PRIxPA ":%#" PRIxPASZ
456 			     ") overlaps map (type %d %#" PRIxPA ":%#zx)",
457 			     start[n].addr, start[n].size,
458 			     map->type, map->pa, map->size);
459 			panic();
460 		}
461 	}
462 }
463 
464 static const struct core_mmu_phys_mem *discovered_nsec_ddr_start __nex_bss;
465 static size_t discovered_nsec_ddr_nelems __nex_bss;
466 
cmp_pmem_by_addr(const void * a,const void * b)467 static int cmp_pmem_by_addr(const void *a, const void *b)
468 {
469 	const struct core_mmu_phys_mem *pmem_a = a;
470 	const struct core_mmu_phys_mem *pmem_b = b;
471 
472 	return CMP_TRILEAN(pmem_a->addr, pmem_b->addr);
473 }
474 
core_mmu_set_discovered_nsec_ddr(struct core_mmu_phys_mem * start,size_t nelems)475 void core_mmu_set_discovered_nsec_ddr(struct core_mmu_phys_mem *start,
476 				      size_t nelems)
477 {
478 	struct core_mmu_phys_mem *m = start;
479 	size_t num_elems = nelems;
480 	struct memory_map *mem_map = &static_memory_map;
481 	const struct core_mmu_phys_mem __maybe_unused *pmem;
482 	size_t n = 0;
483 
484 	assert(!discovered_nsec_ddr_start);
485 	assert(m && num_elems);
486 
487 	qsort(m, num_elems, sizeof(*m), cmp_pmem_by_addr);
488 
489 	/*
490 	 * Non-secure shared memory and also secure data
491 	 * path memory are supposed to reside inside
492 	 * non-secure memory. Since NSEC_SHM and SDP_MEM
493 	 * are used for a specific purpose make holes for
494 	 * those memory in the normal non-secure memory.
495 	 *
496 	 * This has to be done since for instance QEMU
497 	 * isn't aware of which memory range in the
498 	 * non-secure memory is used for NSEC_SHM.
499 	 */
500 
501 #ifdef CFG_SECURE_DATA_PATH
502 	if (dtb_get_sdp_region())
503 		carve_out_phys_mem(&m, &num_elems, sec_sdp.paddr, sec_sdp.size);
504 
505 	for (pmem = phys_sdp_mem_begin; pmem < phys_sdp_mem_end; pmem++)
506 		carve_out_phys_mem(&m, &num_elems, pmem->addr, pmem->size);
507 #endif
508 
509 	for (n = 0; n < ARRAY_SIZE(secure_only); n++)
510 		carve_out_phys_mem(&m, &num_elems, secure_only[n].paddr,
511 				   secure_only[n].size);
512 
513 	for  (n = 0; n < mem_map->count; n++) {
514 		switch (mem_map->map[n].type) {
515 		case MEM_AREA_NSEC_SHM:
516 			carve_out_phys_mem(&m, &num_elems, mem_map->map[n].pa,
517 					   mem_map->map[n].size);
518 			break;
519 		case MEM_AREA_EXT_DT:
520 		case MEM_AREA_MANIFEST_DT:
521 		case MEM_AREA_RAM_NSEC:
522 		case MEM_AREA_RES_VASPACE:
523 		case MEM_AREA_SHM_VASPACE:
524 		case MEM_AREA_TS_VASPACE:
525 		case MEM_AREA_PAGER_VASPACE:
526 		case MEM_AREA_NEX_DYN_VASPACE:
527 		case MEM_AREA_TEE_DYN_VASPACE:
528 			break;
529 		default:
530 			check_phys_mem_is_outside(m, num_elems,
531 						  mem_map->map + n);
532 		}
533 	}
534 
535 	discovered_nsec_ddr_start = m;
536 	discovered_nsec_ddr_nelems = num_elems;
537 
538 	DMSG("Non-secure RAM:");
539 	for (n = 0; n < num_elems; n++)
540 		DMSG("%zu: pa %#"PRIxPA"..%#"PRIxPA" sz %#"PRIxPASZ,
541 		     n, m[n].addr, m[n].addr + m[n].size - 1, m[n].size);
542 
543 	if (!core_mmu_check_end_pa(m[num_elems - 1].addr,
544 				   m[num_elems - 1].size))
545 		panic();
546 }
547 
get_discovered_nsec_ddr(const struct core_mmu_phys_mem ** start,const struct core_mmu_phys_mem ** end)548 static bool get_discovered_nsec_ddr(const struct core_mmu_phys_mem **start,
549 				    const struct core_mmu_phys_mem **end)
550 {
551 	if (!discovered_nsec_ddr_start)
552 		return false;
553 
554 	*start = discovered_nsec_ddr_start;
555 	*end = discovered_nsec_ddr_start + discovered_nsec_ddr_nelems;
556 
557 	return true;
558 }
559 
pbuf_is_nsec_ddr(paddr_t pbuf,size_t len)560 static bool pbuf_is_nsec_ddr(paddr_t pbuf, size_t len)
561 {
562 	const struct core_mmu_phys_mem *start;
563 	const struct core_mmu_phys_mem *end;
564 
565 	if (!get_discovered_nsec_ddr(&start, &end))
566 		return false;
567 
568 	return pbuf_is_special_mem(pbuf, len, start, end);
569 }
570 
core_mmu_nsec_ddr_is_defined(void)571 bool core_mmu_nsec_ddr_is_defined(void)
572 {
573 	const struct core_mmu_phys_mem *start;
574 	const struct core_mmu_phys_mem *end;
575 
576 	if (!get_discovered_nsec_ddr(&start, &end))
577 		return false;
578 
579 	return start != end;
580 }
581 
582 TEE_Result
core_mmu_for_each_nsec_ddr(void * ptr,TEE_Result (* fn)(const struct core_mmu_phys_mem * m,void * ptr))583 core_mmu_for_each_nsec_ddr(void *ptr,
584 			   TEE_Result (*fn)(const struct core_mmu_phys_mem *m,
585 					    void *ptr))
586 {
587 	const struct core_mmu_phys_mem *start = NULL;
588 	const struct core_mmu_phys_mem *end = NULL;
589 	const struct core_mmu_phys_mem *mem = NULL;
590 	TEE_Result res = TEE_ERROR_GENERIC;
591 
592 	if (!get_discovered_nsec_ddr(&start, &end))
593 		return TEE_ERROR_GENERIC;
594 
595 	for (mem = start; mem < end; mem++) {
596 		res = fn(mem, ptr);
597 		if (res)
598 			return res;
599 	}
600 
601 	return TEE_SUCCESS;
602 }
603 #else
pbuf_is_nsec_ddr(paddr_t pbuf __unused,size_t len __unused)604 static bool pbuf_is_nsec_ddr(paddr_t pbuf __unused, size_t len __unused)
605 {
606 	return false;
607 }
608 #endif /*CFG_CORE_DYN_SHM*/
609 
610 #define MSG_MEM_INSTERSECT(pa1, sz1, pa2, sz2) \
611 	EMSG("[%" PRIxPA " %" PRIx64 "] intersects [%" PRIxPA " %" PRIx64 "]", \
612 			pa1, (uint64_t)pa1 + (sz1), pa2, (uint64_t)pa2 + (sz2))
613 
614 #ifdef CFG_SECURE_DATA_PATH
pbuf_is_sdp_mem(paddr_t pbuf,size_t len)615 static bool pbuf_is_sdp_mem(paddr_t pbuf, size_t len)
616 {
617 	bool is_sdp_mem = false;
618 
619 	if (sec_sdp.size)
620 		is_sdp_mem = core_is_buffer_inside(pbuf, len, sec_sdp.paddr,
621 						   sec_sdp.size);
622 
623 	if (!is_sdp_mem)
624 		is_sdp_mem = pbuf_is_special_mem(pbuf, len, phys_sdp_mem_begin,
625 						 phys_sdp_mem_end);
626 
627 	if (!is_sdp_mem) {
628 		struct mobj *m = mobj_protmem_get_by_pa(pbuf, len);
629 
630 		if (!m)
631 			m = mobj_ffa_protmem_get_by_pa(pbuf, len);
632 		if (m) {
633 			mobj_put(m);
634 			is_sdp_mem = true;
635 		}
636 	}
637 
638 	return is_sdp_mem;
639 }
640 
core_sdp_mem_alloc_mobj(paddr_t pa,size_t size)641 static struct mobj *core_sdp_mem_alloc_mobj(paddr_t pa, size_t size)
642 {
643 	struct mobj *mobj = mobj_phys_alloc(pa, size, TEE_MATTR_MEM_TYPE_CACHED,
644 					    CORE_MEM_SDP_MEM);
645 
646 	if (!mobj)
647 		panic("can't create SDP physical memory object");
648 
649 	return mobj;
650 }
651 
core_sdp_mem_create_mobjs(void)652 struct mobj **core_sdp_mem_create_mobjs(void)
653 {
654 	const struct core_mmu_phys_mem *mem = NULL;
655 	struct mobj **mobj_base = NULL;
656 	struct mobj **mobj = NULL;
657 	int cnt = phys_sdp_mem_end - phys_sdp_mem_begin;
658 
659 	if (sec_sdp.size)
660 		cnt++;
661 
662 	/* SDP mobjs table must end with a NULL entry */
663 	mobj_base = calloc(cnt + 1, sizeof(struct mobj *));
664 	if (!mobj_base)
665 		panic("Out of memory");
666 
667 	mobj = mobj_base;
668 
669 	for (mem = phys_sdp_mem_begin; mem < phys_sdp_mem_end; mem++, mobj++)
670 		*mobj = core_sdp_mem_alloc_mobj(mem->addr, mem->size);
671 
672 	if (sec_sdp.size)
673 		*mobj = core_sdp_mem_alloc_mobj(sec_sdp.paddr, sec_sdp.size);
674 
675 	return mobj_base;
676 }
677 
678 #else /* CFG_SECURE_DATA_PATH */
pbuf_is_sdp_mem(paddr_t pbuf __unused,size_t len __unused)679 static bool pbuf_is_sdp_mem(paddr_t pbuf __unused, size_t len __unused)
680 {
681 	return false;
682 }
683 
684 #endif /* CFG_SECURE_DATA_PATH */
685 
686 /* Check special memories comply with registered memories */
verify_special_mem_areas(struct memory_map * mem_map,const struct core_mmu_phys_mem * start,const struct core_mmu_phys_mem * end,const char * area_name __maybe_unused)687 static void verify_special_mem_areas(struct memory_map *mem_map,
688 				     const struct core_mmu_phys_mem *start,
689 				     const struct core_mmu_phys_mem *end,
690 				     const char *area_name __maybe_unused)
691 {
692 	const struct core_mmu_phys_mem *mem = NULL;
693 	const struct core_mmu_phys_mem *mem2 = NULL;
694 	size_t n = 0;
695 
696 	if (start == end) {
697 		DMSG("No %s memory area defined", area_name);
698 		return;
699 	}
700 
701 	for (mem = start; mem < end; mem++)
702 		DMSG("%s memory [%" PRIxPA " %" PRIx64 "]",
703 		     area_name, mem->addr, (uint64_t)mem->addr + mem->size);
704 
705 	/* Check memories do not intersect each other */
706 	for (mem = start; mem + 1 < end; mem++) {
707 		for (mem2 = mem + 1; mem2 < end; mem2++) {
708 			if (core_is_buffer_intersect(mem2->addr, mem2->size,
709 						     mem->addr, mem->size)) {
710 				MSG_MEM_INSTERSECT(mem2->addr, mem2->size,
711 						   mem->addr, mem->size);
712 				panic("Special memory intersection");
713 			}
714 		}
715 	}
716 
717 	/*
718 	 * Check memories do not intersect any mapped memory.
719 	 * This is called before reserved VA space is loaded in mem_map.
720 	 */
721 	for (mem = start; mem < end; mem++) {
722 		for (n = 0; n < mem_map->count; n++) {
723 #ifdef TEE_SDP_TEST_MEM_BASE
724 			/*
725 			 * Ignore MEM_AREA_SEC_RAM_OVERALL since it covers
726 			 * TEE_SDP_TEST_MEM too.
727 			 */
728 			if (mem->addr == TEE_SDP_TEST_MEM_BASE &&
729 			    mem->size == TEE_SDP_TEST_MEM_SIZE &&
730 			    mem_map->map[n].type == MEM_AREA_SEC_RAM_OVERALL)
731 				continue;
732 #endif
733 			if (core_is_buffer_intersect(mem->addr, mem->size,
734 						     mem_map->map[n].pa,
735 						     mem_map->map[n].size)) {
736 				MSG_MEM_INSTERSECT(mem->addr, mem->size,
737 						   mem_map->map[n].pa,
738 						   mem_map->map[n].size);
739 				panic("Special memory intersection");
740 			}
741 		}
742 	}
743 }
744 
merge_mmaps(struct tee_mmap_region * dst,const struct tee_mmap_region * src)745 static void merge_mmaps(struct tee_mmap_region *dst,
746 			const struct tee_mmap_region *src)
747 {
748 	paddr_t end_pa = MAX(dst->pa + dst->size - 1, src->pa + src->size - 1);
749 	paddr_t pa = MIN(dst->pa, src->pa);
750 
751 	DMSG("Merging %#"PRIxPA"..%#"PRIxPA" and %#"PRIxPA"..%#"PRIxPA,
752 	     dst->pa, dst->pa + dst->size - 1, src->pa,
753 	     src->pa + src->size - 1);
754 	dst->pa = pa;
755 	dst->size = end_pa - pa + 1;
756 }
757 
mmaps_are_mergeable(const struct tee_mmap_region * r1,const struct tee_mmap_region * r2)758 static bool mmaps_are_mergeable(const struct tee_mmap_region *r1,
759 				const struct tee_mmap_region *r2)
760 {
761 	if (r1->type != r2->type)
762 		return false;
763 
764 	if (r1->pa == r2->pa)
765 		return true;
766 
767 	if (r1->pa < r2->pa)
768 		return r1->pa + r1->size >= r2->pa;
769 	else
770 		return r2->pa + r2->size >= r1->pa;
771 }
772 
add_phys_mem(struct memory_map * mem_map,const char * mem_name __maybe_unused,enum teecore_memtypes mem_type,paddr_t mem_addr,paddr_size_t mem_size)773 static void add_phys_mem(struct memory_map *mem_map,
774 			 const char *mem_name __maybe_unused,
775 			 enum teecore_memtypes mem_type,
776 			 paddr_t mem_addr, paddr_size_t mem_size)
777 {
778 	size_t n = 0;
779 	const struct tee_mmap_region m0 = {
780 		.type = mem_type,
781 		.pa = mem_addr,
782 		.size = mem_size,
783 	};
784 
785 	if (!mem_size)	/* Discard null size entries */
786 		return;
787 
788 	/*
789 	 * If some ranges of memory of the same type do overlap
790 	 * each others they are coalesced into one entry. To help this
791 	 * added entries are sorted by increasing physical.
792 	 *
793 	 * Note that it's valid to have the same physical memory as several
794 	 * different memory types, for instance the same device memory
795 	 * mapped as both secure and non-secure. This will probably not
796 	 * happen often in practice.
797 	 */
798 	DMSG("%s type %s 0x%08" PRIxPA " size 0x%08" PRIxPASZ,
799 	     mem_name, teecore_memtype_name(mem_type), mem_addr, mem_size);
800 	for  (n = 0; n < mem_map->count; n++) {
801 		if (mmaps_are_mergeable(mem_map->map + n, &m0)) {
802 			merge_mmaps(mem_map->map + n, &m0);
803 			/*
804 			 * The merged result might be mergeable with the
805 			 * next or previous entry.
806 			 */
807 			if (n + 1 < mem_map->count &&
808 			    mmaps_are_mergeable(mem_map->map + n,
809 						mem_map->map + n + 1)) {
810 				merge_mmaps(mem_map->map + n,
811 					    mem_map->map + n + 1);
812 				rem_array_elem(mem_map->map, mem_map->count,
813 					       sizeof(*mem_map->map), n + 1);
814 				mem_map->count--;
815 			}
816 			if (n > 0 && mmaps_are_mergeable(mem_map->map + n - 1,
817 							 mem_map->map + n)) {
818 				merge_mmaps(mem_map->map + n - 1,
819 					    mem_map->map + n);
820 				rem_array_elem(mem_map->map, mem_map->count,
821 					       sizeof(*mem_map->map), n);
822 				mem_map->count--;
823 			}
824 			return;
825 		}
826 		if (mem_type < mem_map->map[n].type ||
827 		    (mem_type == mem_map->map[n].type &&
828 		     mem_addr < mem_map->map[n].pa))
829 			break; /* found the spot where to insert this memory */
830 	}
831 
832 	grow_mem_map(mem_map);
833 	ins_array_elem(mem_map->map, mem_map->count, sizeof(*mem_map->map),
834 		       n, &m0);
835 }
836 
add_va_space(struct memory_map * mem_map,enum teecore_memtypes type,size_t size)837 static void add_va_space(struct memory_map *mem_map,
838 			 enum teecore_memtypes type, size_t size)
839 {
840 	size_t n = 0;
841 
842 	DMSG("type %s size 0x%08zx", teecore_memtype_name(type), size);
843 	for  (n = 0; n < mem_map->count; n++) {
844 		if (type < mem_map->map[n].type)
845 			break;
846 	}
847 
848 	grow_mem_map(mem_map);
849 	ins_array_elem(mem_map->map, mem_map->count, sizeof(*mem_map->map),
850 		       n, NULL);
851 	mem_map->map[n] = (struct tee_mmap_region){
852 		.type = type,
853 		.size = size,
854 	};
855 }
856 
core_mmu_type_to_attr(enum teecore_memtypes t)857 uint32_t core_mmu_type_to_attr(enum teecore_memtypes t)
858 {
859 	const uint32_t attr = TEE_MATTR_VALID_BLOCK;
860 	const uint32_t tagged = TEE_MATTR_MEM_TYPE_TAGGED <<
861 				TEE_MATTR_MEM_TYPE_SHIFT;
862 	const uint32_t cached = TEE_MATTR_MEM_TYPE_CACHED <<
863 				TEE_MATTR_MEM_TYPE_SHIFT;
864 	const uint32_t noncache = TEE_MATTR_MEM_TYPE_DEV <<
865 				  TEE_MATTR_MEM_TYPE_SHIFT;
866 
867 	switch (t) {
868 	case MEM_AREA_TEE_RAM:
869 		return attr | TEE_MATTR_SECURE | TEE_MATTR_PRWX | tagged;
870 	case MEM_AREA_TEE_RAM_RX:
871 	case MEM_AREA_INIT_RAM_RX:
872 	case MEM_AREA_IDENTITY_MAP_RX:
873 		return attr | TEE_MATTR_SECURE | TEE_MATTR_PRX | tagged;
874 	case MEM_AREA_TEE_RAM_RO:
875 	case MEM_AREA_INIT_RAM_RO:
876 		return attr | TEE_MATTR_SECURE | TEE_MATTR_PR | tagged;
877 	case MEM_AREA_TEE_RAM_RW:
878 	case MEM_AREA_NEX_RAM_RO: /* This has to be r/w during init runtime */
879 	case MEM_AREA_NEX_RAM_RW:
880 	case MEM_AREA_NEX_DYN_VASPACE:
881 	case MEM_AREA_TEE_DYN_VASPACE:
882 	case MEM_AREA_TEE_ASAN:
883 		return attr | TEE_MATTR_SECURE | TEE_MATTR_PRW | tagged;
884 	case MEM_AREA_TEE_COHERENT:
885 		return attr | TEE_MATTR_SECURE | TEE_MATTR_PRWX | noncache;
886 	case MEM_AREA_NSEC_SHM:
887 	case MEM_AREA_NEX_NSEC_SHM:
888 		return attr | TEE_MATTR_PRW | cached;
889 	case MEM_AREA_MANIFEST_DT:
890 		return attr | TEE_MATTR_SECURE | TEE_MATTR_PR | cached;
891 	case MEM_AREA_TRANSFER_LIST:
892 		return attr | TEE_MATTR_SECURE | TEE_MATTR_PRW | cached;
893 	case MEM_AREA_EXT_DT:
894 		/*
895 		 * If CFG_MAP_EXT_DT_SECURE is enabled map the external device
896 		 * tree as secure non-cached memory, otherwise, fall back to
897 		 * non-secure mapping.
898 		 */
899 		if (IS_ENABLED(CFG_MAP_EXT_DT_SECURE))
900 			return attr | TEE_MATTR_SECURE | TEE_MATTR_PRW |
901 			       noncache;
902 		fallthrough;
903 	case MEM_AREA_IO_NSEC:
904 		return attr | TEE_MATTR_PRW | noncache;
905 	case MEM_AREA_IO_SEC:
906 		return attr | TEE_MATTR_SECURE | TEE_MATTR_PRW | noncache;
907 	case MEM_AREA_RAM_NSEC:
908 		return attr | TEE_MATTR_PRW | cached;
909 	case MEM_AREA_RAM_SEC:
910 		return attr | TEE_MATTR_SECURE | TEE_MATTR_PRW | cached;
911 	case MEM_AREA_SEC_RAM_OVERALL:
912 		return attr | TEE_MATTR_SECURE | TEE_MATTR_PRW | tagged;
913 	case MEM_AREA_ROM_SEC:
914 		return attr | TEE_MATTR_SECURE | TEE_MATTR_PR | cached;
915 	case MEM_AREA_RES_VASPACE:
916 	case MEM_AREA_SHM_VASPACE:
917 		return 0;
918 	case MEM_AREA_PAGER_VASPACE:
919 		return TEE_MATTR_SECURE;
920 	default:
921 		panic("invalid type");
922 	}
923 }
924 
map_is_tee_ram(const struct tee_mmap_region * mm)925 static bool __maybe_unused map_is_tee_ram(const struct tee_mmap_region *mm)
926 {
927 	switch (mm->type) {
928 	case MEM_AREA_TEE_RAM:
929 	case MEM_AREA_TEE_RAM_RX:
930 	case MEM_AREA_TEE_RAM_RO:
931 	case MEM_AREA_TEE_RAM_RW:
932 	case MEM_AREA_INIT_RAM_RX:
933 	case MEM_AREA_INIT_RAM_RO:
934 	case MEM_AREA_NEX_RAM_RW:
935 	case MEM_AREA_NEX_RAM_RO:
936 	case MEM_AREA_TEE_ASAN:
937 		return true;
938 	default:
939 		return false;
940 	}
941 }
942 
map_is_secure(const struct tee_mmap_region * mm)943 static bool __maybe_unused map_is_secure(const struct tee_mmap_region *mm)
944 {
945 	return !!(core_mmu_type_to_attr(mm->type) & TEE_MATTR_SECURE);
946 }
947 
map_is_pgdir(const struct tee_mmap_region * mm)948 static bool __maybe_unused map_is_pgdir(const struct tee_mmap_region *mm)
949 {
950 	return mm->region_size == CORE_MMU_PGDIR_SIZE;
951 }
952 
cmp_mmap_by_lower_va(const void * a,const void * b)953 static int cmp_mmap_by_lower_va(const void *a, const void *b)
954 {
955 	const struct tee_mmap_region *mm_a = a;
956 	const struct tee_mmap_region *mm_b = b;
957 
958 	return CMP_TRILEAN(mm_a->va, mm_b->va);
959 }
960 
dump_mmap_table(struct memory_map * mem_map)961 static void dump_mmap_table(struct memory_map *mem_map)
962 {
963 	size_t n = 0;
964 
965 	for (n = 0; n < mem_map->count; n++) {
966 		struct tee_mmap_region *map __maybe_unused = mem_map->map + n;
967 
968 		DMSG("type %-12s va 0x%08" PRIxVA "..0x%08" PRIxVA
969 		     " pa 0x%08" PRIxPA "..0x%08" PRIxPA " size 0x%08zx (%s)",
970 		     teecore_memtype_name(map->type), map->va,
971 		     map->va + map->size - 1, map->pa,
972 		     (paddr_t)(map->pa + map->size - 1), map->size,
973 		     map->region_size == SMALL_PAGE_SIZE ? "smallpg" : "pgdir");
974 	}
975 }
976 
977 #if DEBUG_XLAT_TABLE
978 
dump_xlat_table(vaddr_t va,unsigned int level)979 static void dump_xlat_table(vaddr_t va, unsigned int level)
980 {
981 	struct core_mmu_table_info tbl_info;
982 	unsigned int idx = 0;
983 	paddr_t pa;
984 	uint32_t attr;
985 
986 	core_mmu_find_table(NULL, va, level, &tbl_info);
987 	va = tbl_info.va_base;
988 	for (idx = 0; idx < tbl_info.num_entries; idx++) {
989 		core_mmu_get_entry(&tbl_info, idx, &pa, &attr);
990 		if (attr || level > CORE_MMU_BASE_TABLE_LEVEL) {
991 			const char *security_bit = "";
992 
993 			if (core_mmu_entry_have_security_bit(attr)) {
994 				if (attr & TEE_MATTR_SECURE)
995 					security_bit = "S";
996 				else
997 					security_bit = "NS";
998 			}
999 
1000 			if (attr & TEE_MATTR_TABLE) {
1001 				DMSG_RAW("%*s [LVL%d] VA:0x%010" PRIxVA
1002 					" TBL:0x%010" PRIxPA " %s",
1003 					level * 2, "", level, va, pa,
1004 					security_bit);
1005 				dump_xlat_table(va, level + 1);
1006 			} else if (attr) {
1007 				DMSG_RAW("%*s [LVL%d] VA:0x%010" PRIxVA
1008 					" PA:0x%010" PRIxPA " %s-%s-%s-%s",
1009 					level * 2, "", level, va, pa,
1010 					mattr_is_cached(attr) ? "MEM" :
1011 					"DEV",
1012 					attr & TEE_MATTR_PW ? "RW" : "RO",
1013 					attr & TEE_MATTR_PX ? "X " : "XN",
1014 					security_bit);
1015 			} else {
1016 				DMSG_RAW("%*s [LVL%d] VA:0x%010" PRIxVA
1017 					    " INVALID\n",
1018 					    level * 2, "", level, va);
1019 			}
1020 		}
1021 		va += BIT64(tbl_info.shift);
1022 	}
1023 }
1024 
1025 #else
1026 
dump_xlat_table(vaddr_t va __unused,int level __unused)1027 static void dump_xlat_table(vaddr_t va __unused, int level __unused)
1028 {
1029 }
1030 
1031 #endif
1032 
1033 /*
1034  * Reserves virtual memory space for pager usage.
1035  *
1036  * From the start of the first memory used by the link script +
1037  * TEE_RAM_VA_SIZE should be covered, either with a direct mapping or empty
1038  * mapping for pager usage. This adds translation tables as needed for the
1039  * pager to operate.
1040  */
add_pager_vaspace(struct memory_map * mem_map)1041 static void add_pager_vaspace(struct memory_map *mem_map)
1042 {
1043 	paddr_t begin = 0;
1044 	paddr_t end = 0;
1045 	size_t size = 0;
1046 	size_t pos = 0;
1047 	size_t n = 0;
1048 
1049 
1050 	for (n = 0; n < mem_map->count; n++) {
1051 		if (map_is_tee_ram(mem_map->map + n)) {
1052 			if (!begin)
1053 				begin = mem_map->map[n].pa;
1054 			pos = n + 1;
1055 		}
1056 	}
1057 
1058 	end = mem_map->map[pos - 1].pa + mem_map->map[pos - 1].size;
1059 	assert(end - begin < TEE_RAM_VA_SIZE);
1060 	size = TEE_RAM_VA_SIZE - (end - begin);
1061 
1062 	grow_mem_map(mem_map);
1063 	ins_array_elem(mem_map->map, mem_map->count, sizeof(*mem_map->map),
1064 		       n, NULL);
1065 	mem_map->map[n] = (struct tee_mmap_region){
1066 		.type = MEM_AREA_PAGER_VASPACE,
1067 		.size = size,
1068 		.region_size = SMALL_PAGE_SIZE,
1069 		.attr = core_mmu_type_to_attr(MEM_AREA_PAGER_VASPACE),
1070 	};
1071 }
1072 
check_sec_nsec_mem_config(void)1073 static void check_sec_nsec_mem_config(void)
1074 {
1075 	size_t n = 0;
1076 
1077 	for (n = 0; n < ARRAY_SIZE(secure_only); n++) {
1078 		if (pbuf_intersects(nsec_shared, secure_only[n].paddr,
1079 				    secure_only[n].size))
1080 			panic("Invalid memory access config: sec/nsec");
1081 	}
1082 }
1083 
collect_device_mem_ranges(struct memory_map * mem_map)1084 static void collect_device_mem_ranges(struct memory_map *mem_map)
1085 {
1086 	const char *compatible = "arm,ffa-manifest-device-regions";
1087 	void *fdt = get_manifest_dt();
1088 	const char *name = NULL;
1089 	uint64_t page_count = 0;
1090 	uint64_t base = 0;
1091 	int subnode = 0;
1092 	int node = 0;
1093 
1094 	assert(fdt);
1095 
1096 	node = fdt_node_offset_by_compatible(fdt, 0, compatible);
1097 	if (node < 0)
1098 		return;
1099 
1100 	fdt_for_each_subnode(subnode, fdt, node) {
1101 		name = fdt_get_name(fdt, subnode, NULL);
1102 		if (!name)
1103 			continue;
1104 
1105 		if (dt_getprop_as_number(fdt, subnode, "base-address",
1106 					 &base)) {
1107 			EMSG("Mandatory field is missing: base-address");
1108 			continue;
1109 		}
1110 
1111 		if (base & SMALL_PAGE_MASK) {
1112 			EMSG("base-address is not page aligned");
1113 			continue;
1114 		}
1115 
1116 		if (dt_getprop_as_number(fdt, subnode, "pages-count",
1117 					 &page_count)) {
1118 			EMSG("Mandatory field is missing: pages-count");
1119 			continue;
1120 		}
1121 
1122 		add_phys_mem(mem_map, name, MEM_AREA_IO_SEC,
1123 			     base, page_count * SMALL_PAGE_SIZE);
1124 	}
1125 }
1126 
collect_mem_ranges(struct memory_map * mem_map)1127 static void collect_mem_ranges(struct memory_map *mem_map)
1128 {
1129 	const struct core_mmu_phys_mem *mem = NULL;
1130 	vaddr_t ram_start = secure_only[0].paddr;
1131 	size_t n = 0;
1132 
1133 #define ADD_PHYS_MEM(_type, _addr, _size) \
1134 		add_phys_mem(mem_map, #_addr, (_type), (_addr), (_size))
1135 
1136 	if (IS_ENABLED(CFG_CORE_RWDATA_NOEXEC)) {
1137 		paddr_t next_pa = 0;
1138 
1139 		/*
1140 		 * Read-only and read-execute physical memory areas must
1141 		 * not be mapped by MEM_AREA_SEC_RAM_OVERALL, but all the
1142 		 * read/write should.
1143 		 */
1144 		ADD_PHYS_MEM(MEM_AREA_SEC_RAM_OVERALL, ram_start,
1145 			     VCORE_UNPG_RX_PA - ram_start);
1146 		assert(VCORE_UNPG_RX_PA >= ram_start);
1147 		tee_ram_initial_offs = VCORE_UNPG_RX_PA - ram_start;
1148 		DMSG("tee_ram_initial_offs %#zx", tee_ram_initial_offs);
1149 		ADD_PHYS_MEM(MEM_AREA_TEE_RAM_RX, VCORE_UNPG_RX_PA,
1150 			     VCORE_UNPG_RX_SZ);
1151 		ADD_PHYS_MEM(MEM_AREA_TEE_RAM_RO, VCORE_UNPG_RO_PA,
1152 			     VCORE_UNPG_RO_SZ);
1153 
1154 		if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
1155 			ADD_PHYS_MEM(MEM_AREA_NEX_RAM_RO, VCORE_UNPG_RW_PA,
1156 				     VCORE_UNPG_RW_SZ);
1157 			ADD_PHYS_MEM(MEM_AREA_SEC_RAM_OVERALL, VCORE_UNPG_RW_PA,
1158 				     VCORE_UNPG_RW_SZ);
1159 
1160 			ADD_PHYS_MEM(MEM_AREA_NEX_RAM_RW, VCORE_NEX_RW_PA,
1161 				     VCORE_NEX_RW_SZ);
1162 			ADD_PHYS_MEM(MEM_AREA_SEC_RAM_OVERALL, VCORE_NEX_RW_PA,
1163 				     VCORE_NEX_RW_SZ);
1164 
1165 			ADD_PHYS_MEM(MEM_AREA_NEX_RAM_RW, VCORE_FREE_PA,
1166 				     VCORE_FREE_SZ);
1167 			ADD_PHYS_MEM(MEM_AREA_SEC_RAM_OVERALL, VCORE_FREE_PA,
1168 				     VCORE_FREE_SZ);
1169 			next_pa = VCORE_FREE_PA + VCORE_FREE_SZ;
1170 		} else {
1171 			ADD_PHYS_MEM(MEM_AREA_TEE_RAM_RW, VCORE_UNPG_RW_PA,
1172 				     VCORE_UNPG_RW_SZ);
1173 			ADD_PHYS_MEM(MEM_AREA_SEC_RAM_OVERALL, VCORE_UNPG_RW_PA,
1174 				     VCORE_UNPG_RW_SZ);
1175 
1176 			ADD_PHYS_MEM(MEM_AREA_TEE_RAM_RW, VCORE_FREE_PA,
1177 				     VCORE_FREE_SZ);
1178 			ADD_PHYS_MEM(MEM_AREA_SEC_RAM_OVERALL, VCORE_FREE_PA,
1179 				     VCORE_FREE_SZ);
1180 			next_pa = VCORE_FREE_PA + VCORE_FREE_SZ;
1181 		}
1182 
1183 		if (IS_ENABLED(CFG_WITH_PAGER)) {
1184 			paddr_t pa = 0;
1185 			size_t sz = 0;
1186 
1187 			ADD_PHYS_MEM(MEM_AREA_INIT_RAM_RX, VCORE_INIT_RX_PA,
1188 				     VCORE_INIT_RX_SZ);
1189 			ADD_PHYS_MEM(MEM_AREA_INIT_RAM_RO, VCORE_INIT_RO_PA,
1190 				     VCORE_INIT_RO_SZ);
1191 			/*
1192 			 * Core init mapping shall cover up to end of the
1193 			 * physical RAM.  This is required since the hash
1194 			 * table is appended to the binary data after the
1195 			 * firmware build sequence.
1196 			 */
1197 			pa = VCORE_INIT_RO_PA + VCORE_INIT_RO_SZ;
1198 			sz = TEE_RAM_START + TEE_RAM_PH_SIZE - pa;
1199 			ADD_PHYS_MEM(MEM_AREA_TEE_RAM, pa, sz);
1200 		} else {
1201 			ADD_PHYS_MEM(MEM_AREA_SEC_RAM_OVERALL, next_pa,
1202 				     secure_only[0].paddr +
1203 				     secure_only[0].size - next_pa);
1204 		}
1205 	} else {
1206 		ADD_PHYS_MEM(MEM_AREA_TEE_RAM, TEE_RAM_START, TEE_RAM_PH_SIZE);
1207 		ADD_PHYS_MEM(MEM_AREA_SEC_RAM_OVERALL, secure_only[n].paddr,
1208 			     secure_only[0].size);
1209 	}
1210 
1211 	for (n = 1; n < ARRAY_SIZE(secure_only); n++)
1212 		ADD_PHYS_MEM(MEM_AREA_SEC_RAM_OVERALL, secure_only[n].paddr,
1213 			     secure_only[n].size);
1214 
1215 	if (IS_ENABLED(CFG_CORE_SANITIZE_KADDRESS))
1216 		ADD_PHYS_MEM(MEM_AREA_TEE_ASAN, ASAN_MAP_PA, ASAN_MAP_SZ);
1217 
1218 #undef ADD_PHYS_MEM
1219 
1220 	/* Collect device memory info from SP manifest */
1221 	if (IS_ENABLED(CFG_CORE_SEL2_SPMC))
1222 		collect_device_mem_ranges(mem_map);
1223 
1224 	for (mem = phys_mem_map_begin; mem < phys_mem_map_end; mem++) {
1225 		/* Only unmapped virtual range may have a null phys addr */
1226 		assert(mem->addr || !core_mmu_type_to_attr(mem->type));
1227 
1228 		add_phys_mem(mem_map, mem->name, mem->type,
1229 			     mem->addr, mem->size);
1230 	}
1231 
1232 	if (IS_ENABLED(CFG_SECURE_DATA_PATH))
1233 		verify_special_mem_areas(mem_map, phys_sdp_mem_begin,
1234 					 phys_sdp_mem_end, "SDP");
1235 
1236 	add_va_space(mem_map, MEM_AREA_RES_VASPACE, CFG_RESERVED_VASPACE_SIZE);
1237 	add_va_space(mem_map, MEM_AREA_SHM_VASPACE, SHM_VASPACE_SIZE);
1238 	if (IS_ENABLED(CFG_DYN_CONFIG)) {
1239 		if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
1240 			add_va_space(mem_map, MEM_AREA_NEX_DYN_VASPACE,
1241 				     ROUNDUP(CFG_NEX_DYN_VASPACE_SIZE,
1242 					     CORE_MMU_PGDIR_SIZE));
1243 		add_va_space(mem_map, MEM_AREA_TEE_DYN_VASPACE,
1244 			     CFG_TEE_DYN_VASPACE_SIZE);
1245 	}
1246 }
1247 
assign_mem_granularity(struct memory_map * mem_map)1248 static void assign_mem_granularity(struct memory_map *mem_map)
1249 {
1250 	size_t n = 0;
1251 
1252 	/*
1253 	 * Assign region sizes, note that MEM_AREA_TEE_RAM always uses
1254 	 * SMALL_PAGE_SIZE.
1255 	 */
1256 	for  (n = 0; n < mem_map->count; n++) {
1257 		paddr_t mask = mem_map->map[n].pa | mem_map->map[n].size;
1258 
1259 		if (mask & SMALL_PAGE_MASK)
1260 			panic("Impossible memory alignment");
1261 
1262 		if (map_is_tee_ram(mem_map->map + n))
1263 			mem_map->map[n].region_size = SMALL_PAGE_SIZE;
1264 		else
1265 			mem_map->map[n].region_size = CORE_MMU_PGDIR_SIZE;
1266 	}
1267 }
1268 
place_tee_ram_at_top(paddr_t paddr)1269 static bool place_tee_ram_at_top(paddr_t paddr)
1270 {
1271 	return paddr > BIT64(core_mmu_get_va_width()) / 2;
1272 }
1273 
1274 /*
1275  * MMU arch driver shall override this function if it helps
1276  * optimizing the memory footprint of the address translation tables.
1277  */
core_mmu_prefer_tee_ram_at_top(paddr_t paddr)1278 bool __weak core_mmu_prefer_tee_ram_at_top(paddr_t paddr)
1279 {
1280 	return place_tee_ram_at_top(paddr);
1281 }
1282 
assign_mem_va_dir(vaddr_t tee_ram_va,struct memory_map * mem_map,bool tee_ram_at_top)1283 static bool assign_mem_va_dir(vaddr_t tee_ram_va, struct memory_map *mem_map,
1284 			      bool tee_ram_at_top)
1285 {
1286 	struct tee_mmap_region *map = NULL;
1287 	bool va_is_nex_shared = false;
1288 	bool va_is_secure = true;
1289 	vaddr_t va = 0;
1290 	size_t n = 0;
1291 
1292 	/*
1293 	 * tee_ram_va might equals 0 when CFG_CORE_ASLR=y.
1294 	 * 0 is by design an invalid va, so return false directly.
1295 	 */
1296 	if (!tee_ram_va)
1297 		return false;
1298 
1299 	/* Clear eventual previous assignments */
1300 	for (n = 0; n < mem_map->count; n++)
1301 		mem_map->map[n].va = 0;
1302 
1303 	/*
1304 	 * TEE RAM regions are always aligned with region_size.
1305 	 *
1306 	 * Note that MEM_AREA_PAGER_VASPACE also counts as TEE RAM here
1307 	 * since it handles virtual memory which covers the part of the ELF
1308 	 * that cannot fit directly into memory.
1309 	 */
1310 	va = tee_ram_va + tee_ram_initial_offs;
1311 	for (n = 0; n < mem_map->count; n++) {
1312 		map = mem_map->map + n;
1313 		if (map_is_tee_ram(map) ||
1314 		    map->type == MEM_AREA_PAGER_VASPACE) {
1315 			assert(!(va & (map->region_size - 1)));
1316 			assert(!(map->size & (map->region_size - 1)));
1317 			map->va = va;
1318 			if (ADD_OVERFLOW(va, map->size, &va))
1319 				return false;
1320 			if (!core_mmu_va_is_valid(va))
1321 				return false;
1322 		}
1323 	}
1324 
1325 	if (tee_ram_at_top) {
1326 		/*
1327 		 * Map non-tee ram regions at addresses lower than the tee
1328 		 * ram region.
1329 		 */
1330 		va = tee_ram_va;
1331 		for (n = 0; n < mem_map->count; n++) {
1332 			map = mem_map->map + n;
1333 			map->attr = core_mmu_type_to_attr(map->type);
1334 			if (map->va)
1335 				continue;
1336 
1337 			if (!IS_ENABLED(CFG_WITH_LPAE) &&
1338 			    va_is_secure != map_is_secure(map)) {
1339 				va_is_secure = !va_is_secure;
1340 				va = ROUNDDOWN(va, CORE_MMU_PGDIR_SIZE);
1341 			} else if (va_is_nex_shared !=
1342 				   core_mmu_type_is_nex_shared(map->type)) {
1343 				va_is_nex_shared = !va_is_nex_shared;
1344 				va = ROUNDDOWN(va, CORE_MMU_PGDIR_SIZE);
1345 			}
1346 
1347 			if (SUB_OVERFLOW(va, map->size, &va))
1348 				return false;
1349 			va = ROUNDDOWN2(va, map->region_size);
1350 			/*
1351 			 * Make sure that va is aligned with pa for
1352 			 * efficient pgdir mapping. Basically pa &
1353 			 * pgdir_mask should be == va & pgdir_mask
1354 			 */
1355 			if (map->size > 2 * CORE_MMU_PGDIR_SIZE) {
1356 				if (SUB_OVERFLOW(va, CORE_MMU_PGDIR_SIZE, &va))
1357 					return false;
1358 				va += (map->pa - va) & CORE_MMU_PGDIR_MASK;
1359 			}
1360 			map->va = va;
1361 		}
1362 	} else {
1363 		/*
1364 		 * Map non-tee ram regions at addresses higher than the tee
1365 		 * ram region.
1366 		 */
1367 		for (n = 0; n < mem_map->count; n++) {
1368 			map = mem_map->map + n;
1369 			map->attr = core_mmu_type_to_attr(map->type);
1370 			if (map->va)
1371 				continue;
1372 
1373 			if (!IS_ENABLED(CFG_WITH_LPAE) &&
1374 			    va_is_secure != map_is_secure(map)) {
1375 				va_is_secure = !va_is_secure;
1376 				if (ROUNDUP_OVERFLOW(va, CORE_MMU_PGDIR_SIZE,
1377 						     &va))
1378 					return false;
1379 			} else if (va_is_nex_shared !=
1380 				   core_mmu_type_is_nex_shared(map->type)) {
1381 				va_is_nex_shared = !va_is_nex_shared;
1382 				if (ROUNDUP_OVERFLOW(va, CORE_MMU_PGDIR_SIZE,
1383 						     &va))
1384 					return false;
1385 			}
1386 
1387 			if (ROUNDUP2_OVERFLOW(va, map->region_size, &va))
1388 				return false;
1389 			/*
1390 			 * Make sure that va is aligned with pa for
1391 			 * efficient pgdir mapping. Basically pa &
1392 			 * pgdir_mask should be == va & pgdir_mask
1393 			 */
1394 			if (map->size > 2 * CORE_MMU_PGDIR_SIZE) {
1395 				vaddr_t offs = (map->pa - va) &
1396 					       CORE_MMU_PGDIR_MASK;
1397 
1398 				if (ADD_OVERFLOW(va, offs, &va))
1399 					return false;
1400 			}
1401 
1402 			map->va = va;
1403 			if (ADD_OVERFLOW(va, map->size, &va))
1404 				return false;
1405 			if (!core_mmu_va_is_valid(va))
1406 				return false;
1407 		}
1408 	}
1409 
1410 	return true;
1411 }
1412 
assign_mem_va(vaddr_t tee_ram_va,struct memory_map * mem_map)1413 static bool assign_mem_va(vaddr_t tee_ram_va, struct memory_map *mem_map)
1414 {
1415 	bool tee_ram_at_top = place_tee_ram_at_top(tee_ram_va);
1416 
1417 	/*
1418 	 * Check that we're not overlapping with the user VA range.
1419 	 */
1420 	if (IS_ENABLED(CFG_WITH_LPAE)) {
1421 		/*
1422 		 * User VA range is supposed to be defined after these
1423 		 * mappings have been established.
1424 		 */
1425 		assert(!core_mmu_user_va_range_is_defined());
1426 	} else {
1427 		vaddr_t user_va_base = 0;
1428 		size_t user_va_size = 0;
1429 
1430 		assert(core_mmu_user_va_range_is_defined());
1431 		core_mmu_get_user_va_range(&user_va_base, &user_va_size);
1432 		if (tee_ram_va < (user_va_base + user_va_size))
1433 			return false;
1434 	}
1435 
1436 	if (IS_ENABLED(CFG_WITH_PAGER)) {
1437 		bool prefered_dir = core_mmu_prefer_tee_ram_at_top(tee_ram_va);
1438 
1439 		/* Try whole mapping covered by a single base xlat entry */
1440 		if (prefered_dir != tee_ram_at_top &&
1441 		    assign_mem_va_dir(tee_ram_va, mem_map, prefered_dir))
1442 			return true;
1443 	}
1444 
1445 	return assign_mem_va_dir(tee_ram_va, mem_map, tee_ram_at_top);
1446 }
1447 
cmp_init_mem_map(const void * a,const void * b)1448 static int cmp_init_mem_map(const void *a, const void *b)
1449 {
1450 	const struct tee_mmap_region *mm_a = a;
1451 	const struct tee_mmap_region *mm_b = b;
1452 	int rc = 0;
1453 
1454 	rc = CMP_TRILEAN(mm_a->region_size, mm_b->region_size);
1455 	if (!rc)
1456 		rc = CMP_TRILEAN(mm_a->pa, mm_b->pa);
1457 	/*
1458 	 * 32bit MMU descriptors cannot mix secure and non-secure mapping in
1459 	 * the same level2 table. Hence sort secure mapping from non-secure
1460 	 * mapping.
1461 	 */
1462 	if (!rc && !IS_ENABLED(CFG_WITH_LPAE))
1463 		rc = CMP_TRILEAN(map_is_secure(mm_a), map_is_secure(mm_b));
1464 
1465 	/*
1466 	 * Nexus mappings shared between partitions should not be mixed
1467 	 * with other mappings in the same translation table. Hence sort
1468 	 * nexus shared mappings from other mappings.
1469 	 */
1470 	if (!rc)
1471 		rc = CMP_TRILEAN(core_mmu_type_is_nex_shared(mm_a->type),
1472 				 core_mmu_type_is_nex_shared(mm_b->type));
1473 
1474 	return rc;
1475 }
1476 
mem_map_add_id_map(struct memory_map * mem_map,vaddr_t id_map_start,vaddr_t id_map_end)1477 static bool mem_map_add_id_map(struct memory_map *mem_map,
1478 			       vaddr_t id_map_start, vaddr_t id_map_end)
1479 {
1480 	vaddr_t start = ROUNDDOWN(id_map_start, SMALL_PAGE_SIZE);
1481 	vaddr_t end = ROUNDUP(id_map_end, SMALL_PAGE_SIZE);
1482 	size_t len = end - start;
1483 	size_t n = 0;
1484 
1485 
1486 	for (n = 0; n < mem_map->count; n++)
1487 		if (core_is_buffer_intersect(mem_map->map[n].va,
1488 					     mem_map->map[n].size, start, len))
1489 			return false;
1490 
1491 	grow_mem_map(mem_map);
1492 	mem_map->map[mem_map->count - 1] = (struct tee_mmap_region){
1493 		.type = MEM_AREA_IDENTITY_MAP_RX,
1494 		/*
1495 		 * Could use CORE_MMU_PGDIR_SIZE to potentially save a
1496 		 * translation table, at the increased risk of clashes with
1497 		 * the rest of the memory map.
1498 		 */
1499 		.region_size = SMALL_PAGE_SIZE,
1500 		.pa = start,
1501 		.va = start,
1502 		.size = len,
1503 		.attr = core_mmu_type_to_attr(MEM_AREA_IDENTITY_MAP_RX),
1504 	};
1505 
1506 	return true;
1507 }
1508 
init_mem_map(struct memory_map * mem_map,unsigned long seed,unsigned long * ret_offs)1509 static struct memory_map *init_mem_map(struct memory_map *mem_map,
1510 				       unsigned long seed,
1511 				       unsigned long *ret_offs)
1512 {
1513 	/*
1514 	 * @id_map_start and @id_map_end describes a physical memory range
1515 	 * that must be mapped Read-Only eXecutable at identical virtual
1516 	 * addresses.
1517 	 */
1518 	vaddr_t id_map_start = (vaddr_t)__identity_map_init_start;
1519 	vaddr_t id_map_end = (vaddr_t)__identity_map_init_end;
1520 	vaddr_t start_addr = secure_only[0].paddr;
1521 	unsigned long offs = 0;
1522 
1523 	collect_mem_ranges(mem_map);
1524 	assign_mem_granularity(mem_map);
1525 
1526 	/*
1527 	 * To ease mapping and lower use of xlat tables, sort mapping
1528 	 * description moving small-page regions after the pgdir regions.
1529 	 */
1530 	qsort(mem_map->map, mem_map->count, sizeof(struct tee_mmap_region),
1531 	      cmp_init_mem_map);
1532 
1533 	if (IS_ENABLED(CFG_WITH_PAGER))
1534 		add_pager_vaspace(mem_map);
1535 
1536 	if (IS_ENABLED(CFG_CORE_ASLR) && seed) {
1537 		vaddr_t ba = 0;
1538 		size_t n = 0;
1539 
1540 		for (n = 0; n < 3; n++) {
1541 			ba = arch_aslr_base_addr(start_addr, seed, n);
1542 			if (assign_mem_va(ba, mem_map) &&
1543 			    mem_map_add_id_map(mem_map, id_map_start,
1544 					       id_map_end)) {
1545 				offs = ba - start_addr;
1546 				DMSG("Mapping core at %#"PRIxVA" offs %#lx",
1547 				     ba, offs);
1548 				goto out;
1549 			} else {
1550 				DMSG("Failed to map core at %#"PRIxVA, ba);
1551 			}
1552 		}
1553 		EMSG("Failed to map core with seed %#lx", seed);
1554 	}
1555 
1556 	if (!assign_mem_va(start_addr, mem_map))
1557 		panic();
1558 
1559 out:
1560 	qsort(mem_map->map, mem_map->count, sizeof(struct tee_mmap_region),
1561 	      cmp_mmap_by_lower_va);
1562 
1563 	dump_mmap_table(mem_map);
1564 
1565 	*ret_offs = offs;
1566 	return mem_map;
1567 }
1568 
check_mem_map(struct memory_map * mem_map)1569 static void check_mem_map(struct memory_map *mem_map)
1570 {
1571 	struct tee_mmap_region *m = NULL;
1572 	size_t n = 0;
1573 
1574 	for (n = 0; n < mem_map->count; n++) {
1575 		m = mem_map->map + n;
1576 		switch (m->type) {
1577 		case MEM_AREA_TEE_RAM:
1578 		case MEM_AREA_TEE_RAM_RX:
1579 		case MEM_AREA_TEE_RAM_RO:
1580 		case MEM_AREA_TEE_RAM_RW:
1581 		case MEM_AREA_INIT_RAM_RX:
1582 		case MEM_AREA_INIT_RAM_RO:
1583 		case MEM_AREA_NEX_RAM_RW:
1584 		case MEM_AREA_NEX_RAM_RO:
1585 		case MEM_AREA_IDENTITY_MAP_RX:
1586 			if (!pbuf_is_inside(secure_only, m->pa, m->size))
1587 				panic("TEE_RAM can't fit in secure_only");
1588 			break;
1589 		case MEM_AREA_SEC_RAM_OVERALL:
1590 			if (!pbuf_is_inside(secure_only, m->pa, m->size))
1591 				panic("SEC_RAM_OVERALL can't fit in secure_only");
1592 			break;
1593 		case MEM_AREA_NSEC_SHM:
1594 			if (!pbuf_is_inside(nsec_shared, m->pa, m->size))
1595 				panic("NS_SHM can't fit in nsec_shared");
1596 			break;
1597 		case MEM_AREA_TEE_COHERENT:
1598 		case MEM_AREA_TEE_ASAN:
1599 		case MEM_AREA_IO_SEC:
1600 		case MEM_AREA_IO_NSEC:
1601 		case MEM_AREA_EXT_DT:
1602 		case MEM_AREA_MANIFEST_DT:
1603 		case MEM_AREA_TRANSFER_LIST:
1604 		case MEM_AREA_RAM_SEC:
1605 		case MEM_AREA_RAM_NSEC:
1606 		case MEM_AREA_ROM_SEC:
1607 		case MEM_AREA_RES_VASPACE:
1608 		case MEM_AREA_SHM_VASPACE:
1609 		case MEM_AREA_PAGER_VASPACE:
1610 		case MEM_AREA_NEX_DYN_VASPACE:
1611 		case MEM_AREA_TEE_DYN_VASPACE:
1612 			break;
1613 		default:
1614 			EMSG("Uhandled memtype %d", m->type);
1615 			panic();
1616 		}
1617 	}
1618 }
1619 
1620 /*
1621  * core_init_mmu_map() - init tee core default memory mapping
1622  *
1623  * This routine sets the static default TEE core mapping. If @seed is > 0
1624  * and configured with CFG_CORE_ASLR it will map tee core at a location
1625  * based on the seed and return the offset from the link address.
1626  *
1627  * If an error happened: core_init_mmu_map is expected to panic.
1628  *
1629  * Note: this function is weak just to make it possible to exclude it from
1630  * the unpaged area.
1631  */
core_init_mmu_map(unsigned long seed,struct core_mmu_config * cfg)1632 void __weak core_init_mmu_map(unsigned long seed, struct core_mmu_config *cfg)
1633 {
1634 #ifndef CFG_NS_VIRTUALIZATION
1635 	vaddr_t start = ROUNDDOWN((vaddr_t)__nozi_start, SMALL_PAGE_SIZE);
1636 #else
1637 	vaddr_t start = ROUNDDOWN((vaddr_t)__vcore_nex_rw_start,
1638 				  SMALL_PAGE_SIZE);
1639 #endif
1640 #ifdef CFG_DYN_CONFIG
1641 	vaddr_t len = ROUNDUP(VCORE_FREE_END_PA, SMALL_PAGE_SIZE) - start;
1642 #else
1643 	vaddr_t len = ROUNDUP((vaddr_t)__nozi_end, SMALL_PAGE_SIZE) - start;
1644 #endif
1645 	struct tee_mmap_region tmp_mmap_region = { };
1646 	struct memory_map mem_map = { };
1647 	unsigned long offs = 0;
1648 
1649 	if (IS_ENABLED(CFG_CORE_PHYS_RELOCATABLE) &&
1650 	    (core_mmu_tee_load_pa & SMALL_PAGE_MASK))
1651 		panic("OP-TEE load address is not page aligned");
1652 
1653 	check_sec_nsec_mem_config();
1654 
1655 	mem_map.alloc_count = CFG_MMAP_REGIONS;
1656 	mem_map.map = boot_mem_alloc_tmp(mem_map.alloc_count *
1657 						sizeof(*mem_map.map),
1658 					 alignof(*mem_map.map));
1659 	memory_map_realloc_func = boot_mem_realloc_memory_map;
1660 
1661 	static_memory_map = (struct memory_map){
1662 		.map = &tmp_mmap_region,
1663 		.alloc_count = 1,
1664 		.count = 1,
1665 	};
1666 	/*
1667 	 * Add a entry covering the translation tables which will be
1668 	 * involved in some virt_to_phys() and phys_to_virt() conversions.
1669 	 */
1670 	static_memory_map.map[0] = (struct tee_mmap_region){
1671 		.type = MEM_AREA_TEE_RAM,
1672 		.region_size = SMALL_PAGE_SIZE,
1673 		.pa = start,
1674 		.va = start,
1675 		.size = len,
1676 		.attr = core_mmu_type_to_attr(MEM_AREA_IDENTITY_MAP_RX),
1677 	};
1678 
1679 	init_mem_map(&mem_map, seed, &offs);
1680 
1681 	check_mem_map(&mem_map);
1682 	core_init_mmu(&mem_map);
1683 	dump_xlat_table(0x0, CORE_MMU_BASE_TABLE_LEVEL);
1684 	core_init_mmu_regs(cfg);
1685 	cfg->map_offset = offs;
1686 	static_memory_map = mem_map;
1687 	boot_mem_add_reloc(&static_memory_map.map);
1688 }
1689 
core_mmu_save_mem_map(void)1690 void core_mmu_save_mem_map(void)
1691 {
1692 	size_t alloc_count = static_memory_map.count + 5;
1693 	size_t elem_sz = sizeof(*static_memory_map.map);
1694 	void *p = NULL;
1695 
1696 	p = nex_calloc(alloc_count, elem_sz);
1697 	if (!p)
1698 		panic();
1699 	memcpy(p, static_memory_map.map, static_memory_map.count * elem_sz);
1700 	static_memory_map.map = p;
1701 	static_memory_map.alloc_count = alloc_count;
1702 	memory_map_realloc_func = heap_realloc_memory_map;
1703 }
1704 
core_mmu_mattr_is_ok(uint32_t mattr)1705 bool core_mmu_mattr_is_ok(uint32_t mattr)
1706 {
1707 	/*
1708 	 * Keep in sync with core_mmu_lpae.c:mattr_to_desc and
1709 	 * core_mmu_v7.c:mattr_to_texcb
1710 	 */
1711 
1712 	switch ((mattr >> TEE_MATTR_MEM_TYPE_SHIFT) & TEE_MATTR_MEM_TYPE_MASK) {
1713 	case TEE_MATTR_MEM_TYPE_DEV:
1714 	case TEE_MATTR_MEM_TYPE_STRONGLY_O:
1715 	case TEE_MATTR_MEM_TYPE_CACHED:
1716 	case TEE_MATTR_MEM_TYPE_TAGGED:
1717 		return true;
1718 	default:
1719 		return false;
1720 	}
1721 }
1722 
1723 /*
1724  * test attributes of target physical buffer
1725  *
1726  * Flags: pbuf_is(SECURE, NOT_SECURE, RAM, IOMEM, KEYVAULT).
1727  *
1728  */
core_pbuf_is(uint32_t attr,paddr_t pbuf,size_t len)1729 bool core_pbuf_is(uint32_t attr, paddr_t pbuf, size_t len)
1730 {
1731 	struct tee_mmap_region *map;
1732 
1733 	/* Empty buffers complies with anything */
1734 	if (len == 0)
1735 		return true;
1736 
1737 	switch (attr) {
1738 	case CORE_MEM_SEC:
1739 		return pbuf_is_inside(secure_only, pbuf, len);
1740 	case CORE_MEM_NON_SEC:
1741 		return pbuf_is_inside(nsec_shared, pbuf, len) ||
1742 			pbuf_is_nsec_ddr(pbuf, len);
1743 	case CORE_MEM_TEE_RAM:
1744 		return core_is_buffer_inside(pbuf, len, TEE_RAM_START,
1745 							TEE_RAM_PH_SIZE);
1746 #ifdef CFG_CORE_RESERVED_SHM
1747 	case CORE_MEM_NSEC_SHM:
1748 		return core_is_buffer_inside(pbuf, len, TEE_SHMEM_START,
1749 							TEE_SHMEM_SIZE);
1750 #endif
1751 	case CORE_MEM_SDP_MEM:
1752 		return pbuf_is_sdp_mem(pbuf, len);
1753 	case CORE_MEM_CACHED:
1754 		map = find_map_by_pa(pbuf);
1755 		if (!map || !pbuf_inside_map_area(pbuf, len, map))
1756 			return false;
1757 		return mattr_is_cached(map->attr);
1758 	default:
1759 		return false;
1760 	}
1761 }
1762 
1763 /* test attributes of target virtual buffer (in core mapping) */
core_vbuf_is(uint32_t attr,const void * vbuf,size_t len)1764 bool core_vbuf_is(uint32_t attr, const void *vbuf, size_t len)
1765 {
1766 	paddr_t p;
1767 
1768 	/* Empty buffers complies with anything */
1769 	if (len == 0)
1770 		return true;
1771 
1772 	p = virt_to_phys((void *)vbuf);
1773 	if (!p)
1774 		return false;
1775 
1776 	return core_pbuf_is(attr, p, len);
1777 }
1778 
1779 /* core_va2pa - teecore exported service */
core_va2pa_helper(void * va,paddr_t * pa)1780 static int __maybe_unused core_va2pa_helper(void *va, paddr_t *pa)
1781 {
1782 	struct tee_mmap_region *map;
1783 
1784 	map = find_map_by_va(va);
1785 	if (!va_is_in_map(map, (vaddr_t)va))
1786 		return -1;
1787 
1788 	/*
1789 	 * We can calculate PA for static map. Virtual address ranges
1790 	 * reserved to core dynamic mapping return a 'match' (return 0;)
1791 	 * together with an invalid null physical address.
1792 	 */
1793 	if (map->pa)
1794 		*pa = map->pa + (vaddr_t)va  - map->va;
1795 	else
1796 		*pa = 0;
1797 
1798 	return 0;
1799 }
1800 
map_pa2va(struct tee_mmap_region * map,paddr_t pa,size_t len)1801 static void *map_pa2va(struct tee_mmap_region *map, paddr_t pa, size_t len)
1802 {
1803 	if (!pa_is_in_map(map, pa, len))
1804 		return NULL;
1805 
1806 	return (void *)(vaddr_t)(map->va + pa - map->pa);
1807 }
1808 
1809 /*
1810  * teecore gets some memory area definitions
1811  */
core_mmu_get_mem_by_type(enum teecore_memtypes type,vaddr_t * s,vaddr_t * e)1812 void core_mmu_get_mem_by_type(enum teecore_memtypes type, vaddr_t *s,
1813 			      vaddr_t *e)
1814 {
1815 	struct tee_mmap_region *map = find_map_by_type(type);
1816 
1817 	if (map) {
1818 		*s = map->va;
1819 		*e = map->va + map->size;
1820 	} else {
1821 		*s = 0;
1822 		*e = 0;
1823 	}
1824 }
1825 
core_mmu_get_type_by_pa(paddr_t pa)1826 enum teecore_memtypes core_mmu_get_type_by_pa(paddr_t pa)
1827 {
1828 	struct tee_mmap_region *map = find_map_by_pa(pa);
1829 
1830 	/* VA spaces have no valid PAs in the memory map */
1831 	if (!map || map->type == MEM_AREA_RES_VASPACE ||
1832 	    map->type == MEM_AREA_SHM_VASPACE)
1833 		return MEM_AREA_MAXTYPE;
1834 	return map->type;
1835 }
1836 
core_mmu_set_entry(struct core_mmu_table_info * tbl_info,unsigned int idx,paddr_t pa,uint32_t attr)1837 void core_mmu_set_entry(struct core_mmu_table_info *tbl_info, unsigned int idx,
1838 			paddr_t pa, uint32_t attr)
1839 {
1840 	assert(idx < tbl_info->num_entries);
1841 	core_mmu_set_entry_primitive(tbl_info->table, tbl_info->level,
1842 				     idx, pa, attr);
1843 }
1844 
core_mmu_get_entry(struct core_mmu_table_info * tbl_info,unsigned int idx,paddr_t * pa,uint32_t * attr)1845 void core_mmu_get_entry(struct core_mmu_table_info *tbl_info, unsigned int idx,
1846 			paddr_t *pa, uint32_t *attr)
1847 {
1848 	assert(idx < tbl_info->num_entries);
1849 	core_mmu_get_entry_primitive(tbl_info->table, tbl_info->level,
1850 				     idx, pa, attr);
1851 }
1852 
clear_region(struct core_mmu_table_info * tbl_info,struct tee_mmap_region * region)1853 static void clear_region(struct core_mmu_table_info *tbl_info,
1854 			 struct tee_mmap_region *region)
1855 {
1856 	unsigned int end = 0;
1857 	unsigned int idx = 0;
1858 
1859 	/* va, len and pa should be block aligned */
1860 	assert(!core_mmu_get_block_offset(tbl_info, region->va));
1861 	assert(!core_mmu_get_block_offset(tbl_info, region->size));
1862 	assert(!core_mmu_get_block_offset(tbl_info, region->pa));
1863 
1864 	idx = core_mmu_va2idx(tbl_info, region->va);
1865 	end = core_mmu_va2idx(tbl_info, region->va + region->size);
1866 
1867 	while (idx < end) {
1868 		core_mmu_set_entry(tbl_info, idx, 0, 0);
1869 		idx++;
1870 	}
1871 }
1872 
set_region(struct core_mmu_table_info * tbl_info,struct tee_mmap_region * region)1873 static void set_region(struct core_mmu_table_info *tbl_info,
1874 		       struct tee_mmap_region *region)
1875 {
1876 	unsigned int end;
1877 	unsigned int idx;
1878 	paddr_t pa;
1879 
1880 	/* va, len and pa should be block aligned */
1881 	assert(!core_mmu_get_block_offset(tbl_info, region->va));
1882 	assert(!core_mmu_get_block_offset(tbl_info, region->size));
1883 	assert(!core_mmu_get_block_offset(tbl_info, region->pa));
1884 
1885 	idx = core_mmu_va2idx(tbl_info, region->va);
1886 	end = core_mmu_va2idx(tbl_info, region->va + region->size);
1887 	pa = region->pa;
1888 
1889 	while (idx < end) {
1890 		core_mmu_set_entry(tbl_info, idx, pa, region->attr);
1891 		idx++;
1892 		pa += BIT64(tbl_info->shift);
1893 	}
1894 }
1895 
set_pg_region(struct core_mmu_table_info * dir_info,struct vm_region * region,struct pgt ** pgt,struct core_mmu_table_info * pg_info)1896 static void set_pg_region(struct core_mmu_table_info *dir_info,
1897 			  struct vm_region *region, struct pgt **pgt,
1898 			  struct core_mmu_table_info *pg_info)
1899 {
1900 	struct tee_mmap_region r = {
1901 		.va = region->va,
1902 		.size = region->size,
1903 		.attr = region->attr,
1904 	};
1905 	vaddr_t end = r.va + r.size;
1906 	uint32_t pgt_attr = (r.attr & TEE_MATTR_SECURE) | TEE_MATTR_TABLE;
1907 
1908 	while (r.va < end) {
1909 		if (!pg_info->table ||
1910 		    r.va >= (pg_info->va_base + CORE_MMU_PGDIR_SIZE)) {
1911 			/*
1912 			 * We're assigning a new translation table.
1913 			 */
1914 			unsigned int idx;
1915 
1916 			/* Virtual addresses must grow */
1917 			assert(r.va > pg_info->va_base);
1918 
1919 			idx = core_mmu_va2idx(dir_info, r.va);
1920 			pg_info->va_base = core_mmu_idx2va(dir_info, idx);
1921 
1922 			/*
1923 			 * Advance pgt to va_base, note that we may need to
1924 			 * skip multiple page tables if there are large
1925 			 * holes in the vm map.
1926 			 */
1927 			while ((*pgt)->vabase < pg_info->va_base) {
1928 				*pgt = SLIST_NEXT(*pgt, link);
1929 				/* We should have allocated enough */
1930 				assert(*pgt);
1931 			}
1932 			assert((*pgt)->vabase == pg_info->va_base);
1933 			pg_info->table = (*pgt)->tbl;
1934 
1935 			core_mmu_set_entry(dir_info, idx,
1936 					   virt_to_phys(pg_info->table),
1937 					   pgt_attr);
1938 		}
1939 
1940 		r.size = MIN(CORE_MMU_PGDIR_SIZE - (r.va - pg_info->va_base),
1941 			     end - r.va);
1942 
1943 		if (!(*pgt)->populated  && !mobj_is_paged(region->mobj)) {
1944 			size_t granule = BIT(pg_info->shift);
1945 			size_t offset = r.va - region->va + region->offset;
1946 
1947 			r.size = MIN(r.size,
1948 				     mobj_get_phys_granule(region->mobj));
1949 			r.size = ROUNDUP(r.size, SMALL_PAGE_SIZE);
1950 
1951 			if (mobj_get_pa(region->mobj, offset, granule,
1952 					&r.pa) != TEE_SUCCESS)
1953 				panic("Failed to get PA of unpaged mobj");
1954 			set_region(pg_info, &r);
1955 		}
1956 		r.va += r.size;
1957 	}
1958 }
1959 
can_map_at_level(paddr_t paddr,vaddr_t vaddr,size_t size_left,paddr_t block_size,struct tee_mmap_region * mm)1960 static bool can_map_at_level(paddr_t paddr, vaddr_t vaddr,
1961 			     size_t size_left, paddr_t block_size,
1962 			     struct tee_mmap_region *mm)
1963 {
1964 	/* VA and PA are aligned to block size at current level */
1965 	if ((vaddr | paddr) & (block_size - 1))
1966 		return false;
1967 
1968 	/* Remainder fits into block at current level */
1969 	if (size_left < block_size)
1970 		return false;
1971 
1972 	/*
1973 	 * The required block size of the region is compatible with the
1974 	 * block size of the current level.
1975 	 */
1976 	if (mm->region_size < block_size)
1977 		return false;
1978 
1979 #ifdef CFG_WITH_PAGER
1980 	/*
1981 	 * If pager is enabled, we need to map TEE RAM and the whole pager
1982 	 * regions with small pages only
1983 	 */
1984 	if ((map_is_tee_ram(mm) || mm->type == MEM_AREA_PAGER_VASPACE) &&
1985 	    block_size != SMALL_PAGE_SIZE)
1986 		return false;
1987 #endif
1988 
1989 	return true;
1990 }
1991 
core_mmu_map_region(struct mmu_partition * prtn,struct tee_mmap_region * mm)1992 void core_mmu_map_region(struct mmu_partition *prtn, struct tee_mmap_region *mm)
1993 {
1994 	struct core_mmu_table_info tbl_info = { };
1995 	unsigned int idx = 0;
1996 	vaddr_t vaddr = mm->va;
1997 	paddr_t paddr = mm->pa;
1998 	ssize_t size_left = mm->size;
1999 	uint32_t attr = mm->attr;
2000 	unsigned int level = 0;
2001 	bool table_found = false;
2002 	uint32_t old_attr = 0;
2003 
2004 	assert(!((vaddr | paddr) & SMALL_PAGE_MASK));
2005 	if (!paddr)
2006 		attr = 0;
2007 
2008 	while (size_left > 0) {
2009 		level = CORE_MMU_BASE_TABLE_LEVEL;
2010 
2011 		while (true) {
2012 			paddr_t block_size = 0;
2013 
2014 			assert(core_mmu_level_in_range(level));
2015 
2016 			table_found = core_mmu_find_table(prtn, vaddr, level,
2017 							  &tbl_info);
2018 			if (!table_found)
2019 				panic("can't find table for mapping");
2020 
2021 			block_size = BIT64(tbl_info.shift);
2022 
2023 			idx = core_mmu_va2idx(&tbl_info, vaddr);
2024 			if (!can_map_at_level(paddr, vaddr, size_left,
2025 					      block_size, mm)) {
2026 				bool secure = mm->attr & TEE_MATTR_SECURE;
2027 
2028 				/*
2029 				 * This part of the region can't be mapped at
2030 				 * this level. Need to go deeper.
2031 				 */
2032 				if (!core_mmu_entry_to_finer_grained(&tbl_info,
2033 								     idx,
2034 								     secure))
2035 					panic("Can't divide MMU entry");
2036 				level = tbl_info.next_level;
2037 				continue;
2038 			}
2039 
2040 			/* We can map part of the region at current level */
2041 			core_mmu_get_entry(&tbl_info, idx, NULL, &old_attr);
2042 			if (old_attr)
2043 				panic("Page is already mapped");
2044 
2045 			core_mmu_set_entry(&tbl_info, idx, paddr, attr);
2046 			/*
2047 			 * Dynamic vaspace regions don't have a physical
2048 			 * address initially but we need to allocate and
2049 			 * initialize the translation tables now for later
2050 			 * updates to work properly.
2051 			 */
2052 			if (paddr)
2053 				paddr += block_size;
2054 			vaddr += block_size;
2055 			size_left -= block_size;
2056 
2057 			break;
2058 		}
2059 	}
2060 }
2061 
core_mmu_map_pages(vaddr_t vstart,paddr_t * pages,size_t num_pages,enum teecore_memtypes memtype)2062 TEE_Result core_mmu_map_pages(vaddr_t vstart, paddr_t *pages, size_t num_pages,
2063 			      enum teecore_memtypes memtype)
2064 {
2065 	TEE_Result ret;
2066 	struct core_mmu_table_info tbl_info;
2067 	struct tee_mmap_region *mm;
2068 	unsigned int idx;
2069 	uint32_t old_attr;
2070 	uint32_t exceptions;
2071 	vaddr_t vaddr = vstart;
2072 	size_t i;
2073 	bool secure;
2074 
2075 	assert(!(core_mmu_type_to_attr(memtype) & TEE_MATTR_PX));
2076 
2077 	secure = core_mmu_type_to_attr(memtype) & TEE_MATTR_SECURE;
2078 
2079 	if (vaddr & SMALL_PAGE_MASK)
2080 		return TEE_ERROR_BAD_PARAMETERS;
2081 
2082 	exceptions = mmu_lock();
2083 
2084 	mm = find_map_by_va((void *)vaddr);
2085 	if (!mm || !va_is_in_map(mm, vaddr + num_pages * SMALL_PAGE_SIZE - 1))
2086 		panic("VA does not belong to any known mm region");
2087 
2088 	if (!core_mmu_is_dynamic_vaspace(mm))
2089 		panic("Trying to map into static region");
2090 
2091 	for (i = 0; i < num_pages; i++) {
2092 		if (pages[i] & SMALL_PAGE_MASK) {
2093 			ret = TEE_ERROR_BAD_PARAMETERS;
2094 			goto err;
2095 		}
2096 
2097 		while (true) {
2098 			if (!core_mmu_find_table(NULL, vaddr, UINT_MAX,
2099 						 &tbl_info))
2100 				panic("Can't find pagetable for vaddr ");
2101 
2102 			idx = core_mmu_va2idx(&tbl_info, vaddr);
2103 			if (tbl_info.shift == SMALL_PAGE_SHIFT)
2104 				break;
2105 
2106 			/* This is supertable. Need to divide it. */
2107 			if (!core_mmu_entry_to_finer_grained(&tbl_info, idx,
2108 							     secure))
2109 				panic("Failed to spread pgdir on small tables");
2110 		}
2111 
2112 		core_mmu_get_entry(&tbl_info, idx, NULL, &old_attr);
2113 		if (old_attr)
2114 			panic("Page is already mapped");
2115 
2116 		core_mmu_set_entry(&tbl_info, idx, pages[i],
2117 				   core_mmu_type_to_attr(memtype));
2118 		vaddr += SMALL_PAGE_SIZE;
2119 	}
2120 
2121 	/*
2122 	 * Make sure all the changes to translation tables are visible
2123 	 * before returning. TLB doesn't need to be invalidated as we are
2124 	 * guaranteed that there's no valid mapping in this range.
2125 	 */
2126 	core_mmu_table_write_barrier();
2127 	mmu_unlock(exceptions);
2128 
2129 	return TEE_SUCCESS;
2130 err:
2131 	mmu_unlock(exceptions);
2132 
2133 	if (i)
2134 		core_mmu_unmap_pages(vstart, i);
2135 
2136 	return ret;
2137 }
2138 
core_mmu_map_contiguous_pages(vaddr_t vstart,paddr_t pstart,size_t num_pages,enum teecore_memtypes memtype)2139 TEE_Result core_mmu_map_contiguous_pages(vaddr_t vstart, paddr_t pstart,
2140 					 size_t num_pages,
2141 					 enum teecore_memtypes memtype)
2142 {
2143 	struct core_mmu_table_info tbl_info = { };
2144 	struct tee_mmap_region *mm = NULL;
2145 	unsigned int idx = 0;
2146 	uint32_t old_attr = 0;
2147 	uint32_t exceptions = 0;
2148 	vaddr_t vaddr = vstart;
2149 	paddr_t paddr = pstart;
2150 	size_t i = 0;
2151 	bool secure = false;
2152 
2153 	assert(!(core_mmu_type_to_attr(memtype) & TEE_MATTR_PX));
2154 
2155 	secure = core_mmu_type_to_attr(memtype) & TEE_MATTR_SECURE;
2156 
2157 	if ((vaddr | paddr) & SMALL_PAGE_MASK)
2158 		return TEE_ERROR_BAD_PARAMETERS;
2159 
2160 	exceptions = mmu_lock();
2161 
2162 	mm = find_map_by_va((void *)vaddr);
2163 	if (!mm || !va_is_in_map(mm, vaddr + num_pages * SMALL_PAGE_SIZE - 1))
2164 		panic("VA does not belong to any known mm region");
2165 
2166 	if (!core_mmu_is_dynamic_vaspace(mm))
2167 		panic("Trying to map into static region");
2168 
2169 	for (i = 0; i < num_pages; i++) {
2170 		while (true) {
2171 			if (!core_mmu_find_table(NULL, vaddr, UINT_MAX,
2172 						 &tbl_info))
2173 				panic("Can't find pagetable for vaddr ");
2174 
2175 			idx = core_mmu_va2idx(&tbl_info, vaddr);
2176 			if (tbl_info.shift == SMALL_PAGE_SHIFT)
2177 				break;
2178 
2179 			/* This is supertable. Need to divide it. */
2180 			if (!core_mmu_entry_to_finer_grained(&tbl_info, idx,
2181 							     secure))
2182 				panic("Failed to spread pgdir on small tables");
2183 		}
2184 
2185 		core_mmu_get_entry(&tbl_info, idx, NULL, &old_attr);
2186 		if (old_attr)
2187 			panic("Page is already mapped");
2188 
2189 		core_mmu_set_entry(&tbl_info, idx, paddr,
2190 				   core_mmu_type_to_attr(memtype));
2191 		paddr += SMALL_PAGE_SIZE;
2192 		vaddr += SMALL_PAGE_SIZE;
2193 	}
2194 
2195 	/*
2196 	 * Make sure all the changes to translation tables are visible
2197 	 * before returning. TLB doesn't need to be invalidated as we are
2198 	 * guaranteed that there's no valid mapping in this range.
2199 	 */
2200 	core_mmu_table_write_barrier();
2201 	mmu_unlock(exceptions);
2202 
2203 	return TEE_SUCCESS;
2204 }
2205 
mem_range_is_in_vcore_free(vaddr_t vstart,size_t num_pages)2206 static bool mem_range_is_in_vcore_free(vaddr_t vstart, size_t num_pages)
2207 {
2208 	return core_is_buffer_inside(vstart, num_pages * SMALL_PAGE_SIZE,
2209 				     VCORE_FREE_PA, VCORE_FREE_SZ);
2210 }
2211 
maybe_remove_from_mem_map(vaddr_t vstart,size_t num_pages)2212 static void maybe_remove_from_mem_map(vaddr_t vstart, size_t num_pages)
2213 {
2214 	struct memory_map *mem_map = NULL;
2215 	struct tee_mmap_region *mm = NULL;
2216 	size_t idx = 0;
2217 	vaddr_t va = 0;
2218 
2219 	mm = find_map_by_va((void *)vstart);
2220 	if (!mm || !va_is_in_map(mm, vstart + num_pages * SMALL_PAGE_SIZE - 1))
2221 		panic("VA does not belong to any known mm region");
2222 
2223 	if (core_mmu_is_dynamic_vaspace(mm))
2224 		return;
2225 
2226 	if (!mem_range_is_in_vcore_free(vstart, num_pages))
2227 		panic("Trying to unmap static region");
2228 
2229 	/*
2230 	 * We're going to remove a memory from the VCORE_FREE memory range.
2231 	 * Depending where the range is we may need to remove the matching
2232 	 * mm, peal of a bit from the start or end of the mm, or split it
2233 	 * into two with a whole in the middle.
2234 	 */
2235 
2236 	va = ROUNDDOWN(vstart, SMALL_PAGE_SIZE);
2237 	assert(mm->region_size == SMALL_PAGE_SIZE);
2238 
2239 	if (va == mm->va && mm->size == num_pages * SMALL_PAGE_SIZE) {
2240 		mem_map = get_memory_map();
2241 		idx = mm - mem_map->map;
2242 		assert(idx < mem_map->count);
2243 
2244 		rem_array_elem(mem_map->map, mem_map->count,
2245 			       sizeof(*mem_map->map), idx);
2246 		mem_map->count--;
2247 	} else if (va == mm->va) {
2248 		mm->va += num_pages * SMALL_PAGE_SIZE;
2249 		mm->pa += num_pages * SMALL_PAGE_SIZE;
2250 		mm->size -= num_pages * SMALL_PAGE_SIZE;
2251 	} else if (va + num_pages * SMALL_PAGE_SIZE == mm->va + mm->size) {
2252 		mm->size -= num_pages * SMALL_PAGE_SIZE;
2253 	} else {
2254 		struct tee_mmap_region m = *mm;
2255 
2256 		mem_map = get_memory_map();
2257 		idx = mm - mem_map->map;
2258 		assert(idx < mem_map->count);
2259 
2260 		mm->size = va - mm->va;
2261 		m.va += mm->size + num_pages * SMALL_PAGE_SIZE;
2262 		m.pa += mm->size + num_pages * SMALL_PAGE_SIZE;
2263 		m.size -= mm->size + num_pages * SMALL_PAGE_SIZE;
2264 		grow_mem_map(mem_map);
2265 		ins_array_elem(mem_map->map, mem_map->count,
2266 			       sizeof(*mem_map->map), idx + 1, &m);
2267 	}
2268 }
2269 
core_mmu_unmap_pages(vaddr_t vstart,size_t num_pages)2270 void core_mmu_unmap_pages(vaddr_t vstart, size_t num_pages)
2271 {
2272 	struct core_mmu_table_info tbl_info;
2273 	size_t i;
2274 	unsigned int idx;
2275 	uint32_t exceptions;
2276 
2277 	exceptions = mmu_lock();
2278 
2279 	maybe_remove_from_mem_map(vstart, num_pages);
2280 
2281 	for (i = 0; i < num_pages; i++, vstart += SMALL_PAGE_SIZE) {
2282 		if (!core_mmu_find_table(NULL, vstart, UINT_MAX, &tbl_info))
2283 			panic("Can't find pagetable");
2284 
2285 		if (tbl_info.shift != SMALL_PAGE_SHIFT)
2286 			panic("Invalid pagetable level");
2287 
2288 		idx = core_mmu_va2idx(&tbl_info, vstart);
2289 		core_mmu_set_entry(&tbl_info, idx, 0, 0);
2290 	}
2291 	tlbi_all();
2292 
2293 	mmu_unlock(exceptions);
2294 }
2295 
core_mmu_populate_user_map(struct core_mmu_table_info * dir_info,struct user_mode_ctx * uctx)2296 void core_mmu_populate_user_map(struct core_mmu_table_info *dir_info,
2297 				struct user_mode_ctx *uctx)
2298 {
2299 	struct core_mmu_table_info pg_info = { };
2300 	struct pgt_cache *pgt_cache = &uctx->pgt_cache;
2301 	struct pgt *pgt = NULL;
2302 	struct pgt *p = NULL;
2303 	struct vm_region *r = NULL;
2304 
2305 	if (TAILQ_EMPTY(&uctx->vm_info.regions))
2306 		return; /* Nothing to map */
2307 
2308 	/*
2309 	 * Allocate all page tables in advance.
2310 	 */
2311 	pgt_get_all(uctx);
2312 	pgt = SLIST_FIRST(pgt_cache);
2313 
2314 	core_mmu_set_info_table(&pg_info, dir_info->next_level, 0, NULL);
2315 
2316 	TAILQ_FOREACH(r, &uctx->vm_info.regions, link)
2317 		set_pg_region(dir_info, r, &pgt, &pg_info);
2318 	/* Record that the translation tables now are populated. */
2319 	SLIST_FOREACH(p, pgt_cache, link) {
2320 		p->populated = true;
2321 		if (p == pgt)
2322 			break;
2323 	}
2324 	assert(p == pgt);
2325 }
2326 
core_mmu_remove_mapping(enum teecore_memtypes type,void * addr,size_t len)2327 TEE_Result core_mmu_remove_mapping(enum teecore_memtypes type, void *addr,
2328 				   size_t len)
2329 {
2330 	struct core_mmu_table_info tbl_info = { };
2331 	struct tee_mmap_region *res_map = NULL;
2332 	struct tee_mmap_region *map = NULL;
2333 	paddr_t pa = virt_to_phys(addr);
2334 	size_t granule = 0;
2335 	ptrdiff_t i = 0;
2336 	paddr_t p = 0;
2337 	size_t l = 0;
2338 
2339 	map = find_map_by_type_and_pa(type, pa, len);
2340 	if (!map)
2341 		return TEE_ERROR_GENERIC;
2342 
2343 	res_map = find_map_by_type(MEM_AREA_RES_VASPACE);
2344 	if (!res_map)
2345 		return TEE_ERROR_GENERIC;
2346 	if (!core_mmu_find_table(NULL, res_map->va, UINT_MAX, &tbl_info))
2347 		return TEE_ERROR_GENERIC;
2348 	granule = BIT(tbl_info.shift);
2349 
2350 	if (map < static_memory_map.map ||
2351 	    map >= static_memory_map.map + static_memory_map.count)
2352 		return TEE_ERROR_GENERIC;
2353 	i = map - static_memory_map.map;
2354 
2355 	/* Check that we have a full match */
2356 	p = ROUNDDOWN2(pa, granule);
2357 	l = ROUNDUP2(len + pa - p, granule);
2358 	if (map->pa != p || map->size != l)
2359 		return TEE_ERROR_GENERIC;
2360 
2361 	clear_region(&tbl_info, map);
2362 	tlbi_all();
2363 
2364 	/* If possible remove the va range from res_map */
2365 	if (res_map->va - map->size == map->va) {
2366 		res_map->va -= map->size;
2367 		res_map->size += map->size;
2368 	}
2369 
2370 	/* Remove the entry. */
2371 	rem_array_elem(static_memory_map.map, static_memory_map.count,
2372 		       sizeof(*static_memory_map.map), i);
2373 	static_memory_map.count--;
2374 
2375 	return TEE_SUCCESS;
2376 }
2377 
2378 struct tee_mmap_region *
core_mmu_find_mapping_exclusive(enum teecore_memtypes type,size_t len)2379 core_mmu_find_mapping_exclusive(enum teecore_memtypes type, size_t len)
2380 {
2381 	struct memory_map *mem_map = get_memory_map();
2382 	struct tee_mmap_region *map_found = NULL;
2383 	size_t n = 0;
2384 
2385 	if (!len)
2386 		return NULL;
2387 
2388 	for (n = 0; n < mem_map->count; n++) {
2389 		if (mem_map->map[n].type != type)
2390 			continue;
2391 
2392 		if (map_found)
2393 			return NULL;
2394 
2395 		map_found = mem_map->map + n;
2396 	}
2397 
2398 	if (!map_found || map_found->size < len)
2399 		return NULL;
2400 
2401 	return map_found;
2402 }
2403 
core_mmu_add_mapping(enum teecore_memtypes type,paddr_t addr,size_t len)2404 void *core_mmu_add_mapping(enum teecore_memtypes type, paddr_t addr, size_t len)
2405 {
2406 	struct memory_map *mem_map = &static_memory_map;
2407 	struct core_mmu_table_info tbl_info = { };
2408 	struct tee_mmap_region *map = NULL;
2409 	size_t granule = 0;
2410 	paddr_t p = 0;
2411 	size_t l = 0;
2412 
2413 	if (!len)
2414 		return NULL;
2415 
2416 	if (!core_mmu_check_end_pa(addr, len))
2417 		return NULL;
2418 
2419 	/* Check if the memory is already mapped */
2420 	map = find_map_by_type_and_pa(type, addr, len);
2421 	if (map && pbuf_inside_map_area(addr, len, map))
2422 		return (void *)(vaddr_t)(map->va + addr - map->pa);
2423 
2424 	/* Find the reserved va space used for late mappings */
2425 	map = find_map_by_type(MEM_AREA_RES_VASPACE);
2426 	if (!map)
2427 		return NULL;
2428 
2429 	if (!core_mmu_find_table(NULL, map->va, UINT_MAX, &tbl_info))
2430 		return NULL;
2431 
2432 	granule = BIT64(tbl_info.shift);
2433 	p = ROUNDDOWN2(addr, granule);
2434 	l = ROUNDUP2(len + addr - p, granule);
2435 
2436 	/* Ban overflowing virtual addresses */
2437 	if (map->size < l)
2438 		return NULL;
2439 
2440 	/*
2441 	 * Something is wrong, we can't fit the va range into the selected
2442 	 * table. The reserved va range is possibly missaligned with
2443 	 * granule.
2444 	 */
2445 	if (core_mmu_va2idx(&tbl_info, map->va + len) >= tbl_info.num_entries)
2446 		return NULL;
2447 
2448 	if (static_memory_map.count >= static_memory_map.alloc_count)
2449 		return NULL;
2450 
2451 	mem_map->map[mem_map->count] = (struct tee_mmap_region){
2452 		.va = map->va,
2453 		.size = l,
2454 		.type = type,
2455 		.region_size = granule,
2456 		.attr = core_mmu_type_to_attr(type),
2457 		.pa = p,
2458 	};
2459 	map->va += l;
2460 	map->size -= l;
2461 	map = mem_map->map + mem_map->count;
2462 	mem_map->count++;
2463 
2464 	set_region(&tbl_info, map);
2465 
2466 	/* Make sure the new entry is visible before continuing. */
2467 	core_mmu_table_write_barrier();
2468 
2469 	return (void *)(vaddr_t)(map->va + addr - map->pa);
2470 }
2471 
2472 #ifdef CFG_WITH_PAGER
get_linear_map_end_va(void)2473 static vaddr_t get_linear_map_end_va(void)
2474 {
2475 	/* this is synced with the generic linker file kern.ld.S */
2476 	return (vaddr_t)__heap2_end;
2477 }
2478 
get_linear_map_end_pa(void)2479 static paddr_t get_linear_map_end_pa(void)
2480 {
2481 	return get_linear_map_end_va() - boot_mmu_config.map_offset;
2482 }
2483 #endif
2484 
2485 #if defined(CFG_TEE_CORE_DEBUG)
check_pa_matches_va(void * va,paddr_t pa)2486 static void check_pa_matches_va(void *va, paddr_t pa)
2487 {
2488 	TEE_Result res = TEE_ERROR_GENERIC;
2489 	vaddr_t v = (vaddr_t)va;
2490 	paddr_t p = 0;
2491 	struct core_mmu_table_info ti __maybe_unused = { };
2492 
2493 	if (core_mmu_user_va_range_is_defined()) {
2494 		vaddr_t user_va_base = 0;
2495 		size_t user_va_size = 0;
2496 
2497 		core_mmu_get_user_va_range(&user_va_base, &user_va_size);
2498 		if (v >= user_va_base &&
2499 		    v <= (user_va_base - 1 + user_va_size)) {
2500 			if (!core_mmu_user_mapping_is_active()) {
2501 				if (pa)
2502 					panic("issue in linear address space");
2503 				return;
2504 			}
2505 
2506 			res = vm_va2pa(to_user_mode_ctx(thread_get_tsd()->ctx),
2507 				       va, &p);
2508 			if (res == TEE_ERROR_NOT_SUPPORTED)
2509 				return;
2510 			if (res == TEE_SUCCESS && pa != p)
2511 				panic("bad pa");
2512 			if (res != TEE_SUCCESS && pa)
2513 				panic("false pa");
2514 			return;
2515 		}
2516 	}
2517 #ifdef CFG_WITH_PAGER
2518 	if (is_unpaged(va)) {
2519 		if (v - boot_mmu_config.map_offset != pa)
2520 			panic("issue in linear address space");
2521 		return;
2522 	}
2523 
2524 	if (tee_pager_get_table_info(v, &ti)) {
2525 		uint32_t a;
2526 
2527 		/*
2528 		 * Lookups in the page table managed by the pager is
2529 		 * dangerous for addresses in the paged area as those pages
2530 		 * changes all the time. But some ranges are safe,
2531 		 * rw-locked areas when the page is populated for instance.
2532 		 */
2533 		core_mmu_get_entry(&ti, core_mmu_va2idx(&ti, v), &p, &a);
2534 		if (a & TEE_MATTR_VALID_BLOCK) {
2535 			paddr_t mask = BIT64(ti.shift) - 1;
2536 
2537 			p |= v & mask;
2538 			if (pa != p)
2539 				panic();
2540 		} else {
2541 			if (pa)
2542 				panic();
2543 		}
2544 		return;
2545 	}
2546 #endif
2547 
2548 	if (!core_va2pa_helper(va, &p)) {
2549 		/* Verfiy only the static mapping (case non null phys addr) */
2550 		if (p && pa != p) {
2551 			DMSG("va %p maps 0x%" PRIxPA ", expect 0x%" PRIxPA,
2552 			     va, p, pa);
2553 			panic();
2554 		}
2555 	} else {
2556 		if (pa) {
2557 			DMSG("va %p unmapped, expect 0x%" PRIxPA, va, pa);
2558 			panic();
2559 		}
2560 	}
2561 }
2562 #else
check_pa_matches_va(void * va __unused,paddr_t pa __unused)2563 static void check_pa_matches_va(void *va __unused, paddr_t pa __unused)
2564 {
2565 }
2566 #endif
2567 
virt_to_phys(void * va)2568 paddr_t virt_to_phys(void *va)
2569 {
2570 	paddr_t pa = 0;
2571 
2572 	if (!arch_va2pa_helper(va, &pa))
2573 		pa = 0;
2574 	check_pa_matches_va(memtag_strip_tag(va), pa);
2575 	return pa;
2576 }
2577 
2578 /*
2579  * Don't use check_va_matches_pa() for RISC-V, as its callee
2580  * arch_va2pa_helper() will call it eventually, this creates
2581  * indirect recursion and can lead to a stack overflow.
2582  * Moreover, if arch_va2pa_helper() returns true, it implies
2583  * the va2pa mapping is matched, no need to check it again.
2584  */
2585 #if defined(CFG_TEE_CORE_DEBUG) && !defined(__riscv)
check_va_matches_pa(paddr_t pa,void * va)2586 static void check_va_matches_pa(paddr_t pa, void *va)
2587 {
2588 	paddr_t p = 0;
2589 
2590 	if (!va)
2591 		return;
2592 
2593 	p = virt_to_phys(va);
2594 	if (p != pa) {
2595 		DMSG("va %p maps 0x%" PRIxPA " expect 0x%" PRIxPA, va, p, pa);
2596 		panic();
2597 	}
2598 }
2599 #else
check_va_matches_pa(paddr_t pa __unused,void * va __unused)2600 static void check_va_matches_pa(paddr_t pa __unused, void *va __unused)
2601 {
2602 }
2603 #endif
2604 
phys_to_virt_ts_vaspace(paddr_t pa,size_t len)2605 static void *phys_to_virt_ts_vaspace(paddr_t pa, size_t len)
2606 {
2607 	if (!core_mmu_user_mapping_is_active())
2608 		return NULL;
2609 
2610 	return vm_pa2va(to_user_mode_ctx(thread_get_tsd()->ctx), pa, len);
2611 }
2612 
2613 #ifdef CFG_WITH_PAGER
phys_to_virt_tee_ram(paddr_t pa,size_t len)2614 static void *phys_to_virt_tee_ram(paddr_t pa, size_t len)
2615 {
2616 	paddr_t end_pa = 0;
2617 
2618 	if (SUB_OVERFLOW(len, 1, &end_pa) || ADD_OVERFLOW(pa, end_pa, &end_pa))
2619 		return NULL;
2620 
2621 	if (pa >= TEE_LOAD_ADDR && pa < get_linear_map_end_pa()) {
2622 		if (end_pa > get_linear_map_end_pa())
2623 			return NULL;
2624 		return (void *)(vaddr_t)(pa + boot_mmu_config.map_offset);
2625 	}
2626 
2627 	return tee_pager_phys_to_virt(pa, len);
2628 }
2629 #else
phys_to_virt_tee_ram(paddr_t pa,size_t len)2630 static void *phys_to_virt_tee_ram(paddr_t pa, size_t len)
2631 {
2632 	struct tee_mmap_region *mmap = NULL;
2633 
2634 	mmap = find_map_by_type_and_pa(MEM_AREA_TEE_RAM, pa, len);
2635 	if (!mmap)
2636 		mmap = find_map_by_type_and_pa(MEM_AREA_NEX_RAM_RW, pa, len);
2637 	if (!mmap)
2638 		mmap = find_map_by_type_and_pa(MEM_AREA_NEX_RAM_RO, pa, len);
2639 	if (!mmap)
2640 		mmap = find_map_by_type_and_pa(MEM_AREA_TEE_RAM_RW, pa, len);
2641 	if (!mmap)
2642 		mmap = find_map_by_type_and_pa(MEM_AREA_TEE_RAM_RO, pa, len);
2643 	if (!mmap)
2644 		mmap = find_map_by_type_and_pa(MEM_AREA_TEE_RAM_RX, pa, len);
2645 
2646 	/*
2647 	 * Note that MEM_AREA_INIT_RAM_RO and MEM_AREA_INIT_RAM_RX are only
2648 	 * used with pager and not needed here.
2649 	 */
2650 	return map_pa2va(mmap, pa, len);
2651 }
2652 #endif
2653 
phys_to_virt(paddr_t pa,enum teecore_memtypes m,size_t len)2654 void *phys_to_virt(paddr_t pa, enum teecore_memtypes m, size_t len)
2655 {
2656 	void *va = NULL;
2657 
2658 	switch (m) {
2659 	case MEM_AREA_TS_VASPACE:
2660 		va = phys_to_virt_ts_vaspace(pa, len);
2661 		break;
2662 	case MEM_AREA_TEE_RAM:
2663 	case MEM_AREA_TEE_RAM_RX:
2664 	case MEM_AREA_TEE_RAM_RO:
2665 	case MEM_AREA_TEE_RAM_RW:
2666 	case MEM_AREA_NEX_RAM_RO:
2667 	case MEM_AREA_NEX_RAM_RW:
2668 		va = phys_to_virt_tee_ram(pa, len);
2669 		break;
2670 	case MEM_AREA_SHM_VASPACE:
2671 	case MEM_AREA_NEX_DYN_VASPACE:
2672 	case MEM_AREA_TEE_DYN_VASPACE:
2673 		/* Find VA from PA in dynamic SHM is not yet supported */
2674 		va = NULL;
2675 		break;
2676 	default:
2677 		va = map_pa2va(find_map_by_type_and_pa(m, pa, len), pa, len);
2678 	}
2679 	if (m != MEM_AREA_SEC_RAM_OVERALL)
2680 		check_va_matches_pa(pa, va);
2681 	return va;
2682 }
2683 
phys_to_virt_io(paddr_t pa,size_t len)2684 void *phys_to_virt_io(paddr_t pa, size_t len)
2685 {
2686 	struct tee_mmap_region *map = NULL;
2687 	void *va = NULL;
2688 
2689 	map = find_map_by_type_and_pa(MEM_AREA_IO_SEC, pa, len);
2690 	if (!map)
2691 		map = find_map_by_type_and_pa(MEM_AREA_IO_NSEC, pa, len);
2692 	if (!map)
2693 		return NULL;
2694 	va = map_pa2va(map, pa, len);
2695 	check_va_matches_pa(pa, va);
2696 	return va;
2697 }
2698 
core_mmu_get_va(paddr_t pa,enum teecore_memtypes type,size_t len)2699 vaddr_t core_mmu_get_va(paddr_t pa, enum teecore_memtypes type, size_t len)
2700 {
2701 	if (cpu_mmu_enabled())
2702 		return (vaddr_t)phys_to_virt(pa, type, len);
2703 
2704 	return (vaddr_t)pa;
2705 }
2706 
2707 #ifdef CFG_WITH_PAGER
is_unpaged(const void * va)2708 bool is_unpaged(const void *va)
2709 {
2710 	vaddr_t v = (vaddr_t)va;
2711 
2712 	return v >= VCORE_START_VA && v < get_linear_map_end_va();
2713 }
2714 #endif
2715 
2716 #ifdef CFG_NS_VIRTUALIZATION
is_nexus(const void * va)2717 bool is_nexus(const void *va)
2718 {
2719 	vaddr_t v = (vaddr_t)va;
2720 
2721 	return v >= VCORE_START_VA && v < VCORE_NEX_RW_PA + VCORE_NEX_RW_SZ;
2722 }
2723 #endif
2724 
io_pa_or_va(struct io_pa_va * p,size_t len)2725 vaddr_t io_pa_or_va(struct io_pa_va *p, size_t len)
2726 {
2727 	assert(p->pa);
2728 	if (cpu_mmu_enabled()) {
2729 		if (!p->va)
2730 			p->va = (vaddr_t)phys_to_virt_io(p->pa, len);
2731 		assert(p->va);
2732 		return p->va;
2733 	}
2734 	return p->pa;
2735 }
2736 
io_pa_or_va_secure(struct io_pa_va * p,size_t len)2737 vaddr_t io_pa_or_va_secure(struct io_pa_va *p, size_t len)
2738 {
2739 	assert(p->pa);
2740 	if (cpu_mmu_enabled()) {
2741 		if (!p->va)
2742 			p->va = (vaddr_t)phys_to_virt(p->pa, MEM_AREA_IO_SEC,
2743 						      len);
2744 		assert(p->va);
2745 		return p->va;
2746 	}
2747 	return p->pa;
2748 }
2749 
io_pa_or_va_nsec(struct io_pa_va * p,size_t len)2750 vaddr_t io_pa_or_va_nsec(struct io_pa_va *p, size_t len)
2751 {
2752 	assert(p->pa);
2753 	if (cpu_mmu_enabled()) {
2754 		if (!p->va)
2755 			p->va = (vaddr_t)phys_to_virt(p->pa, MEM_AREA_IO_NSEC,
2756 						      len);
2757 		assert(p->va);
2758 		return p->va;
2759 	}
2760 	return p->pa;
2761 }
2762 
2763 #ifdef CFG_CORE_RESERVED_SHM
teecore_init_pub_ram(void)2764 static TEE_Result teecore_init_pub_ram(void)
2765 {
2766 	vaddr_t s = 0;
2767 	vaddr_t e = 0;
2768 
2769 	/* get virtual addr/size of NSec shared mem allocated from teecore */
2770 	core_mmu_get_mem_by_type(MEM_AREA_NSEC_SHM, &s, &e);
2771 
2772 	if (s >= e || s & SMALL_PAGE_MASK || e & SMALL_PAGE_MASK)
2773 		panic("invalid PUB RAM");
2774 
2775 	/* extra check: we could rely on core_mmu_get_mem_by_type() */
2776 	if (!tee_vbuf_is_non_sec(s, e - s))
2777 		panic("PUB RAM is not non-secure");
2778 
2779 #ifdef CFG_PL310
2780 	/* Allocate statically the l2cc mutex */
2781 	tee_l2cc_store_mutex_boot_pa(virt_to_phys((void *)s));
2782 	s += sizeof(uint32_t);			/* size of a pl310 mutex */
2783 	s = ROUNDUP(s, SMALL_PAGE_SIZE);	/* keep required alignment */
2784 #endif
2785 
2786 	default_nsec_shm_paddr = virt_to_phys((void *)s);
2787 	default_nsec_shm_size = e - s;
2788 
2789 	return TEE_SUCCESS;
2790 }
2791 early_init(teecore_init_pub_ram);
2792 #endif /*CFG_CORE_RESERVED_SHM*/
2793 
carve_out_core_mem(paddr_t pa,paddr_t end_pa)2794 static void __maybe_unused carve_out_core_mem(paddr_t pa, paddr_t end_pa)
2795 {
2796 	tee_mm_entry_t *mm __maybe_unused = NULL;
2797 
2798 	DMSG("%#"PRIxPA" .. %#"PRIxPA, pa, end_pa);
2799 	mm = phys_mem_alloc2(pa, end_pa - pa);
2800 	assert(mm);
2801 }
2802 
core_mmu_init_phys_mem(void)2803 void core_mmu_init_phys_mem(void)
2804 {
2805 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
2806 		paddr_t b1 = 0;
2807 		paddr_size_t s1 = 0;
2808 
2809 		static_assert(ARRAY_SIZE(secure_only) <= 2);
2810 
2811 		if (ARRAY_SIZE(secure_only) == 2) {
2812 			b1 = secure_only[1].paddr;
2813 			s1 = secure_only[1].size;
2814 		}
2815 		virt_init_memory(&static_memory_map, secure_only[0].paddr,
2816 				 secure_only[0].size, b1, s1);
2817 	} else {
2818 #ifdef CFG_WITH_PAGER
2819 		/*
2820 		 * The pager uses all core memory so there's no need to add
2821 		 * it to the pool.
2822 		 */
2823 		static_assert(ARRAY_SIZE(secure_only) == 2);
2824 		phys_mem_init(0, 0, secure_only[1].paddr, secure_only[1].size);
2825 #else /*!CFG_WITH_PAGER*/
2826 		size_t align = BIT(CORE_MMU_USER_CODE_SHIFT);
2827 		paddr_t end_pa = 0;
2828 		size_t size = 0;
2829 		paddr_t ps = 0;
2830 		paddr_t pa = 0;
2831 
2832 		static_assert(ARRAY_SIZE(secure_only) <= 2);
2833 		if (ARRAY_SIZE(secure_only) == 2) {
2834 			ps = secure_only[1].paddr;
2835 			size = secure_only[1].size;
2836 		}
2837 		phys_mem_init(secure_only[0].paddr, secure_only[0].size,
2838 			      ps, size);
2839 
2840 		/*
2841 		 * The VCORE macros are relocatable so we need to translate
2842 		 * the addresses now that the MMU is enabled.
2843 		 */
2844 		end_pa = vaddr_to_phys(ROUNDUP2(VCORE_FREE_END_PA,
2845 						align) - 1) + 1;
2846 		/* Carve out the part used by OP-TEE core */
2847 		carve_out_core_mem(vaddr_to_phys(VCORE_UNPG_RX_PA), end_pa);
2848 		if (IS_ENABLED(CFG_CORE_SANITIZE_KADDRESS)) {
2849 			pa = vaddr_to_phys(ROUNDUP2(ASAN_MAP_PA, align));
2850 			carve_out_core_mem(pa, pa + ASAN_MAP_SZ);
2851 		}
2852 
2853 		/* Carve out test SDP memory */
2854 #ifdef TEE_SDP_TEST_MEM_BASE
2855 		if (TEE_SDP_TEST_MEM_SIZE) {
2856 			pa = TEE_SDP_TEST_MEM_BASE;
2857 			carve_out_core_mem(pa, pa + TEE_SDP_TEST_MEM_SIZE);
2858 		}
2859 #endif
2860 #endif /*!CFG_WITH_PAGER*/
2861 	}
2862 }
2863