xref: /optee_os/core/mm/core_mmu.c (revision b339ffbd9956d9b9a27217493ea70ffbdd670ddc)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2016, 2022 Linaro Limited
4  * Copyright (c) 2014, STMicroelectronics International N.V.
5  * Copyright (c) 2022, Arm Limited and Contributors. All rights reserved.
6  */
7 
8 #include <assert.h>
9 #include <config.h>
10 #include <kernel/boot.h>
11 #include <kernel/dt.h>
12 #include <kernel/linker.h>
13 #include <kernel/panic.h>
14 #include <kernel/spinlock.h>
15 #include <kernel/tee_l2cc_mutex.h>
16 #include <kernel/tee_misc.h>
17 #include <kernel/tlb_helpers.h>
18 #include <kernel/user_mode_ctx.h>
19 #include <kernel/virtualization.h>
20 #include <libfdt.h>
21 #include <memtag.h>
22 #include <mm/core_memprot.h>
23 #include <mm/core_mmu.h>
24 #include <mm/mobj.h>
25 #include <mm/pgt_cache.h>
26 #include <mm/tee_pager.h>
27 #include <mm/vm.h>
28 #include <platform_config.h>
29 #include <string.h>
30 #include <trace.h>
31 #include <util.h>
32 
33 #ifndef DEBUG_XLAT_TABLE
34 #define DEBUG_XLAT_TABLE 0
35 #endif
36 
37 #define SHM_VASPACE_SIZE	(1024 * 1024 * 32)
38 
39 /* Physical Secure DDR pool */
40 tee_mm_pool_t tee_mm_sec_ddr;
41 
42 /* Virtual memory pool for core mappings */
43 tee_mm_pool_t core_virt_mem_pool;
44 
45 /* Virtual memory pool for shared memory mappings */
46 tee_mm_pool_t core_virt_shm_pool;
47 
48 #ifdef CFG_CORE_PHYS_RELOCATABLE
49 unsigned long core_mmu_tee_load_pa __nex_bss;
50 #else
51 const unsigned long core_mmu_tee_load_pa = TEE_LOAD_ADDR;
52 #endif
53 
54 /*
55  * These variables are initialized before .bss is cleared. To avoid
56  * resetting them when .bss is cleared, we're storing them in .data instead,
57  * even if they are initially zero.
58  */
59 
60 #ifdef CFG_CORE_RESERVED_SHM
61 /* Default NSec shared memory allocated from NSec world */
62 unsigned long default_nsec_shm_size __nex_bss;
63 unsigned long default_nsec_shm_paddr __nex_bss;
64 #endif
65 
66 static struct tee_mmap_region static_memory_map[CFG_MMAP_REGIONS
67 #if defined(CFG_CORE_ASLR) || defined(CFG_CORE_PHYS_RELOCATABLE)
68 						+ 1
69 #endif
70 						+ 1] __nex_bss;
71 
72 /* Define the platform's memory layout. */
73 struct memaccess_area {
74 	paddr_t paddr;
75 	size_t size;
76 };
77 
78 #define MEMACCESS_AREA(a, s) { .paddr = a, .size = s }
79 
80 static struct memaccess_area secure_only[] __nex_data = {
81 #ifdef CFG_CORE_PHYS_RELOCATABLE
82 	MEMACCESS_AREA(0, 0),
83 #else
84 #ifdef TRUSTED_SRAM_BASE
85 	MEMACCESS_AREA(TRUSTED_SRAM_BASE, TRUSTED_SRAM_SIZE),
86 #endif
87 	MEMACCESS_AREA(TRUSTED_DRAM_BASE, TRUSTED_DRAM_SIZE),
88 #endif
89 };
90 
91 static struct memaccess_area nsec_shared[] __nex_data = {
92 #ifdef CFG_CORE_RESERVED_SHM
93 	MEMACCESS_AREA(TEE_SHMEM_START, TEE_SHMEM_SIZE),
94 #endif
95 };
96 
97 #if defined(CFG_SECURE_DATA_PATH)
98 static const char *tz_sdp_match = "linaro,secure-heap";
99 static struct memaccess_area sec_sdp;
100 #ifdef CFG_TEE_SDP_MEM_BASE
101 register_sdp_mem(CFG_TEE_SDP_MEM_BASE, CFG_TEE_SDP_MEM_SIZE);
102 #endif
103 #ifdef TEE_SDP_TEST_MEM_BASE
104 register_sdp_mem(TEE_SDP_TEST_MEM_BASE, TEE_SDP_TEST_MEM_SIZE);
105 #endif
106 #endif
107 
108 #ifdef CFG_CORE_RESERVED_SHM
109 register_phys_mem(MEM_AREA_NSEC_SHM, TEE_SHMEM_START, TEE_SHMEM_SIZE);
110 #endif
111 static unsigned int mmu_spinlock;
112 
113 static uint32_t mmu_lock(void)
114 {
115 	return cpu_spin_lock_xsave(&mmu_spinlock);
116 }
117 
118 static void mmu_unlock(uint32_t exceptions)
119 {
120 	cpu_spin_unlock_xrestore(&mmu_spinlock, exceptions);
121 }
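
/*
 * Editor's note (not part of the original file): mmu_spinlock guards
 * updates to the core translation tables. Callers follow the usual
 * save/restore pattern:
 *
 *	uint32_t exceptions = mmu_lock();
 *
 *	... modify translation tables ...
 *	mmu_unlock(exceptions);
 *
 * as done in core_mmu_map_pages() further down in this file.
 */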
122 
123 void core_mmu_get_secure_memory(paddr_t *base, paddr_size_t *size)
124 {
125 	/*
126 	 * The first range is always used to cover OP-TEE core memory, but
127 	 * depending on configuration it may cover more than that.
128 	 */
129 	*base = secure_only[0].paddr;
130 	*size = secure_only[0].size;
131 }
132 
133 void core_mmu_set_secure_memory(paddr_t base, size_t size)
134 {
135 #ifdef CFG_CORE_PHYS_RELOCATABLE
136 	static_assert(ARRAY_SIZE(secure_only) == 1);
137 #endif
138 	runtime_assert(IS_ENABLED(CFG_CORE_PHYS_RELOCATABLE));
139 	assert(!secure_only[0].size);
140 	assert(base && size);
141 
142 	DMSG("Physical secure memory base %#"PRIxPA" size %#zx", base, size);
143 	secure_only[0].paddr = base;
144 	secure_only[0].size = size;
145 }
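
/*
 * Illustrative sketch (editor's addition, not part of this file): with
 * CFG_CORE_PHYS_RELOCATABLE=y a platform is expected to report its secure
 * DDR range early in boot, before the memory map is built, roughly as
 * below. The helper plat_get_secure_ddr() is hypothetical.
 */
#if 0
static void plat_report_secure_ddr_example(void)
{
	paddr_t base = 0;
	size_t size = 0;

	/* Hypothetical platform-specific discovery of the secure DDR window */
	plat_get_secure_ddr(&base, &size);
	core_mmu_set_secure_memory(base, size);
}
#endif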
146 
147 void core_mmu_get_ta_range(paddr_t *base, size_t *size)
148 {
149 	paddr_t b = 0;
150 	size_t s = 0;
151 
152 	static_assert(!(TEE_RAM_VA_SIZE % SMALL_PAGE_SIZE));
153 #ifdef TA_RAM_START
154 	b = TA_RAM_START;
155 	s = TA_RAM_SIZE;
156 #else
157 	static_assert(ARRAY_SIZE(secure_only) <= 2);
158 	if (ARRAY_SIZE(secure_only) == 1) {
159 		vaddr_t load_offs = 0;
160 
161 		assert(core_mmu_tee_load_pa >= secure_only[0].paddr);
162 		load_offs = core_mmu_tee_load_pa - secure_only[0].paddr;
163 
164 		assert(secure_only[0].size >
165 		       load_offs + TEE_RAM_VA_SIZE + TEE_SDP_TEST_MEM_SIZE);
166 		b = secure_only[0].paddr + load_offs + TEE_RAM_VA_SIZE;
167 		s = secure_only[0].size - load_offs - TEE_RAM_VA_SIZE -
168 		    TEE_SDP_TEST_MEM_SIZE;
169 	} else {
170 		assert(secure_only[1].size > TEE_SDP_TEST_MEM_SIZE);
171 		b = secure_only[1].paddr;
172 		s = secure_only[1].size - TEE_SDP_TEST_MEM_SIZE;
173 	}
174 #endif
175 	if (base)
176 		*base = b;
177 	if (size)
178 		*size = s;
179 }
180 
181 static struct tee_mmap_region *get_memory_map(void)
182 {
183 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
184 		struct tee_mmap_region *map = virt_get_memory_map();
185 
186 		if (map)
187 			return map;
188 	}
189 
190 	return static_memory_map;
191 }
192 
193 static bool _pbuf_intersects(struct memaccess_area *a, size_t alen,
194 			     paddr_t pa, size_t size)
195 {
196 	size_t n;
197 
198 	for (n = 0; n < alen; n++)
199 		if (core_is_buffer_intersect(pa, size, a[n].paddr, a[n].size))
200 			return true;
201 	return false;
202 }
203 
204 #define pbuf_intersects(a, pa, size) \
205 	_pbuf_intersects((a), ARRAY_SIZE(a), (pa), (size))
206 
207 static bool _pbuf_is_inside(struct memaccess_area *a, size_t alen,
208 			    paddr_t pa, size_t size)
209 {
210 	size_t n;
211 
212 	for (n = 0; n < alen; n++)
213 		if (core_is_buffer_inside(pa, size, a[n].paddr, a[n].size))
214 			return true;
215 	return false;
216 }
217 
218 #define pbuf_is_inside(a, pa, size) \
219 	_pbuf_is_inside((a), ARRAY_SIZE(a), (pa), (size))
220 
221 static bool pa_is_in_map(struct tee_mmap_region *map, paddr_t pa, size_t len)
222 {
223 	paddr_t end_pa = 0;
224 
225 	if (!map)
226 		return false;
227 
228 	if (SUB_OVERFLOW(len, 1, &end_pa) || ADD_OVERFLOW(pa, end_pa, &end_pa))
229 		return false;
230 
231 	return (pa >= map->pa && end_pa <= map->pa + map->size - 1);
232 }
233 
234 static bool va_is_in_map(struct tee_mmap_region *map, vaddr_t va)
235 {
236 	if (!map)
237 		return false;
238 	return (va >= map->va && va <= (map->va + map->size - 1));
239 }
240 
241 /* check if target buffer fits in a core default map area */
242 static bool pbuf_inside_map_area(unsigned long p, size_t l,
243 				 struct tee_mmap_region *map)
244 {
245 	return core_is_buffer_inside(p, l, map->pa, map->size);
246 }
247 
248 static struct tee_mmap_region *find_map_by_type(enum teecore_memtypes type)
249 {
250 	struct tee_mmap_region *map;
251 
252 	for (map = get_memory_map(); !core_mmap_is_end_of_table(map); map++)
253 		if (map->type == type)
254 			return map;
255 	return NULL;
256 }
257 
258 static struct tee_mmap_region *
259 find_map_by_type_and_pa(enum teecore_memtypes type, paddr_t pa, size_t len)
260 {
261 	struct tee_mmap_region *map;
262 
263 	for (map = get_memory_map(); !core_mmap_is_end_of_table(map); map++) {
264 		if (map->type != type)
265 			continue;
266 		if (pa_is_in_map(map, pa, len))
267 			return map;
268 	}
269 	return NULL;
270 }
271 
272 static struct tee_mmap_region *find_map_by_va(void *va)
273 {
274 	struct tee_mmap_region *map = get_memory_map();
275 	unsigned long a = (unsigned long)va;
276 
277 	while (!core_mmap_is_end_of_table(map)) {
278 		if (a >= map->va && a <= (map->va - 1 + map->size))
279 			return map;
280 		map++;
281 	}
282 	return NULL;
283 }
284 
285 static struct tee_mmap_region *find_map_by_pa(unsigned long pa)
286 {
287 	struct tee_mmap_region *map = get_memory_map();
288 
289 	while (!core_mmap_is_end_of_table(map)) {
290 		/* Skip unmapped regions */
291 		if ((map->attr & TEE_MATTR_VALID_BLOCK) &&
292 		    pa >= map->pa && pa <= (map->pa + map->size - 1))
293 			return map;
294 		map++;
295 	}
296 	return NULL;
297 }
298 
299 #if defined(CFG_SECURE_DATA_PATH)
300 static bool dtb_get_sdp_region(void)
301 {
302 	void *fdt = NULL;
303 	int node = 0;
304 	int tmp_node = 0;
305 	paddr_t tmp_addr = 0;
306 	size_t tmp_size = 0;
307 
308 	if (!IS_ENABLED(CFG_EMBED_DTB))
309 		return false;
310 
311 	fdt = get_embedded_dt();
312 	if (!fdt)
313 		panic("No DTB found");
314 
315 	node = fdt_node_offset_by_compatible(fdt, -1, tz_sdp_match);
316 	if (node < 0) {
317 		DMSG("No %s compatible node found", tz_sdp_match);
318 		return false;
319 	}
320 	tmp_node = node;
321 	while (tmp_node >= 0) {
322 		tmp_node = fdt_node_offset_by_compatible(fdt, tmp_node,
323 							 tz_sdp_match);
324 		if (tmp_node >= 0)
325 			DMSG("Ignore SDP pool node %s, supports only 1 node",
326 			     fdt_get_name(fdt, tmp_node, NULL));
327 	}
328 
329 	tmp_addr = fdt_reg_base_address(fdt, node);
330 	if (tmp_addr == DT_INFO_INVALID_REG) {
331 		EMSG("%s: Unable to get base addr from DT", tz_sdp_match);
332 		return false;
333 	}
334 
335 	tmp_size = fdt_reg_size(fdt, node);
336 	if (tmp_size == DT_INFO_INVALID_REG_SIZE) {
337 		EMSG("%s: Unable to get size of base addr from DT",
338 		     tz_sdp_match);
339 		return false;
340 	}
341 
342 	sec_sdp.paddr = tmp_addr;
343 	sec_sdp.size = tmp_size;
344 
345 	return true;
346 }
347 #endif
348 
349 #if defined(CFG_CORE_DYN_SHM) || defined(CFG_SECURE_DATA_PATH)
350 static bool pbuf_is_special_mem(paddr_t pbuf, size_t len,
351 				const struct core_mmu_phys_mem *start,
352 				const struct core_mmu_phys_mem *end)
353 {
354 	const struct core_mmu_phys_mem *mem;
355 
356 	for (mem = start; mem < end; mem++) {
357 		if (core_is_buffer_inside(pbuf, len, mem->addr, mem->size))
358 			return true;
359 	}
360 
361 	return false;
362 }
363 #endif
364 
365 #ifdef CFG_CORE_DYN_SHM
366 static void carve_out_phys_mem(struct core_mmu_phys_mem **mem, size_t *nelems,
367 			       paddr_t pa, size_t size)
368 {
369 	struct core_mmu_phys_mem *m = *mem;
370 	size_t n = 0;
371 
372 	while (true) {
373 		if (n >= *nelems) {
374 			DMSG("No need to carve out %#" PRIxPA " size %#zx",
375 			     pa, size);
376 			return;
377 		}
378 		if (core_is_buffer_inside(pa, size, m[n].addr, m[n].size))
379 			break;
380 		if (!core_is_buffer_outside(pa, size, m[n].addr, m[n].size))
381 			panic();
382 		n++;
383 	}
384 
385 	if (pa == m[n].addr && size == m[n].size) {
386 		/* Remove this entry */
387 		(*nelems)--;
388 		memmove(m + n, m + n + 1, sizeof(*m) * (*nelems - n));
389 		m = nex_realloc(m, sizeof(*m) * *nelems);
390 		if (!m)
391 			panic();
392 		*mem = m;
393 	} else if (pa == m[n].addr) {
394 		m[n].addr += size;
395 		m[n].size -= size;
396 	} else if ((pa + size) == (m[n].addr + m[n].size)) {
397 		m[n].size -= size;
398 	} else {
399 		/* Need to split the memory entry */
400 		m = nex_realloc(m, sizeof(*m) * (*nelems + 1));
401 		if (!m)
402 			panic();
403 		*mem = m;
404 		memmove(m + n + 1, m + n, sizeof(*m) * (*nelems - n));
405 		(*nelems)++;
406 		m[n].size = pa - m[n].addr;
407 		m[n + 1].size -= size + m[n].size;
408 		m[n + 1].addr = pa + size;
409 	}
410 }
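
/*
 * Worked example (editor's illustration, not in the original source):
 * carving pa 0x82000000, size 0x200000 out of a single entry
 * { .addr = 0x80000000, .size = 0x10000000 } hits the split case above
 * and leaves two entries:
 *	{ .addr = 0x80000000, .size = 0x02000000 }
 *	{ .addr = 0x82200000, .size = 0x0de00000 }
 */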
411 
412 static void check_phys_mem_is_outside(struct core_mmu_phys_mem *start,
413 				      size_t nelems,
414 				      struct tee_mmap_region *map)
415 {
416 	size_t n;
417 
418 	for (n = 0; n < nelems; n++) {
419 		if (!core_is_buffer_outside(start[n].addr, start[n].size,
420 					    map->pa, map->size)) {
421 			EMSG("Non-sec mem (%#" PRIxPA ":%#" PRIxPASZ
422 			     ") overlaps map (type %d %#" PRIxPA ":%#zx)",
423 			     start[n].addr, start[n].size,
424 			     map->type, map->pa, map->size);
425 			panic();
426 		}
427 	}
428 }
429 
430 static const struct core_mmu_phys_mem *discovered_nsec_ddr_start __nex_bss;
431 static size_t discovered_nsec_ddr_nelems __nex_bss;
432 
433 static int cmp_pmem_by_addr(const void *a, const void *b)
434 {
435 	const struct core_mmu_phys_mem *pmem_a = a;
436 	const struct core_mmu_phys_mem *pmem_b = b;
437 
438 	return CMP_TRILEAN(pmem_a->addr, pmem_b->addr);
439 }
440 
441 void core_mmu_set_discovered_nsec_ddr(struct core_mmu_phys_mem *start,
442 				      size_t nelems)
443 {
444 	struct core_mmu_phys_mem *m = start;
445 	size_t num_elems = nelems;
446 	struct tee_mmap_region *map = static_memory_map;
447 	const struct core_mmu_phys_mem __maybe_unused *pmem;
448 	size_t n = 0;
449 
450 	assert(!discovered_nsec_ddr_start);
451 	assert(m && num_elems);
452 
453 	qsort(m, num_elems, sizeof(*m), cmp_pmem_by_addr);
454 
455 	/*
456 	 * Non-secure shared memory and also secure data
457 	 * path memory are supposed to reside inside
458 	 * non-secure memory. Since NSEC_SHM and SDP_MEM
459 	 * are used for a specific purpose, make holes for
460 	 * that memory in the normal non-secure memory.
461 	 *
462 	 * This has to be done since, for instance, QEMU
463 	 * isn't aware of which memory range in the
464 	 * non-secure memory is used for NSEC_SHM.
465 	 */
466 
467 #ifdef CFG_SECURE_DATA_PATH
468 	if (dtb_get_sdp_region())
469 		carve_out_phys_mem(&m, &num_elems, sec_sdp.paddr, sec_sdp.size);
470 
471 	for (pmem = phys_sdp_mem_begin; pmem < phys_sdp_mem_end; pmem++)
472 		carve_out_phys_mem(&m, &num_elems, pmem->addr, pmem->size);
473 #endif
474 
475 	for (n = 0; n < ARRAY_SIZE(secure_only); n++)
476 		carve_out_phys_mem(&m, &num_elems, secure_only[n].paddr,
477 				   secure_only[n].size);
478 
479 	for (map = static_memory_map; !core_mmap_is_end_of_table(map); map++) {
480 		switch (map->type) {
481 		case MEM_AREA_NSEC_SHM:
482 			carve_out_phys_mem(&m, &num_elems, map->pa, map->size);
483 			break;
484 		case MEM_AREA_EXT_DT:
485 		case MEM_AREA_MANIFEST_DT:
486 		case MEM_AREA_RAM_NSEC:
487 		case MEM_AREA_RES_VASPACE:
488 		case MEM_AREA_SHM_VASPACE:
489 		case MEM_AREA_TS_VASPACE:
490 		case MEM_AREA_PAGER_VASPACE:
491 			break;
492 		default:
493 			check_phys_mem_is_outside(m, num_elems, map);
494 		}
495 	}
496 
497 	discovered_nsec_ddr_start = m;
498 	discovered_nsec_ddr_nelems = num_elems;
499 
500 	if (!core_mmu_check_end_pa(m[num_elems - 1].addr,
501 				   m[num_elems - 1].size))
502 		panic();
503 }
504 
505 static bool get_discovered_nsec_ddr(const struct core_mmu_phys_mem **start,
506 				    const struct core_mmu_phys_mem **end)
507 {
508 	if (!discovered_nsec_ddr_start)
509 		return false;
510 
511 	*start = discovered_nsec_ddr_start;
512 	*end = discovered_nsec_ddr_start + discovered_nsec_ddr_nelems;
513 
514 	return true;
515 }
516 
517 static bool pbuf_is_nsec_ddr(paddr_t pbuf, size_t len)
518 {
519 	const struct core_mmu_phys_mem *start;
520 	const struct core_mmu_phys_mem *end;
521 
522 	if (!get_discovered_nsec_ddr(&start, &end))
523 		return false;
524 
525 	return pbuf_is_special_mem(pbuf, len, start, end);
526 }
527 
528 bool core_mmu_nsec_ddr_is_defined(void)
529 {
530 	const struct core_mmu_phys_mem *start;
531 	const struct core_mmu_phys_mem *end;
532 
533 	if (!get_discovered_nsec_ddr(&start, &end))
534 		return false;
535 
536 	return start != end;
537 }
538 #else
539 static bool pbuf_is_nsec_ddr(paddr_t pbuf __unused, size_t len __unused)
540 {
541 	return false;
542 }
543 #endif /*CFG_CORE_DYN_SHM*/
544 
545 #define MSG_MEM_INSTERSECT(pa1, sz1, pa2, sz2) \
546 	EMSG("[%" PRIxPA " %" PRIx64 "] intersects [%" PRIxPA " %" PRIx64 "]", \
547 			pa1, (uint64_t)pa1 + (sz1), pa2, (uint64_t)pa2 + (sz2))
548 
549 #ifdef CFG_SECURE_DATA_PATH
550 static bool pbuf_is_sdp_mem(paddr_t pbuf, size_t len)
551 {
552 	bool is_sdp_mem = false;
553 
554 	if (sec_sdp.size)
555 		is_sdp_mem = core_is_buffer_inside(pbuf, len, sec_sdp.paddr,
556 						   sec_sdp.size);
557 
558 	if (!is_sdp_mem)
559 		is_sdp_mem = pbuf_is_special_mem(pbuf, len, phys_sdp_mem_begin,
560 						 phys_sdp_mem_end);
561 
562 	return is_sdp_mem;
563 }
564 
565 static struct mobj *core_sdp_mem_alloc_mobj(paddr_t pa, size_t size)
566 {
567 	struct mobj *mobj = mobj_phys_alloc(pa, size, TEE_MATTR_MEM_TYPE_CACHED,
568 					    CORE_MEM_SDP_MEM);
569 
570 	if (!mobj)
571 		panic("can't create SDP physical memory object");
572 
573 	return mobj;
574 }
575 
576 struct mobj **core_sdp_mem_create_mobjs(void)
577 {
578 	const struct core_mmu_phys_mem *mem = NULL;
579 	struct mobj **mobj_base = NULL;
580 	struct mobj **mobj = NULL;
581 	int cnt = phys_sdp_mem_end - phys_sdp_mem_begin;
582 
583 	if (sec_sdp.size)
584 		cnt++;
585 
586 	/* SDP mobjs table must end with a NULL entry */
587 	mobj_base = calloc(cnt + 1, sizeof(struct mobj *));
588 	if (!mobj_base)
589 		panic("Out of memory");
590 
591 	mobj = mobj_base;
592 
593 	for (mem = phys_sdp_mem_begin; mem < phys_sdp_mem_end; mem++, mobj++)
594 		*mobj = core_sdp_mem_alloc_mobj(mem->addr, mem->size);
595 
596 	if (sec_sdp.size)
597 		*mobj = core_sdp_mem_alloc_mobj(sec_sdp.paddr, sec_sdp.size);
598 
599 	return mobj_base;
600 }
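
/*
 * Usage sketch (editor's addition, not part of this file): the returned
 * array is NULL-terminated, so a caller can walk it without knowing the
 * count up front. The consumer function sdp_use_mobj() is hypothetical.
 */
#if 0
static void core_sdp_mem_example(void)
{
	struct mobj **mobjs = core_sdp_mem_create_mobjs();
	size_t n = 0;

	for (n = 0; mobjs[n]; n++)
		sdp_use_mobj(mobjs[n]);
}
#endif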
601 
602 #else /* CFG_SECURE_DATA_PATH */
603 static bool pbuf_is_sdp_mem(paddr_t pbuf __unused, size_t len __unused)
604 {
605 	return false;
606 }
607 
608 #endif /* CFG_SECURE_DATA_PATH */
609 
610 /* Check that special memories do not conflict with registered memories */
611 static void verify_special_mem_areas(struct tee_mmap_region *mem_map,
612 				     const struct core_mmu_phys_mem *start,
613 				     const struct core_mmu_phys_mem *end,
614 				     const char *area_name __maybe_unused)
615 {
616 	const struct core_mmu_phys_mem *mem;
617 	const struct core_mmu_phys_mem *mem2;
618 	struct tee_mmap_region *mmap;
619 
620 	if (start == end) {
621 		DMSG("No %s memory area defined", area_name);
622 		return;
623 	}
624 
625 	for (mem = start; mem < end; mem++)
626 		DMSG("%s memory [%" PRIxPA " %" PRIx64 "]",
627 		     area_name, mem->addr, (uint64_t)mem->addr + mem->size);
628 
629 	/* Check memories do not intersect each other */
630 	for (mem = start; mem + 1 < end; mem++) {
631 		for (mem2 = mem + 1; mem2 < end; mem2++) {
632 			if (core_is_buffer_intersect(mem2->addr, mem2->size,
633 						     mem->addr, mem->size)) {
634 				MSG_MEM_INSTERSECT(mem2->addr, mem2->size,
635 						   mem->addr, mem->size);
636 				panic("Special memory intersection");
637 			}
638 		}
639 	}
640 
641 	/*
642 	 * Check memories do not intersect any mapped memory.
643 	 * This is called before reserved VA space is loaded in mem_map.
644 	 */
645 	for (mem = start; mem < end; mem++) {
646 		for (mmap = mem_map; mmap->type != MEM_AREA_END; mmap++) {
647 			if (core_is_buffer_intersect(mem->addr, mem->size,
648 						     mmap->pa, mmap->size)) {
649 				MSG_MEM_INSTERSECT(mem->addr, mem->size,
650 						   mmap->pa, mmap->size);
651 				panic("Special memory intersection");
652 			}
653 		}
654 	}
655 }
656 
657 static void add_phys_mem(struct tee_mmap_region *memory_map, size_t num_elems,
658 			 const char *mem_name __maybe_unused,
659 			 enum teecore_memtypes mem_type,
660 			 paddr_t mem_addr, paddr_size_t mem_size, size_t *last)
661 {
662 	size_t n = 0;
663 	paddr_t pa;
664 	paddr_size_t size;
665 
666 	if (!mem_size)	/* Discard null size entries */
667 		return;
668 	/*
669 	 * If some ranges of memory of the same type overlap each other,
670 	 * they are coalesced into one entry. To help with this, added
671 	 * entries are kept sorted by increasing physical address.
672 	 *
673 	 * Note that it's valid to have the same physical memory as several
674 	 * different memory types, for instance the same device memory
675 	 * mapped as both secure and non-secure. This will probably not
676 	 * happen often in practice.
677 	 */
678 	DMSG("%s type %s 0x%08" PRIxPA " size 0x%08" PRIxPASZ,
679 	     mem_name, teecore_memtype_name(mem_type), mem_addr, mem_size);
680 	while (true) {
681 		if (n >= (num_elems - 1)) {
682 			EMSG("Out of entries (%zu) in memory_map", num_elems);
683 			panic();
684 		}
685 		if (n == *last)
686 			break;
687 		pa = memory_map[n].pa;
688 		size = memory_map[n].size;
689 		if (mem_type == memory_map[n].type &&
690 		    ((pa <= (mem_addr + (mem_size - 1))) &&
691 		    (mem_addr <= (pa + (size - 1))))) {
692 			DMSG("Physical mem map overlaps 0x%" PRIxPA, mem_addr);
693 			memory_map[n].pa = MIN(pa, mem_addr);
694 			memory_map[n].size = MAX(size, mem_size) +
695 					     (pa - memory_map[n].pa);
696 			return;
697 		}
698 		if (mem_type < memory_map[n].type ||
699 		    (mem_type == memory_map[n].type && mem_addr < pa))
700 			break; /* found the spot where to insert this memory */
701 		n++;
702 	}
703 
704 	memmove(memory_map + n + 1, memory_map + n,
705 		sizeof(struct tee_mmap_region) * (*last - n));
706 	(*last)++;
707 	memset(memory_map + n, 0, sizeof(memory_map[0]));
708 	memory_map[n].type = mem_type;
709 	memory_map[n].pa = mem_addr;
710 	memory_map[n].size = mem_size;
711 }
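
/*
 * Worked example (editor's illustration, not in the original source):
 * with an existing MEM_AREA_RAM_SEC entry at 0x40100000 size 0x200000,
 * adding MEM_AREA_RAM_SEC at 0x40000000 size 0x200000 overlaps it and is
 * coalesced into a single entry at 0x40000000 size 0x300000. The same
 * ranges registered with different memory types stay as separate entries.
 */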
712 
713 static void add_va_space(struct tee_mmap_region *memory_map, size_t num_elems,
714 			 enum teecore_memtypes type, size_t size, size_t *last)
715 {
716 	size_t n = 0;
717 
718 	DMSG("type %s size 0x%08zx", teecore_memtype_name(type), size);
719 	while (true) {
720 		if (n >= (num_elems - 1)) {
721 			EMSG("Out of entries (%zu) in memory_map", num_elems);
722 			panic();
723 		}
724 		if (n == *last)
725 			break;
726 		if (type < memory_map[n].type)
727 			break;
728 		n++;
729 	}
730 
731 	memmove(memory_map + n + 1, memory_map + n,
732 		sizeof(struct tee_mmap_region) * (*last - n));
733 	(*last)++;
734 	memset(memory_map + n, 0, sizeof(memory_map[0]));
735 	memory_map[n].type = type;
736 	memory_map[n].size = size;
737 }
738 
739 uint32_t core_mmu_type_to_attr(enum teecore_memtypes t)
740 {
741 	const uint32_t attr = TEE_MATTR_VALID_BLOCK;
742 	const uint32_t tagged = TEE_MATTR_MEM_TYPE_TAGGED <<
743 				TEE_MATTR_MEM_TYPE_SHIFT;
744 	const uint32_t cached = TEE_MATTR_MEM_TYPE_CACHED <<
745 				TEE_MATTR_MEM_TYPE_SHIFT;
746 	const uint32_t noncache = TEE_MATTR_MEM_TYPE_DEV <<
747 				  TEE_MATTR_MEM_TYPE_SHIFT;
748 
749 	switch (t) {
750 	case MEM_AREA_TEE_RAM:
751 		return attr | TEE_MATTR_SECURE | TEE_MATTR_PRWX | tagged;
752 	case MEM_AREA_TEE_RAM_RX:
753 	case MEM_AREA_INIT_RAM_RX:
754 	case MEM_AREA_IDENTITY_MAP_RX:
755 		return attr | TEE_MATTR_SECURE | TEE_MATTR_PRX | tagged;
756 	case MEM_AREA_TEE_RAM_RO:
757 	case MEM_AREA_INIT_RAM_RO:
758 		return attr | TEE_MATTR_SECURE | TEE_MATTR_PR | tagged;
759 	case MEM_AREA_TEE_RAM_RW:
760 	case MEM_AREA_NEX_RAM_RO: /* This has to be r/w during init runtime */
761 	case MEM_AREA_NEX_RAM_RW:
762 	case MEM_AREA_TEE_ASAN:
763 		return attr | TEE_MATTR_SECURE | TEE_MATTR_PRW | tagged;
764 	case MEM_AREA_TEE_COHERENT:
765 		return attr | TEE_MATTR_SECURE | TEE_MATTR_PRWX | noncache;
766 	case MEM_AREA_TA_RAM:
767 		return attr | TEE_MATTR_SECURE | TEE_MATTR_PRW | tagged;
768 	case MEM_AREA_NSEC_SHM:
769 	case MEM_AREA_NEX_NSEC_SHM:
770 		return attr | TEE_MATTR_PRW | cached;
771 	case MEM_AREA_MANIFEST_DT:
772 		return attr | TEE_MATTR_SECURE | TEE_MATTR_PR | cached;
773 	case MEM_AREA_TRANSFER_LIST:
774 		return attr | TEE_MATTR_SECURE | TEE_MATTR_PRW | cached;
775 	case MEM_AREA_EXT_DT:
776 		/*
777 		 * If CFG_MAP_EXT_DT_SECURE is enabled map the external device
778 		 * tree as secure non-cached memory, otherwise, fall back to
779 		 * non-secure mapping.
780 		 */
781 		if (IS_ENABLED(CFG_MAP_EXT_DT_SECURE))
782 			return attr | TEE_MATTR_SECURE | TEE_MATTR_PRW |
783 			       noncache;
784 		fallthrough;
785 	case MEM_AREA_IO_NSEC:
786 		return attr | TEE_MATTR_PRW | noncache;
787 	case MEM_AREA_IO_SEC:
788 		return attr | TEE_MATTR_SECURE | TEE_MATTR_PRW | noncache;
789 	case MEM_AREA_RAM_NSEC:
790 		return attr | TEE_MATTR_PRW | cached;
791 	case MEM_AREA_RAM_SEC:
792 	case MEM_AREA_SEC_RAM_OVERALL:
793 		return attr | TEE_MATTR_SECURE | TEE_MATTR_PRW | cached;
794 	case MEM_AREA_ROM_SEC:
795 		return attr | TEE_MATTR_SECURE | TEE_MATTR_PR | cached;
796 	case MEM_AREA_RES_VASPACE:
797 	case MEM_AREA_SHM_VASPACE:
798 		return 0;
799 	case MEM_AREA_PAGER_VASPACE:
800 		return TEE_MATTR_SECURE;
801 	default:
802 		panic("invalid type");
803 	}
804 }
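
/*
 * Illustrative sketch (editor's addition, not part of this file): the
 * attribute word returned above can be inspected with the usual helpers,
 * for instance:
 */
#if 0
static void memtype_attr_example(void)
{
	uint32_t attr = core_mmu_type_to_attr(MEM_AREA_NSEC_SHM);

	assert(!(attr & TEE_MATTR_SECURE));	/* non-secure mapping */
	assert(mattr_is_cached(attr));		/* cacheable memory type */
}
#endif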
805 
806 static bool __maybe_unused map_is_tee_ram(const struct tee_mmap_region *mm)
807 {
808 	switch (mm->type) {
809 	case MEM_AREA_TEE_RAM:
810 	case MEM_AREA_TEE_RAM_RX:
811 	case MEM_AREA_TEE_RAM_RO:
812 	case MEM_AREA_TEE_RAM_RW:
813 	case MEM_AREA_INIT_RAM_RX:
814 	case MEM_AREA_INIT_RAM_RO:
815 	case MEM_AREA_NEX_RAM_RW:
816 	case MEM_AREA_NEX_RAM_RO:
817 	case MEM_AREA_TEE_ASAN:
818 		return true;
819 	default:
820 		return false;
821 	}
822 }
823 
824 static bool __maybe_unused map_is_secure(const struct tee_mmap_region *mm)
825 {
826 	return !!(core_mmu_type_to_attr(mm->type) & TEE_MATTR_SECURE);
827 }
828 
829 static bool __maybe_unused map_is_pgdir(const struct tee_mmap_region *mm)
830 {
831 	return mm->region_size == CORE_MMU_PGDIR_SIZE;
832 }
833 
834 static int cmp_mmap_by_lower_va(const void *a, const void *b)
835 {
836 	const struct tee_mmap_region *mm_a = a;
837 	const struct tee_mmap_region *mm_b = b;
838 
839 	return CMP_TRILEAN(mm_a->va, mm_b->va);
840 }
841 
842 static void dump_mmap_table(struct tee_mmap_region *memory_map)
843 {
844 	struct tee_mmap_region *map;
845 
846 	for (map = memory_map; !core_mmap_is_end_of_table(map); map++) {
847 		vaddr_t __maybe_unused vstart;
848 
849 		vstart = map->va + ((vaddr_t)map->pa & (map->region_size - 1));
850 		DMSG("type %-12s va 0x%08" PRIxVA "..0x%08" PRIxVA
851 		     " pa 0x%08" PRIxPA "..0x%08" PRIxPA " size 0x%08zx (%s)",
852 		     teecore_memtype_name(map->type), vstart,
853 		     vstart + map->size - 1, map->pa,
854 		     (paddr_t)(map->pa + map->size - 1), map->size,
855 		     map->region_size == SMALL_PAGE_SIZE ? "smallpg" : "pgdir");
856 	}
857 }
858 
859 #if DEBUG_XLAT_TABLE
860 
861 static void dump_xlat_table(vaddr_t va, unsigned int level)
862 {
863 	struct core_mmu_table_info tbl_info;
864 	unsigned int idx = 0;
865 	paddr_t pa;
866 	uint32_t attr;
867 
868 	core_mmu_find_table(NULL, va, level, &tbl_info);
869 	va = tbl_info.va_base;
870 	for (idx = 0; idx < tbl_info.num_entries; idx++) {
871 		core_mmu_get_entry(&tbl_info, idx, &pa, &attr);
872 		if (attr || level > CORE_MMU_BASE_TABLE_LEVEL) {
873 			const char *security_bit = "";
874 
875 			if (core_mmu_entry_have_security_bit(attr)) {
876 				if (attr & TEE_MATTR_SECURE)
877 					security_bit = "S";
878 				else
879 					security_bit = "NS";
880 			}
881 
882 			if (attr & TEE_MATTR_TABLE) {
883 				DMSG_RAW("%*s [LVL%d] VA:0x%010" PRIxVA
884 					" TBL:0x%010" PRIxPA " %s",
885 					level * 2, "", level, va, pa,
886 					security_bit);
887 				dump_xlat_table(va, level + 1);
888 			} else if (attr) {
889 				DMSG_RAW("%*s [LVL%d] VA:0x%010" PRIxVA
890 					" PA:0x%010" PRIxPA " %s-%s-%s-%s",
891 					level * 2, "", level, va, pa,
892 					mattr_is_cached(attr) ? "MEM" :
893 					"DEV",
894 					attr & TEE_MATTR_PW ? "RW" : "RO",
895 					attr & TEE_MATTR_PX ? "X " : "XN",
896 					security_bit);
897 			} else {
898 				DMSG_RAW("%*s [LVL%d] VA:0x%010" PRIxVA
899 					    " INVALID\n",
900 					    level * 2, "", level, va);
901 			}
902 		}
903 		va += BIT64(tbl_info.shift);
904 	}
905 }
906 
907 #else
908 
909 static void dump_xlat_table(vaddr_t va __unused, int level __unused)
910 {
911 }
912 
913 #endif
914 
915 /*
916  * Reserves virtual memory space for pager usage.
917  *
918  * The region starting at the first memory used by the link script and
919  * extending TEE_RAM_VA_SIZE bytes should be covered, either with a direct
920  * mapping or an empty mapping for pager usage. This adds translation tables
921  * as needed for the pager to operate.
922  */
923 static void add_pager_vaspace(struct tee_mmap_region *mmap, size_t num_elems,
924 			      size_t *last)
925 {
926 	paddr_t begin = 0;
927 	paddr_t end = 0;
928 	size_t size = 0;
929 	size_t pos = 0;
930 	size_t n = 0;
931 
932 	if (*last >= (num_elems - 1)) {
933 		EMSG("Out of entries (%zu) in memory map", num_elems);
934 		panic();
935 	}
936 
937 	for (n = 0; !core_mmap_is_end_of_table(mmap + n); n++) {
938 		if (map_is_tee_ram(mmap + n)) {
939 			if (!begin)
940 				begin = mmap[n].pa;
941 			pos = n + 1;
942 		}
943 	}
944 
945 	end = mmap[pos - 1].pa + mmap[pos - 1].size;
946 	assert(end - begin < TEE_RAM_VA_SIZE);
947 	size = TEE_RAM_VA_SIZE - (end - begin);
948 
949 	assert(pos <= *last);
950 	memmove(mmap + pos + 1, mmap + pos,
951 		sizeof(struct tee_mmap_region) * (*last - pos));
952 	(*last)++;
953 	memset(mmap + pos, 0, sizeof(mmap[0]));
954 	mmap[pos].type = MEM_AREA_PAGER_VASPACE;
955 	mmap[pos].va = 0;
956 	mmap[pos].size = size;
957 	mmap[pos].region_size = SMALL_PAGE_SIZE;
958 	mmap[pos].attr = core_mmu_type_to_attr(MEM_AREA_PAGER_VASPACE);
959 }
960 
961 static void check_sec_nsec_mem_config(void)
962 {
963 	size_t n = 0;
964 
965 	for (n = 0; n < ARRAY_SIZE(secure_only); n++) {
966 		if (pbuf_intersects(nsec_shared, secure_only[n].paddr,
967 				    secure_only[n].size))
968 			panic("Invalid memory access config: sec/nsec");
969 	}
970 }
971 
972 static void collect_device_mem_ranges(struct tee_mmap_region *memory_map,
973 				      size_t num_elems, size_t *last)
974 {
975 	const char *compatible = "arm,ffa-manifest-device-regions";
976 	void *fdt = get_manifest_dt();
977 	const char *name = NULL;
978 	uint64_t page_count = 0;
979 	uint64_t base = 0;
980 	int subnode = 0;
981 	int node = 0;
982 
983 	assert(fdt);
984 
985 	node = fdt_node_offset_by_compatible(fdt, 0, compatible);
986 	if (node < 0)
987 		return;
988 
989 	fdt_for_each_subnode(subnode, fdt, node) {
990 		name = fdt_get_name(fdt, subnode, NULL);
991 		if (!name)
992 			continue;
993 
994 		if (dt_getprop_as_number(fdt, subnode, "base-address",
995 					 &base)) {
996 			EMSG("Mandatory field is missing: base-address");
997 			continue;
998 		}
999 
1000 		if (base & SMALL_PAGE_MASK) {
1001 			EMSG("base-address is not page aligned");
1002 			continue;
1003 		}
1004 
1005 		if (dt_getprop_as_number(fdt, subnode, "pages-count",
1006 					 &page_count)) {
1007 			EMSG("Mandatory field is missing: pages-count");
1008 			continue;
1009 		}
1010 
1011 		add_phys_mem(memory_map, num_elems, name, MEM_AREA_IO_SEC,
1012 			     base, page_count * SMALL_PAGE_SIZE, last);
1013 	}
1014 }
1015 
1016 static size_t collect_mem_ranges(struct tee_mmap_region *memory_map,
1017 				 size_t num_elems)
1018 {
1019 	const struct core_mmu_phys_mem *mem = NULL;
1020 	vaddr_t ram_start = secure_only[0].paddr;
1021 	size_t last = 0;
1022 
1023 
1024 #define ADD_PHYS_MEM(_type, _addr, _size) \
1025 		add_phys_mem(memory_map, num_elems, #_addr, (_type), \
1026 			     (_addr), (_size),  &last)
1027 
1028 	if (IS_ENABLED(CFG_CORE_RWDATA_NOEXEC)) {
1029 		ADD_PHYS_MEM(MEM_AREA_TEE_RAM_RO, ram_start,
1030 			     VCORE_UNPG_RX_PA - ram_start);
1031 		ADD_PHYS_MEM(MEM_AREA_TEE_RAM_RX, VCORE_UNPG_RX_PA,
1032 			     VCORE_UNPG_RX_SZ);
1033 		ADD_PHYS_MEM(MEM_AREA_TEE_RAM_RO, VCORE_UNPG_RO_PA,
1034 			     VCORE_UNPG_RO_SZ);
1035 
1036 		if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
1037 			ADD_PHYS_MEM(MEM_AREA_NEX_RAM_RO, VCORE_UNPG_RW_PA,
1038 				     VCORE_UNPG_RW_SZ);
1039 			ADD_PHYS_MEM(MEM_AREA_NEX_RAM_RW, VCORE_NEX_RW_PA,
1040 				     VCORE_NEX_RW_SZ);
1041 		} else {
1042 			ADD_PHYS_MEM(MEM_AREA_TEE_RAM_RW, VCORE_UNPG_RW_PA,
1043 				     VCORE_UNPG_RW_SZ);
1044 		}
1045 
1046 		if (IS_ENABLED(CFG_WITH_PAGER)) {
1047 			ADD_PHYS_MEM(MEM_AREA_INIT_RAM_RX, VCORE_INIT_RX_PA,
1048 				     VCORE_INIT_RX_SZ);
1049 			ADD_PHYS_MEM(MEM_AREA_INIT_RAM_RO, VCORE_INIT_RO_PA,
1050 				     VCORE_INIT_RO_SZ);
1051 		}
1052 	} else {
1053 		ADD_PHYS_MEM(MEM_AREA_TEE_RAM, TEE_RAM_START, TEE_RAM_PH_SIZE);
1054 	}
1055 
1056 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
1057 		ADD_PHYS_MEM(MEM_AREA_SEC_RAM_OVERALL, TRUSTED_DRAM_BASE,
1058 			     TRUSTED_DRAM_SIZE);
1059 	} else {
1060 		/*
1061 		 * Every guest will have its own TA RAM if virtualization
1062 		 * support is enabled.
1063 		 */
1064 		paddr_t ta_base = 0;
1065 		size_t ta_size = 0;
1066 
1067 		core_mmu_get_ta_range(&ta_base, &ta_size);
1068 		ADD_PHYS_MEM(MEM_AREA_TA_RAM, ta_base, ta_size);
1069 	}
1070 
1071 	if (IS_ENABLED(CFG_CORE_SANITIZE_KADDRESS) &&
1072 	    IS_ENABLED(CFG_WITH_PAGER)) {
1073 		/*
1074 		 * ASan RAM is part of MEM_AREA_TEE_RAM_RW when the pager is
1075 		 * disabled.
1076 		 */
1077 		ADD_PHYS_MEM(MEM_AREA_TEE_ASAN, ASAN_MAP_PA, ASAN_MAP_SZ);
1078 	}
1079 
1080 #undef ADD_PHYS_MEM
1081 
1082 	/* Collect device memory info from SP manifest */
1083 	if (IS_ENABLED(CFG_CORE_SEL2_SPMC))
1084 		collect_device_mem_ranges(memory_map, num_elems, &last);
1085 
1086 	for (mem = phys_mem_map_begin; mem < phys_mem_map_end; mem++) {
1087 		/* Only an unmapped virtual range may have a null phys addr */
1088 		assert(mem->addr || !core_mmu_type_to_attr(mem->type));
1089 
1090 		add_phys_mem(memory_map, num_elems, mem->name, mem->type,
1091 			     mem->addr, mem->size, &last);
1092 	}
1093 
1094 	if (IS_ENABLED(CFG_SECURE_DATA_PATH))
1095 		verify_special_mem_areas(memory_map, phys_sdp_mem_begin,
1096 					 phys_sdp_mem_end, "SDP");
1097 
1098 	add_va_space(memory_map, num_elems, MEM_AREA_RES_VASPACE,
1099 		     CFG_RESERVED_VASPACE_SIZE, &last);
1100 
1101 	add_va_space(memory_map, num_elems, MEM_AREA_SHM_VASPACE,
1102 		     SHM_VASPACE_SIZE, &last);
1103 
1104 	memory_map[last].type = MEM_AREA_END;
1105 
1106 	return last;
1107 }
1108 
1109 static void assign_mem_granularity(struct tee_mmap_region *memory_map)
1110 {
1111 	struct tee_mmap_region *map = NULL;
1112 
1113 	/*
1114 	 * Assign region sizes, note that MEM_AREA_TEE_RAM always uses
1115 	 * SMALL_PAGE_SIZE.
1116 	 */
1117 	for (map = memory_map; !core_mmap_is_end_of_table(map); map++) {
1118 		paddr_t mask = map->pa | map->size;
1119 
1120 		if (!(mask & CORE_MMU_PGDIR_MASK))
1121 			map->region_size = CORE_MMU_PGDIR_SIZE;
1122 		else if (!(mask & SMALL_PAGE_MASK))
1123 			map->region_size = SMALL_PAGE_SIZE;
1124 		else
1125 			panic("Impossible memory alignment");
1126 
1127 		if (map_is_tee_ram(map))
1128 			map->region_size = SMALL_PAGE_SIZE;
1129 	}
1130 }
1131 
1132 static bool place_tee_ram_at_top(paddr_t paddr)
1133 {
1134 	return paddr > BIT64(core_mmu_get_va_width()) / 2;
1135 }
1136 
1137 /*
1138  * The MMU arch driver shall override this function if it helps
1139  * optimize the memory footprint of the address translation tables.
1140  */
1141 bool __weak core_mmu_prefer_tee_ram_at_top(paddr_t paddr)
1142 {
1143 	return place_tee_ram_at_top(paddr);
1144 }
1145 
1146 static bool assign_mem_va_dir(vaddr_t tee_ram_va,
1147 			      struct tee_mmap_region *memory_map,
1148 			      bool tee_ram_at_top)
1149 {
1150 	struct tee_mmap_region *map = NULL;
1151 	vaddr_t va = 0;
1152 	bool va_is_secure = true;
1153 
1154 	/*
1155 	 * tee_ram_va might equal 0 when CFG_CORE_ASLR=y.
1156 	 * 0 is by design an invalid va, so return false directly.
1157 	 */
1158 	if (!tee_ram_va)
1159 		return false;
1160 
1161 	/* Clear any previous assignments */
1162 	for (map = memory_map; !core_mmap_is_end_of_table(map); map++)
1163 		map->va = 0;
1164 
1165 	/*
1166 	 * TEE RAM regions are always aligned with region_size.
1167 	 *
1168 	 * Note that MEM_AREA_PAGER_VASPACE also counts as TEE RAM here
1169 	 * since it handles virtual memory which covers the part of the ELF
1170 	 * that cannot fit directly into memory.
1171 	 */
1172 	va = tee_ram_va;
1173 	for (map = memory_map; !core_mmap_is_end_of_table(map); map++) {
1174 		if (map_is_tee_ram(map) ||
1175 		    map->type == MEM_AREA_PAGER_VASPACE) {
1176 			assert(!(va & (map->region_size - 1)));
1177 			assert(!(map->size & (map->region_size - 1)));
1178 			map->va = va;
1179 			if (ADD_OVERFLOW(va, map->size, &va))
1180 				return false;
1181 			if (va >= BIT64(core_mmu_get_va_width()))
1182 				return false;
1183 		}
1184 	}
1185 
1186 	if (tee_ram_at_top) {
1187 		/*
1188 		 * Map non-tee ram regions at addresses lower than the tee
1189 		 * ram region.
1190 		 */
1191 		va = tee_ram_va;
1192 		for (map = memory_map; !core_mmap_is_end_of_table(map); map++) {
1193 			map->attr = core_mmu_type_to_attr(map->type);
1194 			if (map->va)
1195 				continue;
1196 
1197 			if (!IS_ENABLED(CFG_WITH_LPAE) &&
1198 			    va_is_secure != map_is_secure(map)) {
1199 				va_is_secure = !va_is_secure;
1200 				va = ROUNDDOWN(va, CORE_MMU_PGDIR_SIZE);
1201 			}
1202 
1203 			if (SUB_OVERFLOW(va, map->size, &va))
1204 				return false;
1205 			va = ROUNDDOWN(va, map->region_size);
1206 			/*
1207 			 * Make sure that va is aligned with pa for
1208 			 * efficient pgdir mapping. Basically (pa &
1209 			 * pgdir_mask) should equal (va & pgdir_mask).
1210 			 */
1211 			if (map->size > 2 * CORE_MMU_PGDIR_SIZE) {
1212 				if (SUB_OVERFLOW(va, CORE_MMU_PGDIR_SIZE, &va))
1213 					return false;
1214 				va += (map->pa - va) & CORE_MMU_PGDIR_MASK;
1215 			}
1216 			map->va = va;
1217 		}
1218 	} else {
1219 		/*
1220 		 * Map non-tee ram regions at addresses higher than the tee
1221 		 * ram region.
1222 		 */
1223 		for (map = memory_map; !core_mmap_is_end_of_table(map); map++) {
1224 			map->attr = core_mmu_type_to_attr(map->type);
1225 			if (map->va)
1226 				continue;
1227 
1228 			if (!IS_ENABLED(CFG_WITH_LPAE) &&
1229 			    va_is_secure != map_is_secure(map)) {
1230 				va_is_secure = !va_is_secure;
1231 				if (ROUNDUP_OVERFLOW(va, CORE_MMU_PGDIR_SIZE,
1232 						     &va))
1233 					return false;
1234 			}
1235 
1236 			if (ROUNDUP_OVERFLOW(va, map->region_size, &va))
1237 				return false;
1238 			/*
1239 			 * Make sure that va is aligned with pa for
1240 			 * efficient pgdir mapping. Basically (pa &
1241 			 * pgdir_mask) should equal (va & pgdir_mask).
1242 			 */
1243 			if (map->size > 2 * CORE_MMU_PGDIR_SIZE) {
1244 				vaddr_t offs = (map->pa - va) &
1245 					       CORE_MMU_PGDIR_MASK;
1246 
1247 				if (ADD_OVERFLOW(va, offs, &va))
1248 					return false;
1249 			}
1250 
1251 			map->va = va;
1252 			if (ADD_OVERFLOW(va, map->size, &va))
1253 				return false;
1254 			if (va >= BIT64(core_mmu_get_va_width()))
1255 				return false;
1256 		}
1257 	}
1258 
1259 	return true;
1260 }
1261 
1262 static bool assign_mem_va(vaddr_t tee_ram_va,
1263 			  struct tee_mmap_region *memory_map)
1264 {
1265 	bool tee_ram_at_top = place_tee_ram_at_top(tee_ram_va);
1266 
1267 	/*
1268 	 * Check that we're not overlapping with the user VA range.
1269 	 */
1270 	if (IS_ENABLED(CFG_WITH_LPAE)) {
1271 		/*
1272 		 * User VA range is supposed to be defined after these
1273 		 * mappings have been established.
1274 		 */
1275 		assert(!core_mmu_user_va_range_is_defined());
1276 	} else {
1277 		vaddr_t user_va_base = 0;
1278 		size_t user_va_size = 0;
1279 
1280 		assert(core_mmu_user_va_range_is_defined());
1281 		core_mmu_get_user_va_range(&user_va_base, &user_va_size);
1282 		if (tee_ram_va < (user_va_base + user_va_size))
1283 			return false;
1284 	}
1285 
1286 	if (IS_ENABLED(CFG_WITH_PAGER)) {
1287 		bool prefered_dir = core_mmu_prefer_tee_ram_at_top(tee_ram_va);
1288 
1289 		/* Try whole mapping covered by a single base xlat entry */
1290 		if (prefered_dir != tee_ram_at_top &&
1291 		    assign_mem_va_dir(tee_ram_va, memory_map, prefered_dir))
1292 			return true;
1293 	}
1294 
1295 	return assign_mem_va_dir(tee_ram_va, memory_map, tee_ram_at_top);
1296 }
1297 
1298 static int cmp_init_mem_map(const void *a, const void *b)
1299 {
1300 	const struct tee_mmap_region *mm_a = a;
1301 	const struct tee_mmap_region *mm_b = b;
1302 	int rc = 0;
1303 
1304 	rc = CMP_TRILEAN(mm_a->region_size, mm_b->region_size);
1305 	if (!rc)
1306 		rc = CMP_TRILEAN(mm_a->pa, mm_b->pa);
1307 	/*
1308 	 * 32-bit MMU descriptors cannot mix secure and non-secure mappings in
1309 	 * the same level-2 table, hence secure mappings are sorted apart from
1310 	 * non-secure mappings.
1311 	 */
1312 	if (!rc && !IS_ENABLED(CFG_WITH_LPAE))
1313 		rc = CMP_TRILEAN(map_is_secure(mm_a), map_is_secure(mm_b));
1314 
1315 	return rc;
1316 }
1317 
1318 static bool mem_map_add_id_map(struct tee_mmap_region *memory_map,
1319 			       size_t num_elems, size_t *last,
1320 			       vaddr_t id_map_start, vaddr_t id_map_end)
1321 {
1322 	struct tee_mmap_region *map = NULL;
1323 	vaddr_t start = ROUNDDOWN(id_map_start, SMALL_PAGE_SIZE);
1324 	vaddr_t end = ROUNDUP(id_map_end, SMALL_PAGE_SIZE);
1325 	size_t len = end - start;
1326 
1327 	if (*last >= num_elems - 1) {
1328 		EMSG("Out of entries (%zu) in memory map", num_elems);
1329 		panic();
1330 	}
1331 
1332 	for (map = memory_map; !core_mmap_is_end_of_table(map); map++)
1333 		if (core_is_buffer_intersect(map->va, map->size, start, len))
1334 			return false;
1335 
1336 	*map = (struct tee_mmap_region){
1337 		.type = MEM_AREA_IDENTITY_MAP_RX,
1338 		/*
1339 		 * Could use CORE_MMU_PGDIR_SIZE to potentially save a
1340 		 * translation table, at the increased risk of clashes with
1341 		 * the rest of the memory map.
1342 		 */
1343 		.region_size = SMALL_PAGE_SIZE,
1344 		.pa = start,
1345 		.va = start,
1346 		.size = len,
1347 		.attr = core_mmu_type_to_attr(MEM_AREA_IDENTITY_MAP_RX),
1348 	};
1349 
1350 	(*last)++;
1351 
1352 	return true;
1353 }
1354 
1355 static unsigned long init_mem_map(struct tee_mmap_region *memory_map,
1356 				  size_t num_elems, unsigned long seed)
1357 {
1358 	/*
1359 	 * @id_map_start and @id_map_end describe a physical memory range
1360 	 * that must be mapped Read-Only eXecutable at identical virtual
1361 	 * addresses.
1362 	 */
1363 	vaddr_t id_map_start = (vaddr_t)__identity_map_init_start;
1364 	vaddr_t id_map_end = (vaddr_t)__identity_map_init_end;
1365 	vaddr_t start_addr = secure_only[0].paddr;
1366 	unsigned long offs = 0;
1367 	size_t last = 0;
1368 
1369 	last = collect_mem_ranges(memory_map, num_elems);
1370 	assign_mem_granularity(memory_map);
1371 
1372 	/*
1373 	 * To ease mapping and lower the use of xlat tables, sort the mapping
1374 	 * descriptions, moving small-page regions after the pgdir regions.
1375 	 */
1376 	qsort(memory_map, last, sizeof(struct tee_mmap_region),
1377 	      cmp_init_mem_map);
1378 
1379 	if (IS_ENABLED(CFG_WITH_PAGER))
1380 		add_pager_vaspace(memory_map, num_elems, &last);
1381 
1382 	if (IS_ENABLED(CFG_CORE_ASLR) && seed) {
1383 		vaddr_t base_addr = start_addr + seed;
1384 		const unsigned int va_width = core_mmu_get_va_width();
1385 		const vaddr_t va_mask = GENMASK_64(va_width - 1,
1386 						   SMALL_PAGE_SHIFT);
1387 		vaddr_t ba = base_addr;
1388 		size_t n = 0;
1389 
1390 		for (n = 0; n < 3; n++) {
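		/*
		 * Editor's note (not in the original source): the three
		 * candidate bases tried below are the seeded address itself
		 * and the same address with the top or the second-to-top VA
		 * bit flipped, each truncated to the VA width and
		 * page-aligned by va_mask.
		 */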
1391 			if (n)
1392 				ba = base_addr ^ BIT64(va_width - n);
1393 			ba &= va_mask;
1394 			if (assign_mem_va(ba, memory_map) &&
1395 			    mem_map_add_id_map(memory_map, num_elems, &last,
1396 					       id_map_start, id_map_end)) {
1397 				offs = ba - start_addr;
1398 				DMSG("Mapping core at %#"PRIxVA" offs %#lx",
1399 				     ba, offs);
1400 				goto out;
1401 			} else {
1402 				DMSG("Failed to map core at %#"PRIxVA, ba);
1403 			}
1404 		}
1405 		EMSG("Failed to map core with seed %#lx", seed);
1406 	}
1407 
1408 	if (!assign_mem_va(start_addr, memory_map))
1409 		panic();
1410 
1411 out:
1412 	qsort(memory_map, last, sizeof(struct tee_mmap_region),
1413 	      cmp_mmap_by_lower_va);
1414 
1415 	dump_mmap_table(memory_map);
1416 
1417 	return offs;
1418 }
1419 
1420 static void check_mem_map(struct tee_mmap_region *map)
1421 {
1422 	struct tee_mmap_region *m = NULL;
1423 
1424 	for (m = map; !core_mmap_is_end_of_table(m); m++) {
1425 		switch (m->type) {
1426 		case MEM_AREA_TEE_RAM:
1427 		case MEM_AREA_TEE_RAM_RX:
1428 		case MEM_AREA_TEE_RAM_RO:
1429 		case MEM_AREA_TEE_RAM_RW:
1430 		case MEM_AREA_INIT_RAM_RX:
1431 		case MEM_AREA_INIT_RAM_RO:
1432 		case MEM_AREA_NEX_RAM_RW:
1433 		case MEM_AREA_NEX_RAM_RO:
1434 		case MEM_AREA_IDENTITY_MAP_RX:
1435 			if (!pbuf_is_inside(secure_only, m->pa, m->size))
1436 				panic("TEE_RAM can't fit in secure_only");
1437 			break;
1438 		case MEM_AREA_TA_RAM:
1439 			if (!pbuf_is_inside(secure_only, m->pa, m->size))
1440 				panic("TA_RAM can't fit in secure_only");
1441 			break;
1442 		case MEM_AREA_NSEC_SHM:
1443 			if (!pbuf_is_inside(nsec_shared, m->pa, m->size))
1444 				panic("NS_SHM can't fit in nsec_shared");
1445 			break;
1446 		case MEM_AREA_SEC_RAM_OVERALL:
1447 		case MEM_AREA_TEE_COHERENT:
1448 		case MEM_AREA_TEE_ASAN:
1449 		case MEM_AREA_IO_SEC:
1450 		case MEM_AREA_IO_NSEC:
1451 		case MEM_AREA_EXT_DT:
1452 		case MEM_AREA_MANIFEST_DT:
1453 		case MEM_AREA_TRANSFER_LIST:
1454 		case MEM_AREA_RAM_SEC:
1455 		case MEM_AREA_RAM_NSEC:
1456 		case MEM_AREA_ROM_SEC:
1457 		case MEM_AREA_RES_VASPACE:
1458 		case MEM_AREA_SHM_VASPACE:
1459 		case MEM_AREA_PAGER_VASPACE:
1460 			break;
1461 		default:
1462 			EMSG("Unhandled memtype %d", m->type);
1463 			panic();
1464 		}
1465 	}
1466 }
1467 
1468 static struct tee_mmap_region *get_tmp_mmap(void)
1469 {
1470 	struct tee_mmap_region *tmp_mmap = (void *)__heap1_start;
1471 
1472 #ifdef CFG_WITH_PAGER
1473 	if (__heap1_end - __heap1_start < (ptrdiff_t)sizeof(static_memory_map))
1474 		tmp_mmap = (void *)__heap2_start;
1475 #endif
1476 
1477 	memset(tmp_mmap, 0, sizeof(static_memory_map));
1478 
1479 	return tmp_mmap;
1480 }
1481 
1482 /*
1483  * core_init_mmu_map() - init tee core default memory mapping
1484  *
1485  * This routine sets the static default TEE core mapping. If @seed is > 0
1486  * and CFG_CORE_ASLR is enabled, it will map the TEE core at a location
1487  * based on the seed and return the offset from the link address.
1488  *
1489  * If an error occurs, core_init_mmu_map() is expected to panic.
1490  *
1491  * Note: this function is weak just to make it possible to exclude it from
1492  * the unpaged area.
1493  */
1494 void __weak core_init_mmu_map(unsigned long seed, struct core_mmu_config *cfg)
1495 {
1496 #ifndef CFG_NS_VIRTUALIZATION
1497 	vaddr_t start = ROUNDDOWN((vaddr_t)__nozi_start, SMALL_PAGE_SIZE);
1498 #else
1499 	vaddr_t start = ROUNDDOWN((vaddr_t)__vcore_nex_rw_start,
1500 				  SMALL_PAGE_SIZE);
1501 #endif
1502 	vaddr_t len = ROUNDUP((vaddr_t)__nozi_end, SMALL_PAGE_SIZE) - start;
1503 	struct tee_mmap_region *tmp_mmap = get_tmp_mmap();
1504 	unsigned long offs = 0;
1505 
1506 	if (IS_ENABLED(CFG_CORE_PHYS_RELOCATABLE) &&
1507 	    (core_mmu_tee_load_pa & SMALL_PAGE_MASK))
1508 		panic("OP-TEE load address is not page aligned");
1509 
1510 	check_sec_nsec_mem_config();
1511 
1512 	/*
1513 	 * Add an entry covering the translation tables which will be
1514 	 * involved in some virt_to_phys() and phys_to_virt() conversions.
1515 	 */
1516 	static_memory_map[0] = (struct tee_mmap_region){
1517 		.type = MEM_AREA_TEE_RAM,
1518 		.region_size = SMALL_PAGE_SIZE,
1519 		.pa = start,
1520 		.va = start,
1521 		.size = len,
1522 		.attr = core_mmu_type_to_attr(MEM_AREA_IDENTITY_MAP_RX),
1523 	};
1524 
1525 	COMPILE_TIME_ASSERT(CFG_MMAP_REGIONS >= 13);
1526 	offs = init_mem_map(tmp_mmap, ARRAY_SIZE(static_memory_map), seed);
1527 
1528 	check_mem_map(tmp_mmap);
1529 	core_init_mmu(tmp_mmap);
1530 	dump_xlat_table(0x0, CORE_MMU_BASE_TABLE_LEVEL);
1531 	core_init_mmu_regs(cfg);
1532 	cfg->map_offset = offs;
1533 	memcpy(static_memory_map, tmp_mmap, sizeof(static_memory_map));
1534 }
1535 
1536 bool core_mmu_mattr_is_ok(uint32_t mattr)
1537 {
1538 	/*
1539 	 * Keep in sync with core_mmu_lpae.c:mattr_to_desc and
1540 	 * core_mmu_v7.c:mattr_to_texcb
1541 	 */
1542 
1543 	switch ((mattr >> TEE_MATTR_MEM_TYPE_SHIFT) & TEE_MATTR_MEM_TYPE_MASK) {
1544 	case TEE_MATTR_MEM_TYPE_DEV:
1545 	case TEE_MATTR_MEM_TYPE_STRONGLY_O:
1546 	case TEE_MATTR_MEM_TYPE_CACHED:
1547 	case TEE_MATTR_MEM_TYPE_TAGGED:
1548 		return true;
1549 	default:
1550 		return false;
1551 	}
1552 }
1553 
1554 /*
1555  * Test attributes of a target physical buffer.
1556  *
1557  * Flags: pbuf_is(SECURE, NOT_SECURE, RAM, IOMEM, KEYVAULT).
1558  *
1559  */
1560 bool core_pbuf_is(uint32_t attr, paddr_t pbuf, size_t len)
1561 {
1562 	paddr_t ta_base = 0;
1563 	size_t ta_size = 0;
1564 	struct tee_mmap_region *map;
1565 
1566 	/* Empty buffers comply with anything */
1567 	if (len == 0)
1568 		return true;
1569 
1570 	switch (attr) {
1571 	case CORE_MEM_SEC:
1572 		return pbuf_is_inside(secure_only, pbuf, len);
1573 	case CORE_MEM_NON_SEC:
1574 		return pbuf_is_inside(nsec_shared, pbuf, len) ||
1575 			pbuf_is_nsec_ddr(pbuf, len);
1576 	case CORE_MEM_TEE_RAM:
1577 		return core_is_buffer_inside(pbuf, len, TEE_RAM_START,
1578 							TEE_RAM_PH_SIZE);
1579 	case CORE_MEM_TA_RAM:
1580 		core_mmu_get_ta_range(&ta_base, &ta_size);
1581 		return core_is_buffer_inside(pbuf, len, ta_base, ta_size);
1582 #ifdef CFG_CORE_RESERVED_SHM
1583 	case CORE_MEM_NSEC_SHM:
1584 		return core_is_buffer_inside(pbuf, len, TEE_SHMEM_START,
1585 							TEE_SHMEM_SIZE);
1586 #endif
1587 	case CORE_MEM_SDP_MEM:
1588 		return pbuf_is_sdp_mem(pbuf, len);
1589 	case CORE_MEM_CACHED:
1590 		map = find_map_by_pa(pbuf);
1591 		if (!map || !pbuf_inside_map_area(pbuf, len, map))
1592 			return false;
1593 		return mattr_is_cached(map->attr);
1594 	default:
1595 		return false;
1596 	}
1597 }
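
/*
 * Usage sketch (editor's addition, not part of this file): checking that
 * a physical buffer handed over by the normal world really lies in
 * non-secure, cacheable memory before it is used. The values are up to
 * the caller.
 */
#if 0
static bool ns_buffer_ok_example(paddr_t pa, size_t len)
{
	return core_pbuf_is(CORE_MEM_NON_SEC, pa, len) &&
	       core_pbuf_is(CORE_MEM_CACHED, pa, len);
}
#endif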
1598 
1599 /* test attributes of target virtual buffer (in core mapping) */
1600 bool core_vbuf_is(uint32_t attr, const void *vbuf, size_t len)
1601 {
1602 	paddr_t p;
1603 
1604 	/* Empty buffers comply with anything */
1605 	if (len == 0)
1606 		return true;
1607 
1608 	p = virt_to_phys((void *)vbuf);
1609 	if (!p)
1610 		return false;
1611 
1612 	return core_pbuf_is(attr, p, len);
1613 }
1614 
1615 /* core_va2pa - teecore exported service */
1616 static int __maybe_unused core_va2pa_helper(void *va, paddr_t *pa)
1617 {
1618 	struct tee_mmap_region *map;
1619 
1620 	map = find_map_by_va(va);
1621 	if (!va_is_in_map(map, (vaddr_t)va))
1622 		return -1;
1623 
1624 	/*
1625 	 * We can calculate the PA for a static map. Virtual address ranges
1626 	 * reserved for core dynamic mapping return a 'match' (return 0)
1627 	 * together with an invalid (null) physical address.
1628 	 */
1629 	if (map->pa)
1630 		*pa = map->pa + (vaddr_t)va - map->va;
1631 	else
1632 		*pa = 0;
1633 
1634 	return 0;
1635 }
1636 
1637 static void *map_pa2va(struct tee_mmap_region *map, paddr_t pa, size_t len)
1638 {
1639 	if (!pa_is_in_map(map, pa, len))
1640 		return NULL;
1641 
1642 	return (void *)(vaddr_t)(map->va + pa - map->pa);
1643 }
1644 
1645 /*
1646  * teecore gets some memory area definitions
1647  */
1648 void core_mmu_get_mem_by_type(enum teecore_memtypes type, vaddr_t *s,
1649 			      vaddr_t *e)
1650 {
1651 	struct tee_mmap_region *map = find_map_by_type(type);
1652 
1653 	if (map) {
1654 		*s = map->va;
1655 		*e = map->va + map->size;
1656 	} else {
1657 		*s = 0;
1658 		*e = 0;
1659 	}
1660 }
1661 
1662 enum teecore_memtypes core_mmu_get_type_by_pa(paddr_t pa)
1663 {
1664 	struct tee_mmap_region *map = find_map_by_pa(pa);
1665 
1666 	if (!map)
1667 		return MEM_AREA_MAXTYPE;
1668 	return map->type;
1669 }
1670 
1671 void core_mmu_set_entry(struct core_mmu_table_info *tbl_info, unsigned int idx,
1672 			paddr_t pa, uint32_t attr)
1673 {
1674 	assert(idx < tbl_info->num_entries);
1675 	core_mmu_set_entry_primitive(tbl_info->table, tbl_info->level,
1676 				     idx, pa, attr);
1677 }
1678 
1679 void core_mmu_get_entry(struct core_mmu_table_info *tbl_info, unsigned int idx,
1680 			paddr_t *pa, uint32_t *attr)
1681 {
1682 	assert(idx < tbl_info->num_entries);
1683 	core_mmu_get_entry_primitive(tbl_info->table, tbl_info->level,
1684 				     idx, pa, attr);
1685 }
1686 
1687 static void clear_region(struct core_mmu_table_info *tbl_info,
1688 			 struct tee_mmap_region *region)
1689 {
1690 	unsigned int end = 0;
1691 	unsigned int idx = 0;
1692 
1693 	/* va, len and pa should be block aligned */
1694 	assert(!core_mmu_get_block_offset(tbl_info, region->va));
1695 	assert(!core_mmu_get_block_offset(tbl_info, region->size));
1696 	assert(!core_mmu_get_block_offset(tbl_info, region->pa));
1697 
1698 	idx = core_mmu_va2idx(tbl_info, region->va);
1699 	end = core_mmu_va2idx(tbl_info, region->va + region->size);
1700 
1701 	while (idx < end) {
1702 		core_mmu_set_entry(tbl_info, idx, 0, 0);
1703 		idx++;
1704 	}
1705 }
1706 
1707 static void set_region(struct core_mmu_table_info *tbl_info,
1708 		       struct tee_mmap_region *region)
1709 {
1710 	unsigned int end;
1711 	unsigned int idx;
1712 	paddr_t pa;
1713 
1714 	/* va, len and pa should be block aligned */
1715 	assert(!core_mmu_get_block_offset(tbl_info, region->va));
1716 	assert(!core_mmu_get_block_offset(tbl_info, region->size));
1717 	assert(!core_mmu_get_block_offset(tbl_info, region->pa));
1718 
1719 	idx = core_mmu_va2idx(tbl_info, region->va);
1720 	end = core_mmu_va2idx(tbl_info, region->va + region->size);
1721 	pa = region->pa;
1722 
1723 	while (idx < end) {
1724 		core_mmu_set_entry(tbl_info, idx, pa, region->attr);
1725 		idx++;
1726 		pa += BIT64(tbl_info->shift);
1727 	}
1728 }
1729 
1730 static void set_pg_region(struct core_mmu_table_info *dir_info,
1731 			  struct vm_region *region, struct pgt **pgt,
1732 			  struct core_mmu_table_info *pg_info)
1733 {
1734 	struct tee_mmap_region r = {
1735 		.va = region->va,
1736 		.size = region->size,
1737 		.attr = region->attr,
1738 	};
1739 	vaddr_t end = r.va + r.size;
1740 	uint32_t pgt_attr = (r.attr & TEE_MATTR_SECURE) | TEE_MATTR_TABLE;
1741 
1742 	while (r.va < end) {
1743 		if (!pg_info->table ||
1744 		    r.va >= (pg_info->va_base + CORE_MMU_PGDIR_SIZE)) {
1745 			/*
1746 			 * We're assigning a new translation table.
1747 			 */
1748 			unsigned int idx;
1749 
1750 			/* Virtual addresses must grow */
1751 			assert(r.va > pg_info->va_base);
1752 
1753 			idx = core_mmu_va2idx(dir_info, r.va);
1754 			pg_info->va_base = core_mmu_idx2va(dir_info, idx);
1755 
1756 			/*
1757 			 * Advance pgt to va_base, note that we may need to
1758 			 * skip multiple page tables if there are large
1759 			 * holes in the vm map.
1760 			 */
1761 			while ((*pgt)->vabase < pg_info->va_base) {
1762 				*pgt = SLIST_NEXT(*pgt, link);
1763 				/* We should have allocated enough */
1764 				assert(*pgt);
1765 			}
1766 			assert((*pgt)->vabase == pg_info->va_base);
1767 			pg_info->table = (*pgt)->tbl;
1768 
1769 			core_mmu_set_entry(dir_info, idx,
1770 					   virt_to_phys(pg_info->table),
1771 					   pgt_attr);
1772 		}
1773 
1774 		r.size = MIN(CORE_MMU_PGDIR_SIZE - (r.va - pg_info->va_base),
1775 			     end - r.va);
1776 
1777 		if (!(*pgt)->populated && !mobj_is_paged(region->mobj)) {
1778 			size_t granule = BIT(pg_info->shift);
1779 			size_t offset = r.va - region->va + region->offset;
1780 
1781 			r.size = MIN(r.size,
1782 				     mobj_get_phys_granule(region->mobj));
1783 			r.size = ROUNDUP(r.size, SMALL_PAGE_SIZE);
1784 
1785 			if (mobj_get_pa(region->mobj, offset, granule,
1786 					&r.pa) != TEE_SUCCESS)
1787 				panic("Failed to get PA of unpaged mobj");
1788 			set_region(pg_info, &r);
1789 		}
1790 		r.va += r.size;
1791 	}
1792 }
1793 
1794 static bool can_map_at_level(paddr_t paddr, vaddr_t vaddr,
1795 			     size_t size_left, paddr_t block_size,
1796 			     struct tee_mmap_region *mm __maybe_unused)
1797 {
1798 	/* VA and PA are aligned to block size at current level */
1799 	if ((vaddr | paddr) & (block_size - 1))
1800 		return false;
1801 
1802 	/* Remainder fits into block at current level */
1803 	if (size_left < block_size)
1804 		return false;
1805 
1806 #ifdef CFG_WITH_PAGER
1807 	/*
1808 	 * If the pager is enabled, we need to map TEE RAM and the whole pager
1809 	 * regions with small pages only.
1810 	 */
1811 	if ((map_is_tee_ram(mm) || mm->type == MEM_AREA_PAGER_VASPACE) &&
1812 	    block_size != SMALL_PAGE_SIZE)
1813 		return false;
1814 #endif
1815 
1816 	return true;
1817 }
1818 
1819 void core_mmu_map_region(struct mmu_partition *prtn, struct tee_mmap_region *mm)
1820 {
1821 	struct core_mmu_table_info tbl_info;
1822 	unsigned int idx;
1823 	vaddr_t vaddr = mm->va;
1824 	paddr_t paddr = mm->pa;
1825 	ssize_t size_left = mm->size;
1826 	unsigned int level;
1827 	bool table_found;
1828 	uint32_t old_attr;
1829 
1830 	assert(!((vaddr | paddr) & SMALL_PAGE_MASK));
1831 
1832 	while (size_left > 0) {
1833 		level = CORE_MMU_BASE_TABLE_LEVEL;
1834 
1835 		while (true) {
1836 			paddr_t block_size = 0;
1837 
1838 			assert(core_mmu_level_in_range(level));
1839 
1840 			table_found = core_mmu_find_table(prtn, vaddr, level,
1841 							  &tbl_info);
1842 			if (!table_found)
1843 				panic("can't find table for mapping");
1844 
1845 			block_size = BIT64(tbl_info.shift);
1846 
1847 			idx = core_mmu_va2idx(&tbl_info, vaddr);
1848 			if (!can_map_at_level(paddr, vaddr, size_left,
1849 					      block_size, mm)) {
1850 				bool secure = mm->attr & TEE_MATTR_SECURE;
1851 
1852 				/*
1853 				 * This part of the region can't be mapped at
1854 				 * this level. Need to go deeper.
1855 				 */
1856 				if (!core_mmu_entry_to_finer_grained(&tbl_info,
1857 								     idx,
1858 								     secure))
1859 					panic("Can't divide MMU entry");
1860 				level = tbl_info.next_level;
1861 				continue;
1862 			}
1863 
1864 			/* We can map part of the region at current level */
1865 			core_mmu_get_entry(&tbl_info, idx, NULL, &old_attr);
1866 			if (old_attr)
1867 				panic("Page is already mapped");
1868 
1869 			core_mmu_set_entry(&tbl_info, idx, paddr, mm->attr);
1870 			paddr += block_size;
1871 			vaddr += block_size;
1872 			size_left -= block_size;
1873 
1874 			break;
1875 		}
1876 	}
1877 }
1878 
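/*
 * Map @num_pages physical pages given by the @pages array at the virtual
 * address @vstart using the attributes of @memtype. The range must fall
 * within a single dynamic VA region; page directory entries are split into
 * page tables on demand. If a page address is unaligned, the pages mapped
 * so far are unmapped again and an error is returned.
 */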
1879 TEE_Result core_mmu_map_pages(vaddr_t vstart, paddr_t *pages, size_t num_pages,
1880 			      enum teecore_memtypes memtype)
1881 {
1882 	TEE_Result ret;
1883 	struct core_mmu_table_info tbl_info;
1884 	struct tee_mmap_region *mm;
1885 	unsigned int idx;
1886 	uint32_t old_attr;
1887 	uint32_t exceptions;
1888 	vaddr_t vaddr = vstart;
1889 	size_t i;
1890 	bool secure;
1891 
1892 	assert(!(core_mmu_type_to_attr(memtype) & TEE_MATTR_PX));
1893 
1894 	secure = core_mmu_type_to_attr(memtype) & TEE_MATTR_SECURE;
1895 
1896 	if (vaddr & SMALL_PAGE_MASK)
1897 		return TEE_ERROR_BAD_PARAMETERS;
1898 
1899 	exceptions = mmu_lock();
1900 
1901 	mm = find_map_by_va((void *)vaddr);
1902 	if (!mm || !va_is_in_map(mm, vaddr + num_pages * SMALL_PAGE_SIZE - 1))
1903 		panic("VA does not belong to any known mm region");
1904 
1905 	if (!core_mmu_is_dynamic_vaspace(mm))
1906 		panic("Trying to map into static region");
1907 
1908 	for (i = 0; i < num_pages; i++) {
1909 		if (pages[i] & SMALL_PAGE_MASK) {
1910 			ret = TEE_ERROR_BAD_PARAMETERS;
1911 			goto err;
1912 		}
1913 
1914 		while (true) {
1915 			if (!core_mmu_find_table(NULL, vaddr, UINT_MAX,
1916 						 &tbl_info))
1917 				panic("Can't find pagetable for vaddr");
1918 
1919 			idx = core_mmu_va2idx(&tbl_info, vaddr);
1920 			if (tbl_info.shift == SMALL_PAGE_SHIFT)
1921 				break;
1922 
1923 			/* This is a supertable. Need to divide it. */
1924 			if (!core_mmu_entry_to_finer_grained(&tbl_info, idx,
1925 							     secure))
1926 				panic("Failed to spread pgdir on small tables");
1927 		}
1928 
1929 		core_mmu_get_entry(&tbl_info, idx, NULL, &old_attr);
1930 		if (old_attr)
1931 			panic("Page is already mapped");
1932 
1933 		core_mmu_set_entry(&tbl_info, idx, pages[i],
1934 				   core_mmu_type_to_attr(memtype));
1935 		vaddr += SMALL_PAGE_SIZE;
1936 	}
1937 
1938 	/*
1939 	 * Make sure all the changes to translation tables are visible
1940 	 * before returning. TLB doesn't need to be invalidated as we are
1941 	 * guaranteed that there's no valid mapping in this range.
1942 	 */
1943 	core_mmu_table_write_barrier();
1944 	mmu_unlock(exceptions);
1945 
1946 	return TEE_SUCCESS;
1947 err:
1948 	mmu_unlock(exceptions);
1949 
1950 	if (i)
1951 		core_mmu_unmap_pages(vstart, i);
1952 
1953 	return ret;
1954 }
1955 
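/*
 * Same as core_mmu_map_pages() but for a physically contiguous range of
 * @num_pages pages starting at @pstart. Both @vstart and @pstart must be
 * small-page aligned and the target range must be a dynamic VA region.
 */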
1956 TEE_Result core_mmu_map_contiguous_pages(vaddr_t vstart, paddr_t pstart,
1957 					 size_t num_pages,
1958 					 enum teecore_memtypes memtype)
1959 {
1960 	struct core_mmu_table_info tbl_info = { };
1961 	struct tee_mmap_region *mm = NULL;
1962 	unsigned int idx = 0;
1963 	uint32_t old_attr = 0;
1964 	uint32_t exceptions = 0;
1965 	vaddr_t vaddr = vstart;
1966 	paddr_t paddr = pstart;
1967 	size_t i = 0;
1968 	bool secure = false;
1969 
1970 	assert(!(core_mmu_type_to_attr(memtype) & TEE_MATTR_PX));
1971 
1972 	secure = core_mmu_type_to_attr(memtype) & TEE_MATTR_SECURE;
1973 
1974 	if ((vaddr | paddr) & SMALL_PAGE_MASK)
1975 		return TEE_ERROR_BAD_PARAMETERS;
1976 
1977 	exceptions = mmu_lock();
1978 
1979 	mm = find_map_by_va((void *)vaddr);
1980 	if (!mm || !va_is_in_map(mm, vaddr + num_pages * SMALL_PAGE_SIZE - 1))
1981 		panic("VA does not belong to any known mm region");
1982 
1983 	if (!core_mmu_is_dynamic_vaspace(mm))
1984 		panic("Trying to map into static region");
1985 
1986 	for (i = 0; i < num_pages; i++) {
1987 		while (true) {
1988 			if (!core_mmu_find_table(NULL, vaddr, UINT_MAX,
1989 						 &tbl_info))
1990 				panic("Can't find pagetable for vaddr");
1991 
1992 			idx = core_mmu_va2idx(&tbl_info, vaddr);
1993 			if (tbl_info.shift == SMALL_PAGE_SHIFT)
1994 				break;
1995 
1996 			/* This is a supertable. Need to divide it. */
1997 			if (!core_mmu_entry_to_finer_grained(&tbl_info, idx,
1998 							     secure))
1999 				panic("Failed to spread pgdir on small tables");
2000 		}
2001 
2002 		core_mmu_get_entry(&tbl_info, idx, NULL, &old_attr);
2003 		if (old_attr)
2004 			panic("Page is already mapped");
2005 
2006 		core_mmu_set_entry(&tbl_info, idx, paddr,
2007 				   core_mmu_type_to_attr(memtype));
2008 		paddr += SMALL_PAGE_SIZE;
2009 		vaddr += SMALL_PAGE_SIZE;
2010 	}
2011 
2012 	/*
2013 	 * Make sure all the changes to translation tables are visible
2014 	 * before returning. TLB doesn't need to be invalidated as we are
2015 	 * guaranteed that there's no valid mapping in this range.
2016 	 */
2017 	core_mmu_table_write_barrier();
2018 	mmu_unlock(exceptions);
2019 
2020 	return TEE_SUCCESS;
2021 }
2022 
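/*
 * Clear the small-page entries backing @num_pages pages starting at
 * @vstart and invalidate the TLBs. The range must belong to a dynamic VA
 * region and must be covered by small-page level tables.
 */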
2023 void core_mmu_unmap_pages(vaddr_t vstart, size_t num_pages)
2024 {
2025 	struct core_mmu_table_info tbl_info;
2026 	struct tee_mmap_region *mm;
2027 	size_t i;
2028 	unsigned int idx;
2029 	uint32_t exceptions;
2030 
2031 	exceptions = mmu_lock();
2032 
2033 	mm = find_map_by_va((void *)vstart);
2034 	if (!mm || !va_is_in_map(mm, vstart + num_pages * SMALL_PAGE_SIZE - 1))
2035 		panic("VA does not belong to any known mm region");
2036 
2037 	if (!core_mmu_is_dynamic_vaspace(mm))
2038 		panic("Trying to unmap static region");
2039 
2040 	for (i = 0; i < num_pages; i++, vstart += SMALL_PAGE_SIZE) {
2041 		if (!core_mmu_find_table(NULL, vstart, UINT_MAX, &tbl_info))
2042 			panic("Can't find pagetable");
2043 
2044 		if (tbl_info.shift != SMALL_PAGE_SHIFT)
2045 			panic("Invalid pagetable level");
2046 
2047 		idx = core_mmu_va2idx(&tbl_info, vstart);
2048 		core_mmu_set_entry(&tbl_info, idx, 0, 0);
2049 	}
2050 	tlbi_all();
2051 
2052 	mmu_unlock(exceptions);
2053 }
2054 
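/*
 * Populate the user mode mapping described by @uctx into the directory
 * @dir_info: all needed page tables are fetched from the context's pgt
 * cache up front, filled in for each VM region and finally marked as
 * populated.
 */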
2055 void core_mmu_populate_user_map(struct core_mmu_table_info *dir_info,
2056 				struct user_mode_ctx *uctx)
2057 {
2058 	struct core_mmu_table_info pg_info = { };
2059 	struct pgt_cache *pgt_cache = &uctx->pgt_cache;
2060 	struct pgt *pgt = NULL;
2061 	struct pgt *p = NULL;
2062 	struct vm_region *r = NULL;
2063 
2064 	if (TAILQ_EMPTY(&uctx->vm_info.regions))
2065 		return; /* Nothing to map */
2066 
2067 	/*
2068 	 * Allocate all page tables in advance.
2069 	 */
2070 	pgt_get_all(uctx);
2071 	pgt = SLIST_FIRST(pgt_cache);
2072 
2073 	core_mmu_set_info_table(&pg_info, dir_info->next_level, 0, NULL);
2074 
2075 	TAILQ_FOREACH(r, &uctx->vm_info.regions, link)
2076 		set_pg_region(dir_info, r, &pgt, &pg_info);
2077 	/* Record that the translation tables now are populated. */
2078 	SLIST_FOREACH(p, pgt_cache, link) {
2079 		p->populated = true;
2080 		if (p == pgt)
2081 			break;
2082 	}
2083 	assert(p == pgt);
2084 }
2085 
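/*
 * Remove a mapping previously added with core_mmu_add_mapping(). The
 * (@type, @addr, @len) triplet must match a whole static_memory_map entry
 * once rounded to the translation granule. The freed VA range is merged
 * back into MEM_AREA_RES_VASPACE when it is immediately adjacent to it.
 */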
2086 TEE_Result core_mmu_remove_mapping(enum teecore_memtypes type, void *addr,
2087 				   size_t len)
2088 {
2089 	struct core_mmu_table_info tbl_info = { };
2090 	struct tee_mmap_region *res_map = NULL;
2091 	struct tee_mmap_region *map = NULL;
2092 	paddr_t pa = virt_to_phys(addr);
2093 	size_t granule = 0;
2094 	ptrdiff_t i = 0;
2095 	paddr_t p = 0;
2096 	size_t l = 0;
2097 
2098 	map = find_map_by_type_and_pa(type, pa, len);
2099 	if (!map)
2100 		return TEE_ERROR_GENERIC;
2101 
2102 	res_map = find_map_by_type(MEM_AREA_RES_VASPACE);
2103 	if (!res_map)
2104 		return TEE_ERROR_GENERIC;
2105 	if (!core_mmu_find_table(NULL, res_map->va, UINT_MAX, &tbl_info))
2106 		return TEE_ERROR_GENERIC;
2107 	granule = BIT(tbl_info.shift);
2108 
2109 	if (map < static_memory_map ||
2110 	    map >= static_memory_map + ARRAY_SIZE(static_memory_map))
2111 		return TEE_ERROR_GENERIC;
2112 	i = map - static_memory_map;
2113 
2114 	/* Check that we have a full match */
2115 	p = ROUNDDOWN(pa, granule);
2116 	l = ROUNDUP(len + pa - p, granule);
2117 	if (map->pa != p || map->size != l)
2118 		return TEE_ERROR_GENERIC;
2119 
2120 	clear_region(&tbl_info, map);
2121 	tlbi_all();
2122 
2123 	/* If possible, give the VA range back to res_map */
2124 	if (res_map->va - map->size == map->va) {
2125 		res_map->va -= map->size;
2126 		res_map->size += map->size;
2127 	}
2128 
2129 	/* Remove the entry. */
2130 	memmove(map, map + 1,
2131 		(ARRAY_SIZE(static_memory_map) - i - 1) * sizeof(*map));
2132 
2133 	/* Clear the last new entry in case it was used */
2134 	memset(static_memory_map + ARRAY_SIZE(static_memory_map) - 1,
2135 	       0, sizeof(*map));
2136 
2137 	return TEE_SUCCESS;
2138 }
2139 
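/*
 * Return the only mapping of @type, provided exactly one such mapping
 * exists in the memory map and it is at least @len bytes large, otherwise
 * return NULL.
 */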
2140 struct tee_mmap_region *
2141 core_mmu_find_mapping_exclusive(enum teecore_memtypes type, size_t len)
2142 {
2143 	struct tee_mmap_region *map = NULL;
2144 	struct tee_mmap_region *map_found = NULL;
2145 
2146 	if (!len)
2147 		return NULL;
2148 
2149 	for (map = get_memory_map(); !core_mmap_is_end_of_table(map); map++) {
2150 		if (map->type != type)
2151 			continue;
2152 
2153 		if (map_found)
2154 			return NULL;
2155 
2156 		map_found = map;
2157 	}
2158 
2159 	if (!map_found || map_found->size < len)
2160 		return NULL;
2161 
2162 	return map_found;
2163 }
2164 
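/*
 * Map @len bytes of physical memory at @addr with the attributes of @type
 * and return the corresponding virtual address, or NULL on failure. If the
 * range is already covered by a mapping of the same type that mapping is
 * reused, otherwise a granule-aligned chunk is carved out of the
 * MEM_AREA_RES_VASPACE region and recorded in static_memory_map.
 *
 * Illustrative use only (dev_base_pa and dev_size are made-up placeholders):
 *
 *	void *va = core_mmu_add_mapping(MEM_AREA_IO_SEC, dev_base_pa,
 *					dev_size);
 *	if (!va)
 *		panic();
 */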
2165 void *core_mmu_add_mapping(enum teecore_memtypes type, paddr_t addr, size_t len)
2166 {
2167 	struct core_mmu_table_info tbl_info;
2168 	struct tee_mmap_region *map;
2169 	size_t n;
2170 	size_t granule;
2171 	paddr_t p;
2172 	size_t l;
2173 
2174 	if (!len)
2175 		return NULL;
2176 
2177 	if (!core_mmu_check_end_pa(addr, len))
2178 		return NULL;
2179 
2180 	/* Check if the memory is already mapped */
2181 	map = find_map_by_type_and_pa(type, addr, len);
2182 	if (map && pbuf_inside_map_area(addr, len, map))
2183 		return (void *)(vaddr_t)(map->va + addr - map->pa);
2184 
2185 	/* Find the reserved va space used for late mappings */
2186 	map = find_map_by_type(MEM_AREA_RES_VASPACE);
2187 	if (!map)
2188 		return NULL;
2189 
2190 	if (!core_mmu_find_table(NULL, map->va, UINT_MAX, &tbl_info))
2191 		return NULL;
2192 
2193 	granule = BIT64(tbl_info.shift);
2194 	p = ROUNDDOWN(addr, granule);
2195 	l = ROUNDUP(len + addr - p, granule);
2196 
2197 	/* Ban overflowing virtual addresses */
2198 	if (map->size < l)
2199 		return NULL;
2200 
2201 	/*
2202 	 * Something is wrong if we can't fit the va range into the selected
2203 	 * table: the reserved va range is possibly misaligned with the
2204 	 * granule.
2205 	 */
2206 	if (core_mmu_va2idx(&tbl_info, map->va + len) >= tbl_info.num_entries)
2207 		return NULL;
2208 
2209 	/* Find end of the memory map */
2210 	n = 0;
2211 	while (!core_mmap_is_end_of_table(static_memory_map + n))
2212 		n++;
2213 
2214 	if (n < (ARRAY_SIZE(static_memory_map) - 1)) {
2215 		/* There's room for another entry */
2216 		static_memory_map[n].va = map->va;
2217 		static_memory_map[n].size = l;
2218 		static_memory_map[n + 1].type = MEM_AREA_END;
2219 		map->va += l;
2220 		map->size -= l;
2221 		map = static_memory_map + n;
2222 	} else {
2223 		/*
2224 		 * There isn't room for another entry, steal the reserved
2225 		 * entry as it's not useful for anything else any longer.
2226 		 */
2227 		map->size = l;
2228 	}
2229 	map->type = type;
2230 	map->region_size = granule;
2231 	map->attr = core_mmu_type_to_attr(type);
2232 	map->pa = p;
2233 
2234 	set_region(&tbl_info, map);
2235 
2236 	/* Make sure the new entry is visible before continuing. */
2237 	core_mmu_table_write_barrier();
2238 
2239 	return (void *)(vaddr_t)(map->va + addr - map->pa);
2240 }
2241 
2242 #ifdef CFG_WITH_PAGER
2243 static vaddr_t get_linear_map_end_va(void)
2244 {
2245 	/* This is kept in sync with the generic linker script kern.ld.S */
2246 	return (vaddr_t)__heap2_end;
2247 }
2248 
2249 static paddr_t get_linear_map_end_pa(void)
2250 {
2251 	return get_linear_map_end_va() - boot_mmu_config.map_offset;
2252 }
2253 #endif
2254 
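/*
 * Debug-only sanity check: verify that @pa is what a lookup of @va through
 * the active user mapping, the pager's tables or the static core mapping
 * yields, and panic on any mismatch.
 */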
2255 #if defined(CFG_TEE_CORE_DEBUG)
2256 static void check_pa_matches_va(void *va, paddr_t pa)
2257 {
2258 	TEE_Result res = TEE_ERROR_GENERIC;
2259 	vaddr_t v = (vaddr_t)va;
2260 	paddr_t p = 0;
2261 	struct core_mmu_table_info ti __maybe_unused = { };
2262 
2263 	if (core_mmu_user_va_range_is_defined()) {
2264 		vaddr_t user_va_base = 0;
2265 		size_t user_va_size = 0;
2266 
2267 		core_mmu_get_user_va_range(&user_va_base, &user_va_size);
2268 		if (v >= user_va_base &&
2269 		    v <= (user_va_base - 1 + user_va_size)) {
2270 			if (!core_mmu_user_mapping_is_active()) {
2271 				if (pa)
2272 					panic("issue in linear address space");
2273 				return;
2274 			}
2275 
2276 			res = vm_va2pa(to_user_mode_ctx(thread_get_tsd()->ctx),
2277 				       va, &p);
2278 			if (res == TEE_ERROR_NOT_SUPPORTED)
2279 				return;
2280 			if (res == TEE_SUCCESS && pa != p)
2281 				panic("bad pa");
2282 			if (res != TEE_SUCCESS && pa)
2283 				panic("false pa");
2284 			return;
2285 		}
2286 	}
2287 #ifdef CFG_WITH_PAGER
2288 	if (is_unpaged(va)) {
2289 		if (v - boot_mmu_config.map_offset != pa)
2290 			panic("issue in linear address space");
2291 		return;
2292 	}
2293 
2294 	if (tee_pager_get_table_info(v, &ti)) {
2295 		uint32_t a;
2296 
2297 		/*
2298 		 * Lookups in the page tables managed by the pager are
2299 		 * dangerous for addresses in the paged area as those pages
2300 		 * change all the time. But some ranges are safe, for
2301 		 * instance rw-locked areas while the page is populated.
2302 		 */
2303 		core_mmu_get_entry(&ti, core_mmu_va2idx(&ti, v), &p, &a);
2304 		if (a & TEE_MATTR_VALID_BLOCK) {
2305 			paddr_t mask = BIT64(ti.shift) - 1;
2306 
2307 			p |= v & mask;
2308 			if (pa != p)
2309 				panic();
2310 		} else {
2311 			if (pa)
2312 				panic();
2313 		}
2314 		return;
2315 	}
2316 #endif
2317 
2318 	if (!core_va2pa_helper(va, &p)) {
2319 		/* Verify only the static mapping (non-null phys addr case) */
2320 		if (p && pa != p) {
2321 			DMSG("va %p maps 0x%" PRIxPA ", expect 0x%" PRIxPA,
2322 			     va, p, pa);
2323 			panic();
2324 		}
2325 	} else {
2326 		if (pa) {
2327 			DMSG("va %p unmapped, expect 0x%" PRIxPA, va, pa);
2328 			panic();
2329 		}
2330 	}
2331 }
2332 #else
2333 static void check_pa_matches_va(void *va __unused, paddr_t pa __unused)
2334 {
2335 }
2336 #endif
2337 
2338 paddr_t virt_to_phys(void *va)
2339 {
2340 	paddr_t pa = 0;
2341 
2342 	if (!arch_va2pa_helper(va, &pa))
2343 		pa = 0;
2344 	check_pa_matches_va(memtag_strip_tag(va), pa);
2345 	return pa;
2346 }
2347 
2348 #if defined(CFG_TEE_CORE_DEBUG)
2349 static void check_va_matches_pa(paddr_t pa, void *va)
2350 {
2351 	paddr_t p = 0;
2352 
2353 	if (!va)
2354 		return;
2355 
2356 	p = virt_to_phys(va);
2357 	if (p != pa) {
2358 		DMSG("va %p maps 0x%" PRIxPA " expect 0x%" PRIxPA, va, p, pa);
2359 		panic();
2360 	}
2361 }
2362 #else
2363 static void check_va_matches_pa(paddr_t pa __unused, void *va __unused)
2364 {
2365 }
2366 #endif
2367 
2368 static void *phys_to_virt_ts_vaspace(paddr_t pa, size_t len)
2369 {
2370 	if (!core_mmu_user_mapping_is_active())
2371 		return NULL;
2372 
2373 	return vm_pa2va(to_user_mode_ctx(thread_get_tsd()->ctx), pa, len);
2374 }
2375 
2376 #ifdef CFG_WITH_PAGER
2377 static void *phys_to_virt_tee_ram(paddr_t pa, size_t len)
2378 {
2379 	paddr_t end_pa = 0;
2380 
2381 	if (SUB_OVERFLOW(len, 1, &end_pa) || ADD_OVERFLOW(pa, end_pa, &end_pa))
2382 		return NULL;
2383 
2384 	if (pa >= TEE_LOAD_ADDR && pa < get_linear_map_end_pa()) {
2385 		if (end_pa > get_linear_map_end_pa())
2386 			return NULL;
2387 		return (void *)(vaddr_t)(pa + boot_mmu_config.map_offset);
2388 	}
2389 
2390 	return tee_pager_phys_to_virt(pa, len);
2391 }
2392 #else
2393 static void *phys_to_virt_tee_ram(paddr_t pa, size_t len)
2394 {
2395 	struct tee_mmap_region *mmap = NULL;
2396 
2397 	mmap = find_map_by_type_and_pa(MEM_AREA_TEE_RAM, pa, len);
2398 	if (!mmap)
2399 		mmap = find_map_by_type_and_pa(MEM_AREA_NEX_RAM_RW, pa, len);
2400 	if (!mmap)
2401 		mmap = find_map_by_type_and_pa(MEM_AREA_NEX_RAM_RO, pa, len);
2402 	if (!mmap)
2403 		mmap = find_map_by_type_and_pa(MEM_AREA_TEE_RAM_RW, pa, len);
2404 	if (!mmap)
2405 		mmap = find_map_by_type_and_pa(MEM_AREA_TEE_RAM_RO, pa, len);
2406 	if (!mmap)
2407 		mmap = find_map_by_type_and_pa(MEM_AREA_TEE_RAM_RX, pa, len);
2408 	/*
2409 	 * Note that MEM_AREA_INIT_RAM_RO and MEM_AREA_INIT_RAM_RX are only
2410 	 * used with pager and not needed here.
2411 	 */
2412 	return map_pa2va(mmap, pa, len);
2413 }
2414 #endif
2415 
2416 void *phys_to_virt(paddr_t pa, enum teecore_memtypes m, size_t len)
2417 {
2418 	void *va = NULL;
2419 
2420 	switch (m) {
2421 	case MEM_AREA_TS_VASPACE:
2422 		va = phys_to_virt_ts_vaspace(pa, len);
2423 		break;
2424 	case MEM_AREA_TEE_RAM:
2425 	case MEM_AREA_TEE_RAM_RX:
2426 	case MEM_AREA_TEE_RAM_RO:
2427 	case MEM_AREA_TEE_RAM_RW:
2428 	case MEM_AREA_NEX_RAM_RO:
2429 	case MEM_AREA_NEX_RAM_RW:
2430 		va = phys_to_virt_tee_ram(pa, len);
2431 		break;
2432 	case MEM_AREA_SHM_VASPACE:
2433 		/* Finding a VA from a PA in dynamic SHM is not yet supported */
2434 		va = NULL;
2435 		break;
2436 	default:
2437 		va = map_pa2va(find_map_by_type_and_pa(m, pa, len), pa, len);
2438 	}
2439 	if (m != MEM_AREA_SEC_RAM_OVERALL)
2440 		check_va_matches_pa(pa, va);
2441 	return va;
2442 }
2443 
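/* Translate a physical I/O address using the secure or non-secure I/O map */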
2444 void *phys_to_virt_io(paddr_t pa, size_t len)
2445 {
2446 	struct tee_mmap_region *map = NULL;
2447 	void *va = NULL;
2448 
2449 	map = find_map_by_type_and_pa(MEM_AREA_IO_SEC, pa, len);
2450 	if (!map)
2451 		map = find_map_by_type_and_pa(MEM_AREA_IO_NSEC, pa, len);
2452 	if (!map)
2453 		return NULL;
2454 	va = map_pa2va(map, pa, len);
2455 	check_va_matches_pa(pa, va);
2456 	return va;
2457 }
2458 
2459 vaddr_t core_mmu_get_va(paddr_t pa, enum teecore_memtypes type, size_t len)
2460 {
2461 	if (cpu_mmu_enabled())
2462 		return (vaddr_t)phys_to_virt(pa, type, len);
2463 
2464 	return (vaddr_t)pa;
2465 }
2466 
2467 #ifdef CFG_WITH_PAGER
2468 bool is_unpaged(const void *va)
2469 {
2470 	vaddr_t v = (vaddr_t)va;
2471 
2472 	return v >= VCORE_START_VA && v < get_linear_map_end_va();
2473 }
2474 #endif
2475 
2476 #ifdef CFG_NS_VIRTUALIZATION
2477 bool is_nexus(const void *va)
2478 {
2479 	vaddr_t v = (vaddr_t)va;
2480 
2481 	return v >= VCORE_START_VA && v < VCORE_NEX_RW_PA + VCORE_NEX_RW_SZ;
2482 }
2483 #endif
2484 
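/*
 * Hand the static memory map and the (at most two) secure-only physical
 * ranges over to the virtualization support code.
 */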
2485 void core_mmu_init_virtualization(void)
2486 {
2487 	paddr_t b1 = 0;
2488 	paddr_size_t s1 = 0;
2489 
2490 	static_assert(ARRAY_SIZE(secure_only) <= 2);
2491 	if (ARRAY_SIZE(secure_only) == 2) {
2492 		b1 = secure_only[1].paddr;
2493 		s1 = secure_only[1].size;
2494 	}
2495 	virt_init_memory(static_memory_map, secure_only[0].paddr,
2496 			 secure_only[0].size, b1, s1);
2497 }
2498 
2499 vaddr_t io_pa_or_va(struct io_pa_va *p, size_t len)
2500 {
2501 	assert(p->pa);
2502 	if (cpu_mmu_enabled()) {
2503 		if (!p->va)
2504 			p->va = (vaddr_t)phys_to_virt_io(p->pa, len);
2505 		assert(p->va);
2506 		return p->va;
2507 	}
2508 	return p->pa;
2509 }
2510 
2511 vaddr_t io_pa_or_va_secure(struct io_pa_va *p, size_t len)
2512 {
2513 	assert(p->pa);
2514 	if (cpu_mmu_enabled()) {
2515 		if (!p->va)
2516 			p->va = (vaddr_t)phys_to_virt(p->pa, MEM_AREA_IO_SEC,
2517 						      len);
2518 		assert(p->va);
2519 		return p->va;
2520 	}
2521 	return p->pa;
2522 }
2523 
2524 vaddr_t io_pa_or_va_nsec(struct io_pa_va *p, size_t len)
2525 {
2526 	assert(p->pa);
2527 	if (cpu_mmu_enabled()) {
2528 		if (!p->va)
2529 			p->va = (vaddr_t)phys_to_virt(p->pa, MEM_AREA_IO_NSEC,
2530 						      len);
2531 		assert(p->va);
2532 		return p->va;
2533 	}
2534 	return p->pa;
2535 }
2536 
2537 #ifdef CFG_CORE_RESERVED_SHM
2538 static TEE_Result teecore_init_pub_ram(void)
2539 {
2540 	vaddr_t s = 0;
2541 	vaddr_t e = 0;
2542 
2543 	/* get virtual addr/size of NSec shared mem allocated from teecore */
2544 	core_mmu_get_mem_by_type(MEM_AREA_NSEC_SHM, &s, &e);
2545 
2546 	if (s >= e || s & SMALL_PAGE_MASK || e & SMALL_PAGE_MASK)
2547 		panic("invalid PUB RAM");
2548 
2549 	/* extra check: we could rely on core_mmu_get_mem_by_type() */
2550 	if (!tee_vbuf_is_non_sec(s, e - s))
2551 		panic("PUB RAM is not non-secure");
2552 
2553 #ifdef CFG_PL310
2554 	/* Allocate statically the l2cc mutex */
2555 	tee_l2cc_store_mutex_boot_pa(virt_to_phys((void *)s));
2556 	s += sizeof(uint32_t);			/* size of a pl310 mutex */
2557 	s = ROUNDUP(s, SMALL_PAGE_SIZE);	/* keep required alignment */
2558 #endif
2559 
2560 	default_nsec_shm_paddr = virt_to_phys((void *)s);
2561 	default_nsec_shm_size = e - s;
2562 
2563 	return TEE_SUCCESS;
2564 }
2565 early_init(teecore_init_pub_ram);
2566 #endif /*CFG_CORE_RESERVED_SHM*/
2567 
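/*
 * Initialize the physical secure DDR pool (tee_mm_sec_ddr) with the TA RAM
 * range, taken either from the virtualization layer or from the static
 * memory map, after checking that the range is secure and properly aligned.
 */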
2568 void core_mmu_init_ta_ram(void)
2569 {
2570 	vaddr_t s = 0;
2571 	vaddr_t e = 0;
2572 	paddr_t ps = 0;
2573 	size_t size = 0;
2574 
2575 	/*
2576 	 * Get the virtual address and size of the RAM range where TAs are
2577 	 * loaded and executed.
2578 	 */
2579 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
2580 		virt_get_ta_ram(&s, &e);
2581 	else
2582 		core_mmu_get_mem_by_type(MEM_AREA_TA_RAM, &s, &e);
2583 
2584 	ps = virt_to_phys((void *)s);
2585 	size = e - s;
2586 
2587 	if (!ps || (ps & CORE_MMU_USER_CODE_MASK) ||
2588 	    !size || (size & CORE_MMU_USER_CODE_MASK))
2589 		panic("invalid TA RAM");
2590 
2591 	/* extra check: we could rely on core_mmu_get_mem_by_type() */
2592 	if (!tee_pbuf_is_sec(ps, size))
2593 		panic("TA RAM is not secure");
2594 
2595 	if (!tee_mm_is_empty(&tee_mm_sec_ddr))
2596 		panic("TA RAM pool is not empty");
2597 
2598 	/* remove previous config and init TA ddr memory pool */
2599 	tee_mm_final(&tee_mm_sec_ddr);
2600 	tee_mm_init(&tee_mm_sec_ddr, ps, size, CORE_MMU_USER_CODE_SHIFT,
2601 		    TEE_MM_POOL_NO_FLAGS);
2602 }
2603