1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2016, 2022 Linaro Limited
4  * Copyright (c) 2014, STMicroelectronics International N.V.
5  * Copyright (c) 2022, Arm Limited and Contributors. All rights reserved.
6  */
7 
8 #include <assert.h>
9 #include <config.h>
10 #include <kernel/boot.h>
11 #include <kernel/dt.h>
12 #include <kernel/linker.h>
13 #include <kernel/panic.h>
14 #include <kernel/spinlock.h>
15 #include <kernel/tee_l2cc_mutex.h>
16 #include <kernel/tee_misc.h>
17 #include <kernel/tlb_helpers.h>
18 #include <kernel/user_mode_ctx.h>
19 #include <kernel/virtualization.h>
20 #include <libfdt.h>
21 #include <memtag.h>
22 #include <mm/core_memprot.h>
23 #include <mm/core_mmu.h>
24 #include <mm/mobj.h>
25 #include <mm/pgt_cache.h>
26 #include <mm/phys_mem.h>
27 #include <mm/tee_pager.h>
28 #include <mm/vm.h>
29 #include <platform_config.h>
30 #include <stdalign.h>
31 #include <string.h>
32 #include <trace.h>
33 #include <util.h>
34 
35 #ifndef DEBUG_XLAT_TABLE
36 #define DEBUG_XLAT_TABLE 0
37 #endif
38 
39 #define SHM_VASPACE_SIZE	(1024 * 1024 * 32)
40 
41 /* Virtual memory pool for core mappings */
42 tee_mm_pool_t core_virt_mem_pool;
43 
44 /* Virtual memory pool for shared memory mappings */
45 tee_mm_pool_t core_virt_shm_pool;
46 
47 #ifdef CFG_CORE_PHYS_RELOCATABLE
48 unsigned long core_mmu_tee_load_pa __nex_bss;
49 #else
50 const unsigned long core_mmu_tee_load_pa = TEE_LOAD_ADDR;
51 #endif
52 
53 /*
54  * These variables are initialized before .bss is cleared. To avoid
55  * resetting them when .bss is cleared we're storing them in .data instead,
56  * even if they initially are zero.
57  */
58 
59 #ifdef CFG_CORE_RESERVED_SHM
60 /* Default NSec shared memory allocated from NSec world */
61 unsigned long default_nsec_shm_size __nex_bss;
62 unsigned long default_nsec_shm_paddr __nex_bss;
63 #endif
64 
65 static struct tee_mmap_region static_mmap_regions[CFG_MMAP_REGIONS
66 #if defined(CFG_CORE_ASLR) || defined(CFG_CORE_PHYS_RELOCATABLE)
67 						+ 1
68 #endif
69 						+ 1] __nex_bss;
70 static struct memory_map static_memory_map __nex_data = {
71 	.map = static_mmap_regions,
72 	.alloc_count = ARRAY_SIZE(static_mmap_regions),
73 };
74 
75 /* Define the platform's memory layout. */
76 struct memaccess_area {
77 	paddr_t paddr;
78 	size_t size;
79 };
80 
81 #define MEMACCESS_AREA(a, s) { .paddr = a, .size = s }
82 
83 static struct memaccess_area secure_only[] __nex_data = {
84 #ifdef CFG_CORE_PHYS_RELOCATABLE
85 	MEMACCESS_AREA(0, 0),
86 #else
87 #ifdef TRUSTED_SRAM_BASE
88 	MEMACCESS_AREA(TRUSTED_SRAM_BASE, TRUSTED_SRAM_SIZE),
89 #endif
90 	MEMACCESS_AREA(TRUSTED_DRAM_BASE, TRUSTED_DRAM_SIZE),
91 #endif
92 };
93 
94 static struct memaccess_area nsec_shared[] __nex_data = {
95 #ifdef CFG_CORE_RESERVED_SHM
96 	MEMACCESS_AREA(TEE_SHMEM_START, TEE_SHMEM_SIZE),
97 #endif
98 };
99 
100 #if defined(CFG_SECURE_DATA_PATH)
101 static const char *tz_sdp_match = "linaro,secure-heap";
102 static struct memaccess_area sec_sdp;
103 #ifdef CFG_TEE_SDP_MEM_BASE
104 register_sdp_mem(CFG_TEE_SDP_MEM_BASE, CFG_TEE_SDP_MEM_SIZE);
105 #endif
106 #ifdef TEE_SDP_TEST_MEM_BASE
107 register_sdp_mem(TEE_SDP_TEST_MEM_BASE, TEE_SDP_TEST_MEM_SIZE);
108 #endif
109 #endif
110 
111 #ifdef CFG_CORE_RESERVED_SHM
112 register_phys_mem(MEM_AREA_NSEC_SHM, TEE_SHMEM_START, TEE_SHMEM_SIZE);
113 #endif
114 static unsigned int mmu_spinlock;
115 
116 static uint32_t mmu_lock(void)
117 {
118 	return cpu_spin_lock_xsave(&mmu_spinlock);
119 }
120 
121 static void mmu_unlock(uint32_t exceptions)
122 {
123 	cpu_spin_unlock_xrestore(&mmu_spinlock, exceptions);
124 }
125 
126 static void grow_mem_map(struct memory_map *mem_map)
127 {
128 	if (mem_map->count == mem_map->alloc_count) {
129 		EMSG("Out of entries (%zu) in mem_map", mem_map->alloc_count);
130 		panic();
131 	}
132 	mem_map->count++;
133 }
134 
135 void core_mmu_get_secure_memory(paddr_t *base, paddr_size_t *size)
136 {
137 	/*
138 	 * The first range is always used to cover OP-TEE core memory, but
139 	 * depending on configuration it may cover more than that.
140 	 */
141 	*base = secure_only[0].paddr;
142 	*size = secure_only[0].size;
143 }
144 
145 void core_mmu_set_secure_memory(paddr_t base, size_t size)
146 {
147 #ifdef CFG_CORE_PHYS_RELOCATABLE
148 	static_assert(ARRAY_SIZE(secure_only) == 1);
149 #endif
150 	runtime_assert(IS_ENABLED(CFG_CORE_PHYS_RELOCATABLE));
151 	assert(!secure_only[0].size);
152 	assert(base && size);
153 
154 	DMSG("Physical secure memory base %#"PRIxPA" size %#zx", base, size);
155 	secure_only[0].paddr = base;
156 	secure_only[0].size = size;
157 }
158 
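/*
 * Illustrative example (arbitrary values) of the derivation below when
 * TA_RAM_START isn't defined: with a single secure_only[] range
 * { .paddr = 0x40000000, .size = 0x02000000 }, OP-TEE loaded at
 * 0x40100000 and TEE_RAM_VA_SIZE = 0x200000, load_offs becomes
 * 0x100000 and the TA range is base 0x40300000, size
 * 0x02000000 - 0x100000 - 0x200000 - TEE_SDP_TEST_MEM_SIZE.
 */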
159 void core_mmu_get_ta_range(paddr_t *base, size_t *size)
160 {
161 	paddr_t b = 0;
162 	size_t s = 0;
163 
164 	static_assert(!(TEE_RAM_VA_SIZE % SMALL_PAGE_SIZE));
165 #ifdef TA_RAM_START
166 	b = TA_RAM_START;
167 	s = TA_RAM_SIZE;
168 #else
169 	static_assert(ARRAY_SIZE(secure_only) <= 2);
170 	if (ARRAY_SIZE(secure_only) == 1) {
171 		vaddr_t load_offs = 0;
172 
173 		assert(core_mmu_tee_load_pa >= secure_only[0].paddr);
174 		load_offs = core_mmu_tee_load_pa - secure_only[0].paddr;
175 
176 		assert(secure_only[0].size >
177 		       load_offs + TEE_RAM_VA_SIZE + TEE_SDP_TEST_MEM_SIZE);
178 		b = secure_only[0].paddr + load_offs + TEE_RAM_VA_SIZE;
179 		s = secure_only[0].size - load_offs - TEE_RAM_VA_SIZE -
180 		    TEE_SDP_TEST_MEM_SIZE;
181 	} else {
182 		assert(secure_only[1].size > TEE_SDP_TEST_MEM_SIZE);
183 		b = secure_only[1].paddr;
184 		s = secure_only[1].size - TEE_SDP_TEST_MEM_SIZE;
185 	}
186 #endif
187 	if (base)
188 		*base = b;
189 	if (size)
190 		*size = s;
191 }
192 
193 static struct memory_map *get_memory_map(void)
194 {
195 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
196 		struct memory_map *map = virt_get_memory_map();
197 
198 		if (map)
199 			return map;
200 	}
201 
202 	return &static_memory_map;
203 }
204 
205 static bool _pbuf_intersects(struct memaccess_area *a, size_t alen,
206 			     paddr_t pa, size_t size)
207 {
208 	size_t n;
209 
210 	for (n = 0; n < alen; n++)
211 		if (core_is_buffer_intersect(pa, size, a[n].paddr, a[n].size))
212 			return true;
213 	return false;
214 }
215 
216 #define pbuf_intersects(a, pa, size) \
217 	_pbuf_intersects((a), ARRAY_SIZE(a), (pa), (size))
218 
219 static bool _pbuf_is_inside(struct memaccess_area *a, size_t alen,
220 			    paddr_t pa, size_t size)
221 {
222 	size_t n;
223 
224 	for (n = 0; n < alen; n++)
225 		if (core_is_buffer_inside(pa, size, a[n].paddr, a[n].size))
226 			return true;
227 	return false;
228 }
229 
230 #define pbuf_is_inside(a, pa, size) \
231 	_pbuf_is_inside((a), ARRAY_SIZE(a), (pa), (size))
232 
233 static bool pa_is_in_map(struct tee_mmap_region *map, paddr_t pa, size_t len)
234 {
235 	paddr_t end_pa = 0;
236 
237 	if (!map)
238 		return false;
239 
240 	if (SUB_OVERFLOW(len, 1, &end_pa) || ADD_OVERFLOW(pa, end_pa, &end_pa))
241 		return false;
242 
243 	return (pa >= map->pa && end_pa <= map->pa + map->size - 1);
244 }
245 
246 static bool va_is_in_map(struct tee_mmap_region *map, vaddr_t va)
247 {
248 	if (!map)
249 		return false;
250 	return (va >= map->va && va <= (map->va + map->size - 1));
251 }
252 
253 /* check if target buffer fits in a core default map area */
254 static bool pbuf_inside_map_area(unsigned long p, size_t l,
255 				 struct tee_mmap_region *map)
256 {
257 	return core_is_buffer_inside(p, l, map->pa, map->size);
258 }
259 
260 TEE_Result core_mmu_for_each_map(void *ptr,
261 				 TEE_Result (*fn)(struct tee_mmap_region *map,
262 						  void *ptr))
263 {
264 	struct memory_map *mem_map = get_memory_map();
265 	TEE_Result res = TEE_SUCCESS;
266 	size_t n = 0;
267 
268 	for (n = 0; n < mem_map->count; n++) {
269 		res = fn(mem_map->map + n, ptr);
270 		if (res)
271 			return res;
272 	}
273 
274 	return TEE_SUCCESS;
275 }
276 
277 static struct tee_mmap_region *find_map_by_type(enum teecore_memtypes type)
278 {
279 	struct memory_map *mem_map = get_memory_map();
280 	size_t n = 0;
281 
282 	for (n = 0; n < mem_map->count; n++) {
283 		if (mem_map->map[n].type == type)
284 			return mem_map->map + n;
285 	}
286 	return NULL;
287 }
288 
289 static struct tee_mmap_region *
290 find_map_by_type_and_pa(enum teecore_memtypes type, paddr_t pa, size_t len)
291 {
292 	struct memory_map *mem_map = get_memory_map();
293 	size_t n = 0;
294 
295 	for (n = 0; n < mem_map->count; n++) {
296 		if (mem_map->map[n].type != type)
297 			continue;
298 		if (pa_is_in_map(mem_map->map + n, pa, len))
299 			return mem_map->map + n;
300 	}
301 	return NULL;
302 }
303 
304 static struct tee_mmap_region *find_map_by_va(void *va)
305 {
306 	struct memory_map *mem_map = get_memory_map();
307 	vaddr_t a = (vaddr_t)va;
308 	size_t n = 0;
309 
310 	for (n = 0; n < mem_map->count; n++) {
311 		if (a >= mem_map->map[n].va &&
312 		    a <= (mem_map->map[n].va - 1 + mem_map->map[n].size))
313 			return mem_map->map + n;
314 	}
315 
316 	return NULL;
317 }
318 
319 static struct tee_mmap_region *find_map_by_pa(unsigned long pa)
320 {
321 	struct memory_map *mem_map = get_memory_map();
322 	size_t n = 0;
323 
324 	for (n = 0; n < mem_map->count; n++) {
325 		/* Skip unmapped regions */
326 		if ((mem_map->map[n].attr & TEE_MATTR_VALID_BLOCK) &&
327 		    pa >= mem_map->map[n].pa &&
328 		    pa <= (mem_map->map[n].pa - 1 + mem_map->map[n].size))
329 			return mem_map->map + n;
330 	}
331 
332 	return NULL;
333 }
334 
335 #if defined(CFG_SECURE_DATA_PATH)
336 static bool dtb_get_sdp_region(void)
337 {
338 	void *fdt = NULL;
339 	int node = 0;
340 	int tmp_node = 0;
341 	paddr_t tmp_addr = 0;
342 	size_t tmp_size = 0;
343 
344 	if (!IS_ENABLED(CFG_EMBED_DTB))
345 		return false;
346 
347 	fdt = get_embedded_dt();
348 	if (!fdt)
349 		panic("No DTB found");
350 
351 	node = fdt_node_offset_by_compatible(fdt, -1, tz_sdp_match);
352 	if (node < 0) {
353 		DMSG("No %s compatible node found", tz_sdp_match);
354 		return false;
355 	}
356 	tmp_node = node;
357 	while (tmp_node >= 0) {
358 		tmp_node = fdt_node_offset_by_compatible(fdt, tmp_node,
359 							 tz_sdp_match);
360 		if (tmp_node >= 0)
361 			DMSG("Ignore SDP pool node %s, supports only 1 node",
362 			     fdt_get_name(fdt, tmp_node, NULL));
363 	}
364 
365 	if (fdt_reg_info(fdt, node, &tmp_addr, &tmp_size)) {
366 		EMSG("%s: Unable to get base addr or size from DT",
367 		     tz_sdp_match);
368 		return false;
369 	}
370 
371 	sec_sdp.paddr = tmp_addr;
372 	sec_sdp.size = tmp_size;
373 
374 	return true;
375 }
376 #endif
377 
378 #if defined(CFG_CORE_DYN_SHM) || defined(CFG_SECURE_DATA_PATH)
379 static bool pbuf_is_special_mem(paddr_t pbuf, size_t len,
380 				const struct core_mmu_phys_mem *start,
381 				const struct core_mmu_phys_mem *end)
382 {
383 	const struct core_mmu_phys_mem *mem;
384 
385 	for (mem = start; mem < end; mem++) {
386 		if (core_is_buffer_inside(pbuf, len, mem->addr, mem->size))
387 			return true;
388 	}
389 
390 	return false;
391 }
392 #endif
393 
394 #ifdef CFG_CORE_DYN_SHM
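/*
 * Carving a range out of a sorted array of physical memory entries has
 * four cases: the range matches an entry exactly (the entry is removed),
 * it starts at the entry's base (the head is trimmed), it ends at the
 * entry's end (the tail is trimmed), or it lies strictly inside (the
 * entry is split in two). Illustrative example with arbitrary values:
 * carving pa = 0x42000000, size = 0x200000 out of the entry
 * { 0x40000000, 0x10000000 } splits it into { 0x40000000, 0x2000000 }
 * and { 0x42200000, 0xde00000 }.
 */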
395 static void carve_out_phys_mem(struct core_mmu_phys_mem **mem, size_t *nelems,
396 			       paddr_t pa, size_t size)
397 {
398 	struct core_mmu_phys_mem *m = *mem;
399 	size_t n = 0;
400 
401 	while (true) {
402 		if (n >= *nelems) {
403 			DMSG("No need to carve out %#" PRIxPA " size %#zx",
404 			     pa, size);
405 			return;
406 		}
407 		if (core_is_buffer_inside(pa, size, m[n].addr, m[n].size))
408 			break;
409 		if (!core_is_buffer_outside(pa, size, m[n].addr, m[n].size))
410 			panic();
411 		n++;
412 	}
413 
414 	if (pa == m[n].addr && size == m[n].size) {
415 		/* Remove this entry */
416 		(*nelems)--;
417 		memmove(m + n, m + n + 1, sizeof(*m) * (*nelems - n));
418 		m = nex_realloc(m, sizeof(*m) * *nelems);
419 		if (!m)
420 			panic();
421 		*mem = m;
422 	} else if (pa == m[n].addr) {
423 		m[n].addr += size;
424 		m[n].size -= size;
425 	} else if ((pa + size) == (m[n].addr + m[n].size)) {
426 		m[n].size -= size;
427 	} else {
428 		/* Need to split the memory entry */
429 		m = nex_realloc(m, sizeof(*m) * (*nelems + 1));
430 		if (!m)
431 			panic();
432 		*mem = m;
433 		memmove(m + n + 1, m + n, sizeof(*m) * (*nelems - n));
434 		(*nelems)++;
435 		m[n].size = pa - m[n].addr;
436 		m[n + 1].size -= size + m[n].size;
437 		m[n + 1].addr = pa + size;
438 	}
439 }
440 
441 static void check_phys_mem_is_outside(struct core_mmu_phys_mem *start,
442 				      size_t nelems,
443 				      struct tee_mmap_region *map)
444 {
445 	size_t n;
446 
447 	for (n = 0; n < nelems; n++) {
448 		if (!core_is_buffer_outside(start[n].addr, start[n].size,
449 					    map->pa, map->size)) {
450 			EMSG("Non-sec mem (%#" PRIxPA ":%#" PRIxPASZ
451 			     ") overlaps map (type %d %#" PRIxPA ":%#zx)",
452 			     start[n].addr, start[n].size,
453 			     map->type, map->pa, map->size);
454 			panic();
455 		}
456 	}
457 }
458 
459 static const struct core_mmu_phys_mem *discovered_nsec_ddr_start __nex_bss;
460 static size_t discovered_nsec_ddr_nelems __nex_bss;
461 
462 static int cmp_pmem_by_addr(const void *a, const void *b)
463 {
464 	const struct core_mmu_phys_mem *pmem_a = a;
465 	const struct core_mmu_phys_mem *pmem_b = b;
466 
467 	return CMP_TRILEAN(pmem_a->addr, pmem_b->addr);
468 }
469 
470 void core_mmu_set_discovered_nsec_ddr(struct core_mmu_phys_mem *start,
471 				      size_t nelems)
472 {
473 	struct core_mmu_phys_mem *m = start;
474 	size_t num_elems = nelems;
475 	struct memory_map *mem_map = &static_memory_map;
476 	const struct core_mmu_phys_mem __maybe_unused *pmem;
477 	size_t n = 0;
478 
479 	assert(!discovered_nsec_ddr_start);
480 	assert(m && num_elems);
481 
482 	qsort(m, num_elems, sizeof(*m), cmp_pmem_by_addr);
483 
484 	/*
485 	 * Non-secure shared memory and also secure data
486 	 * path (SDP) memory are supposed to reside inside
487 	 * non-secure memory. Since NSEC_SHM and SDP_MEM
488 	 * serve specific purposes, carve holes for those
489 	 * ranges out of the normal non-secure memory.
490 	 *
491 	 * This has to be done since, for instance, QEMU
492 	 * isn't aware of which range of the non-secure
493 	 * memory is used for NSEC_SHM.
494 	 */
495 
496 #ifdef CFG_SECURE_DATA_PATH
497 	if (dtb_get_sdp_region())
498 		carve_out_phys_mem(&m, &num_elems, sec_sdp.paddr, sec_sdp.size);
499 
500 	for (pmem = phys_sdp_mem_begin; pmem < phys_sdp_mem_end; pmem++)
501 		carve_out_phys_mem(&m, &num_elems, pmem->addr, pmem->size);
502 #endif
503 
504 	for (n = 0; n < ARRAY_SIZE(secure_only); n++)
505 		carve_out_phys_mem(&m, &num_elems, secure_only[n].paddr,
506 				   secure_only[n].size);
507 
508 	for  (n = 0; n < mem_map->count; n++) {
509 		switch (mem_map->map[n].type) {
510 		case MEM_AREA_NSEC_SHM:
511 			carve_out_phys_mem(&m, &num_elems, mem_map->map[n].pa,
512 					   mem_map->map[n].size);
513 			break;
514 		case MEM_AREA_EXT_DT:
515 		case MEM_AREA_MANIFEST_DT:
516 		case MEM_AREA_RAM_NSEC:
517 		case MEM_AREA_RES_VASPACE:
518 		case MEM_AREA_SHM_VASPACE:
519 		case MEM_AREA_TS_VASPACE:
520 		case MEM_AREA_PAGER_VASPACE:
521 			break;
522 		default:
523 			check_phys_mem_is_outside(m, num_elems,
524 						  mem_map->map + n);
525 		}
526 	}
527 
528 	discovered_nsec_ddr_start = m;
529 	discovered_nsec_ddr_nelems = num_elems;
530 
531 	if (!core_mmu_check_end_pa(m[num_elems - 1].addr,
532 				   m[num_elems - 1].size))
533 		panic();
534 }
535 
536 static bool get_discovered_nsec_ddr(const struct core_mmu_phys_mem **start,
537 				    const struct core_mmu_phys_mem **end)
538 {
539 	if (!discovered_nsec_ddr_start)
540 		return false;
541 
542 	*start = discovered_nsec_ddr_start;
543 	*end = discovered_nsec_ddr_start + discovered_nsec_ddr_nelems;
544 
545 	return true;
546 }
547 
548 static bool pbuf_is_nsec_ddr(paddr_t pbuf, size_t len)
549 {
550 	const struct core_mmu_phys_mem *start;
551 	const struct core_mmu_phys_mem *end;
552 
553 	if (!get_discovered_nsec_ddr(&start, &end))
554 		return false;
555 
556 	return pbuf_is_special_mem(pbuf, len, start, end);
557 }
558 
559 bool core_mmu_nsec_ddr_is_defined(void)
560 {
561 	const struct core_mmu_phys_mem *start;
562 	const struct core_mmu_phys_mem *end;
563 
564 	if (!get_discovered_nsec_ddr(&start, &end))
565 		return false;
566 
567 	return start != end;
568 }
569 #else
570 static bool pbuf_is_nsec_ddr(paddr_t pbuf __unused, size_t len __unused)
571 {
572 	return false;
573 }
574 #endif /*CFG_CORE_DYN_SHM*/
575 
576 #define MSG_MEM_INTERSECT(pa1, sz1, pa2, sz2) \
577 	EMSG("[%" PRIxPA " %" PRIx64 "] intersects [%" PRIxPA " %" PRIx64 "]", \
578 			pa1, (uint64_t)pa1 + (sz1), pa2, (uint64_t)pa2 + (sz2))
579 
580 #ifdef CFG_SECURE_DATA_PATH
581 static bool pbuf_is_sdp_mem(paddr_t pbuf, size_t len)
582 {
583 	bool is_sdp_mem = false;
584 
585 	if (sec_sdp.size)
586 		is_sdp_mem = core_is_buffer_inside(pbuf, len, sec_sdp.paddr,
587 						   sec_sdp.size);
588 
589 	if (!is_sdp_mem)
590 		is_sdp_mem = pbuf_is_special_mem(pbuf, len, phys_sdp_mem_begin,
591 						 phys_sdp_mem_end);
592 
593 	return is_sdp_mem;
594 }
595 
596 static struct mobj *core_sdp_mem_alloc_mobj(paddr_t pa, size_t size)
597 {
598 	struct mobj *mobj = mobj_phys_alloc(pa, size, TEE_MATTR_MEM_TYPE_CACHED,
599 					    CORE_MEM_SDP_MEM);
600 
601 	if (!mobj)
602 		panic("can't create SDP physical memory object");
603 
604 	return mobj;
605 }
606 
607 struct mobj **core_sdp_mem_create_mobjs(void)
608 {
609 	const struct core_mmu_phys_mem *mem = NULL;
610 	struct mobj **mobj_base = NULL;
611 	struct mobj **mobj = NULL;
612 	int cnt = phys_sdp_mem_end - phys_sdp_mem_begin;
613 
614 	if (sec_sdp.size)
615 		cnt++;
616 
617 	/* SDP mobjs table must end with a NULL entry */
618 	mobj_base = calloc(cnt + 1, sizeof(struct mobj *));
619 	if (!mobj_base)
620 		panic("Out of memory");
621 
622 	mobj = mobj_base;
623 
624 	for (mem = phys_sdp_mem_begin; mem < phys_sdp_mem_end; mem++, mobj++)
625 		*mobj = core_sdp_mem_alloc_mobj(mem->addr, mem->size);
626 
627 	if (sec_sdp.size)
628 		*mobj = core_sdp_mem_alloc_mobj(sec_sdp.paddr, sec_sdp.size);
629 
630 	return mobj_base;
631 }
632 
633 #else /* CFG_SECURE_DATA_PATH */
634 static bool pbuf_is_sdp_mem(paddr_t pbuf __unused, size_t len __unused)
635 {
636 	return false;
637 }
638 
639 #endif /* CFG_SECURE_DATA_PATH */
640 
641 /* Check that special memories comply with registered memories */
642 static void verify_special_mem_areas(struct memory_map *mem_map,
643 				     const struct core_mmu_phys_mem *start,
644 				     const struct core_mmu_phys_mem *end,
645 				     const char *area_name __maybe_unused)
646 {
647 	const struct core_mmu_phys_mem *mem = NULL;
648 	const struct core_mmu_phys_mem *mem2 = NULL;
649 	size_t n = 0;
650 
651 	if (start == end) {
652 		DMSG("No %s memory area defined", area_name);
653 		return;
654 	}
655 
656 	for (mem = start; mem < end; mem++)
657 		DMSG("%s memory [%" PRIxPA " %" PRIx64 "]",
658 		     area_name, mem->addr, (uint64_t)mem->addr + mem->size);
659 
660 	/* Check memories do not intersect each other */
661 	for (mem = start; mem + 1 < end; mem++) {
662 		for (mem2 = mem + 1; mem2 < end; mem2++) {
663 			if (core_is_buffer_intersect(mem2->addr, mem2->size,
664 						     mem->addr, mem->size)) {
665 				MSG_MEM_INTERSECT(mem2->addr, mem2->size,
666 						   mem->addr, mem->size);
667 				panic("Special memory intersection");
668 			}
669 		}
670 	}
671 
672 	/*
673 	 * Check memories do not intersect any mapped memory.
674 	 * This is called before reserved VA space is loaded in mem_map.
675 	 */
676 	for (mem = start; mem < end; mem++) {
677 		for (n = 0; n < mem_map->count; n++) {
678 			if (core_is_buffer_intersect(mem->addr, mem->size,
679 						     mem_map->map[n].pa,
680 						     mem_map->map[n].size)) {
681 				MSG_MEM_INTERSECT(mem->addr, mem->size,
682 						   mem_map->map[n].pa,
683 						   mem_map->map[n].size);
684 				panic("Special memory intersection");
685 			}
686 		}
687 	}
688 }
689 
690 static void merge_mmaps(struct tee_mmap_region *dst,
691 			const struct tee_mmap_region *src)
692 {
693 	paddr_t end_pa = MAX(dst->pa + dst->size - 1, src->pa + src->size - 1);
694 	paddr_t pa = MIN(dst->pa, src->pa);
695 
696 	DMSG("Merging %#"PRIxPA"..%#"PRIxPA" and %#"PRIxPA"..%#"PRIxPA,
697 	     dst->pa, dst->pa + dst->size - 1, src->pa,
698 	     src->pa + src->size - 1);
699 	dst->pa = pa;
700 	dst->size = end_pa - pa + 1;
701 }
702 
703 static bool mmaps_are_mergeable(const struct tee_mmap_region *r1,
704 				const struct tee_mmap_region *r2)
705 {
706 	if (r1->type != r2->type)
707 		return false;
708 
709 	if (r1->pa == r2->pa)
710 		return true;
711 
712 	if (r1->pa < r2->pa)
713 		return r1->pa + r1->size >= r2->pa;
714 	else
715 		return r2->pa + r2->size >= r1->pa;
716 }
717 
718 static void add_phys_mem(struct memory_map *mem_map,
719 			 const char *mem_name __maybe_unused,
720 			 enum teecore_memtypes mem_type,
721 			 paddr_t mem_addr, paddr_size_t mem_size)
722 {
723 	size_t n = 0;
724 	const struct tee_mmap_region m0 = {
725 		.type = mem_type,
726 		.pa = mem_addr,
727 		.size = mem_size,
728 	};
729 
730 	if (!mem_size)	/* Discard null size entries */
731 		return;
732 
733 	/*
734 	 * If some ranges of memory of the same type overlap each
735 	 * other they are coalesced into one entry. To help with this,
736 	 * added entries are kept sorted by increasing physical address.
737 	 *
738 	 * Note that it's valid to have the same physical memory as several
739 	 * different memory types, for instance the same device memory
740 	 * mapped as both secure and non-secure. This will probably not
741 	 * happen often in practice.
742 	 */
743 	DMSG("%s type %s 0x%08" PRIxPA " size 0x%08" PRIxPASZ,
744 	     mem_name, teecore_memtype_name(mem_type), mem_addr, mem_size);
745 	for  (n = 0; n < mem_map->count; n++) {
746 		if (mmaps_are_mergeable(mem_map->map + n, &m0)) {
747 			merge_mmaps(mem_map->map + n, &m0);
748 			/*
749 			 * The merged result might be mergeable with the
750 			 * next or previous entry.
751 			 */
752 			if (n + 1 < mem_map->count &&
753 			    mmaps_are_mergeable(mem_map->map + n,
754 						mem_map->map + n + 1)) {
755 				merge_mmaps(mem_map->map + n,
756 					    mem_map->map + n + 1);
757 				rem_array_elem(mem_map->map, mem_map->count,
758 					       sizeof(*mem_map->map), n + 1);
759 				mem_map->count--;
760 			}
761 			if (n > 0 && mmaps_are_mergeable(mem_map->map + n - 1,
762 							 mem_map->map + n)) {
763 				merge_mmaps(mem_map->map + n - 1,
764 					    mem_map->map + n);
765 				rem_array_elem(mem_map->map, mem_map->count,
766 					       sizeof(*mem_map->map), n);
767 				mem_map->count--;
768 			}
769 			return;
770 		}
771 		if (mem_type < mem_map->map[n].type ||
772 		    (mem_type == mem_map->map[n].type &&
773 		     mem_addr < mem_map->map[n].pa))
774 			break; /* found the spot where to insert this memory */
775 	}
776 
777 	grow_mem_map(mem_map);
778 	ins_array_elem(mem_map->map, mem_map->count, sizeof(*mem_map->map),
779 		       n, &m0);
780 }
781 
782 static void add_va_space(struct memory_map *mem_map,
783 			 enum teecore_memtypes type, size_t size)
784 {
785 	size_t n = 0;
786 
787 	DMSG("type %s size 0x%08zx", teecore_memtype_name(type), size);
788 	for  (n = 0; n < mem_map->count; n++) {
789 		if (type < mem_map->map[n].type)
790 			break;
791 	}
792 
793 	grow_mem_map(mem_map);
794 	ins_array_elem(mem_map->map, mem_map->count, sizeof(*mem_map->map),
795 		       n, NULL);
796 	mem_map->map[n] = (struct tee_mmap_region){
797 		.type = type,
798 		.size = size,
799 	};
800 }
801 
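/*
 * Attributes are composed of a valid bit, a security state, access
 * permissions and a memory type. For instance, the function below
 * returns TEE_MATTR_VALID_BLOCK | TEE_MATTR_SECURE | TEE_MATTR_PRX
 * together with the tagged memory type for MEM_AREA_TEE_RAM_RX, that
 * is, secure privileged read + execute with no write access.
 */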
802 uint32_t core_mmu_type_to_attr(enum teecore_memtypes t)
803 {
804 	const uint32_t attr = TEE_MATTR_VALID_BLOCK;
805 	const uint32_t tagged = TEE_MATTR_MEM_TYPE_TAGGED <<
806 				TEE_MATTR_MEM_TYPE_SHIFT;
807 	const uint32_t cached = TEE_MATTR_MEM_TYPE_CACHED <<
808 				TEE_MATTR_MEM_TYPE_SHIFT;
809 	const uint32_t noncache = TEE_MATTR_MEM_TYPE_DEV <<
810 				  TEE_MATTR_MEM_TYPE_SHIFT;
811 
812 	switch (t) {
813 	case MEM_AREA_TEE_RAM:
814 		return attr | TEE_MATTR_SECURE | TEE_MATTR_PRWX | tagged;
815 	case MEM_AREA_TEE_RAM_RX:
816 	case MEM_AREA_INIT_RAM_RX:
817 	case MEM_AREA_IDENTITY_MAP_RX:
818 		return attr | TEE_MATTR_SECURE | TEE_MATTR_PRX | tagged;
819 	case MEM_AREA_TEE_RAM_RO:
820 	case MEM_AREA_INIT_RAM_RO:
821 		return attr | TEE_MATTR_SECURE | TEE_MATTR_PR | tagged;
822 	case MEM_AREA_TEE_RAM_RW:
823 	case MEM_AREA_NEX_RAM_RO: /* This has to be r/w during init runtime */
824 	case MEM_AREA_NEX_RAM_RW:
825 	case MEM_AREA_TEE_ASAN:
826 		return attr | TEE_MATTR_SECURE | TEE_MATTR_PRW | tagged;
827 	case MEM_AREA_TEE_COHERENT:
828 		return attr | TEE_MATTR_SECURE | TEE_MATTR_PRWX | noncache;
829 	case MEM_AREA_TA_RAM:
830 		return attr | TEE_MATTR_SECURE | TEE_MATTR_PRW | tagged;
831 	case MEM_AREA_NSEC_SHM:
832 	case MEM_AREA_NEX_NSEC_SHM:
833 		return attr | TEE_MATTR_PRW | cached;
834 	case MEM_AREA_MANIFEST_DT:
835 		return attr | TEE_MATTR_SECURE | TEE_MATTR_PR | cached;
836 	case MEM_AREA_TRANSFER_LIST:
837 		return attr | TEE_MATTR_SECURE | TEE_MATTR_PRW | cached;
838 	case MEM_AREA_EXT_DT:
839 		/*
840 		 * If CFG_MAP_EXT_DT_SECURE is enabled map the external device
841 		 * tree as secure non-cached memory, otherwise, fall back to
842 		 * non-secure mapping.
843 		 */
844 		if (IS_ENABLED(CFG_MAP_EXT_DT_SECURE))
845 			return attr | TEE_MATTR_SECURE | TEE_MATTR_PRW |
846 			       noncache;
847 		fallthrough;
848 	case MEM_AREA_IO_NSEC:
849 		return attr | TEE_MATTR_PRW | noncache;
850 	case MEM_AREA_IO_SEC:
851 		return attr | TEE_MATTR_SECURE | TEE_MATTR_PRW | noncache;
852 	case MEM_AREA_RAM_NSEC:
853 		return attr | TEE_MATTR_PRW | cached;
854 	case MEM_AREA_RAM_SEC:
855 	case MEM_AREA_SEC_RAM_OVERALL:
856 		return attr | TEE_MATTR_SECURE | TEE_MATTR_PRW | cached;
857 	case MEM_AREA_ROM_SEC:
858 		return attr | TEE_MATTR_SECURE | TEE_MATTR_PR | cached;
859 	case MEM_AREA_RES_VASPACE:
860 	case MEM_AREA_SHM_VASPACE:
861 		return 0;
862 	case MEM_AREA_PAGER_VASPACE:
863 		return TEE_MATTR_SECURE;
864 	default:
865 		panic("invalid type");
866 	}
867 }
868 
869 static bool __maybe_unused map_is_tee_ram(const struct tee_mmap_region *mm)
870 {
871 	switch (mm->type) {
872 	case MEM_AREA_TEE_RAM:
873 	case MEM_AREA_TEE_RAM_RX:
874 	case MEM_AREA_TEE_RAM_RO:
875 	case MEM_AREA_TEE_RAM_RW:
876 	case MEM_AREA_INIT_RAM_RX:
877 	case MEM_AREA_INIT_RAM_RO:
878 	case MEM_AREA_NEX_RAM_RW:
879 	case MEM_AREA_NEX_RAM_RO:
880 	case MEM_AREA_TEE_ASAN:
881 		return true;
882 	default:
883 		return false;
884 	}
885 }
886 
887 static bool __maybe_unused map_is_secure(const struct tee_mmap_region *mm)
888 {
889 	return !!(core_mmu_type_to_attr(mm->type) & TEE_MATTR_SECURE);
890 }
891 
892 static bool __maybe_unused map_is_pgdir(const struct tee_mmap_region *mm)
893 {
894 	return mm->region_size == CORE_MMU_PGDIR_SIZE;
895 }
896 
897 static int cmp_mmap_by_lower_va(const void *a, const void *b)
898 {
899 	const struct tee_mmap_region *mm_a = a;
900 	const struct tee_mmap_region *mm_b = b;
901 
902 	return CMP_TRILEAN(mm_a->va, mm_b->va);
903 }
904 
905 static void dump_mmap_table(struct memory_map *mem_map)
906 {
907 	size_t n = 0;
908 
909 	for (n = 0; n < mem_map->count; n++) {
910 		struct tee_mmap_region *map = mem_map->map + n;
911 		vaddr_t __maybe_unused vstart;
912 
913 		vstart = map->va + ((vaddr_t)map->pa & (map->region_size - 1));
914 		DMSG("type %-12s va 0x%08" PRIxVA "..0x%08" PRIxVA
915 		     " pa 0x%08" PRIxPA "..0x%08" PRIxPA " size 0x%08zx (%s)",
916 		     teecore_memtype_name(map->type), vstart,
917 		     vstart + map->size - 1, map->pa,
918 		     (paddr_t)(map->pa + map->size - 1), map->size,
919 		     map->region_size == SMALL_PAGE_SIZE ? "smallpg" : "pgdir");
920 	}
921 }
922 
923 #if DEBUG_XLAT_TABLE
924 
925 static void dump_xlat_table(vaddr_t va, unsigned int level)
926 {
927 	struct core_mmu_table_info tbl_info;
928 	unsigned int idx = 0;
929 	paddr_t pa;
930 	uint32_t attr;
931 
932 	core_mmu_find_table(NULL, va, level, &tbl_info);
933 	va = tbl_info.va_base;
934 	for (idx = 0; idx < tbl_info.num_entries; idx++) {
935 		core_mmu_get_entry(&tbl_info, idx, &pa, &attr);
936 		if (attr || level > CORE_MMU_BASE_TABLE_LEVEL) {
937 			const char *security_bit = "";
938 
939 			if (core_mmu_entry_have_security_bit(attr)) {
940 				if (attr & TEE_MATTR_SECURE)
941 					security_bit = "S";
942 				else
943 					security_bit = "NS";
944 			}
945 
946 			if (attr & TEE_MATTR_TABLE) {
947 				DMSG_RAW("%*s [LVL%d] VA:0x%010" PRIxVA
948 					" TBL:0x%010" PRIxPA " %s",
949 					level * 2, "", level, va, pa,
950 					security_bit);
951 				dump_xlat_table(va, level + 1);
952 			} else if (attr) {
953 				DMSG_RAW("%*s [LVL%d] VA:0x%010" PRIxVA
954 					" PA:0x%010" PRIxPA " %s-%s-%s-%s",
955 					level * 2, "", level, va, pa,
956 					mattr_is_cached(attr) ? "MEM" :
957 					"DEV",
958 					attr & TEE_MATTR_PW ? "RW" : "RO",
959 					attr & TEE_MATTR_PX ? "X " : "XN",
960 					security_bit);
961 			} else {
962 				DMSG_RAW("%*s [LVL%d] VA:0x%010" PRIxVA
963 					    " INVALID\n",
964 					    level * 2, "", level, va);
965 			}
966 		}
967 		va += BIT64(tbl_info.shift);
968 	}
969 }
970 
971 #else
972 
973 static void dump_xlat_table(vaddr_t va __unused, unsigned int level __unused)
974 {
975 }
976 
977 #endif
978 
979 /*
980  * Reserves virtual memory space for pager usage.
981  *
982  * The range from the start of the first memory used by the link script
983  * up to TEE_RAM_VA_SIZE should be covered, either with a direct mapping
984  * or an empty mapping for pager usage. This adds translation tables as
985  * needed for the pager to operate.
986  */
987 static void add_pager_vaspace(struct memory_map *mem_map)
988 {
989 	paddr_t begin = 0;
990 	paddr_t end = 0;
991 	size_t size = 0;
992 	size_t pos = 0;
993 	size_t n = 0;
994 
995 
996 	for (n = 0; n < mem_map->count; n++) {
997 		if (map_is_tee_ram(mem_map->map + n)) {
998 			if (!begin)
999 				begin = mem_map->map[n].pa;
1000 			pos = n + 1;
1001 		}
1002 	}
1003 
1004 	end = mem_map->map[pos - 1].pa + mem_map->map[pos - 1].size;
1005 	assert(end - begin < TEE_RAM_VA_SIZE);
1006 	size = TEE_RAM_VA_SIZE - (end - begin);
1007 
1008 	grow_mem_map(mem_map);
1009 	ins_array_elem(mem_map->map, mem_map->count, sizeof(*mem_map->map),
1010 		       pos, NULL);
1011 	mem_map->map[pos] = (struct tee_mmap_region){
1012 		.type = MEM_AREA_PAGER_VASPACE,
1013 		.size = size,
1014 		.region_size = SMALL_PAGE_SIZE,
1015 		.attr = core_mmu_type_to_attr(MEM_AREA_PAGER_VASPACE),
1016 	};
1017 }
1018 
1019 static void check_sec_nsec_mem_config(void)
1020 {
1021 	size_t n = 0;
1022 
1023 	for (n = 0; n < ARRAY_SIZE(secure_only); n++) {
1024 		if (pbuf_intersects(nsec_shared, secure_only[n].paddr,
1025 				    secure_only[n].size))
1026 			panic("Invalid memory access config: sec/nsec");
1027 	}
1028 }
1029 
1030 static void collect_device_mem_ranges(struct memory_map *mem_map)
1031 {
1032 	const char *compatible = "arm,ffa-manifest-device-regions";
1033 	void *fdt = get_manifest_dt();
1034 	const char *name = NULL;
1035 	uint64_t page_count = 0;
1036 	uint64_t base = 0;
1037 	int subnode = 0;
1038 	int node = 0;
1039 
1040 	assert(fdt);
1041 
1042 	node = fdt_node_offset_by_compatible(fdt, 0, compatible);
1043 	if (node < 0)
1044 		return;
1045 
1046 	fdt_for_each_subnode(subnode, fdt, node) {
1047 		name = fdt_get_name(fdt, subnode, NULL);
1048 		if (!name)
1049 			continue;
1050 
1051 		if (dt_getprop_as_number(fdt, subnode, "base-address",
1052 					 &base)) {
1053 			EMSG("Mandatory field is missing: base-address");
1054 			continue;
1055 		}
1056 
1057 		if (base & SMALL_PAGE_MASK) {
1058 			EMSG("base-address is not page aligned");
1059 			continue;
1060 		}
1061 
1062 		if (dt_getprop_as_number(fdt, subnode, "pages-count",
1063 					 &page_count)) {
1064 			EMSG("Mandatory field is missing: pages-count");
1065 			continue;
1066 		}
1067 
1068 		add_phys_mem(mem_map, name, MEM_AREA_IO_SEC,
1069 			     base, page_count * SMALL_PAGE_SIZE);
1070 	}
1071 }
1072 
1073 static void collect_mem_ranges(struct memory_map *mem_map)
1074 {
1075 	const struct core_mmu_phys_mem *mem = NULL;
1076 	vaddr_t ram_start = secure_only[0].paddr;
1077 
1078 #define ADD_PHYS_MEM(_type, _addr, _size) \
1079 		add_phys_mem(mem_map, #_addr, (_type), (_addr), (_size))
1080 
1081 	if (IS_ENABLED(CFG_CORE_RWDATA_NOEXEC)) {
1082 		ADD_PHYS_MEM(MEM_AREA_TEE_RAM_RO, ram_start,
1083 			     VCORE_UNPG_RX_PA - ram_start);
1084 		ADD_PHYS_MEM(MEM_AREA_TEE_RAM_RX, VCORE_UNPG_RX_PA,
1085 			     VCORE_UNPG_RX_SZ);
1086 		ADD_PHYS_MEM(MEM_AREA_TEE_RAM_RO, VCORE_UNPG_RO_PA,
1087 			     VCORE_UNPG_RO_SZ);
1088 
1089 		if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
1090 			ADD_PHYS_MEM(MEM_AREA_NEX_RAM_RO, VCORE_UNPG_RW_PA,
1091 				     VCORE_UNPG_RW_SZ);
1092 			ADD_PHYS_MEM(MEM_AREA_NEX_RAM_RW, VCORE_NEX_RW_PA,
1093 				     VCORE_NEX_RW_SZ);
1094 		} else {
1095 			ADD_PHYS_MEM(MEM_AREA_TEE_RAM_RW, VCORE_UNPG_RW_PA,
1096 				     VCORE_UNPG_RW_SZ);
1097 		}
1098 
1099 		if (IS_ENABLED(CFG_WITH_PAGER)) {
1100 			ADD_PHYS_MEM(MEM_AREA_INIT_RAM_RX, VCORE_INIT_RX_PA,
1101 				     VCORE_INIT_RX_SZ);
1102 			ADD_PHYS_MEM(MEM_AREA_INIT_RAM_RO, VCORE_INIT_RO_PA,
1103 				     VCORE_INIT_RO_SZ);
1104 		}
1105 	} else {
1106 		ADD_PHYS_MEM(MEM_AREA_TEE_RAM, TEE_RAM_START, TEE_RAM_PH_SIZE);
1107 	}
1108 
1109 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
1110 		ADD_PHYS_MEM(MEM_AREA_SEC_RAM_OVERALL, TRUSTED_DRAM_BASE,
1111 			     TRUSTED_DRAM_SIZE);
1112 	} else {
1113 		/*
1114 		 * Every guest will have its own TA RAM if virtualization
1115 		 * support is enabled.
1116 		 */
1117 		paddr_t ta_base = 0;
1118 		size_t ta_size = 0;
1119 
1120 		core_mmu_get_ta_range(&ta_base, &ta_size);
1121 		ADD_PHYS_MEM(MEM_AREA_TA_RAM, ta_base, ta_size);
1122 	}
1123 
1124 	if (IS_ENABLED(CFG_CORE_SANITIZE_KADDRESS) &&
1125 	    IS_ENABLED(CFG_WITH_PAGER)) {
1126 		/*
1127 		 * ASAN RAM is part of MEM_AREA_TEE_RAM_RW when pager is
1128 		 * disabled.
1129 		 */
1130 		ADD_PHYS_MEM(MEM_AREA_TEE_ASAN, ASAN_MAP_PA, ASAN_MAP_SZ);
1131 	}
1132 
1133 #undef ADD_PHYS_MEM
1134 
1135 	/* Collect device memory info from SP manifest */
1136 	if (IS_ENABLED(CFG_CORE_SEL2_SPMC))
1137 		collect_device_mem_ranges(mem_map);
1138 
1139 	for (mem = phys_mem_map_begin; mem < phys_mem_map_end; mem++) {
1140 		/* Only unmapped virtual range may have a null phys addr */
1141 		assert(mem->addr || !core_mmu_type_to_attr(mem->type));
1142 
1143 		add_phys_mem(mem_map, mem->name, mem->type,
1144 			     mem->addr, mem->size);
1145 	}
1146 
1147 	if (IS_ENABLED(CFG_SECURE_DATA_PATH))
1148 		verify_special_mem_areas(mem_map, phys_sdp_mem_begin,
1149 					 phys_sdp_mem_end, "SDP");
1150 
1151 	add_va_space(mem_map, MEM_AREA_RES_VASPACE, CFG_RESERVED_VASPACE_SIZE);
1152 	add_va_space(mem_map, MEM_AREA_SHM_VASPACE, SHM_VASPACE_SIZE);
1153 }
1154 
1155 static void assign_mem_granularity(struct memory_map *mem_map)
1156 {
1157 	size_t n = 0;
1158 
1159 	/*
1160 	 * Assign region sizes, note that MEM_AREA_TEE_RAM always uses
1161 	 * SMALL_PAGE_SIZE.
1162 	 */
1163 	for  (n = 0; n < mem_map->count; n++) {
1164 		paddr_t mask = mem_map->map[n].pa | mem_map->map[n].size;
1165 
1166 		if (!(mask & CORE_MMU_PGDIR_MASK))
1167 			mem_map->map[n].region_size = CORE_MMU_PGDIR_SIZE;
1168 		else if (!(mask & SMALL_PAGE_MASK))
1169 			mem_map->map[n].region_size = SMALL_PAGE_SIZE;
1170 		else
1171 			panic("Impossible memory alignment");
1172 
1173 		if (map_is_tee_ram(mem_map->map + n))
1174 			mem_map->map[n].region_size = SMALL_PAGE_SIZE;
1175 	}
1176 }
1177 
1178 static bool place_tee_ram_at_top(paddr_t paddr)
1179 {
1180 	return paddr > BIT64(core_mmu_get_va_width()) / 2;
1181 }
1182 
1183 /*
1184  * The MMU arch driver shall override this function if doing so helps
1185  * optimize the memory footprint of the address translation tables.
1186  */
1187 bool __weak core_mmu_prefer_tee_ram_at_top(paddr_t paddr)
1188 {
1189 	return place_tee_ram_at_top(paddr);
1190 }
1191 
1192 static bool assign_mem_va_dir(vaddr_t tee_ram_va, struct memory_map *mem_map,
1193 			      bool tee_ram_at_top)
1194 {
1195 	struct tee_mmap_region *map = NULL;
1196 	vaddr_t va = 0;
1197 	bool va_is_secure = true;
1198 	size_t n = 0;
1199 
1200 	/*
1201 	 * tee_ram_va might equal 0 when CFG_CORE_ASLR=y.
1202 	 * 0 is by design an invalid va, so return false directly.
1203 	 */
1204 	if (!tee_ram_va)
1205 		return false;
1206 
1207 	/* Clear eventual previous assignments */
1208 	for (n = 0; n < mem_map->count; n++)
1209 		mem_map->map[n].va = 0;
1210 
1211 	/*
1212 	 * TEE RAM regions are always aligned with region_size.
1213 	 *
1214 	 * Note that MEM_AREA_PAGER_VASPACE also counts as TEE RAM here
1215 	 * since it handles virtual memory which covers the part of the ELF
1216 	 * that cannot fit directly into memory.
1217 	 */
1218 	va = tee_ram_va;
1219 	for (n = 0; n < mem_map->count; n++) {
1220 		map = mem_map->map + n;
1221 		if (map_is_tee_ram(map) ||
1222 		    map->type == MEM_AREA_PAGER_VASPACE) {
1223 			assert(!(va & (map->region_size - 1)));
1224 			assert(!(map->size & (map->region_size - 1)));
1225 			map->va = va;
1226 			if (ADD_OVERFLOW(va, map->size, &va))
1227 				return false;
1228 			if (va >= BIT64(core_mmu_get_va_width()))
1229 				return false;
1230 		}
1231 	}
1232 
1233 	if (tee_ram_at_top) {
1234 		/*
1235 		 * Map non-tee ram regions at addresses lower than the tee
1236 		 * ram region.
1237 		 */
1238 		va = tee_ram_va;
1239 		for (n = 0; n < mem_map->count; n++) {
1240 			map = mem_map->map + n;
1241 			map->attr = core_mmu_type_to_attr(map->type);
1242 			if (map->va)
1243 				continue;
1244 
1245 			if (!IS_ENABLED(CFG_WITH_LPAE) &&
1246 			    va_is_secure != map_is_secure(map)) {
1247 				va_is_secure = !va_is_secure;
1248 				va = ROUNDDOWN(va, CORE_MMU_PGDIR_SIZE);
1249 			}
1250 
1251 			if (SUB_OVERFLOW(va, map->size, &va))
1252 				return false;
1253 			va = ROUNDDOWN(va, map->region_size);
1254 			/*
1255 			 * Make sure that va is aligned with pa for
1256 			 * efficient pgdir mapping. Basically (pa &
1257 			 * pgdir_mask) should equal (va & pgdir_mask).
1258 			 */
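			/*
			 * Illustrative example (arbitrary values, 1 MiB
			 * pgdir): with map->pa = 0x40130000 and
			 * va = 0x20000000, stepping down one pgdir and
			 * adding (pa - va) & CORE_MMU_PGDIR_MASK yields
			 * va = 0x1ff30000, so va and pa share the same
			 * 0x30000 offset within their pgdir.
			 */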
1259 			if (map->size > 2 * CORE_MMU_PGDIR_SIZE) {
1260 				if (SUB_OVERFLOW(va, CORE_MMU_PGDIR_SIZE, &va))
1261 					return false;
1262 				va += (map->pa - va) & CORE_MMU_PGDIR_MASK;
1263 			}
1264 			map->va = va;
1265 		}
1266 	} else {
1267 		/*
1268 		 * Map non-tee ram regions at addresses higher than the tee
1269 		 * ram region.
1270 		 */
1271 		for (n = 0; n < mem_map->count; n++) {
1272 			map = mem_map->map + n;
1273 			map->attr = core_mmu_type_to_attr(map->type);
1274 			if (map->va)
1275 				continue;
1276 
1277 			if (!IS_ENABLED(CFG_WITH_LPAE) &&
1278 			    va_is_secure != map_is_secure(map)) {
1279 				va_is_secure = !va_is_secure;
1280 				if (ROUNDUP_OVERFLOW(va, CORE_MMU_PGDIR_SIZE,
1281 						     &va))
1282 					return false;
1283 			}
1284 
1285 			if (ROUNDUP_OVERFLOW(va, map->region_size, &va))
1286 				return false;
1287 			/*
1288 			 * Make sure that va is aligned with pa for
1289 			 * efficient pgdir mapping. Basically (pa &
1290 			 * pgdir_mask) should equal (va & pgdir_mask).
1291 			 */
1292 			if (map->size > 2 * CORE_MMU_PGDIR_SIZE) {
1293 				vaddr_t offs = (map->pa - va) &
1294 					       CORE_MMU_PGDIR_MASK;
1295 
1296 				if (ADD_OVERFLOW(va, offs, &va))
1297 					return false;
1298 			}
1299 
1300 			map->va = va;
1301 			if (ADD_OVERFLOW(va, map->size, &va))
1302 				return false;
1303 			if (va >= BIT64(core_mmu_get_va_width()))
1304 				return false;
1305 		}
1306 	}
1307 
1308 	return true;
1309 }
1310 
1311 static bool assign_mem_va(vaddr_t tee_ram_va, struct memory_map *mem_map)
1312 {
1313 	bool tee_ram_at_top = place_tee_ram_at_top(tee_ram_va);
1314 
1315 	/*
1316 	 * Check that we're not overlapping with the user VA range.
1317 	 */
1318 	if (IS_ENABLED(CFG_WITH_LPAE)) {
1319 		/*
1320 		 * User VA range is supposed to be defined after these
1321 		 * mappings have been established.
1322 		 */
1323 		assert(!core_mmu_user_va_range_is_defined());
1324 	} else {
1325 		vaddr_t user_va_base = 0;
1326 		size_t user_va_size = 0;
1327 
1328 		assert(core_mmu_user_va_range_is_defined());
1329 		core_mmu_get_user_va_range(&user_va_base, &user_va_size);
1330 		if (tee_ram_va < (user_va_base + user_va_size))
1331 			return false;
1332 	}
1333 
1334 	if (IS_ENABLED(CFG_WITH_PAGER)) {
1335 		bool preferred_dir = core_mmu_prefer_tee_ram_at_top(tee_ram_va);
1336 
1337 		/* Try whole mapping covered by a single base xlat entry */
1338 		if (preferred_dir != tee_ram_at_top &&
1339 		    assign_mem_va_dir(tee_ram_va, mem_map, preferred_dir))
1340 			return true;
1341 	}
1342 
1343 	return assign_mem_va_dir(tee_ram_va, mem_map, tee_ram_at_top);
1344 }
1345 
1346 static int cmp_init_mem_map(const void *a, const void *b)
1347 {
1348 	const struct tee_mmap_region *mm_a = a;
1349 	const struct tee_mmap_region *mm_b = b;
1350 	int rc = 0;
1351 
1352 	rc = CMP_TRILEAN(mm_a->region_size, mm_b->region_size);
1353 	if (!rc)
1354 		rc = CMP_TRILEAN(mm_a->pa, mm_b->pa);
1355 	/*
1356 	 * 32-bit MMU descriptors cannot mix secure and non-secure mappings in
1357 	 * the same level-2 table. Hence sort secure mappings apart from
1358 	 * non-secure mappings.
1359 	 */
1360 	if (!rc && !IS_ENABLED(CFG_WITH_LPAE))
1361 		rc = CMP_TRILEAN(map_is_secure(mm_a), map_is_secure(mm_b));
1362 
1363 	return rc;
1364 }
1365 
1366 static bool mem_map_add_id_map(struct memory_map *mem_map,
1367 			       vaddr_t id_map_start, vaddr_t id_map_end)
1368 {
1369 	vaddr_t start = ROUNDDOWN(id_map_start, SMALL_PAGE_SIZE);
1370 	vaddr_t end = ROUNDUP(id_map_end, SMALL_PAGE_SIZE);
1371 	size_t len = end - start;
1372 	size_t n = 0;
1373 
1374 
1375 	for (n = 0; n < mem_map->count; n++)
1376 		if (core_is_buffer_intersect(mem_map->map[n].va,
1377 					     mem_map->map[n].size, start, len))
1378 			return false;
1379 
1380 	grow_mem_map(mem_map);
1381 	mem_map->map[mem_map->count - 1] = (struct tee_mmap_region){
1382 		.type = MEM_AREA_IDENTITY_MAP_RX,
1383 		/*
1384 		 * Could use CORE_MMU_PGDIR_SIZE to potentially save a
1385 		 * translation table, at the increased risk of clashes with
1386 		 * the rest of the memory map.
1387 		 */
1388 		.region_size = SMALL_PAGE_SIZE,
1389 		.pa = start,
1390 		.va = start,
1391 		.size = len,
1392 		.attr = core_mmu_type_to_attr(MEM_AREA_IDENTITY_MAP_RX),
1393 	};
1394 
1395 	return true;
1396 }
1397 
1398 static struct memory_map *init_mem_map(struct memory_map *mem_map,
1399 				       unsigned long seed,
1400 				       unsigned long *ret_offs)
1401 {
1402 	/*
1403 	 * @id_map_start and @id_map_end describe a physical memory range
1404 	 * that must be mapped Read-Only eXecutable at identical virtual
1405 	 * addresses.
1406 	 */
1407 	vaddr_t id_map_start = (vaddr_t)__identity_map_init_start;
1408 	vaddr_t id_map_end = (vaddr_t)__identity_map_init_end;
1409 	vaddr_t start_addr = secure_only[0].paddr;
1410 	unsigned long offs = 0;
1411 
1412 	collect_mem_ranges(mem_map);
1413 	assign_mem_granularity(mem_map);
1414 
1415 	/*
1416 	 * To ease mapping and lower the use of xlat tables, sort the mapping
1417 	 * descriptions, moving small-page regions after the pgdir regions.
1418 	 */
1419 	qsort(mem_map->map, mem_map->count, sizeof(struct tee_mmap_region),
1420 	      cmp_init_mem_map);
1421 
1422 	if (IS_ENABLED(CFG_WITH_PAGER))
1423 		add_pager_vaspace(mem_map);
1424 
1425 	if (IS_ENABLED(CFG_CORE_ASLR) && seed) {
1426 		vaddr_t base_addr = start_addr + seed;
1427 		const unsigned int va_width = core_mmu_get_va_width();
1428 		const vaddr_t va_mask = GENMASK_64(va_width - 1,
1429 						   SMALL_PAGE_SHIFT);
1430 		vaddr_t ba = base_addr;
1431 		size_t n = 0;
1432 
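		/*
		 * Up to three candidate base addresses derived from the
		 * seed are tried: the masked seed itself, then variants
		 * with bit (va_width - 1) and bit (va_width - 2) flipped,
		 * in case an earlier choice clashes with the identity map
		 * or overflows the VA space.
		 */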
1433 		for (n = 0; n < 3; n++) {
1434 			if (n)
1435 				ba = base_addr ^ BIT64(va_width - n);
1436 			ba &= va_mask;
1437 			if (assign_mem_va(ba, mem_map) &&
1438 			    mem_map_add_id_map(mem_map, id_map_start,
1439 					       id_map_end)) {
1440 				offs = ba - start_addr;
1441 				DMSG("Mapping core at %#"PRIxVA" offs %#lx",
1442 				     ba, offs);
1443 				goto out;
1444 			} else {
1445 				DMSG("Failed to map core at %#"PRIxVA, ba);
1446 			}
1447 		}
1448 		EMSG("Failed to map core with seed %#lx", seed);
1449 	}
1450 
1451 	if (!assign_mem_va(start_addr, mem_map))
1452 		panic();
1453 
1454 out:
1455 	qsort(mem_map->map, mem_map->count, sizeof(struct tee_mmap_region),
1456 	      cmp_mmap_by_lower_va);
1457 
1458 	dump_mmap_table(mem_map);
1459 
1460 	*ret_offs = offs;
1461 	return mem_map;
1462 }
1463 
1464 static void check_mem_map(struct memory_map *mem_map)
1465 {
1466 	struct tee_mmap_region *m = NULL;
1467 	size_t n = 0;
1468 
1469 	for (n = 0; n < mem_map->count; n++) {
1470 		m = mem_map->map + n;
1471 		switch (m->type) {
1472 		case MEM_AREA_TEE_RAM:
1473 		case MEM_AREA_TEE_RAM_RX:
1474 		case MEM_AREA_TEE_RAM_RO:
1475 		case MEM_AREA_TEE_RAM_RW:
1476 		case MEM_AREA_INIT_RAM_RX:
1477 		case MEM_AREA_INIT_RAM_RO:
1478 		case MEM_AREA_NEX_RAM_RW:
1479 		case MEM_AREA_NEX_RAM_RO:
1480 		case MEM_AREA_IDENTITY_MAP_RX:
1481 			if (!pbuf_is_inside(secure_only, m->pa, m->size))
1482 				panic("TEE_RAM can't fit in secure_only");
1483 			break;
1484 		case MEM_AREA_TA_RAM:
1485 			if (!pbuf_is_inside(secure_only, m->pa, m->size))
1486 				panic("TA_RAM can't fit in secure_only");
1487 			break;
1488 		case MEM_AREA_NSEC_SHM:
1489 			if (!pbuf_is_inside(nsec_shared, m->pa, m->size))
1490 				panic("NS_SHM can't fit in nsec_shared");
1491 			break;
1492 		case MEM_AREA_SEC_RAM_OVERALL:
1493 		case MEM_AREA_TEE_COHERENT:
1494 		case MEM_AREA_TEE_ASAN:
1495 		case MEM_AREA_IO_SEC:
1496 		case MEM_AREA_IO_NSEC:
1497 		case MEM_AREA_EXT_DT:
1498 		case MEM_AREA_MANIFEST_DT:
1499 		case MEM_AREA_TRANSFER_LIST:
1500 		case MEM_AREA_RAM_SEC:
1501 		case MEM_AREA_RAM_NSEC:
1502 		case MEM_AREA_ROM_SEC:
1503 		case MEM_AREA_RES_VASPACE:
1504 		case MEM_AREA_SHM_VASPACE:
1505 		case MEM_AREA_PAGER_VASPACE:
1506 			break;
1507 		default:
1508 			EMSG("Unhandled memtype %d", m->type);
1509 			panic();
1510 		}
1511 	}
1512 }
1513 
1514 /*
1515  * core_init_mmu_map() - init tee core default memory mapping
1516  *
1517  * This routine sets the static default TEE core mapping. If @seed is > 0
1518  * and CFG_CORE_ASLR is enabled, it will map the TEE core at a location
1519  * based on the seed and return the offset from the link address.
1520  *
1521  * If an error occurs, core_init_mmu_map() is expected to panic.
1522  *
1523  * Note: this function is weak just to make it possible to exclude it from
1524  * the unpaged area.
1525  */
1526 void __weak core_init_mmu_map(unsigned long seed, struct core_mmu_config *cfg)
1527 {
1528 #ifndef CFG_NS_VIRTUALIZATION
1529 	vaddr_t start = ROUNDDOWN((vaddr_t)__nozi_start, SMALL_PAGE_SIZE);
1530 #else
1531 	vaddr_t start = ROUNDDOWN((vaddr_t)__vcore_nex_rw_start,
1532 				  SMALL_PAGE_SIZE);
1533 #endif
1534 	vaddr_t len = ROUNDUP((vaddr_t)__nozi_end, SMALL_PAGE_SIZE) - start;
1535 	struct tee_mmap_region tmp_mmap_region = { };
1536 	struct memory_map mem_map = { };
1537 	unsigned long offs = 0;
1538 
1539 	if (IS_ENABLED(CFG_CORE_PHYS_RELOCATABLE) &&
1540 	    (core_mmu_tee_load_pa & SMALL_PAGE_MASK))
1541 		panic("OP-TEE load address is not page aligned");
1542 
1543 	check_sec_nsec_mem_config();
1544 
1545 	mem_map = static_memory_map;
1546 	static_memory_map = (struct memory_map){
1547 		.map = &tmp_mmap_region,
1548 		.alloc_count = 1,
1549 		.count = 1,
1550 	};
1551 	/*
1552 	 * Add an entry covering the translation tables which will be
1553 	 * involved in some virt_to_phys() and phys_to_virt() conversions.
1554 	 */
1555 	static_memory_map.map[0] = (struct tee_mmap_region){
1556 		.type = MEM_AREA_TEE_RAM,
1557 		.region_size = SMALL_PAGE_SIZE,
1558 		.pa = start,
1559 		.va = start,
1560 		.size = len,
1561 		.attr = core_mmu_type_to_attr(MEM_AREA_IDENTITY_MAP_RX),
1562 	};
1563 
1564 	init_mem_map(&mem_map, seed, &offs);
1565 
1566 	check_mem_map(&mem_map);
1567 	core_init_mmu(&mem_map);
1568 	dump_xlat_table(0x0, CORE_MMU_BASE_TABLE_LEVEL);
1569 	core_init_mmu_regs(cfg);
1570 	cfg->map_offset = offs;
1571 	static_memory_map = mem_map;
1572 }
1573 
1574 bool core_mmu_mattr_is_ok(uint32_t mattr)
1575 {
1576 	/*
1577 	 * Keep in sync with core_mmu_lpae.c:mattr_to_desc and
1578 	 * core_mmu_v7.c:mattr_to_texcb
1579 	 */
1580 
1581 	switch ((mattr >> TEE_MATTR_MEM_TYPE_SHIFT) & TEE_MATTR_MEM_TYPE_MASK) {
1582 	case TEE_MATTR_MEM_TYPE_DEV:
1583 	case TEE_MATTR_MEM_TYPE_STRONGLY_O:
1584 	case TEE_MATTR_MEM_TYPE_CACHED:
1585 	case TEE_MATTR_MEM_TYPE_TAGGED:
1586 		return true;
1587 	default:
1588 		return false;
1589 	}
1590 }
1591 
1592 /*
1593  * test attributes of target physical buffer
1594  *
1595  * Flags: pbuf_is(SECURE, NOT_SECURE, RAM, IOMEM, KEYVAULT).
1596  *
1597  */
1598 bool core_pbuf_is(uint32_t attr, paddr_t pbuf, size_t len)
1599 {
1600 	struct tee_mmap_region *map;
1601 
1602 	/* Empty buffers comply with anything */
1603 	if (len == 0)
1604 		return true;
1605 
1606 	switch (attr) {
1607 	case CORE_MEM_SEC:
1608 		return pbuf_is_inside(secure_only, pbuf, len);
1609 	case CORE_MEM_NON_SEC:
1610 		return pbuf_is_inside(nsec_shared, pbuf, len) ||
1611 			pbuf_is_nsec_ddr(pbuf, len);
1612 	case CORE_MEM_TEE_RAM:
1613 		return core_is_buffer_inside(pbuf, len, TEE_RAM_START,
1614 							TEE_RAM_PH_SIZE);
1615 #ifdef CFG_CORE_RESERVED_SHM
1616 	case CORE_MEM_NSEC_SHM:
1617 		return core_is_buffer_inside(pbuf, len, TEE_SHMEM_START,
1618 							TEE_SHMEM_SIZE);
1619 #endif
1620 	case CORE_MEM_SDP_MEM:
1621 		return pbuf_is_sdp_mem(pbuf, len);
1622 	case CORE_MEM_CACHED:
1623 		map = find_map_by_pa(pbuf);
1624 		if (!map || !pbuf_inside_map_area(pbuf, len, map))
1625 			return false;
1626 		return mattr_is_cached(map->attr);
1627 	default:
1628 		return false;
1629 	}
1630 }
1631 
1632 /* test attributes of target virtual buffer (in core mapping) */
1633 bool core_vbuf_is(uint32_t attr, const void *vbuf, size_t len)
1634 {
1635 	paddr_t p;
1636 
1637 	/* Empty buffers comply with anything */
1638 	if (len == 0)
1639 		return true;
1640 
1641 	p = virt_to_phys((void *)vbuf);
1642 	if (!p)
1643 		return false;
1644 
1645 	return core_pbuf_is(attr, p, len);
1646 }
1647 
1648 /* core_va2pa - teecore exported service */
1649 static int __maybe_unused core_va2pa_helper(void *va, paddr_t *pa)
1650 {
1651 	struct tee_mmap_region *map;
1652 
1653 	map = find_map_by_va(va);
1654 	if (!va_is_in_map(map, (vaddr_t)va))
1655 		return -1;
1656 
1657 	/*
1658 	 * We can calculate the PA for a static map. Virtual address ranges
1659 	 * reserved for core dynamic mapping return a 'match' (return 0)
1660 	 * together with an invalid null physical address.
1661 	 */
1662 	if (map->pa)
1663 		*pa = map->pa + (vaddr_t)va - map->va;
1664 	else
1665 		*pa = 0;
1666 
1667 	return 0;
1668 }
1669 
1670 static void *map_pa2va(struct tee_mmap_region *map, paddr_t pa, size_t len)
1671 {
1672 	if (!pa_is_in_map(map, pa, len))
1673 		return NULL;
1674 
1675 	return (void *)(vaddr_t)(map->va + pa - map->pa);
1676 }
1677 
1678 /*
1679  * teecore gets some memory area definitions
1680  */
1681 void core_mmu_get_mem_by_type(enum teecore_memtypes type, vaddr_t *s,
1682 			      vaddr_t *e)
1683 {
1684 	struct tee_mmap_region *map = find_map_by_type(type);
1685 
1686 	if (map) {
1687 		*s = map->va;
1688 		*e = map->va + map->size;
1689 	} else {
1690 		*s = 0;
1691 		*e = 0;
1692 	}
1693 }
1694 
1695 enum teecore_memtypes core_mmu_get_type_by_pa(paddr_t pa)
1696 {
1697 	struct tee_mmap_region *map = find_map_by_pa(pa);
1698 
1699 	if (!map)
1700 		return MEM_AREA_MAXTYPE;
1701 	return map->type;
1702 }
1703 
1704 void core_mmu_set_entry(struct core_mmu_table_info *tbl_info, unsigned int idx,
1705 			paddr_t pa, uint32_t attr)
1706 {
1707 	assert(idx < tbl_info->num_entries);
1708 	core_mmu_set_entry_primitive(tbl_info->table, tbl_info->level,
1709 				     idx, pa, attr);
1710 }
1711 
1712 void core_mmu_get_entry(struct core_mmu_table_info *tbl_info, unsigned int idx,
1713 			paddr_t *pa, uint32_t *attr)
1714 {
1715 	assert(idx < tbl_info->num_entries);
1716 	core_mmu_get_entry_primitive(tbl_info->table, tbl_info->level,
1717 				     idx, pa, attr);
1718 }
1719 
1720 static void clear_region(struct core_mmu_table_info *tbl_info,
1721 			 struct tee_mmap_region *region)
1722 {
1723 	unsigned int end = 0;
1724 	unsigned int idx = 0;
1725 
1726 	/* va, len and pa should be block aligned */
1727 	assert(!core_mmu_get_block_offset(tbl_info, region->va));
1728 	assert(!core_mmu_get_block_offset(tbl_info, region->size));
1729 	assert(!core_mmu_get_block_offset(tbl_info, region->pa));
1730 
1731 	idx = core_mmu_va2idx(tbl_info, region->va);
1732 	end = core_mmu_va2idx(tbl_info, region->va + region->size);
1733 
1734 	while (idx < end) {
1735 		core_mmu_set_entry(tbl_info, idx, 0, 0);
1736 		idx++;
1737 	}
1738 }
1739 
1740 static void set_region(struct core_mmu_table_info *tbl_info,
1741 		       struct tee_mmap_region *region)
1742 {
1743 	unsigned int end;
1744 	unsigned int idx;
1745 	paddr_t pa;
1746 
1747 	/* va, len and pa should be block aligned */
1748 	assert(!core_mmu_get_block_offset(tbl_info, region->va));
1749 	assert(!core_mmu_get_block_offset(tbl_info, region->size));
1750 	assert(!core_mmu_get_block_offset(tbl_info, region->pa));
1751 
1752 	idx = core_mmu_va2idx(tbl_info, region->va);
1753 	end = core_mmu_va2idx(tbl_info, region->va + region->size);
1754 	pa = region->pa;
1755 
1756 	while (idx < end) {
1757 		core_mmu_set_entry(tbl_info, idx, pa, region->attr);
1758 		idx++;
1759 		pa += BIT64(tbl_info->shift);
1760 	}
1761 }
1762 
1763 static void set_pg_region(struct core_mmu_table_info *dir_info,
1764 			  struct vm_region *region, struct pgt **pgt,
1765 			  struct core_mmu_table_info *pg_info)
1766 {
1767 	struct tee_mmap_region r = {
1768 		.va = region->va,
1769 		.size = region->size,
1770 		.attr = region->attr,
1771 	};
1772 	vaddr_t end = r.va + r.size;
1773 	uint32_t pgt_attr = (r.attr & TEE_MATTR_SECURE) | TEE_MATTR_TABLE;
1774 
1775 	while (r.va < end) {
1776 		if (!pg_info->table ||
1777 		    r.va >= (pg_info->va_base + CORE_MMU_PGDIR_SIZE)) {
1778 			/*
1779 			 * We're assigning a new translation table.
1780 			 */
1781 			unsigned int idx;
1782 
1783 			/* Virtual addresses must grow */
1784 			assert(r.va > pg_info->va_base);
1785 
1786 			idx = core_mmu_va2idx(dir_info, r.va);
1787 			pg_info->va_base = core_mmu_idx2va(dir_info, idx);
1788 
1789 			/*
1790 			 * Advance pgt to va_base, note that we may need to
1791 			 * skip multiple page tables if there are large
1792 			 * holes in the vm map.
1793 			 */
1794 			while ((*pgt)->vabase < pg_info->va_base) {
1795 				*pgt = SLIST_NEXT(*pgt, link);
1796 				/* We should have allocated enough */
1797 				assert(*pgt);
1798 			}
1799 			assert((*pgt)->vabase == pg_info->va_base);
1800 			pg_info->table = (*pgt)->tbl;
1801 
1802 			core_mmu_set_entry(dir_info, idx,
1803 					   virt_to_phys(pg_info->table),
1804 					   pgt_attr);
1805 		}
1806 
1807 		r.size = MIN(CORE_MMU_PGDIR_SIZE - (r.va - pg_info->va_base),
1808 			     end - r.va);
1809 
1810 		if (!(*pgt)->populated && !mobj_is_paged(region->mobj)) {
1811 			size_t granule = BIT(pg_info->shift);
1812 			size_t offset = r.va - region->va + region->offset;
1813 
1814 			r.size = MIN(r.size,
1815 				     mobj_get_phys_granule(region->mobj));
1816 			r.size = ROUNDUP(r.size, SMALL_PAGE_SIZE);
1817 
1818 			if (mobj_get_pa(region->mobj, offset, granule,
1819 					&r.pa) != TEE_SUCCESS)
1820 				panic("Failed to get PA of unpaged mobj");
1821 			set_region(pg_info, &r);
1822 		}
1823 		r.va += r.size;
1824 	}
1825 }
1826 
1827 static bool can_map_at_level(paddr_t paddr, vaddr_t vaddr,
1828 			     size_t size_left, paddr_t block_size,
1829 			     struct tee_mmap_region *mm __maybe_unused)
1830 {
1831 	/* VA and PA are aligned to block size at current level */
1832 	if ((vaddr | paddr) & (block_size - 1))
1833 		return false;
1834 
1835 	/* Remainder fits into block at current level */
1836 	if (size_left < block_size)
1837 		return false;
1838 
1839 #ifdef CFG_WITH_PAGER
1840 	/*
1841 	 * If pager is enabled, we need to map TEE RAM and the whole pager
1842 	 * region with small pages only.
1843 	 */
1844 	if ((map_is_tee_ram(mm) || mm->type == MEM_AREA_PAGER_VASPACE) &&
1845 	    block_size != SMALL_PAGE_SIZE)
1846 		return false;
1847 #endif
1848 
1849 	return true;
1850 }
1851 
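/*
 * Map a region by walking the translation tables from the base level
 * down, using the largest block size that fits. A block mapping is used
 * when VA, PA and the remaining size are all aligned to the block size
 * at the current level; otherwise the entry is split into a
 * finer-grained table. Illustrative example (LPAE with a 4 KiB granule,
 * arbitrary values): a 6 MiB region at 2 MiB-aligned addresses maps as
 * three 2 MiB level-2 blocks, while a 1 MiB region falls through to 256
 * level-3 small-page entries.
 */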
1852 void core_mmu_map_region(struct mmu_partition *prtn, struct tee_mmap_region *mm)
1853 {
1854 	struct core_mmu_table_info tbl_info;
1855 	unsigned int idx;
1856 	vaddr_t vaddr = mm->va;
1857 	paddr_t paddr = mm->pa;
1858 	ssize_t size_left = mm->size;
1859 	unsigned int level;
1860 	bool table_found;
1861 	uint32_t old_attr;
1862 
1863 	assert(!((vaddr | paddr) & SMALL_PAGE_MASK));
1864 
1865 	while (size_left > 0) {
1866 		level = CORE_MMU_BASE_TABLE_LEVEL;
1867 
1868 		while (true) {
1869 			paddr_t block_size = 0;
1870 
1871 			assert(core_mmu_level_in_range(level));
1872 
1873 			table_found = core_mmu_find_table(prtn, vaddr, level,
1874 							  &tbl_info);
1875 			if (!table_found)
1876 				panic("can't find table for mapping");
1877 
1878 			block_size = BIT64(tbl_info.shift);
1879 
1880 			idx = core_mmu_va2idx(&tbl_info, vaddr);
1881 			if (!can_map_at_level(paddr, vaddr, size_left,
1882 					      block_size, mm)) {
1883 				bool secure = mm->attr & TEE_MATTR_SECURE;
1884 
1885 				/*
1886 				 * This part of the region can't be mapped at
1887 				 * this level. Need to go deeper.
1888 				 */
1889 				if (!core_mmu_entry_to_finer_grained(&tbl_info,
1890 								     idx,
1891 								     secure))
1892 					panic("Can't divide MMU entry");
1893 				level = tbl_info.next_level;
1894 				continue;
1895 			}
1896 
1897 			/* We can map part of the region at current level */
1898 			core_mmu_get_entry(&tbl_info, idx, NULL, &old_attr);
1899 			if (old_attr)
1900 				panic("Page is already mapped");
1901 
1902 			core_mmu_set_entry(&tbl_info, idx, paddr, mm->attr);
1903 			paddr += block_size;
1904 			vaddr += block_size;
1905 			size_left -= block_size;
1906 
1907 			break;
1908 		}
1909 	}
1910 }
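
/*
 * Sketch of how the loops above resolve a mixed-granule mapping,
 * assuming 4 KiB pages with 2 MiB blocks one level up and made-up
 * addresses (attributes omitted for brevity):
 *
 *	struct tee_mmap_region mm = {
 *		.va = 0x40000000,
 *		.pa = 0x80000000,
 *		.size = 0x500000,
 *	};
 *
 *	core_mmu_map_region(prtn, &mm);
 *
 * The first two iterations pass can_map_at_level() at the 2 MiB
 * level and consume 4 MiB with two block entries. The remaining
 * 1 MiB fails the size check, so the next entry is split with
 * core_mmu_entry_to_finer_grained() and 256 small-page entries are
 * written instead.
 */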
1911 
1912 TEE_Result core_mmu_map_pages(vaddr_t vstart, paddr_t *pages, size_t num_pages,
1913 			      enum teecore_memtypes memtype)
1914 {
1915 	TEE_Result ret;
1916 	struct core_mmu_table_info tbl_info;
1917 	struct tee_mmap_region *mm;
1918 	unsigned int idx;
1919 	uint32_t old_attr;
1920 	uint32_t exceptions;
1921 	vaddr_t vaddr = vstart;
1922 	size_t i;
1923 	bool secure;
1924 
1925 	assert(!(core_mmu_type_to_attr(memtype) & TEE_MATTR_PX));
1926 
1927 	secure = core_mmu_type_to_attr(memtype) & TEE_MATTR_SECURE;
1928 
1929 	if (vaddr & SMALL_PAGE_MASK)
1930 		return TEE_ERROR_BAD_PARAMETERS;
1931 
1932 	exceptions = mmu_lock();
1933 
1934 	mm = find_map_by_va((void *)vaddr);
1935 	if (!mm || !va_is_in_map(mm, vaddr + num_pages * SMALL_PAGE_SIZE - 1))
1936 		panic("VA does not belong to any known mm region");
1937 
1938 	if (!core_mmu_is_dynamic_vaspace(mm))
1939 		panic("Trying to map into static region");
1940 
1941 	for (i = 0; i < num_pages; i++) {
1942 		if (pages[i] & SMALL_PAGE_MASK) {
1943 			ret = TEE_ERROR_BAD_PARAMETERS;
1944 			goto err;
1945 		}
1946 
1947 		while (true) {
1948 			if (!core_mmu_find_table(NULL, vaddr, UINT_MAX,
1949 						 &tbl_info))
1950 				panic("Can't find pagetable for vaddr");
1951 
1952 			idx = core_mmu_va2idx(&tbl_info, vaddr);
1953 			if (tbl_info.shift == SMALL_PAGE_SHIFT)
1954 				break;
1955 
1956 			/* This is a supertable; we need to divide it. */
1957 			if (!core_mmu_entry_to_finer_grained(&tbl_info, idx,
1958 							     secure))
1959 				panic("Failed to spread pgdir on small tables");
1960 		}
1961 
1962 		core_mmu_get_entry(&tbl_info, idx, NULL, &old_attr);
1963 		if (old_attr)
1964 			panic("Page is already mapped");
1965 
1966 		core_mmu_set_entry(&tbl_info, idx, pages[i],
1967 				   core_mmu_type_to_attr(memtype));
1968 		vaddr += SMALL_PAGE_SIZE;
1969 	}
1970 
1971 	/*
1972 	 * Make sure all the changes to translation tables are visible
1973 	 * before returning. TLB doesn't need to be invalidated as we are
1974 	 * guaranteed that there's no valid mapping in this range.
1975 	 */
1976 	core_mmu_table_write_barrier();
1977 	mmu_unlock(exceptions);
1978 
1979 	return TEE_SUCCESS;
1980 err:
1981 	mmu_unlock(exceptions);
1982 
1983 	if (i)
1984 		core_mmu_unmap_pages(vstart, i);
1985 
1986 	return ret;
1987 }
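
/*
 * Minimal usage sketch with made-up PAs: map two scattered physical
 * pages at a VA inside a dynamic vaspace region (for instance
 * MEM_AREA_SHM_VASPACE) and unmap them again. The memory type is an
 * assumption; any type without TEE_MATTR_PX will do.
 *
 *	paddr_t pages[] = { 0x88001000, 0x88005000 };
 *
 *	if (core_mmu_map_pages(va, pages, ARRAY_SIZE(pages),
 *			       MEM_AREA_NSEC_SHM) == TEE_SUCCESS)
 *		core_mmu_unmap_pages(va, ARRAY_SIZE(pages));
 */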
1988 
1989 TEE_Result core_mmu_map_contiguous_pages(vaddr_t vstart, paddr_t pstart,
1990 					 size_t num_pages,
1991 					 enum teecore_memtypes memtype)
1992 {
1993 	struct core_mmu_table_info tbl_info = { };
1994 	struct tee_mmap_region *mm = NULL;
1995 	unsigned int idx = 0;
1996 	uint32_t old_attr = 0;
1997 	uint32_t exceptions = 0;
1998 	vaddr_t vaddr = vstart;
1999 	paddr_t paddr = pstart;
2000 	size_t i = 0;
2001 	bool secure = false;
2002 
2003 	assert(!(core_mmu_type_to_attr(memtype) & TEE_MATTR_PX));
2004 
2005 	secure = core_mmu_type_to_attr(memtype) & TEE_MATTR_SECURE;
2006 
2007 	if ((vaddr | paddr) & SMALL_PAGE_MASK)
2008 		return TEE_ERROR_BAD_PARAMETERS;
2009 
2010 	exceptions = mmu_lock();
2011 
2012 	mm = find_map_by_va((void *)vaddr);
2013 	if (!mm || !va_is_in_map(mm, vaddr + num_pages * SMALL_PAGE_SIZE - 1))
2014 		panic("VA does not belong to any known mm region");
2015 
2016 	if (!core_mmu_is_dynamic_vaspace(mm))
2017 		panic("Trying to map into static region");
2018 
2019 	for (i = 0; i < num_pages; i++) {
2020 		while (true) {
2021 			if (!core_mmu_find_table(NULL, vaddr, UINT_MAX,
2022 						 &tbl_info))
2023 				panic("Can't find pagetable for vaddr");
2024 
2025 			idx = core_mmu_va2idx(&tbl_info, vaddr);
2026 			if (tbl_info.shift == SMALL_PAGE_SHIFT)
2027 				break;
2028 
2029 			/* This is a supertable; we need to divide it. */
2030 			if (!core_mmu_entry_to_finer_grained(&tbl_info, idx,
2031 							     secure))
2032 				panic("Failed to spread pgdir on small tables");
2033 		}
2034 
2035 		core_mmu_get_entry(&tbl_info, idx, NULL, &old_attr);
2036 		if (old_attr)
2037 			panic("Page is already mapped");
2038 
2039 		core_mmu_set_entry(&tbl_info, idx, paddr,
2040 				   core_mmu_type_to_attr(memtype));
2041 		paddr += SMALL_PAGE_SIZE;
2042 		vaddr += SMALL_PAGE_SIZE;
2043 	}
2044 
2045 	/*
2046 	 * Make sure all the changes to translation tables are visible
2047 	 * before returning. TLB doesn't need to be invalidated as we are
2048 	 * guaranteed that there's no valid mapping in this range.
2049 	 */
2050 	core_mmu_table_write_barrier();
2051 	mmu_unlock(exceptions);
2052 
2053 	return TEE_SUCCESS;
2054 }
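
/*
 * Same contract as core_mmu_map_pages() above, except that the
 * physical range is contiguous from pstart. A made-up example:
 *
 *	if (core_mmu_map_contiguous_pages(va, 0x88100000, 4,
 *					  MEM_AREA_NSEC_SHM))
 *		panic();
 */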
2055 
2056 void core_mmu_unmap_pages(vaddr_t vstart, size_t num_pages)
2057 {
2058 	struct core_mmu_table_info tbl_info;
2059 	struct tee_mmap_region *mm;
2060 	size_t i;
2061 	unsigned int idx;
2062 	uint32_t exceptions;
2063 
2064 	exceptions = mmu_lock();
2065 
2066 	mm = find_map_by_va((void *)vstart);
2067 	if (!mm || !va_is_in_map(mm, vstart + num_pages * SMALL_PAGE_SIZE - 1))
2068 		panic("VA does not belong to any known mm region");
2069 
2070 	if (!core_mmu_is_dynamic_vaspace(mm))
2071 		panic("Trying to unmap static region");
2072 
2073 	for (i = 0; i < num_pages; i++, vstart += SMALL_PAGE_SIZE) {
2074 		if (!core_mmu_find_table(NULL, vstart, UINT_MAX, &tbl_info))
2075 			panic("Can't find pagetable");
2076 
2077 		if (tbl_info.shift != SMALL_PAGE_SHIFT)
2078 			panic("Invalid pagetable level");
2079 
2080 		idx = core_mmu_va2idx(&tbl_info, vstart);
2081 		core_mmu_set_entry(&tbl_info, idx, 0, 0);
2082 	}
2083 	tlbi_all();
2084 
2085 	mmu_unlock(exceptions);
2086 }
2087 
2088 void core_mmu_populate_user_map(struct core_mmu_table_info *dir_info,
2089 				struct user_mode_ctx *uctx)
2090 {
2091 	struct core_mmu_table_info pg_info = { };
2092 	struct pgt_cache *pgt_cache = &uctx->pgt_cache;
2093 	struct pgt *pgt = NULL;
2094 	struct pgt *p = NULL;
2095 	struct vm_region *r = NULL;
2096 
2097 	if (TAILQ_EMPTY(&uctx->vm_info.regions))
2098 		return; /* Nothing to map */
2099 
2100 	/*
2101 	 * Allocate all page tables in advance.
2102 	 */
2103 	pgt_get_all(uctx);
2104 	pgt = SLIST_FIRST(pgt_cache);
2105 
2106 	core_mmu_set_info_table(&pg_info, dir_info->next_level, 0, NULL);
2107 
2108 	TAILQ_FOREACH(r, &uctx->vm_info.regions, link)
2109 		set_pg_region(dir_info, r, &pgt, &pg_info);
2110 	/* Record that the translation tables are now populated. */
2111 	SLIST_FOREACH(p, pgt_cache, link) {
2112 		p->populated = true;
2113 		if (p == pgt)
2114 			break;
2115 	}
2116 	assert(p == pgt);
2117 }
2118 
2119 TEE_Result core_mmu_remove_mapping(enum teecore_memtypes type, void *addr,
2120 				   size_t len)
2121 {
2122 	struct core_mmu_table_info tbl_info = { };
2123 	struct tee_mmap_region *res_map = NULL;
2124 	struct tee_mmap_region *map = NULL;
2125 	paddr_t pa = virt_to_phys(addr);
2126 	size_t granule = 0;
2127 	ptrdiff_t i = 0;
2128 	paddr_t p = 0;
2129 	size_t l = 0;
2130 
2131 	map = find_map_by_type_and_pa(type, pa, len);
2132 	if (!map)
2133 		return TEE_ERROR_GENERIC;
2134 
2135 	res_map = find_map_by_type(MEM_AREA_RES_VASPACE);
2136 	if (!res_map)
2137 		return TEE_ERROR_GENERIC;
2138 	if (!core_mmu_find_table(NULL, res_map->va, UINT_MAX, &tbl_info))
2139 		return TEE_ERROR_GENERIC;
2140 	granule = BIT(tbl_info.shift);
2141 
2142 	if (map < static_memory_map.map ||
2143 	    map >= static_memory_map.map + static_memory_map.count)
2144 		return TEE_ERROR_GENERIC;
2145 	i = map - static_memory_map.map;
2146 
2147 	/* Check that we have a full match */
2148 	p = ROUNDDOWN(pa, granule);
2149 	l = ROUNDUP(len + pa - p, granule);
2150 	if (map->pa != p || map->size != l)
2151 		return TEE_ERROR_GENERIC;
2152 
2153 	clear_region(&tbl_info, map);
2154 	tlbi_all();
2155 
2156 	/* If possible, remove the va range from res_map */
2157 	if (res_map->va - map->size == map->va) {
2158 		res_map->va -= map->size;
2159 		res_map->size += map->size;
2160 	}
2161 
2162 	/* Remove the entry. */
2163 	rem_array_elem(static_memory_map.map, static_memory_map.count,
2164 		       sizeof(*static_memory_map.map), i);
2165 	static_memory_map.count--;
2166 
2167 	return TEE_SUCCESS;
2168 }
2169 
2170 struct tee_mmap_region *
2171 core_mmu_find_mapping_exclusive(enum teecore_memtypes type, size_t len)
2172 {
2173 	struct memory_map *mem_map = get_memory_map();
2174 	struct tee_mmap_region *map_found = NULL;
2175 	size_t n = 0;
2176 
2177 	if (!len)
2178 		return NULL;
2179 
2180 	for (n = 0; n < mem_map->count; n++) {
2181 		if (mem_map->map[n].type != type)
2182 			continue;
2183 
2184 		if (map_found)
2185 			return NULL;
2186 
2187 		map_found = mem_map->map + n;
2188 	}
2189 
2190 	if (!map_found || map_found->size < len)
2191 		return NULL;
2192 
2193 	return map_found;
2194 }
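
/*
 * Usage sketch: look up a memory type expected to have exactly one
 * region, bailing out when it is absent, ambiguous or too small.
 * The type and length below are assumptions.
 *
 *	struct tee_mmap_region *r =
 *		core_mmu_find_mapping_exclusive(MEM_AREA_SEC_RAM_OVERALL,
 *						SMALL_PAGE_SIZE);
 *
 *	if (!r)
 *		return TEE_ERROR_GENERIC;
 */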
2195 
2196 void *core_mmu_add_mapping(enum teecore_memtypes type, paddr_t addr, size_t len)
2197 {
2198 	struct memory_map *mem_map = &static_memory_map;
2199 	struct core_mmu_table_info tbl_info = { };
2200 	struct tee_mmap_region *map = NULL;
2201 	size_t granule = 0;
2202 	paddr_t p = 0;
2203 	size_t l = 0;
2204 
2205 	if (!len)
2206 		return NULL;
2207 
2208 	if (!core_mmu_check_end_pa(addr, len))
2209 		return NULL;
2210 
2211 	/* Check if the memory is already mapped */
2212 	map = find_map_by_type_and_pa(type, addr, len);
2213 	if (map && pbuf_inside_map_area(addr, len, map))
2214 		return (void *)(vaddr_t)(map->va + addr - map->pa);
2215 
2216 	/* Find the reserved va space used for late mappings */
2217 	map = find_map_by_type(MEM_AREA_RES_VASPACE);
2218 	if (!map)
2219 		return NULL;
2220 
2221 	if (!core_mmu_find_table(NULL, map->va, UINT_MAX, &tbl_info))
2222 		return NULL;
2223 
2224 	granule = BIT64(tbl_info.shift);
2225 	p = ROUNDDOWN(addr, granule);
2226 	l = ROUNDUP(len + addr - p, granule);
2227 
2228 	/* Ban overflowing virtual addresses */
2229 	/* Refuse ranges that don't fit in the reserved va space */
2230 		return NULL;
2231 
2232 	/*
2233 	 * Something is wrong: we can't fit the va range into the selected
2234 	 * table. The reserved va range is possibly misaligned with the
2235 	 * granule.
2236 	 */
2237 	if (core_mmu_va2idx(&tbl_info, map->va + len) >= tbl_info.num_entries)
2238 		return NULL;
2239 
2240 	if (static_memory_map.count >= static_memory_map.alloc_count)
2241 		return NULL;
2242 
2243 	mem_map->map[mem_map->count] = (struct tee_mmap_region){
2244 		.va = map->va,
2245 		.size = l,
2246 		.type = type,
2247 		.region_size = granule,
2248 		.attr = core_mmu_type_to_attr(type),
2249 		.pa = p,
2250 	};
2251 	map->va += l;
2252 	map->size -= l;
2253 	map = mem_map->map + mem_map->count;
2254 	mem_map->count++;
2255 
2256 	set_region(&tbl_info, map);
2257 
2258 	/* Make sure the new entry is visible before continuing. */
2259 	core_mmu_table_write_barrier();
2260 
2261 	return (void *)(vaddr_t)(map->va + addr - map->pa);
2262 }
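
/*
 * Usage sketch pairing this with core_mmu_remove_mapping() above,
 * using a made-up device PA:
 *
 *	void *va = core_mmu_add_mapping(MEM_AREA_IO_SEC, 0xf8000000,
 *					SMALL_PAGE_SIZE);
 *
 *	if (!va)
 *		return TEE_ERROR_GENERIC;
 *	io_write32((vaddr_t)va, 0);
 *	if (core_mmu_remove_mapping(MEM_AREA_IO_SEC, va,
 *				    SMALL_PAGE_SIZE))
 *		panic();
 */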
2263 
2264 #ifdef CFG_WITH_PAGER
2265 static vaddr_t get_linear_map_end_va(void)
2266 {
2267 	/* this is synced with the generic linker file kern.ld.S */
2268 	return (vaddr_t)__heap2_end;
2269 }
2270 
2271 static paddr_t get_linear_map_end_pa(void)
2272 {
2273 	return get_linear_map_end_va() - boot_mmu_config.map_offset;
2274 }
2275 #endif
2276 
2277 #if defined(CFG_TEE_CORE_DEBUG)
2278 static void check_pa_matches_va(void *va, paddr_t pa)
2279 {
2280 	TEE_Result res = TEE_ERROR_GENERIC;
2281 	vaddr_t v = (vaddr_t)va;
2282 	paddr_t p = 0;
2283 	struct core_mmu_table_info ti __maybe_unused = { };
2284 
2285 	if (core_mmu_user_va_range_is_defined()) {
2286 		vaddr_t user_va_base = 0;
2287 		size_t user_va_size = 0;
2288 
2289 		core_mmu_get_user_va_range(&user_va_base, &user_va_size);
2290 		if (v >= user_va_base &&
2291 		    v <= (user_va_base - 1 + user_va_size)) {
2292 			if (!core_mmu_user_mapping_is_active()) {
2293 				if (pa)
2294 					panic("issue in linear address space");
2295 				return;
2296 			}
2297 
2298 			res = vm_va2pa(to_user_mode_ctx(thread_get_tsd()->ctx),
2299 				       va, &p);
2300 			if (res == TEE_ERROR_NOT_SUPPORTED)
2301 				return;
2302 			if (res == TEE_SUCCESS && pa != p)
2303 				panic("bad pa");
2304 			if (res != TEE_SUCCESS && pa)
2305 				panic("false pa");
2306 			return;
2307 		}
2308 	}
2309 #ifdef CFG_WITH_PAGER
2310 	if (is_unpaged(va)) {
2311 		if (v - boot_mmu_config.map_offset != pa)
2312 			panic("issue in linear address space");
2313 		return;
2314 	}
2315 
2316 	if (tee_pager_get_table_info(v, &ti)) {
2317 		uint32_t a;
2318 
2319 		/*
2320 		 * Lookups in the page table managed by the pager are
2321 		 * dangerous for addresses in the paged area as those pages
2322 		 * change all the time. But some ranges are safe, for
2323 		 * instance rw-locked areas while the page is populated.
2324 		 */
2325 		core_mmu_get_entry(&ti, core_mmu_va2idx(&ti, v), &p, &a);
2326 		if (a & TEE_MATTR_VALID_BLOCK) {
2327 			paddr_t mask = BIT64(ti.shift) - 1;
2328 
2329 			p |= v & mask;
2330 			if (pa != p)
2331 				panic();
2332 		} else {
2333 			if (pa)
2334 				panic();
2335 		}
2336 		return;
2337 	}
2338 #endif
2339 
2340 	if (!core_va2pa_helper(va, &p)) {
2341 		/* Verify only the static mapping (case of non-null phys addr) */
2342 		if (p && pa != p) {
2343 			DMSG("va %p maps 0x%" PRIxPA ", expect 0x%" PRIxPA,
2344 			     va, p, pa);
2345 			panic();
2346 		}
2347 	} else {
2348 		if (pa) {
2349 			DMSG("va %p unmapped, expect 0x%" PRIxPA, va, pa);
2350 			panic();
2351 		}
2352 	}
2353 }
2354 #else
2355 static void check_pa_matches_va(void *va __unused, paddr_t pa __unused)
2356 {
2357 }
2358 #endif
2359 
2360 paddr_t virt_to_phys(void *va)
2361 {
2362 	paddr_t pa = 0;
2363 
2364 	if (!arch_va2pa_helper(va, &pa))
2365 		pa = 0;
2366 	check_pa_matches_va(memtag_strip_tag(va), pa);
2367 	return pa;
2368 }
2369 
2370 /*
2371  * Don't use check_va_matches_pa() for RISC-V: its callee
2372  * arch_va2pa_helper() will eventually call it back, which creates
2373  * indirect recursion and can lead to a stack overflow.
2374  * Moreover, if arch_va2pa_helper() returns true, it implies the
2375  * va2pa mapping matches, so there is no need to check it again.
2376  */
2377 #if defined(CFG_TEE_CORE_DEBUG) && !defined(__riscv)
2378 static void check_va_matches_pa(paddr_t pa, void *va)
2379 {
2380 	paddr_t p = 0;
2381 
2382 	if (!va)
2383 		return;
2384 
2385 	p = virt_to_phys(va);
2386 	if (p != pa) {
2387 		DMSG("va %p maps 0x%" PRIxPA " expect 0x%" PRIxPA, va, p, pa);
2388 		panic();
2389 	}
2390 }
2391 #else
2392 static void check_va_matches_pa(paddr_t pa __unused, void *va __unused)
2393 {
2394 }
2395 #endif
2396 
2397 static void *phys_to_virt_ts_vaspace(paddr_t pa, size_t len)
2398 {
2399 	if (!core_mmu_user_mapping_is_active())
2400 		return NULL;
2401 
2402 	return vm_pa2va(to_user_mode_ctx(thread_get_tsd()->ctx), pa, len);
2403 }
2404 
2405 #ifdef CFG_WITH_PAGER
2406 static void *phys_to_virt_tee_ram(paddr_t pa, size_t len)
2407 {
2408 	paddr_t end_pa = 0;
2409 
2410 	if (SUB_OVERFLOW(len, 1, &end_pa) || ADD_OVERFLOW(pa, end_pa, &end_pa))
2411 		return NULL;
2412 
2413 	if (pa >= TEE_LOAD_ADDR && pa < get_linear_map_end_pa()) {
2414 		if (end_pa > get_linear_map_end_pa())
2415 			return NULL;
2416 		return (void *)(vaddr_t)(pa + boot_mmu_config.map_offset);
2417 	}
2418 
2419 	return tee_pager_phys_to_virt(pa, len);
2420 }
2421 #else
2422 static void *phys_to_virt_tee_ram(paddr_t pa, size_t len)
2423 {
2424 	struct tee_mmap_region *mmap = NULL;
2425 
2426 	mmap = find_map_by_type_and_pa(MEM_AREA_TEE_RAM, pa, len);
2427 	if (!mmap)
2428 		mmap = find_map_by_type_and_pa(MEM_AREA_NEX_RAM_RW, pa, len);
2429 	if (!mmap)
2430 		mmap = find_map_by_type_and_pa(MEM_AREA_NEX_RAM_RO, pa, len);
2431 	if (!mmap)
2432 		mmap = find_map_by_type_and_pa(MEM_AREA_TEE_RAM_RW, pa, len);
2433 	if (!mmap)
2434 		mmap = find_map_by_type_and_pa(MEM_AREA_TEE_RAM_RO, pa, len);
2435 	if (!mmap)
2436 		mmap = find_map_by_type_and_pa(MEM_AREA_TEE_RAM_RX, pa, len);
2437 	/*
2438 	 * Note that MEM_AREA_INIT_RAM_RO and MEM_AREA_INIT_RAM_RX are only
2439 	 * used with pager and not needed here.
2440 	 */
2441 	return map_pa2va(mmap, pa, len);
2442 }
2443 #endif
2444 
2445 void *phys_to_virt(paddr_t pa, enum teecore_memtypes m, size_t len)
2446 {
2447 	void *va = NULL;
2448 
2449 	switch (m) {
2450 	case MEM_AREA_TS_VASPACE:
2451 		va = phys_to_virt_ts_vaspace(pa, len);
2452 		break;
2453 	case MEM_AREA_TEE_RAM:
2454 	case MEM_AREA_TEE_RAM_RX:
2455 	case MEM_AREA_TEE_RAM_RO:
2456 	case MEM_AREA_TEE_RAM_RW:
2457 	case MEM_AREA_NEX_RAM_RO:
2458 	case MEM_AREA_NEX_RAM_RW:
2459 		va = phys_to_virt_tee_ram(pa, len);
2460 		break;
2461 	case MEM_AREA_SHM_VASPACE:
2462 		/* Finding a VA from a PA in dynamic SHM is not yet supported */
2463 		va = NULL;
2464 		break;
2465 	default:
2466 		va = map_pa2va(find_map_by_type_and_pa(m, pa, len), pa, len);
2467 	}
2468 	if (m != MEM_AREA_SEC_RAM_OVERALL)
2469 		check_va_matches_pa(pa, va);
2470 	return va;
2471 }
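
/*
 * Round-trip sketch: translate a PA known to be covered by a core
 * mapping and cross-check it with virt_to_phys(). The memory type
 * is an assumption.
 *
 *	void *va = phys_to_virt(pa, MEM_AREA_TEE_RAM_RW,
 *				sizeof(uint32_t));
 *
 *	if (va)
 *		assert(virt_to_phys(va) == pa);
 */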
2472 
2473 void *phys_to_virt_io(paddr_t pa, size_t len)
2474 {
2475 	struct tee_mmap_region *map = NULL;
2476 	void *va = NULL;
2477 
2478 	map = find_map_by_type_and_pa(MEM_AREA_IO_SEC, pa, len);
2479 	if (!map)
2480 		map = find_map_by_type_and_pa(MEM_AREA_IO_NSEC, pa, len);
2481 	if (!map)
2482 		return NULL;
2483 	va = map_pa2va(map, pa, len);
2484 	check_va_matches_pa(pa, va);
2485 	return va;
2486 }
2487 
2488 vaddr_t core_mmu_get_va(paddr_t pa, enum teecore_memtypes type, size_t len)
2489 {
2490 	if (cpu_mmu_enabled())
2491 		return (vaddr_t)phys_to_virt(pa, type, len);
2492 
2493 	return (vaddr_t)pa;
2494 }
2495 
2496 #ifdef CFG_WITH_PAGER
2497 bool is_unpaged(const void *va)
2498 {
2499 	vaddr_t v = (vaddr_t)va;
2500 
2501 	return v >= VCORE_START_VA && v < get_linear_map_end_va();
2502 }
2503 #endif
2504 
2505 #ifdef CFG_NS_VIRTUALIZATION
2506 bool is_nexus(const void *va)
2507 {
2508 	vaddr_t v = (vaddr_t)va;
2509 
2510 	return v >= VCORE_START_VA && v < VCORE_NEX_RW_PA + VCORE_NEX_RW_SZ;
2511 }
2512 #endif
2513 
2514 void core_mmu_init_virtualization(void)
2515 {
2516 	paddr_t b1 = 0;
2517 	paddr_size_t s1 = 0;
2518 
2519 	static_assert(ARRAY_SIZE(secure_only) <= 2);
2520 	if (ARRAY_SIZE(secure_only) == 2) {
2521 		b1 = secure_only[1].paddr;
2522 		s1 = secure_only[1].size;
2523 	}
2524 	virt_init_memory(&static_memory_map, secure_only[0].paddr,
2525 			 secure_only[0].size, b1, s1);
2526 }
2527 
2528 vaddr_t io_pa_or_va(struct io_pa_va *p, size_t len)
2529 {
2530 	assert(p->pa);
2531 	if (cpu_mmu_enabled()) {
2532 		if (!p->va)
2533 			p->va = (vaddr_t)phys_to_virt_io(p->pa, len);
2534 		assert(p->va);
2535 		return p->va;
2536 	}
2537 	return p->pa;
2538 }
2539 
2540 vaddr_t io_pa_or_va_secure(struct io_pa_va *p, size_t len)
2541 {
2542 	assert(p->pa);
2543 	if (cpu_mmu_enabled()) {
2544 		if (!p->va)
2545 			p->va = (vaddr_t)phys_to_virt(p->pa, MEM_AREA_IO_SEC,
2546 						      len);
2547 		assert(p->va);
2548 		return p->va;
2549 	}
2550 	return p->pa;
2551 }
2552 
2553 vaddr_t io_pa_or_va_nsec(struct io_pa_va *p, size_t len)
2554 {
2555 	assert(p->pa);
2556 	if (cpu_mmu_enabled()) {
2557 		if (!p->va)
2558 			p->va = (vaddr_t)phys_to_virt(p->pa, MEM_AREA_IO_NSEC,
2559 						      len);
2560 		assert(p->va);
2561 		return p->va;
2562 	}
2563 	return p->pa;
2564 }
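
/*
 * Typical driver-side use of the io_pa_va helpers, with a
 * hypothetical register base and offset: the PA is filled in
 * statically and the VA is resolved lazily on first access once the
 * MMU is enabled.
 *
 *	static struct io_pa_va base = { .pa = 0xf8001000 };
 *
 *	static uint32_t read_status(void)
 *	{
 *		vaddr_t va = io_pa_or_va_secure(&base, SMALL_PAGE_SIZE);
 *
 *		return io_read32(va + 0x4);
 *	}
 */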
2565 
2566 #ifdef CFG_CORE_RESERVED_SHM
2567 static TEE_Result teecore_init_pub_ram(void)
2568 {
2569 	vaddr_t s = 0;
2570 	vaddr_t e = 0;
2571 
2572 	/* get virtual addr/size of NSec shared mem allocated from teecore */
2573 	core_mmu_get_mem_by_type(MEM_AREA_NSEC_SHM, &s, &e);
2574 
2575 	if (s >= e || s & SMALL_PAGE_MASK || e & SMALL_PAGE_MASK)
2576 		panic("invalid PUB RAM");
2577 
2578 	/* extra check: we could rely on core_mmu_get_mem_by_type() */
2579 	if (!tee_vbuf_is_non_sec(s, e - s))
2580 		panic("PUB RAM is not non-secure");
2581 
2582 #ifdef CFG_PL310
2583 	/* Allocate statically the l2cc mutex */
2584 	/* Statically allocate the l2cc mutex */
2585 	s += sizeof(uint32_t);			/* size of a pl310 mutex */
2586 	s = ROUNDUP(s, SMALL_PAGE_SIZE);	/* keep required alignment */
2587 #endif
2588 
2589 	default_nsec_shm_paddr = virt_to_phys((void *)s);
2590 	default_nsec_shm_size = e - s;
2591 
2592 	return TEE_SUCCESS;
2593 }
2594 early_init(teecore_init_pub_ram);
2595 #endif /*CFG_CORE_RESERVED_SHM*/
2596 
2597 void core_mmu_init_phys_mem(void)
2598 {
2599 	vaddr_t s = 0;
2600 	vaddr_t e = 0;
2601 	paddr_t ps = 0;
2602 	size_t size = 0;
2603 
2604 	/*
2605 	 * Get the virtual addr/size of the RAM where TAs are loaded and
2606 	 * executed.
2607 	 */
2608 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
2609 		virt_get_ta_ram(&s, &e);
2610 	else
2611 		core_mmu_get_mem_by_type(MEM_AREA_TA_RAM, &s, &e);
2612 
2613 	ps = virt_to_phys((void *)s);
2614 	size = e - s;
2615 
2616 	phys_mem_init(0, 0, ps, size);
2617 }
2618