xref: /optee_os/core/mm/core_mmu.c (revision f12843460d47d97ed5b33bd11e9c7c49b0928169)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2016, 2022 Linaro Limited
4  * Copyright (c) 2014, STMicroelectronics International N.V.
5  * Copyright (c) 2022, Arm Limited and Contributors. All rights reserved.
6  */
7 
8 #include <assert.h>
9 #include <config.h>
10 #include <kernel/boot.h>
11 #include <kernel/dt.h>
12 #include <kernel/linker.h>
13 #include <kernel/panic.h>
14 #include <kernel/spinlock.h>
15 #include <kernel/tee_l2cc_mutex.h>
16 #include <kernel/tee_misc.h>
17 #include <kernel/tlb_helpers.h>
18 #include <kernel/user_mode_ctx.h>
19 #include <kernel/virtualization.h>
20 #include <libfdt.h>
21 #include <memtag.h>
22 #include <mm/core_memprot.h>
23 #include <mm/core_mmu.h>
24 #include <mm/mobj.h>
25 #include <mm/pgt_cache.h>
26 #include <mm/phys_mem.h>
27 #include <mm/tee_pager.h>
28 #include <mm/vm.h>
29 #include <platform_config.h>
30 #include <stdalign.h>
31 #include <string.h>
32 #include <trace.h>
33 #include <util.h>
34 
35 #ifndef DEBUG_XLAT_TABLE
36 #define DEBUG_XLAT_TABLE 0
37 #endif
38 
39 #define SHM_VASPACE_SIZE	(1024 * 1024 * 32)
40 
41 /* Virtual memory pool for core mappings */
42 tee_mm_pool_t core_virt_mem_pool;
43 
44 /* Virtual memory pool for shared memory mappings */
45 tee_mm_pool_t core_virt_shm_pool;
46 
47 #ifdef CFG_CORE_PHYS_RELOCATABLE
48 unsigned long core_mmu_tee_load_pa __nex_bss;
49 #else
50 const unsigned long core_mmu_tee_load_pa = TEE_LOAD_ADDR;
51 #endif
52 
53 /*
54  * These variables are initialized before .bss is cleared. To avoid
55  * resetting them when .bss is cleared we're storing them in .data instead,
56  * even if they initially are zero.
57  */
58 
59 #ifdef CFG_CORE_RESERVED_SHM
60 /* Default NSec shared memory allocated from NSec world */
61 unsigned long default_nsec_shm_size __nex_bss;
62 unsigned long default_nsec_shm_paddr __nex_bss;
63 #endif
64 
65 #ifdef CFG_BOOT_MEM
66 static struct memory_map static_memory_map __nex_bss;
67 #else
68 static struct tee_mmap_region static_mmap_regions[CFG_MMAP_REGIONS
69 #if defined(CFG_CORE_ASLR) || defined(CFG_CORE_PHYS_RELOCATABLE)
70 						+ 1
71 #endif
72 						+ 4] __nex_bss;
73 static struct memory_map static_memory_map __nex_data = {
74 	.map = static_mmap_regions,
75 	.alloc_count = ARRAY_SIZE(static_mmap_regions),
76 };
77 #endif
78 void (*memory_map_realloc_func)(struct memory_map *mem_map) __nex_bss;
79 
80 /* Offset of the first TEE RAM mapping from start of secure RAM */
81 static size_t tee_ram_initial_offs __nex_bss;
82 
83 /* Define the platform's memory layout. */
84 struct memaccess_area {
85 	paddr_t paddr;
86 	size_t size;
87 };
88 
89 #define MEMACCESS_AREA(a, s) { .paddr = a, .size = s }
90 
91 static struct memaccess_area secure_only[] __nex_data = {
92 #ifdef CFG_CORE_PHYS_RELOCATABLE
93 	MEMACCESS_AREA(0, 0),
94 #else
95 #ifdef TRUSTED_SRAM_BASE
96 	MEMACCESS_AREA(TRUSTED_SRAM_BASE, TRUSTED_SRAM_SIZE),
97 #endif
98 	MEMACCESS_AREA(TRUSTED_DRAM_BASE, TRUSTED_DRAM_SIZE),
99 #endif
100 };
101 
102 static struct memaccess_area nsec_shared[] __nex_data = {
103 #ifdef CFG_CORE_RESERVED_SHM
104 	MEMACCESS_AREA(TEE_SHMEM_START, TEE_SHMEM_SIZE),
105 #endif
106 };
107 
108 #if defined(CFG_SECURE_DATA_PATH)
109 static const char *tz_sdp_match = "linaro,secure-heap";
110 static struct memaccess_area sec_sdp;
111 #ifdef CFG_TEE_SDP_MEM_BASE
112 register_sdp_mem(CFG_TEE_SDP_MEM_BASE, CFG_TEE_SDP_MEM_SIZE);
113 #endif
114 #ifdef TEE_SDP_TEST_MEM_BASE
115 register_sdp_mem(TEE_SDP_TEST_MEM_BASE, TEE_SDP_TEST_MEM_SIZE);
116 #endif
117 #endif
118 
119 #ifdef CFG_CORE_RESERVED_SHM
120 register_phys_mem(MEM_AREA_NSEC_SHM, TEE_SHMEM_START, TEE_SHMEM_SIZE);
121 #endif
122 static unsigned int mmu_spinlock;
123 
124 static uint32_t mmu_lock(void)
125 {
126 	return cpu_spin_lock_xsave(&mmu_spinlock);
127 }
128 
129 static void mmu_unlock(uint32_t exceptions)
130 {
131 	cpu_spin_unlock_xrestore(&mmu_spinlock, exceptions);
132 }
133 
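/*
 * Grow the memory map array by one entry using the nexus heap, once the
 * heap is available. Panics if the reallocation fails.
 */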
134 static void heap_realloc_memory_map(struct memory_map *mem_map)
135 {
136 	struct tee_mmap_region *m = NULL;
137 	struct tee_mmap_region *old = mem_map->map;
138 	size_t old_sz = sizeof(*old) * mem_map->alloc_count;
139 	size_t sz = old_sz + sizeof(*m);
140 
141 	assert(nex_malloc_buffer_is_within_alloced(old, old_sz));
142 	m = nex_realloc(old, sz);
143 	if (!m)
144 		panic();
145 	mem_map->map = m;
146 	mem_map->alloc_count++;
147 }
148 
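/*
 * Double the memory map array using temporary boot memory, for use before
 * the heap is available.
 */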
149 static void boot_mem_realloc_memory_map(struct memory_map *mem_map)
150 {
151 	struct tee_mmap_region *m = NULL;
152 	struct tee_mmap_region *old = mem_map->map;
153 	size_t old_sz = sizeof(*old) * mem_map->alloc_count;
154 	size_t sz = old_sz * 2;
155 
156 	m = boot_mem_alloc_tmp(sz, alignof(*m));
157 	memcpy(m, old, old_sz);
158 	mem_map->map = m;
159 	mem_map->alloc_count *= 2;
160 }
161 
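/*
 * Make room for one more entry in the memory map, reallocating the array
 * with the configured realloc function when it's full. Panics if no
 * realloc function has been set.
 */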
162 static void grow_mem_map(struct memory_map *mem_map)
163 {
164 	if (mem_map->count == mem_map->alloc_count) {
165 		if (!memory_map_realloc_func) {
166 			EMSG("Out of entries (%zu) in mem_map",
167 			     mem_map->alloc_count);
168 			panic();
169 		}
170 		memory_map_realloc_func(mem_map);
171 	}
172 	mem_map->count++;
173 }
174 
175 void core_mmu_get_secure_memory(paddr_t *base, paddr_size_t *size)
176 {
177 	/*
178 	 * The first range is always used to cover OP-TEE core memory, but
179 	 * depending on configuration it may cover more than that.
180 	 */
181 	*base = secure_only[0].paddr;
182 	*size = secure_only[0].size;
183 }
184 
185 void core_mmu_set_secure_memory(paddr_t base, size_t size)
186 {
187 #ifdef CFG_CORE_PHYS_RELOCATABLE
188 	static_assert(ARRAY_SIZE(secure_only) == 1);
189 #endif
190 	runtime_assert(IS_ENABLED(CFG_CORE_PHYS_RELOCATABLE));
191 	assert(!secure_only[0].size);
192 	assert(base && size);
193 
194 	DMSG("Physical secure memory base %#"PRIxPA" size %#zx", base, size);
195 	secure_only[0].paddr = base;
196 	secure_only[0].size = size;
197 }
198 
199 void core_mmu_get_ta_range(paddr_t *base, size_t *size)
200 {
201 	paddr_t b = 0;
202 	size_t s = 0;
203 
204 	static_assert(!(TEE_RAM_VA_SIZE % SMALL_PAGE_SIZE));
205 #ifdef TA_RAM_START
206 	b = TA_RAM_START;
207 	s = TA_RAM_SIZE;
208 #else
209 	static_assert(ARRAY_SIZE(secure_only) <= 2);
210 	if (ARRAY_SIZE(secure_only) == 1) {
211 		vaddr_t load_offs = 0;
212 
213 		assert(core_mmu_tee_load_pa >= secure_only[0].paddr);
214 		load_offs = core_mmu_tee_load_pa - secure_only[0].paddr;
215 
216 		assert(secure_only[0].size >
217 		       load_offs + TEE_RAM_VA_SIZE + TEE_SDP_TEST_MEM_SIZE);
218 		b = secure_only[0].paddr + load_offs + TEE_RAM_VA_SIZE;
219 		s = secure_only[0].size - load_offs - TEE_RAM_VA_SIZE -
220 		    TEE_SDP_TEST_MEM_SIZE;
221 	} else {
222 		assert(secure_only[1].size > TEE_SDP_TEST_MEM_SIZE);
223 		b = secure_only[1].paddr;
224 		s = secure_only[1].size - TEE_SDP_TEST_MEM_SIZE;
225 	}
226 #endif
227 	if (base)
228 		*base = b;
229 	if (size)
230 		*size = s;
231 }
232 
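/*
 * Return the active memory map: the per-guest map when NS virtualization
 * is enabled and initialized, otherwise the static map.
 */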
233 static struct memory_map *get_memory_map(void)
234 {
235 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
236 		struct memory_map *map = virt_get_memory_map();
237 
238 		if (map)
239 			return map;
240 	}
241 
242 	return &static_memory_map;
243 }
244 
245 static bool _pbuf_intersects(struct memaccess_area *a, size_t alen,
246 			     paddr_t pa, size_t size)
247 {
248 	size_t n;
249 
250 	for (n = 0; n < alen; n++)
251 		if (core_is_buffer_intersect(pa, size, a[n].paddr, a[n].size))
252 			return true;
253 	return false;
254 }
255 
256 #define pbuf_intersects(a, pa, size) \
257 	_pbuf_intersects((a), ARRAY_SIZE(a), (pa), (size))
258 
259 static bool _pbuf_is_inside(struct memaccess_area *a, size_t alen,
260 			    paddr_t pa, size_t size)
261 {
262 	size_t n;
263 
264 	for (n = 0; n < alen; n++)
265 		if (core_is_buffer_inside(pa, size, a[n].paddr, a[n].size))
266 			return true;
267 	return false;
268 }
269 
270 #define pbuf_is_inside(a, pa, size) \
271 	_pbuf_is_inside((a), ARRAY_SIZE(a), (pa), (size))
272 
273 static bool pa_is_in_map(struct tee_mmap_region *map, paddr_t pa, size_t len)
274 {
275 	paddr_t end_pa = 0;
276 
277 	if (!map)
278 		return false;
279 
280 	if (SUB_OVERFLOW(len, 1, &end_pa) || ADD_OVERFLOW(pa, end_pa, &end_pa))
281 		return false;
282 
283 	return (pa >= map->pa && end_pa <= map->pa + map->size - 1);
284 }
285 
286 static bool va_is_in_map(struct tee_mmap_region *map, vaddr_t va)
287 {
288 	if (!map)
289 		return false;
290 	return (va >= map->va && va <= (map->va + map->size - 1));
291 }
292 
293 /* check if target buffer fits in a core default map area */
294 static bool pbuf_inside_map_area(unsigned long p, size_t l,
295 				 struct tee_mmap_region *map)
296 {
297 	return core_is_buffer_inside(p, l, map->pa, map->size);
298 }
299 
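/* Call @fn for each entry in the active memory map, stopping on error */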
300 TEE_Result core_mmu_for_each_map(void *ptr,
301 				 TEE_Result (*fn)(struct tee_mmap_region *map,
302 						  void *ptr))
303 {
304 	struct memory_map *mem_map = get_memory_map();
305 	TEE_Result res = TEE_SUCCESS;
306 	size_t n = 0;
307 
308 	for (n = 0; n < mem_map->count; n++) {
309 		res = fn(mem_map->map + n, ptr);
310 		if (res)
311 			return res;
312 	}
313 
314 	return TEE_SUCCESS;
315 }
316 
317 static struct tee_mmap_region *find_map_by_type(enum teecore_memtypes type)
318 {
319 	struct memory_map *mem_map = get_memory_map();
320 	size_t n = 0;
321 
322 	for (n = 0; n < mem_map->count; n++) {
323 		if (mem_map->map[n].type == type)
324 			return mem_map->map + n;
325 	}
326 	return NULL;
327 }
328 
329 static struct tee_mmap_region *
330 find_map_by_type_and_pa(enum teecore_memtypes type, paddr_t pa, size_t len)
331 {
332 	struct memory_map *mem_map = get_memory_map();
333 	size_t n = 0;
334 
335 	for (n = 0; n < mem_map->count; n++) {
336 		if (mem_map->map[n].type != type)
337 			continue;
338 		if (pa_is_in_map(mem_map->map + n, pa, len))
339 			return mem_map->map + n;
340 	}
341 	return NULL;
342 }
343 
344 static struct tee_mmap_region *find_map_by_va(void *va)
345 {
346 	struct memory_map *mem_map = get_memory_map();
347 	vaddr_t a = (vaddr_t)va;
348 	size_t n = 0;
349 
350 	for (n = 0; n < mem_map->count; n++) {
351 		if (a >= mem_map->map[n].va &&
352 		    a <= (mem_map->map[n].va - 1 + mem_map->map[n].size))
353 			return mem_map->map + n;
354 	}
355 
356 	return NULL;
357 }
358 
359 static struct tee_mmap_region *find_map_by_pa(unsigned long pa)
360 {
361 	struct memory_map *mem_map = get_memory_map();
362 	size_t n = 0;
363 
364 	for (n = 0; n < mem_map->count; n++) {
365 		/* Skip unmapped regions */
366 		if ((mem_map->map[n].attr & TEE_MATTR_VALID_BLOCK) &&
367 		    pa >= mem_map->map[n].pa &&
368 		    pa <= (mem_map->map[n].pa - 1 + mem_map->map[n].size))
369 			return mem_map->map + n;
370 	}
371 
372 	return NULL;
373 }
374 
375 #if defined(CFG_SECURE_DATA_PATH)
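/*
 * Look up the SDP memory region in the embedded DTB (first node compatible
 * with "linaro,secure-heap") and record its base and size in sec_sdp.
 * Returns false if CFG_EMBED_DTB is disabled, no matching node is found or
 * its reg property can't be read.
 */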
376 static bool dtb_get_sdp_region(void)
377 {
378 	void *fdt = NULL;
379 	int node = 0;
380 	int tmp_node = 0;
381 	paddr_t tmp_addr = 0;
382 	size_t tmp_size = 0;
383 
384 	if (!IS_ENABLED(CFG_EMBED_DTB))
385 		return false;
386 
387 	fdt = get_embedded_dt();
388 	if (!fdt)
389 		panic("No DTB found");
390 
391 	node = fdt_node_offset_by_compatible(fdt, -1, tz_sdp_match);
392 	if (node < 0) {
393 		DMSG("No %s compatible node found", tz_sdp_match);
394 		return false;
395 	}
396 	tmp_node = node;
397 	while (tmp_node >= 0) {
398 		tmp_node = fdt_node_offset_by_compatible(fdt, tmp_node,
399 							 tz_sdp_match);
400 		if (tmp_node >= 0)
401 			DMSG("Ignore SDP pool node %s, supports only 1 node",
402 			     fdt_get_name(fdt, tmp_node, NULL));
403 	}
404 
405 	if (fdt_reg_info(fdt, node, &tmp_addr, &tmp_size)) {
406 		EMSG("%s: Unable to get base addr or size from DT",
407 		     tz_sdp_match);
408 		return false;
409 	}
410 
411 	sec_sdp.paddr = tmp_addr;
412 	sec_sdp.size = tmp_size;
413 
414 	return true;
415 }
416 #endif
417 
418 #if defined(CFG_CORE_DYN_SHM) || defined(CFG_SECURE_DATA_PATH)
419 static bool pbuf_is_special_mem(paddr_t pbuf, size_t len,
420 				const struct core_mmu_phys_mem *start,
421 				const struct core_mmu_phys_mem *end)
422 {
423 	const struct core_mmu_phys_mem *mem;
424 
425 	for (mem = start; mem < end; mem++) {
426 		if (core_is_buffer_inside(pbuf, len, mem->addr, mem->size))
427 			return true;
428 	}
429 
430 	return false;
431 }
432 #endif
433 
434 #ifdef CFG_CORE_DYN_SHM
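/*
 * Remove the range [pa, pa + size) from the array of physical memory
 * ranges: drop, shrink or split the entry containing it. Panics on a
 * partial overlap, does nothing if the range isn't covered by any entry.
 */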
435 static void carve_out_phys_mem(struct core_mmu_phys_mem **mem, size_t *nelems,
436 			       paddr_t pa, size_t size)
437 {
438 	struct core_mmu_phys_mem *m = *mem;
439 	size_t n = 0;
440 
441 	while (true) {
442 		if (n >= *nelems) {
443 			DMSG("No need to carve out %#" PRIxPA " size %#zx",
444 			     pa, size);
445 			return;
446 		}
447 		if (core_is_buffer_inside(pa, size, m[n].addr, m[n].size))
448 			break;
449 		if (!core_is_buffer_outside(pa, size, m[n].addr, m[n].size))
450 			panic();
451 		n++;
452 	}
453 
454 	if (pa == m[n].addr && size == m[n].size) {
455 		/* Remove this entry */
456 		(*nelems)--;
457 		memmove(m + n, m + n + 1, sizeof(*m) * (*nelems - n));
458 		m = nex_realloc(m, sizeof(*m) * *nelems);
459 		if (!m)
460 			panic();
461 		*mem = m;
462 	} else if (pa == m[n].addr) {
463 		m[n].addr += size;
464 		m[n].size -= size;
465 	} else if ((pa + size) == (m[n].addr + m[n].size)) {
466 		m[n].size -= size;
467 	} else {
468 		/* Need to split the memory entry */
469 		m = nex_realloc(m, sizeof(*m) * (*nelems + 1));
470 		if (!m)
471 			panic();
472 		*mem = m;
473 		memmove(m + n + 1, m + n, sizeof(*m) * (*nelems - n));
474 		(*nelems)++;
475 		m[n].size = pa - m[n].addr;
476 		m[n + 1].size -= size + m[n].size;
477 		m[n + 1].addr = pa + size;
478 	}
479 }
480 
481 static void check_phys_mem_is_outside(struct core_mmu_phys_mem *start,
482 				      size_t nelems,
483 				      struct tee_mmap_region *map)
484 {
485 	size_t n;
486 
487 	for (n = 0; n < nelems; n++) {
488 		if (!core_is_buffer_outside(start[n].addr, start[n].size,
489 					    map->pa, map->size)) {
490 			EMSG("Non-sec mem (%#" PRIxPA ":%#" PRIxPASZ
491 			     ") overlaps map (type %d %#" PRIxPA ":%#zx)",
492 			     start[n].addr, start[n].size,
493 			     map->type, map->pa, map->size);
494 			panic();
495 		}
496 	}
497 }
498 
499 static const struct core_mmu_phys_mem *discovered_nsec_ddr_start __nex_bss;
500 static size_t discovered_nsec_ddr_nelems __nex_bss;
501 
502 static int cmp_pmem_by_addr(const void *a, const void *b)
503 {
504 	const struct core_mmu_phys_mem *pmem_a = a;
505 	const struct core_mmu_phys_mem *pmem_b = b;
506 
507 	return CMP_TRILEAN(pmem_a->addr, pmem_b->addr);
508 }
509 
510 void core_mmu_set_discovered_nsec_ddr(struct core_mmu_phys_mem *start,
511 				      size_t nelems)
512 {
513 	struct core_mmu_phys_mem *m = start;
514 	size_t num_elems = nelems;
515 	struct memory_map *mem_map = &static_memory_map;
516 	const struct core_mmu_phys_mem __maybe_unused *pmem;
517 	size_t n = 0;
518 
519 	assert(!discovered_nsec_ddr_start);
520 	assert(m && num_elems);
521 
522 	qsort(m, num_elems, sizeof(*m), cmp_pmem_by_addr);
523 
524 	/*
525 	 * Non-secure shared memory and also secure data
526 	 * path memory are supposed to reside inside
527 	 * non-secure memory. Since NSEC_SHM and SDP_MEM
528 	 * are used for specific purposes, carve out holes
529 	 * for that memory in the normal non-secure memory.
530 	 *
531 	 * This has to be done since, for instance, QEMU
532 	 * isn't aware of which range of the non-secure
533 	 * memory is used for NSEC_SHM.
534 	 */
535 
536 #ifdef CFG_SECURE_DATA_PATH
537 	if (dtb_get_sdp_region())
538 		carve_out_phys_mem(&m, &num_elems, sec_sdp.paddr, sec_sdp.size);
539 
540 	for (pmem = phys_sdp_mem_begin; pmem < phys_sdp_mem_end; pmem++)
541 		carve_out_phys_mem(&m, &num_elems, pmem->addr, pmem->size);
542 #endif
543 
544 	for (n = 0; n < ARRAY_SIZE(secure_only); n++)
545 		carve_out_phys_mem(&m, &num_elems, secure_only[n].paddr,
546 				   secure_only[n].size);
547 
548 	for  (n = 0; n < mem_map->count; n++) {
549 		switch (mem_map->map[n].type) {
550 		case MEM_AREA_NSEC_SHM:
551 			carve_out_phys_mem(&m, &num_elems, mem_map->map[n].pa,
552 					   mem_map->map[n].size);
553 			break;
554 		case MEM_AREA_EXT_DT:
555 		case MEM_AREA_MANIFEST_DT:
556 		case MEM_AREA_RAM_NSEC:
557 		case MEM_AREA_RES_VASPACE:
558 		case MEM_AREA_SHM_VASPACE:
559 		case MEM_AREA_TS_VASPACE:
560 		case MEM_AREA_PAGER_VASPACE:
561 			break;
562 		default:
563 			check_phys_mem_is_outside(m, num_elems,
564 						  mem_map->map + n);
565 		}
566 	}
567 
568 	discovered_nsec_ddr_start = m;
569 	discovered_nsec_ddr_nelems = num_elems;
570 
571 	if (!core_mmu_check_end_pa(m[num_elems - 1].addr,
572 				   m[num_elems - 1].size))
573 		panic();
574 }
575 
576 static bool get_discovered_nsec_ddr(const struct core_mmu_phys_mem **start,
577 				    const struct core_mmu_phys_mem **end)
578 {
579 	if (!discovered_nsec_ddr_start)
580 		return false;
581 
582 	*start = discovered_nsec_ddr_start;
583 	*end = discovered_nsec_ddr_start + discovered_nsec_ddr_nelems;
584 
585 	return true;
586 }
587 
588 static bool pbuf_is_nsec_ddr(paddr_t pbuf, size_t len)
589 {
590 	const struct core_mmu_phys_mem *start;
591 	const struct core_mmu_phys_mem *end;
592 
593 	if (!get_discovered_nsec_ddr(&start, &end))
594 		return false;
595 
596 	return pbuf_is_special_mem(pbuf, len, start, end);
597 }
598 
599 bool core_mmu_nsec_ddr_is_defined(void)
600 {
601 	const struct core_mmu_phys_mem *start;
602 	const struct core_mmu_phys_mem *end;
603 
604 	if (!get_discovered_nsec_ddr(&start, &end))
605 		return false;
606 
607 	return start != end;
608 }
609 #else
610 static bool pbuf_is_nsec_ddr(paddr_t pbuf __unused, size_t len __unused)
611 {
612 	return false;
613 }
614 #endif /*CFG_CORE_DYN_SHM*/
615 
616 #define MSG_MEM_INSTERSECT(pa1, sz1, pa2, sz2) \
617 	EMSG("[%" PRIxPA " %" PRIx64 "] intersects [%" PRIxPA " %" PRIx64 "]", \
618 			pa1, (uint64_t)pa1 + (sz1), pa2, (uint64_t)pa2 + (sz2))
619 
620 #ifdef CFG_SECURE_DATA_PATH
621 static bool pbuf_is_sdp_mem(paddr_t pbuf, size_t len)
622 {
623 	bool is_sdp_mem = false;
624 
625 	if (sec_sdp.size)
626 		is_sdp_mem = core_is_buffer_inside(pbuf, len, sec_sdp.paddr,
627 						   sec_sdp.size);
628 
629 	if (!is_sdp_mem)
630 		is_sdp_mem = pbuf_is_special_mem(pbuf, len, phys_sdp_mem_begin,
631 						 phys_sdp_mem_end);
632 
633 	return is_sdp_mem;
634 }
635 
636 static struct mobj *core_sdp_mem_alloc_mobj(paddr_t pa, size_t size)
637 {
638 	struct mobj *mobj = mobj_phys_alloc(pa, size, TEE_MATTR_MEM_TYPE_CACHED,
639 					    CORE_MEM_SDP_MEM);
640 
641 	if (!mobj)
642 		panic("can't create SDP physical memory object");
643 
644 	return mobj;
645 }
646 
647 struct mobj **core_sdp_mem_create_mobjs(void)
648 {
649 	const struct core_mmu_phys_mem *mem = NULL;
650 	struct mobj **mobj_base = NULL;
651 	struct mobj **mobj = NULL;
652 	int cnt = phys_sdp_mem_end - phys_sdp_mem_begin;
653 
654 	if (sec_sdp.size)
655 		cnt++;
656 
657 	/* SDP mobjs table must end with a NULL entry */
658 	mobj_base = calloc(cnt + 1, sizeof(struct mobj *));
659 	if (!mobj_base)
660 		panic("Out of memory");
661 
662 	mobj = mobj_base;
663 
664 	for (mem = phys_sdp_mem_begin; mem < phys_sdp_mem_end; mem++, mobj++)
665 		*mobj = core_sdp_mem_alloc_mobj(mem->addr, mem->size);
666 
667 	if (sec_sdp.size)
668 		*mobj = core_sdp_mem_alloc_mobj(sec_sdp.paddr, sec_sdp.size);
669 
670 	return mobj_base;
671 }
672 
673 #else /* CFG_SECURE_DATA_PATH */
674 static bool pbuf_is_sdp_mem(paddr_t pbuf __unused, size_t len __unused)
675 {
676 	return false;
677 }
678 
679 #endif /* CFG_SECURE_DATA_PATH */
680 
681 /* Check special memories comply with registered memories */
682 static void verify_special_mem_areas(struct memory_map *mem_map,
683 				     const struct core_mmu_phys_mem *start,
684 				     const struct core_mmu_phys_mem *end,
685 				     const char *area_name __maybe_unused)
686 {
687 	const struct core_mmu_phys_mem *mem = NULL;
688 	const struct core_mmu_phys_mem *mem2 = NULL;
689 	size_t n = 0;
690 
691 	if (start == end) {
692 		DMSG("No %s memory area defined", area_name);
693 		return;
694 	}
695 
696 	for (mem = start; mem < end; mem++)
697 		DMSG("%s memory [%" PRIxPA " %" PRIx64 "]",
698 		     area_name, mem->addr, (uint64_t)mem->addr + mem->size);
699 
700 	/* Check memories do not intersect each other */
701 	for (mem = start; mem + 1 < end; mem++) {
702 		for (mem2 = mem + 1; mem2 < end; mem2++) {
703 			if (core_is_buffer_intersect(mem2->addr, mem2->size,
704 						     mem->addr, mem->size)) {
705 				MSG_MEM_INSTERSECT(mem2->addr, mem2->size,
706 						   mem->addr, mem->size);
707 				panic("Special memory intersection");
708 			}
709 		}
710 	}
711 
712 	/*
713 	 * Check memories do not intersect any mapped memory.
714 	 * This is called before reserved VA space is loaded in mem_map.
715 	 */
716 	for (mem = start; mem < end; mem++) {
717 		for (n = 0; n < mem_map->count; n++) {
718 			if (core_is_buffer_intersect(mem->addr, mem->size,
719 						     mem_map->map[n].pa,
720 						     mem_map->map[n].size)) {
721 				MSG_MEM_INSTERSECT(mem->addr, mem->size,
722 						   mem_map->map[n].pa,
723 						   mem_map->map[n].size);
724 				panic("Special memory intersection");
725 			}
726 		}
727 	}
728 }
729 
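/* Extend @dst to cover the union of the @dst and @src physical ranges */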
730 static void merge_mmaps(struct tee_mmap_region *dst,
731 			const struct tee_mmap_region *src)
732 {
733 	paddr_t end_pa = MAX(dst->pa + dst->size - 1, src->pa + src->size - 1);
734 	paddr_t pa = MIN(dst->pa, src->pa);
735 
736 	DMSG("Merging %#"PRIxPA"..%#"PRIxPA" and %#"PRIxPA"..%#"PRIxPA,
737 	     dst->pa, dst->pa + dst->size - 1, src->pa,
738 	     src->pa + src->size - 1);
739 	dst->pa = pa;
740 	dst->size = end_pa - pa + 1;
741 }
742 
743 static bool mmaps_are_mergeable(const struct tee_mmap_region *r1,
744 				const struct tee_mmap_region *r2)
745 {
746 	if (r1->type != r2->type)
747 		return false;
748 
749 	if (r1->pa == r2->pa)
750 		return true;
751 
752 	if (r1->pa < r2->pa)
753 		return r1->pa + r1->size >= r2->pa;
754 	else
755 		return r2->pa + r2->size >= r1->pa;
756 }
757 
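/*
 * Insert a physical memory range into the memory map, keeping the entries
 * sorted by type and address and merging overlapping or adjacent ranges
 * of the same type.
 */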
758 static void add_phys_mem(struct memory_map *mem_map,
759 			 const char *mem_name __maybe_unused,
760 			 enum teecore_memtypes mem_type,
761 			 paddr_t mem_addr, paddr_size_t mem_size)
762 {
763 	size_t n = 0;
764 	const struct tee_mmap_region m0 = {
765 		.type = mem_type,
766 		.pa = mem_addr,
767 		.size = mem_size,
768 	};
769 
770 	if (!mem_size)	/* Discard null size entries */
771 		return;
772 
773 	 * If some ranges of memory of the same type overlap each
774 	 * other they are coalesced into one entry. To help this,
775 	 * added entries are sorted by increasing physical address.
776 	 * added entries are sorted by increasing physical.
777 	 *
778 	 * Note that it's valid to have the same physical memory as several
779 	 * different memory types, for instance the same device memory
780 	 * mapped as both secure and non-secure. This will probably not
781 	 * happen often in practice.
782 	 */
783 	DMSG("%s type %s 0x%08" PRIxPA " size 0x%08" PRIxPASZ,
784 	     mem_name, teecore_memtype_name(mem_type), mem_addr, mem_size);
785 	for  (n = 0; n < mem_map->count; n++) {
786 		if (mmaps_are_mergeable(mem_map->map + n, &m0)) {
787 			merge_mmaps(mem_map->map + n, &m0);
788 			/*
789 			 * The merged result might be mergeable with the
790 			 * next or previous entry.
791 			 */
792 			if (n + 1 < mem_map->count &&
793 			    mmaps_are_mergeable(mem_map->map + n,
794 						mem_map->map + n + 1)) {
795 				merge_mmaps(mem_map->map + n,
796 					    mem_map->map + n + 1);
797 				rem_array_elem(mem_map->map, mem_map->count,
798 					       sizeof(*mem_map->map), n + 1);
799 				mem_map->count--;
800 			}
801 			if (n > 0 && mmaps_are_mergeable(mem_map->map + n - 1,
802 							 mem_map->map + n)) {
803 				merge_mmaps(mem_map->map + n - 1,
804 					    mem_map->map + n);
805 				rem_array_elem(mem_map->map, mem_map->count,
806 					       sizeof(*mem_map->map), n);
807 				mem_map->count--;
808 			}
809 			return;
810 		}
811 		if (mem_type < mem_map->map[n].type ||
812 		    (mem_type == mem_map->map[n].type &&
813 		     mem_addr < mem_map->map[n].pa))
814 			break; /* found the spot where to insert this memory */
815 	}
816 
817 	grow_mem_map(mem_map);
818 	ins_array_elem(mem_map->map, mem_map->count, sizeof(*mem_map->map),
819 		       n, &m0);
820 }
821 
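/*
 * Insert a virtual address space region of @size bytes, without physical
 * backing, into the memory map sorted by type.
 */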
822 static void add_va_space(struct memory_map *mem_map,
823 			 enum teecore_memtypes type, size_t size)
824 {
825 	size_t n = 0;
826 
827 	DMSG("type %s size 0x%08zx", teecore_memtype_name(type), size);
828 	for  (n = 0; n < mem_map->count; n++) {
829 		if (type < mem_map->map[n].type)
830 			break;
831 	}
832 
833 	grow_mem_map(mem_map);
834 	ins_array_elem(mem_map->map, mem_map->count, sizeof(*mem_map->map),
835 		       n, NULL);
836 	mem_map->map[n] = (struct tee_mmap_region){
837 		.type = type,
838 		.size = size,
839 	};
840 }
841 
842 uint32_t core_mmu_type_to_attr(enum teecore_memtypes t)
843 {
844 	const uint32_t attr = TEE_MATTR_VALID_BLOCK;
845 	const uint32_t tagged = TEE_MATTR_MEM_TYPE_TAGGED <<
846 				TEE_MATTR_MEM_TYPE_SHIFT;
847 	const uint32_t cached = TEE_MATTR_MEM_TYPE_CACHED <<
848 				TEE_MATTR_MEM_TYPE_SHIFT;
849 	const uint32_t noncache = TEE_MATTR_MEM_TYPE_DEV <<
850 				  TEE_MATTR_MEM_TYPE_SHIFT;
851 
852 	switch (t) {
853 	case MEM_AREA_TEE_RAM:
854 		return attr | TEE_MATTR_SECURE | TEE_MATTR_PRWX | tagged;
855 	case MEM_AREA_TEE_RAM_RX:
856 	case MEM_AREA_INIT_RAM_RX:
857 	case MEM_AREA_IDENTITY_MAP_RX:
858 		return attr | TEE_MATTR_SECURE | TEE_MATTR_PRX | tagged;
859 	case MEM_AREA_TEE_RAM_RO:
860 	case MEM_AREA_INIT_RAM_RO:
861 		return attr | TEE_MATTR_SECURE | TEE_MATTR_PR | tagged;
862 	case MEM_AREA_TEE_RAM_RW:
863 	case MEM_AREA_NEX_RAM_RO: /* This has to be r/w during init runtime */
864 	case MEM_AREA_NEX_RAM_RW:
865 	case MEM_AREA_TEE_ASAN:
866 		return attr | TEE_MATTR_SECURE | TEE_MATTR_PRW | tagged;
867 	case MEM_AREA_TEE_COHERENT:
868 		return attr | TEE_MATTR_SECURE | TEE_MATTR_PRWX | noncache;
869 	case MEM_AREA_NSEC_SHM:
870 	case MEM_AREA_NEX_NSEC_SHM:
871 		return attr | TEE_MATTR_PRW | cached;
872 	case MEM_AREA_MANIFEST_DT:
873 		return attr | TEE_MATTR_SECURE | TEE_MATTR_PR | cached;
874 	case MEM_AREA_TRANSFER_LIST:
875 		return attr | TEE_MATTR_SECURE | TEE_MATTR_PRW | cached;
876 	case MEM_AREA_EXT_DT:
877 		/*
878 		 * If CFG_MAP_EXT_DT_SECURE is enabled map the external device
879 		 * tree as secure non-cached memory, otherwise, fall back to
880 		 * non-secure mapping.
881 		 */
882 		if (IS_ENABLED(CFG_MAP_EXT_DT_SECURE))
883 			return attr | TEE_MATTR_SECURE | TEE_MATTR_PRW |
884 			       noncache;
885 		fallthrough;
886 	case MEM_AREA_IO_NSEC:
887 		return attr | TEE_MATTR_PRW | noncache;
888 	case MEM_AREA_IO_SEC:
889 		return attr | TEE_MATTR_SECURE | TEE_MATTR_PRW | noncache;
890 	case MEM_AREA_RAM_NSEC:
891 		return attr | TEE_MATTR_PRW | cached;
892 	case MEM_AREA_RAM_SEC:
893 		return attr | TEE_MATTR_SECURE | TEE_MATTR_PRW | cached;
894 	case MEM_AREA_SEC_RAM_OVERALL:
895 		return attr | TEE_MATTR_SECURE | TEE_MATTR_PRW | tagged;
896 	case MEM_AREA_ROM_SEC:
897 		return attr | TEE_MATTR_SECURE | TEE_MATTR_PR | cached;
898 	case MEM_AREA_RES_VASPACE:
899 	case MEM_AREA_SHM_VASPACE:
900 		return 0;
901 	case MEM_AREA_PAGER_VASPACE:
902 		return TEE_MATTR_SECURE;
903 	default:
904 		panic("invalid type");
905 	}
906 }
907 
908 static bool __maybe_unused map_is_tee_ram(const struct tee_mmap_region *mm)
909 {
910 	switch (mm->type) {
911 	case MEM_AREA_TEE_RAM:
912 	case MEM_AREA_TEE_RAM_RX:
913 	case MEM_AREA_TEE_RAM_RO:
914 	case MEM_AREA_TEE_RAM_RW:
915 	case MEM_AREA_INIT_RAM_RX:
916 	case MEM_AREA_INIT_RAM_RO:
917 	case MEM_AREA_NEX_RAM_RW:
918 	case MEM_AREA_NEX_RAM_RO:
919 	case MEM_AREA_TEE_ASAN:
920 		return true;
921 	default:
922 		return false;
923 	}
924 }
925 
926 static bool __maybe_unused map_is_secure(const struct tee_mmap_region *mm)
927 {
928 	return !!(core_mmu_type_to_attr(mm->type) & TEE_MATTR_SECURE);
929 }
930 
931 static bool __maybe_unused map_is_pgdir(const struct tee_mmap_region *mm)
932 {
933 	return mm->region_size == CORE_MMU_PGDIR_SIZE;
934 }
935 
936 static int cmp_mmap_by_lower_va(const void *a, const void *b)
937 {
938 	const struct tee_mmap_region *mm_a = a;
939 	const struct tee_mmap_region *mm_b = b;
940 
941 	return CMP_TRILEAN(mm_a->va, mm_b->va);
942 }
943 
944 static void dump_mmap_table(struct memory_map *mem_map)
945 {
946 	size_t n = 0;
947 
948 	for (n = 0; n < mem_map->count; n++) {
949 		struct tee_mmap_region *map __maybe_unused = mem_map->map + n;
950 
951 		DMSG("type %-12s va 0x%08" PRIxVA "..0x%08" PRIxVA
952 		     " pa 0x%08" PRIxPA "..0x%08" PRIxPA " size 0x%08zx (%s)",
953 		     teecore_memtype_name(map->type), map->va,
954 		     map->va + map->size - 1, map->pa,
955 		     (paddr_t)(map->pa + map->size - 1), map->size,
956 		     map->region_size == SMALL_PAGE_SIZE ? "smallpg" : "pgdir");
957 	}
958 }
959 
960 #if DEBUG_XLAT_TABLE
961 
962 static void dump_xlat_table(vaddr_t va, unsigned int level)
963 {
964 	struct core_mmu_table_info tbl_info;
965 	unsigned int idx = 0;
966 	paddr_t pa;
967 	uint32_t attr;
968 
969 	core_mmu_find_table(NULL, va, level, &tbl_info);
970 	va = tbl_info.va_base;
971 	for (idx = 0; idx < tbl_info.num_entries; idx++) {
972 		core_mmu_get_entry(&tbl_info, idx, &pa, &attr);
973 		if (attr || level > CORE_MMU_BASE_TABLE_LEVEL) {
974 			const char *security_bit = "";
975 
976 			if (core_mmu_entry_have_security_bit(attr)) {
977 				if (attr & TEE_MATTR_SECURE)
978 					security_bit = "S";
979 				else
980 					security_bit = "NS";
981 			}
982 
983 			if (attr & TEE_MATTR_TABLE) {
984 				DMSG_RAW("%*s [LVL%d] VA:0x%010" PRIxVA
985 					" TBL:0x%010" PRIxPA " %s",
986 					level * 2, "", level, va, pa,
987 					security_bit);
988 				dump_xlat_table(va, level + 1);
989 			} else if (attr) {
990 				DMSG_RAW("%*s [LVL%d] VA:0x%010" PRIxVA
991 					" PA:0x%010" PRIxPA " %s-%s-%s-%s",
992 					level * 2, "", level, va, pa,
993 					mattr_is_cached(attr) ? "MEM" :
994 					"DEV",
995 					attr & TEE_MATTR_PW ? "RW" : "RO",
996 					attr & TEE_MATTR_PX ? "X " : "XN",
997 					security_bit);
998 			} else {
999 				DMSG_RAW("%*s [LVL%d] VA:0x%010" PRIxVA
1000 					    " INVALID\n",
1001 					    level * 2, "", level, va);
1002 			}
1003 		}
1004 		va += BIT64(tbl_info.shift);
1005 	}
1006 }
1007 
1008 #else
1009 
1010 static void dump_xlat_table(vaddr_t va __unused, int level __unused)
1011 {
1012 }
1013 
1014 #endif
1015 
1016 /*
1017  * Reserves virtual memory space for pager usage.
1018  *
1019  * From the start of the first memory used by the link script +
1020  * TEE_RAM_VA_SIZE should be covered, either with a direct mapping or empty
1021  * mapping for pager usage. This adds translation tables as needed for the
1022  * pager to operate.
1023  */
1024 static void add_pager_vaspace(struct memory_map *mem_map)
1025 {
1026 	paddr_t begin = 0;
1027 	paddr_t end = 0;
1028 	size_t size = 0;
1029 	size_t pos = 0;
1030 	size_t n = 0;
1031 
1032 
1033 	for (n = 0; n < mem_map->count; n++) {
1034 		if (map_is_tee_ram(mem_map->map + n)) {
1035 			if (!begin)
1036 				begin = mem_map->map[n].pa;
1037 			pos = n + 1;
1038 		}
1039 	}
1040 
1041 	end = mem_map->map[pos - 1].pa + mem_map->map[pos - 1].size;
1042 	assert(end - begin < TEE_RAM_VA_SIZE);
1043 	size = TEE_RAM_VA_SIZE - (end - begin);
1044 
1045 	grow_mem_map(mem_map);
1046 	ins_array_elem(mem_map->map, mem_map->count, sizeof(*mem_map->map),
1047 		       n, NULL);
1048 	mem_map->map[n] = (struct tee_mmap_region){
1049 		.type = MEM_AREA_PAGER_VASPACE,
1050 		.size = size,
1051 		.region_size = SMALL_PAGE_SIZE,
1052 		.attr = core_mmu_type_to_attr(MEM_AREA_PAGER_VASPACE),
1053 	};
1054 }
1055 
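/* Panic if any secure-only range intersects non-secure shared memory */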
1056 static void check_sec_nsec_mem_config(void)
1057 {
1058 	size_t n = 0;
1059 
1060 	for (n = 0; n < ARRAY_SIZE(secure_only); n++) {
1061 		if (pbuf_intersects(nsec_shared, secure_only[n].paddr,
1062 				    secure_only[n].size))
1063 			panic("Invalid memory access config: sec/nsec");
1064 	}
1065 }
1066 
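/*
 * Add the device regions described in the SP manifest
 * ("arm,ffa-manifest-device-regions") to the memory map as secure I/O.
 */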
1067 static void collect_device_mem_ranges(struct memory_map *mem_map)
1068 {
1069 	const char *compatible = "arm,ffa-manifest-device-regions";
1070 	void *fdt = get_manifest_dt();
1071 	const char *name = NULL;
1072 	uint64_t page_count = 0;
1073 	uint64_t base = 0;
1074 	int subnode = 0;
1075 	int node = 0;
1076 
1077 	assert(fdt);
1078 
1079 	node = fdt_node_offset_by_compatible(fdt, 0, compatible);
1080 	if (node < 0)
1081 		return;
1082 
1083 	fdt_for_each_subnode(subnode, fdt, node) {
1084 		name = fdt_get_name(fdt, subnode, NULL);
1085 		if (!name)
1086 			continue;
1087 
1088 		if (dt_getprop_as_number(fdt, subnode, "base-address",
1089 					 &base)) {
1090 			EMSG("Mandatory field is missing: base-address");
1091 			continue;
1092 		}
1093 
1094 		if (base & SMALL_PAGE_MASK) {
1095 			EMSG("base-address is not page aligned");
1096 			continue;
1097 		}
1098 
1099 		if (dt_getprop_as_number(fdt, subnode, "pages-count",
1100 					 &page_count)) {
1101 			EMSG("Mandatory field is missing: pages-count");
1102 			continue;
1103 		}
1104 
1105 		add_phys_mem(mem_map, name, MEM_AREA_IO_SEC,
1106 			     base, page_count * SMALL_PAGE_SIZE);
1107 	}
1108 }
1109 
1110 static void collect_mem_ranges(struct memory_map *mem_map)
1111 {
1112 	const struct core_mmu_phys_mem *mem = NULL;
1113 	vaddr_t ram_start = secure_only[0].paddr;
1114 	size_t n = 0;
1115 
1116 #define ADD_PHYS_MEM(_type, _addr, _size) \
1117 		add_phys_mem(mem_map, #_addr, (_type), (_addr), (_size))
1118 
1119 	if (IS_ENABLED(CFG_CORE_RWDATA_NOEXEC)) {
1120 		paddr_t next_pa = 0;
1121 
1122 		/*
1123 		 * Read-only and read-execute physical memory areas must
1124 		 * not be mapped by MEM_AREA_SEC_RAM_OVERALL, but all the
1125 		 * read/write areas should be.
1126 		 */
1127 		ADD_PHYS_MEM(MEM_AREA_SEC_RAM_OVERALL, ram_start,
1128 			     VCORE_UNPG_RX_PA - ram_start);
1129 		assert(VCORE_UNPG_RX_PA >= ram_start);
1130 		tee_ram_initial_offs = VCORE_UNPG_RX_PA - ram_start;
1131 		DMSG("tee_ram_initial_offs %#zx", tee_ram_initial_offs);
1132 		ADD_PHYS_MEM(MEM_AREA_TEE_RAM_RX, VCORE_UNPG_RX_PA,
1133 			     VCORE_UNPG_RX_SZ);
1134 		ADD_PHYS_MEM(MEM_AREA_TEE_RAM_RO, VCORE_UNPG_RO_PA,
1135 			     VCORE_UNPG_RO_SZ);
1136 
1137 		if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
1138 			ADD_PHYS_MEM(MEM_AREA_NEX_RAM_RO, VCORE_UNPG_RW_PA,
1139 				     VCORE_UNPG_RW_SZ);
1140 			ADD_PHYS_MEM(MEM_AREA_SEC_RAM_OVERALL, VCORE_UNPG_RW_PA,
1141 				     VCORE_UNPG_RW_SZ);
1142 
1143 			ADD_PHYS_MEM(MEM_AREA_NEX_RAM_RW, VCORE_NEX_RW_PA,
1144 				     VCORE_NEX_RW_SZ);
1145 			ADD_PHYS_MEM(MEM_AREA_SEC_RAM_OVERALL, VCORE_NEX_RW_PA,
1146 				     VCORE_NEX_RW_SZ);
1147 
1148 			ADD_PHYS_MEM(MEM_AREA_NEX_RAM_RW, VCORE_FREE_PA,
1149 				     VCORE_FREE_SZ);
1150 			ADD_PHYS_MEM(MEM_AREA_SEC_RAM_OVERALL, VCORE_FREE_PA,
1151 				     VCORE_FREE_SZ);
1152 			next_pa = VCORE_FREE_PA + VCORE_FREE_SZ;
1153 		} else {
1154 			ADD_PHYS_MEM(MEM_AREA_TEE_RAM_RW, VCORE_UNPG_RW_PA,
1155 				     VCORE_UNPG_RW_SZ);
1156 			ADD_PHYS_MEM(MEM_AREA_SEC_RAM_OVERALL, VCORE_UNPG_RW_PA,
1157 				     VCORE_UNPG_RW_SZ);
1158 
1159 			ADD_PHYS_MEM(MEM_AREA_TEE_RAM_RW, VCORE_FREE_PA,
1160 				     VCORE_FREE_SZ);
1161 			ADD_PHYS_MEM(MEM_AREA_SEC_RAM_OVERALL, VCORE_FREE_PA,
1162 				     VCORE_FREE_SZ);
1163 			next_pa = VCORE_FREE_PA + VCORE_FREE_SZ;
1164 		}
1165 
1166 		if (IS_ENABLED(CFG_WITH_PAGER)) {
1167 			paddr_t pa = 0;
1168 			size_t sz = 0;
1169 
1170 			ADD_PHYS_MEM(MEM_AREA_INIT_RAM_RX, VCORE_INIT_RX_PA,
1171 				     VCORE_INIT_RX_SZ);
1172 			ADD_PHYS_MEM(MEM_AREA_INIT_RAM_RO, VCORE_INIT_RO_PA,
1173 				     VCORE_INIT_RO_SZ);
1174 			/*
1175 			 * Core init mapping shall cover up to the end of the
1176 			 * physical RAM.  This is required since the hash
1177 			 * table is appended to the binary data after the
1178 			 * firmware build sequence.
1179 			 */
1180 			pa = VCORE_INIT_RO_PA + VCORE_INIT_RO_SZ;
1181 			sz = TEE_RAM_START + TEE_RAM_PH_SIZE - pa;
1182 			ADD_PHYS_MEM(MEM_AREA_TEE_RAM, pa, sz);
1183 		} else {
1184 			ADD_PHYS_MEM(MEM_AREA_SEC_RAM_OVERALL, next_pa,
1185 				     secure_only[0].paddr +
1186 				     secure_only[0].size - next_pa);
1187 		}
1188 	} else {
1189 		ADD_PHYS_MEM(MEM_AREA_TEE_RAM, TEE_RAM_START, TEE_RAM_PH_SIZE);
1190 		ADD_PHYS_MEM(MEM_AREA_SEC_RAM_OVERALL, secure_only[0].paddr,
1191 			     secure_only[0].size);
1192 	}
1193 
1194 	for (n = 1; n < ARRAY_SIZE(secure_only); n++)
1195 		ADD_PHYS_MEM(MEM_AREA_SEC_RAM_OVERALL, secure_only[n].paddr,
1196 			     secure_only[n].size);
1197 
1198 	if (IS_ENABLED(CFG_CORE_SANITIZE_KADDRESS) &&
1199 	    IS_ENABLED(CFG_WITH_PAGER)) {
1200 		/*
1201 		 * Asan ram is part of MEM_AREA_TEE_RAM_RW when pager is
1202 		 * disabled.
1203 		 */
1204 		ADD_PHYS_MEM(MEM_AREA_TEE_ASAN, ASAN_MAP_PA, ASAN_MAP_SZ);
1205 	}
1206 
1207 #undef ADD_PHYS_MEM
1208 
1209 	/* Collect device memory info from SP manifest */
1210 	if (IS_ENABLED(CFG_CORE_SEL2_SPMC))
1211 		collect_device_mem_ranges(mem_map);
1212 
1213 	for (mem = phys_mem_map_begin; mem < phys_mem_map_end; mem++) {
1214 		/* Only unmapped virtual ranges may have a null phys addr */
1215 		assert(mem->addr || !core_mmu_type_to_attr(mem->type));
1216 
1217 		add_phys_mem(mem_map, mem->name, mem->type,
1218 			     mem->addr, mem->size);
1219 	}
1220 
1221 	if (IS_ENABLED(CFG_SECURE_DATA_PATH))
1222 		verify_special_mem_areas(mem_map, phys_sdp_mem_begin,
1223 					 phys_sdp_mem_end, "SDP");
1224 
1225 	add_va_space(mem_map, MEM_AREA_RES_VASPACE, CFG_RESERVED_VASPACE_SIZE);
1226 	add_va_space(mem_map, MEM_AREA_SHM_VASPACE, SHM_VASPACE_SIZE);
1227 }
1228 
1229 static void assign_mem_granularity(struct memory_map *mem_map)
1230 {
1231 	size_t n = 0;
1232 
1233 	/*
1234 	 * Assign region sizes, note that MEM_AREA_TEE_RAM always uses
1235 	 * SMALL_PAGE_SIZE.
1236 	 */
1237 	for  (n = 0; n < mem_map->count; n++) {
1238 		paddr_t mask = mem_map->map[n].pa | mem_map->map[n].size;
1239 
1240 		if (mask & SMALL_PAGE_MASK)
1241 			panic("Impossible memory alignment");
1242 
1243 		if (map_is_tee_ram(mem_map->map + n))
1244 			mem_map->map[n].region_size = SMALL_PAGE_SIZE;
1245 		else
1246 			mem_map->map[n].region_size = CORE_MMU_PGDIR_SIZE;
1247 	}
1248 }
1249 
1250 static bool place_tee_ram_at_top(paddr_t paddr)
1251 {
1252 	return paddr > BIT64(core_mmu_get_va_width()) / 2;
1253 }
1254 
1255 /*
1256  * The MMU arch driver shall override this function if doing so helps
1257  * reduce the memory footprint of the address translation tables.
1258  */
1259 bool __weak core_mmu_prefer_tee_ram_at_top(paddr_t paddr)
1260 {
1261 	return place_tee_ram_at_top(paddr);
1262 }
1263 
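/*
 * Assign virtual addresses to all entries in the memory map, placing TEE
 * RAM at @tee_ram_va and the remaining regions below (@tee_ram_at_top) or
 * above it. Returns false if the layout doesn't fit within the VA width.
 */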
1264 static bool assign_mem_va_dir(vaddr_t tee_ram_va, struct memory_map *mem_map,
1265 			      bool tee_ram_at_top)
1266 {
1267 	struct tee_mmap_region *map = NULL;
1268 	vaddr_t va = 0;
1269 	bool va_is_secure = true;
1270 	size_t n = 0;
1271 
1272 	/*
1273 	 * tee_ram_va might equal 0 when CFG_CORE_ASLR=y.
1274 	 * 0 is by design an invalid va, so return false directly.
1275 	 */
1276 	if (!tee_ram_va)
1277 		return false;
1278 
1279 	/* Clear eventual previous assignments */
1280 	for (n = 0; n < mem_map->count; n++)
1281 		mem_map->map[n].va = 0;
1282 
1283 	/*
1284 	 * TEE RAM regions are always aligned with region_size.
1285 	 *
1286 	 * Note that MEM_AREA_PAGER_VASPACE also counts as TEE RAM here
1287 	 * since it handles virtual memory which covers the part of the ELF
1288 	 * that cannot fit directly into memory.
1289 	 */
1290 	va = tee_ram_va + tee_ram_initial_offs;
1291 	for (n = 0; n < mem_map->count; n++) {
1292 		map = mem_map->map + n;
1293 		if (map_is_tee_ram(map) ||
1294 		    map->type == MEM_AREA_PAGER_VASPACE) {
1295 			assert(!(va & (map->region_size - 1)));
1296 			assert(!(map->size & (map->region_size - 1)));
1297 			map->va = va;
1298 			if (ADD_OVERFLOW(va, map->size, &va))
1299 				return false;
1300 			if (va >= BIT64(core_mmu_get_va_width()))
1301 				return false;
1302 		}
1303 	}
1304 
1305 	if (tee_ram_at_top) {
1306 		/*
1307 		 * Map non-tee ram regions at addresses lower than the tee
1308 		 * ram region.
1309 		 */
1310 		va = tee_ram_va;
1311 		for (n = 0; n < mem_map->count; n++) {
1312 			map = mem_map->map + n;
1313 			map->attr = core_mmu_type_to_attr(map->type);
1314 			if (map->va)
1315 				continue;
1316 
1317 			if (!IS_ENABLED(CFG_WITH_LPAE) &&
1318 			    va_is_secure != map_is_secure(map)) {
1319 				va_is_secure = !va_is_secure;
1320 				va = ROUNDDOWN(va, CORE_MMU_PGDIR_SIZE);
1321 			}
1322 
1323 			if (SUB_OVERFLOW(va, map->size, &va))
1324 				return false;
1325 			va = ROUNDDOWN(va, map->region_size);
1326 			/*
1327 			 * Make sure that va is aligned with pa for
1328 			 * efficient pgdir mapping. Basically pa &
1329 			 * pgdir_mask should be == va & pgdir_mask
1330 			 */
1331 			if (map->size > 2 * CORE_MMU_PGDIR_SIZE) {
1332 				if (SUB_OVERFLOW(va, CORE_MMU_PGDIR_SIZE, &va))
1333 					return false;
1334 				va += (map->pa - va) & CORE_MMU_PGDIR_MASK;
1335 			}
1336 			map->va = va;
1337 		}
1338 	} else {
1339 		/*
1340 		 * Map non-tee ram regions at addresses higher than the tee
1341 		 * ram region.
1342 		 */
1343 		for (n = 0; n < mem_map->count; n++) {
1344 			map = mem_map->map + n;
1345 			map->attr = core_mmu_type_to_attr(map->type);
1346 			if (map->va)
1347 				continue;
1348 
1349 			if (!IS_ENABLED(CFG_WITH_LPAE) &&
1350 			    va_is_secure != map_is_secure(map)) {
1351 				va_is_secure = !va_is_secure;
1352 				if (ROUNDUP_OVERFLOW(va, CORE_MMU_PGDIR_SIZE,
1353 						     &va))
1354 					return false;
1355 			}
1356 
1357 			if (ROUNDUP_OVERFLOW(va, map->region_size, &va))
1358 				return false;
1359 			/*
1360 			 * Make sure that va is aligned with pa for
1361 			 * efficient pgdir mapping. Basically pa &
1362 			 * pgdir_mask should be == va & pgdir_mask
1363 			 */
1364 			if (map->size > 2 * CORE_MMU_PGDIR_SIZE) {
1365 				vaddr_t offs = (map->pa - va) &
1366 					       CORE_MMU_PGDIR_MASK;
1367 
1368 				if (ADD_OVERFLOW(va, offs, &va))
1369 					return false;
1370 			}
1371 
1372 			map->va = va;
1373 			if (ADD_OVERFLOW(va, map->size, &va))
1374 				return false;
1375 			if (va >= BIT64(core_mmu_get_va_width()))
1376 				return false;
1377 		}
1378 	}
1379 
1380 	return true;
1381 }
1382 
1383 static bool assign_mem_va(vaddr_t tee_ram_va, struct memory_map *mem_map)
1384 {
1385 	bool tee_ram_at_top = place_tee_ram_at_top(tee_ram_va);
1386 
1387 	/*
1388 	 * Check that we're not overlapping with the user VA range.
1389 	 */
1390 	if (IS_ENABLED(CFG_WITH_LPAE)) {
1391 		/*
1392 		 * User VA range is supposed to be defined after these
1393 		 * mappings have been established.
1394 		 */
1395 		assert(!core_mmu_user_va_range_is_defined());
1396 	} else {
1397 		vaddr_t user_va_base = 0;
1398 		size_t user_va_size = 0;
1399 
1400 		assert(core_mmu_user_va_range_is_defined());
1401 		core_mmu_get_user_va_range(&user_va_base, &user_va_size);
1402 		if (tee_ram_va < (user_va_base + user_va_size))
1403 			return false;
1404 	}
1405 
1406 	if (IS_ENABLED(CFG_WITH_PAGER)) {
1407 		bool prefered_dir = core_mmu_prefer_tee_ram_at_top(tee_ram_va);
1408 
1409 		/* Try to get the whole mapping covered by a single base xlat entry */
1410 		if (prefered_dir != tee_ram_at_top &&
1411 		    assign_mem_va_dir(tee_ram_va, mem_map, prefered_dir))
1412 			return true;
1413 	}
1414 
1415 	return assign_mem_va_dir(tee_ram_va, mem_map, tee_ram_at_top);
1416 }
1417 
1418 static int cmp_init_mem_map(const void *a, const void *b)
1419 {
1420 	const struct tee_mmap_region *mm_a = a;
1421 	const struct tee_mmap_region *mm_b = b;
1422 	int rc = 0;
1423 
1424 	rc = CMP_TRILEAN(mm_a->region_size, mm_b->region_size);
1425 	if (!rc)
1426 		rc = CMP_TRILEAN(mm_a->pa, mm_b->pa);
1427 	/*
1428 	 * 32-bit MMU descriptors cannot mix secure and non-secure mappings in
1429 	 * the same level-2 table. Hence sort secure mappings apart from
1430 	 * non-secure mappings.
1431 	 */
1432 	if (!rc && !IS_ENABLED(CFG_WITH_LPAE))
1433 		rc = CMP_TRILEAN(map_is_secure(mm_a), map_is_secure(mm_b));
1434 
1435 	return rc;
1436 }
1437 
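/*
 * Append an identity (VA == PA) read-execute mapping covering the given
 * range, unless it would intersect an already assigned virtual range.
 */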
1438 static bool mem_map_add_id_map(struct memory_map *mem_map,
1439 			       vaddr_t id_map_start, vaddr_t id_map_end)
1440 {
1441 	vaddr_t start = ROUNDDOWN(id_map_start, SMALL_PAGE_SIZE);
1442 	vaddr_t end = ROUNDUP(id_map_end, SMALL_PAGE_SIZE);
1443 	size_t len = end - start;
1444 	size_t n = 0;
1445 
1446 
1447 	for (n = 0; n < mem_map->count; n++)
1448 		if (core_is_buffer_intersect(mem_map->map[n].va,
1449 					     mem_map->map[n].size, start, len))
1450 			return false;
1451 
1452 	grow_mem_map(mem_map);
1453 	mem_map->map[mem_map->count - 1] = (struct tee_mmap_region){
1454 		.type = MEM_AREA_IDENTITY_MAP_RX,
1455 		/*
1456 		 * Could use CORE_MMU_PGDIR_SIZE to potentially save a
1457 		 * translation table, at the increased risk of clashes with
1458 		 * the rest of the memory map.
1459 		 */
1460 		.region_size = SMALL_PAGE_SIZE,
1461 		.pa = start,
1462 		.va = start,
1463 		.size = len,
1464 		.attr = core_mmu_type_to_attr(MEM_AREA_IDENTITY_MAP_RX),
1465 	};
1466 
1467 	return true;
1468 }
1469 
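/*
 * Collect all memory ranges, sort them and assign virtual addresses.
 * With CFG_CORE_ASLR a few VA bases derived from @seed are tried before
 * falling back to mapping the core 1:1 at its physical address. The
 * resulting offset from the link address is returned in @ret_offs.
 */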
1470 static struct memory_map *init_mem_map(struct memory_map *mem_map,
1471 				       unsigned long seed,
1472 				       unsigned long *ret_offs)
1473 {
1474 	/*
1475 	 * @id_map_start and @id_map_end describes a physical memory range
1476 	 * that must be mapped Read-Only eXecutable at identical virtual
1477 	 * addresses.
1478 	 */
1479 	vaddr_t id_map_start = (vaddr_t)__identity_map_init_start;
1480 	vaddr_t id_map_end = (vaddr_t)__identity_map_init_end;
1481 	vaddr_t start_addr = secure_only[0].paddr;
1482 	unsigned long offs = 0;
1483 
1484 	collect_mem_ranges(mem_map);
1485 	assign_mem_granularity(mem_map);
1486 
1487 	/*
1488 	 * To ease mapping and lower the use of xlat tables, sort the mapping
1489 	 * descriptions, moving small-page regions after the pgdir regions.
1490 	 */
1491 	qsort(mem_map->map, mem_map->count, sizeof(struct tee_mmap_region),
1492 	      cmp_init_mem_map);
1493 
1494 	if (IS_ENABLED(CFG_WITH_PAGER))
1495 		add_pager_vaspace(mem_map);
1496 
1497 	if (IS_ENABLED(CFG_CORE_ASLR) && seed) {
1498 		vaddr_t base_addr = start_addr + seed;
1499 		const unsigned int va_width = core_mmu_get_va_width();
1500 		const vaddr_t va_mask = GENMASK_64(va_width - 1,
1501 						   SMALL_PAGE_SHIFT);
1502 		vaddr_t ba = base_addr;
1503 		size_t n = 0;
1504 
1505 		for (n = 0; n < 3; n++) {
1506 			if (n)
1507 				ba = base_addr ^ BIT64(va_width - n);
1508 			ba &= va_mask;
1509 			if (assign_mem_va(ba, mem_map) &&
1510 			    mem_map_add_id_map(mem_map, id_map_start,
1511 					       id_map_end)) {
1512 				offs = ba - start_addr;
1513 				DMSG("Mapping core at %#"PRIxVA" offs %#lx",
1514 				     ba, offs);
1515 				goto out;
1516 			} else {
1517 				DMSG("Failed to map core at %#"PRIxVA, ba);
1518 			}
1519 		}
1520 		EMSG("Failed to map core with seed %#lx", seed);
1521 	}
1522 
1523 	if (!assign_mem_va(start_addr, mem_map))
1524 		panic();
1525 
1526 out:
1527 	qsort(mem_map->map, mem_map->count, sizeof(struct tee_mmap_region),
1528 	      cmp_mmap_by_lower_va);
1529 
1530 	dump_mmap_table(mem_map);
1531 
1532 	*ret_offs = offs;
1533 	return mem_map;
1534 }
1535 
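/*
 * Sanity check the memory map: secure RAM types must fall within
 * secure_only and non-secure shared memory within nsec_shared, otherwise
 * panic.
 */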
1536 static void check_mem_map(struct memory_map *mem_map)
1537 {
1538 	struct tee_mmap_region *m = NULL;
1539 	size_t n = 0;
1540 
1541 	for (n = 0; n < mem_map->count; n++) {
1542 		m = mem_map->map + n;
1543 		switch (m->type) {
1544 		case MEM_AREA_TEE_RAM:
1545 		case MEM_AREA_TEE_RAM_RX:
1546 		case MEM_AREA_TEE_RAM_RO:
1547 		case MEM_AREA_TEE_RAM_RW:
1548 		case MEM_AREA_INIT_RAM_RX:
1549 		case MEM_AREA_INIT_RAM_RO:
1550 		case MEM_AREA_NEX_RAM_RW:
1551 		case MEM_AREA_NEX_RAM_RO:
1552 		case MEM_AREA_IDENTITY_MAP_RX:
1553 			if (!pbuf_is_inside(secure_only, m->pa, m->size))
1554 				panic("TEE_RAM can't fit in secure_only");
1555 			break;
1556 		case MEM_AREA_SEC_RAM_OVERALL:
1557 			if (!pbuf_is_inside(secure_only, m->pa, m->size))
1558 				panic("SEC_RAM_OVERALL can't fit in secure_only");
1559 			break;
1560 		case MEM_AREA_NSEC_SHM:
1561 			if (!pbuf_is_inside(nsec_shared, m->pa, m->size))
1562 				panic("NS_SHM can't fit in nsec_shared");
1563 			break;
1564 		case MEM_AREA_TEE_COHERENT:
1565 		case MEM_AREA_TEE_ASAN:
1566 		case MEM_AREA_IO_SEC:
1567 		case MEM_AREA_IO_NSEC:
1568 		case MEM_AREA_EXT_DT:
1569 		case MEM_AREA_MANIFEST_DT:
1570 		case MEM_AREA_TRANSFER_LIST:
1571 		case MEM_AREA_RAM_SEC:
1572 		case MEM_AREA_RAM_NSEC:
1573 		case MEM_AREA_ROM_SEC:
1574 		case MEM_AREA_RES_VASPACE:
1575 		case MEM_AREA_SHM_VASPACE:
1576 		case MEM_AREA_PAGER_VASPACE:
1577 			break;
1578 		default:
1579 			EMSG("Unhandled memtype %d", m->type);
1580 			panic();
1581 		}
1582 	}
1583 }
1584 
1585 /*
1586  * core_init_mmu_map() - init tee core default memory mapping
1587  *
1588  * This routine sets the static default TEE core mapping. If @seed is > 0
1589  * and CFG_CORE_ASLR is enabled it will map the TEE core at a location
1590  * based on the seed and return the offset from the link address.
1591  *
1592  * If an error happens, core_init_mmu_map is expected to panic.
1593  *
1594  * Note: this function is weak just to make it possible to exclude it from
1595  * the unpaged area.
1596  */
1597 void __weak core_init_mmu_map(unsigned long seed, struct core_mmu_config *cfg)
1598 {
1599 #ifndef CFG_NS_VIRTUALIZATION
1600 	vaddr_t start = ROUNDDOWN((vaddr_t)__nozi_start, SMALL_PAGE_SIZE);
1601 #else
1602 	vaddr_t start = ROUNDDOWN((vaddr_t)__vcore_nex_rw_start,
1603 				  SMALL_PAGE_SIZE);
1604 #endif
1605 	vaddr_t len = ROUNDUP((vaddr_t)__nozi_end, SMALL_PAGE_SIZE) - start;
1606 	struct tee_mmap_region tmp_mmap_region = { };
1607 	struct memory_map mem_map = { };
1608 	unsigned long offs = 0;
1609 
1610 	if (IS_ENABLED(CFG_CORE_PHYS_RELOCATABLE) &&
1611 	    (core_mmu_tee_load_pa & SMALL_PAGE_MASK))
1612 		panic("OP-TEE load address is not page aligned");
1613 
1614 	check_sec_nsec_mem_config();
1615 
1616 	if (IS_ENABLED(CFG_BOOT_MEM)) {
1617 		mem_map.alloc_count = CFG_MMAP_REGIONS;
1618 		mem_map.map = boot_mem_alloc_tmp(mem_map.alloc_count *
1619 							sizeof(*mem_map.map),
1620 						 alignof(*mem_map.map));
1621 		memory_map_realloc_func = boot_mem_realloc_memory_map;
1622 	} else {
1623 		mem_map = static_memory_map;
1624 	}
1625 
1626 	static_memory_map = (struct memory_map){
1627 		.map = &tmp_mmap_region,
1628 		.alloc_count = 1,
1629 		.count = 1,
1630 	};
1631 	/*
1632 	 * Add an entry covering the translation tables which will be
1633 	 * involved in some virt_to_phys() and phys_to_virt() conversions.
1634 	 */
1635 	static_memory_map.map[0] = (struct tee_mmap_region){
1636 		.type = MEM_AREA_TEE_RAM,
1637 		.region_size = SMALL_PAGE_SIZE,
1638 		.pa = start,
1639 		.va = start,
1640 		.size = len,
1641 		.attr = core_mmu_type_to_attr(MEM_AREA_IDENTITY_MAP_RX),
1642 	};
1643 
1644 	init_mem_map(&mem_map, seed, &offs);
1645 
1646 	check_mem_map(&mem_map);
1647 	core_init_mmu(&mem_map);
1648 	dump_xlat_table(0x0, CORE_MMU_BASE_TABLE_LEVEL);
1649 	core_init_mmu_regs(cfg);
1650 	cfg->map_offset = offs;
1651 	static_memory_map = mem_map;
1652 	boot_mem_add_reloc(&static_memory_map.map);
1653 }
1654 
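/*
 * With CFG_BOOT_MEM, copy the memory map from temporary boot memory into
 * a nexus heap allocation, with a few spare entries, and switch to
 * heap-based reallocation.
 */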
1655 void core_mmu_save_mem_map(void)
1656 {
1657 	if (IS_ENABLED(CFG_BOOT_MEM)) {
1658 		size_t alloc_count = static_memory_map.count + 5;
1659 		size_t elem_sz = sizeof(*static_memory_map.map);
1660 		void *p = NULL;
1661 
1662 		p = nex_calloc(alloc_count, elem_sz);
1663 		if (!p)
1664 			panic();
1665 		memcpy(p, static_memory_map.map,
1666 		       static_memory_map.count * elem_sz);
1667 		static_memory_map.map = p;
1668 		static_memory_map.alloc_count = alloc_count;
1669 		memory_map_realloc_func = heap_realloc_memory_map;
1670 	}
1671 }
1672 
1673 bool core_mmu_mattr_is_ok(uint32_t mattr)
1674 {
1675 	/*
1676 	 * Keep in sync with core_mmu_lpae.c:mattr_to_desc and
1677 	 * core_mmu_v7.c:mattr_to_texcb
1678 	 */
1679 
1680 	switch ((mattr >> TEE_MATTR_MEM_TYPE_SHIFT) & TEE_MATTR_MEM_TYPE_MASK) {
1681 	case TEE_MATTR_MEM_TYPE_DEV:
1682 	case TEE_MATTR_MEM_TYPE_STRONGLY_O:
1683 	case TEE_MATTR_MEM_TYPE_CACHED:
1684 	case TEE_MATTR_MEM_TYPE_TAGGED:
1685 		return true;
1686 	default:
1687 		return false;
1688 	}
1689 }
1690 
1691 /*
1692  * test attributes of target physical buffer
1693  *
1694  * Flags: pbuf_is(SECURE, NOT_SECURE, RAM, IOMEM, KEYVAULT).
1695  *
1696  */
1697 bool core_pbuf_is(uint32_t attr, paddr_t pbuf, size_t len)
1698 {
1699 	struct tee_mmap_region *map;
1700 
1701 	/* Empty buffers comply with anything */
1702 	if (len == 0)
1703 		return true;
1704 
1705 	switch (attr) {
1706 	case CORE_MEM_SEC:
1707 		return pbuf_is_inside(secure_only, pbuf, len);
1708 	case CORE_MEM_NON_SEC:
1709 		return pbuf_is_inside(nsec_shared, pbuf, len) ||
1710 			pbuf_is_nsec_ddr(pbuf, len);
1711 	case CORE_MEM_TEE_RAM:
1712 		return core_is_buffer_inside(pbuf, len, TEE_RAM_START,
1713 							TEE_RAM_PH_SIZE);
1714 #ifdef CFG_CORE_RESERVED_SHM
1715 	case CORE_MEM_NSEC_SHM:
1716 		return core_is_buffer_inside(pbuf, len, TEE_SHMEM_START,
1717 							TEE_SHMEM_SIZE);
1718 #endif
1719 	case CORE_MEM_SDP_MEM:
1720 		return pbuf_is_sdp_mem(pbuf, len);
1721 	case CORE_MEM_CACHED:
1722 		map = find_map_by_pa(pbuf);
1723 		if (!map || !pbuf_inside_map_area(pbuf, len, map))
1724 			return false;
1725 		return mattr_is_cached(map->attr);
1726 	default:
1727 		return false;
1728 	}
1729 }
1730 
1731 /* test attributes of target virtual buffer (in core mapping) */
1732 bool core_vbuf_is(uint32_t attr, const void *vbuf, size_t len)
1733 {
1734 	paddr_t p;
1735 
1736 	/* Empty buffers comply with anything */
1737 	if (len == 0)
1738 		return true;
1739 
1740 	p = virt_to_phys((void *)vbuf);
1741 	if (!p)
1742 		return false;
1743 
1744 	return core_pbuf_is(attr, p, len);
1745 }
1746 
1747 /* core_va2pa - teecore exported service */
1748 static int __maybe_unused core_va2pa_helper(void *va, paddr_t *pa)
1749 {
1750 	struct tee_mmap_region *map;
1751 
1752 	map = find_map_by_va(va);
1753 	if (!va_is_in_map(map, (vaddr_t)va))
1754 		return -1;
1755 
1756 	/*
1757 	 * We can calculate the PA for a static map. Virtual address ranges
1758 	 * reserved for core dynamic mapping return a 'match' (return 0)
1759 	 * together with an invalid null physical address.
1760 	 */
1761 	if (map->pa)
1762 		*pa = map->pa + (vaddr_t)va  - map->va;
1763 	else
1764 		*pa = 0;
1765 
1766 	return 0;
1767 }
1768 
1769 static void *map_pa2va(struct tee_mmap_region *map, paddr_t pa, size_t len)
1770 {
1771 	if (!pa_is_in_map(map, pa, len))
1772 		return NULL;
1773 
1774 	return (void *)(vaddr_t)(map->va + pa - map->pa);
1775 }
1776 
1777 /*
1778  * Return the virtual address range of a given teecore memory area type
1779  */
1780 void core_mmu_get_mem_by_type(enum teecore_memtypes type, vaddr_t *s,
1781 			      vaddr_t *e)
1782 {
1783 	struct tee_mmap_region *map = find_map_by_type(type);
1784 
1785 	if (map) {
1786 		*s = map->va;
1787 		*e = map->va + map->size;
1788 	} else {
1789 		*s = 0;
1790 		*e = 0;
1791 	}
1792 }
1793 
1794 enum teecore_memtypes core_mmu_get_type_by_pa(paddr_t pa)
1795 {
1796 	struct tee_mmap_region *map = find_map_by_pa(pa);
1797 
1798 	if (!map)
1799 		return MEM_AREA_MAXTYPE;
1800 	return map->type;
1801 }
1802 
1803 void core_mmu_set_entry(struct core_mmu_table_info *tbl_info, unsigned int idx,
1804 			paddr_t pa, uint32_t attr)
1805 {
1806 	assert(idx < tbl_info->num_entries);
1807 	core_mmu_set_entry_primitive(tbl_info->table, tbl_info->level,
1808 				     idx, pa, attr);
1809 }
1810 
1811 void core_mmu_get_entry(struct core_mmu_table_info *tbl_info, unsigned int idx,
1812 			paddr_t *pa, uint32_t *attr)
1813 {
1814 	assert(idx < tbl_info->num_entries);
1815 	core_mmu_get_entry_primitive(tbl_info->table, tbl_info->level,
1816 				     idx, pa, attr);
1817 }
1818 
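/* Clear (unmap) all translation table entries covering @region */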
1819 static void clear_region(struct core_mmu_table_info *tbl_info,
1820 			 struct tee_mmap_region *region)
1821 {
1822 	unsigned int end = 0;
1823 	unsigned int idx = 0;
1824 
1825 	/* va, len and pa should be block aligned */
1826 	assert(!core_mmu_get_block_offset(tbl_info, region->va));
1827 	assert(!core_mmu_get_block_offset(tbl_info, region->size));
1828 	assert(!core_mmu_get_block_offset(tbl_info, region->pa));
1829 
1830 	idx = core_mmu_va2idx(tbl_info, region->va);
1831 	end = core_mmu_va2idx(tbl_info, region->va + region->size);
1832 
1833 	while (idx < end) {
1834 		core_mmu_set_entry(tbl_info, idx, 0, 0);
1835 		idx++;
1836 	}
1837 }
1838 
1839 static void set_region(struct core_mmu_table_info *tbl_info,
1840 		       struct tee_mmap_region *region)
1841 {
1842 	unsigned int end;
1843 	unsigned int idx;
1844 	paddr_t pa;
1845 
1846 	/* va, len and pa should be block aligned */
1847 	assert(!core_mmu_get_block_offset(tbl_info, region->va));
1848 	assert(!core_mmu_get_block_offset(tbl_info, region->size));
1849 	assert(!core_mmu_get_block_offset(tbl_info, region->pa));
1850 
1851 	idx = core_mmu_va2idx(tbl_info, region->va);
1852 	end = core_mmu_va2idx(tbl_info, region->va + region->size);
1853 	pa = region->pa;
1854 
1855 	while (idx < end) {
1856 		core_mmu_set_entry(tbl_info, idx, pa, region->attr);
1857 		idx++;
1858 		pa += BIT64(tbl_info->shift);
1859 	}
1860 }
1861 
1862 static void set_pg_region(struct core_mmu_table_info *dir_info,
1863 			  struct vm_region *region, struct pgt **pgt,
1864 			  struct core_mmu_table_info *pg_info)
1865 {
1866 	struct tee_mmap_region r = {
1867 		.va = region->va,
1868 		.size = region->size,
1869 		.attr = region->attr,
1870 	};
1871 	vaddr_t end = r.va + r.size;
1872 	uint32_t pgt_attr = (r.attr & TEE_MATTR_SECURE) | TEE_MATTR_TABLE;
1873 
1874 	while (r.va < end) {
1875 		if (!pg_info->table ||
1876 		    r.va >= (pg_info->va_base + CORE_MMU_PGDIR_SIZE)) {
1877 			/*
1878 			 * We're assigning a new translation table.
1879 			 */
1880 			unsigned int idx;
1881 
1882 			/* Virtual addresses must grow */
1883 			assert(r.va > pg_info->va_base);
1884 
1885 			idx = core_mmu_va2idx(dir_info, r.va);
1886 			pg_info->va_base = core_mmu_idx2va(dir_info, idx);
1887 
1888 			/*
1889 			 * Advance pgt to va_base, note that we may need to
1890 			 * skip multiple page tables if there are large
1891 			 * holes in the vm map.
1892 			 */
1893 			while ((*pgt)->vabase < pg_info->va_base) {
1894 				*pgt = SLIST_NEXT(*pgt, link);
1895 				/* We should have allocated enough */
1896 				assert(*pgt);
1897 			}
1898 			assert((*pgt)->vabase == pg_info->va_base);
1899 			pg_info->table = (*pgt)->tbl;
1900 
1901 			core_mmu_set_entry(dir_info, idx,
1902 					   virt_to_phys(pg_info->table),
1903 					   pgt_attr);
1904 		}
1905 
1906 		r.size = MIN(CORE_MMU_PGDIR_SIZE - (r.va - pg_info->va_base),
1907 			     end - r.va);
1908 
1909 		if (!(*pgt)->populated && !mobj_is_paged(region->mobj)) {
1910 			size_t granule = BIT(pg_info->shift);
1911 			size_t offset = r.va - region->va + region->offset;
1912 
1913 			r.size = MIN(r.size,
1914 				     mobj_get_phys_granule(region->mobj));
1915 			r.size = ROUNDUP(r.size, SMALL_PAGE_SIZE);
1916 
1917 			if (mobj_get_pa(region->mobj, offset, granule,
1918 					&r.pa) != TEE_SUCCESS)
1919 				panic("Failed to get PA of unpaged mobj");
1920 			set_region(pg_info, &r);
1921 		}
1922 		r.va += r.size;
1923 	}
1924 }
1925 
1926 static bool can_map_at_level(paddr_t paddr, vaddr_t vaddr,
1927 			     size_t size_left, paddr_t block_size,
1928 			     struct tee_mmap_region *mm)
1929 {
1930 	/* VA and PA are aligned to block size at current level */
1931 	if ((vaddr | paddr) & (block_size - 1))
1932 		return false;
1933 
1934 	/* Remainder fits into block at current level */
1935 	if (size_left < block_size)
1936 		return false;
1937 
1938 	/*
1939 	 * The required block size of the region is compatible with the
1940 	 * block size of the current level.
1941 	 */
1942 	if (mm->region_size < block_size)
1943 		return false;
1944 
1945 #ifdef CFG_WITH_PAGER
1946 	/*
1947 	 * If the pager is enabled, TEE RAM and the whole pager region
1948 	 * must be mapped with small pages only.
1949 	 */
1950 	if ((map_is_tee_ram(mm) || mm->type == MEM_AREA_PAGER_VASPACE) &&
1951 	    block_size != SMALL_PAGE_SIZE)
1952 		return false;
1953 #endif
1954 
1955 	return true;
1956 }
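
/*
 * Worked example (added for clarity, values are illustrative): with a
 * 4 KiB translation granule, a level whose block size is 2 MiB can only
 * be used when VA, PA and the remaining size are all 2 MiB aligned and
 * the region's region_size allows it. Mapping 3 MiB at VA/PA 0x40200000
 * would therefore use one 2 MiB block entry and then descend to 4 KiB
 * pages for the remaining 1 MiB.
 */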
1957 
1958 void core_mmu_map_region(struct mmu_partition *prtn, struct tee_mmap_region *mm)
1959 {
1960 	struct core_mmu_table_info tbl_info;
1961 	unsigned int idx;
1962 	vaddr_t vaddr = mm->va;
1963 	paddr_t paddr = mm->pa;
1964 	ssize_t size_left = mm->size;
1965 	unsigned int level;
1966 	bool table_found;
1967 	uint32_t old_attr;
1968 
1969 	assert(!((vaddr | paddr) & SMALL_PAGE_MASK));
1970 
1971 	while (size_left > 0) {
1972 		level = CORE_MMU_BASE_TABLE_LEVEL;
1973 
1974 		while (true) {
1975 			paddr_t block_size = 0;
1976 
1977 			assert(core_mmu_level_in_range(level));
1978 
1979 			table_found = core_mmu_find_table(prtn, vaddr, level,
1980 							  &tbl_info);
1981 			if (!table_found)
1982 				panic("can't find table for mapping");
1983 
1984 			block_size = BIT64(tbl_info.shift);
1985 
1986 			idx = core_mmu_va2idx(&tbl_info, vaddr);
1987 			if (!can_map_at_level(paddr, vaddr, size_left,
1988 					      block_size, mm)) {
1989 				bool secure = mm->attr & TEE_MATTR_SECURE;
1990 
1991 				/*
1992 				 * This part of the region can't be mapped at
1993 				 * this level. Need to go deeper.
1994 				 */
1995 				if (!core_mmu_entry_to_finer_grained(&tbl_info,
1996 								     idx,
1997 								     secure))
1998 					panic("Can't divide MMU entry");
1999 				level = tbl_info.next_level;
2000 				continue;
2001 			}
2002 
2003 			/* We can map part of the region at current level */
2004 			core_mmu_get_entry(&tbl_info, idx, NULL, &old_attr);
2005 			if (old_attr)
2006 				panic("Page is already mapped");
2007 
2008 			core_mmu_set_entry(&tbl_info, idx, paddr, mm->attr);
2009 			paddr += block_size;
2010 			vaddr += block_size;
2011 			size_left -= block_size;
2012 
2013 			break;
2014 		}
2015 	}
2016 }
2017 
2018 TEE_Result core_mmu_map_pages(vaddr_t vstart, paddr_t *pages, size_t num_pages,
2019 			      enum teecore_memtypes memtype)
2020 {
2021 	TEE_Result ret;
2022 	struct core_mmu_table_info tbl_info;
2023 	struct tee_mmap_region *mm;
2024 	unsigned int idx;
2025 	uint32_t old_attr;
2026 	uint32_t exceptions;
2027 	vaddr_t vaddr = vstart;
2028 	size_t i;
2029 	bool secure;
2030 
2031 	assert(!(core_mmu_type_to_attr(memtype) & TEE_MATTR_PX));
2032 
2033 	secure = core_mmu_type_to_attr(memtype) & TEE_MATTR_SECURE;
2034 
2035 	if (vaddr & SMALL_PAGE_MASK)
2036 		return TEE_ERROR_BAD_PARAMETERS;
2037 
2038 	exceptions = mmu_lock();
2039 
2040 	mm = find_map_by_va((void *)vaddr);
2041 	if (!mm || !va_is_in_map(mm, vaddr + num_pages * SMALL_PAGE_SIZE - 1))
2042 		panic("VA does not belong to any known mm region");
2043 
2044 	if (!core_mmu_is_dynamic_vaspace(mm))
2045 		panic("Trying to map into static region");
2046 
2047 	for (i = 0; i < num_pages; i++) {
2048 		if (pages[i] & SMALL_PAGE_MASK) {
2049 			ret = TEE_ERROR_BAD_PARAMETERS;
2050 			goto err;
2051 		}
2052 
2053 		while (true) {
2054 			if (!core_mmu_find_table(NULL, vaddr, UINT_MAX,
2055 						 &tbl_info))
2056 				panic("Can't find pagetable for vaddr");
2057 
2058 			idx = core_mmu_va2idx(&tbl_info, vaddr);
2059 			if (tbl_info.shift == SMALL_PAGE_SHIFT)
2060 				break;
2061 
2062 		/* This is a supertable. Need to divide it. */
2063 			if (!core_mmu_entry_to_finer_grained(&tbl_info, idx,
2064 							     secure))
2065 				panic("Failed to spread pgdir on small tables");
2066 		}
2067 
2068 		core_mmu_get_entry(&tbl_info, idx, NULL, &old_attr);
2069 		if (old_attr)
2070 			panic("Page is already mapped");
2071 
2072 		core_mmu_set_entry(&tbl_info, idx, pages[i],
2073 				   core_mmu_type_to_attr(memtype));
2074 		vaddr += SMALL_PAGE_SIZE;
2075 	}
2076 
2077 	/*
2078 	 * Make sure all the changes to translation tables are visible
2079 	 * before returning. TLB doesn't need to be invalidated as we are
2080 	 * guaranteed that there's no valid mapping in this range.
2081 	 */
2082 	core_mmu_table_write_barrier();
2083 	mmu_unlock(exceptions);
2084 
2085 	return TEE_SUCCESS;
2086 err:
2087 	mmu_unlock(exceptions);
2088 
2089 	if (i)
2090 		core_mmu_unmap_pages(vstart, i);
2091 
2092 	return ret;
2093 }
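
/*
 * Illustrative sketch (added for clarity, not part of the original file):
 * core_mmu_map_pages() expects vstart to lie inside a dynamic vaspace
 * region and rolls back a partially performed mapping itself on error, so
 * a caller only needs to check the return code. MEM_AREA_NSEC_SHM is an
 * assumed memory type for the example; real callers pass whatever type
 * matches their use case. The helper below is hypothetical.
 */
static TEE_Result __maybe_unused example_map_scattered_pages(vaddr_t vstart,
							      paddr_t *pages,
							      size_t count)
{
	return core_mmu_map_pages(vstart, pages, count, MEM_AREA_NSEC_SHM);
}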
2094 
2095 TEE_Result core_mmu_map_contiguous_pages(vaddr_t vstart, paddr_t pstart,
2096 					 size_t num_pages,
2097 					 enum teecore_memtypes memtype)
2098 {
2099 	struct core_mmu_table_info tbl_info = { };
2100 	struct tee_mmap_region *mm = NULL;
2101 	unsigned int idx = 0;
2102 	uint32_t old_attr = 0;
2103 	uint32_t exceptions = 0;
2104 	vaddr_t vaddr = vstart;
2105 	paddr_t paddr = pstart;
2106 	size_t i = 0;
2107 	bool secure = false;
2108 
2109 	assert(!(core_mmu_type_to_attr(memtype) & TEE_MATTR_PX));
2110 
2111 	secure = core_mmu_type_to_attr(memtype) & TEE_MATTR_SECURE;
2112 
2113 	if ((vaddr | paddr) & SMALL_PAGE_MASK)
2114 		return TEE_ERROR_BAD_PARAMETERS;
2115 
2116 	exceptions = mmu_lock();
2117 
2118 	mm = find_map_by_va((void *)vaddr);
2119 	if (!mm || !va_is_in_map(mm, vaddr + num_pages * SMALL_PAGE_SIZE - 1))
2120 		panic("VA does not belong to any known mm region");
2121 
2122 	if (!core_mmu_is_dynamic_vaspace(mm))
2123 		panic("Trying to map into static region");
2124 
2125 	for (i = 0; i < num_pages; i++) {
2126 		while (true) {
2127 			if (!core_mmu_find_table(NULL, vaddr, UINT_MAX,
2128 						 &tbl_info))
2129 				panic("Can't find pagetable for vaddr");
2130 
2131 			idx = core_mmu_va2idx(&tbl_info, vaddr);
2132 			if (tbl_info.shift == SMALL_PAGE_SHIFT)
2133 				break;
2134 
2135 		/* This is a supertable. Need to divide it. */
2136 			if (!core_mmu_entry_to_finer_grained(&tbl_info, idx,
2137 							     secure))
2138 				panic("Failed to spread pgdir on small tables");
2139 		}
2140 
2141 		core_mmu_get_entry(&tbl_info, idx, NULL, &old_attr);
2142 		if (old_attr)
2143 			panic("Page is already mapped");
2144 
2145 		core_mmu_set_entry(&tbl_info, idx, paddr,
2146 				   core_mmu_type_to_attr(memtype));
2147 		paddr += SMALL_PAGE_SIZE;
2148 		vaddr += SMALL_PAGE_SIZE;
2149 	}
2150 
2151 	/*
2152 	 * Make sure all the changes to translation tables are visible
2153 	 * before returning. TLB doesn't need to be invalidated as we are
2154 	 * guaranteed that there's no valid mapping in this range.
2155 	 */
2156 	core_mmu_table_write_barrier();
2157 	mmu_unlock(exceptions);
2158 
2159 	return TEE_SUCCESS;
2160 }
2161 
2162 static bool mem_range_is_in_vcore_free(vaddr_t vstart, size_t num_pages)
2163 {
2164 	return core_is_buffer_inside(vstart, num_pages * SMALL_PAGE_SIZE,
2165 				     VCORE_FREE_PA, VCORE_FREE_SZ);
2166 }
2167 
2168 void core_mmu_unmap_pages(vaddr_t vstart, size_t num_pages)
2169 {
2170 	struct core_mmu_table_info tbl_info;
2171 	struct tee_mmap_region *mm;
2172 	size_t i;
2173 	unsigned int idx;
2174 	uint32_t exceptions;
2175 
2176 	exceptions = mmu_lock();
2177 
2178 	mm = find_map_by_va((void *)vstart);
2179 	if (!mm || !va_is_in_map(mm, vstart + num_pages * SMALL_PAGE_SIZE - 1))
2180 		panic("VA does not belong to any known mm region");
2181 
2182 	if (!core_mmu_is_dynamic_vaspace(mm) &&
2183 	    !mem_range_is_in_vcore_free(vstart, num_pages))
2184 		panic("Trying to unmap static region");
2185 
2186 	for (i = 0; i < num_pages; i++, vstart += SMALL_PAGE_SIZE) {
2187 		if (!core_mmu_find_table(NULL, vstart, UINT_MAX, &tbl_info))
2188 			panic("Can't find pagetable");
2189 
2190 		if (tbl_info.shift != SMALL_PAGE_SHIFT)
2191 			panic("Invalid pagetable level");
2192 
2193 		idx = core_mmu_va2idx(&tbl_info, vstart);
2194 		core_mmu_set_entry(&tbl_info, idx, 0, 0);
2195 	}
2196 	tlbi_all();
2197 
2198 	mmu_unlock(exceptions);
2199 }
2200 
2201 void core_mmu_populate_user_map(struct core_mmu_table_info *dir_info,
2202 				struct user_mode_ctx *uctx)
2203 {
2204 	struct core_mmu_table_info pg_info = { };
2205 	struct pgt_cache *pgt_cache = &uctx->pgt_cache;
2206 	struct pgt *pgt = NULL;
2207 	struct pgt *p = NULL;
2208 	struct vm_region *r = NULL;
2209 
2210 	if (TAILQ_EMPTY(&uctx->vm_info.regions))
2211 		return; /* Nothing to map */
2212 
2213 	/*
2214 	 * Allocate all page tables in advance.
2215 	 */
2216 	pgt_get_all(uctx);
2217 	pgt = SLIST_FIRST(pgt_cache);
2218 
2219 	core_mmu_set_info_table(&pg_info, dir_info->next_level, 0, NULL);
2220 
2221 	TAILQ_FOREACH(r, &uctx->vm_info.regions, link)
2222 		set_pg_region(dir_info, r, &pgt, &pg_info);
2223 	/* Record that the translation tables now are populated. */
2224 	SLIST_FOREACH(p, pgt_cache, link) {
2225 		p->populated = true;
2226 		if (p == pgt)
2227 			break;
2228 	}
2229 	assert(p == pgt);
2230 }
2231 
2232 TEE_Result core_mmu_remove_mapping(enum teecore_memtypes type, void *addr,
2233 				   size_t len)
2234 {
2235 	struct core_mmu_table_info tbl_info = { };
2236 	struct tee_mmap_region *res_map = NULL;
2237 	struct tee_mmap_region *map = NULL;
2238 	paddr_t pa = virt_to_phys(addr);
2239 	size_t granule = 0;
2240 	ptrdiff_t i = 0;
2241 	paddr_t p = 0;
2242 	size_t l = 0;
2243 
2244 	map = find_map_by_type_and_pa(type, pa, len);
2245 	if (!map)
2246 		return TEE_ERROR_GENERIC;
2247 
2248 	res_map = find_map_by_type(MEM_AREA_RES_VASPACE);
2249 	if (!res_map)
2250 		return TEE_ERROR_GENERIC;
2251 	if (!core_mmu_find_table(NULL, res_map->va, UINT_MAX, &tbl_info))
2252 		return TEE_ERROR_GENERIC;
2253 	granule = BIT(tbl_info.shift);
2254 
2255 	if (map < static_memory_map.map ||
2256 	    map >= static_memory_map.map + static_memory_map.count)
2257 		return TEE_ERROR_GENERIC;
2258 	i = map - static_memory_map.map;
2259 
2260 	/* Check that we have a full match */
2261 	p = ROUNDDOWN(pa, granule);
2262 	l = ROUNDUP(len + pa - p, granule);
2263 	if (map->pa != p || map->size != l)
2264 		return TEE_ERROR_GENERIC;
2265 
2266 	clear_region(&tbl_info, map);
2267 	tlbi_all();
2268 
2269 	/* If possible remove the va range from res_map */
2270 	if (res_map->va - map->size == map->va) {
2271 		res_map->va -= map->size;
2272 		res_map->size += map->size;
2273 	}
2274 
2275 	/* Remove the entry. */
2276 	rem_array_elem(static_memory_map.map, static_memory_map.count,
2277 		       sizeof(*static_memory_map.map), i);
2278 	static_memory_map.count--;
2279 
2280 	return TEE_SUCCESS;
2281 }
2282 
2283 struct tee_mmap_region *
2284 core_mmu_find_mapping_exclusive(enum teecore_memtypes type, size_t len)
2285 {
2286 	struct memory_map *mem_map = get_memory_map();
2287 	struct tee_mmap_region *map_found = NULL;
2288 	size_t n = 0;
2289 
2290 	if (!len)
2291 		return NULL;
2292 
2293 	for (n = 0; n < mem_map->count; n++) {
2294 		if (mem_map->map[n].type != type)
2295 			continue;
2296 
2297 		if (map_found)
2298 			return NULL;
2299 
2300 		map_found = mem_map->map + n;
2301 	}
2302 
2303 	if (!map_found || map_found->size < len)
2304 		return NULL;
2305 
2306 	return map_found;
2307 }
2308 
2309 void *core_mmu_add_mapping(enum teecore_memtypes type, paddr_t addr, size_t len)
2310 {
2311 	struct memory_map *mem_map = &static_memory_map;
2312 	struct core_mmu_table_info tbl_info = { };
2313 	struct tee_mmap_region *map = NULL;
2314 	size_t granule = 0;
2315 	paddr_t p = 0;
2316 	size_t l = 0;
2317 
2318 	if (!len)
2319 		return NULL;
2320 
2321 	if (!core_mmu_check_end_pa(addr, len))
2322 		return NULL;
2323 
2324 	/* Check if the memory is already mapped */
2325 	map = find_map_by_type_and_pa(type, addr, len);
2326 	if (map && pbuf_inside_map_area(addr, len, map))
2327 		return (void *)(vaddr_t)(map->va + addr - map->pa);
2328 
2329 	/* Find the reserved va space used for late mappings */
2330 	map = find_map_by_type(MEM_AREA_RES_VASPACE);
2331 	if (!map)
2332 		return NULL;
2333 
2334 	if (!core_mmu_find_table(NULL, map->va, UINT_MAX, &tbl_info))
2335 		return NULL;
2336 
2337 	granule = BIT64(tbl_info.shift);
2338 	p = ROUNDDOWN(addr, granule);
2339 	l = ROUNDUP(len + addr - p, granule);
2340 
2341 	/* Ban overflowing virtual addresses */
2342 	if (map->size < l)
2343 		return NULL;
2344 
2345 	/*
2346 	 * Something is wrong, we can't fit the va range into the selected
2347 	 * table. The reserved va range is possibly misaligned with the
2348 	 * granule.
2349 	 */
2350 	if (core_mmu_va2idx(&tbl_info, map->va + len) >= tbl_info.num_entries)
2351 		return NULL;
2352 
2353 	if (static_memory_map.count >= static_memory_map.alloc_count)
2354 		return NULL;
2355 
2356 	mem_map->map[mem_map->count] = (struct tee_mmap_region){
2357 		.va = map->va,
2358 		.size = l,
2359 		.type = type,
2360 		.region_size = granule,
2361 		.attr = core_mmu_type_to_attr(type),
2362 		.pa = p,
2363 	};
2364 	map->va += l;
2365 	map->size -= l;
2366 	map = mem_map->map + mem_map->count;
2367 	mem_map->count++;
2368 
2369 	set_region(&tbl_info, map);
2370 
2371 	/* Make sure the new entry is visible before continuing. */
2372 	core_mmu_table_write_barrier();
2373 
2374 	return (void *)(vaddr_t)(map->va + addr - map->pa);
2375 }
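
/*
 * Illustrative sketch (added for clarity, not part of the original file):
 * a platform driver could map a device region late in boot with
 * core_mmu_add_mapping(). The request is rounded to the translation
 * granule internally and the returned pointer corresponds to the exact
 * PA passed in, or NULL on failure. The helper below is hypothetical.
 */
static vaddr_t __maybe_unused example_map_secure_device(paddr_t base,
							size_t size)
{
	void *va = core_mmu_add_mapping(MEM_AREA_IO_SEC, base, size);

	if (!va)
		return 0;

	return (vaddr_t)va;
}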
2376 
2377 #ifdef CFG_WITH_PAGER
2378 static vaddr_t get_linear_map_end_va(void)
2379 {
2380 	/* This is synced with the generic linker script kern.ld.S */
2381 	return (vaddr_t)__heap2_end;
2382 }
2383 
2384 static paddr_t get_linear_map_end_pa(void)
2385 {
2386 	return get_linear_map_end_va() - boot_mmu_config.map_offset;
2387 }
2388 #endif
2389 
2390 #if defined(CFG_TEE_CORE_DEBUG)
2391 static void check_pa_matches_va(void *va, paddr_t pa)
2392 {
2393 	TEE_Result res = TEE_ERROR_GENERIC;
2394 	vaddr_t v = (vaddr_t)va;
2395 	paddr_t p = 0;
2396 	struct core_mmu_table_info ti __maybe_unused = { };
2397 
2398 	if (core_mmu_user_va_range_is_defined()) {
2399 		vaddr_t user_va_base = 0;
2400 		size_t user_va_size = 0;
2401 
2402 		core_mmu_get_user_va_range(&user_va_base, &user_va_size);
2403 		if (v >= user_va_base &&
2404 		    v <= (user_va_base - 1 + user_va_size)) {
2405 			if (!core_mmu_user_mapping_is_active()) {
2406 				if (pa)
2407 					panic("issue in linear address space");
2408 				return;
2409 			}
2410 
2411 			res = vm_va2pa(to_user_mode_ctx(thread_get_tsd()->ctx),
2412 				       va, &p);
2413 			if (res == TEE_ERROR_NOT_SUPPORTED)
2414 				return;
2415 			if (res == TEE_SUCCESS && pa != p)
2416 				panic("bad pa");
2417 			if (res != TEE_SUCCESS && pa)
2418 				panic("false pa");
2419 			return;
2420 		}
2421 	}
2422 #ifdef CFG_WITH_PAGER
2423 	if (is_unpaged(va)) {
2424 		if (v - boot_mmu_config.map_offset != pa)
2425 			panic("issue in linear address space");
2426 		return;
2427 	}
2428 
2429 	if (tee_pager_get_table_info(v, &ti)) {
2430 		uint32_t a;
2431 
2432 		/*
2433 		 * Lookups in the page table managed by the pager are
2434 		 * dangerous for addresses in the paged area as those pages
2435 		 * change all the time. But some ranges are safe, for
2436 		 * instance rw-locked areas while the page is populated.
2437 		 */
2438 		core_mmu_get_entry(&ti, core_mmu_va2idx(&ti, v), &p, &a);
2439 		if (a & TEE_MATTR_VALID_BLOCK) {
2440 			paddr_t mask = BIT64(ti.shift) - 1;
2441 
2442 			p |= v & mask;
2443 			if (pa != p)
2444 				panic();
2445 		} else {
2446 			if (pa)
2447 				panic();
2448 		}
2449 		return;
2450 	}
2451 #endif
2452 
2453 	if (!core_va2pa_helper(va, &p)) {
2454 		/* Verify only the static mapping (case of a non-null phys addr) */
2455 		if (p && pa != p) {
2456 			DMSG("va %p maps 0x%" PRIxPA ", expect 0x%" PRIxPA,
2457 			     va, p, pa);
2458 			panic();
2459 		}
2460 	} else {
2461 		if (pa) {
2462 			DMSG("va %p unmapped, expect 0x%" PRIxPA, va, pa);
2463 			panic();
2464 		}
2465 	}
2466 }
2467 #else
2468 static void check_pa_matches_va(void *va __unused, paddr_t pa __unused)
2469 {
2470 }
2471 #endif
2472 
2473 paddr_t virt_to_phys(void *va)
2474 {
2475 	paddr_t pa = 0;
2476 
2477 	if (!arch_va2pa_helper(va, &pa))
2478 		pa = 0;
2479 	check_pa_matches_va(memtag_strip_tag(va), pa);
2480 	return pa;
2481 }
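
/*
 * Illustrative sketch (added for clarity, not part of the original file):
 * since virt_to_phys() returns 0 for unmapped addresses, it can be used
 * both to translate a core VA and to probe whether it is currently
 * mapped. The helper below is hypothetical.
 */
static bool __maybe_unused example_core_va_is_mapped(void *va)
{
	return virt_to_phys(va) != 0;
}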
2482 
2483 /*
2484  * Don't use check_va_matches_pa() for RISC-V, as its callee
2485  * arch_va2pa_helper() will eventually call it again; this creates
2486  * indirect recursion and can lead to a stack overflow.
2487  * Moreover, if arch_va2pa_helper() returns true, it implies that
2488  * the va2pa mapping matches, so there is no need to check it again.
2489  */
2490 #if defined(CFG_TEE_CORE_DEBUG) && !defined(__riscv)
2491 static void check_va_matches_pa(paddr_t pa, void *va)
2492 {
2493 	paddr_t p = 0;
2494 
2495 	if (!va)
2496 		return;
2497 
2498 	p = virt_to_phys(va);
2499 	if (p != pa) {
2500 		DMSG("va %p maps 0x%" PRIxPA " expect 0x%" PRIxPA, va, p, pa);
2501 		panic();
2502 	}
2503 }
2504 #else
2505 static void check_va_matches_pa(paddr_t pa __unused, void *va __unused)
2506 {
2507 }
2508 #endif
2509 
2510 static void *phys_to_virt_ts_vaspace(paddr_t pa, size_t len)
2511 {
2512 	if (!core_mmu_user_mapping_is_active())
2513 		return NULL;
2514 
2515 	return vm_pa2va(to_user_mode_ctx(thread_get_tsd()->ctx), pa, len);
2516 }
2517 
2518 #ifdef CFG_WITH_PAGER
2519 static void *phys_to_virt_tee_ram(paddr_t pa, size_t len)
2520 {
2521 	paddr_t end_pa = 0;
2522 
2523 	if (SUB_OVERFLOW(len, 1, &end_pa) || ADD_OVERFLOW(pa, end_pa, &end_pa))
2524 		return NULL;
2525 
2526 	if (pa >= TEE_LOAD_ADDR && pa < get_linear_map_end_pa()) {
2527 		if (end_pa > get_linear_map_end_pa())
2528 			return NULL;
2529 		return (void *)(vaddr_t)(pa + boot_mmu_config.map_offset);
2530 	}
2531 
2532 	return tee_pager_phys_to_virt(pa, len);
2533 }
2534 #else
2535 static void *phys_to_virt_tee_ram(paddr_t pa, size_t len)
2536 {
2537 	struct tee_mmap_region *mmap = NULL;
2538 
2539 	mmap = find_map_by_type_and_pa(MEM_AREA_TEE_RAM, pa, len);
2540 	if (!mmap)
2541 		mmap = find_map_by_type_and_pa(MEM_AREA_NEX_RAM_RW, pa, len);
2542 	if (!mmap)
2543 		mmap = find_map_by_type_and_pa(MEM_AREA_NEX_RAM_RO, pa, len);
2544 	if (!mmap)
2545 		mmap = find_map_by_type_and_pa(MEM_AREA_TEE_RAM_RW, pa, len);
2546 	if (!mmap)
2547 		mmap = find_map_by_type_and_pa(MEM_AREA_TEE_RAM_RO, pa, len);
2548 	if (!mmap)
2549 		mmap = find_map_by_type_and_pa(MEM_AREA_TEE_RAM_RX, pa, len);
2550 	/*
2551 	 * Note that MEM_AREA_INIT_RAM_RO and MEM_AREA_INIT_RAM_RX are only
2552 	 * used with pager and not needed here.
2553 	 */
2554 	return map_pa2va(mmap, pa, len);
2555 }
2556 #endif
2557 
2558 void *phys_to_virt(paddr_t pa, enum teecore_memtypes m, size_t len)
2559 {
2560 	void *va = NULL;
2561 
2562 	switch (m) {
2563 	case MEM_AREA_TS_VASPACE:
2564 		va = phys_to_virt_ts_vaspace(pa, len);
2565 		break;
2566 	case MEM_AREA_TEE_RAM:
2567 	case MEM_AREA_TEE_RAM_RX:
2568 	case MEM_AREA_TEE_RAM_RO:
2569 	case MEM_AREA_TEE_RAM_RW:
2570 	case MEM_AREA_NEX_RAM_RO:
2571 	case MEM_AREA_NEX_RAM_RW:
2572 		va = phys_to_virt_tee_ram(pa, len);
2573 		break;
2574 	case MEM_AREA_SHM_VASPACE:
2575 		/* Finding a VA from a PA in dynamic SHM is not yet supported */
2576 		va = NULL;
2577 		break;
2578 	default:
2579 		va = map_pa2va(find_map_by_type_and_pa(m, pa, len), pa, len);
2580 	}
2581 	if (m != MEM_AREA_SEC_RAM_OVERALL)
2582 		check_va_matches_pa(pa, va);
2583 	return va;
2584 }
2585 
2586 void *phys_to_virt_io(paddr_t pa, size_t len)
2587 {
2588 	struct tee_mmap_region *map = NULL;
2589 	void *va = NULL;
2590 
2591 	map = find_map_by_type_and_pa(MEM_AREA_IO_SEC, pa, len);
2592 	if (!map)
2593 		map = find_map_by_type_and_pa(MEM_AREA_IO_NSEC, pa, len);
2594 	if (!map)
2595 		return NULL;
2596 	va = map_pa2va(map, pa, len);
2597 	check_va_matches_pa(pa, va);
2598 	return va;
2599 }
2600 
2601 vaddr_t core_mmu_get_va(paddr_t pa, enum teecore_memtypes type, size_t len)
2602 {
2603 	if (cpu_mmu_enabled())
2604 		return (vaddr_t)phys_to_virt(pa, type, len);
2605 
2606 	return (vaddr_t)pa;
2607 }
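
/*
 * Illustrative sketch (added for clarity, not part of the original file):
 * early platform code often goes through core_mmu_get_va() so the same
 * path works both before the MMU is enabled (the PA is returned as-is)
 * and after (the PA is translated). MEM_AREA_IO_SEC and the helper below
 * are assumptions made for the example.
 */
static vaddr_t __maybe_unused example_device_base_va(paddr_t base, size_t size)
{
	return core_mmu_get_va(base, MEM_AREA_IO_SEC, size);
}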
2608 
2609 #ifdef CFG_WITH_PAGER
2610 bool is_unpaged(const void *va)
2611 {
2612 	vaddr_t v = (vaddr_t)va;
2613 
2614 	return v >= VCORE_START_VA && v < get_linear_map_end_va();
2615 }
2616 #endif
2617 
2618 #ifdef CFG_NS_VIRTUALIZATION
2619 bool is_nexus(const void *va)
2620 {
2621 	vaddr_t v = (vaddr_t)va;
2622 
2623 	return v >= VCORE_START_VA && v < VCORE_NEX_RW_PA + VCORE_NEX_RW_SZ;
2624 }
2625 #endif
2626 
2627 void core_mmu_init_virtualization(void)
2628 {
2629 	paddr_t b1 = 0;
2630 	paddr_size_t s1 = 0;
2631 
2632 	static_assert(ARRAY_SIZE(secure_only) <= 2);
2633 	if (ARRAY_SIZE(secure_only) == 2) {
2634 		b1 = secure_only[1].paddr;
2635 		s1 = secure_only[1].size;
2636 	}
2637 	virt_init_memory(&static_memory_map, secure_only[0].paddr,
2638 			 secure_only[0].size, b1, s1);
2639 }
2640 
2641 vaddr_t io_pa_or_va(struct io_pa_va *p, size_t len)
2642 {
2643 	assert(p->pa);
2644 	if (cpu_mmu_enabled()) {
2645 		if (!p->va)
2646 			p->va = (vaddr_t)phys_to_virt_io(p->pa, len);
2647 		assert(p->va);
2648 		return p->va;
2649 	}
2650 	return p->pa;
2651 }
2652 
2653 vaddr_t io_pa_or_va_secure(struct io_pa_va *p, size_t len)
2654 {
2655 	assert(p->pa);
2656 	if (cpu_mmu_enabled()) {
2657 		if (!p->va)
2658 			p->va = (vaddr_t)phys_to_virt(p->pa, MEM_AREA_IO_SEC,
2659 						      len);
2660 		assert(p->va);
2661 		return p->va;
2662 	}
2663 	return p->pa;
2664 }
2665 
2666 vaddr_t io_pa_or_va_nsec(struct io_pa_va *p, size_t len)
2667 {
2668 	assert(p->pa);
2669 	if (cpu_mmu_enabled()) {
2670 		if (!p->va)
2671 			p->va = (vaddr_t)phys_to_virt(p->pa, MEM_AREA_IO_NSEC,
2672 						      len);
2673 		assert(p->va);
2674 		return p->va;
2675 	}
2676 	return p->pa;
2677 }
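
/*
 * Illustrative sketch (added for clarity, not part of the original file):
 * drivers typically keep one struct io_pa_va per device so the VA lookup
 * is performed once and then cached in the structure. The base address
 * below is a made-up example value.
 */
static struct io_pa_va example_uart_base __maybe_unused = { .pa = 0x09000000 };

static vaddr_t __maybe_unused example_uart_va(void)
{
	/* Resolves and caches the VA on first use once the MMU is enabled */
	return io_pa_or_va_secure(&example_uart_base, SMALL_PAGE_SIZE);
}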
2678 
2679 #ifdef CFG_CORE_RESERVED_SHM
2680 static TEE_Result teecore_init_pub_ram(void)
2681 {
2682 	vaddr_t s = 0;
2683 	vaddr_t e = 0;
2684 
2685 	/* get virtual addr/size of NSec shared mem allocated from teecore */
2686 	core_mmu_get_mem_by_type(MEM_AREA_NSEC_SHM, &s, &e);
2687 
2688 	if (s >= e || s & SMALL_PAGE_MASK || e & SMALL_PAGE_MASK)
2689 		panic("invalid PUB RAM");
2690 
2691 	/* extra check: we could rely on core_mmu_get_mem_by_type() */
2692 	if (!tee_vbuf_is_non_sec(s, e - s))
2693 		panic("PUB RAM is not non-secure");
2694 
2695 #ifdef CFG_PL310
2696 	/* Allocate statically the l2cc mutex */
2697 	tee_l2cc_store_mutex_boot_pa(virt_to_phys((void *)s));
2698 	s += sizeof(uint32_t);			/* size of a pl310 mutex */
2699 	s = ROUNDUP(s, SMALL_PAGE_SIZE);	/* keep required alignment */
2700 #endif
2701 
2702 	default_nsec_shm_paddr = virt_to_phys((void *)s);
2703 	default_nsec_shm_size = e - s;
2704 
2705 	return TEE_SUCCESS;
2706 }
2707 early_init(teecore_init_pub_ram);
2708 #endif /*CFG_CORE_RESERVED_SHM*/
2709 
2710 static void __maybe_unused carve_out_core_mem(paddr_t pa, paddr_t end_pa)
2711 {
2712 	tee_mm_entry_t *mm __maybe_unused = NULL;
2713 
2714 	DMSG("%#"PRIxPA" .. %#"PRIxPA, pa, end_pa);
2715 	mm = phys_mem_alloc2(pa, end_pa - pa);
2716 	assert(mm);
2717 }
2718 
2719 void core_mmu_init_phys_mem(void)
2720 {
2721 	paddr_t ps = 0;
2722 	size_t size = 0;
2723 
2724 	/*
2725 	 * Get the addr/size of the RAM where TAs are loaded/executed and
2726 	 * feed it to the physical memory pool.
2727 	 */
2728 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
2729 		vaddr_t s = 0;
2730 		vaddr_t e = 0;
2731 
2732 		virt_get_ta_ram(&s, &e);
2733 		ps = virt_to_phys((void *)s);
2734 		size = e - s;
2735 		phys_mem_init(0, 0, ps, size);
2736 	} else {
2737 #ifdef CFG_WITH_PAGER
2738 		/*
2739 		 * The pager uses all core memory so there's no need to add
2740 		 * it to the pool.
2741 		 */
2742 		static_assert(ARRAY_SIZE(secure_only) == 2);
2743 		phys_mem_init(0, 0, secure_only[1].paddr, secure_only[1].size);
2744 #else /*!CFG_WITH_PAGER*/
2745 		size_t align = BIT(CORE_MMU_USER_CODE_SHIFT);
2746 		paddr_t end_pa = 0;
2747 		paddr_t pa = 0;
2748 
2749 		static_assert(ARRAY_SIZE(secure_only) <= 2);
2750 		if (ARRAY_SIZE(secure_only) == 2) {
2751 			ps = secure_only[1].paddr;
2752 			size = secure_only[1].size;
2753 		}
2754 		phys_mem_init(secure_only[0].paddr, secure_only[0].size,
2755 			      ps, size);
2756 
2757 		/*
2758 		 * The VCORE macros are relocatable so we need to translate
2759 		 * the addresses now that the MMU is enabled.
2760 		 */
2761 		end_pa = vaddr_to_phys(ROUNDUP(VCORE_FREE_END_PA,
2762 					       align) - 1) + 1;
2763 		/* Carve out the part used by OP-TEE core */
2764 		carve_out_core_mem(vaddr_to_phys(VCORE_UNPG_RX_PA), end_pa);
2765 		if (IS_ENABLED(CFG_CORE_SANITIZE_KADDRESS)) {
2766 			pa = vaddr_to_phys(ROUNDUP(ASAN_MAP_PA, align));
2767 			carve_out_core_mem(pa, pa + ASAN_MAP_SZ);
2768 		}
2769 
2770 		/* Carve out test SDP memory */
2771 #ifdef TEE_SDP_TEST_MEM_BASE
2772 		if (TEE_SDP_TEST_MEM_SIZE) {
2773 			pa = vaddr_to_phys(TEE_SDP_TEST_MEM_BASE);
2774 			carve_out_core_mem(pa, pa + TEE_SDP_TEST_MEM_SIZE);
2775 		}
2776 #endif
2777 #endif /*!CFG_WITH_PAGER*/
2778 	}
2779 }
2780