xref: /optee_os/core/mm/core_mmu.c (revision c79fb6d48c8f58eb3b9a6246a9931c5b07e47177)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2016, 2022 Linaro Limited
4  * Copyright (c) 2014, STMicroelectronics International N.V.
5  * Copyright (c) 2022, Arm Limited and Contributors. All rights reserved.
6  */
7 
8 #include <assert.h>
9 #include <config.h>
10 #include <kernel/boot.h>
11 #include <kernel/dt.h>
12 #include <kernel/linker.h>
13 #include <kernel/panic.h>
14 #include <kernel/spinlock.h>
15 #include <kernel/tee_l2cc_mutex.h>
16 #include <kernel/tee_misc.h>
17 #include <kernel/tlb_helpers.h>
18 #include <kernel/user_mode_ctx.h>
19 #include <kernel/virtualization.h>
20 #include <libfdt.h>
21 #include <mm/core_memprot.h>
22 #include <mm/core_mmu.h>
23 #include <mm/mobj.h>
24 #include <mm/pgt_cache.h>
25 #include <mm/tee_pager.h>
26 #include <mm/vm.h>
27 #include <platform_config.h>
28 #include <string.h>
29 #include <trace.h>
30 #include <util.h>
31 
32 #ifndef DEBUG_XLAT_TABLE
33 #define DEBUG_XLAT_TABLE 0
34 #endif
35 
36 #define SHM_VASPACE_SIZE	(1024 * 1024 * 32)
37 
38 /*
39  * These variables are initialized before .bss is cleared. To avoid
40  * resetting them when .bss is cleared we're storing them in .data instead,
41  * even if they initially are zero.
42  */
43 
44 #ifdef CFG_CORE_RESERVED_SHM
45 /* Default NSec shared memory allocated from NSec world */
46 unsigned long default_nsec_shm_size __nex_bss;
47 unsigned long default_nsec_shm_paddr __nex_bss;
48 #endif
49 
50 static struct tee_mmap_region static_memory_map[CFG_MMAP_REGIONS
51 #ifdef CFG_CORE_ASLR
52 						+ 1
53 #endif
54 						+ 1] __nex_bss;
55 
56 /* Define the platform's memory layout. */
57 struct memaccess_area {
58 	paddr_t paddr;
59 	size_t size;
60 };
61 
62 #define MEMACCESS_AREA(a, s) { .paddr = a, .size = s }
63 
64 static struct memaccess_area secure_only[] __nex_data = {
65 #ifdef TRUSTED_SRAM_BASE
66 	MEMACCESS_AREA(TRUSTED_SRAM_BASE, TRUSTED_SRAM_SIZE),
67 #endif
68 	MEMACCESS_AREA(TRUSTED_DRAM_BASE, TRUSTED_DRAM_SIZE),
69 };
70 
71 static struct memaccess_area nsec_shared[] __nex_data = {
72 #ifdef CFG_CORE_RESERVED_SHM
73 	MEMACCESS_AREA(TEE_SHMEM_START, TEE_SHMEM_SIZE),
74 #endif
75 };
76 
77 #if defined(CFG_SECURE_DATA_PATH)
78 static const char *tz_sdp_match = "linaro,secure-heap";
79 static struct memaccess_area sec_sdp;
80 #ifdef CFG_TEE_SDP_MEM_BASE
81 register_sdp_mem(CFG_TEE_SDP_MEM_BASE, CFG_TEE_SDP_MEM_SIZE);
82 #endif
83 #ifdef TEE_SDP_TEST_MEM_BASE
84 register_sdp_mem(TEE_SDP_TEST_MEM_BASE, TEE_SDP_TEST_MEM_SIZE);
85 #endif
86 #endif
87 
88 #ifdef CFG_CORE_RESERVED_SHM
89 register_phys_mem(MEM_AREA_NSEC_SHM, TEE_SHMEM_START, TEE_SHMEM_SIZE);
90 #endif
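/*
 * Spinlock serializing updates to the core translation tables; taken via
 * mmu_lock()/mmu_unlock() around the dynamic map/unmap helpers below.
 */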
91 static unsigned int mmu_spinlock;
92 
93 static uint32_t mmu_lock(void)
94 {
95 	return cpu_spin_lock_xsave(&mmu_spinlock);
96 }
97 
98 static void mmu_unlock(uint32_t exceptions)
99 {
100 	cpu_spin_unlock_xrestore(&mmu_spinlock, exceptions);
101 }
102 
103 static struct tee_mmap_region *get_memory_map(void)
104 {
105 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
106 		struct tee_mmap_region *map = virt_get_memory_map();
107 
108 		if (map)
109 			return map;
110 	}
111 
112 	return static_memory_map;
113 }
114 
115 static bool _pbuf_intersects(struct memaccess_area *a, size_t alen,
116 			     paddr_t pa, size_t size)
117 {
118 	size_t n;
119 
120 	for (n = 0; n < alen; n++)
121 		if (core_is_buffer_intersect(pa, size, a[n].paddr, a[n].size))
122 			return true;
123 	return false;
124 }
125 
126 #define pbuf_intersects(a, pa, size) \
127 	_pbuf_intersects((a), ARRAY_SIZE(a), (pa), (size))
128 
129 static bool _pbuf_is_inside(struct memaccess_area *a, size_t alen,
130 			    paddr_t pa, size_t size)
131 {
132 	size_t n;
133 
134 	for (n = 0; n < alen; n++)
135 		if (core_is_buffer_inside(pa, size, a[n].paddr, a[n].size))
136 			return true;
137 	return false;
138 }
139 
140 #define pbuf_is_inside(a, pa, size) \
141 	_pbuf_is_inside((a), ARRAY_SIZE(a), (pa), (size))
142 
143 static bool pa_is_in_map(struct tee_mmap_region *map, paddr_t pa, size_t len)
144 {
145 	paddr_t end_pa = 0;
146 
147 	if (!map)
148 		return false;
149 
150 	if (SUB_OVERFLOW(len, 1, &end_pa) || ADD_OVERFLOW(pa, end_pa, &end_pa))
151 		return false;
152 
153 	return (pa >= map->pa && end_pa <= map->pa + map->size - 1);
154 }
155 
156 static bool va_is_in_map(struct tee_mmap_region *map, vaddr_t va)
157 {
158 	if (!map)
159 		return false;
160 	return (va >= map->va && va <= (map->va + map->size - 1));
161 }
162 
163 /* check if target buffer fits in a core default map area */
164 static bool pbuf_inside_map_area(unsigned long p, size_t l,
165 				 struct tee_mmap_region *map)
166 {
167 	return core_is_buffer_inside(p, l, map->pa, map->size);
168 }
169 
170 static struct tee_mmap_region *find_map_by_type(enum teecore_memtypes type)
171 {
172 	struct tee_mmap_region *map;
173 
174 	for (map = get_memory_map(); !core_mmap_is_end_of_table(map); map++)
175 		if (map->type == type)
176 			return map;
177 	return NULL;
178 }
179 
180 static struct tee_mmap_region *
181 find_map_by_type_and_pa(enum teecore_memtypes type, paddr_t pa, size_t len)
182 {
183 	struct tee_mmap_region *map;
184 
185 	for (map = get_memory_map(); !core_mmap_is_end_of_table(map); map++) {
186 		if (map->type != type)
187 			continue;
188 		if (pa_is_in_map(map, pa, len))
189 			return map;
190 	}
191 	return NULL;
192 }
193 
194 static struct tee_mmap_region *find_map_by_va(void *va)
195 {
196 	struct tee_mmap_region *map = get_memory_map();
197 	unsigned long a = (unsigned long)va;
198 
199 	while (!core_mmap_is_end_of_table(map)) {
200 		if (a >= map->va && a <= (map->va - 1 + map->size))
201 			return map;
202 		map++;
203 	}
204 	return NULL;
205 }
206 
207 static struct tee_mmap_region *find_map_by_pa(unsigned long pa)
208 {
209 	struct tee_mmap_region *map = get_memory_map();
210 
211 	while (!core_mmap_is_end_of_table(map)) {
212 		if (pa >= map->pa && pa <= (map->pa + map->size - 1))
213 			return map;
214 		map++;
215 	}
216 	return NULL;
217 }
218 
219 #if defined(CFG_SECURE_DATA_PATH)
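/*
 * Look up the first node compatible with tz_sdp_match in the embedded DTB
 * and record its address range in sec_sdp. Returns false if no usable node
 * is found; additional matching nodes are ignored.
 */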
220 static bool dtb_get_sdp_region(void)
221 {
222 	void *fdt = NULL;
223 	int node = 0;
224 	int tmp_node = 0;
225 	paddr_t tmp_addr = 0;
226 	size_t tmp_size = 0;
227 
228 	if (!IS_ENABLED(CFG_EMBED_DTB))
229 		return false;
230 
231 	fdt = get_embedded_dt();
232 	if (!fdt)
233 		panic("No DTB found");
234 
235 	node = fdt_node_offset_by_compatible(fdt, -1, tz_sdp_match);
236 	if (node < 0) {
237 		DMSG("No %s compatible node found", tz_sdp_match);
238 		return false;
239 	}
240 	tmp_node = node;
241 	while (tmp_node >= 0) {
242 		tmp_node = fdt_node_offset_by_compatible(fdt, tmp_node,
243 							 tz_sdp_match);
244 		if (tmp_node >= 0)
245 			DMSG("Ignore SDP pool node %s, supports only 1 node",
246 			     fdt_get_name(fdt, tmp_node, NULL));
247 	}
248 
249 	tmp_addr = fdt_reg_base_address(fdt, node);
250 	if (tmp_addr == DT_INFO_INVALID_REG) {
251 		EMSG("%s: Unable to get base addr from DT", tz_sdp_match);
252 		return false;
253 	}
254 
255 	tmp_size = fdt_reg_size(fdt, node);
256 	if (tmp_size == DT_INFO_INVALID_REG_SIZE) {
257 		EMSG("%s: Unable to get size of base addr from DT",
258 		     tz_sdp_match);
259 		return false;
260 	}
261 
262 	sec_sdp.paddr = tmp_addr;
263 	sec_sdp.size = tmp_size;
264 
265 	return true;
266 }
267 #endif
268 
269 #if defined(CFG_CORE_DYN_SHM) || defined(CFG_SECURE_DATA_PATH)
270 static bool pbuf_is_special_mem(paddr_t pbuf, size_t len,
271 				const struct core_mmu_phys_mem *start,
272 				const struct core_mmu_phys_mem *end)
273 {
274 	const struct core_mmu_phys_mem *mem;
275 
276 	for (mem = start; mem < end; mem++) {
277 		if (core_is_buffer_inside(pbuf, len, mem->addr, mem->size))
278 			return true;
279 	}
280 
281 	return false;
282 }
283 #endif
284 
285 #ifdef CFG_CORE_DYN_SHM
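/*
 * Remove the range pa..pa+size-1 from the array of physical memory
 * entries: the containing entry is deleted, shrunk or split as needed.
 * Panics if the range only partially overlaps an entry.
 */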
286 static void carve_out_phys_mem(struct core_mmu_phys_mem **mem, size_t *nelems,
287 			       paddr_t pa, size_t size)
288 {
289 	struct core_mmu_phys_mem *m = *mem;
290 	size_t n = 0;
291 
292 	while (true) {
293 		if (n >= *nelems) {
294 			DMSG("No need to carve out %#" PRIxPA " size %#zx",
295 			     pa, size);
296 			return;
297 		}
298 		if (core_is_buffer_inside(pa, size, m[n].addr, m[n].size))
299 			break;
300 		if (!core_is_buffer_outside(pa, size, m[n].addr, m[n].size))
301 			panic();
302 		n++;
303 	}
304 
305 	if (pa == m[n].addr && size == m[n].size) {
306 		/* Remove this entry */
307 		(*nelems)--;
308 		memmove(m + n, m + n + 1, sizeof(*m) * (*nelems - n));
309 		m = nex_realloc(m, sizeof(*m) * *nelems);
310 		if (!m)
311 			panic();
312 		*mem = m;
313 	} else if (pa == m[n].addr) {
314 		m[n].addr += size;
315 		m[n].size -= size;
316 	} else if ((pa + size) == (m[n].addr + m[n].size)) {
317 		m[n].size -= size;
318 	} else {
319 		/* Need to split the memory entry */
320 		m = nex_realloc(m, sizeof(*m) * (*nelems + 1));
321 		if (!m)
322 			panic();
323 		*mem = m;
324 		memmove(m + n + 1, m + n, sizeof(*m) * (*nelems - n));
325 		(*nelems)++;
326 		m[n].size = pa - m[n].addr;
327 		m[n + 1].size -= size + m[n].size;
328 		m[n + 1].addr = pa + size;
329 	}
330 }
331 
332 static void check_phys_mem_is_outside(struct core_mmu_phys_mem *start,
333 				      size_t nelems,
334 				      struct tee_mmap_region *map)
335 {
336 	size_t n;
337 
338 	for (n = 0; n < nelems; n++) {
339 		if (!core_is_buffer_outside(start[n].addr, start[n].size,
340 					    map->pa, map->size)) {
341 			EMSG("Non-sec mem (%#" PRIxPA ":%#" PRIxPASZ
342 			     ") overlaps map (type %d %#" PRIxPA ":%#zx)",
343 			     start[n].addr, start[n].size,
344 			     map->type, map->pa, map->size);
345 			panic();
346 		}
347 	}
348 }
349 
350 static const struct core_mmu_phys_mem *discovered_nsec_ddr_start __nex_bss;
351 static size_t discovered_nsec_ddr_nelems __nex_bss;
352 
353 static int cmp_pmem_by_addr(const void *a, const void *b)
354 {
355 	const struct core_mmu_phys_mem *pmem_a = a;
356 	const struct core_mmu_phys_mem *pmem_b = b;
357 
358 	return CMP_TRILEAN(pmem_a->addr, pmem_b->addr);
359 }
360 
361 void core_mmu_set_discovered_nsec_ddr(struct core_mmu_phys_mem *start,
362 				      size_t nelems)
363 {
364 	struct core_mmu_phys_mem *m = start;
365 	size_t num_elems = nelems;
366 	struct tee_mmap_region *map = static_memory_map;
367 	const struct core_mmu_phys_mem __maybe_unused *pmem;
368 
369 	assert(!discovered_nsec_ddr_start);
370 	assert(m && num_elems);
371 
372 	qsort(m, num_elems, sizeof(*m), cmp_pmem_by_addr);
373 
374 	/*
375 	 * Non-secure shared memory and secure data path
376 	 * memory are supposed to reside inside non-secure
377 	 * memory. Since NSEC_SHM and SDP_MEM are used for
378 	 * specific purposes, make holes for that memory in
379 	 * the normal non-secure memory.
380 	 *
381 	 * This has to be done since for instance QEMU
382 	 * isn't aware of which memory range in the
383 	 * non-secure memory is used for NSEC_SHM.
384 	 */
385 
386 #ifdef CFG_SECURE_DATA_PATH
387 	if (dtb_get_sdp_region())
388 		carve_out_phys_mem(&m, &num_elems, sec_sdp.paddr, sec_sdp.size);
389 
390 	for (pmem = phys_sdp_mem_begin; pmem < phys_sdp_mem_end; pmem++)
391 		carve_out_phys_mem(&m, &num_elems, pmem->addr, pmem->size);
392 #endif
393 
394 	carve_out_phys_mem(&m, &num_elems, TEE_RAM_START, TEE_RAM_PH_SIZE);
395 	carve_out_phys_mem(&m, &num_elems, TA_RAM_START, TA_RAM_SIZE);
396 
397 	for (map = static_memory_map; !core_mmap_is_end_of_table(map); map++) {
398 		switch (map->type) {
399 		case MEM_AREA_NSEC_SHM:
400 			carve_out_phys_mem(&m, &num_elems, map->pa, map->size);
401 			break;
402 		case MEM_AREA_EXT_DT:
403 		case MEM_AREA_RES_VASPACE:
404 		case MEM_AREA_SHM_VASPACE:
405 		case MEM_AREA_TS_VASPACE:
406 		case MEM_AREA_PAGER_VASPACE:
407 			break;
408 		default:
409 			check_phys_mem_is_outside(m, num_elems, map);
410 		}
411 	}
412 
413 	discovered_nsec_ddr_start = m;
414 	discovered_nsec_ddr_nelems = num_elems;
415 
416 	if (!core_mmu_check_end_pa(m[num_elems - 1].addr,
417 				   m[num_elems - 1].size))
418 		panic();
419 }
420 
421 static bool get_discovered_nsec_ddr(const struct core_mmu_phys_mem **start,
422 				    const struct core_mmu_phys_mem **end)
423 {
424 	if (!discovered_nsec_ddr_start)
425 		return false;
426 
427 	*start = discovered_nsec_ddr_start;
428 	*end = discovered_nsec_ddr_start + discovered_nsec_ddr_nelems;
429 
430 	return true;
431 }
432 
433 static bool pbuf_is_nsec_ddr(paddr_t pbuf, size_t len)
434 {
435 	const struct core_mmu_phys_mem *start;
436 	const struct core_mmu_phys_mem *end;
437 
438 	if (!get_discovered_nsec_ddr(&start, &end))
439 		return false;
440 
441 	return pbuf_is_special_mem(pbuf, len, start, end);
442 }
443 
444 bool core_mmu_nsec_ddr_is_defined(void)
445 {
446 	const struct core_mmu_phys_mem *start;
447 	const struct core_mmu_phys_mem *end;
448 
449 	if (!get_discovered_nsec_ddr(&start, &end))
450 		return false;
451 
452 	return start != end;
453 }
454 #else
455 static bool pbuf_is_nsec_ddr(paddr_t pbuf __unused, size_t len __unused)
456 {
457 	return false;
458 }
459 #endif /*CFG_CORE_DYN_SHM*/
460 
461 #define MSG_MEM_INTERSECT(pa1, sz1, pa2, sz2) \
462 	EMSG("[%" PRIxPA " %" PRIx64 "] intersects [%" PRIxPA " %" PRIx64 "]", \
463 			pa1, (uint64_t)pa1 + (sz1), pa2, (uint64_t)pa2 + (sz2))
464 
465 #ifdef CFG_SECURE_DATA_PATH
466 static bool pbuf_is_sdp_mem(paddr_t pbuf, size_t len)
467 {
468 	bool is_sdp_mem = false;
469 
470 	if (sec_sdp.size)
471 		is_sdp_mem = core_is_buffer_inside(pbuf, len, sec_sdp.paddr,
472 						   sec_sdp.size);
473 
474 	if (!is_sdp_mem)
475 		is_sdp_mem = pbuf_is_special_mem(pbuf, len, phys_sdp_mem_begin,
476 						 phys_sdp_mem_end);
477 
478 	return is_sdp_mem;
479 }
480 
481 static struct mobj *core_sdp_mem_alloc_mobj(paddr_t pa, size_t size)
482 {
483 	struct mobj *mobj = mobj_phys_alloc(pa, size, TEE_MATTR_MEM_TYPE_CACHED,
484 					    CORE_MEM_SDP_MEM);
485 
486 	if (!mobj)
487 		panic("can't create SDP physical memory object");
488 
489 	return mobj;
490 }
491 
492 struct mobj **core_sdp_mem_create_mobjs(void)
493 {
494 	const struct core_mmu_phys_mem *mem = NULL;
495 	struct mobj **mobj_base = NULL;
496 	struct mobj **mobj = NULL;
497 	int cnt = phys_sdp_mem_end - phys_sdp_mem_begin;
498 
499 	if (sec_sdp.size)
500 		cnt++;
501 
502 	/* SDP mobjs table must end with a NULL entry */
503 	mobj_base = calloc(cnt + 1, sizeof(struct mobj *));
504 	if (!mobj_base)
505 		panic("Out of memory");
506 
507 	mobj = mobj_base;
508 
509 	for (mem = phys_sdp_mem_begin; mem < phys_sdp_mem_end; mem++, mobj++)
510 		*mobj = core_sdp_mem_alloc_mobj(mem->addr, mem->size);
511 
512 	if (sec_sdp.size)
513 		*mobj = core_sdp_mem_alloc_mobj(sec_sdp.paddr, sec_sdp.size);
514 
515 	return mobj_base;
516 }
517 
518 #else /* CFG_SECURE_DATA_PATH */
519 static bool pbuf_is_sdp_mem(paddr_t pbuf __unused, size_t len __unused)
520 {
521 	return false;
522 }
523 
524 #endif /* CFG_SECURE_DATA_PATH */
525 
526 /* Check special memories comply with registered memories */
527 static void verify_special_mem_areas(struct tee_mmap_region *mem_map,
528 				     size_t len,
529 				     const struct core_mmu_phys_mem *start,
530 				     const struct core_mmu_phys_mem *end,
531 				     const char *area_name __maybe_unused)
532 {
533 	const struct core_mmu_phys_mem *mem;
534 	const struct core_mmu_phys_mem *mem2;
535 	struct tee_mmap_region *mmap;
536 	size_t n;
537 
538 	if (start == end) {
539 		DMSG("No %s memory area defined", area_name);
540 		return;
541 	}
542 
543 	for (mem = start; mem < end; mem++)
544 		DMSG("%s memory [%" PRIxPA " %" PRIx64 "]",
545 		     area_name, mem->addr, (uint64_t)mem->addr + mem->size);
546 
547 	/* Check memories do not intersect each other */
548 	for (mem = start; mem + 1 < end; mem++) {
549 		for (mem2 = mem + 1; mem2 < end; mem2++) {
550 			if (core_is_buffer_intersect(mem2->addr, mem2->size,
551 						     mem->addr, mem->size)) {
552 				MSG_MEM_INTERSECT(mem2->addr, mem2->size,
553 						   mem->addr, mem->size);
554 				panic("Special memory intersection");
555 			}
556 		}
557 	}
558 
559 	/*
560 	 * Check memories do not intersect any mapped memory.
561 	 * This is called before reserved VA space is loaded in mem_map.
562 	 */
563 	for (mem = start; mem < end; mem++) {
564 		for (mmap = mem_map, n = 0; n < len; mmap++, n++) {
565 			if (core_is_buffer_intersect(mem->addr, mem->size,
566 						     mmap->pa, mmap->size)) {
567 				MSG_MEM_INTERSECT(mem->addr, mem->size,
568 						   mmap->pa, mmap->size);
569 				panic("Special memory intersection");
570 			}
571 		}
572 	}
573 }
574 
575 static void add_phys_mem(struct tee_mmap_region *memory_map, size_t num_elems,
576 			 const char *mem_name __maybe_unused,
577 			 enum teecore_memtypes mem_type,
578 			 paddr_t mem_addr, paddr_size_t mem_size, size_t *last)
579 {
580 	size_t n = 0;
581 	paddr_t pa;
582 	paddr_size_t size;
583 
584 	if (!mem_size)	/* Discard null size entries */
585 		return;
586 	/*
587 	 * If some ranges of memory of the same type overlap each
588 	 * other they are coalesced into one entry. To help this,
589 	 * added entries are sorted by increasing physical address.
590 	 *
591 	 * Note that it's valid to have the same physical memory as several
592 	 * different memory types, for instance the same device memory
593 	 * mapped as both secure and non-secure. This will probably not
594 	 * happen often in practice.
595 	 */
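	/*
	 * Illustrative example (hypothetical addresses): adding
	 * 0x40080000/0x100000 and then 0x40000000/0x100000 with the same
	 * type results in a single coalesced entry 0x40000000/0x180000.
	 */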
596 	DMSG("%s type %s 0x%08" PRIxPA " size 0x%08" PRIxPASZ,
597 	     mem_name, teecore_memtype_name(mem_type), mem_addr, mem_size);
598 	while (true) {
599 		if (n >= (num_elems - 1)) {
600 			EMSG("Out of entries (%zu) in memory_map", num_elems);
601 			panic();
602 		}
603 		if (n == *last)
604 			break;
605 		pa = memory_map[n].pa;
606 		size = memory_map[n].size;
607 		if (mem_type == memory_map[n].type &&
608 		    ((pa <= (mem_addr + (mem_size - 1))) &&
609 		    (mem_addr <= (pa + (size - 1))))) {
610 			DMSG("Physical mem map overlaps 0x%" PRIxPA, mem_addr);
611 			memory_map[n].pa = MIN(pa, mem_addr);
612 			memory_map[n].size = MAX(size, mem_size) +
613 					     (pa - memory_map[n].pa);
614 			return;
615 		}
616 		if (mem_type < memory_map[n].type ||
617 		    (mem_type == memory_map[n].type && mem_addr < pa))
618 			break; /* found the spot where to insert this memory */
619 		n++;
620 	}
621 
622 	memmove(memory_map + n + 1, memory_map + n,
623 		sizeof(struct tee_mmap_region) * (*last - n));
624 	(*last)++;
625 	memset(memory_map + n, 0, sizeof(memory_map[0]));
626 	memory_map[n].type = mem_type;
627 	memory_map[n].pa = mem_addr;
628 	memory_map[n].size = mem_size;
629 }
630 
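/*
 * Insert a virtual-address-space-only entry (no backing physical address)
 * of the given type and size, keeping the map sorted by type.
 */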
631 static void add_va_space(struct tee_mmap_region *memory_map, size_t num_elems,
632 			 enum teecore_memtypes type, size_t size, size_t *last)
633 {
634 	size_t n = 0;
635 
636 	DMSG("type %s size 0x%08zx", teecore_memtype_name(type), size);
637 	while (true) {
638 		if (n >= (num_elems - 1)) {
639 			EMSG("Out of entries (%zu) in memory_map", num_elems);
640 			panic();
641 		}
642 		if (n == *last)
643 			break;
644 		if (type < memory_map[n].type)
645 			break;
646 		n++;
647 	}
648 
649 	memmove(memory_map + n + 1, memory_map + n,
650 		sizeof(struct tee_mmap_region) * (*last - n));
651 	(*last)++;
652 	memset(memory_map + n, 0, sizeof(memory_map[0]));
653 	memory_map[n].type = type;
654 	memory_map[n].size = size;
655 }
656 
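/*
 * Translate a core memory type into the MMU attributes (validity, security
 * state, access permissions and memory type bits) used when mapping it.
 */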
657 uint32_t core_mmu_type_to_attr(enum teecore_memtypes t)
658 {
659 	const uint32_t attr = TEE_MATTR_VALID_BLOCK;
660 	const uint32_t tagged = TEE_MATTR_MEM_TYPE_TAGGED <<
661 				TEE_MATTR_MEM_TYPE_SHIFT;
662 	const uint32_t cached = TEE_MATTR_MEM_TYPE_CACHED <<
663 				TEE_MATTR_MEM_TYPE_SHIFT;
664 	const uint32_t noncache = TEE_MATTR_MEM_TYPE_DEV <<
665 				  TEE_MATTR_MEM_TYPE_SHIFT;
666 
667 	switch (t) {
668 	case MEM_AREA_TEE_RAM:
669 		return attr | TEE_MATTR_SECURE | TEE_MATTR_PRWX | tagged;
670 	case MEM_AREA_TEE_RAM_RX:
671 	case MEM_AREA_INIT_RAM_RX:
672 	case MEM_AREA_IDENTITY_MAP_RX:
673 		return attr | TEE_MATTR_SECURE | TEE_MATTR_PRX | tagged;
674 	case MEM_AREA_TEE_RAM_RO:
675 	case MEM_AREA_INIT_RAM_RO:
676 		return attr | TEE_MATTR_SECURE | TEE_MATTR_PR | tagged;
677 	case MEM_AREA_TEE_RAM_RW:
678 	case MEM_AREA_NEX_RAM_RO: /* This has to be r/w during init runtime */
679 	case MEM_AREA_NEX_RAM_RW:
680 	case MEM_AREA_TEE_ASAN:
681 		return attr | TEE_MATTR_SECURE | TEE_MATTR_PRW | tagged;
682 	case MEM_AREA_TEE_COHERENT:
683 		return attr | TEE_MATTR_SECURE | TEE_MATTR_PRWX | noncache;
684 	case MEM_AREA_TA_RAM:
685 		return attr | TEE_MATTR_SECURE | TEE_MATTR_PRW | tagged;
686 	case MEM_AREA_NSEC_SHM:
687 		return attr | TEE_MATTR_PRW | cached;
688 	case MEM_AREA_EXT_DT:
689 		/*
690 		 * If CFG_MAP_EXT_DT_SECURE is enabled map the external device
691 		 * tree as secure non-cached memory, otherwise, fall back to
692 		 * non-secure mapping.
693 		 */
694 		if (IS_ENABLED(CFG_MAP_EXT_DT_SECURE))
695 			return attr | TEE_MATTR_SECURE | TEE_MATTR_PRW |
696 			       noncache;
697 		fallthrough;
698 	case MEM_AREA_IO_NSEC:
699 		return attr | TEE_MATTR_PRW | noncache;
700 	case MEM_AREA_IO_SEC:
701 		return attr | TEE_MATTR_SECURE | TEE_MATTR_PRW | noncache;
702 	case MEM_AREA_RAM_NSEC:
703 		return attr | TEE_MATTR_PRW | cached;
704 	case MEM_AREA_RAM_SEC:
705 	case MEM_AREA_SEC_RAM_OVERALL:
706 		return attr | TEE_MATTR_SECURE | TEE_MATTR_PRW | cached;
707 	case MEM_AREA_RES_VASPACE:
708 	case MEM_AREA_SHM_VASPACE:
709 		return 0;
710 	case MEM_AREA_PAGER_VASPACE:
711 		return TEE_MATTR_SECURE;
712 	default:
713 		panic("invalid type");
714 	}
715 }
716 
717 static bool __maybe_unused map_is_tee_ram(const struct tee_mmap_region *mm)
718 {
719 	switch (mm->type) {
720 	case MEM_AREA_TEE_RAM:
721 	case MEM_AREA_TEE_RAM_RX:
722 	case MEM_AREA_TEE_RAM_RO:
723 	case MEM_AREA_TEE_RAM_RW:
724 	case MEM_AREA_INIT_RAM_RX:
725 	case MEM_AREA_INIT_RAM_RO:
726 	case MEM_AREA_NEX_RAM_RW:
727 	case MEM_AREA_NEX_RAM_RO:
728 	case MEM_AREA_TEE_ASAN:
729 		return true;
730 	default:
731 		return false;
732 	}
733 }
734 
735 static bool __maybe_unused map_is_secure(const struct tee_mmap_region *mm)
736 {
737 	return !!(core_mmu_type_to_attr(mm->type) & TEE_MATTR_SECURE);
738 }
739 
740 static bool __maybe_unused map_is_pgdir(const struct tee_mmap_region *mm)
741 {
742 	return mm->region_size == CORE_MMU_PGDIR_SIZE;
743 }
744 
745 static int cmp_mmap_by_lower_va(const void *a, const void *b)
746 {
747 	const struct tee_mmap_region *mm_a = a;
748 	const struct tee_mmap_region *mm_b = b;
749 
750 	return CMP_TRILEAN(mm_a->va, mm_b->va);
751 }
752 
753 static void dump_mmap_table(struct tee_mmap_region *memory_map)
754 {
755 	struct tee_mmap_region *map;
756 
757 	for (map = memory_map; !core_mmap_is_end_of_table(map); map++) {
758 		vaddr_t __maybe_unused vstart;
759 
760 		vstart = map->va + ((vaddr_t)map->pa & (map->region_size - 1));
761 		DMSG("type %-12s va 0x%08" PRIxVA "..0x%08" PRIxVA
762 		     " pa 0x%08" PRIxPA "..0x%08" PRIxPA " size 0x%08zx (%s)",
763 		     teecore_memtype_name(map->type), vstart,
764 		     vstart + map->size - 1, map->pa,
765 		     (paddr_t)(map->pa + map->size - 1), map->size,
766 		     map->region_size == SMALL_PAGE_SIZE ? "smallpg" : "pgdir");
767 	}
768 }
769 
770 #if DEBUG_XLAT_TABLE
771 
772 static void dump_xlat_table(vaddr_t va, unsigned int level)
773 {
774 	struct core_mmu_table_info tbl_info;
775 	unsigned int idx = 0;
776 	paddr_t pa;
777 	uint32_t attr;
778 
779 	core_mmu_find_table(NULL, va, level, &tbl_info);
780 	va = tbl_info.va_base;
781 	for (idx = 0; idx < tbl_info.num_entries; idx++) {
782 		core_mmu_get_entry(&tbl_info, idx, &pa, &attr);
783 		if (attr || level > CORE_MMU_BASE_TABLE_LEVEL) {
784 			const char *security_bit = "";
785 
786 			if (core_mmu_entry_have_security_bit(attr)) {
787 				if (attr & TEE_MATTR_SECURE)
788 					security_bit = "S";
789 				else
790 					security_bit = "NS";
791 			}
792 
793 			if (attr & TEE_MATTR_TABLE) {
794 				DMSG_RAW("%*s [LVL%d] VA:0x%010" PRIxVA
795 					" TBL:0x%010" PRIxPA " %s",
796 					level * 2, "", level, va, pa,
797 					security_bit);
798 				dump_xlat_table(va, level + 1);
799 			} else if (attr) {
800 				DMSG_RAW("%*s [LVL%d] VA:0x%010" PRIxVA
801 					" PA:0x%010" PRIxPA " %s-%s-%s-%s",
802 					level * 2, "", level, va, pa,
803 					mattr_is_cached(attr) ? "MEM" :
804 					"DEV",
805 					attr & TEE_MATTR_PW ? "RW" : "RO",
806 					attr & TEE_MATTR_PX ? "X " : "XN",
807 					security_bit);
808 			} else {
809 				DMSG_RAW("%*s [LVL%d] VA:0x%010" PRIxVA
810 					    " INVALID\n",
811 					    level * 2, "", level, va);
812 			}
813 		}
814 		va += BIT64(tbl_info.shift);
815 	}
816 }
817 
818 #else
819 
820 static void dump_xlat_table(vaddr_t va __unused, unsigned int level __unused)
821 {
822 }
823 
824 #endif
825 
826 /*
827  * Reserves virtual memory space for pager usage.
828  *
829  * The range from the start of the first memory used by the link script
830  * up to TEE_RAM_VA_SIZE should be covered, either with a direct mapping
831  * or an empty mapping for pager usage. This adds translation tables as
832  * needed for the pager to operate.
833  */
834 static void add_pager_vaspace(struct tee_mmap_region *mmap, size_t num_elems,
835 			      size_t *last)
836 {
837 	paddr_t begin = 0;
838 	paddr_t end = 0;
839 	size_t size = 0;
840 	size_t pos = 0;
841 	size_t n = 0;
842 
843 	if (*last >= (num_elems - 1)) {
844 		EMSG("Out of entries (%zu) in memory map", num_elems);
845 		panic();
846 	}
847 
848 	for (n = 0; !core_mmap_is_end_of_table(mmap + n); n++) {
849 		if (map_is_tee_ram(mmap + n)) {
850 			if (!begin)
851 				begin = mmap[n].pa;
852 			pos = n + 1;
853 		}
854 	}
855 
856 	end = mmap[pos - 1].pa + mmap[pos - 1].size;
857 	size = TEE_RAM_VA_SIZE - (end - begin);
858 	if (!size)
859 		return;
860 
861 	assert(pos <= *last);
862 	memmove(mmap + pos + 1, mmap + pos,
863 		sizeof(struct tee_mmap_region) * (*last - pos));
864 	(*last)++;
865 	memset(mmap + pos, 0, sizeof(mmap[0]));
866 	mmap[pos].type = MEM_AREA_PAGER_VASPACE;
867 	mmap[pos].va = 0;
868 	mmap[pos].size = size;
869 	mmap[pos].region_size = SMALL_PAGE_SIZE;
870 	mmap[pos].attr = core_mmu_type_to_attr(MEM_AREA_PAGER_VASPACE);
871 }
872 
873 static void check_sec_nsec_mem_config(void)
874 {
875 	size_t n = 0;
876 
877 	for (n = 0; n < ARRAY_SIZE(secure_only); n++) {
878 		if (pbuf_intersects(nsec_shared, secure_only[n].paddr,
879 				    secure_only[n].size))
880 			panic("Invalid memory access config: sec/nsec");
881 	}
882 }
883 
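/*
 * Collect the compile-time and registered physical memory ranges into
 * memory_map, sorted by type and address, then append the reserved and
 * shared-memory VA-space entries. Returns the number of used entries.
 */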
884 static size_t collect_mem_ranges(struct tee_mmap_region *memory_map,
885 				 size_t num_elems)
886 {
887 	const struct core_mmu_phys_mem *mem = NULL;
888 	size_t last = 0;
889 
890 
891 #define ADD_PHYS_MEM(_type, _addr, _size) \
892 		add_phys_mem(memory_map, num_elems, #_addr, (_type), \
893 			     (_addr), (_size),  &last)
894 
895 	if (IS_ENABLED(CFG_CORE_RWDATA_NOEXEC)) {
896 		ADD_PHYS_MEM(MEM_AREA_TEE_RAM_RO, TEE_RAM_START,
897 			     VCORE_UNPG_RX_PA - TEE_RAM_START);
898 		ADD_PHYS_MEM(MEM_AREA_TEE_RAM_RX, VCORE_UNPG_RX_PA,
899 			     VCORE_UNPG_RX_SZ);
900 		ADD_PHYS_MEM(MEM_AREA_TEE_RAM_RO, VCORE_UNPG_RO_PA,
901 			     VCORE_UNPG_RO_SZ);
902 
903 		if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
904 			ADD_PHYS_MEM(MEM_AREA_NEX_RAM_RO, VCORE_UNPG_RW_PA,
905 				     VCORE_UNPG_RW_SZ);
906 			ADD_PHYS_MEM(MEM_AREA_NEX_RAM_RW, VCORE_NEX_RW_PA,
907 				     VCORE_NEX_RW_SZ);
908 		} else {
909 			ADD_PHYS_MEM(MEM_AREA_TEE_RAM_RW, VCORE_UNPG_RW_PA,
910 				     VCORE_UNPG_RW_SZ);
911 		}
912 
913 		if (IS_ENABLED(CFG_WITH_PAGER)) {
914 			ADD_PHYS_MEM(MEM_AREA_INIT_RAM_RX, VCORE_INIT_RX_PA,
915 				     VCORE_INIT_RX_SZ);
916 			ADD_PHYS_MEM(MEM_AREA_INIT_RAM_RO, VCORE_INIT_RO_PA,
917 				     VCORE_INIT_RO_SZ);
918 		}
919 	} else {
920 		ADD_PHYS_MEM(MEM_AREA_TEE_RAM, TEE_RAM_START, TEE_RAM_PH_SIZE);
921 	}
922 
923 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
924 		ADD_PHYS_MEM(MEM_AREA_SEC_RAM_OVERALL, TRUSTED_DRAM_BASE,
925 			     TRUSTED_DRAM_SIZE);
926 	} else {
927 		/*
928 		 * Every guest will have its own TA RAM if virtualization
929 		 * support is enabled.
930 		 */
931 		ADD_PHYS_MEM(MEM_AREA_TA_RAM, TA_RAM_START, TA_RAM_SIZE);
932 	}
933 
934 	if (IS_ENABLED(CFG_CORE_SANITIZE_KADDRESS) &&
935 	    IS_ENABLED(CFG_WITH_PAGER)) {
936 		/*
937 		 * Asan ram is part of MEM_AREA_TEE_RAM_RW when pager is
938 		 * disabled.
939 		 */
940 		ADD_PHYS_MEM(MEM_AREA_TEE_ASAN, ASAN_MAP_PA, ASAN_MAP_SZ);
941 	}
942 
943 #undef ADD_PHYS_MEM
944 
945 	for (mem = phys_mem_map_begin; mem < phys_mem_map_end; mem++) {
946 		/* Only an unmapped virtual range may have a null phys addr */
947 		assert(mem->addr || !core_mmu_type_to_attr(mem->type));
948 
949 		add_phys_mem(memory_map, num_elems, mem->name, mem->type,
950 			     mem->addr, mem->size, &last);
951 	}
952 
953 	if (IS_ENABLED(CFG_SECURE_DATA_PATH))
954 		verify_special_mem_areas(memory_map, num_elems,
955 					 phys_sdp_mem_begin,
956 					 phys_sdp_mem_end, "SDP");
957 
958 	add_va_space(memory_map, num_elems, MEM_AREA_RES_VASPACE,
959 		     CFG_RESERVED_VASPACE_SIZE, &last);
960 
961 	add_va_space(memory_map, num_elems, MEM_AREA_SHM_VASPACE,
962 		     SHM_VASPACE_SIZE, &last);
963 
964 	memory_map[last].type = MEM_AREA_END;
965 
966 	return last;
967 }
968 
969 static void assign_mem_granularity(struct tee_mmap_region *memory_map)
970 {
971 	struct tee_mmap_region *map = NULL;
972 
973 	/*
974 	 * Assign region sizes, note that MEM_AREA_TEE_RAM always uses
975 	 * SMALL_PAGE_SIZE.
976 	 */
977 	for (map = memory_map; !core_mmap_is_end_of_table(map); map++) {
978 		paddr_t mask = map->pa | map->size;
979 
980 		if (!(mask & CORE_MMU_PGDIR_MASK))
981 			map->region_size = CORE_MMU_PGDIR_SIZE;
982 		else if (!(mask & SMALL_PAGE_MASK))
983 			map->region_size = SMALL_PAGE_SIZE;
984 		else
985 			panic("Impossible memory alignment");
986 
987 		if (map_is_tee_ram(map))
988 			map->region_size = SMALL_PAGE_SIZE;
989 	}
990 }
991 
992 static bool place_tee_ram_at_top(paddr_t paddr)
993 {
994 	return paddr > BIT64(core_mmu_get_va_width()) / 2;
995 }
996 
997 /*
998  * MMU arch driver shall override this function if it helps
999  * optimizing the memory footprint of the address translation tables.
1000  */
1001 bool __weak core_mmu_prefer_tee_ram_at_top(paddr_t paddr)
1002 {
1003 	return place_tee_ram_at_top(paddr);
1004 }
1005 
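/*
 * Assign virtual addresses to all regions: TEE RAM (and the pager VA
 * space) is placed at tee_ram_va, remaining regions are laid out below
 * or above it depending on tee_ram_at_top. Returns false if the layout
 * does not fit within the VA width.
 */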
1006 static bool assign_mem_va_dir(vaddr_t tee_ram_va,
1007 			      struct tee_mmap_region *memory_map,
1008 			      bool tee_ram_at_top)
1009 {
1010 	struct tee_mmap_region *map = NULL;
1011 	vaddr_t va = 0;
1012 	bool va_is_secure = true;
1013 
1014 	/*
1015 	 * tee_ram_va might equal 0 when CFG_CORE_ASLR=y.
1016 	 * 0 is by design an invalid va, so return false directly.
1017 	 */
1018 	if (!tee_ram_va)
1019 		return false;
1020 
1021 	/* Clear any previous assignments */
1022 	for (map = memory_map; !core_mmap_is_end_of_table(map); map++)
1023 		map->va = 0;
1024 
1025 	/*
1026 	 * TEE RAM regions are always aligned with region_size.
1027 	 *
1028 	 * Note that MEM_AREA_PAGER_VASPACE also counts as TEE RAM here
1029 	 * since it handles virtual memory which covers the part of the ELF
1030 	 * that cannot fit directly into memory.
1031 	 */
1032 	va = tee_ram_va;
1033 	for (map = memory_map; !core_mmap_is_end_of_table(map); map++) {
1034 		if (map_is_tee_ram(map) ||
1035 		    map->type == MEM_AREA_PAGER_VASPACE) {
1036 			assert(!(va & (map->region_size - 1)));
1037 			assert(!(map->size & (map->region_size - 1)));
1038 			map->va = va;
1039 			if (ADD_OVERFLOW(va, map->size, &va))
1040 				return false;
1041 			if (va >= BIT64(core_mmu_get_va_width()))
1042 				return false;
1043 		}
1044 	}
1045 
1046 	if (tee_ram_at_top) {
1047 		/*
1048 		 * Map non-tee ram regions at addresses lower than the tee
1049 		 * ram region.
1050 		 */
1051 		va = tee_ram_va;
1052 		for (map = memory_map; !core_mmap_is_end_of_table(map); map++) {
1053 			map->attr = core_mmu_type_to_attr(map->type);
1054 			if (map->va)
1055 				continue;
1056 
1057 			if (!IS_ENABLED(CFG_WITH_LPAE) &&
1058 			    va_is_secure != map_is_secure(map)) {
1059 				va_is_secure = !va_is_secure;
1060 				va = ROUNDDOWN(va, CORE_MMU_PGDIR_SIZE);
1061 			}
1062 
1063 			if (SUB_OVERFLOW(va, map->size, &va))
1064 				return false;
1065 			va = ROUNDDOWN(va, map->region_size);
1066 			/*
1067 			 * Make sure that va is aligned with pa for
1068 			 * efficient pgdir mapping. Basically pa &
1069 			 * pgdir_mask should be == va & pgdir_mask
1070 			 */
1071 			if (map->size > 2 * CORE_MMU_PGDIR_SIZE) {
1072 				if (SUB_OVERFLOW(va, CORE_MMU_PGDIR_SIZE, &va))
1073 					return false;
1074 				va += (map->pa - va) & CORE_MMU_PGDIR_MASK;
1075 			}
1076 			map->va = va;
1077 		}
1078 	} else {
1079 		/*
1080 		 * Map non-tee ram regions at addresses higher than the tee
1081 		 * ram region.
1082 		 */
1083 		for (map = memory_map; !core_mmap_is_end_of_table(map); map++) {
1084 			map->attr = core_mmu_type_to_attr(map->type);
1085 			if (map->va)
1086 				continue;
1087 
1088 			if (!IS_ENABLED(CFG_WITH_LPAE) &&
1089 			    va_is_secure != map_is_secure(map)) {
1090 				va_is_secure = !va_is_secure;
1091 				if (ROUNDUP_OVERFLOW(va, CORE_MMU_PGDIR_SIZE,
1092 						     &va))
1093 					return false;
1094 			}
1095 
1096 			if (ROUNDUP_OVERFLOW(va, map->region_size, &va))
1097 				return false;
1098 			/*
1099 			 * Make sure that va is aligned with pa for
1100 			 * efficient pgdir mapping. Basically pa &
1101 			 * pgdir_mask should be == va & pgdir_mask
1102 			 */
1103 			if (map->size > 2 * CORE_MMU_PGDIR_SIZE) {
1104 				vaddr_t offs = (map->pa - va) &
1105 					       CORE_MMU_PGDIR_MASK;
1106 
1107 				if (ADD_OVERFLOW(va, offs, &va))
1108 					return false;
1109 			}
1110 
1111 			map->va = va;
1112 			if (ADD_OVERFLOW(va, map->size, &va))
1113 				return false;
1114 			if (va >= BIT64(core_mmu_get_va_width()))
1115 				return false;
1116 		}
1117 	}
1118 
1119 	return true;
1120 }
1121 
1122 static bool assign_mem_va(vaddr_t tee_ram_va,
1123 			  struct tee_mmap_region *memory_map)
1124 {
1125 	bool tee_ram_at_top = place_tee_ram_at_top(tee_ram_va);
1126 
1127 	/*
1128 	 * Check that we're not overlapping with the user VA range.
1129 	 */
1130 	if (IS_ENABLED(CFG_WITH_LPAE)) {
1131 		/*
1132 		 * User VA range is supposed to be defined after these
1133 		 * mappings have been established.
1134 		 */
1135 		assert(!core_mmu_user_va_range_is_defined());
1136 	} else {
1137 		vaddr_t user_va_base = 0;
1138 		size_t user_va_size = 0;
1139 
1140 		assert(core_mmu_user_va_range_is_defined());
1141 		core_mmu_get_user_va_range(&user_va_base, &user_va_size);
1142 		if (tee_ram_va < (user_va_base + user_va_size))
1143 			return false;
1144 	}
1145 
1146 	if (IS_ENABLED(CFG_WITH_PAGER)) {
1147 		bool preferred_dir = core_mmu_prefer_tee_ram_at_top(tee_ram_va);
1148 
1149 		/* Try whole mapping covered by a single base xlat entry */
1150 		if (preferred_dir != tee_ram_at_top &&
1151 		    assign_mem_va_dir(tee_ram_va, memory_map, preferred_dir))
1152 			return true;
1153 	}
1154 
1155 	return assign_mem_va_dir(tee_ram_va, memory_map, tee_ram_at_top);
1156 }
1157 
1158 static int cmp_init_mem_map(const void *a, const void *b)
1159 {
1160 	const struct tee_mmap_region *mm_a = a;
1161 	const struct tee_mmap_region *mm_b = b;
1162 	int rc = 0;
1163 
1164 	rc = CMP_TRILEAN(mm_a->region_size, mm_b->region_size);
1165 	if (!rc)
1166 		rc = CMP_TRILEAN(mm_a->pa, mm_b->pa);
1167 	/*
1168 	 * 32-bit MMU descriptors cannot mix secure and non-secure mappings
1169 	 * in the same level-2 table. Hence, sort secure mappings apart from
1170 	 * non-secure mappings.
1171 	 */
1172 	if (!rc && !IS_ENABLED(CFG_WITH_LPAE))
1173 		rc = CMP_TRILEAN(map_is_secure(mm_a), map_is_secure(mm_b));
1174 
1175 	return rc;
1176 }
1177 
1178 static bool mem_map_add_id_map(struct tee_mmap_region *memory_map,
1179 			       size_t num_elems, size_t *last,
1180 			       vaddr_t id_map_start, vaddr_t id_map_end)
1181 {
1182 	struct tee_mmap_region *map = NULL;
1183 	vaddr_t start = ROUNDDOWN(id_map_start, SMALL_PAGE_SIZE);
1184 	vaddr_t end = ROUNDUP(id_map_end, SMALL_PAGE_SIZE);
1185 	size_t len = end - start;
1186 
1187 	if (*last >= num_elems - 1) {
1188 		EMSG("Out of entries (%zu) in memory map", num_elems);
1189 		panic();
1190 	}
1191 
1192 	for (map = memory_map; !core_mmap_is_end_of_table(map); map++)
1193 		if (core_is_buffer_intersect(map->va, map->size, start, len))
1194 			return false;
1195 
1196 	*map = (struct tee_mmap_region){
1197 		.type = MEM_AREA_IDENTITY_MAP_RX,
1198 		/*
1199 		 * Could use CORE_MMU_PGDIR_SIZE to potentially save a
1200 		 * translation table, at the increased risk of clashes with
1201 		 * the rest of the memory map.
1202 		 */
1203 		.region_size = SMALL_PAGE_SIZE,
1204 		.pa = start,
1205 		.va = start,
1206 		.size = len,
1207 		.attr = core_mmu_type_to_attr(MEM_AREA_IDENTITY_MAP_RX),
1208 	};
1209 
1210 	(*last)++;
1211 
1212 	return true;
1213 }
1214 
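/*
 * Build and sort the memory map. With CFG_CORE_ASLR and a non-zero seed,
 * up to three candidate base addresses derived from the seed are tried;
 * returns the map offset from TEE_RAM_START (0 without ASLR).
 */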
1215 static unsigned long init_mem_map(struct tee_mmap_region *memory_map,
1216 				  size_t num_elems, unsigned long seed)
1217 {
1218 	/*
1219 	 * @id_map_start and @id_map_end describe a physical memory range
1220 	 * that must be mapped Read-Only eXecutable at identical virtual
1221 	 * addresses.
1222 	 */
1223 	vaddr_t id_map_start = (vaddr_t)__identity_map_init_start;
1224 	vaddr_t id_map_end = (vaddr_t)__identity_map_init_end;
1225 	unsigned long offs = 0;
1226 	size_t last = 0;
1227 
1228 	last = collect_mem_ranges(memory_map, num_elems);
1229 	assign_mem_granularity(memory_map);
1230 
1231 	/*
1232 	 * To ease mapping and reduce xlat table usage, sort the mapping
1233 	 * descriptions, moving small-page regions after the pgdir regions.
1234 	 */
1235 	qsort(memory_map, last, sizeof(struct tee_mmap_region),
1236 	      cmp_init_mem_map);
1237 
1238 	add_pager_vaspace(memory_map, num_elems, &last);
1239 	if (IS_ENABLED(CFG_CORE_ASLR) && seed) {
1240 		vaddr_t base_addr = TEE_RAM_START + seed;
1241 		const unsigned int va_width = core_mmu_get_va_width();
1242 		const vaddr_t va_mask = GENMASK_64(va_width - 1,
1243 						   SMALL_PAGE_SHIFT);
1244 		vaddr_t ba = base_addr;
1245 		size_t n = 0;
1246 
1247 		for (n = 0; n < 3; n++) {
1248 			if (n)
1249 				ba = base_addr ^ BIT64(va_width - n);
1250 			ba &= va_mask;
1251 			if (assign_mem_va(ba, memory_map) &&
1252 			    mem_map_add_id_map(memory_map, num_elems, &last,
1253 					       id_map_start, id_map_end)) {
1254 				offs = ba - TEE_RAM_START;
1255 				DMSG("Mapping core at %#"PRIxVA" offs %#lx",
1256 				     ba, offs);
1257 				goto out;
1258 			} else {
1259 				DMSG("Failed to map core at %#"PRIxVA, ba);
1260 			}
1261 		}
1262 		EMSG("Failed to map core with seed %#lx", seed);
1263 	}
1264 
1265 	if (!assign_mem_va(TEE_RAM_START, memory_map))
1266 		panic();
1267 
1268 out:
1269 	qsort(memory_map, last, sizeof(struct tee_mmap_region),
1270 	      cmp_mmap_by_lower_va);
1271 
1272 	dump_mmap_table(memory_map);
1273 
1274 	return offs;
1275 }
1276 
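/*
 * Sanity check the memory map: TEE and TA RAM must fit inside secure_only
 * and reserved shared memory inside nsec_shared; unknown types panic.
 */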
1277 static void check_mem_map(struct tee_mmap_region *map)
1278 {
1279 	struct tee_mmap_region *m = NULL;
1280 
1281 	for (m = map; !core_mmap_is_end_of_table(m); m++) {
1282 		switch (m->type) {
1283 		case MEM_AREA_TEE_RAM:
1284 		case MEM_AREA_TEE_RAM_RX:
1285 		case MEM_AREA_TEE_RAM_RO:
1286 		case MEM_AREA_TEE_RAM_RW:
1287 		case MEM_AREA_INIT_RAM_RX:
1288 		case MEM_AREA_INIT_RAM_RO:
1289 		case MEM_AREA_NEX_RAM_RW:
1290 		case MEM_AREA_NEX_RAM_RO:
1291 		case MEM_AREA_IDENTITY_MAP_RX:
1292 			if (!pbuf_is_inside(secure_only, m->pa, m->size))
1293 				panic("TEE_RAM can't fit in secure_only");
1294 			break;
1295 		case MEM_AREA_TA_RAM:
1296 			if (!pbuf_is_inside(secure_only, m->pa, m->size))
1297 				panic("TA_RAM can't fit in secure_only");
1298 			break;
1299 		case MEM_AREA_NSEC_SHM:
1300 			if (!pbuf_is_inside(nsec_shared, m->pa, m->size))
1301 				panic("NS_SHM can't fit in nsec_shared");
1302 			break;
1303 		case MEM_AREA_SEC_RAM_OVERALL:
1304 		case MEM_AREA_TEE_COHERENT:
1305 		case MEM_AREA_TEE_ASAN:
1306 		case MEM_AREA_IO_SEC:
1307 		case MEM_AREA_IO_NSEC:
1308 		case MEM_AREA_EXT_DT:
1309 		case MEM_AREA_RAM_SEC:
1310 		case MEM_AREA_RAM_NSEC:
1311 		case MEM_AREA_RES_VASPACE:
1312 		case MEM_AREA_SHM_VASPACE:
1313 		case MEM_AREA_PAGER_VASPACE:
1314 			break;
1315 		default:
1316 			EMSG("Unhandled memtype %d", m->type);
1317 			panic();
1318 		}
1319 	}
1320 }
1321 
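/*
 * Use a heap area as scratch space for building the temporary memory map,
 * presumably safe at this early stage since the heap is not yet in use.
 */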
1322 static struct tee_mmap_region *get_tmp_mmap(void)
1323 {
1324 	struct tee_mmap_region *tmp_mmap = (void *)__heap1_start;
1325 
1326 #ifdef CFG_WITH_PAGER
1327 	if (__heap1_end - __heap1_start < (ptrdiff_t)sizeof(static_memory_map))
1328 		tmp_mmap = (void *)__heap2_start;
1329 #endif
1330 
1331 	memset(tmp_mmap, 0, sizeof(static_memory_map));
1332 
1333 	return tmp_mmap;
1334 }
1335 
1336 /*
1337  * core_init_mmu_map() - init tee core default memory mapping
1338  *
1339  * This routine sets the static default TEE core mapping. If @seed is
1340  * non-zero and CFG_CORE_ASLR is enabled, it maps the TEE core at a
1341  * location based on the seed and returns the offset from the link address.
1342  *
1343  * If an error occurs, core_init_mmu_map() is expected to panic.
1344  *
1345  * Note: this function is weak just to make it possible to exclude it from
1346  * the unpaged area.
1347  */
1348 void __weak core_init_mmu_map(unsigned long seed, struct core_mmu_config *cfg)
1349 {
1350 #ifndef CFG_NS_VIRTUALIZATION
1351 	vaddr_t start = ROUNDDOWN((vaddr_t)__nozi_start, SMALL_PAGE_SIZE);
1352 #else
1353 	vaddr_t start = ROUNDDOWN((vaddr_t)__vcore_nex_rw_start,
1354 				  SMALL_PAGE_SIZE);
1355 #endif
1356 	vaddr_t len = ROUNDUP((vaddr_t)__nozi_end, SMALL_PAGE_SIZE) - start;
1357 	struct tee_mmap_region *tmp_mmap = get_tmp_mmap();
1358 	unsigned long offs = 0;
1359 
1360 	check_sec_nsec_mem_config();
1361 
1362 	/*
1363 	 * Add an entry covering the translation tables which will be
1364 	 * involved in some virt_to_phys() and phys_to_virt() conversions.
1365 	 */
1366 	static_memory_map[0] = (struct tee_mmap_region){
1367 		.type = MEM_AREA_TEE_RAM,
1368 		.region_size = SMALL_PAGE_SIZE,
1369 		.pa = start,
1370 		.va = start,
1371 		.size = len,
1372 		.attr = core_mmu_type_to_attr(MEM_AREA_IDENTITY_MAP_RX),
1373 	};
1374 
1375 	COMPILE_TIME_ASSERT(CFG_MMAP_REGIONS >= 13);
1376 	offs = init_mem_map(tmp_mmap, ARRAY_SIZE(static_memory_map), seed);
1377 
1378 	check_mem_map(tmp_mmap);
1379 	core_init_mmu(tmp_mmap);
1380 	dump_xlat_table(0x0, CORE_MMU_BASE_TABLE_LEVEL);
1381 	core_init_mmu_regs(cfg);
1382 	cfg->map_offset = offs;
1383 	memcpy(static_memory_map, tmp_mmap, sizeof(static_memory_map));
1384 }
1385 
1386 bool core_mmu_mattr_is_ok(uint32_t mattr)
1387 {
1388 	/*
1389 	 * Keep in sync with core_mmu_lpae.c:mattr_to_desc and
1390 	 * core_mmu_v7.c:mattr_to_texcb
1391 	 */
1392 
1393 	switch ((mattr >> TEE_MATTR_MEM_TYPE_SHIFT) & TEE_MATTR_MEM_TYPE_MASK) {
1394 	case TEE_MATTR_MEM_TYPE_DEV:
1395 	case TEE_MATTR_MEM_TYPE_STRONGLY_O:
1396 	case TEE_MATTR_MEM_TYPE_CACHED:
1397 	case TEE_MATTR_MEM_TYPE_TAGGED:
1398 		return true;
1399 	default:
1400 		return false;
1401 	}
1402 }
1403 
1404 /*
1405  * Test attributes of the target physical buffer.
1406  *
1407  * Flags: pbuf_is(SECURE, NOT_SECURE, RAM, IOMEM, KEYVAULT).
1408  *
1409  */
1410 bool core_pbuf_is(uint32_t attr, paddr_t pbuf, size_t len)
1411 {
1412 	struct tee_mmap_region *map;
1413 
1414 	/* Empty buffers comply with anything */
1415 	if (len == 0)
1416 		return true;
1417 
1418 	switch (attr) {
1419 	case CORE_MEM_SEC:
1420 		return pbuf_is_inside(secure_only, pbuf, len);
1421 	case CORE_MEM_NON_SEC:
1422 		return pbuf_is_inside(nsec_shared, pbuf, len) ||
1423 			pbuf_is_nsec_ddr(pbuf, len);
1424 	case CORE_MEM_TEE_RAM:
1425 		return core_is_buffer_inside(pbuf, len, TEE_RAM_START,
1426 							TEE_RAM_PH_SIZE);
1427 	case CORE_MEM_TA_RAM:
1428 		return core_is_buffer_inside(pbuf, len, TA_RAM_START,
1429 							TA_RAM_SIZE);
1430 #ifdef CFG_CORE_RESERVED_SHM
1431 	case CORE_MEM_NSEC_SHM:
1432 		return core_is_buffer_inside(pbuf, len, TEE_SHMEM_START,
1433 							TEE_SHMEM_SIZE);
1434 #endif
1435 	case CORE_MEM_SDP_MEM:
1436 		return pbuf_is_sdp_mem(pbuf, len);
1437 	case CORE_MEM_CACHED:
1438 		map = find_map_by_pa(pbuf);
1439 		if (!map || !pbuf_inside_map_area(pbuf, len, map))
1440 			return false;
1441 		return mattr_is_cached(map->attr);
1442 	default:
1443 		return false;
1444 	}
1445 }
1446 
1447 /* test attributes of target virtual buffer (in core mapping) */
1448 bool core_vbuf_is(uint32_t attr, const void *vbuf, size_t len)
1449 {
1450 	paddr_t p;
1451 
1452 	/* Empty buffers comply with anything */
1453 	if (len == 0)
1454 		return true;
1455 
1456 	p = virt_to_phys((void *)vbuf);
1457 	if (!p)
1458 		return false;
1459 
1460 	return core_pbuf_is(attr, p, len);
1461 }
1462 
1463 /* core_va2pa - teecore exported service */
1464 static int __maybe_unused core_va2pa_helper(void *va, paddr_t *pa)
1465 {
1466 	struct tee_mmap_region *map;
1467 
1468 	map = find_map_by_va(va);
1469 	if (!va_is_in_map(map, (vaddr_t)va))
1470 		return -1;
1471 
1472 	/*
1473 	 * We can calculate the PA for a static map. Virtual address ranges
1474 	 * reserved for core dynamic mapping return a 'match' (return 0;)
1475 	 * together with an invalid null physical address.
1476 	 */
1477 	if (map->pa)
1478 		*pa = map->pa + (vaddr_t)va - map->va;
1479 	else
1480 		*pa = 0;
1481 
1482 	return 0;
1483 }
1484 
1485 static void *map_pa2va(struct tee_mmap_region *map, paddr_t pa, size_t len)
1486 {
1487 	if (!pa_is_in_map(map, pa, len))
1488 		return NULL;
1489 
1490 	return (void *)(vaddr_t)(map->va + pa - map->pa);
1491 }
1492 
1493 /*
1494  * teecore gets some memory area definitions
1495  */
1496 void core_mmu_get_mem_by_type(unsigned int type, vaddr_t *s, vaddr_t *e)
1497 {
1498 	struct tee_mmap_region *map = find_map_by_type(type);
1499 
1500 	if (map) {
1501 		*s = map->va;
1502 		*e = map->va + map->size;
1503 	} else {
1504 		*s = 0;
1505 		*e = 0;
1506 	}
1507 }
1508 
1509 enum teecore_memtypes core_mmu_get_type_by_pa(paddr_t pa)
1510 {
1511 	struct tee_mmap_region *map = find_map_by_pa(pa);
1512 
1513 	if (!map)
1514 		return MEM_AREA_MAXTYPE;
1515 	return map->type;
1516 }
1517 
1518 void core_mmu_set_entry(struct core_mmu_table_info *tbl_info, unsigned int idx,
1519 			paddr_t pa, uint32_t attr)
1520 {
1521 	assert(idx < tbl_info->num_entries);
1522 	core_mmu_set_entry_primitive(tbl_info->table, tbl_info->level,
1523 				     idx, pa, attr);
1524 }
1525 
1526 void core_mmu_get_entry(struct core_mmu_table_info *tbl_info, unsigned int idx,
1527 			paddr_t *pa, uint32_t *attr)
1528 {
1529 	assert(idx < tbl_info->num_entries);
1530 	core_mmu_get_entry_primitive(tbl_info->table, tbl_info->level,
1531 				     idx, pa, attr);
1532 }
1533 
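/*
 * Clear all table entries covering a block-aligned region at the level
 * described by tbl_info.
 */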
1534 static void clear_region(struct core_mmu_table_info *tbl_info,
1535 			 struct tee_mmap_region *region)
1536 {
1537 	unsigned int end = 0;
1538 	unsigned int idx = 0;
1539 
1540 	/* va, len and pa should be block aligned */
1541 	assert(!core_mmu_get_block_offset(tbl_info, region->va));
1542 	assert(!core_mmu_get_block_offset(tbl_info, region->size));
1543 	assert(!core_mmu_get_block_offset(tbl_info, region->pa));
1544 
1545 	idx = core_mmu_va2idx(tbl_info, region->va);
1546 	end = core_mmu_va2idx(tbl_info, region->va + region->size);
1547 
1548 	while (idx < end) {
1549 		core_mmu_set_entry(tbl_info, idx, 0, 0);
1550 		idx++;
1551 	}
1552 }
1553 
1554 static void set_region(struct core_mmu_table_info *tbl_info,
1555 		       struct tee_mmap_region *region)
1556 {
1557 	unsigned int end;
1558 	unsigned int idx;
1559 	paddr_t pa;
1560 
1561 	/* va, len and pa should be block aligned */
1562 	assert(!core_mmu_get_block_offset(tbl_info, region->va));
1563 	assert(!core_mmu_get_block_offset(tbl_info, region->size));
1564 	assert(!core_mmu_get_block_offset(tbl_info, region->pa));
1565 
1566 	idx = core_mmu_va2idx(tbl_info, region->va);
1567 	end = core_mmu_va2idx(tbl_info, region->va + region->size);
1568 	pa = region->pa;
1569 
1570 	while (idx < end) {
1571 		core_mmu_set_entry(tbl_info, idx, pa, region->attr);
1572 		idx++;
1573 		pa += BIT64(tbl_info->shift);
1574 	}
1575 }
1576 
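/*
 * Populate user-mode page tables for a vm_region: walk the region in
 * CORE_MMU_PGDIR_SIZE chunks, hook the matching page table from the pgt
 * list into dir_info and fill its entries unless the table is already
 * populated or the mobj is paged.
 */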
1577 static void set_pg_region(struct core_mmu_table_info *dir_info,
1578 			  struct vm_region *region, struct pgt **pgt,
1579 			  struct core_mmu_table_info *pg_info)
1580 {
1581 	struct tee_mmap_region r = {
1582 		.va = region->va,
1583 		.size = region->size,
1584 		.attr = region->attr,
1585 	};
1586 	vaddr_t end = r.va + r.size;
1587 	uint32_t pgt_attr = (r.attr & TEE_MATTR_SECURE) | TEE_MATTR_TABLE;
1588 
1589 	while (r.va < end) {
1590 		if (!pg_info->table ||
1591 		    r.va >= (pg_info->va_base + CORE_MMU_PGDIR_SIZE)) {
1592 			/*
1593 			 * We're assigning a new translation table.
1594 			 */
1595 			unsigned int idx;
1596 
1597 			/* Virtual addresses must grow */
1598 			assert(r.va > pg_info->va_base);
1599 
1600 			idx = core_mmu_va2idx(dir_info, r.va);
1601 			pg_info->va_base = core_mmu_idx2va(dir_info, idx);
1602 
1603 			/*
1604 			 * Advance pgt to va_base, note that we may need to
1605 			 * skip multiple page tables if there are large
1606 			 * holes in the vm map.
1607 			 */
1608 			while ((*pgt)->vabase < pg_info->va_base) {
1609 				*pgt = SLIST_NEXT(*pgt, link);
1610 				/* We should have allocated enough */
1611 				assert(*pgt);
1612 			}
1613 			assert((*pgt)->vabase == pg_info->va_base);
1614 			pg_info->table = (*pgt)->tbl;
1615 
1616 			core_mmu_set_entry(dir_info, idx,
1617 					   virt_to_phys(pg_info->table),
1618 					   pgt_attr);
1619 		}
1620 
1621 		r.size = MIN(CORE_MMU_PGDIR_SIZE - (r.va - pg_info->va_base),
1622 			     end - r.va);
1623 
1624 		if (!(*pgt)->populated  && !mobj_is_paged(region->mobj)) {
1625 			size_t granule = BIT(pg_info->shift);
1626 			size_t offset = r.va - region->va + region->offset;
1627 
1628 			r.size = MIN(r.size,
1629 				     mobj_get_phys_granule(region->mobj));
1630 			r.size = ROUNDUP(r.size, SMALL_PAGE_SIZE);
1631 
1632 			if (mobj_get_pa(region->mobj, offset, granule,
1633 					&r.pa) != TEE_SUCCESS)
1634 				panic("Failed to get PA of unpaged mobj");
1635 			set_region(pg_info, &r);
1636 		}
1637 		r.va += r.size;
1638 	}
1639 }
1640 
1641 static bool can_map_at_level(paddr_t paddr, vaddr_t vaddr,
1642 			     size_t size_left, paddr_t block_size,
1643 			     struct tee_mmap_region *mm __maybe_unused)
1644 {
1645 	/* VA and PA are aligned to block size at current level */
1646 	if ((vaddr | paddr) & (block_size - 1))
1647 		return false;
1648 
1649 	/* Remainder fits into block at current level */
1650 	if (size_left < block_size)
1651 		return false;
1652 
1653 #ifdef CFG_WITH_PAGER
1654 	/*
1655 	 * If pager is enabled, we need to map tee ram
1656 	 * regions with small pages only
1657 	 */
1658 	if (map_is_tee_ram(mm) && block_size != SMALL_PAGE_SIZE)
1659 		return false;
1660 #endif
1661 
1662 	return true;
1663 }
1664 
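/*
 * Map the region described by mm into the translation tables of prtn,
 * descending to finer-grained tables whenever alignment or remaining
 * size prevents a block mapping at the current level.
 */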
1665 void core_mmu_map_region(struct mmu_partition *prtn, struct tee_mmap_region *mm)
1666 {
1667 	struct core_mmu_table_info tbl_info;
1668 	unsigned int idx;
1669 	vaddr_t vaddr = mm->va;
1670 	paddr_t paddr = mm->pa;
1671 	ssize_t size_left = mm->size;
1672 	unsigned int level;
1673 	bool table_found;
1674 	uint32_t old_attr;
1675 
1676 	assert(!((vaddr | paddr) & SMALL_PAGE_MASK));
1677 
1678 	while (size_left > 0) {
1679 		level = CORE_MMU_BASE_TABLE_LEVEL;
1680 
1681 		while (true) {
1682 			paddr_t block_size = 0;
1683 
1684 			assert(level <= CORE_MMU_PGDIR_LEVEL);
1685 
1686 			table_found = core_mmu_find_table(prtn, vaddr, level,
1687 							  &tbl_info);
1688 			if (!table_found)
1689 				panic("can't find table for mapping");
1690 
1691 			block_size = BIT64(tbl_info.shift);
1692 
1693 			idx = core_mmu_va2idx(&tbl_info, vaddr);
1694 			if (!can_map_at_level(paddr, vaddr, size_left,
1695 					      block_size, mm)) {
1696 				bool secure = mm->attr & TEE_MATTR_SECURE;
1697 
1698 				/*
1699 				 * This part of the region can't be mapped at
1700 				 * this level. Need to go deeper.
1701 				 */
1702 				if (!core_mmu_entry_to_finer_grained(&tbl_info,
1703 								     idx,
1704 								     secure))
1705 					panic("Can't divide MMU entry");
1706 				level++;
1707 				continue;
1708 			}
1709 
1710 			/* We can map part of the region at current level */
1711 			core_mmu_get_entry(&tbl_info, idx, NULL, &old_attr);
1712 			if (old_attr)
1713 				panic("Page is already mapped");
1714 
1715 			core_mmu_set_entry(&tbl_info, idx, paddr, mm->attr);
1716 			paddr += block_size;
1717 			vaddr += block_size;
1718 			size_left -= block_size;
1719 
1720 			break;
1721 		}
1722 	}
1723 }
1724 
1725 TEE_Result core_mmu_map_pages(vaddr_t vstart, paddr_t *pages, size_t num_pages,
1726 			      enum teecore_memtypes memtype)
1727 {
1728 	TEE_Result ret;
1729 	struct core_mmu_table_info tbl_info;
1730 	struct tee_mmap_region *mm;
1731 	unsigned int idx;
1732 	uint32_t old_attr;
1733 	uint32_t exceptions;
1734 	vaddr_t vaddr = vstart;
1735 	size_t i;
1736 	bool secure;
1737 
1738 	assert(!(core_mmu_type_to_attr(memtype) & TEE_MATTR_PX));
1739 
1740 	secure = core_mmu_type_to_attr(memtype) & TEE_MATTR_SECURE;
1741 
1742 	if (vaddr & SMALL_PAGE_MASK)
1743 		return TEE_ERROR_BAD_PARAMETERS;
1744 
1745 	exceptions = mmu_lock();
1746 
1747 	mm = find_map_by_va((void *)vaddr);
1748 	if (!mm || !va_is_in_map(mm, vaddr + num_pages * SMALL_PAGE_SIZE - 1))
1749 		panic("VA does not belong to any known mm region");
1750 
1751 	if (!core_mmu_is_dynamic_vaspace(mm))
1752 		panic("Trying to map into static region");
1753 
1754 	for (i = 0; i < num_pages; i++) {
1755 		if (pages[i] & SMALL_PAGE_MASK) {
1756 			ret = TEE_ERROR_BAD_PARAMETERS;
1757 			goto err;
1758 		}
1759 
1760 		while (true) {
1761 			if (!core_mmu_find_table(NULL, vaddr, UINT_MAX,
1762 						 &tbl_info))
1763 				panic("Can't find pagetable for vaddr");
1764 
1765 			idx = core_mmu_va2idx(&tbl_info, vaddr);
1766 			if (tbl_info.shift == SMALL_PAGE_SHIFT)
1767 				break;
1768 
1769 			/* This is supertable. Need to divide it. */
1770 			if (!core_mmu_entry_to_finer_grained(&tbl_info, idx,
1771 							     secure))
1772 				panic("Failed to spread pgdir on small tables");
1773 		}
1774 
1775 		core_mmu_get_entry(&tbl_info, idx, NULL, &old_attr);
1776 		if (old_attr)
1777 			panic("Page is already mapped");
1778 
1779 		core_mmu_set_entry(&tbl_info, idx, pages[i],
1780 				   core_mmu_type_to_attr(memtype));
1781 		vaddr += SMALL_PAGE_SIZE;
1782 	}
1783 
1784 	/*
1785 	 * Make sure all the changes to translation tables are visible
1786 	 * before returning. TLB doesn't need to be invalidated as we are
1787 	 * guaranteed that there's no valid mapping in this range.
1788 	 */
1789 	core_mmu_table_write_barrier();
1790 	mmu_unlock(exceptions);
1791 
1792 	return TEE_SUCCESS;
1793 err:
1794 	mmu_unlock(exceptions);
1795 
1796 	if (i)
1797 		core_mmu_unmap_pages(vstart, i);
1798 
1799 	return ret;
1800 }
1801 
1802 TEE_Result core_mmu_map_contiguous_pages(vaddr_t vstart, paddr_t pstart,
1803 					 size_t num_pages,
1804 					 enum teecore_memtypes memtype)
1805 {
1806 	struct core_mmu_table_info tbl_info = { };
1807 	struct tee_mmap_region *mm = NULL;
1808 	unsigned int idx = 0;
1809 	uint32_t old_attr = 0;
1810 	uint32_t exceptions = 0;
1811 	vaddr_t vaddr = vstart;
1812 	paddr_t paddr = pstart;
1813 	size_t i = 0;
1814 	bool secure = false;
1815 
1816 	assert(!(core_mmu_type_to_attr(memtype) & TEE_MATTR_PX));
1817 
1818 	secure = core_mmu_type_to_attr(memtype) & TEE_MATTR_SECURE;
1819 
1820 	if ((vaddr | paddr) & SMALL_PAGE_MASK)
1821 		return TEE_ERROR_BAD_PARAMETERS;
1822 
1823 	exceptions = mmu_lock();
1824 
1825 	mm = find_map_by_va((void *)vaddr);
1826 	if (!mm || !va_is_in_map(mm, vaddr + num_pages * SMALL_PAGE_SIZE - 1))
1827 		panic("VA does not belong to any known mm region");
1828 
1829 	if (!core_mmu_is_dynamic_vaspace(mm))
1830 		panic("Trying to map into static region");
1831 
1832 	for (i = 0; i < num_pages; i++) {
1833 		while (true) {
1834 			if (!core_mmu_find_table(NULL, vaddr, UINT_MAX,
1835 						 &tbl_info))
1836 				panic("Can't find pagetable for vaddr");
1837 
1838 			idx = core_mmu_va2idx(&tbl_info, vaddr);
1839 			if (tbl_info.shift == SMALL_PAGE_SHIFT)
1840 				break;
1841 
1842 			/* This is supertable. Need to divide it. */
1843 			if (!core_mmu_entry_to_finer_grained(&tbl_info, idx,
1844 							     secure))
1845 				panic("Failed to spread pgdir on small tables");
1846 		}
1847 
1848 		core_mmu_get_entry(&tbl_info, idx, NULL, &old_attr);
1849 		if (old_attr)
1850 			panic("Page is already mapped");
1851 
1852 		core_mmu_set_entry(&tbl_info, idx, paddr,
1853 				   core_mmu_type_to_attr(memtype));
1854 		paddr += SMALL_PAGE_SIZE;
1855 		vaddr += SMALL_PAGE_SIZE;
1856 	}
1857 
1858 	/*
1859 	 * Make sure all the changes to translation tables are visible
1860 	 * before returning. TLB doesn't need to be invalidated as we are
1861 	 * guaranteed that there's no valid mapping in this range.
1862 	 */
1863 	core_mmu_table_write_barrier();
1864 	mmu_unlock(exceptions);
1865 
1866 	return TEE_SUCCESS;
1867 }
1868 
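/*
 * Unmap @num_pages small pages starting at @vstart, previously mapped with
 * core_mmu_map_pages() or core_mmu_map_contiguous_pages(). The range must
 * lie in a dynamic VA region mapped at small page granularity or this
 * function panics. The TLB is invalidated before returning.
 */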
1869 void core_mmu_unmap_pages(vaddr_t vstart, size_t num_pages)
1870 {
1871 	struct core_mmu_table_info tbl_info;
1872 	struct tee_mmap_region *mm;
1873 	size_t i;
1874 	unsigned int idx;
1875 	uint32_t exceptions;
1876 
1877 	exceptions = mmu_lock();
1878 
1879 	mm = find_map_by_va((void *)vstart);
1880 	if (!mm || !va_is_in_map(mm, vstart + num_pages * SMALL_PAGE_SIZE - 1))
1881 		panic("VA does not belong to any known mm region");
1882 
1883 	if (!core_mmu_is_dynamic_vaspace(mm))
1884 		panic("Trying to unmap static region");
1885 
1886 	for (i = 0; i < num_pages; i++, vstart += SMALL_PAGE_SIZE) {
1887 		if (!core_mmu_find_table(NULL, vstart, UINT_MAX, &tbl_info))
1888 			panic("Can't find pagetable");
1889 
1890 		if (tbl_info.shift != SMALL_PAGE_SHIFT)
1891 			panic("Invalid pagetable level");
1892 
1893 		idx = core_mmu_va2idx(&tbl_info, vstart);
1894 		core_mmu_set_entry(&tbl_info, idx, 0, 0);
1895 	}
1896 	tlbi_all();
1897 
1898 	mmu_unlock(exceptions);
1899 }
1900 
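/*
 * Fill in the user mode translation tables below @dir_info for every VM
 * region of @uctx. All needed page tables are obtained up front with
 * pgt_get_all() and marked as populated once their entries are written.
 */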
1901 void core_mmu_populate_user_map(struct core_mmu_table_info *dir_info,
1902 				struct user_mode_ctx *uctx)
1903 {
1904 	struct core_mmu_table_info pg_info = { };
1905 	struct pgt_cache *pgt_cache = &uctx->pgt_cache;
1906 	struct pgt *pgt = NULL;
1907 	struct pgt *p = NULL;
1908 	struct vm_region *r = NULL;
1909 
1910 	if (TAILQ_EMPTY(&uctx->vm_info.regions))
1911 		return; /* Nothing to map */
1912 
1913 	/*
1914 	 * Allocate all page tables in advance.
1915 	 */
1916 	pgt_get_all(uctx);
1917 	pgt = SLIST_FIRST(pgt_cache);
1918 
1919 	core_mmu_set_info_table(&pg_info, dir_info->level + 1, 0, NULL);
1920 
1921 	TAILQ_FOREACH(r, &uctx->vm_info.regions, link)
1922 		set_pg_region(dir_info, r, &pgt, &pg_info);
1923 	/* Record that the translation tables are now populated. */
1924 	SLIST_FOREACH(p, pgt_cache, link) {
1925 		p->populated = true;
1926 		if (p == pgt)
1927 			break;
1928 	}
1929 	assert(p == pgt);
1930 }
1931 
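/*
 * Remove a mapping previously added with core_mmu_add_mapping(). The
 * granule-rounded range derived from @addr and @len must exactly match an
 * entry of type @type in the static memory map, otherwise TEE_ERROR_GENERIC
 * is returned. When the freed range is adjacent to the reserved VA space
 * (MEM_AREA_RES_VASPACE) it is merged back into it.
 */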
1932 TEE_Result core_mmu_remove_mapping(enum teecore_memtypes type, void *addr,
1933 				   size_t len)
1934 {
1935 	struct core_mmu_table_info tbl_info = { };
1936 	struct tee_mmap_region *res_map = NULL;
1937 	struct tee_mmap_region *map = NULL;
1938 	paddr_t pa = virt_to_phys(addr);
1939 	size_t granule = 0;
1940 	ptrdiff_t i = 0;
1941 	paddr_t p = 0;
1942 	size_t l = 0;
1943 
1944 	map = find_map_by_type_and_pa(type, pa, len);
1945 	if (!map)
1946 		return TEE_ERROR_GENERIC;
1947 
1948 	res_map = find_map_by_type(MEM_AREA_RES_VASPACE);
1949 	if (!res_map)
1950 		return TEE_ERROR_GENERIC;
1951 	if (!core_mmu_find_table(NULL, res_map->va, UINT_MAX, &tbl_info))
1952 		return TEE_ERROR_GENERIC;
1953 	granule = BIT(tbl_info.shift);
1954 
1955 	if (map < static_memory_map ||
1956 	    map >= static_memory_map + ARRAY_SIZE(static_memory_map))
1957 		return TEE_ERROR_GENERIC;
1958 	i = map - static_memory_map;
1959 
1960 	/* Check that we have a full match */
1961 	p = ROUNDDOWN(pa, granule);
1962 	l = ROUNDUP(len + pa - p, granule);
1963 	if (map->pa != p || map->size != l)
1964 		return TEE_ERROR_GENERIC;
1965 
1966 	clear_region(&tbl_info, map);
1967 	tlbi_all();
1968 
1969 	/* If possible, give the va range back to res_map */
1970 	if (res_map->va - map->size == map->va) {
1971 		res_map->va -= map->size;
1972 		res_map->size += map->size;
1973 	}
1974 
1975 	/* Remove the entry. */
1976 	memmove(map, map + 1,
1977 		(ARRAY_SIZE(static_memory_map) - i - 1) * sizeof(*map));
1978 
1979 	/* Clear the last entry, now a stale duplicate after the move */
1980 	memset(static_memory_map + ARRAY_SIZE(static_memory_map) - 1,
1981 	       0, sizeof(*map));
1982 
1983 	return TEE_SUCCESS;
1984 }
1985 
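/*
 * Return the memory map entry of type @type provided there is exactly one
 * entry of that type and it is at least @len bytes large, otherwise return
 * NULL.
 */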
1986 struct tee_mmap_region *
1987 core_mmu_find_mapping_exclusive(enum teecore_memtypes type, size_t len)
1988 {
1989 	struct tee_mmap_region *map = NULL;
1990 	struct tee_mmap_region *map_found = NULL;
1991 
1992 	if (!len)
1993 		return NULL;
1994 
1995 	for (map = get_memory_map(); !core_mmap_is_end_of_table(map); map++) {
1996 		if (map->type != type)
1997 			continue;
1998 
1999 		if (map_found)
2000 			return NULL;
2001 
2002 		map_found = map;
2003 	}
2004 
2005 	if (!map_found || map_found->size < len)
2006 		return NULL;
2007 
2008 	return map_found;
2009 }
2010 
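/*
 * Map @len bytes of physical memory at @addr with the attributes of @type.
 * If the range is already covered by a mapping of that type the existing VA
 * is returned. Otherwise the mapping is carved out of the reserved VA space
 * (MEM_AREA_RES_VASPACE) at the granularity of its translation table and
 * recorded in the static memory map. Returns the VA corresponding to @addr,
 * or NULL on failure.
 *
 * Hypothetical usage sketch, the device base address and size below are
 * examples only:
 *
 *	void *va = core_mmu_add_mapping(MEM_AREA_IO_SEC, 0x40000000, 0x1000);
 *
 *	if (!va)
 *		panic();
 */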
2011 void *core_mmu_add_mapping(enum teecore_memtypes type, paddr_t addr, size_t len)
2012 {
2013 	struct core_mmu_table_info tbl_info;
2014 	struct tee_mmap_region *map;
2015 	size_t n;
2016 	size_t granule;
2017 	paddr_t p;
2018 	size_t l;
2019 
2020 	if (!len)
2021 		return NULL;
2022 
2023 	if (!core_mmu_check_end_pa(addr, len))
2024 		return NULL;
2025 
2026 	/* Check if the memory is already mapped */
2027 	map = find_map_by_type_and_pa(type, addr, len);
2028 	if (map && pbuf_inside_map_area(addr, len, map))
2029 		return (void *)(vaddr_t)(map->va + addr - map->pa);
2030 
2031 	/* Find the reserved va space used for late mappings */
2032 	map = find_map_by_type(MEM_AREA_RES_VASPACE);
2033 	if (!map)
2034 		return NULL;
2035 
2036 	if (!core_mmu_find_table(NULL, map->va, UINT_MAX, &tbl_info))
2037 		return NULL;
2038 
2039 	granule = BIT64(tbl_info.shift);
2040 	p = ROUNDDOWN(addr, granule);
2041 	l = ROUNDUP(len + addr - p, granule);
2042 
2043 	/* Refuse mappings larger than the remaining reserved va space */
2044 	if (map->size < l)
2045 		return NULL;
2046 
2047 	/*
2048 	 * Something is wrong if we can't fit the va range into the
2049 	 * selected table. The reserved va range is possibly misaligned
2050 	 * with the granule.
2051 	 */
2052 	if (core_mmu_va2idx(&tbl_info, map->va + len) >= tbl_info.num_entries)
2053 		return NULL;
2054 
2055 	/* Find end of the memory map */
2056 	n = 0;
2057 	while (!core_mmap_is_end_of_table(static_memory_map + n))
2058 		n++;
2059 
2060 	if (n < (ARRAY_SIZE(static_memory_map) - 1)) {
2061 		/* There's room for another entry */
2062 		static_memory_map[n].va = map->va;
2063 		static_memory_map[n].size = l;
2064 		static_memory_map[n + 1].type = MEM_AREA_END;
2065 		map->va += l;
2066 		map->size -= l;
2067 		map = static_memory_map + n;
2068 	} else {
2069 		/*
2070 		 * There isn't room for another entry, steal the reserved
2071 		 * entry as it's not useful for anything else any longer.
2072 		 */
2073 		map->size = l;
2074 	}
2075 	map->type = type;
2076 	map->region_size = granule;
2077 	map->attr = core_mmu_type_to_attr(type);
2078 	map->pa = p;
2079 
2080 	set_region(&tbl_info, map);
2081 
2082 	/* Make sure the new entry is visible before continuing. */
2083 	core_mmu_table_write_barrier();
2084 
2085 	return (void *)(vaddr_t)(map->va + addr - map->pa);
2086 }
2087 
2088 #ifdef CFG_WITH_PAGER
2089 static vaddr_t get_linear_map_end_va(void)
2090 {
2091 	/* This is kept in sync with the generic linker script kern.ld.S */
2092 	return (vaddr_t)__heap2_end;
2093 }
2094 
2095 static paddr_t get_linear_map_end_pa(void)
2096 {
2097 	return get_linear_map_end_va() - VCORE_START_VA + TEE_LOAD_ADDR;
2098 }
2099 #endif
2100 
2101 #if defined(CFG_TEE_CORE_DEBUG)
2102 static void check_pa_matches_va(void *va, paddr_t pa)
2103 {
2104 	TEE_Result res = TEE_ERROR_GENERIC;
2105 	vaddr_t v = (vaddr_t)va;
2106 	paddr_t p = 0;
2107 	struct core_mmu_table_info ti __maybe_unused = { };
2108 
2109 	if (core_mmu_user_va_range_is_defined()) {
2110 		vaddr_t user_va_base = 0;
2111 		size_t user_va_size = 0;
2112 
2113 		core_mmu_get_user_va_range(&user_va_base, &user_va_size);
2114 		if (v >= user_va_base &&
2115 		    v <= (user_va_base - 1 + user_va_size)) {
2116 			if (!core_mmu_user_mapping_is_active()) {
2117 				if (pa)
2118 					panic("issue in linear address space");
2119 				return;
2120 			}
2121 
2122 			res = vm_va2pa(to_user_mode_ctx(thread_get_tsd()->ctx),
2123 				       va, &p);
2124 			if (res == TEE_ERROR_NOT_SUPPORTED)
2125 				return;
2126 			if (res == TEE_SUCCESS && pa != p)
2127 				panic("bad pa");
2128 			if (res != TEE_SUCCESS && pa)
2129 				panic("false pa");
2130 			return;
2131 		}
2132 	}
2133 #ifdef CFG_WITH_PAGER
2134 	if (is_unpaged(va)) {
2135 		if (v - boot_mmu_config.map_offset != pa)
2136 			panic("issue in linear address space");
2137 		return;
2138 	}
2139 
2140 	if (tee_pager_get_table_info(v, &ti)) {
2141 		uint32_t a;
2142 
2143 		/*
2144 		 * Lookups in the page table managed by the pager are
2145 		 * dangerous for addresses in the paged area as those pages
2146 		 * change all the time. But some ranges are safe, for
2147 		 * instance rw-locked areas while the page is populated.
2148 		 */
2149 		core_mmu_get_entry(&ti, core_mmu_va2idx(&ti, v), &p, &a);
2150 		if (a & TEE_MATTR_VALID_BLOCK) {
2151 			paddr_t mask = BIT64(ti.shift) - 1;
2152 
2153 			p |= v & mask;
2154 			if (pa != p)
2155 				panic();
2156 		} else {
2157 			if (pa)
2158 				panic();
2159 		}
2160 		return;
2161 	}
2162 #endif
2163 
2164 	if (!core_va2pa_helper(va, &p)) {
2165 		/* Verify only the static mapping (non-null phys addr case) */
2166 		if (p && pa != p) {
2167 			DMSG("va %p maps 0x%" PRIxPA ", expect 0x%" PRIxPA,
2168 			     va, p, pa);
2169 			panic();
2170 		}
2171 	} else {
2172 		if (pa) {
2173 			DMSG("va %p unmapped, expect 0x%" PRIxPA, va, pa);
2174 			panic();
2175 		}
2176 	}
2177 }
2178 #else
2179 static void check_pa_matches_va(void *va __unused, paddr_t pa __unused)
2180 {
2181 }
2182 #endif
2183 
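/*
 * Translate the core virtual address @va to a physical address using
 * arch_va2pa_helper(), returning 0 if @va isn't mapped. With
 * CFG_TEE_CORE_DEBUG the result is cross-checked against the memory map
 * and, for user addresses, against the active user mode mapping.
 */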
2184 paddr_t virt_to_phys(void *va)
2185 {
2186 	paddr_t pa = 0;
2187 
2188 	if (!arch_va2pa_helper(va, &pa))
2189 		pa = 0;
2190 	check_pa_matches_va(va, pa);
2191 	return pa;
2192 }
2193 
2194 #if defined(CFG_TEE_CORE_DEBUG)
2195 static void check_va_matches_pa(paddr_t pa, void *va)
2196 {
2197 	paddr_t p = 0;
2198 
2199 	if (!va)
2200 		return;
2201 
2202 	p = virt_to_phys(va);
2203 	if (p != pa) {
2204 		DMSG("va %p maps 0x%" PRIxPA " expect 0x%" PRIxPA, va, p, pa);
2205 		panic();
2206 	}
2207 }
2208 #else
2209 static void check_va_matches_pa(paddr_t pa __unused, void *va __unused)
2210 {
2211 }
2212 #endif
2213 
2214 static void *phys_to_virt_ts_vaspace(paddr_t pa, size_t len)
2215 {
2216 	if (!core_mmu_user_mapping_is_active())
2217 		return NULL;
2218 
2219 	return vm_pa2va(to_user_mode_ctx(thread_get_tsd()->ctx), pa, len);
2220 }
2221 
2222 #ifdef CFG_WITH_PAGER
2223 static void *phys_to_virt_tee_ram(paddr_t pa, size_t len)
2224 {
2225 	paddr_t end_pa = 0;
2226 
2227 	if (SUB_OVERFLOW(len, 1, &end_pa) || ADD_OVERFLOW(pa, end_pa, &end_pa))
2228 		return NULL;
2229 
2230 	if (pa >= TEE_LOAD_ADDR && pa < get_linear_map_end_pa()) {
2231 		if (end_pa > get_linear_map_end_pa())
2232 			return NULL;
2233 		return (void *)(vaddr_t)(pa + boot_mmu_config.map_offset);
2234 	}
2235 
2236 	return tee_pager_phys_to_virt(pa, len);
2237 }
2238 #else
2239 static void *phys_to_virt_tee_ram(paddr_t pa, size_t len)
2240 {
2241 	struct tee_mmap_region *mmap = NULL;
2242 
2243 	mmap = find_map_by_type_and_pa(MEM_AREA_TEE_RAM, pa, len);
2244 	if (!mmap)
2245 		mmap = find_map_by_type_and_pa(MEM_AREA_NEX_RAM_RW, pa, len);
2246 	if (!mmap)
2247 		mmap = find_map_by_type_and_pa(MEM_AREA_NEX_RAM_RO, pa, len);
2248 	if (!mmap)
2249 		mmap = find_map_by_type_and_pa(MEM_AREA_TEE_RAM_RW, pa, len);
2250 	if (!mmap)
2251 		mmap = find_map_by_type_and_pa(MEM_AREA_TEE_RAM_RO, pa, len);
2252 	if (!mmap)
2253 		mmap = find_map_by_type_and_pa(MEM_AREA_TEE_RAM_RX, pa, len);
2254 	/*
2255 	 * Note that MEM_AREA_INIT_RAM_RO and MEM_AREA_INIT_RAM_RX are only
2256 	 * used with the pager and not needed here.
2257 	 */
2258 	return map_pa2va(mmap, pa, len);
2259 }
2260 #endif
2261 
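/*
 * Translate the physical address @pa of memory type @m into a core virtual
 * address valid for at least @len bytes, or return NULL if no such mapping
 * exists. Resolving a VA inside dynamic shared memory
 * (MEM_AREA_SHM_VASPACE) isn't supported.
 */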
2262 void *phys_to_virt(paddr_t pa, enum teecore_memtypes m, size_t len)
2263 {
2264 	void *va = NULL;
2265 
2266 	switch (m) {
2267 	case MEM_AREA_TS_VASPACE:
2268 		va = phys_to_virt_ts_vaspace(pa, len);
2269 		break;
2270 	case MEM_AREA_TEE_RAM:
2271 	case MEM_AREA_TEE_RAM_RX:
2272 	case MEM_AREA_TEE_RAM_RO:
2273 	case MEM_AREA_TEE_RAM_RW:
2274 	case MEM_AREA_NEX_RAM_RO:
2275 	case MEM_AREA_NEX_RAM_RW:
2276 		va = phys_to_virt_tee_ram(pa, len);
2277 		break;
2278 	case MEM_AREA_SHM_VASPACE:
2279 		/* Finding a VA from a PA in dynamic SHM isn't supported yet */
2280 		va = NULL;
2281 		break;
2282 	default:
2283 		va = map_pa2va(find_map_by_type_and_pa(m, pa, len), pa, len);
2284 	}
2285 	if (m != MEM_AREA_SEC_RAM_OVERALL)
2286 		check_va_matches_pa(pa, va);
2287 	return va;
2288 }
2289 
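/*
 * Like phys_to_virt(), but for I/O ranges where the caller doesn't know
 * whether @pa was registered as MEM_AREA_IO_SEC or MEM_AREA_IO_NSEC.
 */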
2290 void *phys_to_virt_io(paddr_t pa, size_t len)
2291 {
2292 	struct tee_mmap_region *map = NULL;
2293 	void *va = NULL;
2294 
2295 	map = find_map_by_type_and_pa(MEM_AREA_IO_SEC, pa, len);
2296 	if (!map)
2297 		map = find_map_by_type_and_pa(MEM_AREA_IO_NSEC, pa, len);
2298 	if (!map)
2299 		return NULL;
2300 	va = map_pa2va(map, pa, len);
2301 	check_va_matches_pa(pa, va);
2302 	return va;
2303 }
2304 
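/*
 * Return the virtual address to use for @pa: the mapped VA once the MMU is
 * enabled, or @pa itself while the MMU is still disabled during early boot.
 */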
2305 vaddr_t core_mmu_get_va(paddr_t pa, enum teecore_memtypes type, size_t len)
2306 {
2307 	if (cpu_mmu_enabled())
2308 		return (vaddr_t)phys_to_virt(pa, type, len);
2309 
2310 	return (vaddr_t)pa;
2311 }
2312 
2313 #ifdef CFG_WITH_PAGER
2314 bool is_unpaged(void *va)
2315 {
2316 	vaddr_t v = (vaddr_t)va;
2317 
2318 	return v >= VCORE_START_VA && v < get_linear_map_end_va();
2319 }
2320 #else
2321 bool is_unpaged(void *va __unused)
2322 {
2323 	return true;
2324 }
2325 #endif
2326 
2327 void core_mmu_init_virtualization(void)
2328 {
2329 	virt_init_memory(static_memory_map);
2330 }
2331 
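/*
 * Return the address a driver should use for the I/O range described by
 * @p: once the MMU is enabled the VA is looked up and cached in @p->va,
 * before that the PA is returned as-is. The _secure() and _nsec() variants
 * below force the secure respectively non-secure I/O memory type.
 *
 * Hypothetical usage sketch, the register base and size below are examples
 * only:
 *
 *	static struct io_pa_va uart_base = { .pa = 0x09000000 };
 *
 *	vaddr_t base = io_pa_or_va(&uart_base, 0x1000);
 */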
2332 vaddr_t io_pa_or_va(struct io_pa_va *p, size_t len)
2333 {
2334 	assert(p->pa);
2335 	if (cpu_mmu_enabled()) {
2336 		if (!p->va)
2337 			p->va = (vaddr_t)phys_to_virt_io(p->pa, len);
2338 		assert(p->va);
2339 		return p->va;
2340 	}
2341 	return p->pa;
2342 }
2343 
2344 vaddr_t io_pa_or_va_secure(struct io_pa_va *p, size_t len)
2345 {
2346 	assert(p->pa);
2347 	if (cpu_mmu_enabled()) {
2348 		if (!p->va)
2349 			p->va = (vaddr_t)phys_to_virt(p->pa, MEM_AREA_IO_SEC,
2350 						      len);
2351 		assert(p->va);
2352 		return p->va;
2353 	}
2354 	return p->pa;
2355 }
2356 
2357 vaddr_t io_pa_or_va_nsec(struct io_pa_va *p, size_t len)
2358 {
2359 	assert(p->pa);
2360 	if (cpu_mmu_enabled()) {
2361 		if (!p->va)
2362 			p->va = (vaddr_t)phys_to_virt(p->pa, MEM_AREA_IO_NSEC,
2363 						      len);
2364 		assert(p->va);
2365 		return p->va;
2366 	}
2367 	return p->pa;
2368 }
2369 
2370 #ifdef CFG_CORE_RESERVED_SHM
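/*
 * Early init of the reserved non-secure shared memory: validate the
 * MEM_AREA_NSEC_SHM range, optionally reserve the PL310 mutex at its start
 * and record the remaining range in default_nsec_shm_paddr/size.
 */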
2371 static TEE_Result teecore_init_pub_ram(void)
2372 {
2373 	vaddr_t s = 0;
2374 	vaddr_t e = 0;
2375 
2376 	/* get virtual addr/size of NSec shared mem allocated from teecore */
2377 	core_mmu_get_mem_by_type(MEM_AREA_NSEC_SHM, &s, &e);
2378 
2379 	if (s >= e || s & SMALL_PAGE_MASK || e & SMALL_PAGE_MASK)
2380 		panic("invalid PUB RAM");
2381 
2382 	/* Extra check, we could otherwise rely on core_mmu_get_mem_by_type() */
2383 	if (!tee_vbuf_is_non_sec(s, e - s))
2384 		panic("PUB RAM is not non-secure");
2385 
2386 #ifdef CFG_PL310
2387 	/* Statically allocate the L2CC mutex */
2388 	tee_l2cc_store_mutex_boot_pa(virt_to_phys((void *)s));
2389 	s += sizeof(uint32_t);			/* size of a pl310 mutex */
2390 	s = ROUNDUP(s, SMALL_PAGE_SIZE);	/* keep required alignment */
2391 #endif
2392 
2393 	default_nsec_shm_paddr = virt_to_phys((void *)s);
2394 	default_nsec_shm_size = e - s;
2395 
2396 	return TEE_SUCCESS;
2397 }
2398 early_init(teecore_init_pub_ram);
2399 #endif /*CFG_CORE_RESERVED_SHM*/
2400 
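/*
 * Initialize the tee_mm_sec_ddr pool covering the RAM where TAs are loaded
 * and executed. The range comes from the virtualization layer with
 * CFG_NS_VIRTUALIZATION and from the MEM_AREA_TA_RAM mapping otherwise; it
 * must be non-empty, suitably aligned and secure, or this function panics.
 */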
2401 void core_mmu_init_ta_ram(void)
2402 {
2403 	vaddr_t s = 0;
2404 	vaddr_t e = 0;
2405 	paddr_t ps = 0;
2406 	size_t size = 0;
2407 
2408 	/*
2409 	 * Get the virtual address/size of the RAM where TAs are loaded
2410 	 * and executed.
2411 	 */
2412 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
2413 		virt_get_ta_ram(&s, &e);
2414 	else
2415 		core_mmu_get_mem_by_type(MEM_AREA_TA_RAM, &s, &e);
2416 
2417 	ps = virt_to_phys((void *)s);
2418 	size = e - s;
2419 
2420 	if (!ps || (ps & CORE_MMU_USER_CODE_MASK) ||
2421 	    !size || (size & CORE_MMU_USER_CODE_MASK))
2422 		panic("invalid TA RAM");
2423 
2424 	/* Extra check, we could otherwise rely on core_mmu_get_mem_by_type() */
2425 	if (!tee_pbuf_is_sec(ps, size))
2426 		panic("TA RAM is not secure");
2427 
2428 	if (!tee_mm_is_empty(&tee_mm_sec_ddr))
2429 		panic("TA RAM pool is not empty");
2430 
2431 	/* Remove any previous config and init the TA DDR memory pool */
2432 	tee_mm_final(&tee_mm_sec_ddr);
2433 	tee_mm_init(&tee_mm_sec_ddr, ps, size, CORE_MMU_USER_CODE_SHIFT,
2434 		    TEE_MM_POOL_NO_FLAGS);
2435 }
2436