xref: /optee_os/core/mm/core_mmu.c (revision e231582fca25178ed521995577f537580ed47a41)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2016, 2022 Linaro Limited
4  * Copyright (c) 2014, STMicroelectronics International N.V.
5  */
6 
7 #include <assert.h>
8 #include <config.h>
9 #include <kernel/boot.h>
10 #include <kernel/linker.h>
11 #include <kernel/panic.h>
12 #include <kernel/spinlock.h>
13 #include <kernel/tee_l2cc_mutex.h>
14 #include <kernel/tee_misc.h>
15 #include <kernel/tlb_helpers.h>
16 #include <kernel/user_mode_ctx.h>
17 #include <kernel/virtualization.h>
18 #include <mm/core_memprot.h>
19 #include <mm/core_mmu.h>
20 #include <mm/mobj.h>
21 #include <mm/pgt_cache.h>
22 #include <mm/tee_pager.h>
23 #include <mm/vm.h>
24 #include <platform_config.h>
25 #include <string.h>
26 #include <trace.h>
27 #include <util.h>
28 
29 #ifndef DEBUG_XLAT_TABLE
30 #define DEBUG_XLAT_TABLE 0
31 #endif
32 
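/*
 * Size of the virtual address space reserved for dynamically mapped
 * shared memory (MEM_AREA_SHM_VASPACE): 32 MiB.
 */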
33 #define SHM_VASPACE_SIZE	(1024 * 1024 * 32)
34 
35 /*
36  * These variables are initialized before .bss is cleared. To avoid
37  * resetting them when .bss is cleared we're storing them in .data instead,
38  * even if they initially are zero.
39  */
40 
41 #ifdef CFG_CORE_RESERVED_SHM
42 /* Default NSec shared memory allocated from NSec world */
43 unsigned long default_nsec_shm_size __nex_bss;
44 unsigned long default_nsec_shm_paddr __nex_bss;
45 #endif
46 
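/*
 * The static memory map has room for CFG_MMAP_REGIONS entries, plus one
 * extra entry for the identity map added when CFG_CORE_ASLR is enabled
 * and one terminating MEM_AREA_END entry.
 */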
47 static struct tee_mmap_region static_memory_map[CFG_MMAP_REGIONS
48 #ifdef CFG_CORE_ASLR
49 						+ 1
50 #endif
51 						+ 1] __nex_bss;
52 
53 /* Define the platform's memory layout. */
54 struct memaccess_area {
55 	paddr_t paddr;
56 	size_t size;
57 };
58 
59 #define MEMACCESS_AREA(a, s) { .paddr = a, .size = s }
60 
61 static struct memaccess_area secure_only[] __nex_data = {
62 #ifdef TRUSTED_SRAM_BASE
63 	MEMACCESS_AREA(TRUSTED_SRAM_BASE, TRUSTED_SRAM_SIZE),
64 #endif
65 	MEMACCESS_AREA(TRUSTED_DRAM_BASE, TRUSTED_DRAM_SIZE),
66 };
67 
68 static struct memaccess_area nsec_shared[] __nex_data = {
69 #ifdef CFG_CORE_RESERVED_SHM
70 	MEMACCESS_AREA(TEE_SHMEM_START, TEE_SHMEM_SIZE),
71 #endif
72 };
73 
74 #if defined(CFG_SECURE_DATA_PATH)
75 #ifdef CFG_TEE_SDP_MEM_BASE
76 register_sdp_mem(CFG_TEE_SDP_MEM_BASE, CFG_TEE_SDP_MEM_SIZE);
77 #endif
78 #ifdef TEE_SDP_TEST_MEM_BASE
79 register_sdp_mem(TEE_SDP_TEST_MEM_BASE, TEE_SDP_TEST_MEM_SIZE);
80 #endif
81 #endif
82 
83 #ifdef CFG_CORE_RWDATA_NOEXEC
84 register_phys_mem_ul(MEM_AREA_TEE_RAM_RO, TEE_RAM_START,
85 		     VCORE_UNPG_RX_PA - TEE_RAM_START);
86 register_phys_mem_ul(MEM_AREA_TEE_RAM_RX, VCORE_UNPG_RX_PA,
87 		     VCORE_UNPG_RX_SZ_UNSAFE);
88 register_phys_mem_ul(MEM_AREA_TEE_RAM_RO, VCORE_UNPG_RO_PA,
89 		     VCORE_UNPG_RO_SZ_UNSAFE);
90 
91 #ifdef CFG_VIRTUALIZATION
92 register_phys_mem_ul(MEM_AREA_NEX_RAM_RO, VCORE_UNPG_RW_PA,
93 		     VCORE_UNPG_RW_SZ_UNSAFE);
94 register_phys_mem_ul(MEM_AREA_NEX_RAM_RW, VCORE_NEX_RW_PA,
95 		     VCORE_NEX_RW_SZ_UNSAFE);
96 #else
97 register_phys_mem_ul(MEM_AREA_TEE_RAM_RW, VCORE_UNPG_RW_PA,
98 		     VCORE_UNPG_RW_SZ_UNSAFE);
99 #endif
100 
101 #ifdef CFG_WITH_PAGER
102 register_phys_mem_ul(MEM_AREA_INIT_RAM_RX, VCORE_INIT_RX_PA,
103 		     VCORE_INIT_RX_SZ_UNSAFE);
104 register_phys_mem_ul(MEM_AREA_INIT_RAM_RO, VCORE_INIT_RO_PA,
105 		     VCORE_INIT_RO_SZ_UNSAFE);
106 #endif /*CFG_WITH_PAGER*/
107 #else /*!CFG_CORE_RWDATA_NOEXEC*/
108 register_phys_mem(MEM_AREA_TEE_RAM, TEE_RAM_START, TEE_RAM_PH_SIZE);
109 #endif /*!CFG_CORE_RWDATA_NOEXEC*/
110 
111 #ifdef CFG_VIRTUALIZATION
112 register_phys_mem(MEM_AREA_SEC_RAM_OVERALL, TRUSTED_DRAM_BASE,
113 		  TRUSTED_DRAM_SIZE);
114 #endif
115 
116 #if defined(CFG_CORE_SANITIZE_KADDRESS) && defined(CFG_WITH_PAGER)
117 /* ASan RAM is part of MEM_AREA_TEE_RAM_RW when the pager is disabled */
118 register_phys_mem_ul(MEM_AREA_TEE_ASAN, ASAN_MAP_PA, ASAN_MAP_SZ);
119 #endif
120 
121 #ifndef CFG_VIRTUALIZATION
122 /* Every guest will have its own TA RAM if virtualization support is enabled */
123 register_phys_mem(MEM_AREA_TA_RAM, TA_RAM_START, TA_RAM_SIZE);
124 #endif
125 #ifdef CFG_CORE_RESERVED_SHM
126 register_phys_mem(MEM_AREA_NSEC_SHM, TEE_SHMEM_START, TEE_SHMEM_SIZE);
127 #endif
128 
129 static unsigned int mmu_spinlock;
130 
131 static uint32_t mmu_lock(void)
132 {
133 	return cpu_spin_lock_xsave(&mmu_spinlock);
134 }
135 
136 static void mmu_unlock(uint32_t exceptions)
137 {
138 	cpu_spin_unlock_xrestore(&mmu_spinlock, exceptions);
139 }
140 
141 static struct tee_mmap_region *get_memory_map(void)
142 {
143 	if (IS_ENABLED(CFG_VIRTUALIZATION)) {
144 		struct tee_mmap_region *map = virt_get_memory_map();
145 
146 		if (map)
147 			return map;
148 	}
149 
150 	return static_memory_map;
151 }
152 
153 static bool _pbuf_intersects(struct memaccess_area *a, size_t alen,
154 			     paddr_t pa, size_t size)
155 {
156 	size_t n;
157 
158 	for (n = 0; n < alen; n++)
159 		if (core_is_buffer_intersect(pa, size, a[n].paddr, a[n].size))
160 			return true;
161 	return false;
162 }
163 
164 #define pbuf_intersects(a, pa, size) \
165 	_pbuf_intersects((a), ARRAY_SIZE(a), (pa), (size))
166 
167 static bool _pbuf_is_inside(struct memaccess_area *a, size_t alen,
168 			    paddr_t pa, size_t size)
169 {
170 	size_t n;
171 
172 	for (n = 0; n < alen; n++)
173 		if (core_is_buffer_inside(pa, size, a[n].paddr, a[n].size))
174 			return true;
175 	return false;
176 }
177 
178 #define pbuf_is_inside(a, pa, size) \
179 	_pbuf_is_inside((a), ARRAY_SIZE(a), (pa), (size))
180 
181 static bool pa_is_in_map(struct tee_mmap_region *map, paddr_t pa, size_t len)
182 {
183 	paddr_t end_pa = 0;
184 
185 	if (!map)
186 		return false;
187 
188 	if (SUB_OVERFLOW(len, 1, &end_pa) || ADD_OVERFLOW(pa, end_pa, &end_pa))
189 		return false;
190 
191 	return (pa >= map->pa && end_pa <= map->pa + map->size - 1);
192 }
193 
194 static bool va_is_in_map(struct tee_mmap_region *map, vaddr_t va)
195 {
196 	if (!map)
197 		return false;
198 	return (va >= map->va && va <= (map->va + map->size - 1));
199 }
200 
201 /* check if target buffer fits in a core default map area */
202 static bool pbuf_inside_map_area(unsigned long p, size_t l,
203 				 struct tee_mmap_region *map)
204 {
205 	return core_is_buffer_inside(p, l, map->pa, map->size);
206 }
207 
208 static struct tee_mmap_region *find_map_by_type(enum teecore_memtypes type)
209 {
210 	struct tee_mmap_region *map;
211 
212 	for (map = get_memory_map(); !core_mmap_is_end_of_table(map); map++)
213 		if (map->type == type)
214 			return map;
215 	return NULL;
216 }
217 
218 static struct tee_mmap_region *
219 find_map_by_type_and_pa(enum teecore_memtypes type, paddr_t pa, size_t len)
220 {
221 	struct tee_mmap_region *map;
222 
223 	for (map = get_memory_map(); !core_mmap_is_end_of_table(map); map++) {
224 		if (map->type != type)
225 			continue;
226 		if (pa_is_in_map(map, pa, len))
227 			return map;
228 	}
229 	return NULL;
230 }
231 
232 static struct tee_mmap_region *find_map_by_va(void *va)
233 {
234 	struct tee_mmap_region *map = get_memory_map();
235 	unsigned long a = (unsigned long)va;
236 
237 	while (!core_mmap_is_end_of_table(map)) {
238 		if (a >= map->va && a <= (map->va - 1 + map->size))
239 			return map;
240 		map++;
241 	}
242 	return NULL;
243 }
244 
245 static struct tee_mmap_region *find_map_by_pa(unsigned long pa)
246 {
247 	struct tee_mmap_region *map = get_memory_map();
248 
249 	while (!core_mmap_is_end_of_table(map)) {
250 		if (pa >= map->pa && pa <= (map->pa + map->size - 1))
251 			return map;
252 		map++;
253 	}
254 	return NULL;
255 }
256 
257 #if defined(CFG_CORE_DYN_SHM) || defined(CFG_SECURE_DATA_PATH)
258 static bool pbuf_is_special_mem(paddr_t pbuf, size_t len,
259 				const struct core_mmu_phys_mem *start,
260 				const struct core_mmu_phys_mem *end)
261 {
262 	const struct core_mmu_phys_mem *mem;
263 
264 	for (mem = start; mem < end; mem++) {
265 		if (core_is_buffer_inside(pbuf, len, mem->addr, mem->size))
266 			return true;
267 	}
268 
269 	return false;
270 }
271 #endif
272 
273 #ifdef CFG_CORE_DYN_SHM
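/*
 * Remove the range [pa, pa + size) from the array of physical memory
 * ranges at *mem. Depending on where the range lies in the matching
 * entry, that entry is dropped, shrunk from either end, or split in two:
 * for example, carving 0x2000..0x3000 out of an entry 0x1000..0x4000
 * leaves 0x1000..0x2000 and 0x3000..0x4000. The range must either be
 * fully inside a single entry or outside all of them, otherwise we panic.
 */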
274 static void carve_out_phys_mem(struct core_mmu_phys_mem **mem, size_t *nelems,
275 			       paddr_t pa, size_t size)
276 {
277 	struct core_mmu_phys_mem *m = *mem;
278 	size_t n = 0;
279 
280 	while (true) {
281 		if (n >= *nelems) {
282 			DMSG("No need to carve out %#" PRIxPA " size %#zx",
283 			     pa, size);
284 			return;
285 		}
286 		if (core_is_buffer_inside(pa, size, m[n].addr, m[n].size))
287 			break;
288 		if (!core_is_buffer_outside(pa, size, m[n].addr, m[n].size))
289 			panic();
290 		n++;
291 	}
292 
293 	if (pa == m[n].addr && size == m[n].size) {
294 		/* Remove this entry */
295 		(*nelems)--;
296 		memmove(m + n, m + n + 1, sizeof(*m) * (*nelems - n));
297 		m = nex_realloc(m, sizeof(*m) * *nelems);
298 		if (!m)
299 			panic();
300 		*mem = m;
301 	} else if (pa == m[n].addr) {
302 		m[n].addr += size;
303 		m[n].size -= size;
304 	} else if ((pa + size) == (m[n].addr + m[n].size)) {
305 		m[n].size -= size;
306 	} else {
307 		/* Need to split the memory entry */
308 		m = nex_realloc(m, sizeof(*m) * (*nelems + 1));
309 		if (!m)
310 			panic();
311 		*mem = m;
312 		memmove(m + n + 1, m + n, sizeof(*m) * (*nelems - n));
313 		(*nelems)++;
314 		m[n].size = pa - m[n].addr;
315 		m[n + 1].size -= size + m[n].size;
316 		m[n + 1].addr = pa + size;
317 	}
318 }
319 
320 static void check_phys_mem_is_outside(struct core_mmu_phys_mem *start,
321 				      size_t nelems,
322 				      struct tee_mmap_region *map)
323 {
324 	size_t n;
325 
326 	for (n = 0; n < nelems; n++) {
327 		if (!core_is_buffer_outside(start[n].addr, start[n].size,
328 					    map->pa, map->size)) {
329 			EMSG("Non-sec mem (%#" PRIxPA ":%#" PRIxPASZ
330 			     ") overlaps map (type %d %#" PRIxPA ":%#zx)",
331 			     start[n].addr, start[n].size,
332 			     map->type, map->pa, map->size);
333 			panic();
334 		}
335 	}
336 }
337 
338 static const struct core_mmu_phys_mem *discovered_nsec_ddr_start __nex_bss;
339 static size_t discovered_nsec_ddr_nelems __nex_bss;
340 
341 static int cmp_pmem_by_addr(const void *a, const void *b)
342 {
343 	const struct core_mmu_phys_mem *pmem_a = a;
344 	const struct core_mmu_phys_mem *pmem_b = b;
345 
346 	return CMP_TRILEAN(pmem_a->addr, pmem_b->addr);
347 }
348 
349 void core_mmu_set_discovered_nsec_ddr(struct core_mmu_phys_mem *start,
350 				      size_t nelems)
351 {
352 	struct core_mmu_phys_mem *m = start;
353 	size_t num_elems = nelems;
354 	struct tee_mmap_region *map = static_memory_map;
355 	const struct core_mmu_phys_mem __maybe_unused *pmem;
356 
357 	assert(!discovered_nsec_ddr_start);
358 	assert(m && num_elems);
359 
360 	qsort(m, num_elems, sizeof(*m), cmp_pmem_by_addr);
361 
362 	/*
363 	 * Non-secure shared memory and also secure data
364 	 * path memory are supposed to reside inside
365 	 * non-secure memory. Since NSEC_SHM and SDP_MEM
366 	 * are used for specific purposes, carve out holes
367 	 * for those ranges in the normal non-secure memory.
368 	 *
369 	 * This has to be done since for instance QEMU
370 	 * isn't aware of which memory range in the
371 	 * non-secure memory is used for NSEC_SHM.
372 	 */
373 
374 #ifdef CFG_SECURE_DATA_PATH
375 	for (pmem = phys_sdp_mem_begin; pmem < phys_sdp_mem_end; pmem++)
376 		carve_out_phys_mem(&m, &num_elems, pmem->addr, pmem->size);
377 #endif
378 
379 	carve_out_phys_mem(&m, &num_elems, TEE_RAM_START, TEE_RAM_PH_SIZE);
380 	carve_out_phys_mem(&m, &num_elems, TA_RAM_START, TA_RAM_SIZE);
381 
382 	for (map = static_memory_map; !core_mmap_is_end_of_table(map); map++) {
383 		switch (map->type) {
384 		case MEM_AREA_NSEC_SHM:
385 			carve_out_phys_mem(&m, &num_elems, map->pa, map->size);
386 			break;
387 		case MEM_AREA_EXT_DT:
388 		case MEM_AREA_RES_VASPACE:
389 		case MEM_AREA_SHM_VASPACE:
390 		case MEM_AREA_TS_VASPACE:
391 		case MEM_AREA_PAGER_VASPACE:
392 			break;
393 		default:
394 			check_phys_mem_is_outside(m, num_elems, map);
395 		}
396 	}
397 
398 	discovered_nsec_ddr_start = m;
399 	discovered_nsec_ddr_nelems = num_elems;
400 
401 	if (!core_mmu_check_end_pa(m[num_elems - 1].addr,
402 				   m[num_elems - 1].size))
403 		panic();
404 }
405 
406 static bool get_discovered_nsec_ddr(const struct core_mmu_phys_mem **start,
407 				    const struct core_mmu_phys_mem **end)
408 {
409 	if (!discovered_nsec_ddr_start)
410 		return false;
411 
412 	*start = discovered_nsec_ddr_start;
413 	*end = discovered_nsec_ddr_start + discovered_nsec_ddr_nelems;
414 
415 	return true;
416 }
417 
418 static bool pbuf_is_nsec_ddr(paddr_t pbuf, size_t len)
419 {
420 	const struct core_mmu_phys_mem *start;
421 	const struct core_mmu_phys_mem *end;
422 
423 	if (!get_discovered_nsec_ddr(&start, &end))
424 		return false;
425 
426 	return pbuf_is_special_mem(pbuf, len, start, end);
427 }
428 
429 bool core_mmu_nsec_ddr_is_defined(void)
430 {
431 	const struct core_mmu_phys_mem *start;
432 	const struct core_mmu_phys_mem *end;
433 
434 	if (!get_discovered_nsec_ddr(&start, &end))
435 		return false;
436 
437 	return start != end;
438 }
439 #else
440 static bool pbuf_is_nsec_ddr(paddr_t pbuf __unused, size_t len __unused)
441 {
442 	return false;
443 }
444 #endif /*CFG_CORE_DYN_SHM*/
445 
446 #define MSG_MEM_INSTERSECT(pa1, sz1, pa2, sz2) \
447 	EMSG("[%" PRIxPA " %" PRIx64 "] intersects [%" PRIxPA " %" PRIx64 "]", \
448 			pa1, (uint64_t)pa1 + (sz1), pa2, (uint64_t)pa2 + (sz2))
449 
450 #ifdef CFG_SECURE_DATA_PATH
451 static bool pbuf_is_sdp_mem(paddr_t pbuf, size_t len)
452 {
453 	return pbuf_is_special_mem(pbuf, len, phys_sdp_mem_begin,
454 				   phys_sdp_mem_end);
455 }
456 
457 struct mobj **core_sdp_mem_create_mobjs(void)
458 {
459 	const struct core_mmu_phys_mem *mem;
460 	struct mobj **mobj_base;
461 	struct mobj **mobj;
462 	int cnt = phys_sdp_mem_end - phys_sdp_mem_begin;
463 
464 	/* SDP mobjs table must end with a NULL entry */
465 	mobj_base = calloc(cnt + 1, sizeof(struct mobj *));
466 	if (!mobj_base)
467 		panic("Out of memory");
468 
469 	for (mem = phys_sdp_mem_begin, mobj = mobj_base;
470 	     mem < phys_sdp_mem_end; mem++, mobj++) {
471 		*mobj = mobj_phys_alloc(mem->addr, mem->size,
472 					TEE_MATTR_MEM_TYPE_CACHED,
473 					CORE_MEM_SDP_MEM);
474 		if (!*mobj)
475 			panic("can't create SDP physical memory object");
476 	}
477 	return mobj_base;
478 }
479 
480 #else /* CFG_SECURE_DATA_PATH */
481 static bool pbuf_is_sdp_mem(paddr_t pbuf __unused, size_t len __unused)
482 {
483 	return false;
484 }
485 
486 #endif /* CFG_SECURE_DATA_PATH */
487 
488 /* Check that special memories do not overlap each other or any registered memory */
489 static void verify_special_mem_areas(struct tee_mmap_region *mem_map,
490 				     size_t len,
491 				     const struct core_mmu_phys_mem *start,
492 				     const struct core_mmu_phys_mem *end,
493 				     const char *area_name __maybe_unused)
494 {
495 	const struct core_mmu_phys_mem *mem;
496 	const struct core_mmu_phys_mem *mem2;
497 	struct tee_mmap_region *mmap;
498 	size_t n;
499 
500 	if (start == end) {
501 		DMSG("No %s memory area defined", area_name);
502 		return;
503 	}
504 
505 	for (mem = start; mem < end; mem++)
506 		DMSG("%s memory [%" PRIxPA " %" PRIx64 "]",
507 		     area_name, mem->addr, (uint64_t)mem->addr + mem->size);
508 
509 	/* Check memories do not intersect each other */
510 	for (mem = start; mem + 1 < end; mem++) {
511 		for (mem2 = mem + 1; mem2 < end; mem2++) {
512 			if (core_is_buffer_intersect(mem2->addr, mem2->size,
513 						     mem->addr, mem->size)) {
514 				MSG_MEM_INSTERSECT(mem2->addr, mem2->size,
515 						   mem->addr, mem->size);
516 				panic("Special memory intersection");
517 			}
518 		}
519 	}
520 
521 	/*
522 	 * Check memories do not intersect any mapped memory.
523 	 * This is called before reserved VA space is loaded in mem_map.
524 	 */
525 	for (mem = start; mem < end; mem++) {
526 		for (mmap = mem_map, n = 0; n < len; mmap++, n++) {
527 			if (core_is_buffer_intersect(mem->addr, mem->size,
528 						     mmap->pa, mmap->size)) {
529 				MSG_MEM_INSTERSECT(mem->addr, mem->size,
530 						   mmap->pa, mmap->size);
531 				panic("Special memory intersection");
532 			}
533 		}
534 	}
535 }
536 
537 static void add_phys_mem(struct tee_mmap_region *memory_map, size_t num_elems,
538 			 const struct core_mmu_phys_mem *mem, size_t *last)
539 {
540 	size_t n = 0;
541 	paddr_t pa;
542 	paddr_size_t size;
543 
544 	/*
545 	 * If some ranges of memory of the same type overlap
546 	 * each other they are coalesced into one entry. To help this,
547 	 * added entries are kept sorted by increasing physical address.
548 	 *
549 	 * Note that it's valid to have the same physical memory as several
550 	 * different memory types, for instance the same device memory
551 	 * mapped as both secure and non-secure. This will probably not
552 	 * happen often in practice.
553 	 */
554 	DMSG("%s type %s 0x%08" PRIxPA " size 0x%08" PRIxPASZ,
555 	     mem->name, teecore_memtype_name(mem->type), mem->addr, mem->size);
556 	while (true) {
557 		if (n >= (num_elems - 1)) {
558 			EMSG("Out of entries (%zu) in memory_map", num_elems);
559 			panic();
560 		}
561 		if (n == *last)
562 			break;
563 		pa = memory_map[n].pa;
564 		size = memory_map[n].size;
565 		if (mem->type == memory_map[n].type &&
566 		    ((pa <= (mem->addr + (mem->size - 1))) &&
567 		    (mem->addr <= (pa + (size - 1))))) {
568 			DMSG("Physical mem map overlaps 0x%" PRIxPA, mem->addr);
569 			memory_map[n].pa = MIN(pa, mem->addr);
570 			memory_map[n].size = MAX(size, mem->size) +
571 					     (pa - memory_map[n].pa);
572 			return;
573 		}
574 		if (mem->type < memory_map[n].type ||
575 		    (mem->type == memory_map[n].type && mem->addr < pa))
576 			break; /* found the spot where to insert this memory */
577 		n++;
578 	}
579 
580 	memmove(memory_map + n + 1, memory_map + n,
581 		sizeof(struct tee_mmap_region) * (*last - n));
582 	(*last)++;
583 	memset(memory_map + n, 0, sizeof(memory_map[0]));
584 	memory_map[n].type = mem->type;
585 	memory_map[n].pa = mem->addr;
586 	memory_map[n].size = mem->size;
587 }
588 
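/*
 * Insert a physical-address-less entry of @type and @size into the map,
 * keeping the entries sorted by type. The actual virtual address is
 * assigned later by assign_mem_va().
 */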
589 static void add_va_space(struct tee_mmap_region *memory_map, size_t num_elems,
590 			 enum teecore_memtypes type, size_t size, size_t *last)
591 {
592 	size_t n = 0;
593 
594 	DMSG("type %s size 0x%08zx", teecore_memtype_name(type), size);
595 	while (true) {
596 		if (n >= (num_elems - 1)) {
597 			EMSG("Out of entries (%zu) in memory_map", num_elems);
598 			panic();
599 		}
600 		if (n == *last)
601 			break;
602 		if (type < memory_map[n].type)
603 			break;
604 		n++;
605 	}
606 
607 	memmove(memory_map + n + 1, memory_map + n,
608 		sizeof(struct tee_mmap_region) * (*last - n));
609 	(*last)++;
610 	memset(memory_map + n, 0, sizeof(memory_map[0]));
611 	memory_map[n].type = type;
612 	memory_map[n].size = size;
613 }
614 
615 uint32_t core_mmu_type_to_attr(enum teecore_memtypes t)
616 {
617 	const uint32_t attr = TEE_MATTR_VALID_BLOCK;
618 	const uint32_t cached = TEE_MATTR_MEM_TYPE_CACHED <<
619 				TEE_MATTR_MEM_TYPE_SHIFT;
620 	const uint32_t noncache = TEE_MATTR_MEM_TYPE_DEV <<
621 				  TEE_MATTR_MEM_TYPE_SHIFT;
622 
623 	switch (t) {
624 	case MEM_AREA_TEE_RAM:
625 		return attr | TEE_MATTR_SECURE | TEE_MATTR_PRWX | cached;
626 	case MEM_AREA_TEE_RAM_RX:
627 	case MEM_AREA_INIT_RAM_RX:
628 	case MEM_AREA_IDENTITY_MAP_RX:
629 		return attr | TEE_MATTR_SECURE | TEE_MATTR_PRX | cached;
630 	case MEM_AREA_TEE_RAM_RO:
631 	case MEM_AREA_INIT_RAM_RO:
632 		return attr | TEE_MATTR_SECURE | TEE_MATTR_PR | cached;
633 	case MEM_AREA_TEE_RAM_RW:
634 	case MEM_AREA_NEX_RAM_RO: /* This has to be r/w during init runtime */
635 	case MEM_AREA_NEX_RAM_RW:
636 	case MEM_AREA_TEE_ASAN:
637 		return attr | TEE_MATTR_SECURE | TEE_MATTR_PRW | cached;
638 	case MEM_AREA_TEE_COHERENT:
639 		return attr | TEE_MATTR_SECURE | TEE_MATTR_PRWX | noncache;
640 	case MEM_AREA_TA_RAM:
641 		return attr | TEE_MATTR_SECURE | TEE_MATTR_PRW | cached;
642 	case MEM_AREA_NSEC_SHM:
643 		return attr | TEE_MATTR_PRW | cached;
644 	case MEM_AREA_EXT_DT:
645 	case MEM_AREA_IO_NSEC:
646 		return attr | TEE_MATTR_PRW | noncache;
647 	case MEM_AREA_IO_SEC:
648 		return attr | TEE_MATTR_SECURE | TEE_MATTR_PRW | noncache;
649 	case MEM_AREA_RAM_NSEC:
650 		return attr | TEE_MATTR_PRW | cached;
651 	case MEM_AREA_RAM_SEC:
652 	case MEM_AREA_SEC_RAM_OVERALL:
653 		return attr | TEE_MATTR_SECURE | TEE_MATTR_PRW | cached;
654 	case MEM_AREA_RES_VASPACE:
655 	case MEM_AREA_SHM_VASPACE:
656 		return 0;
657 	case MEM_AREA_PAGER_VASPACE:
658 		return TEE_MATTR_SECURE;
659 	default:
660 		panic("invalid type");
661 	}
662 }
663 
664 static bool __maybe_unused map_is_tee_ram(const struct tee_mmap_region *mm)
665 {
666 	switch (mm->type) {
667 	case MEM_AREA_TEE_RAM:
668 	case MEM_AREA_TEE_RAM_RX:
669 	case MEM_AREA_TEE_RAM_RO:
670 	case MEM_AREA_TEE_RAM_RW:
671 	case MEM_AREA_INIT_RAM_RX:
672 	case MEM_AREA_INIT_RAM_RO:
673 	case MEM_AREA_NEX_RAM_RW:
674 	case MEM_AREA_NEX_RAM_RO:
675 	case MEM_AREA_TEE_ASAN:
676 		return true;
677 	default:
678 		return false;
679 	}
680 }
681 
682 static bool __maybe_unused map_is_secure(const struct tee_mmap_region *mm)
683 {
684 	return !!(core_mmu_type_to_attr(mm->type) & TEE_MATTR_SECURE);
685 }
686 
687 static bool __maybe_unused map_is_pgdir(const struct tee_mmap_region *mm)
688 {
689 	return mm->region_size == CORE_MMU_PGDIR_SIZE;
690 }
691 
692 static int cmp_mmap_by_lower_va(const void *a, const void *b)
693 {
694 	const struct tee_mmap_region *mm_a = a;
695 	const struct tee_mmap_region *mm_b = b;
696 
697 	return CMP_TRILEAN(mm_a->va, mm_b->va);
698 }
699 
700 static void dump_mmap_table(struct tee_mmap_region *memory_map)
701 {
702 	struct tee_mmap_region *map;
703 
704 	for (map = memory_map; !core_mmap_is_end_of_table(map); map++) {
705 		vaddr_t __maybe_unused vstart;
706 
707 		vstart = map->va + ((vaddr_t)map->pa & (map->region_size - 1));
708 		DMSG("type %-12s va 0x%08" PRIxVA "..0x%08" PRIxVA
709 		     " pa 0x%08" PRIxPA "..0x%08" PRIxPA " size 0x%08zx (%s)",
710 		     teecore_memtype_name(map->type), vstart,
711 		     vstart + map->size - 1, map->pa,
712 		     (paddr_t)(map->pa + map->size - 1), map->size,
713 		     map->region_size == SMALL_PAGE_SIZE ? "smallpg" : "pgdir");
714 	}
715 }
716 
717 #if DEBUG_XLAT_TABLE
718 
719 static void dump_xlat_table(vaddr_t va, unsigned int level)
720 {
721 	struct core_mmu_table_info tbl_info;
722 	unsigned int idx = 0;
723 	paddr_t pa;
724 	uint32_t attr;
725 
726 	core_mmu_find_table(NULL, va, level, &tbl_info);
727 	va = tbl_info.va_base;
728 	for (idx = 0; idx < tbl_info.num_entries; idx++) {
729 		core_mmu_get_entry(&tbl_info, idx, &pa, &attr);
730 		if (attr || level > CORE_MMU_BASE_TABLE_LEVEL) {
731 			const char *security_bit = "";
732 
733 			if (core_mmu_entry_have_security_bit(attr)) {
734 				if (attr & TEE_MATTR_SECURE)
735 					security_bit = "S";
736 				else
737 					security_bit = "NS";
738 			}
739 
740 			if (attr & TEE_MATTR_TABLE) {
741 				DMSG_RAW("%*s [LVL%d] VA:0x%010" PRIxVA
742 					" TBL:0x%010" PRIxPA " %s",
743 					level * 2, "", level, va, pa,
744 					security_bit);
745 				dump_xlat_table(va, level + 1);
746 			} else if (attr) {
747 				DMSG_RAW("%*s [LVL%d] VA:0x%010" PRIxVA
748 					" PA:0x%010" PRIxPA " %s-%s-%s-%s",
749 					level * 2, "", level, va, pa,
750 					mattr_is_cached(attr) ? "MEM" :
751 					"DEV",
752 					attr & TEE_MATTR_PW ? "RW" : "RO",
753 					attr & TEE_MATTR_PX ? "X " : "XN",
754 					security_bit);
755 			} else {
756 				DMSG_RAW("%*s [LVL%d] VA:0x%010" PRIxVA
757 					    " INVALID\n",
758 					    level * 2, "", level, va);
759 			}
760 		}
761 		va += BIT64(tbl_info.shift);
762 	}
763 }
764 
765 #else
766 
767 static void dump_xlat_table(vaddr_t va __unused, int level __unused)
768 {
769 }
770 
771 #endif
772 
773 /*
774  * Reserves virtual memory space for pager usage.
775  *
776  * The virtual address range from the start of the first memory used by the
777  * link script up to TEE_RAM_VA_SIZE must be covered, either with a direct
778  * mapping or with an empty mapping for pager usage. This adds translation
779  * tables as needed for the pager to operate.
780  */
781 static void add_pager_vaspace(struct tee_mmap_region *mmap, size_t num_elems,
782 			      size_t *last)
783 {
784 	paddr_t begin = 0;
785 	paddr_t end = 0;
786 	size_t size = 0;
787 	size_t pos = 0;
788 	size_t n = 0;
789 
790 	if (*last >= (num_elems - 1)) {
791 		EMSG("Out of entries (%zu) in memory map", num_elems);
792 		panic();
793 	}
794 
795 	for (n = 0; !core_mmap_is_end_of_table(mmap + n); n++) {
796 		if (map_is_tee_ram(mmap + n)) {
797 			if (!begin)
798 				begin = mmap[n].pa;
799 			pos = n + 1;
800 		}
801 	}
802 
803 	end = mmap[pos - 1].pa + mmap[pos - 1].size;
804 	size = TEE_RAM_VA_SIZE - (end - begin);
805 	if (!size)
806 		return;
807 
808 	assert(pos <= *last);
809 	memmove(mmap + pos + 1, mmap + pos,
810 		sizeof(struct tee_mmap_region) * (*last - pos));
811 	(*last)++;
812 	memset(mmap + pos, 0, sizeof(mmap[0]));
813 	mmap[pos].type = MEM_AREA_PAGER_VASPACE;
814 	mmap[pos].va = 0;
815 	mmap[pos].size = size;
816 	mmap[pos].region_size = SMALL_PAGE_SIZE;
817 	mmap[pos].attr = core_mmu_type_to_attr(MEM_AREA_PAGER_VASPACE);
818 }
819 
820 static void check_sec_nsec_mem_config(void)
821 {
822 	size_t n = 0;
823 
824 	for (n = 0; n < ARRAY_SIZE(secure_only); n++) {
825 		if (pbuf_intersects(nsec_shared, secure_only[n].paddr,
826 				    secure_only[n].size))
827 			panic("Invalid memory access config: sec/nsec");
828 	}
829 }
830 
831 static size_t collect_mem_ranges(struct tee_mmap_region *memory_map,
832 				 size_t num_elems)
833 {
834 	const struct core_mmu_phys_mem *mem = NULL;
835 	size_t last = 0;
836 
837 	for (mem = phys_mem_map_begin; mem < phys_mem_map_end; mem++) {
838 		struct core_mmu_phys_mem m = *mem;
839 
840 		/* Discard null size entries */
841 		if (!m.size)
842 			continue;
843 
844 		/* Only unmapped virtual range may have a null phys addr */
845 		assert(m.addr || !core_mmu_type_to_attr(m.type));
846 
847 		add_phys_mem(memory_map, num_elems, &m, &last);
848 	}
849 
850 	if (IS_ENABLED(CFG_SECURE_DATA_PATH))
851 		verify_special_mem_areas(memory_map, num_elems,
852 					 phys_sdp_mem_begin,
853 					 phys_sdp_mem_end, "SDP");
854 
855 	add_va_space(memory_map, num_elems, MEM_AREA_RES_VASPACE,
856 		     CFG_RESERVED_VASPACE_SIZE, &last);
857 
858 	add_va_space(memory_map, num_elems, MEM_AREA_SHM_VASPACE,
859 		     SHM_VASPACE_SIZE, &last);
860 
861 	memory_map[last].type = MEM_AREA_END;
862 
863 	return last;
864 }
865 
866 static void assign_mem_granularity(struct tee_mmap_region *memory_map)
867 {
868 	struct tee_mmap_region *map = NULL;
869 
870 	/*
871 	 * Assign region sizes, note that MEM_AREA_TEE_RAM always uses
872 	 * SMALL_PAGE_SIZE.
873 	 */
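	/*
	 * A region whose base address and size are both CORE_MMU_PGDIR_SIZE
	 * aligned can be mapped with pgdir entries, otherwise it must be
	 * mapped with small pages; any finer misalignment is a configuration
	 * error.
	 */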
874 	for (map = memory_map; !core_mmap_is_end_of_table(map); map++) {
875 		paddr_t mask = map->pa | map->size;
876 
877 		if (!(mask & CORE_MMU_PGDIR_MASK))
878 			map->region_size = CORE_MMU_PGDIR_SIZE;
879 		else if (!(mask & SMALL_PAGE_MASK))
880 			map->region_size = SMALL_PAGE_SIZE;
881 		else
882 			panic("Impossible memory alignment");
883 
884 		if (map_is_tee_ram(map))
885 			map->region_size = SMALL_PAGE_SIZE;
886 	}
887 }
888 
889 static bool assign_mem_va(vaddr_t tee_ram_va,
890 			  struct tee_mmap_region *memory_map)
891 {
892 	struct tee_mmap_region *map = NULL;
893 	vaddr_t va = tee_ram_va;
894 	bool va_is_secure = true;
895 
896 	/*
897 	 * Check that we're not overlapping with the user VA range.
898 	 */
899 	if (IS_ENABLED(CFG_WITH_LPAE)) {
900 		/*
901 		 * User VA range is supposed to be defined after these
902 		 * mappings have been established.
903 		 */
904 		assert(!core_mmu_user_va_range_is_defined());
905 	} else {
906 		vaddr_t user_va_base = 0;
907 		size_t user_va_size = 0;
908 
909 		assert(core_mmu_user_va_range_is_defined());
910 		core_mmu_get_user_va_range(&user_va_base, &user_va_size);
911 		if (tee_ram_va < (user_va_base + user_va_size))
912 			return false;
913 	}
914 
915 	/* Clear any previous assignments */
916 	for (map = memory_map; !core_mmap_is_end_of_table(map); map++)
917 		map->va = 0;
918 
919 	/*
920 	 * TEE RAM regions are always aligned with region_size.
921 	 *
922 	 * Note that MEM_AREA_PAGER_VASPACE also counts as TEE RAM here
923 	 * since it handles virtual memory which covers the part of the ELF
924 	 * that cannot fit directly into memory.
925 	 */
926 	for (map = memory_map; !core_mmap_is_end_of_table(map); map++) {
927 		if (map_is_tee_ram(map) ||
928 		    map->type == MEM_AREA_PAGER_VASPACE) {
929 			assert(!(va & (map->region_size - 1)));
930 			assert(!(map->size & (map->region_size - 1)));
931 			map->va = va;
932 			if (ADD_OVERFLOW(va, map->size, &va))
933 				return false;
934 			if (va >= BIT64(core_mmu_get_va_width()))
935 				return false;
936 		}
937 	}
938 
939 	if (core_mmu_place_tee_ram_at_top(tee_ram_va)) {
940 		/*
941 		 * Map non-tee ram regions at addresses lower than the tee
942 		 * ram region.
943 		 */
944 		va = tee_ram_va;
945 		for (map = memory_map; !core_mmap_is_end_of_table(map); map++) {
946 			map->attr = core_mmu_type_to_attr(map->type);
947 			if (map->va)
948 				continue;
949 
950 			if (!IS_ENABLED(CFG_WITH_LPAE) &&
951 			    va_is_secure != map_is_secure(map)) {
952 				va_is_secure = !va_is_secure;
953 				va = ROUNDDOWN(va, CORE_MMU_PGDIR_SIZE);
954 			}
955 
956 			if (SUB_OVERFLOW(va, map->size, &va))
957 				return false;
958 			va = ROUNDDOWN(va, map->region_size);
959 			/*
960 			 * Make sure that va is aligned with pa for
961 			 * efficient pgdir mapping. Basically pa &
962 			 * pgdir_mask should be == va & pgdir_mask
963 			 */
964 			if (map->size > 2 * CORE_MMU_PGDIR_SIZE) {
965 				if (SUB_OVERFLOW(va, CORE_MMU_PGDIR_SIZE, &va))
966 					return false;
967 				va += (map->pa - va) & CORE_MMU_PGDIR_MASK;
968 			}
969 			map->va = va;
970 		}
971 	} else {
972 		/*
973 		 * Map non-tee ram regions at addresses higher than the tee
974 		 * ram region.
975 		 */
976 		for (map = memory_map; !core_mmap_is_end_of_table(map); map++) {
977 			map->attr = core_mmu_type_to_attr(map->type);
978 			if (map->va)
979 				continue;
980 
981 			if (!IS_ENABLED(CFG_WITH_LPAE) &&
982 			    va_is_secure != map_is_secure(map)) {
983 				va_is_secure = !va_is_secure;
984 				if (ROUNDUP_OVERFLOW(va, CORE_MMU_PGDIR_SIZE,
985 						     &va))
986 					return false;
987 			}
988 
989 			if (ROUNDUP_OVERFLOW(va, map->region_size, &va))
990 				return false;
991 			/*
992 			 * Make sure that va is aligned with pa for
993 			 * efficient pgdir mapping. Basically pa &
994 			 * pgdir_mask should be == va & pgdir_mask
995 			 */
996 			if (map->size > 2 * CORE_MMU_PGDIR_SIZE) {
997 				vaddr_t offs = (map->pa - va) &
998 					       CORE_MMU_PGDIR_MASK;
999 
1000 				if (ADD_OVERFLOW(va, offs, &va))
1001 					return false;
1002 			}
1003 
1004 			map->va = va;
1005 			if (ADD_OVERFLOW(va, map->size, &va))
1006 				return false;
1007 			if (va >= BIT64(core_mmu_get_va_width()))
1008 				return false;
1009 		}
1010 	}
1011 
1012 	return true;
1013 }
1014 
1015 static int cmp_init_mem_map(const void *a, const void *b)
1016 {
1017 	const struct tee_mmap_region *mm_a = a;
1018 	const struct tee_mmap_region *mm_b = b;
1019 	int rc = 0;
1020 
1021 	rc = CMP_TRILEAN(mm_a->region_size, mm_b->region_size);
1022 	if (!rc)
1023 		rc = CMP_TRILEAN(mm_a->pa, mm_b->pa);
1024 	/*
1025 	 * 32-bit MMU descriptors cannot mix secure and non-secure mappings in
1026 	 * the same level-2 table. Hence sort secure mappings apart from
1027 	 * non-secure mappings.
1028 	 */
1029 	if (!rc && !IS_ENABLED(CFG_WITH_LPAE))
1030 		rc = CMP_TRILEAN(map_is_secure(mm_a), map_is_secure(mm_b));
1031 
1032 	return rc;
1033 }
1034 
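/*
 * Add an entry mapping [@id_map_start, @id_map_end), rounded to small
 * pages, read-only executable at identical virtual and physical
 * addresses. Returns false if that range intersects an already assigned
 * virtual range.
 */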
1035 static bool mem_map_add_id_map(struct tee_mmap_region *memory_map,
1036 			       size_t num_elems, size_t *last,
1037 			       vaddr_t id_map_start, vaddr_t id_map_end)
1038 {
1039 	struct tee_mmap_region *map = NULL;
1040 	vaddr_t start = ROUNDDOWN(id_map_start, SMALL_PAGE_SIZE);
1041 	vaddr_t end = ROUNDUP(id_map_end, SMALL_PAGE_SIZE);
1042 	size_t len = end - start;
1043 
1044 	if (*last >= num_elems - 1) {
1045 		EMSG("Out of entries (%zu) in memory map", num_elems);
1046 		panic();
1047 	}
1048 
1049 	for (map = memory_map; !core_mmap_is_end_of_table(map); map++)
1050 		if (core_is_buffer_intersect(map->va, map->size, start, len))
1051 			return false;
1052 
1053 	*map = (struct tee_mmap_region){
1054 		.type = MEM_AREA_IDENTITY_MAP_RX,
1055 		/*
1056 		 * Could use CORE_MMU_PGDIR_SIZE to potentially save a
1057 		 * translation table, at the increased risk of clashes with
1058 		 * the rest of the memory map.
1059 		 */
1060 		.region_size = SMALL_PAGE_SIZE,
1061 		.pa = start,
1062 		.va = start,
1063 		.size = len,
1064 		.attr = core_mmu_type_to_attr(MEM_AREA_IDENTITY_MAP_RX),
1065 	};
1066 
1067 	(*last)++;
1068 
1069 	return true;
1070 }
1071 
1072 static unsigned long init_mem_map(struct tee_mmap_region *memory_map,
1073 				  size_t num_elems, unsigned long seed)
1074 {
1075 	/*
1076 	 * @id_map_start and @id_map_end describe a physical memory range
1077 	 * that must be mapped Read-Only eXecutable at identical virtual
1078 	 * addresses.
1079 	 */
1080 	vaddr_t id_map_start = (vaddr_t)__identity_map_init_start;
1081 	vaddr_t id_map_end = (vaddr_t)__identity_map_init_end;
1082 	unsigned long offs = 0;
1083 	size_t last = 0;
1084 
1085 	last = collect_mem_ranges(memory_map, num_elems);
1086 	assign_mem_granularity(memory_map);
1087 
1088 	/*
1089 	 * To ease mapping and lower the use of xlat tables, sort the mapping
1090 	 * descriptions, moving small-page regions after the pgdir regions.
1091 	 */
1092 	qsort(memory_map, last, sizeof(struct tee_mmap_region),
1093 	      cmp_init_mem_map);
1094 
1095 	add_pager_vaspace(memory_map, num_elems, &last);
1096 	if (IS_ENABLED(CFG_CORE_ASLR) && seed) {
1097 		vaddr_t base_addr = TEE_RAM_START + seed;
1098 		const unsigned int va_width = core_mmu_get_va_width();
1099 		const vaddr_t va_mask = GENMASK_64(va_width - 1,
1100 						   SMALL_PAGE_SHIFT);
1101 		vaddr_t ba = base_addr;
1102 		size_t n = 0;
1103 
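		/*
		 * Try the seeded base address first; if the map cannot be
		 * assigned there (for instance because it would overflow the
		 * VA space or clash with the identity map), retry with one
		 * of the top address bits flipped before falling back to the
		 * default mapping at TEE_RAM_START below.
		 */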
1104 		for (n = 0; n < 3; n++) {
1105 			if (n)
1106 				ba = base_addr ^ BIT64(va_width - n);
1107 			ba &= va_mask;
1108 			if (assign_mem_va(ba, memory_map) &&
1109 			    mem_map_add_id_map(memory_map, num_elems, &last,
1110 					       id_map_start, id_map_end)) {
1111 				offs = ba - TEE_RAM_START;
1112 				DMSG("Mapping core at %#"PRIxVA" offs %#lx",
1113 				     ba, offs);
1114 				goto out;
1115 			} else {
1116 				DMSG("Failed to map core at %#"PRIxVA, ba);
1117 			}
1118 		}
1119 		EMSG("Failed to map core with seed %#lx", seed);
1120 	}
1121 
1122 	if (!assign_mem_va(TEE_RAM_START, memory_map))
1123 		panic();
1124 
1125 out:
1126 	qsort(memory_map, last, sizeof(struct tee_mmap_region),
1127 	      cmp_mmap_by_lower_va);
1128 
1129 	dump_mmap_table(memory_map);
1130 
1131 	return offs;
1132 }
1133 
1134 static void check_mem_map(struct tee_mmap_region *map)
1135 {
1136 	struct tee_mmap_region *m = NULL;
1137 
1138 	for (m = map; !core_mmap_is_end_of_table(m); m++) {
1139 		switch (m->type) {
1140 		case MEM_AREA_TEE_RAM:
1141 		case MEM_AREA_TEE_RAM_RX:
1142 		case MEM_AREA_TEE_RAM_RO:
1143 		case MEM_AREA_TEE_RAM_RW:
1144 		case MEM_AREA_INIT_RAM_RX:
1145 		case MEM_AREA_INIT_RAM_RO:
1146 		case MEM_AREA_NEX_RAM_RW:
1147 		case MEM_AREA_NEX_RAM_RO:
1148 		case MEM_AREA_IDENTITY_MAP_RX:
1149 			if (!pbuf_is_inside(secure_only, m->pa, m->size))
1150 				panic("TEE_RAM can't fit in secure_only");
1151 			break;
1152 		case MEM_AREA_TA_RAM:
1153 			if (!pbuf_is_inside(secure_only, m->pa, m->size))
1154 				panic("TA_RAM can't fit in secure_only");
1155 			break;
1156 		case MEM_AREA_NSEC_SHM:
1157 			if (!pbuf_is_inside(nsec_shared, m->pa, m->size))
1158 				panic("NS_SHM can't fit in nsec_shared");
1159 			break;
1160 		case MEM_AREA_SEC_RAM_OVERALL:
1161 		case MEM_AREA_TEE_COHERENT:
1162 		case MEM_AREA_TEE_ASAN:
1163 		case MEM_AREA_IO_SEC:
1164 		case MEM_AREA_IO_NSEC:
1165 		case MEM_AREA_EXT_DT:
1166 		case MEM_AREA_RAM_SEC:
1167 		case MEM_AREA_RAM_NSEC:
1168 		case MEM_AREA_RES_VASPACE:
1169 		case MEM_AREA_SHM_VASPACE:
1170 		case MEM_AREA_PAGER_VASPACE:
1171 			break;
1172 		default:
1173 			EMSG("Unhandled memtype %d", m->type);
1174 			panic();
1175 		}
1176 	}
1177 }
1178 
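/*
 * Build the temporary memory map in the heap area. With the pager
 * enabled, heap1 may be too small to hold static_memory_map, in which
 * case heap2 is used instead.
 */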
1179 static struct tee_mmap_region *get_tmp_mmap(void)
1180 {
1181 	struct tee_mmap_region *tmp_mmap = (void *)__heap1_start;
1182 
1183 #ifdef CFG_WITH_PAGER
1184 	if (__heap1_end - __heap1_start < (ptrdiff_t)sizeof(static_memory_map))
1185 		tmp_mmap = (void *)__heap2_start;
1186 #endif
1187 
1188 	memset(tmp_mmap, 0, sizeof(static_memory_map));
1189 
1190 	return tmp_mmap;
1191 }
1192 
1193 /*
1194  * core_init_mmu_map() - init tee core default memory mapping
1195  *
1196  * This routine sets the static default TEE core mapping. If @seed is > 0
1197  * and CFG_CORE_ASLR is enabled it will map the TEE core at a location
1198  * based on the seed and return the offset from the link address.
1199  *
1200  * If an error occurs, core_init_mmu_map() is expected to panic.
1201  *
1202  * Note: this function is weak just to make it possible to exclude it from
1203  * the unpaged area.
1204  */
1205 void __weak core_init_mmu_map(unsigned long seed, struct core_mmu_config *cfg)
1206 {
1207 #ifndef CFG_VIRTUALIZATION
1208 	vaddr_t start = ROUNDDOWN((vaddr_t)__nozi_start, SMALL_PAGE_SIZE);
1209 #else
1210 	vaddr_t start = ROUNDDOWN((vaddr_t)__vcore_nex_rw_start,
1211 				  SMALL_PAGE_SIZE);
1212 #endif
1213 	vaddr_t len = ROUNDUP((vaddr_t)__nozi_end, SMALL_PAGE_SIZE) - start;
1214 	struct tee_mmap_region *tmp_mmap = get_tmp_mmap();
1215 	unsigned long offs = 0;
1216 
1217 	check_sec_nsec_mem_config();
1218 
1219 	/*
1220 	 * Add an entry covering the translation tables which will be
1221 	 * involved in some virt_to_phys() and phys_to_virt() conversions.
1222 	 */
1223 	static_memory_map[0] = (struct tee_mmap_region){
1224 		.type = MEM_AREA_TEE_RAM,
1225 		.region_size = SMALL_PAGE_SIZE,
1226 		.pa = start,
1227 		.va = start,
1228 		.size = len,
1229 		.attr = core_mmu_type_to_attr(MEM_AREA_IDENTITY_MAP_RX),
1230 	};
1231 
1232 	COMPILE_TIME_ASSERT(CFG_MMAP_REGIONS >= 13);
1233 	offs = init_mem_map(tmp_mmap, ARRAY_SIZE(static_memory_map), seed);
1234 
1235 	check_mem_map(tmp_mmap);
1236 	core_init_mmu(tmp_mmap);
1237 	dump_xlat_table(0x0, CORE_MMU_BASE_TABLE_LEVEL);
1238 	core_init_mmu_regs(cfg);
1239 	cfg->load_offset = offs;
1240 	memcpy(static_memory_map, tmp_mmap, sizeof(static_memory_map));
1241 }
1242 
1243 bool core_mmu_mattr_is_ok(uint32_t mattr)
1244 {
1245 	/*
1246 	 * Keep in sync with core_mmu_lpae.c:mattr_to_desc and
1247 	 * core_mmu_v7.c:mattr_to_texcb
1248 	 */
1249 
1250 	switch ((mattr >> TEE_MATTR_MEM_TYPE_SHIFT) & TEE_MATTR_MEM_TYPE_MASK) {
1251 	case TEE_MATTR_MEM_TYPE_DEV:
1252 	case TEE_MATTR_MEM_TYPE_STRONGLY_O:
1253 	case TEE_MATTR_MEM_TYPE_CACHED:
1254 		return true;
1255 	default:
1256 		return false;
1257 	}
1258 }
1259 
1260 /*
1261  * test attributes of target physical buffer
1262  *
1263  * Flags: pbuf_is(SECURE, NOT_SECURE, RAM, IOMEM, KEYVAULT).
1264  *
1265  */
1266 bool core_pbuf_is(uint32_t attr, paddr_t pbuf, size_t len)
1267 {
1268 	struct tee_mmap_region *map;
1269 
1270 	/* Empty buffers comply with anything */
1271 	if (len == 0)
1272 		return true;
1273 
1274 	switch (attr) {
1275 	case CORE_MEM_SEC:
1276 		return pbuf_is_inside(secure_only, pbuf, len);
1277 	case CORE_MEM_NON_SEC:
1278 		return pbuf_is_inside(nsec_shared, pbuf, len) ||
1279 			pbuf_is_nsec_ddr(pbuf, len);
1280 	case CORE_MEM_TEE_RAM:
1281 		return core_is_buffer_inside(pbuf, len, TEE_RAM_START,
1282 							TEE_RAM_PH_SIZE);
1283 	case CORE_MEM_TA_RAM:
1284 		return core_is_buffer_inside(pbuf, len, TA_RAM_START,
1285 							TA_RAM_SIZE);
1286 #ifdef CFG_CORE_RESERVED_SHM
1287 	case CORE_MEM_NSEC_SHM:
1288 		return core_is_buffer_inside(pbuf, len, TEE_SHMEM_START,
1289 							TEE_SHMEM_SIZE);
1290 #endif
1291 	case CORE_MEM_SDP_MEM:
1292 		return pbuf_is_sdp_mem(pbuf, len);
1293 	case CORE_MEM_CACHED:
1294 		map = find_map_by_pa(pbuf);
1295 		if (!map || !pbuf_inside_map_area(pbuf, len, map))
1296 			return false;
1297 		return mattr_is_cached(map->attr);
1298 	default:
1299 		return false;
1300 	}
1301 }
1302 
1303 /* test attributes of target virtual buffer (in core mapping) */
1304 bool core_vbuf_is(uint32_t attr, const void *vbuf, size_t len)
1305 {
1306 	paddr_t p;
1307 
1308 	/* Empty buffers comply with anything */
1309 	if (len == 0)
1310 		return true;
1311 
1312 	p = virt_to_phys((void *)vbuf);
1313 	if (!p)
1314 		return false;
1315 
1316 	return core_pbuf_is(attr, p, len);
1317 }
1318 
1319 /* core_va2pa - teecore exported service */
1320 static int __maybe_unused core_va2pa_helper(void *va, paddr_t *pa)
1321 {
1322 	struct tee_mmap_region *map;
1323 
1324 	map = find_map_by_va(va);
1325 	if (!va_is_in_map(map, (vaddr_t)va))
1326 		return -1;
1327 
1328 	/*
1329 	 * We can only calculate the PA for static mappings. Virtual address
1330 	 * ranges reserved for core dynamic mapping return a 'match' (return 0)
1331 	 * together with an invalid (null) physical address.
1332 	 */
1333 	if (map->pa)
1334 		*pa = map->pa + (vaddr_t)va  - map->va;
1335 	else
1336 		*pa = 0;
1337 
1338 	return 0;
1339 }
1340 
1341 static void *map_pa2va(struct tee_mmap_region *map, paddr_t pa, size_t len)
1342 {
1343 	if (!pa_is_in_map(map, pa, len))
1344 		return NULL;
1345 
1346 	return (void *)(vaddr_t)(map->va + pa - map->pa);
1347 }
1348 
1349 /*
1350  * Get the virtual address range of a core memory area of a given type
1351  */
1352 void core_mmu_get_mem_by_type(unsigned int type, vaddr_t *s, vaddr_t *e)
1353 {
1354 	struct tee_mmap_region *map = find_map_by_type(type);
1355 
1356 	if (map) {
1357 		*s = map->va;
1358 		*e = map->va + map->size;
1359 	} else {
1360 		*s = 0;
1361 		*e = 0;
1362 	}
1363 }
1364 
1365 enum teecore_memtypes core_mmu_get_type_by_pa(paddr_t pa)
1366 {
1367 	struct tee_mmap_region *map = find_map_by_pa(pa);
1368 
1369 	if (!map)
1370 		return MEM_AREA_MAXTYPE;
1371 	return map->type;
1372 }
1373 
1374 void core_mmu_set_entry(struct core_mmu_table_info *tbl_info, unsigned int idx,
1375 			paddr_t pa, uint32_t attr)
1376 {
1377 	assert(idx < tbl_info->num_entries);
1378 	core_mmu_set_entry_primitive(tbl_info->table, tbl_info->level,
1379 				     idx, pa, attr);
1380 }
1381 
1382 void core_mmu_get_entry(struct core_mmu_table_info *tbl_info, unsigned int idx,
1383 			paddr_t *pa, uint32_t *attr)
1384 {
1385 	assert(idx < tbl_info->num_entries);
1386 	core_mmu_get_entry_primitive(tbl_info->table, tbl_info->level,
1387 				     idx, pa, attr);
1388 }
1389 
1390 static void clear_region(struct core_mmu_table_info *tbl_info,
1391 			 struct tee_mmap_region *region)
1392 {
1393 	unsigned int end = 0;
1394 	unsigned int idx = 0;
1395 
1396 	/* va, len and pa should be block aligned */
1397 	assert(!core_mmu_get_block_offset(tbl_info, region->va));
1398 	assert(!core_mmu_get_block_offset(tbl_info, region->size));
1399 	assert(!core_mmu_get_block_offset(tbl_info, region->pa));
1400 
1401 	idx = core_mmu_va2idx(tbl_info, region->va);
1402 	end = core_mmu_va2idx(tbl_info, region->va + region->size);
1403 
1404 	while (idx < end) {
1405 		core_mmu_set_entry(tbl_info, idx, 0, 0);
1406 		idx++;
1407 	}
1408 }
1409 
1410 static void set_region(struct core_mmu_table_info *tbl_info,
1411 		       struct tee_mmap_region *region)
1412 {
1413 	unsigned int end;
1414 	unsigned int idx;
1415 	paddr_t pa;
1416 
1417 	/* va, len and pa should be block aligned */
1418 	assert(!core_mmu_get_block_offset(tbl_info, region->va));
1419 	assert(!core_mmu_get_block_offset(tbl_info, region->size));
1420 	assert(!core_mmu_get_block_offset(tbl_info, region->pa));
1421 
1422 	idx = core_mmu_va2idx(tbl_info, region->va);
1423 	end = core_mmu_va2idx(tbl_info, region->va + region->size);
1424 	pa = region->pa;
1425 
1426 	while (idx < end) {
1427 		core_mmu_set_entry(tbl_info, idx, pa, region->attr);
1428 		idx++;
1429 		pa += BIT64(tbl_info->shift);
1430 	}
1431 }
1432 
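/*
 * Populate the page tables for one user-mode VM region: for each
 * pgdir-sized chunk a small-page table from @pgt is hooked into
 * @dir_info, and unless the backing mobj is paged the physical pages are
 * entered directly into that table.
 */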
1433 static void set_pg_region(struct core_mmu_table_info *dir_info,
1434 			  struct vm_region *region, struct pgt **pgt,
1435 			  struct core_mmu_table_info *pg_info)
1436 {
1437 	struct tee_mmap_region r = {
1438 		.va = region->va,
1439 		.size = region->size,
1440 		.attr = region->attr,
1441 	};
1442 	vaddr_t end = r.va + r.size;
1443 	uint32_t pgt_attr = (r.attr & TEE_MATTR_SECURE) | TEE_MATTR_TABLE;
1444 
1445 	while (r.va < end) {
1446 		if (!pg_info->table ||
1447 		    r.va >= (pg_info->va_base + CORE_MMU_PGDIR_SIZE)) {
1448 			/*
1449 			 * We're assigning a new translation table.
1450 			 */
1451 			unsigned int idx;
1452 
1453 			/* Virtual addresses must grow */
1454 			assert(r.va > pg_info->va_base);
1455 
1456 			idx = core_mmu_va2idx(dir_info, r.va);
1457 			pg_info->va_base = core_mmu_idx2va(dir_info, idx);
1458 
1459 #ifdef CFG_PAGED_USER_TA
1460 			/*
1461 			 * Advance pgt to va_base, note that we may need to
1462 			 * skip multiple page tables if there are large
1463 			 * holes in the vm map.
1464 			 */
1465 			while ((*pgt)->vabase < pg_info->va_base) {
1466 				*pgt = SLIST_NEXT(*pgt, link);
1467 				/* We should have allocated enough */
1468 				assert(*pgt);
1469 			}
1470 			assert((*pgt)->vabase == pg_info->va_base);
1471 			pg_info->table = (*pgt)->tbl;
1472 #else
1473 			assert(*pgt); /* We should have allocated enough */
1474 			pg_info->table = (*pgt)->tbl;
1475 			*pgt = SLIST_NEXT(*pgt, link);
1476 #endif
1477 
1478 			core_mmu_set_entry(dir_info, idx,
1479 					   virt_to_phys(pg_info->table),
1480 					   pgt_attr);
1481 		}
1482 
1483 		r.size = MIN(CORE_MMU_PGDIR_SIZE - (r.va - pg_info->va_base),
1484 			     end - r.va);
1485 
1486 		if (!mobj_is_paged(region->mobj)) {
1487 			size_t granule = BIT(pg_info->shift);
1488 			size_t offset = r.va - region->va + region->offset;
1489 
1490 			r.size = MIN(r.size,
1491 				     mobj_get_phys_granule(region->mobj));
1492 			r.size = ROUNDUP(r.size, SMALL_PAGE_SIZE);
1493 
1494 			if (mobj_get_pa(region->mobj, offset, granule,
1495 					&r.pa) != TEE_SUCCESS)
1496 				panic("Failed to get PA of unpaged mobj");
1497 			set_region(pg_info, &r);
1498 		}
1499 		r.va += r.size;
1500 	}
1501 }
1502 
1503 static bool can_map_at_level(paddr_t paddr, vaddr_t vaddr,
1504 			     size_t size_left, paddr_t block_size,
1505 			     struct tee_mmap_region *mm __maybe_unused)
1506 {
1507 	/* VA and PA are aligned to block size at current level */
1508 	if ((vaddr | paddr) & (block_size - 1))
1509 		return false;
1510 
1511 	/* Remainder fits into block at current level */
1512 	if (size_left < block_size)
1513 		return false;
1514 
1515 #ifdef CFG_WITH_PAGER
1516 	/*
1517 	 * If pager is enabled, we need to map tee ram
1518 	 * regions with small pages only
1519 	 */
1520 	if (map_is_tee_ram(mm) && block_size != SMALL_PAGE_SIZE)
1521 		return false;
1522 #endif
1523 
1524 	return true;
1525 }
1526 
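/*
 * Map the region described by @mm into the translation tables of @prtn,
 * using the largest block size the VA/PA alignment and remaining size
 * allow at each step and splitting existing entries into finer grained
 * tables when needed.
 */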
1527 void core_mmu_map_region(struct mmu_partition *prtn, struct tee_mmap_region *mm)
1528 {
1529 	struct core_mmu_table_info tbl_info;
1530 	unsigned int idx;
1531 	vaddr_t vaddr = mm->va;
1532 	paddr_t paddr = mm->pa;
1533 	ssize_t size_left = mm->size;
1534 	unsigned int level;
1535 	bool table_found;
1536 	uint32_t old_attr;
1537 
1538 	assert(!((vaddr | paddr) & SMALL_PAGE_MASK));
1539 
1540 	while (size_left > 0) {
1541 		level = CORE_MMU_BASE_TABLE_LEVEL;
1542 
1543 		while (true) {
1544 			paddr_t block_size = 0;
1545 
1546 			assert(level <= CORE_MMU_PGDIR_LEVEL);
1547 
1548 			table_found = core_mmu_find_table(prtn, vaddr, level,
1549 							  &tbl_info);
1550 			if (!table_found)
1551 				panic("can't find table for mapping");
1552 
1553 			block_size = BIT64(tbl_info.shift);
1554 
1555 			idx = core_mmu_va2idx(&tbl_info, vaddr);
1556 			if (!can_map_at_level(paddr, vaddr, size_left,
1557 					      block_size, mm)) {
1558 				bool secure = mm->attr & TEE_MATTR_SECURE;
1559 
1560 				/*
1561 				 * This part of the region can't be mapped at
1562 				 * this level. Need to go deeper.
1563 				 */
1564 				if (!core_mmu_entry_to_finer_grained(&tbl_info,
1565 								     idx,
1566 								     secure))
1567 					panic("Can't divide MMU entry");
1568 				level++;
1569 				continue;
1570 			}
1571 
1572 			/* We can map part of the region at current level */
1573 			core_mmu_get_entry(&tbl_info, idx, NULL, &old_attr);
1574 			if (old_attr)
1575 				panic("Page is already mapped");
1576 
1577 			core_mmu_set_entry(&tbl_info, idx, paddr, mm->attr);
1578 			paddr += block_size;
1579 			vaddr += block_size;
1580 			size_left -= block_size;
1581 
1582 			break;
1583 		}
1584 	}
1585 }
1586 
1587 TEE_Result core_mmu_map_pages(vaddr_t vstart, paddr_t *pages, size_t num_pages,
1588 			      enum teecore_memtypes memtype)
1589 {
1590 	TEE_Result ret;
1591 	struct core_mmu_table_info tbl_info;
1592 	struct tee_mmap_region *mm;
1593 	unsigned int idx;
1594 	uint32_t old_attr;
1595 	uint32_t exceptions;
1596 	vaddr_t vaddr = vstart;
1597 	size_t i;
1598 	bool secure;
1599 
1600 	assert(!(core_mmu_type_to_attr(memtype) & TEE_MATTR_PX));
1601 
1602 	secure = core_mmu_type_to_attr(memtype) & TEE_MATTR_SECURE;
1603 
1604 	if (vaddr & SMALL_PAGE_MASK)
1605 		return TEE_ERROR_BAD_PARAMETERS;
1606 
1607 	exceptions = mmu_lock();
1608 
1609 	mm = find_map_by_va((void *)vaddr);
1610 	if (!mm || !va_is_in_map(mm, vaddr + num_pages * SMALL_PAGE_SIZE - 1))
1611 		panic("VA does not belong to any known mm region");
1612 
1613 	if (!core_mmu_is_dynamic_vaspace(mm))
1614 		panic("Trying to map into static region");
1615 
1616 	for (i = 0; i < num_pages; i++) {
1617 		if (pages[i] & SMALL_PAGE_MASK) {
1618 			ret = TEE_ERROR_BAD_PARAMETERS;
1619 			goto err;
1620 		}
1621 
1622 		while (true) {
1623 			if (!core_mmu_find_table(NULL, vaddr, UINT_MAX,
1624 						 &tbl_info))
1625 				panic("Can't find pagetable for vaddr ");
1626 
1627 			idx = core_mmu_va2idx(&tbl_info, vaddr);
1628 			if (tbl_info.shift == SMALL_PAGE_SHIFT)
1629 				break;
1630 
1631 			/* This is a supertable. Need to divide it. */
1632 			if (!core_mmu_entry_to_finer_grained(&tbl_info, idx,
1633 							     secure))
1634 				panic("Failed to spread pgdir on small tables");
1635 		}
1636 
1637 		core_mmu_get_entry(&tbl_info, idx, NULL, &old_attr);
1638 		if (old_attr)
1639 			panic("Page is already mapped");
1640 
1641 		core_mmu_set_entry(&tbl_info, idx, pages[i],
1642 				   core_mmu_type_to_attr(memtype));
1643 		vaddr += SMALL_PAGE_SIZE;
1644 	}
1645 
1646 	/*
1647 	 * Make sure all the changes to translation tables are visible
1648 	 * before returning. TLB doesn't need to be invalidated as we are
1649 	 * guaranteed that there's no valid mapping in this range.
1650 	 */
1651 	core_mmu_table_write_barrier();
1652 	mmu_unlock(exceptions);
1653 
1654 	return TEE_SUCCESS;
1655 err:
1656 	mmu_unlock(exceptions);
1657 
1658 	if (i)
1659 		core_mmu_unmap_pages(vstart, i);
1660 
1661 	return ret;
1662 }
1663 
1664 TEE_Result core_mmu_map_contiguous_pages(vaddr_t vstart, paddr_t pstart,
1665 					 size_t num_pages,
1666 					 enum teecore_memtypes memtype)
1667 {
1668 	struct core_mmu_table_info tbl_info = { };
1669 	struct tee_mmap_region *mm = NULL;
1670 	unsigned int idx = 0;
1671 	uint32_t old_attr = 0;
1672 	uint32_t exceptions = 0;
1673 	vaddr_t vaddr = vstart;
1674 	paddr_t paddr = pstart;
1675 	size_t i = 0;
1676 	bool secure = false;
1677 
1678 	assert(!(core_mmu_type_to_attr(memtype) & TEE_MATTR_PX));
1679 
1680 	secure = core_mmu_type_to_attr(memtype) & TEE_MATTR_SECURE;
1681 
1682 	if ((vaddr | paddr) & SMALL_PAGE_MASK)
1683 		return TEE_ERROR_BAD_PARAMETERS;
1684 
1685 	exceptions = mmu_lock();
1686 
1687 	mm = find_map_by_va((void *)vaddr);
1688 	if (!mm || !va_is_in_map(mm, vaddr + num_pages * SMALL_PAGE_SIZE - 1))
1689 		panic("VA does not belong to any known mm region");
1690 
1691 	if (!core_mmu_is_dynamic_vaspace(mm))
1692 		panic("Trying to map into static region");
1693 
1694 	for (i = 0; i < num_pages; i++) {
1695 		while (true) {
1696 			if (!core_mmu_find_table(NULL, vaddr, UINT_MAX,
1697 						 &tbl_info))
1698 				panic("Can't find pagetable for vaddr ");
1699 
1700 			idx = core_mmu_va2idx(&tbl_info, vaddr);
1701 			if (tbl_info.shift == SMALL_PAGE_SHIFT)
1702 				break;
1703 
1704 			/* This is a supertable. Need to divide it. */
1705 			if (!core_mmu_entry_to_finer_grained(&tbl_info, idx,
1706 							     secure))
1707 				panic("Failed to spread pgdir on small tables");
1708 		}
1709 
1710 		core_mmu_get_entry(&tbl_info, idx, NULL, &old_attr);
1711 		if (old_attr)
1712 			panic("Page is already mapped");
1713 
1714 		core_mmu_set_entry(&tbl_info, idx, paddr,
1715 				   core_mmu_type_to_attr(memtype));
1716 		paddr += SMALL_PAGE_SIZE;
1717 		vaddr += SMALL_PAGE_SIZE;
1718 	}
1719 
1720 	/*
1721 	 * Make sure all the changes to translation tables are visible
1722 	 * before returning. TLB doesn't need to be invalidated as we are
1723 	 * guaranteed that there's no valid mapping in this range.
1724 	 */
1725 	core_mmu_table_write_barrier();
1726 	mmu_unlock(exceptions);
1727 
1728 	return TEE_SUCCESS;
1729 }
1730 
1731 void core_mmu_unmap_pages(vaddr_t vstart, size_t num_pages)
1732 {
1733 	struct core_mmu_table_info tbl_info;
1734 	struct tee_mmap_region *mm;
1735 	size_t i;
1736 	unsigned int idx;
1737 	uint32_t exceptions;
1738 
1739 	exceptions = mmu_lock();
1740 
1741 	mm = find_map_by_va((void *)vstart);
1742 	if (!mm || !va_is_in_map(mm, vstart + num_pages * SMALL_PAGE_SIZE - 1))
1743 		panic("VA does not belong to any known mm region");
1744 
1745 	if (!core_mmu_is_dynamic_vaspace(mm))
1746 		panic("Trying to unmap static region");
1747 
1748 	for (i = 0; i < num_pages; i++, vstart += SMALL_PAGE_SIZE) {
1749 		if (!core_mmu_find_table(NULL, vstart, UINT_MAX, &tbl_info))
1750 			panic("Can't find pagetable");
1751 
1752 		if (tbl_info.shift != SMALL_PAGE_SHIFT)
1753 			panic("Invalid pagetable level");
1754 
1755 		idx = core_mmu_va2idx(&tbl_info, vstart);
1756 		core_mmu_set_entry(&tbl_info, idx, 0, 0);
1757 	}
1758 	tlbi_all();
1759 
1760 	mmu_unlock(exceptions);
1761 }
1762 
1763 void core_mmu_populate_user_map(struct core_mmu_table_info *dir_info,
1764 				struct user_mode_ctx *uctx)
1765 {
1766 	struct core_mmu_table_info pg_info = { };
1767 	struct pgt_cache *pgt_cache = &thread_get_tsd()->pgt_cache;
1768 	struct pgt *pgt = NULL;
1769 	struct vm_region *r = NULL;
1770 	struct vm_region *r_last = NULL;
1771 
1772 	/* Find the first and last valid entry */
1773 	r = TAILQ_FIRST(&uctx->vm_info.regions);
1774 	if (!r)
1775 		return; /* Nothing to map */
1776 	r_last = TAILQ_LAST(&uctx->vm_info.regions, vm_region_head);
1777 
1778 	/*
1779 	 * Allocate all page tables in advance.
1780 	 */
1781 	pgt_alloc(pgt_cache, uctx->ts_ctx, r->va,
1782 		  r_last->va + r_last->size - 1);
1783 	pgt = SLIST_FIRST(pgt_cache);
1784 
1785 	core_mmu_set_info_table(&pg_info, dir_info->level + 1, 0, NULL);
1786 
1787 	TAILQ_FOREACH(r, &uctx->vm_info.regions, link)
1788 		set_pg_region(dir_info, r, &pgt, &pg_info);
1789 }
1790 
1791 TEE_Result core_mmu_remove_mapping(enum teecore_memtypes type, void *addr,
1792 				   size_t len)
1793 {
1794 	struct core_mmu_table_info tbl_info = { };
1795 	struct tee_mmap_region *res_map = NULL;
1796 	struct tee_mmap_region *map = NULL;
1797 	paddr_t pa = virt_to_phys(addr);
1798 	size_t granule = 0;
1799 	ptrdiff_t i = 0;
1800 	paddr_t p = 0;
1801 	size_t l = 0;
1802 
1803 	map = find_map_by_type_and_pa(type, pa, len);
1804 	if (!map)
1805 		return TEE_ERROR_GENERIC;
1806 
1807 	res_map = find_map_by_type(MEM_AREA_RES_VASPACE);
1808 	if (!res_map)
1809 		return TEE_ERROR_GENERIC;
1810 	if (!core_mmu_find_table(NULL, res_map->va, UINT_MAX, &tbl_info))
1811 		return TEE_ERROR_GENERIC;
1812 	granule = BIT(tbl_info.shift);
1813 
1814 	if (map < static_memory_map ||
1815 	    map >= static_memory_map + ARRAY_SIZE(static_memory_map))
1816 		return TEE_ERROR_GENERIC;
1817 	i = map - static_memory_map;
1818 
1819 	/* Check that we have a full match */
1820 	p = ROUNDDOWN(pa, granule);
1821 	l = ROUNDUP(len + pa - p, granule);
1822 	if (map->pa != p || map->size != l)
1823 		return TEE_ERROR_GENERIC;
1824 
1825 	clear_region(&tbl_info, map);
1826 	tlbi_all();
1827 
1828 	/* If possible remove the va range from res_map */
1829 	if (res_map->va - map->size == map->va) {
1830 		res_map->va -= map->size;
1831 		res_map->size += map->size;
1832 	}
1833 
1834 	/* Remove the entry. */
1835 	memmove(map, map + 1,
1836 		(ARRAY_SIZE(static_memory_map) - i - 1) * sizeof(*map));
1837 
1838 	/* Clear the last new entry in case it was used */
1839 	memset(static_memory_map + ARRAY_SIZE(static_memory_map) - 1,
1840 	       0, sizeof(*map));
1841 
1842 	return TEE_SUCCESS;
1843 }
1844 
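/*
 * Return the single mapping of @type provided it is at least @len bytes
 * large. Return NULL if the type isn't mapped, is mapped more than once,
 * or is too small.
 */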
1845 struct tee_mmap_region *
1846 core_mmu_find_mapping_exclusive(enum teecore_memtypes type, size_t len)
1847 {
1848 	struct tee_mmap_region *map = NULL;
1849 	struct tee_mmap_region *map_found = NULL;
1850 
1851 	if (!len)
1852 		return NULL;
1853 
1854 	for (map = get_memory_map(); !core_mmap_is_end_of_table(map); map++) {
1855 		if (map->type != type)
1856 			continue;
1857 
1858 		if (map_found)
1859 			return NULL;
1860 
1861 		map_found = map;
1862 	}
1863 
1864 	if (!map_found || map_found->size < len)
1865 		return NULL;
1866 
1867 	return map_found;
1868 }
1869 
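/*
 * Map the physical range [addr, addr + len) with the attributes of the
 * given memory type. An already matching mapping is reused, otherwise
 * virtual space is carved out of MEM_AREA_RES_VASPACE and a new entry is
 * appended to the static memory map. Returns the VA of addr, or NULL on
 * failure.
 */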
1870 void *core_mmu_add_mapping(enum teecore_memtypes type, paddr_t addr, size_t len)
1871 {
1872 	struct core_mmu_table_info tbl_info;
1873 	struct tee_mmap_region *map;
1874 	size_t n;
1875 	size_t granule;
1876 	paddr_t p;
1877 	size_t l;
1878 
1879 	if (!len)
1880 		return NULL;
1881 
1882 	if (!core_mmu_check_end_pa(addr, len))
1883 		return NULL;
1884 
1885 	/* Check if the memory is already mapped */
1886 	map = find_map_by_type_and_pa(type, addr, len);
1887 	if (map && pbuf_inside_map_area(addr, len, map))
1888 		return (void *)(vaddr_t)(map->va + addr - map->pa);
1889 
1890 	/* Find the reserved va space used for late mappings */
1891 	map = find_map_by_type(MEM_AREA_RES_VASPACE);
1892 	if (!map)
1893 		return NULL;
1894 
1895 	if (!core_mmu_find_table(NULL, map->va, UINT_MAX, &tbl_info))
1896 		return NULL;
1897 
1898 	granule = BIT64(tbl_info.shift);
1899 	p = ROUNDDOWN(addr, granule);
1900 	l = ROUNDUP(len + addr - p, granule);
1901 
1902 	/* Refuse ranges that don't fit in the reserved virtual space */
1903 	if (map->size < l)
1904 		return NULL;
1905 
1906 	/*
1907 	 * Something is wrong if we can't fit the va range into the
1908 	 * selected table: the reserved va range is possibly misaligned
1909 	 * with the granule.
1910 	 */
1911 	if (core_mmu_va2idx(&tbl_info, map->va + len) >= tbl_info.num_entries)
1912 		return NULL;
1913 
1914 	/* Find end of the memory map */
1915 	n = 0;
1916 	while (!core_mmap_is_end_of_table(static_memory_map + n))
1917 		n++;
1918 
1919 	if (n < (ARRAY_SIZE(static_memory_map) - 1)) {
1920 		/* There's room for another entry */
1921 		static_memory_map[n].va = map->va;
1922 		static_memory_map[n].size = l;
1923 		static_memory_map[n + 1].type = MEM_AREA_END;
1924 		map->va += l;
1925 		map->size -= l;
1926 		map = static_memory_map + n;
1927 	} else {
1928 		/*
1929 		 * There isn't room for another entry, steal the reserved
1930 		 * entry as it's not useful for anything else any longer.
1931 		 */
1932 		map->size = l;
1933 	}
1934 	map->type = type;
1935 	map->region_size = granule;
1936 	map->attr = core_mmu_type_to_attr(type);
1937 	map->pa = p;
1938 
1939 	set_region(&tbl_info, map);
1940 
1941 	/* Make sure the new entry is visible before continuing. */
1942 	core_mmu_table_write_barrier();
1943 
1944 	return (void *)(vaddr_t)(map->va + addr - map->pa);
1945 }
1946 
1947 #ifdef CFG_WITH_PAGER
1948 static vaddr_t get_linear_map_end(void)
1949 {
1950 	/* this is synced with the generic linker file kern.ld.S */
1951 	return (vaddr_t)__heap2_end;
1952 }
1953 #endif
1954 
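/*
 * Debug check (CFG_TEE_CORE_DEBUG): verify that va really translates to
 * pa using an independent lookup (user mapping, pager tables or the
 * static map) and panic on mismatch.
 */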
1955 #if defined(CFG_TEE_CORE_DEBUG)
1956 static void check_pa_matches_va(void *va, paddr_t pa)
1957 {
1958 	TEE_Result res = TEE_ERROR_GENERIC;
1959 	vaddr_t v = (vaddr_t)va;
1960 	paddr_t p = 0;
1961 	struct core_mmu_table_info ti __maybe_unused = { };
1962 
1963 	if (core_mmu_user_va_range_is_defined()) {
1964 		vaddr_t user_va_base = 0;
1965 		size_t user_va_size = 0;
1966 
1967 		core_mmu_get_user_va_range(&user_va_base, &user_va_size);
1968 		if (v >= user_va_base &&
1969 		    v <= (user_va_base - 1 + user_va_size)) {
1970 			if (!core_mmu_user_mapping_is_active()) {
1971 				if (pa)
1972 					panic("issue in linear address space");
1973 				return;
1974 			}
1975 
1976 			res = vm_va2pa(to_user_mode_ctx(thread_get_tsd()->ctx),
1977 				       va, &p);
1978 			if (res == TEE_ERROR_NOT_SUPPORTED)
1979 				return;
1980 			if (res == TEE_SUCCESS && pa != p)
1981 				panic("bad pa");
1982 			if (res != TEE_SUCCESS && pa)
1983 				panic("false pa");
1984 			return;
1985 		}
1986 	}
1987 #ifdef CFG_WITH_PAGER
1988 	if (is_unpaged(va)) {
1989 		if (v - boot_mmu_config.load_offset != pa)
1990 			panic("issue in linear address space");
1991 		return;
1992 	}
1993 
1994 	if (tee_pager_get_table_info(v, &ti)) {
1995 		uint32_t a;
1996 
1997 		/*
1998 		 * Lookups in the page table managed by the pager are
1999 		 * dangerous for addresses in the paged area since those
2000 		 * pages change all the time. Some ranges are safe though,
2001 		 * for instance rw-locked areas while the page is populated.
2002 		 */
2003 		core_mmu_get_entry(&ti, core_mmu_va2idx(&ti, v), &p, &a);
2004 		if (a & TEE_MATTR_VALID_BLOCK) {
2005 			paddr_t mask = BIT64(ti.shift) - 1;
2006 
2007 			p |= v & mask;
2008 			if (pa != p)
2009 				panic();
2010 		} else {
2011 			if (pa)
2012 				panic();
2013 		}
2014 		return;
2015 	}
2016 #endif
2017 
2018 	if (!core_va2pa_helper(va, &p)) {
2019 		/* Verify only the static mapping (i.e., a non-null phys addr) */
2020 		if (p && pa != p) {
2021 			DMSG("va %p maps 0x%" PRIxPA ", expect 0x%" PRIxPA,
2022 			     va, p, pa);
2023 			panic();
2024 		}
2025 	} else {
2026 		if (pa) {
2027 			DMSG("va %p unmapped, expect 0x%" PRIxPA, va, pa);
2028 			panic();
2029 		}
2030 	}
2031 }
2032 #else
2033 static void check_pa_matches_va(void *va __unused, paddr_t pa __unused)
2034 {
2035 }
2036 #endif
2037 
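/*
 * Translate a core virtual address to a physical address, returning 0
 * when the address isn't mapped.
 */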
2038 paddr_t virt_to_phys(void *va)
2039 {
2040 	paddr_t pa = 0;
2041 
2042 	if (!arch_va2pa_helper(va, &pa))
2043 		pa = 0;
2044 	check_pa_matches_va(va, pa);
2045 	return pa;
2046 }
2047 
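/*
 * Debug check (CFG_TEE_CORE_DEBUG): verify that the va found for pa
 * translates back to the same pa and panic on mismatch.
 */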
2048 #if defined(CFG_TEE_CORE_DEBUG)
2049 static void check_va_matches_pa(paddr_t pa, void *va)
2050 {
2051 	paddr_t p = 0;
2052 
2053 	if (!va)
2054 		return;
2055 
2056 	p = virt_to_phys(va);
2057 	if (p != pa) {
2058 		DMSG("va %p maps 0x%" PRIxPA " expect 0x%" PRIxPA, va, p, pa);
2059 		panic();
2060 	}
2061 }
2062 #else
2063 static void check_va_matches_pa(paddr_t pa __unused, void *va __unused)
2064 {
2065 }
2066 #endif
2067 
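/*
 * Translate a physical address via the currently active user mode (TS)
 * mapping, or return NULL if no user mapping is active.
 */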
2068 static void *phys_to_virt_ts_vaspace(paddr_t pa, size_t len)
2069 {
2070 	if (!core_mmu_user_mapping_is_active())
2071 		return NULL;
2072 
2073 	return vm_pa2va(to_user_mode_ctx(thread_get_tsd()->ctx), pa, len);
2074 }
2075 
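/*
 * Translate a physical address within TEE core RAM. With the pager the
 * linearly mapped range is handled directly and other addresses are
 * resolved by the pager; without it the static memory map is searched.
 */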
2076 #ifdef CFG_WITH_PAGER
2077 static void *phys_to_virt_tee_ram(paddr_t pa, size_t len)
2078 {
2079 	paddr_t end_pa = 0;
2080 
2081 	if (SUB_OVERFLOW(len, 1, &end_pa) || ADD_OVERFLOW(pa, end_pa, &end_pa))
2082 		return NULL;
2083 
2084 	if (pa >= TEE_LOAD_ADDR && pa < get_linear_map_end()) {
2085 		if (end_pa > get_linear_map_end())
2086 			return NULL;
2087 		return (void *)(vaddr_t)(pa + boot_mmu_config.load_offset);
2088 	}
2089 
2090 	return tee_pager_phys_to_virt(pa, len);
2091 }
2092 #else
2093 static void *phys_to_virt_tee_ram(paddr_t pa, size_t len)
2094 {
2095 	struct tee_mmap_region *mmap = NULL;
2096 
2097 	mmap = find_map_by_type_and_pa(MEM_AREA_TEE_RAM, pa, len);
2098 	if (!mmap)
2099 		mmap = find_map_by_type_and_pa(MEM_AREA_NEX_RAM_RW, pa, len);
2100 	if (!mmap)
2101 		mmap = find_map_by_type_and_pa(MEM_AREA_NEX_RAM_RO, pa, len);
2102 	if (!mmap)
2103 		mmap = find_map_by_type_and_pa(MEM_AREA_TEE_RAM_RW, pa, len);
2104 	if (!mmap)
2105 		mmap = find_map_by_type_and_pa(MEM_AREA_TEE_RAM_RO, pa, len);
2106 	if (!mmap)
2107 		mmap = find_map_by_type_and_pa(MEM_AREA_TEE_RAM_RX, pa, len);
2108 	/*
2109 	 * Note that MEM_AREA_INIT_RAM_RO and MEM_AREA_INIT_RAM_RX are only
2110 	 * used with the pager and are thus not needed here.
2111 	 */
2112 	return map_pa2va(mmap, pa, len);
2113 }
2114 #endif
2115 
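/*
 * Translate a physical address of the given memory type to a core
 * virtual address, or return NULL if the range isn't mapped.
 */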
2116 void *phys_to_virt(paddr_t pa, enum teecore_memtypes m, size_t len)
2117 {
2118 	void *va = NULL;
2119 
2120 	switch (m) {
2121 	case MEM_AREA_TS_VASPACE:
2122 		va = phys_to_virt_ts_vaspace(pa, len);
2123 		break;
2124 	case MEM_AREA_TEE_RAM:
2125 	case MEM_AREA_TEE_RAM_RX:
2126 	case MEM_AREA_TEE_RAM_RO:
2127 	case MEM_AREA_TEE_RAM_RW:
2128 	case MEM_AREA_NEX_RAM_RO:
2129 	case MEM_AREA_NEX_RAM_RW:
2130 		va = phys_to_virt_tee_ram(pa, len);
2131 		break;
2132 	case MEM_AREA_SHM_VASPACE:
2133 		/* Finding a VA from a PA in dynamic SHM is not yet supported */
2134 		va = NULL;
2135 		break;
2136 	default:
2137 		va = map_pa2va(find_map_by_type_and_pa(m, pa, len), pa, len);
2138 	}
2139 	if (m != MEM_AREA_SEC_RAM_OVERALL)
2140 		check_va_matches_pa(pa, va);
2141 	return va;
2142 }
2143 
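/*
 * Translate a physical I/O address to a virtual address, searching the
 * secure (MEM_AREA_IO_SEC) and then the non-secure (MEM_AREA_IO_NSEC)
 * mappings.
 */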
2144 void *phys_to_virt_io(paddr_t pa, size_t len)
2145 {
2146 	struct tee_mmap_region *map = NULL;
2147 	void *va = NULL;
2148 
2149 	map = find_map_by_type_and_pa(MEM_AREA_IO_SEC, pa, len);
2150 	if (!map)
2151 		map = find_map_by_type_and_pa(MEM_AREA_IO_NSEC, pa, len);
2152 	if (!map)
2153 		return NULL;
2154 	va = map_pa2va(map, pa, len);
2155 	check_va_matches_pa(pa, va);
2156 	return va;
2157 }
2158 
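/*
 * Return the address to use for pa: the translated virtual address once
 * the MMU is enabled, the physical address itself before that.
 */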
2159 vaddr_t core_mmu_get_va(paddr_t pa, enum teecore_memtypes type, size_t len)
2160 {
2161 	if (cpu_mmu_enabled())
2162 		return (vaddr_t)phys_to_virt(pa, type, len);
2163 
2164 	return (vaddr_t)pa;
2165 }
2166 
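/*
 * Return true if va lies within the core's always-resident linear map
 * and thus never gets paged out. Without the pager everything is
 * considered unpaged.
 */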
2167 #ifdef CFG_WITH_PAGER
2168 bool is_unpaged(void *va)
2169 {
2170 	vaddr_t v = (vaddr_t)va;
2171 
2172 	return v >= VCORE_START_VA && v < get_linear_map_end();
2173 }
2174 #else
2175 bool is_unpaged(void *va __unused)
2176 {
2177 	return true;
2178 }
2179 #endif
2180 
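/* Hand the static memory map over to the virtualization support code */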
2181 void core_mmu_init_virtualization(void)
2182 {
2183 	virt_init_memory(static_memory_map);
2184 }
2185 
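/*
 * Return the address to use for the I/O range described by p: with the
 * MMU enabled the virtual address (translated and cached on first use),
 * otherwise the physical address.
 */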
2186 vaddr_t io_pa_or_va(struct io_pa_va *p, size_t len)
2187 {
2188 	assert(p->pa);
2189 	if (cpu_mmu_enabled()) {
2190 		if (!p->va)
2191 			p->va = (vaddr_t)phys_to_virt_io(p->pa, len);
2192 		assert(p->va);
2193 		return p->va;
2194 	}
2195 	return p->pa;
2196 }
2197 
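/* As io_pa_or_va() but the range must be mapped as MEM_AREA_IO_SEC */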
2198 vaddr_t io_pa_or_va_secure(struct io_pa_va *p, size_t len)
2199 {
2200 	assert(p->pa);
2201 	if (cpu_mmu_enabled()) {
2202 		if (!p->va)
2203 			p->va = (vaddr_t)phys_to_virt(p->pa, MEM_AREA_IO_SEC,
2204 						      len);
2205 		assert(p->va);
2206 		return p->va;
2207 	}
2208 	return p->pa;
2209 }
2210 
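/* As io_pa_or_va() but the range must be mapped as MEM_AREA_IO_NSEC */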
2211 vaddr_t io_pa_or_va_nsec(struct io_pa_va *p, size_t len)
2212 {
2213 	assert(p->pa);
2214 	if (cpu_mmu_enabled()) {
2215 		if (!p->va)
2216 			p->va = (vaddr_t)phys_to_virt(p->pa, MEM_AREA_IO_NSEC,
2217 						      len);
2218 		assert(p->va);
2219 		return p->va;
2220 	}
2221 	return p->pa;
2222 }
2223 
2224 #ifdef CFG_CORE_RESERVED_SHM
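/*
 * Initialize the default non-secure shared memory from the
 * MEM_AREA_NSEC_SHM mapping. With CFG_PL310 the start of the area is
 * reserved for the l2cc mutex before the pool is published.
 */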
2225 static TEE_Result teecore_init_pub_ram(void)
2226 {
2227 	vaddr_t s = 0;
2228 	vaddr_t e = 0;
2229 
2230 	/* get virtual addr/size of NSec shared mem allocated from teecore */
2231 	core_mmu_get_mem_by_type(MEM_AREA_NSEC_SHM, &s, &e);
2232 
2233 	if (s >= e || s & SMALL_PAGE_MASK || e & SMALL_PAGE_MASK)
2234 		panic("invalid PUB RAM");
2235 
2236 	/* extra check: we could rely on core_mmu_get_mem_by_type() */
2237 	if (!tee_vbuf_is_non_sec(s, e - s))
2238 		panic("PUB RAM is not non-secure");
2239 
2240 #ifdef CFG_PL310
2241 	/* Statically allocate the l2cc mutex */
2242 	tee_l2cc_store_mutex_boot_pa(virt_to_phys((void *)s));
2243 	s += sizeof(uint32_t);			/* size of a pl310 mutex */
2244 	s = ROUNDUP(s, SMALL_PAGE_SIZE);	/* keep required alignment */
2245 #endif
2246 
2247 	default_nsec_shm_paddr = virt_to_phys((void *)s);
2248 	default_nsec_shm_size = e - s;
2249 
2250 	return TEE_SUCCESS;
2251 }
2252 early_init(teecore_init_pub_ram);
2253 #endif /*CFG_CORE_RESERVED_SHM*/
2254 
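/*
 * Initialize the tee_mm_sec_ddr pool providing physical memory for
 * loading and running TAs, using the TA RAM range (or the range assigned
 * by the hypervisor when CFG_VIRTUALIZATION is enabled).
 */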
2255 void core_mmu_init_ta_ram(void)
2256 {
2257 	vaddr_t s = 0;
2258 	vaddr_t e = 0;
2259 	paddr_t ps = 0;
2260 	size_t size = 0;
2261 
2262 	/*
2263 	 * Get the virtual address range of the RAM where TAs are loaded
2264 	 * and executed.
2265 	 */
2266 	if (IS_ENABLED(CFG_VIRTUALIZATION))
2267 		virt_get_ta_ram(&s, &e);
2268 	else
2269 		core_mmu_get_mem_by_type(MEM_AREA_TA_RAM, &s, &e);
2270 
2271 	ps = virt_to_phys((void *)s);
2272 	size = e - s;
2273 
2274 	if (!ps || (ps & CORE_MMU_USER_CODE_MASK) ||
2275 	    !size || (size & CORE_MMU_USER_CODE_MASK))
2276 		panic("invalid TA RAM");
2277 
2278 	/* extra check: we could rely on core_mmu_get_mem_by_type() */
2279 	if (!tee_pbuf_is_sec(ps, size))
2280 		panic("TA RAM is not secure");
2281 
2282 	if (!tee_mm_is_empty(&tee_mm_sec_ddr))
2283 		panic("TA RAM pool is not empty");
2284 
2285 	/* remove previous config and init TA ddr memory pool */
2286 	tee_mm_final(&tee_mm_sec_ddr);
2287 	tee_mm_init(&tee_mm_sec_ddr, ps, size, CORE_MMU_USER_CODE_SHIFT,
2288 		    TEE_MM_POOL_NO_FLAGS);
2289 }
2290