xref: /optee_os/core/arch/arm/kernel/boot.c (revision 92d75aefed568a557dbd9152d749a4bc320fa9f2)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2015-2022, Linaro Limited
4  * Copyright (c) 2023, Arm Limited
5  */
6 
7 #include <arm.h>
8 #include <assert.h>
9 #include <compiler.h>
10 #include <config.h>
11 #include <console.h>
12 #include <crypto/crypto.h>
13 #include <drivers/gic.h>
14 #include <dt-bindings/interrupt-controller/arm-gic.h>
15 #include <initcall.h>
16 #include <inttypes.h>
17 #include <keep.h>
18 #include <kernel/asan.h>
19 #include <kernel/boot.h>
20 #include <kernel/linker.h>
21 #include <kernel/misc.h>
22 #include <kernel/panic.h>
23 #include <kernel/tee_misc.h>
24 #include <kernel/thread.h>
25 #include <kernel/tpm.h>
26 #include <libfdt.h>
27 #include <malloc.h>
28 #include <memtag.h>
29 #include <mm/core_memprot.h>
30 #include <mm/core_mmu.h>
31 #include <mm/fobj.h>
32 #include <mm/tee_mm.h>
33 #include <mm/tee_pager.h>
34 #include <sm/psci.h>
35 #include <stdio.h>
36 #include <trace.h>
37 #include <utee_defines.h>
38 #include <util.h>
39 
40 #include <platform_config.h>
41 
42 #if !defined(CFG_WITH_ARM_TRUSTED_FW)
43 #include <sm/sm.h>
44 #endif
45 
46 #if defined(CFG_WITH_VFP)
47 #include <kernel/vfp.h>
48 #endif
49 
50 /*
51  * In this file we're using unsigned long to represent physical pointers as
52  * they are received in a single register when OP-TEE is initially entered.
53  * This limits 32-bit systems to only make use of the lower 32 bits
54  * of a physical address for initial parameters.
55  *
56  * 64-bit systems on the other hand can use full 64-bit physical pointers.
57  */
58 #define PADDR_INVALID		ULONG_MAX
59 
60 #if defined(CFG_BOOT_SECONDARY_REQUEST)
61 struct ns_entry_context {
62 	uintptr_t entry_point;
63 	uintptr_t context_id;
64 };
65 struct ns_entry_context ns_entry_contexts[CFG_TEE_CORE_NB_CORE];
66 static uint32_t spin_table[CFG_TEE_CORE_NB_CORE];
67 #endif
68 
69 #ifdef CFG_BOOT_SYNC_CPU
70 /*
71  * Array used during boot to synchronize the CPUs.
72  * When 0, the CPU has not started.
73  * When 1, it has started.
74  */
75 uint32_t sem_cpu_sync[CFG_TEE_CORE_NB_CORE];
76 DECLARE_KEEP_PAGER(sem_cpu_sync);
77 #endif
78 
79 #ifdef CFG_DT
80 struct dt_descriptor {
81 	void *blob;
82 #ifdef _CFG_USE_DTB_OVERLAY
83 	int frag_id;
84 #endif
85 };
86 
87 static struct dt_descriptor external_dt __nex_bss;
88 #ifdef CFG_CORE_SEL1_SPMC
89 static struct dt_descriptor tos_fw_config_dt __nex_bss;
90 #endif
91 #endif
92 
93 #ifdef CFG_SECONDARY_INIT_CNTFRQ
94 static uint32_t cntfrq;
95 #endif
96 
97 /* May be overridden in plat-$(PLATFORM)/main.c */
98 __weak void plat_primary_init_early(void)
99 {
100 }
101 DECLARE_KEEP_PAGER(plat_primary_init_early);
102 
103 /* May be overridden in plat-$(PLATFORM)/main.c */
104 __weak void main_init_gic(void)
105 {
106 }
107 
108 /* May be overridden in plat-$(PLATFORM)/main.c */
109 __weak void main_secondary_init_gic(void)
110 {
111 }
112 
113 /* May be overridden in plat-$(PLATFORM)/main.c */
114 __weak unsigned long plat_get_aslr_seed(void)
115 {
116 	DMSG("Warning: no ASLR seed");
117 
118 	return 0;
119 }
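
/*
 * Illustrative sketch, not part of this file: a platform with an entropy
 * source that is usable this early in boot could override the weak default
 * above from its plat-$(PLATFORM)/main.c, for instance by reading a
 * hardware TRNG register. The register name below is hypothetical.
 *
 *	unsigned long plat_get_aslr_seed(void)
 *	{
 *		return io_read64(PLAT_TRNG_SEED_REG);
 *	}
 */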
120 
121 #if defined(_CFG_CORE_STACK_PROTECTOR)
122 /* Generate random stack canary value on boot up */
123 __weak uintptr_t plat_get_random_stack_canary(void)
124 {
125 	uintptr_t canary = 0xbaaaad00;
126 	TEE_Result ret = TEE_ERROR_GENERIC;
127 
128 	/*
129 	 * With virtualization the RNG is not initialized in the Nexus core,
130 	 * so a platform-specific implementation needs to override this function.
131 	 */
132 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
133 		IMSG("WARNING: Using fixed value for stack canary");
134 		return canary;
135 	}
136 
137 	ret = crypto_rng_read(&canary, sizeof(canary));
138 	if (ret != TEE_SUCCESS)
139 		panic("Failed to generate random stack canary");
140 
141 	/* Leave a null byte in the canary to mitigate string-based exploits */
142 	return canary & ~0xffUL;
143 }
144 #endif /*_CFG_CORE_STACK_PROTECTOR*/
145 
146 /*
147  * This function is called as a guard after each SMC call that is not
148  * supposed to return.
149  */
150 void __panic_at_smc_return(void)
151 {
152 	panic();
153 }
154 
155 #if defined(CFG_WITH_ARM_TRUSTED_FW)
156 void init_sec_mon(unsigned long nsec_entry __maybe_unused)
157 {
158 	assert(nsec_entry == PADDR_INVALID);
159 	/* Do nothing as we don't have a secure monitor */
160 }
161 #else
162 /* May be overridden in plat-$(PLATFORM)/main.c */
163 __weak void init_sec_mon(unsigned long nsec_entry)
164 {
165 	struct sm_nsec_ctx *nsec_ctx;
166 
167 	assert(nsec_entry != PADDR_INVALID);
168 
169 	/* Initialize secure monitor */
170 	nsec_ctx = sm_get_nsec_ctx();
171 	nsec_ctx->mon_lr = nsec_entry;
172 	nsec_ctx->mon_spsr = CPSR_MODE_SVC | CPSR_I;
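	/* Bit 0 of the entry address selects Thumb state, as for a BX branch */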
173 	if (nsec_entry & 1)
174 		nsec_ctx->mon_spsr |= CPSR_T;
175 }
176 #endif
177 
178 #if defined(CFG_WITH_ARM_TRUSTED_FW)
179 static void init_vfp_nsec(void)
180 {
181 }
182 #else
183 static void init_vfp_nsec(void)
184 {
185 	/* Normal world can use CP10 and CP11 (SIMD/VFP) */
186 	write_nsacr(read_nsacr() | NSACR_CP10 | NSACR_CP11);
187 }
188 #endif
189 
190 #if defined(CFG_WITH_VFP)
191 
192 #ifdef ARM32
193 static void init_vfp_sec(void)
194 {
195 	uint32_t cpacr = read_cpacr();
196 
197 	/*
198 	 * Enable Advanced SIMD functionality.
199 	 * Enable use of D16-D31 of the Floating-point Extension register
200 	 * file.
201 	 */
202 	cpacr &= ~(CPACR_ASEDIS | CPACR_D32DIS);
203 	/*
204 	 * Enable usage of CP10 and CP11 (SIMD/VFP) in both kernel and user
205 	 * mode.
206 	 */
207 	cpacr |= CPACR_CP(10, CPACR_CP_ACCESS_FULL);
208 	cpacr |= CPACR_CP(11, CPACR_CP_ACCESS_FULL);
209 	write_cpacr(cpacr);
210 }
211 #endif /* ARM32 */
212 
213 #ifdef ARM64
214 static void init_vfp_sec(void)
215 {
216 	/* Not using VFP until thread_kernel_enable_vfp() */
217 	vfp_disable();
218 }
219 #endif /* ARM64 */
220 
221 #else /* CFG_WITH_VFP */
222 
223 static void init_vfp_sec(void)
224 {
225 	/* Not using VFP */
226 }
227 #endif
228 
229 #ifdef CFG_SECONDARY_INIT_CNTFRQ
230 static void primary_save_cntfrq(void)
231 {
232 	assert(cntfrq == 0);
233 
234 	/*
235 	 * CNTFRQ should be initialized on the primary CPU by a
236 	 * previous boot stage
237 	 */
238 	cntfrq = read_cntfrq();
239 }
240 
241 static void secondary_init_cntfrq(void)
242 {
243 	assert(cntfrq != 0);
244 	write_cntfrq(cntfrq);
245 }
246 #else /* CFG_SECONDARY_INIT_CNTFRQ */
247 static void primary_save_cntfrq(void)
248 {
249 }
250 
251 static void secondary_init_cntfrq(void)
252 {
253 }
254 #endif
255 
256 #ifdef CFG_CORE_SANITIZE_KADDRESS
257 static void init_run_constructors(void)
258 {
259 	const vaddr_t *ctor;
260 
261 	for (ctor = &__ctor_list; ctor < &__ctor_end; ctor++)
262 		((void (*)(void))(*ctor))();
263 }
264 
265 static void init_asan(void)
266 {
267 
268 	/*
269 	 * CFG_ASAN_SHADOW_OFFSET is also supplied as
270 	 * -fasan-shadow-offset=$(CFG_ASAN_SHADOW_OFFSET) to the compiler.
271 	 * Since all the values needed to calculate CFG_ASAN_SHADOW_OFFSET
272 	 * aren't available to make, we have to calculate it in advance and
273 	 * hard code it into the platform conf.mk. Here, where all the
274 	 * needed values are available, we double check that the compiler
275 	 * was supplied the correct value.
276 	 */
277 
278 #define __ASAN_SHADOW_START \
279 	ROUNDUP(TEE_RAM_START + (TEE_RAM_VA_SIZE * 8) / 9 - 8, 8)
280 	assert(__ASAN_SHADOW_START == (vaddr_t)&__asan_shadow_start);
281 #define __CFG_ASAN_SHADOW_OFFSET \
282 	(__ASAN_SHADOW_START - (TEE_RAM_START / 8))
283 	COMPILE_TIME_ASSERT(CFG_ASAN_SHADOW_OFFSET == __CFG_ASAN_SHADOW_OFFSET);
284 #undef __ASAN_SHADOW_START
285 #undef __CFG_ASAN_SHADOW_OFFSET
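
	/*
	 * Worked example with illustrative values (not taken from any
	 * particular platform): with TEE_RAM_START = 0x2e000000 and
	 * TEE_RAM_VA_SIZE = 0x200000 the shadow area starts at
	 * ROUNDUP(0x2e000000 + (0x200000 * 8) / 9 - 8, 8) = 0x2e1c71c0,
	 * so conf.mk would set CFG_ASAN_SHADOW_OFFSET to
	 * 0x2e1c71c0 - 0x2e000000 / 8 = 0x285c71c0.
	 */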
286 
287 	/*
288 	 * Assign the area covered by the shadow memory: everything from the
289 	 * start up to the beginning of the shadow area.
290 	 */
291 	asan_set_shadowed((void *)TEE_LOAD_ADDR, &__asan_shadow_start);
292 
293 	/*
294 	 * Add access to areas that aren't opened automatically by a
295 	 * constructor.
296 	 */
297 	asan_tag_access(&__ctor_list, &__ctor_end);
298 	asan_tag_access(__rodata_start, __rodata_end);
299 #ifdef CFG_WITH_PAGER
300 	asan_tag_access(__pageable_start, __pageable_end);
301 #endif /*CFG_WITH_PAGER*/
302 	asan_tag_access(__nozi_start, __nozi_end);
303 	asan_tag_access(__exidx_start, __exidx_end);
304 	asan_tag_access(__extab_start, __extab_end);
305 
306 	init_run_constructors();
307 
308 	/* Everything is tagged correctly, let's start address sanitizing. */
309 	asan_start();
310 }
311 #else /*CFG_CORE_SANITIZE_KADDRESS*/
312 static void init_asan(void)
313 {
314 }
315 #endif /*CFG_CORE_SANITIZE_KADDRESS*/
316 
317 #if defined(CFG_MEMTAG)
318 /* Called from entry_a64.S only when MEMTAG is configured */
319 void boot_init_memtag(void)
320 {
321 	paddr_t base = 0;
322 	paddr_size_t size = 0;
323 
324 	memtag_init_ops(feat_mte_implemented());
325 	core_mmu_get_secure_memory(&base, &size);
326 	memtag_set_tags((void *)(vaddr_t)base, size, 0);
327 }
328 #endif
329 
330 #ifdef CFG_WITH_PAGER
331 
332 #ifdef CFG_CORE_SANITIZE_KADDRESS
333 static void carve_out_asan_mem(tee_mm_pool_t *pool)
334 {
335 	const size_t s = pool->hi - pool->lo;
336 	tee_mm_entry_t *mm;
337 	paddr_t apa = ASAN_MAP_PA;
338 	size_t asz = ASAN_MAP_SZ;
339 
340 	if (core_is_buffer_outside(apa, asz, pool->lo, s))
341 		return;
342 
343 	/* Reserve the shadow area */
344 	if (!core_is_buffer_inside(apa, asz, pool->lo, s)) {
345 		if (apa < pool->lo) {
346 			/*
347 			 * ASAN buffer is overlapping with the beginning of
348 			 * the pool.
349 			 */
350 			asz -= pool->lo - apa;
351 			apa = pool->lo;
352 		} else {
353 			/*
354 			 * ASAN buffer is overlapping with the end of the
355 			 * pool.
356 			 */
357 			asz = pool->hi - apa;
358 		}
359 	}
360 	mm = tee_mm_alloc2(pool, apa, asz);
361 	assert(mm);
362 }
363 #else
364 static void carve_out_asan_mem(tee_mm_pool_t *pool __unused)
365 {
366 }
367 #endif
368 
369 static void print_pager_pool_size(void)
370 {
371 	struct tee_pager_stats __maybe_unused stats;
372 
373 	tee_pager_get_stats(&stats);
374 	IMSG("Pager pool size: %zukB",
375 		stats.npages_all * SMALL_PAGE_SIZE / 1024);
376 }
377 
378 static void init_vcore(tee_mm_pool_t *mm_vcore)
379 {
380 	const vaddr_t begin = VCORE_START_VA;
381 	size_t size = TEE_RAM_VA_SIZE;
382 
383 #ifdef CFG_CORE_SANITIZE_KADDRESS
384 	/* Carve out ASAN memory, flat mapped after core memory */
385 	if (begin + size > ASAN_SHADOW_PA)
386 		size = ASAN_MAP_PA - begin;
387 #endif
388 
389 	if (!tee_mm_init(mm_vcore, begin, size, SMALL_PAGE_SHIFT,
390 			 TEE_MM_POOL_NO_FLAGS))
391 		panic("tee_mm_vcore init failed");
392 }
393 
394 /*
395  * With CFG_CORE_ASLR=y the init part is relocated very early during boot.
396  * The init part is also paged just as the rest of the normal paged code, with
397  * the difference that it's preloaded during boot. When the backing store
398  * is configured the entire paged binary is copied in place and then also
399  * the init part. Since the init part has been relocated (references to
400  * addresses updated to compensate for the new load address) this has to be
401  * undone for the hashes of those pages to match with the original binary.
402  *
403  * If CFG_CORE_ASLR=n, nothing needs to be done as the code/ro pages are
404  * unchanged.
405  */
406 static void undo_init_relocation(uint8_t *paged_store __maybe_unused)
407 {
408 #ifdef CFG_CORE_ASLR
409 	unsigned long *ptr = NULL;
410 	const uint32_t *reloc = NULL;
411 	const uint32_t *reloc_end = NULL;
412 	unsigned long offs = boot_mmu_config.map_offset;
413 	const struct boot_embdata *embdata = (const void *)__init_end;
414 	vaddr_t addr_end = (vaddr_t)__init_end - offs - TEE_LOAD_ADDR;
415 	vaddr_t addr_start = (vaddr_t)__init_start - offs - TEE_LOAD_ADDR;
416 
417 	reloc = (const void *)((vaddr_t)embdata + embdata->reloc_offset);
418 	reloc_end = reloc + embdata->reloc_len / sizeof(*reloc);
419 
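	/*
	 * Illustrative example (numbers invented): with offs == 0x1000, an
	 * init-part pointer that was relocated from 0x00102000 to 0x00103000
	 * at boot gets 0x1000 subtracted again in the paged_store copy, so
	 * the page hash matches the original binary.
	 */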
420 	for (; reloc < reloc_end; reloc++) {
421 		if (*reloc < addr_start)
422 			continue;
423 		if (*reloc >= addr_end)
424 			break;
425 		ptr = (void *)(paged_store + *reloc - addr_start);
426 		*ptr -= offs;
427 	}
428 #endif
429 }
430 
431 static struct fobj *ro_paged_alloc(tee_mm_entry_t *mm, void *hashes,
432 				   void *store)
433 {
434 	const unsigned int num_pages = tee_mm_get_bytes(mm) / SMALL_PAGE_SIZE;
435 #ifdef CFG_CORE_ASLR
436 	unsigned int reloc_offs = (vaddr_t)__pageable_start - VCORE_START_VA;
437 	const struct boot_embdata *embdata = (const void *)__init_end;
438 	const void *reloc = __init_end + embdata->reloc_offset;
439 
440 	return fobj_ro_reloc_paged_alloc(num_pages, hashes, reloc_offs,
441 					 reloc, embdata->reloc_len, store);
442 #else
443 	return fobj_ro_paged_alloc(num_pages, hashes, store);
444 #endif
445 }
446 
447 static void init_runtime(unsigned long pageable_part)
448 {
449 	size_t n;
450 	size_t init_size = (size_t)(__init_end - __init_start);
451 	size_t pageable_start = (size_t)__pageable_start;
452 	size_t pageable_end = (size_t)__pageable_end;
453 	size_t pageable_size = pageable_end - pageable_start;
454 	vaddr_t tzsram_end = TZSRAM_BASE + TZSRAM_SIZE - TEE_LOAD_ADDR +
455 			     VCORE_START_VA;
456 	size_t hash_size = (pageable_size / SMALL_PAGE_SIZE) *
457 			   TEE_SHA256_HASH_SIZE;
458 	const struct boot_embdata *embdata = (const void *)__init_end;
459 	const void *tmp_hashes = NULL;
460 	tee_mm_entry_t *mm = NULL;
461 	struct fobj *fobj = NULL;
462 	uint8_t *paged_store = NULL;
463 	uint8_t *hashes = NULL;
464 
465 	assert(pageable_size % SMALL_PAGE_SIZE == 0);
466 	assert(embdata->total_len >= embdata->hashes_offset +
467 				     embdata->hashes_len);
468 	assert(hash_size == embdata->hashes_len);
469 
470 	tmp_hashes = __init_end + embdata->hashes_offset;
471 
472 	init_asan();
473 
474 	/* Add heap2 first as heap1 may be too small as the initial bget pool */
475 	malloc_add_pool(__heap2_start, __heap2_end - __heap2_start);
476 	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
477 
478 	/*
479 	 * This needs to be initialized early to support address lookup
480 	 * in MEM_AREA_TEE_RAM
481 	 */
482 	tee_pager_early_init();
483 
484 	hashes = malloc(hash_size);
485 	IMSG_RAW("\n");
486 	IMSG("Pager is enabled. Hashes: %zu bytes", hash_size);
487 	assert(hashes);
488 	asan_memcpy_unchecked(hashes, tmp_hashes, hash_size);
489 
490 	/*
491 	 * Need tee_mm_sec_ddr initialized to be able to allocate secure
492 	 * DDR below.
493 	 */
494 	core_mmu_init_ta_ram();
495 
496 	carve_out_asan_mem(&tee_mm_sec_ddr);
497 
498 	mm = tee_mm_alloc(&tee_mm_sec_ddr, pageable_size);
499 	assert(mm);
500 	paged_store = phys_to_virt(tee_mm_get_smem(mm), MEM_AREA_TA_RAM,
501 				   pageable_size);
502 	/*
503 	 * Load pageable part in the dedicated allocated area:
504 	 * - Move pageable non-init part into pageable area. Note bootloader
505 	 *   may have loaded it anywhere in TA RAM hence use memmove().
506 	 * - Copy pageable init part from current location into pageable area.
507 	 */
508 	memmove(paged_store + init_size,
509 		phys_to_virt(pageable_part,
510 			     core_mmu_get_type_by_pa(pageable_part),
511 			     __pageable_part_end - __pageable_part_start),
512 		__pageable_part_end - __pageable_part_start);
513 	asan_memcpy_unchecked(paged_store, __init_start, init_size);
514 	/*
515 	 * Undo any relocation of the init part so the hash checks
516 	 * can pass.
517 	 */
518 	undo_init_relocation(paged_store);
519 
520 	/* Check that the hashes of what's in the pageable area are OK */
521 	DMSG("Checking hashes of pageable area");
522 	for (n = 0; (n * SMALL_PAGE_SIZE) < pageable_size; n++) {
523 		const uint8_t *hash = hashes + n * TEE_SHA256_HASH_SIZE;
524 		const uint8_t *page = paged_store + n * SMALL_PAGE_SIZE;
525 		TEE_Result res;
526 
527 		DMSG("hash pg_idx %zu hash %p page %p", n, hash, page);
528 		res = hash_sha256_check(hash, page, SMALL_PAGE_SIZE);
529 		if (res != TEE_SUCCESS) {
530 			EMSG("Hash failed for page %zu at %p: res 0x%x",
531 			     n, (void *)page, res);
532 			panic();
533 		}
534 	}
535 
536 	/*
537 	 * Assert that the prepaged init sections are page aligned so that
538 	 * nothing uninitialized trails at the end of the premapped init area.
539 	 */
540 	assert(!(init_size & SMALL_PAGE_MASK));
541 
542 	/*
543 	 * Initialize the virtual memory pool used for main_mmu_l2_ttb which
544 	 * is supplied to tee_pager_init() below.
545 	 */
546 	init_vcore(&tee_mm_vcore);
547 
548 	/*
549 	 * Assign the pager alias area at the end of the small page block that
550 	 * the rest of the binary is loaded into. We're taking more than
551 	 * needed, but we're guaranteed not to need more than the physical
552 	 * amount of TZSRAM.
553 	 */
554 	mm = tee_mm_alloc2(&tee_mm_vcore,
555 			   (vaddr_t)tee_mm_vcore.lo +
556 			   tee_mm_vcore.size - TZSRAM_SIZE,
557 			   TZSRAM_SIZE);
558 	assert(mm);
559 	tee_pager_set_alias_area(mm);
560 
561 	/*
562 	 * Claim virtual memory which isn't paged.
563 	 * Linear memory (flat-mapped core memory) ends there.
564 	 */
565 	mm = tee_mm_alloc2(&tee_mm_vcore, VCORE_UNPG_RX_PA,
566 			   (vaddr_t)(__pageable_start - VCORE_UNPG_RX_PA));
567 	assert(mm);
568 
569 	/*
570 	 * Allocate virtual memory for the pageable area and let the pager
571 	 * take charge of all the pages already assigned to that memory.
572 	 */
573 	mm = tee_mm_alloc2(&tee_mm_vcore, (vaddr_t)__pageable_start,
574 			   pageable_size);
575 	assert(mm);
576 	fobj = ro_paged_alloc(mm, hashes, paged_store);
577 	assert(fobj);
578 	tee_pager_add_core_region(tee_mm_get_smem(mm), PAGED_REGION_TYPE_RO,
579 				  fobj);
580 	fobj_put(fobj);
581 
582 	tee_pager_add_pages(pageable_start, init_size / SMALL_PAGE_SIZE, false);
583 	tee_pager_add_pages(pageable_start + init_size,
584 			    (pageable_size - init_size) / SMALL_PAGE_SIZE,
585 			    true);
586 	if (pageable_end < tzsram_end)
587 		tee_pager_add_pages(pageable_end, (tzsram_end - pageable_end) /
588 						   SMALL_PAGE_SIZE, true);
589 
590 	/*
591 	 * There may be physical pages in TZSRAM before the core load address.
592 	 * These pages can be added to the physical pages pool of the pager.
593 	 * This setup may happen when the secure bootloader runs in TZSRAM
594 	 * and its memory can be reused by OP-TEE once boot stages complete.
595 	 */
596 	tee_pager_add_pages(tee_mm_vcore.lo,
597 			(VCORE_UNPG_RX_PA - tee_mm_vcore.lo) / SMALL_PAGE_SIZE,
598 			true);
599 
600 	print_pager_pool_size();
601 }
602 #else
603 
604 static void init_runtime(unsigned long pageable_part __unused)
605 {
606 	init_asan();
607 
608 	/*
609 	 * By default the whole of OP-TEE uses malloc, so we need to
610 	 * initialize it early. But when virtualization is enabled, malloc is
611 	 * used only by the TEE runtime, so it should be initialized later,
612 	 * separately for each virtual partition. Core code uses nex_malloc
613 	 * instead.
614 	 */
615 #ifdef CFG_NS_VIRTUALIZATION
616 	nex_malloc_add_pool(__nex_heap_start, __nex_heap_end -
617 					      __nex_heap_start);
618 #else
619 	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
620 #endif
621 
622 	IMSG_RAW("\n");
623 }
624 #endif
625 
626 void *get_dt(void)
627 {
628 	void *fdt = get_embedded_dt();
629 
630 	if (!fdt)
631 		fdt = get_external_dt();
632 
633 	return fdt;
634 }
635 
636 void *get_secure_dt(void)
637 {
638 	void *fdt = get_embedded_dt();
639 
640 	if (!fdt && IS_ENABLED(CFG_MAP_EXT_DT_SECURE))
641 		fdt = get_external_dt();
642 
643 	return fdt;
644 }
645 
646 #if defined(CFG_EMBED_DTB)
647 void *get_embedded_dt(void)
648 {
649 	static bool checked;
650 
651 	assert(cpu_mmu_enabled());
652 
653 	if (!checked) {
654 		IMSG("Embedded DTB found");
655 
656 		if (fdt_check_header(embedded_secure_dtb))
657 			panic("Invalid embedded DTB");
658 
659 		checked = true;
660 	}
661 
662 	return embedded_secure_dtb;
663 }
664 #else
665 void *get_embedded_dt(void)
666 {
667 	return NULL;
668 }
669 #endif /*CFG_EMBED_DTB*/
670 
671 #if defined(CFG_DT)
672 void *get_external_dt(void)
673 {
674 	if (!IS_ENABLED(CFG_EXTERNAL_DT))
675 		return NULL;
676 
677 	assert(cpu_mmu_enabled());
678 	return external_dt.blob;
679 }
680 
681 static TEE_Result release_external_dt(void)
682 {
683 	int ret = 0;
684 
685 	if (!IS_ENABLED(CFG_EXTERNAL_DT))
686 		return TEE_SUCCESS;
687 
688 	if (!external_dt.blob)
689 		return TEE_SUCCESS;
690 
691 	ret = fdt_pack(external_dt.blob);
692 	if (ret < 0) {
693 		EMSG("Failed to pack Device Tree at 0x%" PRIxPA ": error %d",
694 		     virt_to_phys(external_dt.blob), ret);
695 		panic();
696 	}
697 
698 	if (core_mmu_remove_mapping(MEM_AREA_EXT_DT, external_dt.blob,
699 				    CFG_DTB_MAX_SIZE))
700 		panic("Failed to remove temporary Device Tree mapping");
701 
702 	/* External DTB can no longer be accessed, reset the pointer */
703 	external_dt.blob = NULL;
704 
705 	return TEE_SUCCESS;
706 }
707 boot_final(release_external_dt);
708 
709 #ifdef _CFG_USE_DTB_OVERLAY
710 static int add_dt_overlay_fragment(struct dt_descriptor *dt, int ioffs)
711 {
712 	char frag[32];
713 	int offs;
714 	int ret;
715 
716 	snprintf(frag, sizeof(frag), "fragment@%d", dt->frag_id);
717 	offs = fdt_add_subnode(dt->blob, ioffs, frag);
718 	if (offs < 0)
719 		return offs;
720 
721 	dt->frag_id += 1;
722 
723 	ret = fdt_setprop_string(dt->blob, offs, "target-path", "/");
724 	if (ret < 0)
725 		return -1;
726 
727 	return fdt_add_subnode(dt->blob, offs, "__overlay__");
728 }
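
/*
 * Illustrative shape of what add_dt_overlay_fragment() produces when the
 * external DT is an overlay (node names follow the fragment@<id> convention
 * used above):
 *
 *	fragment@0 {
 *		target-path = "/";
 *		__overlay__ {
 *		};
 *	};
 */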
729 
730 static int init_dt_overlay(struct dt_descriptor *dt, int __maybe_unused dt_size)
731 {
732 	int fragment;
733 
734 	if (IS_ENABLED(CFG_EXTERNAL_DTB_OVERLAY)) {
735 		if (!fdt_check_header(dt->blob)) {
736 			fdt_for_each_subnode(fragment, dt->blob, 0)
737 				dt->frag_id += 1;
738 			return 0;
739 		}
740 	}
741 
742 	return fdt_create_empty_tree(dt->blob, dt_size);
743 }
744 #else
745 static int add_dt_overlay_fragment(struct dt_descriptor *dt __unused, int offs)
746 {
747 	return offs;
748 }
749 
750 static int init_dt_overlay(struct dt_descriptor *dt __unused,
751 			   int dt_size __unused)
752 {
753 	return 0;
754 }
755 #endif /* _CFG_USE_DTB_OVERLAY */
756 
757 static int add_dt_path_subnode(struct dt_descriptor *dt, const char *path,
758 			       const char *subnode)
759 {
760 	int offs;
761 
762 	offs = fdt_path_offset(dt->blob, path);
763 	if (offs < 0)
764 		return -1;
765 	offs = add_dt_overlay_fragment(dt, offs);
766 	if (offs < 0)
767 		return -1;
768 	offs = fdt_add_subnode(dt->blob, offs, subnode);
769 	if (offs < 0)
770 		return -1;
771 	return offs;
772 }
773 
774 static int add_optee_dt_node(struct dt_descriptor *dt)
775 {
776 	int offs;
777 	int ret;
778 
779 	if (fdt_path_offset(dt->blob, "/firmware/optee") >= 0) {
780 		DMSG("OP-TEE Device Tree node already exists!");
781 		return 0;
782 	}
783 
784 	offs = fdt_path_offset(dt->blob, "/firmware");
785 	if (offs < 0) {
786 		offs = add_dt_path_subnode(dt, "/", "firmware");
787 		if (offs < 0)
788 			return -1;
789 	}
790 
791 	offs = fdt_add_subnode(dt->blob, offs, "optee");
792 	if (offs < 0)
793 		return -1;
794 
795 	ret = fdt_setprop_string(dt->blob, offs, "compatible",
796 				 "linaro,optee-tz");
797 	if (ret < 0)
798 		return -1;
799 	ret = fdt_setprop_string(dt->blob, offs, "method", "smc");
800 	if (ret < 0)
801 		return -1;
802 
803 	if (CFG_CORE_ASYNC_NOTIF_GIC_INTID) {
804 		/*
805 		 * The format of the interrupt property is defined by the
806 		 * binding of the interrupt domain root. In this case it's
807 		 * an Arm GIC v1, v2 or v3, so we must be compatible with
808 		 * these.
809 		 *
810 		 * An SPI type of interrupt is indicated with a 0 in the
811 		 * first cell. A PPI type is indicated with value 1.
812 		 *
813 		 * The interrupt number goes in the second cell, where
814 		 * SPIs range from 0 to 987 and PPIs range from 0 to 15.
815 		 *
816 		 * Flags are passed in the third cell.
817 		 */
818 		uint32_t itr_trigger = 0;
819 		uint32_t itr_type = 0;
820 		uint32_t itr_id = 0;
821 		uint32_t val[3] = { };
822 
823 		/* PPIs are visible only in the current CPU cluster */
824 		static_assert(!CFG_CORE_ASYNC_NOTIF_GIC_INTID ||
825 			      (CFG_CORE_ASYNC_NOTIF_GIC_INTID >=
826 			       GIC_SPI_BASE) ||
827 			      ((CFG_TEE_CORE_NB_CORE <= 8) &&
828 			       (CFG_CORE_ASYNC_NOTIF_GIC_INTID >=
829 				GIC_PPI_BASE)));
830 
831 		if (CFG_CORE_ASYNC_NOTIF_GIC_INTID >= GIC_SPI_BASE) {
832 			itr_type = GIC_SPI;
833 			itr_id = CFG_CORE_ASYNC_NOTIF_GIC_INTID - GIC_SPI_BASE;
834 			itr_trigger = IRQ_TYPE_EDGE_RISING;
835 		} else {
836 			itr_type = GIC_PPI;
837 			itr_id = CFG_CORE_ASYNC_NOTIF_GIC_INTID - GIC_PPI_BASE;
838 			itr_trigger = IRQ_TYPE_EDGE_RISING |
839 				      GIC_CPU_MASK_SIMPLE(CFG_TEE_CORE_NB_CORE);
840 		}
841 
842 		val[0] = TEE_U32_TO_BIG_ENDIAN(itr_type);
843 		val[1] = TEE_U32_TO_BIG_ENDIAN(itr_id);
844 		val[2] = TEE_U32_TO_BIG_ENDIAN(itr_trigger);
845 
846 		ret = fdt_setprop(dt->blob, offs, "interrupts", val,
847 				  sizeof(val));
848 		if (ret < 0)
849 			return -1;
850 	}
851 	return 0;
852 }
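
/*
 * Illustrative result of add_optee_dt_node() in the normal world DT, here
 * assuming CFG_CORE_ASYNC_NOTIF_GIC_INTID names an SPI:
 *
 *	firmware {
 *		optee {
 *			compatible = "linaro,optee-tz";
 *			method = "smc";
 *			interrupts = <0 nn 1>;
 *		};
 *	};
 *
 * where 0 selects an SPI, nn is CFG_CORE_ASYNC_NOTIF_GIC_INTID - 32 and
 * 1 is IRQ_TYPE_EDGE_RISING.
 */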
853 
854 #ifdef CFG_PSCI_ARM32
855 static int append_psci_compatible(void *fdt, int offs, const char *str)
856 {
857 	return fdt_appendprop(fdt, offs, "compatible", str, strlen(str) + 1);
858 }
859 
860 static int dt_add_psci_node(struct dt_descriptor *dt)
861 {
862 	int offs;
863 
864 	if (fdt_path_offset(dt->blob, "/psci") >= 0) {
865 		DMSG("PSCI Device Tree node already exists!");
866 		return 0;
867 	}
868 
869 	offs = add_dt_path_subnode(dt, "/", "psci");
870 	if (offs < 0)
871 		return -1;
872 	if (append_psci_compatible(dt->blob, offs, "arm,psci-1.0"))
873 		return -1;
874 	if (append_psci_compatible(dt->blob, offs, "arm,psci-0.2"))
875 		return -1;
876 	if (append_psci_compatible(dt->blob, offs, "arm,psci"))
877 		return -1;
878 	if (fdt_setprop_string(dt->blob, offs, "method", "smc"))
879 		return -1;
880 	if (fdt_setprop_u32(dt->blob, offs, "cpu_suspend", PSCI_CPU_SUSPEND))
881 		return -1;
882 	if (fdt_setprop_u32(dt->blob, offs, "cpu_off", PSCI_CPU_OFF))
883 		return -1;
884 	if (fdt_setprop_u32(dt->blob, offs, "cpu_on", PSCI_CPU_ON))
885 		return -1;
886 	if (fdt_setprop_u32(dt->blob, offs, "sys_poweroff", PSCI_SYSTEM_OFF))
887 		return -1;
888 	if (fdt_setprop_u32(dt->blob, offs, "sys_reset", PSCI_SYSTEM_RESET))
889 		return -1;
890 	return 0;
891 }
892 
893 static int check_node_compat_prefix(struct dt_descriptor *dt, int offs,
894 				    const char *prefix)
895 {
896 	const size_t prefix_len = strlen(prefix);
897 	size_t l;
898 	int plen;
899 	const char *prop;
900 
901 	prop = fdt_getprop(dt->blob, offs, "compatible", &plen);
902 	if (!prop)
903 		return -1;
904 
905 	while (plen > 0) {
906 		if (memcmp(prop, prefix, prefix_len) == 0)
907 			return 0; /* match */
908 
909 		l = strlen(prop) + 1;
910 		prop += l;
911 		plen -= l;
912 	}
913 
914 	return -1;
915 }
916 
917 static int dt_add_psci_cpu_enable_methods(struct dt_descriptor *dt)
918 {
919 	int offs = 0;
920 
921 	while (1) {
922 		offs = fdt_next_node(dt->blob, offs, NULL);
923 		if (offs < 0)
924 			break;
925 		if (fdt_getprop(dt->blob, offs, "enable-method", NULL))
926 			continue; /* already set */
927 		if (check_node_compat_prefix(dt, offs, "arm,cortex-a"))
928 			continue; /* no compatible */
929 		if (fdt_setprop_string(dt->blob, offs, "enable-method", "psci"))
930 			return -1;
931 		/* Need to restart scanning as offsets may have changed */
932 		offs = 0;
933 	}
934 	return 0;
935 }
936 
937 static int config_psci(struct dt_descriptor *dt)
938 {
939 	if (dt_add_psci_node(dt))
940 		return -1;
941 	return dt_add_psci_cpu_enable_methods(dt);
942 }
943 #else
944 static int config_psci(struct dt_descriptor *dt __unused)
945 {
946 	return 0;
947 }
948 #endif /*CFG_PSCI_ARM32*/
949 
950 static void set_dt_val(void *data, uint32_t cell_size, uint64_t val)
951 {
952 	if (cell_size == 1) {
953 		fdt32_t v = cpu_to_fdt32((uint32_t)val);
954 
955 		memcpy(data, &v, sizeof(v));
956 	} else {
957 		fdt64_t v = cpu_to_fdt64(val);
958 
959 		memcpy(data, &v, sizeof(v));
960 	}
961 }
962 
963 static int add_res_mem_dt_node(struct dt_descriptor *dt, const char *name,
964 			       paddr_t pa, size_t size)
965 {
966 	int offs = 0;
967 	int ret = 0;
968 	int addr_size = -1;
969 	int len_size = -1;
970 	bool found = true;
971 	char subnode_name[80] = { 0 };
972 
973 	offs = fdt_path_offset(dt->blob, "/reserved-memory");
974 
975 	if (offs < 0) {
976 		found = false;
977 		offs = 0;
978 	}
979 
980 	if (IS_ENABLED2(_CFG_USE_DTB_OVERLAY)) {
981 		len_size = sizeof(paddr_t) / sizeof(uint32_t);
982 		addr_size = sizeof(paddr_t) / sizeof(uint32_t);
983 	} else {
984 		len_size = fdt_size_cells(dt->blob, offs);
985 		if (len_size < 0)
986 			return -1;
987 		addr_size = fdt_address_cells(dt->blob, offs);
988 		if (addr_size < 0)
989 			return -1;
990 	}
991 
992 	if (!found) {
993 		offs = add_dt_path_subnode(dt, "/", "reserved-memory");
994 		if (offs < 0)
995 			return -1;
996 		ret = fdt_setprop_cell(dt->blob, offs, "#address-cells",
997 				       addr_size);
998 		if (ret < 0)
999 			return -1;
1000 		ret = fdt_setprop_cell(dt->blob, offs, "#size-cells", len_size);
1001 		if (ret < 0)
1002 			return -1;
1003 		ret = fdt_setprop(dt->blob, offs, "ranges", NULL, 0);
1004 		if (ret < 0)
1005 			return -1;
1006 	}
1007 
1008 	ret = snprintf(subnode_name, sizeof(subnode_name),
1009 		       "%s@%" PRIxPA, name, pa);
1010 	if (ret < 0 || ret >= (int)sizeof(subnode_name))
1011 		DMSG("truncated node \"%s@%" PRIxPA"\"", name, pa);
1012 	offs = fdt_add_subnode(dt->blob, offs, subnode_name);
1013 	if (offs >= 0) {
1014 		uint32_t data[FDT_MAX_NCELLS * 2];
1015 
1016 		set_dt_val(data, addr_size, pa);
1017 		set_dt_val(data + addr_size, len_size, size);
1018 		ret = fdt_setprop(dt->blob, offs, "reg", data,
1019 				  sizeof(uint32_t) * (addr_size + len_size));
1020 		if (ret < 0)
1021 			return -1;
1022 		ret = fdt_setprop(dt->blob, offs, "no-map", NULL, 0);
1023 		if (ret < 0)
1024 			return -1;
1025 	} else {
1026 		return -1;
1027 	}
1028 	return 0;
1029 }
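
/*
 * Illustrative shape of a node added by add_res_mem_dt_node() (address and
 * size are made up, cell counts follow the parent node):
 *
 *	reserved-memory {
 *		#address-cells = <2>;
 *		#size-cells = <2>;
 *		ranges;
 *
 *		optee_core@de000000 {
 *			reg = <0x0 0xde000000 0x0 0x01000000>;
 *			no-map;
 *		};
 *	};
 */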
1030 
1031 #ifdef CFG_CORE_DYN_SHM
1032 static uint64_t get_dt_val_and_advance(const void *data, size_t *offs,
1033 				       uint32_t cell_size)
1034 {
1035 	uint64_t rv = 0;
1036 
1037 	if (cell_size == 1) {
1038 		uint32_t v;
1039 
1040 		memcpy(&v, (const uint8_t *)data + *offs, sizeof(v));
1041 		*offs += sizeof(v);
1042 		rv = fdt32_to_cpu(v);
1043 	} else {
1044 		uint64_t v;
1045 
1046 		memcpy(&v, (const uint8_t *)data + *offs, sizeof(v));
1047 		*offs += sizeof(v);
1048 		rv = fdt64_to_cpu(v);
1049 	}
1050 
1051 	return rv;
1052 }
1053 
1054 /*
1055  * Find all non-secure memory in the DT. Memory not marked accessible to the
1056  * Secure World is ignored since it cannot be mapped for use as dynamic shared
1057  * memory.
1058  */
1059 static int get_nsec_memory_helper(void *fdt, struct core_mmu_phys_mem *mem)
1060 {
1061 	const uint8_t *prop = NULL;
1062 	uint64_t a = 0;
1063 	uint64_t l = 0;
1064 	size_t prop_offs = 0;
1065 	size_t prop_len = 0;
1066 	int elems_total = 0;
1067 	int addr_size = 0;
1068 	int len_size = 0;
1069 	int offs = 0;
1070 	size_t n = 0;
1071 	int len = 0;
1072 
1073 	addr_size = fdt_address_cells(fdt, 0);
1074 	if (addr_size < 0)
1075 		return 0;
1076 
1077 	len_size = fdt_size_cells(fdt, 0);
1078 	if (len_size < 0)
1079 		return 0;
1080 
1081 	while (true) {
1082 		offs = fdt_node_offset_by_prop_value(fdt, offs, "device_type",
1083 						     "memory",
1084 						     sizeof("memory"));
1085 		if (offs < 0)
1086 			break;
1087 
1088 		if (fdt_get_status(fdt, offs) != (DT_STATUS_OK_NSEC |
1089 						   DT_STATUS_OK_SEC))
1090 			continue;
1091 
1092 		prop = fdt_getprop(fdt, offs, "reg", &len);
1093 		if (!prop)
1094 			continue;
1095 
1096 		prop_len = len;
1097 		for (n = 0, prop_offs = 0; prop_offs < prop_len; n++) {
1098 			a = get_dt_val_and_advance(prop, &prop_offs, addr_size);
1099 			if (prop_offs >= prop_len) {
1100 				n--;
1101 				break;
1102 			}
1103 
1104 			l = get_dt_val_and_advance(prop, &prop_offs, len_size);
1105 			if (mem) {
1106 				mem->type = MEM_AREA_DDR_OVERALL;
1107 				mem->addr = a;
1108 				mem->size = l;
1109 				mem++;
1110 			}
1111 		}
1112 
1113 		elems_total += n;
1114 	}
1115 
1116 	return elems_total;
1117 }
1118 
1119 static struct core_mmu_phys_mem *get_nsec_memory(void *fdt, size_t *nelems)
1120 {
1121 	struct core_mmu_phys_mem *mem = NULL;
1122 	int elems_total = 0;
1123 
1124 	elems_total = get_nsec_memory_helper(fdt, NULL);
1125 	if (elems_total <= 0)
1126 		return NULL;
1127 
1128 	mem = nex_calloc(elems_total, sizeof(*mem));
1129 	if (!mem)
1130 		panic();
1131 
1132 	elems_total = get_nsec_memory_helper(fdt, mem);
1133 	assert(elems_total > 0);
1134 
1135 	*nelems = elems_total;
1136 
1137 	return mem;
1138 }
1139 #endif /*CFG_CORE_DYN_SHM*/
1140 
1141 #ifdef CFG_CORE_RESERVED_SHM
1142 static int mark_static_shm_as_reserved(struct dt_descriptor *dt)
1143 {
1144 	vaddr_t shm_start;
1145 	vaddr_t shm_end;
1146 
1147 	core_mmu_get_mem_by_type(MEM_AREA_NSEC_SHM, &shm_start, &shm_end);
1148 	if (shm_start != shm_end)
1149 		return add_res_mem_dt_node(dt, "optee_shm",
1150 					   virt_to_phys((void *)shm_start),
1151 					   shm_end - shm_start);
1152 
1153 	DMSG("No SHM configured");
1154 	return -1;
1155 }
1156 #endif /*CFG_CORE_RESERVED_SHM*/
1157 
1158 static void init_external_dt(unsigned long phys_dt)
1159 {
1160 	struct dt_descriptor *dt = &external_dt;
1161 	void *fdt;
1162 	int ret;
1163 
1164 	if (!IS_ENABLED(CFG_EXTERNAL_DT))
1165 		return;
1166 
1167 	if (!phys_dt) {
1168 		/*
1169 		 * No need to panic as we're not using the DT in OP-TEE
1170 		 * yet, we're only adding some nodes for normal world use.
1171 		 * This makes the switch to using DT easier as we can boot
1172 		 * a newer OP-TEE with older boot loaders. Once we start to
1173 		 * initialize devices based on DT we'll likely panic
1174 		 * instead of returning here.
1175 		 */
1176 		IMSG("No non-secure external DT");
1177 		return;
1178 	}
1179 
1180 	fdt = core_mmu_add_mapping(MEM_AREA_EXT_DT, phys_dt, CFG_DTB_MAX_SIZE);
1181 	if (!fdt)
1182 		panic("Failed to map external DTB");
1183 
1184 	dt->blob = fdt;
1185 
1186 	ret = init_dt_overlay(dt, CFG_DTB_MAX_SIZE);
1187 	if (ret < 0) {
1188 		EMSG("Device Tree Overlay init fail @ %#lx: error %d", phys_dt,
1189 		     ret);
1190 		panic();
1191 	}
1192 
1193 	ret = fdt_open_into(fdt, fdt, CFG_DTB_MAX_SIZE);
1194 	if (ret < 0) {
1195 		EMSG("Invalid Device Tree at %#lx: error %d", phys_dt, ret);
1196 		panic();
1197 	}
1198 
1199 	IMSG("Non-secure external DT found");
1200 }
1201 
1202 static int mark_tzdram_as_reserved(struct dt_descriptor *dt)
1203 {
1204 	return add_res_mem_dt_node(dt, "optee_core", CFG_TZDRAM_START,
1205 				   CFG_TZDRAM_SIZE);
1206 }
1207 
1208 static void update_external_dt(void)
1209 {
1210 	struct dt_descriptor *dt = &external_dt;
1211 
1212 	if (!IS_ENABLED(CFG_EXTERNAL_DT))
1213 		return;
1214 
1215 	if (!dt->blob)
1216 		return;
1217 
1218 	if (!IS_ENABLED(CFG_CORE_FFA) && add_optee_dt_node(dt))
1219 		panic("Failed to add OP-TEE Device Tree node");
1220 
1221 	if (config_psci(dt))
1222 		panic("Failed to config PSCI");
1223 
1224 #ifdef CFG_CORE_RESERVED_SHM
1225 	if (mark_static_shm_as_reserved(dt))
1226 		panic("Failed to config non-secure memory");
1227 #endif
1228 
1229 	if (mark_tzdram_as_reserved(dt))
1230 		panic("Failed to config secure memory");
1231 }
1232 #else /*CFG_DT*/
1233 void *get_external_dt(void)
1234 {
1235 	return NULL;
1236 }
1237 
1238 static void init_external_dt(unsigned long phys_dt __unused)
1239 {
1240 }
1241 
1242 static void update_external_dt(void)
1243 {
1244 }
1245 
1246 #ifdef CFG_CORE_DYN_SHM
1247 static struct core_mmu_phys_mem *get_nsec_memory(void *fdt __unused,
1248 						 size_t *nelems __unused)
1249 {
1250 	return NULL;
1251 }
1252 #endif /*CFG_CORE_DYN_SHM*/
1253 #endif /*!CFG_DT*/
1254 
1255 #if defined(CFG_CORE_SEL1_SPMC) && defined(CFG_DT)
1256 void *get_tos_fw_config_dt(void)
1257 {
1258 	if (!IS_ENABLED(CFG_MAP_EXT_DT_SECURE))
1259 		return NULL;
1260 
1261 	assert(cpu_mmu_enabled());
1262 
1263 	return tos_fw_config_dt.blob;
1264 }
1265 
1266 static void init_tos_fw_config_dt(unsigned long pa)
1267 {
1268 	struct dt_descriptor *dt = &tos_fw_config_dt;
1269 	void *fdt = NULL;
1270 	int ret = 0;
1271 
1272 	if (!IS_ENABLED(CFG_MAP_EXT_DT_SECURE))
1273 		return;
1274 
1275 	if (!pa)
1276 		panic("No TOS_FW_CONFIG DT found");
1277 
1278 	fdt = core_mmu_add_mapping(MEM_AREA_EXT_DT, pa, CFG_DTB_MAX_SIZE);
1279 	if (!fdt)
1280 		panic("Failed to map TOS_FW_CONFIG DT");
1281 
1282 	dt->blob = fdt;
1283 
1284 	ret = fdt_open_into(fdt, fdt, CFG_DTB_MAX_SIZE);
1285 	if (ret < 0) {
1286 		EMSG("Invalid Device Tree at %#lx: error %d", pa, ret);
1287 		panic();
1288 	}
1289 
1290 	IMSG("TOS_FW_CONFIG DT found");
1291 }
1292 #else
1293 void *get_tos_fw_config_dt(void)
1294 {
1295 	return NULL;
1296 }
1297 
1298 static void init_tos_fw_config_dt(unsigned long pa __unused)
1299 {
1300 }
1301 #endif /*CFG_CORE_SEL1_SPMC && CFG_DT*/
1302 
1303 #ifdef CFG_CORE_DYN_SHM
1304 static void discover_nsec_memory(void)
1305 {
1306 	struct core_mmu_phys_mem *mem;
1307 	const struct core_mmu_phys_mem *mem_begin = NULL;
1308 	const struct core_mmu_phys_mem *mem_end = NULL;
1309 	size_t nelems;
1310 	void *fdt = get_external_dt();
1311 
1312 	if (fdt) {
1313 		mem = get_nsec_memory(fdt, &nelems);
1314 		if (mem) {
1315 			core_mmu_set_discovered_nsec_ddr(mem, nelems);
1316 			return;
1317 		}
1318 
1319 		DMSG("No non-secure memory found in FDT");
1320 	}
1321 
1322 	mem_begin = phys_ddr_overall_begin;
1323 	mem_end = phys_ddr_overall_end;
1324 	nelems = mem_end - mem_begin;
1325 	if (nelems) {
1326 		/*
1327 		 * Platform cannot use both register_ddr() and the now
1328 		 * deprecated register_dynamic_shm().
1329 		 */
1330 		assert(phys_ddr_overall_compat_begin ==
1331 		       phys_ddr_overall_compat_end);
1332 	} else {
1333 		mem_begin = phys_ddr_overall_compat_begin;
1334 		mem_end = phys_ddr_overall_compat_end;
1335 		nelems = mem_end - mem_begin;
1336 		if (!nelems)
1337 			return;
1338 		DMSG("Warning register_dynamic_shm() is deprecated, please use register_ddr() instead");
1339 	}
1340 
1341 	mem = nex_calloc(nelems, sizeof(*mem));
1342 	if (!mem)
1343 		panic();
1344 
1345 	memcpy(mem, phys_ddr_overall_begin, sizeof(*mem) * nelems);
1346 	core_mmu_set_discovered_nsec_ddr(mem, nelems);
1347 }
1348 #else /*CFG_CORE_DYN_SHM*/
1349 static void discover_nsec_memory(void)
1350 {
1351 }
1352 #endif /*!CFG_CORE_DYN_SHM*/
1353 
1354 #ifdef CFG_NS_VIRTUALIZATION
1355 static TEE_Result virt_init_heap(void)
1356 {
1357 	/* We need to initialize the pool for every virtual guest partition */
1358 	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
1359 
1360 	return TEE_SUCCESS;
1361 }
1362 preinit_early(virt_init_heap);
1363 #endif
1364 
1365 void init_tee_runtime(void)
1366 {
1367 #ifndef CFG_WITH_PAGER
1368 	/* Pager initializes TA RAM early */
1369 	core_mmu_init_ta_ram();
1370 #endif
1371 	/*
1372 	 * With virtualization we call this function when creating the
1373 	 * OP-TEE partition instead.
1374 	 */
1375 	if (!IS_ENABLED(CFG_NS_VIRTUALIZATION))
1376 		call_preinitcalls();
1377 	call_initcalls();
1378 
1379 	/*
1380 	 * These two functions use crypto_rng_read() to initialize the
1381 	 * pauth keys. Once call_initcalls() returns we're guaranteed that
1382 	 * crypto_rng_read() is ready to be used.
1383 	 */
1384 	thread_init_core_local_pauth_keys();
1385 	thread_init_thread_pauth_keys();
1386 }
1387 
1388 static void init_primary(unsigned long pageable_part, unsigned long nsec_entry)
1389 {
1390 	thread_init_core_local_stacks();
1391 	/*
1392 	 * Mask asynchronous exceptions before switching to the thread vector
1393 	 * as the thread handler requires those to be masked while
1394 	 * executing with the temporary stack. The thread subsystem also
1395 	 * asserts that the foreign interrupts are blocked when using most of
1396 	 * its functions.
1397 	 */
1398 	thread_set_exceptions(THREAD_EXCP_ALL);
1399 	primary_save_cntfrq();
1400 	init_vfp_sec();
1401 	/*
1402 	 * Pager: init_runtime() calls thread_kernel_enable_vfp() so we must
1403 	 * set a current thread right now to avoid a chicken-and-egg problem
1404 	 * (thread_init_boot_thread() sets the current thread but needs
1405 	 * things set by init_runtime()).
1406 	 */
1407 	thread_get_core_local()->curr_thread = 0;
1408 	init_runtime(pageable_part);
1409 
1410 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
1411 		/*
1412 		 * Virtualization: We can't initialize threads right now because
1413 		 * threads belong to the "tee" part and will be initialized
1414 		 * separately for each new virtual guest. So, we'll clear
1415 		 * "curr_thread" and call it done.
1416 		 */
1417 		thread_get_core_local()->curr_thread = -1;
1418 	} else {
1419 		thread_init_boot_thread();
1420 	}
1421 	thread_init_primary();
1422 	thread_init_per_cpu();
1423 	init_sec_mon(nsec_entry);
1424 }
1425 
1426 static bool cpu_nmfi_enabled(void)
1427 {
1428 #if defined(ARM32)
1429 	return read_sctlr() & SCTLR_NMFI;
1430 #else
1431 	/* Note: ARM64 does not feature non-maskable FIQ support. */
1432 	return false;
1433 #endif
1434 }
1435 
1436 /*
1437  * Note: this function is weak just to make it possible to exclude it from
1438  * the unpaged area.
1439  */
1440 void __weak boot_init_primary_late(unsigned long fdt,
1441 				   unsigned long tos_fw_config)
1442 {
1443 	init_external_dt(fdt);
1444 	init_tos_fw_config_dt(tos_fw_config);
1445 #ifdef CFG_CORE_SEL1_SPMC
1446 	tpm_map_log_area(get_tos_fw_config_dt());
1447 #else
1448 	tpm_map_log_area(get_external_dt());
1449 #endif
1450 	discover_nsec_memory();
1451 	update_external_dt();
1452 	configure_console_from_dt();
1453 
1454 	IMSG("OP-TEE version: %s", core_v_str);
1455 	if (IS_ENABLED(CFG_WARN_INSECURE)) {
1456 		IMSG("WARNING: This OP-TEE configuration might be insecure!");
1457 		IMSG("WARNING: Please check https://optee.readthedocs.io/en/latest/architecture/porting_guidelines.html");
1458 	}
1459 	IMSG("Primary CPU initializing");
1460 #ifdef CFG_CORE_ASLR
1461 	DMSG("Executing at offset %#lx with virtual load address %#"PRIxVA,
1462 	     (unsigned long)boot_mmu_config.map_offset, VCORE_START_VA);
1463 #endif
1464 	if (IS_ENABLED(CFG_MEMTAG))
1465 		DMSG("Memory tagging %s",
1466 		     memtag_is_enabled() ?  "enabled" : "disabled");
1467 
1468 	/* Check if platform needs NMFI workaround */
1469 	if (cpu_nmfi_enabled()) {
1470 		if (!IS_ENABLED(CFG_CORE_WORKAROUND_ARM_NMFI))
1471 			IMSG("WARNING: This ARM core has NMFI enabled, please apply workaround!");
1472 	} else {
1473 		if (IS_ENABLED(CFG_CORE_WORKAROUND_ARM_NMFI))
1474 			IMSG("WARNING: This ARM core does not have NMFI enabled, no need for workaround");
1475 	}
1476 
1477 	main_init_gic();
1478 	init_vfp_nsec();
1479 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
1480 		IMSG("Initializing virtualization support");
1481 		core_mmu_init_virtualization();
1482 	} else {
1483 		init_tee_runtime();
1484 	}
1485 	call_finalcalls();
1486 	IMSG("Primary CPU switching to normal world boot");
1487 }
1488 
1489 static void init_secondary_helper(unsigned long nsec_entry)
1490 {
1491 	IMSG("Secondary CPU %zu initializing", get_core_pos());
1492 
1493 	/*
1494 	 * Mask asynchronous exceptions before switching to the thread vector
1495 	 * as the thread handler requires those to be masked while
1496 	 * executing with the temporary stack. The thread subsystem also
1497 	 * asserts that the foreign interrupts are blocked when using most of
1498 	 * its functions.
1499 	 */
1500 	thread_set_exceptions(THREAD_EXCP_ALL);
1501 
1502 	secondary_init_cntfrq();
1503 	thread_init_per_cpu();
1504 	init_sec_mon(nsec_entry);
1505 	main_secondary_init_gic();
1506 	init_vfp_sec();
1507 	init_vfp_nsec();
1508 
1509 	IMSG("Secondary CPU %zu switching to normal world boot", get_core_pos());
1510 }
1511 
1512 /*
1513  * Note: this function is weak just to make it possible to exclude it from
1514  * the unpaged area so that it lies in the init area.
1515  */
1516 void __weak boot_init_primary_early(unsigned long pageable_part,
1517 				    unsigned long nsec_entry __maybe_unused)
1518 {
1519 	unsigned long e = PADDR_INVALID;
1520 
1521 #if !defined(CFG_WITH_ARM_TRUSTED_FW)
1522 	e = nsec_entry;
1523 #endif
1524 
1525 	init_primary(pageable_part, e);
1526 }
1527 
1528 #if defined(CFG_WITH_ARM_TRUSTED_FW)
1529 unsigned long boot_cpu_on_handler(unsigned long a0 __maybe_unused,
1530 				  unsigned long a1 __unused)
1531 {
1532 	init_secondary_helper(PADDR_INVALID);
1533 	return 0;
1534 }
1535 #else
1536 void boot_init_secondary(unsigned long nsec_entry)
1537 {
1538 	init_secondary_helper(nsec_entry);
1539 }
1540 #endif
1541 
1542 #if defined(CFG_BOOT_SECONDARY_REQUEST)
1543 void boot_set_core_ns_entry(size_t core_idx, uintptr_t entry,
1544 			    uintptr_t context_id)
1545 {
1546 	ns_entry_contexts[core_idx].entry_point = entry;
1547 	ns_entry_contexts[core_idx].context_id = context_id;
1548 	dsb_ishst();
1549 }
1550 
1551 int boot_core_release(size_t core_idx, paddr_t entry)
1552 {
1553 	if (!core_idx || core_idx >= CFG_TEE_CORE_NB_CORE)
1554 		return -1;
1555 
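	/*
	 * Publish the entry point before releasing the core: dmb() orders
	 * the entry_point store before the spin_table store, and dsb()
	 * followed by sev() makes the update observable before waking the
	 * wfe() loop in boot_core_hpen().
	 */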
1556 	ns_entry_contexts[core_idx].entry_point = entry;
1557 	dmb();
1558 	spin_table[core_idx] = 1;
1559 	dsb();
1560 	sev();
1561 
1562 	return 0;
1563 }
1564 
1565 /*
1566  * Spin until a secondary boot request arrives, then return with
1567  * the secondary core entry address.
1568  */
1569 struct ns_entry_context *boot_core_hpen(void)
1570 {
1571 #ifdef CFG_PSCI_ARM32
1572 	return &ns_entry_contexts[get_core_pos()];
1573 #else
1574 	do {
1575 		wfe();
1576 	} while (!spin_table[get_core_pos()]);
1577 	dmb();
1578 	return &ns_entry_contexts[get_core_pos()];
1579 #endif
1580 }
1581 #endif
1582 
1583 #if defined(CFG_CORE_ASLR)
1584 #if defined(CFG_DT)
1585 unsigned long __weak get_aslr_seed(void *fdt)
1586 {
1587 	int rc = 0;
1588 	const uint64_t *seed = NULL;
1589 	int offs = 0;
1590 	int len = 0;
1591 
1592 	if (!fdt) {
1593 		DMSG("No fdt");
1594 		goto err;
1595 	}
1596 
1597 	rc = fdt_check_header(fdt);
1598 	if (rc) {
1599 		DMSG("Bad fdt: %d", rc);
1600 		goto err;
1601 	}
1602 
1603 	offs = fdt_path_offset(fdt, "/secure-chosen");
1604 	if (offs < 0) {
1605 		DMSG("Cannot find /secure-chosen");
1606 		goto err;
1607 	}
1608 	seed = fdt_getprop(fdt, offs, "kaslr-seed", &len);
1609 	if (!seed || len != sizeof(*seed)) {
1610 		DMSG("Cannot find valid kaslr-seed");
1611 		goto err;
1612 	}
1613 
1614 	return fdt64_to_cpu(*seed);
1615 
1616 err:
1617 	/* Try platform implementation */
1618 	return plat_get_aslr_seed();
1619 }
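
/*
 * Illustrative /secure-chosen node carrying the seed (the 64-bit value is
 * made up):
 *
 *	secure-chosen {
 *		kaslr-seed = <0x01234567 0x89abcdef>;
 *	};
 */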
1620 #else /*!CFG_DT*/
1621 unsigned long __weak get_aslr_seed(void *fdt __unused)
1622 {
1623 	/* Try platform implementation */
1624 	return plat_get_aslr_seed();
1625 }
1626 #endif /*!CFG_DT*/
1627 #endif /*CFG_CORE_ASLR*/
1628