xref: /optee_os/core/arch/arm/kernel/boot.c (revision 98ada65e9e6db4ac1b8c5bd3faa7a398ee410f7e)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2015-2022, Linaro Limited
4  */
5 
6 #include <arm.h>
7 #include <assert.h>
8 #include <compiler.h>
9 #include <config.h>
10 #include <console.h>
11 #include <crypto/crypto.h>
12 #include <drivers/gic.h>
13 #include <initcall.h>
14 #include <inttypes.h>
15 #include <keep.h>
16 #include <kernel/asan.h>
17 #include <kernel/boot.h>
18 #include <kernel/linker.h>
19 #include <kernel/misc.h>
20 #include <kernel/panic.h>
21 #include <kernel/tee_misc.h>
22 #include <kernel/thread.h>
23 #include <kernel/tpm.h>
24 #include <libfdt.h>
25 #include <malloc.h>
26 #include <memtag.h>
27 #include <mm/core_memprot.h>
28 #include <mm/core_mmu.h>
29 #include <mm/fobj.h>
30 #include <mm/tee_mm.h>
31 #include <mm/tee_pager.h>
32 #include <sm/psci.h>
33 #include <stdio.h>
34 #include <trace.h>
35 #include <utee_defines.h>
36 #include <util.h>
37 
38 #include <platform_config.h>
39 
40 #if !defined(CFG_WITH_ARM_TRUSTED_FW)
41 #include <sm/sm.h>
42 #endif
43 
44 #if defined(CFG_WITH_VFP)
45 #include <kernel/vfp.h>
46 #endif
47 
48 /*
49  * In this file we're using unsigned long to represent physical pointers as
50  * they are received in a single register when OP-TEE is initially entered.
51  * This limits 32-bit systems to only make use of the lower 32 bits
52  * of a physical address for initial parameters.
53  *
54  * 64-bit systems on the other hand can use full 64-bit physical pointers.
55  */
56 #define PADDR_INVALID		ULONG_MAX
57 
58 #if defined(CFG_BOOT_SECONDARY_REQUEST)
59 struct ns_entry_context {
60 	uintptr_t entry_point;
61 	uintptr_t context_id;
62 };
63 struct ns_entry_context ns_entry_contexts[CFG_TEE_CORE_NB_CORE];
64 static uint32_t spin_table[CFG_TEE_CORE_NB_CORE];
65 #endif
66 
67 #ifdef CFG_BOOT_SYNC_CPU
68 /*
69  * Array used during boot to synchronize the CPUs.
70  * An entry is 0 while the corresponding CPU has not started yet,
71  * and 1 once it has started.
72  */
73 uint32_t sem_cpu_sync[CFG_TEE_CORE_NB_CORE];
74 DECLARE_KEEP_PAGER(sem_cpu_sync);
75 #endif
76 
77 #ifdef CFG_DT
78 struct dt_descriptor {
79 	void *blob;
80 #ifdef _CFG_USE_DTB_OVERLAY
81 	int frag_id;
82 #endif
83 };
84 
85 static struct dt_descriptor external_dt __nex_bss;
86 #endif
87 
88 #ifdef CFG_SECONDARY_INIT_CNTFRQ
89 static uint32_t cntfrq;
90 #endif
91 
92 /* May be overridden in plat-$(PLATFORM)/main.c */
93 __weak void plat_primary_init_early(void)
94 {
95 }
96 DECLARE_KEEP_PAGER(plat_primary_init_early);
97 
98 /* May be overridden in plat-$(PLATFORM)/main.c */
99 __weak void main_init_gic(void)
100 {
101 }
102 
103 /* May be overridden in plat-$(PLATFORM)/main.c */
104 __weak void main_secondary_init_gic(void)
105 {
106 }
107 
108 /* May be overridden in plat-$(PLATFORM)/main.c */
109 __weak unsigned long plat_get_aslr_seed(void)
110 {
111 	DMSG("Warning: no ASLR seed");
112 
113 	return 0;
114 }
115 
116 /*
117  * This function is called as a guard after each SMC call that is not
118  * supposed to return.
119  */
120 void __panic_at_smc_return(void)
121 {
122 	panic();
123 }
124 
125 #if defined(CFG_WITH_ARM_TRUSTED_FW)
126 void init_sec_mon(unsigned long nsec_entry __maybe_unused)
127 {
128 	assert(nsec_entry == PADDR_INVALID);
129 	/* Do nothing as we don't have a secure monitor */
130 }
131 #else
132 /* May be overridden in plat-$(PLATFORM)/main.c */
133 __weak void init_sec_mon(unsigned long nsec_entry)
134 {
135 	struct sm_nsec_ctx *nsec_ctx;
136 
137 	assert(nsec_entry != PADDR_INVALID);
138 
139 	/* Initialize secure monitor */
140 	nsec_ctx = sm_get_nsec_ctx();
141 	nsec_ctx->mon_lr = nsec_entry;
142 	nsec_ctx->mon_spsr = CPSR_MODE_SVC | CPSR_I;
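	/*
	 * A set bit 0 in the entry address indicates a Thumb entry point
	 * (standard ARM interworking), so reflect that in the SPSR the
	 * monitor will restore on exit to the normal world.
	 */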
143 	if (nsec_entry & 1)
144 		nsec_ctx->mon_spsr |= CPSR_T;
145 }
146 #endif
147 
148 #if defined(CFG_WITH_ARM_TRUSTED_FW)
149 static void init_vfp_nsec(void)
150 {
151 }
152 #else
153 static void init_vfp_nsec(void)
154 {
155 	/* Normal world can use CP10 and CP11 (SIMD/VFP) */
156 	write_nsacr(read_nsacr() | NSACR_CP10 | NSACR_CP11);
157 }
158 #endif
159 
160 #if defined(CFG_WITH_VFP)
161 
162 #ifdef ARM32
163 static void init_vfp_sec(void)
164 {
165 	uint32_t cpacr = read_cpacr();
166 
167 	/*
168 	 * Enable Advanced SIMD functionality.
169 	 * Enable use of D16-D31 of the Floating-point Extension register
170 	 * file.
171 	 */
172 	cpacr &= ~(CPACR_ASEDIS | CPACR_D32DIS);
173 	/*
174 	 * Enable usage of CP10 and CP11 (SIMD/VFP) in both kernel and user
175 	 * mode.
176 	 */
177 	cpacr |= CPACR_CP(10, CPACR_CP_ACCESS_FULL);
178 	cpacr |= CPACR_CP(11, CPACR_CP_ACCESS_FULL);
179 	write_cpacr(cpacr);
180 }
181 #endif /* ARM32 */
182 
183 #ifdef ARM64
184 static void init_vfp_sec(void)
185 {
186 	/* Not using VFP until thread_kernel_enable_vfp() */
187 	vfp_disable();
188 }
189 #endif /* ARM64 */
190 
191 #else /* CFG_WITH_VFP */
192 
193 static void init_vfp_sec(void)
194 {
195 	/* Not using VFP */
196 }
197 #endif
198 
199 #ifdef CFG_SECONDARY_INIT_CNTFRQ
200 static void primary_save_cntfrq(void)
201 {
202 	assert(cntfrq == 0);
203 
204 	/*
205 	 * CNTFRQ should be initialized on the primary CPU by a
206 	 * previous boot stage
207 	 */
208 	cntfrq = read_cntfrq();
209 }
210 
211 static void secondary_init_cntfrq(void)
212 {
213 	assert(cntfrq != 0);
214 	write_cntfrq(cntfrq);
215 }
216 #else /* CFG_SECONDARY_INIT_CNTFRQ */
217 static void primary_save_cntfrq(void)
218 {
219 }
220 
221 static void secondary_init_cntfrq(void)
222 {
223 }
224 #endif
225 
226 #ifdef CFG_CORE_SANITIZE_KADDRESS
227 static void init_run_constructors(void)
228 {
229 	const vaddr_t *ctor;
230 
231 	for (ctor = &__ctor_list; ctor < &__ctor_end; ctor++)
232 		((void (*)(void))(*ctor))();
233 }
234 
235 static void init_asan(void)
236 {
237 
238 	/*
239 	 * CFG_ASAN_SHADOW_OFFSET is also supplied as
240 	 * -fasan-shadow-offset=$(CFG_ASAN_SHADOW_OFFSET) to the compiler.
241 	 * Since all the needed values to calculate the value of
242 	 * Since all the values needed to calculate CFG_ASAN_SHADOW_OFFSET
243 	 * aren't available to make, we calculate it in advance and
244 	 * hard code it into the platform conf.mk. Here, where we do have
245 	 * all the needed values, we double check that the compiler was
246 	 * supplied the correct value.
247 
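	/*
	 * A sketch of the arithmetic, assuming the usual ASan 1:8 shadow
	 * mapping: shadow(addr) = addr / 8 + CFG_ASAN_SHADOW_OFFSET, so
	 * 8/9 of TEE_RAM_VA_SIZE holds regular memory and the last 1/9
	 * holds its shadow, which is what the expected shadow start below
	 * encodes.
	 */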
248 #define __ASAN_SHADOW_START \
249 	ROUNDUP(TEE_RAM_VA_START + (TEE_RAM_VA_SIZE * 8) / 9 - 8, 8)
250 	assert(__ASAN_SHADOW_START == (vaddr_t)&__asan_shadow_start);
251 #define __CFG_ASAN_SHADOW_OFFSET \
252 	(__ASAN_SHADOW_START - (TEE_RAM_VA_START / 8))
253 	COMPILE_TIME_ASSERT(CFG_ASAN_SHADOW_OFFSET == __CFG_ASAN_SHADOW_OFFSET);
254 #undef __ASAN_SHADOW_START
255 #undef __CFG_ASAN_SHADOW_OFFSET
256 
257 	/*
258 	 * Assign the region covered by the shadow memory: everything from
259 	 * the start of the core text up to the beginning of the shadow area.
260 	 */
261 	asan_set_shadowed((void *)TEE_TEXT_VA_START, &__asan_shadow_start);
262 
263 	/*
264 	 * Add access to areas that aren't opened automatically by a
265 	 * constructor.
266 	 */
267 	asan_tag_access(&__ctor_list, &__ctor_end);
268 	asan_tag_access(__rodata_start, __rodata_end);
269 #ifdef CFG_WITH_PAGER
270 	asan_tag_access(__pageable_start, __pageable_end);
271 #endif /*CFG_WITH_PAGER*/
272 	asan_tag_access(__nozi_start, __nozi_end);
273 	asan_tag_access(__exidx_start, __exidx_end);
274 	asan_tag_access(__extab_start, __extab_end);
275 
276 	init_run_constructors();
277 
278 	/* Everything is tagged correctly, let's start address sanitizing. */
279 	asan_start();
280 }
281 #else /*CFG_CORE_SANITIZE_KADDRESS*/
282 static void init_asan(void)
283 {
284 }
285 #endif /*CFG_CORE_SANITIZE_KADDRESS*/
286 
287 #if defined(CFG_MEMTAG)
288 /* Called from entry_a64.S only when MEMTAG is configured */
289 void boot_init_memtag(void)
290 {
291 	memtag_init_ops(feat_mte_implemented());
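	/*
	 * Pick MTE-backed or no-op tag operations depending on whether
	 * FEAT_MTE is implemented, then reset the allocation tags (tag 0)
	 * covering all of TEE core RAM.
	 */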
292 	memtag_set_tags((void *)TEE_RAM_START, TEE_RAM_PH_SIZE, 0);
293 }
294 #endif
295 
296 #ifdef CFG_WITH_PAGER
297 
298 #ifdef CFG_CORE_SANITIZE_KADDRESS
299 static void carve_out_asan_mem(tee_mm_pool_t *pool)
300 {
301 	const size_t s = pool->hi - pool->lo;
302 	tee_mm_entry_t *mm;
303 	paddr_t apa = ASAN_MAP_PA;
304 	size_t asz = ASAN_MAP_SZ;
305 
306 	if (core_is_buffer_outside(apa, asz, pool->lo, s))
307 		return;
308 
309 	/* Reserve the shadow area */
310 	if (!core_is_buffer_inside(apa, asz, pool->lo, s)) {
311 		if (apa < pool->lo) {
312 			/*
313 			 * ASAN buffer is overlapping with the beginning of
314 			 * the pool.
315 			 */
316 			asz -= pool->lo - apa;
317 			apa = pool->lo;
318 		} else {
319 			/*
320 			 * ASAN buffer is overlapping with the end of the
321 			 * pool.
322 			 */
323 			asz = pool->hi - apa;
324 		}
325 	}
326 	mm = tee_mm_alloc2(pool, apa, asz);
327 	assert(mm);
328 }
329 #else
330 static void carve_out_asan_mem(tee_mm_pool_t *pool __unused)
331 {
332 }
333 #endif
334 
335 static void print_pager_pool_size(void)
336 {
337 	struct tee_pager_stats __maybe_unused stats;
338 
339 	tee_pager_get_stats(&stats);
340 	IMSG("Pager pool size: %zukB",
341 		stats.npages_all * SMALL_PAGE_SIZE / 1024);
342 }
343 
344 static void init_vcore(tee_mm_pool_t *mm_vcore)
345 {
346 	const vaddr_t begin = VCORE_START_VA;
347 	size_t size = TEE_RAM_VA_SIZE;
348 
349 #ifdef CFG_CORE_SANITIZE_KADDRESS
350 	/* Carve out ASAN memory, flat mapped after core memory */
351 	if (begin + size > ASAN_SHADOW_PA)
352 		size = ASAN_MAP_PA - begin;
353 #endif
354 
355 	if (!tee_mm_init(mm_vcore, begin, size, SMALL_PAGE_SHIFT,
356 			 TEE_MM_POOL_NO_FLAGS))
357 		panic("tee_mm_vcore init failed");
358 }
359 
360 /*
361  * With CFG_CORE_ASLR=y the init part is relocated very early during boot.
362  * The init part is also paged just as the rest of the normal paged code, with
363  * the difference that it's preloaded during boot. When the backing store
364  * is configured, the entire paged binary, including the init part, is
365  * copied into place. Since the init part has been relocated (references to
366  * addresses updated to compensate for the new load address) this has to be
367  * undone for the hashes of those pages to match with the original binary.
368  *
369  * If CFG_CORE_ASLR=n, nothing needs to be done as the code/ro pages are
370  * unchanged.
371  */
372 static void undo_init_relocation(uint8_t *paged_store __maybe_unused)
373 {
374 #ifdef CFG_CORE_ASLR
375 	unsigned long *ptr = NULL;
376 	const uint32_t *reloc = NULL;
377 	const uint32_t *reloc_end = NULL;
378 	unsigned long offs = boot_mmu_config.load_offset;
379 	const struct boot_embdata *embdata = (const void *)__init_end;
380 	vaddr_t addr_end = (vaddr_t)__init_end - offs - TEE_RAM_START;
381 	vaddr_t addr_start = (vaddr_t)__init_start - offs - TEE_RAM_START;
382 
383 	reloc = (const void *)((vaddr_t)embdata + embdata->reloc_offset);
384 	reloc_end = reloc + embdata->reloc_len / sizeof(*reloc);
385 
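	/*
	 * Each relocation entry is the offset, relative to the original
	 * (unrelocated) load address, of a pointer that had the ASLR load
	 * offset added during early relocation. Subtract the offset again
	 * for the entries that fall inside the init part.
	 */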
386 	for (; reloc < reloc_end; reloc++) {
387 		if (*reloc < addr_start)
388 			continue;
389 		if (*reloc >= addr_end)
390 			break;
391 		ptr = (void *)(paged_store + *reloc - addr_start);
392 		*ptr -= offs;
393 	}
394 #endif
395 }
396 
397 static struct fobj *ro_paged_alloc(tee_mm_entry_t *mm, void *hashes,
398 				   void *store)
399 {
400 	const unsigned int num_pages = tee_mm_get_bytes(mm) / SMALL_PAGE_SIZE;
401 #ifdef CFG_CORE_ASLR
402 	unsigned int reloc_offs = (vaddr_t)__pageable_start - VCORE_START_VA;
403 	const struct boot_embdata *embdata = (const void *)__init_end;
404 	const void *reloc = __init_end + embdata->reloc_offset;
405 
406 	return fobj_ro_reloc_paged_alloc(num_pages, hashes, reloc_offs,
407 					 reloc, embdata->reloc_len, store);
408 #else
409 	return fobj_ro_paged_alloc(num_pages, hashes, store);
410 #endif
411 }
412 
413 static void init_runtime(unsigned long pageable_part)
414 {
415 	size_t n;
416 	size_t init_size = (size_t)(__init_end - __init_start);
417 	size_t pageable_start = (size_t)__pageable_start;
418 	size_t pageable_end = (size_t)__pageable_end;
419 	size_t pageable_size = pageable_end - pageable_start;
420 	vaddr_t tzsram_end = TZSRAM_BASE + TZSRAM_SIZE - TEE_LOAD_ADDR +
421 			     VCORE_START_VA;
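	/* One SHA-256 hash is stored for each small page of the pageable area */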
422 	size_t hash_size = (pageable_size / SMALL_PAGE_SIZE) *
423 			   TEE_SHA256_HASH_SIZE;
424 	const struct boot_embdata *embdata = (const void *)__init_end;
425 	const void *tmp_hashes = NULL;
426 	tee_mm_entry_t *mm = NULL;
427 	struct fobj *fobj = NULL;
428 	uint8_t *paged_store = NULL;
429 	uint8_t *hashes = NULL;
430 
431 	assert(pageable_size % SMALL_PAGE_SIZE == 0);
432 	assert(embdata->total_len >= embdata->hashes_offset +
433 				     embdata->hashes_len);
434 	assert(hash_size == embdata->hashes_len);
435 
436 	tmp_hashes = __init_end + embdata->hashes_offset;
437 
438 	init_asan();
439 
440 	/* Add heap2 first as heap1 may be too small as initial bget pool */
441 	malloc_add_pool(__heap2_start, __heap2_end - __heap2_start);
442 	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
443 
444 	/*
445 	 * This needs to be initialized early to support address lookup
446 	 * in MEM_AREA_TEE_RAM
447 	 */
448 	tee_pager_early_init();
449 
450 	hashes = malloc(hash_size);
451 	IMSG_RAW("\n");
452 	IMSG("Pager is enabled. Hashes: %zu bytes", hash_size);
453 	assert(hashes);
454 	asan_memcpy_unchecked(hashes, tmp_hashes, hash_size);
455 
456 	/*
457 	 * Need tee_mm_sec_ddr initialized to be able to allocate secure
458 	 * DDR below.
459 	 */
460 	core_mmu_init_ta_ram();
461 
462 	carve_out_asan_mem(&tee_mm_sec_ddr);
463 
464 	mm = tee_mm_alloc(&tee_mm_sec_ddr, pageable_size);
465 	assert(mm);
466 	paged_store = phys_to_virt(tee_mm_get_smem(mm), MEM_AREA_TA_RAM,
467 				   pageable_size);
468 	/*
469 	 * Load pageable part in the dedicated allocated area:
470 	 * - Move pageable non-init part into pageable area. Note that the
471 	 *   bootloader may have loaded it anywhere in TA RAM, hence memmove().
472 	 * - Copy pageable init part from current location into pageable area.
473 	 */
474 	memmove(paged_store + init_size,
475 		phys_to_virt(pageable_part,
476 			     core_mmu_get_type_by_pa(pageable_part),
477 			     __pageable_part_end - __pageable_part_start),
478 		__pageable_part_end - __pageable_part_start);
479 	asan_memcpy_unchecked(paged_store, __init_start, init_size);
480 	/*
481 	 * Undo any relocation of the init part so the hash checks
482 	 * can pass.
483 	 */
484 	undo_init_relocation(paged_store);
485 
486 	/* Check that the hashes of what's in the pageable area are OK */
487 	DMSG("Checking hashes of pageable area");
488 	for (n = 0; (n * SMALL_PAGE_SIZE) < pageable_size; n++) {
489 		const uint8_t *hash = hashes + n * TEE_SHA256_HASH_SIZE;
490 		const uint8_t *page = paged_store + n * SMALL_PAGE_SIZE;
491 		TEE_Result res;
492 
493 		DMSG("hash pg_idx %zu hash %p page %p", n, hash, page);
494 		res = hash_sha256_check(hash, page, SMALL_PAGE_SIZE);
495 		if (res != TEE_SUCCESS) {
496 			EMSG("Hash failed for page %zu at %p: res 0x%x",
497 			     n, (void *)page, res);
498 			panic();
499 		}
500 	}
501 
502 	/*
503 	 * Assert that the prepaged init sections are page aligned so that
504 	 * nothing trails uninitialized at the end of the premapped init area.
505 	 */
506 	assert(!(init_size & SMALL_PAGE_MASK));
507 
508 	/*
509 	 * Initialize the virtual memory pool used for main_mmu_l2_ttb which
510 	 * is supplied to tee_pager_init() below.
511 	 */
512 	init_vcore(&tee_mm_vcore);
513 
514 	/*
515 	 * Assign the pager alias area at the end of the small page block
516 	 * that the rest of the binary is loaded into. We're taking more
517 	 * than needed, but we're guaranteed to not need more than the
518 	 * physical amount of TZSRAM.
519 	 */
520 	mm = tee_mm_alloc2(&tee_mm_vcore,
521 			   (vaddr_t)tee_mm_vcore.lo +
522 			   tee_mm_vcore.size - TZSRAM_SIZE,
523 			   TZSRAM_SIZE);
524 	assert(mm);
525 	tee_pager_set_alias_area(mm);
526 
527 	/*
528 	 * Claim virtual memory which isn't paged.
529 	 * Linear memory (flat-mapped core memory) ends there.
530 	 */
531 	mm = tee_mm_alloc2(&tee_mm_vcore, VCORE_UNPG_RX_PA,
532 			   (vaddr_t)(__pageable_start - VCORE_UNPG_RX_PA));
533 	assert(mm);
534 
535 	/*
536 	 * Allocate virtual memory for the pageable area and let the pager
537 	 * take charge of all the pages already assigned to that memory.
538 	 */
539 	mm = tee_mm_alloc2(&tee_mm_vcore, (vaddr_t)__pageable_start,
540 			   pageable_size);
541 	assert(mm);
542 	fobj = ro_paged_alloc(mm, hashes, paged_store);
543 	assert(fobj);
544 	tee_pager_add_core_region(tee_mm_get_smem(mm), PAGED_REGION_TYPE_RO,
545 				  fobj);
546 	fobj_put(fobj);
547 
548 	tee_pager_add_pages(pageable_start, init_size / SMALL_PAGE_SIZE, false);
549 	tee_pager_add_pages(pageable_start + init_size,
550 			    (pageable_size - init_size) / SMALL_PAGE_SIZE,
551 			    true);
552 	if (pageable_end < tzsram_end)
553 		tee_pager_add_pages(pageable_end, (tzsram_end - pageable_end) /
554 						   SMALL_PAGE_SIZE, true);
555 
556 	/*
557 	 * There may be physical pages in TZSRAM before the core load address.
558 	 * These pages can be added to the physical pages pool of the pager.
559 	 * This setup may happen when the secure bootloader runs in TZSRAM
560 	 * and its memory can be reused by OP-TEE once boot stages complete.
561 	 */
562 	tee_pager_add_pages(tee_mm_vcore.lo,
563 			(VCORE_UNPG_RX_PA - tee_mm_vcore.lo) / SMALL_PAGE_SIZE,
564 			true);
565 
566 	print_pager_pool_size();
567 }
568 #else
569 
570 static void init_runtime(unsigned long pageable_part __unused)
571 {
572 	init_asan();
573 
574 	/*
575 	 * By default the whole of OP-TEE uses malloc, so we need to initialize
576 	 * it early. But, when virtualization is enabled, malloc is used
577 	 * only by TEE runtime, so malloc should be initialized later, for
578 	 * every virtual partition separately. Core code uses nex_malloc
579 	 * instead.
580 	 */
581 #ifdef CFG_VIRTUALIZATION
582 	nex_malloc_add_pool(__nex_heap_start, __nex_heap_end -
583 					      __nex_heap_start);
584 #else
585 	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
586 #endif
587 
588 	IMSG_RAW("\n");
589 }
590 #endif
591 
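/*
 * Prefer the DT embedded in the TEE binary; fall back to the external
 * (boot loader provided) DT when no embedded DT is configured.
 */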
592 void *get_dt(void)
593 {
594 	void *fdt = get_embedded_dt();
595 
596 	if (!fdt)
597 		fdt = get_external_dt();
598 
599 	return fdt;
600 }
601 
602 void *get_secure_dt(void)
603 {
604 	void *fdt = get_embedded_dt();
605 
606 	if (!fdt && IS_ENABLED(CFG_MAP_EXT_DT_SECURE))
607 		fdt = get_external_dt();
608 
609 	return fdt;
610 }
611 
612 #if defined(CFG_EMBED_DTB)
613 void *get_embedded_dt(void)
614 {
615 	static bool checked;
616 
617 	assert(cpu_mmu_enabled());
618 
619 	if (!checked) {
620 		IMSG("Embedded DTB found");
621 
622 		if (fdt_check_header(embedded_secure_dtb))
623 			panic("Invalid embedded DTB");
624 
625 		checked = true;
626 	}
627 
628 	return embedded_secure_dtb;
629 }
630 #else
631 void *get_embedded_dt(void)
632 {
633 	return NULL;
634 }
635 #endif /*CFG_EMBED_DTB*/
636 
637 #if defined(CFG_DT)
638 void *get_external_dt(void)
639 {
640 	if (!IS_ENABLED(CFG_EXTERNAL_DT))
641 		return NULL;
642 
643 	assert(cpu_mmu_enabled());
644 	return external_dt.blob;
645 }
646 
647 static TEE_Result release_external_dt(void)
648 {
649 	int ret = 0;
650 
651 	if (!IS_ENABLED(CFG_EXTERNAL_DT))
652 		return TEE_SUCCESS;
653 
654 	if (!external_dt.blob)
655 		return TEE_SUCCESS;
656 
657 	ret = fdt_pack(external_dt.blob);
658 	if (ret < 0) {
659 		EMSG("Failed to pack Device Tree at 0x%" PRIxPA ": error %d",
660 		     virt_to_phys(external_dt.blob), ret);
661 		panic();
662 	}
663 
664 	if (core_mmu_remove_mapping(MEM_AREA_EXT_DT, external_dt.blob,
665 				    CFG_DTB_MAX_SIZE))
666 		panic("Failed to remove temporary Device Tree mapping");
667 
668 	/* External DTB is no longer reachable, reset pointer to invalid */
669 	external_dt.blob = NULL;
670 
671 	return TEE_SUCCESS;
672 }
673 boot_final(release_external_dt);
674 
675 #ifdef _CFG_USE_DTB_OVERLAY
676 static int add_dt_overlay_fragment(struct dt_descriptor *dt, int ioffs)
677 {
678 	char frag[32];
679 	int offs;
680 	int ret;
681 
682 	snprintf(frag, sizeof(frag), "fragment@%d", dt->frag_id);
683 	offs = fdt_add_subnode(dt->blob, ioffs, frag);
684 	if (offs < 0)
685 		return offs;
686 
687 	dt->frag_id += 1;
688 
689 	ret = fdt_setprop_string(dt->blob, offs, "target-path", "/");
690 	if (ret < 0)
691 		return -1;
692 
693 	return fdt_add_subnode(dt->blob, offs, "__overlay__");
694 }
695 
696 static int init_dt_overlay(struct dt_descriptor *dt, int __maybe_unused dt_size)
697 {
698 	int fragment;
699 
700 	if (IS_ENABLED(CFG_EXTERNAL_DTB_OVERLAY)) {
701 		if (!fdt_check_header(dt->blob)) {
702 			fdt_for_each_subnode(fragment, dt->blob, 0)
703 				dt->frag_id += 1;
704 			return 0;
705 		}
706 	}
707 
708 	return fdt_create_empty_tree(dt->blob, dt_size);
709 }
710 #else
711 static int add_dt_overlay_fragment(struct dt_descriptor *dt __unused, int offs)
712 {
713 	return offs;
714 }
715 
716 static int init_dt_overlay(struct dt_descriptor *dt __unused,
717 			   int dt_size __unused)
718 {
719 	return 0;
720 }
721 #endif /* _CFG_USE_DTB_OVERLAY */
722 
723 static int add_dt_path_subnode(struct dt_descriptor *dt, const char *path,
724 			       const char *subnode)
725 {
726 	int offs;
727 
728 	offs = fdt_path_offset(dt->blob, path);
729 	if (offs < 0)
730 		return -1;
731 	offs = add_dt_overlay_fragment(dt, offs);
732 	if (offs < 0)
733 		return -1;
734 	offs = fdt_add_subnode(dt->blob, offs, subnode);
735 	if (offs < 0)
736 		return -1;
737 	return offs;
738 }
739 
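/*
 * Illustrative sketch of the node added below; SPI_NUM is a placeholder
 * and the interrupts property is only added with async notifications
 * enabled:
 *
 *	firmware {
 *		optee {
 *			compatible = "linaro,optee-tz";
 *			method = "smc";
 *			interrupts = <0 SPI_NUM 1>;
 *		};
 *	};
 */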
740 static int add_optee_dt_node(struct dt_descriptor *dt)
741 {
742 	int offs;
743 	int ret;
744 
745 	if (fdt_path_offset(dt->blob, "/firmware/optee") >= 0) {
746 		DMSG("OP-TEE Device Tree node already exists!");
747 		return 0;
748 	}
749 
750 	offs = fdt_path_offset(dt->blob, "/firmware");
751 	if (offs < 0) {
752 		offs = add_dt_path_subnode(dt, "/", "firmware");
753 		if (offs < 0)
754 			return -1;
755 	}
756 
757 	offs = fdt_add_subnode(dt->blob, offs, "optee");
758 	if (offs < 0)
759 		return -1;
760 
761 	ret = fdt_setprop_string(dt->blob, offs, "compatible",
762 				 "linaro,optee-tz");
763 	if (ret < 0)
764 		return -1;
765 	ret = fdt_setprop_string(dt->blob, offs, "method", "smc");
766 	if (ret < 0)
767 		return -1;
768 	if (CFG_CORE_ASYNC_NOTIF_GIC_INTID) {
769 		/*
770 		 * The format of the interrupt property is defined by the
771 		 * binding of the interrupt domain root. In this case it's
772 		 * an Arm GIC v1, v2 or v3 so we must be compatible with
773 		 * these.
774 		 *
775 		 * An SPI type of interrupt is indicated with a 0 in the
776 		 * first cell.
777 		 *
778 		 * The interrupt number goes in the second cell where
779 		 * SPIs range from 0 to 987.
780 		 *
781 		 * Flags are passed in the third cell where a 1 means edge
782 		 * triggered.
783 		 */
784 		const uint32_t gic_spi = 0;
785 		const uint32_t irq_type_edge = 1;
786 		uint32_t val[] = {
787 			TEE_U32_TO_BIG_ENDIAN(gic_spi),
788 			TEE_U32_TO_BIG_ENDIAN(CFG_CORE_ASYNC_NOTIF_GIC_INTID -
789 					      GIC_SPI_BASE),
790 			TEE_U32_TO_BIG_ENDIAN(irq_type_edge),
791 		};
792 
793 		ret = fdt_setprop(dt->blob, offs, "interrupts", val,
794 				  sizeof(val));
795 		if (ret < 0)
796 			return -1;
797 	}
798 	return 0;
799 }
800 
801 #ifdef CFG_PSCI_ARM32
802 static int append_psci_compatible(void *fdt, int offs, const char *str)
803 {
804 	return fdt_appendprop(fdt, offs, "compatible", str, strlen(str) + 1);
805 }
806 
807 static int dt_add_psci_node(struct dt_descriptor *dt)
808 {
809 	int offs;
810 
811 	if (fdt_path_offset(dt->blob, "/psci") >= 0) {
812 		DMSG("PSCI Device Tree node already exists!");
813 		return 0;
814 	}
815 
816 	offs = add_dt_path_subnode(dt, "/", "psci");
817 	if (offs < 0)
818 		return -1;
819 	if (append_psci_compatible(dt->blob, offs, "arm,psci-1.0"))
820 		return -1;
821 	if (append_psci_compatible(dt->blob, offs, "arm,psci-0.2"))
822 		return -1;
823 	if (append_psci_compatible(dt->blob, offs, "arm,psci"))
824 		return -1;
825 	if (fdt_setprop_string(dt->blob, offs, "method", "smc"))
826 		return -1;
827 	if (fdt_setprop_u32(dt->blob, offs, "cpu_suspend", PSCI_CPU_SUSPEND))
828 		return -1;
829 	if (fdt_setprop_u32(dt->blob, offs, "cpu_off", PSCI_CPU_OFF))
830 		return -1;
831 	if (fdt_setprop_u32(dt->blob, offs, "cpu_on", PSCI_CPU_ON))
832 		return -1;
833 	if (fdt_setprop_u32(dt->blob, offs, "sys_poweroff", PSCI_SYSTEM_OFF))
834 		return -1;
835 	if (fdt_setprop_u32(dt->blob, offs, "sys_reset", PSCI_SYSTEM_RESET))
836 		return -1;
837 	return 0;
838 }
839 
840 static int check_node_compat_prefix(struct dt_descriptor *dt, int offs,
841 				    const char *prefix)
842 {
843 	const size_t prefix_len = strlen(prefix);
844 	size_t l;
845 	int plen;
846 	const char *prop;
847 
848 	prop = fdt_getprop(dt->blob, offs, "compatible", &plen);
849 	if (!prop)
850 		return -1;
851 
852 	while (plen > 0) {
853 		if (memcmp(prop, prefix, prefix_len) == 0)
854 			return 0; /* match */
855 
856 		l = strlen(prop) + 1;
857 		prop += l;
858 		plen -= l;
859 	}
860 
861 	return -1;
862 }
863 
864 static int dt_add_psci_cpu_enable_methods(struct dt_descriptor *dt)
865 {
866 	int offs = 0;
867 
868 	while (1) {
869 		offs = fdt_next_node(dt->blob, offs, NULL);
870 		if (offs < 0)
871 			break;
872 		if (fdt_getprop(dt->blob, offs, "enable-method", NULL))
873 			continue; /* already set */
874 		if (check_node_compat_prefix(dt, offs, "arm,cortex-a"))
875 			continue; /* no compatible */
876 		if (fdt_setprop_string(dt->blob, offs, "enable-method", "psci"))
877 			return -1;
878 		/* Need to restart scanning as offsets may have changed */
879 		offs = 0;
880 	}
881 	return 0;
882 }
883 
884 static int config_psci(struct dt_descriptor *dt)
885 {
886 	if (dt_add_psci_node(dt))
887 		return -1;
888 	return dt_add_psci_cpu_enable_methods(dt);
889 }
890 #else
891 static int config_psci(struct dt_descriptor *dt __unused)
892 {
893 	return 0;
894 }
895 #endif /*CFG_PSCI_ARM32*/
896 
897 static void set_dt_val(void *data, uint32_t cell_size, uint64_t val)
898 {
899 	if (cell_size == 1) {
900 		fdt32_t v = cpu_to_fdt32((uint32_t)val);
901 
902 		memcpy(data, &v, sizeof(v));
903 	} else {
904 		fdt64_t v = cpu_to_fdt64(val);
905 
906 		memcpy(data, &v, sizeof(v));
907 	}
908 }
909 
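/*
 * Illustrative sketch (hypothetical address, size and cell counts) of the
 * node created by add_res_mem_dt_node(dt, "optee_core", 0x0e100000,
 * 0x00f00000):
 *
 *	reserved-memory {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges;
 *		optee_core@e100000 {
 *			reg = <0x0e100000 0x00f00000>;
 *			no-map;
 *		};
 *	};
 */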
910 static int add_res_mem_dt_node(struct dt_descriptor *dt, const char *name,
911 			       paddr_t pa, size_t size)
912 {
913 	int offs = 0;
914 	int ret = 0;
915 	int addr_size = -1;
916 	int len_size = -1;
917 	bool found = true;
918 	char subnode_name[80] = { 0 };
919 
920 	offs = fdt_path_offset(dt->blob, "/reserved-memory");
921 
922 	if (offs < 0) {
923 		found = false;
924 		offs = 0;
925 	}
926 
927 	if (IS_ENABLED(_CFG_USE_DTB_OVERLAY)) {
928 		len_size = sizeof(paddr_t) / sizeof(uint32_t);
929 		addr_size = sizeof(paddr_t) / sizeof(uint32_t);
930 	} else {
931 		len_size = fdt_size_cells(dt->blob, offs);
932 		if (len_size < 0)
933 			return -1;
934 		addr_size = fdt_address_cells(dt->blob, offs);
935 		if (addr_size < 0)
936 			return -1;
937 	}
938 
939 	if (!found) {
940 		offs = add_dt_path_subnode(dt, "/", "reserved-memory");
941 		if (offs < 0)
942 			return -1;
943 		ret = fdt_setprop_cell(dt->blob, offs, "#address-cells",
944 				       addr_size);
945 		if (ret < 0)
946 			return -1;
947 		ret = fdt_setprop_cell(dt->blob, offs, "#size-cells", len_size);
948 		if (ret < 0)
949 			return -1;
950 		ret = fdt_setprop(dt->blob, offs, "ranges", NULL, 0);
951 		if (ret < 0)
952 			return -1;
953 	}
954 
955 	ret = snprintf(subnode_name, sizeof(subnode_name),
956 		       "%s@%" PRIxPA, name, pa);
957 	if (ret < 0 || ret >= (int)sizeof(subnode_name))
958 		DMSG("truncated node \"%s@%" PRIxPA"\"", name, pa);
959 	offs = fdt_add_subnode(dt->blob, offs, subnode_name);
960 	if (offs >= 0) {
961 		uint32_t data[FDT_MAX_NCELLS * 2];
962 
963 		set_dt_val(data, addr_size, pa);
964 		set_dt_val(data + addr_size, len_size, size);
965 		ret = fdt_setprop(dt->blob, offs, "reg", data,
966 				  sizeof(uint32_t) * (addr_size + len_size));
967 		if (ret < 0)
968 			return -1;
969 		ret = fdt_setprop(dt->blob, offs, "no-map", NULL, 0);
970 		if (ret < 0)
971 			return -1;
972 	} else {
973 		return -1;
974 	}
975 	return 0;
976 }
977 
978 #ifdef CFG_CORE_DYN_SHM
979 static uint64_t get_dt_val_and_advance(const void *data, size_t *offs,
980 				       uint32_t cell_size)
981 {
982 	uint64_t rv = 0;
983 
984 	if (cell_size == 1) {
985 		uint32_t v;
986 
987 		memcpy(&v, (const uint8_t *)data + *offs, sizeof(v));
988 		*offs += sizeof(v);
989 		rv = fdt32_to_cpu(v);
990 	} else {
991 		uint64_t v;
992 
993 		memcpy(&v, (const uint8_t *)data + *offs, sizeof(v));
994 		*offs += sizeof(v);
995 		rv = fdt64_to_cpu(v);
996 	}
997 
998 	return rv;
999 }
1000 
1001 /*
1002  * Find all non-secure memory from DT. Memory marked inaccessible by Secure
1003  * World is ignored since it cannot be mapped to be used as dynamic shared
1004  * memory.
1005  */
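/*
 * When "mem" is NULL only the number of (address, size) pairs is counted;
 * the caller first sizes its allocation that way and then fills the array
 * in a second pass.
 */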
1006 static int get_nsec_memory_helper(void *fdt, struct core_mmu_phys_mem *mem)
1007 {
1008 	const uint8_t *prop = NULL;
1009 	uint64_t a = 0;
1010 	uint64_t l = 0;
1011 	size_t prop_offs = 0;
1012 	size_t prop_len = 0;
1013 	int elems_total = 0;
1014 	int addr_size = 0;
1015 	int len_size = 0;
1016 	int offs = 0;
1017 	size_t n = 0;
1018 	int len = 0;
1019 
1020 	addr_size = fdt_address_cells(fdt, 0);
1021 	if (addr_size < 0)
1022 		return 0;
1023 
1024 	len_size = fdt_size_cells(fdt, 0);
1025 	if (len_size < 0)
1026 		return 0;
1027 
1028 	while (true) {
1029 		offs = fdt_node_offset_by_prop_value(fdt, offs, "device_type",
1030 						     "memory",
1031 						     sizeof("memory"));
1032 		if (offs < 0)
1033 			break;
1034 
1035 		if (_fdt_get_status(fdt, offs) != (DT_STATUS_OK_NSEC |
1036 						   DT_STATUS_OK_SEC))
1037 			continue;
1038 
1039 		prop = fdt_getprop(fdt, offs, "reg", &len);
1040 		if (!prop)
1041 			continue;
1042 
1043 		prop_len = len;
1044 		for (n = 0, prop_offs = 0; prop_offs < prop_len; n++) {
1045 			a = get_dt_val_and_advance(prop, &prop_offs, addr_size);
1046 			if (prop_offs >= prop_len) {
1047 				n--;
1048 				break;
1049 			}
1050 
1051 			l = get_dt_val_and_advance(prop, &prop_offs, len_size);
1052 			if (mem) {
1053 				mem->type = MEM_AREA_DDR_OVERALL;
1054 				mem->addr = a;
1055 				mem->size = l;
1056 				mem++;
1057 			}
1058 		}
1059 
1060 		elems_total += n;
1061 	}
1062 
1063 	return elems_total;
1064 }
1065 
1066 static struct core_mmu_phys_mem *get_nsec_memory(void *fdt, size_t *nelems)
1067 {
1068 	struct core_mmu_phys_mem *mem = NULL;
1069 	int elems_total = 0;
1070 
1071 	elems_total = get_nsec_memory_helper(fdt, NULL);
1072 	if (elems_total <= 0)
1073 		return NULL;
1074 
1075 	mem = nex_calloc(elems_total, sizeof(*mem));
1076 	if (!mem)
1077 		panic();
1078 
1079 	elems_total = get_nsec_memory_helper(fdt, mem);
1080 	assert(elems_total > 0);
1081 
1082 	*nelems = elems_total;
1083 
1084 	return mem;
1085 }
1086 #endif /*CFG_CORE_DYN_SHM*/
1087 
1088 #ifdef CFG_CORE_RESERVED_SHM
1089 static int mark_static_shm_as_reserved(struct dt_descriptor *dt)
1090 {
1091 	vaddr_t shm_start;
1092 	vaddr_t shm_end;
1093 
1094 	core_mmu_get_mem_by_type(MEM_AREA_NSEC_SHM, &shm_start, &shm_end);
1095 	if (shm_start != shm_end)
1096 		return add_res_mem_dt_node(dt, "optee_shm",
1097 					   virt_to_phys((void *)shm_start),
1098 					   shm_end - shm_start);
1099 
1100 	DMSG("No SHM configured");
1101 	return -1;
1102 }
1103 #endif /*CFG_CORE_RESERVED_SHM*/
1104 
1105 static void init_external_dt(unsigned long phys_dt)
1106 {
1107 	struct dt_descriptor *dt = &external_dt;
1108 	void *fdt;
1109 	int ret;
1110 
1111 	if (!IS_ENABLED(CFG_EXTERNAL_DT))
1112 		return;
1113 
1114 	if (!phys_dt) {
1115 		/*
1116 		 * No need to panic as we're not using the DT in OP-TEE
1117 		 * yet, we're only adding some nodes for normal world use.
1118 		 * This makes the switch to using DT easier as we can boot
1119 		 * a newer OP-TEE with older boot loaders. Once we start to
1120 		 * initialize devices based on DT we'll likely panic
1121 		 * instead of returning here.
1122 		 */
1123 		IMSG("No non-secure external DT");
1124 		return;
1125 	}
1126 
1127 	fdt = core_mmu_add_mapping(MEM_AREA_EXT_DT, phys_dt, CFG_DTB_MAX_SIZE);
1128 	if (!fdt)
1129 		panic("Failed to map external DTB");
1130 
1131 	dt->blob = fdt;
1132 
1133 	ret = init_dt_overlay(dt, CFG_DTB_MAX_SIZE);
1134 	if (ret < 0) {
1135 		EMSG("Device Tree Overlay init fail @ %#lx: error %d", phys_dt,
1136 		     ret);
1137 		panic();
1138 	}
1139 
1140 	ret = fdt_open_into(fdt, fdt, CFG_DTB_MAX_SIZE);
1141 	if (ret < 0) {
1142 		EMSG("Invalid Device Tree at %#lx: error %d", phys_dt, ret);
1143 		panic();
1144 	}
1145 
1146 	IMSG("Non-secure external DT found");
1147 }
1148 
1149 static int mark_tzdram_as_reserved(struct dt_descriptor *dt)
1150 {
1151 	return add_res_mem_dt_node(dt, "optee_core", CFG_TZDRAM_START,
1152 				   CFG_TZDRAM_SIZE);
1153 }
1154 
1155 static void update_external_dt(void)
1156 {
1157 	struct dt_descriptor *dt = &external_dt;
1158 
1159 	if (!IS_ENABLED(CFG_EXTERNAL_DT))
1160 		return;
1161 
1162 	if (!dt->blob)
1163 		return;
1164 
1165 	if (!IS_ENABLED(CFG_CORE_FFA) && add_optee_dt_node(dt))
1166 		panic("Failed to add OP-TEE Device Tree node");
1167 
1168 	if (config_psci(dt))
1169 		panic("Failed to config PSCI");
1170 
1171 #ifdef CFG_CORE_RESERVED_SHM
1172 	if (mark_static_shm_as_reserved(dt))
1173 		panic("Failed to config non-secure memory");
1174 #endif
1175 
1176 	if (mark_tzdram_as_reserved(dt))
1177 		panic("Failed to config secure memory");
1178 }
1179 #else /*CFG_DT*/
1180 void *get_external_dt(void)
1181 {
1182 	return NULL;
1183 }
1184 
1185 static void init_external_dt(unsigned long phys_dt __unused)
1186 {
1187 }
1188 
1189 static void update_external_dt(void)
1190 {
1191 }
1192 
1193 #ifdef CFG_CORE_DYN_SHM
1194 static struct core_mmu_phys_mem *get_nsec_memory(void *fdt __unused,
1195 						 size_t *nelems __unused)
1196 {
1197 	return NULL;
1198 }
1199 #endif /*CFG_CORE_DYN_SHM*/
1200 #endif /*!CFG_DT*/
1201 
1202 #ifdef CFG_CORE_DYN_SHM
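/*
 * Non-secure DDR usable for dynamic shared memory is taken from the
 * external DT when one is available, otherwise from the ranges the
 * platform registered with register_ddr() (or the deprecated
 * register_dynamic_shm()).
 */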
1203 static void discover_nsec_memory(void)
1204 {
1205 	struct core_mmu_phys_mem *mem;
1206 	const struct core_mmu_phys_mem *mem_begin = NULL;
1207 	const struct core_mmu_phys_mem *mem_end = NULL;
1208 	size_t nelems;
1209 	void *fdt = get_external_dt();
1210 
1211 	if (fdt) {
1212 		mem = get_nsec_memory(fdt, &nelems);
1213 		if (mem) {
1214 			core_mmu_set_discovered_nsec_ddr(mem, nelems);
1215 			return;
1216 		}
1217 
1218 		DMSG("No non-secure memory found in FDT");
1219 	}
1220 
1221 	mem_begin = phys_ddr_overall_begin;
1222 	mem_end = phys_ddr_overall_end;
1223 	nelems = mem_end - mem_begin;
1224 	if (nelems) {
1225 		/*
1226 		 * Platform cannot use both register_ddr() and the now
1227 		 * deprecated register_dynamic_shm().
1228 		 */
1229 		assert(phys_ddr_overall_compat_begin ==
1230 		       phys_ddr_overall_compat_end);
1231 	} else {
1232 		mem_begin = phys_ddr_overall_compat_begin;
1233 		mem_end = phys_ddr_overall_compat_end;
1234 		nelems = mem_end - mem_begin;
1235 		if (!nelems)
1236 			return;
1237 		DMSG("Warning: register_dynamic_shm() is deprecated, please use register_ddr() instead");
1238 	}
1239 
1240 	mem = nex_calloc(nelems, sizeof(*mem));
1241 	if (!mem)
1242 		panic();
1243 
1244 	memcpy(mem, phys_ddr_overall_begin, sizeof(*mem) * nelems);
1245 	core_mmu_set_discovered_nsec_ddr(mem, nelems);
1246 }
1247 #else /*CFG_CORE_DYN_SHM*/
1248 static void discover_nsec_memory(void)
1249 {
1250 }
1251 #endif /*!CFG_CORE_DYN_SHM*/
1252 
1253 #ifdef CFG_VIRTUALIZATION
1254 static TEE_Result virt_init_heap(void)
1255 {
1256 	/* We need to initialize pool for every virtual guest partition */
1257 	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
1258 
1259 	return TEE_SUCCESS;
1260 }
1261 preinit_early(virt_init_heap);
1262 #endif
1263 
1264 void init_tee_runtime(void)
1265 {
1266 #ifndef CFG_WITH_PAGER
1267 	/* Pager initializes TA RAM early */
1268 	core_mmu_init_ta_ram();
1269 #endif
1270 	/*
1271 	 * With virtualization we call this function when creating the
1272 	 * OP-TEE partition instead.
1273 	 */
1274 	if (!IS_ENABLED(CFG_VIRTUALIZATION))
1275 		call_preinitcalls();
1276 	call_initcalls();
1277 
1278 	/*
1279 	 * These two functions use crypto_rng_read() to initialize the
1280 	 * pauth keys. Once call_initcalls() returns we're guaranteed that
1281 	 * crypto_rng_read() is ready to be used.
1282 	 */
1283 	thread_init_core_local_pauth_keys();
1284 	thread_init_thread_pauth_keys();
1285 }
1286 
1287 static void init_primary(unsigned long pageable_part, unsigned long nsec_entry)
1288 {
1289 	thread_init_core_local_stacks();
1290 	/*
1291 	 * Mask asynchronous exceptions before switching to the thread vector
1292 	 * as the thread handler requires those to be masked while
1293 	 * executing with the temporary stack. The thread subsystem also
1294 	 * asserts that the foreign interrupts are blocked when using most of
1295 	 * its functions.
1296 	 */
1297 	thread_set_exceptions(THREAD_EXCP_ALL);
1298 	primary_save_cntfrq();
1299 	init_vfp_sec();
1300 	/*
1301 	 * Pager: init_runtime() calls thread_kernel_enable_vfp() so we must
1302 	 * set a current thread right now to avoid a chicken-and-egg problem
1303 	 * (thread_init_boot_thread() sets the current thread but needs
1304 	 * things set by init_runtime()).
1305 	 */
1306 	thread_get_core_local()->curr_thread = 0;
1307 	init_runtime(pageable_part);
1308 
1309 	if (IS_ENABLED(CFG_VIRTUALIZATION)) {
1310 		/*
1311 		 * Virtualization: We can't initialize threads right now because
1312 		 * threads belong to the "tee" part and will be initialized
1313 		 * separately for each new virtual guest. So, we'll clear
1314 		 * "curr_thread" and call it done.
1315 		 */
1316 		thread_get_core_local()->curr_thread = -1;
1317 	} else {
1318 		thread_init_boot_thread();
1319 	}
1320 	thread_init_primary();
1321 	thread_init_per_cpu();
1322 	init_sec_mon(nsec_entry);
1323 }
1324 
1325 static bool cpu_nmfi_enabled(void)
1326 {
1327 #if defined(ARM32)
1328 	return read_sctlr() & SCTLR_NMFI;
1329 #else
1330 	/* Note: ARM64 does not feature non-maskable FIQ support. */
1331 	return false;
1332 #endif
1333 }
1334 
1335 /*
1336  * Note: this function is weak just to make it possible to exclude it from
1337  * the unpaged area.
1338  */
1339 void __weak boot_init_primary_late(unsigned long fdt)
1340 {
1341 	init_external_dt(fdt);
1342 	tpm_map_log_area(get_external_dt());
1343 	discover_nsec_memory();
1344 	update_external_dt();
1345 	configure_console_from_dt();
1346 
1347 	IMSG("OP-TEE version: %s", core_v_str);
1348 	if (IS_ENABLED(CFG_WARN_INSECURE)) {
1349 		IMSG("WARNING: This OP-TEE configuration might be insecure!");
1350 		IMSG("WARNING: Please check https://optee.readthedocs.io/en/latest/architecture/porting_guidelines.html");
1351 	}
1352 	IMSG("Primary CPU initializing");
1353 #ifdef CFG_CORE_ASLR
1354 	DMSG("Executing at offset %#lx with virtual load address %#"PRIxVA,
1355 	     (unsigned long)boot_mmu_config.load_offset, VCORE_START_VA);
1356 #endif
1357 	if (IS_ENABLED(CFG_MEMTAG))
1358 		DMSG("Memory tagging %s",
1359 		     memtag_is_enabled() ?  "enabled" : "disabled");
1360 
1361 	/* Check if platform needs NMFI workaround */
1362 	if (cpu_nmfi_enabled()) {
1363 		if (!IS_ENABLED(CFG_CORE_WORKAROUND_ARM_NMFI))
1364 			IMSG("WARNING: This ARM core has NMFI enabled, please apply workaround!");
1365 	} else {
1366 		if (IS_ENABLED(CFG_CORE_WORKAROUND_ARM_NMFI))
1367 			IMSG("WARNING: This ARM core does not have NMFI enabled, no need for workaround");
1368 	}
1369 
1370 	main_init_gic();
1371 	init_vfp_nsec();
1372 	if (IS_ENABLED(CFG_VIRTUALIZATION)) {
1373 		IMSG("Initializing virtualization support");
1374 		core_mmu_init_virtualization();
1375 	} else {
1376 		init_tee_runtime();
1377 	}
1378 	call_finalcalls();
1379 	IMSG("Primary CPU switching to normal world boot");
1380 }
1381 
1382 static void init_secondary_helper(unsigned long nsec_entry)
1383 {
1384 	IMSG("Secondary CPU %zu initializing", get_core_pos());
1385 
1386 	/*
1387 	 * Mask asynchronous exceptions before switching to the thread vector
1388 	 * as the thread handler requires those to be masked while
1389 	 * executing with the temporary stack. The thread subsystem also
1390 	 * asserts that the foreign interrupts are blocked when using most of
1391 	 * its functions.
1392 	 */
1393 	thread_set_exceptions(THREAD_EXCP_ALL);
1394 
1395 	secondary_init_cntfrq();
1396 	thread_init_per_cpu();
1397 	init_sec_mon(nsec_entry);
1398 	main_secondary_init_gic();
1399 	init_vfp_sec();
1400 	init_vfp_nsec();
1401 
1402 	IMSG("Secondary CPU %zu switching to normal world boot", get_core_pos());
1403 }
1404 
1405 /*
1406  * Note: this function is weak just to make it possible to exclude it from
1407  * the unpaged area so that it lies in the init area.
1408  */
1409 void __weak boot_init_primary_early(unsigned long pageable_part,
1410 				    unsigned long nsec_entry __maybe_unused)
1411 {
1412 	unsigned long e = PADDR_INVALID;
1413 
1414 #if !defined(CFG_WITH_ARM_TRUSTED_FW)
1415 	e = nsec_entry;
1416 #endif
1417 
1418 	init_primary(pageable_part, e);
1419 }
1420 
1421 #if defined(CFG_WITH_ARM_TRUSTED_FW)
1422 unsigned long boot_cpu_on_handler(unsigned long a0 __maybe_unused,
1423 				  unsigned long a1 __unused)
1424 {
1425 	init_secondary_helper(PADDR_INVALID);
1426 	return 0;
1427 }
1428 #else
1429 void boot_init_secondary(unsigned long nsec_entry)
1430 {
1431 	init_secondary_helper(nsec_entry);
1432 }
1433 #endif
1434 
1435 #if defined(CFG_BOOT_SECONDARY_REQUEST)
1436 void boot_set_core_ns_entry(size_t core_idx, uintptr_t entry,
1437 			    uintptr_t context_id)
1438 {
1439 	ns_entry_contexts[core_idx].entry_point = entry;
1440 	ns_entry_contexts[core_idx].context_id = context_id;
1441 	dsb_ishst();
1442 }
1443 
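/*
 * Release protocol for a waiting secondary core: write the normal world
 * entry point first, then (after a barrier) set the spin table flag and
 * issue dsb + sev so a core spinning in wfe() in boot_core_hpen() below
 * wakes up and reads a consistent entry.
 */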
1444 int boot_core_release(size_t core_idx, paddr_t entry)
1445 {
1446 	if (!core_idx || core_idx >= CFG_TEE_CORE_NB_CORE)
1447 		return -1;
1448 
1449 	ns_entry_contexts[core_idx].entry_point = entry;
1450 	dmb();
1451 	spin_table[core_idx] = 1;
1452 	dsb();
1453 	sev();
1454 
1455 	return 0;
1456 }
1457 
1458 /*
1459  * Spin until a secondary boot request arrives, then return with
1460  * the secondary core entry address.
1461  */
1462 struct ns_entry_context *boot_core_hpen(void)
1463 {
1464 #ifdef CFG_PSCI_ARM32
1465 	return &ns_entry_contexts[get_core_pos()];
1466 #else
1467 	do {
1468 		wfe();
1469 	} while (!spin_table[get_core_pos()]);
1470 	dmb();
1471 	return &ns_entry_contexts[get_core_pos()];
1472 #endif
1473 }
1474 #endif
1475 
1476 #if defined(CFG_CORE_ASLR)
1477 #if defined(CFG_DT)
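/*
 * Illustrative DT fragment (hypothetical seed value) matching what the
 * code below expects: a 64-bit "kaslr-seed" property under /secure-chosen.
 *
 *	secure-chosen {
 *		kaslr-seed = <0x12345678 0x9abcdef0>;
 *	};
 */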
1478 unsigned long __weak get_aslr_seed(void *fdt)
1479 {
1480 	int rc = fdt_check_header(fdt);
1481 	const uint64_t *seed = NULL;
1482 	int offs = 0;
1483 	int len = 0;
1484 
1485 	if (rc) {
1486 		DMSG("Bad fdt: %d", rc);
1487 		goto err;
1488 	}
1489 
1490 	offs = fdt_path_offset(fdt, "/secure-chosen");
1491 	if (offs < 0) {
1492 		DMSG("Cannot find /secure-chosen");
1493 		goto err;
1494 	}
1495 	seed = fdt_getprop(fdt, offs, "kaslr-seed", &len);
1496 	if (!seed || len != sizeof(*seed)) {
1497 		DMSG("Cannot find valid kaslr-seed");
1498 		goto err;
1499 	}
1500 
1501 	return fdt64_to_cpu(*seed);
1502 
1503 err:
1504 	/* Try platform implementation */
1505 	return plat_get_aslr_seed();
1506 }
1507 #else /*!CFG_DT*/
1508 unsigned long __weak get_aslr_seed(void *fdt __unused)
1509 {
1510 	/* Try platform implementation */
1511 	return plat_get_aslr_seed();
1512 }
1513 #endif /*!CFG_DT*/
1514 #endif /*CFG_CORE_ASLR*/
1515