xref: /optee_os/core/arch/arm/kernel/boot.c (revision bc12b0e95e3c63f46850c1e69c79cd6879c68543)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2015-2023, Linaro Limited
4  * Copyright (c) 2023, Arm Limited
5  */
6 
7 #include <arm.h>
8 #include <assert.h>
9 #include <compiler.h>
10 #include <config.h>
11 #include <console.h>
12 #include <crypto/crypto.h>
13 #include <drivers/gic.h>
14 #include <dt-bindings/interrupt-controller/arm-gic.h>
15 #include <ffa.h>
16 #include <initcall.h>
17 #include <inttypes.h>
18 #include <keep.h>
19 #include <kernel/asan.h>
20 #include <kernel/boot.h>
21 #include <kernel/dt.h>
22 #include <kernel/linker.h>
23 #include <kernel/misc.h>
24 #include <kernel/panic.h>
25 #include <kernel/tee_misc.h>
26 #include <kernel/thread.h>
27 #include <kernel/tpm.h>
28 #include <libfdt.h>
29 #include <malloc.h>
30 #include <memtag.h>
31 #include <mm/core_memprot.h>
32 #include <mm/core_mmu.h>
33 #include <mm/fobj.h>
34 #include <mm/tee_mm.h>
35 #include <mm/tee_pager.h>
36 #include <sm/psci.h>
37 #include <stdio.h>
38 #include <trace.h>
39 #include <utee_defines.h>
40 #include <util.h>
41 
42 #include <platform_config.h>
43 
44 #if !defined(CFG_WITH_ARM_TRUSTED_FW)
45 #include <sm/sm.h>
46 #endif
47 
48 #if defined(CFG_WITH_VFP)
49 #include <kernel/vfp.h>
50 #endif
51 
52 /*
53  * In this file we're using unsigned long to represent physical pointers as
54  * they are received in a single register when OP-TEE is initially entered.
55  * This limits 32-bit systems to using only the lower 32 bits
56  * of a physical address for initial parameters.
57  *
58  * 64-bit systems on the other hand can use full 64-bit physical pointers.
59  */
60 #define PADDR_INVALID		ULONG_MAX
61 
62 #if defined(CFG_BOOT_SECONDARY_REQUEST)
63 struct ns_entry_context {
64 	uintptr_t entry_point;
65 	uintptr_t context_id;
66 };
67 struct ns_entry_context ns_entry_contexts[CFG_TEE_CORE_NB_CORE];
68 static uint32_t spin_table[CFG_TEE_CORE_NB_CORE];
69 #endif
70 
71 #ifdef CFG_BOOT_SYNC_CPU
72 /*
73  * Array used during boot to synchronize the CPUs.
74  * When 0, the CPU has not started.
75  * When 1, it has started.
76  */
77 uint32_t sem_cpu_sync[CFG_TEE_CORE_NB_CORE];
78 DECLARE_KEEP_PAGER(sem_cpu_sync);
79 #endif
80 
81 #ifdef CFG_DT
82 struct dt_descriptor {
83 	void *blob;
84 #ifdef _CFG_USE_DTB_OVERLAY
85 	int frag_id;
86 #endif
87 };
88 
89 static struct dt_descriptor external_dt __nex_bss;
90 #ifdef CFG_CORE_SEL1_SPMC
91 static struct dt_descriptor tos_fw_config_dt __nex_bss;
92 #endif
93 #endif
94 
95 #ifdef CFG_SECONDARY_INIT_CNTFRQ
96 static uint32_t cntfrq;
97 #endif
98 
99 /* May be overridden in plat-$(PLATFORM)/main.c */
100 __weak void plat_primary_init_early(void)
101 {
102 }
103 DECLARE_KEEP_PAGER(plat_primary_init_early);
104 
105 /* May be overridden in plat-$(PLATFORM)/main.c */
106 __weak void main_init_gic(void)
107 {
108 }
109 
110 /* May be overridden in plat-$(PLATFORM)/main.c */
111 __weak void main_secondary_init_gic(void)
112 {
113 }
114 
115 /* May be overridden in plat-$(PLATFORM)/main.c */
116 __weak unsigned long plat_get_aslr_seed(void)
117 {
118 	DMSG("Warning: no ASLR seed");
119 
120 	return 0;
121 }
122 
123 #if defined(_CFG_CORE_STACK_PROTECTOR) || defined(CFG_WITH_STACK_CANARIES)
124 /* Generate random stack canary value on boot up */
125 __weak void plat_get_random_stack_canaries(void *buf, size_t ncan, size_t size)
126 {
127 	TEE_Result ret = TEE_ERROR_GENERIC;
128 	size_t i = 0;
129 
130 	assert(buf && ncan && size);
131 
132 	/*
133 	 * With virtualization the RNG is not initialized in the Nexus core,
134 	 * so this must be overridden with a platform-specific implementation.
135 	 */
136 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
137 		IMSG("WARNING: Using fixed value for stack canary");
138 		memset(buf, 0xab, ncan * size);
139 		goto out;
140 	}
141 
142 	ret = crypto_rng_read(buf, ncan * size);
143 	if (ret != TEE_SUCCESS)
144 		panic("Failed to generate random stack canary");
145 
146 out:
147 	/* Leave a null byte in each canary to stop string-based exploits */
148 	for (i = 0; i < ncan; i++)
149 		*((uint8_t *)buf + size * i) = 0;
150 }
151 #endif /* _CFG_CORE_STACK_PROTECTOR || CFG_WITH_STACK_CANARIES */
152 
153 /*
154  * This function is called as a guard after each SMC call that is not
155  * supposed to return.
156  */
157 void __panic_at_smc_return(void)
158 {
159 	panic();
160 }
161 
162 #if defined(CFG_WITH_ARM_TRUSTED_FW)
163 void init_sec_mon(unsigned long nsec_entry __maybe_unused)
164 {
165 	assert(nsec_entry == PADDR_INVALID);
166 	/* Do nothing as we don't have a secure monitor */
167 }
168 #else
169 /* May be overridden in plat-$(PLATFORM)/main.c */
170 __weak void init_sec_mon(unsigned long nsec_entry)
171 {
172 	struct sm_nsec_ctx *nsec_ctx;
173 
174 	assert(nsec_entry != PADDR_INVALID);
175 
176 	/* Initialize secure monitor */
177 	nsec_ctx = sm_get_nsec_ctx();
178 	nsec_ctx->mon_lr = nsec_entry;
179 	nsec_ctx->mon_spsr = CPSR_MODE_SVC | CPSR_I;
180 	if (nsec_entry & 1)
181 		nsec_ctx->mon_spsr |= CPSR_T;
182 }
183 #endif
184 
185 #if defined(CFG_WITH_ARM_TRUSTED_FW)
186 static void init_vfp_nsec(void)
187 {
188 }
189 #else
190 static void init_vfp_nsec(void)
191 {
192 	/* Normal world can use CP10 and CP11 (SIMD/VFP) */
193 	write_nsacr(read_nsacr() | NSACR_CP10 | NSACR_CP11);
194 }
195 #endif
196 
197 #if defined(CFG_WITH_VFP)
198 
199 #ifdef ARM32
200 static void init_vfp_sec(void)
201 {
202 	uint32_t cpacr = read_cpacr();
203 
204 	/*
205 	 * Enable Advanced SIMD functionality.
206 	 * Enable use of D16-D31 of the Floating-point Extension register
207 	 * file.
208 	 */
209 	cpacr &= ~(CPACR_ASEDIS | CPACR_D32DIS);
210 	/*
211 	 * Enable usage of CP10 and CP11 (SIMD/VFP) in both kernel and user
212 	 * mode.
213 	 */
214 	cpacr |= CPACR_CP(10, CPACR_CP_ACCESS_FULL);
215 	cpacr |= CPACR_CP(11, CPACR_CP_ACCESS_FULL);
216 	write_cpacr(cpacr);
217 }
218 #endif /* ARM32 */
219 
220 #ifdef ARM64
221 static void init_vfp_sec(void)
222 {
223 	/* Not using VFP until thread_kernel_enable_vfp() */
224 	vfp_disable();
225 }
226 #endif /* ARM64 */
227 
228 #else /* CFG_WITH_VFP */
229 
230 static void init_vfp_sec(void)
231 {
232 	/* Not using VFP */
233 }
234 #endif
235 
236 #ifdef CFG_SECONDARY_INIT_CNTFRQ
237 static void primary_save_cntfrq(void)
238 {
239 	assert(cntfrq == 0);
240 
241 	/*
242 	 * CNTFRQ should be initialized on the primary CPU by a
243 	 * previous boot stage
244 	 */
245 	cntfrq = read_cntfrq();
246 }
247 
248 static void secondary_init_cntfrq(void)
249 {
250 	assert(cntfrq != 0);
251 	write_cntfrq(cntfrq);
252 }
253 #else /* CFG_SECONDARY_INIT_CNTFRQ */
254 static void primary_save_cntfrq(void)
255 {
256 }
257 
258 static void secondary_init_cntfrq(void)
259 {
260 }
261 #endif
262 
263 #ifdef CFG_CORE_SANITIZE_KADDRESS
264 static void init_run_constructors(void)
265 {
266 	const vaddr_t *ctor;
267 
268 	for (ctor = &__ctor_list; ctor < &__ctor_end; ctor++)
269 		((void (*)(void))(*ctor))();
270 }
271 
272 static void init_asan(void)
273 {
274 
275 	/*
276 	 * CFG_ASAN_SHADOW_OFFSET is also supplied as
277 	 * -fasan-shadow-offset=$(CFG_ASAN_SHADOW_OFFSET) to the compiler.
278 	 * Since all the values needed to calculate
279 	 * CFG_ASAN_SHADOW_OFFSET aren't available in make, we have to
280 	 * calculate it in advance and hard code it into the platform
281 	 * conf.mk. Here, where we do have all the needed values, we
282 	 * double check that the compiler was supplied the correct value.
283 	 */
284 
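	/*
	 * Sketch of the arithmetic, assuming the standard ASAN 8-to-1 shadow
	 * mapping (shadow_addr = (addr / 8) + CFG_ASAN_SHADOW_OFFSET): the
	 * shadow area occupies the last ninth of TEE_RAM_VA_SIZE, so its
	 * start is TEE_RAM_START + (TEE_RAM_VA_SIZE * 8) / 9 rounded to
	 * 8 bytes, and the offset is chosen so that shadowing TEE_RAM_START
	 * lands exactly at that start address.
	 */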
285 #define __ASAN_SHADOW_START \
286 	ROUNDUP(TEE_RAM_START + (TEE_RAM_VA_SIZE * 8) / 9 - 8, 8)
287 	assert(__ASAN_SHADOW_START == (vaddr_t)&__asan_shadow_start);
288 #define __CFG_ASAN_SHADOW_OFFSET \
289 	(__ASAN_SHADOW_START - (TEE_RAM_START / 8))
290 	COMPILE_TIME_ASSERT(CFG_ASAN_SHADOW_OFFSET == __CFG_ASAN_SHADOW_OFFSET);
291 #undef __ASAN_SHADOW_START
292 #undef __CFG_ASAN_SHADOW_OFFSET
293 
294 	/*
295 	 * Assign the area covered by the shadow memory: everything from the
296 	 * load address up to the beginning of the shadow area.
297 	 */
298 	asan_set_shadowed((void *)TEE_LOAD_ADDR, &__asan_shadow_start);
299 
300 	/*
301 	 * Add access to areas that aren't opened automatically by a
302 	 * constructor.
303 	 */
304 	asan_tag_access(&__ctor_list, &__ctor_end);
305 	asan_tag_access(__rodata_start, __rodata_end);
306 #ifdef CFG_WITH_PAGER
307 	asan_tag_access(__pageable_start, __pageable_end);
308 #endif /*CFG_WITH_PAGER*/
309 	asan_tag_access(__nozi_start, __nozi_end);
310 	asan_tag_access(__exidx_start, __exidx_end);
311 	asan_tag_access(__extab_start, __extab_end);
312 
313 	init_run_constructors();
314 
315 	/* Everything is tagged correctly, let's start address sanitizing. */
316 	asan_start();
317 }
318 #else /*CFG_CORE_SANITIZE_KADDRESS*/
319 static void init_asan(void)
320 {
321 }
322 #endif /*CFG_CORE_SANITIZE_KADDRESS*/
323 
324 #if defined(CFG_MEMTAG)
325 /* Called from entry_a64.S only when MEMTAG is configured */
326 void boot_init_memtag(void)
327 {
328 	paddr_t base = 0;
329 	paddr_size_t size = 0;
330 
331 	memtag_init_ops(feat_mte_implemented());
332 	core_mmu_get_secure_memory(&base, &size);
333 	memtag_set_tags((void *)(vaddr_t)base, size, 0);
334 }
335 #endif
336 
337 #ifdef CFG_WITH_PAGER
338 
339 #ifdef CFG_CORE_SANITIZE_KADDRESS
340 static void carve_out_asan_mem(tee_mm_pool_t *pool)
341 {
342 	const size_t s = pool->hi - pool->lo;
343 	tee_mm_entry_t *mm;
344 	paddr_t apa = ASAN_MAP_PA;
345 	size_t asz = ASAN_MAP_SZ;
346 
347 	if (core_is_buffer_outside(apa, asz, pool->lo, s))
348 		return;
349 
350 	/* Reserve the shadow area */
351 	if (!core_is_buffer_inside(apa, asz, pool->lo, s)) {
352 		if (apa < pool->lo) {
353 			/*
354 			 * ASAN buffer is overlapping with the beginning of
355 			 * the pool.
356 			 */
357 			asz -= pool->lo - apa;
358 			apa = pool->lo;
359 		} else {
360 			/*
361 			 * ASAN buffer is overlapping with the end of the
362 			 * pool.
363 			 */
364 			asz = pool->hi - apa;
365 		}
366 	}
367 	mm = tee_mm_alloc2(pool, apa, asz);
368 	assert(mm);
369 }
370 #else
371 static void carve_out_asan_mem(tee_mm_pool_t *pool __unused)
372 {
373 }
374 #endif
375 
376 static void print_pager_pool_size(void)
377 {
378 	struct tee_pager_stats __maybe_unused stats;
379 
380 	tee_pager_get_stats(&stats);
381 	IMSG("Pager pool size: %zukB",
382 		stats.npages_all * SMALL_PAGE_SIZE / 1024);
383 }
384 
385 static void init_vcore(tee_mm_pool_t *mm_vcore)
386 {
387 	const vaddr_t begin = VCORE_START_VA;
388 	size_t size = TEE_RAM_VA_SIZE;
389 
390 #ifdef CFG_CORE_SANITIZE_KADDRESS
391 	/* Carve out the ASAN memory, flat mapped after core memory */
392 	if (begin + size > ASAN_SHADOW_PA)
393 		size = ASAN_MAP_PA - begin;
394 #endif
395 
396 	if (!tee_mm_init(mm_vcore, begin, size, SMALL_PAGE_SHIFT,
397 			 TEE_MM_POOL_NO_FLAGS))
398 		panic("tee_mm_vcore init failed");
399 }
400 
401 /*
402  * With CFG_CORE_ASLR=y the init part is relocated very early during boot.
403  * The init part is also paged just as the rest of the normal paged code, with
404  * the difference that it's preloaded during boot. When the backing store
405  * is configured the entire paged binary is copied in place and then also
406  * the init part. Since the init part has been relocated (references to
407  * addresses updated to compensate for the new load address) this has to be
408  * undone for the hashes of those pages to match with the original binary.
409  *
410  * If CFG_CORE_ASLR=n, nothing needs to be done as the code/ro pages are
411  * unchanged.
412  */
413 static void undo_init_relocation(uint8_t *paged_store __maybe_unused)
414 {
415 #ifdef CFG_CORE_ASLR
416 	unsigned long *ptr = NULL;
417 	const uint32_t *reloc = NULL;
418 	const uint32_t *reloc_end = NULL;
419 	unsigned long offs = boot_mmu_config.map_offset;
420 	const struct boot_embdata *embdata = (const void *)__init_end;
421 	vaddr_t addr_end = (vaddr_t)__init_end - offs - TEE_LOAD_ADDR;
422 	vaddr_t addr_start = (vaddr_t)__init_start - offs - TEE_LOAD_ADDR;
423 
424 	reloc = (const void *)((vaddr_t)embdata + embdata->reloc_offset);
425 	reloc_end = reloc + embdata->reloc_len / sizeof(*reloc);
426 
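	/*
	 * Each relocation entry holds an offset, relative to the link-time
	 * load address, of a word containing an absolute address that was
	 * adjusted by map_offset; subtracting the offset again restores the
	 * word to what the original binary contains so the page hash matches.
	 */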
427 	for (; reloc < reloc_end; reloc++) {
428 		if (*reloc < addr_start)
429 			continue;
430 		if (*reloc >= addr_end)
431 			break;
432 		ptr = (void *)(paged_store + *reloc - addr_start);
433 		*ptr -= offs;
434 	}
435 #endif
436 }
437 
438 static struct fobj *ro_paged_alloc(tee_mm_entry_t *mm, void *hashes,
439 				   void *store)
440 {
441 	const unsigned int num_pages = tee_mm_get_bytes(mm) / SMALL_PAGE_SIZE;
442 #ifdef CFG_CORE_ASLR
443 	unsigned int reloc_offs = (vaddr_t)__pageable_start - VCORE_START_VA;
444 	const struct boot_embdata *embdata = (const void *)__init_end;
445 	const void *reloc = __init_end + embdata->reloc_offset;
446 
447 	return fobj_ro_reloc_paged_alloc(num_pages, hashes, reloc_offs,
448 					 reloc, embdata->reloc_len, store);
449 #else
450 	return fobj_ro_paged_alloc(num_pages, hashes, store);
451 #endif
452 }
453 
454 static void init_runtime(unsigned long pageable_part)
455 {
456 	size_t n;
457 	size_t init_size = (size_t)(__init_end - __init_start);
458 	size_t pageable_start = (size_t)__pageable_start;
459 	size_t pageable_end = (size_t)__pageable_end;
460 	size_t pageable_size = pageable_end - pageable_start;
461 	vaddr_t tzsram_end = TZSRAM_BASE + TZSRAM_SIZE - TEE_LOAD_ADDR +
462 			     VCORE_START_VA;
463 	size_t hash_size = (pageable_size / SMALL_PAGE_SIZE) *
464 			   TEE_SHA256_HASH_SIZE;
465 	const struct boot_embdata *embdata = (const void *)__init_end;
466 	const void *tmp_hashes = NULL;
467 	tee_mm_entry_t *mm = NULL;
468 	struct fobj *fobj = NULL;
469 	uint8_t *paged_store = NULL;
470 	uint8_t *hashes = NULL;
471 
472 	assert(pageable_size % SMALL_PAGE_SIZE == 0);
473 	assert(embdata->total_len >= embdata->hashes_offset +
474 				     embdata->hashes_len);
475 	assert(hash_size == embdata->hashes_len);
476 
477 	tmp_hashes = __init_end + embdata->hashes_offset;
478 
479 	init_asan();
480 
481 	/* Add heap2 first as heap1 may be too small as the initial bget pool */
482 	malloc_add_pool(__heap2_start, __heap2_end - __heap2_start);
483 	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
484 
485 	/*
486 	 * This needs to be initialized early to support address lookup
487 	 * in MEM_AREA_TEE_RAM
488 	 */
489 	tee_pager_early_init();
490 
491 	hashes = malloc(hash_size);
492 	IMSG_RAW("\n");
493 	IMSG("Pager is enabled. Hashes: %zu bytes", hash_size);
494 	assert(hashes);
495 	asan_memcpy_unchecked(hashes, tmp_hashes, hash_size);
496 
497 	/*
498 	 * Need tee_mm_sec_ddr initialized to be able to allocate secure
499 	 * DDR below.
500 	 */
501 	core_mmu_init_ta_ram();
502 
503 	carve_out_asan_mem(&tee_mm_sec_ddr);
504 
505 	mm = tee_mm_alloc(&tee_mm_sec_ddr, pageable_size);
506 	assert(mm);
507 	paged_store = phys_to_virt(tee_mm_get_smem(mm), MEM_AREA_TA_RAM,
508 				   pageable_size);
509 	/*
510 	 * Load the pageable part into the dedicated allocated area:
511 	 * - Move the pageable non-init part into the pageable area. The
512 	 *   bootloader may have loaded it anywhere in TA RAM, hence memmove().
513 	 * - Copy pageable init part from current location into pageable area.
514 	 */
515 	memmove(paged_store + init_size,
516 		phys_to_virt(pageable_part,
517 			     core_mmu_get_type_by_pa(pageable_part),
518 			     __pageable_part_end - __pageable_part_start),
519 		__pageable_part_end - __pageable_part_start);
520 	asan_memcpy_unchecked(paged_store, __init_start, init_size);
521 	/*
522 	 * Undo any relocation of the init part so the hash checks
523 	 * can pass.
524 	 */
525 	undo_init_relocation(paged_store);
526 
527 	/* Check that the hashes of what's in the pageable area are OK */
528 	DMSG("Checking hashes of pageable area");
529 	for (n = 0; (n * SMALL_PAGE_SIZE) < pageable_size; n++) {
530 		const uint8_t *hash = hashes + n * TEE_SHA256_HASH_SIZE;
531 		const uint8_t *page = paged_store + n * SMALL_PAGE_SIZE;
532 		TEE_Result res;
533 
534 		DMSG("hash pg_idx %zu hash %p page %p", n, hash, page);
535 		res = hash_sha256_check(hash, page, SMALL_PAGE_SIZE);
536 		if (res != TEE_SUCCESS) {
537 			EMSG("Hash failed for page %zu at %p: res 0x%x",
538 			     n, (void *)page, res);
539 			panic();
540 		}
541 	}
542 
543 	/*
544 	 * Assert that the prepaged init sections are page aligned so that
545 	 * nothing trails uninitialized at the end of the premapped init area.
546 	 */
547 	assert(!(init_size & SMALL_PAGE_MASK));
548 
549 	/*
550 	 * Initialize the virtual memory pool used for main_mmu_l2_ttb which
551 	 * is supplied to tee_pager_init() below.
552 	 */
553 	init_vcore(&tee_mm_vcore);
554 
555 	/*
556 	 * Assign the pager alias area at the end of the small page block that
557 	 * the rest of the binary is loaded into. We take more than needed, but
558 	 * we're guaranteed to not need more than the physical amount of
559 	 * TZSRAM.
560 	 */
561 	mm = tee_mm_alloc2(&tee_mm_vcore,
562 			   (vaddr_t)tee_mm_vcore.lo +
563 			   tee_mm_vcore.size - TZSRAM_SIZE,
564 			   TZSRAM_SIZE);
565 	assert(mm);
566 	tee_pager_set_alias_area(mm);
567 
568 	/*
569 	 * Claim the virtual memory which isn't paged.
570 	 * Linear memory (flat-mapped core memory) ends there.
571 	 */
572 	mm = tee_mm_alloc2(&tee_mm_vcore, VCORE_UNPG_RX_PA,
573 			   (vaddr_t)(__pageable_start - VCORE_UNPG_RX_PA));
574 	assert(mm);
575 
576 	/*
577 	 * Allocate virtual memory for the pageable area and let the pager
578 	 * take charge of all the pages already assigned to that memory.
579 	 */
580 	mm = tee_mm_alloc2(&tee_mm_vcore, (vaddr_t)__pageable_start,
581 			   pageable_size);
582 	assert(mm);
583 	fobj = ro_paged_alloc(mm, hashes, paged_store);
584 	assert(fobj);
585 	tee_pager_add_core_region(tee_mm_get_smem(mm), PAGED_REGION_TYPE_RO,
586 				  fobj);
587 	fobj_put(fobj);
588 
589 	tee_pager_add_pages(pageable_start, init_size / SMALL_PAGE_SIZE, false);
590 	tee_pager_add_pages(pageable_start + init_size,
591 			    (pageable_size - init_size) / SMALL_PAGE_SIZE,
592 			    true);
593 	if (pageable_end < tzsram_end)
594 		tee_pager_add_pages(pageable_end, (tzsram_end - pageable_end) /
595 						   SMALL_PAGE_SIZE, true);
596 
597 	/*
598 	 * There may be physical pages in TZSRAM before the core load address.
599 	 * These pages can be added to the physical pages pool of the pager.
600 	 * This setup may happen when the secure bootloader runs in TZSRAM
601 	 * and its memory can be reused by OP-TEE once boot stages complete.
602 	 */
603 	tee_pager_add_pages(tee_mm_vcore.lo,
604 			(VCORE_UNPG_RX_PA - tee_mm_vcore.lo) / SMALL_PAGE_SIZE,
605 			true);
606 
607 	print_pager_pool_size();
608 }
609 #else
610 
611 static void init_runtime(unsigned long pageable_part __unused)
612 {
613 	init_asan();
614 
615 	/*
616 	 * By default the whole of OP-TEE uses malloc, so we need to initialize
617 	 * it early. But, when virtualization is enabled, malloc is used
618 	 * only by TEE runtime, so malloc should be initialized later, for
619 	 * every virtual partition separately. Core code uses nex_malloc
620 	 * instead.
621 	 */
622 #ifdef CFG_NS_VIRTUALIZATION
623 	nex_malloc_add_pool(__nex_heap_start, __nex_heap_end -
624 					      __nex_heap_start);
625 #else
626 	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
627 #endif
628 
629 	IMSG_RAW("\n");
630 }
631 #endif
632 
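/*
 * Return the device tree used by OP-TEE: the embedded (secure) DTB if one
 * is built in, otherwise fall back to the external (non-secure) DT.
 */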
633 void *get_dt(void)
634 {
635 	void *fdt = get_embedded_dt();
636 
637 	if (!fdt)
638 		fdt = get_external_dt();
639 
640 	return fdt;
641 }
642 
643 void *get_secure_dt(void)
644 {
645 	void *fdt = get_embedded_dt();
646 
647 	if (!fdt && IS_ENABLED(CFG_MAP_EXT_DT_SECURE))
648 		fdt = get_external_dt();
649 
650 	return fdt;
651 }
652 
653 #if defined(CFG_EMBED_DTB)
654 void *get_embedded_dt(void)
655 {
656 	static bool checked;
657 
658 	assert(cpu_mmu_enabled());
659 
660 	if (!checked) {
661 		IMSG("Embedded DTB found");
662 
663 		if (fdt_check_header(embedded_secure_dtb))
664 			panic("Invalid embedded DTB");
665 
666 		checked = true;
667 	}
668 
669 	return embedded_secure_dtb;
670 }
671 #else
672 void *get_embedded_dt(void)
673 {
674 	return NULL;
675 }
676 #endif /*CFG_EMBED_DTB*/
677 
678 #if defined(CFG_DT)
679 void *get_external_dt(void)
680 {
681 	if (!IS_ENABLED(CFG_EXTERNAL_DT))
682 		return NULL;
683 
684 	assert(cpu_mmu_enabled());
685 	return external_dt.blob;
686 }
687 
688 static TEE_Result release_external_dt(void)
689 {
690 	int ret = 0;
691 
692 	if (!IS_ENABLED(CFG_EXTERNAL_DT))
693 		return TEE_SUCCESS;
694 
695 	if (!external_dt.blob)
696 		return TEE_SUCCESS;
697 
698 	ret = fdt_pack(external_dt.blob);
699 	if (ret < 0) {
700 		EMSG("Failed to pack Device Tree at 0x%" PRIxPA ": error %d",
701 		     virt_to_phys(external_dt.blob), ret);
702 		panic();
703 	}
704 
705 	if (core_mmu_remove_mapping(MEM_AREA_EXT_DT, external_dt.blob,
706 				    CFG_DTB_MAX_SIZE))
707 		panic("Failed to remove temporary Device Tree mapping");
708 
709 	/* External DTB is no longer reachable, reset the pointer to invalid */
710 	external_dt.blob = NULL;
711 
712 	return TEE_SUCCESS;
713 }
714 boot_final(release_external_dt);
715 
716 #ifdef _CFG_USE_DTB_OVERLAY
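/*
 * When the external DT is treated as an overlay, nodes added by OP-TEE are
 * wrapped in a fragment roughly like this (illustrative sketch):
 *
 *	fragment@<n> {
 *		target-path = "/";
 *		__overlay__ {
 *			... node added by add_dt_path_subnode() ...
 *		};
 *	};
 */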
717 static int add_dt_overlay_fragment(struct dt_descriptor *dt, int ioffs)
718 {
719 	char frag[32];
720 	int offs;
721 	int ret;
722 
723 	snprintf(frag, sizeof(frag), "fragment@%d", dt->frag_id);
724 	offs = fdt_add_subnode(dt->blob, ioffs, frag);
725 	if (offs < 0)
726 		return offs;
727 
728 	dt->frag_id += 1;
729 
730 	ret = fdt_setprop_string(dt->blob, offs, "target-path", "/");
731 	if (ret < 0)
732 		return -1;
733 
734 	return fdt_add_subnode(dt->blob, offs, "__overlay__");
735 }
736 
737 static int init_dt_overlay(struct dt_descriptor *dt, int __maybe_unused dt_size)
738 {
739 	int fragment;
740 
741 	if (IS_ENABLED(CFG_EXTERNAL_DTB_OVERLAY)) {
742 		if (!fdt_check_header(dt->blob)) {
743 			fdt_for_each_subnode(fragment, dt->blob, 0)
744 				dt->frag_id += 1;
745 			return 0;
746 		}
747 	}
748 
749 	return fdt_create_empty_tree(dt->blob, dt_size);
750 }
751 #else
752 static int add_dt_overlay_fragment(struct dt_descriptor *dt __unused, int offs)
753 {
754 	return offs;
755 }
756 
757 static int init_dt_overlay(struct dt_descriptor *dt __unused,
758 			   int dt_size __unused)
759 {
760 	return 0;
761 }
762 #endif /* _CFG_USE_DTB_OVERLAY */
763 
764 static int add_dt_path_subnode(struct dt_descriptor *dt, const char *path,
765 			       const char *subnode)
766 {
767 	int offs;
768 
769 	offs = fdt_path_offset(dt->blob, path);
770 	if (offs < 0)
771 		return -1;
772 	offs = add_dt_overlay_fragment(dt, offs);
773 	if (offs < 0)
774 		return -1;
775 	offs = fdt_add_subnode(dt->blob, offs, subnode);
776 	if (offs < 0)
777 		return -1;
778 	return offs;
779 }
780 
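/*
 * The resulting node looks roughly like this (illustrative sketch, the
 * "interrupts" property is only added with async notifications enabled):
 *
 *	firmware {
 *		optee {
 *			compatible = "linaro,optee-tz";
 *			method = "smc";
 *			interrupts = <type intid flags>;
 *		};
 *	};
 */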
781 static int add_optee_dt_node(struct dt_descriptor *dt)
782 {
783 	int offs;
784 	int ret;
785 
786 	if (fdt_path_offset(dt->blob, "/firmware/optee") >= 0) {
787 		DMSG("OP-TEE Device Tree node already exists!");
788 		return 0;
789 	}
790 
791 	offs = fdt_path_offset(dt->blob, "/firmware");
792 	if (offs < 0) {
793 		offs = add_dt_path_subnode(dt, "/", "firmware");
794 		if (offs < 0)
795 			return -1;
796 	}
797 
798 	offs = fdt_add_subnode(dt->blob, offs, "optee");
799 	if (offs < 0)
800 		return -1;
801 
802 	ret = fdt_setprop_string(dt->blob, offs, "compatible",
803 				 "linaro,optee-tz");
804 	if (ret < 0)
805 		return -1;
806 	ret = fdt_setprop_string(dt->blob, offs, "method", "smc");
807 	if (ret < 0)
808 		return -1;
809 
810 	if (CFG_CORE_ASYNC_NOTIF_GIC_INTID) {
811 		/*
812 		 * The format of the interrupt property is defined by the
813 		 * binding of the interrupt domain root. In this case it's
814 		 * an Arm GIC v1, v2 or v3 so we must be compatible with
815 		 * these.
816 		 *
817 		 * An SPI type of interrupt is indicated with a 0 in the
818 		 * first cell. A PPI type is indicated with value 1.
819 		 *
820 		 * The interrupt number goes in the second cell where
821 		 * SPIs range from 0 to 987 and PPIs range from 0 to 15.
822 		 *
823 		 * Flags are passed in the third cell.
824 		 */
825 		uint32_t itr_trigger = 0;
826 		uint32_t itr_type = 0;
827 		uint32_t itr_id = 0;
828 		uint32_t val[3] = { };
829 
830 		/* PPIs are visible only in the current CPU cluster */
831 		static_assert(!CFG_CORE_ASYNC_NOTIF_GIC_INTID ||
832 			      (CFG_CORE_ASYNC_NOTIF_GIC_INTID >=
833 			       GIC_SPI_BASE) ||
834 			      ((CFG_TEE_CORE_NB_CORE <= 8) &&
835 			       (CFG_CORE_ASYNC_NOTIF_GIC_INTID >=
836 				GIC_PPI_BASE)));
837 
838 		if (CFG_CORE_ASYNC_NOTIF_GIC_INTID >= GIC_SPI_BASE) {
839 			itr_type = GIC_SPI;
840 			itr_id = CFG_CORE_ASYNC_NOTIF_GIC_INTID - GIC_SPI_BASE;
841 			itr_trigger = IRQ_TYPE_EDGE_RISING;
842 		} else {
843 			itr_type = GIC_PPI;
844 			itr_id = CFG_CORE_ASYNC_NOTIF_GIC_INTID - GIC_PPI_BASE;
845 			itr_trigger = IRQ_TYPE_EDGE_RISING |
846 				      GIC_CPU_MASK_SIMPLE(CFG_TEE_CORE_NB_CORE);
847 		}
848 
849 		val[0] = TEE_U32_TO_BIG_ENDIAN(itr_type);
850 		val[1] = TEE_U32_TO_BIG_ENDIAN(itr_id);
851 		val[2] = TEE_U32_TO_BIG_ENDIAN(itr_trigger);
852 
853 		ret = fdt_setprop(dt->blob, offs, "interrupts", val,
854 				  sizeof(val));
855 		if (ret < 0)
856 			return -1;
857 	}
858 	return 0;
859 }
860 
861 #ifdef CFG_PSCI_ARM32
862 static int append_psci_compatible(void *fdt, int offs, const char *str)
863 {
864 	return fdt_appendprop(fdt, offs, "compatible", str, strlen(str) + 1);
865 }
866 
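/*
 * Add a /psci node roughly like this (illustrative sketch):
 *
 *	psci {
 *		compatible = "arm,psci-1.0", "arm,psci-0.2", "arm,psci";
 *		method = "smc";
 *		cpu_suspend = <PSCI_CPU_SUSPEND>;
 *		cpu_off = <PSCI_CPU_OFF>;
 *		cpu_on = <PSCI_CPU_ON>;
 *		sys_poweroff = <PSCI_SYSTEM_OFF>;
 *		sys_reset = <PSCI_SYSTEM_RESET>;
 *	};
 */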
867 static int dt_add_psci_node(struct dt_descriptor *dt)
868 {
869 	int offs;
870 
871 	if (fdt_path_offset(dt->blob, "/psci") >= 0) {
872 		DMSG("PSCI Device Tree node already exists!");
873 		return 0;
874 	}
875 
876 	offs = add_dt_path_subnode(dt, "/", "psci");
877 	if (offs < 0)
878 		return -1;
879 	if (append_psci_compatible(dt->blob, offs, "arm,psci-1.0"))
880 		return -1;
881 	if (append_psci_compatible(dt->blob, offs, "arm,psci-0.2"))
882 		return -1;
883 	if (append_psci_compatible(dt->blob, offs, "arm,psci"))
884 		return -1;
885 	if (fdt_setprop_string(dt->blob, offs, "method", "smc"))
886 		return -1;
887 	if (fdt_setprop_u32(dt->blob, offs, "cpu_suspend", PSCI_CPU_SUSPEND))
888 		return -1;
889 	if (fdt_setprop_u32(dt->blob, offs, "cpu_off", PSCI_CPU_OFF))
890 		return -1;
891 	if (fdt_setprop_u32(dt->blob, offs, "cpu_on", PSCI_CPU_ON))
892 		return -1;
893 	if (fdt_setprop_u32(dt->blob, offs, "sys_poweroff", PSCI_SYSTEM_OFF))
894 		return -1;
895 	if (fdt_setprop_u32(dt->blob, offs, "sys_reset", PSCI_SYSTEM_RESET))
896 		return -1;
897 	return 0;
898 }
899 
900 static int check_node_compat_prefix(struct dt_descriptor *dt, int offs,
901 				    const char *prefix)
902 {
903 	const size_t prefix_len = strlen(prefix);
904 	size_t l;
905 	int plen;
906 	const char *prop;
907 
908 	prop = fdt_getprop(dt->blob, offs, "compatible", &plen);
909 	if (!prop)
910 		return -1;
911 
912 	while (plen > 0) {
913 		if (memcmp(prop, prefix, prefix_len) == 0)
914 			return 0; /* match */
915 
916 		l = strlen(prop) + 1;
917 		prop += l;
918 		plen -= l;
919 	}
920 
921 	return -1;
922 }
923 
924 static int dt_add_psci_cpu_enable_methods(struct dt_descriptor *dt)
925 {
926 	int offs = 0;
927 
928 	while (1) {
929 		offs = fdt_next_node(dt->blob, offs, NULL);
930 		if (offs < 0)
931 			break;
932 		if (fdt_getprop(dt->blob, offs, "enable-method", NULL))
933 			continue; /* already set */
934 		if (check_node_compat_prefix(dt, offs, "arm,cortex-a"))
935 			continue; /* no compatible */
936 		if (fdt_setprop_string(dt->blob, offs, "enable-method", "psci"))
937 			return -1;
938 		/* Need to restart scanning as offsets may have changed */
939 		offs = 0;
940 	}
941 	return 0;
942 }
943 
944 static int config_psci(struct dt_descriptor *dt)
945 {
946 	if (dt_add_psci_node(dt))
947 		return -1;
948 	return dt_add_psci_cpu_enable_methods(dt);
949 }
950 #else
951 static int config_psci(struct dt_descriptor *dt __unused)
952 {
953 	return 0;
954 }
955 #endif /*CFG_PSCI_ARM32*/
956 
957 static void set_dt_val(void *data, uint32_t cell_size, uint64_t val)
958 {
959 	if (cell_size == 1) {
960 		fdt32_t v = cpu_to_fdt32((uint32_t)val);
961 
962 		memcpy(data, &v, sizeof(v));
963 	} else {
964 		fdt64_t v = cpu_to_fdt64(val);
965 
966 		memcpy(data, &v, sizeof(v));
967 	}
968 }
969 
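/*
 * Add a "no-map" child node describing [pa, pa + size) under
 * /reserved-memory, creating /reserved-memory itself (with #address-cells,
 * #size-cells and an empty "ranges") if it does not already exist.
 */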
970 static int add_res_mem_dt_node(struct dt_descriptor *dt, const char *name,
971 			       paddr_t pa, size_t size)
972 {
973 	int offs = 0;
974 	int ret = 0;
975 	int addr_size = -1;
976 	int len_size = -1;
977 	bool found = true;
978 	char subnode_name[80] = { 0 };
979 
980 	offs = fdt_path_offset(dt->blob, "/reserved-memory");
981 
982 	if (offs < 0) {
983 		found = false;
984 		offs = 0;
985 	}
986 
987 	if (IS_ENABLED2(_CFG_USE_DTB_OVERLAY)) {
988 		len_size = sizeof(paddr_t) / sizeof(uint32_t);
989 		addr_size = sizeof(paddr_t) / sizeof(uint32_t);
990 	} else {
991 		len_size = fdt_size_cells(dt->blob, offs);
992 		if (len_size < 0)
993 			return -1;
994 		addr_size = fdt_address_cells(dt->blob, offs);
995 		if (addr_size < 0)
996 			return -1;
997 	}
998 
999 	if (!found) {
1000 		offs = add_dt_path_subnode(dt, "/", "reserved-memory");
1001 		if (offs < 0)
1002 			return -1;
1003 		ret = fdt_setprop_cell(dt->blob, offs, "#address-cells",
1004 				       addr_size);
1005 		if (ret < 0)
1006 			return -1;
1007 		ret = fdt_setprop_cell(dt->blob, offs, "#size-cells", len_size);
1008 		if (ret < 0)
1009 			return -1;
1010 		ret = fdt_setprop(dt->blob, offs, "ranges", NULL, 0);
1011 		if (ret < 0)
1012 			return -1;
1013 	}
1014 
1015 	ret = snprintf(subnode_name, sizeof(subnode_name),
1016 		       "%s@%" PRIxPA, name, pa);
1017 	if (ret < 0 || ret >= (int)sizeof(subnode_name))
1018 		DMSG("truncated node \"%s@%" PRIxPA"\"", name, pa);
1019 	offs = fdt_add_subnode(dt->blob, offs, subnode_name);
1020 	if (offs >= 0) {
1021 		uint32_t data[FDT_MAX_NCELLS * 2];
1022 
1023 		set_dt_val(data, addr_size, pa);
1024 		set_dt_val(data + addr_size, len_size, size);
1025 		ret = fdt_setprop(dt->blob, offs, "reg", data,
1026 				  sizeof(uint32_t) * (addr_size + len_size));
1027 		if (ret < 0)
1028 			return -1;
1029 		ret = fdt_setprop(dt->blob, offs, "no-map", NULL, 0);
1030 		if (ret < 0)
1031 			return -1;
1032 	} else {
1033 		return -1;
1034 	}
1035 	return 0;
1036 }
1037 
1038 #ifdef CFG_CORE_DYN_SHM
1039 static uint64_t get_dt_val_and_advance(const void *data, size_t *offs,
1040 				       uint32_t cell_size)
1041 {
1042 	uint64_t rv = 0;
1043 
1044 	if (cell_size == 1) {
1045 		uint32_t v;
1046 
1047 		memcpy(&v, (const uint8_t *)data + *offs, sizeof(v));
1048 		*offs += sizeof(v);
1049 		rv = fdt32_to_cpu(v);
1050 	} else {
1051 		uint64_t v;
1052 
1053 		memcpy(&v, (const uint8_t *)data + *offs, sizeof(v));
1054 		*offs += sizeof(v);
1055 		rv = fdt64_to_cpu(v);
1056 	}
1057 
1058 	return rv;
1059 }
1060 
1061 /*
1062  * Find all non-secure memory in the DT. Memory marked inaccessible by the
1063  * Secure World is ignored since it cannot be mapped for use as dynamic shared
1064  * memory.
1065  */
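/*
 * The helper below is called twice: first with mem == NULL just to count
 * the entries, then again with an array large enough to hold them all.
 */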
1066 static int get_nsec_memory_helper(void *fdt, struct core_mmu_phys_mem *mem)
1067 {
1068 	const uint8_t *prop = NULL;
1069 	uint64_t a = 0;
1070 	uint64_t l = 0;
1071 	size_t prop_offs = 0;
1072 	size_t prop_len = 0;
1073 	int elems_total = 0;
1074 	int addr_size = 0;
1075 	int len_size = 0;
1076 	int offs = 0;
1077 	size_t n = 0;
1078 	int len = 0;
1079 
1080 	addr_size = fdt_address_cells(fdt, 0);
1081 	if (addr_size < 0)
1082 		return 0;
1083 
1084 	len_size = fdt_size_cells(fdt, 0);
1085 	if (len_size < 0)
1086 		return 0;
1087 
1088 	while (true) {
1089 		offs = fdt_node_offset_by_prop_value(fdt, offs, "device_type",
1090 						     "memory",
1091 						     sizeof("memory"));
1092 		if (offs < 0)
1093 			break;
1094 
1095 		if (fdt_get_status(fdt, offs) != (DT_STATUS_OK_NSEC |
1096 						   DT_STATUS_OK_SEC))
1097 			continue;
1098 
1099 		prop = fdt_getprop(fdt, offs, "reg", &len);
1100 		if (!prop)
1101 			continue;
1102 
1103 		prop_len = len;
1104 		for (n = 0, prop_offs = 0; prop_offs < prop_len; n++) {
1105 			a = get_dt_val_and_advance(prop, &prop_offs, addr_size);
1106 			if (prop_offs >= prop_len) {
1107 				n--;
1108 				break;
1109 			}
1110 
1111 			l = get_dt_val_and_advance(prop, &prop_offs, len_size);
1112 			if (mem) {
1113 				mem->type = MEM_AREA_DDR_OVERALL;
1114 				mem->addr = a;
1115 				mem->size = l;
1116 				mem++;
1117 			}
1118 		}
1119 
1120 		elems_total += n;
1121 	}
1122 
1123 	return elems_total;
1124 }
1125 
1126 static struct core_mmu_phys_mem *get_nsec_memory(void *fdt, size_t *nelems)
1127 {
1128 	struct core_mmu_phys_mem *mem = NULL;
1129 	int elems_total = 0;
1130 
1131 	elems_total = get_nsec_memory_helper(fdt, NULL);
1132 	if (elems_total <= 0)
1133 		return NULL;
1134 
1135 	mem = nex_calloc(elems_total, sizeof(*mem));
1136 	if (!mem)
1137 		panic();
1138 
1139 	elems_total = get_nsec_memory_helper(fdt, mem);
1140 	assert(elems_total > 0);
1141 
1142 	*nelems = elems_total;
1143 
1144 	return mem;
1145 }
1146 #endif /*CFG_CORE_DYN_SHM*/
1147 
1148 #ifdef CFG_CORE_RESERVED_SHM
1149 static int mark_static_shm_as_reserved(struct dt_descriptor *dt)
1150 {
1151 	vaddr_t shm_start;
1152 	vaddr_t shm_end;
1153 
1154 	core_mmu_get_mem_by_type(MEM_AREA_NSEC_SHM, &shm_start, &shm_end);
1155 	if (shm_start != shm_end)
1156 		return add_res_mem_dt_node(dt, "optee_shm",
1157 					   virt_to_phys((void *)shm_start),
1158 					   shm_end - shm_start);
1159 
1160 	DMSG("No SHM configured");
1161 	return -1;
1162 }
1163 #endif /*CFG_CORE_RESERVED_SHM*/
1164 
1165 static void init_external_dt(unsigned long phys_dt)
1166 {
1167 	struct dt_descriptor *dt = &external_dt;
1168 	void *fdt;
1169 	int ret;
1170 
1171 	if (!IS_ENABLED(CFG_EXTERNAL_DT))
1172 		return;
1173 
1174 	if (!phys_dt) {
1175 		/*
1176 		 * No need to panic as we're not using the DT in OP-TEE
1177 		 * yet, we're only adding some nodes for normal world use.
1178 		 * This makes the switch to using DT easier as we can boot
1179 		 * a newer OP-TEE with older boot loaders. Once we start to
1180 		 * initialize devices based on DT we'll likely panic
1181 		 * instead of returning here.
1182 		 */
1183 		IMSG("No non-secure external DT");
1184 		return;
1185 	}
1186 
1187 	fdt = core_mmu_add_mapping(MEM_AREA_EXT_DT, phys_dt, CFG_DTB_MAX_SIZE);
1188 	if (!fdt)
1189 		panic("Failed to map external DTB");
1190 
1191 	dt->blob = fdt;
1192 
1193 	ret = init_dt_overlay(dt, CFG_DTB_MAX_SIZE);
1194 	if (ret < 0) {
1195 		EMSG("Device Tree Overlay init fail @ %#lx: error %d", phys_dt,
1196 		     ret);
1197 		panic();
1198 	}
1199 
1200 	ret = fdt_open_into(fdt, fdt, CFG_DTB_MAX_SIZE);
1201 	if (ret < 0) {
1202 		EMSG("Invalid Device Tree at %#lx: error %d", phys_dt, ret);
1203 		panic();
1204 	}
1205 
1206 	IMSG("Non-secure external DT found");
1207 }
1208 
1209 static int mark_tzdram_as_reserved(struct dt_descriptor *dt)
1210 {
1211 	return add_res_mem_dt_node(dt, "optee_core", CFG_TZDRAM_START,
1212 				   CFG_TZDRAM_SIZE);
1213 }
1214 
1215 static void update_external_dt(void)
1216 {
1217 	struct dt_descriptor *dt = &external_dt;
1218 
1219 	if (!IS_ENABLED(CFG_EXTERNAL_DT))
1220 		return;
1221 
1222 	if (!dt->blob)
1223 		return;
1224 
1225 	if (!IS_ENABLED(CFG_CORE_FFA) && add_optee_dt_node(dt))
1226 		panic("Failed to add OP-TEE Device Tree node");
1227 
1228 	if (config_psci(dt))
1229 		panic("Failed to config PSCI");
1230 
1231 #ifdef CFG_CORE_RESERVED_SHM
1232 	if (mark_static_shm_as_reserved(dt))
1233 		panic("Failed to config non-secure memory");
1234 #endif
1235 
1236 	if (mark_tzdram_as_reserved(dt))
1237 		panic("Failed to config secure memory");
1238 }
1239 #else /*CFG_DT*/
1240 void *get_external_dt(void)
1241 {
1242 	return NULL;
1243 }
1244 
1245 static void init_external_dt(unsigned long phys_dt __unused)
1246 {
1247 }
1248 
1249 static void update_external_dt(void)
1250 {
1251 }
1252 
1253 #ifdef CFG_CORE_DYN_SHM
1254 static struct core_mmu_phys_mem *get_nsec_memory(void *fdt __unused,
1255 						 size_t *nelems __unused)
1256 {
1257 	return NULL;
1258 }
1259 #endif /*CFG_CORE_DYN_SHM*/
1260 #endif /*!CFG_DT*/
1261 
1262 #if defined(CFG_CORE_SEL1_SPMC) && defined(CFG_DT)
1263 void *get_tos_fw_config_dt(void)
1264 {
1265 	if (!IS_ENABLED(CFG_MAP_EXT_DT_SECURE))
1266 		return NULL;
1267 
1268 	assert(cpu_mmu_enabled());
1269 
1270 	return tos_fw_config_dt.blob;
1271 }
1272 
1273 static void init_tos_fw_config_dt(unsigned long pa)
1274 {
1275 	struct dt_descriptor *dt = &tos_fw_config_dt;
1276 	void *fdt = NULL;
1277 	int ret = 0;
1278 
1279 	if (!IS_ENABLED(CFG_MAP_EXT_DT_SECURE))
1280 		return;
1281 
1282 	if (!pa)
1283 		panic("No TOS_FW_CONFIG DT found");
1284 
1285 	fdt = core_mmu_add_mapping(MEM_AREA_EXT_DT, pa, CFG_DTB_MAX_SIZE);
1286 	if (!fdt)
1287 		panic("Failed to map TOS_FW_CONFIG DT");
1288 
1289 	dt->blob = fdt;
1290 
1291 	ret = fdt_open_into(fdt, fdt, CFG_DTB_MAX_SIZE);
1292 	if (ret < 0) {
1293 		EMSG("Invalid Device Tree at %#lx: error %d", pa, ret);
1294 		panic();
1295 	}
1296 
1297 	IMSG("TOS_FW_CONFIG DT found");
1298 }
1299 #else
1300 void *get_tos_fw_config_dt(void)
1301 {
1302 	return NULL;
1303 }
1304 
1305 static void init_tos_fw_config_dt(unsigned long pa __unused)
1306 {
1307 }
1308 #endif /*CFG_CORE_SEL1_SPMC && CFG_DT*/
1309 
1310 #ifdef CFG_CORE_DYN_SHM
1311 static void discover_nsec_memory(void)
1312 {
1313 	struct core_mmu_phys_mem *mem;
1314 	const struct core_mmu_phys_mem *mem_begin = NULL;
1315 	const struct core_mmu_phys_mem *mem_end = NULL;
1316 	size_t nelems;
1317 	void *fdt = get_external_dt();
1318 
1319 	if (fdt) {
1320 		mem = get_nsec_memory(fdt, &nelems);
1321 		if (mem) {
1322 			core_mmu_set_discovered_nsec_ddr(mem, nelems);
1323 			return;
1324 		}
1325 
1326 		DMSG("No non-secure memory found in FDT");
1327 	}
1328 
1329 	mem_begin = phys_ddr_overall_begin;
1330 	mem_end = phys_ddr_overall_end;
1331 	nelems = mem_end - mem_begin;
1332 	if (nelems) {
1333 		/*
1334 		 * A platform cannot use both register_ddr() and the now
1335 		 * deprecated register_dynamic_shm().
1336 		 */
1337 		assert(phys_ddr_overall_compat_begin ==
1338 		       phys_ddr_overall_compat_end);
1339 	} else {
1340 		mem_begin = phys_ddr_overall_compat_begin;
1341 		mem_end = phys_ddr_overall_compat_end;
1342 		nelems = mem_end - mem_begin;
1343 		if (!nelems)
1344 			return;
1345 		DMSG("Warning: register_dynamic_shm() is deprecated, please use register_ddr() instead");
1346 	}
1347 
1348 	mem = nex_calloc(nelems, sizeof(*mem));
1349 	if (!mem)
1350 		panic();
1351 
1352 	memcpy(mem, phys_ddr_overall_begin, sizeof(*mem) * nelems);
1353 	core_mmu_set_discovered_nsec_ddr(mem, nelems);
1354 }
1355 #else /*CFG_CORE_DYN_SHM*/
1356 static void discover_nsec_memory(void)
1357 {
1358 }
1359 #endif /*!CFG_CORE_DYN_SHM*/
1360 
1361 #ifdef CFG_NS_VIRTUALIZATION
1362 static TEE_Result virt_init_heap(void)
1363 {
1364 	/* We need to initialize the pool for every virtual guest partition */
1365 	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
1366 
1367 	return TEE_SUCCESS;
1368 }
1369 preinit_early(virt_init_heap);
1370 #endif
1371 
1372 void init_tee_runtime(void)
1373 {
1374 #ifndef CFG_WITH_PAGER
1375 	/* Pager initializes TA RAM early */
1376 	core_mmu_init_ta_ram();
1377 #endif
1378 	/*
1379 	 * With virtualization we call this function when creating the
1380 	 * OP-TEE partition instead.
1381 	 */
1382 	if (!IS_ENABLED(CFG_NS_VIRTUALIZATION))
1383 		call_preinitcalls();
1384 	call_initcalls();
1385 
1386 	/*
1387 	 * These two functions use crypto_rng_read() to initialize the
1388 	 * pauth keys. Once call_initcalls() returns we're guaranteed that
1389 	 * crypto_rng_read() is ready to be used.
1390 	 */
1391 	thread_init_core_local_pauth_keys();
1392 	thread_init_thread_pauth_keys();
1393 
1394 	/*
1395 	 * Reinitialize canaries around the stacks with crypto_rng_read().
1396 	 *
1397 	 * TODO: Updating canaries when CFG_NS_VIRTUALIZATION is enabled will
1398 	 * require synchronization between thread_check_canaries() and
1399 	 * thread_update_canaries().
1400 	 */
1401 	if (!IS_ENABLED(CFG_NS_VIRTUALIZATION))
1402 		thread_update_canaries();
1403 }
1404 
1405 static void init_primary(unsigned long pageable_part, unsigned long nsec_entry)
1406 {
1407 	thread_init_core_local_stacks();
1408 	/*
1409 	 * Mask asynchronous exceptions before switching to the thread vector
1410 	 * as the thread handler requires those to be masked while
1411 	 * executing with the temporary stack. The thread subsystem also
1412 	 * asserts that foreign interrupts are blocked when using most of
1413 	 * its functions.
1414 	 */
1415 	thread_set_exceptions(THREAD_EXCP_ALL);
1416 	primary_save_cntfrq();
1417 	init_vfp_sec();
1418 	/*
1419 	 * Pager: init_runtime() calls thread_kernel_enable_vfp() so we must
1420 	 * set a current thread right now to avoid a chicken-and-egg problem
1421 	 * (thread_init_boot_thread() sets the current thread but needs
1422 	 * things set by init_runtime()).
1423 	 */
1424 	thread_get_core_local()->curr_thread = 0;
1425 	init_runtime(pageable_part);
1426 
1427 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
1428 		/*
1429 		 * Virtualization: We can't initialize threads right now because
1430 		 * threads belong to the "tee" part and will be initialized
1431 		 * separately for each new virtual guest. So, we'll clear
1432 		 * "curr_thread" and call it done.
1433 		 */
1434 		thread_get_core_local()->curr_thread = -1;
1435 	} else {
1436 		thread_init_boot_thread();
1437 	}
1438 	thread_init_primary();
1439 	thread_init_per_cpu();
1440 	init_sec_mon(nsec_entry);
1441 }
1442 
1443 static bool cpu_nmfi_enabled(void)
1444 {
1445 #if defined(ARM32)
1446 	return read_sctlr() & SCTLR_NMFI;
1447 #else
1448 	/* Note: ARM64 does not feature non-maskable FIQ support. */
1449 	return false;
1450 #endif
1451 }
1452 
1453 /*
1454  * Note: this function is weak just to make it possible to exclude it from
1455  * the unpaged area.
1456  */
1457 void __weak boot_init_primary_late(unsigned long fdt,
1458 				   unsigned long tos_fw_config)
1459 {
1460 	init_external_dt(fdt);
1461 	init_tos_fw_config_dt(tos_fw_config);
1462 #ifdef CFG_CORE_SEL1_SPMC
1463 	tpm_map_log_area(get_tos_fw_config_dt());
1464 #else
1465 	tpm_map_log_area(get_external_dt());
1466 #endif
1467 	discover_nsec_memory();
1468 	update_external_dt();
1469 	configure_console_from_dt();
1470 
1471 	IMSG("OP-TEE version: %s", core_v_str);
1472 	if (IS_ENABLED(CFG_WARN_INSECURE)) {
1473 		IMSG("WARNING: This OP-TEE configuration might be insecure!");
1474 		IMSG("WARNING: Please check https://optee.readthedocs.io/en/latest/architecture/porting_guidelines.html");
1475 	}
1476 	IMSG("Primary CPU initializing");
1477 #ifdef CFG_CORE_ASLR
1478 	DMSG("Executing at offset %#lx with virtual load address %#"PRIxVA,
1479 	     (unsigned long)boot_mmu_config.map_offset, VCORE_START_VA);
1480 #endif
1481 	if (IS_ENABLED(CFG_MEMTAG))
1482 		DMSG("Memory tagging %s",
1483 		     memtag_is_enabled() ?  "enabled" : "disabled");
1484 
1485 	/* Check if platform needs NMFI workaround */
1486 	if (cpu_nmfi_enabled())	{
1487 		if (!IS_ENABLED(CFG_CORE_WORKAROUND_ARM_NMFI))
1488 			IMSG("WARNING: This ARM core has NMFI enabled, please apply workaround!");
1489 	} else {
1490 		if (IS_ENABLED(CFG_CORE_WORKAROUND_ARM_NMFI))
1491 			IMSG("WARNING: This ARM core does not have NMFI enabled, no need for workaround");
1492 	}
1493 
1494 	main_init_gic();
1495 	init_vfp_nsec();
1496 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
1497 		IMSG("Initializing virtualization support");
1498 		core_mmu_init_virtualization();
1499 	} else {
1500 		init_tee_runtime();
1501 	}
1502 	call_finalcalls();
1503 	IMSG("Primary CPU switching to normal world boot");
1504 }
1505 
1506 static void init_secondary_helper(unsigned long nsec_entry)
1507 {
1508 	IMSG("Secondary CPU %zu initializing", get_core_pos());
1509 
1510 	 * Mask asynchronous exceptions before switching to the thread vector
1511 	 * as the thread handler requires those to be masked while
1512 	 * executing with the temporary stack. The thread subsystem also
1513 	 * asserts that foreign interrupts are blocked when using most of
1514 	 * asserts that the foreign interrupts are blocked when using most of
1515 	 * its functions.
1516 	 */
1517 	thread_set_exceptions(THREAD_EXCP_ALL);
1518 
1519 	secondary_init_cntfrq();
1520 	thread_init_per_cpu();
1521 	init_sec_mon(nsec_entry);
1522 	main_secondary_init_gic();
1523 	init_vfp_sec();
1524 	init_vfp_nsec();
1525 
1526 	IMSG("Secondary CPU %zu switching to normal world boot", get_core_pos());
1527 }
1528 
1529 /*
1530  * Note: this function is weak just to make it possible to exclude it from
1531  * the unpaged area so that it lies in the init area.
1532  */
1533 void __weak boot_init_primary_early(unsigned long pageable_part,
1534 				    unsigned long nsec_entry __maybe_unused)
1535 {
1536 	unsigned long e = PADDR_INVALID;
1537 
1538 #if !defined(CFG_WITH_ARM_TRUSTED_FW)
1539 	e = nsec_entry;
1540 #endif
1541 
1542 	init_primary(pageable_part, e);
1543 }
1544 
1545 #if defined(CFG_WITH_ARM_TRUSTED_FW)
1546 unsigned long boot_cpu_on_handler(unsigned long a0 __maybe_unused,
1547 				  unsigned long a1 __unused)
1548 {
1549 	init_secondary_helper(PADDR_INVALID);
1550 	return 0;
1551 }
1552 #else
1553 void boot_init_secondary(unsigned long nsec_entry)
1554 {
1555 	init_secondary_helper(nsec_entry);
1556 }
1557 #endif
1558 
1559 #if defined(CFG_BOOT_SECONDARY_REQUEST)
1560 void boot_set_core_ns_entry(size_t core_idx, uintptr_t entry,
1561 			    uintptr_t context_id)
1562 {
1563 	ns_entry_contexts[core_idx].entry_point = entry;
1564 	ns_entry_contexts[core_idx].context_id = context_id;
1565 	dsb_ishst();
1566 }
1567 
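/*
 * Release a secondary core held in the boot spin loop: publish its entry
 * point, then set its spin_table flag and wake it with SEV so that
 * boot_core_hpen() returns the context on that core.
 */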
1568 int boot_core_release(size_t core_idx, paddr_t entry)
1569 {
1570 	if (!core_idx || core_idx >= CFG_TEE_CORE_NB_CORE)
1571 		return -1;
1572 
1573 	ns_entry_contexts[core_idx].entry_point = entry;
1574 	dmb();
1575 	spin_table[core_idx] = 1;
1576 	dsb();
1577 	sev();
1578 
1579 	return 0;
1580 }
1581 
1582 /*
1583  * Spin until a secondary boot request arrives, then return with
1584  * the secondary core entry address.
1585  */
1586 struct ns_entry_context *boot_core_hpen(void)
1587 {
1588 #ifdef CFG_PSCI_ARM32
1589 	return &ns_entry_contexts[get_core_pos()];
1590 #else
1591 	do {
1592 		wfe();
1593 	} while (!spin_table[get_core_pos()]);
1594 	dmb();
1595 	return &ns_entry_contexts[get_core_pos()];
1596 #endif
1597 }
1598 #endif
1599 
1600 #if defined(CFG_CORE_ASLR)
1601 #if defined(CFG_DT)
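/*
 * The seed is taken from a 64-bit /secure-chosen/kaslr-seed property, for
 * example (illustrative values only):
 *
 *	secure-chosen {
 *		kaslr-seed = <0x01234567 0x89abcdef>;
 *	};
 */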
1602 unsigned long __weak get_aslr_seed(void *fdt)
1603 {
1604 	int rc = 0;
1605 	const uint64_t *seed = NULL;
1606 	int offs = 0;
1607 	int len = 0;
1608 
1609 	if (!fdt) {
1610 		DMSG("No fdt");
1611 		goto err;
1612 	}
1613 
1614 	rc = fdt_check_header(fdt);
1615 	if (rc) {
1616 		DMSG("Bad fdt: %d", rc);
1617 		goto err;
1618 	}
1619 
1620 	offs =  fdt_path_offset(fdt, "/secure-chosen");
1621 	if (offs < 0) {
1622 		DMSG("Cannot find /secure-chosen");
1623 		goto err;
1624 	}
1625 	seed = fdt_getprop(fdt, offs, "kaslr-seed", &len);
1626 	if (!seed || len != sizeof(*seed)) {
1627 		DMSG("Cannot find valid kaslr-seed");
1628 		goto err;
1629 	}
1630 
1631 	return fdt64_to_cpu(*seed);
1632 
1633 err:
1634 	/* Try platform implementation */
1635 	return plat_get_aslr_seed();
1636 }
1637 #else /*!CFG_DT*/
1638 unsigned long __weak get_aslr_seed(void *fdt __unused)
1639 {
1640 	/* Try platform implementation */
1641 	return plat_get_aslr_seed();
1642 }
1643 #endif /*!CFG_DT*/
1644 #endif /*CFG_CORE_ASLR*/
1645 
1646 #if defined(CFG_CORE_SEL2_SPMC) && defined(CFG_CORE_PHYS_RELOCATABLE)
1647 static void *get_fdt_from_boot_info(struct ffa_boot_info_header_1_1 *hdr)
1648 {
1649 	struct ffa_boot_info_1_1 *desc = NULL;
1650 	uint8_t content_fmt = 0;
1651 	uint8_t name_fmt = 0;
1652 	void *fdt = NULL;
1653 	int ret = 0;
1654 
1655 	if (hdr->signature != FFA_BOOT_INFO_SIGNATURE) {
1656 		EMSG("Bad boot info signature %#"PRIx32, hdr->signature);
1657 		panic();
1658 	}
1659 	if (hdr->version != FFA_BOOT_INFO_VERSION) {
1660 		EMSG("Bad boot info version %#"PRIx32, hdr->version);
1661 		panic();
1662 	}
1663 	if (hdr->desc_count != 1) {
1664 		EMSG("Bad boot info descriptor count %#"PRIx32,
1665 		     hdr->desc_count);
1666 		panic();
1667 	}
1668 	desc = (void *)((vaddr_t)hdr + hdr->desc_offset);
1669 	name_fmt = desc->flags & FFA_BOOT_INFO_FLAG_NAME_FORMAT_MASK;
1670 	if (name_fmt == FFA_BOOT_INFO_FLAG_NAME_FORMAT_STRING)
1671 		DMSG("Boot info descriptor name \"%16s\"", desc->name);
1672 	else if (name_fmt == FFA_BOOT_INFO_FLAG_NAME_FORMAT_UUID)
1673 		DMSG("Boot info descriptor UUID %pUl", (void *)desc->name);
1674 	else
1675 		DMSG("Boot info descriptor: unknown name format %"PRIu8,
1676 		     name_fmt);
1677 
1678 	content_fmt = (desc->flags & FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_MASK) >>
1679 		      FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_SHIFT;
1680 	if (content_fmt != FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_ADDR) {
1681 		EMSG("Bad boot info content format %"PRIu8", expected %u (address)",
1682 		     content_fmt, FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_ADDR);
1683 		panic();
1684 	}
1685 
1686 	fdt = (void *)(vaddr_t)desc->contents;
1687 	ret = fdt_check_full(fdt, desc->size);
1688 	if (ret < 0) {
1689 		EMSG("Invalid Device Tree at %p: error %d", fdt, ret);
1690 		panic();
1691 	}
1692 	return fdt;
1693 }
1694 
1695 static void get_sec_mem_from_manifest(void *fdt, paddr_t *base, size_t *size)
1696 {
1697 	int ret = 0;
1698 	uint64_t num = 0;
1699 
1700 	ret = fdt_node_check_compatible(fdt, 0, "arm,ffa-manifest-1.0");
1701 	if (ret < 0) {
1702 		EMSG("Invalid FF-A manifest at %p: error %d", fdt, ret);
1703 		panic();
1704 	}
1705 	ret = dt_getprop_as_number(fdt, 0, "load-address", &num);
1706 	if (ret < 0) {
1707 		EMSG("Can't read \"load-address\" from FF-A manifest at %p: error %d",
1708 		     fdt, ret);
1709 		panic();
1710 	}
1711 	*base = num;
1712 	/* "mem-size" is currently an undocumented extension to the spec. */
1713 	ret = dt_getprop_as_number(fdt, 0, "mem-size", &num);
1714 	if (ret < 0) {
1715 		EMSG("Can't read \"mem-size\" from FF-A manifest at %p: error %d",
1716 		     fdt, ret);
1717 		panic();
1718 	}
1719 	*size = num;
1720 }
1721 
1722 void __weak boot_save_boot_info(void *boot_info)
1723 {
1724 	void *fdt = NULL;
1725 	paddr_t base = 0;
1726 	size_t size = 0;
1727 
1728 	fdt = get_fdt_from_boot_info(boot_info);
1729 	get_sec_mem_from_manifest(fdt, &base, &size);
1730 	core_mmu_set_secure_memory(base, size);
1731 }
1732 #endif /*CFG_CORE_SEL2_SPMC && CFG_CORE_PHYS_RELOCATABLE*/
1733