xref: /optee_os/core/arch/arm/kernel/boot.c (revision ba2a6adb764f1310ad3c3091d89de84274f86b02)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2015-2023, Linaro Limited
4  * Copyright (c) 2023, Arm Limited
5  */
6 
7 #include <arm.h>
8 #include <assert.h>
9 #include <compiler.h>
10 #include <config.h>
11 #include <console.h>
12 #include <crypto/crypto.h>
13 #include <drivers/gic.h>
14 #include <dt-bindings/interrupt-controller/arm-gic.h>
15 #include <ffa.h>
16 #include <initcall.h>
17 #include <inttypes.h>
18 #include <keep.h>
19 #include <kernel/asan.h>
20 #include <kernel/boot.h>
21 #include <kernel/dt.h>
22 #include <kernel/linker.h>
23 #include <kernel/misc.h>
24 #include <kernel/panic.h>
25 #include <kernel/tee_misc.h>
26 #include <kernel/thread.h>
27 #include <kernel/tpm.h>
28 #include <libfdt.h>
29 #include <malloc.h>
30 #include <memtag.h>
31 #include <mm/core_memprot.h>
32 #include <mm/core_mmu.h>
33 #include <mm/fobj.h>
34 #include <mm/tee_mm.h>
35 #include <mm/tee_pager.h>
36 #include <sm/psci.h>
37 #include <stdio.h>
38 #include <trace.h>
39 #include <utee_defines.h>
40 #include <util.h>
41 
42 #include <platform_config.h>
43 
44 #if !defined(CFG_WITH_ARM_TRUSTED_FW)
45 #include <sm/sm.h>
46 #endif
47 
48 #if defined(CFG_WITH_VFP)
49 #include <kernel/vfp.h>
50 #endif
51 
52 /*
53  * In this file we're using unsigned long to represent physical pointers as
54  * they are received in a single register when OP-TEE is initially entered.
55  * This limits 32-bit systems to only make use of the lower 32 bits
56  * of a physical address for initial parameters.
57  *
58  * 64-bit systems on the other hand can use full 64-bit physical pointers.
59  */
60 #define PADDR_INVALID		ULONG_MAX
61 
62 #if defined(CFG_BOOT_SECONDARY_REQUEST)
63 struct ns_entry_context {
64 	uintptr_t entry_point;
65 	uintptr_t context_id;
66 };
67 struct ns_entry_context ns_entry_contexts[CFG_TEE_CORE_NB_CORE];
68 static uint32_t spin_table[CFG_TEE_CORE_NB_CORE];
69 #endif
70 
71 #ifdef CFG_BOOT_SYNC_CPU
72 /*
73  * Array used when booting to synchronize the CPUs.
74  * When 0, the CPU has not started.
75  * When 1, it has started.
76  */
77 uint32_t sem_cpu_sync[CFG_TEE_CORE_NB_CORE];
78 DECLARE_KEEP_PAGER(sem_cpu_sync);
79 #endif
80 
81 #ifdef CFG_DT
82 struct dt_descriptor {
83 	void *blob;
84 #ifdef _CFG_USE_DTB_OVERLAY
85 	int frag_id;
86 #endif
87 };
88 
89 static struct dt_descriptor external_dt __nex_bss;
90 #ifdef CFG_CORE_SEL1_SPMC
91 static struct dt_descriptor tos_fw_config_dt __nex_bss;
92 #endif
93 #endif
94 
95 #ifdef CFG_SECONDARY_INIT_CNTFRQ
96 static uint32_t cntfrq;
97 #endif
98 
99 /* May be overridden in plat-$(PLATFORM)/main.c */
100 __weak void plat_primary_init_early(void)
101 {
102 }
103 DECLARE_KEEP_PAGER(plat_primary_init_early);
104 
105 /* May be overridden in plat-$(PLATFORM)/main.c */
106 __weak void main_init_gic(void)
107 {
108 }
109 
110 /* May be overridden in plat-$(PLATFORM)/main.c */
111 __weak void main_secondary_init_gic(void)
112 {
113 }
114 
115 /* May be overridden in plat-$(PLATFORM)/main.c */
116 __weak unsigned long plat_get_aslr_seed(void)
117 {
118 	DMSG("Warning: no ASLR seed");
119 
120 	return 0;
121 }
122 
123 #if defined(_CFG_CORE_STACK_PROTECTOR)
124 /* Generate random stack canary value on boot up */
125 __weak uintptr_t plat_get_random_stack_canary(void)
126 {
127 	uintptr_t canary = 0xbaaaad00;
128 	TEE_Result ret = TEE_ERROR_GENERIC;
129 
130 	/*
131 	 * With virtualization the RNG is not initialized in the Nexus core, so
132 	 * this must be overridden with a platform specific implementation.
133 	 */
134 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
135 		IMSG("WARNING: Using fixed value for stack canary");
136 		return canary;
137 	}
138 
139 	ret = crypto_rng_read(&canary, sizeof(canary));
140 	if (ret != TEE_SUCCESS)
141 		panic("Failed to generate random stack canary");
142 
143 	/* Leave a null byte in the canary to prevent string-based exploits */
144 	return canary & ~0xffUL;
145 }
146 #endif /*_CFG_CORE_STACK_PROTECTOR*/
147 
148 /*
149  * This function is called as a guard after each smc call which is not
150  * supposed to return.
151  */
152 void __panic_at_smc_return(void)
153 {
154 	panic();
155 }
156 
157 #if defined(CFG_WITH_ARM_TRUSTED_FW)
158 void init_sec_mon(unsigned long nsec_entry __maybe_unused)
159 {
160 	assert(nsec_entry == PADDR_INVALID);
161 	/* Do nothing as we don't have a secure monitor */
162 }
163 #else
164 /* May be overridden in plat-$(PLATFORM)/main.c */
165 __weak void init_sec_mon(unsigned long nsec_entry)
166 {
167 	struct sm_nsec_ctx *nsec_ctx;
168 
169 	assert(nsec_entry != PADDR_INVALID);
170 
171 	/* Initialize secure monitor */
172 	nsec_ctx = sm_get_nsec_ctx();
173 	nsec_ctx->mon_lr = nsec_entry;
174 	nsec_ctx->mon_spsr = CPSR_MODE_SVC | CPSR_I;
175 	if (nsec_entry & 1)
176 		nsec_ctx->mon_spsr |= CPSR_T;
177 }
178 #endif
179 
180 #if defined(CFG_WITH_ARM_TRUSTED_FW)
181 static void init_vfp_nsec(void)
182 {
183 }
184 #else
185 static void init_vfp_nsec(void)
186 {
187 	/* Normal world can use CP10 and CP11 (SIMD/VFP) */
188 	write_nsacr(read_nsacr() | NSACR_CP10 | NSACR_CP11);
189 }
190 #endif
191 
192 #if defined(CFG_WITH_VFP)
193 
194 #ifdef ARM32
195 static void init_vfp_sec(void)
196 {
197 	uint32_t cpacr = read_cpacr();
198 
199 	/*
200 	 * Enable Advanced SIMD functionality.
201 	 * Enable use of D16-D31 of the Floating-point Extension register
202 	 * file.
203 	 */
204 	cpacr &= ~(CPACR_ASEDIS | CPACR_D32DIS);
205 	/*
206 	 * Enable usage of CP10 and CP11 (SIMD/VFP) in both kernel and user
207 	 * mode.
208 	 */
209 	cpacr |= CPACR_CP(10, CPACR_CP_ACCESS_FULL);
210 	cpacr |= CPACR_CP(11, CPACR_CP_ACCESS_FULL);
211 	write_cpacr(cpacr);
212 }
213 #endif /* ARM32 */
214 
215 #ifdef ARM64
216 static void init_vfp_sec(void)
217 {
218 	/* Not using VFP until thread_kernel_enable_vfp() */
219 	vfp_disable();
220 }
221 #endif /* ARM64 */
222 
223 #else /* CFG_WITH_VFP */
224 
225 static void init_vfp_sec(void)
226 {
227 	/* Not using VFP */
228 }
229 #endif
230 
231 #ifdef CFG_SECONDARY_INIT_CNTFRQ
232 static void primary_save_cntfrq(void)
233 {
234 	assert(cntfrq == 0);
235 
236 	/*
237 	 * CNTFRQ should be initialized on the primary CPU by a
238 	 * previous boot stage
239 	 */
240 	cntfrq = read_cntfrq();
241 }
242 
243 static void secondary_init_cntfrq(void)
244 {
245 	assert(cntfrq != 0);
246 	write_cntfrq(cntfrq);
247 }
248 #else /* CFG_SECONDARY_INIT_CNTFRQ */
249 static void primary_save_cntfrq(void)
250 {
251 }
252 
253 static void secondary_init_cntfrq(void)
254 {
255 }
256 #endif
257 
258 #ifdef CFG_CORE_SANITIZE_KADDRESS
259 static void init_run_constructors(void)
260 {
261 	const vaddr_t *ctor;
262 
263 	for (ctor = &__ctor_list; ctor < &__ctor_end; ctor++)
264 		((void (*)(void))(*ctor))();
265 }
266 
267 static void init_asan(void)
268 {
269 
270 	/*
271 	 * CFG_ASAN_SHADOW_OFFSET is also supplied as
272 	 * -fasan-shadow-offset=$(CFG_ASAN_SHADOW_OFFSET) to the compiler.
273 	 * Since all the values needed to calculate CFG_ASAN_SHADOW_OFFSET
274 	 * aren't available to make, we have to calculate it in advance and
275 	 * hard code it into the platform conf.mk. Here, where we have all
276 	 * the needed values, we double check that the compiler was supplied
277 	 * the correct value.
278 	 */
279 
280 #define __ASAN_SHADOW_START \
281 	ROUNDUP(TEE_RAM_START + (TEE_RAM_VA_SIZE * 8) / 9 - 8, 8)
282 	assert(__ASAN_SHADOW_START == (vaddr_t)&__asan_shadow_start);
283 #define __CFG_ASAN_SHADOW_OFFSET \
284 	(__ASAN_SHADOW_START - (TEE_RAM_START / 8))
285 	COMPILE_TIME_ASSERT(CFG_ASAN_SHADOW_OFFSET == __CFG_ASAN_SHADOW_OFFSET);
286 #undef __ASAN_SHADOW_START
287 #undef __CFG_ASAN_SHADOW_OFFSET
288 
289 	/*
290 	 * Register the area covered by the shadow memory: everything from the
291 	 * load address up to the beginning of the shadow area.
292 	 */
293 	asan_set_shadowed((void *)TEE_LOAD_ADDR, &__asan_shadow_start);
294 
295 	/*
296 	 * Add access to areas that aren't opened automatically by a
297 	 * constructor.
298 	 */
299 	asan_tag_access(&__ctor_list, &__ctor_end);
300 	asan_tag_access(__rodata_start, __rodata_end);
301 #ifdef CFG_WITH_PAGER
302 	asan_tag_access(__pageable_start, __pageable_end);
303 #endif /*CFG_WITH_PAGER*/
304 	asan_tag_access(__nozi_start, __nozi_end);
305 	asan_tag_access(__exidx_start, __exidx_end);
306 	asan_tag_access(__extab_start, __extab_end);
307 
308 	init_run_constructors();
309 
310 	/* Everything is tagged correctly, let's start address sanitizing. */
311 	asan_start();
312 }
313 #else /*CFG_CORE_SANITIZE_KADDRESS*/
314 static void init_asan(void)
315 {
316 }
317 #endif /*CFG_CORE_SANITIZE_KADDRESS*/
318 
319 #if defined(CFG_MEMTAG)
320 /* Called from entry_a64.S only when MEMTAG is configured */
321 void boot_init_memtag(void)
322 {
323 	paddr_t base = 0;
324 	paddr_size_t size = 0;
325 
326 	memtag_init_ops(feat_mte_implemented());
327 	core_mmu_get_secure_memory(&base, &size);
328 	memtag_set_tags((void *)(vaddr_t)base, size, 0);
329 }
330 #endif
331 
332 #ifdef CFG_WITH_PAGER
333 
334 #ifdef CFG_CORE_SANITIZE_KADDRESS
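/*
 * Reserve the part of the pool that overlaps the ASAN shadow mapping so
 * the pager never hands those pages out.
 */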
335 static void carve_out_asan_mem(tee_mm_pool_t *pool)
336 {
337 	const size_t s = pool->hi - pool->lo;
338 	tee_mm_entry_t *mm;
339 	paddr_t apa = ASAN_MAP_PA;
340 	size_t asz = ASAN_MAP_SZ;
341 
342 	if (core_is_buffer_outside(apa, asz, pool->lo, s))
343 		return;
344 
345 	/* Reserve the shadow area */
346 	if (!core_is_buffer_inside(apa, asz, pool->lo, s)) {
347 		if (apa < pool->lo) {
348 			/*
349 			 * ASAN buffer is overlapping with the beginning of
350 			 * the pool.
351 			 */
352 			asz -= pool->lo - apa;
353 			apa = pool->lo;
354 		} else {
355 			/*
356 			 * ASAN buffer is overlapping with the end of the
357 			 * pool.
358 			 */
359 			asz = pool->hi - apa;
360 		}
361 	}
362 	mm = tee_mm_alloc2(pool, apa, asz);
363 	assert(mm);
364 }
365 #else
366 static void carve_out_asan_mem(tee_mm_pool_t *pool __unused)
367 {
368 }
369 #endif
370 
371 static void print_pager_pool_size(void)
372 {
373 	struct tee_pager_stats __maybe_unused stats;
374 
375 	tee_pager_get_stats(&stats);
376 	IMSG("Pager pool size: %zukB",
377 		stats.npages_all * SMALL_PAGE_SIZE / 1024);
378 }
379 
380 static void init_vcore(tee_mm_pool_t *mm_vcore)
381 {
382 	const vaddr_t begin = VCORE_START_VA;
383 	size_t size = TEE_RAM_VA_SIZE;
384 
385 #ifdef CFG_CORE_SANITIZE_KADDRESS
386 	/* Carve out ASAN memory, flat mapped after core memory */
387 	if (begin + size > ASAN_SHADOW_PA)
388 		size = ASAN_MAP_PA - begin;
389 #endif
390 
391 	if (!tee_mm_init(mm_vcore, begin, size, SMALL_PAGE_SHIFT,
392 			 TEE_MM_POOL_NO_FLAGS))
393 		panic("tee_mm_vcore init failed");
394 }
395 
396 /*
397  * With CFG_CORE_ASLR=y the init part is relocated very early during boot.
398  * The init part is also paged just as the rest of the normal paged code, with
399  * the difference that it's preloaded during boot. When the backing store
400  * is configured, the entire paged binary is copied in place, followed by
401  * the init part. Since the init part has been relocated (references to
402  * addresses updated to compensate for the new load address), this has to be
403  * undone for the hashes of those pages to match the original binary.
404  *
405  * If CFG_CORE_ASLR=n, nothing needs to be done as the code/ro pages are
406  * unchanged.
407  */
408 static void undo_init_relocation(uint8_t *paged_store __maybe_unused)
409 {
410 #ifdef CFG_CORE_ASLR
411 	unsigned long *ptr = NULL;
412 	const uint32_t *reloc = NULL;
413 	const uint32_t *reloc_end = NULL;
414 	unsigned long offs = boot_mmu_config.map_offset;
415 	const struct boot_embdata *embdata = (const void *)__init_end;
416 	vaddr_t addr_end = (vaddr_t)__init_end - offs - TEE_LOAD_ADDR;
417 	vaddr_t addr_start = (vaddr_t)__init_start - offs - TEE_LOAD_ADDR;
418 
419 	reloc = (const void *)((vaddr_t)embdata + embdata->reloc_offset);
420 	reloc_end = reloc + embdata->reloc_len / sizeof(*reloc);
421 
422 	for (; reloc < reloc_end; reloc++) {
423 		if (*reloc < addr_start)
424 			continue;
425 		if (*reloc >= addr_end)
426 			break;
427 		ptr = (void *)(paged_store + *reloc - addr_start);
428 		*ptr -= offs;
429 	}
430 #endif
431 }
432 
433 static struct fobj *ro_paged_alloc(tee_mm_entry_t *mm, void *hashes,
434 				   void *store)
435 {
436 	const unsigned int num_pages = tee_mm_get_bytes(mm) / SMALL_PAGE_SIZE;
437 #ifdef CFG_CORE_ASLR
438 	unsigned int reloc_offs = (vaddr_t)__pageable_start - VCORE_START_VA;
439 	const struct boot_embdata *embdata = (const void *)__init_end;
440 	const void *reloc = __init_end + embdata->reloc_offset;
441 
442 	return fobj_ro_reloc_paged_alloc(num_pages, hashes, reloc_offs,
443 					 reloc, embdata->reloc_len, store);
444 #else
445 	return fobj_ro_paged_alloc(num_pages, hashes, store);
446 #endif
447 }
448 
449 static void init_runtime(unsigned long pageable_part)
450 {
451 	size_t n;
452 	size_t init_size = (size_t)(__init_end - __init_start);
453 	size_t pageable_start = (size_t)__pageable_start;
454 	size_t pageable_end = (size_t)__pageable_end;
455 	size_t pageable_size = pageable_end - pageable_start;
456 	vaddr_t tzsram_end = TZSRAM_BASE + TZSRAM_SIZE - TEE_LOAD_ADDR +
457 			     VCORE_START_VA;
458 	size_t hash_size = (pageable_size / SMALL_PAGE_SIZE) *
459 			   TEE_SHA256_HASH_SIZE;
460 	const struct boot_embdata *embdata = (const void *)__init_end;
461 	const void *tmp_hashes = NULL;
462 	tee_mm_entry_t *mm = NULL;
463 	struct fobj *fobj = NULL;
464 	uint8_t *paged_store = NULL;
465 	uint8_t *hashes = NULL;
466 
467 	assert(pageable_size % SMALL_PAGE_SIZE == 0);
468 	assert(embdata->total_len >= embdata->hashes_offset +
469 				     embdata->hashes_len);
470 	assert(hash_size == embdata->hashes_len);
471 
472 	tmp_hashes = __init_end + embdata->hashes_offset;
473 
474 	init_asan();
475 
476 	/* Add heap2 first as heap1 may be too small as the initial bget pool */
477 	malloc_add_pool(__heap2_start, __heap2_end - __heap2_start);
478 	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
479 
480 	/*
481 	 * This needs to be initialized early to support address lookup
482 	 * in MEM_AREA_TEE_RAM
483 	 */
484 	tee_pager_early_init();
485 
486 	hashes = malloc(hash_size);
487 	IMSG_RAW("\n");
488 	IMSG("Pager is enabled. Hashes: %zu bytes", hash_size);
489 	assert(hashes);
490 	asan_memcpy_unchecked(hashes, tmp_hashes, hash_size);
491 
492 	/*
493 	 * Need tee_mm_sec_ddr initialized to be able to allocate secure
494 	 * DDR below.
495 	 */
496 	core_mmu_init_ta_ram();
497 
498 	carve_out_asan_mem(&tee_mm_sec_ddr);
499 
500 	mm = tee_mm_alloc(&tee_mm_sec_ddr, pageable_size);
501 	assert(mm);
502 	paged_store = phys_to_virt(tee_mm_get_smem(mm), MEM_AREA_TA_RAM,
503 				   pageable_size);
504 	/*
505 	 * Load pageable part in the dedicated allocated area:
506 	 * - Move pageable non-init part into pageable area. Note bootloader
507 	 *   may have loaded it anywhere in TA RAM hence use memmove().
508 	 * - Copy pageable init part from current location into pageable area.
509 	 */
510 	memmove(paged_store + init_size,
511 		phys_to_virt(pageable_part,
512 			     core_mmu_get_type_by_pa(pageable_part),
513 			     __pageable_part_end - __pageable_part_start),
514 		__pageable_part_end - __pageable_part_start);
515 	asan_memcpy_unchecked(paged_store, __init_start, init_size);
516 	/*
517 	 * Undo any relocation of the init part so the hash checks
518 	 * can pass.
519 	 */
520 	undo_init_relocation(paged_store);
521 
522 	/* Check that the hashes of what's in the pageable area are OK */
523 	DMSG("Checking hashes of pageable area");
524 	for (n = 0; (n * SMALL_PAGE_SIZE) < pageable_size; n++) {
525 		const uint8_t *hash = hashes + n * TEE_SHA256_HASH_SIZE;
526 		const uint8_t *page = paged_store + n * SMALL_PAGE_SIZE;
527 		TEE_Result res;
528 
529 		DMSG("hash pg_idx %zu hash %p page %p", n, hash, page);
530 		res = hash_sha256_check(hash, page, SMALL_PAGE_SIZE);
531 		if (res != TEE_SUCCESS) {
532 			EMSG("Hash failed for page %zu at %p: res 0x%x",
533 			     n, (void *)page, res);
534 			panic();
535 		}
536 	}
537 
538 	/*
539 	 * Assert that the prepaged init sections are page aligned so that
540 	 * nothing trails uninitialized at the end of the premapped init area.
541 	 */
542 	assert(!(init_size & SMALL_PAGE_MASK));
543 
544 	/*
545 	 * Initialize the virtual memory pool used for main_mmu_l2_ttb which
546 	 * is supplied to tee_pager_init() below.
547 	 */
548 	init_vcore(&tee_mm_vcore);
549 
550 	/*
551 	 * Assign the pager alias area at the end of the small page block
552 	 * that the rest of the binary is loaded into. We're taking more
553 	 * than needed, but we're guaranteed to not need more than the
554 	 * physical amount of TZSRAM.
555 	 */
556 	mm = tee_mm_alloc2(&tee_mm_vcore,
557 			   (vaddr_t)tee_mm_vcore.lo +
558 			   tee_mm_vcore.size - TZSRAM_SIZE,
559 			   TZSRAM_SIZE);
560 	assert(mm);
561 	tee_pager_set_alias_area(mm);
562 
563 	/*
564 	 * Claim virtual memory which isn't paged.
565 	 * Linear memory (flat-mapped core memory) ends there.
566 	 */
567 	mm = tee_mm_alloc2(&tee_mm_vcore, VCORE_UNPG_RX_PA,
568 			   (vaddr_t)(__pageable_start - VCORE_UNPG_RX_PA));
569 	assert(mm);
570 
571 	/*
572 	 * Allocate virtual memory for the pageable area and let the pager
573 	 * take charge of all the pages already assigned to that memory.
574 	 */
575 	mm = tee_mm_alloc2(&tee_mm_vcore, (vaddr_t)__pageable_start,
576 			   pageable_size);
577 	assert(mm);
578 	fobj = ro_paged_alloc(mm, hashes, paged_store);
579 	assert(fobj);
580 	tee_pager_add_core_region(tee_mm_get_smem(mm), PAGED_REGION_TYPE_RO,
581 				  fobj);
582 	fobj_put(fobj);
583 
584 	tee_pager_add_pages(pageable_start, init_size / SMALL_PAGE_SIZE, false);
585 	tee_pager_add_pages(pageable_start + init_size,
586 			    (pageable_size - init_size) / SMALL_PAGE_SIZE,
587 			    true);
588 	if (pageable_end < tzsram_end)
589 		tee_pager_add_pages(pageable_end, (tzsram_end - pageable_end) /
590 						   SMALL_PAGE_SIZE, true);
591 
592 	/*
593 	 * There may be physical pages in TZSRAM before the core load address.
594 	 * These pages can be added to the physical pages pool of the pager.
595 	 * This setup may happen when the secure bootloader runs in TZSRAM
596 	 * and its memory can be reused by OP-TEE once boot stages complete.
597 	 */
598 	tee_pager_add_pages(tee_mm_vcore.lo,
599 			(VCORE_UNPG_RX_PA - tee_mm_vcore.lo) / SMALL_PAGE_SIZE,
600 			true);
601 
602 	print_pager_pool_size();
603 }
604 #else
605 
606 static void init_runtime(unsigned long pageable_part __unused)
607 {
608 	init_asan();
609 
610 	/*
611 	 * By default the whole of OP-TEE uses malloc, so we need to
612 	 * initialize it early. But when virtualization is enabled, malloc is
613 	 * used only by the TEE runtime, so it should be initialized later,
614 	 * separately for every virtual partition. Core code uses nex_malloc
615 	 * instead.
616 	 */
617 #ifdef CFG_NS_VIRTUALIZATION
618 	nex_malloc_add_pool(__nex_heap_start, __nex_heap_end -
619 					      __nex_heap_start);
620 #else
621 	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
622 #endif
623 
624 	IMSG_RAW("\n");
625 }
626 #endif
627 
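/*
 * Return the device tree used for configuration: the embedded secure DTB
 * when present, otherwise the external (normal world supplied) DTB.
 */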
628 void *get_dt(void)
629 {
630 	void *fdt = get_embedded_dt();
631 
632 	if (!fdt)
633 		fdt = get_external_dt();
634 
635 	return fdt;
636 }
637 
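/*
 * Like get_dt(), but only return the external DTB when it is allowed to
 * be used by the secure world (CFG_MAP_EXT_DT_SECURE).
 */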
638 void *get_secure_dt(void)
639 {
640 	void *fdt = get_embedded_dt();
641 
642 	if (!fdt && IS_ENABLED(CFG_MAP_EXT_DT_SECURE))
643 		fdt = get_external_dt();
644 
645 	return fdt;
646 }
647 
648 #if defined(CFG_EMBED_DTB)
649 void *get_embedded_dt(void)
650 {
651 	static bool checked;
652 
653 	assert(cpu_mmu_enabled());
654 
655 	if (!checked) {
656 		IMSG("Embedded DTB found");
657 
658 		if (fdt_check_header(embedded_secure_dtb))
659 			panic("Invalid embedded DTB");
660 
661 		checked = true;
662 	}
663 
664 	return embedded_secure_dtb;
665 }
666 #else
667 void *get_embedded_dt(void)
668 {
669 	return NULL;
670 }
671 #endif /*CFG_EMBED_DTB*/
672 
673 #if defined(CFG_DT)
674 void *get_external_dt(void)
675 {
676 	if (!IS_ENABLED(CFG_EXTERNAL_DT))
677 		return NULL;
678 
679 	assert(cpu_mmu_enabled());
680 	return external_dt.blob;
681 }
682 
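/*
 * boot_final hook: pack the external DTB and remove its temporary mapping
 * once the core has finished updating it.
 */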
683 static TEE_Result release_external_dt(void)
684 {
685 	int ret = 0;
686 
687 	if (!IS_ENABLED(CFG_EXTERNAL_DT))
688 		return TEE_SUCCESS;
689 
690 	if (!external_dt.blob)
691 		return TEE_SUCCESS;
692 
693 	ret = fdt_pack(external_dt.blob);
694 	if (ret < 0) {
695 		EMSG("Failed to pack Device Tree at 0x%" PRIxPA ": error %d",
696 		     virt_to_phys(external_dt.blob), ret);
697 		panic();
698 	}
699 
700 	if (core_mmu_remove_mapping(MEM_AREA_EXT_DT, external_dt.blob,
701 				    CFG_DTB_MAX_SIZE))
702 		panic("Failed to remove temporary Device Tree mapping");
703 
704 	/* External DTB is no longer accessible, reset pointer to invalid */
705 	external_dt.blob = NULL;
706 
707 	return TEE_SUCCESS;
708 }
709 boot_final(release_external_dt);
710 
711 #ifdef _CFG_USE_DTB_OVERLAY
712 static int add_dt_overlay_fragment(struct dt_descriptor *dt, int ioffs)
713 {
714 	char frag[32];
715 	int offs;
716 	int ret;
717 
718 	snprintf(frag, sizeof(frag), "fragment@%d", dt->frag_id);
719 	offs = fdt_add_subnode(dt->blob, ioffs, frag);
720 	if (offs < 0)
721 		return offs;
722 
723 	dt->frag_id += 1;
724 
725 	ret = fdt_setprop_string(dt->blob, offs, "target-path", "/");
726 	if (ret < 0)
727 		return -1;
728 
729 	return fdt_add_subnode(dt->blob, offs, "__overlay__");
730 }
731 
732 static int init_dt_overlay(struct dt_descriptor *dt, int __maybe_unused dt_size)
733 {
734 	int fragment;
735 
736 	if (IS_ENABLED(CFG_EXTERNAL_DTB_OVERLAY)) {
737 		if (!fdt_check_header(dt->blob)) {
738 			fdt_for_each_subnode(fragment, dt->blob, 0)
739 				dt->frag_id += 1;
740 			return 0;
741 		}
742 	}
743 
744 	return fdt_create_empty_tree(dt->blob, dt_size);
745 }
746 #else
747 static int add_dt_overlay_fragment(struct dt_descriptor *dt __unused, int offs)
748 {
749 	return offs;
750 }
751 
752 static int init_dt_overlay(struct dt_descriptor *dt __unused,
753 			   int dt_size __unused)
754 {
755 	return 0;
756 }
757 #endif /* _CFG_USE_DTB_OVERLAY */
758 
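/*
 * Add a subnode under the node at @path, going through an overlay fragment
 * when DTB overlays are in use. Returns the new node offset or -1 on error.
 */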
759 static int add_dt_path_subnode(struct dt_descriptor *dt, const char *path,
760 			       const char *subnode)
761 {
762 	int offs;
763 
764 	offs = fdt_path_offset(dt->blob, path);
765 	if (offs < 0)
766 		return -1;
767 	offs = add_dt_overlay_fragment(dt, offs);
768 	if (offs < 0)
769 		return -1;
770 	offs = fdt_add_subnode(dt->blob, offs, subnode);
771 	if (offs < 0)
772 		return -1;
773 	return offs;
774 }
775 
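/*
 * Add the /firmware/optee node with "linaro,optee-tz" compatible, "smc"
 * method and, when asynchronous notifications are enabled, the GIC
 * interrupt used to signal them.
 */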
776 static int add_optee_dt_node(struct dt_descriptor *dt)
777 {
778 	int offs;
779 	int ret;
780 
781 	if (fdt_path_offset(dt->blob, "/firmware/optee") >= 0) {
782 		DMSG("OP-TEE Device Tree node already exists!");
783 		return 0;
784 	}
785 
786 	offs = fdt_path_offset(dt->blob, "/firmware");
787 	if (offs < 0) {
788 		offs = add_dt_path_subnode(dt, "/", "firmware");
789 		if (offs < 0)
790 			return -1;
791 	}
792 
793 	offs = fdt_add_subnode(dt->blob, offs, "optee");
794 	if (offs < 0)
795 		return -1;
796 
797 	ret = fdt_setprop_string(dt->blob, offs, "compatible",
798 				 "linaro,optee-tz");
799 	if (ret < 0)
800 		return -1;
801 	ret = fdt_setprop_string(dt->blob, offs, "method", "smc");
802 	if (ret < 0)
803 		return -1;
804 
805 	if (CFG_CORE_ASYNC_NOTIF_GIC_INTID) {
806 		/*
807 		 * The format of the interrupt property is defined by the
808 		 * binding of the interrupt domain root. In this case it's
809 		 * an Arm GIC v1, v2 or v3 so we must be compatible with
810 		 * these.
811 		 *
812 		 * An SPI type of interrupt is indicated with a 0 in the
813 		 * first cell. A PPI type is indicated with value 1.
814 		 *
815 		 * The interrupt number goes in the second cell where
816 		 * SPIs range from 0 to 987 and PPIs range from 0 to 15.
817 		 *
818 		 * Flags are passed in the third cell.
819 		 */
820 		uint32_t itr_trigger = 0;
821 		uint32_t itr_type = 0;
822 		uint32_t itr_id = 0;
823 		uint32_t val[3] = { };
824 
825 		/* PPIs are visible only in the current CPU cluster */
826 		static_assert(!CFG_CORE_ASYNC_NOTIF_GIC_INTID ||
827 			      (CFG_CORE_ASYNC_NOTIF_GIC_INTID >=
828 			       GIC_SPI_BASE) ||
829 			      ((CFG_TEE_CORE_NB_CORE <= 8) &&
830 			       (CFG_CORE_ASYNC_NOTIF_GIC_INTID >=
831 				GIC_PPI_BASE)));
832 
833 		if (CFG_CORE_ASYNC_NOTIF_GIC_INTID >= GIC_SPI_BASE) {
834 			itr_type = GIC_SPI;
835 			itr_id = CFG_CORE_ASYNC_NOTIF_GIC_INTID - GIC_SPI_BASE;
836 			itr_trigger = IRQ_TYPE_EDGE_RISING;
837 		} else {
838 			itr_type = GIC_PPI;
839 			itr_id = CFG_CORE_ASYNC_NOTIF_GIC_INTID - GIC_PPI_BASE;
840 			itr_trigger = IRQ_TYPE_EDGE_RISING |
841 				      GIC_CPU_MASK_SIMPLE(CFG_TEE_CORE_NB_CORE);
842 		}
843 
844 		val[0] = TEE_U32_TO_BIG_ENDIAN(itr_type);
845 		val[1] = TEE_U32_TO_BIG_ENDIAN(itr_id);
846 		val[2] = TEE_U32_TO_BIG_ENDIAN(itr_trigger);
847 
848 		ret = fdt_setprop(dt->blob, offs, "interrupts", val,
849 				  sizeof(val));
850 		if (ret < 0)
851 			return -1;
852 	}
853 	return 0;
854 }
855 
856 #ifdef CFG_PSCI_ARM32
857 static int append_psci_compatible(void *fdt, int offs, const char *str)
858 {
859 	return fdt_appendprop(fdt, offs, "compatible", str, strlen(str) + 1);
860 }
861 
862 static int dt_add_psci_node(struct dt_descriptor *dt)
863 {
864 	int offs;
865 
866 	if (fdt_path_offset(dt->blob, "/psci") >= 0) {
867 		DMSG("PSCI Device Tree node already exists!");
868 		return 0;
869 	}
870 
871 	offs = add_dt_path_subnode(dt, "/", "psci");
872 	if (offs < 0)
873 		return -1;
874 	if (append_psci_compatible(dt->blob, offs, "arm,psci-1.0"))
875 		return -1;
876 	if (append_psci_compatible(dt->blob, offs, "arm,psci-0.2"))
877 		return -1;
878 	if (append_psci_compatible(dt->blob, offs, "arm,psci"))
879 		return -1;
880 	if (fdt_setprop_string(dt->blob, offs, "method", "smc"))
881 		return -1;
882 	if (fdt_setprop_u32(dt->blob, offs, "cpu_suspend", PSCI_CPU_SUSPEND))
883 		return -1;
884 	if (fdt_setprop_u32(dt->blob, offs, "cpu_off", PSCI_CPU_OFF))
885 		return -1;
886 	if (fdt_setprop_u32(dt->blob, offs, "cpu_on", PSCI_CPU_ON))
887 		return -1;
888 	if (fdt_setprop_u32(dt->blob, offs, "sys_poweroff", PSCI_SYSTEM_OFF))
889 		return -1;
890 	if (fdt_setprop_u32(dt->blob, offs, "sys_reset", PSCI_SYSTEM_RESET))
891 		return -1;
892 	return 0;
893 }
894 
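/*
 * Return 0 if one of the strings in the node's "compatible" property starts
 * with @prefix, -1 otherwise.
 */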
895 static int check_node_compat_prefix(struct dt_descriptor *dt, int offs,
896 				    const char *prefix)
897 {
898 	const size_t prefix_len = strlen(prefix);
899 	size_t l;
900 	int plen;
901 	const char *prop;
902 
903 	prop = fdt_getprop(dt->blob, offs, "compatible", &plen);
904 	if (!prop)
905 		return -1;
906 
907 	while (plen > 0) {
908 		if (memcmp(prop, prefix, prefix_len) == 0)
909 			return 0; /* match */
910 
911 		l = strlen(prop) + 1;
912 		prop += l;
913 		plen -= l;
914 	}
915 
916 	return -1;
917 }
918 
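/*
 * Set enable-method = "psci" on every arm,cortex-a* CPU node that doesn't
 * already define an enable-method.
 */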
919 static int dt_add_psci_cpu_enable_methods(struct dt_descriptor *dt)
920 {
921 	int offs = 0;
922 
923 	while (1) {
924 		offs = fdt_next_node(dt->blob, offs, NULL);
925 		if (offs < 0)
926 			break;
927 		if (fdt_getprop(dt->blob, offs, "enable-method", NULL))
928 			continue; /* already set */
929 		if (check_node_compat_prefix(dt, offs, "arm,cortex-a"))
930 			continue; /* no compatible */
931 		if (fdt_setprop_string(dt->blob, offs, "enable-method", "psci"))
932 			return -1;
933 		/* Need to restart scanning as offsets may have changed */
934 		offs = 0;
935 	}
936 	return 0;
937 }
938 
939 static int config_psci(struct dt_descriptor *dt)
940 {
941 	if (dt_add_psci_node(dt))
942 		return -1;
943 	return dt_add_psci_cpu_enable_methods(dt);
944 }
945 #else
946 static int config_psci(struct dt_descriptor *dt __unused)
947 {
948 	return 0;
949 }
950 #endif /*CFG_PSCI_ARM32*/
951 
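/*
 * Store @val in an FDT property buffer as one cell (32-bit) or two cells
 * (64-bit), converted to big-endian as required by the FDT format.
 */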
952 static void set_dt_val(void *data, uint32_t cell_size, uint64_t val)
953 {
954 	if (cell_size == 1) {
955 		fdt32_t v = cpu_to_fdt32((uint32_t)val);
956 
957 		memcpy(data, &v, sizeof(v));
958 	} else {
959 		fdt64_t v = cpu_to_fdt64(val);
960 
961 		memcpy(data, &v, sizeof(v));
962 	}
963 }
964 
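/*
 * Describe the range [@pa, @pa + @size) as a no-map node under
 * /reserved-memory, creating /reserved-memory first if needed.
 */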
965 static int add_res_mem_dt_node(struct dt_descriptor *dt, const char *name,
966 			       paddr_t pa, size_t size)
967 {
968 	int offs = 0;
969 	int ret = 0;
970 	int addr_size = -1;
971 	int len_size = -1;
972 	bool found = true;
973 	char subnode_name[80] = { 0 };
974 
975 	offs = fdt_path_offset(dt->blob, "/reserved-memory");
976 
977 	if (offs < 0) {
978 		found = false;
979 		offs = 0;
980 	}
981 
982 	if (IS_ENABLED2(_CFG_USE_DTB_OVERLAY)) {
983 		len_size = sizeof(paddr_t) / sizeof(uint32_t);
984 		addr_size = sizeof(paddr_t) / sizeof(uint32_t);
985 	} else {
986 		len_size = fdt_size_cells(dt->blob, offs);
987 		if (len_size < 0)
988 			return -1;
989 		addr_size = fdt_address_cells(dt->blob, offs);
990 		if (addr_size < 0)
991 			return -1;
992 	}
993 
994 	if (!found) {
995 		offs = add_dt_path_subnode(dt, "/", "reserved-memory");
996 		if (offs < 0)
997 			return -1;
998 		ret = fdt_setprop_cell(dt->blob, offs, "#address-cells",
999 				       addr_size);
1000 		if (ret < 0)
1001 			return -1;
1002 		ret = fdt_setprop_cell(dt->blob, offs, "#size-cells", len_size);
1003 		if (ret < 0)
1004 			return -1;
1005 		ret = fdt_setprop(dt->blob, offs, "ranges", NULL, 0);
1006 		if (ret < 0)
1007 			return -1;
1008 	}
1009 
1010 	ret = snprintf(subnode_name, sizeof(subnode_name),
1011 		       "%s@%" PRIxPA, name, pa);
1012 	if (ret < 0 || ret >= (int)sizeof(subnode_name))
1013 		DMSG("truncated node \"%s@%" PRIxPA"\"", name, pa);
1014 	offs = fdt_add_subnode(dt->blob, offs, subnode_name);
1015 	if (offs >= 0) {
1016 		uint32_t data[FDT_MAX_NCELLS * 2];
1017 
1018 		set_dt_val(data, addr_size, pa);
1019 		set_dt_val(data + addr_size, len_size, size);
1020 		ret = fdt_setprop(dt->blob, offs, "reg", data,
1021 				  sizeof(uint32_t) * (addr_size + len_size));
1022 		if (ret < 0)
1023 			return -1;
1024 		ret = fdt_setprop(dt->blob, offs, "no-map", NULL, 0);
1025 		if (ret < 0)
1026 			return -1;
1027 	} else {
1028 		return -1;
1029 	}
1030 	return 0;
1031 }
1032 
1033 #ifdef CFG_CORE_DYN_SHM
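/* Read one value of @cell_size cells at *@offs and advance past it. */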
1034 static uint64_t get_dt_val_and_advance(const void *data, size_t *offs,
1035 				       uint32_t cell_size)
1036 {
1037 	uint64_t rv = 0;
1038 
1039 	if (cell_size == 1) {
1040 		uint32_t v;
1041 
1042 		memcpy(&v, (const uint8_t *)data + *offs, sizeof(v));
1043 		*offs += sizeof(v);
1044 		rv = fdt32_to_cpu(v);
1045 	} else {
1046 		uint64_t v;
1047 
1048 		memcpy(&v, (const uint8_t *)data + *offs, sizeof(v));
1049 		*offs += sizeof(v);
1050 		rv = fdt64_to_cpu(v);
1051 	}
1052 
1053 	return rv;
1054 }
1055 
1056 /*
1057  * Find all non-secure memory in the DT. Memory marked inaccessible by Secure
1058  * World is ignored since it cannot be mapped to be used as dynamic shared
1059  * memory.
1060  */
1061 static int get_nsec_memory_helper(void *fdt, struct core_mmu_phys_mem *mem)
1062 {
1063 	const uint8_t *prop = NULL;
1064 	uint64_t a = 0;
1065 	uint64_t l = 0;
1066 	size_t prop_offs = 0;
1067 	size_t prop_len = 0;
1068 	int elems_total = 0;
1069 	int addr_size = 0;
1070 	int len_size = 0;
1071 	int offs = 0;
1072 	size_t n = 0;
1073 	int len = 0;
1074 
1075 	addr_size = fdt_address_cells(fdt, 0);
1076 	if (addr_size < 0)
1077 		return 0;
1078 
1079 	len_size = fdt_size_cells(fdt, 0);
1080 	if (len_size < 0)
1081 		return 0;
1082 
1083 	while (true) {
1084 		offs = fdt_node_offset_by_prop_value(fdt, offs, "device_type",
1085 						     "memory",
1086 						     sizeof("memory"));
1087 		if (offs < 0)
1088 			break;
1089 
1090 		if (fdt_get_status(fdt, offs) != (DT_STATUS_OK_NSEC |
1091 						   DT_STATUS_OK_SEC))
1092 			continue;
1093 
1094 		prop = fdt_getprop(fdt, offs, "reg", &len);
1095 		if (!prop)
1096 			continue;
1097 
1098 		prop_len = len;
1099 		for (n = 0, prop_offs = 0; prop_offs < prop_len; n++) {
1100 			a = get_dt_val_and_advance(prop, &prop_offs, addr_size);
1101 			if (prop_offs >= prop_len) {
1102 				n--;
1103 				break;
1104 			}
1105 
1106 			l = get_dt_val_and_advance(prop, &prop_offs, len_size);
1107 			if (mem) {
1108 				mem->type = MEM_AREA_DDR_OVERALL;
1109 				mem->addr = a;
1110 				mem->size = l;
1111 				mem++;
1112 			}
1113 		}
1114 
1115 		elems_total += n;
1116 	}
1117 
1118 	return elems_total;
1119 }
1120 
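/*
 * Two-pass scan: first count the memory ranges in the DT, then allocate an
 * array from the nexus heap and fill it in.
 */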
1121 static struct core_mmu_phys_mem *get_nsec_memory(void *fdt, size_t *nelems)
1122 {
1123 	struct core_mmu_phys_mem *mem = NULL;
1124 	int elems_total = 0;
1125 
1126 	elems_total = get_nsec_memory_helper(fdt, NULL);
1127 	if (elems_total <= 0)
1128 		return NULL;
1129 
1130 	mem = nex_calloc(elems_total, sizeof(*mem));
1131 	if (!mem)
1132 		panic();
1133 
1134 	elems_total = get_nsec_memory_helper(fdt, mem);
1135 	assert(elems_total > 0);
1136 
1137 	*nelems = elems_total;
1138 
1139 	return mem;
1140 }
1141 #endif /*CFG_CORE_DYN_SHM*/
1142 
1143 #ifdef CFG_CORE_RESERVED_SHM
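/*
 * Describe the static non-secure shared memory area as reserved so the
 * normal world won't use it as regular memory.
 */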
1144 static int mark_static_shm_as_reserved(struct dt_descriptor *dt)
1145 {
1146 	vaddr_t shm_start;
1147 	vaddr_t shm_end;
1148 
1149 	core_mmu_get_mem_by_type(MEM_AREA_NSEC_SHM, &shm_start, &shm_end);
1150 	if (shm_start != shm_end)
1151 		return add_res_mem_dt_node(dt, "optee_shm",
1152 					   virt_to_phys((void *)shm_start),
1153 					   shm_end - shm_start);
1154 
1155 	DMSG("No SHM configured");
1156 	return -1;
1157 }
1158 #endif /*CFG_CORE_RESERVED_SHM*/
1159 
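/*
 * Map the device tree passed by the bootloader at @phys_dt and prepare it
 * for modification (overlay setup and fdt_open_into()).
 */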
1160 static void init_external_dt(unsigned long phys_dt)
1161 {
1162 	struct dt_descriptor *dt = &external_dt;
1163 	void *fdt;
1164 	int ret;
1165 
1166 	if (!IS_ENABLED(CFG_EXTERNAL_DT))
1167 		return;
1168 
1169 	if (!phys_dt) {
1170 		/*
1171 		 * No need to panic as we're not using the DT in OP-TEE
1172 		 * yet, we're only adding some nodes for normal world use.
1173 		 * This makes the switch to using DT easier as we can boot
1174 		 * a newer OP-TEE with older boot loaders. Once we start to
1175 		 * initialize devices based on DT we'll likely panic
1176 		 * instead of returning here.
1177 		 */
1178 		IMSG("No non-secure external DT");
1179 		return;
1180 	}
1181 
1182 	fdt = core_mmu_add_mapping(MEM_AREA_EXT_DT, phys_dt, CFG_DTB_MAX_SIZE);
1183 	if (!fdt)
1184 		panic("Failed to map external DTB");
1185 
1186 	dt->blob = fdt;
1187 
1188 	ret = init_dt_overlay(dt, CFG_DTB_MAX_SIZE);
1189 	if (ret < 0) {
1190 		EMSG("Device Tree Overlay init fail @ %#lx: error %d", phys_dt,
1191 		     ret);
1192 		panic();
1193 	}
1194 
1195 	ret = fdt_open_into(fdt, fdt, CFG_DTB_MAX_SIZE);
1196 	if (ret < 0) {
1197 		EMSG("Invalid Device Tree at %#lx: error %d", phys_dt, ret);
1198 		panic();
1199 	}
1200 
1201 	IMSG("Non-secure external DT found");
1202 }
1203 
1204 static int mark_tzdram_as_reserved(struct dt_descriptor *dt)
1205 {
1206 	return add_res_mem_dt_node(dt, "optee_core", CFG_TZDRAM_START,
1207 				   CFG_TZDRAM_SIZE);
1208 }
1209 
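/*
 * Add the nodes the normal world needs to the external DTB: the OP-TEE
 * firmware node, PSCI configuration and reserved-memory entries, depending
 * on the configuration.
 */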
1210 static void update_external_dt(void)
1211 {
1212 	struct dt_descriptor *dt = &external_dt;
1213 
1214 	if (!IS_ENABLED(CFG_EXTERNAL_DT))
1215 		return;
1216 
1217 	if (!dt->blob)
1218 		return;
1219 
1220 	if (!IS_ENABLED(CFG_CORE_FFA) && add_optee_dt_node(dt))
1221 		panic("Failed to add OP-TEE Device Tree node");
1222 
1223 	if (config_psci(dt))
1224 		panic("Failed to config PSCI");
1225 
1226 #ifdef CFG_CORE_RESERVED_SHM
1227 	if (mark_static_shm_as_reserved(dt))
1228 		panic("Failed to config non-secure memory");
1229 #endif
1230 
1231 	if (mark_tzdram_as_reserved(dt))
1232 		panic("Failed to config secure memory");
1233 }
1234 #else /*CFG_DT*/
1235 void *get_external_dt(void)
1236 {
1237 	return NULL;
1238 }
1239 
1240 static void init_external_dt(unsigned long phys_dt __unused)
1241 {
1242 }
1243 
1244 static void update_external_dt(void)
1245 {
1246 }
1247 
1248 #ifdef CFG_CORE_DYN_SHM
1249 static struct core_mmu_phys_mem *get_nsec_memory(void *fdt __unused,
1250 						 size_t *nelems __unused)
1251 {
1252 	return NULL;
1253 }
1254 #endif /*CFG_CORE_DYN_SHM*/
1255 #endif /*!CFG_DT*/
1256 
1257 #if defined(CFG_CORE_SEL1_SPMC) && defined(CFG_DT)
1258 void *get_tos_fw_config_dt(void)
1259 {
1260 	if (!IS_ENABLED(CFG_MAP_EXT_DT_SECURE))
1261 		return NULL;
1262 
1263 	assert(cpu_mmu_enabled());
1264 
1265 	return tos_fw_config_dt.blob;
1266 }
1267 
1268 static void init_tos_fw_config_dt(unsigned long pa)
1269 {
1270 	struct dt_descriptor *dt = &tos_fw_config_dt;
1271 	void *fdt = NULL;
1272 	int ret = 0;
1273 
1274 	if (!IS_ENABLED(CFG_MAP_EXT_DT_SECURE))
1275 		return;
1276 
1277 	if (!pa)
1278 		panic("No TOS_FW_CONFIG DT found");
1279 
1280 	fdt = core_mmu_add_mapping(MEM_AREA_EXT_DT, pa, CFG_DTB_MAX_SIZE);
1281 	if (!fdt)
1282 		panic("Failed to map TOS_FW_CONFIG DT");
1283 
1284 	dt->blob = fdt;
1285 
1286 	ret = fdt_open_into(fdt, fdt, CFG_DTB_MAX_SIZE);
1287 	if (ret < 0) {
1288 		EMSG("Invalid Device Tree at %#lx: error %d", pa, ret);
1289 		panic();
1290 	}
1291 
1292 	IMSG("TOS_FW_CONFIG DT found");
1293 }
1294 #else
1295 void *get_tos_fw_config_dt(void)
1296 {
1297 	return NULL;
1298 }
1299 
1300 static void init_tos_fw_config_dt(unsigned long pa __unused)
1301 {
1302 }
1303 #endif /*CFG_CORE_SEL1_SPMC && CFG_DT*/
1304 
1305 #ifdef CFG_CORE_DYN_SHM
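/*
 * Register the non-secure DDR ranges usable for dynamic shared memory,
 * taken from the external DT when available, otherwise from the ranges
 * the platform registered at compile time.
 */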
1306 static void discover_nsec_memory(void)
1307 {
1308 	struct core_mmu_phys_mem *mem;
1309 	const struct core_mmu_phys_mem *mem_begin = NULL;
1310 	const struct core_mmu_phys_mem *mem_end = NULL;
1311 	size_t nelems;
1312 	void *fdt = get_external_dt();
1313 
1314 	if (fdt) {
1315 		mem = get_nsec_memory(fdt, &nelems);
1316 		if (mem) {
1317 			core_mmu_set_discovered_nsec_ddr(mem, nelems);
1318 			return;
1319 		}
1320 
1321 		DMSG("No non-secure memory found in FDT");
1322 	}
1323 
1324 	mem_begin = phys_ddr_overall_begin;
1325 	mem_end = phys_ddr_overall_end;
1326 	nelems = mem_end - mem_begin;
1327 	if (nelems) {
1328 		/*
1329 		 * Platform cannot use both register_ddr() and the now
1330 		 * deprecated register_dynamic_shm().
1331 		 */
1332 		assert(phys_ddr_overall_compat_begin ==
1333 		       phys_ddr_overall_compat_end);
1334 	} else {
1335 		mem_begin = phys_ddr_overall_compat_begin;
1336 		mem_end = phys_ddr_overall_compat_end;
1337 		nelems = mem_end - mem_begin;
1338 		if (!nelems)
1339 			return;
1340 		DMSG("Warning register_dynamic_shm() is deprecated, please use register_ddr() instead");
1341 	}
1342 
1343 	mem = nex_calloc(nelems, sizeof(*mem));
1344 	if (!mem)
1345 		panic();
1346 
1347 	memcpy(mem, phys_ddr_overall_begin, sizeof(*mem) * nelems);
1348 	core_mmu_set_discovered_nsec_ddr(mem, nelems);
1349 }
1350 #else /*CFG_CORE_DYN_SHM*/
1351 static void discover_nsec_memory(void)
1352 {
1353 }
1354 #endif /*!CFG_CORE_DYN_SHM*/
1355 
1356 #ifdef CFG_NS_VIRTUALIZATION
1357 static TEE_Result virt_init_heap(void)
1358 {
1359 	/* We need to initialize the pool for every virtual guest partition */
1360 	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
1361 
1362 	return TEE_SUCCESS;
1363 }
1364 preinit_early(virt_init_heap);
1365 #endif
1366 
1367 void init_tee_runtime(void)
1368 {
1369 #ifndef CFG_WITH_PAGER
1370 	/* Pager initializes TA RAM early */
1371 	core_mmu_init_ta_ram();
1372 #endif
1373 	/*
1374 	 * With virtualization we call this function when creating the
1375 	 * OP-TEE partition instead.
1376 	 */
1377 	if (!IS_ENABLED(CFG_NS_VIRTUALIZATION))
1378 		call_preinitcalls();
1379 	call_initcalls();
1380 
1381 	/*
1382 	 * These two functions use crypto_rng_read() to initialize the
1383 	 * pauth keys. Once call_initcalls() returns we're guaranteed that
1384 	 * crypto_rng_read() is ready to be used.
1385 	 */
1386 	thread_init_core_local_pauth_keys();
1387 	thread_init_thread_pauth_keys();
1388 }
1389 
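/*
 * Early primary CPU initialization: core local stacks, pager/malloc
 * runtime, the boot thread and thread subsystem, and the secure monitor
 * when OP-TEE runs without ARM Trusted Firmware.
 */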
1390 static void init_primary(unsigned long pageable_part, unsigned long nsec_entry)
1391 {
1392 	thread_init_core_local_stacks();
1393 	/*
1394 	 * Mask asynchronous exceptions before switching to the thread vector
1395 	 * as the thread handler requires those to be masked while
1396 	 * executing with the temporary stack. The thread subsystem also
1397 	 * asserts that the foreign interrupts are blocked when using most of
1398 	 * its functions.
1399 	 */
1400 	thread_set_exceptions(THREAD_EXCP_ALL);
1401 	primary_save_cntfrq();
1402 	init_vfp_sec();
1403 	/*
1404 	 * Pager: init_runtime() calls thread_kernel_enable_vfp() so we must
1405 	 * set a current thread right now to avoid a chicken-and-egg problem
1406 	 * (thread_init_boot_thread() sets the current thread but needs
1407 	 * things set by init_runtime()).
1408 	 */
1409 	thread_get_core_local()->curr_thread = 0;
1410 	init_runtime(pageable_part);
1411 
1412 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
1413 		/*
1414 		 * Virtualization: We can't initialize threads right now because
1415 		 * threads belong to the "tee" part and will be initialized
1416 		 * separately for each new virtual guest. So, we'll clear
1417 		 * "curr_thread" and call it done.
1418 		 */
1419 		thread_get_core_local()->curr_thread = -1;
1420 	} else {
1421 		thread_init_boot_thread();
1422 	}
1423 	thread_init_primary();
1424 	thread_init_per_cpu();
1425 	init_sec_mon(nsec_entry);
1426 }
1427 
1428 static bool cpu_nmfi_enabled(void)
1429 {
1430 #if defined(ARM32)
1431 	return read_sctlr() & SCTLR_NMFI;
1432 #else
1433 	/* Note: ARM64 does not feature non-maskable FIQ support. */
1434 	return false;
1435 #endif
1436 }
1437 
1438 /*
1439  * Note: this function is weak just to make it possible to exclude it from
1440  * the unpaged area.
1441  */
1442 void __weak boot_init_primary_late(unsigned long fdt,
1443 				   unsigned long tos_fw_config)
1444 {
1445 	init_external_dt(fdt);
1446 	init_tos_fw_config_dt(tos_fw_config);
1447 #ifdef CFG_CORE_SEL1_SPMC
1448 	tpm_map_log_area(get_tos_fw_config_dt());
1449 #else
1450 	tpm_map_log_area(get_external_dt());
1451 #endif
1452 	discover_nsec_memory();
1453 	update_external_dt();
1454 	configure_console_from_dt();
1455 
1456 	IMSG("OP-TEE version: %s", core_v_str);
1457 	if (IS_ENABLED(CFG_WARN_INSECURE)) {
1458 		IMSG("WARNING: This OP-TEE configuration might be insecure!");
1459 		IMSG("WARNING: Please check https://optee.readthedocs.io/en/latest/architecture/porting_guidelines.html");
1460 	}
1461 	IMSG("Primary CPU initializing");
1462 #ifdef CFG_CORE_ASLR
1463 	DMSG("Executing at offset %#lx with virtual load address %#"PRIxVA,
1464 	     (unsigned long)boot_mmu_config.map_offset, VCORE_START_VA);
1465 #endif
1466 	if (IS_ENABLED(CFG_MEMTAG))
1467 		DMSG("Memory tagging %s",
1468 		     memtag_is_enabled() ?  "enabled" : "disabled");
1469 
1470 	/* Check if platform needs NMFI workaround */
1471 	if (cpu_nmfi_enabled())	{
1472 		if (!IS_ENABLED(CFG_CORE_WORKAROUND_ARM_NMFI))
1473 			IMSG("WARNING: This ARM core has NMFI enabled, please apply workaround!");
1474 	} else {
1475 		if (IS_ENABLED(CFG_CORE_WORKAROUND_ARM_NMFI))
1476 			IMSG("WARNING: This ARM core does not have NMFI enabled, no need for workaround");
1477 	}
1478 
1479 	main_init_gic();
1480 	init_vfp_nsec();
1481 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
1482 		IMSG("Initializing virtualization support");
1483 		core_mmu_init_virtualization();
1484 	} else {
1485 		init_tee_runtime();
1486 	}
1487 	call_finalcalls();
1488 	IMSG("Primary CPU switching to normal world boot");
1489 }
1490 
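/*
 * Per-CPU initialization common to all secondary cores: timer frequency,
 * thread state, secure monitor, GIC and VFP setup.
 */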
1491 static void init_secondary_helper(unsigned long nsec_entry)
1492 {
1493 	IMSG("Secondary CPU %zu initializing", get_core_pos());
1494 
1495 	/*
1496 	 * Mask asynchronous exceptions before switching to the thread vector
1497 	 * as the thread handler requires those to be masked while
1498 	 * executing with the temporary stack. The thread subsystem also
1499 	 * asserts that the foreign interrupts are blocked when using most of
1500 	 * its functions.
1501 	 */
1502 	thread_set_exceptions(THREAD_EXCP_ALL);
1503 
1504 	secondary_init_cntfrq();
1505 	thread_init_per_cpu();
1506 	init_sec_mon(nsec_entry);
1507 	main_secondary_init_gic();
1508 	init_vfp_sec();
1509 	init_vfp_nsec();
1510 
1511 	IMSG("Secondary CPU %zu switching to normal world boot", get_core_pos());
1512 }
1513 
1514 /*
1515  * Note: this function is weak just to make it possible to exclude it from
1516  * the unpaged area so that it lies in the init area.
1517  */
1518 void __weak boot_init_primary_early(unsigned long pageable_part,
1519 				    unsigned long nsec_entry __maybe_unused)
1520 {
1521 	unsigned long e = PADDR_INVALID;
1522 
1523 #if !defined(CFG_WITH_ARM_TRUSTED_FW)
1524 	e = nsec_entry;
1525 #endif
1526 
1527 	init_primary(pageable_part, e);
1528 }
1529 
1530 #if defined(CFG_WITH_ARM_TRUSTED_FW)
1531 unsigned long boot_cpu_on_handler(unsigned long a0 __maybe_unused,
1532 				  unsigned long a1 __unused)
1533 {
1534 	init_secondary_helper(PADDR_INVALID);
1535 	return 0;
1536 }
1537 #else
1538 void boot_init_secondary(unsigned long nsec_entry)
1539 {
1540 	init_secondary_helper(nsec_entry);
1541 }
1542 #endif
1543 
1544 #if defined(CFG_BOOT_SECONDARY_REQUEST)
1545 void boot_set_core_ns_entry(size_t core_idx, uintptr_t entry,
1546 			    uintptr_t context_id)
1547 {
1548 	ns_entry_contexts[core_idx].entry_point = entry;
1549 	ns_entry_contexts[core_idx].context_id = context_id;
1550 	dsb_ishst();
1551 }
1552 
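/*
 * Release secondary core @core_idx from its spin loop with @entry as its
 * normal world entry point. Returns 0 on success, -1 on a bad core index.
 */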
1553 int boot_core_release(size_t core_idx, paddr_t entry)
1554 {
1555 	if (!core_idx || core_idx >= CFG_TEE_CORE_NB_CORE)
1556 		return -1;
1557 
1558 	ns_entry_contexts[core_idx].entry_point = entry;
1559 	dmb();
1560 	spin_table[core_idx] = 1;
1561 	dsb();
1562 	sev();
1563 
1564 	return 0;
1565 }
1566 
1567 /*
1568  * Spin until a secondary boot request arrives, then return with
1569  * the secondary core entry address.
1570  */
1571 struct ns_entry_context *boot_core_hpen(void)
1572 {
1573 #ifdef CFG_PSCI_ARM32
1574 	return &ns_entry_contexts[get_core_pos()];
1575 #else
1576 	do {
1577 		wfe();
1578 	} while (!spin_table[get_core_pos()]);
1579 	dmb();
1580 	return &ns_entry_contexts[get_core_pos()];
1581 #endif
1582 }
1583 #endif
1584 
1585 #if defined(CFG_CORE_ASLR)
1586 #if defined(CFG_DT)
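/*
 * Read the ASLR seed from the /secure-chosen/kaslr-seed property, falling
 * back to the platform hook when the DT or the property is missing.
 */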
1587 unsigned long __weak get_aslr_seed(void *fdt)
1588 {
1589 	int rc = 0;
1590 	const uint64_t *seed = NULL;
1591 	int offs = 0;
1592 	int len = 0;
1593 
1594 	if (!fdt) {
1595 		DMSG("No fdt");
1596 		goto err;
1597 	}
1598 
1599 	rc = fdt_check_header(fdt);
1600 	if (rc) {
1601 		DMSG("Bad fdt: %d", rc);
1602 		goto err;
1603 	}
1604 
1605 	offs = fdt_path_offset(fdt, "/secure-chosen");
1606 	if (offs < 0) {
1607 		DMSG("Cannot find /secure-chosen");
1608 		goto err;
1609 	}
1610 	seed = fdt_getprop(fdt, offs, "kaslr-seed", &len);
1611 	if (!seed || len != sizeof(*seed)) {
1612 		DMSG("Cannot find valid kaslr-seed");
1613 		goto err;
1614 	}
1615 
1616 	return fdt64_to_cpu(*seed);
1617 
1618 err:
1619 	/* Try platform implementation */
1620 	return plat_get_aslr_seed();
1621 }
1622 #else /*!CFG_DT*/
1623 unsigned long __weak get_aslr_seed(void *fdt __unused)
1624 {
1625 	/* Try platform implementation */
1626 	return plat_get_aslr_seed();
1627 }
1628 #endif /*!CFG_DT*/
1629 #endif /*CFG_CORE_ASLR*/
1630 
1631 #if defined(CFG_CORE_SEL2_SPMC) && defined(CFG_CORE_PHYS_RELOCATABLE)
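/*
 * Validate the FF-A v1.1 boot information header and return the manifest
 * device tree it points to. Panics on any inconsistency.
 */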
1632 static void *get_fdt_from_boot_info(struct ffa_boot_info_header_1_1 *hdr)
1633 {
1634 	struct ffa_boot_info_1_1 *desc = NULL;
1635 	uint8_t content_fmt = 0;
1636 	uint8_t name_fmt = 0;
1637 	void *fdt = NULL;
1638 	int ret = 0;
1639 
1640 	if (hdr->signature != FFA_BOOT_INFO_SIGNATURE) {
1641 		EMSG("Bad boot info signature %#"PRIx32, hdr->signature);
1642 		panic();
1643 	}
1644 	if (hdr->version != FFA_BOOT_INFO_VERSION) {
1645 		EMSG("Bad boot info version %#"PRIx32, hdr->version);
1646 		panic();
1647 	}
1648 	if (hdr->desc_count != 1) {
1649 		EMSG("Bad boot info descriptor count %#"PRIx32,
1650 		     hdr->desc_count);
1651 		panic();
1652 	}
1653 	desc = (void *)((vaddr_t)hdr + hdr->desc_offset);
1654 	name_fmt = desc->flags & FFA_BOOT_INFO_FLAG_NAME_FORMAT_MASK;
1655 	if (name_fmt == FFA_BOOT_INFO_FLAG_NAME_FORMAT_STRING)
1656 		DMSG("Boot info descriptor name \"%16s\"", desc->name);
1657 	else if (name_fmt == FFA_BOOT_INFO_FLAG_NAME_FORMAT_UUID)
1658 		DMSG("Boot info descriptor UUID %pUl", (void *)desc->name);
1659 	else
1660 		DMSG("Boot info descriptor: unknown name format %"PRIu8,
1661 		     name_fmt);
1662 
1663 	content_fmt = (desc->flags & FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_MASK) >>
1664 		      FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_SHIFT;
1665 	if (content_fmt != FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_ADDR) {
1666 		EMSG("Bad boot info content format %"PRIu8", expected %u (address)",
1667 		     content_fmt, FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_ADDR);
1668 		panic();
1669 	}
1670 
1671 	fdt = (void *)(vaddr_t)desc->contents;
1672 	ret = fdt_check_full(fdt, desc->size);
1673 	if (ret < 0) {
1674 		EMSG("Invalid Device Tree at %p: error %d", fdt, ret);
1675 		panic();
1676 	}
1677 	return fdt;
1678 }
1679 
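/*
 * Read the secure memory base ("load-address") and size ("mem-size") from
 * the FF-A manifest root node.
 */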
1680 static void get_sec_mem_from_manifest(void *fdt, paddr_t *base, size_t *size)
1681 {
1682 	int ret = 0;
1683 	uint64_t num = 0;
1684 
1685 	ret = fdt_node_check_compatible(fdt, 0, "arm,ffa-manifest-1.0");
1686 	if (ret < 0) {
1687 		EMSG("Invalid FF-A manifest at %p: error %d", fdt, ret);
1688 		panic();
1689 	}
1690 	ret = dt_getprop_as_number(fdt, 0, "load-address", &num);
1691 	if (ret < 0) {
1692 		EMSG("Can't read \"load-address\" from FF-A manifest at %p: error %d",
1693 		     fdt, ret);
1694 		panic();
1695 	}
1696 	*base = num;
1697 	/* "mem-size" is currently an undocumented extension to the spec. */
1698 	ret = dt_getprop_as_number(fdt, 0, "mem-size", &num);
1699 	if (ret < 0) {
1700 		EMSG("Can't read \"mem-size\" from FF-A manifest at %p: error %d",
1701 		     fdt, ret);
1702 		panic();
1703 	}
1704 	*size = num;
1705 }
1706 
1707 void __weak boot_save_boot_info(void *boot_info)
1708 {
1709 	void *fdt = NULL;
1710 	paddr_t base = 0;
1711 	size_t size = 0;
1712 
1713 	fdt = get_fdt_from_boot_info(boot_info);
1714 	get_sec_mem_from_manifest(fdt, &base, &size);
1715 	core_mmu_set_secure_memory(base, size);
1716 }
1717 #endif /*CFG_CORE_SEL2_SPMC && CFG_CORE_PHYS_RELOCATABLE*/
1718