xref: /optee_os/core/arch/arm/kernel/boot.c (revision 1478437e65c44163f7c96f8a4c5d1532a9312bc3)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2015-2022, Linaro Limited
4  * Copyright (c) 2023, Arm Limited
5  */
6 
7 #include <arm.h>
8 #include <assert.h>
9 #include <compiler.h>
10 #include <config.h>
11 #include <console.h>
12 #include <crypto/crypto.h>
13 #include <drivers/gic.h>
14 #include <dt-bindings/interrupt-controller/arm-gic.h>
15 #include <initcall.h>
16 #include <inttypes.h>
17 #include <keep.h>
18 #include <kernel/asan.h>
19 #include <kernel/boot.h>
20 #include <kernel/linker.h>
21 #include <kernel/misc.h>
22 #include <kernel/panic.h>
23 #include <kernel/tee_misc.h>
24 #include <kernel/thread.h>
25 #include <kernel/tpm.h>
26 #include <libfdt.h>
27 #include <malloc.h>
28 #include <memtag.h>
29 #include <mm/core_memprot.h>
30 #include <mm/core_mmu.h>
31 #include <mm/fobj.h>
32 #include <mm/tee_mm.h>
33 #include <mm/tee_pager.h>
34 #include <sm/psci.h>
35 #include <stdio.h>
36 #include <trace.h>
37 #include <utee_defines.h>
38 #include <util.h>
39 
40 #include <platform_config.h>
41 
42 #if !defined(CFG_WITH_ARM_TRUSTED_FW)
43 #include <sm/sm.h>
44 #endif
45 
46 #if defined(CFG_WITH_VFP)
47 #include <kernel/vfp.h>
48 #endif
49 
50 /*
51  * In this file we're using unsigned long to represent physical pointers as
52  * they are received in a single register when OP-TEE is initially entered.
53  * This limits 32-bit systems to only make use of the lower 32 bits
54  * of a physical address for initial parameters.
55  *
56  * 64-bit systems on the other hand can use full 64-bit physical pointers.
57  */
58 #define PADDR_INVALID		ULONG_MAX
59 
60 #if defined(CFG_BOOT_SECONDARY_REQUEST)
61 struct ns_entry_context {
62 	uintptr_t entry_point;
63 	uintptr_t context_id;
64 };
65 struct ns_entry_context ns_entry_contexts[CFG_TEE_CORE_NB_CORE];
66 static uint32_t spin_table[CFG_TEE_CORE_NB_CORE];
67 #endif
68 
69 #ifdef CFG_BOOT_SYNC_CPU
70 /*
71  * Array used when booting to synchronize the CPUs.
72  * When 0, the CPU has not started.
73  * When 1, it has started.
74  */
75 uint32_t sem_cpu_sync[CFG_TEE_CORE_NB_CORE];
76 DECLARE_KEEP_PAGER(sem_cpu_sync);
77 #endif
78 
79 #ifdef CFG_DT
80 struct dt_descriptor {
81 	void *blob;
82 #ifdef _CFG_USE_DTB_OVERLAY
83 	int frag_id;
84 #endif
85 };
86 
87 static struct dt_descriptor external_dt __nex_bss;
88 #ifdef CFG_CORE_SEL1_SPMC
89 static struct dt_descriptor tos_fw_config_dt __nex_bss;
90 #endif
91 #endif
92 
93 #ifdef CFG_SECONDARY_INIT_CNTFRQ
94 static uint32_t cntfrq;
95 #endif
96 
97 /* May be overridden in plat-$(PLATFORM)/main.c */
98 __weak void plat_primary_init_early(void)
99 {
100 }
101 DECLARE_KEEP_PAGER(plat_primary_init_early);
102 
103 /* May be overridden in plat-$(PLATFORM)/main.c */
104 __weak void main_init_gic(void)
105 {
106 }
107 
108 /* May be overridden in plat-$(PLATFORM)/main.c */
109 __weak void main_secondary_init_gic(void)
110 {
111 }
112 
113 /* May be overridden in plat-$(PLATFORM)/main.c */
114 __weak unsigned long plat_get_aslr_seed(void)
115 {
116 	DMSG("Warning: no ASLR seed");
117 
118 	return 0;
119 }
120 
121 #if defined(_CFG_CORE_STACK_PROTECTOR)
122 /* Generate random stack canary value on boot up */
123 __weak uintptr_t plat_get_random_stack_canary(void)
124 {
125 	uintptr_t canary = 0xbaaaad00;
126 	TEE_Result ret = TEE_ERROR_GENERIC;
127 
128 	/*
129	 * With virtualization the RNG is not initialized in the Nexus core,
130	 * so this needs to be overridden with a platform-specific implementation.
131 	 */
132 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
133 		IMSG("WARNING: Using fixed value for stack canary");
134 		return canary;
135 	}
136 
137 	ret = crypto_rng_read(&canary, sizeof(canary));
138 	if (ret != TEE_SUCCESS)
139 		panic("Failed to generate random stack canary");
140 
141	/* Leave a null byte in the canary to prevent string-based exploits */
142 	return canary & ~0xffUL;
143 }
144 #endif /*_CFG_CORE_STACK_PROTECTOR*/
145 
146 /*
147  * This function is called as a guard after each SMC call that is not
148  * supposed to return.
149  */
150 void __panic_at_smc_return(void)
151 {
152 	panic();
153 }
154 
155 #if defined(CFG_WITH_ARM_TRUSTED_FW)
156 void init_sec_mon(unsigned long nsec_entry __maybe_unused)
157 {
158 	assert(nsec_entry == PADDR_INVALID);
159 	/* Do nothing as we don't have a secure monitor */
160 }
161 #else
162 /* May be overridden in plat-$(PLATFORM)/main.c */
163 __weak void init_sec_mon(unsigned long nsec_entry)
164 {
165 	struct sm_nsec_ctx *nsec_ctx;
166 
167 	assert(nsec_entry != PADDR_INVALID);
168 
169 	/* Initialize secure monitor */
170 	nsec_ctx = sm_get_nsec_ctx();
171 	nsec_ctx->mon_lr = nsec_entry;
172 	nsec_ctx->mon_spsr = CPSR_MODE_SVC | CPSR_I;
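	/* Bit 0 set in the entry address selects Thumb state, as for a BX */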
173 	if (nsec_entry & 1)
174 		nsec_ctx->mon_spsr |= CPSR_T;
175 }
176 #endif
177 
178 #if defined(CFG_WITH_ARM_TRUSTED_FW)
179 static void init_vfp_nsec(void)
180 {
181 }
182 #else
183 static void init_vfp_nsec(void)
184 {
185 	/* Normal world can use CP10 and CP11 (SIMD/VFP) */
186 	write_nsacr(read_nsacr() | NSACR_CP10 | NSACR_CP11);
187 }
188 #endif
189 
190 #if defined(CFG_WITH_VFP)
191 
192 #ifdef ARM32
193 static void init_vfp_sec(void)
194 {
195 	uint32_t cpacr = read_cpacr();
196 
197 	/*
198 	 * Enable Advanced SIMD functionality.
199 	 * Enable use of D16-D31 of the Floating-point Extension register
200 	 * file.
201 	 */
202 	cpacr &= ~(CPACR_ASEDIS | CPACR_D32DIS);
203 	/*
204	 * Enable usage of CP10 and CP11 (SIMD/VFP) in both kernel and user
205	 * mode.
206 	 */
207 	cpacr |= CPACR_CP(10, CPACR_CP_ACCESS_FULL);
208 	cpacr |= CPACR_CP(11, CPACR_CP_ACCESS_FULL);
209 	write_cpacr(cpacr);
210 }
211 #endif /* ARM32 */
212 
213 #ifdef ARM64
214 static void init_vfp_sec(void)
215 {
216 	/* Not using VFP until thread_kernel_enable_vfp() */
217 	vfp_disable();
218 }
219 #endif /* ARM64 */
220 
221 #else /* CFG_WITH_VFP */
222 
223 static void init_vfp_sec(void)
224 {
225 	/* Not using VFP */
226 }
227 #endif
228 
229 #ifdef CFG_SECONDARY_INIT_CNTFRQ
230 static void primary_save_cntfrq(void)
231 {
232 	assert(cntfrq == 0);
233 
234 	/*
235 	 * CNTFRQ should be initialized on the primary CPU by a
236 	 * previous boot stage
237	 * previous boot stage.
238 	cntfrq = read_cntfrq();
239 }
240 
241 static void secondary_init_cntfrq(void)
242 {
243 	assert(cntfrq != 0);
244 	write_cntfrq(cntfrq);
245 }
246 #else /* CFG_SECONDARY_INIT_CNTFRQ */
247 static void primary_save_cntfrq(void)
248 {
249 }
250 
251 static void secondary_init_cntfrq(void)
252 {
253 }
254 #endif
255 
256 #ifdef CFG_CORE_SANITIZE_KADDRESS
257 static void init_run_constructors(void)
258 {
259 	const vaddr_t *ctor;
260 
261 	for (ctor = &__ctor_list; ctor < &__ctor_end; ctor++)
262 		((void (*)(void))(*ctor))();
263 }
264 
265 static void init_asan(void)
266 {
267 
268 	/*
269 	 * CFG_ASAN_SHADOW_OFFSET is also supplied as
270 	 * -fasan-shadow-offset=$(CFG_ASAN_SHADOW_OFFSET) to the compiler.
271	 * Since all the values needed to calculate CFG_ASAN_SHADOW_OFFSET
272	 * aren't available to make, we have to calculate it in advance and
273	 * hard code it into the platform conf.mk. Here, where we have all
274	 * the needed values, we double check that the compiler is supplied
275	 * the correct value.
276 	 */
277 
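	/*
	 * ASAN shadow memory uses one byte per eight bytes of tracked
	 * memory, so the shadow area occupies 1/9 of TEE_RAM_VA_SIZE and
	 * covers the remaining 8/9, hence the 8/9 factor below.
	 */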
278 #define __ASAN_SHADOW_START \
279 	ROUNDUP(TEE_RAM_VA_START + (TEE_RAM_VA_SIZE * 8) / 9 - 8, 8)
280 	assert(__ASAN_SHADOW_START == (vaddr_t)&__asan_shadow_start);
281 #define __CFG_ASAN_SHADOW_OFFSET \
282 	(__ASAN_SHADOW_START - (TEE_RAM_VA_START / 8))
283 	COMPILE_TIME_ASSERT(CFG_ASAN_SHADOW_OFFSET == __CFG_ASAN_SHADOW_OFFSET);
284 #undef __ASAN_SHADOW_START
285 #undef __CFG_ASAN_SHADOW_OFFSET
286 
287 	/*
288	 * Assign the area covered by the shadow memory, everything from the
289	 * start up to the beginning of the shadow area.
290 	 */
291 	asan_set_shadowed((void *)TEE_TEXT_VA_START, &__asan_shadow_start);
292 
293 	/*
294 	 * Add access to areas that aren't opened automatically by a
295 	 * constructor.
296 	 */
297 	asan_tag_access(&__ctor_list, &__ctor_end);
298 	asan_tag_access(__rodata_start, __rodata_end);
299 #ifdef CFG_WITH_PAGER
300 	asan_tag_access(__pageable_start, __pageable_end);
301 #endif /*CFG_WITH_PAGER*/
302 	asan_tag_access(__nozi_start, __nozi_end);
303 	asan_tag_access(__exidx_start, __exidx_end);
304 	asan_tag_access(__extab_start, __extab_end);
305 
306 	init_run_constructors();
307 
308 	/* Everything is tagged correctly, let's start address sanitizing. */
309 	asan_start();
310 }
311 #else /*CFG_CORE_SANITIZE_KADDRESS*/
312 static void init_asan(void)
313 {
314 }
315 #endif /*CFG_CORE_SANITIZE_KADDRESS*/
316 
317 #if defined(CFG_MEMTAG)
318 /* Called from entry_a64.S only when MEMTAG is configured */
319 void boot_init_memtag(void)
320 {
321 	memtag_init_ops(feat_mte_implemented());
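	/* Reset the MTE tags of all OP-TEE core memory to tag value 0 */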
322 	memtag_set_tags((void *)TEE_RAM_START, TEE_RAM_PH_SIZE, 0);
323 }
324 #endif
325 
326 #ifdef CFG_WITH_PAGER
327 
328 #ifdef CFG_CORE_SANITIZE_KADDRESS
329 static void carve_out_asan_mem(tee_mm_pool_t *pool)
330 {
331 	const size_t s = pool->hi - pool->lo;
332 	tee_mm_entry_t *mm;
333 	paddr_t apa = ASAN_MAP_PA;
334 	size_t asz = ASAN_MAP_SZ;
335 
336 	if (core_is_buffer_outside(apa, asz, pool->lo, s))
337 		return;
338 
339 	/* Reserve the shadow area */
340 	if (!core_is_buffer_inside(apa, asz, pool->lo, s)) {
341 		if (apa < pool->lo) {
342 			/*
343 			 * ASAN buffer is overlapping with the beginning of
344 			 * the pool.
345 			 */
346 			asz -= pool->lo - apa;
347 			apa = pool->lo;
348 		} else {
349 			/*
350 			 * ASAN buffer is overlapping with the end of the
351 			 * pool.
352 			 */
353 			asz = pool->hi - apa;
354 		}
355 	}
356 	mm = tee_mm_alloc2(pool, apa, asz);
357 	assert(mm);
358 }
359 #else
360 static void carve_out_asan_mem(tee_mm_pool_t *pool __unused)
361 {
362 }
363 #endif
364 
365 static void print_pager_pool_size(void)
366 {
367 	struct tee_pager_stats __maybe_unused stats;
368 
369 	tee_pager_get_stats(&stats);
370 	IMSG("Pager pool size: %zukB",
371 		stats.npages_all * SMALL_PAGE_SIZE / 1024);
372 }
373 
374 static void init_vcore(tee_mm_pool_t *mm_vcore)
375 {
376 	const vaddr_t begin = VCORE_START_VA;
377 	size_t size = TEE_RAM_VA_SIZE;
378 
379 #ifdef CFG_CORE_SANITIZE_KADDRESS
380	/* Carve out the ASAN memory, flat mapped after core memory */
381 	if (begin + size > ASAN_SHADOW_PA)
382 		size = ASAN_MAP_PA - begin;
383 #endif
384 
385 	if (!tee_mm_init(mm_vcore, begin, size, SMALL_PAGE_SHIFT,
386 			 TEE_MM_POOL_NO_FLAGS))
387 		panic("tee_mm_vcore init failed");
388 }
389 
390 /*
391  * With CFG_CORE_ASLR=y the init part is relocated very early during boot.
392  * The init part is also paged just as the rest of the normal paged code, with
393  * the difference that it's preloaded during boot. When the backing store
394  * is configured the entire paged binary is copied in place and then also
395  * the init part. Since the init part has been relocated (references to
396  * addresses updated to compensate for the new load address) this has to be
397  * undone for the hashes of those pages to match with the original binary.
398  *
399  * If CFG_CORE_ASLR=n, nothing needs to be done as the code/ro pages are
400  * unchanged.
401  */
402 static void undo_init_relocation(uint8_t *paged_store __maybe_unused)
403 {
404 #ifdef CFG_CORE_ASLR
405 	unsigned long *ptr = NULL;
406 	const uint32_t *reloc = NULL;
407 	const uint32_t *reloc_end = NULL;
408 	unsigned long offs = boot_mmu_config.load_offset;
409 	const struct boot_embdata *embdata = (const void *)__init_end;
410 	vaddr_t addr_end = (vaddr_t)__init_end - offs - TEE_RAM_START;
411 	vaddr_t addr_start = (vaddr_t)__init_start - offs - TEE_RAM_START;
412 
413 	reloc = (const void *)((vaddr_t)embdata + embdata->reloc_offset);
414 	reloc_end = reloc + embdata->reloc_len / sizeof(*reloc);
415 
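	/*
	 * Each entry is the offset, within the unrelocated binary, of a
	 * pointer that was adjusted by the load offset. Entries come in
	 * increasing order, which is why the loop can stop at the first
	 * entry past the init area.
	 */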
416 	for (; reloc < reloc_end; reloc++) {
417 		if (*reloc < addr_start)
418 			continue;
419 		if (*reloc >= addr_end)
420 			break;
421 		ptr = (void *)(paged_store + *reloc - addr_start);
422 		*ptr -= offs;
423 	}
424 #endif
425 }
426 
427 static struct fobj *ro_paged_alloc(tee_mm_entry_t *mm, void *hashes,
428 				   void *store)
429 {
430 	const unsigned int num_pages = tee_mm_get_bytes(mm) / SMALL_PAGE_SIZE;
431 #ifdef CFG_CORE_ASLR
432 	unsigned int reloc_offs = (vaddr_t)__pageable_start - VCORE_START_VA;
433 	const struct boot_embdata *embdata = (const void *)__init_end;
434 	const void *reloc = __init_end + embdata->reloc_offset;
435 
436 	return fobj_ro_reloc_paged_alloc(num_pages, hashes, reloc_offs,
437 					 reloc, embdata->reloc_len, store);
438 #else
439 	return fobj_ro_paged_alloc(num_pages, hashes, store);
440 #endif
441 }
442 
443 static void init_runtime(unsigned long pageable_part)
444 {
445 	size_t n;
446 	size_t init_size = (size_t)(__init_end - __init_start);
447 	size_t pageable_start = (size_t)__pageable_start;
448 	size_t pageable_end = (size_t)__pageable_end;
449 	size_t pageable_size = pageable_end - pageable_start;
450 	vaddr_t tzsram_end = TZSRAM_BASE + TZSRAM_SIZE - TEE_LOAD_ADDR +
451 			     VCORE_START_VA;
452 	size_t hash_size = (pageable_size / SMALL_PAGE_SIZE) *
453 			   TEE_SHA256_HASH_SIZE;
454 	const struct boot_embdata *embdata = (const void *)__init_end;
455 	const void *tmp_hashes = NULL;
456 	tee_mm_entry_t *mm = NULL;
457 	struct fobj *fobj = NULL;
458 	uint8_t *paged_store = NULL;
459 	uint8_t *hashes = NULL;
460 
461 	assert(pageable_size % SMALL_PAGE_SIZE == 0);
462 	assert(embdata->total_len >= embdata->hashes_offset +
463 				     embdata->hashes_len);
464 	assert(hash_size == embdata->hashes_len);
465 
466 	tmp_hashes = __init_end + embdata->hashes_offset;
467 
468 	init_asan();
469 
470	/* Add heap2 first as heap1 may be too small as the initial bget pool */
471 	malloc_add_pool(__heap2_start, __heap2_end - __heap2_start);
472 	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
473 
474 	/*
475 	 * This needs to be initialized early to support address lookup
476 	 * in MEM_AREA_TEE_RAM
477 	 */
478 	tee_pager_early_init();
479 
480 	hashes = malloc(hash_size);
481 	IMSG_RAW("\n");
482 	IMSG("Pager is enabled. Hashes: %zu bytes", hash_size);
483 	assert(hashes);
484 	asan_memcpy_unchecked(hashes, tmp_hashes, hash_size);
485 
486 	/*
487 	 * Need tee_mm_sec_ddr initialized to be able to allocate secure
488 	 * DDR below.
489 	 */
490 	core_mmu_init_ta_ram();
491 
492 	carve_out_asan_mem(&tee_mm_sec_ddr);
493 
494 	mm = tee_mm_alloc(&tee_mm_sec_ddr, pageable_size);
495 	assert(mm);
496 	paged_store = phys_to_virt(tee_mm_get_smem(mm), MEM_AREA_TA_RAM,
497 				   pageable_size);
498 	/*
499 	 * Load pageable part in the dedicated allocated area:
500 	 * - Move pageable non-init part into pageable area. Note bootloader
501 	 *   may have loaded it anywhere in TA RAM hence use memmove().
502 	 * - Copy pageable init part from current location into pageable area.
503 	 */
504 	memmove(paged_store + init_size,
505 		phys_to_virt(pageable_part,
506 			     core_mmu_get_type_by_pa(pageable_part),
507 			     __pageable_part_end - __pageable_part_start),
508 		__pageable_part_end - __pageable_part_start);
509 	asan_memcpy_unchecked(paged_store, __init_start, init_size);
510 	/*
511	 * Undo any relocation applied to the init part so the hash checks
512 	 * can pass.
513 	 */
514 	undo_init_relocation(paged_store);
515 
516	/* Check that the hashes of what's in the pageable area are OK */
517 	DMSG("Checking hashes of pageable area");
518 	for (n = 0; (n * SMALL_PAGE_SIZE) < pageable_size; n++) {
519 		const uint8_t *hash = hashes + n * TEE_SHA256_HASH_SIZE;
520 		const uint8_t *page = paged_store + n * SMALL_PAGE_SIZE;
521 		TEE_Result res;
522 
523 		DMSG("hash pg_idx %zu hash %p page %p", n, hash, page);
524 		res = hash_sha256_check(hash, page, SMALL_PAGE_SIZE);
525 		if (res != TEE_SUCCESS) {
526 			EMSG("Hash failed for page %zu at %p: res 0x%x",
527 			     n, (void *)page, res);
528 			panic();
529 		}
530 	}
531 
532 	/*
533 	 * Assert prepaged init sections are page aligned so that nothing
534	 * trails uninitialized at the end of the premapped init area.
535 	 */
536 	assert(!(init_size & SMALL_PAGE_MASK));
537 
538 	/*
539 	 * Initialize the virtual memory pool used for main_mmu_l2_ttb which
540 	 * is supplied to tee_pager_init() below.
541 	 */
542 	init_vcore(&tee_mm_vcore);
543 
544 	/*
545	 * Assign the alias area for the pager at the end of the small page
546	 * block that the rest of the binary is loaded into. We're taking
547	 * more than needed, but we're guaranteed to not need more than the
548	 * physical amount of TZSRAM.
549 	 */
550 	mm = tee_mm_alloc2(&tee_mm_vcore,
551 			   (vaddr_t)tee_mm_vcore.lo +
552 			   tee_mm_vcore.size - TZSRAM_SIZE,
553 			   TZSRAM_SIZE);
554 	assert(mm);
555 	tee_pager_set_alias_area(mm);
556 
557 	/*
558 	 * Claim virtual memory which isn't paged.
559	 * Linear memory (flat-mapped core memory) ends there.
560 	 */
561 	mm = tee_mm_alloc2(&tee_mm_vcore, VCORE_UNPG_RX_PA,
562 			   (vaddr_t)(__pageable_start - VCORE_UNPG_RX_PA));
563 	assert(mm);
564 
565 	/*
566 	 * Allocate virtual memory for the pageable area and let the pager
567 	 * take charge of all the pages already assigned to that memory.
568 	 */
569 	mm = tee_mm_alloc2(&tee_mm_vcore, (vaddr_t)__pageable_start,
570 			   pageable_size);
571 	assert(mm);
572 	fobj = ro_paged_alloc(mm, hashes, paged_store);
573 	assert(fobj);
574 	tee_pager_add_core_region(tee_mm_get_smem(mm), PAGED_REGION_TYPE_RO,
575 				  fobj);
576 	fobj_put(fobj);
577 
578 	tee_pager_add_pages(pageable_start, init_size / SMALL_PAGE_SIZE, false);
579 	tee_pager_add_pages(pageable_start + init_size,
580 			    (pageable_size - init_size) / SMALL_PAGE_SIZE,
581 			    true);
582 	if (pageable_end < tzsram_end)
583 		tee_pager_add_pages(pageable_end, (tzsram_end - pageable_end) /
584 						   SMALL_PAGE_SIZE, true);
585 
586 	/*
587 	 * There may be physical pages in TZSRAM before the core load address.
588 	 * These pages can be added to the physical pages pool of the pager.
589	 * This setup may happen when the secure bootloader runs in TZSRAM
590 	 * and its memory can be reused by OP-TEE once boot stages complete.
591 	 */
592 	tee_pager_add_pages(tee_mm_vcore.lo,
593 			(VCORE_UNPG_RX_PA - tee_mm_vcore.lo) / SMALL_PAGE_SIZE,
594 			true);
595 
596 	print_pager_pool_size();
597 }
598 #else
599 
600 static void init_runtime(unsigned long pageable_part __unused)
601 {
602 	init_asan();
603 
604 	/*
605	 * By default the whole of OP-TEE uses malloc, so we need to
606	 * initialize it early. But when virtualization is enabled, malloc is
607	 * used only by the TEE runtime, so it should be initialized later,
608	 * separately for every virtual partition. Core code uses nex_malloc
609	 * instead.
610 	 */
611 #ifdef CFG_NS_VIRTUALIZATION
612 	nex_malloc_add_pool(__nex_heap_start, __nex_heap_end -
613 					      __nex_heap_start);
614 #else
615 	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
616 #endif
617 
618 	IMSG_RAW("\n");
619 }
620 #endif
621 
622 void *get_dt(void)
623 {
624 	void *fdt = get_embedded_dt();
625 
626 	if (!fdt)
627 		fdt = get_external_dt();
628 
629 	return fdt;
630 }
631 
632 void *get_secure_dt(void)
633 {
634 	void *fdt = get_embedded_dt();
635 
636 	if (!fdt && IS_ENABLED(CFG_MAP_EXT_DT_SECURE))
637 		fdt = get_external_dt();
638 
639 	return fdt;
640 }
641 
642 #if defined(CFG_EMBED_DTB)
643 void *get_embedded_dt(void)
644 {
645 	static bool checked;
646 
647 	assert(cpu_mmu_enabled());
648 
649 	if (!checked) {
650 		IMSG("Embedded DTB found");
651 
652 		if (fdt_check_header(embedded_secure_dtb))
653 			panic("Invalid embedded DTB");
654 
655 		checked = true;
656 	}
657 
658 	return embedded_secure_dtb;
659 }
660 #else
661 void *get_embedded_dt(void)
662 {
663 	return NULL;
664 }
665 #endif /*CFG_EMBED_DTB*/
666 
667 #if defined(CFG_DT)
668 void *get_external_dt(void)
669 {
670 	if (!IS_ENABLED(CFG_EXTERNAL_DT))
671 		return NULL;
672 
673 	assert(cpu_mmu_enabled());
674 	return external_dt.blob;
675 }
676 
677 static TEE_Result release_external_dt(void)
678 {
679 	int ret = 0;
680 
681 	if (!IS_ENABLED(CFG_EXTERNAL_DT))
682 		return TEE_SUCCESS;
683 
684 	if (!external_dt.blob)
685 		return TEE_SUCCESS;
686 
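	/*
	 * Shrink the DT back to its packed size now that we're done
	 * modifying it, before the normal world consumes it.
	 */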
687 	ret = fdt_pack(external_dt.blob);
688 	if (ret < 0) {
689 		EMSG("Failed to pack Device Tree at 0x%" PRIxPA ": error %d",
690 		     virt_to_phys(external_dt.blob), ret);
691 		panic();
692 	}
693 
694 	if (core_mmu_remove_mapping(MEM_AREA_EXT_DT, external_dt.blob,
695 				    CFG_DTB_MAX_SIZE))
696 		panic("Failed to remove temporary Device Tree mapping");
697 
698	/* External DTB no longer reachable, reset the pointer to invalid */
699 	external_dt.blob = NULL;
700 
701 	return TEE_SUCCESS;
702 }
703 boot_final(release_external_dt);
704 
705 #ifdef _CFG_USE_DTB_OVERLAY
706 static int add_dt_overlay_fragment(struct dt_descriptor *dt, int ioffs)
707 {
708 	char frag[32];
709 	int offs;
710 	int ret;
711 
712 	snprintf(frag, sizeof(frag), "fragment@%d", dt->frag_id);
713 	offs = fdt_add_subnode(dt->blob, ioffs, frag);
714 	if (offs < 0)
715 		return offs;
716 
717 	dt->frag_id += 1;
718 
719 	ret = fdt_setprop_string(dt->blob, offs, "target-path", "/");
720 	if (ret < 0)
721 		return -1;
722 
723 	return fdt_add_subnode(dt->blob, offs, "__overlay__");
724 }
725 
726 static int init_dt_overlay(struct dt_descriptor *dt, int __maybe_unused dt_size)
727 {
728 	int fragment;
729 
730 	if (IS_ENABLED(CFG_EXTERNAL_DTB_OVERLAY)) {
731 		if (!fdt_check_header(dt->blob)) {
732 			fdt_for_each_subnode(fragment, dt->blob, 0)
733 				dt->frag_id += 1;
734 			return 0;
735 		}
736 	}
737 
738 	return fdt_create_empty_tree(dt->blob, dt_size);
739 }
740 #else
741 static int add_dt_overlay_fragment(struct dt_descriptor *dt __unused, int offs)
742 {
743 	return offs;
744 }
745 
746 static int init_dt_overlay(struct dt_descriptor *dt __unused,
747 			   int dt_size __unused)
748 {
749 	return 0;
750 }
751 #endif /* _CFG_USE_DTB_OVERLAY */
752 
753 static int add_dt_path_subnode(struct dt_descriptor *dt, const char *path,
754 			       const char *subnode)
755 {
756 	int offs;
757 
758 	offs = fdt_path_offset(dt->blob, path);
759 	if (offs < 0)
760 		return -1;
761 	offs = add_dt_overlay_fragment(dt, offs);
762 	if (offs < 0)
763 		return -1;
764 	offs = fdt_add_subnode(dt->blob, offs, subnode);
765 	if (offs < 0)
766 		return -1;
767 	return offs;
768 }
769 
770 static int add_optee_dt_node(struct dt_descriptor *dt)
771 {
772 	int offs;
773 	int ret;
774 
775 	if (fdt_path_offset(dt->blob, "/firmware/optee") >= 0) {
776 		DMSG("OP-TEE Device Tree node already exists!");
777 		return 0;
778 	}
779 
780 	offs = fdt_path_offset(dt->blob, "/firmware");
781 	if (offs < 0) {
782 		offs = add_dt_path_subnode(dt, "/", "firmware");
783 		if (offs < 0)
784 			return -1;
785 	}
786 
787 	offs = fdt_add_subnode(dt->blob, offs, "optee");
788 	if (offs < 0)
789 		return -1;
790 
791 	ret = fdt_setprop_string(dt->blob, offs, "compatible",
792 				 "linaro,optee-tz");
793 	if (ret < 0)
794 		return -1;
795 	ret = fdt_setprop_string(dt->blob, offs, "method", "smc");
796 	if (ret < 0)
797 		return -1;
798 
799 	if (CFG_CORE_ASYNC_NOTIF_GIC_INTID) {
800 		/*
801 		 * The format of the interrupt property is defined by the
802 		 * binding of the interrupt domain root. In this case it's
803		 * an Arm GIC v1, v2 or v3 so we must be compatible with
804 		 * these.
805 		 *
806 		 * An SPI type of interrupt is indicated with a 0 in the
807 		 * first cell. A PPI type is indicated with value 1.
808 		 *
809 		 * The interrupt number goes in the second cell where
810		 * SPIs range from 0 to 987 and PPIs range from 0 to 15.
811 		 *
812		 * Flags are passed in the third cell.
813 		 */
814 		uint32_t itr_trigger = 0;
815 		uint32_t itr_type = 0;
816 		uint32_t itr_id = 0;
817 		uint32_t val[3] = { };
818 
819		/* PPIs are visible only in the current CPU cluster */
820 		static_assert(!CFG_CORE_ASYNC_NOTIF_GIC_INTID ||
821 			      (CFG_CORE_ASYNC_NOTIF_GIC_INTID >=
822 			       GIC_SPI_BASE) ||
823 			      ((CFG_TEE_CORE_NB_CORE <= 8) &&
824 			       (CFG_CORE_ASYNC_NOTIF_GIC_INTID >=
825 				GIC_PPI_BASE)));
826 
827 		if (CFG_CORE_ASYNC_NOTIF_GIC_INTID >= GIC_SPI_BASE) {
828 			itr_type = GIC_SPI;
829 			itr_id = CFG_CORE_ASYNC_NOTIF_GIC_INTID - GIC_SPI_BASE;
830 			itr_trigger = IRQ_TYPE_EDGE_RISING;
831 		} else {
832 			itr_type = GIC_PPI;
833 			itr_id = CFG_CORE_ASYNC_NOTIF_GIC_INTID - GIC_PPI_BASE;
834 			itr_trigger = IRQ_TYPE_EDGE_RISING |
835 				      GIC_CPU_MASK_SIMPLE(CFG_TEE_CORE_NB_CORE);
836 		}
837 
838 		val[0] = TEE_U32_TO_BIG_ENDIAN(itr_type);
839 		val[1] = TEE_U32_TO_BIG_ENDIAN(itr_id);
840 		val[2] = TEE_U32_TO_BIG_ENDIAN(itr_trigger);
841 
842 		ret = fdt_setprop(dt->blob, offs, "interrupts", val,
843 				  sizeof(val));
844 		if (ret < 0)
845 			return -1;
846 	}
847 	return 0;
848 }
849 
850 #ifdef CFG_PSCI_ARM32
851 static int append_psci_compatible(void *fdt, int offs, const char *str)
852 {
853 	return fdt_appendprop(fdt, offs, "compatible", str, strlen(str) + 1);
854 }
855 
856 static int dt_add_psci_node(struct dt_descriptor *dt)
857 {
858 	int offs;
859 
860 	if (fdt_path_offset(dt->blob, "/psci") >= 0) {
861 		DMSG("PSCI Device Tree node already exists!");
862 		return 0;
863 	}
864 
865 	offs = add_dt_path_subnode(dt, "/", "psci");
866 	if (offs < 0)
867 		return -1;
868 	if (append_psci_compatible(dt->blob, offs, "arm,psci-1.0"))
869 		return -1;
870 	if (append_psci_compatible(dt->blob, offs, "arm,psci-0.2"))
871 		return -1;
872 	if (append_psci_compatible(dt->blob, offs, "arm,psci"))
873 		return -1;
874 	if (fdt_setprop_string(dt->blob, offs, "method", "smc"))
875 		return -1;
876 	if (fdt_setprop_u32(dt->blob, offs, "cpu_suspend", PSCI_CPU_SUSPEND))
877 		return -1;
878 	if (fdt_setprop_u32(dt->blob, offs, "cpu_off", PSCI_CPU_OFF))
879 		return -1;
880 	if (fdt_setprop_u32(dt->blob, offs, "cpu_on", PSCI_CPU_ON))
881 		return -1;
882 	if (fdt_setprop_u32(dt->blob, offs, "sys_poweroff", PSCI_SYSTEM_OFF))
883 		return -1;
884 	if (fdt_setprop_u32(dt->blob, offs, "sys_reset", PSCI_SYSTEM_RESET))
885 		return -1;
886 	return 0;
887 }
888 
889 static int check_node_compat_prefix(struct dt_descriptor *dt, int offs,
890 				    const char *prefix)
891 {
892 	const size_t prefix_len = strlen(prefix);
893 	size_t l;
894 	int plen;
895 	const char *prop;
896 
897 	prop = fdt_getprop(dt->blob, offs, "compatible", &plen);
898 	if (!prop)
899 		return -1;
900 
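	/* "compatible" holds a list of NUL-terminated strings, match each */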
901 	while (plen > 0) {
902 		if (memcmp(prop, prefix, prefix_len) == 0)
903 			return 0; /* match */
904 
905 		l = strlen(prop) + 1;
906 		prop += l;
907 		plen -= l;
908 	}
909 
910 	return -1;
911 }
912 
913 static int dt_add_psci_cpu_enable_methods(struct dt_descriptor *dt)
914 {
915 	int offs = 0;
916 
917 	while (1) {
918 		offs = fdt_next_node(dt->blob, offs, NULL);
919 		if (offs < 0)
920 			break;
921 		if (fdt_getprop(dt->blob, offs, "enable-method", NULL))
922 			continue; /* already set */
923 		if (check_node_compat_prefix(dt, offs, "arm,cortex-a"))
924 			continue; /* no compatible */
925 		if (fdt_setprop_string(dt->blob, offs, "enable-method", "psci"))
926 			return -1;
927 		/* Need to restart scanning as offsets may have changed */
928 		offs = 0;
929 	}
930 	return 0;
931 }
932 
933 static int config_psci(struct dt_descriptor *dt)
934 {
935 	if (dt_add_psci_node(dt))
936 		return -1;
937 	return dt_add_psci_cpu_enable_methods(dt);
938 }
939 #else
940 static int config_psci(struct dt_descriptor *dt __unused)
941 {
942 	return 0;
943 }
944 #endif /*CFG_PSCI_ARM32*/
945 
946 static void set_dt_val(void *data, uint32_t cell_size, uint64_t val)
947 {
948 	if (cell_size == 1) {
949 		fdt32_t v = cpu_to_fdt32((uint32_t)val);
950 
951 		memcpy(data, &v, sizeof(v));
952 	} else {
953 		fdt64_t v = cpu_to_fdt64(val);
954 
955 		memcpy(data, &v, sizeof(v));
956 	}
957 }
958 
959 static int add_res_mem_dt_node(struct dt_descriptor *dt, const char *name,
960 			       paddr_t pa, size_t size)
961 {
962 	int offs = 0;
963 	int ret = 0;
964 	int addr_size = -1;
965 	int len_size = -1;
966 	bool found = true;
967 	char subnode_name[80] = { 0 };
968 
969 	offs = fdt_path_offset(dt->blob, "/reserved-memory");
970 
971 	if (offs < 0) {
972 		found = false;
973 		offs = 0;
974 	}
975 
976 	if (IS_ENABLED2(_CFG_USE_DTB_OVERLAY)) {
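		/*
		 * With a DTB overlay the target tree isn't available here,
		 * so use cells sized to hold a paddr_t instead of reading
		 * #address-cells/#size-cells.
		 */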
977 		len_size = sizeof(paddr_t) / sizeof(uint32_t);
978 		addr_size = sizeof(paddr_t) / sizeof(uint32_t);
979 	} else {
980 		len_size = fdt_size_cells(dt->blob, offs);
981 		if (len_size < 0)
982 			return -1;
983 		addr_size = fdt_address_cells(dt->blob, offs);
984 		if (addr_size < 0)
985 			return -1;
986 	}
987 
988 	if (!found) {
989 		offs = add_dt_path_subnode(dt, "/", "reserved-memory");
990 		if (offs < 0)
991 			return -1;
992 		ret = fdt_setprop_cell(dt->blob, offs, "#address-cells",
993 				       addr_size);
994 		if (ret < 0)
995 			return -1;
996 		ret = fdt_setprop_cell(dt->blob, offs, "#size-cells", len_size);
997 		if (ret < 0)
998 			return -1;
999 		ret = fdt_setprop(dt->blob, offs, "ranges", NULL, 0);
1000 		if (ret < 0)
1001 			return -1;
1002 	}
1003 
1004 	ret = snprintf(subnode_name, sizeof(subnode_name),
1005 		       "%s@%" PRIxPA, name, pa);
1006 	if (ret < 0 || ret >= (int)sizeof(subnode_name))
1007 		DMSG("truncated node \"%s@%" PRIxPA"\"", name, pa);
1008 	offs = fdt_add_subnode(dt->blob, offs, subnode_name);
1009 	if (offs >= 0) {
1010 		uint32_t data[FDT_MAX_NCELLS * 2];
1011 
1012 		set_dt_val(data, addr_size, pa);
1013 		set_dt_val(data + addr_size, len_size, size);
1014 		ret = fdt_setprop(dt->blob, offs, "reg", data,
1015 				  sizeof(uint32_t) * (addr_size + len_size));
1016 		if (ret < 0)
1017 			return -1;
1018 		ret = fdt_setprop(dt->blob, offs, "no-map", NULL, 0);
1019 		if (ret < 0)
1020 			return -1;
1021 	} else {
1022 		return -1;
1023 	}
1024 	return 0;
1025 }
1026 
1027 #ifdef CFG_CORE_DYN_SHM
1028 static uint64_t get_dt_val_and_advance(const void *data, size_t *offs,
1029 				       uint32_t cell_size)
1030 {
1031 	uint64_t rv = 0;
1032 
1033 	if (cell_size == 1) {
1034 		uint32_t v;
1035 
1036 		memcpy(&v, (const uint8_t *)data + *offs, sizeof(v));
1037 		*offs += sizeof(v);
1038 		rv = fdt32_to_cpu(v);
1039 	} else {
1040 		uint64_t v;
1041 
1042 		memcpy(&v, (const uint8_t *)data + *offs, sizeof(v));
1043 		*offs += sizeof(v);
1044 		rv = fdt64_to_cpu(v);
1045 	}
1046 
1047 	return rv;
1048 }
1049 
1050 /*
1051  * Find all non-secure memory from DT. Memory marked inaccessible by Secure
1052  * World is ignored since it cannot be mapped to be used as dynamic shared
1053  * memory.
1054  */
1055 static int get_nsec_memory_helper(void *fdt, struct core_mmu_phys_mem *mem)
1056 {
1057 	const uint8_t *prop = NULL;
1058 	uint64_t a = 0;
1059 	uint64_t l = 0;
1060 	size_t prop_offs = 0;
1061 	size_t prop_len = 0;
1062 	int elems_total = 0;
1063 	int addr_size = 0;
1064 	int len_size = 0;
1065 	int offs = 0;
1066 	size_t n = 0;
1067 	int len = 0;
1068 
1069 	addr_size = fdt_address_cells(fdt, 0);
1070 	if (addr_size < 0)
1071 		return 0;
1072 
1073 	len_size = fdt_size_cells(fdt, 0);
1074 	if (len_size < 0)
1075 		return 0;
1076 
1077 	while (true) {
1078 		offs = fdt_node_offset_by_prop_value(fdt, offs, "device_type",
1079 						     "memory",
1080 						     sizeof("memory"));
1081 		if (offs < 0)
1082 			break;
1083 
1084 		if (_fdt_get_status(fdt, offs) != (DT_STATUS_OK_NSEC |
1085 						   DT_STATUS_OK_SEC))
1086 			continue;
1087 
1088 		prop = fdt_getprop(fdt, offs, "reg", &len);
1089 		if (!prop)
1090 			continue;
1091 
1092 		prop_len = len;
1093 		for (n = 0, prop_offs = 0; prop_offs < prop_len; n++) {
1094 			a = get_dt_val_and_advance(prop, &prop_offs, addr_size);
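			/* A truncated entry has an address but no size, don't count it */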
1095 			if (prop_offs >= prop_len) {
1096 				n--;
1097 				break;
1098 			}
1099 
1100 			l = get_dt_val_and_advance(prop, &prop_offs, len_size);
1101 			if (mem) {
1102 				mem->type = MEM_AREA_DDR_OVERALL;
1103 				mem->addr = a;
1104 				mem->size = l;
1105 				mem++;
1106 			}
1107 		}
1108 
1109 		elems_total += n;
1110 	}
1111 
1112 	return elems_total;
1113 }
1114 
1115 static struct core_mmu_phys_mem *get_nsec_memory(void *fdt, size_t *nelems)
1116 {
1117 	struct core_mmu_phys_mem *mem = NULL;
1118 	int elems_total = 0;
1119 
1120 	elems_total = get_nsec_memory_helper(fdt, NULL);
1121 	if (elems_total <= 0)
1122 		return NULL;
1123 
1124 	mem = nex_calloc(elems_total, sizeof(*mem));
1125 	if (!mem)
1126 		panic();
1127 
1128 	elems_total = get_nsec_memory_helper(fdt, mem);
1129 	assert(elems_total > 0);
1130 
1131 	*nelems = elems_total;
1132 
1133 	return mem;
1134 }
1135 #endif /*CFG_CORE_DYN_SHM*/
1136 
1137 #ifdef CFG_CORE_RESERVED_SHM
1138 static int mark_static_shm_as_reserved(struct dt_descriptor *dt)
1139 {
1140 	vaddr_t shm_start;
1141 	vaddr_t shm_end;
1142 
1143 	core_mmu_get_mem_by_type(MEM_AREA_NSEC_SHM, &shm_start, &shm_end);
1144 	if (shm_start != shm_end)
1145 		return add_res_mem_dt_node(dt, "optee_shm",
1146 					   virt_to_phys((void *)shm_start),
1147 					   shm_end - shm_start);
1148 
1149 	DMSG("No SHM configured");
1150 	return -1;
1151 }
1152 #endif /*CFG_CORE_RESERVED_SHM*/
1153 
1154 static void init_external_dt(unsigned long phys_dt)
1155 {
1156 	struct dt_descriptor *dt = &external_dt;
1157 	void *fdt;
1158 	int ret;
1159 
1160 	if (!IS_ENABLED(CFG_EXTERNAL_DT))
1161 		return;
1162 
1163 	if (!phys_dt) {
1164 		/*
1165 		 * No need to panic as we're not using the DT in OP-TEE
1166 		 * yet, we're only adding some nodes for normal world use.
1167 		 * This makes the switch to using DT easier as we can boot
1168 		 * a newer OP-TEE with older boot loaders. Once we start to
1169 		 * initialize devices based on DT we'll likely panic
1170 		 * instead of returning here.
1171 		 */
1172 		IMSG("No non-secure external DT");
1173 		return;
1174 	}
1175 
1176 	fdt = core_mmu_add_mapping(MEM_AREA_EXT_DT, phys_dt, CFG_DTB_MAX_SIZE);
1177 	if (!fdt)
1178 		panic("Failed to map external DTB");
1179 
1180 	dt->blob = fdt;
1181 
1182 	ret = init_dt_overlay(dt, CFG_DTB_MAX_SIZE);
1183 	if (ret < 0) {
1184 		EMSG("Device Tree Overlay init fail @ %#lx: error %d", phys_dt,
1185 		     ret);
1186 		panic();
1187 	}
1188 
1189 	ret = fdt_open_into(fdt, fdt, CFG_DTB_MAX_SIZE);
1190 	if (ret < 0) {
1191 		EMSG("Invalid Device Tree at %#lx: error %d", phys_dt, ret);
1192 		panic();
1193 	}
1194 
1195 	IMSG("Non-secure external DT found");
1196 }
1197 
1198 static int mark_tzdram_as_reserved(struct dt_descriptor *dt)
1199 {
1200 	return add_res_mem_dt_node(dt, "optee_core", CFG_TZDRAM_START,
1201 				   CFG_TZDRAM_SIZE);
1202 }
1203 
1204 static void update_external_dt(void)
1205 {
1206 	struct dt_descriptor *dt = &external_dt;
1207 
1208 	if (!IS_ENABLED(CFG_EXTERNAL_DT))
1209 		return;
1210 
1211 	if (!dt->blob)
1212 		return;
1213 
1214 	if (!IS_ENABLED(CFG_CORE_FFA) && add_optee_dt_node(dt))
1215 		panic("Failed to add OP-TEE Device Tree node");
1216 
1217 	if (config_psci(dt))
1218 		panic("Failed to config PSCI");
1219 
1220 #ifdef CFG_CORE_RESERVED_SHM
1221 	if (mark_static_shm_as_reserved(dt))
1222 		panic("Failed to config non-secure memory");
1223 #endif
1224 
1225 	if (mark_tzdram_as_reserved(dt))
1226 		panic("Failed to config secure memory");
1227 }
1228 #else /*CFG_DT*/
1229 void *get_external_dt(void)
1230 {
1231 	return NULL;
1232 }
1233 
1234 static void init_external_dt(unsigned long phys_dt __unused)
1235 {
1236 }
1237 
1238 static void update_external_dt(void)
1239 {
1240 }
1241 
1242 #ifdef CFG_CORE_DYN_SHM
1243 static struct core_mmu_phys_mem *get_nsec_memory(void *fdt __unused,
1244 						 size_t *nelems __unused)
1245 {
1246 	return NULL;
1247 }
1248 #endif /*CFG_CORE_DYN_SHM*/
1249 #endif /*!CFG_DT*/
1250 
1251 #if defined(CFG_CORE_SEL1_SPMC) && defined(CFG_DT)
1252 void *get_tos_fw_config_dt(void)
1253 {
1254 	if (!IS_ENABLED(CFG_MAP_EXT_DT_SECURE))
1255 		return NULL;
1256 
1257 	assert(cpu_mmu_enabled());
1258 
1259 	return tos_fw_config_dt.blob;
1260 }
1261 
1262 static void init_tos_fw_config_dt(unsigned long pa)
1263 {
1264 	struct dt_descriptor *dt = &tos_fw_config_dt;
1265 	void *fdt = NULL;
1266 	int ret = 0;
1267 
1268 	if (!IS_ENABLED(CFG_MAP_EXT_DT_SECURE))
1269 		return;
1270 
1271 	if (!pa)
1272 		panic("No TOS_FW_CONFIG DT found");
1273 
1274 	fdt = core_mmu_add_mapping(MEM_AREA_EXT_DT, pa, CFG_DTB_MAX_SIZE);
1275 	if (!fdt)
1276 		panic("Failed to map TOS_FW_CONFIG DT");
1277 
1278 	dt->blob = fdt;
1279 
1280 	ret = fdt_open_into(fdt, fdt, CFG_DTB_MAX_SIZE);
1281 	if (ret < 0) {
1282 		EMSG("Invalid Device Tree at %#lx: error %d", pa, ret);
1283 		panic();
1284 	}
1285 
1286 	IMSG("TOS_FW_CONFIG DT found");
1287 }
1288 #else
1289 void *get_tos_fw_config_dt(void)
1290 {
1291 	return NULL;
1292 }
1293 
1294 static void init_tos_fw_config_dt(unsigned long pa __unused)
1295 {
1296 }
1297 #endif /*CFG_CORE_SEL1_SPMC && CFG_DT*/
1298 
1299 #ifdef CFG_CORE_DYN_SHM
1300 static void discover_nsec_memory(void)
1301 {
1302 	struct core_mmu_phys_mem *mem;
1303 	const struct core_mmu_phys_mem *mem_begin = NULL;
1304 	const struct core_mmu_phys_mem *mem_end = NULL;
1305 	size_t nelems;
1306 	void *fdt = get_external_dt();
1307 
1308 	if (fdt) {
1309 		mem = get_nsec_memory(fdt, &nelems);
1310 		if (mem) {
1311 			core_mmu_set_discovered_nsec_ddr(mem, nelems);
1312 			return;
1313 		}
1314 
1315 		DMSG("No non-secure memory found in FDT");
1316 	}
1317 
1318 	mem_begin = phys_ddr_overall_begin;
1319 	mem_end = phys_ddr_overall_end;
1320 	nelems = mem_end - mem_begin;
1321 	if (nelems) {
1322 		/*
1323 		 * Platform cannot use both register_ddr() and the now
1324 		 * deprecated register_dynamic_shm().
1325 		 */
1326 		assert(phys_ddr_overall_compat_begin ==
1327 		       phys_ddr_overall_compat_end);
1328 	} else {
1329 		mem_begin = phys_ddr_overall_compat_begin;
1330 		mem_end = phys_ddr_overall_compat_end;
1331 		nelems = mem_end - mem_begin;
1332 		if (!nelems)
1333 			return;
1334 		DMSG("Warning register_dynamic_shm() is deprecated, please use register_ddr() instead");
1335 	}
1336 
1337 	mem = nex_calloc(nelems, sizeof(*mem));
1338 	if (!mem)
1339 		panic();
1340 
1341 	memcpy(mem, phys_ddr_overall_begin, sizeof(*mem) * nelems);
1342 	core_mmu_set_discovered_nsec_ddr(mem, nelems);
1343 }
1344 #else /*CFG_CORE_DYN_SHM*/
1345 static void discover_nsec_memory(void)
1346 {
1347 }
1348 #endif /*!CFG_CORE_DYN_SHM*/
1349 
1350 #ifdef CFG_NS_VIRTUALIZATION
1351 static TEE_Result virt_init_heap(void)
1352 {
1353	/* We need to initialize the pool for every virtual guest partition */
1354 	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
1355 
1356 	return TEE_SUCCESS;
1357 }
1358 preinit_early(virt_init_heap);
1359 #endif
1360 
1361 void init_tee_runtime(void)
1362 {
1363 #ifndef CFG_WITH_PAGER
1364 	/* Pager initializes TA RAM early */
1365 	core_mmu_init_ta_ram();
1366 #endif
1367 	/*
1368 	 * With virtualization we call this function when creating the
1369 	 * OP-TEE partition instead.
1370 	 */
1371 	if (!IS_ENABLED(CFG_NS_VIRTUALIZATION))
1372 		call_preinitcalls();
1373 	call_initcalls();
1374 
1375 	/*
1376	 * These two functions use crypto_rng_read() to initialize the
1377 	 * pauth keys. Once call_initcalls() returns we're guaranteed that
1378 	 * crypto_rng_read() is ready to be used.
1379 	 */
1380 	thread_init_core_local_pauth_keys();
1381 	thread_init_thread_pauth_keys();
1382 }
1383 
1384 static void init_primary(unsigned long pageable_part, unsigned long nsec_entry)
1385 {
1386 	thread_init_core_local_stacks();
1387 	/*
1388	 * Mask asynchronous exceptions before switching to the thread vector
1389 	 * as the thread handler requires those to be masked while
1390 	 * executing with the temporary stack. The thread subsystem also
1391	 * asserts that foreign interrupts are blocked when using most of
1392 	 * its functions.
1393 	 */
1394 	thread_set_exceptions(THREAD_EXCP_ALL);
1395 	primary_save_cntfrq();
1396 	init_vfp_sec();
1397 	/*
1398 	 * Pager: init_runtime() calls thread_kernel_enable_vfp() so we must
1399 	 * set a current thread right now to avoid a chicken-and-egg problem
1400 	 * (thread_init_boot_thread() sets the current thread but needs
1401 	 * things set by init_runtime()).
1402 	 */
1403 	thread_get_core_local()->curr_thread = 0;
1404 	init_runtime(pageable_part);
1405 
1406 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
1407 		/*
1408 		 * Virtualization: We can't initialize threads right now because
1409		 * threads belong to the "tee" part and will be initialized
1410		 * separately for each new virtual guest. So, we'll clear
1411 		 * "curr_thread" and call it done.
1412 		 */
1413 		thread_get_core_local()->curr_thread = -1;
1414 	} else {
1415 		thread_init_boot_thread();
1416 	}
1417 	thread_init_primary();
1418 	thread_init_per_cpu();
1419 	init_sec_mon(nsec_entry);
1420 }
1421 
1422 static bool cpu_nmfi_enabled(void)
1423 {
1424 #if defined(ARM32)
1425 	return read_sctlr() & SCTLR_NMFI;
1426 #else
1427 	/* Note: ARM64 does not feature non-maskable FIQ support. */
1428 	return false;
1429 #endif
1430 }
1431 
1432 /*
1433  * Note: this function is weak just to make it possible to exclude it from
1434  * the unpaged area.
1435  */
1436 void __weak boot_init_primary_late(unsigned long fdt,
1437 				   unsigned long tos_fw_config)
1438 {
1439 	init_external_dt(fdt);
1440 	init_tos_fw_config_dt(tos_fw_config);
1441 #ifdef CFG_CORE_SEL1_SPMC
1442 	tpm_map_log_area(get_tos_fw_config_dt());
1443 #else
1444 	tpm_map_log_area(get_external_dt());
1445 #endif
1446 	discover_nsec_memory();
1447 	update_external_dt();
1448 	configure_console_from_dt();
1449 
1450 	IMSG("OP-TEE version: %s", core_v_str);
1451 	if (IS_ENABLED(CFG_WARN_INSECURE)) {
1452 		IMSG("WARNING: This OP-TEE configuration might be insecure!");
1453 		IMSG("WARNING: Please check https://optee.readthedocs.io/en/latest/architecture/porting_guidelines.html");
1454 	}
1455 	IMSG("Primary CPU initializing");
1456 #ifdef CFG_CORE_ASLR
1457 	DMSG("Executing at offset %#lx with virtual load address %#"PRIxVA,
1458 	     (unsigned long)boot_mmu_config.load_offset, VCORE_START_VA);
1459 #endif
1460 	if (IS_ENABLED(CFG_MEMTAG))
1461 		DMSG("Memory tagging %s",
1462 		     memtag_is_enabled() ?  "enabled" : "disabled");
1463 
1464 	/* Check if platform needs NMFI workaround */
1465	if (cpu_nmfi_enabled()) {
1466 		if (!IS_ENABLED(CFG_CORE_WORKAROUND_ARM_NMFI))
1467 			IMSG("WARNING: This ARM core has NMFI enabled, please apply workaround!");
1468 	} else {
1469 		if (IS_ENABLED(CFG_CORE_WORKAROUND_ARM_NMFI))
1470 			IMSG("WARNING: This ARM core does not have NMFI enabled, no need for workaround");
1471 	}
1472 
1473 	main_init_gic();
1474 	init_vfp_nsec();
1475 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
1476 		IMSG("Initializing virtualization support");
1477 		core_mmu_init_virtualization();
1478 	} else {
1479 		init_tee_runtime();
1480 	}
1481 	call_finalcalls();
1482 	IMSG("Primary CPU switching to normal world boot");
1483 }
1484 
1485 static void init_secondary_helper(unsigned long nsec_entry)
1486 {
1487 	IMSG("Secondary CPU %zu initializing", get_core_pos());
1488 
1489 	/*
1490	 * Mask asynchronous exceptions before switching to the thread vector
1491 	 * as the thread handler requires those to be masked while
1492 	 * executing with the temporary stack. The thread subsystem also
1493	 * asserts that foreign interrupts are blocked when using most of
1494 	 * its functions.
1495 	 */
1496 	thread_set_exceptions(THREAD_EXCP_ALL);
1497 
1498 	secondary_init_cntfrq();
1499 	thread_init_per_cpu();
1500 	init_sec_mon(nsec_entry);
1501 	main_secondary_init_gic();
1502 	init_vfp_sec();
1503 	init_vfp_nsec();
1504 
1505 	IMSG("Secondary CPU %zu switching to normal world boot", get_core_pos());
1506 }
1507 
1508 /*
1509  * Note: this function is weak just to make it possible to exclude it from
1510  * the unpaged area so that it lies in the init area.
1511  */
1512 void __weak boot_init_primary_early(unsigned long pageable_part,
1513 				    unsigned long nsec_entry __maybe_unused)
1514 {
1515 	unsigned long e = PADDR_INVALID;
1516 
1517 #if !defined(CFG_WITH_ARM_TRUSTED_FW)
1518 	e = nsec_entry;
1519 #endif
1520 
1521 	init_primary(pageable_part, e);
1522 }
1523 
1524 #if defined(CFG_WITH_ARM_TRUSTED_FW)
1525 unsigned long boot_cpu_on_handler(unsigned long a0 __maybe_unused,
1526 				  unsigned long a1 __unused)
1527 {
1528 	init_secondary_helper(PADDR_INVALID);
1529 	return 0;
1530 }
1531 #else
1532 void boot_init_secondary(unsigned long nsec_entry)
1533 {
1534 	init_secondary_helper(nsec_entry);
1535 }
1536 #endif
1537 
1538 #if defined(CFG_BOOT_SECONDARY_REQUEST)
1539 void boot_set_core_ns_entry(size_t core_idx, uintptr_t entry,
1540 			    uintptr_t context_id)
1541 {
1542 	ns_entry_contexts[core_idx].entry_point = entry;
1543 	ns_entry_contexts[core_idx].context_id = context_id;
1544 	dsb_ishst();
1545 }
1546 
1547 int boot_core_release(size_t core_idx, paddr_t entry)
1548 {
1549 	if (!core_idx || core_idx >= CFG_TEE_CORE_NB_CORE)
1550 		return -1;
1551 
1552 	ns_entry_contexts[core_idx].entry_point = entry;
1553 	dmb();
1554 	spin_table[core_idx] = 1;
1555 	dsb();
1556 	sev();
1557 
1558 	return 0;
1559 }
1560 
1561 /*
1562  * Spin until a secondary boot request, then return with
1563  * the secondary core entry address.
1564  */
1565 struct ns_entry_context *boot_core_hpen(void)
1566 {
1567 #ifdef CFG_PSCI_ARM32
1568 	return &ns_entry_contexts[get_core_pos()];
1569 #else
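	/*
	 * boot_core_release() publishes the entry point, sets spin_table[]
	 * and then issues sev() to wake us from wfe().
	 */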
1570 	do {
1571 		wfe();
1572 	} while (!spin_table[get_core_pos()]);
1573 	dmb();
1574 	return &ns_entry_contexts[get_core_pos()];
1575 #endif
1576 }
1577 #endif
1578 
1579 #if defined(CFG_CORE_ASLR)
1580 #if defined(CFG_DT)
1581 unsigned long __weak get_aslr_seed(void *fdt)
1582 {
1583 	int rc = 0;
1584 	const uint64_t *seed = NULL;
1585 	int offs = 0;
1586 	int len = 0;
1587 
1588 	if (!fdt) {
1589 		DMSG("No fdt");
1590 		goto err;
1591 	}
1592 
1593 	rc = fdt_check_header(fdt);
1594 	if (rc) {
1595 		DMSG("Bad fdt: %d", rc);
1596 		goto err;
1597 	}
1598 
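	/* The boot loader may pass an ASLR seed in /secure-chosen:kaslr-seed */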
1599	offs = fdt_path_offset(fdt, "/secure-chosen");
1600 	if (offs < 0) {
1601 		DMSG("Cannot find /secure-chosen");
1602 		goto err;
1603 	}
1604 	seed = fdt_getprop(fdt, offs, "kaslr-seed", &len);
1605 	if (!seed || len != sizeof(*seed)) {
1606 		DMSG("Cannot find valid kaslr-seed");
1607 		goto err;
1608 	}
1609 
1610 	return fdt64_to_cpu(*seed);
1611 
1612 err:
1613 	/* Try platform implementation */
1614 	return plat_get_aslr_seed();
1615 }
1616 #else /*!CFG_DT*/
1617 unsigned long __weak get_aslr_seed(void *fdt __unused)
1618 {
1619 	/* Try platform implementation */
1620 	return plat_get_aslr_seed();
1621 }
1622 #endif /*!CFG_DT*/
1623 #endif /*CFG_CORE_ASLR*/
1624