xref: /optee_os/core/arch/arm/kernel/boot.c (revision c4cab13e43f8170575612892f66bdbc8c9e67285)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2015-2022, Linaro Limited
4  */
5 
6 #include <arm.h>
7 #include <assert.h>
8 #include <compiler.h>
9 #include <config.h>
10 #include <console.h>
11 #include <crypto/crypto.h>
12 #include <drivers/gic.h>
13 #include <dt-bindings/interrupt-controller/arm-gic.h>
14 #include <initcall.h>
15 #include <inttypes.h>
16 #include <keep.h>
17 #include <kernel/asan.h>
18 #include <kernel/boot.h>
19 #include <kernel/linker.h>
20 #include <kernel/misc.h>
21 #include <kernel/panic.h>
22 #include <kernel/tee_misc.h>
23 #include <kernel/thread.h>
24 #include <kernel/tpm.h>
25 #include <libfdt.h>
26 #include <malloc.h>
27 #include <memtag.h>
28 #include <mm/core_memprot.h>
29 #include <mm/core_mmu.h>
30 #include <mm/fobj.h>
31 #include <mm/tee_mm.h>
32 #include <mm/tee_pager.h>
33 #include <sm/psci.h>
34 #include <stdio.h>
35 #include <trace.h>
36 #include <utee_defines.h>
37 #include <util.h>
38 
39 #include <platform_config.h>
40 
41 #if !defined(CFG_WITH_ARM_TRUSTED_FW)
42 #include <sm/sm.h>
43 #endif
44 
45 #if defined(CFG_WITH_VFP)
46 #include <kernel/vfp.h>
47 #endif
48 
49 /*
50  * In this file we're using unsigned long to represent physical pointers as
51  * they are received in a single register when OP-TEE is initially entered.
52  * This limits 32-bit systems to only make use of the lower 32 bits
53  * of a physical address for initial parameters.
54  *
55  * 64-bit systems on the other hand can use full 64-bit physical pointers.
56  */
57 #define PADDR_INVALID		ULONG_MAX
58 
59 #if defined(CFG_BOOT_SECONDARY_REQUEST)
60 struct ns_entry_context {
61 	uintptr_t entry_point;
62 	uintptr_t context_id;
63 };
64 struct ns_entry_context ns_entry_contexts[CFG_TEE_CORE_NB_CORE];
65 static uint32_t spin_table[CFG_TEE_CORE_NB_CORE];
66 #endif
67 
68 #ifdef CFG_BOOT_SYNC_CPU
69 /*
70  * Array used when booting to synchronize the CPUs.
71  * When 0, the CPU has not started.
72  * When 1, it has started.
73  */
74 uint32_t sem_cpu_sync[CFG_TEE_CORE_NB_CORE];
75 DECLARE_KEEP_PAGER(sem_cpu_sync);
76 #endif
77 
78 #ifdef CFG_DT
79 struct dt_descriptor {
80 	void *blob;
81 #ifdef _CFG_USE_DTB_OVERLAY
82 	int frag_id;
83 #endif
84 };
85 
86 static struct dt_descriptor external_dt __nex_bss;
87 #endif
88 
89 #ifdef CFG_SECONDARY_INIT_CNTFRQ
90 static uint32_t cntfrq;
91 #endif
92 
93 /* May be overridden in plat-$(PLATFORM)/main.c */
94 __weak void plat_primary_init_early(void)
95 {
96 }
97 DECLARE_KEEP_PAGER(plat_primary_init_early);
98 
99 /* May be overridden in plat-$(PLATFORM)/main.c */
100 __weak void main_init_gic(void)
101 {
102 }
103 
104 /* May be overridden in plat-$(PLATFORM)/main.c */
105 __weak void main_secondary_init_gic(void)
106 {
107 }
108 
109 /* May be overridden in plat-$(PLATFORM)/main.c */
110 __weak unsigned long plat_get_aslr_seed(void)
111 {
112 	DMSG("Warning: no ASLR seed");
113 
114 	return 0;
115 }
116 
117 #if defined(_CFG_CORE_STACK_PROTECTOR)
118 /* Generate random stack canary value on boot up */
119 __weak uintptr_t plat_get_random_stack_canary(void)
120 {
121 	uintptr_t canary = 0xbaaaad00;
122 	TEE_Result ret = TEE_ERROR_GENERIC;
123 
124 	/*
125 	 * With virtualization the RNG is not initialized in the Nexus core,
126 	 * so this function needs a platform-specific override.
127 	 */
128 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
129 		IMSG("WARNING: Using fixed value for stack canary");
130 		return canary;
131 	}
132 
133 	ret = crypto_rng_read(&canary, sizeof(canary));
134 	if (ret != TEE_SUCCESS)
135 		panic("Failed to generate random stack canary");
136 
137 	/* Leave a null byte in the canary to mitigate string-based exploits */
138 	return canary & ~0xffUL;
139 }
140 #endif /*_CFG_CORE_STACK_PROTECTOR*/
141 
142 /*
143  * This function is called as a guard after each SMC call that is not
144  * supposed to return.
145  */
146 void __panic_at_smc_return(void)
147 {
148 	panic();
149 }
150 
151 #if defined(CFG_WITH_ARM_TRUSTED_FW)
152 void init_sec_mon(unsigned long nsec_entry __maybe_unused)
153 {
154 	assert(nsec_entry == PADDR_INVALID);
155 	/* Do nothing as we don't have a secure monitor */
156 }
157 #else
158 /* May be overridden in plat-$(PLATFORM)/main.c */
159 __weak void init_sec_mon(unsigned long nsec_entry)
160 {
161 	struct sm_nsec_ctx *nsec_ctx;
162 
163 	assert(nsec_entry != PADDR_INVALID);
164 
165 	/* Initialize secure monitor */
166 	nsec_ctx = sm_get_nsec_ctx();
167 	nsec_ctx->mon_lr = nsec_entry;
168 	nsec_ctx->mon_spsr = CPSR_MODE_SVC | CPSR_I;
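	/*
	 * An entry address with bit 0 set denotes a Thumb entry point (same
	 * convention as BX), so reflect that in the saved SPSR.
	 */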
169 	if (nsec_entry & 1)
170 		nsec_ctx->mon_spsr |= CPSR_T;
171 }
172 #endif
173 
174 #if defined(CFG_WITH_ARM_TRUSTED_FW)
175 static void init_vfp_nsec(void)
176 {
177 }
178 #else
179 static void init_vfp_nsec(void)
180 {
181 	/* Normal world can use CP10 and CP11 (SIMD/VFP) */
182 	write_nsacr(read_nsacr() | NSACR_CP10 | NSACR_CP11);
183 }
184 #endif
185 
186 #if defined(CFG_WITH_VFP)
187 
188 #ifdef ARM32
189 static void init_vfp_sec(void)
190 {
191 	uint32_t cpacr = read_cpacr();
192 
193 	/*
194 	 * Enable Advanced SIMD functionality.
195 	 * Enable use of D16-D31 of the Floating-point Extension register
196 	 * file.
197 	 */
198 	cpacr &= ~(CPACR_ASEDIS | CPACR_D32DIS);
199 	/*
200 	 * Enable usage of CP10 and CP11 (SIMD/VFP) in both kernel and user
201 	 * mode.
202 	 */
203 	cpacr |= CPACR_CP(10, CPACR_CP_ACCESS_FULL);
204 	cpacr |= CPACR_CP(11, CPACR_CP_ACCESS_FULL);
205 	write_cpacr(cpacr);
206 }
207 #endif /* ARM32 */
208 
209 #ifdef ARM64
210 static void init_vfp_sec(void)
211 {
212 	/* Not using VFP until thread_kernel_enable_vfp() */
213 	vfp_disable();
214 }
215 #endif /* ARM64 */
216 
217 #else /* CFG_WITH_VFP */
218 
219 static void init_vfp_sec(void)
220 {
221 	/* Not using VFP */
222 }
223 #endif
224 
225 #ifdef CFG_SECONDARY_INIT_CNTFRQ
226 static void primary_save_cntfrq(void)
227 {
228 	assert(cntfrq == 0);
229 
230 	/*
231 	 * CNTFRQ should be initialized on the primary CPU by a
232 	 * previous boot stage
233 	 */
234 	cntfrq = read_cntfrq();
235 }
236 
237 static void secondary_init_cntfrq(void)
238 {
239 	assert(cntfrq != 0);
240 	write_cntfrq(cntfrq);
241 }
242 #else /* CFG_SECONDARY_INIT_CNTFRQ */
243 static void primary_save_cntfrq(void)
244 {
245 }
246 
247 static void secondary_init_cntfrq(void)
248 {
249 }
250 #endif
251 
252 #ifdef CFG_CORE_SANITIZE_KADDRESS
253 static void init_run_constructors(void)
254 {
255 	const vaddr_t *ctor;
256 
257 	for (ctor = &__ctor_list; ctor < &__ctor_end; ctor++)
258 		((void (*)(void))(*ctor))();
259 }
260 
261 static void init_asan(void)
262 {
263 
264 	/*
265 	 * CFG_ASAN_SHADOW_OFFSET is also supplied as
266 	 * -fasan-shadow-offset=$(CFG_ASAN_SHADOW_OFFSET) to the compiler.
267 	 * Since all the values needed to calculate CFG_ASAN_SHADOW_OFFSET
268 	 * aren't available to make, it has to be calculated in advance and
269 	 * hard coded into the platform conf.mk. Here, where we do have all
270 	 * the needed values, we double check that the compiler was supplied
271 	 * the correct value.
272 	 */
273 
274 #define __ASAN_SHADOW_START \
275 	ROUNDUP(TEE_RAM_VA_START + (TEE_RAM_VA_SIZE * 8) / 9 - 8, 8)
276 	assert(__ASAN_SHADOW_START == (vaddr_t)&__asan_shadow_start);
277 #define __CFG_ASAN_SHADOW_OFFSET \
278 	(__ASAN_SHADOW_START - (TEE_RAM_VA_START / 8))
279 	COMPILE_TIME_ASSERT(CFG_ASAN_SHADOW_OFFSET == __CFG_ASAN_SHADOW_OFFSET);
280 #undef __ASAN_SHADOW_START
281 #undef __CFG_ASAN_SHADOW_OFFSET
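	/*
	 * As a reminder of where the 8/9 factor above comes from: ASAN maps
	 * every 8 bytes of memory to one shadow byte, i.e. shadow(addr) =
	 * (addr / 8) + CFG_ASAN_SHADOW_OFFSET. With the shadow placed at the
	 * end of TEE_RAM_VA, the shadowed part is the first 8/9 of the area
	 * and the shadow itself the remaining 1/9.
	 */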
282 
283 	/*
284 	 * Set the range covered by the shadow, everything from the start of
285 	 * the core up to the beginning of the shadow area.
286 	 */
287 	asan_set_shadowed((void *)TEE_TEXT_VA_START, &__asan_shadow_start);
288 
289 	/*
290 	 * Add access to areas that aren't opened automatically by a
291 	 * constructor.
292 	 */
293 	asan_tag_access(&__ctor_list, &__ctor_end);
294 	asan_tag_access(__rodata_start, __rodata_end);
295 #ifdef CFG_WITH_PAGER
296 	asan_tag_access(__pageable_start, __pageable_end);
297 #endif /*CFG_WITH_PAGER*/
298 	asan_tag_access(__nozi_start, __nozi_end);
299 	asan_tag_access(__exidx_start, __exidx_end);
300 	asan_tag_access(__extab_start, __extab_end);
301 
302 	init_run_constructors();
303 
304 	/* Everything is tagged correctly, let's start address sanitizing. */
305 	asan_start();
306 }
307 #else /*CFG_CORE_SANITIZE_KADDRESS*/
308 static void init_asan(void)
309 {
310 }
311 #endif /*CFG_CORE_SANITIZE_KADDRESS*/
312 
313 #if defined(CFG_MEMTAG)
314 /* Called from entry_a64.S only when MEMTAG is configured */
315 void boot_init_memtag(void)
316 {
317 	memtag_init_ops(feat_mte_implemented());
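	/*
	 * Tag all of TEE core RAM with tag value 0, which is the tag used
	 * by untagged pointers.
	 */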
318 	memtag_set_tags((void *)TEE_RAM_START, TEE_RAM_PH_SIZE, 0);
319 }
320 #endif
321 
322 #ifdef CFG_WITH_PAGER
323 
324 #ifdef CFG_CORE_SANITIZE_KADDRESS
325 static void carve_out_asan_mem(tee_mm_pool_t *pool)
326 {
327 	const size_t s = pool->hi - pool->lo;
328 	tee_mm_entry_t *mm;
329 	paddr_t apa = ASAN_MAP_PA;
330 	size_t asz = ASAN_MAP_SZ;
331 
332 	if (core_is_buffer_outside(apa, asz, pool->lo, s))
333 		return;
334 
335 	/* Reserve the shadow area */
336 	if (!core_is_buffer_inside(apa, asz, pool->lo, s)) {
337 		if (apa < pool->lo) {
338 			/*
339 			 * ASAN buffer is overlapping with the beginning of
340 			 * the pool.
341 			 */
342 			asz -= pool->lo - apa;
343 			apa = pool->lo;
344 		} else {
345 			/*
346 			 * ASAN buffer is overlapping with the end of the
347 			 * pool.
348 			 */
349 			asz = pool->hi - apa;
350 		}
351 	}
352 	mm = tee_mm_alloc2(pool, apa, asz);
353 	assert(mm);
354 }
355 #else
356 static void carve_out_asan_mem(tee_mm_pool_t *pool __unused)
357 {
358 }
359 #endif
360 
361 static void print_pager_pool_size(void)
362 {
363 	struct tee_pager_stats __maybe_unused stats;
364 
365 	tee_pager_get_stats(&stats);
366 	IMSG("Pager pool size: %zukB",
367 		stats.npages_all * SMALL_PAGE_SIZE / 1024);
368 }
369 
370 static void init_vcore(tee_mm_pool_t *mm_vcore)
371 {
372 	const vaddr_t begin = VCORE_START_VA;
373 	size_t size = TEE_RAM_VA_SIZE;
374 
375 #ifdef CFG_CORE_SANITIZE_KADDRESS
376 	/* Carve out the ASAN shadow memory, flat mapped after core memory */
377 	if (begin + size > ASAN_SHADOW_PA)
378 		size = ASAN_MAP_PA - begin;
379 #endif
380 
381 	if (!tee_mm_init(mm_vcore, begin, size, SMALL_PAGE_SHIFT,
382 			 TEE_MM_POOL_NO_FLAGS))
383 		panic("tee_mm_vcore init failed");
384 }
385 
386 /*
387  * With CFG_CORE_ASLR=y the init part is relocated very early during boot.
388  * The init part is also paged just as the rest of the normal paged code, with
389  * the difference that it's preloaded during boot. When the backing store
390  * is configured the entire paged binary is copied in place and then also
391  * the init part. Since the init part has been relocated (references to
392  * addresses updated to compensate for the new load address) this has to be
393  * undone for the hashes of those pages to match with the original binary.
394  *
395  * If CFG_CORE_ASLR=n, nothing needs to be done as the code/ro pages are
396  * unchanged.
397  */
398 static void undo_init_relocation(uint8_t *paged_store __maybe_unused)
399 {
400 #ifdef CFG_CORE_ASLR
401 	unsigned long *ptr = NULL;
402 	const uint32_t *reloc = NULL;
403 	const uint32_t *reloc_end = NULL;
404 	unsigned long offs = boot_mmu_config.load_offset;
405 	const struct boot_embdata *embdata = (const void *)__init_end;
406 	vaddr_t addr_end = (vaddr_t)__init_end - offs - TEE_RAM_START;
407 	vaddr_t addr_start = (vaddr_t)__init_start - offs - TEE_RAM_START;
408 
409 	reloc = (const void *)((vaddr_t)embdata + embdata->reloc_offset);
410 	reloc_end = reloc + embdata->reloc_len / sizeof(*reloc);
411 
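	/*
	 * Each relocation entry is the offset, relative to TEE_RAM_START in
	 * the unrelocated binary, of a word that was patched with the ASLR
	 * load offset. The entries are assumed to be in ascending order,
	 * which lets the loop stop at the first one past the init part.
	 */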
412 	for (; reloc < reloc_end; reloc++) {
413 		if (*reloc < addr_start)
414 			continue;
415 		if (*reloc >= addr_end)
416 			break;
417 		ptr = (void *)(paged_store + *reloc - addr_start);
418 		*ptr -= offs;
419 	}
420 #endif
421 }
422 
423 static struct fobj *ro_paged_alloc(tee_mm_entry_t *mm, void *hashes,
424 				   void *store)
425 {
426 	const unsigned int num_pages = tee_mm_get_bytes(mm) / SMALL_PAGE_SIZE;
427 #ifdef CFG_CORE_ASLR
428 	unsigned int reloc_offs = (vaddr_t)__pageable_start - VCORE_START_VA;
429 	const struct boot_embdata *embdata = (const void *)__init_end;
430 	const void *reloc = __init_end + embdata->reloc_offset;
431 
432 	return fobj_ro_reloc_paged_alloc(num_pages, hashes, reloc_offs,
433 					 reloc, embdata->reloc_len, store);
434 #else
435 	return fobj_ro_paged_alloc(num_pages, hashes, store);
436 #endif
437 }
438 
439 static void init_runtime(unsigned long pageable_part)
440 {
441 	size_t n;
442 	size_t init_size = (size_t)(__init_end - __init_start);
443 	size_t pageable_start = (size_t)__pageable_start;
444 	size_t pageable_end = (size_t)__pageable_end;
445 	size_t pageable_size = pageable_end - pageable_start;
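	/*
	 * Virtual address of the end of TZSRAM: the physical end address
	 * translated through the core load address into the virtual core
	 * mapping.
	 */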
446 	vaddr_t tzsram_end = TZSRAM_BASE + TZSRAM_SIZE - TEE_LOAD_ADDR +
447 			     VCORE_START_VA;
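	/* One SHA-256 hash per small page of the pageable area */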
448 	size_t hash_size = (pageable_size / SMALL_PAGE_SIZE) *
449 			   TEE_SHA256_HASH_SIZE;
450 	const struct boot_embdata *embdata = (const void *)__init_end;
451 	const void *tmp_hashes = NULL;
452 	tee_mm_entry_t *mm = NULL;
453 	struct fobj *fobj = NULL;
454 	uint8_t *paged_store = NULL;
455 	uint8_t *hashes = NULL;
456 
457 	assert(pageable_size % SMALL_PAGE_SIZE == 0);
458 	assert(embdata->total_len >= embdata->hashes_offset +
459 				     embdata->hashes_len);
460 	assert(hash_size == embdata->hashes_len);
461 
462 	tmp_hashes = __init_end + embdata->hashes_offset;
463 
464 	init_asan();
465 
466 	/* Add heap2 first as heap1 may be too small as initial bget pool */
467 	malloc_add_pool(__heap2_start, __heap2_end - __heap2_start);
468 	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
469 
470 	/*
471 	 * This needs to be initialized early to support address lookup
472 	 * in MEM_AREA_TEE_RAM
473 	 */
474 	tee_pager_early_init();
475 
476 	hashes = malloc(hash_size);
477 	IMSG_RAW("\n");
478 	IMSG("Pager is enabled. Hashes: %zu bytes", hash_size);
479 	assert(hashes);
480 	asan_memcpy_unchecked(hashes, tmp_hashes, hash_size);
481 
482 	/*
483 	 * Need tee_mm_sec_ddr initialized to be able to allocate secure
484 	 * DDR below.
485 	 */
486 	core_mmu_init_ta_ram();
487 
488 	carve_out_asan_mem(&tee_mm_sec_ddr);
489 
490 	mm = tee_mm_alloc(&tee_mm_sec_ddr, pageable_size);
491 	assert(mm);
492 	paged_store = phys_to_virt(tee_mm_get_smem(mm), MEM_AREA_TA_RAM,
493 				   pageable_size);
494 	/*
495 	 * Load pageable part in the dedicated allocated area:
496 	 * - Move the pageable non-init part into the pageable area. The
497 	 *   bootloader may have loaded it anywhere in TA RAM, hence memmove().
498 	 * - Copy pageable init part from current location into pageable area.
499 	 */
500 	memmove(paged_store + init_size,
501 		phys_to_virt(pageable_part,
502 			     core_mmu_get_type_by_pa(pageable_part),
503 			     __pageable_part_end - __pageable_part_start),
504 		__pageable_part_end - __pageable_part_start);
505 	asan_memcpy_unchecked(paged_store, __init_start, init_size);
506 	/*
507 	 * Undo eventual relocation for the init part so the hash checks
508 	 * can pass.
509 	 */
510 	undo_init_relocation(paged_store);
511 
512 	/* Check that the hashes of what's in the pageable area are OK */
513 	DMSG("Checking hashes of pageable area");
514 	for (n = 0; (n * SMALL_PAGE_SIZE) < pageable_size; n++) {
515 		const uint8_t *hash = hashes + n * TEE_SHA256_HASH_SIZE;
516 		const uint8_t *page = paged_store + n * SMALL_PAGE_SIZE;
517 		TEE_Result res;
518 
519 		DMSG("hash pg_idx %zu hash %p page %p", n, hash, page);
520 		res = hash_sha256_check(hash, page, SMALL_PAGE_SIZE);
521 		if (res != TEE_SUCCESS) {
522 			EMSG("Hash failed for page %zu at %p: res 0x%x",
523 			     n, (void *)page, res);
524 			panic();
525 		}
526 	}
527 
528 	/*
529 	 * Assert that the pre-paged init sections are page aligned so that
530 	 * nothing trails uninitialized at the end of the premapped init area.
531 	 */
532 	assert(!(init_size & SMALL_PAGE_MASK));
533 
534 	/*
535 	 * Initialize the virtual memory pool used for main_mmu_l2_ttb which
536 	 * is supplied to tee_pager_init() below.
537 	 */
538 	init_vcore(&tee_mm_vcore);
539 
540 	/*
541 	 * Assign the pager's alias area at the end of the small page block
542 	 * that the rest of the binary is loaded into. We're taking more than
543 	 * needed, but we're guaranteed not to need more than the physical
544 	 * amount of TZSRAM.
545 	 */
546 	mm = tee_mm_alloc2(&tee_mm_vcore,
547 			   (vaddr_t)tee_mm_vcore.lo +
548 			   tee_mm_vcore.size - TZSRAM_SIZE,
549 			   TZSRAM_SIZE);
550 	assert(mm);
551 	tee_pager_set_alias_area(mm);
552 
553 	/*
554 	 * Claim virtual memory which isn't paged.
555 	 * Linear memory (flat-mapped core memory) ends there.
556 	 */
557 	mm = tee_mm_alloc2(&tee_mm_vcore, VCORE_UNPG_RX_PA,
558 			   (vaddr_t)(__pageable_start - VCORE_UNPG_RX_PA));
559 	assert(mm);
560 
561 	/*
562 	 * Allocate virtual memory for the pageable area and let the pager
563 	 * take charge of all the pages already assigned to that memory.
564 	 */
565 	mm = tee_mm_alloc2(&tee_mm_vcore, (vaddr_t)__pageable_start,
566 			   pageable_size);
567 	assert(mm);
568 	fobj = ro_paged_alloc(mm, hashes, paged_store);
569 	assert(fobj);
570 	tee_pager_add_core_region(tee_mm_get_smem(mm), PAGED_REGION_TYPE_RO,
571 				  fobj);
572 	fobj_put(fobj);
573 
574 	tee_pager_add_pages(pageable_start, init_size / SMALL_PAGE_SIZE, false);
575 	tee_pager_add_pages(pageable_start + init_size,
576 			    (pageable_size - init_size) / SMALL_PAGE_SIZE,
577 			    true);
578 	if (pageable_end < tzsram_end)
579 		tee_pager_add_pages(pageable_end, (tzsram_end - pageable_end) /
580 						   SMALL_PAGE_SIZE, true);
581 
582 	/*
583 	 * There may be physical pages in TZSRAM before the core load address.
584 	 * These pages can be added to the physical pages pool of the pager.
585 	 * This setup may happen when the secure bootloader runs in TZSRAM
586 	 * and its memory can be reused by OP-TEE once boot stages complete.
587 	 */
588 	tee_pager_add_pages(tee_mm_vcore.lo,
589 			(VCORE_UNPG_RX_PA - tee_mm_vcore.lo) / SMALL_PAGE_SIZE,
590 			true);
591 
592 	print_pager_pool_size();
593 }
594 #else
595 
596 static void init_runtime(unsigned long pageable_part __unused)
597 {
598 	init_asan();
599 
600 	/*
601 	 * By default the whole of OP-TEE uses malloc, so we need to
602 	 * initialize it early. But when virtualization is enabled, malloc is
603 	 * used only by the TEE runtime, so it should be initialized later,
604 	 * separately for every virtual partition. Core code uses nex_malloc
605 	 * instead.
606 	 */
607 #ifdef CFG_NS_VIRTUALIZATION
608 	nex_malloc_add_pool(__nex_heap_start, __nex_heap_end -
609 					      __nex_heap_start);
610 #else
611 	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
612 #endif
613 
614 	IMSG_RAW("\n");
615 }
616 #endif
617 
618 void *get_dt(void)
619 {
620 	void *fdt = get_embedded_dt();
621 
622 	if (!fdt)
623 		fdt = get_external_dt();
624 
625 	return fdt;
626 }
627 
628 void *get_secure_dt(void)
629 {
630 	void *fdt = get_embedded_dt();
631 
632 	if (!fdt && IS_ENABLED(CFG_MAP_EXT_DT_SECURE))
633 		fdt = get_external_dt();
634 
635 	return fdt;
636 }
637 
638 #if defined(CFG_EMBED_DTB)
639 void *get_embedded_dt(void)
640 {
641 	static bool checked;
642 
643 	assert(cpu_mmu_enabled());
644 
645 	if (!checked) {
646 		IMSG("Embedded DTB found");
647 
648 		if (fdt_check_header(embedded_secure_dtb))
649 			panic("Invalid embedded DTB");
650 
651 		checked = true;
652 	}
653 
654 	return embedded_secure_dtb;
655 }
656 #else
657 void *get_embedded_dt(void)
658 {
659 	return NULL;
660 }
661 #endif /*CFG_EMBED_DTB*/
662 
663 #if defined(CFG_DT)
664 void *get_external_dt(void)
665 {
666 	if (!IS_ENABLED(CFG_EXTERNAL_DT))
667 		return NULL;
668 
669 	assert(cpu_mmu_enabled());
670 	return external_dt.blob;
671 }
672 
673 static TEE_Result release_external_dt(void)
674 {
675 	int ret = 0;
676 
677 	if (!IS_ENABLED(CFG_EXTERNAL_DT))
678 		return TEE_SUCCESS;
679 
680 	if (!external_dt.blob)
681 		return TEE_SUCCESS;
682 
683 	ret = fdt_pack(external_dt.blob);
684 	if (ret < 0) {
685 		EMSG("Failed to pack Device Tree at 0x%" PRIxPA ": error %d",
686 		     virt_to_phys(external_dt.blob), ret);
687 		panic();
688 	}
689 
690 	if (core_mmu_remove_mapping(MEM_AREA_EXT_DT, external_dt.blob,
691 				    CFG_DTB_MAX_SIZE))
692 		panic("Failed to remove temporary Device Tree mapping");
693 
694 	/* External DTB is no longer accessible, reset the pointer */
695 	external_dt.blob = NULL;
696 
697 	return TEE_SUCCESS;
698 }
699 boot_final(release_external_dt);
700 
701 #ifdef _CFG_USE_DTB_OVERLAY
702 static int add_dt_overlay_fragment(struct dt_descriptor *dt, int ioffs)
703 {
704 	char frag[32];
705 	int offs;
706 	int ret;
707 
708 	snprintf(frag, sizeof(frag), "fragment@%d", dt->frag_id);
709 	offs = fdt_add_subnode(dt->blob, ioffs, frag);
710 	if (offs < 0)
711 		return offs;
712 
713 	dt->frag_id += 1;
714 
715 	ret = fdt_setprop_string(dt->blob, offs, "target-path", "/");
716 	if (ret < 0)
717 		return -1;
718 
719 	return fdt_add_subnode(dt->blob, offs, "__overlay__");
720 }
721 
722 static int init_dt_overlay(struct dt_descriptor *dt, int __maybe_unused dt_size)
723 {
724 	int fragment;
725 
726 	if (IS_ENABLED(CFG_EXTERNAL_DTB_OVERLAY)) {
727 		if (!fdt_check_header(dt->blob)) {
728 			fdt_for_each_subnode(fragment, dt->blob, 0)
729 				dt->frag_id += 1;
730 			return 0;
731 		}
732 	}
733 
734 	return fdt_create_empty_tree(dt->blob, dt_size);
735 }
736 #else
737 static int add_dt_overlay_fragment(struct dt_descriptor *dt __unused, int offs)
738 {
739 	return offs;
740 }
741 
742 static int init_dt_overlay(struct dt_descriptor *dt __unused,
743 			   int dt_size __unused)
744 {
745 	return 0;
746 }
747 #endif /* _CFG_USE_DTB_OVERLAY */
748 
749 static int add_dt_path_subnode(struct dt_descriptor *dt, const char *path,
750 			       const char *subnode)
751 {
752 	int offs;
753 
754 	offs = fdt_path_offset(dt->blob, path);
755 	if (offs < 0)
756 		return -1;
757 	offs = add_dt_overlay_fragment(dt, offs);
758 	if (offs < 0)
759 		return -1;
760 	offs = fdt_add_subnode(dt->blob, offs, subnode);
761 	if (offs < 0)
762 		return -1;
763 	return offs;
764 }
765 
766 static int add_optee_dt_node(struct dt_descriptor *dt)
767 {
768 	int offs;
769 	int ret;
770 
771 	if (fdt_path_offset(dt->blob, "/firmware/optee") >= 0) {
772 		DMSG("OP-TEE Device Tree node already exists!");
773 		return 0;
774 	}
775 
776 	offs = fdt_path_offset(dt->blob, "/firmware");
777 	if (offs < 0) {
778 		offs = add_dt_path_subnode(dt, "/", "firmware");
779 		if (offs < 0)
780 			return -1;
781 	}
782 
783 	offs = fdt_add_subnode(dt->blob, offs, "optee");
784 	if (offs < 0)
785 		return -1;
786 
787 	ret = fdt_setprop_string(dt->blob, offs, "compatible",
788 				 "linaro,optee-tz");
789 	if (ret < 0)
790 		return -1;
791 	ret = fdt_setprop_string(dt->blob, offs, "method", "smc");
792 	if (ret < 0)
793 		return -1;
794 
795 	if (CFG_CORE_ASYNC_NOTIF_GIC_INTID) {
796 		/*
797 		 * The format of the interrupt property is defined by the
798 		 * binding of the interrupt domain root. In this case it's
799 		 * an Arm GIC v1, v2 or v3, so we must be compatible with
800 		 * these.
801 		 *
802 		 * An SPI type of interrupt is indicated with a 0 in the
803 		 * first cell. A PPI type is indicated with value 1.
804 		 *
805 		 * The interrupt number goes in the second cell where
806 		 * SPIs range from 0 to 987 and PPIs range from 0 to 15.
807 		 *
808 		 * Flags are passed in the third cell.
809 		 */
810 		uint32_t itr_trigger = 0;
811 		uint32_t itr_type = 0;
812 		uint32_t itr_id = 0;
813 		uint32_t val[3] = { };
814 
815 		/* PPIs are visible only in the current CPU cluster */
816 		static_assert(!CFG_CORE_ASYNC_NOTIF_GIC_INTID ||
817 			      (CFG_CORE_ASYNC_NOTIF_GIC_INTID >=
818 			       GIC_SPI_BASE) ||
819 			      ((CFG_TEE_CORE_NB_CORE <= 8) &&
820 			       (CFG_CORE_ASYNC_NOTIF_GIC_INTID >=
821 				GIC_PPI_BASE)));
822 
823 		if (CFG_CORE_ASYNC_NOTIF_GIC_INTID >= GIC_SPI_BASE) {
824 			itr_type = GIC_SPI;
825 			itr_id = CFG_CORE_ASYNC_NOTIF_GIC_INTID - GIC_SPI_BASE;
826 			itr_trigger = IRQ_TYPE_EDGE_RISING;
827 		} else {
828 			itr_type = GIC_PPI;
829 			itr_id = CFG_CORE_ASYNC_NOTIF_GIC_INTID - GIC_PPI_BASE;
830 			itr_trigger = IRQ_TYPE_EDGE_RISING |
831 				      GIC_CPU_MASK_SIMPLE(CFG_TEE_CORE_NB_CORE);
832 		}
833 
834 		val[0] = TEE_U32_TO_BIG_ENDIAN(itr_type);
835 		val[1] = TEE_U32_TO_BIG_ENDIAN(itr_id);
836 		val[2] = TEE_U32_TO_BIG_ENDIAN(itr_trigger);
837 
838 		ret = fdt_setprop(dt->blob, offs, "interrupts", val,
839 				  sizeof(val));
840 		if (ret < 0)
841 			return -1;
842 	}
843 	return 0;
844 }
845 
846 #ifdef CFG_PSCI_ARM32
847 static int append_psci_compatible(void *fdt, int offs, const char *str)
848 {
849 	return fdt_appendprop(fdt, offs, "compatible", str, strlen(str) + 1);
850 }
851 
852 static int dt_add_psci_node(struct dt_descriptor *dt)
853 {
854 	int offs;
855 
856 	if (fdt_path_offset(dt->blob, "/psci") >= 0) {
857 		DMSG("PSCI Device Tree node already exists!");
858 		return 0;
859 	}
860 
861 	offs = add_dt_path_subnode(dt, "/", "psci");
862 	if (offs < 0)
863 		return -1;
864 	if (append_psci_compatible(dt->blob, offs, "arm,psci-1.0"))
865 		return -1;
866 	if (append_psci_compatible(dt->blob, offs, "arm,psci-0.2"))
867 		return -1;
868 	if (append_psci_compatible(dt->blob, offs, "arm,psci"))
869 		return -1;
870 	if (fdt_setprop_string(dt->blob, offs, "method", "smc"))
871 		return -1;
872 	if (fdt_setprop_u32(dt->blob, offs, "cpu_suspend", PSCI_CPU_SUSPEND))
873 		return -1;
874 	if (fdt_setprop_u32(dt->blob, offs, "cpu_off", PSCI_CPU_OFF))
875 		return -1;
876 	if (fdt_setprop_u32(dt->blob, offs, "cpu_on", PSCI_CPU_ON))
877 		return -1;
878 	if (fdt_setprop_u32(dt->blob, offs, "sys_poweroff", PSCI_SYSTEM_OFF))
879 		return -1;
880 	if (fdt_setprop_u32(dt->blob, offs, "sys_reset", PSCI_SYSTEM_RESET))
881 		return -1;
882 	return 0;
883 }
884 
885 static int check_node_compat_prefix(struct dt_descriptor *dt, int offs,
886 				    const char *prefix)
887 {
888 	const size_t prefix_len = strlen(prefix);
889 	size_t l;
890 	int plen;
891 	const char *prop;
892 
893 	prop = fdt_getprop(dt->blob, offs, "compatible", &plen);
894 	if (!prop)
895 		return -1;
896 
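	/*
	 * The "compatible" property is a list of NUL-terminated strings,
	 * check each of them against the prefix.
	 */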
897 	while (plen > 0) {
898 		if (memcmp(prop, prefix, prefix_len) == 0)
899 			return 0; /* match */
900 
901 		l = strlen(prop) + 1;
902 		prop += l;
903 		plen -= l;
904 	}
905 
906 	return -1;
907 }
908 
909 static int dt_add_psci_cpu_enable_methods(struct dt_descriptor *dt)
910 {
911 	int offs = 0;
912 
913 	while (1) {
914 		offs = fdt_next_node(dt->blob, offs, NULL);
915 		if (offs < 0)
916 			break;
917 		if (fdt_getprop(dt->blob, offs, "enable-method", NULL))
918 			continue; /* already set */
919 		if (check_node_compat_prefix(dt, offs, "arm,cortex-a"))
920 			continue; /* no compatible */
921 		if (fdt_setprop_string(dt->blob, offs, "enable-method", "psci"))
922 			return -1;
923 		/* Need to restart scanning as offsets may have changed */
924 		offs = 0;
925 	}
926 	return 0;
927 }
928 
929 static int config_psci(struct dt_descriptor *dt)
930 {
931 	if (dt_add_psci_node(dt))
932 		return -1;
933 	return dt_add_psci_cpu_enable_methods(dt);
934 }
935 #else
936 static int config_psci(struct dt_descriptor *dt __unused)
937 {
938 	return 0;
939 }
940 #endif /*CFG_PSCI_ARM32*/
941 
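/*
 * Store val into data as a single 32-bit cell when cell_size is 1, otherwise
 * as a 64-bit (two cell) value, in DT (big-endian) byte order.
 */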
942 static void set_dt_val(void *data, uint32_t cell_size, uint64_t val)
943 {
944 	if (cell_size == 1) {
945 		fdt32_t v = cpu_to_fdt32((uint32_t)val);
946 
947 		memcpy(data, &v, sizeof(v));
948 	} else {
949 		fdt64_t v = cpu_to_fdt64(val);
950 
951 		memcpy(data, &v, sizeof(v));
952 	}
953 }
954 
955 static int add_res_mem_dt_node(struct dt_descriptor *dt, const char *name,
956 			       paddr_t pa, size_t size)
957 {
958 	int offs = 0;
959 	int ret = 0;
960 	int addr_size = -1;
961 	int len_size = -1;
962 	bool found = true;
963 	char subnode_name[80] = { 0 };
964 
965 	offs = fdt_path_offset(dt->blob, "/reserved-memory");
966 
967 	if (offs < 0) {
968 		found = false;
969 		offs = 0;
970 	}
971 
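	/*
	 * With a DT overlay the #address-cells/#size-cells of the final tree
	 * are not looked up here; cell counts matching paddr_t are used
	 * instead.
	 */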
972 	if (IS_ENABLED2(_CFG_USE_DTB_OVERLAY)) {
973 		len_size = sizeof(paddr_t) / sizeof(uint32_t);
974 		addr_size = sizeof(paddr_t) / sizeof(uint32_t);
975 	} else {
976 		len_size = fdt_size_cells(dt->blob, offs);
977 		if (len_size < 0)
978 			return -1;
979 		addr_size = fdt_address_cells(dt->blob, offs);
980 		if (addr_size < 0)
981 			return -1;
982 	}
983 
984 	if (!found) {
985 		offs = add_dt_path_subnode(dt, "/", "reserved-memory");
986 		if (offs < 0)
987 			return -1;
988 		ret = fdt_setprop_cell(dt->blob, offs, "#address-cells",
989 				       addr_size);
990 		if (ret < 0)
991 			return -1;
992 		ret = fdt_setprop_cell(dt->blob, offs, "#size-cells", len_size);
993 		if (ret < 0)
994 			return -1;
995 		ret = fdt_setprop(dt->blob, offs, "ranges", NULL, 0);
996 		if (ret < 0)
997 			return -1;
998 	}
999 
1000 	ret = snprintf(subnode_name, sizeof(subnode_name),
1001 		       "%s@%" PRIxPA, name, pa);
1002 	if (ret < 0 || ret >= (int)sizeof(subnode_name))
1003 		DMSG("truncated node \"%s@%" PRIxPA"\"", name, pa);
1004 	offs = fdt_add_subnode(dt->blob, offs, subnode_name);
1005 	if (offs >= 0) {
1006 		uint32_t data[FDT_MAX_NCELLS * 2];
1007 
1008 		set_dt_val(data, addr_size, pa);
1009 		set_dt_val(data + addr_size, len_size, size);
1010 		ret = fdt_setprop(dt->blob, offs, "reg", data,
1011 				  sizeof(uint32_t) * (addr_size + len_size));
1012 		if (ret < 0)
1013 			return -1;
1014 		ret = fdt_setprop(dt->blob, offs, "no-map", NULL, 0);
1015 		if (ret < 0)
1016 			return -1;
1017 	} else {
1018 		return -1;
1019 	}
1020 	return 0;
1021 }
1022 
1023 #ifdef CFG_CORE_DYN_SHM
1024 static uint64_t get_dt_val_and_advance(const void *data, size_t *offs,
1025 				       uint32_t cell_size)
1026 {
1027 	uint64_t rv = 0;
1028 
1029 	if (cell_size == 1) {
1030 		uint32_t v;
1031 
1032 		memcpy(&v, (const uint8_t *)data + *offs, sizeof(v));
1033 		*offs += sizeof(v);
1034 		rv = fdt32_to_cpu(v);
1035 	} else {
1036 		uint64_t v;
1037 
1038 		memcpy(&v, (const uint8_t *)data + *offs, sizeof(v));
1039 		*offs += sizeof(v);
1040 		rv = fdt64_to_cpu(v);
1041 	}
1042 
1043 	return rv;
1044 }
1045 
1046 /*
1047  * Find all non-secure memory from DT. Memory marked inaccessible by Secure
1048  * World is ignored since it cannot be mapped and used as dynamic shared
1049  * memory.
1050  */
1051 static int get_nsec_memory_helper(void *fdt, struct core_mmu_phys_mem *mem)
1052 {
1053 	const uint8_t *prop = NULL;
1054 	uint64_t a = 0;
1055 	uint64_t l = 0;
1056 	size_t prop_offs = 0;
1057 	size_t prop_len = 0;
1058 	int elems_total = 0;
1059 	int addr_size = 0;
1060 	int len_size = 0;
1061 	int offs = 0;
1062 	size_t n = 0;
1063 	int len = 0;
1064 
1065 	addr_size = fdt_address_cells(fdt, 0);
1066 	if (addr_size < 0)
1067 		return 0;
1068 
1069 	len_size = fdt_size_cells(fdt, 0);
1070 	if (len_size < 0)
1071 		return 0;
1072 
1073 	while (true) {
1074 		offs = fdt_node_offset_by_prop_value(fdt, offs, "device_type",
1075 						     "memory",
1076 						     sizeof("memory"));
1077 		if (offs < 0)
1078 			break;
1079 
1080 		if (_fdt_get_status(fdt, offs) != (DT_STATUS_OK_NSEC |
1081 						   DT_STATUS_OK_SEC))
1082 			continue;
1083 
1084 		prop = fdt_getprop(fdt, offs, "reg", &len);
1085 		if (!prop)
1086 			continue;
1087 
1088 		prop_len = len;
1089 		for (n = 0, prop_offs = 0; prop_offs < prop_len; n++) {
1090 			a = get_dt_val_and_advance(prop, &prop_offs, addr_size);
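			/*
			 * A truncated "reg" entry: an address cell without a
			 * matching size cell, don't count it.
			 */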
1091 			if (prop_offs >= prop_len) {
1092 				n--;
1093 				break;
1094 			}
1095 
1096 			l = get_dt_val_and_advance(prop, &prop_offs, len_size);
1097 			if (mem) {
1098 				mem->type = MEM_AREA_DDR_OVERALL;
1099 				mem->addr = a;
1100 				mem->size = l;
1101 				mem++;
1102 			}
1103 		}
1104 
1105 		elems_total += n;
1106 	}
1107 
1108 	return elems_total;
1109 }
1110 
1111 static struct core_mmu_phys_mem *get_nsec_memory(void *fdt, size_t *nelems)
1112 {
1113 	struct core_mmu_phys_mem *mem = NULL;
1114 	int elems_total = 0;
1115 
1116 	elems_total = get_nsec_memory_helper(fdt, NULL);
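	/* A first call with a NULL buffer only counts the entries */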
1117 	if (elems_total <= 0)
1118 		return NULL;
1119 
1120 	mem = nex_calloc(elems_total, sizeof(*mem));
1121 	if (!mem)
1122 		panic();
1123 
1124 	elems_total = get_nsec_memory_helper(fdt, mem);
1125 	assert(elems_total > 0);
1126 
1127 	*nelems = elems_total;
1128 
1129 	return mem;
1130 }
1131 #endif /*CFG_CORE_DYN_SHM*/
1132 
1133 #ifdef CFG_CORE_RESERVED_SHM
1134 static int mark_static_shm_as_reserved(struct dt_descriptor *dt)
1135 {
1136 	vaddr_t shm_start;
1137 	vaddr_t shm_end;
1138 
1139 	core_mmu_get_mem_by_type(MEM_AREA_NSEC_SHM, &shm_start, &shm_end);
1140 	if (shm_start != shm_end)
1141 		return add_res_mem_dt_node(dt, "optee_shm",
1142 					   virt_to_phys((void *)shm_start),
1143 					   shm_end - shm_start);
1144 
1145 	DMSG("No SHM configured");
1146 	return -1;
1147 }
1148 #endif /*CFG_CORE_RESERVED_SHM*/
1149 
1150 static void init_external_dt(unsigned long phys_dt)
1151 {
1152 	struct dt_descriptor *dt = &external_dt;
1153 	void *fdt;
1154 	int ret;
1155 
1156 	if (!IS_ENABLED(CFG_EXTERNAL_DT))
1157 		return;
1158 
1159 	if (!phys_dt) {
1160 		/*
1161 		 * No need to panic as we're not using the DT in OP-TEE
1162 		 * yet, we're only adding some nodes for normal world use.
1163 		 * This makes the switch to using DT easier as we can boot
1164 		 * a newer OP-TEE with older boot loaders. Once we start to
1165 		 * initialize devices based on DT we'll likely panic
1166 		 * instead of returning here.
1167 		 */
1168 		IMSG("No non-secure external DT");
1169 		return;
1170 	}
1171 
1172 	fdt = core_mmu_add_mapping(MEM_AREA_EXT_DT, phys_dt, CFG_DTB_MAX_SIZE);
1173 	if (!fdt)
1174 		panic("Failed to map external DTB");
1175 
1176 	dt->blob = fdt;
1177 
1178 	ret = init_dt_overlay(dt, CFG_DTB_MAX_SIZE);
1179 	if (ret < 0) {
1180 		EMSG("Device Tree Overlay init fail @ %#lx: error %d", phys_dt,
1181 		     ret);
1182 		panic();
1183 	}
1184 
1185 	ret = fdt_open_into(fdt, fdt, CFG_DTB_MAX_SIZE);
1186 	if (ret < 0) {
1187 		EMSG("Invalid Device Tree at %#lx: error %d", phys_dt, ret);
1188 		panic();
1189 	}
1190 
1191 	IMSG("Non-secure external DT found");
1192 }
1193 
1194 static int mark_tzdram_as_reserved(struct dt_descriptor *dt)
1195 {
1196 	return add_res_mem_dt_node(dt, "optee_core", CFG_TZDRAM_START,
1197 				   CFG_TZDRAM_SIZE);
1198 }
1199 
1200 static void update_external_dt(void)
1201 {
1202 	struct dt_descriptor *dt = &external_dt;
1203 
1204 	if (!IS_ENABLED(CFG_EXTERNAL_DT))
1205 		return;
1206 
1207 	if (!dt->blob)
1208 		return;
1209 
1210 	if (!IS_ENABLED(CFG_CORE_FFA) && add_optee_dt_node(dt))
1211 		panic("Failed to add OP-TEE Device Tree node");
1212 
1213 	if (config_psci(dt))
1214 		panic("Failed to config PSCI");
1215 
1216 #ifdef CFG_CORE_RESERVED_SHM
1217 	if (mark_static_shm_as_reserved(dt))
1218 		panic("Failed to config non-secure memory");
1219 #endif
1220 
1221 	if (mark_tzdram_as_reserved(dt))
1222 		panic("Failed to config secure memory");
1223 }
1224 #else /*CFG_DT*/
1225 void *get_external_dt(void)
1226 {
1227 	return NULL;
1228 }
1229 
1230 static void init_external_dt(unsigned long phys_dt __unused)
1231 {
1232 }
1233 
1234 static void update_external_dt(void)
1235 {
1236 }
1237 
1238 #ifdef CFG_CORE_DYN_SHM
1239 static struct core_mmu_phys_mem *get_nsec_memory(void *fdt __unused,
1240 						 size_t *nelems __unused)
1241 {
1242 	return NULL;
1243 }
1244 #endif /*CFG_CORE_DYN_SHM*/
1245 #endif /*!CFG_DT*/
1246 
1247 #ifdef CFG_CORE_DYN_SHM
1248 static void discover_nsec_memory(void)
1249 {
1250 	struct core_mmu_phys_mem *mem;
1251 	const struct core_mmu_phys_mem *mem_begin = NULL;
1252 	const struct core_mmu_phys_mem *mem_end = NULL;
1253 	size_t nelems;
1254 	void *fdt = get_external_dt();
1255 
1256 	if (fdt) {
1257 		mem = get_nsec_memory(fdt, &nelems);
1258 		if (mem) {
1259 			core_mmu_set_discovered_nsec_ddr(mem, nelems);
1260 			return;
1261 		}
1262 
1263 		DMSG("No non-secure memory found in FDT");
1264 	}
1265 
1266 	mem_begin = phys_ddr_overall_begin;
1267 	mem_end = phys_ddr_overall_end;
1268 	nelems = mem_end - mem_begin;
1269 	if (nelems) {
1270 		/*
1271 		 * Platform cannot use both register_ddr() and the now
1272 		 * deprecated register_dynamic_shm().
1273 		 */
1274 		assert(phys_ddr_overall_compat_begin ==
1275 		       phys_ddr_overall_compat_end);
1276 	} else {
1277 		mem_begin = phys_ddr_overall_compat_begin;
1278 		mem_end = phys_ddr_overall_compat_end;
1279 		nelems = mem_end - mem_begin;
1280 		if (!nelems)
1281 			return;
1282 		DMSG("Warning register_dynamic_shm() is deprecated, please use register_ddr() instead");
1283 	}
1284 
1285 	mem = nex_calloc(nelems, sizeof(*mem));
1286 	if (!mem)
1287 		panic();
1288 
1289 	memcpy(mem, phys_ddr_overall_begin, sizeof(*mem) * nelems);
1290 	core_mmu_set_discovered_nsec_ddr(mem, nelems);
1291 }
1292 #else /*CFG_CORE_DYN_SHM*/
1293 static void discover_nsec_memory(void)
1294 {
1295 }
1296 #endif /*!CFG_CORE_DYN_SHM*/
1297 
1298 #ifdef CFG_NS_VIRTUALIZATION
1299 static TEE_Result virt_init_heap(void)
1300 {
1301 	/* We need to initialize a heap pool for every virtual guest partition */
1302 	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
1303 
1304 	return TEE_SUCCESS;
1305 }
1306 preinit_early(virt_init_heap);
1307 #endif
1308 
1309 void init_tee_runtime(void)
1310 {
1311 #ifndef CFG_WITH_PAGER
1312 	/* Pager initializes TA RAM early */
1313 	core_mmu_init_ta_ram();
1314 #endif
1315 	/*
1316 	 * With virtualization we call this function when creating the
1317 	 * OP-TEE partition instead.
1318 	 */
1319 	if (!IS_ENABLED(CFG_NS_VIRTUALIZATION))
1320 		call_preinitcalls();
1321 	call_initcalls();
1322 
1323 	/*
1324 	 * These two functions use crypto_rng_read() to initialize the
1325 	 * pauth keys. Once call_initcalls() returns we're guaranteed that
1326 	 * crypto_rng_read() is ready to be used.
1327 	 */
1328 	thread_init_core_local_pauth_keys();
1329 	thread_init_thread_pauth_keys();
1330 }
1331 
1332 static void init_primary(unsigned long pageable_part, unsigned long nsec_entry)
1333 {
1334 	thread_init_core_local_stacks();
1335 	/*
1336 	 * Mask asynchronous exceptions before switch to the thread vector
1337 	 * as the thread handler requires those to be masked while
1338 	 * executing with the temporary stack. The thread subsystem also
1339 	 * asserts that the foreign interrupts are blocked when using most of
1340 	 * its functions.
1341 	 */
1342 	thread_set_exceptions(THREAD_EXCP_ALL);
1343 	primary_save_cntfrq();
1344 	init_vfp_sec();
1345 	/*
1346 	 * Pager: init_runtime() calls thread_kernel_enable_vfp() so we must
1347 	 * set a current thread right now to avoid a chicken-and-egg problem
1348 	 * (thread_init_boot_thread() sets the current thread but needs
1349 	 * things set by init_runtime()).
1350 	 */
1351 	thread_get_core_local()->curr_thread = 0;
1352 	init_runtime(pageable_part);
1353 
1354 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
1355 		/*
1356 		 * Virtualization: We can't initialize threads right now because
1357 		 * threads belong to the "tee" part and will be initialized
1358 		 * separately for each new virtual guest. So we'll clear
1359 		 * "curr_thread" and call it done.
1360 		 */
1361 		thread_get_core_local()->curr_thread = -1;
1362 	} else {
1363 		thread_init_boot_thread();
1364 	}
1365 	thread_init_primary();
1366 	thread_init_per_cpu();
1367 	init_sec_mon(nsec_entry);
1368 }
1369 
1370 static bool cpu_nmfi_enabled(void)
1371 {
1372 #if defined(ARM32)
1373 	return read_sctlr() & SCTLR_NMFI;
1374 #else
1375 	/* Note: ARM64 does not feature non-maskable FIQ support. */
1376 	return false;
1377 #endif
1378 }
1379 
1380 /*
1381  * Note: this function is weak just to make it possible to exclude it from
1382  * the unpaged area.
1383  */
1384 void __weak boot_init_primary_late(unsigned long fdt)
1385 {
1386 	init_external_dt(fdt);
1387 	tpm_map_log_area(get_external_dt());
1388 	discover_nsec_memory();
1389 	update_external_dt();
1390 	configure_console_from_dt();
1391 
1392 	IMSG("OP-TEE version: %s", core_v_str);
1393 	if (IS_ENABLED(CFG_WARN_INSECURE)) {
1394 		IMSG("WARNING: This OP-TEE configuration might be insecure!");
1395 		IMSG("WARNING: Please check https://optee.readthedocs.io/en/latest/architecture/porting_guidelines.html");
1396 	}
1397 	IMSG("Primary CPU initializing");
1398 #ifdef CFG_CORE_ASLR
1399 	DMSG("Executing at offset %#lx with virtual load address %#"PRIxVA,
1400 	     (unsigned long)boot_mmu_config.load_offset, VCORE_START_VA);
1401 #endif
1402 	if (IS_ENABLED(CFG_MEMTAG))
1403 		DMSG("Memory tagging %s",
1404 		     memtag_is_enabled() ?  "enabled" : "disabled");
1405 
1406 	/* Check if platform needs NMFI workaround */
1407 	if (cpu_nmfi_enabled())	{
1408 		if (!IS_ENABLED(CFG_CORE_WORKAROUND_ARM_NMFI))
1409 			IMSG("WARNING: This ARM core has NMFI enabled, please apply workaround!");
1410 	} else {
1411 		if (IS_ENABLED(CFG_CORE_WORKAROUND_ARM_NMFI))
1412 			IMSG("WARNING: This ARM core does not have NMFI enabled, no need for workaround");
1413 	}
1414 
1415 	main_init_gic();
1416 	init_vfp_nsec();
1417 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
1418 		IMSG("Initializing virtualization support");
1419 		core_mmu_init_virtualization();
1420 	} else {
1421 		init_tee_runtime();
1422 	}
1423 	call_finalcalls();
1424 	IMSG("Primary CPU switching to normal world boot");
1425 }
1426 
1427 static void init_secondary_helper(unsigned long nsec_entry)
1428 {
1429 	IMSG("Secondary CPU %zu initializing", get_core_pos());
1430 
1431 	/*
1432 	 * Mask asynchronous exceptions before switch to the thread vector
1433 	 * as the thread handler requires those to be masked while
1434 	 * executing with the temporary stack. The thread subsystem also
1435 	 * asserts that the foreign interrupts are blocked when using most of
1436 	 * its functions.
1437 	 */
1438 	thread_set_exceptions(THREAD_EXCP_ALL);
1439 
1440 	secondary_init_cntfrq();
1441 	thread_init_per_cpu();
1442 	init_sec_mon(nsec_entry);
1443 	main_secondary_init_gic();
1444 	init_vfp_sec();
1445 	init_vfp_nsec();
1446 
1447 	IMSG("Secondary CPU %zu switching to normal world boot", get_core_pos());
1448 }
1449 
1450 /*
1451  * Note: this function is weak just to make it possible to exclude it from
1452  * the unpaged area so that it lies in the init area.
1453  */
1454 void __weak boot_init_primary_early(unsigned long pageable_part,
1455 				    unsigned long nsec_entry __maybe_unused)
1456 {
1457 	unsigned long e = PADDR_INVALID;
1458 
1459 #if !defined(CFG_WITH_ARM_TRUSTED_FW)
1460 	e = nsec_entry;
1461 #endif
1462 
1463 	init_primary(pageable_part, e);
1464 }
1465 
1466 #if defined(CFG_WITH_ARM_TRUSTED_FW)
1467 unsigned long boot_cpu_on_handler(unsigned long a0 __maybe_unused,
1468 				  unsigned long a1 __unused)
1469 {
1470 	init_secondary_helper(PADDR_INVALID);
1471 	return 0;
1472 }
1473 #else
1474 void boot_init_secondary(unsigned long nsec_entry)
1475 {
1476 	init_secondary_helper(nsec_entry);
1477 }
1478 #endif
1479 
1480 #if defined(CFG_BOOT_SECONDARY_REQUEST)
1481 void boot_set_core_ns_entry(size_t core_idx, uintptr_t entry,
1482 			    uintptr_t context_id)
1483 {
1484 	ns_entry_contexts[core_idx].entry_point = entry;
1485 	ns_entry_contexts[core_idx].context_id = context_id;
1486 	dsb_ishst();
1487 }
1488 
1489 int boot_core_release(size_t core_idx, paddr_t entry)
1490 {
1491 	if (!core_idx || core_idx >= CFG_TEE_CORE_NB_CORE)
1492 		return -1;
1493 
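	/*
	 * Publish the entry point before releasing the core: dmb() orders
	 * the store to entry_point before the store to spin_table, then
	 * dsb() and sev() make the release visible and wake cores waiting
	 * in wfe() in boot_core_hpen().
	 */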
1494 	ns_entry_contexts[core_idx].entry_point = entry;
1495 	dmb();
1496 	spin_table[core_idx] = 1;
1497 	dsb();
1498 	sev();
1499 
1500 	return 0;
1501 }
1502 
1503 /*
1504  * Spin until a secondary boot request arrives, then return with
1505  * the secondary core entry address.
1506  */
1507 struct ns_entry_context *boot_core_hpen(void)
1508 {
1509 #ifdef CFG_PSCI_ARM32
1510 	return &ns_entry_contexts[get_core_pos()];
1511 #else
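	/* Wait here until boot_core_release() flags this core in spin_table */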
1512 	do {
1513 		wfe();
1514 	} while (!spin_table[get_core_pos()]);
1515 	dmb();
1516 	return &ns_entry_contexts[get_core_pos()];
1517 #endif
1518 }
1519 #endif
1520 
1521 #if defined(CFG_CORE_ASLR)
1522 #if defined(CFG_DT)
1523 unsigned long __weak get_aslr_seed(void *fdt)
1524 {
1525 	int rc = 0;
1526 	const uint64_t *seed = NULL;
1527 	int offs = 0;
1528 	int len = 0;
1529 
1530 	if (!fdt) {
1531 		DMSG("No fdt");
1532 		goto err;
1533 	}
1534 
1535 	rc = fdt_check_header(fdt);
1536 	if (rc) {
1537 		DMSG("Bad fdt: %d", rc);
1538 		goto err;
1539 	}
1540 
1541 	offs = fdt_path_offset(fdt, "/secure-chosen");
1542 	if (offs < 0) {
1543 		DMSG("Cannot find /secure-chosen");
1544 		goto err;
1545 	}
1546 	seed = fdt_getprop(fdt, offs, "kaslr-seed", &len);
1547 	if (!seed || len != sizeof(*seed)) {
1548 		DMSG("Cannot find valid kaslr-seed");
1549 		goto err;
1550 	}
1551 
1552 	return fdt64_to_cpu(*seed);
1553 
1554 err:
1555 	/* Try platform implementation */
1556 	return plat_get_aslr_seed();
1557 }
1558 #else /*!CFG_DT*/
1559 unsigned long __weak get_aslr_seed(void *fdt __unused)
1560 {
1561 	/* Try platform implementation */
1562 	return plat_get_aslr_seed();
1563 }
1564 #endif /*!CFG_DT*/
1565 #endif /*CFG_CORE_ASLR*/
1566