xref: /optee_os/core/arch/arm/kernel/boot.c (revision c6c416f1bf4617feef23d592155ba7de69bceea9)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2015-2021, Linaro Limited
4  */
5 
6 #include <arm.h>
7 #include <assert.h>
8 #include <compiler.h>
9 #include <config.h>
10 #include <console.h>
11 #include <crypto/crypto.h>
12 #include <drivers/gic.h>
13 #include <initcall.h>
14 #include <inttypes.h>
15 #include <keep.h>
16 #include <kernel/asan.h>
17 #include <kernel/boot.h>
18 #include <kernel/linker.h>
19 #include <kernel/misc.h>
20 #include <kernel/panic.h>
21 #include <kernel/tee_misc.h>
22 #include <kernel/thread.h>
23 #include <kernel/tpm.h>
24 #include <libfdt.h>
25 #include <malloc.h>
26 #include <mm/core_memprot.h>
27 #include <mm/core_mmu.h>
28 #include <mm/fobj.h>
29 #include <mm/tee_mm.h>
30 #include <mm/tee_pager.h>
31 #include <sm/psci.h>
32 #include <stdio.h>
33 #include <trace.h>
34 #include <utee_defines.h>
35 #include <util.h>
36 
37 #include <platform_config.h>
38 
39 #if !defined(CFG_WITH_ARM_TRUSTED_FW)
40 #include <sm/sm.h>
41 #endif
42 
43 #if defined(CFG_WITH_VFP)
44 #include <kernel/vfp.h>
45 #endif
46 
47 /*
48  * In this file we're using unsigned long to represent physical pointers as
49  * they are received in a single register when OP-TEE is initially entered.
50  * This limits 32-bit systems to use only the lower 32 bits
51  * of a physical address for the initial parameters.
52  *
53  * 64-bit systems on the other hand can use full 64-bit physical pointers.
54  */
55 #define PADDR_INVALID		ULONG_MAX
56 
57 #if defined(CFG_BOOT_SECONDARY_REQUEST)
58 struct ns_entry_context {
59 	uintptr_t entry_point;
60 	uintptr_t context_id;
61 };
62 struct ns_entry_context ns_entry_contexts[CFG_TEE_CORE_NB_CORE];
63 static uint32_t spin_table[CFG_TEE_CORE_NB_CORE];
64 #endif
65 
66 #ifdef CFG_BOOT_SYNC_CPU
67 /*
68  * Array used when booting to synchronize the CPUs.
69  * When 0, the CPU has not started.
70  * When 1, it has started.
71  */
72 uint32_t sem_cpu_sync[CFG_TEE_CORE_NB_CORE];
73 DECLARE_KEEP_PAGER(sem_cpu_sync);
74 #endif
75 
76 #ifdef CFG_DT
77 struct dt_descriptor {
78 	void *blob;
79 #ifdef _CFG_USE_DTB_OVERLAY
80 	int frag_id;
81 #endif
82 };
83 
84 static struct dt_descriptor external_dt __nex_bss;
85 #endif
86 
87 #ifdef CFG_SECONDARY_INIT_CNTFRQ
88 static uint32_t cntfrq;
89 #endif
90 
91 /* May be overridden in plat-$(PLATFORM)/main.c */
92 __weak void plat_primary_init_early(void)
93 {
94 }
95 DECLARE_KEEP_PAGER(plat_primary_init_early);
96 
97 /* May be overridden in plat-$(PLATFORM)/main.c */
98 __weak void main_init_gic(void)
99 {
100 }
101 
102 /* May be overridden in plat-$(PLATFORM)/main.c */
103 __weak void main_secondary_init_gic(void)
104 {
105 }
106 
107 /* May be overridden in plat-$(PLATFORM)/main.c */
108 __weak unsigned long plat_get_aslr_seed(void)
109 {
110 	DMSG("Warning: no ASLR seed");
111 
112 	return 0;
113 }
114 
115 /*
116  * This function is called as a guard after each SMC call that is not
117  * supposed to return.
118  */
119 void __panic_at_smc_return(void)
120 {
121 	panic();
122 }
123 
124 #if defined(CFG_WITH_ARM_TRUSTED_FW)
125 void init_sec_mon(unsigned long nsec_entry __maybe_unused)
126 {
127 	assert(nsec_entry == PADDR_INVALID);
128 	/* Do nothing as we don't have a secure monitor */
129 }
130 #else
131 /* May be overridden in plat-$(PLATFORM)/main.c */
132 __weak void init_sec_mon(unsigned long nsec_entry)
133 {
134 	struct sm_nsec_ctx *nsec_ctx;
135 
136 	assert(nsec_entry != PADDR_INVALID);
137 
138 	/* Initialize secure monitor */
139 	nsec_ctx = sm_get_nsec_ctx();
140 	nsec_ctx->mon_lr = nsec_entry;
141 	nsec_ctx->mon_spsr = CPSR_MODE_SVC | CPSR_I;
142 	if (nsec_entry & 1)
143 		nsec_ctx->mon_spsr |= CPSR_T;
144 }
145 #endif
146 
147 #if defined(CFG_WITH_ARM_TRUSTED_FW)
148 static void init_vfp_nsec(void)
149 {
150 }
151 #else
152 static void init_vfp_nsec(void)
153 {
154 	/* Normal world can use CP10 and CP11 (SIMD/VFP) */
155 	write_nsacr(read_nsacr() | NSACR_CP10 | NSACR_CP11);
156 }
157 #endif
158 
159 #if defined(CFG_WITH_VFP)
160 
161 #ifdef ARM32
162 static void init_vfp_sec(void)
163 {
164 	uint32_t cpacr = read_cpacr();
165 
166 	/*
167 	 * Enable Advanced SIMD functionality.
168 	 * Enable use of D16-D31 of the Floating-point Extension register
169 	 * file.
170 	 */
171 	cpacr &= ~(CPACR_ASEDIS | CPACR_D32DIS);
172 	/*
173 	 * Enable usage of CP10 and CP11 (SIMD/VFP) in both kernel and user
174 	 * mode.
175 	 */
176 	cpacr |= CPACR_CP(10, CPACR_CP_ACCESS_FULL);
177 	cpacr |= CPACR_CP(11, CPACR_CP_ACCESS_FULL);
178 	write_cpacr(cpacr);
179 }
180 #endif /* ARM32 */
181 
182 #ifdef ARM64
183 static void init_vfp_sec(void)
184 {
185 	/* Not using VFP until thread_kernel_enable_vfp() */
186 	vfp_disable();
187 }
188 #endif /* ARM64 */
189 
190 #else /* CFG_WITH_VFP */
191 
192 static void init_vfp_sec(void)
193 {
194 	/* Not using VFP */
195 }
196 #endif
197 
198 #ifdef CFG_SECONDARY_INIT_CNTFRQ
199 static void primary_save_cntfrq(void)
200 {
201 	assert(cntfrq == 0);
202 
203 	/*
204 	 * CNTFRQ should be initialized on the primary CPU by a
205 	 * previous boot stage
206 	 */
207 	cntfrq = read_cntfrq();
208 }
209 
210 static void secondary_init_cntfrq(void)
211 {
212 	assert(cntfrq != 0);
213 	write_cntfrq(cntfrq);
214 }
215 #else /* CFG_SECONDARY_INIT_CNTFRQ */
216 static void primary_save_cntfrq(void)
217 {
218 }
219 
220 static void secondary_init_cntfrq(void)
221 {
222 }
223 #endif
224 
225 #ifdef CFG_CORE_SANITIZE_KADDRESS
226 static void init_run_constructors(void)
227 {
228 	const vaddr_t *ctor;
229 
230 	for (ctor = &__ctor_list; ctor < &__ctor_end; ctor++)
231 		((void (*)(void))(*ctor))();
232 }
233 
234 static void init_asan(void)
235 {
236 
237 	/*
238 	 * CFG_ASAN_SHADOW_OFFSET is also supplied as
239 	 * -fasan-shadow-offset=$(CFG_ASAN_SHADOW_OFFSET) to the compiler.
240 	 * Since all the values needed to calculate CFG_ASAN_SHADOW_OFFSET
241 	 * aren't available to make, we need to calculate it in advance and
242 	 * hard code it into the platform conf.mk. Here, where we have all
243 	 * the needed values, we double check that the compiler is supplied
244 	 * the correct value.
245 	 */
246 
247 #define __ASAN_SHADOW_START \
248 	ROUNDUP(TEE_RAM_VA_START + (TEE_RAM_VA_SIZE * 8) / 9 - 8, 8)
249 	assert(__ASAN_SHADOW_START == (vaddr_t)&__asan_shadow_start);
250 #define __CFG_ASAN_SHADOW_OFFSET \
251 	(__ASAN_SHADOW_START - (TEE_RAM_VA_START / 8))
252 	COMPILE_TIME_ASSERT(CFG_ASAN_SHADOW_OFFSET == __CFG_ASAN_SHADOW_OFFSET);
253 #undef __ASAN_SHADOW_START
254 #undef __CFG_ASAN_SHADOW_OFFSET
255 
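	/*
	 * To recap the arithmetic: ASan tracks each 8-byte granule of memory
	 * with one shadow byte at addr / 8 + CFG_ASAN_SHADOW_OFFSET. Placing
	 * the shadow start roughly 8/9 into the TEE RAM VA space leaves the
	 * top ninth free to shadow the 8/9 below it, and the offset is then
	 * __ASAN_SHADOW_START - TEE_RAM_VA_START / 8 so that TEE_RAM_VA_START
	 * maps to the first shadow byte.
	 */
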
256 	/*
257 	 * Register the area covered by the shadow memory: everything from
258 	 * the start of the core up to the beginning of the shadow area.
259 	 */
260 	asan_set_shadowed((void *)TEE_TEXT_VA_START, &__asan_shadow_start);
261 
262 	/*
263 	 * Add access to areas that aren't opened automatically by a
264 	 * constructor.
265 	 */
266 	asan_tag_access(&__ctor_list, &__ctor_end);
267 	asan_tag_access(__rodata_start, __rodata_end);
268 #ifdef CFG_WITH_PAGER
269 	asan_tag_access(__pageable_start, __pageable_end);
270 #endif /*CFG_WITH_PAGER*/
271 	asan_tag_access(__nozi_start, __nozi_end);
272 	asan_tag_access(__exidx_start, __exidx_end);
273 	asan_tag_access(__extab_start, __extab_end);
274 
275 	init_run_constructors();
276 
277 	/* Everything is tagged correctly, let's start address sanitizing. */
278 	asan_start();
279 }
280 #else /*CFG_CORE_SANITIZE_KADDRESS*/
281 static void init_asan(void)
282 {
283 }
284 #endif /*CFG_CORE_SANITIZE_KADDRESS*/
285 
286 #ifdef CFG_WITH_PAGER
287 
288 #ifdef CFG_CORE_SANITIZE_KADDRESS
289 static void carve_out_asan_mem(tee_mm_pool_t *pool)
290 {
291 	const size_t s = pool->hi - pool->lo;
292 	tee_mm_entry_t *mm;
293 	paddr_t apa = ASAN_MAP_PA;
294 	size_t asz = ASAN_MAP_SZ;
295 
296 	if (core_is_buffer_outside(apa, asz, pool->lo, s))
297 		return;
298 
299 	/* Reserve the shadow area */
300 	if (!core_is_buffer_inside(apa, asz, pool->lo, s)) {
301 		if (apa < pool->lo) {
302 			/*
303 			 * ASAN buffer is overlapping with the beginning of
304 			 * the pool.
305 			 */
306 			asz -= pool->lo - apa;
307 			apa = pool->lo;
308 		} else {
309 			/*
310 			 * ASAN buffer is overlapping with the end of the
311 			 * pool.
312 			 */
313 			asz = pool->hi - apa;
314 		}
315 	}
316 	mm = tee_mm_alloc2(pool, apa, asz);
317 	assert(mm);
318 }
319 #else
320 static void carve_out_asan_mem(tee_mm_pool_t *pool __unused)
321 {
322 }
323 #endif
324 
325 static void print_pager_pool_size(void)
326 {
327 	struct tee_pager_stats __maybe_unused stats;
328 
329 	tee_pager_get_stats(&stats);
330 	IMSG("Pager pool size: %zukB",
331 		stats.npages_all * SMALL_PAGE_SIZE / 1024);
332 }
333 
334 static void init_vcore(tee_mm_pool_t *mm_vcore)
335 {
336 	const vaddr_t begin = VCORE_START_VA;
337 	size_t size = TEE_RAM_VA_SIZE;
338 
339 #ifdef CFG_CORE_SANITIZE_KADDRESS
340 	/* Carve out the ASAN memory, flat mapped after core memory */
341 	if (begin + size > ASAN_SHADOW_PA)
342 		size = ASAN_MAP_PA - begin;
343 #endif
344 
345 	if (!tee_mm_init(mm_vcore, begin, size, SMALL_PAGE_SHIFT,
346 			 TEE_MM_POOL_NO_FLAGS))
347 		panic("tee_mm_vcore init failed");
348 }
349 
350 /*
351  * With CFG_CORE_ASLR=y the init part is relocated very early during boot.
352  * The init part is also paged just as the rest of the normal paged code, with
353  * the difference that it's preloaded during boot. When the backing store
354  * is configured the entire paged binary is copied in place, followed by
355  * the init part. Since the init part has been relocated (references to
356  * addresses updated to compensate for the new load address) this has to be
357  * undone for the hashes of those pages to match with the original binary.
358  *
359  * If CFG_CORE_ASLR=n, nothing needs to be done as the code/ro pages are
360  * unchanged.
361  */
362 static void undo_init_relocation(uint8_t *paged_store __maybe_unused)
363 {
364 #ifdef CFG_CORE_ASLR
365 	unsigned long *ptr = NULL;
366 	const uint32_t *reloc = NULL;
367 	const uint32_t *reloc_end = NULL;
368 	unsigned long offs = boot_mmu_config.load_offset;
369 	const struct boot_embdata *embdata = (const void *)__init_end;
370 	vaddr_t addr_end = (vaddr_t)__init_end - offs - TEE_RAM_START;
371 	vaddr_t addr_start = (vaddr_t)__init_start - offs - TEE_RAM_START;
372 
373 	reloc = (const void *)((vaddr_t)embdata + embdata->reloc_offset);
374 	reloc_end = reloc + embdata->reloc_len / sizeof(*reloc);
375 
376 	for (; reloc < reloc_end; reloc++) {
377 		if (*reloc < addr_start)
378 			continue;
379 		if (*reloc >= addr_end)
380 			break;
381 		ptr = (void *)(paged_store + *reloc - addr_start);
382 		*ptr -= offs;
383 	}
384 #endif
385 }
386 
387 static struct fobj *ro_paged_alloc(tee_mm_entry_t *mm, void *hashes,
388 				   void *store)
389 {
390 	const unsigned int num_pages = tee_mm_get_bytes(mm) / SMALL_PAGE_SIZE;
391 #ifdef CFG_CORE_ASLR
392 	unsigned int reloc_offs = (vaddr_t)__pageable_start - VCORE_START_VA;
393 	const struct boot_embdata *embdata = (const void *)__init_end;
394 	const void *reloc = __init_end + embdata->reloc_offset;
395 
396 	return fobj_ro_reloc_paged_alloc(num_pages, hashes, reloc_offs,
397 					 reloc, embdata->reloc_len, store);
398 #else
399 	return fobj_ro_paged_alloc(num_pages, hashes, store);
400 #endif
401 }
402 
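/*
 * Pager flavour of the runtime setup: initialize the heaps, copy the paged
 * part of the binary into memory allocated from tee_mm_sec_ddr, verify each
 * page against the SHA-256 hashes embedded at __init_end and finally hand
 * the backing store plus any spare TZSRAM pages over to the pager.
 */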
403 static void init_runtime(unsigned long pageable_part)
404 {
405 	size_t n;
406 	size_t init_size = (size_t)(__init_end - __init_start);
407 	size_t pageable_start = (size_t)__pageable_start;
408 	size_t pageable_end = (size_t)__pageable_end;
409 	size_t pageable_size = pageable_end - pageable_start;
410 	size_t tzsram_end = TZSRAM_BASE + TZSRAM_SIZE;
411 	size_t hash_size = (pageable_size / SMALL_PAGE_SIZE) *
412 			   TEE_SHA256_HASH_SIZE;
413 	const struct boot_embdata *embdata = (const void *)__init_end;
414 	const void *tmp_hashes = NULL;
415 	tee_mm_entry_t *mm = NULL;
416 	struct fobj *fobj = NULL;
417 	uint8_t *paged_store = NULL;
418 	uint8_t *hashes = NULL;
419 
420 	assert(pageable_size % SMALL_PAGE_SIZE == 0);
421 	assert(embdata->total_len >= embdata->hashes_offset +
422 				     embdata->hashes_len);
423 	assert(hash_size == embdata->hashes_len);
424 
425 	tmp_hashes = __init_end + embdata->hashes_offset;
426 
427 	init_asan();
428 
429 	/* Add heap2 first as heap1 may be too small as initial bget pool */
430 	malloc_add_pool(__heap2_start, __heap2_end - __heap2_start);
431 	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
432 
433 	/*
434 	 * This needs to be initialized early to support address lookup
435 	 * in MEM_AREA_TEE_RAM
436 	 */
437 	tee_pager_early_init();
438 
439 	hashes = malloc(hash_size);
440 	IMSG_RAW("\n");
441 	IMSG("Pager is enabled. Hashes: %zu bytes", hash_size);
442 	assert(hashes);
443 	asan_memcpy_unchecked(hashes, tmp_hashes, hash_size);
444 
445 	/*
446 	 * Need tee_mm_sec_ddr initialized to be able to allocate secure
447 	 * DDR below.
448 	 */
449 	core_mmu_init_ta_ram();
450 
451 	carve_out_asan_mem(&tee_mm_sec_ddr);
452 
453 	mm = tee_mm_alloc(&tee_mm_sec_ddr, pageable_size);
454 	assert(mm);
455 	paged_store = phys_to_virt(tee_mm_get_smem(mm), MEM_AREA_TA_RAM,
456 				   pageable_size);
457 	/*
458 	 * Load the pageable part into the dedicated allocated area:
459 	 * - Move the pageable non-init part into the pageable area. Note the
460 	 *   bootloader may have loaded it anywhere in TA RAM, hence memmove().
461 	 * - Copy pageable init part from current location into pageable area.
462 	 */
463 	memmove(paged_store + init_size,
464 		phys_to_virt(pageable_part,
465 			     core_mmu_get_type_by_pa(pageable_part),
466 			     __pageable_part_end - __pageable_part_start),
467 		__pageable_part_end - __pageable_part_start);
468 	asan_memcpy_unchecked(paged_store, __init_start, init_size);
469 	/*
470 	 * Undo any relocation of the init part so the hash checks
471 	 * can pass.
472 	 */
473 	undo_init_relocation(paged_store);
474 
475 	/* Check that the hashes of what's in the pageable area are OK */
476 	DMSG("Checking hashes of pageable area");
477 	for (n = 0; (n * SMALL_PAGE_SIZE) < pageable_size; n++) {
478 		const uint8_t *hash = hashes + n * TEE_SHA256_HASH_SIZE;
479 		const uint8_t *page = paged_store + n * SMALL_PAGE_SIZE;
480 		TEE_Result res;
481 
482 		DMSG("hash pg_idx %zu hash %p page %p", n, hash, page);
483 		res = hash_sha256_check(hash, page, SMALL_PAGE_SIZE);
484 		if (res != TEE_SUCCESS) {
485 			EMSG("Hash failed for page %zu at %p: res 0x%x",
486 			     n, (void *)page, res);
487 			panic();
488 		}
489 	}
490 
491 	/*
492 	 * Assert that the prepaged init sections are page aligned so that
493 	 * nothing trails uninitialized at the end of the premapped init area.
494 	 */
495 	assert(!(init_size & SMALL_PAGE_MASK));
496 
497 	/*
498 	 * Initialize the virtual memory pool used for main_mmu_l2_ttb which
499 	 * is supplied to tee_pager_init() below.
500 	 */
501 	init_vcore(&tee_mm_vcore);
502 
503 	/*
504 	 * Assign the pager's alias area at the end of the small page block that
505 	 * the rest of the binary is loaded into. We take more than needed, but
506 	 * we're guaranteed to not need more than the physical amount of
507 	 * TZSRAM.
508 	 */
509 	mm = tee_mm_alloc2(&tee_mm_vcore,
510 			   (vaddr_t)tee_mm_vcore.lo +
511 			   tee_mm_vcore.size - TZSRAM_SIZE,
512 			   TZSRAM_SIZE);
513 	assert(mm);
514 	tee_pager_set_alias_area(mm);
515 
516 	/*
517 	 * Claim virtual memory which isn't paged.
518 	 * Linear memory (flat-mapped core memory) ends there.
519 	 */
520 	mm = tee_mm_alloc2(&tee_mm_vcore, VCORE_UNPG_RX_PA,
521 			   (vaddr_t)(__pageable_start - VCORE_UNPG_RX_PA));
522 	assert(mm);
523 
524 	/*
525 	 * Allocate virtual memory for the pageable area and let the pager
526 	 * take charge of all the pages already assigned to that memory.
527 	 */
528 	mm = tee_mm_alloc2(&tee_mm_vcore, (vaddr_t)__pageable_start,
529 			   pageable_size);
530 	assert(mm);
531 	fobj = ro_paged_alloc(mm, hashes, paged_store);
532 	assert(fobj);
533 	tee_pager_add_core_region(tee_mm_get_smem(mm), PAGED_REGION_TYPE_RO,
534 				  fobj);
535 	fobj_put(fobj);
536 
537 	tee_pager_add_pages(pageable_start, init_size / SMALL_PAGE_SIZE, false);
538 	tee_pager_add_pages(pageable_start + init_size,
539 			    (pageable_size - init_size) / SMALL_PAGE_SIZE,
540 			    true);
541 	if (pageable_end < tzsram_end)
542 		tee_pager_add_pages(pageable_end, (tzsram_end - pageable_end) /
543 						   SMALL_PAGE_SIZE, true);
544 
545 	/*
546 	 * There may be physical pages in TZSRAM before the core load address.
547 	 * These pages can be added to the physical pages pool of the pager.
548 	 * This setup may happen when the secure bootloader runs in TZSRAM
549 	 * and its memory can be reused by OP-TEE once boot stages complete.
550 	 */
551 	tee_pager_add_pages(tee_mm_vcore.lo,
552 			(VCORE_UNPG_RX_PA - tee_mm_vcore.lo) / SMALL_PAGE_SIZE,
553 			true);
554 
555 	print_pager_pool_size();
556 }
557 #else
558 
559 static void init_runtime(unsigned long pageable_part __unused)
560 {
561 	init_asan();
562 
563 	/*
564 	 * By default the whole of OP-TEE uses malloc, so we need to initialize
565 	 * it early. But when virtualization is enabled, malloc is used
566 	 * only by the TEE runtime, so malloc should be initialized later,
567 	 * separately for every virtual partition. Core code uses nex_malloc
568 	 * instead.
569 	 */
570 #ifdef CFG_VIRTUALIZATION
571 	nex_malloc_add_pool(__nex_heap_start, __nex_heap_end -
572 					      __nex_heap_start);
573 #else
574 	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
575 #endif
576 
577 	IMSG_RAW("\n");
578 }
579 #endif
580 
581 void *get_dt(void)
582 {
583 	void *fdt = get_embedded_dt();
584 
585 	if (!fdt)
586 		fdt = get_external_dt();
587 
588 	return fdt;
589 }
590 
591 #if defined(CFG_EMBED_DTB)
592 void *get_embedded_dt(void)
593 {
594 	static bool checked;
595 
596 	assert(cpu_mmu_enabled());
597 
598 	if (!checked) {
599 		IMSG("Embedded DTB found");
600 
601 		if (fdt_check_header(embedded_secure_dtb))
602 			panic("Invalid embedded DTB");
603 
604 		checked = true;
605 	}
606 
607 	return embedded_secure_dtb;
608 }
609 #else
610 void *get_embedded_dt(void)
611 {
612 	return NULL;
613 }
614 #endif /*CFG_EMBED_DTB*/
615 
616 #if defined(CFG_DT)
617 void *get_external_dt(void)
618 {
619 	assert(cpu_mmu_enabled());
620 	return external_dt.blob;
621 }
622 
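/*
 * boot_final hook: pack the possibly modified external DT back to its
 * minimal size and remove the temporary MEM_AREA_EXT_DT mapping before
 * OP-TEE switches to normal world boot.
 */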
623 static TEE_Result release_external_dt(void)
624 {
625 	int ret = 0;
626 
627 	if (!external_dt.blob)
628 		return TEE_SUCCESS;
629 
630 	ret = fdt_pack(external_dt.blob);
631 	if (ret < 0) {
632 		EMSG("Failed to pack Device Tree at 0x%" PRIxPA ": error %d",
633 		     virt_to_phys(external_dt.blob), ret);
634 		panic();
635 	}
636 
637 	if (core_mmu_remove_mapping(MEM_AREA_EXT_DT, external_dt.blob,
638 				    CFG_DTB_MAX_SIZE))
639 		panic("Failed to remove temporary Device Tree mapping");
640 
641 	/* External DTB is no longer accessible, reset the pointer */
642 	external_dt.blob = NULL;
643 
644 	return TEE_SUCCESS;
645 }
646 boot_final(release_external_dt);
647 
648 #ifdef _CFG_USE_DTB_OVERLAY
649 static int add_dt_overlay_fragment(struct dt_descriptor *dt, int ioffs)
650 {
651 	char frag[32];
652 	int offs;
653 	int ret;
654 
655 	snprintf(frag, sizeof(frag), "fragment@%d", dt->frag_id);
656 	offs = fdt_add_subnode(dt->blob, ioffs, frag);
657 	if (offs < 0)
658 		return offs;
659 
660 	dt->frag_id += 1;
661 
662 	ret = fdt_setprop_string(dt->blob, offs, "target-path", "/");
663 	if (ret < 0)
664 		return -1;
665 
666 	return fdt_add_subnode(dt->blob, offs, "__overlay__");
667 }
668 
669 static int init_dt_overlay(struct dt_descriptor *dt, int __maybe_unused dt_size)
670 {
671 	int fragment;
672 
673 	if (IS_ENABLED(CFG_EXTERNAL_DTB_OVERLAY)) {
674 		if (!fdt_check_header(dt->blob)) {
675 			fdt_for_each_subnode(fragment, dt->blob, 0)
676 				dt->frag_id += 1;
677 			return 0;
678 		}
679 	}
680 
681 	return fdt_create_empty_tree(dt->blob, dt_size);
682 }
683 #else
684 static int add_dt_overlay_fragment(struct dt_descriptor *dt __unused, int offs)
685 {
686 	return offs;
687 }
688 
689 static int init_dt_overlay(struct dt_descriptor *dt __unused,
690 			   int dt_size __unused)
691 {
692 	return 0;
693 }
694 #endif /* _CFG_USE_DTB_OVERLAY */
695 
696 static int add_dt_path_subnode(struct dt_descriptor *dt, const char *path,
697 			       const char *subnode)
698 {
699 	int offs;
700 
701 	offs = fdt_path_offset(dt->blob, path);
702 	if (offs < 0)
703 		return -1;
704 	offs = add_dt_overlay_fragment(dt, offs);
705 	if (offs < 0)
706 		return -1;
707 	offs = fdt_add_subnode(dt->blob, offs, subnode);
708 	if (offs < 0)
709 		return -1;
710 	return offs;
711 }
712 
713 static int add_optee_dt_node(struct dt_descriptor *dt)
714 {
715 	int offs;
716 	int ret;
717 
718 	if (fdt_path_offset(dt->blob, "/firmware/optee") >= 0) {
719 		DMSG("OP-TEE Device Tree node already exists!");
720 		return 0;
721 	}
722 
723 	offs = fdt_path_offset(dt->blob, "/firmware");
724 	if (offs < 0) {
725 		offs = add_dt_path_subnode(dt, "/", "firmware");
726 		if (offs < 0)
727 			return -1;
728 	}
729 
730 	offs = fdt_add_subnode(dt->blob, offs, "optee");
731 	if (offs < 0)
732 		return -1;
733 
734 	ret = fdt_setprop_string(dt->blob, offs, "compatible",
735 				 "linaro,optee-tz");
736 	if (ret < 0)
737 		return -1;
738 	ret = fdt_setprop_string(dt->blob, offs, "method", "smc");
739 	if (ret < 0)
740 		return -1;
741 	if (CFG_CORE_ASYNC_NOTIF_GIC_INTID) {
742 		/*
743 		 * The format of the interrupt property is defined by the
744 		 * binding of the interrupt domain root. In this case it's
745 		 * one Arm GIC v1, v2 or v3 so we must be compatible with
746 		 * these.
747 		 *
748 		 * An SPI type of interrupt is indicated with a 0 in the
749 		 * first cell.
750 		 *
751 		 * The interrupt number goes in the second cell where
752 	 * SPIs range from 0 to 987.
753 		 *
754 		 * Flags are passed in the third cell where a 1 means edge
755 		 * triggered.
756 		 */
757 		const uint32_t gic_spi = 0;
758 		const uint32_t irq_type_edge = 1;
759 		uint32_t val[] = {
760 			TEE_U32_TO_BIG_ENDIAN(gic_spi),
761 			TEE_U32_TO_BIG_ENDIAN(CFG_CORE_ASYNC_NOTIF_GIC_INTID -
762 					      GIC_SPI_BASE),
763 			TEE_U32_TO_BIG_ENDIAN(irq_type_edge),
764 		};
765 
766 		ret = fdt_setprop(dt->blob, offs, "interrupts", val,
767 				  sizeof(val));
768 		if (ret < 0)
769 			return -1;
770 	}
771 	return 0;
772 }
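
/*
 * For illustration only (the interrupt number is hypothetical): with
 * CFG_CORE_ASYNC_NOTIF_GIC_INTID = 40 and GIC_SPI_BASE = 32 the node added
 * above ends up looking like:
 *
 *	firmware {
 *		optee {
 *			compatible = "linaro,optee-tz";
 *			method = "smc";
 *			interrupts = <0 8 1>;
 *		};
 *	};
 */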
773 
774 #ifdef CFG_PSCI_ARM32
775 static int append_psci_compatible(void *fdt, int offs, const char *str)
776 {
777 	return fdt_appendprop(fdt, offs, "compatible", str, strlen(str) + 1);
778 }
779 
780 static int dt_add_psci_node(struct dt_descriptor *dt)
781 {
782 	int offs;
783 
784 	if (fdt_path_offset(dt->blob, "/psci") >= 0) {
785 		DMSG("PSCI Device Tree node already exists!");
786 		return 0;
787 	}
788 
789 	offs = add_dt_path_subnode(dt, "/", "psci");
790 	if (offs < 0)
791 		return -1;
792 	if (append_psci_compatible(dt->blob, offs, "arm,psci-1.0"))
793 		return -1;
794 	if (append_psci_compatible(dt->blob, offs, "arm,psci-0.2"))
795 		return -1;
796 	if (append_psci_compatible(dt->blob, offs, "arm,psci"))
797 		return -1;
798 	if (fdt_setprop_string(dt->blob, offs, "method", "smc"))
799 		return -1;
800 	if (fdt_setprop_u32(dt->blob, offs, "cpu_suspend", PSCI_CPU_SUSPEND))
801 		return -1;
802 	if (fdt_setprop_u32(dt->blob, offs, "cpu_off", PSCI_CPU_OFF))
803 		return -1;
804 	if (fdt_setprop_u32(dt->blob, offs, "cpu_on", PSCI_CPU_ON))
805 		return -1;
806 	if (fdt_setprop_u32(dt->blob, offs, "sys_poweroff", PSCI_SYSTEM_OFF))
807 		return -1;
808 	if (fdt_setprop_u32(dt->blob, offs, "sys_reset", PSCI_SYSTEM_RESET))
809 		return -1;
810 	return 0;
811 }
812 
813 static int check_node_compat_prefix(struct dt_descriptor *dt, int offs,
814 				    const char *prefix)
815 {
816 	const size_t prefix_len = strlen(prefix);
817 	size_t l;
818 	int plen;
819 	const char *prop;
820 
821 	prop = fdt_getprop(dt->blob, offs, "compatible", &plen);
822 	if (!prop)
823 		return -1;
824 
825 	while (plen > 0) {
826 		if (memcmp(prop, prefix, prefix_len) == 0)
827 			return 0; /* match */
828 
829 		l = strlen(prop) + 1;
830 		prop += l;
831 		plen -= l;
832 	}
833 
834 	return -1;
835 }
836 
837 static int dt_add_psci_cpu_enable_methods(struct dt_descriptor *dt)
838 {
839 	int offs = 0;
840 
841 	while (1) {
842 		offs = fdt_next_node(dt->blob, offs, NULL);
843 		if (offs < 0)
844 			break;
845 		if (fdt_getprop(dt->blob, offs, "enable-method", NULL))
846 			continue; /* already set */
847 		if (check_node_compat_prefix(dt, offs, "arm,cortex-a"))
848 			continue; /* no compatible */
849 		if (fdt_setprop_string(dt->blob, offs, "enable-method", "psci"))
850 			return -1;
851 		/* Need to restart scanning as offsets may have changed */
852 		offs = 0;
853 	}
854 	return 0;
855 }
856 
857 static int config_psci(struct dt_descriptor *dt)
858 {
859 	if (dt_add_psci_node(dt))
860 		return -1;
861 	return dt_add_psci_cpu_enable_methods(dt);
862 }
863 #else
864 static int config_psci(struct dt_descriptor *dt __unused)
865 {
866 	return 0;
867 }
868 #endif /*CFG_PSCI_ARM32*/
869 
870 static void set_dt_val(void *data, uint32_t cell_size, uint64_t val)
871 {
872 	if (cell_size == 1) {
873 		fdt32_t v = cpu_to_fdt32((uint32_t)val);
874 
875 		memcpy(data, &v, sizeof(v));
876 	} else {
877 		fdt64_t v = cpu_to_fdt64(val);
878 
879 		memcpy(data, &v, sizeof(v));
880 	}
881 }
882 
883 static int add_res_mem_dt_node(struct dt_descriptor *dt, const char *name,
884 			       paddr_t pa, size_t size)
885 {
886 	int offs = 0;
887 	int ret = 0;
888 	int addr_size = -1;
889 	int len_size = -1;
890 	bool found = true;
891 	char subnode_name[80] = { 0 };
892 
893 	offs = fdt_path_offset(dt->blob, "/reserved-memory");
894 
895 	if (offs < 0) {
896 		found = false;
897 		offs = 0;
898 	}
899 
900 	if (IS_ENABLED(_CFG_USE_DTB_OVERLAY)) {
901 		len_size = sizeof(paddr_t) / sizeof(uint32_t);
902 		addr_size = sizeof(paddr_t) / sizeof(uint32_t);
903 	} else {
904 		len_size = fdt_size_cells(dt->blob, offs);
905 		if (len_size < 0)
906 			return -1;
907 		addr_size = fdt_address_cells(dt->blob, offs);
908 		if (addr_size < 0)
909 			return -1;
910 	}
911 
912 	if (!found) {
913 		offs = add_dt_path_subnode(dt, "/", "reserved-memory");
914 		if (offs < 0)
915 			return -1;
916 		ret = fdt_setprop_cell(dt->blob, offs, "#address-cells",
917 				       addr_size);
918 		if (ret < 0)
919 			return -1;
920 		ret = fdt_setprop_cell(dt->blob, offs, "#size-cells", len_size);
921 		if (ret < 0)
922 			return -1;
923 		ret = fdt_setprop(dt->blob, offs, "ranges", NULL, 0);
924 		if (ret < 0)
925 			return -1;
926 	}
927 
928 	ret = snprintf(subnode_name, sizeof(subnode_name),
929 		       "%s@%" PRIxPA, name, pa);
930 	if (ret < 0 || ret >= (int)sizeof(subnode_name))
931 		DMSG("truncated node \"%s@%" PRIxPA"\"", name, pa);
932 	offs = fdt_add_subnode(dt->blob, offs, subnode_name);
933 	if (offs >= 0) {
934 		uint32_t data[FDT_MAX_NCELLS * 2];
935 
936 		set_dt_val(data, addr_size, pa);
937 		set_dt_val(data + addr_size, len_size, size);
938 		ret = fdt_setprop(dt->blob, offs, "reg", data,
939 				  sizeof(uint32_t) * (addr_size + len_size));
940 		if (ret < 0)
941 			return -1;
942 		ret = fdt_setprop(dt->blob, offs, "no-map", NULL, 0);
943 		if (ret < 0)
944 			return -1;
945 	} else {
946 		return -1;
947 	}
948 	return 0;
949 }
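
/*
 * For illustration only (addresses are hypothetical): a call such as
 * add_res_mem_dt_node(dt, "optee_core", 0x0e100000, 0x00f00000) yields,
 * when /reserved-memory does not already exist and cells are 32-bit:
 *
 *	reserved-memory {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges;
 *
 *		optee_core@e100000 {
 *			reg = <0x0e100000 0x00f00000>;
 *			no-map;
 *		};
 *	};
 */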
950 
951 #ifdef CFG_CORE_DYN_SHM
952 static uint64_t get_dt_val_and_advance(const void *data, size_t *offs,
953 				       uint32_t cell_size)
954 {
955 	uint64_t rv = 0;
956 
957 	if (cell_size == 1) {
958 		uint32_t v;
959 
960 		memcpy(&v, (const uint8_t *)data + *offs, sizeof(v));
961 		*offs += sizeof(v);
962 		rv = fdt32_to_cpu(v);
963 	} else {
964 		uint64_t v;
965 
966 		memcpy(&v, (const uint8_t *)data + *offs, sizeof(v));
967 		*offs += sizeof(v);
968 		rv = fdt64_to_cpu(v);
969 	}
970 
971 	return rv;
972 }
973 
974 /*
975  * Find all non-secure memory from DT. Memory marked inaccessible by Secure
976  * World is ignored since it could not be mapped to be used as dynamic shared
977  * memory.
978  */
979 static int get_nsec_memory_helper(void *fdt, struct core_mmu_phys_mem *mem)
980 {
981 	const uint8_t *prop = NULL;
982 	uint64_t a = 0;
983 	uint64_t l = 0;
984 	size_t prop_offs = 0;
985 	size_t prop_len = 0;
986 	int elems_total = 0;
987 	int addr_size = 0;
988 	int len_size = 0;
989 	int offs = 0;
990 	size_t n = 0;
991 	int len = 0;
992 
993 	addr_size = fdt_address_cells(fdt, 0);
994 	if (addr_size < 0)
995 		return 0;
996 
997 	len_size = fdt_size_cells(fdt, 0);
998 	if (len_size < 0)
999 		return 0;
1000 
1001 	while (true) {
1002 		offs = fdt_node_offset_by_prop_value(fdt, offs, "device_type",
1003 						     "memory",
1004 						     sizeof("memory"));
1005 		if (offs < 0)
1006 			break;
1007 
1008 		if (_fdt_get_status(fdt, offs) != (DT_STATUS_OK_NSEC |
1009 						   DT_STATUS_OK_SEC))
1010 			continue;
1011 
1012 		prop = fdt_getprop(fdt, offs, "reg", &len);
1013 		if (!prop)
1014 			continue;
1015 
1016 		prop_len = len;
1017 		for (n = 0, prop_offs = 0; prop_offs < prop_len; n++) {
1018 			a = get_dt_val_and_advance(prop, &prop_offs, addr_size);
1019 			if (prop_offs >= prop_len) {
1020 				n--;
1021 				break;
1022 			}
1023 
1024 			l = get_dt_val_and_advance(prop, &prop_offs, len_size);
1025 			if (mem) {
1026 				mem->type = MEM_AREA_DDR_OVERALL;
1027 				mem->addr = a;
1028 				mem->size = l;
1029 				mem++;
1030 			}
1031 		}
1032 
1033 		elems_total += n;
1034 	}
1035 
1036 	return elems_total;
1037 }
1038 
1039 static struct core_mmu_phys_mem *get_nsec_memory(void *fdt, size_t *nelems)
1040 {
1041 	struct core_mmu_phys_mem *mem = NULL;
1042 	int elems_total = 0;
1043 
1044 	elems_total = get_nsec_memory_helper(fdt, NULL);
1045 	if (elems_total <= 0)
1046 		return NULL;
1047 
1048 	mem = nex_calloc(elems_total, sizeof(*mem));
1049 	if (!mem)
1050 		panic();
1051 
1052 	elems_total = get_nsec_memory_helper(fdt, mem);
1053 	assert(elems_total > 0);
1054 
1055 	*nelems = elems_total;
1056 
1057 	return mem;
1058 }
1059 #endif /*CFG_CORE_DYN_SHM*/
1060 
1061 #ifdef CFG_CORE_RESERVED_SHM
1062 static int mark_static_shm_as_reserved(struct dt_descriptor *dt)
1063 {
1064 	vaddr_t shm_start;
1065 	vaddr_t shm_end;
1066 
1067 	core_mmu_get_mem_by_type(MEM_AREA_NSEC_SHM, &shm_start, &shm_end);
1068 	if (shm_start != shm_end)
1069 		return add_res_mem_dt_node(dt, "optee_shm",
1070 					   virt_to_phys((void *)shm_start),
1071 					   shm_end - shm_start);
1072 
1073 	DMSG("No SHM configured");
1074 	return -1;
1075 }
1076 #endif /*CFG_CORE_RESERVED_SHM*/
1077 
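/*
 * Map the device tree passed by the bootloader (if any) into
 * MEM_AREA_EXT_DT and open it for modification so that the OP-TEE, PSCI
 * and reserved-memory nodes can be added before it is handed over to the
 * normal world.
 */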
1078 static void init_external_dt(unsigned long phys_dt)
1079 {
1080 	struct dt_descriptor *dt = &external_dt;
1081 	void *fdt;
1082 	int ret;
1083 
1084 	if (!phys_dt) {
1085 		/*
1086 		 * No need to panic as we're not using the DT in OP-TEE
1087 		 * yet, we're only adding some nodes for normal world use.
1088 		 * This makes the switch to using DT easier as we can boot
1089 		 * a newer OP-TEE with older boot loaders. Once we start to
1090 		 * initialize devices based on DT we'll likely panic
1091 		 * instead of returning here.
1092 		 */
1093 		IMSG("No non-secure external DT");
1094 		return;
1095 	}
1096 
1097 	fdt = core_mmu_add_mapping(MEM_AREA_EXT_DT, phys_dt, CFG_DTB_MAX_SIZE);
1098 	if (!fdt)
1099 		panic("Failed to map external DTB");
1100 
1101 	dt->blob = fdt;
1102 
1103 	ret = init_dt_overlay(dt, CFG_DTB_MAX_SIZE);
1104 	if (ret < 0) {
1105 		EMSG("Device Tree Overlay init fail @ %#lx: error %d", phys_dt,
1106 		     ret);
1107 		panic();
1108 	}
1109 
1110 	ret = fdt_open_into(fdt, fdt, CFG_DTB_MAX_SIZE);
1111 	if (ret < 0) {
1112 		EMSG("Invalid Device Tree at %#lx: error %d", phys_dt, ret);
1113 		panic();
1114 	}
1115 
1116 	IMSG("Non-secure external DT found");
1117 }
1118 
1119 static int mark_tzdram_as_reserved(struct dt_descriptor *dt)
1120 {
1121 	return add_res_mem_dt_node(dt, "optee_core", CFG_TZDRAM_START,
1122 				   CFG_TZDRAM_SIZE);
1123 }
1124 
1125 static void update_external_dt(void)
1126 {
1127 	struct dt_descriptor *dt = &external_dt;
1128 
1129 	if (!dt->blob)
1130 		return;
1131 
1132 	if (!IS_ENABLED(CFG_CORE_FFA) && add_optee_dt_node(dt))
1133 		panic("Failed to add OP-TEE Device Tree node");
1134 
1135 	if (config_psci(dt))
1136 		panic("Failed to config PSCI");
1137 
1138 #ifdef CFG_CORE_RESERVED_SHM
1139 	if (mark_static_shm_as_reserved(dt))
1140 		panic("Failed to config non-secure memory");
1141 #endif
1142 
1143 	if (mark_tzdram_as_reserved(dt))
1144 		panic("Failed to config secure memory");
1145 }
1146 #else /*CFG_DT*/
1147 void *get_external_dt(void)
1148 {
1149 	return NULL;
1150 }
1151 
1152 static void init_external_dt(unsigned long phys_dt __unused)
1153 {
1154 }
1155 
1156 static void update_external_dt(void)
1157 {
1158 }
1159 
1160 #ifdef CFG_CORE_DYN_SHM
1161 static struct core_mmu_phys_mem *get_nsec_memory(void *fdt __unused,
1162 						 size_t *nelems __unused)
1163 {
1164 	return NULL;
1165 }
1166 #endif /*CFG_CORE_DYN_SHM*/
1167 #endif /*!CFG_DT*/
1168 
1169 #ifdef CFG_CORE_DYN_SHM
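/*
 * Determine which non-secure DDR ranges may back dynamic shared memory:
 * memory nodes from the external device tree are preferred, otherwise the
 * ranges registered with register_ddr() (or the deprecated
 * register_dynamic_shm()) are used.
 */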
1170 static void discover_nsec_memory(void)
1171 {
1172 	struct core_mmu_phys_mem *mem;
1173 	const struct core_mmu_phys_mem *mem_begin = NULL;
1174 	const struct core_mmu_phys_mem *mem_end = NULL;
1175 	size_t nelems;
1176 	void *fdt = get_external_dt();
1177 
1178 	if (fdt) {
1179 		mem = get_nsec_memory(fdt, &nelems);
1180 		if (mem) {
1181 			core_mmu_set_discovered_nsec_ddr(mem, nelems);
1182 			return;
1183 		}
1184 
1185 		DMSG("No non-secure memory found in FDT");
1186 	}
1187 
1188 	mem_begin = phys_ddr_overall_begin;
1189 	mem_end = phys_ddr_overall_end;
1190 	nelems = mem_end - mem_begin;
1191 	if (nelems) {
1192 		/*
1193 		 * Platform cannot use both register_ddr() and the now
1194 		 * deprecated register_dynamic_shm().
1195 		 */
1196 		assert(phys_ddr_overall_compat_begin ==
1197 		       phys_ddr_overall_compat_end);
1198 	} else {
1199 		mem_begin = phys_ddr_overall_compat_begin;
1200 		mem_end = phys_ddr_overall_compat_end;
1201 		nelems = mem_end - mem_begin;
1202 		if (!nelems)
1203 			return;
1204 		DMSG("Warning register_dynamic_shm() is deprecated, please use register_ddr() instead");
1205 	}
1206 
1207 	mem = nex_calloc(nelems, sizeof(*mem));
1208 	if (!mem)
1209 		panic();
1210 
1211 	memcpy(mem, mem_begin, sizeof(*mem) * nelems);
1212 	core_mmu_set_discovered_nsec_ddr(mem, nelems);
1213 }
1214 #else /*CFG_CORE_DYN_SHM*/
1215 static void discover_nsec_memory(void)
1216 {
1217 }
1218 #endif /*!CFG_CORE_DYN_SHM*/
1219 
1220 #ifdef CFG_VIRTUALIZATION
1221 static TEE_Result virt_init_heap(void)
1222 {
1223 	/* We need to initialize the pool for every virtual guest partition */
1224 	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
1225 
1226 	return TEE_SUCCESS;
1227 }
1228 preinit_early(virt_init_heap);
1229 #endif
1230 
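/*
 * Bring up the TEE runtime proper: initialize TA RAM (unless the pager
 * already did) and run the registered initcalls. With CFG_VIRTUALIZATION
 * this runs when the OP-TEE partition is created rather than during boot.
 */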
1231 void init_tee_runtime(void)
1232 {
1233 #ifndef CFG_WITH_PAGER
1234 	/* Pager initializes TA RAM early */
1235 	core_mmu_init_ta_ram();
1236 #endif
1237 	/*
1238 	 * With virtualization we call this function when creating the
1239 	 * OP-TEE partition instead.
1240 	 */
1241 	if (!IS_ENABLED(CFG_VIRTUALIZATION))
1242 		call_preinitcalls();
1243 	call_initcalls();
1244 }
1245 
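/*
 * Common primary CPU setup: mask asynchronous exceptions, save CNTFRQ,
 * configure VFP, initialize the runtime (heaps and, with the pager, the
 * paged store), the boot thread and finally the secure monitor.
 */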
1246 static void init_primary(unsigned long pageable_part, unsigned long nsec_entry)
1247 {
1248 	/*
1249 	 * Mask asynchronous exceptions before switching to the thread vector,
1250 	 * as the thread handler requires those to be masked while
1251 	 * executing with the temporary stack. The thread subsystem also
1252 	 * asserts that the foreign interrupts are blocked when using most of
1253 	 * its functions.
1254 	 */
1255 	thread_set_exceptions(THREAD_EXCP_ALL);
1256 	primary_save_cntfrq();
1257 	init_vfp_sec();
1258 	/*
1259 	 * Pager: init_runtime() calls thread_kernel_enable_vfp() so we must
1260 	 * set a current thread right now to avoid a chicken-and-egg problem
1261 	 * (thread_init_boot_thread() sets the current thread but needs
1262 	 * things set by init_runtime()).
1263 	 */
1264 	thread_get_core_local()->curr_thread = 0;
1265 	init_runtime(pageable_part);
1266 
1267 	if (IS_ENABLED(CFG_VIRTUALIZATION)) {
1268 		/*
1269 		 * Virtualization: We can't initialize threads right now because
1270 		 * threads belong to the "tee" part and will be initialized
1271 		 * separately for each new virtual guest. So, we'll clear
1272 		 * "curr_thread" and call it done.
1273 		 */
1274 		thread_get_core_local()->curr_thread = -1;
1275 	} else {
1276 		thread_init_boot_thread();
1277 	}
1278 	thread_init_primary();
1279 	thread_init_per_cpu();
1280 	init_sec_mon(nsec_entry);
1281 }
1282 
1283 /*
1284  * Note: this function is weak just to make it possible to exclude it from
1285  * the unpaged area.
1286  */
1287 void __weak boot_init_primary_late(unsigned long fdt)
1288 {
1289 	init_external_dt(fdt);
1290 	tpm_map_log_area(get_external_dt());
1291 	discover_nsec_memory();
1292 	update_external_dt();
1293 	configure_console_from_dt();
1294 
1295 	IMSG("OP-TEE version: %s", core_v_str);
1296 	if (IS_ENABLED(CFG_WARN_INSECURE)) {
1297 		IMSG("WARNING: This OP-TEE configuration might be insecure!");
1298 		IMSG("WARNING: Please check https://optee.readthedocs.io/en/latest/architecture/porting_guidelines.html");
1299 	}
1300 	IMSG("Primary CPU initializing");
1301 #ifdef CFG_CORE_ASLR
1302 	DMSG("Executing at offset %#lx with virtual load address %#"PRIxVA,
1303 	     (unsigned long)boot_mmu_config.load_offset, VCORE_START_VA);
1304 #endif
1305 
1306 	main_init_gic();
1307 	init_vfp_nsec();
1308 	if (IS_ENABLED(CFG_VIRTUALIZATION)) {
1309 		IMSG("Initializing virtualization support");
1310 		core_mmu_init_virtualization();
1311 	} else {
1312 		init_tee_runtime();
1313 	}
1314 	call_finalcalls();
1315 	IMSG("Primary CPU switching to normal world boot");
1316 }
1317 
1318 static void init_secondary_helper(unsigned long nsec_entry)
1319 {
1320 	IMSG("Secondary CPU %zu initializing", get_core_pos());
1321 
1322 	/*
1323 	 * Mask asynchronous exceptions before switching to the thread vector,
1324 	 * as the thread handler requires those to be masked while
1325 	 * executing with the temporary stack. The thread subsystem also
1326 	 * asserts that the foreign interrupts are blocked when using most of
1327 	 * its functions.
1328 	 */
1329 	thread_set_exceptions(THREAD_EXCP_ALL);
1330 
1331 	secondary_init_cntfrq();
1332 	thread_init_per_cpu();
1333 	init_sec_mon(nsec_entry);
1334 	main_secondary_init_gic();
1335 	init_vfp_sec();
1336 	init_vfp_nsec();
1337 
1338 	IMSG("Secondary CPU %zu switching to normal world boot", get_core_pos());
1339 }
1340 
1341 /*
1342  * Note: this function is weak just to make it possible to exclude it from
1343  * the unpaged area so that it lies in the init area.
1344  */
1345 void __weak boot_init_primary_early(unsigned long pageable_part,
1346 				    unsigned long nsec_entry __maybe_unused)
1347 {
1348 	unsigned long e = PADDR_INVALID;
1349 
1350 #if !defined(CFG_WITH_ARM_TRUSTED_FW)
1351 	e = nsec_entry;
1352 #endif
1353 
1354 	init_primary(pageable_part, e);
1355 }
1356 
1357 #if defined(CFG_WITH_ARM_TRUSTED_FW)
1358 unsigned long boot_cpu_on_handler(unsigned long a0 __maybe_unused,
1359 				  unsigned long a1 __unused)
1360 {
1361 	init_secondary_helper(PADDR_INVALID);
1362 	return 0;
1363 }
1364 #else
1365 void boot_init_secondary(unsigned long nsec_entry)
1366 {
1367 	init_secondary_helper(nsec_entry);
1368 }
1369 #endif
1370 
1371 #if defined(CFG_BOOT_SECONDARY_REQUEST)
1372 void boot_set_core_ns_entry(size_t core_idx, uintptr_t entry,
1373 			    uintptr_t context_id)
1374 {
1375 	ns_entry_contexts[core_idx].entry_point = entry;
1376 	ns_entry_contexts[core_idx].context_id = context_id;
1377 	dsb_ishst();
1378 }
1379 
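/*
 * Release a secondary core held in the spin table: record its normal world
 * entry point, raise its spin table flag and signal an event so that
 * boot_core_hpen() below can return the entry context. Returns 0 on
 * success, -1 for an invalid core index.
 */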
1380 int boot_core_release(size_t core_idx, paddr_t entry)
1381 {
1382 	if (!core_idx || core_idx >= CFG_TEE_CORE_NB_CORE)
1383 		return -1;
1384 
1385 	ns_entry_contexts[core_idx].entry_point = entry;
1386 	dmb();
1387 	spin_table[core_idx] = 1;
1388 	dsb();
1389 	sev();
1390 
1391 	return 0;
1392 }
1393 
1394 /*
1395  * Spin until the secondary boot request, then return with
1396  * the secondary core entry address.
1397  */
1398 struct ns_entry_context *boot_core_hpen(void)
1399 {
1400 #ifdef CFG_PSCI_ARM32
1401 	return &ns_entry_contexts[get_core_pos()];
1402 #else
1403 	do {
1404 		wfe();
1405 	} while (!spin_table[get_core_pos()]);
1406 	dmb();
1407 	return &ns_entry_contexts[get_core_pos()];
1408 #endif
1409 }
1410 #endif
1411 
1412 #if defined(CFG_CORE_ASLR)
1413 #if defined(CFG_DT)
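/*
 * Default ASLR seed lookup: read the 64-bit /secure-chosen/kaslr-seed
 * property from the device tree passed at boot, falling back to
 * plat_get_aslr_seed() when the property is absent or malformed.
 */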
1414 unsigned long __weak get_aslr_seed(void *fdt)
1415 {
1416 	int rc = fdt_check_header(fdt);
1417 	const uint64_t *seed = NULL;
1418 	int offs = 0;
1419 	int len = 0;
1420 
1421 	if (rc) {
1422 		DMSG("Bad fdt: %d", rc);
1423 		goto err;
1424 	}
1425 
1426 	offs = fdt_path_offset(fdt, "/secure-chosen");
1427 	if (offs < 0) {
1428 		DMSG("Cannot find /secure-chosen");
1429 		goto err;
1430 	}
1431 	seed = fdt_getprop(fdt, offs, "kaslr-seed", &len);
1432 	if (!seed || len != sizeof(*seed)) {
1433 		DMSG("Cannot find valid kaslr-seed");
1434 		goto err;
1435 	}
1436 
1437 	return fdt64_to_cpu(*seed);
1438 
1439 err:
1440 	/* Try platform implementation */
1441 	return plat_get_aslr_seed();
1442 }
1443 #else /*!CFG_DT*/
1444 unsigned long __weak get_aslr_seed(void *fdt __unused)
1445 {
1446 	/* Try platform implementation */
1447 	return plat_get_aslr_seed();
1448 }
1449 #endif /*!CFG_DT*/
1450 #endif /*CFG_CORE_ASLR*/
1451