xref: /optee_os/core/arch/arm/kernel/boot.c (revision 4edd96e6d7a7228e907cf498b23e5b5fbdaf39a0)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2015-2023, Linaro Limited
4  * Copyright (c) 2023, Arm Limited
5  */
6 
7 #include <arm.h>
8 #include <assert.h>
9 #include <compiler.h>
10 #include <config.h>
11 #include <console.h>
12 #include <crypto/crypto.h>
13 #include <drivers/gic.h>
14 #include <dt-bindings/interrupt-controller/arm-gic.h>
15 #include <ffa.h>
16 #include <initcall.h>
17 #include <inttypes.h>
18 #include <keep.h>
19 #include <kernel/asan.h>
20 #include <kernel/boot.h>
21 #include <kernel/dt.h>
22 #include <kernel/linker.h>
23 #include <kernel/misc.h>
24 #include <kernel/panic.h>
25 #include <kernel/tee_misc.h>
26 #include <kernel/thread.h>
27 #include <kernel/tpm.h>
28 #include <libfdt.h>
29 #include <malloc.h>
30 #include <memtag.h>
31 #include <mm/core_memprot.h>
32 #include <mm/core_mmu.h>
33 #include <mm/fobj.h>
34 #include <mm/tee_mm.h>
35 #include <mm/tee_pager.h>
36 #include <sm/psci.h>
37 #include <stdio.h>
38 #include <trace.h>
39 #include <utee_defines.h>
40 #include <util.h>
41 
42 #include <platform_config.h>
43 
44 #if !defined(CFG_WITH_ARM_TRUSTED_FW)
45 #include <sm/sm.h>
46 #endif
47 
48 #if defined(CFG_WITH_VFP)
49 #include <kernel/vfp.h>
50 #endif
51 
52 /*
53  * In this file we're using unsigned long to represent physical pointers as
54  * they are received in a single register when OP-TEE is initially entered.
55  * This limits 32-bit systems to only make use of the lower 32 bits
56  * of a physical address for initial parameters.
57  *
58  * 64-bit systems on the other hand can use full 64-bit physical pointers.
59  */
60 #define PADDR_INVALID		ULONG_MAX
61 
62 #if defined(CFG_BOOT_SECONDARY_REQUEST)
63 struct ns_entry_context {
64 	uintptr_t entry_point;
65 	uintptr_t context_id;
66 };
67 struct ns_entry_context ns_entry_contexts[CFG_TEE_CORE_NB_CORE];
68 static uint32_t spin_table[CFG_TEE_CORE_NB_CORE];
69 #endif
70 
71 #ifdef CFG_BOOT_SYNC_CPU
72 /*
73  * Array used during boot to synchronize the CPUs.
74  * When 0, the CPU has not started.
75  * When 1, it has started.
76  */
77 uint32_t sem_cpu_sync[CFG_TEE_CORE_NB_CORE];
78 DECLARE_KEEP_PAGER(sem_cpu_sync);
79 #endif
80 
81 #ifdef CFG_DT
82 #ifdef CFG_CORE_SEL1_SPMC
83 static struct dt_descriptor tos_fw_config_dt __nex_bss;
84 #endif
85 #endif
86 
87 #ifdef CFG_SECONDARY_INIT_CNTFRQ
88 static uint32_t cntfrq;
89 #endif
90 
91 /* May be overridden in plat-$(PLATFORM)/main.c */
92 __weak void plat_primary_init_early(void)
93 {
94 }
95 DECLARE_KEEP_PAGER(plat_primary_init_early);
96 
97 /* May be overridden in plat-$(PLATFORM)/main.c */
98 __weak void boot_primary_init_intc(void)
99 {
100 }
101 
102 /* May be overridden in plat-$(PLATFORM)/main.c */
103 __weak void boot_secondary_init_intc(void)
104 {
105 }
106 
107 /* May be overridden in plat-$(PLATFORM)/main.c */
108 __weak unsigned long plat_get_aslr_seed(void)
109 {
110 	DMSG("Warning: no ASLR seed");
111 
112 	return 0;
113 }
114 
115 #if defined(_CFG_CORE_STACK_PROTECTOR) || defined(CFG_WITH_STACK_CANARIES)
116 /* Generate random stack canary value on boot up */
117 __weak void plat_get_random_stack_canaries(void *buf, size_t ncan, size_t size)
118 {
119 	TEE_Result ret = TEE_ERROR_GENERIC;
120 	size_t i = 0;
121 
122 	assert(buf && ncan && size);
123 
124 	/*
125 	 * With virtualization, the RNG is not initialized in the Nexus core, so
126 	 * this function must be overridden with a platform-specific implementation.
127 	 */
128 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
129 		IMSG("WARNING: Using fixed value for stack canary");
130 		memset(buf, 0xab, ncan * size);
131 		goto out;
132 	}
133 
134 	ret = crypto_rng_read(buf, ncan * size);
135 	if (ret != TEE_SUCCESS)
136 		panic("Failed to generate random stack canary");
137 
138 out:
139 	/* Leave a null byte in each canary to block string-based exploits */
140 	for (i = 0; i < ncan; i++)
141 		*((uint8_t *)buf + size * i) = 0;
142 }
143 #endif /* _CFG_CORE_STACK_PROTECTOR || CFG_WITH_STACK_CANARIES */
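/*
 * For illustration only, with hypothetical values ncan = 2 and size = 8,
 * the canary buffer filled in above ends up as:
 *
 *	buf[0..7]  = 00 r1 r2 r3 r4 r5 r6 r7
 *	buf[8..15] = 00 r1 r2 r3 r4 r5 r6 r7	(r* = random bytes)
 *
 * The leading zero byte terminates str*-style reads and writes early, so
 * a string-based overflow can neither rewrite a whole canary unnoticed
 * nor leak its random bytes.
 */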
144 
145 /*
146  * This function is called as a guard after each SMC call that is not
147  * supposed to return.
148  */
149 void __panic_at_smc_return(void)
150 {
151 	panic();
152 }
153 
154 #if defined(CFG_WITH_ARM_TRUSTED_FW)
155 void init_sec_mon(unsigned long nsec_entry __maybe_unused)
156 {
157 	assert(nsec_entry == PADDR_INVALID);
158 	/* Do nothing as we don't have a secure monitor */
159 }
160 #else
161 /* May be overridden in plat-$(PLATFORM)/main.c */
162 __weak void init_sec_mon(unsigned long nsec_entry)
163 {
164 	struct sm_nsec_ctx *nsec_ctx;
165 
166 	assert(nsec_entry != PADDR_INVALID);
167 
168 	/* Initialize secure monitor */
169 	nsec_ctx = sm_get_nsec_ctx();
170 	nsec_ctx->mon_lr = nsec_entry;
171 	nsec_ctx->mon_spsr = CPSR_MODE_SVC | CPSR_I;
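	/*
	 * Per the usual ARM interworking convention, bit 0 of the entry
	 * address selects Thumb state.
	 */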
172 	if (nsec_entry & 1)
173 		nsec_ctx->mon_spsr |= CPSR_T;
174 }
175 #endif
176 
177 #if defined(CFG_WITH_ARM_TRUSTED_FW)
178 static void init_vfp_nsec(void)
179 {
180 }
181 #else
182 static void init_vfp_nsec(void)
183 {
184 	/* Normal world can use CP10 and CP11 (SIMD/VFP) */
185 	write_nsacr(read_nsacr() | NSACR_CP10 | NSACR_CP11);
186 }
187 #endif
188 
189 #if defined(CFG_WITH_VFP)
190 
191 #ifdef ARM32
192 static void init_vfp_sec(void)
193 {
194 	uint32_t cpacr = read_cpacr();
195 
196 	/*
197 	 * Enable Advanced SIMD functionality.
198 	 * Enable use of D16-D31 of the Floating-point Extension register
199 	 * file.
200 	 */
201 	cpacr &= ~(CPACR_ASEDIS | CPACR_D32DIS);
202 	/*
203 	 * Enable usage of CP10 and CP11 (SIMD/VFP) in both kernel and user
204 	 * mode.
205 	 */
206 	cpacr |= CPACR_CP(10, CPACR_CP_ACCESS_FULL);
207 	cpacr |= CPACR_CP(11, CPACR_CP_ACCESS_FULL);
208 	write_cpacr(cpacr);
209 }
210 #endif /* ARM32 */
211 
212 #ifdef ARM64
213 static void init_vfp_sec(void)
214 {
215 	/* Not using VFP until thread_kernel_enable_vfp() */
216 	vfp_disable();
217 }
218 #endif /* ARM64 */
219 
220 #else /* CFG_WITH_VFP */
221 
222 static void init_vfp_sec(void)
223 {
224 	/* Not using VFP */
225 }
226 #endif
227 
228 #ifdef CFG_SECONDARY_INIT_CNTFRQ
229 static void primary_save_cntfrq(void)
230 {
231 	assert(cntfrq == 0);
232 
233 	/*
234 	 * CNTFRQ should be initialized on the primary CPU by a
235 	 * previous boot stage.
236 	 */
237 	cntfrq = read_cntfrq();
238 }
239 
240 static void secondary_init_cntfrq(void)
241 {
242 	assert(cntfrq != 0);
243 	write_cntfrq(cntfrq);
244 }
245 #else /* CFG_SECONDARY_INIT_CNTFRQ */
246 static void primary_save_cntfrq(void)
247 {
248 }
249 
250 static void secondary_init_cntfrq(void)
251 {
252 }
253 #endif
254 
255 #ifdef CFG_CORE_SANITIZE_KADDRESS
256 static void init_run_constructors(void)
257 {
258 	const vaddr_t *ctor;
259 
260 	for (ctor = &__ctor_list; ctor < &__ctor_end; ctor++)
261 		((void (*)(void))(*ctor))();
262 }
263 
264 static void init_asan(void)
265 {
266 
267 	/*
268 	 * CFG_ASAN_SHADOW_OFFSET is also supplied as
269 	 * -fasan-shadow-offset=$(CFG_ASAN_SHADOW_OFFSET) to the compiler.
270 	 * Since all the values needed to calculate
271 	 * CFG_ASAN_SHADOW_OFFSET aren't available to make, we have to
272 	 * calculate it in advance and hard-code it into the platform
273 	 * conf.mk. Here, where we do have all the needed values, we
274 	 * double-check that the compiler was supplied the correct value.
275 	 */
276 
277 #define __ASAN_SHADOW_START \
278 	ROUNDUP(TEE_RAM_START + (TEE_RAM_VA_SIZE * 8) / 9 - 8, 8)
279 	assert(__ASAN_SHADOW_START == (vaddr_t)&__asan_shadow_start);
280 #define __CFG_ASAN_SHADOW_OFFSET \
281 	(__ASAN_SHADOW_START - (TEE_RAM_START / 8))
282 	COMPILE_TIME_ASSERT(CFG_ASAN_SHADOW_OFFSET == __CFG_ASAN_SHADOW_OFFSET);
283 #undef __ASAN_SHADOW_START
284 #undef __CFG_ASAN_SHADOW_OFFSET
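	/*
	 * Worked example with hypothetical values, not any platform's real
	 * configuration: with TEE_RAM_START = 0x2e000000 and
	 * TEE_RAM_VA_SIZE = 0x00200000, 8/9 of the VA range holds regular
	 * memory and 1/9 shadows it (one shadow byte covers eight bytes):
	 *
	 *	__ASAN_SHADOW_START
	 *		= ROUNDUP(0x2e000000 + (0x00200000 * 8) / 9 - 8, 8)
	 *		= 0x2e1c71c0
	 *	CFG_ASAN_SHADOW_OFFSET
	 *		= 0x2e1c71c0 - 0x2e000000 / 8 = 0x285c71c0
	 *
	 * This satisfies shadow(addr) = addr / 8 + offset, so
	 * shadow(TEE_RAM_START) lands exactly at the start of the shadow
	 * area.
	 */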
285 
286 	/*
287 	 * Mark the region covered by the shadow memory: everything from the
288 	 * load address up to the beginning of the shadow area.
289 	 */
290 	asan_set_shadowed((void *)TEE_LOAD_ADDR, &__asan_shadow_start);
291 
292 	/*
293 	 * Add access to areas that aren't opened automatically by a
294 	 * constructor.
295 	 */
296 	asan_tag_access(&__ctor_list, &__ctor_end);
297 	asan_tag_access(__rodata_start, __rodata_end);
298 #ifdef CFG_WITH_PAGER
299 	asan_tag_access(__pageable_start, __pageable_end);
300 #endif /*CFG_WITH_PAGER*/
301 	asan_tag_access(__nozi_start, __nozi_end);
302 #ifdef ARM32
303 	asan_tag_access(__exidx_start, __exidx_end);
304 	asan_tag_access(__extab_start, __extab_end);
305 #endif
306 
307 	init_run_constructors();
308 
309 	/* Everything is tagged correctly, let's start address sanitizing. */
310 	asan_start();
311 }
312 #else /*CFG_CORE_SANITIZE_KADDRESS*/
313 static void init_asan(void)
314 {
315 }
316 #endif /*CFG_CORE_SANITIZE_KADDRESS*/
317 
318 #if defined(CFG_MEMTAG)
319 /* Called from entry_a64.S only when MEMTAG is configured */
320 void boot_init_memtag(void)
321 {
322 	paddr_t base = 0;
323 	paddr_size_t size = 0;
324 
325 	memtag_init_ops(feat_mte_implemented());
326 	core_mmu_get_secure_memory(&base, &size);
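	/*
	 * MTE assigns a 4-bit tag to each 16-byte granule of memory, so
	 * resetting all of secure memory to tag 0 below gives a
	 * well-defined initial state before any tagged allocations are
	 * handed out.
	 */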
327 	memtag_set_tags((void *)(vaddr_t)base, size, 0);
328 }
329 #endif
330 
331 #ifdef CFG_WITH_PAGER
332 
333 #ifdef CFG_CORE_SANITIZE_KADDRESS
334 static void carve_out_asan_mem(tee_mm_pool_t *pool)
335 {
336 	const size_t s = pool->hi - pool->lo;
337 	tee_mm_entry_t *mm;
338 	paddr_t apa = ASAN_MAP_PA;
339 	size_t asz = ASAN_MAP_SZ;
340 
341 	if (core_is_buffer_outside(apa, asz, pool->lo, s))
342 		return;
343 
344 	/* Reserve the shadow area */
345 	if (!core_is_buffer_inside(apa, asz, pool->lo, s)) {
346 		if (apa < pool->lo) {
347 			/*
348 			 * ASAN buffer is overlapping with the beginning of
349 			 * the pool.
350 			 */
351 			asz -= pool->lo - apa;
352 			apa = pool->lo;
353 		} else {
354 			/*
355 			 * ASAN buffer is overlapping with the end of the
356 			 * pool.
357 			 */
358 			asz = pool->hi - apa;
359 		}
360 	}
361 	mm = tee_mm_alloc2(pool, apa, asz);
362 	assert(mm);
363 }
364 #else
365 static void carve_out_asan_mem(tee_mm_pool_t *pool __unused)
366 {
367 }
368 #endif
369 
370 static void print_pager_pool_size(void)
371 {
372 	struct tee_pager_stats __maybe_unused stats;
373 
374 	tee_pager_get_stats(&stats);
375 	IMSG("Pager pool size: %zukB",
376 		stats.npages_all * SMALL_PAGE_SIZE / 1024);
377 }
378 
379 static void init_vcore(tee_mm_pool_t *mm_vcore)
380 {
381 	const vaddr_t begin = VCORE_START_VA;
382 	size_t size = TEE_RAM_VA_SIZE;
383 
384 #ifdef CFG_CORE_SANITIZE_KADDRESS
385 	/* Carve out the ASAN memory, flat-mapped after core memory */
386 	if (begin + size > ASAN_SHADOW_PA)
387 		size = ASAN_MAP_PA - begin;
388 #endif
389 
390 	if (!tee_mm_init(mm_vcore, begin, size, SMALL_PAGE_SHIFT,
391 			 TEE_MM_POOL_NO_FLAGS))
392 		panic("tee_mm_vcore init failed");
393 }
394 
395 /*
396  * With CFG_CORE_ASLR=y the init part is relocated very early during boot.
397  * The init part is also paged just as the rest of the normal paged code, with
398  * the difference that it's preloaded during boot. When the backing store
399  * is configured the entire paged binary is copied in place and then also
400  * the init part. Since the init part has been relocated (references to
401  * addresses updated to compensate for the new load address) this has to be
402  * undone for the hashes of those pages to match the original binary.
403  *
404  * If CFG_CORE_ASLR=n, nothing needs to be done as the code/ro pages are
405  * unchanged.
406  */
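/*
 * For illustration, with a hypothetical map_offset of 0x200000: a pointer
 * the original binary stored as 0x0e100000 reads as 0x0e300000 in the
 * relocated image. Subtracting offs at every relocated location inside
 * the init part restores the page contents that the build-time SHA-256
 * hashes were computed over.
 */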
407 static void undo_init_relocation(uint8_t *paged_store __maybe_unused)
408 {
409 #ifdef CFG_CORE_ASLR
410 	unsigned long *ptr = NULL;
411 	const uint32_t *reloc = NULL;
412 	const uint32_t *reloc_end = NULL;
413 	unsigned long offs = boot_mmu_config.map_offset;
414 	const struct boot_embdata *embdata = (const void *)__init_end;
415 	vaddr_t addr_end = (vaddr_t)__init_end - offs - TEE_LOAD_ADDR;
416 	vaddr_t addr_start = (vaddr_t)__init_start - offs - TEE_LOAD_ADDR;
417 
418 	reloc = (const void *)((vaddr_t)embdata + embdata->reloc_offset);
419 	reloc_end = reloc + embdata->reloc_len / sizeof(*reloc);
420 
421 	for (; reloc < reloc_end; reloc++) {
422 		if (*reloc < addr_start)
423 			continue;
424 		if (*reloc >= addr_end)
425 			break;
426 		ptr = (void *)(paged_store + *reloc - addr_start);
427 		*ptr -= offs;
428 	}
429 #endif
430 }
431 
432 static struct fobj *ro_paged_alloc(tee_mm_entry_t *mm, void *hashes,
433 				   void *store)
434 {
435 	const unsigned int num_pages = tee_mm_get_bytes(mm) / SMALL_PAGE_SIZE;
436 #ifdef CFG_CORE_ASLR
437 	unsigned int reloc_offs = (vaddr_t)__pageable_start - VCORE_START_VA;
438 	const struct boot_embdata *embdata = (const void *)__init_end;
439 	const void *reloc = __init_end + embdata->reloc_offset;
440 
441 	return fobj_ro_reloc_paged_alloc(num_pages, hashes, reloc_offs,
442 					 reloc, embdata->reloc_len, store);
443 #else
444 	return fobj_ro_paged_alloc(num_pages, hashes, store);
445 #endif
446 }
447 
448 static void init_runtime(unsigned long pageable_part)
449 {
450 	size_t n;
451 	size_t init_size = (size_t)(__init_end - __init_start);
452 	size_t pageable_start = (size_t)__pageable_start;
453 	size_t pageable_end = (size_t)__pageable_end;
454 	size_t pageable_size = pageable_end - pageable_start;
455 	vaddr_t tzsram_end = TZSRAM_BASE + TZSRAM_SIZE - TEE_LOAD_ADDR +
456 			     VCORE_START_VA;
457 	size_t hash_size = (pageable_size / SMALL_PAGE_SIZE) *
458 			   TEE_SHA256_HASH_SIZE;
459 	const struct boot_embdata *embdata = (const void *)__init_end;
460 	const void *tmp_hashes = NULL;
461 	tee_mm_entry_t *mm = NULL;
462 	struct fobj *fobj = NULL;
463 	uint8_t *paged_store = NULL;
464 	uint8_t *hashes = NULL;
465 
466 	assert(pageable_size % SMALL_PAGE_SIZE == 0);
467 	assert(embdata->total_len >= embdata->hashes_offset +
468 				     embdata->hashes_len);
469 	assert(hash_size == embdata->hashes_len);
470 
471 	tmp_hashes = __init_end + embdata->hashes_offset;
472 
473 	init_asan();
474 
475 	/* Add heap2 first as heap1 may be too small for the initial bget pool */
476 	malloc_add_pool(__heap2_start, __heap2_end - __heap2_start);
477 	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
478 
479 	/*
480 	 * This needs to be initialized early to support address lookup
481 	 * in MEM_AREA_TEE_RAM
482 	 */
483 	tee_pager_early_init();
484 
485 	hashes = malloc(hash_size);
486 	IMSG_RAW("\n");
487 	IMSG("Pager is enabled. Hashes: %zu bytes", hash_size);
488 	assert(hashes);
489 	asan_memcpy_unchecked(hashes, tmp_hashes, hash_size);
490 
491 	/*
492 	 * Need tee_mm_sec_ddr initialized to be able to allocate secure
493 	 * DDR below.
494 	 */
495 	core_mmu_init_ta_ram();
496 
497 	carve_out_asan_mem(&tee_mm_sec_ddr);
498 
499 	mm = tee_mm_alloc(&tee_mm_sec_ddr, pageable_size);
500 	assert(mm);
501 	paged_store = phys_to_virt(tee_mm_get_smem(mm), MEM_AREA_TA_RAM,
502 				   pageable_size);
503 	/*
504 	 * Load pageable part in the dedicated allocated area:
505 	 * - Move pageable non-init part into pageable area. Note bootloader
506 	 *   may have loaded it anywhere in TA RAM hence use memmove().
507 	 * - Copy pageable init part from current location into pageable area.
508 	 */
509 	memmove(paged_store + init_size,
510 		phys_to_virt(pageable_part,
511 			     core_mmu_get_type_by_pa(pageable_part),
512 			     __pageable_part_end - __pageable_part_start),
513 		__pageable_part_end - __pageable_part_start);
514 	asan_memcpy_unchecked(paged_store, __init_start, init_size);
515 	/*
516 	 * Undo any relocation of the init part so that the hash checks
517 	 * can pass.
518 	 */
519 	undo_init_relocation(paged_store);
520 
521 	/* Check that the hashes of what's in the pageable area are OK */
522 	DMSG("Checking hashes of pageable area");
523 	for (n = 0; (n * SMALL_PAGE_SIZE) < pageable_size; n++) {
524 		const uint8_t *hash = hashes + n * TEE_SHA256_HASH_SIZE;
525 		const uint8_t *page = paged_store + n * SMALL_PAGE_SIZE;
526 		TEE_Result res;
527 
528 		DMSG("hash pg_idx %zu hash %p page %p", n, hash, page);
529 		res = hash_sha256_check(hash, page, SMALL_PAGE_SIZE);
530 		if (res != TEE_SUCCESS) {
531 			EMSG("Hash failed for page %zu at %p: res 0x%x",
532 			     n, (void *)page, res);
533 			panic();
534 		}
535 	}
536 
537 	/*
538 	 * Assert that the prepaged init sections are page aligned so that
539 	 * nothing at the end of the premapped init area is left uninitialized.
540 	 */
541 	assert(!(init_size & SMALL_PAGE_MASK));
542 
543 	/*
544 	 * Initialize the virtual memory pool used for main_mmu_l2_ttb which
545 	 * is supplied to tee_pager_init() below.
546 	 */
547 	init_vcore(&tee_mm_vcore);
548 
549 	/*
550 	 * Assign the pager alias area at the end of the small-page block
551 	 * that the rest of the binary is loaded into. We're taking more
552 	 * than needed, but we're guaranteed to not need more than the
553 	 * physical amount of TZSRAM.
554 	 */
555 	mm = tee_mm_alloc2(&tee_mm_vcore,
556 			   (vaddr_t)tee_mm_vcore.lo +
557 			   tee_mm_vcore.size - TZSRAM_SIZE,
558 			   TZSRAM_SIZE);
559 	assert(mm);
560 	tee_pager_set_alias_area(mm);
561 
562 	/*
563 	 * Claim virtual memory which isn't paged.
564 	 * Linear memory (flat-mapped core memory) ends there.
565 	 */
566 	mm = tee_mm_alloc2(&tee_mm_vcore, VCORE_UNPG_RX_PA,
567 			   (vaddr_t)(__pageable_start - VCORE_UNPG_RX_PA));
568 	assert(mm);
569 
570 	/*
571 	 * Allocate virtual memory for the pageable area and let the pager
572 	 * take charge of all the pages already assigned to that memory.
573 	 */
574 	mm = tee_mm_alloc2(&tee_mm_vcore, (vaddr_t)__pageable_start,
575 			   pageable_size);
576 	assert(mm);
577 	fobj = ro_paged_alloc(mm, hashes, paged_store);
578 	assert(fobj);
579 	tee_pager_add_core_region(tee_mm_get_smem(mm), PAGED_REGION_TYPE_RO,
580 				  fobj);
581 	fobj_put(fobj);
582 
583 	tee_pager_add_pages(pageable_start, init_size / SMALL_PAGE_SIZE, false);
584 	tee_pager_add_pages(pageable_start + init_size,
585 			    (pageable_size - init_size) / SMALL_PAGE_SIZE,
586 			    true);
587 	if (pageable_end < tzsram_end)
588 		tee_pager_add_pages(pageable_end, (tzsram_end - pageable_end) /
589 						   SMALL_PAGE_SIZE, true);
590 
591 	/*
592 	 * There may be physical pages in TZSRAM before the core load address.
593 	 * These pages can be added to the physical pages pool of the pager.
594 	 * This setup may happen when the secure bootloader runs in TZSRAM
595 	 * and its memory can be reused by OP-TEE once boot stages complete.
596 	 */
597 	tee_pager_add_pages(tee_mm_vcore.lo,
598 			(VCORE_UNPG_RX_PA - tee_mm_vcore.lo) / SMALL_PAGE_SIZE,
599 			true);
600 
601 	print_pager_pool_size();
602 }
603 #else
604 
605 static void init_runtime(unsigned long pageable_part __unused)
606 {
607 	init_asan();
608 
609 	/*
610 	 * By default the whole of OP-TEE uses malloc, so we need to initialize
611 	 * it early. But, when virtualization is enabled, malloc is used
612 	 * only by TEE runtime, so malloc should be initialized later, for
613 	 * every virtual partition separately. Core code uses nex_malloc
614 	 * instead.
615 	 */
616 #ifdef CFG_NS_VIRTUALIZATION
617 	nex_malloc_add_pool(__nex_heap_start, __nex_heap_end -
618 					      __nex_heap_start);
619 #else
620 	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
621 #endif
622 
623 	IMSG_RAW("\n");
624 }
625 #endif
626 
627 #if defined(CFG_DT)
628 static int add_optee_dt_node(struct dt_descriptor *dt)
629 {
630 	int offs;
631 	int ret;
632 
633 	if (fdt_path_offset(dt->blob, "/firmware/optee") >= 0) {
634 		DMSG("OP-TEE Device Tree node already exists!");
635 		return 0;
636 	}
637 
638 	offs = fdt_path_offset(dt->blob, "/firmware");
639 	if (offs < 0) {
640 		offs = add_dt_path_subnode(dt, "/", "firmware");
641 		if (offs < 0)
642 			return -1;
643 	}
644 
645 	offs = fdt_add_subnode(dt->blob, offs, "optee");
646 	if (offs < 0)
647 		return -1;
648 
649 	ret = fdt_setprop_string(dt->blob, offs, "compatible",
650 				 "linaro,optee-tz");
651 	if (ret < 0)
652 		return -1;
653 	ret = fdt_setprop_string(dt->blob, offs, "method", "smc");
654 	if (ret < 0)
655 		return -1;
656 
657 	if (CFG_CORE_ASYNC_NOTIF_GIC_INTID) {
658 		/*
659 		 * The format of the interrupt property is defined by the
660 		 * binding of the interrupt domain root. In this case it's
661 		 * an Arm GIC v1, v2 or v3, so we must be compatible with
662 		 * these.
663 		 *
664 		 * An SPI type of interrupt is indicated with a 0 in the
665 		 * first cell. A PPI type is indicated with value 1.
666 		 *
667 		 * The interrupt number goes in the second cell where
668 		 * SPIs range from 0 to 987 and PPIs range from 0 to 15.
669 		 *
670 		 * Flags are passed in the third cell.
671 		 */
672 		uint32_t itr_trigger = 0;
673 		uint32_t itr_type = 0;
674 		uint32_t itr_id = 0;
675 		uint32_t val[3] = { };
676 
677 		/* PPIs are visible only in the current CPU cluster */
678 		static_assert(!CFG_CORE_ASYNC_NOTIF_GIC_INTID ||
679 			      (CFG_CORE_ASYNC_NOTIF_GIC_INTID >=
680 			       GIC_SPI_BASE) ||
681 			      ((CFG_TEE_CORE_NB_CORE <= 8) &&
682 			       (CFG_CORE_ASYNC_NOTIF_GIC_INTID >=
683 				GIC_PPI_BASE)));
684 
685 		if (CFG_CORE_ASYNC_NOTIF_GIC_INTID >= GIC_SPI_BASE) {
686 			itr_type = GIC_SPI;
687 			itr_id = CFG_CORE_ASYNC_NOTIF_GIC_INTID - GIC_SPI_BASE;
688 			itr_trigger = IRQ_TYPE_EDGE_RISING;
689 		} else {
690 			itr_type = GIC_PPI;
691 			itr_id = CFG_CORE_ASYNC_NOTIF_GIC_INTID - GIC_PPI_BASE;
692 			itr_trigger = IRQ_TYPE_EDGE_RISING |
693 				      GIC_CPU_MASK_SIMPLE(CFG_TEE_CORE_NB_CORE);
694 		}
695 
696 		val[0] = TEE_U32_TO_BIG_ENDIAN(itr_type);
697 		val[1] = TEE_U32_TO_BIG_ENDIAN(itr_id);
698 		val[2] = TEE_U32_TO_BIG_ENDIAN(itr_trigger);
699 
700 		ret = fdt_setprop(dt->blob, offs, "interrupts", val,
701 				  sizeof(val));
702 		if (ret < 0)
703 			return -1;
704 	}
705 	return 0;
706 }
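/*
 * A sketch of the node generated above, assuming a hypothetical
 * CFG_CORE_ASYNC_NOTIF_GIC_INTID of 37, that is, SPI 5 (37 - 32) with
 * GIC_SPI = 0 and IRQ_TYPE_EDGE_RISING = 1:
 *
 *	firmware {
 *		optee {
 *			compatible = "linaro,optee-tz";
 *			method = "smc";
 *			interrupts = <0 5 1>;
 *		};
 *	};
 */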
707 
708 #ifdef CFG_PSCI_ARM32
709 static int append_psci_compatible(void *fdt, int offs, const char *str)
710 {
711 	return fdt_appendprop(fdt, offs, "compatible", str, strlen(str) + 1);
712 }
713 
714 static int dt_add_psci_node(struct dt_descriptor *dt)
715 {
716 	int offs;
717 
718 	if (fdt_path_offset(dt->blob, "/psci") >= 0) {
719 		DMSG("PSCI Device Tree node already exists!");
720 		return 0;
721 	}
722 
723 	offs = add_dt_path_subnode(dt, "/", "psci");
724 	if (offs < 0)
725 		return -1;
726 	if (append_psci_compatible(dt->blob, offs, "arm,psci-1.0"))
727 		return -1;
728 	if (append_psci_compatible(dt->blob, offs, "arm,psci-0.2"))
729 		return -1;
730 	if (append_psci_compatible(dt->blob, offs, "arm,psci"))
731 		return -1;
732 	if (fdt_setprop_string(dt->blob, offs, "method", "smc"))
733 		return -1;
734 	if (fdt_setprop_u32(dt->blob, offs, "cpu_suspend", PSCI_CPU_SUSPEND))
735 		return -1;
736 	if (fdt_setprop_u32(dt->blob, offs, "cpu_off", PSCI_CPU_OFF))
737 		return -1;
738 	if (fdt_setprop_u32(dt->blob, offs, "cpu_on", PSCI_CPU_ON))
739 		return -1;
740 	if (fdt_setprop_u32(dt->blob, offs, "sys_poweroff", PSCI_SYSTEM_OFF))
741 		return -1;
742 	if (fdt_setprop_u32(dt->blob, offs, "sys_reset", PSCI_SYSTEM_RESET))
743 		return -1;
744 	return 0;
745 }
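/*
 * A sketch of the resulting node; the numeric function IDs come from the
 * PSCI_* defines and are omitted here:
 *
 *	psci {
 *		compatible = "arm,psci-1.0", "arm,psci-0.2", "arm,psci";
 *		method = "smc";
 *		cpu_suspend = <...>;
 *		cpu_off = <...>;
 *		cpu_on = <...>;
 *		sys_poweroff = <...>;
 *		sys_reset = <...>;
 *	};
 */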
746 
747 static int check_node_compat_prefix(struct dt_descriptor *dt, int offs,
748 				    const char *prefix)
749 {
750 	const size_t prefix_len = strlen(prefix);
751 	size_t l;
752 	int plen;
753 	const char *prop;
754 
755 	prop = fdt_getprop(dt->blob, offs, "compatible", &plen);
756 	if (!prop)
757 		return -1;
758 
759 	while (plen > 0) {
760 		if (memcmp(prop, prefix, prefix_len) == 0)
761 			return 0; /* match */
762 
763 		l = strlen(prop) + 1;
764 		prop += l;
765 		plen -= l;
766 	}
767 
768 	return -1;
769 }
770 
771 static int dt_add_psci_cpu_enable_methods(struct dt_descriptor *dt)
772 {
773 	int offs = 0;
774 
775 	while (1) {
776 		offs = fdt_next_node(dt->blob, offs, NULL);
777 		if (offs < 0)
778 			break;
779 		if (fdt_getprop(dt->blob, offs, "enable-method", NULL))
780 			continue; /* already set */
781 		if (check_node_compat_prefix(dt, offs, "arm,cortex-a"))
782 			continue; /* no compatible */
783 		if (fdt_setprop_string(dt->blob, offs, "enable-method", "psci"))
784 			return -1;
785 		/* Need to restart scanning as offsets may have changed */
786 		offs = 0;
787 	}
788 	return 0;
789 }
790 
791 static int config_psci(struct dt_descriptor *dt)
792 {
793 	if (dt_add_psci_node(dt))
794 		return -1;
795 	return dt_add_psci_cpu_enable_methods(dt);
796 }
797 #else
798 static int config_psci(struct dt_descriptor *dt __unused)
799 {
800 	return 0;
801 }
802 #endif /*CFG_PSCI_ARM32*/
803 
804 #ifdef CFG_CORE_DYN_SHM
805 static uint64_t get_dt_val_and_advance(const void *data, size_t *offs,
806 				       uint32_t cell_size)
807 {
808 	uint64_t rv = 0;
809 
810 	if (cell_size == 1) {
811 		uint32_t v;
812 
813 		memcpy(&v, (const uint8_t *)data + *offs, sizeof(v));
814 		*offs += sizeof(v);
815 		rv = fdt32_to_cpu(v);
816 	} else {
817 		uint64_t v;
818 
819 		memcpy(&v, (const uint8_t *)data + *offs, sizeof(v));
820 		*offs += sizeof(v);
821 		rv = fdt64_to_cpu(v);
822 	}
823 
824 	return rv;
825 }
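/*
 * For illustration, with hypothetical #address-cells = <2> and
 * #size-cells = <2>, a node such as:
 *
 *	memory@80000000 {
 *		device_type = "memory";
 *		reg = <0x0 0x80000000 0x0 0x40000000>;
 *	};
 *
 * decodes in the helper below to one element with addr = 0x80000000 and
 * size = 0x40000000 (1 GiB).
 */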
826 
827 /*
828  * Find all non-secure memory from DT. Memory marked inaccessible by Secure
829  * World is ignored since it cannot be mapped and used as dynamic shared
830  * memory.
831  */
832 static int get_nsec_memory_helper(void *fdt, struct core_mmu_phys_mem *mem)
833 {
834 	const uint8_t *prop = NULL;
835 	uint64_t a = 0;
836 	uint64_t l = 0;
837 	size_t prop_offs = 0;
838 	size_t prop_len = 0;
839 	int elems_total = 0;
840 	int addr_size = 0;
841 	int len_size = 0;
842 	int offs = 0;
843 	size_t n = 0;
844 	int len = 0;
845 
846 	addr_size = fdt_address_cells(fdt, 0);
847 	if (addr_size < 0)
848 		return 0;
849 
850 	len_size = fdt_size_cells(fdt, 0);
851 	if (len_size < 0)
852 		return 0;
853 
854 	while (true) {
855 		offs = fdt_node_offset_by_prop_value(fdt, offs, "device_type",
856 						     "memory",
857 						     sizeof("memory"));
858 		if (offs < 0)
859 			break;
860 
861 		if (fdt_get_status(fdt, offs) != (DT_STATUS_OK_NSEC |
862 						   DT_STATUS_OK_SEC))
863 			continue;
864 
865 		prop = fdt_getprop(fdt, offs, "reg", &len);
866 		if (!prop)
867 			continue;
868 
869 		prop_len = len;
870 		for (n = 0, prop_offs = 0; prop_offs < prop_len; n++) {
871 			a = get_dt_val_and_advance(prop, &prop_offs, addr_size);
872 			if (prop_offs >= prop_len) {
873 				n--;
874 				break;
875 			}
876 
877 			l = get_dt_val_and_advance(prop, &prop_offs, len_size);
878 			if (mem) {
879 				mem->type = MEM_AREA_DDR_OVERALL;
880 				mem->addr = a;
881 				mem->size = l;
882 				mem++;
883 			}
884 		}
885 
886 		elems_total += n;
887 	}
888 
889 	return elems_total;
890 }
891 
892 static struct core_mmu_phys_mem *get_nsec_memory(void *fdt, size_t *nelems)
893 {
894 	struct core_mmu_phys_mem *mem = NULL;
895 	int elems_total = 0;
896 
897 	elems_total = get_nsec_memory_helper(fdt, NULL);
898 	if (elems_total <= 0)
899 		return NULL;
900 
901 	mem = nex_calloc(elems_total, sizeof(*mem));
902 	if (!mem)
903 		panic();
904 
905 	elems_total = get_nsec_memory_helper(fdt, mem);
906 	assert(elems_total > 0);
907 
908 	*nelems = elems_total;
909 
910 	return mem;
911 }
912 #endif /*CFG_CORE_DYN_SHM*/
913 
914 #ifdef CFG_CORE_RESERVED_SHM
915 static int mark_static_shm_as_reserved(struct dt_descriptor *dt)
916 {
917 	vaddr_t shm_start;
918 	vaddr_t shm_end;
919 
920 	core_mmu_get_mem_by_type(MEM_AREA_NSEC_SHM, &shm_start, &shm_end);
921 	if (shm_start != shm_end)
922 		return add_res_mem_dt_node(dt, "optee_shm",
923 					   virt_to_phys((void *)shm_start),
924 					   shm_end - shm_start);
925 
926 	DMSG("No SHM configured");
927 	return -1;
928 }
929 #endif /*CFG_CORE_RESERVED_SHM*/
930 
931 static int mark_tzdram_as_reserved(struct dt_descriptor *dt)
932 {
933 	return add_res_mem_dt_node(dt, "optee_core", CFG_TZDRAM_START,
934 				   CFG_TZDRAM_SIZE);
935 }
936 
937 static void update_external_dt(void)
938 {
939 	struct dt_descriptor *dt = get_external_dt_desc();
940 
941 	if (!dt || !dt->blob)
942 		return;
943 
944 	if (!IS_ENABLED(CFG_CORE_FFA) && add_optee_dt_node(dt))
945 		panic("Failed to add OP-TEE Device Tree node");
946 
947 	if (config_psci(dt))
948 		panic("Failed to config PSCI");
949 
950 #ifdef CFG_CORE_RESERVED_SHM
951 	if (mark_static_shm_as_reserved(dt))
952 		panic("Failed to config non-secure memory");
953 #endif
954 
955 	if (mark_tzdram_as_reserved(dt))
956 		panic("Failed to config secure memory");
957 }
958 #else /*CFG_DT*/
959 static void update_external_dt(void)
960 {
961 }
962 
963 #ifdef CFG_CORE_DYN_SHM
964 static struct core_mmu_phys_mem *get_nsec_memory(void *fdt __unused,
965 						 size_t *nelems __unused)
966 {
967 	return NULL;
968 }
969 #endif /*CFG_CORE_DYN_SHM*/
970 #endif /*!CFG_DT*/
971 
972 #if defined(CFG_CORE_SEL1_SPMC) && defined(CFG_DT)
973 void *get_tos_fw_config_dt(void)
974 {
975 	if (!IS_ENABLED(CFG_MAP_EXT_DT_SECURE))
976 		return NULL;
977 
978 	assert(cpu_mmu_enabled());
979 
980 	return tos_fw_config_dt.blob;
981 }
982 
983 static void init_tos_fw_config_dt(unsigned long pa)
984 {
985 	struct dt_descriptor *dt = &tos_fw_config_dt;
986 	void *fdt = NULL;
987 	int ret = 0;
988 
989 	if (!IS_ENABLED(CFG_MAP_EXT_DT_SECURE))
990 		return;
991 
992 	if (!pa)
993 		panic("No TOS_FW_CONFIG DT found");
994 
995 	fdt = core_mmu_add_mapping(MEM_AREA_EXT_DT, pa, CFG_DTB_MAX_SIZE);
996 	if (!fdt)
997 		panic("Failed to map TOS_FW_CONFIG DT");
998 
999 	dt->blob = fdt;
1000 
1001 	ret = fdt_open_into(fdt, fdt, CFG_DTB_MAX_SIZE);
1002 	if (ret < 0) {
1003 		EMSG("Invalid Device Tree at %#lx: error %d", pa, ret);
1004 		panic();
1005 	}
1006 
1007 	IMSG("TOS_FW_CONFIG DT found");
1008 }
1009 #else
1010 void *get_tos_fw_config_dt(void)
1011 {
1012 	return NULL;
1013 }
1014 
1015 static void init_tos_fw_config_dt(unsigned long pa __unused)
1016 {
1017 }
1018 #endif /*CFG_CORE_SEL1_SPMC && CFG_DT*/
1019 
1020 #ifdef CFG_CORE_DYN_SHM
1021 static void discover_nsec_memory(void)
1022 {
1023 	struct core_mmu_phys_mem *mem;
1024 	const struct core_mmu_phys_mem *mem_begin = NULL;
1025 	const struct core_mmu_phys_mem *mem_end = NULL;
1026 	size_t nelems;
1027 	void *fdt = get_external_dt();
1028 
1029 	if (fdt) {
1030 		mem = get_nsec_memory(fdt, &nelems);
1031 		if (mem) {
1032 			core_mmu_set_discovered_nsec_ddr(mem, nelems);
1033 			return;
1034 		}
1035 
1036 		DMSG("No non-secure memory found in FDT");
1037 	}
1038 
1039 	mem_begin = phys_ddr_overall_begin;
1040 	mem_end = phys_ddr_overall_end;
1041 	nelems = mem_end - mem_begin;
1042 	if (nelems) {
1043 		/*
1044 		 * Platform cannot use both register_ddr() and the now
1045 		 * deprecated register_dynamic_shm().
1046 		 */
1047 		assert(phys_ddr_overall_compat_begin ==
1048 		       phys_ddr_overall_compat_end);
1049 	} else {
1050 		mem_begin = phys_ddr_overall_compat_begin;
1051 		mem_end = phys_ddr_overall_compat_end;
1052 		nelems = mem_end - mem_begin;
1053 		if (!nelems)
1054 			return;
1055 		DMSG("Warning register_dynamic_shm() is deprecated, please use register_ddr() instead");
1056 	}
1057 
1058 	mem = nex_calloc(nelems, sizeof(*mem));
1059 	if (!mem)
1060 		panic();
1061 
1062 	memcpy(mem, phys_ddr_overall_begin, sizeof(*mem) * nelems);
1063 	core_mmu_set_discovered_nsec_ddr(mem, nelems);
1064 }
1065 #else /*CFG_CORE_DYN_SHM*/
1066 static void discover_nsec_memory(void)
1067 {
1068 }
1069 #endif /*!CFG_CORE_DYN_SHM*/
1070 
1071 #ifdef CFG_NS_VIRTUALIZATION
1072 static TEE_Result virt_init_heap(void)
1073 {
1074 	/* We need to initialize the pool for every virtual guest partition */
1075 	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
1076 
1077 	return TEE_SUCCESS;
1078 }
1079 preinit_early(virt_init_heap);
1080 #endif
1081 
1082 void init_tee_runtime(void)
1083 {
1084 #ifndef CFG_WITH_PAGER
1085 	/* Pager initializes TA RAM early */
1086 	core_mmu_init_ta_ram();
1087 #endif
1088 	/*
1089 	 * With virtualization we call this function when creating the
1090 	 * OP-TEE partition instead.
1091 	 */
1092 	if (!IS_ENABLED(CFG_NS_VIRTUALIZATION))
1093 		call_preinitcalls();
1094 	call_initcalls();
1095 
1096 	/*
1097 	 * These two functions use crypto_rng_read() to initialize the
1098 	 * pauth keys. Once call_initcalls() returns we're guaranteed that
1099 	 * crypto_rng_read() is ready to be used.
1100 	 */
1101 	thread_init_core_local_pauth_keys();
1102 	thread_init_thread_pauth_keys();
1103 
1104 	/*
1105 	 * Reinitialize canaries around the stacks with crypto_rng_read().
1106 	 *
1107 	 * TODO: Updating canaries when CFG_NS_VIRTUALIZATION is enabled will
1108 	 * require synchronization between thread_check_canaries() and
1109 	 * thread_update_canaries().
1110 	 */
1111 	if (!IS_ENABLED(CFG_NS_VIRTUALIZATION))
1112 		thread_update_canaries();
1113 }
1114 
1115 static void init_primary(unsigned long pageable_part, unsigned long nsec_entry)
1116 {
1117 	thread_init_core_local_stacks();
1118 	/*
1119 	 * Mask asynchronous exceptions before switching to the thread vector
1120 	 * as the thread handler requires those to be masked while
1121 	 * executing with the temporary stack. The thread subsystem also
1122 	 * asserts that the foreign interrupts are blocked when using most of
1123 	 * its functions.
1124 	 */
1125 	thread_set_exceptions(THREAD_EXCP_ALL);
1126 	primary_save_cntfrq();
1127 	init_vfp_sec();
1128 	/*
1129 	 * Pager: init_runtime() calls thread_kernel_enable_vfp() so we must
1130 	 * set a current thread right now to avoid a chicken-and-egg problem
1131 	 * (thread_init_boot_thread() sets the current thread but needs
1132 	 * things set by init_runtime()).
1133 	 */
1134 	thread_get_core_local()->curr_thread = 0;
1135 	init_runtime(pageable_part);
1136 
1137 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
1138 		/*
1139 		 * Virtualization: We can't initialize threads right now because
1140 		 * threads belong to the "tee" part and will be initialized
1141 		 * separately per each new virtual guest. So, we'll clear
1142 		 * "curr_thread" and call it done.
1143 		 */
1144 		thread_get_core_local()->curr_thread = -1;
1145 	} else {
1146 		thread_init_boot_thread();
1147 	}
1148 	thread_init_primary();
1149 	thread_init_per_cpu();
1150 	init_sec_mon(nsec_entry);
1151 }
1152 
1153 static bool cpu_nmfi_enabled(void)
1154 {
1155 #if defined(ARM32)
1156 	return read_sctlr() & SCTLR_NMFI;
1157 #else
1158 	/* Note: ARM64 does not feature non-maskable FIQ support. */
1159 	return false;
1160 #endif
1161 }
1162 
1163 /*
1164  * Note: this function is weak just to make it possible to exclude it from
1165  * the unpaged area.
1166  */
1167 void __weak boot_init_primary_late(unsigned long fdt,
1168 				   unsigned long tos_fw_config)
1169 {
1170 	init_external_dt(fdt);
1171 	init_tos_fw_config_dt(tos_fw_config);
1172 #ifdef CFG_CORE_SEL1_SPMC
1173 	tpm_map_log_area(get_tos_fw_config_dt());
1174 #else
1175 	tpm_map_log_area(get_external_dt());
1176 #endif
1177 	discover_nsec_memory();
1178 	update_external_dt();
1179 	configure_console_from_dt();
1180 
1181 	IMSG("OP-TEE version: %s", core_v_str);
1182 	if (IS_ENABLED(CFG_WARN_INSECURE)) {
1183 		IMSG("WARNING: This OP-TEE configuration might be insecure!");
1184 		IMSG("WARNING: Please check https://optee.readthedocs.io/en/latest/architecture/porting_guidelines.html");
1185 	}
1186 	IMSG("Primary CPU initializing");
1187 #ifdef CFG_CORE_ASLR
1188 	DMSG("Executing at offset %#lx with virtual load address %#"PRIxVA,
1189 	     (unsigned long)boot_mmu_config.map_offset, VCORE_START_VA);
1190 #endif
1191 	if (IS_ENABLED(CFG_MEMTAG))
1192 		DMSG("Memory tagging %s",
1193 		     memtag_is_enabled() ?  "enabled" : "disabled");
1194 
1195 	/* Check if platform needs NMFI workaround */
1196 	if (cpu_nmfi_enabled())	{
1197 		if (!IS_ENABLED(CFG_CORE_WORKAROUND_ARM_NMFI))
1198 			IMSG("WARNING: This ARM core has NMFI enabled, please apply workaround!");
1199 	} else {
1200 		if (IS_ENABLED(CFG_CORE_WORKAROUND_ARM_NMFI))
1201 			IMSG("WARNING: This ARM core does not have NMFI enabled, no need for workaround");
1202 	}
1203 
1204 	boot_primary_init_intc();
1205 	init_vfp_nsec();
1206 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
1207 		IMSG("Initializing virtualization support");
1208 		core_mmu_init_virtualization();
1209 	} else {
1210 		init_tee_runtime();
1211 	}
1212 	call_finalcalls();
1213 	IMSG("Primary CPU switching to normal world boot");
1214 }
1215 
1216 static void init_secondary_helper(unsigned long nsec_entry)
1217 {
1218 	IMSG("Secondary CPU %zu initializing", get_core_pos());
1219 
1220 	/*
1221 	 * Mask asynchronous exceptions before switching to the thread vector
1222 	 * as the thread handler requires those to be masked while
1223 	 * executing with the temporary stack. The thread subsystem also
1224 	 * asserts that the foreign interrupts are blocked when using most of
1225 	 * its functions.
1226 	 */
1227 	thread_set_exceptions(THREAD_EXCP_ALL);
1228 
1229 	secondary_init_cntfrq();
1230 	thread_init_per_cpu();
1231 	init_sec_mon(nsec_entry);
1232 	boot_secondary_init_intc();
1233 	init_vfp_sec();
1234 	init_vfp_nsec();
1235 
1236 	IMSG("Secondary CPU %zu switching to normal world boot", get_core_pos());
1237 }
1238 
1239 /*
1240  * Note: this function is weak just to make it possible to exclude it from
1241  * the unpaged area so that it lies in the init area.
1242  */
1243 void __weak boot_init_primary_early(unsigned long pageable_part,
1244 				    unsigned long nsec_entry __maybe_unused)
1245 {
1246 	unsigned long e = PADDR_INVALID;
1247 
1248 #if !defined(CFG_WITH_ARM_TRUSTED_FW)
1249 	e = nsec_entry;
1250 #endif
1251 
1252 	init_primary(pageable_part, e);
1253 }
1254 
1255 #if defined(CFG_WITH_ARM_TRUSTED_FW)
1256 unsigned long boot_cpu_on_handler(unsigned long a0 __maybe_unused,
1257 				  unsigned long a1 __unused)
1258 {
1259 	init_secondary_helper(PADDR_INVALID);
1260 	return 0;
1261 }
1262 #else
1263 void boot_init_secondary(unsigned long nsec_entry)
1264 {
1265 	init_secondary_helper(nsec_entry);
1266 }
1267 #endif
1268 
1269 #if defined(CFG_BOOT_SECONDARY_REQUEST)
1270 void boot_set_core_ns_entry(size_t core_idx, uintptr_t entry,
1271 			    uintptr_t context_id)
1272 {
1273 	ns_entry_contexts[core_idx].entry_point = entry;
1274 	ns_entry_contexts[core_idx].context_id = context_id;
1275 	dsb_ishst();
1276 }
1277 
1278 int boot_core_release(size_t core_idx, paddr_t entry)
1279 {
1280 	if (!core_idx || core_idx >= CFG_TEE_CORE_NB_CORE)
1281 		return -1;
1282 
1283 	ns_entry_contexts[core_idx].entry_point = entry;
1284 	dmb();
1285 	spin_table[core_idx] = 1;
1286 	dsb();
1287 	sev();
1288 
1289 	return 0;
1290 }
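/*
 * The barriers above implement a release protocol: dmb() makes the
 * entry_point write visible before the spin_table flag is set, and
 * dsb() + sev() make the flag update observable to cores waiting in
 * wfe() in boot_core_hpen() below, which pairs with a dmb() after
 * reading the flag.
 */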
1291 
1292 /*
1293  * Spin until a secondary core boot request arrives, then return the
1294  * secondary core's entry context.
1295  */
1296 struct ns_entry_context *boot_core_hpen(void)
1297 {
1298 #ifdef CFG_PSCI_ARM32
1299 	return &ns_entry_contexts[get_core_pos()];
1300 #else
1301 	do {
1302 		wfe();
1303 	} while (!spin_table[get_core_pos()]);
1304 	dmb();
1305 	return &ns_entry_contexts[get_core_pos()];
1306 #endif
1307 }
1308 #endif
1309 
1310 #if defined(CFG_CORE_ASLR)
1311 #if defined(CFG_DT)
1312 unsigned long __weak get_aslr_seed(void *fdt)
1313 {
1314 	int rc = 0;
1315 	const uint64_t *seed = NULL;
1316 	int offs = 0;
1317 	int len = 0;
1318 
1319 	if (!fdt) {
1320 		DMSG("No fdt");
1321 		goto err;
1322 	}
1323 
1324 	rc = fdt_check_header(fdt);
1325 	if (rc) {
1326 		DMSG("Bad fdt: %d", rc);
1327 		goto err;
1328 	}
1329 
1330 	offs = fdt_path_offset(fdt, "/secure-chosen");
1331 	if (offs < 0) {
1332 		DMSG("Cannot find /secure-chosen");
1333 		goto err;
1334 	}
1335 	seed = fdt_getprop(fdt, offs, "kaslr-seed", &len);
1336 	if (!seed || len != sizeof(*seed)) {
1337 		DMSG("Cannot find valid kaslr-seed");
1338 		goto err;
1339 	}
1340 
1341 	return fdt64_to_cpu(*seed);
1342 
1343 err:
1344 	/* Try platform implementation */
1345 	return plat_get_aslr_seed();
1346 }
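/*
 * A sketch of the DT fragment consumed above, with a hypothetical seed
 * value; the property is a single 64-bit big-endian number, hence the
 * len != sizeof(*seed) check:
 *
 *	secure-chosen {
 *		kaslr-seed = <0xdeadbeef 0xcafef00d>;
 *	};
 */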
1347 #else /*!CFG_DT*/
1348 unsigned long __weak get_aslr_seed(void *fdt __unused)
1349 {
1350 	/* Try platform implementation */
1351 	return plat_get_aslr_seed();
1352 }
1353 #endif /*!CFG_DT*/
1354 #endif /*CFG_CORE_ASLR*/
1355 
1356 #if defined(CFG_CORE_SEL2_SPMC) && defined(CFG_CORE_PHYS_RELOCATABLE)
1357 static void *get_fdt_from_boot_info(struct ffa_boot_info_header_1_1 *hdr)
1358 {
1359 	struct ffa_boot_info_1_1 *desc = NULL;
1360 	uint8_t content_fmt = 0;
1361 	uint8_t name_fmt = 0;
1362 	void *fdt = NULL;
1363 	int ret = 0;
1364 
1365 	if (hdr->signature != FFA_BOOT_INFO_SIGNATURE) {
1366 		EMSG("Bad boot info signature %#"PRIx32, hdr->signature);
1367 		panic();
1368 	}
1369 	if (hdr->version != FFA_BOOT_INFO_VERSION) {
1370 		EMSG("Bad boot info version %#"PRIx32, hdr->version);
1371 		panic();
1372 	}
1373 	if (hdr->desc_count != 1) {
1374 		EMSG("Bad boot info descriptor count %#"PRIx32,
1375 		     hdr->desc_count);
1376 		panic();
1377 	}
1378 	desc = (void *)((vaddr_t)hdr + hdr->desc_offset);
1379 	name_fmt = desc->flags & FFA_BOOT_INFO_FLAG_NAME_FORMAT_MASK;
1380 	if (name_fmt == FFA_BOOT_INFO_FLAG_NAME_FORMAT_STRING)
1381 		DMSG("Boot info descriptor name \"%16s\"", desc->name);
1382 	else if (name_fmt == FFA_BOOT_INFO_FLAG_NAME_FORMAT_UUID)
1383 		DMSG("Boot info descriptor UUID %pUl", (void *)desc->name);
1384 	else
1385 		DMSG("Boot info descriptor: unknown name format %"PRIu8,
1386 		     name_fmt);
1387 
1388 	content_fmt = (desc->flags & FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_MASK) >>
1389 		      FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_SHIFT;
1390 	if (content_fmt != FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_ADDR) {
1391 		EMSG("Bad boot info content format %"PRIu8", expected %u (address)",
1392 		     content_fmt, FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_ADDR);
1393 		panic();
1394 	}
1395 
1396 	fdt = (void *)(vaddr_t)desc->contents;
1397 	ret = fdt_check_full(fdt, desc->size);
1398 	if (ret < 0) {
1399 		EMSG("Invalid Device Tree at %p: error %d", fdt, ret);
1400 		panic();
1401 	}
1402 	return fdt;
1403 }
1404 
1405 static void get_sec_mem_from_manifest(void *fdt, paddr_t *base, size_t *size)
1406 {
1407 	int ret = 0;
1408 	uint64_t num = 0;
1409 
1410 	ret = fdt_node_check_compatible(fdt, 0, "arm,ffa-manifest-1.0");
1411 	if (ret < 0) {
1412 		EMSG("Invalid FF-A manifest at %p: error %d", fdt, ret);
1413 		panic();
1414 	}
1415 	ret = dt_getprop_as_number(fdt, 0, "load-address", &num);
1416 	if (ret < 0) {
1417 		EMSG("Can't read \"load-address\" from FF-A manifest at %p: error %d",
1418 		     fdt, ret);
1419 		panic();
1420 	}
1421 	*base = num;
1422 	/* "mem-size" is currently an undocumented extension to the spec. */
1423 	ret = dt_getprop_as_number(fdt, 0, "mem-size", &num);
1424 	if (ret < 0) {
1425 		EMSG("Can't read \"mem-size\" from FF-A manifest at %p: error %d",
1426 		     fdt, ret);
1427 		panic();
1428 	}
1429 	*size = num;
1430 }
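/*
 * A sketch of the manifest fragment read above, with hypothetical
 * address and size values:
 *
 *	/ {
 *		compatible = "arm,ffa-manifest-1.0";
 *		load-address = <0x0 0x0e100000>;
 *		mem-size = <0x0 0x00f00000>;
 *	};
 */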
1431 
1432 void __weak boot_save_boot_info(void *boot_info)
1433 {
1434 	void *fdt = NULL;
1435 	paddr_t base = 0;
1436 	size_t size = 0;
1437 
1438 	fdt = get_fdt_from_boot_info(boot_info);
1439 	get_sec_mem_from_manifest(fdt, &base, &size);
1440 	core_mmu_set_secure_memory(base, size);
1441 }
1442 #endif /*CFG_CORE_SEL2_SPMC && CFG_CORE_PHYS_RELOCATABLE*/
1443