xref: /optee_os/core/arch/arm/kernel/boot.c (revision f406e0d7745f4a18eecaed7958cbd705d97ab8df)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2015-2023, Linaro Limited
4  * Copyright (c) 2023, Arm Limited
5  */
6 
7 #include <arm.h>
8 #include <assert.h>
9 #include <compiler.h>
10 #include <config.h>
11 #include <console.h>
12 #include <crypto/crypto.h>
13 #include <drivers/gic.h>
14 #include <dt-bindings/interrupt-controller/arm-gic.h>
15 #include <ffa.h>
16 #include <initcall.h>
17 #include <inttypes.h>
18 #include <keep.h>
19 #include <kernel/asan.h>
20 #include <kernel/boot.h>
21 #include <kernel/dt.h>
22 #include <kernel/linker.h>
23 #include <kernel/misc.h>
24 #include <kernel/panic.h>
25 #include <kernel/tee_misc.h>
26 #include <kernel/thread.h>
27 #include <kernel/tpm.h>
28 #include <libfdt.h>
29 #include <malloc.h>
30 #include <memtag.h>
31 #include <mm/core_memprot.h>
32 #include <mm/core_mmu.h>
33 #include <mm/fobj.h>
34 #include <mm/tee_mm.h>
35 #include <mm/tee_pager.h>
36 #include <sm/psci.h>
37 #include <stdio.h>
38 #include <trace.h>
39 #include <utee_defines.h>
40 #include <util.h>
41 
42 #include <platform_config.h>
43 
44 #if !defined(CFG_WITH_ARM_TRUSTED_FW)
45 #include <sm/sm.h>
46 #endif
47 
48 #if defined(CFG_WITH_VFP)
49 #include <kernel/vfp.h>
50 #endif
51 
52 /*
53  * In this file we're using unsigned long to represent physical pointers as
54  * they are received in a single register when OP-TEE is initially entered.
55  * This limits 32-bit systems to only make use of the lower 32 bits
56  * of a physical address for initial parameters.
57  *
58  * 64-bit systems on the other hand can use full 64-bit physical pointers.
59  */
60 #define PADDR_INVALID		ULONG_MAX
61 
62 #if defined(CFG_BOOT_SECONDARY_REQUEST)
63 struct ns_entry_context {
64 	uintptr_t entry_point;
65 	uintptr_t context_id;
66 };
67 struct ns_entry_context ns_entry_contexts[CFG_TEE_CORE_NB_CORE];
68 static uint32_t spin_table[CFG_TEE_CORE_NB_CORE];
69 #endif
70 
71 #ifdef CFG_BOOT_SYNC_CPU
72 /*
73  * Array used during boot to synchronize the CPUs.
74  * When an entry is 0, the corresponding CPU has not started.
75  * When 1, it has started.
76  */
77 uint32_t sem_cpu_sync[CFG_TEE_CORE_NB_CORE];
78 DECLARE_KEEP_PAGER(sem_cpu_sync);
79 #endif
80 
81 static void *manifest_dt __nex_bss;
82 static unsigned long boot_arg_fdt __nex_bss;
83 static unsigned long boot_arg_nsec_entry __nex_bss;
84 static unsigned long boot_arg_pageable_part __nex_bss;
85 
86 #ifdef CFG_SECONDARY_INIT_CNTFRQ
87 static uint32_t cntfrq;
88 #endif
89 
90 /* May be overridden in plat-$(PLATFORM)/main.c */
91 __weak void plat_primary_init_early(void)
92 {
93 }
94 DECLARE_KEEP_PAGER(plat_primary_init_early);
95 
96 /* May be overridden in plat-$(PLATFORM)/main.c */
97 __weak void boot_primary_init_intc(void)
98 {
99 }
100 
101 /* May be overridden in plat-$(PLATFORM)/main.c */
102 __weak void boot_secondary_init_intc(void)
103 {
104 }
105 
106 /* May be overridden in plat-$(PLATFORM)/main.c */
107 __weak unsigned long plat_get_aslr_seed(void)
108 {
109 	DMSG("Warning: no ASLR seed");
110 
111 	return 0;
112 }
113 
114 #if defined(_CFG_CORE_STACK_PROTECTOR) || defined(CFG_WITH_STACK_CANARIES)
115 /* Generate random stack canary value on boot up */
116 __weak void plat_get_random_stack_canaries(void *buf, size_t ncan, size_t size)
117 {
118 	TEE_Result ret = TEE_ERROR_GENERIC;
119 	size_t i = 0;
120 
121 	assert(buf && ncan && size);
122 
123 	/*
124 	 * With virtualization the RNG is not initialized in the Nexus core,
125 	 * so it must be overridden with a platform-specific implementation.
126 	 */
127 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
128 		IMSG("WARNING: Using fixed value for stack canary");
129 		memset(buf, 0xab, ncan * size);
130 		goto out;
131 	}
132 
133 	ret = crypto_rng_read(buf, ncan * size);
134 	if (ret != TEE_SUCCESS)
135 		panic("Failed to generate random stack canary");
136 
137 out:
138 	/* Leave a null byte in each canary to block string-based exploits */
139 	for (i = 0; i < ncan; i++)
140 		*((uint8_t *)buf + size * i) = 0;
141 }
142 #endif /* _CFG_CORE_STACK_PROTECTOR || CFG_WITH_STACK_CANARIES */
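
/*
 * Illustrative sketch, not part of this file: a platform whose TRNG is
 * usable this early could override the weak function above from
 * plat-$(PLATFORM)/main.c. plat_trng_read() is a hypothetical helper,
 * shown only to make the override contract concrete.
 */
#if 0
void plat_get_random_stack_canaries(void *buf, size_t ncan, size_t size)
{
	size_t i = 0;

	if (plat_trng_read(buf, ncan * size))
		panic("TRNG failed");

	/* Keep the null byte convention relied on by callers */
	for (i = 0; i < ncan; i++)
		*((uint8_t *)buf + size * i) = 0;
}
#endif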
143 
144 /*
145  * This function is called as a guard after each SMC call which is not
146  * supposed to return.
147  */
148 void __panic_at_smc_return(void)
149 {
150 	panic();
151 }
152 
153 #if defined(CFG_WITH_ARM_TRUSTED_FW)
154 void init_sec_mon(unsigned long nsec_entry __maybe_unused)
155 {
156 	assert(nsec_entry == PADDR_INVALID);
157 	/* Do nothing as we don't have a secure monitor */
158 }
159 #else
160 /* May be overridden in plat-$(PLATFORM)/main.c */
161 __weak void init_sec_mon(unsigned long nsec_entry)
162 {
163 	struct sm_nsec_ctx *nsec_ctx;
164 
165 	assert(nsec_entry != PADDR_INVALID);
166 
167 	/* Initialize secure monitor */
168 	nsec_ctx = sm_get_nsec_ctx();
169 	nsec_ctx->mon_lr = nsec_entry;
170 	nsec_ctx->mon_spsr = CPSR_MODE_SVC | CPSR_I;
171 	if (nsec_entry & 1)
172 		nsec_ctx->mon_spsr |= CPSR_T;
173 }
174 #endif
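
/*
 * Note on the bit-0 convention above (informative): a non-secure entry
 * address with bit 0 set, e.g. 0x80000001, resumes the normal world at
 * 0x80000000 in Thumb state, since CPSR_T is then set in mon_spsr.
 */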
175 
176 #if defined(CFG_WITH_ARM_TRUSTED_FW)
177 static void init_vfp_nsec(void)
178 {
179 }
180 #else
181 static void init_vfp_nsec(void)
182 {
183 	/* Normal world can use CP10 and CP11 (SIMD/VFP) */
184 	write_nsacr(read_nsacr() | NSACR_CP10 | NSACR_CP11);
185 }
186 #endif
187 
188 #if defined(CFG_WITH_VFP)
189 
190 #ifdef ARM32
191 static void init_vfp_sec(void)
192 {
193 	uint32_t cpacr = read_cpacr();
194 
195 	/*
196 	 * Enable Advanced SIMD functionality.
197 	 * Enable use of D16-D31 of the Floating-point Extension register
198 	 * file.
199 	 */
200 	cpacr &= ~(CPACR_ASEDIS | CPACR_D32DIS);
201 	/*
202 	 * Enable usage of CP10 and CP11 (SIMD/VFP) in both kernel and user
203 	 * mode.
204 	 */
205 	cpacr |= CPACR_CP(10, CPACR_CP_ACCESS_FULL);
206 	cpacr |= CPACR_CP(11, CPACR_CP_ACCESS_FULL);
207 	write_cpacr(cpacr);
208 }
209 #endif /* ARM32 */
210 
211 #ifdef ARM64
212 static void init_vfp_sec(void)
213 {
214 	/* Not using VFP until thread_kernel_enable_vfp() */
215 	vfp_disable();
216 }
217 #endif /* ARM64 */
218 
219 #else /* CFG_WITH_VFP */
220 
221 static void init_vfp_sec(void)
222 {
223 	/* Not using VFP */
224 }
225 #endif
226 
227 #ifdef CFG_SECONDARY_INIT_CNTFRQ
228 static void primary_save_cntfrq(void)
229 {
230 	assert(cntfrq == 0);
231 
232 	/*
233 	 * CNTFRQ should be initialized on the primary CPU by a
234 	 * previous boot stage
235 	 */
236 	cntfrq = read_cntfrq();
237 }
238 
239 static void secondary_init_cntfrq(void)
240 {
241 	assert(cntfrq != 0);
242 	write_cntfrq(cntfrq);
243 }
244 #else /* CFG_SECONDARY_INIT_CNTFRQ */
245 static void primary_save_cntfrq(void)
246 {
247 }
248 
249 static void secondary_init_cntfrq(void)
250 {
251 }
252 #endif
253 
254 #ifdef CFG_CORE_SANITIZE_KADDRESS
255 static void init_run_constructors(void)
256 {
257 	const vaddr_t *ctor;
258 
259 	for (ctor = &__ctor_list; ctor < &__ctor_end; ctor++)
260 		((void (*)(void))(*ctor))();
261 }
262 
263 static void init_asan(void)
264 {
265 
266 	/*
267 	 * CFG_ASAN_SHADOW_OFFSET is also supplied as
268 	 * -fasan-shadow-offset=$(CFG_ASAN_SHADOW_OFFSET) to the compiler.
269 	 * Since all the values needed to calculate
270 	 * CFG_ASAN_SHADOW_OFFSET aren't available to make, we need to
271 	 * calculate it in advance and hard code it into the platform
272 	 * conf.mk. Here, where we have all the needed values, we double
273 	 * check that the compiler was supplied the correct value.
274 	 */
275 
276 #define __ASAN_SHADOW_START \
277 	ROUNDUP(TEE_RAM_START + (TEE_RAM_VA_SIZE * 8) / 9 - 8, 8)
278 	assert(__ASAN_SHADOW_START == (vaddr_t)&__asan_shadow_start);
279 #define __CFG_ASAN_SHADOW_OFFSET \
280 	(__ASAN_SHADOW_START - (TEE_RAM_START / 8))
281 	COMPILE_TIME_ASSERT(CFG_ASAN_SHADOW_OFFSET == __CFG_ASAN_SHADOW_OFFSET);
282 #undef __ASAN_SHADOW_START
283 #undef __CFG_ASAN_SHADOW_OFFSET
284 
285 	/*
286 	 * Assign the area covered by the shadow memory: everything from the
287 	 * load address up to the beginning of the shadow area.
288 	 */
289 	asan_set_shadowed((void *)TEE_LOAD_ADDR, &__asan_shadow_start);
290 
291 	/*
292 	 * Add access to areas that aren't opened automatically by a
293 	 * constructor.
294 	 */
295 	asan_tag_access(&__ctor_list, &__ctor_end);
296 	asan_tag_access(__rodata_start, __rodata_end);
297 #ifdef CFG_WITH_PAGER
298 	asan_tag_access(__pageable_start, __pageable_end);
299 #endif /*CFG_WITH_PAGER*/
300 	asan_tag_access(__nozi_start, __nozi_end);
301 #ifdef ARM32
302 	asan_tag_access(__exidx_start, __exidx_end);
303 	asan_tag_access(__extab_start, __extab_end);
304 #endif
305 
306 	init_run_constructors();
307 
308 	/* Everything is tagged correctly, let's start address sanitizing. */
309 	asan_start();
310 }
311 #else /*CFG_CORE_SANITIZE_KADDRESS*/
312 static void init_asan(void)
313 {
314 }
315 #endif /*CFG_CORE_SANITIZE_KADDRESS*/
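
/*
 * A minimal sketch of the address-to-shadow mapping the checks above
 * verify, assuming the usual KASan scheme of one shadow byte per eight
 * bytes of memory (asan_va_to_shadow() is a hypothetical name):
 */
#if 0
static uint8_t *asan_va_to_shadow(vaddr_t va)
{
	/* CFG_ASAN_SHADOW_OFFSET == __asan_shadow_start - TEE_RAM_START / 8 */
	return (uint8_t *)(CFG_ASAN_SHADOW_OFFSET + va / 8);
}
#endif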
316 
317 #if defined(CFG_MEMTAG)
318 /* Called from entry_a64.S only when MEMTAG is configured */
319 void boot_init_memtag(void)
320 {
321 	paddr_t base = 0;
322 	paddr_size_t size = 0;
323 
324 	memtag_init_ops(feat_mte_implemented());
325 	core_mmu_get_secure_memory(&base, &size);
326 	memtag_set_tags((void *)(vaddr_t)base, size, 0);
327 }
328 #endif
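
/*
 * Background for the code above (informative): MTE assigns one 4-bit tag
 * per 16-byte granule of memory. boot_init_memtag() therefore wipes the
 * tags of all secure memory to a known value (0) so that later
 * allocations can assign their own tags and get reliable tag-mismatch
 * faults.
 */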
329 
330 #ifdef CFG_WITH_PAGER
331 
332 #ifdef CFG_CORE_SANITIZE_KADDRESS
333 static void carve_out_asan_mem(tee_mm_pool_t *pool)
334 {
335 	const size_t s = pool->hi - pool->lo;
336 	tee_mm_entry_t *mm;
337 	paddr_t apa = ASAN_MAP_PA;
338 	size_t asz = ASAN_MAP_SZ;
339 
340 	if (core_is_buffer_outside(apa, asz, pool->lo, s))
341 		return;
342 
343 	/* Reserve the shadow area */
344 	if (!core_is_buffer_inside(apa, asz, pool->lo, s)) {
345 		if (apa < pool->lo) {
346 			/*
347 			 * ASAN buffer is overlapping with the beginning of
348 			 * the pool.
349 			 */
350 			asz -= pool->lo - apa;
351 			apa = pool->lo;
352 		} else {
353 			/*
354 			 * ASAN buffer is overlapping with the end of the
355 			 * pool.
356 			 */
357 			asz = pool->hi - apa;
358 		}
359 	}
360 	mm = tee_mm_alloc2(pool, apa, asz);
361 	assert(mm);
362 }
363 #else
364 static void carve_out_asan_mem(tee_mm_pool_t *pool __unused)
365 {
366 }
367 #endif
368 
369 static void print_pager_pool_size(void)
370 {
371 	struct tee_pager_stats __maybe_unused stats;
372 
373 	tee_pager_get_stats(&stats);
374 	IMSG("Pager pool size: %zukB",
375 		stats.npages_all * SMALL_PAGE_SIZE / 1024);
376 }
377 
378 static void init_vcore(tee_mm_pool_t *mm_vcore)
379 {
380 	const vaddr_t begin = VCORE_START_VA;
381 	size_t size = TEE_RAM_VA_SIZE;
382 
383 #ifdef CFG_CORE_SANITIZE_KADDRESS
384 	/* Carve out asan memory, flat mapped after core memory */
385 	if (begin + size > ASAN_SHADOW_PA)
386 		size = ASAN_MAP_PA - begin;
387 #endif
388 
389 	if (!tee_mm_init(mm_vcore, begin, size, SMALL_PAGE_SHIFT,
390 			 TEE_MM_POOL_NO_FLAGS))
391 		panic("tee_mm_vcore init failed");
392 }
393 
394 /*
395  * With CFG_CORE_ASLR=y the init part is relocated very early during boot.
396  * The init part is also paged just as the rest of the normal paged code, with
397  * the difference that it's preloaded during boot. When the backing store
398  * is configured the entire paged binary is copied in place and then also
399  * the init part. Since the init part has been relocated (references to
400  * addresses updated to compensate for the new load address) this has to be
401  * undone for the hashes of those pages to match with the original binary.
402  *
403  * If CFG_CORE_ASLR=n, nothing needs to be done as the code/ro pages are
404  * unchanged.
405  */
406 static void undo_init_relocation(uint8_t *paged_store __maybe_unused)
407 {
408 #ifdef CFG_CORE_ASLR
409 	unsigned long *ptr = NULL;
410 	const uint32_t *reloc = NULL;
411 	const uint32_t *reloc_end = NULL;
412 	unsigned long offs = boot_mmu_config.map_offset;
413 	const struct boot_embdata *embdata = (const void *)__init_end;
414 	vaddr_t addr_end = (vaddr_t)__init_end - offs - TEE_LOAD_ADDR;
415 	vaddr_t addr_start = (vaddr_t)__init_start - offs - TEE_LOAD_ADDR;
416 
417 	reloc = (const void *)((vaddr_t)embdata + embdata->reloc_offset);
418 	reloc_end = reloc + embdata->reloc_len / sizeof(*reloc);
419 
420 	for (; reloc < reloc_end; reloc++) {
421 		if (*reloc < addr_start)
422 			continue;
423 		if (*reloc >= addr_end)
424 			break;
425 		ptr = (void *)(paged_store + *reloc - addr_start);
426 		*ptr -= offs;
427 	}
428 #endif
429 }
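
/*
 * For contrast, a simplified sketch of the forward operation undone
 * above: early boot relocation adds the ASLR map offset to every address
 * recorded in the relocation table (load_base is a hypothetical name for
 * the address the image was loaded at):
 */
#if 0
	for (; reloc < reloc_end; reloc++) {
		ptr = (void *)(load_base + *reloc);
		*ptr += offs;	/* undo_init_relocation() subtracts this */
	}
#endif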
430 
431 static struct fobj *ro_paged_alloc(tee_mm_entry_t *mm, void *hashes,
432 				   void *store)
433 {
434 	const unsigned int num_pages = tee_mm_get_bytes(mm) / SMALL_PAGE_SIZE;
435 #ifdef CFG_CORE_ASLR
436 	unsigned int reloc_offs = (vaddr_t)__pageable_start - VCORE_START_VA;
437 	const struct boot_embdata *embdata = (const void *)__init_end;
438 	const void *reloc = __init_end + embdata->reloc_offset;
439 
440 	return fobj_ro_reloc_paged_alloc(num_pages, hashes, reloc_offs,
441 					 reloc, embdata->reloc_len, store);
442 #else
443 	return fobj_ro_paged_alloc(num_pages, hashes, store);
444 #endif
445 }
446 
447 static void init_runtime(unsigned long pageable_part)
448 {
449 	size_t n;
450 	size_t init_size = (size_t)(__init_end - __init_start);
451 	size_t pageable_start = (size_t)__pageable_start;
452 	size_t pageable_end = (size_t)__pageable_end;
453 	size_t pageable_size = pageable_end - pageable_start;
454 	vaddr_t tzsram_end = TZSRAM_BASE + TZSRAM_SIZE - TEE_LOAD_ADDR +
455 			     VCORE_START_VA;
456 	size_t hash_size = (pageable_size / SMALL_PAGE_SIZE) *
457 			   TEE_SHA256_HASH_SIZE;
458 	const struct boot_embdata *embdata = (const void *)__init_end;
459 	const void *tmp_hashes = NULL;
460 	tee_mm_entry_t *mm = NULL;
461 	struct fobj *fobj = NULL;
462 	uint8_t *paged_store = NULL;
463 	uint8_t *hashes = NULL;
464 
465 	assert(pageable_size % SMALL_PAGE_SIZE == 0);
466 	assert(embdata->total_len >= embdata->hashes_offset +
467 				     embdata->hashes_len);
468 	assert(hash_size == embdata->hashes_len);
469 
470 	tmp_hashes = __init_end + embdata->hashes_offset;
471 
472 	init_asan();
473 
474 	/* Add heap2 first as heap1 may be too small for the initial bget pool */
475 	malloc_add_pool(__heap2_start, __heap2_end - __heap2_start);
476 	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
477 
478 	/*
479 	 * This needs to be initialized early to support address lookup
480 	 * in MEM_AREA_TEE_RAM
481 	 */
482 	tee_pager_early_init();
483 
484 	hashes = malloc(hash_size);
485 	IMSG_RAW("\n");
486 	IMSG("Pager is enabled. Hashes: %zu bytes", hash_size);
487 	assert(hashes);
488 	asan_memcpy_unchecked(hashes, tmp_hashes, hash_size);
489 
490 	/*
491 	 * Need tee_mm_sec_ddr initialized to be able to allocate secure
492 	 * DDR below.
493 	 */
494 	core_mmu_init_ta_ram();
495 
496 	carve_out_asan_mem(&tee_mm_sec_ddr);
497 
498 	mm = tee_mm_alloc(&tee_mm_sec_ddr, pageable_size);
499 	assert(mm);
500 	paged_store = phys_to_virt(tee_mm_get_smem(mm), MEM_AREA_TA_RAM,
501 				   pageable_size);
502 	/*
503 	 * Load pageable part in the dedicated allocated area:
504 	 * - Move pageable non-init part into pageable area. Note bootloader
505 	 *   may have loaded it anywhere in TA RAM hence use memmove().
506 	 * - Copy pageable init part from current location into pageable area.
507 	 */
508 	memmove(paged_store + init_size,
509 		phys_to_virt(pageable_part,
510 			     core_mmu_get_type_by_pa(pageable_part),
511 			     __pageable_part_end - __pageable_part_start),
512 		__pageable_part_end - __pageable_part_start);
513 	asan_memcpy_unchecked(paged_store, __init_start, init_size);
514 	/*
515 	 * Undo eventual relocation for the init part so the hash checks
516 	 * can pass.
517 	 */
518 	undo_init_relocation(paged_store);
519 
520 	/* Check that the hashes of what's in the pageable area are OK */
521 	DMSG("Checking hashes of pageable area");
522 	for (n = 0; (n * SMALL_PAGE_SIZE) < pageable_size; n++) {
523 		const uint8_t *hash = hashes + n * TEE_SHA256_HASH_SIZE;
524 		const uint8_t *page = paged_store + n * SMALL_PAGE_SIZE;
525 		TEE_Result res;
526 
527 		DMSG("hash pg_idx %zu hash %p page %p", n, hash, page);
528 		res = hash_sha256_check(hash, page, SMALL_PAGE_SIZE);
529 		if (res != TEE_SUCCESS) {
530 			EMSG("Hash failed for page %zu at %p: res 0x%x",
531 			     n, (void *)page, res);
532 			panic();
533 		}
534 	}
535 
536 	/*
537 	 * Assert prepaged init sections are page aligned so that nothing
538 	 * trails uninitialized at the end of the premapped init area.
539 	 */
540 	assert(!(init_size & SMALL_PAGE_MASK));
541 
542 	/*
543 	 * Initialize the virtual memory pool used for main_mmu_l2_ttb which
544 	 * is supplied to tee_pager_init() below.
545 	 */
546 	init_vcore(&tee_mm_vcore);
547 
548 	/*
549 	 * Assign the alias area for the pager at the end of the small page
550 	 * block that the rest of the binary is loaded into. We're taking
551 	 * more than needed, but we're guaranteed to not need more than the
552 	 * physical amount of TZSRAM.
553 	 */
554 	mm = tee_mm_alloc2(&tee_mm_vcore,
555 			   (vaddr_t)tee_mm_vcore.lo +
556 			   tee_mm_vcore.size - TZSRAM_SIZE,
557 			   TZSRAM_SIZE);
558 	assert(mm);
559 	tee_pager_set_alias_area(mm);
560 
561 	/*
562 	 * Claim virtual memory which isn't paged.
563 	 * Linear memory (flat map core memory) ends there.
564 	 */
565 	mm = tee_mm_alloc2(&tee_mm_vcore, VCORE_UNPG_RX_PA,
566 			   (vaddr_t)(__pageable_start - VCORE_UNPG_RX_PA));
567 	assert(mm);
568 
569 	/*
570 	 * Allocate virtual memory for the pageable area and let the pager
571 	 * take charge of all the pages already assigned to that memory.
572 	 */
573 	mm = tee_mm_alloc2(&tee_mm_vcore, (vaddr_t)__pageable_start,
574 			   pageable_size);
575 	assert(mm);
576 	fobj = ro_paged_alloc(mm, hashes, paged_store);
577 	assert(fobj);
578 	tee_pager_add_core_region(tee_mm_get_smem(mm), PAGED_REGION_TYPE_RO,
579 				  fobj);
580 	fobj_put(fobj);
581 
582 	tee_pager_add_pages(pageable_start, init_size / SMALL_PAGE_SIZE, false);
583 	tee_pager_add_pages(pageable_start + init_size,
584 			    (pageable_size - init_size) / SMALL_PAGE_SIZE,
585 			    true);
586 	if (pageable_end < tzsram_end)
587 		tee_pager_add_pages(pageable_end, (tzsram_end - pageable_end) /
588 						   SMALL_PAGE_SIZE, true);
589 
590 	/*
591 	 * There may be physical pages in TZSRAM before the core load address.
592 	 * These pages can be added to the physical pages pool of the pager.
593 	 * This setup may happen when the secure bootloader runs in TZSRAM
594 	 * and its memory can be reused by OP-TEE once boot stages complete.
595 	 */
596 	tee_pager_add_pages(tee_mm_vcore.lo,
597 			(VCORE_UNPG_RX_PA - tee_mm_vcore.lo) / SMALL_PAGE_SIZE,
598 			true);
599 
600 	print_pager_pool_size();
601 }
602 #else
603 
604 static void init_runtime(unsigned long pageable_part __unused)
605 {
606 	init_asan();
607 
608 	/*
609 	 * By default the whole of OP-TEE uses malloc, so we need to
610 	 * initialize it early. But when virtualization is enabled, malloc
611 	 * is used only by the TEE runtime, so it should be initialized
612 	 * later, separately for every virtual partition. Core code uses
613 	 * nex_malloc instead.
614 	 */
615 #ifdef CFG_NS_VIRTUALIZATION
616 	nex_malloc_add_pool(__nex_heap_start, __nex_heap_end -
617 					      __nex_heap_start);
618 #else
619 	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
620 #endif
621 
622 	IMSG_RAW("\n");
623 }
624 #endif
625 
626 #if defined(CFG_DT)
627 static int add_optee_dt_node(struct dt_descriptor *dt)
628 {
629 	int offs;
630 	int ret;
631 
632 	if (fdt_path_offset(dt->blob, "/firmware/optee") >= 0) {
633 		DMSG("OP-TEE Device Tree node already exists!");
634 		return 0;
635 	}
636 
637 	offs = fdt_path_offset(dt->blob, "/firmware");
638 	if (offs < 0) {
639 		offs = add_dt_path_subnode(dt, "/", "firmware");
640 		if (offs < 0)
641 			return -1;
642 	}
643 
644 	offs = fdt_add_subnode(dt->blob, offs, "optee");
645 	if (offs < 0)
646 		return -1;
647 
648 	ret = fdt_setprop_string(dt->blob, offs, "compatible",
649 				 "linaro,optee-tz");
650 	if (ret < 0)
651 		return -1;
652 	ret = fdt_setprop_string(dt->blob, offs, "method", "smc");
653 	if (ret < 0)
654 		return -1;
655 
656 	if (CFG_CORE_ASYNC_NOTIF_GIC_INTID) {
657 		/*
658 		 * The format of the interrupt property is defined by the
659 		 * an Arm GIC v1, v2 or v3 so we must be compatible with
660 		 * one Arm GIC v1, v2 or v3 so we must be compatible with
661 		 * these.
662 		 *
663 		 * An SPI type of interrupt is indicated with a 0 in the
664 		 * first cell. A PPI type is indicated with value 1.
665 		 *
666 		 * The interrupt number goes in the second cell where
667 		 * SPIs range from 0 to 987 and PPIs range from 0 to 15.
668 		 *
669 		 * Flags are passed in the third cell.
670 		 */
671 		uint32_t itr_trigger = 0;
672 		uint32_t itr_type = 0;
673 		uint32_t itr_id = 0;
674 		uint32_t val[3] = { };
675 
676 		/* PPIs are visible only in the current CPU cluster */
677 		static_assert(!CFG_CORE_ASYNC_NOTIF_GIC_INTID ||
678 			      (CFG_CORE_ASYNC_NOTIF_GIC_INTID >=
679 			       GIC_SPI_BASE) ||
680 			      ((CFG_TEE_CORE_NB_CORE <= 8) &&
681 			       (CFG_CORE_ASYNC_NOTIF_GIC_INTID >=
682 				GIC_PPI_BASE)));
683 
684 		if (CFG_CORE_ASYNC_NOTIF_GIC_INTID >= GIC_SPI_BASE) {
685 			itr_type = GIC_SPI;
686 			itr_id = CFG_CORE_ASYNC_NOTIF_GIC_INTID - GIC_SPI_BASE;
687 			itr_trigger = IRQ_TYPE_EDGE_RISING;
688 		} else {
689 			itr_type = GIC_PPI;
690 			itr_id = CFG_CORE_ASYNC_NOTIF_GIC_INTID - GIC_PPI_BASE;
691 			itr_trigger = IRQ_TYPE_EDGE_RISING |
692 				      GIC_CPU_MASK_SIMPLE(CFG_TEE_CORE_NB_CORE);
693 		}
694 
695 		val[0] = TEE_U32_TO_BIG_ENDIAN(itr_type);
696 		val[1] = TEE_U32_TO_BIG_ENDIAN(itr_id);
697 		val[2] = TEE_U32_TO_BIG_ENDIAN(itr_trigger);
698 
699 		ret = fdt_setprop(dt->blob, offs, "interrupts", val,
700 				  sizeof(val));
701 		if (ret < 0)
702 			return -1;
703 	}
704 	return 0;
705 }
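
/*
 * Illustrative result of add_optee_dt_node() for a GIC SPI (the interrupt
 * number depends on CFG_CORE_ASYNC_NOTIF_GIC_INTID):
 *
 *	firmware {
 *		optee {
 *			compatible = "linaro,optee-tz";
 *			method = "smc";
 *			interrupts = <0 nnn 1>;
 *		};
 *	};
 *
 * where <0 nnn 1> encodes GIC_SPI, the SPI number and
 * IRQ_TYPE_EDGE_RISING respectively.
 */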
706 
707 #ifdef CFG_PSCI_ARM32
708 static int append_psci_compatible(void *fdt, int offs, const char *str)
709 {
710 	return fdt_appendprop(fdt, offs, "compatible", str, strlen(str) + 1);
711 }
712 
713 static int dt_add_psci_node(struct dt_descriptor *dt)
714 {
715 	int offs;
716 
717 	if (fdt_path_offset(dt->blob, "/psci") >= 0) {
718 		DMSG("PSCI Device Tree node already exists!");
719 		return 0;
720 	}
721 
722 	offs = add_dt_path_subnode(dt, "/", "psci");
723 	if (offs < 0)
724 		return -1;
725 	if (append_psci_compatible(dt->blob, offs, "arm,psci-1.0"))
726 		return -1;
727 	if (append_psci_compatible(dt->blob, offs, "arm,psci-0.2"))
728 		return -1;
729 	if (append_psci_compatible(dt->blob, offs, "arm,psci"))
730 		return -1;
731 	if (fdt_setprop_string(dt->blob, offs, "method", "smc"))
732 		return -1;
733 	if (fdt_setprop_u32(dt->blob, offs, "cpu_suspend", PSCI_CPU_SUSPEND))
734 		return -1;
735 	if (fdt_setprop_u32(dt->blob, offs, "cpu_off", PSCI_CPU_OFF))
736 		return -1;
737 	if (fdt_setprop_u32(dt->blob, offs, "cpu_on", PSCI_CPU_ON))
738 		return -1;
739 	if (fdt_setprop_u32(dt->blob, offs, "sys_poweroff", PSCI_SYSTEM_OFF))
740 		return -1;
741 	if (fdt_setprop_u32(dt->blob, offs, "sys_reset", PSCI_SYSTEM_RESET))
742 		return -1;
743 	return 0;
744 }
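
/*
 * Illustrative shape of the node created above:
 *
 *	psci {
 *		compatible = "arm,psci-1.0", "arm,psci-0.2", "arm,psci";
 *		method = "smc";
 *		cpu_suspend = <PSCI_CPU_SUSPEND>;
 *		cpu_off = <PSCI_CPU_OFF>;
 *		cpu_on = <PSCI_CPU_ON>;
 *		sys_poweroff = <PSCI_SYSTEM_OFF>;
 *		sys_reset = <PSCI_SYSTEM_RESET>;
 *	};
 */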
745 
746 static int check_node_compat_prefix(struct dt_descriptor *dt, int offs,
747 				    const char *prefix)
748 {
749 	const size_t prefix_len = strlen(prefix);
750 	size_t l;
751 	int plen;
752 	const char *prop;
753 
754 	prop = fdt_getprop(dt->blob, offs, "compatible", &plen);
755 	if (!prop)
756 		return -1;
757 
758 	while (plen > 0) {
759 		if (memcmp(prop, prefix, prefix_len) == 0)
760 			return 0; /* match */
761 
762 		l = strlen(prop) + 1;
763 		prop += l;
764 		plen -= l;
765 	}
766 
767 	return -1;
768 }
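
/*
 * Usage note: with compatible = "arm,cortex-a53", "arm,armv8" the loop
 * above matches the "arm,cortex-a" prefix against the first string and
 * returns 0.
 */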
769 
770 static int dt_add_psci_cpu_enable_methods(struct dt_descriptor *dt)
771 {
772 	int offs = 0;
773 
774 	while (1) {
775 		offs = fdt_next_node(dt->blob, offs, NULL);
776 		if (offs < 0)
777 			break;
778 		if (fdt_getprop(dt->blob, offs, "enable-method", NULL))
779 			continue; /* already set */
780 		if (check_node_compat_prefix(dt, offs, "arm,cortex-a"))
781 			continue; /* no compatible */
782 		if (fdt_setprop_string(dt->blob, offs, "enable-method", "psci"))
783 			return -1;
784 		/* Need to restart scanning as offsets may have changed */
785 		offs = 0;
786 	}
787 	return 0;
788 }
789 
790 static int config_psci(struct dt_descriptor *dt)
791 {
792 	if (dt_add_psci_node(dt))
793 		return -1;
794 	return dt_add_psci_cpu_enable_methods(dt);
795 }
796 #else
797 static int config_psci(struct dt_descriptor *dt __unused)
798 {
799 	return 0;
800 }
801 #endif /*CFG_PSCI_ARM32*/
802 
803 #ifdef CFG_CORE_DYN_SHM
804 static uint64_t get_dt_val_and_advance(const void *data, size_t *offs,
805 				       uint32_t cell_size)
806 {
807 	uint64_t rv = 0;
808 
809 	if (cell_size == 1) {
810 		uint32_t v;
811 
812 		memcpy(&v, (const uint8_t *)data + *offs, sizeof(v));
813 		*offs += sizeof(v);
814 		rv = fdt32_to_cpu(v);
815 	} else {
816 		uint64_t v;
817 
818 		memcpy(&v, (const uint8_t *)data + *offs, sizeof(v));
819 		*offs += sizeof(v);
820 		rv = fdt64_to_cpu(v);
821 	}
822 
823 	return rv;
824 }
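
/*
 * Usage sketch (hypothetical property data): parsing one "reg" entry of
 * a node under a root with #address-cells = <2> and #size-cells = <2>:
 */
#if 0
	size_t offs = 0;
	uint64_t addr = get_dt_val_and_advance(prop, &offs, 2);
	uint64_t size = get_dt_val_and_advance(prop, &offs, 2);
	/* offs has advanced by 16 bytes: two big-endian 64-bit cells */
#endif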
825 
826 /*
827  * Find all non-secure memory from DT. Memory marked inaccessible by Secure
828  * World is ignored since it cannot be mapped for use as dynamic shared
829  * memory.
830  */
831 static int get_nsec_memory_helper(void *fdt, struct core_mmu_phys_mem *mem)
832 {
833 	const uint8_t *prop = NULL;
834 	uint64_t a = 0;
835 	uint64_t l = 0;
836 	size_t prop_offs = 0;
837 	size_t prop_len = 0;
838 	int elems_total = 0;
839 	int addr_size = 0;
840 	int len_size = 0;
841 	int offs = 0;
842 	size_t n = 0;
843 	int len = 0;
844 
845 	addr_size = fdt_address_cells(fdt, 0);
846 	if (addr_size < 0)
847 		return 0;
848 
849 	len_size = fdt_size_cells(fdt, 0);
850 	if (len_size < 0)
851 		return 0;
852 
853 	while (true) {
854 		offs = fdt_node_offset_by_prop_value(fdt, offs, "device_type",
855 						     "memory",
856 						     sizeof("memory"));
857 		if (offs < 0)
858 			break;
859 
860 		if (fdt_get_status(fdt, offs) != (DT_STATUS_OK_NSEC |
861 						   DT_STATUS_OK_SEC))
862 			continue;
863 
864 		prop = fdt_getprop(fdt, offs, "reg", &len);
865 		if (!prop)
866 			continue;
867 
868 		prop_len = len;
869 		for (n = 0, prop_offs = 0; prop_offs < prop_len; n++) {
870 			a = get_dt_val_and_advance(prop, &prop_offs, addr_size);
871 			if (prop_offs >= prop_len) {
872 				n--;
873 				break;
874 			}
875 
876 			l = get_dt_val_and_advance(prop, &prop_offs, len_size);
877 			if (mem) {
878 				mem->type = MEM_AREA_DDR_OVERALL;
879 				mem->addr = a;
880 				mem->size = l;
881 				mem++;
882 			}
883 		}
884 
885 		elems_total += n;
886 	}
887 
888 	return elems_total;
889 }
890 
891 static struct core_mmu_phys_mem *get_nsec_memory(void *fdt, size_t *nelems)
892 {
893 	struct core_mmu_phys_mem *mem = NULL;
894 	int elems_total = 0;
895 
896 	elems_total = get_nsec_memory_helper(fdt, NULL);
897 	if (elems_total <= 0)
898 		return NULL;
899 
900 	mem = nex_calloc(elems_total, sizeof(*mem));
901 	if (!mem)
902 		panic();
903 
904 	elems_total = get_nsec_memory_helper(fdt, mem);
905 	assert(elems_total > 0);
906 
907 	*nelems = elems_total;
908 
909 	return mem;
910 }
911 #endif /*CFG_CORE_DYN_SHM*/
912 
913 #ifdef CFG_CORE_RESERVED_SHM
914 static int mark_static_shm_as_reserved(struct dt_descriptor *dt)
915 {
916 	vaddr_t shm_start;
917 	vaddr_t shm_end;
918 
919 	core_mmu_get_mem_by_type(MEM_AREA_NSEC_SHM, &shm_start, &shm_end);
920 	if (shm_start != shm_end)
921 		return add_res_mem_dt_node(dt, "optee_shm",
922 					   virt_to_phys((void *)shm_start),
923 					   shm_end - shm_start);
924 
925 	DMSG("No SHM configured");
926 	return -1;
927 }
928 #endif /*CFG_CORE_RESERVED_SHM*/
929 
930 static int mark_tzdram_as_reserved(struct dt_descriptor *dt)
931 {
932 	return add_res_mem_dt_node(dt, "optee_core", CFG_TZDRAM_START,
933 				   CFG_TZDRAM_SIZE);
934 }
935 
936 static void update_external_dt(void)
937 {
938 	struct dt_descriptor *dt = get_external_dt_desc();
939 
940 	if (!dt || !dt->blob)
941 		return;
942 
943 	if (!IS_ENABLED(CFG_CORE_FFA) && add_optee_dt_node(dt))
944 		panic("Failed to add OP-TEE Device Tree node");
945 
946 	if (config_psci(dt))
947 		panic("Failed to config PSCI");
948 
949 #ifdef CFG_CORE_RESERVED_SHM
950 	if (mark_static_shm_as_reserved(dt))
951 		panic("Failed to config non-secure memory");
952 #endif
953 
954 	if (mark_tzdram_as_reserved(dt))
955 		panic("Failed to config secure memory");
956 }
957 #else /*CFG_DT*/
958 static void update_external_dt(void)
959 {
960 }
961 
962 #ifdef CFG_CORE_DYN_SHM
963 static struct core_mmu_phys_mem *get_nsec_memory(void *fdt __unused,
964 						 size_t *nelems __unused)
965 {
966 	return NULL;
967 }
968 #endif /*CFG_CORE_DYN_SHM*/
969 #endif /*!CFG_DT*/
970 
971 #if defined(CFG_CORE_FFA)
972 void *get_manifest_dt(void)
973 {
974 	return manifest_dt;
975 }
976 
977 static void reinit_manifest_dt(void)
978 {
979 	paddr_t pa = (unsigned long)manifest_dt;
980 	void *fdt = NULL;
981 	int ret = 0;
982 
983 	if (!pa) {
984 		EMSG("No manifest DT found");
985 		return;
986 	}
987 
988 	fdt = core_mmu_add_mapping(MEM_AREA_MANIFEST_DT, pa, CFG_DTB_MAX_SIZE);
989 	if (!fdt)
990 		panic("Failed to map manifest DT");
991 
992 	manifest_dt = fdt;
993 
994 	ret = fdt_check_full(fdt, CFG_DTB_MAX_SIZE);
995 	if (ret < 0) {
996 		EMSG("Invalid manifest Device Tree at %#lx: error %d", pa, ret);
997 		panic();
998 	}
999 
1000 	IMSG("manifest DT found");
1001 }
1002 
1003 static TEE_Result release_manifest_dt(void)
1004 {
1005 	if (!manifest_dt)
1006 		return TEE_SUCCESS;
1007 
1008 	if (core_mmu_remove_mapping(MEM_AREA_MANIFEST_DT, manifest_dt,
1009 				    CFG_DTB_MAX_SIZE))
1010 		panic("Failed to remove temporary manifest DT mapping");
1011 	manifest_dt = NULL;
1012 
1013 	return TEE_SUCCESS;
1014 }
1015 
1016 boot_final(release_manifest_dt);
1017 #else
1018 void *get_manifest_dt(void)
1019 {
1020 	return NULL;
1021 }
1022 
1023 static void reinit_manifest_dt(void)
1024 {
1025 }
1026 #endif /*CFG_CORE_FFA*/
1027 
1028 #ifdef CFG_CORE_DYN_SHM
1029 static void discover_nsec_memory(void)
1030 {
1031 	struct core_mmu_phys_mem *mem;
1032 	const struct core_mmu_phys_mem *mem_begin = NULL;
1033 	const struct core_mmu_phys_mem *mem_end = NULL;
1034 	size_t nelems;
1035 	void *fdt = get_external_dt();
1036 
1037 	if (fdt) {
1038 		mem = get_nsec_memory(fdt, &nelems);
1039 		if (mem) {
1040 			core_mmu_set_discovered_nsec_ddr(mem, nelems);
1041 			return;
1042 		}
1043 
1044 		DMSG("No non-secure memory found in FDT");
1045 	}
1046 
1047 	mem_begin = phys_ddr_overall_begin;
1048 	mem_end = phys_ddr_overall_end;
1049 	nelems = mem_end - mem_begin;
1050 	if (nelems) {
1051 		/*
1052 		 * Platform cannot use both register_ddr() and the now
1053 		 * deprecated register_dynamic_shm().
1054 		 */
1055 		assert(phys_ddr_overall_compat_begin ==
1056 		       phys_ddr_overall_compat_end);
1057 	} else {
1058 		mem_begin = phys_ddr_overall_compat_begin;
1059 		mem_end = phys_ddr_overall_compat_end;
1060 		nelems = mem_end - mem_begin;
1061 		if (!nelems)
1062 			return;
1063 		DMSG("Warning register_dynamic_shm() is deprecated, please use register_ddr() instead");
1064 	}
1065 
1066 	mem = nex_calloc(nelems, sizeof(*mem));
1067 	if (!mem)
1068 		panic();
1069 
1070 	memcpy(mem, phys_ddr_overall_begin, sizeof(*mem) * nelems);
1071 	core_mmu_set_discovered_nsec_ddr(mem, nelems);
1072 }
1073 #else /*CFG_CORE_DYN_SHM*/
1074 static void discover_nsec_memory(void)
1075 {
1076 }
1077 #endif /*!CFG_CORE_DYN_SHM*/
1078 
1079 #ifdef CFG_NS_VIRTUALIZATION
1080 static TEE_Result virt_init_heap(void)
1081 {
1082 	/* We need to initialize the pool for every virtual guest partition */
1083 	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
1084 
1085 	return TEE_SUCCESS;
1086 }
1087 preinit_early(virt_init_heap);
1088 #endif
1089 
1090 void init_tee_runtime(void)
1091 {
1092 #ifndef CFG_WITH_PAGER
1093 	/* Pager initializes TA RAM early */
1094 	core_mmu_init_ta_ram();
1095 #endif
1096 	/*
1097 	 * With virtualization we call this function when creating the
1098 	 * OP-TEE partition instead.
1099 	 */
1100 	if (!IS_ENABLED(CFG_NS_VIRTUALIZATION))
1101 		call_preinitcalls();
1102 	call_initcalls();
1103 
1104 	/*
1105 	 * These two functions use crypto_rng_read() to initialize the
1106 	 * pauth keys. Once call_initcalls() returns we're guaranteed that
1107 	 * crypto_rng_read() is ready to be used.
1108 	 */
1109 	thread_init_core_local_pauth_keys();
1110 	thread_init_thread_pauth_keys();
1111 
1112 	/*
1113 	 * Reinitialize canaries around the stacks with crypto_rng_read().
1114 	 *
1115 	 * TODO: Updating canaries when CFG_NS_VIRTUALIZATION is enabled will
1116 	 * require synchronization between thread_check_canaries() and
1117 	 * thread_update_canaries().
1118 	 */
1119 	if (!IS_ENABLED(CFG_NS_VIRTUALIZATION))
1120 		thread_update_canaries();
1121 }
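
/*
 * The initcalls run by call_initcalls() above are registered elsewhere
 * with the macros from <initcall.h>. A minimal sketch of such a
 * registration (my_driver_probe is a hypothetical function):
 */
#if 0
static TEE_Result my_driver_probe(void)
{
	/* Runs as part of call_initcalls() during init_tee_runtime() */
	return TEE_SUCCESS;
}
driver_init(my_driver_probe);
#endif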
1122 
1123 static void init_primary(unsigned long pageable_part, unsigned long nsec_entry)
1124 {
1125 	thread_init_core_local_stacks();
1126 	/*
1127 	 * Mask asynchronous exceptions before switch to the thread vector
1128 	 * as the thread handler requires those to be masked while
1129 	 * executing with the temporary stack. The thread subsystem also
1130 	 * asserts that the foreign interrupts are blocked when using most of
1131 	 * its functions.
1132 	 */
1133 	thread_set_exceptions(THREAD_EXCP_ALL);
1134 	primary_save_cntfrq();
1135 	init_vfp_sec();
1136 	/*
1137 	 * Pager: init_runtime() calls thread_kernel_enable_vfp() so we must
1138 	 * set a current thread right now to avoid a chicken-and-egg problem
1139 	 * (thread_init_boot_thread() sets the current thread but needs
1140 	 * things set by init_runtime()).
1141 	 */
1142 	thread_get_core_local()->curr_thread = 0;
1143 	init_runtime(pageable_part);
1144 
1145 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
1146 		/*
1147 		 * Virtualization: We can't initialize threads right now because
1148 		 * threads belong to the "tee" part and will be initialized
1149 		 * separately for each new virtual guest. So, we'll clear
1150 		 * "curr_thread" and call it done.
1151 		 */
1152 		thread_get_core_local()->curr_thread = -1;
1153 	} else {
1154 		thread_init_boot_thread();
1155 	}
1156 	thread_init_primary();
1157 	thread_init_per_cpu();
1158 	init_sec_mon(nsec_entry);
1159 }
1160 
1161 static bool cpu_nmfi_enabled(void)
1162 {
1163 #if defined(ARM32)
1164 	return read_sctlr() & SCTLR_NMFI;
1165 #else
1166 	/* Note: ARM64 does not feature non-maskable FIQ support. */
1167 	return false;
1168 #endif
1169 }
1170 
1171 /*
1172  * Note: this function is weak just to make it possible to exclude it from
1173  * the unpaged area.
1174  */
1175 void __weak boot_init_primary_late(unsigned long fdt __unused,
1176 				   unsigned long manifest __unused)
1177 {
1178 	init_external_dt(boot_arg_fdt);
1179 	reinit_manifest_dt();
1180 #ifdef CFG_CORE_SEL1_SPMC
1181 	tpm_map_log_area(get_manifest_dt());
1182 #else
1183 	tpm_map_log_area(get_external_dt());
1184 #endif
1185 	discover_nsec_memory();
1186 	update_external_dt();
1187 	configure_console_from_dt();
1188 
1189 	IMSG("OP-TEE version: %s", core_v_str);
1190 	if (IS_ENABLED(CFG_WARN_INSECURE)) {
1191 		IMSG("WARNING: This OP-TEE configuration might be insecure!");
1192 		IMSG("WARNING: Please check https://optee.readthedocs.io/en/latest/architecture/porting_guidelines.html");
1193 	}
1194 	IMSG("Primary CPU initializing");
1195 #ifdef CFG_CORE_ASLR
1196 	DMSG("Executing at offset %#lx with virtual load address %#"PRIxVA,
1197 	     (unsigned long)boot_mmu_config.map_offset, VCORE_START_VA);
1198 #endif
1199 	if (IS_ENABLED(CFG_MEMTAG))
1200 		DMSG("Memory tagging %s",
1201 		     memtag_is_enabled() ?  "enabled" : "disabled");
1202 
1203 	/* Check if platform needs NMFI workaround */
1204 	if (cpu_nmfi_enabled()) {
1205 		if (!IS_ENABLED(CFG_CORE_WORKAROUND_ARM_NMFI))
1206 			IMSG("WARNING: This ARM core has NMFI enabled, please apply workaround!");
1207 	} else {
1208 		if (IS_ENABLED(CFG_CORE_WORKAROUND_ARM_NMFI))
1209 			IMSG("WARNING: This ARM core does not have NMFI enabled, no need for workaround");
1210 	}
1211 
1212 	boot_primary_init_intc();
1213 	init_vfp_nsec();
1214 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
1215 		IMSG("Initializing virtualization support");
1216 		core_mmu_init_virtualization();
1217 	} else {
1218 		init_tee_runtime();
1219 	}
1220 	call_finalcalls();
1221 	IMSG("Primary CPU switching to normal world boot");
1222 }
1223 
1224 static void init_secondary_helper(unsigned long nsec_entry)
1225 {
1226 	IMSG("Secondary CPU %zu initializing", get_core_pos());
1227 
1228 	/*
1229 	 * Mask asynchronous exceptions before switch to the thread vector
1230 	 * as the thread handler requires those to be masked while
1231 	 * executing with the temporary stack. The thread subsystem also
1232 	 * asserts that the foreign interrupts are blocked when using most of
1233 	 * its functions.
1234 	 */
1235 	thread_set_exceptions(THREAD_EXCP_ALL);
1236 
1237 	secondary_init_cntfrq();
1238 	thread_init_per_cpu();
1239 	init_sec_mon(nsec_entry);
1240 	boot_secondary_init_intc();
1241 	init_vfp_sec();
1242 	init_vfp_nsec();
1243 
1244 	IMSG("Secondary CPU %zu switching to normal world boot", get_core_pos());
1245 }
1246 
1247 /*
1248  * Note: this function is weak just to make it possible to exclude it from
1249  * the unpaged area so that it lies in the init area.
1250  */
1251 void __weak boot_init_primary_early(void)
1252 {
1253 	unsigned long pageable_part = 0;
1254 	unsigned long e = PADDR_INVALID;
1255 
1256 	if (!IS_ENABLED(CFG_WITH_ARM_TRUSTED_FW))
1257 		e = boot_arg_nsec_entry;
1258 	if (IS_ENABLED(CFG_WITH_PAGER))
1259 		pageable_part = boot_arg_pageable_part;
1260 
1261 	init_primary(pageable_part, e);
1262 }
1263 
1264 #if defined(CFG_WITH_ARM_TRUSTED_FW)
1265 unsigned long boot_cpu_on_handler(unsigned long a0 __maybe_unused,
1266 				  unsigned long a1 __unused)
1267 {
1268 	init_secondary_helper(PADDR_INVALID);
1269 	return 0;
1270 }
1271 #else
1272 void boot_init_secondary(unsigned long nsec_entry)
1273 {
1274 	init_secondary_helper(nsec_entry);
1275 }
1276 #endif
1277 
1278 #if defined(CFG_BOOT_SECONDARY_REQUEST)
1279 void boot_set_core_ns_entry(size_t core_idx, uintptr_t entry,
1280 			    uintptr_t context_id)
1281 {
1282 	ns_entry_contexts[core_idx].entry_point = entry;
1283 	ns_entry_contexts[core_idx].context_id = context_id;
1284 	dsb_ishst();
1285 }
1286 
1287 int boot_core_release(size_t core_idx, paddr_t entry)
1288 {
1289 	if (!core_idx || core_idx >= CFG_TEE_CORE_NB_CORE)
1290 		return -1;
1291 
1292 	ns_entry_contexts[core_idx].entry_point = entry;
1293 	dmb();
1294 	spin_table[core_idx] = 1;
1295 	dsb();
1296 	sev();
1297 
1298 	return 0;
1299 }
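
/*
 * Typical usage sketch (hypothetical caller): a platform PSCI CPU_ON
 * handler releases a held secondary core with boot_core_release() while
 * that core waits on the spin table in boot_core_hpen() below:
 */
#if 0
	if (boot_core_release(core_idx, entry_pa))
		return PSCI_RET_INVALID_PARAMETERS;
#endif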
1300 
1301 /*
1302  * Spin until a secondary boot request arrives, then return
1303  * the secondary core's entry context.
1304  */
1305 struct ns_entry_context *boot_core_hpen(void)
1306 {
1307 #ifdef CFG_PSCI_ARM32
1308 	return &ns_entry_contexts[get_core_pos()];
1309 #else
1310 	do {
1311 		wfe();
1312 	} while (!spin_table[get_core_pos()]);
1313 	dmb();
1314 	return &ns_entry_contexts[get_core_pos()];
1315 #endif
1316 }
1317 #endif
1318 
1319 #if defined(CFG_CORE_ASLR)
1320 #if defined(CFG_DT)
1321 unsigned long __weak get_aslr_seed(void)
1322 {
1323 	void *fdt = NULL;
1324 	int rc = 0;
1325 	const uint64_t *seed = NULL;
1326 	int offs = 0;
1327 	int len = 0;
1328 
1329 	if (!IS_ENABLED(CFG_CORE_SEL2_SPMC))
1330 		fdt = (void *)boot_arg_fdt;
1331 
1332 	if (!fdt) {
1333 		DMSG("No fdt");
1334 		goto err;
1335 	}
1336 
1337 	rc = fdt_check_header(fdt);
1338 	if (rc) {
1339 		DMSG("Bad fdt: %d", rc);
1340 		goto err;
1341 	}
1342 
1343 	offs = fdt_path_offset(fdt, "/secure-chosen");
1344 	if (offs < 0) {
1345 		DMSG("Cannot find /secure-chosen");
1346 		goto err;
1347 	}
1348 	seed = fdt_getprop(fdt, offs, "kaslr-seed", &len);
1349 	if (!seed || len != sizeof(*seed)) {
1350 		DMSG("Cannot find valid kaslr-seed");
1351 		goto err;
1352 	}
1353 
1354 	return fdt64_to_cpu(*seed);
1355 
1356 err:
1357 	/* Try platform implementation */
1358 	return plat_get_aslr_seed();
1359 }
1360 #else /*!CFG_DT*/
1361 unsigned long __weak get_aslr_seed(void)
1362 {
1363 	/* Try platform implementation */
1364 	return plat_get_aslr_seed();
1365 }
1366 #endif /*!CFG_DT*/
1367 #endif /*CFG_CORE_ASLR*/
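
/*
 * The lookup above expects a device tree fragment along these lines
 * (the seed value here is of course made up):
 *
 *	secure-chosen {
 *		kaslr-seed = <0x12345678 0x9abcdef0>;
 *	};
 */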
1368 
1369 static void *get_fdt_from_boot_info(struct ffa_boot_info_header_1_1 *hdr)
1370 {
1371 	struct ffa_boot_info_1_1 *desc = NULL;
1372 	uint8_t content_fmt = 0;
1373 	uint8_t name_fmt = 0;
1374 	void *fdt = NULL;
1375 	int ret = 0;
1376 
1377 	if (hdr->signature != FFA_BOOT_INFO_SIGNATURE) {
1378 		EMSG("Bad boot info signature %#"PRIx32, hdr->signature);
1379 		panic();
1380 	}
1381 	if (hdr->version != FFA_BOOT_INFO_VERSION) {
1382 		EMSG("Bad boot info version %#"PRIx32, hdr->version);
1383 		panic();
1384 	}
1385 	if (hdr->desc_count != 1) {
1386 		EMSG("Bad boot info descriptor count %#"PRIx32,
1387 		     hdr->desc_count);
1388 		panic();
1389 	}
1390 	desc = (void *)((vaddr_t)hdr + hdr->desc_offset);
1391 	name_fmt = desc->flags & FFA_BOOT_INFO_FLAG_NAME_FORMAT_MASK;
1392 	if (name_fmt == FFA_BOOT_INFO_FLAG_NAME_FORMAT_STRING)
1393 		DMSG("Boot info descriptor name \"%16s\"", desc->name);
1394 	else if (name_fmt == FFA_BOOT_INFO_FLAG_NAME_FORMAT_UUID)
1395 		DMSG("Boot info descriptor UUID %pUl", (void *)desc->name);
1396 	else
1397 		DMSG("Boot info descriptor: unknown name format %"PRIu8,
1398 		     name_fmt);
1399 
1400 	content_fmt = (desc->flags & FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_MASK) >>
1401 		      FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_SHIFT;
1402 	if (content_fmt != FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_ADDR) {
1403 		EMSG("Bad boot info content format %"PRIu8", expected %u (address)",
1404 		     content_fmt, FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_ADDR);
1405 		panic();
1406 	}
1407 
1408 	fdt = (void *)(vaddr_t)desc->contents;
1409 	ret = fdt_check_full(fdt, desc->size);
1410 	if (ret < 0) {
1411 		EMSG("Invalid Device Tree at %p: error %d", fdt, ret);
1412 		panic();
1413 	}
1414 	return fdt;
1415 }
1416 
1417 static void get_sec_mem_from_manifest(void *fdt, paddr_t *base, size_t *size)
1418 {
1419 	int ret = 0;
1420 	uint64_t num = 0;
1421 
1422 	ret = fdt_node_check_compatible(fdt, 0, "arm,ffa-manifest-1.0");
1423 	if (ret < 0) {
1424 		EMSG("Invalid FF-A manifest at %p: error %d", fdt, ret);
1425 		panic();
1426 	}
1427 	ret = dt_getprop_as_number(fdt, 0, "load-address", &num);
1428 	if (ret < 0) {
1429 		EMSG("Can't read \"load-address\" from FF-A manifest at %p: error %d",
1430 		     fdt, ret);
1431 		panic();
1432 	}
1433 	*base = num;
1434 	/* "mem-size" is currently an undocumented extension to the spec. */
1435 	ret = dt_getprop_as_number(fdt, 0, "mem-size", &num);
1436 	if (ret < 0) {
1437 		EMSG("Can't read \"mem-size\" from FF-A manifest at %p: error %d",
1438 		     fdt, ret);
1439 		panic();
1440 	}
1441 	*size = num;
1442 }
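
/*
 * Illustrative FF-A manifest fragment matched by the code above (the
 * values are made up; "mem-size" is the undocumented extension mentioned
 * in the code):
 *
 *	/ {
 *		compatible = "arm,ffa-manifest-1.0";
 *		load-address = <0x6280000>;
 *		mem-size = <0x200000>;
 *	};
 */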
1443 
1444 void __weak boot_save_args(unsigned long a0, unsigned long a1 __unused,
1445 			   unsigned long a2 __maybe_unused,
1446 			   unsigned long a3 __unused,
1447 			   unsigned long a4 __maybe_unused)
1448 {
1449 	/*
1450 	 * Register use:
1451 	 * a0   - CFG_CORE_FFA=y && CFG_CORE_SEL2_SPMC=n:
1452 	 *        if non-NULL holds the TOS FW config [1] address
1453 	 *      - CFG_CORE_FFA=y &&
1454 	 *        (CFG_CORE_SEL2_SPMC=y || CFG_CORE_EL3_SPMC=y):
1455 	 *        address of FF-A Boot Information Blob
1456 	 *      - CFG_CORE_FFA=n:
1457 	 *        if non-NULL holds the pageable part address
1458 	 * a1	- CFG_WITH_ARM_TRUSTED_FW=n (Armv7):
1459 	 *	  Armv7 standard bootarg #1 (kept track of in entry_a32.S)
1460 	 * a2   - CFG_CORE_SEL2_SPMC=n:
1461 	 *        if non-NULL holds the system DTB address
1462 	 *	- CFG_WITH_ARM_TRUSTED_FW=n (Armv7):
1463 	 *	  Armv7 standard bootarg #2 (system DTB address, kept track
1464 	 *	  of in entry_a32.S)
1465 	 * a3	- Not used
1466 	 * a4	- CFG_WITH_ARM_TRUSTED_FW=n:
1467 	 *	  Non-secure entry address
1468 	 *
1469 	 * [1] A TF-A concept: TOS_FW_CONFIG - Trusted OS Firmware
1470 	 * configuration file. Used by Trusted OS (BL32), that is, OP-TEE
1471 	 * here. This is also called Manifest DT, related to the Manifest DT
1472 	 * passed in the FF-A Boot Information Blob, but with a different
1473 	 * compatible string.
1474 	 */
1475 
1476 	if (!IS_ENABLED(CFG_CORE_SEL2_SPMC)) {
1477 #if defined(CFG_DT_ADDR)
1478 		boot_arg_fdt = CFG_DT_ADDR;
1479 #else
1480 		boot_arg_fdt = a2;
1481 #endif
1482 	}
1483 
1484 	if (IS_ENABLED(CFG_CORE_FFA)) {
1485 		if (IS_ENABLED(CFG_CORE_SEL2_SPMC) ||
1486 		    IS_ENABLED(CFG_CORE_EL3_SPMC))
1487 			manifest_dt = get_fdt_from_boot_info((void *)a0);
1488 		else
1489 			manifest_dt = (void *)a0;
1490 		if (IS_ENABLED(CFG_CORE_SEL2_SPMC) &&
1491 		    IS_ENABLED(CFG_CORE_PHYS_RELOCATABLE)) {
1492 			paddr_t base = 0;
1493 			size_t size = 0;
1494 
1495 			get_sec_mem_from_manifest(manifest_dt, &base, &size);
1496 			core_mmu_set_secure_memory(base, size);
1497 		}
1498 	} else {
1499 		if (IS_ENABLED(CFG_WITH_PAGER)) {
1500 #if defined(CFG_PAGEABLE_ADDR)
1501 			boot_arg_pageable_part = CFG_PAGEABLE_ADDR;
1502 #else
1503 			boot_arg_pageable_part = a0;
1504 #endif
1505 		}
1506 		if (!IS_ENABLED(CFG_WITH_ARM_TRUSTED_FW)) {
1507 #if defined(CFG_NS_ENTRY_ADDR)
1508 			boot_arg_nsec_entry = CFG_NS_ENTRY_ADDR;
1509 #else
1510 			boot_arg_nsec_entry = a4;
1511 #endif
1512 		}
1513 	}
1514 }
1515