xref: /optee_os/core/arch/arm/kernel/boot.c (revision 49286073c91e225524563320e42ee35f1fee9167)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2015-2023, Linaro Limited
4  * Copyright (c) 2023, Arm Limited
5  */
6 
7 #include <arm.h>
8 #include <assert.h>
9 #include <compiler.h>
10 #include <config.h>
11 #include <console.h>
12 #include <crypto/crypto.h>
13 #include <drivers/gic.h>
14 #include <dt-bindings/interrupt-controller/arm-gic.h>
15 #include <ffa.h>
16 #include <initcall.h>
17 #include <inttypes.h>
18 #include <io.h>
19 #include <keep.h>
20 #include <kernel/asan.h>
21 #include <kernel/boot.h>
22 #include <kernel/dt.h>
23 #include <kernel/linker.h>
24 #include <kernel/misc.h>
25 #include <kernel/panic.h>
26 #include <kernel/tee_misc.h>
27 #include <kernel/thread.h>
28 #include <kernel/tpm.h>
29 #include <kernel/transfer_list.h>
30 #include <libfdt.h>
31 #include <malloc.h>
32 #include <memtag.h>
33 #include <mm/core_memprot.h>
34 #include <mm/core_mmu.h>
35 #include <mm/fobj.h>
36 #include <mm/tee_mm.h>
37 #include <mm/tee_pager.h>
38 #include <sm/psci.h>
39 #include <trace.h>
40 #include <utee_defines.h>
41 #include <util.h>
42 
43 #include <platform_config.h>
44 
45 #if !defined(CFG_WITH_ARM_TRUSTED_FW)
46 #include <sm/sm.h>
47 #endif
48 
49 #if defined(CFG_WITH_VFP)
50 #include <kernel/vfp.h>
51 #endif
52 
53 /*
54  * In this file we're using unsigned long to represent physical pointers as
55  * they are received in a single register when OP-TEE is initially entered.
56  * This limits 32-bit systems to only make use of the lower 32 bits
57  * of a physical address for initial parameters.
58  *
59  * 64-bit systems on the other hand can use full 64-bit physical pointers.
60  */
61 #define PADDR_INVALID		ULONG_MAX
62 
63 #if defined(CFG_BOOT_SECONDARY_REQUEST)
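/*
 * Per-CPU normal world entry point and context ID used when releasing
 * secondary cores, see boot_set_core_ns_entry(), boot_core_release() and
 * boot_core_hpen().
 */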
64 struct ns_entry_context {
65 	uintptr_t entry_point;
66 	uintptr_t context_id;
67 };
68 struct ns_entry_context ns_entry_contexts[CFG_TEE_CORE_NB_CORE];
69 static uint32_t spin_table[CFG_TEE_CORE_NB_CORE];
70 #endif
71 
72 #ifdef CFG_BOOT_SYNC_CPU
73 /*
74  * Array used when booting to synchronize the CPUs.
75  * When 0, the CPU has not started.
76  * When 1, it has started.
77  */
78 uint32_t sem_cpu_sync[CFG_TEE_CORE_NB_CORE];
79 DECLARE_KEEP_PAGER(sem_cpu_sync);
80 #endif
81 
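/*
 * Boot arguments saved from the registers OP-TEE was entered with, recorded
 * by boot_save_args() and consumed later during primary CPU initialization.
 */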
82 static void *manifest_dt __nex_bss;
83 static unsigned long boot_arg_fdt __nex_bss;
84 static unsigned long boot_arg_nsec_entry __nex_bss;
85 static unsigned long boot_arg_pageable_part __nex_bss;
86 static unsigned long boot_arg_transfer_list __nex_bss;
87 static struct transfer_list_header *mapped_tl __nex_bss;
88 
89 #ifdef CFG_SECONDARY_INIT_CNTFRQ
90 static uint32_t cntfrq;
91 #endif
92 
93 /* May be overridden in plat-$(PLATFORM)/main.c */
94 __weak void plat_primary_init_early(void)
95 {
96 }
97 DECLARE_KEEP_PAGER(plat_primary_init_early);
98 
99 /* May be overridden in plat-$(PLATFORM)/main.c */
100 __weak void boot_primary_init_intc(void)
101 {
102 }
103 
104 /* May be overridden in plat-$(PLATFORM)/main.c */
105 __weak void boot_secondary_init_intc(void)
106 {
107 }
108 
109 /* May be overridden in plat-$(PLATFORM)/main.c */
110 __weak unsigned long plat_get_aslr_seed(void)
111 {
112 	DMSG("Warning: no ASLR seed");
113 
114 	return 0;
115 }
116 
117 #if defined(_CFG_CORE_STACK_PROTECTOR) || defined(CFG_WITH_STACK_CANARIES)
118 /* Generate random stack canary values on boot up */
119 __weak void plat_get_random_stack_canaries(void *buf, size_t ncan, size_t size)
120 {
121 	TEE_Result ret = TEE_ERROR_GENERIC;
122 	size_t i = 0;
123 
124 	assert(buf && ncan && size);
125 
126 	/*
127 	 * With virtualization the RNG is not initialized in the Nexus core,
128 	 * so this must be overridden with a platform-specific implementation.
129 	 */
130 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
131 		IMSG("WARNING: Using fixed value for stack canary");
132 		memset(buf, 0xab, ncan * size);
133 		goto out;
134 	}
135 
136 	ret = crypto_rng_read(buf, ncan * size);
137 	if (ret != TEE_SUCCESS)
138 		panic("Failed to generate random stack canary");
139 
140 out:
141 	/* Leave a null byte in each canary to hinder string-based exploits */
142 	for (i = 0; i < ncan; i++)
143 		*((uint8_t *)buf + size * i) = 0;
144 }
145 #endif /* _CFG_CORE_STACK_PROTECTOR || CFG_WITH_STACK_CANARIES */
146 
147 /*
148  * This function is called as a guard after each SMC call that is not
149  * supposed to return.
150  */
151 void __panic_at_smc_return(void)
152 {
153 	panic();
154 }
155 
156 #if defined(CFG_WITH_ARM_TRUSTED_FW)
157 void init_sec_mon(unsigned long nsec_entry __maybe_unused)
158 {
159 	assert(nsec_entry == PADDR_INVALID);
160 	/* Do nothing as we don't have a secure monitor */
161 }
162 #else
163 /* May be overridden in plat-$(PLATFORM)/main.c */
164 __weak void init_sec_mon(unsigned long nsec_entry)
165 {
166 	struct sm_nsec_ctx *nsec_ctx;
167 
168 	assert(nsec_entry != PADDR_INVALID);
169 
170 	/* Initialize secure monitor */
171 	nsec_ctx = sm_get_nsec_ctx();
172 	nsec_ctx->mon_lr = nsec_entry;
173 	nsec_ctx->mon_spsr = CPSR_MODE_SVC | CPSR_I;
174 	if (nsec_entry & 1)
175 		nsec_ctx->mon_spsr |= CPSR_T;
176 }
177 #endif
178 
179 #if defined(CFG_WITH_ARM_TRUSTED_FW)
180 static void init_vfp_nsec(void)
181 {
182 }
183 #else
184 static void init_vfp_nsec(void)
185 {
186 	/* Normal world can use CP10 and CP11 (SIMD/VFP) */
187 	write_nsacr(read_nsacr() | NSACR_CP10 | NSACR_CP11);
188 }
189 #endif
190 
191 #if defined(CFG_WITH_VFP)
192 
193 #ifdef ARM32
194 static void init_vfp_sec(void)
195 {
196 	uint32_t cpacr = read_cpacr();
197 
198 	/*
199 	 * Enable Advanced SIMD functionality.
200 	 * Enable use of D16-D31 of the Floating-point Extension register
201 	 * file.
202 	 */
203 	cpacr &= ~(CPACR_ASEDIS | CPACR_D32DIS);
204 	/*
205 	 * Enable usage of CP10 and CP11 (SIMD/VFP) in both kernel and
206 	 * user mode.
207 	 */
208 	cpacr |= CPACR_CP(10, CPACR_CP_ACCESS_FULL);
209 	cpacr |= CPACR_CP(11, CPACR_CP_ACCESS_FULL);
210 	write_cpacr(cpacr);
211 }
212 #endif /* ARM32 */
213 
214 #ifdef ARM64
215 static void init_vfp_sec(void)
216 {
217 	/* Not using VFP until thread_kernel_enable_vfp() */
218 	vfp_disable();
219 }
220 #endif /* ARM64 */
221 
222 #else /* CFG_WITH_VFP */
223 
224 static void init_vfp_sec(void)
225 {
226 	/* Not using VFP */
227 }
228 #endif
229 
230 #ifdef CFG_SECONDARY_INIT_CNTFRQ
231 static void primary_save_cntfrq(void)
232 {
233 	assert(cntfrq == 0);
234 
235 	/*
236 	 * CNTFRQ should be initialized on the primary CPU by a
237 	 * previous boot stage
238 	 */
239 	cntfrq = read_cntfrq();
240 }
241 
242 static void secondary_init_cntfrq(void)
243 {
244 	assert(cntfrq != 0);
245 	write_cntfrq(cntfrq);
246 }
247 #else /* CFG_SECONDARY_INIT_CNTFRQ */
248 static void primary_save_cntfrq(void)
249 {
250 }
251 
252 static void secondary_init_cntfrq(void)
253 {
254 }
255 #endif
256 
257 #ifdef CFG_CORE_SANITIZE_KADDRESS
258 static void init_run_constructors(void)
259 {
260 	const vaddr_t *ctor;
261 
262 	for (ctor = &__ctor_list; ctor < &__ctor_end; ctor++)
263 		((void (*)(void))(*ctor))();
264 }
265 
266 static void init_asan(void)
267 {
268 
269 	/*
270 	 * CFG_ASAN_SHADOW_OFFSET is also supplied as
271 	 * -fasan-shadow-offset=$(CFG_ASAN_SHADOW_OFFSET) to the compiler.
272 	 * Since all the values needed to calculate CFG_ASAN_SHADOW_OFFSET
273 	 * aren't available to make, we need to calculate it in advance and
274 	 * hard code it into the platform conf.mk. Here, where we have all
275 	 * the needed values, we double check that the compiler was supplied
276 	 * the correct value.
277 	 */
278 
279 #define __ASAN_SHADOW_START \
280 	ROUNDUP(TEE_RAM_START + (TEE_RAM_VA_SIZE * 8) / 9 - 8, 8)
281 	assert(__ASAN_SHADOW_START == (vaddr_t)&__asan_shadow_start);
282 #define __CFG_ASAN_SHADOW_OFFSET \
283 	(__ASAN_SHADOW_START - (TEE_RAM_START / 8))
284 	COMPILE_TIME_ASSERT(CFG_ASAN_SHADOW_OFFSET == __CFG_ASAN_SHADOW_OFFSET);
285 #undef __ASAN_SHADOW_START
286 #undef __CFG_ASAN_SHADOW_OFFSET
287 
288 	/*
289 	 * Declare the area covered by the shadow memory: everything from
290 	 * the load address up to the beginning of the shadow area.
291 	 */
292 	asan_set_shadowed((void *)TEE_LOAD_ADDR, &__asan_shadow_start);
293 
294 	/*
295 	 * Add access to areas that aren't opened automatically by a
296 	 * constructor.
297 	 */
298 	asan_tag_access(&__ctor_list, &__ctor_end);
299 	asan_tag_access(__rodata_start, __rodata_end);
300 #ifdef CFG_WITH_PAGER
301 	asan_tag_access(__pageable_start, __pageable_end);
302 #endif /*CFG_WITH_PAGER*/
303 	asan_tag_access(__nozi_start, __nozi_end);
304 #ifdef ARM32
305 	asan_tag_access(__exidx_start, __exidx_end);
306 	asan_tag_access(__extab_start, __extab_end);
307 #endif
308 
309 	init_run_constructors();
310 
311 	/* Everything is tagged correctly, let's start address sanitizing. */
312 	asan_start();
313 }
314 #else /*CFG_CORE_SANITIZE_KADDRESS*/
315 static void init_asan(void)
316 {
317 }
318 #endif /*CFG_CORE_SANITIZE_KADDRESS*/
319 
320 #if defined(CFG_MEMTAG)
321 /* Called from entry_a64.S only when MEMTAG is configured */
322 void boot_init_memtag(void)
323 {
324 	paddr_t base = 0;
325 	paddr_size_t size = 0;
326 
327 	memtag_init_ops(feat_mte_implemented());
328 	core_mmu_get_secure_memory(&base, &size);
329 	memtag_set_tags((void *)(vaddr_t)base, size, 0);
330 }
331 #endif
332 
333 #ifdef CFG_WITH_PAGER
334 
335 #ifdef CFG_CORE_SANITIZE_KADDRESS
336 static void carve_out_asan_mem(tee_mm_pool_t *pool)
337 {
338 	const size_t s = pool->hi - pool->lo;
339 	tee_mm_entry_t *mm;
340 	paddr_t apa = ASAN_MAP_PA;
341 	size_t asz = ASAN_MAP_SZ;
342 
343 	if (core_is_buffer_outside(apa, asz, pool->lo, s))
344 		return;
345 
346 	/* Reserve the shadow area */
347 	if (!core_is_buffer_inside(apa, asz, pool->lo, s)) {
348 		if (apa < pool->lo) {
349 			/*
350 			 * ASAN buffer is overlapping with the beginning of
351 			 * the pool.
352 			 */
353 			asz -= pool->lo - apa;
354 			apa = pool->lo;
355 		} else {
356 			/*
357 			 * ASAN buffer is overlapping with the end of the
358 			 * pool.
359 			 */
360 			asz = pool->hi - apa;
361 		}
362 	}
363 	mm = tee_mm_alloc2(pool, apa, asz);
364 	assert(mm);
365 }
366 #else
367 static void carve_out_asan_mem(tee_mm_pool_t *pool __unused)
368 {
369 }
370 #endif
371 
372 static void print_pager_pool_size(void)
373 {
374 	struct tee_pager_stats __maybe_unused stats;
375 
376 	tee_pager_get_stats(&stats);
377 	IMSG("Pager pool size: %zukB",
378 		stats.npages_all * SMALL_PAGE_SIZE / 1024);
379 }
380 
381 static void init_vcore(tee_mm_pool_t *mm_vcore)
382 {
383 	const vaddr_t begin = VCORE_START_VA;
384 	size_t size = TEE_RAM_VA_SIZE;
385 
386 #ifdef CFG_CORE_SANITIZE_KADDRESS
387 	/* Carve out ASAN memory, flat mapped after core memory */
388 	if (begin + size > ASAN_SHADOW_PA)
389 		size = ASAN_MAP_PA - begin;
390 #endif
391 
392 	if (!tee_mm_init(mm_vcore, begin, size, SMALL_PAGE_SHIFT,
393 			 TEE_MM_POOL_NO_FLAGS))
394 		panic("tee_mm_vcore init failed");
395 }
396 
397 /*
398  * With CFG_CORE_ASLR=y the init part is relocated very early during boot.
399  * The init part is also paged just as the rest of the normal paged code, with
400  * the difference that it's preloaded during boot. When the backing store
401  * is configured the entire paged binary is copied in place and then also
402  * the init part. Since the init part has been relocated (references to
403  * addresses updated to compensate for the new load address) this has to be
404  * undone for the hashes of those pages to match with the original binary.
405  *
406  * If CFG_CORE_ASLR=n, nothing needs to be done as the code/ro pages are
407  * unchanged.
408  */
409 static void undo_init_relocation(uint8_t *paged_store __maybe_unused)
410 {
411 #ifdef CFG_CORE_ASLR
412 	unsigned long *ptr = NULL;
413 	const uint32_t *reloc = NULL;
414 	const uint32_t *reloc_end = NULL;
415 	unsigned long offs = boot_mmu_config.map_offset;
416 	const struct boot_embdata *embdata = (const void *)__init_end;
417 	vaddr_t addr_end = (vaddr_t)__init_end - offs - TEE_LOAD_ADDR;
418 	vaddr_t addr_start = (vaddr_t)__init_start - offs - TEE_LOAD_ADDR;
419 
420 	reloc = (const void *)((vaddr_t)embdata + embdata->reloc_offset);
421 	reloc_end = reloc + embdata->reloc_len / sizeof(*reloc);
422 
423 	for (; reloc < reloc_end; reloc++) {
424 		if (*reloc < addr_start)
425 			continue;
426 		if (*reloc >= addr_end)
427 			break;
428 		ptr = (void *)(paged_store + *reloc - addr_start);
429 		*ptr -= offs;
430 	}
431 #endif
432 }
433 
434 static struct fobj *ro_paged_alloc(tee_mm_entry_t *mm, void *hashes,
435 				   void *store)
436 {
437 	const unsigned int num_pages = tee_mm_get_bytes(mm) / SMALL_PAGE_SIZE;
438 #ifdef CFG_CORE_ASLR
439 	unsigned int reloc_offs = (vaddr_t)__pageable_start - VCORE_START_VA;
440 	const struct boot_embdata *embdata = (const void *)__init_end;
441 	const void *reloc = __init_end + embdata->reloc_offset;
442 
443 	return fobj_ro_reloc_paged_alloc(num_pages, hashes, reloc_offs,
444 					 reloc, embdata->reloc_len, store);
445 #else
446 	return fobj_ro_paged_alloc(num_pages, hashes, store);
447 #endif
448 }
449 
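/*
 * With the pager enabled: set up the heaps, copy the paged binary into the
 * backing store in secure DDR, verify the SHA-256 hash of every page and
 * hand the pageable virtual range over to the pager.
 */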
450 static void init_runtime(unsigned long pageable_part)
451 {
452 	size_t n;
453 	size_t init_size = (size_t)(__init_end - __init_start);
454 	size_t pageable_start = (size_t)__pageable_start;
455 	size_t pageable_end = (size_t)__pageable_end;
456 	size_t pageable_size = pageable_end - pageable_start;
457 	vaddr_t tzsram_end = TZSRAM_BASE + TZSRAM_SIZE - TEE_LOAD_ADDR +
458 			     VCORE_START_VA;
459 	size_t hash_size = (pageable_size / SMALL_PAGE_SIZE) *
460 			   TEE_SHA256_HASH_SIZE;
461 	const struct boot_embdata *embdata = (const void *)__init_end;
462 	const void *tmp_hashes = NULL;
463 	tee_mm_entry_t *mm = NULL;
464 	struct fobj *fobj = NULL;
465 	uint8_t *paged_store = NULL;
466 	uint8_t *hashes = NULL;
467 
468 	assert(pageable_size % SMALL_PAGE_SIZE == 0);
469 	assert(embdata->total_len >= embdata->hashes_offset +
470 				     embdata->hashes_len);
471 	assert(hash_size == embdata->hashes_len);
472 
473 	tmp_hashes = __init_end + embdata->hashes_offset;
474 
475 	init_asan();
476 
477 	/* Add heap2 first as heap1 may be too small as an initial bget pool */
478 	malloc_add_pool(__heap2_start, __heap2_end - __heap2_start);
479 	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
480 
481 	/*
482 	 * This needs to be initialized early to support address lookup
483 	 * in MEM_AREA_TEE_RAM
484 	 */
485 	tee_pager_early_init();
486 
487 	hashes = malloc(hash_size);
488 	IMSG_RAW("\n");
489 	IMSG("Pager is enabled. Hashes: %zu bytes", hash_size);
490 	assert(hashes);
491 	asan_memcpy_unchecked(hashes, tmp_hashes, hash_size);
492 
493 	/*
494 	 * Need tee_mm_sec_ddr initialized to be able to allocate secure
495 	 * DDR below.
496 	 */
497 	core_mmu_init_ta_ram();
498 
499 	carve_out_asan_mem(&tee_mm_sec_ddr);
500 
501 	mm = tee_mm_alloc(&tee_mm_sec_ddr, pageable_size);
502 	assert(mm);
503 	paged_store = phys_to_virt(tee_mm_get_smem(mm), MEM_AREA_TA_RAM,
504 				   pageable_size);
505 	/*
506 	 * Load pageable part in the dedicated allocated area:
507 	 * - Move pageable non-init part into pageable area. Note that the
508 	 *   bootloader may have loaded it anywhere in TA RAM, hence memmove().
509 	 * - Copy pageable init part from current location into pageable area.
510 	 */
511 	memmove(paged_store + init_size,
512 		phys_to_virt(pageable_part,
513 			     core_mmu_get_type_by_pa(pageable_part),
514 			     __pageable_part_end - __pageable_part_start),
515 		__pageable_part_end - __pageable_part_start);
516 	asan_memcpy_unchecked(paged_store, __init_start, init_size);
517 	/*
518 	 * Undo eventual relocation for the init part so the hash checks
519 	 * can pass.
520 	 */
521 	undo_init_relocation(paged_store);
522 
523 	/* Check that the hashes of what's in the pageable area are OK */
524 	DMSG("Checking hashes of pageable area");
525 	for (n = 0; (n * SMALL_PAGE_SIZE) < pageable_size; n++) {
526 		const uint8_t *hash = hashes + n * TEE_SHA256_HASH_SIZE;
527 		const uint8_t *page = paged_store + n * SMALL_PAGE_SIZE;
528 		TEE_Result res;
529 
530 		DMSG("hash pg_idx %zu hash %p page %p", n, hash, page);
531 		res = hash_sha256_check(hash, page, SMALL_PAGE_SIZE);
532 		if (res != TEE_SUCCESS) {
533 			EMSG("Hash failed for page %zu at %p: res 0x%x",
534 			     n, (void *)page, res);
535 			panic();
536 		}
537 	}
538 
539 	/*
540 	 * Assert that the prepaged init sections are page aligned so that
541 	 * nothing uninitialized trails at the end of the premapped init area.
542 	 */
543 	assert(!(init_size & SMALL_PAGE_MASK));
544 
545 	/*
546 	 * Initialize the virtual memory pool used for main_mmu_l2_ttb which
547 	 * is supplied to tee_pager_init() below.
548 	 */
549 	init_vcore(&tee_mm_vcore);
550 
551 	/*
552 	 * Assign the pager alias area at the end of the small page block the rest
553 	 * of the binary is loaded into. We're taking more than needed, but
554 	 * we're guaranteed to not need more than the physical amount of
555 	 * TZSRAM.
556 	 */
557 	mm = tee_mm_alloc2(&tee_mm_vcore,
558 			   (vaddr_t)tee_mm_vcore.lo +
559 			   tee_mm_vcore.size - TZSRAM_SIZE,
560 			   TZSRAM_SIZE);
561 	assert(mm);
562 	tee_pager_set_alias_area(mm);
563 
564 	/*
565 	 * Claim the virtual memory which isn't paged.
566 	 * Linear memory (flat-mapped core memory) ends there.
567 	 */
568 	mm = tee_mm_alloc2(&tee_mm_vcore, VCORE_UNPG_RX_PA,
569 			   (vaddr_t)(__pageable_start - VCORE_UNPG_RX_PA));
570 	assert(mm);
571 
572 	/*
573 	 * Allocate virtual memory for the pageable area and let the pager
574 	 * take charge of all the pages already assigned to that memory.
575 	 */
576 	mm = tee_mm_alloc2(&tee_mm_vcore, (vaddr_t)__pageable_start,
577 			   pageable_size);
578 	assert(mm);
579 	fobj = ro_paged_alloc(mm, hashes, paged_store);
580 	assert(fobj);
581 	tee_pager_add_core_region(tee_mm_get_smem(mm), PAGED_REGION_TYPE_RO,
582 				  fobj);
583 	fobj_put(fobj);
584 
585 	tee_pager_add_pages(pageable_start, init_size / SMALL_PAGE_SIZE, false);
586 	tee_pager_add_pages(pageable_start + init_size,
587 			    (pageable_size - init_size) / SMALL_PAGE_SIZE,
588 			    true);
589 	if (pageable_end < tzsram_end)
590 		tee_pager_add_pages(pageable_end, (tzsram_end - pageable_end) /
591 						   SMALL_PAGE_SIZE, true);
592 
593 	/*
594 	 * There may be physical pages in TZSRAM before the core load address.
595 	 * These pages can be added to the physical pages pool of the pager.
596 	 * This setup may happen when the secure bootloader runs in TZSRAM
597 	 * and its memory can be reused by OP-TEE once the boot stages complete.
598 	 */
599 	tee_pager_add_pages(tee_mm_vcore.lo,
600 			(VCORE_UNPG_RX_PA - tee_mm_vcore.lo) / SMALL_PAGE_SIZE,
601 			true);
602 
603 	print_pager_pool_size();
604 }
605 #else
606 
607 static void init_runtime(unsigned long pageable_part __unused)
608 {
609 	init_asan();
610 
611 	/*
612 	 * By default the whole of OP-TEE uses malloc, so we need to
613 	 * initialize it early. But when virtualization is enabled, malloc is
614 	 * used only by the TEE runtime, so it should be initialized later,
615 	 * separately for every virtual partition. Core code uses nex_malloc
616 	 * instead.
617 	 */
618 #ifdef CFG_NS_VIRTUALIZATION
619 	nex_malloc_add_pool(__nex_heap_start, __nex_heap_end -
620 					      __nex_heap_start);
621 #else
622 	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
623 #endif
624 
625 	IMSG_RAW("\n");
626 }
627 #endif
628 
629 #if defined(CFG_DT)
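/*
 * Add a /firmware/optee node to the non-secure device tree so the normal
 * world can discover OP-TEE: compatible "linaro,optee-tz", the SMC calling
 * method and, when configured, the asynchronous notification interrupt.
 */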
630 static int add_optee_dt_node(struct dt_descriptor *dt)
631 {
632 	int offs;
633 	int ret;
634 
635 	if (fdt_path_offset(dt->blob, "/firmware/optee") >= 0) {
636 		DMSG("OP-TEE Device Tree node already exists!");
637 		return 0;
638 	}
639 
640 	offs = fdt_path_offset(dt->blob, "/firmware");
641 	if (offs < 0) {
642 		offs = add_dt_path_subnode(dt, "/", "firmware");
643 		if (offs < 0)
644 			return -1;
645 	}
646 
647 	offs = fdt_add_subnode(dt->blob, offs, "optee");
648 	if (offs < 0)
649 		return -1;
650 
651 	ret = fdt_setprop_string(dt->blob, offs, "compatible",
652 				 "linaro,optee-tz");
653 	if (ret < 0)
654 		return -1;
655 	ret = fdt_setprop_string(dt->blob, offs, "method", "smc");
656 	if (ret < 0)
657 		return -1;
658 
659 	if (CFG_CORE_ASYNC_NOTIF_GIC_INTID) {
660 		/*
661 		 * The format of the interrupt property is defined by the
662 		 * binding of the interrupt domain root. In this case it's
663 		 * an Arm GIC v1, v2 or v3 so we must be compatible with
664 		 * these.
665 		 *
666 		 * An SPI type of interrupt is indicated with a 0 in the
667 		 * first cell. A PPI type is indicated with value 1.
668 		 *
669 		 * The interrupt number goes in the second cell where
670 		 * SPIs range from 0 to 987 and PPIs range from 0 to 15.
671 		 *
672 		 * Flags are passed in the third cell.
673 		 */
674 		uint32_t itr_trigger = 0;
675 		uint32_t itr_type = 0;
676 		uint32_t itr_id = 0;
677 		uint32_t val[3] = { };
678 
679 		/* PPI are visible only in current CPU cluster */
680 		static_assert(IS_ENABLED(CFG_CORE_FFA) ||
681 			      !CFG_CORE_ASYNC_NOTIF_GIC_INTID ||
682 			      (CFG_CORE_ASYNC_NOTIF_GIC_INTID >=
683 			       GIC_SPI_BASE) ||
684 			      ((CFG_TEE_CORE_NB_CORE <= 8) &&
685 			       (CFG_CORE_ASYNC_NOTIF_GIC_INTID >=
686 				GIC_PPI_BASE)));
687 
688 		if (CFG_CORE_ASYNC_NOTIF_GIC_INTID >= GIC_SPI_BASE) {
689 			itr_type = GIC_SPI;
690 			itr_id = CFG_CORE_ASYNC_NOTIF_GIC_INTID - GIC_SPI_BASE;
691 			itr_trigger = IRQ_TYPE_EDGE_RISING;
692 		} else {
693 			itr_type = GIC_PPI;
694 			itr_id = CFG_CORE_ASYNC_NOTIF_GIC_INTID - GIC_PPI_BASE;
695 			itr_trigger = IRQ_TYPE_EDGE_RISING |
696 				      GIC_CPU_MASK_SIMPLE(CFG_TEE_CORE_NB_CORE);
697 		}
698 
699 		val[0] = TEE_U32_TO_BIG_ENDIAN(itr_type);
700 		val[1] = TEE_U32_TO_BIG_ENDIAN(itr_id);
701 		val[2] = TEE_U32_TO_BIG_ENDIAN(itr_trigger);
702 
703 		ret = fdt_setprop(dt->blob, offs, "interrupts", val,
704 				  sizeof(val));
705 		if (ret < 0)
706 			return -1;
707 	}
708 	return 0;
709 }
710 
711 #ifdef CFG_PSCI_ARM32
712 static int append_psci_compatible(void *fdt, int offs, const char *str)
713 {
714 	return fdt_appendprop(fdt, offs, "compatible", str, strlen(str) + 1);
715 }
716 
717 static int dt_add_psci_node(struct dt_descriptor *dt)
718 {
719 	int offs;
720 
721 	if (fdt_path_offset(dt->blob, "/psci") >= 0) {
722 		DMSG("PSCI Device Tree node already exists!");
723 		return 0;
724 	}
725 
726 	offs = add_dt_path_subnode(dt, "/", "psci");
727 	if (offs < 0)
728 		return -1;
729 	if (append_psci_compatible(dt->blob, offs, "arm,psci-1.0"))
730 		return -1;
731 	if (append_psci_compatible(dt->blob, offs, "arm,psci-0.2"))
732 		return -1;
733 	if (append_psci_compatible(dt->blob, offs, "arm,psci"))
734 		return -1;
735 	if (fdt_setprop_string(dt->blob, offs, "method", "smc"))
736 		return -1;
737 	if (fdt_setprop_u32(dt->blob, offs, "cpu_suspend", PSCI_CPU_SUSPEND))
738 		return -1;
739 	if (fdt_setprop_u32(dt->blob, offs, "cpu_off", PSCI_CPU_OFF))
740 		return -1;
741 	if (fdt_setprop_u32(dt->blob, offs, "cpu_on", PSCI_CPU_ON))
742 		return -1;
743 	if (fdt_setprop_u32(dt->blob, offs, "sys_poweroff", PSCI_SYSTEM_OFF))
744 		return -1;
745 	if (fdt_setprop_u32(dt->blob, offs, "sys_reset", PSCI_SYSTEM_RESET))
746 		return -1;
747 	return 0;
748 }
749 
750 static int check_node_compat_prefix(struct dt_descriptor *dt, int offs,
751 				    const char *prefix)
752 {
753 	const size_t prefix_len = strlen(prefix);
754 	size_t l;
755 	int plen;
756 	const char *prop;
757 
758 	prop = fdt_getprop(dt->blob, offs, "compatible", &plen);
759 	if (!prop)
760 		return -1;
761 
762 	while (plen > 0) {
763 		if (memcmp(prop, prefix, prefix_len) == 0)
764 			return 0; /* match */
765 
766 		l = strlen(prop) + 1;
767 		prop += l;
768 		plen -= l;
769 	}
770 
771 	return -1;
772 }
773 
774 static int dt_add_psci_cpu_enable_methods(struct dt_descriptor *dt)
775 {
776 	int offs = 0;
777 
778 	while (1) {
779 		offs = fdt_next_node(dt->blob, offs, NULL);
780 		if (offs < 0)
781 			break;
782 		if (fdt_getprop(dt->blob, offs, "enable-method", NULL))
783 			continue; /* already set */
784 		if (check_node_compat_prefix(dt, offs, "arm,cortex-a"))
785 			continue; /* no compatible */
786 		if (fdt_setprop_string(dt->blob, offs, "enable-method", "psci"))
787 			return -1;
788 		/* Need to restart scanning as offsets may have changed */
789 		offs = 0;
790 	}
791 	return 0;
792 }
793 
794 static int config_psci(struct dt_descriptor *dt)
795 {
796 	if (dt_add_psci_node(dt))
797 		return -1;
798 	return dt_add_psci_cpu_enable_methods(dt);
799 }
800 #else
801 static int config_psci(struct dt_descriptor *dt __unused)
802 {
803 	return 0;
804 }
805 #endif /*CFG_PSCI_ARM32*/
806 
807 #ifdef CFG_CORE_DYN_SHM
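/*
 * Read one value made up of @cell_size 32-bit FDT cells (1 or 2) at byte
 * offset *offs in @data and advance *offs past it.
 */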
808 static uint64_t get_dt_val_and_advance(const void *data, size_t *offs,
809 				       uint32_t cell_size)
810 {
811 	uint64_t rv = 0;
812 
813 	if (cell_size == 1) {
814 		uint32_t v;
815 
816 		memcpy(&v, (const uint8_t *)data + *offs, sizeof(v));
817 		*offs += sizeof(v);
818 		rv = fdt32_to_cpu(v);
819 	} else {
820 		uint64_t v;
821 
822 		memcpy(&v, (const uint8_t *)data + *offs, sizeof(v));
823 		*offs += sizeof(v);
824 		rv = fdt64_to_cpu(v);
825 	}
826 
827 	return rv;
828 }
829 
830 /*
831  * Find all non-secure memory from DT. Memory marked inaccessible by Secure
832  * World is ignored since it cannot be mapped to be used as dynamic shared
833  * memory.
834  */
835 static int get_nsec_memory_helper(void *fdt, struct core_mmu_phys_mem *mem)
836 {
837 	const uint8_t *prop = NULL;
838 	uint64_t a = 0;
839 	uint64_t l = 0;
840 	size_t prop_offs = 0;
841 	size_t prop_len = 0;
842 	int elems_total = 0;
843 	int addr_size = 0;
844 	int len_size = 0;
845 	int offs = 0;
846 	size_t n = 0;
847 	int len = 0;
848 
849 	addr_size = fdt_address_cells(fdt, 0);
850 	if (addr_size < 0)
851 		return 0;
852 
853 	len_size = fdt_size_cells(fdt, 0);
854 	if (len_size < 0)
855 		return 0;
856 
857 	while (true) {
858 		offs = fdt_node_offset_by_prop_value(fdt, offs, "device_type",
859 						     "memory",
860 						     sizeof("memory"));
861 		if (offs < 0)
862 			break;
863 
864 		if (fdt_get_status(fdt, offs) != (DT_STATUS_OK_NSEC |
865 						   DT_STATUS_OK_SEC))
866 			continue;
867 
868 		prop = fdt_getprop(fdt, offs, "reg", &len);
869 		if (!prop)
870 			continue;
871 
872 		prop_len = len;
873 		for (n = 0, prop_offs = 0; prop_offs < prop_len; n++) {
874 			a = get_dt_val_and_advance(prop, &prop_offs, addr_size);
875 			if (prop_offs >= prop_len) {
876 				n--;
877 				break;
878 			}
879 
880 			l = get_dt_val_and_advance(prop, &prop_offs, len_size);
881 			if (mem) {
882 				mem->type = MEM_AREA_DDR_OVERALL;
883 				mem->addr = a;
884 				mem->size = l;
885 				mem++;
886 			}
887 		}
888 
889 		elems_total += n;
890 	}
891 
892 	return elems_total;
893 }
894 
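/* Run the helper twice: first to count the entries, then to fill them in. */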
895 static struct core_mmu_phys_mem *get_nsec_memory(void *fdt, size_t *nelems)
896 {
897 	struct core_mmu_phys_mem *mem = NULL;
898 	int elems_total = 0;
899 
900 	elems_total = get_nsec_memory_helper(fdt, NULL);
901 	if (elems_total <= 0)
902 		return NULL;
903 
904 	mem = nex_calloc(elems_total, sizeof(*mem));
905 	if (!mem)
906 		panic();
907 
908 	elems_total = get_nsec_memory_helper(fdt, mem);
909 	assert(elems_total > 0);
910 
911 	*nelems = elems_total;
912 
913 	return mem;
914 }
915 #endif /*CFG_CORE_DYN_SHM*/
916 
917 #ifdef CFG_CORE_RESERVED_SHM
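/*
 * Describe the static SHM carveout as a reserved-memory node in the
 * non-secure device tree so the normal world doesn't reuse that range.
 */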
918 static int mark_static_shm_as_reserved(struct dt_descriptor *dt)
919 {
920 	vaddr_t shm_start;
921 	vaddr_t shm_end;
922 
923 	core_mmu_get_mem_by_type(MEM_AREA_NSEC_SHM, &shm_start, &shm_end);
924 	if (shm_start != shm_end)
925 		return add_res_mem_dt_node(dt, "optee_shm",
926 					   virt_to_phys((void *)shm_start),
927 					   shm_end - shm_start);
928 
929 	DMSG("No SHM configured");
930 	return -1;
931 }
932 #endif /*CFG_CORE_RESERVED_SHM*/
933 
934 static int mark_tzdram_as_reserved(struct dt_descriptor *dt)
935 {
936 	return add_res_mem_dt_node(dt, "optee_core", CFG_TZDRAM_START,
937 				   CFG_TZDRAM_SIZE);
938 }
939 
940 static void update_external_dt(void)
941 {
942 	struct dt_descriptor *dt = get_external_dt_desc();
943 
944 	if (!dt || !dt->blob)
945 		return;
946 
947 	if (!IS_ENABLED(CFG_CORE_FFA) && add_optee_dt_node(dt))
948 		panic("Failed to add OP-TEE Device Tree node");
949 
950 	if (config_psci(dt))
951 		panic("Failed to config PSCI");
952 
953 #ifdef CFG_CORE_RESERVED_SHM
954 	if (mark_static_shm_as_reserved(dt))
955 		panic("Failed to config non-secure memory");
956 #endif
957 
958 	if (mark_tzdram_as_reserved(dt))
959 		panic("Failed to config secure memory");
960 }
961 #else /*CFG_DT*/
962 static void update_external_dt(void)
963 {
964 }
965 
966 #ifdef CFG_CORE_DYN_SHM
967 static struct core_mmu_phys_mem *get_nsec_memory(void *fdt __unused,
968 						 size_t *nelems __unused)
969 {
970 	return NULL;
971 }
972 #endif /*CFG_CORE_DYN_SHM*/
973 #endif /*!CFG_DT*/
974 
975 #if defined(CFG_CORE_FFA)
976 void *get_manifest_dt(void)
977 {
978 	return manifest_dt;
979 }
980 
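/*
 * manifest_dt initially holds the address recorded by boot_save_args().
 * Map it as MEM_AREA_MANIFEST_DT, update manifest_dt to point at the new
 * mapping and validate the device tree.
 */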
981 static void reinit_manifest_dt(void)
982 {
983 	paddr_t pa = (unsigned long)manifest_dt;
984 	void *fdt = NULL;
985 	int ret = 0;
986 
987 	if (!pa) {
988 		EMSG("No manifest DT found");
989 		return;
990 	}
991 
992 	fdt = core_mmu_add_mapping(MEM_AREA_MANIFEST_DT, pa, CFG_DTB_MAX_SIZE);
993 	if (!fdt)
994 		panic("Failed to map manifest DT");
995 
996 	manifest_dt = fdt;
997 
998 	ret = fdt_check_full(fdt, CFG_DTB_MAX_SIZE);
999 	if (ret < 0) {
1000 		EMSG("Invalid manifest Device Tree at %#lx: error %d", pa, ret);
1001 		panic();
1002 	}
1003 
1004 	IMSG("manifest DT found");
1005 }
1006 
1007 static TEE_Result release_manifest_dt(void)
1008 {
1009 	if (!manifest_dt)
1010 		return TEE_SUCCESS;
1011 
1012 	if (core_mmu_remove_mapping(MEM_AREA_MANIFEST_DT, manifest_dt,
1013 				    CFG_DTB_MAX_SIZE))
1014 		panic("Failed to remove temporary manifest DT mapping");
1015 	manifest_dt = NULL;
1016 
1017 	return TEE_SUCCESS;
1018 }
1019 
1020 boot_final(release_manifest_dt);
1021 #else
1022 void *get_manifest_dt(void)
1023 {
1024 	return NULL;
1025 }
1026 
1027 static void reinit_manifest_dt(void)
1028 {
1029 }
1030 #endif /*CFG_CORE_FFA*/
1031 
1032 #ifdef CFG_CORE_DYN_SHM
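/*
 * Find non-secure DDR usable for dynamic shared memory: first from the
 * external DT, then the embedded DT, and finally from memory registered by
 * the platform with register_ddr() or the deprecated register_dynamic_shm().
 */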
1033 static void discover_nsec_memory(void)
1034 {
1035 	struct core_mmu_phys_mem *mem;
1036 	const struct core_mmu_phys_mem *mem_begin = NULL;
1037 	const struct core_mmu_phys_mem *mem_end = NULL;
1038 	size_t nelems;
1039 	void *fdt = get_external_dt();
1040 
1041 	if (fdt) {
1042 		mem = get_nsec_memory(fdt, &nelems);
1043 		if (mem) {
1044 			core_mmu_set_discovered_nsec_ddr(mem, nelems);
1045 			return;
1046 		}
1047 
1048 		DMSG("No non-secure memory found in external DT");
1049 	}
1050 
1051 	fdt = get_embedded_dt();
1052 	if (fdt) {
1053 		mem = get_nsec_memory(fdt, &nelems);
1054 		if (mem) {
1055 			core_mmu_set_discovered_nsec_ddr(mem, nelems);
1056 			return;
1057 		}
1058 
1059 		DMSG("No non-secure memory found in embedded DT");
1060 	}
1061 
1062 	mem_begin = phys_ddr_overall_begin;
1063 	mem_end = phys_ddr_overall_end;
1064 	nelems = mem_end - mem_begin;
1065 	if (nelems) {
1066 		/*
1067 		 * Platform cannot use both register_ddr() and the now
1068 		 * deprecated register_dynamic_shm().
1069 		 */
1070 		assert(phys_ddr_overall_compat_begin ==
1071 		       phys_ddr_overall_compat_end);
1072 	} else {
1073 		mem_begin = phys_ddr_overall_compat_begin;
1074 		mem_end = phys_ddr_overall_compat_end;
1075 		nelems = mem_end - mem_begin;
1076 		if (!nelems)
1077 			return;
1078 		DMSG("Warning register_dynamic_shm() is deprecated, please use register_ddr() instead");
1079 	}
1080 
1081 	mem = nex_calloc(nelems, sizeof(*mem));
1082 	if (!mem)
1083 		panic();
1084 
1085 	memcpy(mem, phys_ddr_overall_begin, sizeof(*mem) * nelems);
1086 	core_mmu_set_discovered_nsec_ddr(mem, nelems);
1087 }
1088 #else /*CFG_CORE_DYN_SHM*/
1089 static void discover_nsec_memory(void)
1090 {
1091 }
1092 #endif /*!CFG_CORE_DYN_SHM*/
1093 
1094 #ifdef CFG_NS_VIRTUALIZATION
1095 static TEE_Result virt_init_heap(void)
1096 {
1097 	/* We need to initialize pool for every virtual guest partition */
1098 	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
1099 
1100 	return TEE_SUCCESS;
1101 }
1102 preinit_early(virt_init_heap);
1103 #endif
1104 
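/*
 * Run the pre-init and init calls and seed the values that need a working
 * RNG (pauth keys and stack canaries).
 */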
1105 void init_tee_runtime(void)
1106 {
1107 #ifndef CFG_WITH_PAGER
1108 	/* Pager initializes TA RAM early */
1109 	core_mmu_init_ta_ram();
1110 #endif
1111 	/*
1112 	 * With virtualization we call this function when creating the
1113 	 * OP-TEE partition instead.
1114 	 */
1115 	if (!IS_ENABLED(CFG_NS_VIRTUALIZATION))
1116 		call_preinitcalls();
1117 	call_initcalls();
1118 
1119 	/*
1120 	 * These two functions use crypto_rng_read() to initialize the
1121 	 * pauth keys. Once call_initcalls() returns we're guaranteed that
1122 	 * crypto_rng_read() is ready to be used.
1123 	 */
1124 	thread_init_core_local_pauth_keys();
1125 	thread_init_thread_pauth_keys();
1126 
1127 	/*
1128 	 * Reinitialize canaries around the stacks with crypto_rng_read().
1129 	 *
1130 	 * TODO: Updating canaries when CFG_NS_VIRTUALIZATION is enabled will
1131 	 * require synchronization between thread_check_canaries() and
1132 	 * thread_update_canaries().
1133 	 */
1134 	if (!IS_ENABLED(CFG_NS_VIRTUALIZATION))
1135 		thread_update_canaries();
1136 }
1137 
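/*
 * Early primary CPU initialization called from boot_init_primary_early():
 * mask exceptions, initialize the runtime (heaps and, when enabled, the
 * pager), the thread subsystem and the secure monitor.
 */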
1138 static void init_primary(unsigned long pageable_part, unsigned long nsec_entry)
1139 {
1140 	thread_init_core_local_stacks();
1141 	/*
1142 	 * Mask asynchronous exceptions before switch to the thread vector
1143 	 * as the thread handler requires those to be masked while
1144 	 * executing with the temporary stack. The thread subsystem also
1145 	 * asserts that the foreign interrupts are blocked when using most of
1146 	 * its functions.
1147 	 */
1148 	thread_set_exceptions(THREAD_EXCP_ALL);
1149 	primary_save_cntfrq();
1150 	init_vfp_sec();
1151 	/*
1152 	 * Pager: init_runtime() calls thread_kernel_enable_vfp() so we must
1153 	 * set a current thread right now to avoid a chicken-and-egg problem
1154 	 * (thread_init_boot_thread() sets the current thread but needs
1155 	 * things set by init_runtime()).
1156 	 */
1157 	thread_get_core_local()->curr_thread = 0;
1158 	init_runtime(pageable_part);
1159 
1160 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
1161 		/*
1162 		 * Virtualization: We can't initialize threads right now because
1163 		 * threads belong to the "tee" part and will be initialized
1164 		 * separately for each new virtual guest. So, we'll clear
1165 		 * "curr_thread" and call it done.
1166 		 */
1167 		thread_get_core_local()->curr_thread = -1;
1168 	} else {
1169 		thread_init_boot_thread();
1170 	}
1171 	thread_init_primary();
1172 	thread_init_per_cpu();
1173 	init_sec_mon(nsec_entry);
1174 }
1175 
1176 static bool cpu_nmfi_enabled(void)
1177 {
1178 #if defined(ARM32)
1179 	return read_sctlr() & SCTLR_NMFI;
1180 #else
1181 	/* Note: ARM64 does not feature non-maskable FIQ support. */
1182 	return false;
1183 #endif
1184 }
1185 
1186 /*
1187  * Note: this function is weak just to make it possible to exclude it from
1188  * the unpaged area.
1189  */
1190 void __weak boot_init_primary_late(unsigned long fdt __unused,
1191 				   unsigned long manifest __unused)
1192 {
1193 	size_t fdt_size = CFG_DTB_MAX_SIZE;
1194 
1195 	if (IS_ENABLED(CFG_TRANSFER_LIST) && mapped_tl) {
1196 		struct transfer_list_entry *tl_e = NULL;
1197 
1198 		tl_e = transfer_list_find(mapped_tl, TL_TAG_FDT);
1199 		if (tl_e)
1200 			fdt_size = tl_e->data_size;
1201 	}
1202 
1203 	init_external_dt(boot_arg_fdt, fdt_size);
1204 	reinit_manifest_dt();
1205 #ifdef CFG_CORE_SEL1_SPMC
1206 	tpm_map_log_area(get_manifest_dt());
1207 #else
1208 	tpm_map_log_area(get_external_dt());
1209 #endif
1210 	discover_nsec_memory();
1211 	update_external_dt();
1212 	configure_console_from_dt();
1213 
1214 	IMSG("OP-TEE version: %s", core_v_str);
1215 	if (IS_ENABLED(CFG_INSECURE)) {
1216 		IMSG("WARNING: This OP-TEE configuration might be insecure!");
1217 		IMSG("WARNING: Please check https://optee.readthedocs.io/en/latest/architecture/porting_guidelines.html");
1218 	}
1219 	IMSG("Primary CPU initializing");
1220 #ifdef CFG_CORE_ASLR
1221 	DMSG("Executing at offset %#lx with virtual load address %#"PRIxVA,
1222 	     (unsigned long)boot_mmu_config.map_offset, VCORE_START_VA);
1223 #endif
1224 	if (IS_ENABLED(CFG_MEMTAG))
1225 		DMSG("Memory tagging %s",
1226 		     memtag_is_enabled() ?  "enabled" : "disabled");
1227 
1228 	/* Check if platform needs NMFI workaround */
1229 	if (cpu_nmfi_enabled())	{
1230 		if (!IS_ENABLED(CFG_CORE_WORKAROUND_ARM_NMFI))
1231 			IMSG("WARNING: This ARM core has NMFI enabled, please apply workaround!");
1232 	} else {
1233 		if (IS_ENABLED(CFG_CORE_WORKAROUND_ARM_NMFI))
1234 			IMSG("WARNING: This ARM core does not have NMFI enabled, no need for workaround");
1235 	}
1236 
1237 	boot_primary_init_intc();
1238 	init_vfp_nsec();
1239 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
1240 		IMSG("Initializing virtualization support");
1241 		core_mmu_init_virtualization();
1242 	} else {
1243 		init_tee_runtime();
1244 	}
1245 	call_finalcalls();
1246 	IMSG("Primary CPU switching to normal world boot");
1247 }
1248 
1249 static void init_secondary_helper(unsigned long nsec_entry)
1250 {
1251 	IMSG("Secondary CPU %zu initializing", get_core_pos());
1252 
1253 	/*
1254 	 * Mask asynchronous exceptions before switch to the thread vector
1255 	 * as the thread handler requires those to be masked while
1256 	 * executing with the temporary stack. The thread subsystem also
1257 	 * asserts that the foreign interrupts are blocked when using most of
1258 	 * its functions.
1259 	 */
1260 	thread_set_exceptions(THREAD_EXCP_ALL);
1261 
1262 	secondary_init_cntfrq();
1263 	thread_init_per_cpu();
1264 	init_sec_mon(nsec_entry);
1265 	boot_secondary_init_intc();
1266 	init_vfp_sec();
1267 	init_vfp_nsec();
1268 
1269 	IMSG("Secondary CPU %zu switching to normal world boot", get_core_pos());
1270 }
1271 
1272 /*
1273  * Note: this function is weak just to make it possible to exclude it from
1274  * the unpaged area so that it lies in the init area.
1275  */
1276 void __weak boot_init_primary_early(void)
1277 {
1278 	unsigned long pageable_part = 0;
1279 	unsigned long e = PADDR_INVALID;
1280 	struct transfer_list_entry *tl_e = NULL;
1281 
1282 	if (!IS_ENABLED(CFG_WITH_ARM_TRUSTED_FW))
1283 		e = boot_arg_nsec_entry;
1284 
1285 	if (IS_ENABLED(CFG_TRANSFER_LIST) && boot_arg_transfer_list) {
1286 		/* map and save the TL */
1287 		mapped_tl = transfer_list_map(boot_arg_transfer_list);
1288 		if (!mapped_tl)
1289 			panic("Failed to map transfer list");
1290 
1291 		transfer_list_dump(mapped_tl);
1292 		tl_e = transfer_list_find(mapped_tl, TL_TAG_FDT);
1293 		if (tl_e) {
1294 			/*
1295 			 * Expand the data size of the DTB entry to the maximum
1296 			 * allocable mapped memory to reserve sufficient space
1297 			 * for inserting new nodes and to avoid potentially
1298 			 * corrupting the next entries.
1299 			 */
1300 			uint32_t dtb_max_sz = mapped_tl->max_size -
1301 					      mapped_tl->size + tl_e->data_size;
1302 
1303 			if (!transfer_list_set_data_size(mapped_tl, tl_e,
1304 							 dtb_max_sz)) {
1305 				EMSG("Failed to extend DTB size to %#"PRIx32,
1306 				     dtb_max_sz);
1307 				panic();
1308 			}
1309 		}
1310 		tl_e = transfer_list_find(mapped_tl, TL_TAG_OPTEE_PAGABLE_PART);
1311 	}
1312 
1313 	if (IS_ENABLED(CFG_WITH_PAGER)) {
1314 		if (IS_ENABLED(CFG_TRANSFER_LIST) && tl_e)
1315 			pageable_part =
1316 				get_le64(transfer_list_entry_data(tl_e));
1317 		else
1318 			pageable_part = boot_arg_pageable_part;
1319 	}
1320 
1321 	init_primary(pageable_part, e);
1322 }
1323 
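/*
 * Validate the Firmware Handoff transfer list passed in registers and save
 * its address so boot_init_primary_early() can map it.
 */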
1324 static void boot_save_transfer_list(unsigned long zero_reg,
1325 				    unsigned long transfer_list,
1326 				    unsigned long fdt)
1327 {
1328 	struct transfer_list_header *tl = (void *)transfer_list;
1329 	struct transfer_list_entry *tl_e = NULL;
1330 
1331 	if (zero_reg != 0)
1332 		panic("Incorrect transfer list register convention");
1333 
1334 	if (!IS_ALIGNED_WITH_TYPE(transfer_list, struct transfer_list_header) ||
1335 	    !IS_ALIGNED(transfer_list, TL_ALIGNMENT_FROM_ORDER(tl->alignment)))
1336 		panic("Transfer list base address is not aligned");
1337 
1338 	if (transfer_list_check_header(tl) == TL_OPS_NONE)
1339 		panic("Invalid transfer list");
1340 
1341 	tl_e = transfer_list_find(tl, TL_TAG_FDT);
1342 	if (fdt != (unsigned long)transfer_list_entry_data(tl_e))
1343 		panic("DT does not match to the DT entry of the TL");
1344 
1345 	boot_arg_transfer_list = transfer_list;
1346 }
1347 
1348 #if defined(CFG_WITH_ARM_TRUSTED_FW)
1349 unsigned long boot_cpu_on_handler(unsigned long a0 __maybe_unused,
1350 				  unsigned long a1 __unused)
1351 {
1352 	init_secondary_helper(PADDR_INVALID);
1353 	return 0;
1354 }
1355 #else
1356 void boot_init_secondary(unsigned long nsec_entry)
1357 {
1358 	init_secondary_helper(nsec_entry);
1359 }
1360 #endif
1361 
1362 #if defined(CFG_BOOT_SECONDARY_REQUEST)
1363 void boot_set_core_ns_entry(size_t core_idx, uintptr_t entry,
1364 			    uintptr_t context_id)
1365 {
1366 	ns_entry_contexts[core_idx].entry_point = entry;
1367 	ns_entry_contexts[core_idx].context_id = context_id;
1368 	dsb_ishst();
1369 }
1370 
1371 int boot_core_release(size_t core_idx, paddr_t entry)
1372 {
1373 	if (!core_idx || core_idx >= CFG_TEE_CORE_NB_CORE)
1374 		return -1;
1375 
1376 	ns_entry_contexts[core_idx].entry_point = entry;
1377 	dmb();
1378 	spin_table[core_idx] = 1;
1379 	dsb();
1380 	sev();
1381 
1382 	return 0;
1383 }
1384 
1385 /*
1386  * Spin until a secondary boot request arrives, then return with
1387  * the secondary core entry address.
1388  */
1389 struct ns_entry_context *boot_core_hpen(void)
1390 {
1391 #ifdef CFG_PSCI_ARM32
1392 	return &ns_entry_contexts[get_core_pos()];
1393 #else
1394 	do {
1395 		wfe();
1396 	} while (!spin_table[get_core_pos()]);
1397 	dmb();
1398 	return &ns_entry_contexts[get_core_pos()];
1399 #endif
1400 }
1401 #endif
1402 
1403 #if defined(CFG_CORE_ASLR)
1404 #if defined(CFG_DT)
1405 unsigned long __weak get_aslr_seed(void)
1406 {
1407 	void *fdt = NULL;
1408 	int rc = 0;
1409 	const uint64_t *seed = NULL;
1410 	int offs = 0;
1411 	int len = 0;
1412 
1413 	if (!IS_ENABLED(CFG_CORE_SEL2_SPMC))
1414 		fdt = (void *)boot_arg_fdt;
1415 
1416 	if (!fdt) {
1417 		DMSG("No fdt");
1418 		goto err;
1419 	}
1420 
1421 	rc = fdt_check_header(fdt);
1422 	if (rc) {
1423 		DMSG("Bad fdt: %d", rc);
1424 		goto err;
1425 	}
1426 
1427 	offs =  fdt_path_offset(fdt, "/secure-chosen");
1428 	if (offs < 0) {
1429 		DMSG("Cannot find /secure-chosen");
1430 		goto err;
1431 	}
1432 	seed = fdt_getprop(fdt, offs, "kaslr-seed", &len);
1433 	if (!seed || len != sizeof(*seed)) {
1434 		DMSG("Cannot find valid kaslr-seed");
1435 		goto err;
1436 	}
1437 
1438 	return fdt64_to_cpu(*seed);
1439 
1440 err:
1441 	/* Try platform implementation */
1442 	return plat_get_aslr_seed();
1443 }
1444 #else /*!CFG_DT*/
1445 unsigned long __weak get_aslr_seed(void)
1446 {
1447 	/* Try platform implementation */
1448 	return plat_get_aslr_seed();
1449 }
1450 #endif /*!CFG_DT*/
1451 #endif /*CFG_CORE_ASLR*/
1452 
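/*
 * With an S-EL2 or EL3 SPMC the manifest is passed via an FF-A boot
 * information blob. Validate the blob and return the address of the
 * manifest device tree it describes.
 */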
1453 static void *get_fdt_from_boot_info(struct ffa_boot_info_header_1_1 *hdr)
1454 {
1455 	struct ffa_boot_info_1_1 *desc = NULL;
1456 	uint8_t content_fmt = 0;
1457 	uint8_t name_fmt = 0;
1458 	void *fdt = NULL;
1459 	int ret = 0;
1460 
1461 	if (hdr->signature != FFA_BOOT_INFO_SIGNATURE) {
1462 		EMSG("Bad boot info signature %#"PRIx32, hdr->signature);
1463 		panic();
1464 	}
1465 	if (hdr->version != FFA_BOOT_INFO_VERSION) {
1466 		EMSG("Bad boot info version %#"PRIx32, hdr->version);
1467 		panic();
1468 	}
1469 	if (hdr->desc_count != 1) {
1470 		EMSG("Bad boot info descriptor count %#"PRIx32,
1471 		     hdr->desc_count);
1472 		panic();
1473 	}
1474 	desc = (void *)((vaddr_t)hdr + hdr->desc_offset);
1475 	name_fmt = desc->flags & FFA_BOOT_INFO_FLAG_NAME_FORMAT_MASK;
1476 	if (name_fmt == FFA_BOOT_INFO_FLAG_NAME_FORMAT_STRING)
1477 		DMSG("Boot info descriptor name \"%16s\"", desc->name);
1478 	else if (name_fmt == FFA_BOOT_INFO_FLAG_NAME_FORMAT_UUID)
1479 		DMSG("Boot info descriptor UUID %pUl", (void *)desc->name);
1480 	else
1481 		DMSG("Boot info descriptor: unknown name format %"PRIu8,
1482 		     name_fmt);
1483 
1484 	content_fmt = (desc->flags & FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_MASK) >>
1485 		      FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_SHIFT;
1486 	if (content_fmt != FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_ADDR) {
1487 		EMSG("Bad boot info content format %"PRIu8", expected %u (address)",
1488 		     content_fmt, FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_ADDR);
1489 		panic();
1490 	}
1491 
1492 	fdt = (void *)(vaddr_t)desc->contents;
1493 	ret = fdt_check_full(fdt, desc->size);
1494 	if (ret < 0) {
1495 		EMSG("Invalid Device Tree at %p: error %d", fdt, ret);
1496 		panic();
1497 	}
1498 	return fdt;
1499 }
1500 
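/*
 * Read the physical base ("load-address") and size ("mem-size") of the
 * secure memory OP-TEE may use from the FF-A manifest.
 */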
1501 static void get_sec_mem_from_manifest(void *fdt, paddr_t *base, size_t *size)
1502 {
1503 	int ret = 0;
1504 	uint64_t num = 0;
1505 
1506 	ret = fdt_node_check_compatible(fdt, 0, "arm,ffa-manifest-1.0");
1507 	if (ret < 0) {
1508 		EMSG("Invalid FF-A manifest at %p: error %d", fdt, ret);
1509 		panic();
1510 	}
1511 	ret = dt_getprop_as_number(fdt, 0, "load-address", &num);
1512 	if (ret < 0) {
1513 		EMSG("Can't read \"load-address\" from FF-A manifest at %p: error %d",
1514 		     fdt, ret);
1515 		panic();
1516 	}
1517 	*base = num;
1518 	/* "mem-size" is currently an undocumented extension to the spec. */
1519 	ret = dt_getprop_as_number(fdt, 0, "mem-size", &num);
1520 	if (ret < 0) {
1521 		EMSG("Can't read \"mem-size\" from FF-A manifest at %p: error %d",
1522 		     fdt, ret);
1523 		panic();
1524 	}
1525 	*size = num;
1526 }
1527 
1528 void __weak boot_save_args(unsigned long a0, unsigned long a1,
1529 			   unsigned long a2, unsigned long a3,
1530 			   unsigned long a4 __maybe_unused)
1531 {
1532 	/*
1533 	 * Register use:
1534 	 *
1535 	 * Scenario A: Default arguments
1536 	 * a0   - CFG_CORE_FFA=y && CFG_CORE_SEL2_SPMC=n:
1537 	 *        if non-NULL holds the TOS FW config [1] address
1538 	 *      - CFG_CORE_FFA=y &&
1539 	 *        (CFG_CORE_SEL2_SPMC=y || CFG_CORE_EL3_SPMC=y):
1540 	 *        address of FF-A Boot Information Blob
1541 	 *      - CFG_CORE_FFA=n:
1542 	 *        if non-NULL holds the pagable part address
1543 	 *        if non-NULL holds the pageable part address
1544 	 *	  Armv7 standard bootarg #1 (kept track of in entry_a32.S)
1545 	 * a2   - CFG_CORE_SEL2_SPMC=n:
1546 	 *        if non-NULL holds the system DTB address
1547 	 *	- CFG_WITH_ARM_TRUSTED_FW=n (Armv7):
1548 	 *	  Armv7 standard bootarg #2 (system DTB address, kept track
1549 	 *	  of in entry_a32.S)
1550 	 * a3	- Not used
1551 	 * a4	- CFG_WITH_ARM_TRUSTED_FW=n:
1552 	 *	  Non-secure entry address
1553 	 *
1554 	 * [1] A TF-A concept: TOS_FW_CONFIG - Trusted OS Firmware
1555 	 * configuration file. Used by Trusted OS (BL32), that is, OP-TEE
1556 	 * here. This is also called Manifest DT, related to the Manifest DT
1557 	 * passed in the FF-A Boot Information Blob, but with a different
1558 	 * compatible string.
1559 	 *
1560 	 * Scenario B: FW Handoff via Transfer List
1561 	 * Note: FF-A and non-secure entry are not yet supported with
1562 	 *       Transfer List
1563 	 * a0	- DTB address or 0 (AArch64)
1564 	 *	- must be 0 (AArch32)
1565 	 * a1	- TRANSFER_LIST_SIGNATURE | REG_CONVENTION_VER_MASK
1566 	 * a2	- must be 0 (AArch64)
1567 	 *	- DTB address or 0 (AArch32)
1568 	 * a3	- Transfer list base address
1569 	 * a4	- Not used
1570 	 */
1571 
1572 	if (IS_ENABLED(CFG_TRANSFER_LIST) &&
1573 	    a1 == (TRANSFER_LIST_SIGNATURE | REG_CONVENTION_VER_MASK)) {
1574 		if (IS_ENABLED(CFG_ARM64_core)) {
1575 			boot_save_transfer_list(a2, a3, a0);
1576 			boot_arg_fdt = a0;
1577 		} else {
1578 			boot_save_transfer_list(a0, a3, a2);
1579 			boot_arg_fdt = a2;
1580 		}
1581 		return;
1582 	}
1583 
1584 	if (!IS_ENABLED(CFG_CORE_SEL2_SPMC)) {
1585 #if defined(CFG_DT_ADDR)
1586 		boot_arg_fdt = CFG_DT_ADDR;
1587 #else
1588 		boot_arg_fdt = a2;
1589 #endif
1590 	}
1591 
1592 	if (IS_ENABLED(CFG_CORE_FFA)) {
1593 		if (IS_ENABLED(CFG_CORE_SEL2_SPMC) ||
1594 		    IS_ENABLED(CFG_CORE_EL3_SPMC))
1595 			manifest_dt = get_fdt_from_boot_info((void *)a0);
1596 		else
1597 			manifest_dt = (void *)a0;
1598 		if (IS_ENABLED(CFG_CORE_SEL2_SPMC) &&
1599 		    IS_ENABLED(CFG_CORE_PHYS_RELOCATABLE)) {
1600 			paddr_t base = 0;
1601 			size_t size = 0;
1602 
1603 			get_sec_mem_from_manifest(manifest_dt, &base, &size);
1604 			core_mmu_set_secure_memory(base, size);
1605 		}
1606 	} else {
1607 		if (IS_ENABLED(CFG_WITH_PAGER)) {
1608 #if defined(CFG_PAGEABLE_ADDR)
1609 			boot_arg_pageable_part = CFG_PAGEABLE_ADDR;
1610 #else
1611 			boot_arg_pageable_part = a0;
1612 #endif
1613 		}
1614 		if (!IS_ENABLED(CFG_WITH_ARM_TRUSTED_FW)) {
1615 #if defined(CFG_NS_ENTRY_ADDR)
1616 			boot_arg_nsec_entry = CFG_NS_ENTRY_ADDR;
1617 #else
1618 			boot_arg_nsec_entry = a4;
1619 #endif
1620 		}
1621 	}
1622 }
1623 
1624 #if defined(CFG_TRANSFER_LIST)
1625 static TEE_Result release_transfer_list(void)
1626 {
1627 	struct dt_descriptor *dt = get_external_dt_desc();
1628 
1629 	if (!mapped_tl)
1630 		return TEE_SUCCESS;
1631 
1632 	if (dt) {
1633 		int ret = 0;
1634 		struct transfer_list_entry *tl_e = NULL;
1635 
1636 		/*
1637 		 * Pack the DTB and update the transfer list before un-mapping
1638 		 */
1639 		ret = fdt_pack(dt->blob);
1640 		if (ret < 0) {
1641 			EMSG("Failed to pack Device Tree at 0x%" PRIxPA
1642 			     ": error %d", virt_to_phys(dt->blob), ret);
1643 			panic();
1644 		}
1645 
1646 		tl_e = transfer_list_find(mapped_tl, TL_TAG_FDT);
1647 		assert(dt->blob == transfer_list_entry_data(tl_e));
1648 		transfer_list_set_data_size(mapped_tl, tl_e,
1649 					    fdt_totalsize(dt->blob));
1650 		dt->blob = NULL;
1651 	}
1652 
1653 	transfer_list_unmap_sync(mapped_tl);
1654 	mapped_tl = NULL;
1655 
1656 	return TEE_SUCCESS;
1657 }
1658 
1659 boot_final(release_transfer_list);
1660 #endif
1661