1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2015-2023, Linaro Limited
4  * Copyright (c) 2023, Arm Limited
5  */
6 
7 #include <arm.h>
8 #include <assert.h>
9 #include <compiler.h>
10 #include <config.h>
11 #include <console.h>
12 #include <crypto/crypto.h>
13 #include <drivers/gic.h>
14 #include <dt-bindings/interrupt-controller/arm-gic.h>
15 #include <ffa.h>
16 #include <initcall.h>
17 #include <inttypes.h>
18 #include <io.h>
19 #include <keep.h>
20 #include <kernel/asan.h>
21 #include <kernel/boot.h>
22 #include <kernel/dt.h>
23 #include <kernel/linker.h>
24 #include <kernel/misc.h>
25 #include <kernel/panic.h>
26 #include <kernel/tee_misc.h>
27 #include <kernel/thread.h>
28 #include <kernel/tpm.h>
29 #include <kernel/transfer_list.h>
30 #include <libfdt.h>
31 #include <malloc.h>
32 #include <memtag.h>
33 #include <mm/core_memprot.h>
34 #include <mm/core_mmu.h>
35 #include <mm/fobj.h>
36 #include <mm/page_alloc.h>
37 #include <mm/phys_mem.h>
38 #include <mm/tee_mm.h>
39 #include <mm/tee_pager.h>
40 #include <sm/psci.h>
41 #include <trace.h>
42 #include <utee_defines.h>
43 #include <util.h>
44 
45 #include <platform_config.h>
46 
47 #if !defined(CFG_WITH_ARM_TRUSTED_FW)
48 #include <sm/sm.h>
49 #endif
50 
51 #if defined(CFG_WITH_VFP)
52 #include <kernel/vfp.h>
53 #endif
54 
55 /*
56  * In this file we're using unsigned long to represent physical pointers as
57  * they are received in a single register when OP-TEE is initially entered.
58  * This limits 32-bit systems to only make use of the lower 32 bits
59  * of a physical address for initial parameters.
60  *
61  * 64-bit systems on the other hand can use full 64-bit physical pointers.
62  */
63 #define PADDR_INVALID		ULONG_MAX
64 
65 #if defined(CFG_BOOT_SECONDARY_REQUEST)
66 struct ns_entry_context {
67 	uintptr_t entry_point;
68 	uintptr_t context_id;
69 };
70 struct ns_entry_context ns_entry_contexts[CFG_TEE_CORE_NB_CORE];
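/*
 * Per-core release flags: boot_core_release() sets the flag and
 * boot_core_hpen() spins in WFE until it is set.
 */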
71 static uint32_t spin_table[CFG_TEE_CORE_NB_CORE];
72 #endif
73 
74 #ifdef CFG_BOOT_SYNC_CPU
75 /*
76  * Array used during boot to synchronize the CPUs.
77  * When 0, the CPU has not started.
78  * When 1, it has started.
79  */
80 uint32_t sem_cpu_sync[CFG_TEE_CORE_NB_CORE];
81 DECLARE_KEEP_PAGER(sem_cpu_sync);
82 #endif
83 
84 /*
85  * Must not be in .bss since it's initialized and used from assembly before
86  * .bss is cleared.
87  */
88 vaddr_t boot_cached_mem_end __nex_data = 1;
89 
90 static unsigned long boot_arg_fdt __nex_bss;
91 unsigned long boot_arg_nsec_entry __nex_bss;
92 static unsigned long boot_arg_pageable_part __nex_bss;
93 static unsigned long boot_arg_transfer_list __nex_bss;
94 static struct transfer_list_header *mapped_tl __nex_bss;
95 
96 #ifdef CFG_SECONDARY_INIT_CNTFRQ
97 static uint32_t cntfrq;
98 #endif
99 
100 /* May be overridden in plat-$(PLATFORM)/main.c */
101 __weak void plat_primary_init_early(void)
102 {
103 }
104 DECLARE_KEEP_PAGER(plat_primary_init_early);
105 
106 /* May be overridden in plat-$(PLATFORM)/main.c */
107 __weak void boot_primary_init_intc(void)
108 {
109 }
110 
111 /* May be overridden in plat-$(PLATFORM)/main.c */
112 __weak void boot_secondary_init_intc(void)
113 {
114 }
115 
116 /* May be overridden in plat-$(PLATFORM)/main.c */
117 __weak unsigned long plat_get_aslr_seed(void)
118 {
119 	DMSG("Warning: no ASLR seed");
120 
121 	return 0;
122 }
123 
124 /*
125  * This function is called as a guard after each SMC call that is not
126  * supposed to return.
127  */
128 void __panic_at_smc_return(void)
129 {
130 	panic();
131 }
132 
133 #if defined(CFG_WITH_ARM_TRUSTED_FW)
134 void init_sec_mon(unsigned long nsec_entry __maybe_unused)
135 {
136 	assert(nsec_entry == PADDR_INVALID);
137 	/* Do nothing as we don't have a secure monitor */
138 }
139 #else
140 /* May be overridden in plat-$(PLATFORM)/main.c */
141 __weak void init_sec_mon(unsigned long nsec_entry)
142 {
143 	struct sm_nsec_ctx *nsec_ctx;
144 
145 	assert(nsec_entry != PADDR_INVALID);
146 
147 	/* Initialize secure monitor */
148 	nsec_ctx = sm_get_nsec_ctx();
149 	nsec_ctx->mon_lr = nsec_entry;
150 	nsec_ctx->mon_spsr = CPSR_MODE_SVC | CPSR_I;
151 	if (nsec_entry & 1)
152 		nsec_ctx->mon_spsr |= CPSR_T;
153 }
154 #endif
155 
156 #if defined(CFG_WITH_ARM_TRUSTED_FW)
157 static void init_vfp_nsec(void)
158 {
159 }
160 #else
161 static void init_vfp_nsec(void)
162 {
163 	/* Normal world can use CP10 and CP11 (SIMD/VFP) */
164 	write_nsacr(read_nsacr() | NSACR_CP10 | NSACR_CP11);
165 }
166 #endif
167 
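/*
 * Panic if the CPU lacks a Crypto Extension instruction set required by an
 * enabled CFG_CRYPTO_*_ARM_CE option.
 */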
168 static void check_crypto_extensions(void)
169 {
170 	bool ce_supported = true;
171 
172 	if (!feat_aes_implemented() &&
173 	    IS_ENABLED(CFG_CRYPTO_AES_ARM_CE)) {
174 		EMSG("AES instructions are not supported");
175 		ce_supported = false;
176 	}
177 
178 	if (!feat_sha1_implemented() &&
179 	    IS_ENABLED(CFG_CRYPTO_SHA1_ARM_CE)) {
180 		EMSG("SHA1 instructions are not supported");
181 		ce_supported = false;
182 	}
183 
184 	if (!feat_sha256_implemented() &&
185 	    IS_ENABLED(CFG_CRYPTO_SHA256_ARM_CE)) {
186 		EMSG("SHA256 instructions are not supported");
187 		ce_supported = false;
188 	}
189 
190 	/* Check aarch64 specific instructions */
191 	if (IS_ENABLED(CFG_ARM64_core)) {
192 		if (!feat_sha512_implemented() &&
193 		    IS_ENABLED(CFG_CRYPTO_SHA512_ARM_CE)) {
194 			EMSG("SHA512 instructions are not supported");
195 			ce_supported = false;
196 		}
197 
198 		if (!feat_sha3_implemented() &&
199 		    IS_ENABLED(CFG_CRYPTO_SHA3_ARM_CE)) {
200 			EMSG("SHA3 instructions are not supported");
201 			ce_supported = false;
202 		}
203 
204 		if (!feat_sm3_implemented() &&
205 		    IS_ENABLED(CFG_CRYPTO_SM3_ARM_CE)) {
206 			EMSG("SM3 instructions are not supported");
207 			ce_supported = false;
208 		}
209 
210 		if (!feat_sm4_implemented() &&
211 		    IS_ENABLED(CFG_CRYPTO_SM4_ARM_CE)) {
212 			EMSG("SM4 instructions are not supported");
213 			ce_supported = false;
214 		}
215 	}
216 
217 	if (!ce_supported)
218 		panic("HW doesn't support CE instructions");
219 }
220 
221 #if defined(CFG_WITH_VFP)
222 
223 #ifdef ARM32
224 static void init_vfp_sec(void)
225 {
226 	uint32_t cpacr = read_cpacr();
227 
228 	/*
229 	 * Enable Advanced SIMD functionality.
230 	 * Enable use of D16-D31 of the Floating-point Extension register
231 	 * file.
232 	 */
233 	cpacr &= ~(CPACR_ASEDIS | CPACR_D32DIS);
234 	/*
235 	 * Enable use of CP10 and CP11 (SIMD/VFP) in both kernel and user
236 	 * mode.
237 	 */
238 	cpacr |= CPACR_CP(10, CPACR_CP_ACCESS_FULL);
239 	cpacr |= CPACR_CP(11, CPACR_CP_ACCESS_FULL);
240 	write_cpacr(cpacr);
241 }
242 #endif /* ARM32 */
243 
244 #ifdef ARM64
245 static void init_vfp_sec(void)
246 {
247 	/* Not using VFP until thread_kernel_enable_vfp() */
248 	vfp_disable();
249 }
250 #endif /* ARM64 */
251 
252 #else /* CFG_WITH_VFP */
253 
254 static void init_vfp_sec(void)
255 {
256 	/* Not using VFP */
257 }
258 #endif
259 
260 #ifdef CFG_SECONDARY_INIT_CNTFRQ
261 static void primary_save_cntfrq(void)
262 {
263 	assert(cntfrq == 0);
264 
265 	/*
266 	 * CNTFRQ should be initialized on the primary CPU by a
267 	 * previous boot stage
268 	 */
269 	cntfrq = read_cntfrq();
270 }
271 
272 static void secondary_init_cntfrq(void)
273 {
274 	assert(cntfrq != 0);
275 	write_cntfrq(cntfrq);
276 }
277 #else /* CFG_SECONDARY_INIT_CNTFRQ */
278 static void primary_save_cntfrq(void)
279 {
280 }
281 
282 static void secondary_init_cntfrq(void)
283 {
284 }
285 #endif
286 
287 #ifdef CFG_CORE_SANITIZE_KADDRESS
288 static void init_run_constructors(void)
289 {
290 	const vaddr_t *ctor;
291 
292 	for (ctor = &__ctor_list; ctor < &__ctor_end; ctor++)
293 		((void (*)(void))(*ctor))();
294 }
295 
296 static void init_asan(void)
297 {
298 
299 	/*
300 	 * CFG_ASAN_SHADOW_OFFSET is also supplied as
301 	 * -fasan-shadow-offset=$(CFG_ASAN_SHADOW_OFFSET) to the compiler.
302 	 * Since all the values needed to calculate
303 	 * CFG_ASAN_SHADOW_OFFSET aren't available to make, we have to
304 	 * calculate it in advance and hard code it into the platform
305 	 * conf.mk. Here, where we have all the needed values, we double
306 	 * check that the compiler was supplied the correct value.
307 	 */
308 
309 #define __ASAN_SHADOW_START \
310 	ROUNDUP(TEE_RAM_START + (TEE_RAM_VA_SIZE * 8) / 9 - 8, 8)
311 	assert(__ASAN_SHADOW_START == (vaddr_t)&__asan_shadow_start);
312 #define __CFG_ASAN_SHADOW_OFFSET \
313 	(__ASAN_SHADOW_START - (TEE_RAM_START / 8))
314 	COMPILE_TIME_ASSERT(CFG_ASAN_SHADOW_OFFSET == __CFG_ASAN_SHADOW_OFFSET);
315 #undef __ASAN_SHADOW_START
316 #undef __CFG_ASAN_SHADOW_OFFSET
317 
318 	/*
319 	 * Assign area covered by the shadow area, everything from start up
320 	 * to the beginning of the shadow area.
321 	 */
322 	asan_set_shadowed((void *)TEE_LOAD_ADDR, &__asan_shadow_start);
323 
324 	/*
325 	 * Add access to areas that aren't opened automatically by a
326 	 * constructor.
327 	 */
328 	asan_tag_access(&__ctor_list, &__ctor_end);
329 	asan_tag_access(__rodata_start, __rodata_end);
330 #ifdef CFG_WITH_PAGER
331 	asan_tag_access(__pageable_start, __pageable_end);
332 #endif /*CFG_WITH_PAGER*/
333 	asan_tag_access(__nozi_start, __nozi_end);
334 #ifdef ARM32
335 	asan_tag_access(__exidx_start, __exidx_end);
336 	asan_tag_access(__extab_start, __extab_end);
337 #endif
338 
339 	init_run_constructors();
340 
341 	/* Everything is tagged correctly, let's start address sanitizing. */
342 	asan_start();
343 }
344 #else /*CFG_CORE_SANITIZE_KADDRESS*/
345 static void init_asan(void)
346 {
347 }
348 #endif /*CFG_CORE_SANITIZE_KADDRESS*/
349 
350 #if defined(CFG_MEMTAG)
351 /* Called from entry_a64.S only when MEMTAG is configured */
352 void boot_init_memtag(void)
353 {
354 	memtag_init_ops(feat_mte_implemented());
355 }
356 
357 static TEE_Result mmap_clear_memtag(struct tee_mmap_region *map,
358 				    void *ptr __unused)
359 {
360 	switch (map->type) {
361 	case MEM_AREA_NEX_RAM_RO:
362 	case MEM_AREA_SEC_RAM_OVERALL:
363 		DMSG("Clearing tags for VA %#"PRIxVA"..%#"PRIxVA,
364 		     map->va, map->va + map->size - 1);
365 		memtag_set_tags((void *)map->va, map->size, 0);
366 		break;
367 	default:
368 		break;
369 	}
370 
371 	return TEE_SUCCESS;
372 }
373 
374 /* Called from entry_a64.S only when MEMTAG is configured */
375 void boot_clear_memtag(void)
376 {
377 	core_mmu_for_each_map(NULL, mmap_clear_memtag);
378 }
379 #endif
380 
381 #ifdef CFG_WITH_PAGER
382 
383 #ifdef CFG_CORE_SANITIZE_KADDRESS
384 static void carve_out_asan_mem(void)
385 {
386 	nex_phys_mem_partial_carve_out(ASAN_MAP_PA, ASAN_MAP_SZ);
387 }
388 #else
389 static void carve_out_asan_mem(void)
390 {
391 }
392 #endif
393 
394 static void print_pager_pool_size(void)
395 {
396 	struct tee_pager_stats __maybe_unused stats;
397 
398 	tee_pager_get_stats(&stats);
399 	IMSG("Pager pool size: %zukB",
400 		stats.npages_all * SMALL_PAGE_SIZE / 1024);
401 }
402 
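/*
 * Initialize the pool of core virtual memory, excluding the ASAN carve-out
 * when address sanitizing is enabled.
 */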
403 static void init_virt_pool(tee_mm_pool_t *virt_pool)
404 {
405 	const vaddr_t begin = VCORE_START_VA;
406 	size_t size = TEE_RAM_VA_SIZE;
407 
408 #ifdef CFG_CORE_SANITIZE_KADDRESS
409 	/* Carve out ASAN memory, flat mapped after core memory */
410 	if (begin + size > ASAN_SHADOW_PA)
411 		size = ASAN_MAP_PA - begin;
412 #endif
413 
414 	if (!tee_mm_init(virt_pool, begin, size, SMALL_PAGE_SHIFT,
415 			 TEE_MM_POOL_NO_FLAGS))
416 		panic("core_virt_mem_pool init failed");
417 }
418 
419 /*
420  * With CFG_CORE_ASLR=y the init part is relocated very early during boot.
421  * The init part is also paged just as the rest of the normal paged code, with
422  * the difference that it's preloaded during boot. When the backing store
423  * is configured the entire paged binary is copied in place and then also
424  * the init part. Since the init part has been relocated (references to
425  * addresses updated to compensate for the new load address) this has to be
426  * undone for the hashes of those pages to match with the original binary.
427  *
428  * If CFG_CORE_ASLR=n, nothing needs to be done as the code/ro pages are
429  * unchanged.
430  */
431 static void undo_init_relocation(uint8_t *paged_store __maybe_unused)
432 {
433 #ifdef CFG_CORE_ASLR
434 	unsigned long *ptr = NULL;
435 	const uint32_t *reloc = NULL;
436 	const uint32_t *reloc_end = NULL;
437 	unsigned long offs = boot_mmu_config.map_offset;
438 	const struct boot_embdata *embdata = (const void *)__init_end;
439 	vaddr_t addr_end = (vaddr_t)__init_end - offs - TEE_LOAD_ADDR;
440 	vaddr_t addr_start = (vaddr_t)__init_start - offs - TEE_LOAD_ADDR;
441 
442 	reloc = (const void *)((vaddr_t)embdata + embdata->reloc_offset);
443 	reloc_end = reloc + embdata->reloc_len / sizeof(*reloc);
444 
445 	for (; reloc < reloc_end; reloc++) {
446 		if (*reloc < addr_start)
447 			continue;
448 		if (*reloc >= addr_end)
449 			break;
450 		ptr = (void *)(paged_store + *reloc - addr_start);
451 		*ptr -= offs;
452 	}
453 #endif
454 }
455 
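/*
 * Create the read-only paged fobj backing the pageable area. With
 * CFG_CORE_ASLR the embedded relocation data is passed along so that
 * relocations can be reapplied when pages are loaded.
 */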
456 static struct fobj *ro_paged_alloc(tee_mm_entry_t *mm, void *hashes,
457 				   void *store)
458 {
459 	const unsigned int num_pages = tee_mm_get_bytes(mm) / SMALL_PAGE_SIZE;
460 #ifdef CFG_CORE_ASLR
461 	unsigned int reloc_offs = (vaddr_t)__pageable_start - VCORE_START_VA;
462 	const struct boot_embdata *embdata = (const void *)__init_end;
463 	const void *reloc = __init_end + embdata->reloc_offset;
464 
465 	return fobj_ro_reloc_paged_alloc(num_pages, hashes, reloc_offs,
466 					 reloc, embdata->reloc_len, store);
467 #else
468 	return fobj_ro_paged_alloc(num_pages, hashes, store);
469 #endif
470 }
471 
472 static void init_pager_runtime(unsigned long pageable_part)
473 {
474 	size_t n;
475 	size_t init_size = (size_t)(__init_end - __init_start);
476 	size_t pageable_start = (size_t)__pageable_start;
477 	size_t pageable_end = (size_t)__pageable_end;
478 	size_t pageable_size = pageable_end - pageable_start;
479 	vaddr_t tzsram_end = TZSRAM_BASE + TZSRAM_SIZE - TEE_LOAD_ADDR +
480 			     VCORE_START_VA;
481 	size_t hash_size = (pageable_size / SMALL_PAGE_SIZE) *
482 			   TEE_SHA256_HASH_SIZE;
483 	const struct boot_embdata *embdata = (const void *)__init_end;
484 	const void *tmp_hashes = NULL;
485 	tee_mm_entry_t *mm = NULL;
486 	struct fobj *fobj = NULL;
487 	uint8_t *paged_store = NULL;
488 	uint8_t *hashes = NULL;
489 
490 	assert(pageable_size % SMALL_PAGE_SIZE == 0);
491 	assert(embdata->total_len >= embdata->hashes_offset +
492 				     embdata->hashes_len);
493 	assert(hash_size == embdata->hashes_len);
494 
495 	tmp_hashes = __init_end + embdata->hashes_offset;
496 
497 	/*
498 	 * This needs to be initialized early to support address lookup
499 	 * in MEM_AREA_TEE_RAM
500 	 */
501 	tee_pager_early_init();
502 
503 	hashes = malloc(hash_size);
504 	IMSG_RAW("\n");
505 	IMSG("Pager is enabled. Hashes: %zu bytes", hash_size);
506 	assert(hashes);
507 	asan_memcpy_unchecked(hashes, tmp_hashes, hash_size);
508 
509 	/*
510 	 * The pager is about to be enabled below, so any temporary boot
511 	 * memory allocations must be released now.
512 	 */
513 	boot_mem_release_tmp_alloc();
514 
515 	carve_out_asan_mem();
516 
517 	mm = nex_phys_mem_ta_alloc(pageable_size);
518 	assert(mm);
519 	paged_store = phys_to_virt(tee_mm_get_smem(mm),
520 				   MEM_AREA_SEC_RAM_OVERALL, pageable_size);
521 	/*
522 	 * Load pageable part in the dedicated allocated area:
523 	 * - Move pageable non-init part into pageable area. Note bootloader
524 	 *   may have loaded it anywhere in TA RAM hence use memmove().
525 	 * - Copy pageable init part from current location into pageable area.
526 	 */
527 	memmove(paged_store + init_size,
528 		phys_to_virt(pageable_part,
529 			     core_mmu_get_type_by_pa(pageable_part),
530 			     __pageable_part_end - __pageable_part_start),
531 		__pageable_part_end - __pageable_part_start);
532 	asan_memcpy_unchecked(paged_store, __init_start, init_size);
533 	/*
534 	 * Undo eventual relocation for the init part so the hash checks
535 	 * can pass.
536 	 */
537 	undo_init_relocation(paged_store);
538 
539 	/* Check that the hashes of what's in the pageable area are OK */
540 	DMSG("Checking hashes of pageable area");
541 	for (n = 0; (n * SMALL_PAGE_SIZE) < pageable_size; n++) {
542 		const uint8_t *hash = hashes + n * TEE_SHA256_HASH_SIZE;
543 		const uint8_t *page = paged_store + n * SMALL_PAGE_SIZE;
544 		TEE_Result res;
545 
546 		DMSG("hash pg_idx %zu hash %p page %p", n, hash, page);
547 		res = hash_sha256_check(hash, page, SMALL_PAGE_SIZE);
548 		if (res != TEE_SUCCESS) {
549 			EMSG("Hash failed for page %zu at %p: res 0x%x",
550 			     n, (void *)page, res);
551 			panic();
552 		}
553 	}
554 
555 	/*
556 	 * Assert that the prepaged init sections are page aligned so that
557 	 * nothing uninitialized trails at the end of the premapped init area.
558 	 */
559 	assert(!(init_size & SMALL_PAGE_MASK));
560 
561 	/*
562 	 * Initialize the virtual memory pool used for main_mmu_l2_ttb which
563 	 * is supplied to tee_pager_init() below.
564 	 */
565 	init_virt_pool(&core_virt_mem_pool);
566 
567 	/*
568 	 * Assign the alias area used by the pager at the end of the small
569 	 * page block that the rest of the binary is loaded into. We're taking
570 	 * more than needed, but we're guaranteed to not need more than the
571 	 * physical amount of TZSRAM.
572 	 */
573 	mm = tee_mm_alloc2(&core_virt_mem_pool,
574 			   (vaddr_t)core_virt_mem_pool.lo +
575 			   core_virt_mem_pool.size - TZSRAM_SIZE,
576 			   TZSRAM_SIZE);
577 	assert(mm);
578 	tee_pager_set_alias_area(mm);
579 
580 	/*
581 	 * Claim virtual memory which isn't paged.
582 	 * Linear memory (flat-mapped core memory) ends there.
583 	 */
584 	mm = tee_mm_alloc2(&core_virt_mem_pool, VCORE_UNPG_RX_PA,
585 			   (vaddr_t)(__pageable_start - VCORE_UNPG_RX_PA));
586 	assert(mm);
587 
588 	/*
589 	 * Allocate virtual memory for the pageable area and let the pager
590 	 * take charge of all the pages already assigned to that memory.
591 	 */
592 	mm = tee_mm_alloc2(&core_virt_mem_pool, (vaddr_t)__pageable_start,
593 			   pageable_size);
594 	assert(mm);
595 	fobj = ro_paged_alloc(mm, hashes, paged_store);
596 	assert(fobj);
597 	tee_pager_add_core_region(tee_mm_get_smem(mm), PAGED_REGION_TYPE_RO,
598 				  fobj);
599 	fobj_put(fobj);
600 
601 	tee_pager_add_pages(pageable_start, init_size / SMALL_PAGE_SIZE, false);
602 	tee_pager_add_pages(pageable_start + init_size,
603 			    (pageable_size - init_size) / SMALL_PAGE_SIZE,
604 			    true);
605 	if (pageable_end < tzsram_end)
606 		tee_pager_add_pages(pageable_end, (tzsram_end - pageable_end) /
607 						   SMALL_PAGE_SIZE, true);
608 
609 	/*
610 	 * There may be physical pages in TZSRAM before the core load address.
611 	 * These pages can be added to the physical pages pool of the pager.
612 	 * This setup may happen when the secure bootloader runs in TZSRAM
613 	 * and its memory can be reused by OP-TEE once boot stages complete.
614 	 */
615 	tee_pager_add_pages(core_virt_mem_pool.lo,
616 			    (VCORE_UNPG_RX_PA - core_virt_mem_pool.lo) /
617 				SMALL_PAGE_SIZE,
618 			    true);
619 
620 	print_pager_pool_size();
621 }
622 #else /*!CFG_WITH_PAGER*/
623 static void init_pager_runtime(unsigned long pageable_part __unused)
624 {
625 }
626 #endif
627 
628 #if defined(CFG_DT)
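/*
 * Add a /firmware/optee node (SMC method and, if configured, the async
 * notification interrupt) unless the node already exists.
 */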
629 static int add_optee_dt_node(struct dt_descriptor *dt)
630 {
631 	int offs;
632 	int ret;
633 
634 	if (fdt_path_offset(dt->blob, "/firmware/optee") >= 0) {
635 		DMSG("OP-TEE Device Tree node already exists!");
636 		return 0;
637 	}
638 
639 	offs = fdt_path_offset(dt->blob, "/firmware");
640 	if (offs < 0) {
641 		offs = add_dt_path_subnode(dt, "/", "firmware");
642 		if (offs < 0)
643 			return -1;
644 	}
645 
646 	offs = fdt_add_subnode(dt->blob, offs, "optee");
647 	if (offs < 0)
648 		return -1;
649 
650 	ret = fdt_setprop_string(dt->blob, offs, "compatible",
651 				 "linaro,optee-tz");
652 	if (ret < 0)
653 		return -1;
654 	ret = fdt_setprop_string(dt->blob, offs, "method", "smc");
655 	if (ret < 0)
656 		return -1;
657 
658 	if (CFG_CORE_ASYNC_NOTIF_GIC_INTID) {
659 		/*
660 		 * The format of the interrupt property is defined by the
661 		 * binding of the interrupt domain root. In this case it's
662 		 * an Arm GIC v1, v2 or v3 so we must be compatible with
663 		 * these.
664 		 *
665 		 * An SPI type of interrupt is indicated with a 0 in the
666 		 * first cell. A PPI type is indicated with value 1.
667 		 *
668 		 * The interrupt number goes in the second cell where
669 		 * SPIs range from 0 to 987 and PPIs range from 0 to 15.
670 		 *
671 		 * Flags are passed in the third cell.
672 		 */
673 		uint32_t itr_trigger = 0;
674 		uint32_t itr_type = 0;
675 		uint32_t itr_id = 0;
676 		uint32_t val[3] = { };
677 
678 		/* PPIs are visible only in the current CPU cluster */
679 		static_assert(IS_ENABLED(CFG_CORE_FFA) ||
680 			      !CFG_CORE_ASYNC_NOTIF_GIC_INTID ||
681 			      (CFG_CORE_ASYNC_NOTIF_GIC_INTID >=
682 			       GIC_SPI_BASE) ||
683 			      ((CFG_TEE_CORE_NB_CORE <= 8) &&
684 			       (CFG_CORE_ASYNC_NOTIF_GIC_INTID >=
685 				GIC_PPI_BASE)));
686 
687 		if (CFG_CORE_ASYNC_NOTIF_GIC_INTID >= GIC_SPI_BASE) {
688 			itr_type = GIC_SPI;
689 			itr_id = CFG_CORE_ASYNC_NOTIF_GIC_INTID - GIC_SPI_BASE;
690 			itr_trigger = IRQ_TYPE_EDGE_RISING;
691 		} else {
692 			itr_type = GIC_PPI;
693 			itr_id = CFG_CORE_ASYNC_NOTIF_GIC_INTID - GIC_PPI_BASE;
694 			itr_trigger = IRQ_TYPE_EDGE_RISING |
695 				      GIC_CPU_MASK_SIMPLE(CFG_TEE_CORE_NB_CORE);
696 		}
697 
698 		val[0] = TEE_U32_TO_BIG_ENDIAN(itr_type);
699 		val[1] = TEE_U32_TO_BIG_ENDIAN(itr_id);
700 		val[2] = TEE_U32_TO_BIG_ENDIAN(itr_trigger);
701 
702 		ret = fdt_setprop(dt->blob, offs, "interrupts", val,
703 				  sizeof(val));
704 		if (ret < 0)
705 			return -1;
706 	}
707 	return 0;
708 }
709 
710 #ifdef CFG_PSCI_ARM32
711 static int append_psci_compatible(void *fdt, int offs, const char *str)
712 {
713 	return fdt_appendprop(fdt, offs, "compatible", str, strlen(str) + 1);
714 }
715 
716 static int dt_add_psci_node(struct dt_descriptor *dt)
717 {
718 	int offs;
719 
720 	if (fdt_path_offset(dt->blob, "/psci") >= 0) {
721 		DMSG("PSCI Device Tree node already exists!");
722 		return 0;
723 	}
724 
725 	offs = add_dt_path_subnode(dt, "/", "psci");
726 	if (offs < 0)
727 		return -1;
728 	if (append_psci_compatible(dt->blob, offs, "arm,psci-1.0"))
729 		return -1;
730 	if (append_psci_compatible(dt->blob, offs, "arm,psci-0.2"))
731 		return -1;
732 	if (append_psci_compatible(dt->blob, offs, "arm,psci"))
733 		return -1;
734 	if (fdt_setprop_string(dt->blob, offs, "method", "smc"))
735 		return -1;
736 	if (fdt_setprop_u32(dt->blob, offs, "cpu_suspend", PSCI_CPU_SUSPEND))
737 		return -1;
738 	if (fdt_setprop_u32(dt->blob, offs, "cpu_off", PSCI_CPU_OFF))
739 		return -1;
740 	if (fdt_setprop_u32(dt->blob, offs, "cpu_on", PSCI_CPU_ON))
741 		return -1;
742 	if (fdt_setprop_u32(dt->blob, offs, "sys_poweroff", PSCI_SYSTEM_OFF))
743 		return -1;
744 	if (fdt_setprop_u32(dt->blob, offs, "sys_reset", PSCI_SYSTEM_RESET))
745 		return -1;
746 	return 0;
747 }
748 
749 static int check_node_compat_prefix(struct dt_descriptor *dt, int offs,
750 				    const char *prefix)
751 {
752 	const size_t prefix_len = strlen(prefix);
753 	size_t l;
754 	int plen;
755 	const char *prop;
756 
757 	prop = fdt_getprop(dt->blob, offs, "compatible", &plen);
758 	if (!prop)
759 		return -1;
760 
761 	while (plen > 0) {
762 		if (memcmp(prop, prefix, prefix_len) == 0)
763 			return 0; /* match */
764 
765 		l = strlen(prop) + 1;
766 		prop += l;
767 		plen -= l;
768 	}
769 
770 	return -1;
771 }
772 
773 static int dt_add_psci_cpu_enable_methods(struct dt_descriptor *dt)
774 {
775 	int offs = 0;
776 
777 	while (1) {
778 		offs = fdt_next_node(dt->blob, offs, NULL);
779 		if (offs < 0)
780 			break;
781 		if (fdt_getprop(dt->blob, offs, "enable-method", NULL))
782 			continue; /* already set */
783 		if (check_node_compat_prefix(dt, offs, "arm,cortex-a"))
784 			continue; /* no compatible */
785 		if (fdt_setprop_string(dt->blob, offs, "enable-method", "psci"))
786 			return -1;
787 		/* Need to restart scanning as offsets may have changed */
788 		offs = 0;
789 	}
790 	return 0;
791 }
792 
793 static int config_psci(struct dt_descriptor *dt)
794 {
795 	if (dt_add_psci_node(dt))
796 		return -1;
797 	return dt_add_psci_cpu_enable_methods(dt);
798 }
799 #else
800 static int config_psci(struct dt_descriptor *dt __unused)
801 {
802 	return 0;
803 }
804 #endif /*CFG_PSCI_ARM32*/
805 
806 static int mark_tzdram_as_reserved(struct dt_descriptor *dt)
807 {
808 	return add_res_mem_dt_node(dt, "optee_core", CFG_TZDRAM_START,
809 				   CFG_TZDRAM_SIZE);
810 }
811 
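/*
 * Amend the external device tree with the OP-TEE node, PSCI configuration
 * and reserved-memory entries before it's handed over to the normal world.
 */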
812 static void update_external_dt(void)
813 {
814 	struct dt_descriptor *dt = get_external_dt_desc();
815 
816 	if (!dt || !dt->blob)
817 		return;
818 
819 	if (!IS_ENABLED(CFG_CORE_FFA) && add_optee_dt_node(dt))
820 		panic("Failed to add OP-TEE Device Tree node");
821 
822 	if (config_psci(dt))
823 		panic("Failed to config PSCI");
824 
825 #ifdef CFG_CORE_RESERVED_SHM
826 	if (mark_static_shm_as_reserved(dt))
827 		panic("Failed to config non-secure memory");
828 #endif
829 
830 	if (mark_tzdram_as_reserved(dt))
831 		panic("Failed to config secure memory");
832 }
833 #else /*CFG_DT*/
834 static void update_external_dt(void)
835 {
836 }
837 #endif /*!CFG_DT*/
838 
839 void init_tee_runtime(void)
840 {
841 	/*
842 	 * With virtualization we call this function when creating the
843 	 * OP-TEE partition instead.
844 	 */
845 	if (!IS_ENABLED(CFG_NS_VIRTUALIZATION))
846 		call_preinitcalls();
847 	call_early_initcalls();
848 	call_service_initcalls();
849 
850 	/*
851 	 * These two functions use crypto_rng_read() to initialize the
852 	 * pauth keys. Once call_initcalls() returns we're guaranteed that
853 	 * crypto_rng_read() is ready to be used.
854 	 */
855 	thread_init_core_local_pauth_keys();
856 	thread_init_thread_pauth_keys();
857 
858 	/*
859 	 * Reinitialize canaries around the stacks with crypto_rng_read().
860 	 *
861 	 * TODO: Updating canaries when CFG_NS_VIRTUALIZATION is enabled will
862 	 * require synchronization between thread_check_canaries() and
863 	 * thread_update_canaries().
864 	 */
865 	if (!IS_ENABLED(CFG_NS_VIRTUALIZATION))
866 		thread_update_canaries();
867 }
868 
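/*
 * boot_mem_foreach_padding() callback: donate unused padding ranges to the
 * (nexus) heap.
 */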
869 static bool add_padding_to_pool(vaddr_t va, size_t len, void *ptr __unused)
870 {
871 #ifdef CFG_NS_VIRTUALIZATION
872 	nex_malloc_add_pool((void *)va, len);
873 #else
874 	malloc_add_pool((void *)va, len);
875 #endif
876 	return true;
877 }
878 
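/*
 * Early initialization of the primary CPU: exceptions masked, heaps and
 * physical memory set up, and the pager runtime started when enabled.
 */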
879 static void init_primary(unsigned long pageable_part)
880 {
881 	vaddr_t va = 0;
882 
883 	/*
884 	 * Mask asynchronous exceptions before switching to the thread vector
885 	 * as the thread handler requires those to be masked while
886 	 * executing with the temporary stack. The thread subsystem also
887 	 * asserts that the foreign interrupts are blocked when using most of
888 	 * its functions.
889 	 */
890 	thread_set_exceptions(THREAD_EXCP_ALL);
891 	primary_save_cntfrq();
892 	init_vfp_sec();
893 
894 	if (IS_ENABLED(CFG_CRYPTO_WITH_CE))
895 		check_crypto_extensions();
896 
897 	init_asan();
898 
899 	/*
900 	 * By default the whole of OP-TEE uses malloc, so we need to initialize
901 	 * it early. But, when virtualization is enabled, malloc is used
902 	 * only by the TEE runtime, so malloc should be initialized later, for
903 	 * every virtual partition separately. Core code uses nex_malloc
904 	 * instead.
905 	 */
906 #ifdef CFG_WITH_PAGER
907 	/* Add heap2 first as heap1 may be too small as the initial bget pool */
908 	malloc_add_pool(__heap2_start, __heap2_end - __heap2_start);
909 #endif
910 #ifdef CFG_NS_VIRTUALIZATION
911 	nex_malloc_add_pool(__nex_heap_start, __nex_heap_end -
912 					      __nex_heap_start);
913 #else
914 	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
915 #endif
916 	IMSG_RAW("\n");
917 
918 	core_mmu_save_mem_map();
919 	core_mmu_init_phys_mem();
920 	boot_mem_foreach_padding(add_padding_to_pool, NULL);
921 	va = boot_mem_release_unused();
922 	if (!IS_ENABLED(CFG_WITH_PAGER)) {
923 		/*
924 		 * We must update boot_cached_mem_end to reflect the memory
925 		 * just unmapped by boot_mem_release_unused().
926 		 */
927 		assert(va && va <= boot_cached_mem_end);
928 		boot_cached_mem_end = va;
929 	}
930 
931 	if (IS_ENABLED(CFG_DYN_CONFIG)) {
932 		/*
933 		 * This is needed to enable virt_page_alloc() now that
934 		 * boot_mem_alloc() can't be used any longer.
935 		 */
936 		if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
937 			nex_page_alloc_init();
938 		else
939 			page_alloc_init();
940 	}
941 
942 	if (IS_ENABLED(CFG_WITH_PAGER)) {
943 		/*
944 		 * Pager: init_pager_runtime() calls thread_kernel_enable_vfp()
945 		 * so we must set a current thread right now to avoid a
946 		 * chicken-and-egg problem (thread_init_boot_thread() sets
947 		 * the current thread but needs things set by
948 		 * init_pager_runtime()).
949 		 */
950 		thread_get_core_local()->curr_thread = 0;
951 		init_pager_runtime(pageable_part);
952 	}
953 
954 	thread_init_primary();
955 	thread_init_per_cpu();
956 }
957 
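/* Tell whether non-maskable FIQ (Armv7 SCTLR.NMFI) is enabled on this CPU */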
958 static bool cpu_nmfi_enabled(void)
959 {
960 #if defined(ARM32)
961 	return read_sctlr() & SCTLR_NMFI;
962 #else
963 	/* Note: ARM64 does not feature non-maskable FIQ support. */
964 	return false;
965 #endif
966 }
967 
968 /*
969  * Note: this function is weak just to make it possible to exclude it from
970  * the unpaged area.
971  */
972 void __weak boot_init_primary_late(unsigned long fdt __unused,
973 				   unsigned long manifest __unused)
974 {
975 	size_t fdt_size = CFG_DTB_MAX_SIZE;
976 
977 	if (IS_ENABLED(CFG_TRANSFER_LIST) && mapped_tl) {
978 		struct transfer_list_entry *tl_e = NULL;
979 
980 		tl_e = transfer_list_find(mapped_tl, TL_TAG_FDT);
981 		if (tl_e) {
982 			/*
983 			 * Expand the data size of the DTB entry to the maximum
984 			 * allocable mapped memory to reserve sufficient space
985 			 * for inserting new nodes, avoiding potential corruption
986 			 * of the next entries.
987 			 */
988 			uint32_t dtb_max_sz = mapped_tl->max_size -
989 					      mapped_tl->size + tl_e->data_size;
990 
991 			if (!transfer_list_set_data_size(mapped_tl, tl_e,
992 							 dtb_max_sz)) {
993 				EMSG("Failed to extend DTB size to %#"PRIx32,
994 				     dtb_max_sz);
995 				panic();
996 			}
997 			fdt_size = tl_e->data_size;
998 		}
999 	}
1000 
1001 	init_external_dt(boot_arg_fdt, fdt_size);
1002 	reinit_manifest_dt();
1003 #ifdef CFG_CORE_SEL1_SPMC
1004 	tpm_map_log_area(get_manifest_dt());
1005 #else
1006 	tpm_map_log_area(get_external_dt());
1007 #endif
1008 	discover_nsec_memory();
1009 	update_external_dt();
1010 	configure_console_from_dt();
1011 
1012 	thread_init_thread_core_local();
1013 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
1014 		/*
1015 		 * Virtualization: We can't initialize threads right now because
1016 		 * threads belong to the "tee" part and will be initialized
1017 		 * separately for each new virtual guest. So, we'll clear
1018 		 * "curr_thread" and call it done.
1019 		 */
1020 		thread_get_core_local()->curr_thread = -1;
1021 	} else {
1022 		thread_init_boot_thread();
1023 	}
1024 }
1025 
1026 void __weak boot_init_primary_runtime(void)
1027 {
1028 	IMSG("OP-TEE version: %s", core_v_str);
1029 	if (IS_ENABLED(CFG_INSECURE)) {
1030 		IMSG("WARNING: This OP-TEE configuration might be insecure!");
1031 		IMSG("WARNING: Please check https://optee.readthedocs.io/en/latest/architecture/porting_guidelines.html");
1032 	}
1033 	IMSG("Primary CPU initializing");
1034 #ifdef CFG_CORE_ASLR
1035 	DMSG("Executing at offset %#lx with virtual load address %#"PRIxVA,
1036 	     (unsigned long)boot_mmu_config.map_offset, VCORE_START_VA);
1037 #endif
1038 #ifdef CFG_NS_VIRTUALIZATION
1039 	DMSG("NS-virtualization enabled, supporting %u guests",
1040 	     CFG_VIRT_GUEST_COUNT);
1041 #endif
1042 	if (IS_ENABLED(CFG_MEMTAG))
1043 		DMSG("Memory tagging %s",
1044 		     memtag_is_enabled() ?  "enabled" : "disabled");
1045 
1046 	/* Check if platform needs NMFI workaround */
1047 	if (cpu_nmfi_enabled())	{
1048 		if (!IS_ENABLED(CFG_CORE_WORKAROUND_ARM_NMFI))
1049 			IMSG("WARNING: This ARM core has NMFI enabled, please apply workaround!");
1050 	} else {
1051 		if (IS_ENABLED(CFG_CORE_WORKAROUND_ARM_NMFI))
1052 			IMSG("WARNING: This ARM core does not have NMFI enabled, no need for workaround");
1053 	}
1054 
1055 	boot_primary_init_intc();
1056 	init_vfp_nsec();
1057 	if (!IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
1058 		/*
1059 		 * Unmask native interrupts during driver initcalls.
1060 		 *
1061 		 * NS-virtualization still uses the temporary stack also
1062 		 * used for exception handling so it must still have native
1063 		 * interrupts masked.
1064 		 */
1065 		thread_set_exceptions(thread_get_exceptions() &
1066 				      ~THREAD_EXCP_NATIVE_INTR);
1067 		init_tee_runtime();
1068 	}
1069 
1070 	if (!IS_ENABLED(CFG_WITH_PAGER))
1071 		boot_mem_release_tmp_alloc();
1072 }
1073 
1074 void __weak boot_init_primary_final(void)
1075 {
1076 	if (!IS_ENABLED(CFG_NS_VIRTUALIZATION))
1077 		call_driver_initcalls();
1078 
1079 	call_finalcalls();
1080 
1081 	IMSG("Primary CPU switching to normal world boot");
1082 
1083 	/* Mask native interrupts before switching to the normal world */
1084 	if (!IS_ENABLED(CFG_NS_VIRTUALIZATION))
1085 		thread_set_exceptions(thread_get_exceptions() |
1086 				      THREAD_EXCP_NATIVE_INTR);
1087 }
1088 
1089 static void init_secondary_helper(void)
1090 {
1091 	IMSG("Secondary CPU %zu initializing", get_core_pos());
1092 
1093 	/*
1094 	 * Mask asynchronous exceptions before switching to the thread vector
1095 	 * as the thread handler requires those to be masked while
1096 	 * executing with the temporary stack. The thread subsystem also
1097 	 * asserts that the foreign interrupts are blocked when using most of
1098 	 * its functions.
1099 	 */
1100 	thread_set_exceptions(THREAD_EXCP_ALL);
1101 
1102 	secondary_init_cntfrq();
1103 	thread_init_per_cpu();
1104 	boot_secondary_init_intc();
1105 	init_vfp_sec();
1106 	init_vfp_nsec();
1107 
1108 	IMSG("Secondary CPU %zu switching to normal world boot", get_core_pos());
1109 }
1110 
1111 /*
1112  * Note: this function is weak just to make it possible to exclude it from
1113  * the unpaged area so that it lies in the init area.
1114  */
1115 void __weak boot_init_primary_early(void)
1116 {
1117 	unsigned long pageable_part = 0;
1118 	struct transfer_list_entry *tl_e = NULL;
1119 
1120 	if (IS_ENABLED(CFG_TRANSFER_LIST) && boot_arg_transfer_list) {
1121 		/* map and save the TL */
1122 		mapped_tl = transfer_list_map(boot_arg_transfer_list);
1123 		if (!mapped_tl)
1124 			panic("Failed to map transfer list");
1125 
1126 		transfer_list_dump(mapped_tl);
1127 		tl_e = transfer_list_find(mapped_tl, TL_TAG_OPTEE_PAGABLE_PART);
1128 	}
1129 
1130 	if (IS_ENABLED(CFG_WITH_PAGER)) {
1131 		if (IS_ENABLED(CFG_TRANSFER_LIST) && tl_e)
1132 			pageable_part =
1133 				get_le64(transfer_list_entry_data(tl_e));
1134 		else
1135 			pageable_part = boot_arg_pageable_part;
1136 	}
1137 
1138 	init_primary(pageable_part);
1139 }
1140 
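/*
 * Sanity check the transfer list passed by the previous boot stage and
 * record its address for mapping later during boot.
 */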
1141 static void boot_save_transfer_list(unsigned long zero_reg,
1142 				    unsigned long transfer_list,
1143 				    unsigned long fdt)
1144 {
1145 	struct transfer_list_header *tl = (void *)transfer_list;
1146 	struct transfer_list_entry *tl_e = NULL;
1147 
1148 	if (zero_reg != 0)
1149 		panic("Incorrect transfer list register convention");
1150 
1151 	if (!IS_ALIGNED_WITH_TYPE(transfer_list, struct transfer_list_header) ||
1152 	    !IS_ALIGNED(transfer_list, TL_ALIGNMENT_FROM_ORDER(tl->alignment)))
1153 		panic("Transfer list base address is not aligned");
1154 
1155 	if (transfer_list_check_header(tl) == TL_OPS_NONE)
1156 		panic("Invalid transfer list");
1157 
1158 	tl_e = transfer_list_find(tl, TL_TAG_FDT);
1159 	if (fdt != (unsigned long)transfer_list_entry_data(tl_e))
1160 		panic("DT does not match the DT entry of the TL");
1161 
1162 	boot_arg_transfer_list = transfer_list;
1163 }
1164 
1165 #if defined(CFG_WITH_ARM_TRUSTED_FW)
1166 unsigned long boot_cpu_on_handler(unsigned long a0 __maybe_unused,
1167 				  unsigned long a1 __unused)
1168 {
1169 	init_secondary_helper();
1170 	return 0;
1171 }
1172 #else
1173 void boot_init_secondary(unsigned long nsec_entry __unused)
1174 {
1175 	init_secondary_helper();
1176 }
1177 #endif
1178 
1179 #if defined(CFG_BOOT_SECONDARY_REQUEST)
1180 void boot_set_core_ns_entry(size_t core_idx, uintptr_t entry,
1181 			    uintptr_t context_id)
1182 {
1183 	ns_entry_contexts[core_idx].entry_point = entry;
1184 	ns_entry_contexts[core_idx].context_id = context_id;
1185 	dsb_ishst();
1186 }
1187 
1188 int boot_core_release(size_t core_idx, paddr_t entry)
1189 {
1190 	if (!core_idx || core_idx >= CFG_TEE_CORE_NB_CORE)
1191 		return -1;
1192 
1193 	ns_entry_contexts[core_idx].entry_point = entry;
1194 	dmb();
1195 	spin_table[core_idx] = 1;
1196 	dsb();
1197 	sev();
1198 
1199 	return 0;
1200 }
1201 
1202 /*
1203  * Spin until the secondary boot request, then return with
1204  * the secondary core entry address.
1205  */
1206 struct ns_entry_context *boot_core_hpen(void)
1207 {
1208 #ifdef CFG_PSCI_ARM32
1209 	return &ns_entry_contexts[get_core_pos()];
1210 #else
1211 	do {
1212 		wfe();
1213 	} while (!spin_table[get_core_pos()]);
1214 	dmb();
1215 	return &ns_entry_contexts[get_core_pos()];
1216 #endif
1217 }
1218 #endif
1219 
1220 #if defined(CFG_CORE_ASLR)
1221 #if defined(CFG_DT)
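/*
 * Read the "kaslr-seed" property from the /secure-chosen node of the boot
 * FDT, falling back to plat_get_aslr_seed().
 */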
1222 unsigned long __weak get_aslr_seed(void)
1223 {
1224 	void *fdt = NULL;
1225 	int rc = 0;
1226 	const uint64_t *seed = NULL;
1227 	int offs = 0;
1228 	int len = 0;
1229 
1230 	if (!IS_ENABLED(CFG_CORE_SEL2_SPMC))
1231 		fdt = (void *)boot_arg_fdt;
1232 
1233 	if (!fdt) {
1234 		DMSG("No fdt");
1235 		goto err;
1236 	}
1237 
1238 	rc = fdt_check_header(fdt);
1239 	if (rc) {
1240 		DMSG("Bad fdt: %d", rc);
1241 		goto err;
1242 	}
1243 
1244 	offs =  fdt_path_offset(fdt, "/secure-chosen");
1245 	if (offs < 0) {
1246 		DMSG("Cannot find /secure-chosen");
1247 		goto err;
1248 	}
1249 	seed = fdt_getprop(fdt, offs, "kaslr-seed", &len);
1250 	if (!seed || len != sizeof(*seed)) {
1251 		DMSG("Cannot find valid kaslr-seed");
1252 		goto err;
1253 	}
1254 
1255 	return fdt64_to_cpu(fdt64_ld(seed));
1256 
1257 err:
1258 	/* Try platform implementation */
1259 	return plat_get_aslr_seed();
1260 }
1261 #else /*!CFG_DT*/
1262 unsigned long __weak get_aslr_seed(void)
1263 {
1264 	/* Try platform implementation */
1265 	return plat_get_aslr_seed();
1266 }
1267 #endif /*!CFG_DT*/
1268 #endif /*CFG_CORE_ASLR*/
1269 
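/*
 * Extract and validate the manifest device tree address from the FF-A boot
 * information blob.
 */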
1270 static void *get_fdt_from_boot_info(struct ffa_boot_info_header_1_1 *hdr)
1271 {
1272 	struct ffa_boot_info_1_1 *desc = NULL;
1273 	uint8_t content_fmt = 0;
1274 	uint8_t name_fmt = 0;
1275 	void *fdt = NULL;
1276 	int ret = 0;
1277 
1278 	if (hdr->signature != FFA_BOOT_INFO_SIGNATURE) {
1279 		EMSG("Bad boot info signature %#"PRIx32, hdr->signature);
1280 		panic();
1281 	}
1282 	if (hdr->version != FFA_BOOT_INFO_VERSION_1_1 &&
1283 	    hdr->version != FFA_BOOT_INFO_VERSION_1_2) {
1284 		EMSG("Bad boot info version %#"PRIx32, hdr->version);
1285 		panic();
1286 	}
1287 	if (hdr->desc_count != 1) {
1288 		EMSG("Bad boot info descriptor count %#"PRIx32,
1289 		     hdr->desc_count);
1290 		panic();
1291 	}
1292 	desc = (void *)((vaddr_t)hdr + hdr->desc_offset);
1293 	name_fmt = desc->flags & FFA_BOOT_INFO_FLAG_NAME_FORMAT_MASK;
1294 	if (name_fmt == FFA_BOOT_INFO_FLAG_NAME_FORMAT_STRING)
1295 		DMSG("Boot info descriptor name \"%16s\"", desc->name);
1296 	else if (name_fmt == FFA_BOOT_INFO_FLAG_NAME_FORMAT_UUID)
1297 		DMSG("Boot info descriptor UUID %pUl", (void *)desc->name);
1298 	else
1299 		DMSG("Boot info descriptor: unknown name format %"PRIu8,
1300 		     name_fmt);
1301 
1302 	content_fmt = (desc->flags & FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_MASK) >>
1303 		      FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_SHIFT;
1304 	if (content_fmt != FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_ADDR) {
1305 		EMSG("Bad boot info content format %"PRIu8", expected %u (address)",
1306 		     content_fmt, FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_ADDR);
1307 		panic();
1308 	}
1309 
1310 	fdt = (void *)(vaddr_t)desc->contents;
1311 	ret = fdt_check_full(fdt, desc->size);
1312 	if (ret < 0) {
1313 		EMSG("Invalid Device Tree at %p: error %d", fdt, ret);
1314 		panic();
1315 	}
1316 	return fdt;
1317 }
1318 
1319 static void get_sec_mem_from_manifest(void *fdt, paddr_t *base, size_t *size)
1320 {
1321 	int ret = 0;
1322 	uint64_t num = 0;
1323 
1324 	ret = fdt_node_check_compatible(fdt, 0, "arm,ffa-manifest-1.0");
1325 	if (ret < 0) {
1326 		EMSG("Invalid FF-A manifest at %p: error %d", fdt, ret);
1327 		panic();
1328 	}
1329 	ret = dt_getprop_as_number(fdt, 0, "load-address", &num);
1330 	if (ret < 0) {
1331 		EMSG("Can't read \"load-address\" from FF-A manifest at %p: error %d",
1332 		     fdt, ret);
1333 		panic();
1334 	}
1335 	*base = num;
1336 	/* "mem-size" is currently an undocumented extension to the spec. */
1337 	ret = dt_getprop_as_number(fdt, 0, "mem-size", &num);
1338 	if (ret < 0) {
1339 		EMSG("Can't read \"mem-size\" from FF-A manifest at %p: error %d",
1340 		     fdt, ret);
1341 		panic();
1342 	}
1343 	*size = num;
1344 }
1345 
1346 void __weak boot_save_args(unsigned long a0, unsigned long a1,
1347 			   unsigned long a2, unsigned long a3,
1348 			   unsigned long a4 __maybe_unused)
1349 {
1350 	/*
1351 	 * Register use:
1352 	 *
1353 	 * Scenario A: Default arguments
1354 	 * a0   - CFG_CORE_FFA=y && CFG_CORE_SEL2_SPMC=n:
1355 	 *        if non-NULL holds the TOS FW config [1] address
1356 	 *      - CFG_CORE_FFA=y &&
1357 	 *	  (CFG_CORE_SEL2_SPMC=y || CFG_CORE_EL3_SPMC=y):
1358 	 *        address of FF-A Boot Information Blob
1359 	 *      - CFG_CORE_FFA=n:
1360 	 *        if non-NULL holds the pageable part address
1361 	 * a1	- CFG_WITH_ARM_TRUSTED_FW=n (Armv7):
1362 	 *	  Armv7 standard bootarg #1 (kept track of in entry_a32.S)
1363 	 * a2   - CFG_CORE_SEL2_SPMC=n:
1364 	 *        if non-NULL holds the system DTB address
1365 	 *	- CFG_WITH_ARM_TRUSTED_FW=n (Armv7):
1366 	 *	  Armv7 standard bootarg #2 (system DTB address, kept track
1367 	 *	  of in entry_a32.S)
1368 	 * a3	- Not used
1369 	 * a4	- CFG_WITH_ARM_TRUSTED_FW=n:
1370 	 *	  Non-secure entry address
1371 	 *
1372 	 * [1] A TF-A concept: TOS_FW_CONFIG - Trusted OS Firmware
1373 	 * configuration file. Used by Trusted OS (BL32), that is, OP-TEE
1374 	 * here. This is also called Manifest DT, related to the Manifest DT
1375 	 * passed in the FF-A Boot Information Blob, but with a different
1376 	 * compatible string.
1377 
1378 	 *
1379 	 * Note: FF-A and non-secure entry are not yet supported with
1380 	 *       Transfer List
1381 	 * a0	- DTB address or 0 (AArch64)
1382 	 *	- must be 0 (AArch32)
1383 	 * a1	- 1 << 32 | TRANSFER_LIST_SIGNATURE[0:31] (AArch64)
1384 	 *	- 1 << 24 | TRANSFER_LIST_SIGNATURE[0:23] (AArch32)
1385 	 * a2	- must be 0 (AArch64)
1386 	 *	- DTB address or 0 (AArch32)
1387 	 * a3	- Transfer list base address
1388 	 * a4	- Not used
1389 	 */
1390 
1391 	if (IS_ENABLED(CFG_TRANSFER_LIST)) {
1392 		if (IS_ENABLED(CFG_ARM64_core) &&
1393 		    a1 == TL_HANDOFF_X1_VALUE(TL_REG_CONVENTION_VER)) {
1394 			boot_save_transfer_list(a2, a3, a0);
1395 			boot_arg_fdt = a0;
1396 		} else if (IS_ENABLED(CFG_ARM32_core) &&
1397 			   a1 == TL_HANDOFF_R1_VALUE(TL_REG_CONVENTION_VER)) {
1398 			boot_save_transfer_list(a0, a3, a2);
1399 			boot_arg_fdt = a2;
1400 		}
1401 
1402 		return;
1403 	}
1404 
1405 	if (!IS_ENABLED(CFG_CORE_SEL2_SPMC)) {
1406 #if defined(CFG_DT_ADDR)
1407 		boot_arg_fdt = CFG_DT_ADDR;
1408 #else
1409 		boot_arg_fdt = a2;
1410 #endif
1411 	}
1412 
1413 	if (IS_ENABLED(CFG_CORE_FFA)) {
1414 		if (IS_ENABLED(CFG_CORE_SEL2_SPMC) ||
1415 		    IS_ENABLED(CFG_CORE_EL3_SPMC))
1416 			init_manifest_dt(get_fdt_from_boot_info((void *)a0));
1417 		else
1418 			init_manifest_dt((void *)a0);
1419 		if (IS_ENABLED(CFG_CORE_SEL2_SPMC) &&
1420 		    IS_ENABLED(CFG_CORE_PHYS_RELOCATABLE)) {
1421 			paddr_t base = 0;
1422 			size_t size = 0;
1423 
1424 			get_sec_mem_from_manifest(get_manifest_dt(),
1425 						  &base, &size);
1426 			core_mmu_set_secure_memory(base, size);
1427 		}
1428 	} else {
1429 		if (IS_ENABLED(CFG_WITH_PAGER)) {
1430 #if defined(CFG_PAGEABLE_ADDR)
1431 			boot_arg_pageable_part = CFG_PAGEABLE_ADDR;
1432 #else
1433 			boot_arg_pageable_part = a0;
1434 #endif
1435 		}
1436 		if (!IS_ENABLED(CFG_WITH_ARM_TRUSTED_FW)) {
1437 #if defined(CFG_NS_ENTRY_ADDR)
1438 			boot_arg_nsec_entry = CFG_NS_ENTRY_ADDR;
1439 #else
1440 			boot_arg_nsec_entry = a4;
1441 #endif
1442 		}
1443 	}
1444 }
1445 
1446 #if defined(CFG_TRANSFER_LIST)
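/*
 * boot_final() hook: pack the possibly updated DTB back into its transfer
 * list entry and unmap the transfer list.
 */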
1447 static TEE_Result release_transfer_list(void)
1448 {
1449 	struct dt_descriptor *dt = get_external_dt_desc();
1450 
1451 	if (!mapped_tl)
1452 		return TEE_SUCCESS;
1453 
1454 	if (dt) {
1455 		int ret = 0;
1456 		struct transfer_list_entry *tl_e = NULL;
1457 
1458 		/*
1459 		 * Pack the DTB and update the transfer list before unmapping
1460 		 */
1461 		ret = fdt_pack(dt->blob);
1462 		if (ret < 0) {
1463 			EMSG("Failed to pack Device Tree at 0x%" PRIxPA
1464 			     ": error %d", virt_to_phys(dt->blob), ret);
1465 			panic();
1466 		}
1467 
1468 		tl_e = transfer_list_find(mapped_tl, TL_TAG_FDT);
1469 		assert(dt->blob == transfer_list_entry_data(tl_e));
1470 		transfer_list_set_data_size(mapped_tl, tl_e,
1471 					    fdt_totalsize(dt->blob));
1472 		dt->blob = NULL;
1473 	}
1474 
1475 	transfer_list_unmap_sync(mapped_tl);
1476 	mapped_tl = NULL;
1477 
1478 	return TEE_SUCCESS;
1479 }
1480 
1481 boot_final(release_transfer_list);
1482 #endif
1483