// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2015-2023, Linaro Limited
 * Copyright (c) 2023, Arm Limited
 */

#include <arm.h>
#include <assert.h>
#include <compiler.h>
#include <config.h>
#include <console.h>
#include <crypto/crypto.h>
#include <drivers/gic.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <ffa.h>
#include <initcall.h>
#include <inttypes.h>
#include <io.h>
#include <keep.h>
#include <kernel/asan.h>
#include <kernel/boot.h>
#include <kernel/dt.h>
#include <kernel/linker.h>
#include <kernel/misc.h>
#include <kernel/panic.h>
#include <kernel/tee_misc.h>
#include <kernel/thread.h>
#include <kernel/tpm.h>
#include <kernel/transfer_list.h>
#include <libfdt.h>
#include <malloc.h>
#include <memtag.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <mm/fobj.h>
#include <mm/page_alloc.h>
#include <mm/phys_mem.h>
#include <mm/tee_mm.h>
#include <mm/tee_pager.h>
#include <sm/psci.h>
#include <stdalign.h>
#include <trace.h>
#include <utee_defines.h>
#include <util.h>

#include <platform_config.h>

#if !defined(CFG_WITH_ARM_TRUSTED_FW)
#include <sm/sm.h>
#endif

#if defined(CFG_WITH_VFP)
#include <kernel/vfp.h>
#endif

/*
 * In this file we're using unsigned long to represent physical pointers as
 * they are received in a single register when OP-TEE is initially entered.
 * This limits 32-bit systems to only make use of the lower 32 bits of a
 * physical address for initial parameters.
 *
 * 64-bit systems on the other hand can use full 64-bit physical pointers.
 */
#define PADDR_INVALID		ULONG_MAX

#if defined(CFG_BOOT_SECONDARY_REQUEST)
struct ns_entry_context {
	uintptr_t entry_point;
	uintptr_t context_id;
};
struct ns_entry_context ns_entry_contexts[CFG_TEE_CORE_NB_CORE];
static uint32_t spin_table[CFG_TEE_CORE_NB_CORE];
#endif

#ifdef CFG_BOOT_SYNC_CPU
/*
 * Array used during boot to synchronize the CPUs:
 * 0 means the CPU has not started, 1 means it has started.
 */
uint32_t sem_cpu_sync[CFG_TEE_CORE_NB_CORE];
DECLARE_KEEP_PAGER(sem_cpu_sync);
#endif

/*
 * Must not be in .bss since it's initialized and used from assembly before
 * .bss is cleared.
 */
vaddr_t boot_cached_mem_end __nex_data = 1;

static unsigned long boot_arg_fdt __nex_bss;
unsigned long boot_arg_nsec_entry __nex_bss;
static unsigned long boot_arg_pageable_part __nex_bss;
static unsigned long boot_arg_transfer_list __nex_bss;
static struct transfer_list_header *mapped_tl __nex_bss;

#ifdef CFG_SECONDARY_INIT_CNTFRQ
static uint32_t cntfrq;
#endif

/* May be overridden in plat-$(PLATFORM)/main.c */
__weak void plat_primary_init_early(void)
{
}
DECLARE_KEEP_PAGER(plat_primary_init_early);

/* May be overridden in plat-$(PLATFORM)/main.c */
__weak void boot_primary_init_intc(void)
{
}

/* May be overridden in plat-$(PLATFORM)/main.c */
__weak void boot_secondary_init_intc(void)
{
}

/* May be overridden in plat-$(PLATFORM)/main.c */
__weak unsigned long plat_get_aslr_seed(void)
{
	DMSG("Warning: no ASLR seed");

	return 0;
}

/*
 * This function is called as a guard after each SMC call which is not
 * supposed to return.
 */
void __panic_at_smc_return(void)
{
	panic();
}

#if defined(CFG_WITH_ARM_TRUSTED_FW)
void init_sec_mon(unsigned long nsec_entry __maybe_unused)
{
	assert(nsec_entry == PADDR_INVALID);
	/* Do nothing as we don't have a secure monitor */
}
#else
/* May be overridden in plat-$(PLATFORM)/main.c */
__weak void init_sec_mon(unsigned long nsec_entry)
{
	struct sm_nsec_ctx *nsec_ctx;

	assert(nsec_entry != PADDR_INVALID);

	/* Initialize secure monitor */
	nsec_ctx = sm_get_nsec_ctx();
	nsec_ctx->mon_lr = nsec_entry;
	nsec_ctx->mon_spsr = CPSR_MODE_SVC | CPSR_I;
	if (nsec_entry & 1)
		nsec_ctx->mon_spsr |= CPSR_T;
}
#endif

#if defined(CFG_WITH_ARM_TRUSTED_FW)
static void init_vfp_nsec(void)
{
}
#else
static void init_vfp_nsec(void)
{
	/* Normal world can use CP10 and CP11 (SIMD/VFP) */
	write_nsacr(read_nsacr() | NSACR_CP10 | NSACR_CP11);
}
#endif

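/*
 * Panic unless the CPU implements every Crypto Extension instruction set
 * that the configuration enables, so that a misconfigured build fails
 * loudly at boot instead of faulting on the first crypto operation.
 */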
static void check_crypto_extensions(void)
{
	bool ce_supported = true;

	if (!feat_aes_implemented() &&
	    IS_ENABLED(CFG_CRYPTO_AES_ARM_CE)) {
		EMSG("AES instructions are not supported");
		ce_supported = false;
	}

	if (!feat_sha1_implemented() &&
	    IS_ENABLED(CFG_CRYPTO_SHA1_ARM_CE)) {
		EMSG("SHA1 instructions are not supported");
		ce_supported = false;
	}

	if (!feat_sha256_implemented() &&
	    IS_ENABLED(CFG_CRYPTO_SHA256_ARM_CE)) {
		EMSG("SHA256 instructions are not supported");
		ce_supported = false;
	}

	/* Check AArch64-specific instructions */
	if (IS_ENABLED(CFG_ARM64_core)) {
		if (!feat_sha512_implemented() &&
		    IS_ENABLED(CFG_CRYPTO_SHA512_ARM_CE)) {
			EMSG("SHA512 instructions are not supported");
			ce_supported = false;
		}

		if (!feat_sha3_implemented() &&
		    IS_ENABLED(CFG_CRYPTO_SHA3_ARM_CE)) {
			EMSG("SHA3 instructions are not supported");
			ce_supported = false;
		}

		if (!feat_sm3_implemented() &&
		    IS_ENABLED(CFG_CRYPTO_SM3_ARM_CE)) {
			EMSG("SM3 instructions are not supported");
			ce_supported = false;
		}

		if (!feat_sm4_implemented() &&
		    IS_ENABLED(CFG_CRYPTO_SM4_ARM_CE)) {
			EMSG("SM4 instructions are not supported");
			ce_supported = false;
		}
	}

	if (!ce_supported)
		panic("HW doesn't support CE instructions");
}

#if defined(CFG_WITH_VFP)

#ifdef ARM32
static void init_vfp_sec(void)
{
	uint32_t cpacr = read_cpacr();

	/*
	 * Enable Advanced SIMD functionality.
	 * Enable use of D16-D31 of the Floating-point Extension register
	 * file.
	 */
	cpacr &= ~(CPACR_ASEDIS | CPACR_D32DIS);
	/*
	 * Enable usage of CP10 and CP11 (SIMD/VFP) in both kernel and
	 * user mode.
	 */
	cpacr |= CPACR_CP(10, CPACR_CP_ACCESS_FULL);
	cpacr |= CPACR_CP(11, CPACR_CP_ACCESS_FULL);
	write_cpacr(cpacr);
}
#endif /* ARM32 */

#ifdef ARM64
static void init_vfp_sec(void)
{
	/* Not using VFP until thread_kernel_enable_vfp() */
	vfp_disable();
}
#endif /* ARM64 */

#else /* CFG_WITH_VFP */

static void init_vfp_sec(void)
{
	/* Not using VFP */
}
#endif

#ifdef CFG_SECONDARY_INIT_CNTFRQ
static void primary_save_cntfrq(void)
{
	assert(cntfrq == 0);

	/*
	 * CNTFRQ should be initialized on the primary CPU by a
	 * previous boot stage
	 */
	cntfrq = read_cntfrq();
}

static void secondary_init_cntfrq(void)
{
	assert(cntfrq != 0);
	write_cntfrq(cntfrq);
}
#else /* CFG_SECONDARY_INIT_CNTFRQ */
static void primary_save_cntfrq(void)
{
}

static void secondary_init_cntfrq(void)
{
}
#endif

#ifdef CFG_CORE_SANITIZE_KADDRESS
static void init_run_constructors(void)
{
	const vaddr_t *ctor;

	for (ctor = &__ctor_list; ctor < &__ctor_end; ctor++)
		((void (*)(void))(*ctor))();
}

static void init_asan(void)
{
	/*
	 * CFG_ASAN_SHADOW_OFFSET is also supplied as
	 * -fasan-shadow-offset=$(CFG_ASAN_SHADOW_OFFSET) to the compiler.
	 * Since all the values needed to calculate CFG_ASAN_SHADOW_OFFSET
	 * aren't available to make, it has to be calculated in advance and
	 * hard coded into the platform conf.mk. Here, where all the needed
	 * values are available, we double check that the compiler was
	 * supplied the correct value.
	 */

#define __ASAN_SHADOW_START \
	ROUNDUP(TEE_RAM_START + (TEE_RAM_VA_SIZE * 8) / 9 - 8, 8)
	assert(__ASAN_SHADOW_START == (vaddr_t)&__asan_shadow_start);
#define __CFG_ASAN_SHADOW_OFFSET \
	(__ASAN_SHADOW_START - (TEE_RAM_START / 8))
	COMPILE_TIME_ASSERT(CFG_ASAN_SHADOW_OFFSET == __CFG_ASAN_SHADOW_OFFSET);
#undef __ASAN_SHADOW_START
#undef __CFG_ASAN_SHADOW_OFFSET

	/*
	 * Mark the area covered by the shadow memory as shadowed:
	 * everything from the load address up to the beginning of the
	 * shadow area.
	 */
	asan_set_shadowed((void *)TEE_LOAD_ADDR, &__asan_shadow_start);

	/*
	 * Add access to areas that aren't opened automatically by a
	 * constructor.
	 */
	boot_mem_init_asan();
	asan_tag_access(&__ctor_list, &__ctor_end);
	asan_tag_access(__rodata_start, __rodata_end);
#ifdef CFG_WITH_PAGER
	asan_tag_access(__pageable_start, __pageable_end);
#endif /*CFG_WITH_PAGER*/
	asan_tag_access(__nozi_start, __nozi_end);
#ifdef ARM32
	asan_tag_access(__exidx_start, __exidx_end);
	asan_tag_access(__extab_start, __extab_end);
#endif

	init_run_constructors();

	/* Everything is tagged correctly, let's start address sanitizing. */
	asan_start();
}
#else /*CFG_CORE_SANITIZE_KADDRESS*/
static void init_asan(void)
{
}
#endif /*CFG_CORE_SANITIZE_KADDRESS*/

#if defined(CFG_MEMTAG)
/* Called from entry_a64.S only when MEMTAG is configured */
void boot_init_memtag(void)
{
	memtag_init_ops(feat_mte_implemented());
}

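/*
 * core_mmu_for_each_map() callback: reset the MTE allocation tags to zero
 * for the writable secure RAM areas so that later tagged accesses start
 * from a known state.
 */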
static TEE_Result mmap_clear_memtag(struct tee_mmap_region *map,
				    void *ptr __unused)
{
	switch (map->type) {
	case MEM_AREA_NEX_RAM_RO:
	case MEM_AREA_SEC_RAM_OVERALL:
		DMSG("Clearing tags for VA %#"PRIxVA"..%#"PRIxVA,
		     map->va, map->va + map->size - 1);
		memtag_set_tags((void *)map->va, map->size, 0);
		break;
	default:
		break;
	}

	return TEE_SUCCESS;
}

/* Called from entry_a64.S only when MEMTAG is configured */
void boot_clear_memtag(void)
{
	core_mmu_for_each_map(NULL, mmap_clear_memtag);
}
#endif

#ifdef CFG_WITH_PAGER

#ifdef CFG_CORE_SANITIZE_KADDRESS
static void carve_out_asan_mem(void)
{
	nex_phys_mem_partial_carve_out(ASAN_MAP_PA, ASAN_MAP_SZ);
}
#else
static void carve_out_asan_mem(void)
{
}
#endif

static void print_pager_pool_size(void)
{
	struct tee_pager_stats __maybe_unused stats;

	tee_pager_get_stats(&stats);
	IMSG("Pager pool size: %zukB",
		stats.npages_all * SMALL_PAGE_SIZE / 1024);
}

static void init_virt_pool(tee_mm_pool_t *virt_pool)
{
	const vaddr_t begin = VCORE_START_VA;
	size_t size = TEE_RAM_VA_SIZE;

#ifdef CFG_CORE_SANITIZE_KADDRESS
	/* Carve out asan memory, flat mapped after core memory */
	if (begin + size > ASAN_SHADOW_PA)
		size = ASAN_MAP_PA - begin;
#endif

	if (!tee_mm_init(virt_pool, begin, size, SMALL_PAGE_SHIFT,
			 TEE_MM_POOL_NO_FLAGS))
		panic("core_virt_mem_pool init failed");
}

/*
 * With CFG_CORE_ASLR=y the init part is relocated very early during boot.
 * The init part is also paged just as the rest of the normal paged code, with
 * the difference that it's preloaded during boot. When the backing store
 * is configured the entire paged binary is copied in place and then also
 * the init part. Since the init part has been relocated (references to
 * addresses updated to compensate for the new load address) this has to be
 * undone for the hashes of those pages to match with the original binary.
 *
 * If CFG_CORE_ASLR=n, nothing needs to be done as the code/ro pages are
 * unchanged.
 */
static void undo_init_relocation(uint8_t *paged_store __maybe_unused)
{
#ifdef CFG_CORE_ASLR
	unsigned long *ptr = NULL;
	const uint32_t *reloc = NULL;
	const uint32_t *reloc_end = NULL;
	unsigned long offs = boot_mmu_config.map_offset;
	const struct boot_embdata *embdata = (const void *)__init_end;
	vaddr_t addr_end = (vaddr_t)__init_end - offs - TEE_LOAD_ADDR;
	vaddr_t addr_start = (vaddr_t)__init_start - offs - TEE_LOAD_ADDR;

	reloc = (const void *)((vaddr_t)embdata + embdata->reloc_offset);
	reloc_end = reloc + embdata->reloc_len / sizeof(*reloc);

	for (; reloc < reloc_end; reloc++) {
		if (*reloc < addr_start)
			continue;
		if (*reloc >= addr_end)
			break;
		ptr = (void *)(paged_store + *reloc - addr_start);
		*ptr -= offs;
	}
#endif
}

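/*
 * Create the file object backing the read-only paged area. With
 * CFG_CORE_ASLR=y the embedded relocation table is handed to the fobj so
 * that relocations can be re-applied to a page after its hash has been
 * verified on page-in.
 */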
static struct fobj *ro_paged_alloc(tee_mm_entry_t *mm, void *hashes,
				   void *store)
{
	const unsigned int num_pages = tee_mm_get_bytes(mm) / SMALL_PAGE_SIZE;
#ifdef CFG_CORE_ASLR
	unsigned int reloc_offs = (vaddr_t)__pageable_start - VCORE_START_VA;
	const struct boot_embdata *embdata = (const void *)__init_end;
	const void *reloc = __init_end + embdata->reloc_offset;

	return fobj_ro_reloc_paged_alloc(num_pages, hashes, reloc_offs,
					 reloc, embdata->reloc_len, store);
#else
	return fobj_ro_paged_alloc(num_pages, hashes, store);
#endif
}

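/*
 * Bring up the pager runtime: copy the pageable part of the binary into a
 * dedicated store in TA RAM, verify the SHA-256 hash of every page against
 * the hashes embedded at build time, then hand the virtual pages over to
 * the pager.
 */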
static void init_pager_runtime(unsigned long pageable_part)
{
	size_t n;
	size_t init_size = (size_t)(__init_end - __init_start);
	size_t pageable_start = (size_t)__pageable_start;
	size_t pageable_end = (size_t)__pageable_end;
	size_t pageable_size = pageable_end - pageable_start;
	vaddr_t tzsram_end = TZSRAM_BASE + TZSRAM_SIZE - TEE_LOAD_ADDR +
			     VCORE_START_VA;
	size_t hash_size = (pageable_size / SMALL_PAGE_SIZE) *
			   TEE_SHA256_HASH_SIZE;
	const struct boot_embdata *embdata = (const void *)__init_end;
	const void *tmp_hashes = NULL;
	tee_mm_entry_t *mm = NULL;
	struct fobj *fobj = NULL;
	uint8_t *paged_store = NULL;
	uint8_t *hashes = NULL;

	assert(pageable_size % SMALL_PAGE_SIZE == 0);
	assert(embdata->total_len >= embdata->hashes_offset +
				     embdata->hashes_len);
	assert(hash_size == embdata->hashes_len);

	tmp_hashes = __init_end + embdata->hashes_offset;

	/*
	 * This needs to be initialized early to support address lookup
	 * in MEM_AREA_TEE_RAM
	 */
	tee_pager_early_init();

	hashes = malloc(hash_size);
	IMSG_RAW("\n");
	IMSG("Pager is enabled. Hashes: %zu bytes", hash_size);
	assert(hashes);
	asan_memcpy_unchecked(hashes, tmp_hashes, hash_size);

	/*
	 * The pager is about to be enabled below, any temporary boot
	 * memory allocations must be removed now.
	 */
	boot_mem_release_tmp_alloc();

	carve_out_asan_mem();

	mm = nex_phys_mem_ta_alloc(pageable_size);
	assert(mm);
	paged_store = phys_to_virt(tee_mm_get_smem(mm),
				   MEM_AREA_SEC_RAM_OVERALL, pageable_size);
	/*
	 * Load pageable part in the dedicated allocated area:
	 * - Move pageable non-init part into pageable area. Note bootloader
	 *   may have loaded it anywhere in TA RAM hence use memmove().
	 * - Copy pageable init part from current location into pageable area.
	 */
	memmove(paged_store + init_size,
		phys_to_virt(pageable_part,
			     core_mmu_get_type_by_pa(pageable_part),
			     __pageable_part_end - __pageable_part_start),
		__pageable_part_end - __pageable_part_start);
	asan_memcpy_unchecked(paged_store, __init_start, init_size);
	/*
	 * Undo any relocation of the init part so the hash checks
	 * can pass.
	 */
	undo_init_relocation(paged_store);

	/* Check that the hashes of what's in the pageable area are OK */
	DMSG("Checking hashes of pageable area");
	for (n = 0; (n * SMALL_PAGE_SIZE) < pageable_size; n++) {
		const uint8_t *hash = hashes + n * TEE_SHA256_HASH_SIZE;
		const uint8_t *page = paged_store + n * SMALL_PAGE_SIZE;
		TEE_Result res;

		DMSG("hash pg_idx %zu hash %p page %p", n, hash, page);
		res = hash_sha256_check(hash, page, SMALL_PAGE_SIZE);
		if (res != TEE_SUCCESS) {
			EMSG("Hash failed for page %zu at %p: res 0x%x",
			     n, (void *)page, res);
			panic();
		}
	}

	/*
	 * Assert that the prepaged init sections are page aligned so that
	 * nothing trails uninitialized at the end of the premapped init
	 * area.
	 */
	assert(!(init_size & SMALL_PAGE_MASK));

	/*
	 * Initialize the virtual memory pool used for main_mmu_l2_ttb which
	 * is supplied to tee_pager_init() below.
	 */
	init_virt_pool(&core_virt_mem_pool);

	/*
	 * Assign the alias area for the pager at the end of the small-page
	 * block that the rest of the binary is loaded into. We're taking
	 * more than needed, but we're guaranteed not to need more than the
	 * physical amount of TZSRAM.
	 */
	mm = tee_mm_alloc2(&core_virt_mem_pool,
			   (vaddr_t)core_virt_mem_pool.lo +
			   core_virt_mem_pool.size - TZSRAM_SIZE,
			   TZSRAM_SIZE);
	assert(mm);
	tee_pager_set_alias_area(mm);

	/*
	 * Claim the virtual memory which isn't paged.
	 * The linear (flat-mapped) core memory ends there.
	 */
	mm = tee_mm_alloc2(&core_virt_mem_pool, VCORE_UNPG_RX_PA,
			   (vaddr_t)(__pageable_start - VCORE_UNPG_RX_PA));
	assert(mm);

	/*
	 * Allocate virtual memory for the pageable area and let the pager
	 * take charge of all the pages already assigned to that memory.
	 */
	mm = tee_mm_alloc2(&core_virt_mem_pool, (vaddr_t)__pageable_start,
			   pageable_size);
	assert(mm);
	fobj = ro_paged_alloc(mm, hashes, paged_store);
	assert(fobj);
	tee_pager_add_core_region(tee_mm_get_smem(mm), PAGED_REGION_TYPE_RO,
				  fobj);
	fobj_put(fobj);

	tee_pager_add_pages(pageable_start, init_size / SMALL_PAGE_SIZE, false);
	tee_pager_add_pages(pageable_start + init_size,
			    (pageable_size - init_size) / SMALL_PAGE_SIZE,
			    true);
	if (pageable_end < tzsram_end)
		tee_pager_add_pages(pageable_end, (tzsram_end - pageable_end) /
						   SMALL_PAGE_SIZE, true);

	/*
	 * There may be physical pages in TZSRAM before the core load address.
	 * These pages can be added to the physical pages pool of the pager.
	 * This setup may happen when the secure bootloader runs in TZSRAM
	 * and its memory can be reused by OP-TEE once the boot stages
	 * complete.
	 */
	tee_pager_add_pages(core_virt_mem_pool.lo,
			    (VCORE_UNPG_RX_PA - core_virt_mem_pool.lo) /
				SMALL_PAGE_SIZE,
			    true);

	print_pager_pool_size();
}
#else /*!CFG_WITH_PAGER*/
static void init_pager_runtime(unsigned long pageable_part __unused)
{
}
#endif

#if defined(CFG_DT)
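/*
 * Ensure the non-secure DT advertises OP-TEE. The resulting node looks
 * roughly like this (illustration; the interrupt cells depend on the
 * configured GIC interrupt ID):
 *
 *	firmware {
 *		optee {
 *			compatible = "linaro,optee-tz";
 *			method = "smc";
 *			interrupts = <...>;
 *		};
 *	};
 */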
static int add_optee_dt_node(struct dt_descriptor *dt)
{
	int offs;
	int ret;

	if (fdt_path_offset(dt->blob, "/firmware/optee") >= 0) {
		DMSG("OP-TEE Device Tree node already exists!");
		return 0;
	}

	offs = fdt_path_offset(dt->blob, "/firmware");
	if (offs < 0) {
		offs = add_dt_path_subnode(dt, "/", "firmware");
		if (offs < 0)
			return -1;
	}

	offs = fdt_add_subnode(dt->blob, offs, "optee");
	if (offs < 0)
		return -1;

	ret = fdt_setprop_string(dt->blob, offs, "compatible",
				 "linaro,optee-tz");
	if (ret < 0)
		return -1;
	ret = fdt_setprop_string(dt->blob, offs, "method", "smc");
	if (ret < 0)
		return -1;

	if (CFG_CORE_ASYNC_NOTIF_GIC_INTID) {
		/*
		 * The format of the interrupt property is defined by the
		 * binding of the interrupt domain root. In this case it's
		 * an Arm GIC v1, v2 or v3 so we must be compatible with
		 * these.
		 *
		 * An SPI type of interrupt is indicated with a 0 in the
		 * first cell. A PPI type is indicated with the value 1.
		 *
		 * The interrupt number goes in the second cell where
		 * SPIs range from 0 to 987 and PPIs range from 0 to 15.
		 *
		 * Flags are passed in the third cell.
		 */
		uint32_t itr_trigger = 0;
		uint32_t itr_type = 0;
		uint32_t itr_id = 0;
		uint32_t val[3] = { };

		/* PPIs are visible only in the current CPU cluster */
		static_assert(IS_ENABLED(CFG_CORE_FFA) ||
			      !CFG_CORE_ASYNC_NOTIF_GIC_INTID ||
			      (CFG_CORE_ASYNC_NOTIF_GIC_INTID >=
			       GIC_SPI_BASE) ||
			      ((CFG_TEE_CORE_NB_CORE <= 8) &&
			       (CFG_CORE_ASYNC_NOTIF_GIC_INTID >=
				GIC_PPI_BASE)));

		if (CFG_CORE_ASYNC_NOTIF_GIC_INTID >= GIC_SPI_BASE) {
			itr_type = GIC_SPI;
			itr_id = CFG_CORE_ASYNC_NOTIF_GIC_INTID - GIC_SPI_BASE;
			itr_trigger = IRQ_TYPE_EDGE_RISING;
		} else {
			itr_type = GIC_PPI;
			itr_id = CFG_CORE_ASYNC_NOTIF_GIC_INTID - GIC_PPI_BASE;
			itr_trigger = IRQ_TYPE_EDGE_RISING |
				      GIC_CPU_MASK_SIMPLE(CFG_TEE_CORE_NB_CORE);
		}

		val[0] = TEE_U32_TO_BIG_ENDIAN(itr_type);
		val[1] = TEE_U32_TO_BIG_ENDIAN(itr_id);
		val[2] = TEE_U32_TO_BIG_ENDIAN(itr_trigger);

		ret = fdt_setprop(dt->blob, offs, "interrupts", val,
				  sizeof(val));
		if (ret < 0)
			return -1;
	}
	return 0;
}

#ifdef CFG_PSCI_ARM32
static int append_psci_compatible(void *fdt, int offs, const char *str)
{
	return fdt_appendprop(fdt, offs, "compatible", str, strlen(str) + 1);
}

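/*
 * Expose PSCI to the normal world. The node added here looks roughly like
 * this (illustration):
 *
 *	psci {
 *		compatible = "arm,psci-1.0", "arm,psci-0.2", "arm,psci";
 *		method = "smc";
 *		cpu_suspend = <...>;
 *		cpu_off = <...>;
 *		cpu_on = <...>;
 *		sys_poweroff = <...>;
 *		sys_reset = <...>;
 *	};
 */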
static int dt_add_psci_node(struct dt_descriptor *dt)
{
	int offs;

	if (fdt_path_offset(dt->blob, "/psci") >= 0) {
		DMSG("PSCI Device Tree node already exists!");
		return 0;
	}

	offs = add_dt_path_subnode(dt, "/", "psci");
	if (offs < 0)
		return -1;
	if (append_psci_compatible(dt->blob, offs, "arm,psci-1.0"))
		return -1;
	if (append_psci_compatible(dt->blob, offs, "arm,psci-0.2"))
		return -1;
	if (append_psci_compatible(dt->blob, offs, "arm,psci"))
		return -1;
	if (fdt_setprop_string(dt->blob, offs, "method", "smc"))
		return -1;
	if (fdt_setprop_u32(dt->blob, offs, "cpu_suspend", PSCI_CPU_SUSPEND))
		return -1;
	if (fdt_setprop_u32(dt->blob, offs, "cpu_off", PSCI_CPU_OFF))
		return -1;
	if (fdt_setprop_u32(dt->blob, offs, "cpu_on", PSCI_CPU_ON))
		return -1;
	if (fdt_setprop_u32(dt->blob, offs, "sys_poweroff", PSCI_SYSTEM_OFF))
		return -1;
	if (fdt_setprop_u32(dt->blob, offs, "sys_reset", PSCI_SYSTEM_RESET))
		return -1;
	return 0;
}

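/*
 * Return 0 if any string in the node's "compatible" string list begins
 * with the given prefix, -1 otherwise.
 */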
static int check_node_compat_prefix(struct dt_descriptor *dt, int offs,
				    const char *prefix)
{
	const size_t prefix_len = strlen(prefix);
	size_t l;
	int plen;
	const char *prop;

	prop = fdt_getprop(dt->blob, offs, "compatible", &plen);
	if (!prop)
		return -1;

	while (plen > 0) {
		if (memcmp(prop, prefix, prefix_len) == 0)
			return 0; /* match */

		l = strlen(prop) + 1;
		prop += l;
		plen -= l;
	}

	return -1;
}

static int dt_add_psci_cpu_enable_methods(struct dt_descriptor *dt)
{
	int offs = 0;

	while (1) {
		offs = fdt_next_node(dt->blob, offs, NULL);
		if (offs < 0)
			break;
		if (fdt_getprop(dt->blob, offs, "enable-method", NULL))
			continue; /* already set */
		if (check_node_compat_prefix(dt, offs, "arm,cortex-a"))
			continue; /* no compatible */
		if (fdt_setprop_string(dt->blob, offs, "enable-method", "psci"))
			return -1;
		/* Need to restart scanning as offsets may have changed */
		offs = 0;
	}
	return 0;
}

static int config_psci(struct dt_descriptor *dt)
{
	if (dt_add_psci_node(dt))
		return -1;
	return dt_add_psci_cpu_enable_methods(dt);
}
#else
static int config_psci(struct dt_descriptor *dt __unused)
{
	return 0;
}
#endif /*CFG_PSCI_ARM32*/

static int mark_tzdram_as_reserved(struct dt_descriptor *dt)
{
	return add_res_mem_dt_node(dt, "optee_core", CFG_TZDRAM_START,
				   CFG_TZDRAM_SIZE);
}

static void update_external_dt(void)
{
	struct dt_descriptor *dt = get_external_dt_desc();

	if (!dt || !dt->blob)
		return;

	if (!IS_ENABLED(CFG_CORE_FFA) && add_optee_dt_node(dt))
		panic("Failed to add OP-TEE Device Tree node");

	if (config_psci(dt))
		panic("Failed to config PSCI");

#ifdef CFG_CORE_RESERVED_SHM
	if (mark_static_shm_as_reserved(dt))
		panic("Failed to config non-secure memory");
#endif

	if (mark_tzdram_as_reserved(dt))
		panic("Failed to config secure memory");
}
#else /*CFG_DT*/
static void update_external_dt(void)
{
}
#endif /*!CFG_DT*/

void init_tee_runtime(void)
{
	/*
	 * With virtualization we call this function when creating the
	 * OP-TEE partition instead.
	 */
	if (!IS_ENABLED(CFG_NS_VIRTUALIZATION))
		call_preinitcalls();
	call_early_initcalls();
	call_service_initcalls();

	/*
	 * These two functions use crypto_rng_read() to initialize the
	 * pauth keys. Once the initcalls above have returned we're
	 * guaranteed that crypto_rng_read() is ready to be used.
	 */
	thread_init_core_local_pauth_keys();
	thread_init_thread_pauth_keys();

	/*
	 * Reinitialize canaries around the stacks with crypto_rng_read().
	 *
	 * TODO: Updating canaries when CFG_NS_VIRTUALIZATION is enabled will
	 * require synchronization between thread_check_canaries() and
	 * thread_update_canaries().
	 */
	if (!IS_ENABLED(CFG_NS_VIRTUALIZATION))
		thread_update_canaries();
}

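/*
 * boot_mem_foreach_padding() callback: feed each unused padding range of
 * the boot memory into the heap so no mapped memory is wasted.
 */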
static bool add_padding_to_pool(vaddr_t va, size_t len, void *ptr __unused)
{
#ifdef CFG_NS_VIRTUALIZATION
	nex_malloc_add_pool((void *)va, len);
#else
	malloc_add_pool((void *)va, len);
#endif
	return true;
}

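/*
 * First stage of primary CPU initialization: mask exceptions, set up the
 * heap(s), initialize physical memory and, with CFG_WITH_PAGER=y, start
 * the pager before the paged parts of the binary are touched.
 */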
static void init_primary(unsigned long pageable_part)
{
	vaddr_t va = 0;

	/*
	 * Mask asynchronous exceptions before switching to the thread
	 * vector as the thread handler requires those to be masked while
	 * executing with the temporary stack. The thread subsystem also
	 * asserts that the foreign interrupts are blocked when using most
	 * of its functions.
	 */
	thread_set_exceptions(THREAD_EXCP_ALL);
	primary_save_cntfrq();
	init_vfp_sec();

	if (IS_ENABLED(CFG_CRYPTO_WITH_CE))
		check_crypto_extensions();

	init_asan();

	/*
	 * By default the whole of OP-TEE uses malloc, so we need to
	 * initialize it early. But, when virtualization is enabled, malloc
	 * is used only by the TEE runtime, so malloc should be initialized
	 * later, for every virtual partition separately. Core code uses
	 * nex_malloc instead.
	 */
#ifdef CFG_WITH_PAGER
	/* Add heap2 first as heap1 may be too small as initial bget pool */
	malloc_add_pool(__heap2_start, __heap2_end - __heap2_start);
#endif
#ifdef CFG_NS_VIRTUALIZATION
	nex_malloc_add_pool(__nex_heap_start, __nex_heap_end -
					      __nex_heap_start);
#else
	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
#endif
	IMSG_RAW("\n");
	if (IS_ENABLED(CFG_DYN_CONFIG)) {
		size_t sz = sizeof(struct thread_core_local) *
			    CFG_TEE_CORE_NB_CORE;
		void *p = boot_mem_alloc(sz, alignof(void *) * 2);

#ifdef CFG_NS_VIRTUALIZATION
		nex_malloc_add_pool(p, sz);
#else
		malloc_add_pool(p, sz);
#endif
	}

	core_mmu_save_mem_map();
	core_mmu_init_phys_mem();
	boot_mem_foreach_padding(add_padding_to_pool, NULL);
	va = boot_mem_release_unused();
	if (!IS_ENABLED(CFG_WITH_PAGER)) {
		/*
		 * We must update boot_cached_mem_end to reflect the memory
		 * just unmapped by boot_mem_release_unused().
		 */
		assert(va && va <= boot_cached_mem_end);
		boot_cached_mem_end = va;
	}

	if (IS_ENABLED(CFG_DYN_CONFIG)) {
		/*
		 * This is needed to enable virt_page_alloc() now that
		 * boot_mem_alloc() can't be used any longer.
		 */
		if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
			nex_page_alloc_init();
		else
			page_alloc_init();
	}

	if (IS_ENABLED(CFG_WITH_PAGER)) {
		/*
		 * Pager: init_runtime() calls thread_kernel_enable_vfp()
		 * so we must set a current thread right now to avoid a
		 * chicken-and-egg problem (thread_init_boot_thread() sets
		 * the current thread but needs things set by
		 * init_runtime()).
		 */
		thread_get_core_local()->curr_thread = 0;
		init_pager_runtime(pageable_part);
	}

	/* Initialize canaries around the stacks */
	thread_init_canaries();
	thread_init_per_cpu();
}

static bool cpu_nmfi_enabled(void)
{
#if defined(ARM32)
	return read_sctlr() & SCTLR_NMFI;
#else
	/* Note: ARM64 does not feature non-maskable FIQ support. */
	return false;
#endif
}

/*
 * Note: this function is weak just to make it possible to exclude it from
 * the unpaged area.
 */
void __weak boot_init_primary_late(unsigned long fdt __unused,
				   unsigned long manifest __unused)
{
	size_t fdt_size = CFG_DTB_MAX_SIZE;

	if (IS_ENABLED(CFG_TRANSFER_LIST) && mapped_tl) {
		struct transfer_list_entry *tl_e = NULL;

		tl_e = transfer_list_find(mapped_tl, TL_TAG_FDT);
		if (tl_e) {
			/*
			 * Expand the data size of the DTB entry to the
			 * maximum allocable mapped memory, to reserve
			 * sufficient space for inserting new nodes and to
			 * avoid potentially corrupting the entries that
			 * follow.
			 */
			uint32_t dtb_max_sz = mapped_tl->max_size -
					      mapped_tl->size + tl_e->data_size;

			if (!transfer_list_set_data_size(mapped_tl, tl_e,
							 dtb_max_sz)) {
				EMSG("Failed to extend DTB size to %#"PRIx32,
				     dtb_max_sz);
				panic();
			}
			fdt_size = tl_e->data_size;
		}
	}

	init_external_dt(boot_arg_fdt, fdt_size);
	reinit_manifest_dt();
#ifdef CFG_CORE_SEL1_SPMC
	tpm_map_log_area(get_manifest_dt());
#else
	tpm_map_log_area(get_external_dt());
#endif
	discover_nsec_memory();
	update_external_dt();
	configure_console_from_dt();

	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
		/*
		 * Virtualization: We can't initialize threads right now
		 * because threads belong to the "tee" part and will be
		 * initialized separately for each new virtual guest. So,
		 * we'll clear "curr_thread" and call it done.
		 */
		thread_get_core_local()->curr_thread = -1;
	} else {
		thread_init_threads(CFG_NUM_THREADS);
		thread_init_boot_thread();
	}
	thread_init_thread_core_local(CFG_TEE_CORE_NB_CORE);
}

void __weak boot_init_primary_runtime(void)
{
	thread_init_primary();
	IMSG("OP-TEE version: %s", core_v_str);
	if (IS_ENABLED(CFG_INSECURE)) {
		IMSG("WARNING: This OP-TEE configuration might be insecure!");
		IMSG("WARNING: Please check https://optee.readthedocs.io/en/latest/architecture/porting_guidelines.html");
	}
	IMSG("Primary CPU initializing");
#ifdef CFG_CORE_ASLR
	DMSG("Executing at offset %#lx with virtual load address %#"PRIxVA,
	     (unsigned long)boot_mmu_config.map_offset, VCORE_START_VA);
#endif
#ifdef CFG_NS_VIRTUALIZATION
	DMSG("NS-virtualization enabled, supporting %u guests",
	     CFG_VIRT_GUEST_COUNT);
#endif
	if (IS_ENABLED(CFG_MEMTAG))
		DMSG("Memory tagging %s",
		     memtag_is_enabled() ? "enabled" : "disabled");

	/* Check if platform needs NMFI workaround */
	if (cpu_nmfi_enabled())	{
		if (!IS_ENABLED(CFG_CORE_WORKAROUND_ARM_NMFI))
			IMSG("WARNING: This ARM core has NMFI enabled, please apply workaround!");
	} else {
		if (IS_ENABLED(CFG_CORE_WORKAROUND_ARM_NMFI))
			IMSG("WARNING: This ARM core does not have NMFI enabled, no need for workaround");
	}

	boot_primary_init_intc();
	init_vfp_nsec();
	if (!IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
		/*
		 * Unmask native interrupts during driver initcalls.
		 *
		 * NS-virtualization still uses the temporary stack also
		 * used for exception handling so it must still have native
		 * interrupts masked.
		 */
		thread_set_exceptions(thread_get_exceptions() &
				      ~THREAD_EXCP_NATIVE_INTR);
		init_tee_runtime();
	}

	if (!IS_ENABLED(CFG_WITH_PAGER))
		boot_mem_release_tmp_alloc();
}

void __weak boot_init_primary_final(void)
{
	if (!IS_ENABLED(CFG_NS_VIRTUALIZATION))
		call_driver_initcalls();

	call_finalcalls();

	IMSG("Primary CPU switching to normal world boot");

	/* Mask native interrupts before switching to the normal world */
	if (!IS_ENABLED(CFG_NS_VIRTUALIZATION))
		thread_set_exceptions(thread_get_exceptions() |
				      THREAD_EXCP_NATIVE_INTR);
}

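/*
 * Common per-CPU initialization shared by all secondary CPUs, regardless
 * of whether they are entered via PSCI (TF-A) or released from a spin
 * table.
 */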
static void init_secondary_helper(void)
{
	IMSG("Secondary CPU %zu initializing", get_core_pos());

	/*
	 * Mask asynchronous exceptions before switching to the thread
	 * vector as the thread handler requires those to be masked while
	 * executing with the temporary stack. The thread subsystem also
	 * asserts that the foreign interrupts are blocked when using most
	 * of its functions.
	 */
	thread_set_exceptions(THREAD_EXCP_ALL);

	secondary_init_cntfrq();
	thread_init_per_cpu();
	boot_secondary_init_intc();
	init_vfp_sec();
	init_vfp_nsec();

	IMSG("Secondary CPU %zu switching to normal world boot",
	     get_core_pos());
}

/*
 * Note: this function is weak just to make it possible to exclude it from
 * the unpaged area so that it lies in the init area.
 */
void __weak boot_init_primary_early(void)
{
	unsigned long pageable_part = 0;
	struct transfer_list_entry *tl_e = NULL;

	if (IS_ENABLED(CFG_TRANSFER_LIST) && boot_arg_transfer_list) {
		/* map and save the TL */
		mapped_tl = transfer_list_map(boot_arg_transfer_list);
		if (!mapped_tl)
			panic("Failed to map transfer list");

		transfer_list_dump(mapped_tl);
		tl_e = transfer_list_find(mapped_tl, TL_TAG_OPTEE_PAGABLE_PART);
	}

	if (IS_ENABLED(CFG_WITH_PAGER)) {
		if (IS_ENABLED(CFG_TRANSFER_LIST) && tl_e)
			pageable_part =
				get_le64(transfer_list_entry_data(tl_e));
		else
			pageable_part = boot_arg_pageable_part;
	}

	init_primary(pageable_part);
}

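/*
 * Validate and record the transfer list passed by the earlier boot stage:
 * the register convention requires a zeroed register, an aligned and
 * well-formed transfer list header, and a DT pointer matching the list's
 * TL_TAG_FDT entry.
 */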
static void boot_save_transfer_list(unsigned long zero_reg,
				    unsigned long transfer_list,
				    unsigned long fdt)
{
	struct transfer_list_header *tl = (void *)transfer_list;
	struct transfer_list_entry *tl_e = NULL;

	if (zero_reg != 0)
		panic("Incorrect transfer list register convention");

	if (!IS_ALIGNED_WITH_TYPE(transfer_list, struct transfer_list_header) ||
	    !IS_ALIGNED(transfer_list, TL_ALIGNMENT_FROM_ORDER(tl->alignment)))
		panic("Transfer list base address is not aligned");

	if (transfer_list_check_header(tl) == TL_OPS_NONE)
		panic("Invalid transfer list");

	tl_e = transfer_list_find(tl, TL_TAG_FDT);
	if (fdt != (unsigned long)transfer_list_entry_data(tl_e))
		panic("DT does not match the DT entry of the TL");

	boot_arg_transfer_list = transfer_list;
}

#if defined(CFG_WITH_ARM_TRUSTED_FW)
unsigned long boot_cpu_on_handler(unsigned long a0 __maybe_unused,
				  unsigned long a1 __unused)
{
	init_secondary_helper();
	return 0;
}
#else
void boot_init_secondary(unsigned long nsec_entry __unused)
{
	init_secondary_helper();
}
#endif

#if defined(CFG_BOOT_SECONDARY_REQUEST)
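/*
 * Spin-table protocol: the primary CPU publishes a normal world entry
 * point with boot_set_core_ns_entry() or boot_core_release(), the
 * releasing side sets spin_table[core] and issues sev(), and the parked
 * secondary in boot_core_hpen() wakes from wfe() and picks up its entry
 * context.
 */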
void boot_set_core_ns_entry(size_t core_idx, uintptr_t entry,
			    uintptr_t context_id)
{
	ns_entry_contexts[core_idx].entry_point = entry;
	ns_entry_contexts[core_idx].context_id = context_id;
	dsb_ishst();
}

int boot_core_release(size_t core_idx, paddr_t entry)
{
	if (!core_idx || core_idx >= CFG_TEE_CORE_NB_CORE)
		return -1;

	ns_entry_contexts[core_idx].entry_point = entry;
	dmb();
	spin_table[core_idx] = 1;
	dsb();
	sev();

	return 0;
}

/*
 * Spin until a secondary boot request arrives, then return with the
 * secondary core's entry context.
 */
struct ns_entry_context *boot_core_hpen(void)
{
#ifdef CFG_PSCI_ARM32
	return &ns_entry_contexts[get_core_pos()];
#else
	do {
		wfe();
	} while (!spin_table[get_core_pos()]);
	dmb();
	return &ns_entry_contexts[get_core_pos()];
#endif
}
#endif

#if defined(CFG_CORE_ASLR)
#if defined(CFG_DT)
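/*
 * Fetch the ASLR seed from the DT. The expected fragment looks roughly
 * like this (illustration, the 64-bit value is hypothetical):
 *
 *	secure-chosen {
 *		kaslr-seed = <0x12345678 0x9abcdef0>;
 *	};
 */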
unsigned long __weak get_aslr_seed(void)
{
	void *fdt = NULL;
	int rc = 0;
	const uint64_t *seed = NULL;
	int offs = 0;
	int len = 0;

	if (!IS_ENABLED(CFG_CORE_SEL2_SPMC))
		fdt = (void *)boot_arg_fdt;

	if (!fdt) {
		DMSG("No fdt");
		goto err;
	}

	rc = fdt_check_header(fdt);
	if (rc) {
		DMSG("Bad fdt: %d", rc);
		goto err;
	}

	offs = fdt_path_offset(fdt, "/secure-chosen");
	if (offs < 0) {
		DMSG("Cannot find /secure-chosen");
		goto err;
	}
	seed = fdt_getprop(fdt, offs, "kaslr-seed", &len);
	if (!seed || len != sizeof(*seed)) {
		DMSG("Cannot find valid kaslr-seed");
		goto err;
	}

	return fdt64_to_cpu(fdt64_ld(seed));

err:
	/* Try platform implementation */
	return plat_get_aslr_seed();
}
#else /*!CFG_DT*/
unsigned long __weak get_aslr_seed(void)
{
	/* Try platform implementation */
	return plat_get_aslr_seed();
}
#endif /*!CFG_DT*/
#endif /*CFG_CORE_ASLR*/

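/*
 * Parse the FF-A boot information blob passed by the SPMC: validate the
 * signature, version and descriptor count, require the content to be
 * passed by address, and verify that it points at a well-formed DT.
 */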
static void *get_fdt_from_boot_info(struct ffa_boot_info_header_1_1 *hdr)
{
	struct ffa_boot_info_1_1 *desc = NULL;
	uint8_t content_fmt = 0;
	uint8_t name_fmt = 0;
	void *fdt = NULL;
	int ret = 0;

	if (hdr->signature != FFA_BOOT_INFO_SIGNATURE) {
		EMSG("Bad boot info signature %#"PRIx32, hdr->signature);
		panic();
	}
	if (hdr->version != FFA_BOOT_INFO_VERSION_1_1 &&
	    hdr->version != FFA_BOOT_INFO_VERSION_1_2) {
		EMSG("Bad boot info version %#"PRIx32, hdr->version);
		panic();
	}
	if (hdr->desc_count != 1) {
		EMSG("Bad boot info descriptor count %#"PRIx32,
		     hdr->desc_count);
		panic();
	}
	desc = (void *)((vaddr_t)hdr + hdr->desc_offset);
	name_fmt = desc->flags & FFA_BOOT_INFO_FLAG_NAME_FORMAT_MASK;
	if (name_fmt == FFA_BOOT_INFO_FLAG_NAME_FORMAT_STRING)
		DMSG("Boot info descriptor name \"%16s\"", desc->name);
	else if (name_fmt == FFA_BOOT_INFO_FLAG_NAME_FORMAT_UUID)
		DMSG("Boot info descriptor UUID %pUl", (void *)desc->name);
	else
		DMSG("Boot info descriptor: unknown name format %"PRIu8,
		     name_fmt);

	content_fmt = (desc->flags & FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_MASK) >>
		      FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_SHIFT;
	if (content_fmt != FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_ADDR) {
		EMSG("Bad boot info content format %"PRIu8", expected %u (address)",
		     content_fmt, FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_ADDR);
		panic();
	}

	fdt = (void *)(vaddr_t)desc->contents;
	ret = fdt_check_full(fdt, desc->size);
	if (ret < 0) {
		EMSG("Invalid Device Tree at %p: error %d", fdt, ret);
		panic();
	}
	return fdt;
}

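/*
 * Read the secure memory range from the FF-A manifest. A hypothetical
 * manifest fragment with the properties consumed here (the addresses are
 * made up for illustration):
 *
 *	/ {
 *		compatible = "arm,ffa-manifest-1.0";
 *		load-address = <0x0e100000>;
 *		mem-size = <0x00f00000>;
 *	};
 */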
static void get_sec_mem_from_manifest(void *fdt, paddr_t *base,
				      paddr_size_t *size)
{
	int ret = 0;
	uint64_t num = 0;

	ret = fdt_node_check_compatible(fdt, 0, "arm,ffa-manifest-1.0");
	if (ret < 0) {
		EMSG("Invalid FF-A manifest at %p: error %d", fdt, ret);
		panic();
	}
	ret = dt_getprop_as_number(fdt, 0, "load-address", &num);
	if (ret < 0) {
		EMSG("Can't read \"load-address\" from FF-A manifest at %p: error %d",
		     fdt, ret);
		panic();
	}
	*base = num;
	/* "mem-size" is currently an undocumented extension to the spec. */
	ret = dt_getprop_as_number(fdt, 0, "mem-size", &num);
	if (ret < 0) {
		EMSG("Can't read \"mem-size\" from FF-A manifest at %p: error %d",
		     fdt, ret);
		panic();
	}
	*size = num;
}

1362 
1363 void __weak boot_save_args(unsigned long a0, unsigned long a1,
1364 			   unsigned long a2, unsigned long a3,
1365 			   unsigned long a4 __maybe_unused)
1366 {
1367 	/*
1368 	 * Register use:
1369 	 *
1370 	 * Scenario A: Default arguments
1371 	 * a0   - CFG_CORE_FFA=y && CFG_CORE_SEL2_SPMC=n:
1372 	 *        if non-NULL holds the TOS FW config [1] address
1373 	 *      - CFG_CORE_FFA=y &&
1374 		  (CFG_CORE_SEL2_SPMC=y || CFG_CORE_EL3_SPMC=y):
1375 	 *        address of FF-A Boot Information Blob
1376 	 *      - CFG_CORE_FFA=n:
1377 	 *        if non-NULL holds the pagable part address
1378 	 * a1	- CFG_WITH_ARM_TRUSTED_FW=n (Armv7):
1379 	 *	  Armv7 standard bootarg #1 (kept track of in entry_a32.S)
1380 	 * a2   - CFG_CORE_SEL2_SPMC=n:
1381 	 *        if non-NULL holds the system DTB address
1382 	 *	- CFG_WITH_ARM_TRUSTED_FW=n (Armv7):
1383 	 *	  Armv7 standard bootarg #2 (system DTB address, kept track
1384 	 *	  of in entry_a32.S)
1385 	 * a3	- Not used
1386 	 * a4	- CFG_WITH_ARM_TRUSTED_FW=n:
1387 	 *	  Non-secure entry address
1388 	 *
1389 	 * [1] A TF-A concept: TOS_FW_CONFIG - Trusted OS Firmware
1390 	 * configuration file. Used by Trusted OS (BL32), that is, OP-TEE
1391 	 * here. This is also called Manifest DT, related to the Manifest DT
1392 	 * passed in the FF-A Boot Information Blob, but with a different
1393 	 * compatible string.
1394 
1395 	 * Scenario B: FW Handoff via Transfer List
1396 	 * Note: FF-A and non-secure entry are not yet supported with
1397 	 *       Transfer List
1398 	 * a0	- DTB address or 0 (AArch64)
1399 	 *	- must be 0 (AArch32)
1400 	 * a1	- 1 << 32 | TRANSFER_LIST_SIGNATURE[0:31] (AArch64)
1401 	 *	- 1 << 24 | TRANSFER_LIST_SIGNATURE[0:23] (AArch32)
1402 	 * a2	- must be 0 (AArch64)
1403 	 *	- DTB address or 0 (AArch32)
1404 	 * a3	- Transfer list base address
1405 	 * a4	- Not used
1406 	 */
1407 
	if (IS_ENABLED(CFG_TRANSFER_LIST)) {
		if (IS_ENABLED(CFG_ARM64_core) &&
		    a1 == TL_HANDOFF_X1_VALUE(TL_REG_CONVENTION_VER)) {
			boot_save_transfer_list(a2, a3, a0);
			boot_arg_fdt = a0;
		} else if (IS_ENABLED(CFG_ARM32_core) &&
			   a1 == TL_HANDOFF_R1_VALUE(TL_REG_CONVENTION_VER)) {
			boot_save_transfer_list(a0, a3, a2);
			boot_arg_fdt = a2;
		}

		return;
	}

	if (!IS_ENABLED(CFG_CORE_SEL2_SPMC)) {
#if defined(CFG_DT_ADDR)
		boot_arg_fdt = CFG_DT_ADDR;
#else
		boot_arg_fdt = a2;
#endif
	}

	if (IS_ENABLED(CFG_CORE_FFA)) {
		size_t fdt_max_size = CFG_DTB_MAX_SIZE;
		void *fdt = NULL;

		if (IS_ENABLED(CFG_CORE_SEL2_SPMC) ||
		    IS_ENABLED(CFG_CORE_EL3_SPMC))
			fdt = get_fdt_from_boot_info((void *)a0);
		else
			fdt = (void *)a0;
		if (IS_ENABLED(CFG_CORE_SEL2_SPMC)) {
			paddr_size_t size = 0;
			paddr_t base = 0;

			if (IS_ENABLED(CFG_CORE_PHYS_RELOCATABLE)) {
				get_sec_mem_from_manifest(fdt, &base, &size);
				core_mmu_set_secure_memory(base, size);
			} else {
				core_mmu_get_secure_memory(&base, &size);
			}
			assert((unsigned long)fdt >= base);
			assert((unsigned long)fdt <= base + size);
			assert((unsigned long)fdt < VCORE_START_VA);
			fdt_max_size = VCORE_START_VA - (unsigned long)fdt;
		}
		init_manifest_dt(fdt, fdt_max_size);
	} else {
		if (IS_ENABLED(CFG_WITH_PAGER)) {
#if defined(CFG_PAGEABLE_ADDR)
			boot_arg_pageable_part = CFG_PAGEABLE_ADDR;
#else
			boot_arg_pageable_part = a0;
#endif
		}
		if (!IS_ENABLED(CFG_WITH_ARM_TRUSTED_FW)) {
#if defined(CFG_NS_ENTRY_ADDR)
			boot_arg_nsec_entry = CFG_NS_ENTRY_ADDR;
#else
			boot_arg_nsec_entry = a4;
#endif
		}
	}
}

#if defined(CFG_TRANSFER_LIST)
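/*
 * boot_final() hook: pack the (possibly expanded) DTB back to its real
 * size, shrink the transfer list entry accordingly, and unmap the
 * transfer list before the normal world takes over.
 */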
static TEE_Result release_transfer_list(void)
{
	struct dt_descriptor *dt = get_external_dt_desc();

	if (!mapped_tl)
		return TEE_SUCCESS;

	if (dt) {
		int ret = 0;
		struct transfer_list_entry *tl_e = NULL;

		/*
		 * Pack the DTB and update the transfer list before un-mapping
		 */
		ret = fdt_pack(dt->blob);
		if (ret < 0) {
			EMSG("Failed to pack Device Tree at 0x%" PRIxPA
			     ": error %d", virt_to_phys(dt->blob), ret);
			panic();
		}

		tl_e = transfer_list_find(mapped_tl, TL_TAG_FDT);
		assert(dt->blob == transfer_list_entry_data(tl_e));
		transfer_list_set_data_size(mapped_tl, tl_e,
					    fdt_totalsize(dt->blob));
		dt->blob = NULL;
	}

	transfer_list_unmap_sync(mapped_tl);
	mapped_tl = NULL;

	return TEE_SUCCESS;
}

boot_final(release_transfer_list);
#endif