// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2015-2023, Linaro Limited
 * Copyright (c) 2023, Arm Limited
 */

#include <arm.h>
#include <assert.h>
#include <compiler.h>
#include <config.h>
#include <console.h>
#include <crypto/crypto.h>
#include <drivers/gic.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <ffa.h>
#include <initcall.h>
#include <inttypes.h>
#include <io.h>
#include <keep.h>
#include <kernel/asan.h>
#include <kernel/boot.h>
#include <kernel/dt.h>
#include <kernel/linker.h>
#include <kernel/misc.h>
#include <kernel/panic.h>
#include <kernel/tee_misc.h>
#include <kernel/thread.h>
#include <kernel/tpm.h>
#include <kernel/transfer_list.h>
#include <libfdt.h>
#include <malloc.h>
#include <memtag.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <mm/fobj.h>
#include <mm/page_alloc.h>
#include <mm/phys_mem.h>
#include <mm/tee_mm.h>
#include <mm/tee_pager.h>
#include <sm/psci.h>
#include <trace.h>
#include <utee_defines.h>
#include <util.h>

#include <platform_config.h>

#if !defined(CFG_WITH_ARM_TRUSTED_FW)
#include <sm/sm.h>
#endif

#if defined(CFG_WITH_VFP)
#include <kernel/vfp.h>
#endif

/*
 * In this file we're using unsigned long to represent physical pointers as
 * they are received in a single register when OP-TEE is initially entered.
 * This limits 32-bit systems to making use of only the lower 32 bits
 * of a physical address for initial parameters.
 *
 * 64-bit systems on the other hand can use full 64-bit physical pointers.
 */
#define PADDR_INVALID		ULONG_MAX

#if defined(CFG_BOOT_SECONDARY_REQUEST)
struct ns_entry_context {
	uintptr_t entry_point;
	uintptr_t context_id;
};
struct ns_entry_context ns_entry_contexts[CFG_TEE_CORE_NB_CORE];
static uint32_t spin_table[CFG_TEE_CORE_NB_CORE];
#endif

#ifdef CFG_BOOT_SYNC_CPU
/*
 * Array used when booting to synchronize the CPUs:
 * when 0, the CPU has not started;
 * when 1, it has started.
 */
uint32_t sem_cpu_sync[CFG_TEE_CORE_NB_CORE];
DECLARE_KEEP_PAGER(sem_cpu_sync);
#endif

/*
 * Must not be in .bss since it's initialized and used from assembly before
 * .bss is cleared.
 */
vaddr_t boot_cached_mem_end __nex_data = 1;

static unsigned long boot_arg_fdt __nex_bss;
unsigned long boot_arg_nsec_entry __nex_bss;
static unsigned long boot_arg_pageable_part __nex_bss;
static unsigned long boot_arg_transfer_list __nex_bss;
static struct transfer_list_header *mapped_tl __nex_bss;

#ifdef CFG_SECONDARY_INIT_CNTFRQ
static uint32_t cntfrq;
#endif

/* May be overridden in plat-$(PLATFORM)/main.c */
__weak void plat_primary_init_early(void)
{
}
DECLARE_KEEP_PAGER(plat_primary_init_early);

/* May be overridden in plat-$(PLATFORM)/main.c */
__weak void boot_primary_init_intc(void)
{
}

/* May be overridden in plat-$(PLATFORM)/main.c */
__weak void boot_secondary_init_intc(void)
{
}

/* May be overridden in plat-$(PLATFORM)/main.c */
__weak unsigned long plat_get_aslr_seed(void)
{
	DMSG("Warning: no ASLR seed");

	return 0;
}

#if defined(_CFG_CORE_STACK_PROTECTOR) || defined(CFG_WITH_STACK_CANARIES)
/* Generate random stack canary value on boot up */
__weak void plat_get_random_stack_canaries(void *buf, size_t ncan, size_t size)
{
	TEE_Result ret = TEE_ERROR_GENERIC;
	size_t i = 0;

	assert(buf && ncan && size);

	/*
	 * With virtualization the RNG is not initialized in the Nexus core,
	 * so this needs to be overridden with a platform-specific
	 * implementation.
	 */
	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
		IMSG("WARNING: Using fixed value for stack canary");
		memset(buf, 0xab, ncan * size);
		goto out;
	}

	ret = crypto_rng_read(buf, ncan * size);
	if (ret != TEE_SUCCESS)
		panic("Failed to generate random stack canary");

out:
	/* Leave a null byte in each canary to prevent string-based exploits */
	for (i = 0; i < ncan; i++)
		*((uint8_t *)buf + size * i) = 0;
}
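
/*
 * Illustrative sketch (not part of the original file): a platform could
 * override the weak function above from plat-$(PLATFORM)/main.c to pull
 * canaries from a dedicated TRNG. plat_trng_read() is a hypothetical
 * helper, shown only to demonstrate the expected contract:
 *
 * void plat_get_random_stack_canaries(void *buf, size_t ncan, size_t size)
 * {
 *	size_t i = 0;
 *
 *	if (plat_trng_read(buf, ncan * size))
 *		panic("TRNG failed");
 *
 *	// Keep the terminating null byte, like the generic version does
 *	for (i = 0; i < ncan; i++)
 *		*((uint8_t *)buf + size * i) = 0;
 * }
 */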
#endif /* _CFG_CORE_STACK_PROTECTOR || CFG_WITH_STACK_CANARIES */

/*
 * This function is called as a guard after each SMC call which is not
 * supposed to return.
 */
void __panic_at_smc_return(void)
{
	panic();
}

#if defined(CFG_WITH_ARM_TRUSTED_FW)
void init_sec_mon(unsigned long nsec_entry __maybe_unused)
{
	assert(nsec_entry == PADDR_INVALID);
	/* Do nothing as we don't have a secure monitor */
}
#else
/* May be overridden in plat-$(PLATFORM)/main.c */
__weak void init_sec_mon(unsigned long nsec_entry)
{
	struct sm_nsec_ctx *nsec_ctx;

	assert(nsec_entry != PADDR_INVALID);

	/* Initialize secure monitor */
	nsec_ctx = sm_get_nsec_ctx();
	nsec_ctx->mon_lr = nsec_entry;
	nsec_ctx->mon_spsr = CPSR_MODE_SVC | CPSR_I;
	if (nsec_entry & 1)
		nsec_ctx->mon_spsr |= CPSR_T;
}
#endif

#if defined(CFG_WITH_ARM_TRUSTED_FW)
static void init_vfp_nsec(void)
{
}
#else
static void init_vfp_nsec(void)
{
	/* Normal world can use CP10 and CP11 (SIMD/VFP) */
	write_nsacr(read_nsacr() | NSACR_CP10 | NSACR_CP11);
}
#endif

static void check_crypto_extensions(void)
{
	bool ce_supported = true;

	if (!feat_aes_implemented() &&
	    IS_ENABLED(CFG_CRYPTO_AES_ARM_CE)) {
		EMSG("AES instructions are not supported");
		ce_supported = false;
	}

	if (!feat_sha1_implemented() &&
	    IS_ENABLED(CFG_CRYPTO_SHA1_ARM_CE)) {
		EMSG("SHA1 instructions are not supported");
		ce_supported = false;
	}

	if (!feat_sha256_implemented() &&
	    IS_ENABLED(CFG_CRYPTO_SHA256_ARM_CE)) {
		EMSG("SHA256 instructions are not supported");
		ce_supported = false;
	}

	/* Check AArch64-specific instructions */
	if (IS_ENABLED(CFG_ARM64_core)) {
		if (!feat_sha512_implemented() &&
		    IS_ENABLED(CFG_CRYPTO_SHA512_ARM_CE)) {
			EMSG("SHA512 instructions are not supported");
			ce_supported = false;
		}

		if (!feat_sha3_implemented() &&
		    IS_ENABLED(CFG_CRYPTO_SHA3_ARM_CE)) {
			EMSG("SHA3 instructions are not supported");
			ce_supported = false;
		}

		if (!feat_sm3_implemented() &&
		    IS_ENABLED(CFG_CRYPTO_SM3_ARM_CE)) {
			EMSG("SM3 instructions are not supported");
			ce_supported = false;
		}

		if (!feat_sm4_implemented() &&
		    IS_ENABLED(CFG_CRYPTO_SM4_ARM_CE)) {
			EMSG("SM4 instructions are not supported");
			ce_supported = false;
		}
	}

	if (!ce_supported)
		panic("HW doesn't support CE instructions");
}

#if defined(CFG_WITH_VFP)

#ifdef ARM32
static void init_vfp_sec(void)
{
	uint32_t cpacr = read_cpacr();

	/*
	 * Enable Advanced SIMD functionality.
	 * Enable use of D16-D31 of the Floating-point Extension register
	 * file.
	 */
	cpacr &= ~(CPACR_ASEDIS | CPACR_D32DIS);
	/*
	 * Enable usage of CP10 and CP11 (SIMD/VFP) in both kernel and
	 * user mode.
	 */
	cpacr |= CPACR_CP(10, CPACR_CP_ACCESS_FULL);
	cpacr |= CPACR_CP(11, CPACR_CP_ACCESS_FULL);
	write_cpacr(cpacr);
}
#endif /* ARM32 */

#ifdef ARM64
static void init_vfp_sec(void)
{
	/* Not using VFP until thread_kernel_enable_vfp() */
	vfp_disable();
}
#endif /* ARM64 */

#else /* CFG_WITH_VFP */

static void init_vfp_sec(void)
{
	/* Not using VFP */
}
#endif

#ifdef CFG_SECONDARY_INIT_CNTFRQ
static void primary_save_cntfrq(void)
{
	assert(cntfrq == 0);

	/*
	 * CNTFRQ should be initialized on the primary CPU by a
	 * previous boot stage
	 */
	cntfrq = read_cntfrq();
}

static void secondary_init_cntfrq(void)
{
	assert(cntfrq != 0);
	write_cntfrq(cntfrq);
}
#else /* CFG_SECONDARY_INIT_CNTFRQ */
static void primary_save_cntfrq(void)
{
}

static void secondary_init_cntfrq(void)
{
}
#endif

#ifdef CFG_CORE_SANITIZE_KADDRESS
static void init_run_constructors(void)
{
	const vaddr_t *ctor;

	for (ctor = &__ctor_list; ctor < &__ctor_end; ctor++)
		((void (*)(void))(*ctor))();
}

static void init_asan(void)
{

	/*
	 * CFG_ASAN_SHADOW_OFFSET is also supplied as
	 * -fasan-shadow-offset=$(CFG_ASAN_SHADOW_OFFSET) to the compiler.
	 * Since all the values needed to calculate CFG_ASAN_SHADOW_OFFSET
	 * aren't available to make, we have to calculate it in advance and
	 * hard code it into the platform conf.mk. Here, where we do have
	 * all the needed values, we double check that the compiler was
	 * supplied the correct value.
	 */

#define __ASAN_SHADOW_START \
	ROUNDUP(TEE_RAM_START + (TEE_RAM_VA_SIZE * 8) / 9 - 8, 8)
	assert(__ASAN_SHADOW_START == (vaddr_t)&__asan_shadow_start);
#define __CFG_ASAN_SHADOW_OFFSET \
	(__ASAN_SHADOW_START - (TEE_RAM_START / 8))
	COMPILE_TIME_ASSERT(CFG_ASAN_SHADOW_OFFSET == __CFG_ASAN_SHADOW_OFFSET);
#undef __ASAN_SHADOW_START
#undef __CFG_ASAN_SHADOW_OFFSET
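
	/*
	 * For reference (not from the original file): AddressSanitizer
	 * maps each 8 bytes of memory onto one shadow byte at
	 *
	 *	shadow_addr = (addr / 8) + CFG_ASAN_SHADOW_OFFSET
	 *
	 * so the asserts above pin the offset the compiler was given to
	 * the shadow buffer the linker actually placed.
	 */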

	/*
	 * Assign area covered by the shadow area, everything from start up
	 * to the beginning of the shadow area.
	 */
	asan_set_shadowed((void *)TEE_LOAD_ADDR, &__asan_shadow_start);

	/*
	 * Add access to areas that aren't opened automatically by a
	 * constructor.
	 */
	asan_tag_access(&__ctor_list, &__ctor_end);
	asan_tag_access(__rodata_start, __rodata_end);
#ifdef CFG_WITH_PAGER
	asan_tag_access(__pageable_start, __pageable_end);
#endif /*CFG_WITH_PAGER*/
	asan_tag_access(__nozi_start, __nozi_end);
#ifdef ARM32
	asan_tag_access(__exidx_start, __exidx_end);
	asan_tag_access(__extab_start, __extab_end);
#endif

	init_run_constructors();

	/* Everything is tagged correctly, let's start address sanitizing. */
	asan_start();
}
#else /*CFG_CORE_SANITIZE_KADDRESS*/
static void init_asan(void)
{
}
#endif /*CFG_CORE_SANITIZE_KADDRESS*/

#if defined(CFG_MEMTAG)
/* Called from entry_a64.S only when MEMTAG is configured */
void boot_init_memtag(void)
{
	memtag_init_ops(feat_mte_implemented());
}

static TEE_Result mmap_clear_memtag(struct tee_mmap_region *map,
				    void *ptr __unused)
{
	switch (map->type) {
	case MEM_AREA_NEX_RAM_RO:
	case MEM_AREA_SEC_RAM_OVERALL:
		DMSG("Clearing tags for VA %#"PRIxVA"..%#"PRIxVA,
		     map->va, map->va + map->size - 1);
		memtag_set_tags((void *)map->va, map->size, 0);
		break;
	default:
		break;
	}

	return TEE_SUCCESS;
}

/* Called from entry_a64.S only when MEMTAG is configured */
void boot_clear_memtag(void)
{
	core_mmu_for_each_map(NULL, mmap_clear_memtag);
}
#endif

#ifdef CFG_WITH_PAGER

#ifdef CFG_CORE_SANITIZE_KADDRESS
static void carve_out_asan_mem(void)
{
	nex_phys_mem_partial_carve_out(ASAN_MAP_PA, ASAN_MAP_SZ);
}
#else
static void carve_out_asan_mem(void)
{
}
#endif

static void print_pager_pool_size(void)
{
	struct tee_pager_stats __maybe_unused stats;

	tee_pager_get_stats(&stats);
	IMSG("Pager pool size: %zukB",
		stats.npages_all * SMALL_PAGE_SIZE / 1024);
}

static void init_virt_pool(tee_mm_pool_t *virt_pool)
{
	const vaddr_t begin = VCORE_START_VA;
	size_t size = TEE_RAM_VA_SIZE;

#ifdef CFG_CORE_SANITIZE_KADDRESS
	/* Carve out ASAN memory, flat mapped after core memory */
	if (begin + size > ASAN_SHADOW_PA)
		size = ASAN_MAP_PA - begin;
#endif

	if (!tee_mm_init(virt_pool, begin, size, SMALL_PAGE_SHIFT,
			 TEE_MM_POOL_NO_FLAGS))
		panic("core_virt_mem_pool init failed");
}

/*
 * With CFG_CORE_ASLR=y the init part is relocated very early during boot.
 * The init part is also paged just as the rest of the normal paged code, with
 * the difference that it's preloaded during boot. When the backing store
 * is configured the entire paged binary is copied in place and then also
 * the init part. Since the init part has been relocated (references to
 * addresses updated to compensate for the new load address) this has to be
 * undone for the hashes of those pages to match with the original binary.
 *
 * If CFG_CORE_ASLR=n, nothing needs to be done as the code/ro pages are
 * unchanged.
 */
static void undo_init_relocation(uint8_t *paged_store __maybe_unused)
{
#ifdef CFG_CORE_ASLR
	unsigned long *ptr = NULL;
	const uint32_t *reloc = NULL;
	const uint32_t *reloc_end = NULL;
	unsigned long offs = boot_mmu_config.map_offset;
	const struct boot_embdata *embdata = (const void *)__init_end;
	vaddr_t addr_end = (vaddr_t)__init_end - offs - TEE_LOAD_ADDR;
	vaddr_t addr_start = (vaddr_t)__init_start - offs - TEE_LOAD_ADDR;

	reloc = (const void *)((vaddr_t)embdata + embdata->reloc_offset);
	reloc_end = reloc + embdata->reloc_len / sizeof(*reloc);

	for (; reloc < reloc_end; reloc++) {
		if (*reloc < addr_start)
			continue;
		if (*reloc >= addr_end)
			break;
		ptr = (void *)(paged_store + *reloc - addr_start);
		*ptr -= offs;
	}
#endif
}

static struct fobj *ro_paged_alloc(tee_mm_entry_t *mm, void *hashes,
				   void *store)
{
	const unsigned int num_pages = tee_mm_get_bytes(mm) / SMALL_PAGE_SIZE;
#ifdef CFG_CORE_ASLR
	unsigned int reloc_offs = (vaddr_t)__pageable_start - VCORE_START_VA;
	const struct boot_embdata *embdata = (const void *)__init_end;
	const void *reloc = __init_end + embdata->reloc_offset;

	return fobj_ro_reloc_paged_alloc(num_pages, hashes, reloc_offs,
					 reloc, embdata->reloc_len, store);
#else
	return fobj_ro_paged_alloc(num_pages, hashes, store);
#endif
}

static void init_pager_runtime(unsigned long pageable_part)
{
	size_t n;
	size_t init_size = (size_t)(__init_end - __init_start);
	size_t pageable_start = (size_t)__pageable_start;
	size_t pageable_end = (size_t)__pageable_end;
	size_t pageable_size = pageable_end - pageable_start;
	vaddr_t tzsram_end = TZSRAM_BASE + TZSRAM_SIZE - TEE_LOAD_ADDR +
			     VCORE_START_VA;
	size_t hash_size = (pageable_size / SMALL_PAGE_SIZE) *
			   TEE_SHA256_HASH_SIZE;
	const struct boot_embdata *embdata = (const void *)__init_end;
	const void *tmp_hashes = NULL;
	tee_mm_entry_t *mm = NULL;
	struct fobj *fobj = NULL;
	uint8_t *paged_store = NULL;
	uint8_t *hashes = NULL;

	assert(pageable_size % SMALL_PAGE_SIZE == 0);
	assert(embdata->total_len >= embdata->hashes_offset +
				     embdata->hashes_len);
	assert(hash_size == embdata->hashes_len);

	tmp_hashes = __init_end + embdata->hashes_offset;

	/*
	 * This needs to be initialized early to support address lookup
	 * in MEM_AREA_TEE_RAM
	 */
	tee_pager_early_init();

	hashes = malloc(hash_size);
	IMSG_RAW("\n");
	IMSG("Pager is enabled. Hashes: %zu bytes", hash_size);
	assert(hashes);
	asan_memcpy_unchecked(hashes, tmp_hashes, hash_size);

	/*
	 * The pager is about to be enabled below; any temporary boot
	 * memory allocations must be removed now.
	 */
	boot_mem_release_tmp_alloc();

	carve_out_asan_mem();

	mm = nex_phys_mem_ta_alloc(pageable_size);
	assert(mm);
	paged_store = phys_to_virt(tee_mm_get_smem(mm),
				   MEM_AREA_SEC_RAM_OVERALL, pageable_size);
	/*
	 * Load the pageable part into the dedicated allocated area:
	 * - Move the pageable non-init part into the pageable area. Note
	 *   that the bootloader may have loaded it anywhere in TA RAM,
	 *   hence use memmove().
	 * - Copy the pageable init part from its current location into the
	 *   pageable area.
	 */
	memmove(paged_store + init_size,
		phys_to_virt(pageable_part,
			     core_mmu_get_type_by_pa(pageable_part),
			     __pageable_part_end - __pageable_part_start),
		__pageable_part_end - __pageable_part_start);
	asan_memcpy_unchecked(paged_store, __init_start, init_size);
	/*
	 * Undo any relocation of the init part so the hash checks
	 * can pass.
	 */
	undo_init_relocation(paged_store);

	/* Check that the hashes of what's in the pageable area are OK */
	DMSG("Checking hashes of pageable area");
	for (n = 0; (n * SMALL_PAGE_SIZE) < pageable_size; n++) {
		const uint8_t *hash = hashes + n * TEE_SHA256_HASH_SIZE;
		const uint8_t *page = paged_store + n * SMALL_PAGE_SIZE;
		TEE_Result res;

		DMSG("hash pg_idx %zu hash %p page %p", n, hash, page);
		res = hash_sha256_check(hash, page, SMALL_PAGE_SIZE);
		if (res != TEE_SUCCESS) {
			EMSG("Hash failed for page %zu at %p: res 0x%x",
			     n, (void *)page, res);
			panic();
		}
	}

	/*
	 * Assert that the prepaged init sections are page aligned so that
	 * nothing uninitialized trails at the end of the premapped init area.
	 */
	assert(!(init_size & SMALL_PAGE_MASK));

	/*
	 * Initialize the virtual memory pool used for main_mmu_l2_ttb which
	 * is supplied to tee_pager_init() below.
	 */
	init_virt_pool(&core_virt_mem_pool);

	/*
	 * Assign the alias area for the pager at the end of the small page
	 * block that the rest of the binary is loaded into. We're taking
	 * more than needed, but we're guaranteed to not need more than the
	 * physical amount of TZSRAM.
	 */
	mm = tee_mm_alloc2(&core_virt_mem_pool,
			   (vaddr_t)core_virt_mem_pool.lo +
			   core_virt_mem_pool.size - TZSRAM_SIZE,
			   TZSRAM_SIZE);
	assert(mm);
	tee_pager_set_alias_area(mm);

	/*
	 * Claim virtual memory which isn't paged.
	 * Linear memory (flat-mapped core memory) ends there.
	 */
	mm = tee_mm_alloc2(&core_virt_mem_pool, VCORE_UNPG_RX_PA,
			   (vaddr_t)(__pageable_start - VCORE_UNPG_RX_PA));
	assert(mm);

	/*
	 * Allocate virtual memory for the pageable area and let the pager
	 * take charge of all the pages already assigned to that memory.
	 */
	mm = tee_mm_alloc2(&core_virt_mem_pool, (vaddr_t)__pageable_start,
			   pageable_size);
	assert(mm);
	fobj = ro_paged_alloc(mm, hashes, paged_store);
	assert(fobj);
	tee_pager_add_core_region(tee_mm_get_smem(mm), PAGED_REGION_TYPE_RO,
				  fobj);
	fobj_put(fobj);

	tee_pager_add_pages(pageable_start, init_size / SMALL_PAGE_SIZE, false);
	tee_pager_add_pages(pageable_start + init_size,
			    (pageable_size - init_size) / SMALL_PAGE_SIZE,
			    true);
	if (pageable_end < tzsram_end)
		tee_pager_add_pages(pageable_end, (tzsram_end - pageable_end) /
						   SMALL_PAGE_SIZE, true);

	/*
	 * There may be physical pages in TZSRAM before the core load address.
	 * These pages can be added to the physical pages pool of the pager.
	 * This setup may happen when a secure bootloader runs in TZSRAM
	 * and its memory can be reused by OP-TEE once boot stages complete.
	 */
	tee_pager_add_pages(core_virt_mem_pool.lo,
			    (VCORE_UNPG_RX_PA - core_virt_mem_pool.lo) /
				SMALL_PAGE_SIZE,
			    true);

	print_pager_pool_size();
}
#else /*!CFG_WITH_PAGER*/
static void init_pager_runtime(unsigned long pageable_part __unused)
{
}
#endif

#if defined(CFG_DT)
static int add_optee_dt_node(struct dt_descriptor *dt)
{
	int offs;
	int ret;

	if (fdt_path_offset(dt->blob, "/firmware/optee") >= 0) {
		DMSG("OP-TEE Device Tree node already exists!");
		return 0;
	}

	offs = fdt_path_offset(dt->blob, "/firmware");
	if (offs < 0) {
		offs = add_dt_path_subnode(dt, "/", "firmware");
		if (offs < 0)
			return -1;
	}

	offs = fdt_add_subnode(dt->blob, offs, "optee");
	if (offs < 0)
		return -1;

	ret = fdt_setprop_string(dt->blob, offs, "compatible",
				 "linaro,optee-tz");
	if (ret < 0)
		return -1;
	ret = fdt_setprop_string(dt->blob, offs, "method", "smc");
	if (ret < 0)
		return -1;

	if (CFG_CORE_ASYNC_NOTIF_GIC_INTID) {
		/*
		 * The format of the interrupt property is defined by the
		 * binding of the interrupt domain root. In this case it's
		 * an Arm GIC v1, v2 or v3 so we must be compatible with
		 * these.
		 *
		 * An SPI type of interrupt is indicated with a 0 in the
		 * first cell. A PPI type is indicated with the value 1.
		 *
		 * The interrupt number goes in the second cell where
		 * SPIs range from 0 to 987 and PPIs range from 0 to 15.
		 *
		 * Flags are passed in the third cell.
		 */
		uint32_t itr_trigger = 0;
		uint32_t itr_type = 0;
		uint32_t itr_id = 0;
		uint32_t val[3] = { };

		/* PPIs are visible only in the current CPU cluster */
		static_assert(IS_ENABLED(CFG_CORE_FFA) ||
			      !CFG_CORE_ASYNC_NOTIF_GIC_INTID ||
			      (CFG_CORE_ASYNC_NOTIF_GIC_INTID >=
			       GIC_SPI_BASE) ||
			      ((CFG_TEE_CORE_NB_CORE <= 8) &&
			       (CFG_CORE_ASYNC_NOTIF_GIC_INTID >=
				GIC_PPI_BASE)));

		if (CFG_CORE_ASYNC_NOTIF_GIC_INTID >= GIC_SPI_BASE) {
			itr_type = GIC_SPI;
			itr_id = CFG_CORE_ASYNC_NOTIF_GIC_INTID - GIC_SPI_BASE;
			itr_trigger = IRQ_TYPE_EDGE_RISING;
		} else {
			itr_type = GIC_PPI;
			itr_id = CFG_CORE_ASYNC_NOTIF_GIC_INTID - GIC_PPI_BASE;
			itr_trigger = IRQ_TYPE_EDGE_RISING |
				      GIC_CPU_MASK_SIMPLE(CFG_TEE_CORE_NB_CORE);
		}

		val[0] = TEE_U32_TO_BIG_ENDIAN(itr_type);
		val[1] = TEE_U32_TO_BIG_ENDIAN(itr_id);
		val[2] = TEE_U32_TO_BIG_ENDIAN(itr_trigger);

		ret = fdt_setprop(dt->blob, offs, "interrupts", val,
				  sizeof(val));
		if (ret < 0)
			return -1;
	}
	return 0;
}
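
/*
 * For illustration (not from the original file): on success the function
 * above leaves a node like the following in the non-secure DTB, here with
 * a hypothetical SPI number 8 as the async notification interrupt:
 *
 *	firmware {
 *		optee {
 *			compatible = "linaro,optee-tz";
 *			method = "smc";
 *			interrupts = <GIC_SPI 8 IRQ_TYPE_EDGE_RISING>;
 *		};
 *	};
 */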

#ifdef CFG_PSCI_ARM32
static int append_psci_compatible(void *fdt, int offs, const char *str)
{
	return fdt_appendprop(fdt, offs, "compatible", str, strlen(str) + 1);
}

static int dt_add_psci_node(struct dt_descriptor *dt)
{
	int offs;

	if (fdt_path_offset(dt->blob, "/psci") >= 0) {
		DMSG("PSCI Device Tree node already exists!");
		return 0;
	}

	offs = add_dt_path_subnode(dt, "/", "psci");
	if (offs < 0)
		return -1;
	if (append_psci_compatible(dt->blob, offs, "arm,psci-1.0"))
		return -1;
	if (append_psci_compatible(dt->blob, offs, "arm,psci-0.2"))
		return -1;
	if (append_psci_compatible(dt->blob, offs, "arm,psci"))
		return -1;
	if (fdt_setprop_string(dt->blob, offs, "method", "smc"))
		return -1;
	if (fdt_setprop_u32(dt->blob, offs, "cpu_suspend", PSCI_CPU_SUSPEND))
		return -1;
	if (fdt_setprop_u32(dt->blob, offs, "cpu_off", PSCI_CPU_OFF))
		return -1;
	if (fdt_setprop_u32(dt->blob, offs, "cpu_on", PSCI_CPU_ON))
		return -1;
	if (fdt_setprop_u32(dt->blob, offs, "sys_poweroff", PSCI_SYSTEM_OFF))
		return -1;
	if (fdt_setprop_u32(dt->blob, offs, "sys_reset", PSCI_SYSTEM_RESET))
		return -1;
	return 0;
}
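
/*
 * For illustration (not from the original file): the resulting /psci node
 * roughly looks like this, with the function IDs taken from sm/psci.h:
 *
 *	psci {
 *		compatible = "arm,psci-1.0", "arm,psci-0.2", "arm,psci";
 *		method = "smc";
 *		cpu_suspend = <PSCI_CPU_SUSPEND>;
 *		cpu_off = <PSCI_CPU_OFF>;
 *		cpu_on = <PSCI_CPU_ON>;
 *		sys_poweroff = <PSCI_SYSTEM_OFF>;
 *		sys_reset = <PSCI_SYSTEM_RESET>;
 *	};
 */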

static int check_node_compat_prefix(struct dt_descriptor *dt, int offs,
				    const char *prefix)
{
	const size_t prefix_len = strlen(prefix);
	size_t l;
	int plen;
	const char *prop;

	prop = fdt_getprop(dt->blob, offs, "compatible", &plen);
	if (!prop)
		return -1;

	while (plen > 0) {
		if (memcmp(prop, prefix, prefix_len) == 0)
			return 0; /* match */

		l = strlen(prop) + 1;
		prop += l;
		plen -= l;
	}

	return -1;
}

static int dt_add_psci_cpu_enable_methods(struct dt_descriptor *dt)
{
	int offs = 0;

	while (1) {
		offs = fdt_next_node(dt->blob, offs, NULL);
		if (offs < 0)
			break;
		if (fdt_getprop(dt->blob, offs, "enable-method", NULL))
			continue; /* already set */
		if (check_node_compat_prefix(dt, offs, "arm,cortex-a"))
			continue; /* no compatible */
		if (fdt_setprop_string(dt->blob, offs, "enable-method", "psci"))
			return -1;
		/* Need to restart scanning as offsets may have changed */
		offs = 0;
	}
	return 0;
}

static int config_psci(struct dt_descriptor *dt)
{
	if (dt_add_psci_node(dt))
		return -1;
	return dt_add_psci_cpu_enable_methods(dt);
}
#else
static int config_psci(struct dt_descriptor *dt __unused)
{
	return 0;
}
#endif /*CFG_PSCI_ARM32*/

static int mark_tzdram_as_reserved(struct dt_descriptor *dt)
{
	return add_res_mem_dt_node(dt, "optee_core", CFG_TZDRAM_START,
				   CFG_TZDRAM_SIZE);
}

static void update_external_dt(void)
{
	struct dt_descriptor *dt = get_external_dt_desc();

	if (!dt || !dt->blob)
		return;

	if (!IS_ENABLED(CFG_CORE_FFA) && add_optee_dt_node(dt))
		panic("Failed to add OP-TEE Device Tree node");

	if (config_psci(dt))
		panic("Failed to config PSCI");

#ifdef CFG_CORE_RESERVED_SHM
	if (mark_static_shm_as_reserved(dt))
		panic("Failed to config non-secure memory");
#endif

	if (mark_tzdram_as_reserved(dt))
		panic("Failed to config secure memory");
}
#else /*CFG_DT*/
static void update_external_dt(void)
{
}
#endif /*!CFG_DT*/

void init_tee_runtime(void)
{
	/*
	 * With virtualization we call this function when creating the
	 * OP-TEE partition instead.
	 */
	if (!IS_ENABLED(CFG_NS_VIRTUALIZATION))
		call_preinitcalls();
	call_early_initcalls();
	call_service_initcalls();

	/*
	 * These two functions use crypto_rng_read() to initialize the
	 * pauth keys. Once the initcalls above have returned we're
	 * guaranteed that crypto_rng_read() is ready to be used.
	 */
	thread_init_core_local_pauth_keys();
	thread_init_thread_pauth_keys();

	/*
	 * Reinitialize canaries around the stacks with crypto_rng_read().
	 *
	 * TODO: Updating canaries when CFG_NS_VIRTUALIZATION is enabled will
	 * require synchronization between thread_check_canaries() and
	 * thread_update_canaries().
	 */
	if (!IS_ENABLED(CFG_NS_VIRTUALIZATION))
		thread_update_canaries();
}

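/*
 * Callback for boot_mem_foreach_padding() below: donate leftover boot
 * memory paddings to the (nexus) heap.
 */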
static bool add_padding_to_pool(vaddr_t va, size_t len, void *ptr __unused)
{
#ifdef CFG_NS_VIRTUALIZATION
	nex_malloc_add_pool((void *)va, len);
#else
	malloc_add_pool((void *)va, len);
#endif
	return true;
}

static void init_primary(unsigned long pageable_part)
{
	vaddr_t va = 0;

	/*
	 * Mask asynchronous exceptions before switching to the thread
	 * vector as the thread handler requires those to be masked while
	 * executing with the temporary stack. The thread subsystem also
	 * asserts that foreign interrupts are blocked when using most of
	 * its functions.
	 */
	thread_set_exceptions(THREAD_EXCP_ALL);
	primary_save_cntfrq();
	init_vfp_sec();

	if (IS_ENABLED(CFG_CRYPTO_WITH_CE))
		check_crypto_extensions();

	init_asan();

	/*
	 * By default the whole of OP-TEE uses malloc, so we need to
	 * initialize it early. But when virtualization is enabled, malloc
	 * is used only by the TEE runtime, so malloc should be initialized
	 * later, for each virtual partition separately. Core code uses
	 * nex_malloc instead.
	 */
#ifdef CFG_WITH_PAGER
	/* Add heap2 first as heap1 may be too small as initial bget pool */
	malloc_add_pool(__heap2_start, __heap2_end - __heap2_start);
#endif
#ifdef CFG_NS_VIRTUALIZATION
	nex_malloc_add_pool(__nex_heap_start, __nex_heap_end -
					      __nex_heap_start);
#else
	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
#endif
	IMSG_RAW("\n");

	core_mmu_save_mem_map();
	core_mmu_init_phys_mem();
	boot_mem_foreach_padding(add_padding_to_pool, NULL);
	va = boot_mem_release_unused();
	if (!IS_ENABLED(CFG_WITH_PAGER)) {
		/*
		 * We must update boot_cached_mem_end to reflect the memory
		 * just unmapped by boot_mem_release_unused().
		 */
		assert(va && va <= boot_cached_mem_end);
		boot_cached_mem_end = va;
	}

	if (IS_ENABLED(CFG_DYN_CONFIG)) {
		/*
		 * This is needed to enable virt_page_alloc() now that
		 * boot_mem_alloc() can't be used any longer.
		 */
		if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
			nex_page_alloc_init();
		else
			page_alloc_init();
	}

	if (IS_ENABLED(CFG_WITH_PAGER)) {
		/*
		 * Pager: init_runtime() calls thread_kernel_enable_vfp()
		 * so we must set a current thread right now to avoid a
		 * chicken-and-egg problem (thread_init_boot_thread() sets
		 * the current thread but needs things set by
		 * init_runtime()).
		 */
		thread_get_core_local()->curr_thread = 0;
		init_pager_runtime(pageable_part);
	}

	thread_init_primary();
	thread_init_per_cpu();
}

static bool cpu_nmfi_enabled(void)
{
#if defined(ARM32)
	return read_sctlr() & SCTLR_NMFI;
#else
	/* Note: ARM64 does not feature non-maskable FIQ support. */
	return false;
#endif
}

/*
 * Note: this function is weak just to make it possible to exclude it from
 * the unpaged area.
 */
void __weak boot_init_primary_late(unsigned long fdt __unused,
				   unsigned long manifest __unused)
{
	size_t fdt_size = CFG_DTB_MAX_SIZE;

	if (IS_ENABLED(CFG_TRANSFER_LIST) && mapped_tl) {
		struct transfer_list_entry *tl_e = NULL;

		tl_e = transfer_list_find(mapped_tl, TL_TAG_FDT);
		if (tl_e) {
			/*
			 * Expand the data size of the DTB entry to the
			 * maximum allocable mapped memory to reserve
			 * sufficient space for inserting new nodes, avoiding
			 * potential corruption of the entries that follow.
			 */
			uint32_t dtb_max_sz = mapped_tl->max_size -
					      mapped_tl->size + tl_e->data_size;

			if (!transfer_list_set_data_size(mapped_tl, tl_e,
							 dtb_max_sz)) {
				EMSG("Failed to extend DTB size to %#"PRIx32,
				     dtb_max_sz);
				panic();
			}
			fdt_size = tl_e->data_size;
		}
	}

	init_external_dt(boot_arg_fdt, fdt_size);
	reinit_manifest_dt();
#ifdef CFG_CORE_SEL1_SPMC
	tpm_map_log_area(get_manifest_dt());
#else
	tpm_map_log_area(get_external_dt());
#endif
	discover_nsec_memory();
	update_external_dt();
	configure_console_from_dt();

	thread_init_thread_core_local();
	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
		/*
		 * Virtualization: we can't initialize threads right now
		 * because threads belong to the "tee" part and will be
		 * initialized separately for each new virtual guest. So
		 * we'll clear "curr_thread" and call it done.
		 */
		thread_get_core_local()->curr_thread = -1;
	} else {
		thread_init_boot_thread();
	}
}

void __weak boot_init_primary_runtime(void)
{
	IMSG("OP-TEE version: %s", core_v_str);
	if (IS_ENABLED(CFG_INSECURE)) {
		IMSG("WARNING: This OP-TEE configuration might be insecure!");
		IMSG("WARNING: Please check https://optee.readthedocs.io/en/latest/architecture/porting_guidelines.html");
	}
	IMSG("Primary CPU initializing");
#ifdef CFG_CORE_ASLR
	DMSG("Executing at offset %#lx with virtual load address %#"PRIxVA,
	     (unsigned long)boot_mmu_config.map_offset, VCORE_START_VA);
#endif
#ifdef CFG_NS_VIRTUALIZATION
	DMSG("NS-virtualization enabled, supporting %u guests",
	     CFG_VIRT_GUEST_COUNT);
#endif
	if (IS_ENABLED(CFG_MEMTAG))
		DMSG("Memory tagging %s",
		     memtag_is_enabled() ?  "enabled" : "disabled");

	/* Check if platform needs NMFI workaround */
	if (cpu_nmfi_enabled())	{
		if (!IS_ENABLED(CFG_CORE_WORKAROUND_ARM_NMFI))
			IMSG("WARNING: This ARM core has NMFI enabled, please apply workaround!");
	} else {
		if (IS_ENABLED(CFG_CORE_WORKAROUND_ARM_NMFI))
			IMSG("WARNING: This ARM core does not have NMFI enabled, no need for workaround");
	}

	boot_primary_init_intc();
	init_vfp_nsec();
	if (!IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
		/*
		 * Unmask native interrupts during driver initcalls.
		 *
		 * NS-virtualization still uses the temporary stack also
		 * used for exception handling so it must still have native
		 * interrupts masked.
		 */
		thread_set_exceptions(thread_get_exceptions() &
				      ~THREAD_EXCP_NATIVE_INTR);
		init_tee_runtime();
	}

	if (!IS_ENABLED(CFG_WITH_PAGER))
		boot_mem_release_tmp_alloc();
}

void __weak boot_init_primary_final(void)
{
	if (!IS_ENABLED(CFG_NS_VIRTUALIZATION))
		call_driver_initcalls();

	call_finalcalls();

	IMSG("Primary CPU switching to normal world boot");

	/* Mask native interrupts before switching to the normal world */
	if (!IS_ENABLED(CFG_NS_VIRTUALIZATION))
		thread_set_exceptions(thread_get_exceptions() |
				      THREAD_EXCP_NATIVE_INTR);
}

static void init_secondary_helper(void)
{
	IMSG("Secondary CPU %zu initializing", get_core_pos());

	/*
	 * Mask asynchronous exceptions before switching to the thread
	 * vector as the thread handler requires those to be masked while
	 * executing with the temporary stack. The thread subsystem also
	 * asserts that foreign interrupts are blocked when using most of
	 * its functions.
	 */
	thread_set_exceptions(THREAD_EXCP_ALL);

	secondary_init_cntfrq();
	thread_init_per_cpu();
	boot_secondary_init_intc();
	init_vfp_sec();
	init_vfp_nsec();

	IMSG("Secondary CPU %zu switching to normal world boot", get_core_pos());
}

/*
 * Note: this function is weak just to make it possible to exclude it from
 * the unpaged area so that it lies in the init area.
 */
void __weak boot_init_primary_early(void)
{
	unsigned long pageable_part = 0;
	struct transfer_list_entry *tl_e = NULL;

	if (IS_ENABLED(CFG_TRANSFER_LIST) && boot_arg_transfer_list) {
		/* map and save the TL */
		mapped_tl = transfer_list_map(boot_arg_transfer_list);
		if (!mapped_tl)
			panic("Failed to map transfer list");

		transfer_list_dump(mapped_tl);
		tl_e = transfer_list_find(mapped_tl, TL_TAG_OPTEE_PAGABLE_PART);
	}

	if (IS_ENABLED(CFG_WITH_PAGER)) {
		if (IS_ENABLED(CFG_TRANSFER_LIST) && tl_e)
			pageable_part =
				get_le64(transfer_list_entry_data(tl_e));
		else
			pageable_part = boot_arg_pageable_part;
	}

	init_primary(pageable_part);
}

static void boot_save_transfer_list(unsigned long zero_reg,
				    unsigned long transfer_list,
				    unsigned long fdt)
{
	struct transfer_list_header *tl = (void *)transfer_list;
	struct transfer_list_entry *tl_e = NULL;

	if (zero_reg != 0)
		panic("Incorrect transfer list register convention");

	if (!IS_ALIGNED_WITH_TYPE(transfer_list, struct transfer_list_header) ||
	    !IS_ALIGNED(transfer_list, TL_ALIGNMENT_FROM_ORDER(tl->alignment)))
		panic("Transfer list base address is not aligned");

	if (transfer_list_check_header(tl) == TL_OPS_NONE)
		panic("Invalid transfer list");

	tl_e = transfer_list_find(tl, TL_TAG_FDT);
	if (fdt != (unsigned long)transfer_list_entry_data(tl_e))
		panic("DT does not match the DT entry of the TL");

	boot_arg_transfer_list = transfer_list;
}

#if defined(CFG_WITH_ARM_TRUSTED_FW)
unsigned long boot_cpu_on_handler(unsigned long a0 __maybe_unused,
				  unsigned long a1 __unused)
{
	init_secondary_helper();
	return 0;
}
#else
void boot_init_secondary(unsigned long nsec_entry __unused)
{
	init_secondary_helper();
}
#endif

#if defined(CFG_BOOT_SECONDARY_REQUEST)
void boot_set_core_ns_entry(size_t core_idx, uintptr_t entry,
			    uintptr_t context_id)
{
	ns_entry_contexts[core_idx].entry_point = entry;
	ns_entry_contexts[core_idx].context_id = context_id;
	dsb_ishst();
}

int boot_core_release(size_t core_idx, paddr_t entry)
{
	if (!core_idx || core_idx >= CFG_TEE_CORE_NB_CORE)
		return -1;

	ns_entry_contexts[core_idx].entry_point = entry;
	dmb();
	spin_table[core_idx] = 1;
	dsb();
	sev();

	return 0;
}

/*
 * Spin until a secondary boot request arrives, then return with the
 * secondary core entry context.
 */
struct ns_entry_context *boot_core_hpen(void)
{
#ifdef CFG_PSCI_ARM32
	return &ns_entry_contexts[get_core_pos()];
#else
	do {
		wfe();
	} while (!spin_table[get_core_pos()]);
	dmb();
	return &ns_entry_contexts[get_core_pos()];
#endif
}
#endif
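
/*
 * Illustrative sketch (not from the original file): a platform PSCI
 * CPU_ON handler could use boot_core_release() to kick a secondary core
 * out of the spin loop in boot_core_hpen() above. The handler below and
 * its integration point are hypothetical:
 *
 * int psci_cpu_on(uint32_t core_id, uint32_t entry, uint32_t context_id)
 * {
 *	size_t pos = get_core_pos_mpidr(core_id);
 *
 *	boot_set_core_ns_entry(pos, entry, context_id);
 *	if (boot_core_release(pos, virt_to_phys((void *)TEE_LOAD_ADDR)))
 *		return PSCI_RET_INVALID_PARAMETERS;
 *	return PSCI_RET_SUCCESS;
 * }
 */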

#if defined(CFG_CORE_ASLR)
#if defined(CFG_DT)
unsigned long __weak get_aslr_seed(void)
{
	void *fdt = NULL;
	int rc = 0;
	const uint64_t *seed = NULL;
	int offs = 0;
	int len = 0;

	if (!IS_ENABLED(CFG_CORE_SEL2_SPMC))
		fdt = (void *)boot_arg_fdt;

	if (!fdt) {
		DMSG("No fdt");
		goto err;
	}

	rc = fdt_check_header(fdt);
	if (rc) {
		DMSG("Bad fdt: %d", rc);
		goto err;
	}

	offs = fdt_path_offset(fdt, "/secure-chosen");
	if (offs < 0) {
		DMSG("Cannot find /secure-chosen");
		goto err;
	}
	seed = fdt_getprop(fdt, offs, "kaslr-seed", &len);
	if (!seed || len != sizeof(*seed)) {
		DMSG("Cannot find valid kaslr-seed");
		goto err;
	}

	return fdt64_to_cpu(fdt64_ld(seed));

err:
	/* Try platform implementation */
	return plat_get_aslr_seed();
}
#else /*!CFG_DT*/
unsigned long __weak get_aslr_seed(void)
{
	/* Try platform implementation */
	return plat_get_aslr_seed();
}
#endif /*!CFG_DT*/
#endif /*CFG_CORE_ASLR*/
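
/*
 * For illustration (not from the original file): the seed read above is
 * expected in the secure world DTB as, for example:
 *
 *	secure-chosen {
 *		kaslr-seed = <0xdeadbeef 0xcafef00d>;
 *	};
 *
 * where the two cells form the 64-bit seed value.
 */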

static void *get_fdt_from_boot_info(struct ffa_boot_info_header_1_1 *hdr)
{
	struct ffa_boot_info_1_1 *desc = NULL;
	uint8_t content_fmt = 0;
	uint8_t name_fmt = 0;
	void *fdt = NULL;
	int ret = 0;

	if (hdr->signature != FFA_BOOT_INFO_SIGNATURE) {
		EMSG("Bad boot info signature %#"PRIx32, hdr->signature);
		panic();
	}
	if (hdr->version != FFA_BOOT_INFO_VERSION_1_1 &&
	    hdr->version != FFA_BOOT_INFO_VERSION_1_2) {
		EMSG("Bad boot info version %#"PRIx32, hdr->version);
		panic();
	}
	if (hdr->desc_count != 1) {
		EMSG("Bad boot info descriptor count %#"PRIx32,
		     hdr->desc_count);
		panic();
	}
	desc = (void *)((vaddr_t)hdr + hdr->desc_offset);
	name_fmt = desc->flags & FFA_BOOT_INFO_FLAG_NAME_FORMAT_MASK;
	if (name_fmt == FFA_BOOT_INFO_FLAG_NAME_FORMAT_STRING)
		DMSG("Boot info descriptor name \"%16s\"", desc->name);
	else if (name_fmt == FFA_BOOT_INFO_FLAG_NAME_FORMAT_UUID)
		DMSG("Boot info descriptor UUID %pUl", (void *)desc->name);
	else
		DMSG("Boot info descriptor: unknown name format %"PRIu8,
		     name_fmt);

	content_fmt = (desc->flags & FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_MASK) >>
		      FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_SHIFT;
	if (content_fmt != FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_ADDR) {
		EMSG("Bad boot info content format %"PRIu8", expected %u (address)",
		     content_fmt, FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_ADDR);
		panic();
	}

	fdt = (void *)(vaddr_t)desc->contents;
	ret = fdt_check_full(fdt, desc->size);
	if (ret < 0) {
		EMSG("Invalid Device Tree at %p: error %d", fdt, ret);
		panic();
	}
	return fdt;
}

static void get_sec_mem_from_manifest(void *fdt, paddr_t *base, size_t *size)
{
	int ret = 0;
	uint64_t num = 0;

	ret = fdt_node_check_compatible(fdt, 0, "arm,ffa-manifest-1.0");
	if (ret < 0) {
		EMSG("Invalid FF-A manifest at %p: error %d", fdt, ret);
		panic();
	}
	ret = dt_getprop_as_number(fdt, 0, "load-address", &num);
	if (ret < 0) {
		EMSG("Can't read \"load-address\" from FF-A manifest at %p: error %d",
		     fdt, ret);
		panic();
	}
	*base = num;
	/* "mem-size" is currently an undocumented extension to the spec. */
	ret = dt_getprop_as_number(fdt, 0, "mem-size", &num);
	if (ret < 0) {
		EMSG("Can't read \"mem-size\" from FF-A manifest at %p: error %d",
		     fdt, ret);
		panic();
	}
	*size = num;
}

void __weak boot_save_args(unsigned long a0, unsigned long a1,
			   unsigned long a2, unsigned long a3,
			   unsigned long a4 __maybe_unused)
{
	/*
	 * Register use:
	 *
	 * Scenario A: Default arguments
	 * a0   - CFG_CORE_FFA=y && CFG_CORE_SEL2_SPMC=n:
	 *        if non-NULL holds the TOS FW config [1] address
	 *      - CFG_CORE_FFA=y &&
	 *        (CFG_CORE_SEL2_SPMC=y || CFG_CORE_EL3_SPMC=y):
	 *        address of FF-A Boot Information Blob
	 *      - CFG_CORE_FFA=n:
	 *        if non-NULL holds the pageable part address
	 * a1	- CFG_WITH_ARM_TRUSTED_FW=n (Armv7):
	 *	  Armv7 standard bootarg #1 (kept track of in entry_a32.S)
	 * a2   - CFG_CORE_SEL2_SPMC=n:
	 *        if non-NULL holds the system DTB address
	 *	- CFG_WITH_ARM_TRUSTED_FW=n (Armv7):
	 *	  Armv7 standard bootarg #2 (system DTB address, kept track
	 *	  of in entry_a32.S)
	 * a3	- Not used
	 * a4	- CFG_WITH_ARM_TRUSTED_FW=n:
	 *	  Non-secure entry address
	 *
	 * [1] A TF-A concept: TOS_FW_CONFIG - Trusted OS Firmware
	 * configuration file. Used by the Trusted OS (BL32), that is, OP-TEE
	 * here. This is also called the Manifest DT, related to the Manifest
	 * DT passed in the FF-A Boot Information Blob, but with a different
	 * compatible string.
	 *
	 * Scenario B: FW Handoff via Transfer List
	 * Note: FF-A and non-secure entry are not yet supported with
	 *       Transfer List
	 * a0	- DTB address or 0 (AArch64)
	 *	- must be 0 (AArch32)
	 * a1	- 1 << 32 | TRANSFER_LIST_SIGNATURE[0:31] (AArch64)
	 *	- 1 << 24 | TRANSFER_LIST_SIGNATURE[0:23] (AArch32)
	 * a2	- must be 0 (AArch64)
	 *	- DTB address or 0 (AArch32)
	 * a3	- Transfer list base address
	 * a4	- Not used
	 */

	if (IS_ENABLED(CFG_TRANSFER_LIST)) {
		if (IS_ENABLED(CFG_ARM64_core) &&
		    a1 == TL_HANDOFF_X1_VALUE(TL_REG_CONVENTION_VER)) {
			boot_save_transfer_list(a2, a3, a0);
			boot_arg_fdt = a0;
		} else if (IS_ENABLED(CFG_ARM32_core) &&
			   a1 == TL_HANDOFF_R1_VALUE(TL_REG_CONVENTION_VER)) {
			boot_save_transfer_list(a0, a3, a2);
			boot_arg_fdt = a2;
		}

		return;
	}

	if (!IS_ENABLED(CFG_CORE_SEL2_SPMC)) {
#if defined(CFG_DT_ADDR)
		boot_arg_fdt = CFG_DT_ADDR;
#else
		boot_arg_fdt = a2;
#endif
	}

	if (IS_ENABLED(CFG_CORE_FFA)) {
		if (IS_ENABLED(CFG_CORE_SEL2_SPMC) ||
		    IS_ENABLED(CFG_CORE_EL3_SPMC))
			init_manifest_dt(get_fdt_from_boot_info((void *)a0));
		else
			init_manifest_dt((void *)a0);
		if (IS_ENABLED(CFG_CORE_SEL2_SPMC) &&
		    IS_ENABLED(CFG_CORE_PHYS_RELOCATABLE)) {
			paddr_t base = 0;
			size_t size = 0;

			get_sec_mem_from_manifest(get_manifest_dt(),
						  &base, &size);
			core_mmu_set_secure_memory(base, size);
		}
	} else {
		if (IS_ENABLED(CFG_WITH_PAGER)) {
#if defined(CFG_PAGEABLE_ADDR)
			boot_arg_pageable_part = CFG_PAGEABLE_ADDR;
#else
			boot_arg_pageable_part = a0;
#endif
		}
		if (!IS_ENABLED(CFG_WITH_ARM_TRUSTED_FW)) {
#if defined(CFG_NS_ENTRY_ADDR)
			boot_arg_nsec_entry = CFG_NS_ENTRY_ADDR;
#else
			boot_arg_nsec_entry = a4;
#endif
		}
	}
}

#if defined(CFG_TRANSFER_LIST)
static TEE_Result release_transfer_list(void)
{
	struct dt_descriptor *dt = get_external_dt_desc();

	if (!mapped_tl)
		return TEE_SUCCESS;

	if (dt) {
		int ret = 0;
		struct transfer_list_entry *tl_e = NULL;


		/*
		 * Pack the DTB and update the transfer list before
		 * unmapping.
		 */
		ret = fdt_pack(dt->blob);
		if (ret < 0) {
			EMSG("Failed to pack Device Tree at 0x%" PRIxPA
			     ": error %d", virt_to_phys(dt->blob), ret);
			panic();
		}

		tl_e = transfer_list_find(mapped_tl, TL_TAG_FDT);
		assert(dt->blob == transfer_list_entry_data(tl_e));
		transfer_list_set_data_size(mapped_tl, tl_e,
					    fdt_totalsize(dt->blob));
		dt->blob = NULL;
	}

	transfer_list_unmap_sync(mapped_tl);
	mapped_tl = NULL;

	return TEE_SUCCESS;
}

boot_final(release_transfer_list);
#endif
1513