xref: /optee_os/core/arch/arm/kernel/boot.c (revision 021fee0affe5db6f1f713fea1119350a7433baea)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2015-2023, Linaro Limited
4  * Copyright (c) 2023, Arm Limited
5  */
6 
7 #include <arm.h>
8 #include <assert.h>
9 #include <compiler.h>
10 #include <config.h>
11 #include <console.h>
12 #include <crypto/crypto.h>
13 #include <drivers/gic.h>
14 #include <dt-bindings/interrupt-controller/arm-gic.h>
15 #include <ffa.h>
16 #include <initcall.h>
17 #include <inttypes.h>
18 #include <io.h>
19 #include <keep.h>
20 #include <kernel/asan.h>
21 #include <kernel/boot.h>
22 #include <kernel/dt.h>
23 #include <kernel/linker.h>
24 #include <kernel/misc.h>
25 #include <kernel/panic.h>
26 #include <kernel/tee_misc.h>
27 #include <kernel/thread.h>
28 #include <kernel/tpm.h>
29 #include <kernel/transfer_list.h>
30 #include <libfdt.h>
31 #include <malloc.h>
32 #include <memtag.h>
33 #include <mm/core_memprot.h>
34 #include <mm/core_mmu.h>
35 #include <mm/fobj.h>
36 #include <mm/tee_mm.h>
37 #include <mm/tee_pager.h>
38 #include <sm/psci.h>
39 #include <trace.h>
40 #include <utee_defines.h>
41 #include <util.h>
42 
43 #include <platform_config.h>
44 
45 #if !defined(CFG_WITH_ARM_TRUSTED_FW)
46 #include <sm/sm.h>
47 #endif
48 
49 #if defined(CFG_WITH_VFP)
50 #include <kernel/vfp.h>
51 #endif
52 
53 /*
54  * In this file we're using unsigned long to represent physical pointers as
55  * they are received in a single register when OP-TEE is initially entered.
56  * This limits 32-bit systems to only make use of the lower 32 bits
57  * of a physical address for initial parameters.
58  *
59  * 64-bit systems on the other hand can use full 64-bit physical pointers.
60  */
61 #define PADDR_INVALID		ULONG_MAX
62 
63 #if defined(CFG_BOOT_SECONDARY_REQUEST)
64 struct ns_entry_context {
65 	uintptr_t entry_point;
66 	uintptr_t context_id;
67 };
68 struct ns_entry_context ns_entry_contexts[CFG_TEE_CORE_NB_CORE];
69 static uint32_t spin_table[CFG_TEE_CORE_NB_CORE];
70 #endif
71 
72 #ifdef CFG_BOOT_SYNC_CPU
73 /*
74  * Array used when booting to synchronize the CPUs.
75  * When 0, the CPU has not started.
76  * When 1, it has started.
77  */
78 uint32_t sem_cpu_sync[CFG_TEE_CORE_NB_CORE];
79 DECLARE_KEEP_PAGER(sem_cpu_sync);
80 #endif
81 
82 static unsigned long boot_arg_fdt __nex_bss;
83 static unsigned long boot_arg_nsec_entry __nex_bss;
84 static unsigned long boot_arg_pageable_part __nex_bss;
85 static unsigned long boot_arg_transfer_list __nex_bss;
86 static struct transfer_list_header *mapped_tl __nex_bss;
87 
88 #ifdef CFG_SECONDARY_INIT_CNTFRQ
89 static uint32_t cntfrq;
90 #endif
91 
92 /* May be overridden in plat-$(PLATFORM)/main.c */
93 __weak void plat_primary_init_early(void)
94 {
95 }
96 DECLARE_KEEP_PAGER(plat_primary_init_early);
97 
98 /* May be overridden in plat-$(PLATFORM)/main.c */
99 __weak void boot_primary_init_intc(void)
100 {
101 }
102 
103 /* May be overridden in plat-$(PLATFORM)/main.c */
104 __weak void boot_secondary_init_intc(void)
105 {
106 }
107 
108 /* May be overridden in plat-$(PLATFORM)/main.c */
109 __weak unsigned long plat_get_aslr_seed(void)
110 {
111 	DMSG("Warning: no ASLR seed");
112 
113 	return 0;
114 }
115 
116 #if defined(_CFG_CORE_STACK_PROTECTOR) || defined(CFG_WITH_STACK_CANARIES)
117 /* Generate random stack canary value on boot up */
118 __weak void plat_get_random_stack_canaries(void *buf, size_t ncan, size_t size)
119 {
120 	TEE_Result ret = TEE_ERROR_GENERIC;
121 	size_t i = 0;
122 
123 	assert(buf && ncan && size);
124 
125 	/*
126 	 * With virtualization the RNG is not initialized in the Nexus core,
127 	 * so a platform-specific override of this function is needed.
128 	 */
129 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
130 		IMSG("WARNING: Using fixed value for stack canary");
131 		memset(buf, 0xab, ncan * size);
132 		goto out;
133 	}
134 
135 	ret = crypto_rng_read(buf, ncan * size);
136 	if (ret != TEE_SUCCESS)
137 		panic("Failed to generate random stack canary");
138 
139 out:
140 	/* Leave a null byte in each canary to stop string-based exploits */
141 	for (i = 0; i < ncan; i++)
142 		*((uint8_t *)buf + size * i) = 0;
143 }
144 #endif /* _CFG_CORE_STACK_PROTECTOR || CFG_WITH_STACK_CANARIES */
145 
146 /*
147  * This function is called as a guard after each SMC call that is not
148  * supposed to return.
149  */
150 void __panic_at_smc_return(void)
151 {
152 	panic();
153 }
154 
155 #if defined(CFG_WITH_ARM_TRUSTED_FW)
156 void init_sec_mon(unsigned long nsec_entry __maybe_unused)
157 {
158 	assert(nsec_entry == PADDR_INVALID);
159 	/* Do nothing as we don't have a secure monitor */
160 }
161 #else
162 /* May be overridden in plat-$(PLATFORM)/main.c */
163 __weak void init_sec_mon(unsigned long nsec_entry)
164 {
165 	struct sm_nsec_ctx *nsec_ctx;
166 
167 	assert(nsec_entry != PADDR_INVALID);
168 
169 	/* Initialize secure monitor */
170 	nsec_ctx = sm_get_nsec_ctx();
171 	nsec_ctx->mon_lr = nsec_entry;
172 	nsec_ctx->mon_spsr = CPSR_MODE_SVC | CPSR_I;
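	/* Bit 0 set in the entry address indicates a Thumb entry point */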
173 	if (nsec_entry & 1)
174 		nsec_ctx->mon_spsr |= CPSR_T;
175 }
176 #endif
177 
178 #if defined(CFG_WITH_ARM_TRUSTED_FW)
179 static void init_vfp_nsec(void)
180 {
181 }
182 #else
183 static void init_vfp_nsec(void)
184 {
185 	/* Normal world can use CP10 and CP11 (SIMD/VFP) */
186 	write_nsacr(read_nsacr() | NSACR_CP10 | NSACR_CP11);
187 }
188 #endif
189 
190 static void check_crypto_extensions(void)
191 {
192 	bool ce_supported = true;
193 
194 	if (!feat_aes_implemented() &&
195 	    IS_ENABLED(CFG_CRYPTO_AES_ARM_CE)) {
196 		EMSG("AES instructions are not supported");
197 		ce_supported = false;
198 	}
199 
200 	if (!feat_sha1_implemented() &&
201 	    IS_ENABLED(CFG_CRYPTO_SHA1_ARM_CE)) {
202 		EMSG("SHA1 instructions are not supported");
203 		ce_supported = false;
204 	}
205 
206 	if (!feat_sha256_implemented() &&
207 	    IS_ENABLED(CFG_CRYPTO_SHA256_ARM_CE)) {
208 		EMSG("SHA256 instructions are not supported");
209 		ce_supported = false;
210 	}
211 
212 	/* Check aarch64 specific instructions */
213 	if (IS_ENABLED(CFG_ARM64_core)) {
214 		if (!feat_sha512_implemented() &&
215 		    IS_ENABLED(CFG_CRYPTO_SHA512_ARM_CE)) {
216 			EMSG("SHA512 instructions are not supported");
217 			ce_supported = false;
218 		}
219 
220 		if (!feat_sha3_implemented() &&
221 		    IS_ENABLED(CFG_CRYPTO_SHA3_ARM_CE)) {
222 			EMSG("SHA3 instructions are not supported");
223 			ce_supported = false;
224 		}
225 
226 		if (!feat_sm3_implemented() &&
227 		    IS_ENABLED(CFG_CRYPTO_SM3_ARM_CE)) {
228 			EMSG("SM3 instructions are not supported");
229 			ce_supported = false;
230 		}
231 
232 		if (!feat_sm4_implemented() &&
233 		    IS_ENABLED(CFG_CRYPTO_SM4_ARM_CE)) {
234 			EMSG("SM4 instructions are not supported");
235 			ce_supported = false;
236 		}
237 	}
238 
239 	if (!ce_supported)
240 		panic("HW doesn't support CE instructions");
241 }
242 
243 #if defined(CFG_WITH_VFP)
244 
245 #ifdef ARM32
246 static void init_vfp_sec(void)
247 {
248 	uint32_t cpacr = read_cpacr();
249 
250 	/*
251 	 * Enable Advanced SIMD functionality.
252 	 * Enable use of D16-D31 of the Floating-point Extension register
253 	 * file.
254 	 */
255 	cpacr &= ~(CPACR_ASEDIS | CPACR_D32DIS);
256 	/*
257 	 * Enable usage of CP10 and CP11 (SIMD/VFP) in both kernel and user
258 	 * mode.
259 	 */
260 	cpacr |= CPACR_CP(10, CPACR_CP_ACCESS_FULL);
261 	cpacr |= CPACR_CP(11, CPACR_CP_ACCESS_FULL);
262 	write_cpacr(cpacr);
263 }
264 #endif /* ARM32 */
265 
266 #ifdef ARM64
267 static void init_vfp_sec(void)
268 {
269 	/* Not using VFP until thread_kernel_enable_vfp() */
270 	vfp_disable();
271 }
272 #endif /* ARM64 */
273 
274 #else /* CFG_WITH_VFP */
275 
276 static void init_vfp_sec(void)
277 {
278 	/* Not using VFP */
279 }
280 #endif
281 
282 #ifdef CFG_SECONDARY_INIT_CNTFRQ
283 static void primary_save_cntfrq(void)
284 {
285 	assert(cntfrq == 0);
286 
287 	/*
288 	 * CNTFRQ should be initialized on the primary CPU by a
289 	 * previous boot stage
290 	 */
291 	cntfrq = read_cntfrq();
292 }
293 
294 static void secondary_init_cntfrq(void)
295 {
296 	assert(cntfrq != 0);
297 	write_cntfrq(cntfrq);
298 }
299 #else /* CFG_SECONDARY_INIT_CNTFRQ */
300 static void primary_save_cntfrq(void)
301 {
302 }
303 
304 static void secondary_init_cntfrq(void)
305 {
306 }
307 #endif
308 
309 #ifdef CFG_CORE_SANITIZE_KADDRESS
310 static void init_run_constructors(void)
311 {
312 	const vaddr_t *ctor;
313 
314 	for (ctor = &__ctor_list; ctor < &__ctor_end; ctor++)
315 		((void (*)(void))(*ctor))();
316 }
317 
318 static void init_asan(void)
319 {
320 
321 	/*
322 	 * CFG_ASAN_SHADOW_OFFSET is also supplied as
323 	 * -fasan-shadow-offset=$(CFG_ASAN_SHADOW_OFFSET) to the compiler.
324 	 * Since all the values needed to calculate
325 	 * CFG_ASAN_SHADOW_OFFSET aren't available to make, we need to
326 	 * calculate it in advance and hard code it into the platform
327 	 * conf.mk. Here, where we have all the needed values, we double
328 	 * check that the compiler was supplied the correct value.
329 	 */
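	/*
	 * Illustrative note (added, not part of the original comment): with
	 * the usual ASan mapping shadow(addr) = (addr >> 3) + offset, the
	 * CFG_ASAN_SHADOW_OFFSET verified below makes shadow(TEE_RAM_START)
	 * land exactly at __ASAN_SHADOW_START, i.e. right after the roughly
	 * 8/9 of TEE_RAM_VA_SIZE left for the core itself.
	 */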
330 
331 #define __ASAN_SHADOW_START \
332 	ROUNDUP(TEE_RAM_START + (TEE_RAM_VA_SIZE * 8) / 9 - 8, 8)
333 	assert(__ASAN_SHADOW_START == (vaddr_t)&__asan_shadow_start);
334 #define __CFG_ASAN_SHADOW_OFFSET \
335 	(__ASAN_SHADOW_START - (TEE_RAM_START / 8))
336 	COMPILE_TIME_ASSERT(CFG_ASAN_SHADOW_OFFSET == __CFG_ASAN_SHADOW_OFFSET);
337 #undef __ASAN_SHADOW_START
338 #undef __CFG_ASAN_SHADOW_OFFSET
339 
340 	/*
341 	 * Assign the area covered by the shadow memory: everything from the
342 	 * core load address up to the beginning of the shadow area.
343 	 */
344 	asan_set_shadowed((void *)TEE_LOAD_ADDR, &__asan_shadow_start);
345 
346 	/*
347 	 * Add access to areas that aren't opened automatically by a
348 	 * constructor.
349 	 */
350 	asan_tag_access(&__ctor_list, &__ctor_end);
351 	asan_tag_access(__rodata_start, __rodata_end);
352 #ifdef CFG_WITH_PAGER
353 	asan_tag_access(__pageable_start, __pageable_end);
354 #endif /*CFG_WITH_PAGER*/
355 	asan_tag_access(__nozi_start, __nozi_end);
356 #ifdef ARM32
357 	asan_tag_access(__exidx_start, __exidx_end);
358 	asan_tag_access(__extab_start, __extab_end);
359 #endif
360 
361 	init_run_constructors();
362 
363 	/* Everything is tagged correctly, let's start address sanitizing. */
364 	asan_start();
365 }
366 #else /*CFG_CORE_SANITIZE_KADDRESS*/
367 static void init_asan(void)
368 {
369 }
370 #endif /*CFG_CORE_SANITIZE_KADDRESS*/
371 
372 #if defined(CFG_MEMTAG)
373 /* Called from entry_a64.S only when MEMTAG is configured */
374 void boot_init_memtag(void)
375 {
376 	memtag_init_ops(feat_mte_implemented());
377 }
378 
379 static TEE_Result mmap_clear_memtag(struct tee_mmap_region *map,
380 				    void *ptr __unused)
381 {
382 	switch (map->type) {
383 	case MEM_AREA_TEE_RAM:
384 	case MEM_AREA_TEE_RAM_RW:
385 	case MEM_AREA_NEX_RAM_RO:
386 	case MEM_AREA_NEX_RAM_RW:
387 	case MEM_AREA_TEE_ASAN:
388 	case MEM_AREA_TA_RAM:
389 		DMSG("Clearing tags for VA %#"PRIxVA"..%#"PRIxVA,
390 		     map->va, map->va + map->size - 1);
391 		memtag_set_tags((void *)map->va, map->size, 0);
392 		break;
393 	default:
394 		break;
395 	}
396 
397 	return TEE_SUCCESS;
398 }
399 
400 /* Called from entry_a64.S only when MEMTAG is configured */
401 void boot_clear_memtag(void)
402 {
403 	core_mmu_for_each_map(NULL, mmap_clear_memtag);
404 }
405 #endif
406 
407 #ifdef CFG_WITH_PAGER
408 
409 #ifdef CFG_CORE_SANITIZE_KADDRESS
410 static void carve_out_asan_mem(tee_mm_pool_t *pool)
411 {
412 	const size_t s = pool->hi - pool->lo;
413 	tee_mm_entry_t *mm;
414 	paddr_t apa = ASAN_MAP_PA;
415 	size_t asz = ASAN_MAP_SZ;
416 
417 	if (core_is_buffer_outside(apa, asz, pool->lo, s))
418 		return;
419 
420 	/* Reserve the shadow area */
421 	if (!core_is_buffer_inside(apa, asz, pool->lo, s)) {
422 		if (apa < pool->lo) {
423 			/*
424 			 * ASAN buffer is overlapping with the beginning of
425 			 * the pool.
426 			 */
427 			asz -= pool->lo - apa;
428 			apa = pool->lo;
429 		} else {
430 			/*
431 			 * ASAN buffer is overlapping with the end of the
432 			 * pool.
433 			 */
434 			asz = pool->hi - apa;
435 		}
436 	}
437 	mm = tee_mm_alloc2(pool, apa, asz);
438 	assert(mm);
439 }
440 #else
441 static void carve_out_asan_mem(tee_mm_pool_t *pool __unused)
442 {
443 }
444 #endif
445 
446 static void print_pager_pool_size(void)
447 {
448 	struct tee_pager_stats __maybe_unused stats;
449 
450 	tee_pager_get_stats(&stats);
451 	IMSG("Pager pool size: %zukB",
452 		stats.npages_all * SMALL_PAGE_SIZE / 1024);
453 }
454 
455 static void init_virt_pool(tee_mm_pool_t *virt_pool)
456 {
457 	const vaddr_t begin = VCORE_START_VA;
458 	size_t size = TEE_RAM_VA_SIZE;
459 
460 #ifdef CFG_CORE_SANITIZE_KADDRESS
461 	/* Carve out ASAN memory, flat mapped after core memory */
462 	if (begin + size > ASAN_SHADOW_PA)
463 		size = ASAN_MAP_PA - begin;
464 #endif
465 
466 	if (!tee_mm_init(virt_pool, begin, size, SMALL_PAGE_SHIFT,
467 			 TEE_MM_POOL_NO_FLAGS))
468 		panic("core_virt_mem_pool init failed");
469 }
470 
471 /*
472  * With CFG_CORE_ASLR=y the init part is relocated very early during boot.
473  * The init part is also paged just as the rest of the normal paged code, with
474  * the difference that it's preloaded during boot. When the backing store
475  * is configured the entire paged binary is copied in place and then also
476  * the init part. Since the init part has been relocated (references to
477  * addresses updated to compensate for the new load address) this has to be
478  * undone for the hashes of those pages to match with the original binary.
479  * undone for the hashes of those pages to match the original binary.
480  * If CFG_CORE_ASLR=n, nothing needs to be done as the code/ro pages are
481  * unchanged.
482  */
483 static void undo_init_relocation(uint8_t *paged_store __maybe_unused)
484 {
485 #ifdef CFG_CORE_ASLR
486 	unsigned long *ptr = NULL;
487 	const uint32_t *reloc = NULL;
488 	const uint32_t *reloc_end = NULL;
489 	unsigned long offs = boot_mmu_config.map_offset;
490 	const struct boot_embdata *embdata = (const void *)__init_end;
491 	vaddr_t addr_end = (vaddr_t)__init_end - offs - TEE_LOAD_ADDR;
492 	vaddr_t addr_start = (vaddr_t)__init_start - offs - TEE_LOAD_ADDR;
493 
494 	reloc = (const void *)((vaddr_t)embdata + embdata->reloc_offset);
495 	reloc_end = reloc + embdata->reloc_len / sizeof(*reloc);
496 
497 	for (; reloc < reloc_end; reloc++) {
498 		if (*reloc < addr_start)
499 			continue;
500 		if (*reloc >= addr_end)
501 			break;
502 		ptr = (void *)(paged_store + *reloc - addr_start);
503 		*ptr -= offs;
504 	}
505 #endif
506 }
507 
508 static struct fobj *ro_paged_alloc(tee_mm_entry_t *mm, void *hashes,
509 				   void *store)
510 {
511 	const unsigned int num_pages = tee_mm_get_bytes(mm) / SMALL_PAGE_SIZE;
512 #ifdef CFG_CORE_ASLR
513 	unsigned int reloc_offs = (vaddr_t)__pageable_start - VCORE_START_VA;
514 	const struct boot_embdata *embdata = (const void *)__init_end;
515 	const void *reloc = __init_end + embdata->reloc_offset;
516 
517 	return fobj_ro_reloc_paged_alloc(num_pages, hashes, reloc_offs,
518 					 reloc, embdata->reloc_len, store);
519 #else
520 	return fobj_ro_paged_alloc(num_pages, hashes, store);
521 #endif
522 }
523 
524 static void init_runtime(unsigned long pageable_part)
525 {
526 	size_t n;
527 	size_t init_size = (size_t)(__init_end - __init_start);
528 	size_t pageable_start = (size_t)__pageable_start;
529 	size_t pageable_end = (size_t)__pageable_end;
530 	size_t pageable_size = pageable_end - pageable_start;
531 	vaddr_t tzsram_end = TZSRAM_BASE + TZSRAM_SIZE - TEE_LOAD_ADDR +
532 			     VCORE_START_VA;
533 	size_t hash_size = (pageable_size / SMALL_PAGE_SIZE) *
534 			   TEE_SHA256_HASH_SIZE;
535 	const struct boot_embdata *embdata = (const void *)__init_end;
536 	const void *tmp_hashes = NULL;
537 	tee_mm_entry_t *mm = NULL;
538 	struct fobj *fobj = NULL;
539 	uint8_t *paged_store = NULL;
540 	uint8_t *hashes = NULL;
541 
542 	assert(pageable_size % SMALL_PAGE_SIZE == 0);
543 	assert(embdata->total_len >= embdata->hashes_offset +
544 				     embdata->hashes_len);
545 	assert(hash_size == embdata->hashes_len);
546 
547 	tmp_hashes = __init_end + embdata->hashes_offset;
548 
549 	init_asan();
550 
551 	/* Add heap2 first as heap1 may be too small for the initial bget pool */
552 	malloc_add_pool(__heap2_start, __heap2_end - __heap2_start);
553 	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
554 
555 	/*
556 	 * This needs to be initialized early to support address lookup
557 	 * in MEM_AREA_TEE_RAM
558 	 */
559 	tee_pager_early_init();
560 
561 	hashes = malloc(hash_size);
562 	IMSG_RAW("\n");
563 	IMSG("Pager is enabled. Hashes: %zu bytes", hash_size);
564 	assert(hashes);
565 	asan_memcpy_unchecked(hashes, tmp_hashes, hash_size);
566 
567 	/*
568 	 * Need tee_mm_sec_ddr initialized to be able to allocate secure
569 	 * DDR below.
570 	 */
571 	core_mmu_init_ta_ram();
572 
573 	carve_out_asan_mem(&tee_mm_sec_ddr);
574 
575 	mm = tee_mm_alloc(&tee_mm_sec_ddr, pageable_size);
576 	assert(mm);
577 	paged_store = phys_to_virt(tee_mm_get_smem(mm), MEM_AREA_TA_RAM,
578 				   pageable_size);
579 	/*
580 	 * Load pageable part in the dedicated allocated area:
581 	 * - Move pageable non-init part into pageable area. Note bootloader
582 	 *   may have loaded it anywhere in TA RAM hence use memmove().
583 	 * - Copy pageable init part from current location into pageable area.
584 	 */
585 	memmove(paged_store + init_size,
586 		phys_to_virt(pageable_part,
587 			     core_mmu_get_type_by_pa(pageable_part),
588 			     __pageable_part_end - __pageable_part_start),
589 		__pageable_part_end - __pageable_part_start);
590 	asan_memcpy_unchecked(paged_store, __init_start, init_size);
591 	/*
592 	 * Undo any relocation of the init part so the hash checks
593 	 * can pass.
594 	 */
595 	undo_init_relocation(paged_store);
596 
597 	/* Check that the hashes of what's in the pageable area are OK */
598 	DMSG("Checking hashes of pageable area");
599 	for (n = 0; (n * SMALL_PAGE_SIZE) < pageable_size; n++) {
600 		const uint8_t *hash = hashes + n * TEE_SHA256_HASH_SIZE;
601 		const uint8_t *page = paged_store + n * SMALL_PAGE_SIZE;
602 		TEE_Result res;
603 
604 		DMSG("hash pg_idx %zu hash %p page %p", n, hash, page);
605 		res = hash_sha256_check(hash, page, SMALL_PAGE_SIZE);
606 		if (res != TEE_SUCCESS) {
607 			EMSG("Hash failed for page %zu at %p: res 0x%x",
608 			     n, (void *)page, res);
609 			panic();
610 		}
611 	}
612 
613 	/*
614 	 * Assert prepaged init sections are page aligned so that nothing
615 	 * trails uninitialized at the end of the premapped init area.
616 	 */
617 	assert(!(init_size & SMALL_PAGE_MASK));
618 
619 	/*
620 	 * Initialize the virtual memory pool used for main_mmu_l2_ttb which
621 	 * is supplied to tee_pager_init() below.
622 	 */
623 	init_virt_pool(&core_virt_mem_pool);
624 
625 	/*
626 	 * Assign the alias area for the pager at the end of the small page
627 	 * block that the rest of the binary is loaded into. We're taking
628 	 * more than needed, but we're guaranteed not to need more than the
629 	 * physical amount of TZSRAM.
630 	 */
631 	mm = tee_mm_alloc2(&core_virt_mem_pool,
632 			   (vaddr_t)core_virt_mem_pool.lo +
633 			   core_virt_mem_pool.size - TZSRAM_SIZE,
634 			   TZSRAM_SIZE);
635 	assert(mm);
636 	tee_pager_set_alias_area(mm);
637 
638 	/*
639 	 * Claim virtual memory which isn't paged.
640 	 * Linear memory (flat-mapped core memory) ends there.
641 	 */
642 	mm = tee_mm_alloc2(&core_virt_mem_pool, VCORE_UNPG_RX_PA,
643 			   (vaddr_t)(__pageable_start - VCORE_UNPG_RX_PA));
644 	assert(mm);
645 
646 	/*
647 	 * Allocate virtual memory for the pageable area and let the pager
648 	 * take charge of all the pages already assigned to that memory.
649 	 */
650 	mm = tee_mm_alloc2(&core_virt_mem_pool, (vaddr_t)__pageable_start,
651 			   pageable_size);
652 	assert(mm);
653 	fobj = ro_paged_alloc(mm, hashes, paged_store);
654 	assert(fobj);
655 	tee_pager_add_core_region(tee_mm_get_smem(mm), PAGED_REGION_TYPE_RO,
656 				  fobj);
657 	fobj_put(fobj);
658 
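	/*
	 * Hand the physical pages over to the pager: the init part is still
	 * mapped and in use (last argument false), while the remaining
	 * pageable pages and any spare TZSRAM pages are added unmapped
	 * (last argument true).
	 */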
659 	tee_pager_add_pages(pageable_start, init_size / SMALL_PAGE_SIZE, false);
660 	tee_pager_add_pages(pageable_start + init_size,
661 			    (pageable_size - init_size) / SMALL_PAGE_SIZE,
662 			    true);
663 	if (pageable_end < tzsram_end)
664 		tee_pager_add_pages(pageable_end, (tzsram_end - pageable_end) /
665 						   SMALL_PAGE_SIZE, true);
666 
667 	/*
668 	 * There may be physical pages in TZSRAM before the core load address.
669 	 * These pages can be added to the physical pages pool of the pager.
670 	 * This setup may happen when the secure bootloader runs in TZSRAM
671 	 * and its memory can be reused by OP-TEE once boot stages complete.
672 	 */
673 	tee_pager_add_pages(core_virt_mem_pool.lo,
674 			    (VCORE_UNPG_RX_PA - core_virt_mem_pool.lo) /
675 				SMALL_PAGE_SIZE,
676 			    true);
677 
678 	print_pager_pool_size();
679 }
680 #else
681 
682 static void init_runtime(unsigned long pageable_part __unused)
683 {
684 	init_asan();
685 
686 	/*
687 	 * By default the whole of OP-TEE uses malloc, so we need to
688 	 * initialize it early. But when virtualization is enabled, malloc
689 	 * is used only by the TEE runtime, so it should be initialized
690 	 * later, separately for every virtual partition. Core code uses
691 	 * nex_malloc instead.
692 	 */
693 #ifdef CFG_NS_VIRTUALIZATION
694 	nex_malloc_add_pool(__nex_heap_start, __nex_heap_end -
695 					      __nex_heap_start);
696 #else
697 	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
698 #endif
699 
700 	IMSG_RAW("\n");
701 }
702 #endif
703 
704 #if defined(CFG_DT)
705 static int add_optee_dt_node(struct dt_descriptor *dt)
706 {
707 	int offs;
708 	int ret;
709 
710 	if (fdt_path_offset(dt->blob, "/firmware/optee") >= 0) {
711 		DMSG("OP-TEE Device Tree node already exists!");
712 		return 0;
713 	}
714 
715 	offs = fdt_path_offset(dt->blob, "/firmware");
716 	if (offs < 0) {
717 		offs = add_dt_path_subnode(dt, "/", "firmware");
718 		if (offs < 0)
719 			return -1;
720 	}
721 
722 	offs = fdt_add_subnode(dt->blob, offs, "optee");
723 	if (offs < 0)
724 		return -1;
725 
726 	ret = fdt_setprop_string(dt->blob, offs, "compatible",
727 				 "linaro,optee-tz");
728 	if (ret < 0)
729 		return -1;
730 	ret = fdt_setprop_string(dt->blob, offs, "method", "smc");
731 	if (ret < 0)
732 		return -1;
733 
734 	if (CFG_CORE_ASYNC_NOTIF_GIC_INTID) {
735 		/*
736 		 * The format of the interrupt property is defined by the
737 		 * binding of the interrupt domain root. In this case it's
738 		 * an Arm GIC v1, v2 or v3 so we must be compatible with
739 		 * these.
740 		 *
741 		 * An SPI type of interrupt is indicated with a 0 in the
742 		 * first cell. A PPI type is indicated with value 1.
743 		 *
744 		 * The interrupt number goes in the second cell where
745 		 * SPIs range from 0 to 987 and PPIs range from 0 to 15.
746 		 *
747 		 * Flags are passed in the third cell.
748 		 */
749 		uint32_t itr_trigger = 0;
750 		uint32_t itr_type = 0;
751 		uint32_t itr_id = 0;
752 		uint32_t val[3] = { };
753 
754 		/* PPIs are visible only in the current CPU cluster */
755 		static_assert(IS_ENABLED(CFG_CORE_FFA) ||
756 			      !CFG_CORE_ASYNC_NOTIF_GIC_INTID ||
757 			      (CFG_CORE_ASYNC_NOTIF_GIC_INTID >=
758 			       GIC_SPI_BASE) ||
759 			      ((CFG_TEE_CORE_NB_CORE <= 8) &&
760 			       (CFG_CORE_ASYNC_NOTIF_GIC_INTID >=
761 				GIC_PPI_BASE)));
762 
763 		if (CFG_CORE_ASYNC_NOTIF_GIC_INTID >= GIC_SPI_BASE) {
764 			itr_type = GIC_SPI;
765 			itr_id = CFG_CORE_ASYNC_NOTIF_GIC_INTID - GIC_SPI_BASE;
766 			itr_trigger = IRQ_TYPE_EDGE_RISING;
767 		} else {
768 			itr_type = GIC_PPI;
769 			itr_id = CFG_CORE_ASYNC_NOTIF_GIC_INTID - GIC_PPI_BASE;
770 			itr_trigger = IRQ_TYPE_EDGE_RISING |
771 				      GIC_CPU_MASK_SIMPLE(CFG_TEE_CORE_NB_CORE);
772 		}
773 
774 		val[0] = TEE_U32_TO_BIG_ENDIAN(itr_type);
775 		val[1] = TEE_U32_TO_BIG_ENDIAN(itr_id);
776 		val[2] = TEE_U32_TO_BIG_ENDIAN(itr_trigger);
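		/*
		 * Illustrative example with a hypothetical value (not from
		 * the original code): CFG_CORE_ASYNC_NOTIF_GIC_INTID = 100
		 * is an SPI and would be encoded as
		 * interrupts = <GIC_SPI 68 IRQ_TYPE_EDGE_RISING>.
		 */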
777 
778 		ret = fdt_setprop(dt->blob, offs, "interrupts", val,
779 				  sizeof(val));
780 		if (ret < 0)
781 			return -1;
782 	}
783 	return 0;
784 }
785 
786 #ifdef CFG_PSCI_ARM32
787 static int append_psci_compatible(void *fdt, int offs, const char *str)
788 {
789 	return fdt_appendprop(fdt, offs, "compatible", str, strlen(str) + 1);
790 }
791 
792 static int dt_add_psci_node(struct dt_descriptor *dt)
793 {
794 	int offs;
795 
796 	if (fdt_path_offset(dt->blob, "/psci") >= 0) {
797 		DMSG("PSCI Device Tree node already exists!");
798 		return 0;
799 	}
800 
801 	offs = add_dt_path_subnode(dt, "/", "psci");
802 	if (offs < 0)
803 		return -1;
804 	if (append_psci_compatible(dt->blob, offs, "arm,psci-1.0"))
805 		return -1;
806 	if (append_psci_compatible(dt->blob, offs, "arm,psci-0.2"))
807 		return -1;
808 	if (append_psci_compatible(dt->blob, offs, "arm,psci"))
809 		return -1;
810 	if (fdt_setprop_string(dt->blob, offs, "method", "smc"))
811 		return -1;
812 	if (fdt_setprop_u32(dt->blob, offs, "cpu_suspend", PSCI_CPU_SUSPEND))
813 		return -1;
814 	if (fdt_setprop_u32(dt->blob, offs, "cpu_off", PSCI_CPU_OFF))
815 		return -1;
816 	if (fdt_setprop_u32(dt->blob, offs, "cpu_on", PSCI_CPU_ON))
817 		return -1;
818 	if (fdt_setprop_u32(dt->blob, offs, "sys_poweroff", PSCI_SYSTEM_OFF))
819 		return -1;
820 	if (fdt_setprop_u32(dt->blob, offs, "sys_reset", PSCI_SYSTEM_RESET))
821 		return -1;
822 	return 0;
823 }
824 
825 static int check_node_compat_prefix(struct dt_descriptor *dt, int offs,
826 				    const char *prefix)
827 {
828 	const size_t prefix_len = strlen(prefix);
829 	size_t l;
830 	int plen;
831 	const char *prop;
832 
833 	prop = fdt_getprop(dt->blob, offs, "compatible", &plen);
834 	if (!prop)
835 		return -1;
836 
837 	while (plen > 0) {
838 		if (memcmp(prop, prefix, prefix_len) == 0)
839 			return 0; /* match */
840 
841 		l = strlen(prop) + 1;
842 		prop += l;
843 		plen -= l;
844 	}
845 
846 	return -1;
847 }
848 
849 static int dt_add_psci_cpu_enable_methods(struct dt_descriptor *dt)
850 {
851 	int offs = 0;
852 
853 	while (1) {
854 		offs = fdt_next_node(dt->blob, offs, NULL);
855 		if (offs < 0)
856 			break;
857 		if (fdt_getprop(dt->blob, offs, "enable-method", NULL))
858 			continue; /* already set */
859 		if (check_node_compat_prefix(dt, offs, "arm,cortex-a"))
860 			continue; /* no compatible */
861 		if (fdt_setprop_string(dt->blob, offs, "enable-method", "psci"))
862 			return -1;
863 		/* Need to restart scanning as offsets may have changed */
864 		offs = 0;
865 	}
866 	return 0;
867 }
868 
869 static int config_psci(struct dt_descriptor *dt)
870 {
871 	if (dt_add_psci_node(dt))
872 		return -1;
873 	return dt_add_psci_cpu_enable_methods(dt);
874 }
875 #else
876 static int config_psci(struct dt_descriptor *dt __unused)
877 {
878 	return 0;
879 }
880 #endif /*CFG_PSCI_ARM32*/
881 
882 static int mark_tzdram_as_reserved(struct dt_descriptor *dt)
883 {
884 	return add_res_mem_dt_node(dt, "optee_core", CFG_TZDRAM_START,
885 				   CFG_TZDRAM_SIZE);
886 }
887 
888 static void update_external_dt(void)
889 {
890 	struct dt_descriptor *dt = get_external_dt_desc();
891 
892 	if (!dt || !dt->blob)
893 		return;
894 
895 	if (!IS_ENABLED(CFG_CORE_FFA) && add_optee_dt_node(dt))
896 		panic("Failed to add OP-TEE Device Tree node");
897 
898 	if (config_psci(dt))
899 		panic("Failed to config PSCI");
900 
901 #ifdef CFG_CORE_RESERVED_SHM
902 	if (mark_static_shm_as_reserved(dt))
903 		panic("Failed to config non-secure memory");
904 #endif
905 
906 	if (mark_tzdram_as_reserved(dt))
907 		panic("Failed to config secure memory");
908 }
909 #else /*CFG_DT*/
910 static void update_external_dt(void)
911 {
912 }
913 #endif /*!CFG_DT*/
914 
915 #ifdef CFG_NS_VIRTUALIZATION
916 static TEE_Result virt_init_heap(void)
917 {
918 	/* We need to initialize the pool for every virtual guest partition */
919 	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
920 
921 	return TEE_SUCCESS;
922 }
923 preinit_early(virt_init_heap);
924 #endif
925 
926 void init_tee_runtime(void)
927 {
928 #ifndef CFG_WITH_PAGER
929 	/* Pager initializes TA RAM early */
930 	core_mmu_init_ta_ram();
931 #endif
932 	/*
933 	 * With virtualization we call this function when creating the
934 	 * OP-TEE partition instead.
935 	 */
936 	if (!IS_ENABLED(CFG_NS_VIRTUALIZATION))
937 		call_preinitcalls();
938 	call_initcalls();
939 
940 	/*
941 	 * These two functions use crypto_rng_read() to initialize the
942 	 * pauth keys. Once call_initcalls() returns we're guaranteed that
943 	 * crypto_rng_read() is ready to be used.
944 	 */
945 	thread_init_core_local_pauth_keys();
946 	thread_init_thread_pauth_keys();
947 
948 	/*
949 	 * Reinitialize canaries around the stacks with crypto_rng_read().
950 	 *
951 	 * TODO: Updating canaries when CFG_NS_VIRTUALIZATION is enabled will
952 	 * require synchronization between thread_check_canaries() and
953 	 * thread_update_canaries().
954 	 */
955 	if (!IS_ENABLED(CFG_NS_VIRTUALIZATION))
956 		thread_update_canaries();
957 }
958 
959 static void init_primary(unsigned long pageable_part, unsigned long nsec_entry)
960 {
961 	thread_init_core_local_stacks();
962 	/*
963 	 * Mask asynchronous exceptions before switching to the thread vector
964 	 * as the thread handler requires those to be masked while
965 	 * executing with the temporary stack. The thread subsystem also
966 	 * asserts that the foreign interrupts are blocked when using most of
967 	 * its functions.
968 	 */
969 	thread_set_exceptions(THREAD_EXCP_ALL);
970 	primary_save_cntfrq();
971 	init_vfp_sec();
972 
973 	if (IS_ENABLED(CFG_CRYPTO_WITH_CE))
974 		check_crypto_extensions();
975 
976 	/*
977 	 * Pager: init_runtime() calls thread_kernel_enable_vfp() so we must
978 	 * set a current thread right now to avoid a chicken-and-egg problem
979 	 * (thread_init_boot_thread() sets the current thread but needs
980 	 * things set by init_runtime()).
981 	 */
982 	thread_get_core_local()->curr_thread = 0;
983 	init_runtime(pageable_part);
984 
985 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
986 		/*
987 		 * Virtualization: We can't initialize threads right now because
988 		 * threads belong to the "tee" part and will be initialized
989 		 * separately for each new virtual guest. So, we'll clear
990 		 * "curr_thread" and call it done.
991 		 */
992 		thread_get_core_local()->curr_thread = -1;
993 	} else {
994 		thread_init_boot_thread();
995 	}
996 	thread_init_primary();
997 	thread_init_per_cpu();
998 	init_sec_mon(nsec_entry);
999 }
1000 
1001 static bool cpu_nmfi_enabled(void)
1002 {
1003 #if defined(ARM32)
1004 	return read_sctlr() & SCTLR_NMFI;
1005 #else
1006 	/* Note: ARM64 does not feature non-maskable FIQ support. */
1007 	return false;
1008 #endif
1009 }
1010 
1011 /*
1012  * Note: this function is weak just to make it possible to exclude it from
1013  * the unpaged area.
1014  */
1015 void __weak boot_init_primary_late(unsigned long fdt __unused,
1016 				   unsigned long manifest __unused)
1017 {
1018 	size_t fdt_size = CFG_DTB_MAX_SIZE;
1019 
1020 	if (IS_ENABLED(CFG_TRANSFER_LIST) && mapped_tl) {
1021 		struct transfer_list_entry *tl_e = NULL;
1022 
1023 		tl_e = transfer_list_find(mapped_tl, TL_TAG_FDT);
1024 		if (tl_e)
1025 			fdt_size = tl_e->data_size;
1026 	}
1027 
1028 	init_external_dt(boot_arg_fdt, fdt_size);
1029 	reinit_manifest_dt();
1030 #ifdef CFG_CORE_SEL1_SPMC
1031 	tpm_map_log_area(get_manifest_dt());
1032 #else
1033 	tpm_map_log_area(get_external_dt());
1034 #endif
1035 	discover_nsec_memory();
1036 	update_external_dt();
1037 	configure_console_from_dt();
1038 
1039 	IMSG("OP-TEE version: %s", core_v_str);
1040 	if (IS_ENABLED(CFG_INSECURE)) {
1041 		IMSG("WARNING: This OP-TEE configuration might be insecure!");
1042 		IMSG("WARNING: Please check https://optee.readthedocs.io/en/latest/architecture/porting_guidelines.html");
1043 	}
1044 	IMSG("Primary CPU initializing");
1045 #ifdef CFG_CORE_ASLR
1046 	DMSG("Executing at offset %#lx with virtual load address %#"PRIxVA,
1047 	     (unsigned long)boot_mmu_config.map_offset, VCORE_START_VA);
1048 #endif
1049 	if (IS_ENABLED(CFG_MEMTAG))
1050 		DMSG("Memory tagging %s",
1051 		     memtag_is_enabled() ?  "enabled" : "disabled");
1052 
1053 	/* Check if platform needs NMFI workaround */
1054 	if (cpu_nmfi_enabled())	{
1055 		if (!IS_ENABLED(CFG_CORE_WORKAROUND_ARM_NMFI))
1056 			IMSG("WARNING: This ARM core has NMFI enabled, please apply workaround!");
1057 	} else {
1058 		if (IS_ENABLED(CFG_CORE_WORKAROUND_ARM_NMFI))
1059 			IMSG("WARNING: This ARM core does not have NMFI enabled, no need for workaround");
1060 	}
1061 
1062 	boot_primary_init_intc();
1063 	init_vfp_nsec();
1064 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
1065 		IMSG("Initializing virtualization support");
1066 		core_mmu_init_virtualization();
1067 	} else {
1068 		init_tee_runtime();
1069 	}
1070 	call_finalcalls();
1071 	IMSG("Primary CPU switching to normal world boot");
1072 }
1073 
1074 static void init_secondary_helper(unsigned long nsec_entry)
1075 {
1076 	IMSG("Secondary CPU %zu initializing", get_core_pos());
1077 
1078 	/*
1079 	 * Mask asynchronous exceptions before switching to the thread vector
1080 	 * as the thread handler requires those to be masked while
1081 	 * executing with the temporary stack. The thread subsystem also
1082 	 * asserts that the foreign interrupts are blocked when using most of
1083 	 * its functions.
1084 	 */
1085 	thread_set_exceptions(THREAD_EXCP_ALL);
1086 
1087 	secondary_init_cntfrq();
1088 	thread_init_per_cpu();
1089 	init_sec_mon(nsec_entry);
1090 	boot_secondary_init_intc();
1091 	init_vfp_sec();
1092 	init_vfp_nsec();
1093 
1094 	IMSG("Secondary CPU %zu switching to normal world boot", get_core_pos());
1095 }
1096 
1097 /*
1098  * Note: this function is weak just to make it possible to exclude it from
1099  * the unpaged area so that it lies in the init area.
1100  */
1101 void __weak boot_init_primary_early(void)
1102 {
1103 	unsigned long pageable_part = 0;
1104 	unsigned long e = PADDR_INVALID;
1105 	struct transfer_list_entry *tl_e = NULL;
1106 
1107 	if (!IS_ENABLED(CFG_WITH_ARM_TRUSTED_FW))
1108 		e = boot_arg_nsec_entry;
1109 
1110 	if (IS_ENABLED(CFG_TRANSFER_LIST) && boot_arg_transfer_list) {
1111 		/* map and save the TL */
1112 		mapped_tl = transfer_list_map(boot_arg_transfer_list);
1113 		if (!mapped_tl)
1114 			panic("Failed to map transfer list");
1115 
1116 		transfer_list_dump(mapped_tl);
1117 		tl_e = transfer_list_find(mapped_tl, TL_TAG_FDT);
1118 		if (tl_e) {
1119 			/*
1120 			 * Expand the data size of the DTB entry to the maximum
1121 			 * allocable mapped memory to reserve sufficient space
1122 			 * for inserting new nodes and to avoid potentially
1123 			 * corrupting the next entries.
1124 			 */
1125 			uint32_t dtb_max_sz = mapped_tl->max_size -
1126 					      mapped_tl->size + tl_e->data_size;
1127 
1128 			if (!transfer_list_set_data_size(mapped_tl, tl_e,
1129 							 dtb_max_sz)) {
1130 				EMSG("Failed to extend DTB size to %#"PRIx32,
1131 				     dtb_max_sz);
1132 				panic();
1133 			}
1134 		}
1135 		tl_e = transfer_list_find(mapped_tl, TL_TAG_OPTEE_PAGABLE_PART);
1136 	}
1137 
1138 	if (IS_ENABLED(CFG_WITH_PAGER)) {
1139 		if (IS_ENABLED(CFG_TRANSFER_LIST) && tl_e)
1140 			pageable_part =
1141 				get_le64(transfer_list_entry_data(tl_e));
1142 		else
1143 			pageable_part = boot_arg_pageable_part;
1144 	}
1145 
1146 	init_primary(pageable_part, e);
1147 }
1148 
1149 static void boot_save_transfer_list(unsigned long zero_reg,
1150 				    unsigned long transfer_list,
1151 				    unsigned long fdt)
1152 {
1153 	struct transfer_list_header *tl = (void *)transfer_list;
1154 	struct transfer_list_entry *tl_e = NULL;
1155 
1156 	if (zero_reg != 0)
1157 		panic("Incorrect transfer list register convention");
1158 
1159 	if (!IS_ALIGNED_WITH_TYPE(transfer_list, struct transfer_list_header) ||
1160 	    !IS_ALIGNED(transfer_list, TL_ALIGNMENT_FROM_ORDER(tl->alignment)))
1161 		panic("Transfer list base address is not aligned");
1162 
1163 	if (transfer_list_check_header(tl) == TL_OPS_NONE)
1164 		panic("Invalid transfer list");
1165 
1166 	tl_e = transfer_list_find(tl, TL_TAG_FDT);
1167 	if (fdt != (unsigned long)transfer_list_entry_data(tl_e))
1168 		panic("DT does not match to the DT entry of the TL");
1169 
1170 	boot_arg_transfer_list = transfer_list;
1171 }
1172 
1173 #if defined(CFG_WITH_ARM_TRUSTED_FW)
1174 unsigned long boot_cpu_on_handler(unsigned long a0 __maybe_unused,
1175 				  unsigned long a1 __unused)
1176 {
1177 	init_secondary_helper(PADDR_INVALID);
1178 	return 0;
1179 }
1180 #else
1181 void boot_init_secondary(unsigned long nsec_entry)
1182 {
1183 	init_secondary_helper(nsec_entry);
1184 }
1185 #endif
1186 
1187 #if defined(CFG_BOOT_SECONDARY_REQUEST)
1188 void boot_set_core_ns_entry(size_t core_idx, uintptr_t entry,
1189 			    uintptr_t context_id)
1190 {
1191 	ns_entry_contexts[core_idx].entry_point = entry;
1192 	ns_entry_contexts[core_idx].context_id = context_id;
1193 	dsb_ishst();
1194 }
1195 
1196 int boot_core_release(size_t core_idx, paddr_t entry)
1197 {
1198 	if (!core_idx || core_idx >= CFG_TEE_CORE_NB_CORE)
1199 		return -1;
1200 
1201 	ns_entry_contexts[core_idx].entry_point = entry;
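	/* Make the entry point visible before releasing and waking the core */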
1202 	dmb();
1203 	spin_table[core_idx] = 1;
1204 	dsb();
1205 	sev();
1206 
1207 	return 0;
1208 }
1209 
1210 /*
1211  * Spin until a secondary boot request arrives, then return with
1212  * the secondary core entry address.
1213  */
1214 struct ns_entry_context *boot_core_hpen(void)
1215 {
1216 #ifdef CFG_PSCI_ARM32
1217 	return &ns_entry_contexts[get_core_pos()];
1218 #else
1219 	do {
1220 		wfe();
1221 	} while (!spin_table[get_core_pos()]);
1222 	dmb();
1223 	return &ns_entry_contexts[get_core_pos()];
1224 #endif
1225 }
1226 #endif
1227 
1228 #if defined(CFG_CORE_ASLR)
1229 #if defined(CFG_DT)
1230 unsigned long __weak get_aslr_seed(void)
1231 {
1232 	void *fdt = NULL;
1233 	int rc = 0;
1234 	const uint64_t *seed = NULL;
1235 	int offs = 0;
1236 	int len = 0;
1237 
1238 	if (!IS_ENABLED(CFG_CORE_SEL2_SPMC))
1239 		fdt = (void *)boot_arg_fdt;
1240 
1241 	if (!fdt) {
1242 		DMSG("No fdt");
1243 		goto err;
1244 	}
1245 
1246 	rc = fdt_check_header(fdt);
1247 	if (rc) {
1248 		DMSG("Bad fdt: %d", rc);
1249 		goto err;
1250 	}
1251 
1252 	offs =  fdt_path_offset(fdt, "/secure-chosen");
1253 	if (offs < 0) {
1254 		DMSG("Cannot find /secure-chosen");
1255 		goto err;
1256 	}
1257 	seed = fdt_getprop(fdt, offs, "kaslr-seed", &len);
1258 	if (!seed || len != sizeof(*seed)) {
1259 		DMSG("Cannot find valid kaslr-seed");
1260 		goto err;
1261 	}
1262 
1263 	return fdt64_to_cpu(fdt64_ld(seed));
1264 
1265 err:
1266 	/* Try platform implementation */
1267 	return plat_get_aslr_seed();
1268 }
1269 #else /*!CFG_DT*/
1270 unsigned long __weak get_aslr_seed(void)
1271 {
1272 	/* Try platform implementation */
1273 	return plat_get_aslr_seed();
1274 }
1275 #endif /*!CFG_DT*/
1276 #endif /*CFG_CORE_ASLR*/
1277 
1278 static void *get_fdt_from_boot_info(struct ffa_boot_info_header_1_1 *hdr)
1279 {
1280 	struct ffa_boot_info_1_1 *desc = NULL;
1281 	uint8_t content_fmt = 0;
1282 	uint8_t name_fmt = 0;
1283 	void *fdt = NULL;
1284 	int ret = 0;
1285 
1286 	if (hdr->signature != FFA_BOOT_INFO_SIGNATURE) {
1287 		EMSG("Bad boot info signature %#"PRIx32, hdr->signature);
1288 		panic();
1289 	}
1290 	if (hdr->version != FFA_BOOT_INFO_VERSION) {
1291 		EMSG("Bad boot info version %#"PRIx32, hdr->version);
1292 		panic();
1293 	}
1294 	if (hdr->desc_count != 1) {
1295 		EMSG("Bad boot info descriptor count %#"PRIx32,
1296 		     hdr->desc_count);
1297 		panic();
1298 	}
1299 	desc = (void *)((vaddr_t)hdr + hdr->desc_offset);
1300 	name_fmt = desc->flags & FFA_BOOT_INFO_FLAG_NAME_FORMAT_MASK;
1301 	if (name_fmt == FFA_BOOT_INFO_FLAG_NAME_FORMAT_STRING)
1302 		DMSG("Boot info descriptor name \"%16s\"", desc->name);
1303 	else if (name_fmt == FFA_BOOT_INFO_FLAG_NAME_FORMAT_UUID)
1304 		DMSG("Boot info descriptor UUID %pUl", (void *)desc->name);
1305 	else
1306 		DMSG("Boot info descriptor: unknown name format %"PRIu8,
1307 		     name_fmt);
1308 
1309 	content_fmt = (desc->flags & FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_MASK) >>
1310 		      FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_SHIFT;
1311 	if (content_fmt != FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_ADDR) {
1312 		EMSG("Bad boot info content format %"PRIu8", expected %u (address)",
1313 		     content_fmt, FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_ADDR);
1314 		panic();
1315 	}
1316 
1317 	fdt = (void *)(vaddr_t)desc->contents;
1318 	ret = fdt_check_full(fdt, desc->size);
1319 	if (ret < 0) {
1320 		EMSG("Invalid Device Tree at %p: error %d", fdt, ret);
1321 		panic();
1322 	}
1323 	return fdt;
1324 }
1325 
1326 static void get_sec_mem_from_manifest(void *fdt, paddr_t *base, size_t *size)
1327 {
1328 	int ret = 0;
1329 	uint64_t num = 0;
1330 
1331 	ret = fdt_node_check_compatible(fdt, 0, "arm,ffa-manifest-1.0");
1332 	if (ret < 0) {
1333 		EMSG("Invalid FF-A manifest at %p: error %d", fdt, ret);
1334 		panic();
1335 	}
1336 	ret = dt_getprop_as_number(fdt, 0, "load-address", &num);
1337 	if (ret < 0) {
1338 		EMSG("Can't read \"load-address\" from FF-A manifest at %p: error %d",
1339 		     fdt, ret);
1340 		panic();
1341 	}
1342 	*base = num;
1343 	/* "mem-size" is currently an undocumented extension to the spec. */
1344 	ret = dt_getprop_as_number(fdt, 0, "mem-size", &num);
1345 	if (ret < 0) {
1346 		EMSG("Can't read \"mem-size\" from FF-A manifest at %p: error %d",
1347 		     fdt, ret);
1348 		panic();
1349 	}
1350 	*size = num;
1351 }
1352 
1353 void __weak boot_save_args(unsigned long a0, unsigned long a1,
1354 			   unsigned long a2, unsigned long a3,
1355 			   unsigned long a4 __maybe_unused)
1356 {
1357 	/*
1358 	 * Register use:
1359 	 *
1360 	 * Scenario A: Default arguments
1361 	 * a0   - CFG_CORE_FFA=y && CFG_CORE_SEL2_SPMC=n:
1362 	 *        if non-NULL holds the TOS FW config [1] address
1363 	 *      - CFG_CORE_FFA=y &&
1364 	 *        (CFG_CORE_SEL2_SPMC=y || CFG_CORE_EL3_SPMC=y):
1365 	 *        address of FF-A Boot Information Blob
1366 	 *      - CFG_CORE_FFA=n:
1367 	 *        if non-NULL holds the pageable part address
1368 	 * a1	- CFG_WITH_ARM_TRUSTED_FW=n (Armv7):
1369 	 *	  Armv7 standard bootarg #1 (kept track of in entry_a32.S)
1370 	 * a2   - CFG_CORE_SEL2_SPMC=n:
1371 	 *        if non-NULL holds the system DTB address
1372 	 *	- CFG_WITH_ARM_TRUSTED_FW=n (Armv7):
1373 	 *	  Armv7 standard bootarg #2 (system DTB address, kept track
1374 	 *	  of in entry_a32.S)
1375 	 * a3	- Not used
1376 	 * a4	- CFG_WITH_ARM_TRUSTED_FW=n:
1377 	 *	  Non-secure entry address
1378 	 *
1379 	 * [1] A TF-A concept: TOS_FW_CONFIG - Trusted OS Firmware
1380 	 * configuration file. Used by Trusted OS (BL32), that is, OP-TEE
1381 	 * here. This is also called Manifest DT, related to the Manifest DT
1382 	 * passed in the FF-A Boot Information Blob, but with a different
1383 	 * compatible string.
1384 	 *
1385 	 * Scenario B: FW Handoff via Transfer List
1386 	 * Note: FF-A and non-secure entry are not yet supported with
1387 	 *       Transfer List
1388 	 * a0	- DTB address or 0 (AArch64)
1389 	 *	- must be 0 (AArch32)
1390 	 * a1	- 1 << 32 | TRANSFER_LIST_SIGNATURE[0:31] (AArch64)
1391 	 *	- 1 << 24 | TRANSFER_LIST_SIGNATURE[0:23] (AArch32)
1392 	 * a2	- must be 0 (AArch64)
1393 	 *	- DTB address or 0 (AArch32)
1394 	 * a3	- Transfer list base address
1395 	 * a4	- Not used
1396 	 */
1397 
1398 	if (IS_ENABLED(CFG_TRANSFER_LIST)) {
1399 		if (IS_ENABLED(CFG_ARM64_core) &&
1400 		    a1 == TL_HANDOFF_X1_VALUE(TL_REG_CONVENTION_VER)) {
1401 			boot_save_transfer_list(a2, a3, a0);
1402 			boot_arg_fdt = a0;
1403 		} else if (IS_ENABLED(CFG_ARM32_core) &&
1404 			   a1 == TL_HANDOFF_R1_VALUE(TL_REG_CONVENTION_VER)) {
1405 			boot_save_transfer_list(a0, a3, a2);
1406 			boot_arg_fdt = a2;
1407 		}
1408 
1409 		return;
1410 	}
1411 
1412 	if (!IS_ENABLED(CFG_CORE_SEL2_SPMC)) {
1413 #if defined(CFG_DT_ADDR)
1414 		boot_arg_fdt = CFG_DT_ADDR;
1415 #else
1416 		boot_arg_fdt = a2;
1417 #endif
1418 	}
1419 
1420 	if (IS_ENABLED(CFG_CORE_FFA)) {
1421 		if (IS_ENABLED(CFG_CORE_SEL2_SPMC) ||
1422 		    IS_ENABLED(CFG_CORE_EL3_SPMC))
1423 			init_manifest_dt(get_fdt_from_boot_info((void *)a0));
1424 		else
1425 			init_manifest_dt((void *)a0);
1426 		if (IS_ENABLED(CFG_CORE_SEL2_SPMC) &&
1427 		    IS_ENABLED(CFG_CORE_PHYS_RELOCATABLE)) {
1428 			paddr_t base = 0;
1429 			size_t size = 0;
1430 
1431 			get_sec_mem_from_manifest(get_manifest_dt(),
1432 						  &base, &size);
1433 			core_mmu_set_secure_memory(base, size);
1434 		}
1435 	} else {
1436 		if (IS_ENABLED(CFG_WITH_PAGER)) {
1437 #if defined(CFG_PAGEABLE_ADDR)
1438 			boot_arg_pageable_part = CFG_PAGEABLE_ADDR;
1439 #else
1440 			boot_arg_pageable_part = a0;
1441 #endif
1442 		}
1443 		if (!IS_ENABLED(CFG_WITH_ARM_TRUSTED_FW)) {
1444 #if defined(CFG_NS_ENTRY_ADDR)
1445 			boot_arg_nsec_entry = CFG_NS_ENTRY_ADDR;
1446 #else
1447 			boot_arg_nsec_entry = a4;
1448 #endif
1449 		}
1450 	}
1451 }
1452 
1453 #if defined(CFG_TRANSFER_LIST)
1454 static TEE_Result release_transfer_list(void)
1455 {
1456 	struct dt_descriptor *dt = get_external_dt_desc();
1457 
1458 	if (!mapped_tl)
1459 		return TEE_SUCCESS;
1460 
1461 	if (dt) {
1462 		int ret = 0;
1463 		struct transfer_list_entry *tl_e = NULL;
1464 
1465 		/*
1466 		 * Pack the DTB and update the transfer list before unmapping it
1467 		 */
1468 		ret = fdt_pack(dt->blob);
1469 		if (ret < 0) {
1470 			EMSG("Failed to pack Device Tree at 0x%" PRIxPA
1471 			     ": error %d", virt_to_phys(dt->blob), ret);
1472 			panic();
1473 		}
1474 
1475 		tl_e = transfer_list_find(mapped_tl, TL_TAG_FDT);
1476 		assert(dt->blob == transfer_list_entry_data(tl_e));
1477 		transfer_list_set_data_size(mapped_tl, tl_e,
1478 					    fdt_totalsize(dt->blob));
1479 		dt->blob = NULL;
1480 	}
1481 
1482 	transfer_list_unmap_sync(mapped_tl);
1483 	mapped_tl = NULL;
1484 
1485 	return TEE_SUCCESS;
1486 }
1487 
1488 boot_final(release_transfer_list);
1489 #endif
1490