xref: /optee_os/core/arch/arm/kernel/boot.c (revision 6b1c18580069a7c71e32deb57f609031fffb6e68)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2015-2023, Linaro Limited
4  * Copyright (c) 2023, Arm Limited
5  */
6 
7 #include <arm.h>
8 #include <assert.h>
9 #include <compiler.h>
10 #include <config.h>
11 #include <console.h>
12 #include <crypto/crypto.h>
13 #include <drivers/gic.h>
14 #include <dt-bindings/interrupt-controller/arm-gic.h>
15 #include <ffa.h>
16 #include <initcall.h>
17 #include <inttypes.h>
18 #include <io.h>
19 #include <keep.h>
20 #include <kernel/asan.h>
21 #include <kernel/boot.h>
22 #include <kernel/dt.h>
23 #include <kernel/linker.h>
24 #include <kernel/misc.h>
25 #include <kernel/panic.h>
26 #include <kernel/tee_misc.h>
27 #include <kernel/thread.h>
28 #include <kernel/tpm.h>
29 #include <kernel/transfer_list.h>
30 #include <libfdt.h>
31 #include <malloc.h>
32 #include <memtag.h>
33 #include <mm/core_memprot.h>
34 #include <mm/core_mmu.h>
35 #include <mm/fobj.h>
36 #include <mm/phys_mem.h>
37 #include <mm/tee_mm.h>
38 #include <mm/tee_pager.h>
39 #include <sm/psci.h>
40 #include <trace.h>
41 #include <utee_defines.h>
42 #include <util.h>
43 
44 #include <platform_config.h>
45 
46 #if !defined(CFG_WITH_ARM_TRUSTED_FW)
47 #include <sm/sm.h>
48 #endif
49 
50 #if defined(CFG_WITH_VFP)
51 #include <kernel/vfp.h>
52 #endif
53 
54 /*
55  * In this file we're using unsigned long to represent physical pointers as
56  * they are received in a single register when OP-TEE is initially entered.
57  * This limits 32-bit systems to only make use of the lower 32 bits
58  * of a physical address for initial parameters.
59  *
60  * 64-bit systems on the other hand can use full 64-bit physical pointers.
61  */
62 #define PADDR_INVALID		ULONG_MAX
63 
64 #if defined(CFG_BOOT_SECONDARY_REQUEST)
65 struct ns_entry_context {
66 	uintptr_t entry_point;
67 	uintptr_t context_id;
68 };
69 struct ns_entry_context ns_entry_contexts[CFG_TEE_CORE_NB_CORE];
70 static uint32_t spin_table[CFG_TEE_CORE_NB_CORE];
71 #endif
72 
73 #ifdef CFG_BOOT_SYNC_CPU
74 /*
75  * Array used during boot to synchronize the CPUs:
76  * 0 means the CPU has not started yet,
77  * 1 means it has started.
78  */
79 uint32_t sem_cpu_sync[CFG_TEE_CORE_NB_CORE];
80 DECLARE_KEEP_PAGER(sem_cpu_sync);
81 #endif
82 
83 /*
84  * Must not be in .bss since it's initialized and used from assembly before
85  * .bss is cleared.
86  */
87 vaddr_t boot_cached_mem_end __nex_data = 1;
88 
89 static unsigned long boot_arg_fdt __nex_bss;
90 unsigned long boot_arg_nsec_entry __nex_bss;
91 static unsigned long boot_arg_pageable_part __nex_bss;
92 static unsigned long boot_arg_transfer_list __nex_bss;
93 static struct transfer_list_header *mapped_tl __nex_bss;
94 
95 #ifdef CFG_SECONDARY_INIT_CNTFRQ
96 static uint32_t cntfrq;
97 #endif
98 
99 /* May be overridden in plat-$(PLATFORM)/main.c */
100 __weak void plat_primary_init_early(void)
101 {
102 }
103 DECLARE_KEEP_PAGER(plat_primary_init_early);
104 
105 /* May be overridden in plat-$(PLATFORM)/main.c */
106 __weak void boot_primary_init_intc(void)
107 {
108 }
109 
110 /* May be overridden in plat-$(PLATFORM)/main.c */
111 __weak void boot_secondary_init_intc(void)
112 {
113 }
114 
115 /* May be overridden in plat-$(PLATFORM)/main.c */
116 __weak unsigned long plat_get_aslr_seed(void)
117 {
118 	DMSG("Warning: no ASLR seed");
119 
120 	return 0;
121 }
122 
123 #if defined(_CFG_CORE_STACK_PROTECTOR) || defined(CFG_WITH_STACK_CANARIES)
124 /* Generate random stack canary value on boot up */
125 __weak void plat_get_random_stack_canaries(void *buf, size_t ncan, size_t size)
126 {
127 	TEE_Result ret = TEE_ERROR_GENERIC;
128 	size_t i = 0;
129 
130 	assert(buf && ncan && size);
131 
132 	/*
133 	 * With virtualization the RNG is not initialized in the Nexus core,
134 	 * so it must be overridden with a platform-specific implementation.
135 	 */
136 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
137 		IMSG("WARNING: Using fixed value for stack canary");
138 		memset(buf, 0xab, ncan * size);
139 		goto out;
140 	}
141 
142 	ret = crypto_rng_read(buf, ncan * size);
143 	if (ret != TEE_SUCCESS)
144 		panic("Failed to generate random stack canary");
145 
146 out:
147 	/* Leave a null byte in each canary to block string-based exploits */
148 	for (i = 0; i < ncan; i++)
149 		*((uint8_t *)buf + size * i) = 0;
150 }
151 #endif /* _CFG_CORE_STACK_PROTECTOR || CFG_WITH_STACK_CANARIES */
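
/*
 * Illustration (not part of the build) of how a caller might use the
 * function above; the variable names and sizes here are examples only:
 *
 *	uint64_t canaries[2] = { };
 *
 *	plat_get_random_stack_canaries(canaries, ARRAY_SIZE(canaries),
 *				       sizeof(canaries[0]));
 *
 * Each canaries[n] is now random except for its first byte, which is
 * zero so that a string-based overflow (strcpy() and friends) cannot
 * reproduce the canary.
 */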
152 
153 /*
154  * This function is called as a guard after each SMC call that is not
155  * supposed to return.
156  */
157 void __panic_at_smc_return(void)
158 {
159 	panic();
160 }
161 
162 #if defined(CFG_WITH_ARM_TRUSTED_FW)
163 void init_sec_mon(unsigned long nsec_entry __maybe_unused)
164 {
165 	assert(nsec_entry == PADDR_INVALID);
166 	/* Do nothing as we don't have a secure monitor */
167 }
168 #else
169 /* May be overridden in plat-$(PLATFORM)/main.c */
170 __weak void init_sec_mon(unsigned long nsec_entry)
171 {
172 	struct sm_nsec_ctx *nsec_ctx;
173 
174 	assert(nsec_entry != PADDR_INVALID);
175 
176 	/* Initialize secure monitor */
177 	nsec_ctx = sm_get_nsec_ctx();
178 	nsec_ctx->mon_lr = nsec_entry;
179 	nsec_ctx->mon_spsr = CPSR_MODE_SVC | CPSR_I;
180 	if (nsec_entry & 1)
181 		nsec_ctx->mon_spsr |= CPSR_T;
182 }
183 #endif
184 
185 #if defined(CFG_WITH_ARM_TRUSTED_FW)
186 static void init_vfp_nsec(void)
187 {
188 }
189 #else
190 static void init_vfp_nsec(void)
191 {
192 	/* Normal world can use CP10 and CP11 (SIMD/VFP) */
193 	write_nsacr(read_nsacr() | NSACR_CP10 | NSACR_CP11);
194 }
195 #endif
196 
197 static void check_crypto_extensions(void)
198 {
199 	bool ce_supported = true;
200 
201 	if (!feat_aes_implemented() &&
202 	    IS_ENABLED(CFG_CRYPTO_AES_ARM_CE)) {
203 		EMSG("AES instructions are not supported");
204 		ce_supported = false;
205 	}
206 
207 	if (!feat_sha1_implemented() &&
208 	    IS_ENABLED(CFG_CRYPTO_SHA1_ARM_CE)) {
209 		EMSG("SHA1 instructions are not supported");
210 		ce_supported = false;
211 	}
212 
213 	if (!feat_sha256_implemented() &&
214 	    IS_ENABLED(CFG_CRYPTO_SHA256_ARM_CE)) {
215 		EMSG("SHA256 instructions are not supported");
216 		ce_supported = false;
217 	}
218 
219 	/* Check AArch64-specific instructions */
220 	if (IS_ENABLED(CFG_ARM64_core)) {
221 		if (!feat_sha512_implemented() &&
222 		    IS_ENABLED(CFG_CRYPTO_SHA512_ARM_CE)) {
223 			EMSG("SHA512 instructions are not supported");
224 			ce_supported = false;
225 		}
226 
227 		if (!feat_sha3_implemented() &&
228 		    IS_ENABLED(CFG_CRYPTO_SHA3_ARM_CE)) {
229 			EMSG("SHA3 instructions are not supported");
230 			ce_supported = false;
231 		}
232 
233 		if (!feat_sm3_implemented() &&
234 		    IS_ENABLED(CFG_CRYPTO_SM3_ARM_CE)) {
235 			EMSG("SM3 instructions are not supported");
236 			ce_supported = false;
237 		}
238 
239 		if (!feat_sm4_implemented() &&
240 		    IS_ENABLED(CFG_CRYPTO_SM4_ARM_CE)) {
241 			EMSG("SM4 instructions are not supported");
242 			ce_supported = false;
243 		}
244 	}
245 
246 	if (!ce_supported)
247 		panic("HW doesn't support CE instructions");
248 }
249 
250 #if defined(CFG_WITH_VFP)
251 
252 #ifdef ARM32
253 static void init_vfp_sec(void)
254 {
255 	uint32_t cpacr = read_cpacr();
256 
257 	/*
258 	 * Enable Advanced SIMD functionality.
259 	 * Enable use of D16-D31 of the Floating-point Extension register
260 	 * file.
261 	 */
262 	cpacr &= ~(CPACR_ASEDIS | CPACR_D32DIS);
263 	/*
264 	 * Enable usage of CP10 and CP11 (SIMD/VFP) in both kernel and user
265 	 * mode.
266 	 */
267 	cpacr |= CPACR_CP(10, CPACR_CP_ACCESS_FULL);
268 	cpacr |= CPACR_CP(11, CPACR_CP_ACCESS_FULL);
269 	write_cpacr(cpacr);
270 }
271 #endif /* ARM32 */
272 
273 #ifdef ARM64
274 static void init_vfp_sec(void)
275 {
276 	/* Not using VFP until thread_kernel_enable_vfp() */
277 	vfp_disable();
278 }
279 #endif /* ARM64 */
280 
281 #else /* CFG_WITH_VFP */
282 
283 static void init_vfp_sec(void)
284 {
285 	/* Not using VFP */
286 }
287 #endif
288 
289 #ifdef CFG_SECONDARY_INIT_CNTFRQ
290 static void primary_save_cntfrq(void)
291 {
292 	assert(cntfrq == 0);
293 
294 	/*
295 	 * CNTFRQ should be initialized on the primary CPU by a
296 	 * previous boot stage
297 	 */
298 	cntfrq = read_cntfrq();
299 }
300 
301 static void secondary_init_cntfrq(void)
302 {
303 	assert(cntfrq != 0);
304 	write_cntfrq(cntfrq);
305 }
306 #else /* CFG_SECONDARY_INIT_CNTFRQ */
307 static void primary_save_cntfrq(void)
308 {
309 }
310 
311 static void secondary_init_cntfrq(void)
312 {
313 }
314 #endif
315 
316 #ifdef CFG_CORE_SANITIZE_KADDRESS
317 static void init_run_constructors(void)
318 {
319 	const vaddr_t *ctor;
320 
321 	for (ctor = &__ctor_list; ctor < &__ctor_end; ctor++)
322 		((void (*)(void))(*ctor))();
323 }
324 
325 static void init_asan(void)
326 {
327 
328 	/*
329 	 * CFG_ASAN_SHADOW_OFFSET is also supplied as
330 	 * -fasan-shadow-offset=$(CFG_ASAN_SHADOW_OFFSET) to the compiler.
331 	 * Since all the values needed to calculate CFG_ASAN_SHADOW_OFFSET
332 	 * aren't available to make, we need to calculate it in advance and
333 	 * hard code it into the platform conf.mk. Here, where we do have
334 	 * all the needed values, we double check that the compiler was
335 	 * supplied the correct value.
336 	 */
337 
338 #define __ASAN_SHADOW_START \
339 	ROUNDUP(TEE_RAM_START + (TEE_RAM_VA_SIZE * 8) / 9 - 8, 8)
340 	assert(__ASAN_SHADOW_START == (vaddr_t)&__asan_shadow_start);
341 #define __CFG_ASAN_SHADOW_OFFSET \
342 	(__ASAN_SHADOW_START - (TEE_RAM_START / 8))
343 	COMPILE_TIME_ASSERT(CFG_ASAN_SHADOW_OFFSET == __CFG_ASAN_SHADOW_OFFSET);
344 #undef __ASAN_SHADOW_START
345 #undef __CFG_ASAN_SHADOW_OFFSET
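
	/*
	 * Worked example with hypothetical numbers: ASAN maps each 8 bytes
	 * of memory to one shadow byte, shadow_addr = addr / 8 + offset.
	 * With TEE_RAM_START = 0x10000000 and TEE_RAM_VA_SIZE = 0x200000
	 * the shadow area starts at
	 * ROUNDUP(0x10000000 + (0x200000 * 8) / 9 - 8, 8) = 0x101c71c0
	 * and the offset passed to the compiler is
	 * 0x101c71c0 - (0x10000000 / 8) = 0xe1c71c0.
	 */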
346 
347 	/*
348 	 * Register the area covered by the shadow memory: everything from
349 	 * the load address up to the beginning of the shadow area.
350 	 */
351 	asan_set_shadowed((void *)TEE_LOAD_ADDR, &__asan_shadow_start);
352 
353 	/*
354 	 * Add access to areas that aren't opened automatically by a
355 	 * constructor.
356 	 */
357 	asan_tag_access(&__ctor_list, &__ctor_end);
358 	asan_tag_access(__rodata_start, __rodata_end);
359 #ifdef CFG_WITH_PAGER
360 	asan_tag_access(__pageable_start, __pageable_end);
361 #endif /*CFG_WITH_PAGER*/
362 	asan_tag_access(__nozi_start, __nozi_end);
363 #ifdef ARM32
364 	asan_tag_access(__exidx_start, __exidx_end);
365 	asan_tag_access(__extab_start, __extab_end);
366 #endif
367 
368 	init_run_constructors();
369 
370 	/* Everything is tagged correctly, let's start address sanitizing. */
371 	asan_start();
372 }
373 #else /*CFG_CORE_SANITIZE_KADDRESS*/
374 static void init_asan(void)
375 {
376 }
377 #endif /*CFG_CORE_SANITIZE_KADDRESS*/
378 
379 #if defined(CFG_MEMTAG)
380 /* Called from entry_a64.S only when MEMTAG is configured */
381 void boot_init_memtag(void)
382 {
383 	memtag_init_ops(feat_mte_implemented());
384 }
385 
386 static TEE_Result mmap_clear_memtag(struct tee_mmap_region *map,
387 				    void *ptr __unused)
388 {
389 	switch (map->type) {
390 	case MEM_AREA_NEX_RAM_RO:
391 	case MEM_AREA_SEC_RAM_OVERALL:
392 		DMSG("Clearing tags for VA %#"PRIxVA"..%#"PRIxVA,
393 		     map->va, map->va + map->size - 1);
394 		memtag_set_tags((void *)map->va, map->size, 0);
395 		break;
396 	default:
397 		break;
398 	}
399 
400 	return TEE_SUCCESS;
401 }
402 
403 /* Called from entry_a64.S only when MEMTAG is configured */
404 void boot_clear_memtag(void)
405 {
406 	core_mmu_for_each_map(NULL, mmap_clear_memtag);
407 }
408 #endif
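
/*
 * General MTE background, not specific to this file: allocation tags
 * are kept per 16-byte granule of memory, so for example
 *
 *	memtag_set_tags((void *)va, 4096, 0);
 *
 * clears the tags of 4096 / 16 = 256 granules. A sketch only; the real
 * call site is in mmap_clear_memtag() above.
 */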
409 
410 #ifdef CFG_WITH_PAGER
411 
412 #ifdef CFG_CORE_SANITIZE_KADDRESS
413 static void carve_out_asan_mem(void)
414 {
415 	nex_phys_mem_partial_carve_out(ASAN_MAP_PA, ASAN_MAP_SZ);
416 }
417 #else
418 static void carve_out_asan_mem(void)
419 {
420 }
421 #endif
422 
423 static void print_pager_pool_size(void)
424 {
425 	struct tee_pager_stats __maybe_unused stats;
426 
427 	tee_pager_get_stats(&stats);
428 	IMSG("Pager pool size: %zukB",
429 		stats.npages_all * SMALL_PAGE_SIZE / 1024);
430 }
431 
432 static void init_virt_pool(tee_mm_pool_t *virt_pool)
433 {
434 	const vaddr_t begin = VCORE_START_VA;
435 	size_t size = TEE_RAM_VA_SIZE;
436 
437 #ifdef CFG_CORE_SANITIZE_KADDRESS
438 	/* Carve out ASAN memory, flat mapped after core memory */
439 	if (begin + size > ASAN_SHADOW_PA)
440 		size = ASAN_MAP_PA - begin;
441 #endif
442 
443 	if (!tee_mm_init(virt_pool, begin, size, SMALL_PAGE_SHIFT,
444 			 TEE_MM_POOL_NO_FLAGS))
445 		panic("core_virt_mem_pool init failed");
446 }
447 
448 /*
449  * With CFG_CORE_ASLR=y the init part is relocated very early during boot.
450  * The init part is also paged just as the rest of the normal paged code, with
451  * the difference that it's preloaded during boot. When the backing store
452  * is configured, the entire paged binary, including the init part, is
453  * copied into place. Since the init part has been relocated (references
454  * to addresses updated to compensate for the new load address), this has
455  * to be undone for the hashes of those pages to match the original binary.
456  *
457  * If CFG_CORE_ASLR=n, nothing needs to be done as the code/ro pages are
458  * unchanged.
459  */
460 static void undo_init_relocation(uint8_t *paged_store __maybe_unused)
461 {
462 #ifdef CFG_CORE_ASLR
463 	unsigned long *ptr = NULL;
464 	const uint32_t *reloc = NULL;
465 	const uint32_t *reloc_end = NULL;
466 	unsigned long offs = boot_mmu_config.map_offset;
467 	const struct boot_embdata *embdata = (const void *)__init_end;
468 	vaddr_t addr_end = (vaddr_t)__init_end - offs - TEE_LOAD_ADDR;
469 	vaddr_t addr_start = (vaddr_t)__init_start - offs - TEE_LOAD_ADDR;
470 
471 	reloc = (const void *)((vaddr_t)embdata + embdata->reloc_offset);
472 	reloc_end = reloc + embdata->reloc_len / sizeof(*reloc);
473 
474 	for (; reloc < reloc_end; reloc++) {
475 		if (*reloc < addr_start)
476 			continue;
477 		if (*reloc >= addr_end)
478 			break;
479 		ptr = (void *)(paged_store + *reloc - addr_start);
480 		*ptr -= offs;
481 	}
482 #endif
483 }
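
/*
 * Worked example with hypothetical numbers for the function above:
 * assume offs = 0x200000, addr_start = 0x1000 and a relocation entry
 * *reloc = 0x1800. The relocated word then lives at paged_store + 0x800
 * and was rebased by offs during early relocation, so *ptr -= offs
 * restores the value the page hashes were originally computed over.
 */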
484 
485 static struct fobj *ro_paged_alloc(tee_mm_entry_t *mm, void *hashes,
486 				   void *store)
487 {
488 	const unsigned int num_pages = tee_mm_get_bytes(mm) / SMALL_PAGE_SIZE;
489 #ifdef CFG_CORE_ASLR
490 	unsigned int reloc_offs = (vaddr_t)__pageable_start - VCORE_START_VA;
491 	const struct boot_embdata *embdata = (const void *)__init_end;
492 	const void *reloc = __init_end + embdata->reloc_offset;
493 
494 	return fobj_ro_reloc_paged_alloc(num_pages, hashes, reloc_offs,
495 					 reloc, embdata->reloc_len, store);
496 #else
497 	return fobj_ro_paged_alloc(num_pages, hashes, store);
498 #endif
499 }
500 
501 static void init_pager_runtime(unsigned long pageable_part)
502 {
503 	size_t n;
504 	size_t init_size = (size_t)(__init_end - __init_start);
505 	size_t pageable_start = (size_t)__pageable_start;
506 	size_t pageable_end = (size_t)__pageable_end;
507 	size_t pageable_size = pageable_end - pageable_start;
508 	vaddr_t tzsram_end = TZSRAM_BASE + TZSRAM_SIZE - TEE_LOAD_ADDR +
509 			     VCORE_START_VA;
510 	size_t hash_size = (pageable_size / SMALL_PAGE_SIZE) *
511 			   TEE_SHA256_HASH_SIZE;
512 	const struct boot_embdata *embdata = (const void *)__init_end;
513 	const void *tmp_hashes = NULL;
514 	tee_mm_entry_t *mm = NULL;
515 	struct fobj *fobj = NULL;
516 	uint8_t *paged_store = NULL;
517 	uint8_t *hashes = NULL;
518 
519 	assert(pageable_size % SMALL_PAGE_SIZE == 0);
520 	assert(embdata->total_len >= embdata->hashes_offset +
521 				     embdata->hashes_len);
522 	assert(hash_size == embdata->hashes_len);
523 
524 	tmp_hashes = __init_end + embdata->hashes_offset;
525 
526 	/*
527 	 * This needs to be initialized early to support address lookup
528 	 * in MEM_AREA_TEE_RAM
529 	 */
530 	tee_pager_early_init();
531 
532 	hashes = malloc(hash_size);
533 	IMSG_RAW("\n");
534 	IMSG("Pager is enabled. Hashes: %zu bytes", hash_size);
535 	assert(hashes);
536 	asan_memcpy_unchecked(hashes, tmp_hashes, hash_size);
537 
538 	/*
539 	 * The pager is about to be enabled below, so any temporary boot
540 	 * memory allocations must be released now.
541 	 */
542 	boot_mem_release_tmp_alloc();
543 
544 	carve_out_asan_mem();
545 
546 	mm = nex_phys_mem_ta_alloc(pageable_size);
547 	assert(mm);
548 	paged_store = phys_to_virt(tee_mm_get_smem(mm),
549 				   MEM_AREA_SEC_RAM_OVERALL, pageable_size);
550 	/*
551 	 * Load pageable part in the dedicated allocated area:
552 	 * - Move pageable non-init part into pageable area. Note that the
553 	 *   bootloader may have loaded it anywhere in TA RAM, hence memmove().
554 	 * - Copy pageable init part from current location into pageable area.
555 	 */
556 	memmove(paged_store + init_size,
557 		phys_to_virt(pageable_part,
558 			     core_mmu_get_type_by_pa(pageable_part),
559 			     __pageable_part_end - __pageable_part_start),
560 		__pageable_part_end - __pageable_part_start);
561 	asan_memcpy_unchecked(paged_store, __init_start, init_size);
562 	/*
563 	 * Undo any relocation of the init part so the hash checks
564 	 * can pass.
565 	 */
566 	undo_init_relocation(paged_store);
567 
568 	/* Check that the hashes of what's in the pageable area are OK */
569 	DMSG("Checking hashes of pageable area");
570 	for (n = 0; (n * SMALL_PAGE_SIZE) < pageable_size; n++) {
571 		const uint8_t *hash = hashes + n * TEE_SHA256_HASH_SIZE;
572 		const uint8_t *page = paged_store + n * SMALL_PAGE_SIZE;
573 		TEE_Result res;
574 
575 		DMSG("hash pg_idx %zu hash %p page %p", n, hash, page);
576 		res = hash_sha256_check(hash, page, SMALL_PAGE_SIZE);
577 		if (res != TEE_SUCCESS) {
578 			EMSG("Hash failed for page %zu at %p: res 0x%x",
579 			     n, (void *)page, res);
580 			panic();
581 		}
582 	}
583 
584 	/*
585 	 * Assert that the prepaged init sections are page aligned so that
586 	 * nothing trails uninitialized at the end of the premapped init area.
587 	 */
588 	assert(!(init_size & SMALL_PAGE_MASK));
589 
590 	/*
591 	 * Initialize the virtual memory pool used for main_mmu_l2_ttb which
592 	 * is supplied to tee_pager_init() below.
593 	 */
594 	init_virt_pool(&core_virt_mem_pool);
595 
596 	/*
597 	 * Assign the pager alias area at the end of the small page block
598 	 * the rest of the binary is loaded into. We're taking more than
599 	 * needed, but we're guaranteed not to need more than the physical
600 	 * amount of TZSRAM.
601 	 */
602 	mm = tee_mm_alloc2(&core_virt_mem_pool,
603 			   (vaddr_t)core_virt_mem_pool.lo +
604 			   core_virt_mem_pool.size - TZSRAM_SIZE,
605 			   TZSRAM_SIZE);
606 	assert(mm);
607 	tee_pager_set_alias_area(mm);
608 
609 	/*
610 	 * Claim virtual memory which isn't paged.
611 	 * Linear memory (flat-mapped core memory) ends there.
612 	 */
613 	mm = tee_mm_alloc2(&core_virt_mem_pool, VCORE_UNPG_RX_PA,
614 			   (vaddr_t)(__pageable_start - VCORE_UNPG_RX_PA));
615 	assert(mm);
616 
617 	/*
618 	 * Allocate virtual memory for the pageable area and let the pager
619 	 * take charge of all the pages already assigned to that memory.
620 	 */
621 	mm = tee_mm_alloc2(&core_virt_mem_pool, (vaddr_t)__pageable_start,
622 			   pageable_size);
623 	assert(mm);
624 	fobj = ro_paged_alloc(mm, hashes, paged_store);
625 	assert(fobj);
626 	tee_pager_add_core_region(tee_mm_get_smem(mm), PAGED_REGION_TYPE_RO,
627 				  fobj);
628 	fobj_put(fobj);
629 
630 	tee_pager_add_pages(pageable_start, init_size / SMALL_PAGE_SIZE, false);
631 	tee_pager_add_pages(pageable_start + init_size,
632 			    (pageable_size - init_size) / SMALL_PAGE_SIZE,
633 			    true);
634 	if (pageable_end < tzsram_end)
635 		tee_pager_add_pages(pageable_end, (tzsram_end - pageable_end) /
636 						   SMALL_PAGE_SIZE, true);
637 
638 	/*
639 	 * There may be physical pages in TZSRAM before the core load address.
640 	 * These pages can be added to the physical pages pool of the pager.
641 	 * This setup may happen when the secure bootloader runs in TZSRAM
642 	 * and its memory can be reused by OP-TEE once boot stages complete.
643 	 */
644 	tee_pager_add_pages(core_virt_mem_pool.lo,
645 			    (VCORE_UNPG_RX_PA - core_virt_mem_pool.lo) /
646 				SMALL_PAGE_SIZE,
647 			    true);
648 
649 	print_pager_pool_size();
650 }
651 #else /*!CFG_WITH_PAGER*/
652 static void init_pager_runtime(unsigned long pageable_part __unused)
653 {
654 }
655 #endif
656 
657 #if defined(CFG_DT)
658 static int add_optee_dt_node(struct dt_descriptor *dt)
659 {
660 	int offs;
661 	int ret;
662 
663 	if (fdt_path_offset(dt->blob, "/firmware/optee") >= 0) {
664 		DMSG("OP-TEE Device Tree node already exists!");
665 		return 0;
666 	}
667 
668 	offs = fdt_path_offset(dt->blob, "/firmware");
669 	if (offs < 0) {
670 		offs = add_dt_path_subnode(dt, "/", "firmware");
671 		if (offs < 0)
672 			return -1;
673 	}
674 
675 	offs = fdt_add_subnode(dt->blob, offs, "optee");
676 	if (offs < 0)
677 		return -1;
678 
679 	ret = fdt_setprop_string(dt->blob, offs, "compatible",
680 				 "linaro,optee-tz");
681 	if (ret < 0)
682 		return -1;
683 	ret = fdt_setprop_string(dt->blob, offs, "method", "smc");
684 	if (ret < 0)
685 		return -1;
686 
687 	if (CFG_CORE_ASYNC_NOTIF_GIC_INTID) {
688 		/*
689 		 * The format of the interrupt property is defined by the
690 		 * binding of the interrupt domain root. In this case it's
691 		 * an Arm GIC v1, v2 or v3, so we must be compatible with
692 		 * these.
693 		 *
694 		 * An SPI type of interrupt is indicated with a 0 in the
695 		 * first cell. A PPI type is indicated with value 1.
696 		 *
697 		 * The interrupt number goes in the second cell where
698 		 * SPIs range from 0 to 987 and PPIs range from 0 to 15.
699 		 *
700 		 * Flags are passed in the third cell.
701 		 */
702 		uint32_t itr_trigger = 0;
703 		uint32_t itr_type = 0;
704 		uint32_t itr_id = 0;
705 		uint32_t val[3] = { };
706 
707 		/* PPIs are visible only in the current CPU cluster */
708 		static_assert(IS_ENABLED(CFG_CORE_FFA) ||
709 			      !CFG_CORE_ASYNC_NOTIF_GIC_INTID ||
710 			      (CFG_CORE_ASYNC_NOTIF_GIC_INTID >=
711 			       GIC_SPI_BASE) ||
712 			      ((CFG_TEE_CORE_NB_CORE <= 8) &&
713 			       (CFG_CORE_ASYNC_NOTIF_GIC_INTID >=
714 				GIC_PPI_BASE)));
715 
716 		if (CFG_CORE_ASYNC_NOTIF_GIC_INTID >= GIC_SPI_BASE) {
717 			itr_type = GIC_SPI;
718 			itr_id = CFG_CORE_ASYNC_NOTIF_GIC_INTID - GIC_SPI_BASE;
719 			itr_trigger = IRQ_TYPE_EDGE_RISING;
720 		} else {
721 			itr_type = GIC_PPI;
722 			itr_id = CFG_CORE_ASYNC_NOTIF_GIC_INTID - GIC_PPI_BASE;
723 			itr_trigger = IRQ_TYPE_EDGE_RISING |
724 				      GIC_CPU_MASK_SIMPLE(CFG_TEE_CORE_NB_CORE);
725 		}
726 
727 		val[0] = TEE_U32_TO_BIG_ENDIAN(itr_type);
728 		val[1] = TEE_U32_TO_BIG_ENDIAN(itr_id);
729 		val[2] = TEE_U32_TO_BIG_ENDIAN(itr_trigger);
730 
731 		ret = fdt_setprop(dt->blob, offs, "interrupts", val,
732 				  sizeof(val));
733 		if (ret < 0)
734 			return -1;
735 	}
736 	return 0;
737 }
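
/*
 * Sketch of the node produced by add_optee_dt_node() above, assuming an
 * SPI asynchronous notification interrupt (the interrupt number is an
 * example only):
 *
 *	firmware {
 *		optee {
 *			compatible = "linaro,optee-tz";
 *			method = "smc";
 *			interrupts = <GIC_SPI 162 IRQ_TYPE_EDGE_RISING>;
 *		};
 *	};
 */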
738 
739 #ifdef CFG_PSCI_ARM32
740 static int append_psci_compatible(void *fdt, int offs, const char *str)
741 {
742 	return fdt_appendprop(fdt, offs, "compatible", str, strlen(str) + 1);
743 }
744 
745 static int dt_add_psci_node(struct dt_descriptor *dt)
746 {
747 	int offs;
748 
749 	if (fdt_path_offset(dt->blob, "/psci") >= 0) {
750 		DMSG("PSCI Device Tree node already exists!");
751 		return 0;
752 	}
753 
754 	offs = add_dt_path_subnode(dt, "/", "psci");
755 	if (offs < 0)
756 		return -1;
757 	if (append_psci_compatible(dt->blob, offs, "arm,psci-1.0"))
758 		return -1;
759 	if (append_psci_compatible(dt->blob, offs, "arm,psci-0.2"))
760 		return -1;
761 	if (append_psci_compatible(dt->blob, offs, "arm,psci"))
762 		return -1;
763 	if (fdt_setprop_string(dt->blob, offs, "method", "smc"))
764 		return -1;
765 	if (fdt_setprop_u32(dt->blob, offs, "cpu_suspend", PSCI_CPU_SUSPEND))
766 		return -1;
767 	if (fdt_setprop_u32(dt->blob, offs, "cpu_off", PSCI_CPU_OFF))
768 		return -1;
769 	if (fdt_setprop_u32(dt->blob, offs, "cpu_on", PSCI_CPU_ON))
770 		return -1;
771 	if (fdt_setprop_u32(dt->blob, offs, "sys_poweroff", PSCI_SYSTEM_OFF))
772 		return -1;
773 	if (fdt_setprop_u32(dt->blob, offs, "sys_reset", PSCI_SYSTEM_RESET))
774 		return -1;
775 	return 0;
776 }
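
/*
 * Sketch of the node produced by dt_add_psci_node() above (the function
 * IDs are shown symbolically):
 *
 *	psci {
 *		compatible = "arm,psci-1.0", "arm,psci-0.2", "arm,psci";
 *		method = "smc";
 *		cpu_suspend = <PSCI_CPU_SUSPEND>;
 *		cpu_off = <PSCI_CPU_OFF>;
 *		cpu_on = <PSCI_CPU_ON>;
 *		sys_poweroff = <PSCI_SYSTEM_OFF>;
 *		sys_reset = <PSCI_SYSTEM_RESET>;
 *	};
 */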
777 
778 static int check_node_compat_prefix(struct dt_descriptor *dt, int offs,
779 				    const char *prefix)
780 {
781 	const size_t prefix_len = strlen(prefix);
782 	size_t l;
783 	int plen;
784 	const char *prop;
785 
786 	prop = fdt_getprop(dt->blob, offs, "compatible", &plen);
787 	if (!prop)
788 		return -1;
789 
790 	while (plen > 0) {
791 		if (memcmp(prop, prefix, prefix_len) == 0)
792 			return 0; /* match */
793 
794 		l = strlen(prop) + 1;
795 		prop += l;
796 		plen -= l;
797 	}
798 
799 	return -1;
800 }
801 
802 static int dt_add_psci_cpu_enable_methods(struct dt_descriptor *dt)
803 {
804 	int offs = 0;
805 
806 	while (1) {
807 		offs = fdt_next_node(dt->blob, offs, NULL);
808 		if (offs < 0)
809 			break;
810 		if (fdt_getprop(dt->blob, offs, "enable-method", NULL))
811 			continue; /* already set */
812 		if (check_node_compat_prefix(dt, offs, "arm,cortex-a"))
813 			continue; /* no compatible */
814 		if (fdt_setprop_string(dt->blob, offs, "enable-method", "psci"))
815 			return -1;
816 		/* Need to restart scanning as offsets may have changed */
817 		offs = 0;
818 	}
819 	return 0;
820 }
821 
822 static int config_psci(struct dt_descriptor *dt)
823 {
824 	if (dt_add_psci_node(dt))
825 		return -1;
826 	return dt_add_psci_cpu_enable_methods(dt);
827 }
828 #else
829 static int config_psci(struct dt_descriptor *dt __unused)
830 {
831 	return 0;
832 }
833 #endif /*CFG_PSCI_ARM32*/
834 
835 static int mark_tzdram_as_reserved(struct dt_descriptor *dt)
836 {
837 	return add_res_mem_dt_node(dt, "optee_core", CFG_TZDRAM_START,
838 				   CFG_TZDRAM_SIZE);
839 }
840 
841 static void update_external_dt(void)
842 {
843 	struct dt_descriptor *dt = get_external_dt_desc();
844 
845 	if (!dt || !dt->blob)
846 		return;
847 
848 	if (!IS_ENABLED(CFG_CORE_FFA) && add_optee_dt_node(dt))
849 		panic("Failed to add OP-TEE Device Tree node");
850 
851 	if (config_psci(dt))
852 		panic("Failed to config PSCI");
853 
854 #ifdef CFG_CORE_RESERVED_SHM
855 	if (mark_static_shm_as_reserved(dt))
856 		panic("Failed to config non-secure memory");
857 #endif
858 
859 	if (mark_tzdram_as_reserved(dt))
860 		panic("Failed to config secure memory");
861 }
862 #else /*CFG_DT*/
863 static void update_external_dt(void)
864 {
865 }
866 #endif /*!CFG_DT*/
867 
868 void init_tee_runtime(void)
869 {
870 	/*
871 	 * With virtualization we call this function when creating the
872 	 * OP-TEE partition instead.
873 	 */
874 	if (!IS_ENABLED(CFG_NS_VIRTUALIZATION))
875 		call_preinitcalls();
876 	call_early_initcalls();
877 	call_service_initcalls();
878 
879 	/*
880 	 * These two functions use crypto_rng_read() to initialize the
881 	 * pauth keys. Once the initcalls above return we're guaranteed that
882 	 * crypto_rng_read() is ready to be used.
883 	 */
884 	thread_init_core_local_pauth_keys();
885 	thread_init_thread_pauth_keys();
886 
887 	/*
888 	 * Reinitialize canaries around the stacks with crypto_rng_read().
889 	 *
890 	 * TODO: Updating canaries when CFG_NS_VIRTUALIZATION is enabled will
891 	 * require synchronization between thread_check_canaries() and
892 	 * thread_update_canaries().
893 	 */
894 	if (!IS_ENABLED(CFG_NS_VIRTUALIZATION))
895 		thread_update_canaries();
896 }
897 
898 static bool add_padding_to_pool(vaddr_t va, size_t len, void *ptr __unused)
899 {
900 #ifdef CFG_NS_VIRTUALIZATION
901 	nex_malloc_add_pool((void *)va, len);
902 #else
903 	malloc_add_pool((void *)va, len);
904 #endif
905 	return true;
906 }
907 
908 static void init_primary(unsigned long pageable_part)
909 {
910 	vaddr_t va = 0;
911 
912 	/*
913 	 * Mask asynchronous exceptions before switching to the thread vector
914 	 * as the thread handler requires those to be masked while
915 	 * executing with the temporary stack. The thread subsystem also
916 	 * asserts that the foreign interrupts are blocked when using most of
917 	 * its functions.
918 	 */
919 	thread_set_exceptions(THREAD_EXCP_ALL);
920 	primary_save_cntfrq();
921 	init_vfp_sec();
922 
923 	if (IS_ENABLED(CFG_CRYPTO_WITH_CE))
924 		check_crypto_extensions();
925 
926 	init_asan();
927 
928 	/*
929 	 * By default the whole of OP-TEE uses malloc, so we need to
930 	 * initialize it early. But, when virtualization is enabled, malloc
931 	 * is used only by the TEE runtime, so it should be initialized
932 	 * later, separately for each virtual partition. Core code uses
933 	 * nex_malloc instead.
934 	 */
935 #ifdef CFG_WITH_PAGER
936 	/* Add heap2 first as heap1 may be too small as initial bget pool */
937 	malloc_add_pool(__heap2_start, __heap2_end - __heap2_start);
938 #endif
939 #ifdef CFG_NS_VIRTUALIZATION
940 	nex_malloc_add_pool(__nex_heap_start, __nex_heap_end -
941 					      __nex_heap_start);
942 #else
943 	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
944 #endif
945 	IMSG_RAW("\n");
946 
947 	core_mmu_save_mem_map();
948 	core_mmu_init_phys_mem();
949 	boot_mem_foreach_padding(add_padding_to_pool, NULL);
950 	va = boot_mem_release_unused();
951 	if (!IS_ENABLED(CFG_WITH_PAGER)) {
952 		/*
953 		 * We must update boot_cached_mem_end to reflect the memory
954 		 * just unmapped by boot_mem_release_unused().
955 		 */
956 		assert(va && va <= boot_cached_mem_end);
957 		boot_cached_mem_end = va;
958 	}
959 
960 	if (IS_ENABLED(CFG_WITH_PAGER)) {
961 		/*
962 		 * Pager: init_runtime() calls thread_kernel_enable_vfp()
963 		 * so we must set a current thread right now to avoid a
964 		 * chicken-and-egg problem (thread_init_boot_thread() sets
965 		 * the current thread but needs things set by
966 		 * init_runtime()).
967 		 */
968 		thread_get_core_local()->curr_thread = 0;
969 		init_pager_runtime(pageable_part);
970 	}
971 
972 	thread_init_primary();
973 	thread_init_per_cpu();
974 }
975 
976 static bool cpu_nmfi_enabled(void)
977 {
978 #if defined(ARM32)
979 	return read_sctlr() & SCTLR_NMFI;
980 #else
981 	/* Note: ARM64 does not feature non-maskable FIQ support. */
982 	return false;
983 #endif
984 }
985 
986 /*
987  * Note: this function is weak just to make it possible to exclude it from
988  * the unpaged area.
989  */
990 void __weak boot_init_primary_late(unsigned long fdt __unused,
991 				   unsigned long manifest __unused)
992 {
993 	size_t fdt_size = CFG_DTB_MAX_SIZE;
994 
995 	if (IS_ENABLED(CFG_TRANSFER_LIST) && mapped_tl) {
996 		struct transfer_list_entry *tl_e = NULL;
997 
998 		tl_e = transfer_list_find(mapped_tl, TL_TAG_FDT);
999 		if (tl_e) {
1000 			/*
1001 			 * Expand the data size of the DTB entry to the maximum
1002 			 * allocable mapped memory to reserve sufficient space
1003 			 * for inserting new nodes and to avoid potentially
1004 			 * corrupting the entries that follow.
1005 			 */
1006 			uint32_t dtb_max_sz = mapped_tl->max_size -
1007 					      mapped_tl->size + tl_e->data_size;
1008 
1009 			if (!transfer_list_set_data_size(mapped_tl, tl_e,
1010 							 dtb_max_sz)) {
1011 				EMSG("Failed to extend DTB size to %#"PRIx32,
1012 				     dtb_max_sz);
1013 				panic();
1014 			}
1015 			fdt_size = tl_e->data_size;
1016 		}
1017 	}
1018 
1019 	init_external_dt(boot_arg_fdt, fdt_size);
1020 	reinit_manifest_dt();
1021 #ifdef CFG_CORE_SEL1_SPMC
1022 	tpm_map_log_area(get_manifest_dt());
1023 #else
1024 	tpm_map_log_area(get_external_dt());
1025 #endif
1026 	discover_nsec_memory();
1027 	update_external_dt();
1028 	configure_console_from_dt();
1029 
1030 	thread_init_thread_core_local();
1031 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
1032 		/*
1033 		 * Virtualization: We can't initialize threads right now because
1034 		 * threads belong to the "tee" part and will be initialized
1035 		 * separately for each new virtual guest. So, we'll clear
1036 		 * "curr_thread" and call it done.
1037 		 */
1038 		thread_get_core_local()->curr_thread = -1;
1039 	} else {
1040 		thread_init_boot_thread();
1041 	}
1042 }
1043 
1044 void __weak boot_init_primary_runtime(void)
1045 {
1046 	IMSG("OP-TEE version: %s", core_v_str);
1047 	if (IS_ENABLED(CFG_INSECURE)) {
1048 		IMSG("WARNING: This OP-TEE configuration might be insecure!");
1049 		IMSG("WARNING: Please check https://optee.readthedocs.io/en/latest/architecture/porting_guidelines.html");
1050 	}
1051 	IMSG("Primary CPU initializing");
1052 #ifdef CFG_CORE_ASLR
1053 	DMSG("Executing at offset %#lx with virtual load address %#"PRIxVA,
1054 	     (unsigned long)boot_mmu_config.map_offset, VCORE_START_VA);
1055 #endif
1056 #ifdef CFG_NS_VIRTUALIZATION
1057 	DMSG("NS-virtualization enabled, supporting %u guests",
1058 	     CFG_VIRT_GUEST_COUNT);
1059 #endif
1060 	if (IS_ENABLED(CFG_MEMTAG))
1061 		DMSG("Memory tagging %s",
1062 		     memtag_is_enabled() ?  "enabled" : "disabled");
1063 
1064 	/* Check if platform needs NMFI workaround */
1065 	if (cpu_nmfi_enabled()) {
1066 		if (!IS_ENABLED(CFG_CORE_WORKAROUND_ARM_NMFI))
1067 			IMSG("WARNING: This ARM core has NMFI enabled, please apply workaround!");
1068 	} else {
1069 		if (IS_ENABLED(CFG_CORE_WORKAROUND_ARM_NMFI))
1070 			IMSG("WARNING: This ARM core does not have NMFI enabled, no need for workaround");
1071 	}
1072 
1073 	boot_primary_init_intc();
1074 	init_vfp_nsec();
1075 	if (!IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
1076 		/*
1077 		 * Unmask native interrupts during driver initcalls.
1078 		 *
1079 		 * NS-virtualization still uses the temporary stack also
1080 		 * used for exception handling so it must still have native
1081 		 * interrupts masked.
1082 		 */
1083 		thread_set_exceptions(thread_get_exceptions() &
1084 				      ~THREAD_EXCP_NATIVE_INTR);
1085 		init_tee_runtime();
1086 	}
1087 
1088 	if (!IS_ENABLED(CFG_WITH_PAGER))
1089 		boot_mem_release_tmp_alloc();
1090 }
1091 
1092 void __weak boot_init_primary_final(void)
1093 {
1094 	if (!IS_ENABLED(CFG_NS_VIRTUALIZATION))
1095 		call_driver_initcalls();
1096 
1097 	call_finalcalls();
1098 
1099 	IMSG("Primary CPU switching to normal world boot");
1100 
1101 	/* Mask native interrupts before switching to the normal world */
1102 	if (!IS_ENABLED(CFG_NS_VIRTUALIZATION))
1103 		thread_set_exceptions(thread_get_exceptions() |
1104 				      THREAD_EXCP_NATIVE_INTR);
1105 }
1106 
1107 static void init_secondary_helper(void)
1108 {
1109 	IMSG("Secondary CPU %zu initializing", get_core_pos());
1110 
1111 	/*
1112 	 * Mask asynchronous exceptions before switching to the thread vector
1113 	 * as the thread handler requires those to be masked while
1114 	 * executing with the temporary stack. The thread subsystem also
1115 	 * asserts that the foreign interrupts are blocked when using most of
1116 	 * its functions.
1117 	 */
1118 	thread_set_exceptions(THREAD_EXCP_ALL);
1119 
1120 	secondary_init_cntfrq();
1121 	thread_init_per_cpu();
1122 	boot_secondary_init_intc();
1123 	init_vfp_sec();
1124 	init_vfp_nsec();
1125 
1126 	IMSG("Secondary CPU %zu switching to normal world boot", get_core_pos());
1127 }
1128 
1129 /*
1130  * Note: this function is weak just to make it possible to exclude it from
1131  * the unpaged area so that it lies in the init area.
1132  */
1133 void __weak boot_init_primary_early(void)
1134 {
1135 	unsigned long pageable_part = 0;
1136 	struct transfer_list_entry *tl_e = NULL;
1137 
1138 	if (IS_ENABLED(CFG_TRANSFER_LIST) && boot_arg_transfer_list) {
1139 		/* map and save the TL */
1140 		mapped_tl = transfer_list_map(boot_arg_transfer_list);
1141 		if (!mapped_tl)
1142 			panic("Failed to map transfer list");
1143 
1144 		transfer_list_dump(mapped_tl);
1145 		tl_e = transfer_list_find(mapped_tl, TL_TAG_OPTEE_PAGABLE_PART);
1146 	}
1147 
1148 	if (IS_ENABLED(CFG_WITH_PAGER)) {
1149 		if (IS_ENABLED(CFG_TRANSFER_LIST) && tl_e)
1150 			pageable_part =
1151 				get_le64(transfer_list_entry_data(tl_e));
1152 		else
1153 			pageable_part = boot_arg_pageable_part;
1154 	}
1155 
1156 	init_primary(pageable_part);
1157 }
1158 
1159 static void boot_save_transfer_list(unsigned long zero_reg,
1160 				    unsigned long transfer_list,
1161 				    unsigned long fdt)
1162 {
1163 	struct transfer_list_header *tl = (void *)transfer_list;
1164 	struct transfer_list_entry *tl_e = NULL;
1165 
1166 	if (zero_reg != 0)
1167 		panic("Incorrect transfer list register convention");
1168 
1169 	if (!IS_ALIGNED_WITH_TYPE(transfer_list, struct transfer_list_header) ||
1170 	    !IS_ALIGNED(transfer_list, TL_ALIGNMENT_FROM_ORDER(tl->alignment)))
1171 		panic("Transfer list base address is not aligned");
1172 
1173 	if (transfer_list_check_header(tl) == TL_OPS_NONE)
1174 		panic("Invalid transfer list");
1175 
1176 	tl_e = transfer_list_find(tl, TL_TAG_FDT);
1177 	if (fdt != (unsigned long)transfer_list_entry_data(tl_e))
1178 		panic("DT does not match the DT entry of the TL");
1179 
1180 	boot_arg_transfer_list = transfer_list;
1181 }
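
/*
 * Illustration of the handoff convention validated above (AArch64 case;
 * see also the Scenario B comment in boot_save_args() below). A
 * compliant loader enters OP-TEE with:
 *
 *	x0 = DTB address (or 0)
 *	x1 = (1UL << 32) | (TRANSFER_LIST_SIGNATURE & 0xffffffff)
 *	x2 = 0
 *	x3 = transfer list base address
 */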
1182 
1183 #if defined(CFG_WITH_ARM_TRUSTED_FW)
1184 unsigned long boot_cpu_on_handler(unsigned long a0 __maybe_unused,
1185 				  unsigned long a1 __unused)
1186 {
1187 	init_secondary_helper();
1188 	return 0;
1189 }
1190 #else
1191 void boot_init_secondary(unsigned long nsec_entry __unused)
1192 {
1193 	init_secondary_helper();
1194 }
1195 #endif
1196 
1197 #if defined(CFG_BOOT_SECONDARY_REQUEST)
1198 void boot_set_core_ns_entry(size_t core_idx, uintptr_t entry,
1199 			    uintptr_t context_id)
1200 {
1201 	ns_entry_contexts[core_idx].entry_point = entry;
1202 	ns_entry_contexts[core_idx].context_id = context_id;
1203 	dsb_ishst();
1204 }
1205 
1206 int boot_core_release(size_t core_idx, paddr_t entry)
1207 {
1208 	if (!core_idx || core_idx >= CFG_TEE_CORE_NB_CORE)
1209 		return -1;
1210 
1211 	ns_entry_contexts[core_idx].entry_point = entry;
1212 	dmb();
1213 	spin_table[core_idx] = 1;
1214 	dsb();
1215 	sev();
1216 
1217 	return 0;
1218 }
1219 
1220 /*
1221  * Spin until a secondary boot request arrives, then return
1222  * the secondary core's entry context.
1223  */
1224 struct ns_entry_context *boot_core_hpen(void)
1225 {
1226 #ifdef CFG_PSCI_ARM32
1227 	return &ns_entry_contexts[get_core_pos()];
1228 #else
1229 	do {
1230 		wfe();
1231 	} while (!spin_table[get_core_pos()]);
1232 	dmb();
1233 	return &ns_entry_contexts[get_core_pos()];
1234 #endif
1235 }
1236 #endif
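
/*
 * Typical pairing of the two functions above (illustration only): the
 * primary CPU publishes the entry point and wakes a parked secondary:
 *
 *	// on the primary, e.g. from a PSCI CPU_ON handler
 *	boot_core_release(1, entry_pa);
 *
 *	// on the secondary, from its boot path
 *	struct ns_entry_context *ctx = boot_core_hpen();
 *	// ...jump to ctx->entry_point with ctx->context_id
 */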
1237 
1238 #if defined(CFG_CORE_ASLR)
1239 #if defined(CFG_DT)
1240 unsigned long __weak get_aslr_seed(void)
1241 {
1242 	void *fdt = NULL;
1243 	int rc = 0;
1244 	const uint64_t *seed = NULL;
1245 	int offs = 0;
1246 	int len = 0;
1247 
1248 	if (!IS_ENABLED(CFG_CORE_SEL2_SPMC))
1249 		fdt = (void *)boot_arg_fdt;
1250 
1251 	if (!fdt) {
1252 		DMSG("No fdt");
1253 		goto err;
1254 	}
1255 
1256 	rc = fdt_check_header(fdt);
1257 	if (rc) {
1258 		DMSG("Bad fdt: %d", rc);
1259 		goto err;
1260 	}
1261 
1262 	offs = fdt_path_offset(fdt, "/secure-chosen");
1263 	if (offs < 0) {
1264 		DMSG("Cannot find /secure-chosen");
1265 		goto err;
1266 	}
1267 	seed = fdt_getprop(fdt, offs, "kaslr-seed", &len);
1268 	if (!seed || len != sizeof(*seed)) {
1269 		DMSG("Cannot find valid kaslr-seed");
1270 		goto err;
1271 	}
1272 
1273 	return fdt64_to_cpu(fdt64_ld(seed));
1274 
1275 err:
1276 	/* Try platform implementation */
1277 	return plat_get_aslr_seed();
1278 }
1279 #else /*!CFG_DT*/
1280 unsigned long __weak get_aslr_seed(void)
1281 {
1282 	/* Try platform implementation */
1283 	return plat_get_aslr_seed();
1284 }
1285 #endif /*!CFG_DT*/
1286 #endif /*CFG_CORE_ASLR*/
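
/*
 * Sketch of the DT fragment consumed by get_aslr_seed() above (the seed
 * value is an example only):
 *
 *	secure-chosen {
 *		kaslr-seed = <0xfeedf00d 0xc0ffee11>;
 *	};
 */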
1287 
1288 static void *get_fdt_from_boot_info(struct ffa_boot_info_header_1_1 *hdr)
1289 {
1290 	struct ffa_boot_info_1_1 *desc = NULL;
1291 	uint8_t content_fmt = 0;
1292 	uint8_t name_fmt = 0;
1293 	void *fdt = NULL;
1294 	int ret = 0;
1295 
1296 	if (hdr->signature != FFA_BOOT_INFO_SIGNATURE) {
1297 		EMSG("Bad boot info signature %#"PRIx32, hdr->signature);
1298 		panic();
1299 	}
1300 	if (hdr->version != FFA_BOOT_INFO_VERSION_1_1 &&
1301 	    hdr->version != FFA_BOOT_INFO_VERSION_1_2) {
1302 		EMSG("Bad boot info version %#"PRIx32, hdr->version);
1303 		panic();
1304 	}
1305 	if (hdr->desc_count != 1) {
1306 		EMSG("Bad boot info descriptor count %#"PRIx32,
1307 		     hdr->desc_count);
1308 		panic();
1309 	}
1310 	desc = (void *)((vaddr_t)hdr + hdr->desc_offset);
1311 	name_fmt = desc->flags & FFA_BOOT_INFO_FLAG_NAME_FORMAT_MASK;
1312 	if (name_fmt == FFA_BOOT_INFO_FLAG_NAME_FORMAT_STRING)
1313 		DMSG("Boot info descriptor name \"%.16s\"", desc->name);
1314 	else if (name_fmt == FFA_BOOT_INFO_FLAG_NAME_FORMAT_UUID)
1315 		DMSG("Boot info descriptor UUID %pUl", (void *)desc->name);
1316 	else
1317 		DMSG("Boot info descriptor: unknown name format %"PRIu8,
1318 		     name_fmt);
1319 
1320 	content_fmt = (desc->flags & FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_MASK) >>
1321 		      FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_SHIFT;
1322 	if (content_fmt != FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_ADDR) {
1323 		EMSG("Bad boot info content format %"PRIu8", expected %u (address)",
1324 		     content_fmt, FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_ADDR);
1325 		panic();
1326 	}
1327 
1328 	fdt = (void *)(vaddr_t)desc->contents;
1329 	ret = fdt_check_full(fdt, desc->size);
1330 	if (ret < 0) {
1331 		EMSG("Invalid Device Tree at %p: error %d", fdt, ret);
1332 		panic();
1333 	}
1334 	return fdt;
1335 }
1336 
1337 static void get_sec_mem_from_manifest(void *fdt, paddr_t *base, size_t *size)
1338 {
1339 	int ret = 0;
1340 	uint64_t num = 0;
1341 
1342 	ret = fdt_node_check_compatible(fdt, 0, "arm,ffa-manifest-1.0");
1343 	if (ret < 0) {
1344 		EMSG("Invalid FF-A manifest at %p: error %d", fdt, ret);
1345 		panic();
1346 	}
1347 	ret = dt_getprop_as_number(fdt, 0, "load-address", &num);
1348 	if (ret < 0) {
1349 		EMSG("Can't read \"load-address\" from FF-A manifest at %p: error %d",
1350 		     fdt, ret);
1351 		panic();
1352 	}
1353 	*base = num;
1354 	/* "mem-size" is currently an undocumented extension to the spec. */
1355 	ret = dt_getprop_as_number(fdt, 0, "mem-size", &num);
1356 	if (ret < 0) {
1357 		EMSG("Can't read \"mem-size\" from FF-A manifest at %p: error %d",
1358 		     fdt, ret);
1359 		panic();
1360 	}
1361 	*size = num;
1362 }
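
/*
 * Sketch of the FF-A manifest properties read above (the addresses and
 * sizes are examples only):
 *
 *	/ {
 *		compatible = "arm,ffa-manifest-1.0";
 *		load-address = <0x0 0x0e100000>;
 *		mem-size = <0x0 0x00f00000>;	// undocumented extension
 *	};
 */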
1363 
1364 void __weak boot_save_args(unsigned long a0, unsigned long a1,
1365 			   unsigned long a2, unsigned long a3,
1366 			   unsigned long a4 __maybe_unused)
1367 {
1368 	/*
1369 	 * Register use:
1370 	 *
1371 	 * Scenario A: Default arguments
1372 	 * a0   - CFG_CORE_FFA=y && CFG_CORE_SEL2_SPMC=n:
1373 	 *        if non-NULL holds the TOS FW config [1] address
1374 	 *      - CFG_CORE_FFA=y &&
1375 	 *        (CFG_CORE_SEL2_SPMC=y || CFG_CORE_EL3_SPMC=y):
1376 	 *        address of FF-A Boot Information Blob
1377 	 *      - CFG_CORE_FFA=n:
1378 	 *        if non-NULL holds the pageable part address
1379 	 * a1	- CFG_WITH_ARM_TRUSTED_FW=n (Armv7):
1380 	 *	  Armv7 standard bootarg #1 (kept track of in entry_a32.S)
1381 	 * a2   - CFG_CORE_SEL2_SPMC=n:
1382 	 *        if non-NULL holds the system DTB address
1383 	 *	- CFG_WITH_ARM_TRUSTED_FW=n (Armv7):
1384 	 *	  Armv7 standard bootarg #2 (system DTB address, kept track
1385 	 *	  of in entry_a32.S)
1386 	 * a3	- Not used
1387 	 * a4	- CFG_WITH_ARM_TRUSTED_FW=n:
1388 	 *	  Non-secure entry address
1389 	 *
1390 	 * [1] A TF-A concept: TOS_FW_CONFIG - Trusted OS Firmware
1391 	 * configuration file. Used by Trusted OS (BL32), that is, OP-TEE
1392 	 * here. This is also called Manifest DT, related to the Manifest DT
1393 	 * passed in the FF-A Boot Information Blob, but with a different
1394 	 * compatible string.
1395 	 *
1396 	 * Scenario B: FW Handoff via Transfer List
1397 	 * Note: FF-A and non-secure entry are not yet supported with
1398 	 *       Transfer List
1399 	 * a0	- DTB address or 0 (AArch64)
1400 	 *	- must be 0 (AArch32)
1401 	 * a1	- 1 << 32 | TRANSFER_LIST_SIGNATURE[0:31] (AArch64)
1402 	 *	- 1 << 24 | TRANSFER_LIST_SIGNATURE[0:23] (AArch32)
1403 	 * a2	- must be 0 (AArch64)
1404 	 *	- DTB address or 0 (AArch32)
1405 	 * a3	- Transfer list base address
1406 	 * a4	- Not used
1407 	 */
1408 
1409 	if (IS_ENABLED(CFG_TRANSFER_LIST)) {
1410 		if (IS_ENABLED(CFG_ARM64_core) &&
1411 		    a1 == TL_HANDOFF_X1_VALUE(TL_REG_CONVENTION_VER)) {
1412 			boot_save_transfer_list(a2, a3, a0);
1413 			boot_arg_fdt = a0;
1414 		} else if (IS_ENABLED(CFG_ARM32_core) &&
1415 			   a1 == TL_HANDOFF_R1_VALUE(TL_REG_CONVENTION_VER)) {
1416 			boot_save_transfer_list(a0, a3, a2);
1417 			boot_arg_fdt = a2;
1418 		}
1419 
1420 		return;
1421 	}
1422 
1423 	if (!IS_ENABLED(CFG_CORE_SEL2_SPMC)) {
1424 #if defined(CFG_DT_ADDR)
1425 		boot_arg_fdt = CFG_DT_ADDR;
1426 #else
1427 		boot_arg_fdt = a2;
1428 #endif
1429 	}
1430 
1431 	if (IS_ENABLED(CFG_CORE_FFA)) {
1432 		if (IS_ENABLED(CFG_CORE_SEL2_SPMC) ||
1433 		    IS_ENABLED(CFG_CORE_EL3_SPMC))
1434 			init_manifest_dt(get_fdt_from_boot_info((void *)a0));
1435 		else
1436 			init_manifest_dt((void *)a0);
1437 		if (IS_ENABLED(CFG_CORE_SEL2_SPMC) &&
1438 		    IS_ENABLED(CFG_CORE_PHYS_RELOCATABLE)) {
1439 			paddr_t base = 0;
1440 			size_t size = 0;
1441 
1442 			get_sec_mem_from_manifest(get_manifest_dt(),
1443 						  &base, &size);
1444 			core_mmu_set_secure_memory(base, size);
1445 		}
1446 	} else {
1447 		if (IS_ENABLED(CFG_WITH_PAGER)) {
1448 #if defined(CFG_PAGEABLE_ADDR)
1449 			boot_arg_pageable_part = CFG_PAGEABLE_ADDR;
1450 #else
1451 			boot_arg_pageable_part = a0;
1452 #endif
1453 		}
1454 		if (!IS_ENABLED(CFG_WITH_ARM_TRUSTED_FW)) {
1455 #if defined(CFG_NS_ENTRY_ADDR)
1456 			boot_arg_nsec_entry = CFG_NS_ENTRY_ADDR;
1457 #else
1458 			boot_arg_nsec_entry = a4;
1459 #endif
1460 		}
1461 	}
1462 }
1463 
1464 #if defined(CFG_TRANSFER_LIST)
1465 static TEE_Result release_transfer_list(void)
1466 {
1467 	struct dt_descriptor *dt = get_external_dt_desc();
1468 
1469 	if (!mapped_tl)
1470 		return TEE_SUCCESS;
1471 
1472 	if (dt) {
1473 		int ret = 0;
1474 		struct transfer_list_entry *tl_e = NULL;
1475 
1476 		/*
1477 		 * Pack the DTB and update the transfer list before unmapping
1478 		 */
1479 		ret = fdt_pack(dt->blob);
1480 		if (ret < 0) {
1481 			EMSG("Failed to pack Device Tree at 0x%" PRIxPA
1482 			     ": error %d", virt_to_phys(dt->blob), ret);
1483 			panic();
1484 		}
1485 
1486 		tl_e = transfer_list_find(mapped_tl, TL_TAG_FDT);
1487 		assert(dt->blob == transfer_list_entry_data(tl_e));
1488 		transfer_list_set_data_size(mapped_tl, tl_e,
1489 					    fdt_totalsize(dt->blob));
1490 		dt->blob = NULL;
1491 	}
1492 
1493 	transfer_list_unmap_sync(mapped_tl);
1494 	mapped_tl = NULL;
1495 
1496 	return TEE_SUCCESS;
1497 }
1498 
1499 boot_final(release_transfer_list);
1500 #endif
1501