xref: /optee_os/core/arch/arm/kernel/boot.c (revision 19a31ec40245ae01a9adcd206eec2a4bb4479fc9)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2015-2023, Linaro Limited
4  * Copyright (c) 2023, Arm Limited
5  */
6 
7 #include <arm.h>
8 #include <assert.h>
9 #include <compiler.h>
10 #include <config.h>
11 #include <console.h>
12 #include <crypto/crypto.h>
13 #include <drivers/gic.h>
14 #include <dt-bindings/interrupt-controller/arm-gic.h>
15 #include <ffa.h>
16 #include <initcall.h>
17 #include <inttypes.h>
18 #include <io.h>
19 #include <keep.h>
20 #include <kernel/asan.h>
21 #include <kernel/boot.h>
22 #include <kernel/dt.h>
23 #include <kernel/linker.h>
24 #include <kernel/misc.h>
25 #include <kernel/panic.h>
26 #include <kernel/tee_misc.h>
27 #include <kernel/thread.h>
28 #include <kernel/tpm.h>
29 #include <kernel/transfer_list.h>
30 #include <libfdt.h>
31 #include <malloc.h>
32 #include <memtag.h>
33 #include <mm/core_memprot.h>
34 #include <mm/core_mmu.h>
35 #include <mm/fobj.h>
36 #include <mm/tee_mm.h>
37 #include <mm/tee_pager.h>
38 #include <sm/psci.h>
39 #include <trace.h>
40 #include <utee_defines.h>
41 #include <util.h>
42 
43 #include <platform_config.h>
44 
45 #if !defined(CFG_WITH_ARM_TRUSTED_FW)
46 #include <sm/sm.h>
47 #endif
48 
49 #if defined(CFG_WITH_VFP)
50 #include <kernel/vfp.h>
51 #endif
52 
53 /*
54  * In this file we're using unsigned long to represent physical pointers as
55  * they are received in a single register when OP-TEE is initially entered.
56  * This limits 32-bit systems to only make use of the lower 32 bits
57  * of a physical address for initial parameters.
58  *
59  * 64-bit systems on the other hand can use full 64-bit physical pointers.
60  */
61 #define PADDR_INVALID		ULONG_MAX
62 
63 #if defined(CFG_BOOT_SECONDARY_REQUEST)
64 struct ns_entry_context {
65 	uintptr_t entry_point;
66 	uintptr_t context_id;
67 };
68 struct ns_entry_context ns_entry_contexts[CFG_TEE_CORE_NB_CORE];
69 static uint32_t spin_table[CFG_TEE_CORE_NB_CORE];
70 #endif
71 
72 #ifdef CFG_BOOT_SYNC_CPU
73 /*
74  * Array used when booting to synchronize the CPUs.
75  * When 0, the CPU has not started.
76  * When 1, it has started.
77  */
78 uint32_t sem_cpu_sync[CFG_TEE_CORE_NB_CORE];
79 DECLARE_KEEP_PAGER(sem_cpu_sync);
80 #endif
81 
82 static void *manifest_dt __nex_bss;
83 static unsigned long boot_arg_fdt __nex_bss;
84 static unsigned long boot_arg_nsec_entry __nex_bss;
85 static unsigned long boot_arg_pageable_part __nex_bss;
86 static unsigned long boot_arg_transfer_list __nex_bss;
87 static struct transfer_list_header *mapped_tl __nex_bss;
88 
89 #ifdef CFG_SECONDARY_INIT_CNTFRQ
90 static uint32_t cntfrq;
91 #endif
92 
93 /* May be overridden in plat-$(PLATFORM)/main.c */
94 __weak void plat_primary_init_early(void)
95 {
96 }
97 DECLARE_KEEP_PAGER(plat_primary_init_early);
98 
99 /* May be overridden in plat-$(PLATFORM)/main.c */
100 __weak void boot_primary_init_intc(void)
101 {
102 }
103 
104 /* May be overridden in plat-$(PLATFORM)/main.c */
105 __weak void boot_secondary_init_intc(void)
106 {
107 }
108 
109 /* May be overridden in plat-$(PLATFORM)/main.c */
110 __weak unsigned long plat_get_aslr_seed(void)
111 {
112 	DMSG("Warning: no ASLR seed");
113 
114 	return 0;
115 }
116 
117 #if defined(_CFG_CORE_STACK_PROTECTOR) || defined(CFG_WITH_STACK_CANARIES)
118 /* Generate random stack canary value on boot up */
119 __weak void plat_get_random_stack_canaries(void *buf, size_t ncan, size_t size)
120 {
121 	TEE_Result ret = TEE_ERROR_GENERIC;
122 	size_t i = 0;
123 
124 	assert(buf && ncan && size);
125 
126 	/*
127 	 * With virtualization the RNG is not initialized in the Nexus core.
128 	 * Override this with a platform-specific implementation in that case.
129 	 */
130 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
131 		IMSG("WARNING: Using fixed value for stack canary");
132 		memset(buf, 0xab, ncan * size);
133 		goto out;
134 	}
135 
136 	ret = crypto_rng_read(buf, ncan * size);
137 	if (ret != TEE_SUCCESS)
138 		panic("Failed to generate random stack canary");
139 
140 out:
141 	/* Leave a null byte in the canary to prevent string-based exploits */
142 	for (i = 0; i < ncan; i++)
143 		*((uint8_t *)buf + size * i) = 0;
144 }
145 #endif /* _CFG_CORE_STACK_PROTECTOR || CFG_WITH_STACK_CANARIES */
146 
147 /*
148  * This function is called as a guard after each SMC call that is not
149  * supposed to return.
150  */
151 void __panic_at_smc_return(void)
152 {
153 	panic();
154 }
155 
156 #if defined(CFG_WITH_ARM_TRUSTED_FW)
157 void init_sec_mon(unsigned long nsec_entry __maybe_unused)
158 {
159 	assert(nsec_entry == PADDR_INVALID);
160 	/* Do nothing as we don't have a secure monitor */
161 }
162 #else
163 /* May be overridden in plat-$(PLATFORM)/main.c */
164 __weak void init_sec_mon(unsigned long nsec_entry)
165 {
166 	struct sm_nsec_ctx *nsec_ctx;
167 
168 	assert(nsec_entry != PADDR_INVALID);
169 
170 	/* Initialize secure monitor */
171 	nsec_ctx = sm_get_nsec_ctx();
172 	nsec_ctx->mon_lr = nsec_entry;
173 	nsec_ctx->mon_spsr = CPSR_MODE_SVC | CPSR_I;
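	/*
	 * By the usual interworking convention, bit 0 set in the entry
	 * address means the normal world entry code runs in Thumb state.
	 */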
174 	if (nsec_entry & 1)
175 		nsec_ctx->mon_spsr |= CPSR_T;
176 }
177 #endif
178 
179 #if defined(CFG_WITH_ARM_TRUSTED_FW)
180 static void init_vfp_nsec(void)
181 {
182 }
183 #else
184 static void init_vfp_nsec(void)
185 {
186 	/* Normal world can use CP10 and CP11 (SIMD/VFP) */
187 	write_nsacr(read_nsacr() | NSACR_CP10 | NSACR_CP11);
188 }
189 #endif
190 
191 static void check_crypto_extensions(void)
192 {
193 	bool ce_supported = true;
194 
195 	if (!feat_aes_implemented() &&
196 	    IS_ENABLED(CFG_CRYPTO_AES_ARM_CE)) {
197 		EMSG("AES instructions are not supported");
198 		ce_supported = false;
199 	}
200 
201 	if (!feat_sha1_implemented() &&
202 	    IS_ENABLED(CFG_CRYPTO_SHA1_ARM_CE)) {
203 		EMSG("SHA1 instructions are not supported");
204 		ce_supported = false;
205 	}
206 
207 	if (!feat_sha256_implemented() &&
208 	    IS_ENABLED(CFG_CRYPTO_SHA256_ARM_CE)) {
209 		EMSG("SHA256 instructions are not supported");
210 		ce_supported = false;
211 	}
212 
213 	/* Check aarch64 specific instructions */
214 	if (IS_ENABLED(CFG_ARM64_core)) {
215 		if (!feat_sha512_implemented() &&
216 		    IS_ENABLED(CFG_CRYPTO_SHA512_ARM_CE)) {
217 			EMSG("SHA512 instructions are not supported");
218 			ce_supported = false;
219 		}
220 
221 		if (!feat_sha3_implemented() &&
222 		    IS_ENABLED(CFG_CRYPTO_SHA3_ARM_CE)) {
223 			EMSG("SHA3 instructions are not supported");
224 			ce_supported = false;
225 		}
226 
227 		if (!feat_sm3_implemented() &&
228 		    IS_ENABLED(CFG_CRYPTO_SM3_ARM_CE)) {
229 			EMSG("SM3 instructions are not supported");
230 			ce_supported = false;
231 		}
232 
233 		if (!feat_sm4_implemented() &&
234 		    IS_ENABLED(CFG_CRYPTO_SM4_ARM_CE)) {
235 			EMSG("SM4 instructions are not supported");
236 			ce_supported = false;
237 		}
238 	}
239 
240 	if (!ce_supported)
241 		panic("HW doesn't support CE instructions");
242 }
243 
244 #if defined(CFG_WITH_VFP)
245 
246 #ifdef ARM32
247 static void init_vfp_sec(void)
248 {
249 	uint32_t cpacr = read_cpacr();
250 
251 	/*
252 	 * Enable Advanced SIMD functionality.
253 	 * Enable use of D16-D31 of the Floating-point Extension register
254 	 * file.
255 	 */
256 	cpacr &= ~(CPACR_ASEDIS | CPACR_D32DIS);
257 	/*
258 	 * Enable usage of CP10 and CP11 (SIMD/VFP) in both kernel and user
259 	 * mode.
260 	 */
261 	cpacr |= CPACR_CP(10, CPACR_CP_ACCESS_FULL);
262 	cpacr |= CPACR_CP(11, CPACR_CP_ACCESS_FULL);
263 	write_cpacr(cpacr);
264 }
265 #endif /* ARM32 */
266 
267 #ifdef ARM64
268 static void init_vfp_sec(void)
269 {
270 	/* Not using VFP until thread_kernel_enable_vfp() */
271 	vfp_disable();
272 }
273 #endif /* ARM64 */
274 
275 #else /* CFG_WITH_VFP */
276 
277 static void init_vfp_sec(void)
278 {
279 	/* Not using VFP */
280 }
281 #endif
282 
283 #ifdef CFG_SECONDARY_INIT_CNTFRQ
284 static void primary_save_cntfrq(void)
285 {
286 	assert(cntfrq == 0);
287 
288 	/*
289 	 * CNTFRQ should be initialized on the primary CPU by a
290 	 * previous boot stage
291 	 */
292 	cntfrq = read_cntfrq();
293 }
294 
295 static void secondary_init_cntfrq(void)
296 {
297 	assert(cntfrq != 0);
298 	write_cntfrq(cntfrq);
299 }
300 #else /* CFG_SECONDARY_INIT_CNTFRQ */
301 static void primary_save_cntfrq(void)
302 {
303 }
304 
305 static void secondary_init_cntfrq(void)
306 {
307 }
308 #endif
309 
310 #ifdef CFG_CORE_SANITIZE_KADDRESS
311 static void init_run_constructors(void)
312 {
313 	const vaddr_t *ctor;
314 
315 	for (ctor = &__ctor_list; ctor < &__ctor_end; ctor++)
316 		((void (*)(void))(*ctor))();
317 }
318 
319 static void init_asan(void)
320 {
321 
322 	/*
323 	 * CFG_ASAN_SHADOW_OFFSET is also supplied as
324 	 * -fasan-shadow-offset=$(CFG_ASAN_SHADOW_OFFSET) to the compiler.
325 	 * Since all the values needed to calculate CFG_ASAN_SHADOW_OFFSET
326 	 * aren't available to make, we need to calculate it in advance and
327 	 * hard code it into the platform conf.mk. Here, where we have all
328 	 * the needed values, we double check that the compiler was supplied
329 	 * the correct value.
330 	 */
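	/*
	 * Rough sketch of the arithmetic (ASAN maps every 8 bytes of memory
	 * onto 1 shadow byte, shadow_addr = addr / 8 + offset): the TEE RAM
	 * VA range is split so that the first 8/9 holds code/data and the
	 * trailing 1/9 holds its shadow, which is why the shadow area starts
	 * at roughly TEE_RAM_START + (TEE_RAM_VA_SIZE * 8) / 9 and the
	 * offset handed to the compiler is that address minus
	 * TEE_RAM_START / 8, as checked below.
	 */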
331 
332 #define __ASAN_SHADOW_START \
333 	ROUNDUP(TEE_RAM_START + (TEE_RAM_VA_SIZE * 8) / 9 - 8, 8)
334 	assert(__ASAN_SHADOW_START == (vaddr_t)&__asan_shadow_start);
335 #define __CFG_ASAN_SHADOW_OFFSET \
336 	(__ASAN_SHADOW_START - (TEE_RAM_START / 8))
337 	COMPILE_TIME_ASSERT(CFG_ASAN_SHADOW_OFFSET == __CFG_ASAN_SHADOW_OFFSET);
338 #undef __ASAN_SHADOW_START
339 #undef __CFG_ASAN_SHADOW_OFFSET
340 
341 	/*
342 	 * Assign the area covered by the shadow memory: everything from the
343 	 * load address up to the beginning of the shadow area.
344 	 */
345 	asan_set_shadowed((void *)TEE_LOAD_ADDR, &__asan_shadow_start);
346 
347 	/*
348 	 * Add access to areas that aren't opened automatically by a
349 	 * constructor.
350 	 */
351 	asan_tag_access(&__ctor_list, &__ctor_end);
352 	asan_tag_access(__rodata_start, __rodata_end);
353 #ifdef CFG_WITH_PAGER
354 	asan_tag_access(__pageable_start, __pageable_end);
355 #endif /*CFG_WITH_PAGER*/
356 	asan_tag_access(__nozi_start, __nozi_end);
357 #ifdef ARM32
358 	asan_tag_access(__exidx_start, __exidx_end);
359 	asan_tag_access(__extab_start, __extab_end);
360 #endif
361 
362 	init_run_constructors();
363 
364 	/* Everything is tagged correctly, let's start address sanitizing. */
365 	asan_start();
366 }
367 #else /*CFG_CORE_SANITIZE_KADDRESS*/
368 static void init_asan(void)
369 {
370 }
371 #endif /*CFG_CORE_SANITIZE_KADDRESS*/
372 
373 #if defined(CFG_MEMTAG)
374 /* Called from entry_a64.S only when MEMTAG is configured */
375 void boot_init_memtag(void)
376 {
377 	memtag_init_ops(feat_mte_implemented());
378 }
379 
380 /* Called from entry_a64.S only when MEMTAG is configured */
381 void boot_clear_memtag(void)
382 {
383 	enum teecore_memtypes mtypes[] = {
384 		MEM_AREA_TEE_RAM, MEM_AREA_TEE_RAM_RW, MEM_AREA_NEX_RAM_RO,
385 		MEM_AREA_NEX_RAM_RW, MEM_AREA_TEE_ASAN, MEM_AREA_TA_RAM
386 	};
387 	vaddr_t s = 0;
388 	vaddr_t e = 0;
389 	size_t n = 0;
390 
391 	for (n = 0; n < ARRAY_SIZE(mtypes); n++) {
392 		core_mmu_get_mem_by_type(mtypes[n], &s, &e);
393 		if (e > s) {
394 			DMSG("Clearing tags for VA %#"PRIxVA"..%#"PRIxVA,
395 			     s, e - 1);
396 			memtag_set_tags((void *)s, e - s, 0);
397 		}
398 	}
399 }
400 #endif
401 
402 #ifdef CFG_WITH_PAGER
403 
404 #ifdef CFG_CORE_SANITIZE_KADDRESS
405 static void carve_out_asan_mem(tee_mm_pool_t *pool)
406 {
407 	const size_t s = pool->hi - pool->lo;
408 	tee_mm_entry_t *mm;
409 	paddr_t apa = ASAN_MAP_PA;
410 	size_t asz = ASAN_MAP_SZ;
411 
412 	if (core_is_buffer_outside(apa, asz, pool->lo, s))
413 		return;
414 
415 	/* Reserve the shadow area */
416 	if (!core_is_buffer_inside(apa, asz, pool->lo, s)) {
417 		if (apa < pool->lo) {
418 			/*
419 			 * ASAN buffer is overlapping with the beginning of
420 			 * the pool.
421 			 */
422 			asz -= pool->lo - apa;
423 			apa = pool->lo;
424 		} else {
425 			/*
426 			 * ASAN buffer is overlapping with the end of the
427 			 * pool.
428 			 */
429 			asz = pool->hi - apa;
430 		}
431 	}
432 	mm = tee_mm_alloc2(pool, apa, asz);
433 	assert(mm);
434 }
435 #else
436 static void carve_out_asan_mem(tee_mm_pool_t *pool __unused)
437 {
438 }
439 #endif
440 
441 static void print_pager_pool_size(void)
442 {
443 	struct tee_pager_stats __maybe_unused stats;
444 
445 	tee_pager_get_stats(&stats);
446 	IMSG("Pager pool size: %zukB",
447 		stats.npages_all * SMALL_PAGE_SIZE / 1024);
448 }
449 
450 static void init_vcore(tee_mm_pool_t *mm_vcore)
451 {
452 	const vaddr_t begin = VCORE_START_VA;
453 	size_t size = TEE_RAM_VA_SIZE;
454 
455 #ifdef CFG_CORE_SANITIZE_KADDRESS
456 	/* Carve out ASAN memory, flat mapped after core memory */
457 	if (begin + size > ASAN_SHADOW_PA)
458 		size = ASAN_MAP_PA - begin;
459 #endif
460 
461 	if (!tee_mm_init(mm_vcore, begin, size, SMALL_PAGE_SHIFT,
462 			 TEE_MM_POOL_NO_FLAGS))
463 		panic("tee_mm_vcore init failed");
464 }
465 
466 /*
467  * With CFG_CORE_ASLR=y the init part is relocated very early during boot.
468  * The init part is also paged just as the rest of the normal paged code, with
469  * the difference that it's preloaded during boot. When the backing store
470  * is configured the entire paged binary is copied in place and then also
471  * the init part. Since the init part has been relocated (references to
472  * addresses updated to compensate for the new load address) this has to be
473  * undone for the hashes of those pages to match with the original binary.
474  *
475  * If CFG_CORE_ASLR=n, nothing needs to be done as the code/ro pages are
476  * unchanged.
477  */
478 static void undo_init_relocation(uint8_t *paged_store __maybe_unused)
479 {
480 #ifdef CFG_CORE_ASLR
481 	unsigned long *ptr = NULL;
482 	const uint32_t *reloc = NULL;
483 	const uint32_t *reloc_end = NULL;
484 	unsigned long offs = boot_mmu_config.map_offset;
485 	const struct boot_embdata *embdata = (const void *)__init_end;
486 	vaddr_t addr_end = (vaddr_t)__init_end - offs - TEE_LOAD_ADDR;
487 	vaddr_t addr_start = (vaddr_t)__init_start - offs - TEE_LOAD_ADDR;
488 
489 	reloc = (const void *)((vaddr_t)embdata + embdata->reloc_offset);
490 	reloc_end = reloc + embdata->reloc_len / sizeof(*reloc);
491 
492 	for (; reloc < reloc_end; reloc++) {
493 		if (*reloc < addr_start)
494 			continue;
495 		if (*reloc >= addr_end)
496 			break;
497 		ptr = (void *)(paged_store + *reloc - addr_start);
498 		*ptr -= offs;
499 	}
500 #endif
501 }
502 
503 static struct fobj *ro_paged_alloc(tee_mm_entry_t *mm, void *hashes,
504 				   void *store)
505 {
506 	const unsigned int num_pages = tee_mm_get_bytes(mm) / SMALL_PAGE_SIZE;
507 #ifdef CFG_CORE_ASLR
508 	unsigned int reloc_offs = (vaddr_t)__pageable_start - VCORE_START_VA;
509 	const struct boot_embdata *embdata = (const void *)__init_end;
510 	const void *reloc = __init_end + embdata->reloc_offset;
511 
512 	return fobj_ro_reloc_paged_alloc(num_pages, hashes, reloc_offs,
513 					 reloc, embdata->reloc_len, store);
514 #else
515 	return fobj_ro_paged_alloc(num_pages, hashes, store);
516 #endif
517 }
518 
519 static void init_runtime(unsigned long pageable_part)
520 {
521 	size_t n;
522 	size_t init_size = (size_t)(__init_end - __init_start);
523 	size_t pageable_start = (size_t)__pageable_start;
524 	size_t pageable_end = (size_t)__pageable_end;
525 	size_t pageable_size = pageable_end - pageable_start;
526 	vaddr_t tzsram_end = TZSRAM_BASE + TZSRAM_SIZE - TEE_LOAD_ADDR +
527 			     VCORE_START_VA;
528 	size_t hash_size = (pageable_size / SMALL_PAGE_SIZE) *
529 			   TEE_SHA256_HASH_SIZE;
530 	const struct boot_embdata *embdata = (const void *)__init_end;
531 	const void *tmp_hashes = NULL;
532 	tee_mm_entry_t *mm = NULL;
533 	struct fobj *fobj = NULL;
534 	uint8_t *paged_store = NULL;
535 	uint8_t *hashes = NULL;
536 
537 	assert(pageable_size % SMALL_PAGE_SIZE == 0);
538 	assert(embdata->total_len >= embdata->hashes_offset +
539 				     embdata->hashes_len);
540 	assert(hash_size == embdata->hashes_len);
541 
542 	tmp_hashes = __init_end + embdata->hashes_offset;
543 
544 	init_asan();
545 
546 	/* Add heap2 first as heap1 may be too small as initial bget pool */
547 	malloc_add_pool(__heap2_start, __heap2_end - __heap2_start);
548 	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
549 
550 	/*
551 	 * This needs to be initialized early to support address lookup
552 	 * in MEM_AREA_TEE_RAM
553 	 */
554 	tee_pager_early_init();
555 
556 	hashes = malloc(hash_size);
557 	IMSG_RAW("\n");
558 	IMSG("Pager is enabled. Hashes: %zu bytes", hash_size);
559 	assert(hashes);
560 	asan_memcpy_unchecked(hashes, tmp_hashes, hash_size);
561 
562 	/*
563 	 * Need tee_mm_sec_ddr initialized to be able to allocate secure
564 	 * DDR below.
565 	 */
566 	core_mmu_init_ta_ram();
567 
568 	carve_out_asan_mem(&tee_mm_sec_ddr);
569 
570 	mm = tee_mm_alloc(&tee_mm_sec_ddr, pageable_size);
571 	assert(mm);
572 	paged_store = phys_to_virt(tee_mm_get_smem(mm), MEM_AREA_TA_RAM,
573 				   pageable_size);
574 	/*
575 	 * Load pageable part in the dedicated allocated area:
576 	 * - Move pageable non-init part into pageable area. Note bootloader
577 	 *   may have loaded it anywhere in TA RAM hence use memmove().
578 	 * - Copy pageable init part from current location into pageable area.
579 	 */
580 	memmove(paged_store + init_size,
581 		phys_to_virt(pageable_part,
582 			     core_mmu_get_type_by_pa(pageable_part),
583 			     __pageable_part_end - __pageable_part_start),
584 		__pageable_part_end - __pageable_part_start);
585 	asan_memcpy_unchecked(paged_store, __init_start, init_size);
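	/*
	 * Resulting layout of paged_store (a sketch):
	 *
	 *   paged_store                     paged_store + init_size
	 *   |                               |
	 *   [ init part (prepaged, copied) ][ pageable non-init part ]
	 *
	 * so page n of the paged binary is at paged_store +
	 * n * SMALL_PAGE_SIZE, matching the per-page hashes checked below.
	 */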
586 	/*
587 	 * Undo eventual relocation for the init part so the hash checks
588 	 * can pass.
589 	 */
590 	undo_init_relocation(paged_store);
591 
592 	/* Check that the hashes of what's in the pageable area are OK */
593 	DMSG("Checking hashes of pageable area");
594 	for (n = 0; (n * SMALL_PAGE_SIZE) < pageable_size; n++) {
595 		const uint8_t *hash = hashes + n * TEE_SHA256_HASH_SIZE;
596 		const uint8_t *page = paged_store + n * SMALL_PAGE_SIZE;
597 		TEE_Result res;
598 
599 		DMSG("hash pg_idx %zu hash %p page %p", n, hash, page);
600 		res = hash_sha256_check(hash, page, SMALL_PAGE_SIZE);
601 		if (res != TEE_SUCCESS) {
602 			EMSG("Hash failed for page %zu at %p: res 0x%x",
603 			     n, (void *)page, res);
604 			panic();
605 		}
606 	}
607 
608 	/*
609 	 * Assert prepaged init sections are page aligned so that nothing
610 	 * trails uninitialized at the end of the premapped init area.
611 	 */
612 	assert(!(init_size & SMALL_PAGE_MASK));
613 
614 	/*
615 	 * Initialize the virtual memory pool used for main_mmu_l2_ttb which
616 	 * is supplied to tee_pager_init() below.
617 	 */
618 	init_vcore(&tee_mm_vcore);
619 
620 	/*
621 	 * Assign the pager alias area at the end of the small page block
622 	 * that the rest of the binary is loaded into. We're taking more than
623 	 * needed, but we're guaranteed to not need more than the physical
624 	 * amount of TZSRAM.
625 	 */
626 	mm = tee_mm_alloc2(&tee_mm_vcore,
627 			   (vaddr_t)tee_mm_vcore.lo +
628 			   tee_mm_vcore.size - TZSRAM_SIZE,
629 			   TZSRAM_SIZE);
630 	assert(mm);
631 	tee_pager_set_alias_area(mm);
632 
633 	/*
634 	 * Claim virtual memory which isn't paged.
635 	 * Linear memory (flat-mapped core memory) ends there.
636 	 */
637 	mm = tee_mm_alloc2(&tee_mm_vcore, VCORE_UNPG_RX_PA,
638 			   (vaddr_t)(__pageable_start - VCORE_UNPG_RX_PA));
639 	assert(mm);
640 
641 	/*
642 	 * Allocate virtual memory for the pageable area and let the pager
643 	 * take charge of all the pages already assigned to that memory.
644 	 */
645 	mm = tee_mm_alloc2(&tee_mm_vcore, (vaddr_t)__pageable_start,
646 			   pageable_size);
647 	assert(mm);
648 	fobj = ro_paged_alloc(mm, hashes, paged_store);
649 	assert(fobj);
650 	tee_pager_add_core_region(tee_mm_get_smem(mm), PAGED_REGION_TYPE_RO,
651 				  fobj);
652 	fobj_put(fobj);
653 
654 	tee_pager_add_pages(pageable_start, init_size / SMALL_PAGE_SIZE, false);
655 	tee_pager_add_pages(pageable_start + init_size,
656 			    (pageable_size - init_size) / SMALL_PAGE_SIZE,
657 			    true);
658 	if (pageable_end < tzsram_end)
659 		tee_pager_add_pages(pageable_end, (tzsram_end - pageable_end) /
660 						   SMALL_PAGE_SIZE, true);
661 
662 	/*
663 	 * There may be physical pages in TZSRAM before the core load address.
664 	 * These pages can be added to the physical pages pool of the pager.
665 	 * This setup may happen when the secure bootloader runs in TZSRAM
666 	 * and its memory can be reused by OP-TEE once boot stages complete.
667 	 */
668 	tee_pager_add_pages(tee_mm_vcore.lo,
669 			(VCORE_UNPG_RX_PA - tee_mm_vcore.lo) / SMALL_PAGE_SIZE,
670 			true);
671 
672 	print_pager_pool_size();
673 }
674 #else
675 
676 static void init_runtime(unsigned long pageable_part __unused)
677 {
678 	init_asan();
679 
680 	/*
681 	 * By default the whole of OP-TEE uses malloc, so we need to initialize
682 	 * it early. But when virtualization is enabled, malloc is used
683 	 * only by the TEE runtime, so malloc should be initialized later,
684 	 * separately for every virtual partition. Core code uses nex_malloc
685 	 * instead.
686 	 */
687 #ifdef CFG_NS_VIRTUALIZATION
688 	nex_malloc_add_pool(__nex_heap_start, __nex_heap_end -
689 					      __nex_heap_start);
690 #else
691 	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
692 #endif
693 
694 	IMSG_RAW("\n");
695 }
696 #endif
697 
698 #if defined(CFG_DT)
699 static int add_optee_dt_node(struct dt_descriptor *dt)
700 {
701 	int offs;
702 	int ret;
703 
704 	if (fdt_path_offset(dt->blob, "/firmware/optee") >= 0) {
705 		DMSG("OP-TEE Device Tree node already exists!");
706 		return 0;
707 	}
708 
709 	offs = fdt_path_offset(dt->blob, "/firmware");
710 	if (offs < 0) {
711 		offs = add_dt_path_subnode(dt, "/", "firmware");
712 		if (offs < 0)
713 			return -1;
714 	}
715 
716 	offs = fdt_add_subnode(dt->blob, offs, "optee");
717 	if (offs < 0)
718 		return -1;
719 
720 	ret = fdt_setprop_string(dt->blob, offs, "compatible",
721 				 "linaro,optee-tz");
722 	if (ret < 0)
723 		return -1;
724 	ret = fdt_setprop_string(dt->blob, offs, "method", "smc");
725 	if (ret < 0)
726 		return -1;
727 
728 	if (CFG_CORE_ASYNC_NOTIF_GIC_INTID) {
729 		/*
730 		 * The format of the interrupt property is defined by the
731 		 * binding of the interrupt domain root. In this case it's
732 		 * one Arm GIC v1, v2 or v3 so we must be compatible with
733 		 * these.
734 		 *
735 		 * An SPI type of interrupt is indicated with a 0 in the
736 		 * first cell. A PPI type is indicated with value 1.
737 		 *
738 		 * The interrupt number goes in the second cell where
739 		 * SPIs range from 0 to 987 and PPIs range from 0 to 15.
740 		 *
741 		 * Flags are passed in the third cell.
742 		 */
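		/*
		 * For example, with a hypothetical
		 * CFG_CORE_ASYNC_NOTIF_GIC_INTID of 40 (an SPI since it is
		 * >= GIC_SPI_BASE), the code below encodes roughly
		 *   interrupts = <0 8 IRQ_TYPE_EDGE_RISING>;
		 * that is, SPI number 40 - 32 = 8, edge-rising.
		 */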
743 		uint32_t itr_trigger = 0;
744 		uint32_t itr_type = 0;
745 		uint32_t itr_id = 0;
746 		uint32_t val[3] = { };
747 
748 		/* PPI are visible only in current CPU cluster */
749 		static_assert(IS_ENABLED(CFG_CORE_FFA) ||
750 			      !CFG_CORE_ASYNC_NOTIF_GIC_INTID ||
751 			      (CFG_CORE_ASYNC_NOTIF_GIC_INTID >=
752 			       GIC_SPI_BASE) ||
753 			      ((CFG_TEE_CORE_NB_CORE <= 8) &&
754 			       (CFG_CORE_ASYNC_NOTIF_GIC_INTID >=
755 				GIC_PPI_BASE)));
756 
757 		if (CFG_CORE_ASYNC_NOTIF_GIC_INTID >= GIC_SPI_BASE) {
758 			itr_type = GIC_SPI;
759 			itr_id = CFG_CORE_ASYNC_NOTIF_GIC_INTID - GIC_SPI_BASE;
760 			itr_trigger = IRQ_TYPE_EDGE_RISING;
761 		} else {
762 			itr_type = GIC_PPI;
763 			itr_id = CFG_CORE_ASYNC_NOTIF_GIC_INTID - GIC_PPI_BASE;
764 			itr_trigger = IRQ_TYPE_EDGE_RISING |
765 				      GIC_CPU_MASK_SIMPLE(CFG_TEE_CORE_NB_CORE);
766 		}
767 
768 		val[0] = TEE_U32_TO_BIG_ENDIAN(itr_type);
769 		val[1] = TEE_U32_TO_BIG_ENDIAN(itr_id);
770 		val[2] = TEE_U32_TO_BIG_ENDIAN(itr_trigger);
771 
772 		ret = fdt_setprop(dt->blob, offs, "interrupts", val,
773 				  sizeof(val));
774 		if (ret < 0)
775 			return -1;
776 	}
777 	return 0;
778 }
779 
780 #ifdef CFG_PSCI_ARM32
781 static int append_psci_compatible(void *fdt, int offs, const char *str)
782 {
783 	return fdt_appendprop(fdt, offs, "compatible", str, strlen(str) + 1);
784 }
785 
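/*
 * A sketch of the node dt_add_psci_node() produces (the function IDs are
 * the PSCI constants from <sm/psci.h>):
 *
 *	psci {
 *		compatible = "arm,psci-1.0", "arm,psci-0.2", "arm,psci";
 *		method = "smc";
 *		cpu_suspend = <PSCI_CPU_SUSPEND>;
 *		cpu_off = <PSCI_CPU_OFF>;
 *		cpu_on = <PSCI_CPU_ON>;
 *		sys_poweroff = <PSCI_SYSTEM_OFF>;
 *		sys_reset = <PSCI_SYSTEM_RESET>;
 *	};
 */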
786 static int dt_add_psci_node(struct dt_descriptor *dt)
787 {
788 	int offs;
789 
790 	if (fdt_path_offset(dt->blob, "/psci") >= 0) {
791 		DMSG("PSCI Device Tree node already exists!");
792 		return 0;
793 	}
794 
795 	offs = add_dt_path_subnode(dt, "/", "psci");
796 	if (offs < 0)
797 		return -1;
798 	if (append_psci_compatible(dt->blob, offs, "arm,psci-1.0"))
799 		return -1;
800 	if (append_psci_compatible(dt->blob, offs, "arm,psci-0.2"))
801 		return -1;
802 	if (append_psci_compatible(dt->blob, offs, "arm,psci"))
803 		return -1;
804 	if (fdt_setprop_string(dt->blob, offs, "method", "smc"))
805 		return -1;
806 	if (fdt_setprop_u32(dt->blob, offs, "cpu_suspend", PSCI_CPU_SUSPEND))
807 		return -1;
808 	if (fdt_setprop_u32(dt->blob, offs, "cpu_off", PSCI_CPU_OFF))
809 		return -1;
810 	if (fdt_setprop_u32(dt->blob, offs, "cpu_on", PSCI_CPU_ON))
811 		return -1;
812 	if (fdt_setprop_u32(dt->blob, offs, "sys_poweroff", PSCI_SYSTEM_OFF))
813 		return -1;
814 	if (fdt_setprop_u32(dt->blob, offs, "sys_reset", PSCI_SYSTEM_RESET))
815 		return -1;
816 	return 0;
817 }
818 
819 static int check_node_compat_prefix(struct dt_descriptor *dt, int offs,
820 				    const char *prefix)
821 {
822 	const size_t prefix_len = strlen(prefix);
823 	size_t l;
824 	int plen;
825 	const char *prop;
826 
827 	prop = fdt_getprop(dt->blob, offs, "compatible", &plen);
828 	if (!prop)
829 		return -1;
830 
831 	while (plen > 0) {
832 		if (memcmp(prop, prefix, prefix_len) == 0)
833 			return 0; /* match */
834 
835 		l = strlen(prop) + 1;
836 		prop += l;
837 		plen -= l;
838 	}
839 
840 	return -1;
841 }
842 
843 static int dt_add_psci_cpu_enable_methods(struct dt_descriptor *dt)
844 {
845 	int offs = 0;
846 
847 	while (1) {
848 		offs = fdt_next_node(dt->blob, offs, NULL);
849 		if (offs < 0)
850 			break;
851 		if (fdt_getprop(dt->blob, offs, "enable-method", NULL))
852 			continue; /* already set */
853 		if (check_node_compat_prefix(dt, offs, "arm,cortex-a"))
854 			continue; /* no compatible */
855 		if (fdt_setprop_string(dt->blob, offs, "enable-method", "psci"))
856 			return -1;
857 		/* Need to restart scanning as offsets may have changed */
858 		offs = 0;
859 	}
860 	return 0;
861 }
862 
863 static int config_psci(struct dt_descriptor *dt)
864 {
865 	if (dt_add_psci_node(dt))
866 		return -1;
867 	return dt_add_psci_cpu_enable_methods(dt);
868 }
869 #else
870 static int config_psci(struct dt_descriptor *dt __unused)
871 {
872 	return 0;
873 }
874 #endif /*CFG_PSCI_ARM32*/
875 
876 #ifdef CFG_CORE_DYN_SHM
877 static uint64_t get_dt_val_and_advance(const void *data, size_t *offs,
878 				       uint32_t cell_size)
879 {
880 	uint64_t rv = 0;
881 
882 	if (cell_size == 1) {
883 		uint32_t v;
884 
885 		memcpy(&v, (const uint8_t *)data + *offs, sizeof(v));
886 		*offs += sizeof(v);
887 		rv = fdt32_to_cpu(v);
888 	} else {
889 		uint64_t v;
890 
891 		memcpy(&v, (const uint8_t *)data + *offs, sizeof(v));
892 		*offs += sizeof(v);
893 		rv = fdt64_to_cpu(v);
894 	}
895 
896 	return rv;
897 }
898 
899 /*
900  * Find all non-secure memory from DT. Memory marked inaccessible by Secure
901  * World is ignored since it cannot be mapped to be used as dynamic shared
902  * memory.
903  */
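/*
 * For example, a hypothetical node such as
 *
 *	memory@80000000 {
 *		device_type = "memory";
 *		reg = <0x0 0x80000000 0x0 0x40000000>;
 *	};
 *
 * (with #address-cells = <2> and #size-cells = <2> in the root node)
 * yields one element with addr 0x80000000 and size 0x40000000, typed
 * MEM_AREA_DDR_OVERALL.
 */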
904 static int get_nsec_memory_helper(void *fdt, struct core_mmu_phys_mem *mem)
905 {
906 	const uint8_t *prop = NULL;
907 	uint64_t a = 0;
908 	uint64_t l = 0;
909 	size_t prop_offs = 0;
910 	size_t prop_len = 0;
911 	int elems_total = 0;
912 	int addr_size = 0;
913 	int len_size = 0;
914 	int offs = 0;
915 	size_t n = 0;
916 	int len = 0;
917 
918 	addr_size = fdt_address_cells(fdt, 0);
919 	if (addr_size < 0)
920 		return 0;
921 
922 	len_size = fdt_size_cells(fdt, 0);
923 	if (len_size < 0)
924 		return 0;
925 
926 	while (true) {
927 		offs = fdt_node_offset_by_prop_value(fdt, offs, "device_type",
928 						     "memory",
929 						     sizeof("memory"));
930 		if (offs < 0)
931 			break;
932 
933 		if (fdt_get_status(fdt, offs) != (DT_STATUS_OK_NSEC |
934 						   DT_STATUS_OK_SEC))
935 			continue;
936 
937 		prop = fdt_getprop(fdt, offs, "reg", &len);
938 		if (!prop)
939 			continue;
940 
941 		prop_len = len;
942 		for (n = 0, prop_offs = 0; prop_offs < prop_len; n++) {
943 			a = get_dt_val_and_advance(prop, &prop_offs, addr_size);
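			/*
			 * A truncated "reg" entry: an address without a
			 * matching size, drop it from the count.
			 */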
944 			if (prop_offs >= prop_len) {
945 				n--;
946 				break;
947 			}
948 
949 			l = get_dt_val_and_advance(prop, &prop_offs, len_size);
950 			if (mem) {
951 				mem->type = MEM_AREA_DDR_OVERALL;
952 				mem->addr = a;
953 				mem->size = l;
954 				mem++;
955 			}
956 		}
957 
958 		elems_total += n;
959 	}
960 
961 	return elems_total;
962 }
963 
964 static struct core_mmu_phys_mem *get_nsec_memory(void *fdt, size_t *nelems)
965 {
966 	struct core_mmu_phys_mem *mem = NULL;
967 	int elems_total = 0;
968 
969 	elems_total = get_nsec_memory_helper(fdt, NULL);
970 	if (elems_total <= 0)
971 		return NULL;
972 
973 	mem = nex_calloc(elems_total, sizeof(*mem));
974 	if (!mem)
975 		panic();
976 
977 	elems_total = get_nsec_memory_helper(fdt, mem);
978 	assert(elems_total > 0);
979 
980 	*nelems = elems_total;
981 
982 	return mem;
983 }
984 #endif /*CFG_CORE_DYN_SHM*/
985 
986 #ifdef CFG_CORE_RESERVED_SHM
987 static int mark_static_shm_as_reserved(struct dt_descriptor *dt)
988 {
989 	vaddr_t shm_start;
990 	vaddr_t shm_end;
991 
992 	core_mmu_get_mem_by_type(MEM_AREA_NSEC_SHM, &shm_start, &shm_end);
993 	if (shm_start != shm_end)
994 		return add_res_mem_dt_node(dt, "optee_shm",
995 					   virt_to_phys((void *)shm_start),
996 					   shm_end - shm_start);
997 
998 	DMSG("No SHM configured");
999 	return -1;
1000 }
1001 #endif /*CFG_CORE_RESERVED_SHM*/
1002 
1003 static int mark_tzdram_as_reserved(struct dt_descriptor *dt)
1004 {
1005 	return add_res_mem_dt_node(dt, "optee_core", CFG_TZDRAM_START,
1006 				   CFG_TZDRAM_SIZE);
1007 }
1008 
1009 static void update_external_dt(void)
1010 {
1011 	struct dt_descriptor *dt = get_external_dt_desc();
1012 
1013 	if (!dt || !dt->blob)
1014 		return;
1015 
1016 	if (!IS_ENABLED(CFG_CORE_FFA) && add_optee_dt_node(dt))
1017 		panic("Failed to add OP-TEE Device Tree node");
1018 
1019 	if (config_psci(dt))
1020 		panic("Failed to config PSCI");
1021 
1022 #ifdef CFG_CORE_RESERVED_SHM
1023 	if (mark_static_shm_as_reserved(dt))
1024 		panic("Failed to config non-secure memory");
1025 #endif
1026 
1027 	if (mark_tzdram_as_reserved(dt))
1028 		panic("Failed to config secure memory");
1029 }
1030 #else /*CFG_DT*/
1031 static void update_external_dt(void)
1032 {
1033 }
1034 
1035 #ifdef CFG_CORE_DYN_SHM
1036 static struct core_mmu_phys_mem *get_nsec_memory(void *fdt __unused,
1037 						 size_t *nelems __unused)
1038 {
1039 	return NULL;
1040 }
1041 #endif /*CFG_CORE_DYN_SHM*/
1042 #endif /*!CFG_DT*/
1043 
1044 #if defined(CFG_CORE_FFA)
1045 void *get_manifest_dt(void)
1046 {
1047 	return manifest_dt;
1048 }
1049 
1050 static void reinit_manifest_dt(void)
1051 {
1052 	paddr_t pa = (unsigned long)manifest_dt;
1053 	void *fdt = NULL;
1054 	int ret = 0;
1055 
1056 	if (!pa) {
1057 		EMSG("No manifest DT found");
1058 		return;
1059 	}
1060 
1061 	fdt = core_mmu_add_mapping(MEM_AREA_MANIFEST_DT, pa, CFG_DTB_MAX_SIZE);
1062 	if (!fdt)
1063 		panic("Failed to map manifest DT");
1064 
1065 	manifest_dt = fdt;
1066 
1067 	ret = fdt_check_full(fdt, CFG_DTB_MAX_SIZE);
1068 	if (ret < 0) {
1069 		EMSG("Invalid manifest Device Tree at %#lx: error %d", pa, ret);
1070 		panic();
1071 	}
1072 
1073 	IMSG("manifest DT found");
1074 }
1075 
1076 static TEE_Result release_manifest_dt(void)
1077 {
1078 	if (!manifest_dt)
1079 		return TEE_SUCCESS;
1080 
1081 	if (core_mmu_remove_mapping(MEM_AREA_MANIFEST_DT, manifest_dt,
1082 				    CFG_DTB_MAX_SIZE))
1083 		panic("Failed to remove temporary manifest DT mapping");
1084 	manifest_dt = NULL;
1085 
1086 	return TEE_SUCCESS;
1087 }
1088 
1089 boot_final(release_manifest_dt);
1090 #else
1091 void *get_manifest_dt(void)
1092 {
1093 	return NULL;
1094 }
1095 
1096 static void reinit_manifest_dt(void)
1097 {
1098 }
1099 #endif /*CFG_CORE_FFA*/
1100 
1101 #ifdef CFG_CORE_DYN_SHM
1102 static void discover_nsec_memory(void)
1103 {
1104 	struct core_mmu_phys_mem *mem;
1105 	const struct core_mmu_phys_mem *mem_begin = NULL;
1106 	const struct core_mmu_phys_mem *mem_end = NULL;
1107 	size_t nelems;
1108 	void *fdt = get_external_dt();
1109 
1110 	if (fdt) {
1111 		mem = get_nsec_memory(fdt, &nelems);
1112 		if (mem) {
1113 			core_mmu_set_discovered_nsec_ddr(mem, nelems);
1114 			return;
1115 		}
1116 
1117 		DMSG("No non-secure memory found in external DT");
1118 	}
1119 
1120 	fdt = get_embedded_dt();
1121 	if (fdt) {
1122 		mem = get_nsec_memory(fdt, &nelems);
1123 		if (mem) {
1124 			core_mmu_set_discovered_nsec_ddr(mem, nelems);
1125 			return;
1126 		}
1127 
1128 		DMSG("No non-secure memory found in embedded DT");
1129 	}
1130 
1131 	mem_begin = phys_ddr_overall_begin;
1132 	mem_end = phys_ddr_overall_end;
1133 	nelems = mem_end - mem_begin;
1134 	if (nelems) {
1135 		/*
1136 		 * Platform cannot use both register_ddr() and the now
1137 		 * deprecated register_dynamic_shm().
1138 		 */
1139 		assert(phys_ddr_overall_compat_begin ==
1140 		       phys_ddr_overall_compat_end);
1141 	} else {
1142 		mem_begin = phys_ddr_overall_compat_begin;
1143 		mem_end = phys_ddr_overall_compat_end;
1144 		nelems = mem_end - mem_begin;
1145 		if (!nelems)
1146 			return;
1147 		DMSG("Warning register_dynamic_shm() is deprecated, please use register_ddr() instead");
1148 	}
1149 
1150 	mem = nex_calloc(nelems, sizeof(*mem));
1151 	if (!mem)
1152 		panic();
1153 
1154 	memcpy(mem, phys_ddr_overall_begin, sizeof(*mem) * nelems);
1155 	core_mmu_set_discovered_nsec_ddr(mem, nelems);
1156 }
1157 #else /*CFG_CORE_DYN_SHM*/
1158 static void discover_nsec_memory(void)
1159 {
1160 }
1161 #endif /*!CFG_CORE_DYN_SHM*/
1162 
1163 #ifdef CFG_NS_VIRTUALIZATION
1164 static TEE_Result virt_init_heap(void)
1165 {
1166 	/* We need to initialize pool for every virtual guest partition */
1167 	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
1168 
1169 	return TEE_SUCCESS;
1170 }
1171 preinit_early(virt_init_heap);
1172 #endif
1173 
1174 void init_tee_runtime(void)
1175 {
1176 #ifndef CFG_WITH_PAGER
1177 	/* Pager initializes TA RAM early */
1178 	core_mmu_init_ta_ram();
1179 #endif
1180 	/*
1181 	 * With virtualization we call this function when creating the
1182 	 * OP-TEE partition instead.
1183 	 */
1184 	if (!IS_ENABLED(CFG_NS_VIRTUALIZATION))
1185 		call_preinitcalls();
1186 	call_initcalls();
1187 
1188 	/*
1189 	 * These two functions use crypto_rng_read() to initialize the
1190 	 * pauth keys. Once call_initcalls() returns we're guaranteed that
1191 	 * crypto_rng_read() is ready to be used.
1192 	 */
1193 	thread_init_core_local_pauth_keys();
1194 	thread_init_thread_pauth_keys();
1195 
1196 	/*
1197 	 * Reinitialize canaries around the stacks with crypto_rng_read().
1198 	 *
1199 	 * TODO: Updating canaries when CFG_NS_VIRTUALIZATION is enabled will
1200 	 * require synchronization between thread_check_canaries() and
1201 	 * thread_update_canaries().
1202 	 */
1203 	if (!IS_ENABLED(CFG_NS_VIRTUALIZATION))
1204 		thread_update_canaries();
1205 }
1206 
1207 static void init_primary(unsigned long pageable_part, unsigned long nsec_entry)
1208 {
1209 	thread_init_core_local_stacks();
1210 	/*
1211 	 * Mask asynchronous exceptions before switch to the thread vector
1212 	 * as the thread handler requires those to be masked while
1213 	 * executing with the temporary stack. The thread subsystem also
1214 	 * asserts that the foreign interrupts are blocked when using most of
1215 	 * its functions.
1216 	 */
1217 	thread_set_exceptions(THREAD_EXCP_ALL);
1218 	primary_save_cntfrq();
1219 	init_vfp_sec();
1220 
1221 	if (IS_ENABLED(CFG_CRYPTO_WITH_CE))
1222 		check_crypto_extensions();
1223 
1224 	/*
1225 	 * Pager: init_runtime() calls thread_kernel_enable_vfp() so we must
1226 	 * set a current thread right now to avoid a chicken-and-egg problem
1227 	 * (thread_init_boot_thread() sets the current thread but needs
1228 	 * things set by init_runtime()).
1229 	 */
1230 	thread_get_core_local()->curr_thread = 0;
1231 	init_runtime(pageable_part);
1232 
1233 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
1234 		/*
1235 		 * Virtualization: We can't initialize threads right now because
1236 		 * threads belong to the "tee" part and will be initialized
1237 		 * separately for each new virtual guest. So, we'll clear
1238 		 * "curr_thread" and call it done.
1239 		 */
1240 		thread_get_core_local()->curr_thread = -1;
1241 	} else {
1242 		thread_init_boot_thread();
1243 	}
1244 	thread_init_primary();
1245 	thread_init_per_cpu();
1246 	init_sec_mon(nsec_entry);
1247 }
1248 
1249 static bool cpu_nmfi_enabled(void)
1250 {
1251 #if defined(ARM32)
1252 	return read_sctlr() & SCTLR_NMFI;
1253 #else
1254 	/* Note: ARM64 does not feature non-maskable FIQ support. */
1255 	return false;
1256 #endif
1257 }
1258 
1259 /*
1260  * Note: this function is weak just to make it possible to exclude it from
1261  * the unpaged area.
1262  */
1263 void __weak boot_init_primary_late(unsigned long fdt __unused,
1264 				   unsigned long manifest __unused)
1265 {
1266 	size_t fdt_size = CFG_DTB_MAX_SIZE;
1267 
1268 	if (IS_ENABLED(CFG_TRANSFER_LIST) && mapped_tl) {
1269 		struct transfer_list_entry *tl_e = NULL;
1270 
1271 		tl_e = transfer_list_find(mapped_tl, TL_TAG_FDT);
1272 		if (tl_e)
1273 			fdt_size = tl_e->data_size;
1274 	}
1275 
1276 	init_external_dt(boot_arg_fdt, fdt_size);
1277 	reinit_manifest_dt();
1278 #ifdef CFG_CORE_SEL1_SPMC
1279 	tpm_map_log_area(get_manifest_dt());
1280 #else
1281 	tpm_map_log_area(get_external_dt());
1282 #endif
1283 	discover_nsec_memory();
1284 	update_external_dt();
1285 	configure_console_from_dt();
1286 
1287 	IMSG("OP-TEE version: %s", core_v_str);
1288 	if (IS_ENABLED(CFG_INSECURE)) {
1289 		IMSG("WARNING: This OP-TEE configuration might be insecure!");
1290 		IMSG("WARNING: Please check https://optee.readthedocs.io/en/latest/architecture/porting_guidelines.html");
1291 	}
1292 	IMSG("Primary CPU initializing");
1293 #ifdef CFG_CORE_ASLR
1294 	DMSG("Executing at offset %#lx with virtual load address %#"PRIxVA,
1295 	     (unsigned long)boot_mmu_config.map_offset, VCORE_START_VA);
1296 #endif
1297 	if (IS_ENABLED(CFG_MEMTAG))
1298 		DMSG("Memory tagging %s",
1299 		     memtag_is_enabled() ?  "enabled" : "disabled");
1300 
1301 	/* Check if platform needs NMFI workaround */
1302 	if (cpu_nmfi_enabled())	{
1303 		if (!IS_ENABLED(CFG_CORE_WORKAROUND_ARM_NMFI))
1304 			IMSG("WARNING: This ARM core has NMFI enabled, please apply workaround!");
1305 	} else {
1306 		if (IS_ENABLED(CFG_CORE_WORKAROUND_ARM_NMFI))
1307 			IMSG("WARNING: This ARM core does not have NMFI enabled, no need for workaround");
1308 	}
1309 
1310 	boot_primary_init_intc();
1311 	init_vfp_nsec();
1312 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
1313 		IMSG("Initializing virtualization support");
1314 		core_mmu_init_virtualization();
1315 	} else {
1316 		init_tee_runtime();
1317 	}
1318 	call_finalcalls();
1319 	IMSG("Primary CPU switching to normal world boot");
1320 }
1321 
1322 static void init_secondary_helper(unsigned long nsec_entry)
1323 {
1324 	IMSG("Secondary CPU %zu initializing", get_core_pos());
1325 
1326 	/*
1327 	 * Mask asynchronous exceptions before switch to the thread vector
1328 	 * as the thread handler requires those to be masked while
1329 	 * executing with the temporary stack. The thread subsystem also
1330 	 * asserts that the foreign interrupts are blocked when using most of
1331 	 * its functions.
1332 	 */
1333 	thread_set_exceptions(THREAD_EXCP_ALL);
1334 
1335 	secondary_init_cntfrq();
1336 	thread_init_per_cpu();
1337 	init_sec_mon(nsec_entry);
1338 	boot_secondary_init_intc();
1339 	init_vfp_sec();
1340 	init_vfp_nsec();
1341 
1342 	IMSG("Secondary CPU %zu switching to normal world boot", get_core_pos());
1343 }
1344 
1345 /*
1346  * Note: this function is weak just to make it possible to exclude it from
1347  * the unpaged area so that it lies in the init area.
1348  */
1349 void __weak boot_init_primary_early(void)
1350 {
1351 	unsigned long pageable_part = 0;
1352 	unsigned long e = PADDR_INVALID;
1353 	struct transfer_list_entry *tl_e = NULL;
1354 
1355 	if (!IS_ENABLED(CFG_WITH_ARM_TRUSTED_FW))
1356 		e = boot_arg_nsec_entry;
1357 
1358 	if (IS_ENABLED(CFG_TRANSFER_LIST) && boot_arg_transfer_list) {
1359 		/* map and save the TL */
1360 		mapped_tl = transfer_list_map(boot_arg_transfer_list);
1361 		if (!mapped_tl)
1362 			panic("Failed to map transfer list");
1363 
1364 		transfer_list_dump(mapped_tl);
1365 		tl_e = transfer_list_find(mapped_tl, TL_TAG_FDT);
1366 		if (tl_e) {
1367 			/*
1368 			 * Expand the data size of the DTB entry to the maximum
1369 			 * allocable mapped memory to reserve sufficient space
1370 			 * for inserting new nodes and to avoid potentially
1371 			 * corrupting the following entries.
1372 			 */
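			/*
			 * As a worked example with made-up numbers: if the
			 * mapped TL has max_size 0x10000, currently uses
			 * size 0x9000 and the FDT entry holds 0x3000 bytes,
			 * the entry grows to 0x3000 + (0x10000 - 0x9000) =
			 * 0xa000 bytes.
			 */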
1373 			uint32_t dtb_max_sz = mapped_tl->max_size -
1374 					      mapped_tl->size + tl_e->data_size;
1375 
1376 			if (!transfer_list_set_data_size(mapped_tl, tl_e,
1377 							 dtb_max_sz)) {
1378 				EMSG("Failed to extend DTB size to %#"PRIx32,
1379 				     dtb_max_sz);
1380 				panic();
1381 			}
1382 		}
1383 		tl_e = transfer_list_find(mapped_tl, TL_TAG_OPTEE_PAGABLE_PART);
1384 	}
1385 
1386 	if (IS_ENABLED(CFG_WITH_PAGER)) {
1387 		if (IS_ENABLED(CFG_TRANSFER_LIST) && tl_e)
1388 			pageable_part =
1389 				get_le64(transfer_list_entry_data(tl_e));
1390 		else
1391 			pageable_part = boot_arg_pageable_part;
1392 	}
1393 
1394 	init_primary(pageable_part, e);
1395 }
1396 
1397 static void boot_save_transfer_list(unsigned long zero_reg,
1398 				    unsigned long transfer_list,
1399 				    unsigned long fdt)
1400 {
1401 	struct transfer_list_header *tl = (void *)transfer_list;
1402 	struct transfer_list_entry *tl_e = NULL;
1403 
1404 	if (zero_reg != 0)
1405 		panic("Incorrect transfer list register convention");
1406 
1407 	if (!IS_ALIGNED_WITH_TYPE(transfer_list, struct transfer_list_header) ||
1408 	    !IS_ALIGNED(transfer_list, TL_ALIGNMENT_FROM_ORDER(tl->alignment)))
1409 		panic("Transfer list base address is not aligned");
1410 
1411 	if (transfer_list_check_header(tl) == TL_OPS_NONE)
1412 		panic("Invalid transfer list");
1413 
1414 	tl_e = transfer_list_find(tl, TL_TAG_FDT);
1415 	if (fdt != (unsigned long)transfer_list_entry_data(tl_e))
1416 		panic("DT does not match the DT entry of the TL");
1417 
1418 	boot_arg_transfer_list = transfer_list;
1419 }
1420 
1421 #if defined(CFG_WITH_ARM_TRUSTED_FW)
1422 unsigned long boot_cpu_on_handler(unsigned long a0 __maybe_unused,
1423 				  unsigned long a1 __unused)
1424 {
1425 	init_secondary_helper(PADDR_INVALID);
1426 	return 0;
1427 }
1428 #else
1429 void boot_init_secondary(unsigned long nsec_entry)
1430 {
1431 	init_secondary_helper(nsec_entry);
1432 }
1433 #endif
1434 
1435 #if defined(CFG_BOOT_SECONDARY_REQUEST)
1436 void boot_set_core_ns_entry(size_t core_idx, uintptr_t entry,
1437 			    uintptr_t context_id)
1438 {
1439 	ns_entry_contexts[core_idx].entry_point = entry;
1440 	ns_entry_contexts[core_idx].context_id = context_id;
1441 	dsb_ishst();
1442 }
1443 
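/*
 * Release a pending secondary core: publish its entry point, then set the
 * per-core flag in spin_table[] and issue sev() so that boot_core_hpen(),
 * spinning in wfe() on that flag, wakes up and returns the entry context.
 */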
1444 int boot_core_release(size_t core_idx, paddr_t entry)
1445 {
1446 	if (!core_idx || core_idx >= CFG_TEE_CORE_NB_CORE)
1447 		return -1;
1448 
1449 	ns_entry_contexts[core_idx].entry_point = entry;
1450 	dmb();
1451 	spin_table[core_idx] = 1;
1452 	dsb();
1453 	sev();
1454 
1455 	return 0;
1456 }
1457 
1458 /*
1459  * Spin until a secondary boot request arrives, then return with
1460  * the secondary core entry address.
1461  */
1462 struct ns_entry_context *boot_core_hpen(void)
1463 {
1464 #ifdef CFG_PSCI_ARM32
1465 	return &ns_entry_contexts[get_core_pos()];
1466 #else
1467 	do {
1468 		wfe();
1469 	} while (!spin_table[get_core_pos()]);
1470 	dmb();
1471 	return &ns_entry_contexts[get_core_pos()];
1472 #endif
1473 }
1474 #endif
1475 
1476 #if defined(CFG_CORE_ASLR)
1477 #if defined(CFG_DT)
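/*
 * Expects a DT fragment along the lines of (seed value is illustrative):
 *
 *	secure-chosen {
 *		kaslr-seed = <0x12345678 0x9abcdef0>;
 *	};
 */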
1478 unsigned long __weak get_aslr_seed(void)
1479 {
1480 	void *fdt = NULL;
1481 	int rc = 0;
1482 	const uint64_t *seed = NULL;
1483 	int offs = 0;
1484 	int len = 0;
1485 
1486 	if (!IS_ENABLED(CFG_CORE_SEL2_SPMC))
1487 		fdt = (void *)boot_arg_fdt;
1488 
1489 	if (!fdt) {
1490 		DMSG("No fdt");
1491 		goto err;
1492 	}
1493 
1494 	rc = fdt_check_header(fdt);
1495 	if (rc) {
1496 		DMSG("Bad fdt: %d", rc);
1497 		goto err;
1498 	}
1499 
1500 	offs =  fdt_path_offset(fdt, "/secure-chosen");
1501 	if (offs < 0) {
1502 		DMSG("Cannot find /secure-chosen");
1503 		goto err;
1504 	}
1505 	seed = fdt_getprop(fdt, offs, "kaslr-seed", &len);
1506 	if (!seed || len != sizeof(*seed)) {
1507 		DMSG("Cannot find valid kaslr-seed");
1508 		goto err;
1509 	}
1510 
1511 	return fdt64_to_cpu(*seed);
1512 
1513 err:
1514 	/* Try platform implementation */
1515 	return plat_get_aslr_seed();
1516 }
1517 #else /*!CFG_DT*/
1518 unsigned long __weak get_aslr_seed(void)
1519 {
1520 	/* Try platform implementation */
1521 	return plat_get_aslr_seed();
1522 }
1523 #endif /*!CFG_DT*/
1524 #endif /*CFG_CORE_ASLR*/
1525 
1526 static void *get_fdt_from_boot_info(struct ffa_boot_info_header_1_1 *hdr)
1527 {
1528 	struct ffa_boot_info_1_1 *desc = NULL;
1529 	uint8_t content_fmt = 0;
1530 	uint8_t name_fmt = 0;
1531 	void *fdt = NULL;
1532 	int ret = 0;
1533 
1534 	if (hdr->signature != FFA_BOOT_INFO_SIGNATURE) {
1535 		EMSG("Bad boot info signature %#"PRIx32, hdr->signature);
1536 		panic();
1537 	}
1538 	if (hdr->version != FFA_BOOT_INFO_VERSION) {
1539 		EMSG("Bad boot info version %#"PRIx32, hdr->version);
1540 		panic();
1541 	}
1542 	if (hdr->desc_count != 1) {
1543 		EMSG("Bad boot info descriptor count %#"PRIx32,
1544 		     hdr->desc_count);
1545 		panic();
1546 	}
1547 	desc = (void *)((vaddr_t)hdr + hdr->desc_offset);
1548 	name_fmt = desc->flags & FFA_BOOT_INFO_FLAG_NAME_FORMAT_MASK;
1549 	if (name_fmt == FFA_BOOT_INFO_FLAG_NAME_FORMAT_STRING)
1550 		DMSG("Boot info descriptor name \"%16s\"", desc->name);
1551 	else if (name_fmt == FFA_BOOT_INFO_FLAG_NAME_FORMAT_UUID)
1552 		DMSG("Boot info descriptor UUID %pUl", (void *)desc->name);
1553 	else
1554 		DMSG("Boot info descriptor: unknown name format %"PRIu8,
1555 		     name_fmt);
1556 
1557 	content_fmt = (desc->flags & FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_MASK) >>
1558 		      FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_SHIFT;
1559 	if (content_fmt != FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_ADDR) {
1560 		EMSG("Bad boot info content format %"PRIu8", expected %u (address)",
1561 		     content_fmt, FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_ADDR);
1562 		panic();
1563 	}
1564 
1565 	fdt = (void *)(vaddr_t)desc->contents;
1566 	ret = fdt_check_full(fdt, desc->size);
1567 	if (ret < 0) {
1568 		EMSG("Invalid Device Tree at %p: error %d", fdt, ret);
1569 		panic();
1570 	}
1571 	return fdt;
1572 }
1573 
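/*
 * Reads an FF-A manifest fragment roughly like (values are made up):
 *
 *	/ {
 *		compatible = "arm,ffa-manifest-1.0";
 *		load-address = <0x0e100000>;
 *		mem-size = <0x00f00000>;
 *	};
 */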
1574 static void get_sec_mem_from_manifest(void *fdt, paddr_t *base, size_t *size)
1575 {
1576 	int ret = 0;
1577 	uint64_t num = 0;
1578 
1579 	ret = fdt_node_check_compatible(fdt, 0, "arm,ffa-manifest-1.0");
1580 	if (ret < 0) {
1581 		EMSG("Invalid FF-A manifest at %p: error %d", fdt, ret);
1582 		panic();
1583 	}
1584 	ret = dt_getprop_as_number(fdt, 0, "load-address", &num);
1585 	if (ret < 0) {
1586 		EMSG("Can't read \"load-address\" from FF-A manifest at %p: error %d",
1587 		     fdt, ret);
1588 		panic();
1589 	}
1590 	*base = num;
1591 	/* "mem-size" is currently an undocumented extension to the spec. */
1592 	ret = dt_getprop_as_number(fdt, 0, "mem-size", &num);
1593 	if (ret < 0) {
1594 		EMSG("Can't read \"mem-size\" from FF-A manifest at %p: error %d",
1595 		     fdt, ret);
1596 		panic();
1597 	}
1598 	*size = num;
1599 }
1600 
1601 void __weak boot_save_args(unsigned long a0, unsigned long a1,
1602 			   unsigned long a2, unsigned long a3,
1603 			   unsigned long a4 __maybe_unused)
1604 {
1605 	/*
1606 	 * Register use:
1607 	 *
1608 	 * Scenario A: Default arguments
1609 	 * a0   - CFG_CORE_FFA=y && CFG_CORE_SEL2_SPMC=n:
1610 	 *        if non-NULL holds the TOS FW config [1] address
1611 	 *      - CFG_CORE_FFA=y &&
1612 	 *        (CFG_CORE_SEL2_SPMC=y || CFG_CORE_EL3_SPMC=y):
1613 	 *        address of FF-A Boot Information Blob
1614 	 *      - CFG_CORE_FFA=n:
1615 	 *        if non-NULL holds the pageable part address
1616 	 * a1	- CFG_WITH_ARM_TRUSTED_FW=n (Armv7):
1617 	 *	  Armv7 standard bootarg #1 (kept track of in entry_a32.S)
1618 	 * a2   - CFG_CORE_SEL2_SPMC=n:
1619 	 *        if non-NULL holds the system DTB address
1620 	 *	- CFG_WITH_ARM_TRUSTED_FW=n (Armv7):
1621 	 *	  Armv7 standard bootarg #2 (system DTB address, kept track
1622 	 *	  of in entry_a32.S)
1623 	 * a3	- Not used
1624 	 * a4	- CFG_WITH_ARM_TRUSTED_FW=n:
1625 	 *	  Non-secure entry address
1626 	 *
1627 	 * [1] A TF-A concept: TOS_FW_CONFIG - Trusted OS Firmware
1628 	 * configuration file. Used by Trusted OS (BL32), that is, OP-TEE
1629 	 * here. This is also called Manifest DT, related to the Manifest DT
1630 	 * passed in the FF-A Boot Information Blob, but with a different
1631 	 * compatible string.
1632 	 *
1633 	 * Scenario B: FW Handoff via Transfer List
1634 	 * Note: FF-A and non-secure entry are not yet supported with
1635 	 *       Transfer List
1636 	 * a0	- DTB address or 0 (AArch64)
1637 	 *	- must be 0 (AArch32)
1638 	 * a1	- TRANSFER_LIST_SIGNATURE | REG_CONVENTION_VER_MASK
1639 	 * a2	- must be 0 (AArch64)
1640 	 *	- DTB address or 0 (AArch32)
1641 	 * a3	- Transfer list base address
1642 	 * a4	- Not used
1643 	 */
1644 
1645 	if (IS_ENABLED(CFG_TRANSFER_LIST) &&
1646 	    a1 == (TRANSFER_LIST_SIGNATURE | REG_CONVENTION_VER_MASK)) {
1647 		if (IS_ENABLED(CFG_ARM64_core)) {
1648 			boot_save_transfer_list(a2, a3, a0);
1649 			boot_arg_fdt = a0;
1650 		} else {
1651 			boot_save_transfer_list(a0, a3, a2);
1652 			boot_arg_fdt = a2;
1653 		}
1654 		return;
1655 	}
1656 
1657 	if (!IS_ENABLED(CFG_CORE_SEL2_SPMC)) {
1658 #if defined(CFG_DT_ADDR)
1659 		boot_arg_fdt = CFG_DT_ADDR;
1660 #else
1661 		boot_arg_fdt = a2;
1662 #endif
1663 	}
1664 
1665 	if (IS_ENABLED(CFG_CORE_FFA)) {
1666 		if (IS_ENABLED(CFG_CORE_SEL2_SPMC) ||
1667 		    IS_ENABLED(CFG_CORE_EL3_SPMC))
1668 			manifest_dt = get_fdt_from_boot_info((void *)a0);
1669 		else
1670 			manifest_dt = (void *)a0;
1671 		if (IS_ENABLED(CFG_CORE_SEL2_SPMC) &&
1672 		    IS_ENABLED(CFG_CORE_PHYS_RELOCATABLE)) {
1673 			paddr_t base = 0;
1674 			size_t size = 0;
1675 
1676 			get_sec_mem_from_manifest(manifest_dt, &base, &size);
1677 			core_mmu_set_secure_memory(base, size);
1678 		}
1679 	} else {
1680 		if (IS_ENABLED(CFG_WITH_PAGER)) {
1681 #if defined(CFG_PAGEABLE_ADDR)
1682 			boot_arg_pageable_part = CFG_PAGEABLE_ADDR;
1683 #else
1684 			boot_arg_pageable_part = a0;
1685 #endif
1686 		}
1687 		if (!IS_ENABLED(CFG_WITH_ARM_TRUSTED_FW)) {
1688 #if defined(CFG_NS_ENTRY_ADDR)
1689 			boot_arg_nsec_entry = CFG_NS_ENTRY_ADDR;
1690 #else
1691 			boot_arg_nsec_entry = a4;
1692 #endif
1693 		}
1694 	}
1695 }
1696 
1697 #if defined(CFG_TRANSFER_LIST)
1698 static TEE_Result release_transfer_list(void)
1699 {
1700 	struct dt_descriptor *dt = get_external_dt_desc();
1701 
1702 	if (!mapped_tl)
1703 		return TEE_SUCCESS;
1704 
1705 	if (dt) {
1706 		int ret = 0;
1707 		struct transfer_list_entry *tl_e = NULL;
1708 
1709 		/*
1710 		 * Pack the DTB and update the transfer list before unmapping it.
1711 		 */
1712 		ret = fdt_pack(dt->blob);
1713 		if (ret < 0) {
1714 			EMSG("Failed to pack Device Tree at 0x%" PRIxPA
1715 			     ": error %d", virt_to_phys(dt->blob), ret);
1716 			panic();
1717 		}
1718 
1719 		tl_e = transfer_list_find(mapped_tl, TL_TAG_FDT);
1720 		assert(dt->blob == transfer_list_entry_data(tl_e));
1721 		transfer_list_set_data_size(mapped_tl, tl_e,
1722 					    fdt_totalsize(dt->blob));
1723 		dt->blob = NULL;
1724 	}
1725 
1726 	transfer_list_unmap_sync(mapped_tl);
1727 	mapped_tl = NULL;
1728 
1729 	return TEE_SUCCESS;
1730 }
1731 
1732 boot_final(release_transfer_list);
1733 #endif
1734