1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2015-2023, Linaro Limited
4  * Copyright (c) 2023, Arm Limited
5  */
6 
7 #include <arm.h>
8 #include <assert.h>
9 #include <compiler.h>
10 #include <config.h>
11 #include <console.h>
12 #include <crypto/crypto.h>
13 #include <drivers/gic.h>
14 #include <dt-bindings/interrupt-controller/arm-gic.h>
15 #include <ffa.h>
16 #include <initcall.h>
17 #include <inttypes.h>
18 #include <io.h>
19 #include <keep.h>
20 #include <kernel/asan.h>
21 #include <kernel/boot.h>
22 #include <kernel/dt.h>
23 #include <kernel/linker.h>
24 #include <kernel/misc.h>
25 #include <kernel/panic.h>
26 #include <kernel/tee_misc.h>
27 #include <kernel/thread.h>
28 #include <kernel/tpm.h>
29 #include <kernel/transfer_list.h>
30 #include <libfdt.h>
31 #include <malloc.h>
32 #include <memtag.h>
33 #include <mm/core_memprot.h>
34 #include <mm/core_mmu.h>
35 #include <mm/fobj.h>
36 #include <mm/tee_mm.h>
37 #include <mm/tee_pager.h>
38 #include <sm/psci.h>
39 #include <stdio.h>
40 #include <trace.h>
41 #include <utee_defines.h>
42 #include <util.h>
43 
44 #include <platform_config.h>
45 
46 #if !defined(CFG_WITH_ARM_TRUSTED_FW)
47 #include <sm/sm.h>
48 #endif
49 
50 #if defined(CFG_WITH_VFP)
51 #include <kernel/vfp.h>
52 #endif
53 
54 /*
55  * In this file we're using unsigned long to represent physical pointers as
56  * they are received in a single register when OP-TEE is initially entered.
57  * This limits 32-bit systems to only make use of the lower 32 bits
58  * of a physical address for initial parameters.
59  *
60  * 64-bit systems on the other hand can use full 64-bit physical pointers.
61  */
62 #define PADDR_INVALID		ULONG_MAX
63 
64 #if defined(CFG_BOOT_SECONDARY_REQUEST)
65 struct ns_entry_context {
66 	uintptr_t entry_point;
67 	uintptr_t context_id;
68 };
69 struct ns_entry_context ns_entry_contexts[CFG_TEE_CORE_NB_CORE];
70 static uint32_t spin_table[CFG_TEE_CORE_NB_CORE];
71 #endif
72 
73 #ifdef CFG_BOOT_SYNC_CPU
74 /*
75  * Array used when booting to synchronize the CPUs.
76  * When 0, the CPU has not started.
77  * When 1, it has started.
78  */
79 uint32_t sem_cpu_sync[CFG_TEE_CORE_NB_CORE];
80 DECLARE_KEEP_PAGER(sem_cpu_sync);
81 #endif
82 
83 static void *manifest_dt __nex_bss;
84 static unsigned long boot_arg_fdt __nex_bss;
85 static unsigned long boot_arg_nsec_entry __nex_bss;
86 static unsigned long boot_arg_pageable_part __nex_bss;
87 static unsigned long boot_arg_transfer_list __nex_bss;
88 static struct transfer_list_header *mapped_tl __nex_bss;
89 
90 #ifdef CFG_SECONDARY_INIT_CNTFRQ
91 static uint32_t cntfrq;
92 #endif
93 
94 /* May be overridden in plat-$(PLATFORM)/main.c */
95 __weak void plat_primary_init_early(void)
96 {
97 }
98 DECLARE_KEEP_PAGER(plat_primary_init_early);
99 
100 /* May be overridden in plat-$(PLATFORM)/main.c */
101 __weak void boot_primary_init_intc(void)
102 {
103 }
104 
105 /* May be overridden in plat-$(PLATFORM)/main.c */
106 __weak void boot_secondary_init_intc(void)
107 {
108 }
109 
110 /* May be overridden in plat-$(PLATFORM)/main.c */
111 __weak unsigned long plat_get_aslr_seed(void)
112 {
113 	DMSG("Warning: no ASLR seed");
114 
115 	return 0;
116 }
117 
118 #if defined(_CFG_CORE_STACK_PROTECTOR) || defined(CFG_WITH_STACK_CANARIES)
119 /* Generate random stack canary value on boot up */
120 __weak void plat_get_random_stack_canaries(void *buf, size_t ncan, size_t size)
121 {
122 	TEE_Result ret = TEE_ERROR_GENERIC;
123 	size_t i = 0;
124 
125 	assert(buf && ncan && size);
126 
127 	/*
128 	 * With virtualization the RNG is not initialized in the Nexus core,
129 	 * so this function needs a platform-specific override.
130 	 */
131 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
132 		IMSG("WARNING: Using fixed value for stack canary");
133 		memset(buf, 0xab, ncan * size);
134 		goto out;
135 	}
136 
137 	ret = crypto_rng_read(buf, ncan * size);
138 	if (ret != TEE_SUCCESS)
139 		panic("Failed to generate random stack canary");
140 
141 out:
142 	/* Leave a null byte in each canary to prevent string-based exploits */
143 	for (i = 0; i < ncan; i++)
144 		*((uint8_t *)buf + size * i) = 0;
145 }
146 #endif /* _CFG_CORE_STACK_PROTECTOR || CFG_WITH_STACK_CANARIES */
147 
148 /*
149  * This function is called as a guard after each smc call which is not
150  * supposed to return.
151  */
152 void __panic_at_smc_return(void)
153 {
154 	panic();
155 }
156 
157 #if defined(CFG_WITH_ARM_TRUSTED_FW)
158 void init_sec_mon(unsigned long nsec_entry __maybe_unused)
159 {
160 	assert(nsec_entry == PADDR_INVALID);
161 	/* Do nothing as we don't have a secure monitor */
162 }
163 #else
164 /* May be overridden in plat-$(PLATFORM)/main.c */
165 __weak void init_sec_mon(unsigned long nsec_entry)
166 {
167 	struct sm_nsec_ctx *nsec_ctx;
168 
169 	assert(nsec_entry != PADDR_INVALID);
170 
171 	/* Initialize secure monitor */
172 	nsec_ctx = sm_get_nsec_ctx();
173 	nsec_ctx->mon_lr = nsec_entry;
174 	nsec_ctx->mon_spsr = CPSR_MODE_SVC | CPSR_I;
175 	if (nsec_entry & 1)
176 		nsec_ctx->mon_spsr |= CPSR_T;
177 }
178 #endif
179 
180 #if defined(CFG_WITH_ARM_TRUSTED_FW)
181 static void init_vfp_nsec(void)
182 {
183 }
184 #else
185 static void init_vfp_nsec(void)
186 {
187 	/* Normal world can use CP10 and CP11 (SIMD/VFP) */
188 	write_nsacr(read_nsacr() | NSACR_CP10 | NSACR_CP11);
189 }
190 #endif
191 
192 #if defined(CFG_WITH_VFP)
193 
194 #ifdef ARM32
195 static void init_vfp_sec(void)
196 {
197 	uint32_t cpacr = read_cpacr();
198 
199 	/*
200 	 * Enable Advanced SIMD functionality.
201 	 * Enable use of D16-D31 of the Floating-point Extension register
202 	 * file.
203 	 */
204 	cpacr &= ~(CPACR_ASEDIS | CPACR_D32DIS);
205 	/*
206 	 * Enable usage of CP10 and CP11 (SIMD/VFP) in both kernel and user
207 	 * mode.
208 	 */
209 	cpacr |= CPACR_CP(10, CPACR_CP_ACCESS_FULL);
210 	cpacr |= CPACR_CP(11, CPACR_CP_ACCESS_FULL);
211 	write_cpacr(cpacr);
212 }
213 #endif /* ARM32 */
214 
215 #ifdef ARM64
216 static void init_vfp_sec(void)
217 {
218 	/* Not using VFP until thread_kernel_enable_vfp() */
219 	vfp_disable();
220 }
221 #endif /* ARM64 */
222 
223 #else /* CFG_WITH_VFP */
224 
225 static void init_vfp_sec(void)
226 {
227 	/* Not using VFP */
228 }
229 #endif
230 
231 #ifdef CFG_SECONDARY_INIT_CNTFRQ
232 static void primary_save_cntfrq(void)
233 {
234 	assert(cntfrq == 0);
235 
236 	/*
237 	 * CNTFRQ should be initialized on the primary CPU by a
238 	 * previous boot stage
239 	 */
240 	cntfrq = read_cntfrq();
241 }
242 
243 static void secondary_init_cntfrq(void)
244 {
245 	assert(cntfrq != 0);
246 	write_cntfrq(cntfrq);
247 }
248 #else /* CFG_SECONDARY_INIT_CNTFRQ */
249 static void primary_save_cntfrq(void)
250 {
251 }
252 
253 static void secondary_init_cntfrq(void)
254 {
255 }
256 #endif
257 
258 #ifdef CFG_CORE_SANITIZE_KADDRESS
259 static void init_run_constructors(void)
260 {
261 	const vaddr_t *ctor;
262 
263 	for (ctor = &__ctor_list; ctor < &__ctor_end; ctor++)
264 		((void (*)(void))(*ctor))();
265 }
266 
267 static void init_asan(void)
268 {
269 
270 	/*
271 	 * CFG_ASAN_SHADOW_OFFSET is also supplied as
272 	 * -fasan-shadow-offset=$(CFG_ASAN_SHADOW_OFFSET) to the compiler.
273 	 * Since all the values needed to calculate CFG_ASAN_SHADOW_OFFSET
274 	 * aren't available to make, we need to calculate it in advance and
275 	 * hard code it into the platform conf.mk. Here, where we do have all
276 	 * the needed values, we double check that the compiler was supplied
277 	 * the correct value.
278 	 */
279 
280 #define __ASAN_SHADOW_START \
281 	ROUNDUP(TEE_RAM_START + (TEE_RAM_VA_SIZE * 8) / 9 - 8, 8)
282 	assert(__ASAN_SHADOW_START == (vaddr_t)&__asan_shadow_start);
283 #define __CFG_ASAN_SHADOW_OFFSET \
284 	(__ASAN_SHADOW_START - (TEE_RAM_START / 8))
285 	COMPILE_TIME_ASSERT(CFG_ASAN_SHADOW_OFFSET == __CFG_ASAN_SHADOW_OFFSET);
286 #undef __ASAN_SHADOW_START
287 #undef __CFG_ASAN_SHADOW_OFFSET
288 
289 	/*
290 	 * Assign the area covered by the shadow memory: everything from the
291 	 * load address up to the beginning of the shadow area.
292 	 */
293 	asan_set_shadowed((void *)TEE_LOAD_ADDR, &__asan_shadow_start);
294 
295 	/*
296 	 * Add access to areas that aren't opened automatically by a
297 	 * constructor.
298 	 */
299 	asan_tag_access(&__ctor_list, &__ctor_end);
300 	asan_tag_access(__rodata_start, __rodata_end);
301 #ifdef CFG_WITH_PAGER
302 	asan_tag_access(__pageable_start, __pageable_end);
303 #endif /*CFG_WITH_PAGER*/
304 	asan_tag_access(__nozi_start, __nozi_end);
305 #ifdef ARM32
306 	asan_tag_access(__exidx_start, __exidx_end);
307 	asan_tag_access(__extab_start, __extab_end);
308 #endif
309 
310 	init_run_constructors();
311 
312 	/* Everything is tagged correctly, let's start address sanitizing. */
313 	asan_start();
314 }
315 #else /*CFG_CORE_SANITIZE_KADDRESS*/
316 static void init_asan(void)
317 {
318 }
319 #endif /*CFG_CORE_SANITIZE_KADDRESS*/
320 
321 #if defined(CFG_MEMTAG)
322 /* Called from entry_a64.S only when MEMTAG is configured */
323 void boot_init_memtag(void)
324 {
325 	paddr_t base = 0;
326 	paddr_size_t size = 0;
327 
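	/*
	 * Select tagging ops matching the implemented MTE level, then clear
	 * (tag 0) all secure memory.
	 */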
328 	memtag_init_ops(feat_mte_implemented());
329 	core_mmu_get_secure_memory(&base, &size);
330 	memtag_set_tags((void *)(vaddr_t)base, size, 0);
331 }
332 #endif
333 
334 #ifdef CFG_WITH_PAGER
335 
336 #ifdef CFG_CORE_SANITIZE_KADDRESS
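/*
 * Reserve the part of the ASAN shadow mapping that overlaps @pool so it is
 * never handed out by the pool allocator.
 */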
337 static void carve_out_asan_mem(tee_mm_pool_t *pool)
338 {
339 	const size_t s = pool->hi - pool->lo;
340 	tee_mm_entry_t *mm;
341 	paddr_t apa = ASAN_MAP_PA;
342 	size_t asz = ASAN_MAP_SZ;
343 
344 	if (core_is_buffer_outside(apa, asz, pool->lo, s))
345 		return;
346 
347 	/* Reserve the shadow area */
348 	if (!core_is_buffer_inside(apa, asz, pool->lo, s)) {
349 		if (apa < pool->lo) {
350 			/*
351 			 * ASAN buffer is overlapping with the beginning of
352 			 * the pool.
353 			 */
354 			asz -= pool->lo - apa;
355 			apa = pool->lo;
356 		} else {
357 			/*
358 			 * ASAN buffer is overlapping with the end of the
359 			 * pool.
360 			 */
361 			asz = pool->hi - apa;
362 		}
363 	}
364 	mm = tee_mm_alloc2(pool, apa, asz);
365 	assert(mm);
366 }
367 #else
368 static void carve_out_asan_mem(tee_mm_pool_t *pool __unused)
369 {
370 }
371 #endif
372 
373 static void print_pager_pool_size(void)
374 {
375 	struct tee_pager_stats __maybe_unused stats;
376 
377 	tee_pager_get_stats(&stats);
378 	IMSG("Pager pool size: %zukB",
379 		stats.npages_all * SMALL_PAGE_SIZE / 1024);
380 }
381 
382 static void init_vcore(tee_mm_pool_t *mm_vcore)
383 {
384 	const vaddr_t begin = VCORE_START_VA;
385 	size_t size = TEE_RAM_VA_SIZE;
386 
387 #ifdef CFG_CORE_SANITIZE_KADDRESS
388 	/* Carve out ASAN memory, flat mapped after core memory */
389 	if (begin + size > ASAN_SHADOW_PA)
390 		size = ASAN_MAP_PA - begin;
391 #endif
392 
393 	if (!tee_mm_init(mm_vcore, begin, size, SMALL_PAGE_SHIFT,
394 			 TEE_MM_POOL_NO_FLAGS))
395 		panic("tee_mm_vcore init failed");
396 }
397 
398 /*
399  * With CFG_CORE_ASLR=y the init part is relocated very early during boot.
400  * The init part is also paged just as the rest of the normal paged code, with
401  * the difference that it's preloaded during boot. When the backing store
402  * is configured the entire paged binary is copied in place and then also
403  * the init part. Since the init part has been relocated (references to
404  * addresses updated to compensate for the new load address) this has to be
405  * undone for the hashes of those pages to match with the original binary.
406  *
407  * If CFG_CORE_ASLR=n, nothing needs to be done as the code/ro pages are
408  * unchanged.
409  */
410 static void undo_init_relocation(uint8_t *paged_store __maybe_unused)
411 {
412 #ifdef CFG_CORE_ASLR
413 	unsigned long *ptr = NULL;
414 	const uint32_t *reloc = NULL;
415 	const uint32_t *reloc_end = NULL;
416 	unsigned long offs = boot_mmu_config.map_offset;
417 	const struct boot_embdata *embdata = (const void *)__init_end;
418 	vaddr_t addr_end = (vaddr_t)__init_end - offs - TEE_LOAD_ADDR;
419 	vaddr_t addr_start = (vaddr_t)__init_start - offs - TEE_LOAD_ADDR;
420 
421 	reloc = (const void *)((vaddr_t)embdata + embdata->reloc_offset);
422 	reloc_end = reloc + embdata->reloc_len / sizeof(*reloc);
423 
424 	for (; reloc < reloc_end; reloc++) {
425 		if (*reloc < addr_start)
426 			continue;
427 		if (*reloc >= addr_end)
428 			break;
429 		ptr = (void *)(paged_store + *reloc - addr_start);
430 		*ptr -= offs;
431 	}
432 #endif
433 }
434 
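/*
 * Allocate the read-only paged fobj backing the pageable area. With
 * CFG_CORE_ASLR the relocation-aware variant is used so that relocations can
 * be re-applied when pages are brought back in.
 */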
435 static struct fobj *ro_paged_alloc(tee_mm_entry_t *mm, void *hashes,
436 				   void *store)
437 {
438 	const unsigned int num_pages = tee_mm_get_bytes(mm) / SMALL_PAGE_SIZE;
439 #ifdef CFG_CORE_ASLR
440 	unsigned int reloc_offs = (vaddr_t)__pageable_start - VCORE_START_VA;
441 	const struct boot_embdata *embdata = (const void *)__init_end;
442 	const void *reloc = __init_end + embdata->reloc_offset;
443 
444 	return fobj_ro_reloc_paged_alloc(num_pages, hashes, reloc_offs,
445 					 reloc, embdata->reloc_len, store);
446 #else
447 	return fobj_ro_paged_alloc(num_pages, hashes, store);
448 #endif
449 }
450 
451 static void init_runtime(unsigned long pageable_part)
452 {
453 	size_t n;
454 	size_t init_size = (size_t)(__init_end - __init_start);
455 	size_t pageable_start = (size_t)__pageable_start;
456 	size_t pageable_end = (size_t)__pageable_end;
457 	size_t pageable_size = pageable_end - pageable_start;
458 	vaddr_t tzsram_end = TZSRAM_BASE + TZSRAM_SIZE - TEE_LOAD_ADDR +
459 			     VCORE_START_VA;
460 	size_t hash_size = (pageable_size / SMALL_PAGE_SIZE) *
461 			   TEE_SHA256_HASH_SIZE;
462 	const struct boot_embdata *embdata = (const void *)__init_end;
463 	const void *tmp_hashes = NULL;
464 	tee_mm_entry_t *mm = NULL;
465 	struct fobj *fobj = NULL;
466 	uint8_t *paged_store = NULL;
467 	uint8_t *hashes = NULL;
468 
469 	assert(pageable_size % SMALL_PAGE_SIZE == 0);
470 	assert(embdata->total_len >= embdata->hashes_offset +
471 				     embdata->hashes_len);
472 	assert(hash_size == embdata->hashes_len);
473 
474 	tmp_hashes = __init_end + embdata->hashes_offset;
475 
476 	init_asan();
477 
478 	/* Add heap2 first as heap1 may be too small as initial bget pool */
479 	malloc_add_pool(__heap2_start, __heap2_end - __heap2_start);
480 	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
481 
482 	/*
483 	 * This needs to be initialized early to support address lookup
484 	 * in MEM_AREA_TEE_RAM
485 	 */
486 	tee_pager_early_init();
487 
488 	hashes = malloc(hash_size);
489 	IMSG_RAW("\n");
490 	IMSG("Pager is enabled. Hashes: %zu bytes", hash_size);
491 	assert(hashes);
492 	asan_memcpy_unchecked(hashes, tmp_hashes, hash_size);
493 
494 	/*
495 	 * Need tee_mm_sec_ddr initialized to be able to allocate secure
496 	 * DDR below.
497 	 */
498 	core_mmu_init_ta_ram();
499 
500 	carve_out_asan_mem(&tee_mm_sec_ddr);
501 
502 	mm = tee_mm_alloc(&tee_mm_sec_ddr, pageable_size);
503 	assert(mm);
504 	paged_store = phys_to_virt(tee_mm_get_smem(mm), MEM_AREA_TA_RAM,
505 				   pageable_size);
506 	/*
507 	 * Load pageable part in the dedicated allocated area:
508 	 * - Move pageable non-init part into pageable area. Note bootloader
509 	 *   may have loaded it anywhere in TA RAM hence use memmove().
510 	 * - Copy pageable init part from current location into pageable area.
511 	 */
512 	memmove(paged_store + init_size,
513 		phys_to_virt(pageable_part,
514 			     core_mmu_get_type_by_pa(pageable_part),
515 			     __pageable_part_end - __pageable_part_start),
516 		__pageable_part_end - __pageable_part_start);
517 	asan_memcpy_unchecked(paged_store, __init_start, init_size);
518 	/*
519 	 * Undo any relocation of the init part so the hash checks
520 	 * can pass.
521 	 */
522 	undo_init_relocation(paged_store);
523 
524 	/* Check that the hashes of what's in the pageable area are OK */
525 	DMSG("Checking hashes of pageable area");
526 	for (n = 0; (n * SMALL_PAGE_SIZE) < pageable_size; n++) {
527 		const uint8_t *hash = hashes + n * TEE_SHA256_HASH_SIZE;
528 		const uint8_t *page = paged_store + n * SMALL_PAGE_SIZE;
529 		TEE_Result res;
530 
531 		DMSG("hash pg_idx %zu hash %p page %p", n, hash, page);
532 		res = hash_sha256_check(hash, page, SMALL_PAGE_SIZE);
533 		if (res != TEE_SUCCESS) {
534 			EMSG("Hash failed for page %zu at %p: res 0x%x",
535 			     n, (void *)page, res);
536 			panic();
537 		}
538 	}
539 
540 	/*
541 	 * Assert that prepaged init sections are page aligned so that nothing
542 	 * trails uninitialized at the end of the premapped init area.
543 	 */
544 	assert(!(init_size & SMALL_PAGE_MASK));
545 
546 	/*
547 	 * Initialize the virtual memory pool used for main_mmu_l2_ttb which
548 	 * is supplied to tee_pager_init() below.
549 	 */
550 	init_vcore(&tee_mm_vcore);
551 
552 	/*
553 	 * Assign the pager alias area at the end of the small page block that
554 	 * the rest of the binary is loaded into. We're taking more than
555 	 * needed, but we're guaranteed to not need more than the physical
556 	 * amount of TZSRAM.
557 	 */
558 	mm = tee_mm_alloc2(&tee_mm_vcore,
559 			   (vaddr_t)tee_mm_vcore.lo +
560 			   tee_mm_vcore.size - TZSRAM_SIZE,
561 			   TZSRAM_SIZE);
562 	assert(mm);
563 	tee_pager_set_alias_area(mm);
564 
565 	/*
566 	 * Claim virtual memory which isn't paged.
567 	 * Linear memory (flat map core memory) ends there.
568 	 */
569 	mm = tee_mm_alloc2(&tee_mm_vcore, VCORE_UNPG_RX_PA,
570 			   (vaddr_t)(__pageable_start - VCORE_UNPG_RX_PA));
571 	assert(mm);
572 
573 	/*
574 	 * Allocate virtual memory for the pageable area and let the pager
575 	 * take charge of all the pages already assigned to that memory.
576 	 */
577 	mm = tee_mm_alloc2(&tee_mm_vcore, (vaddr_t)__pageable_start,
578 			   pageable_size);
579 	assert(mm);
580 	fobj = ro_paged_alloc(mm, hashes, paged_store);
581 	assert(fobj);
582 	tee_pager_add_core_region(tee_mm_get_smem(mm), PAGED_REGION_TYPE_RO,
583 				  fobj);
584 	fobj_put(fobj);
585 
586 	tee_pager_add_pages(pageable_start, init_size / SMALL_PAGE_SIZE, false);
587 	tee_pager_add_pages(pageable_start + init_size,
588 			    (pageable_size - init_size) / SMALL_PAGE_SIZE,
589 			    true);
590 	if (pageable_end < tzsram_end)
591 		tee_pager_add_pages(pageable_end, (tzsram_end - pageable_end) /
592 						   SMALL_PAGE_SIZE, true);
593 
594 	/*
595 	 * There may be physical pages in TZSRAM before the core load address.
596 	 * These pages can be added to the physical pages pool of the pager.
597 	 * This setup may happen when the secure bootloader runs in TZSRAM
598 	 * and its memory can be reused by OP-TEE once boot stages complete.
599 	 */
600 	tee_pager_add_pages(tee_mm_vcore.lo,
601 			(VCORE_UNPG_RX_PA - tee_mm_vcore.lo) / SMALL_PAGE_SIZE,
602 			true);
603 
604 	print_pager_pool_size();
605 }
606 #else
607 
608 static void init_runtime(unsigned long pageable_part __unused)
609 {
610 	init_asan();
611 
612 	/*
613 	 * By default the whole of OP-TEE uses malloc, so we need to
614 	 * initialize it early. But when virtualization is enabled, malloc is
615 	 * used only by the TEE runtime, so it should be initialized later,
616 	 * for every virtual partition separately. Core code uses nex_malloc
617 	 * instead.
618 	 */
619 #ifdef CFG_NS_VIRTUALIZATION
620 	nex_malloc_add_pool(__nex_heap_start, __nex_heap_end -
621 					      __nex_heap_start);
622 #else
623 	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
624 #endif
625 
626 	IMSG_RAW("\n");
627 }
628 #endif
629 
630 #if defined(CFG_DT)
631 static int add_optee_dt_node(struct dt_descriptor *dt)
632 {
633 	int offs;
634 	int ret;
635 
636 	if (fdt_path_offset(dt->blob, "/firmware/optee") >= 0) {
637 		DMSG("OP-TEE Device Tree node already exists!");
638 		return 0;
639 	}
640 
641 	offs = fdt_path_offset(dt->blob, "/firmware");
642 	if (offs < 0) {
643 		offs = add_dt_path_subnode(dt, "/", "firmware");
644 		if (offs < 0)
645 			return -1;
646 	}
647 
648 	offs = fdt_add_subnode(dt->blob, offs, "optee");
649 	if (offs < 0)
650 		return -1;
651 
652 	ret = fdt_setprop_string(dt->blob, offs, "compatible",
653 				 "linaro,optee-tz");
654 	if (ret < 0)
655 		return -1;
656 	ret = fdt_setprop_string(dt->blob, offs, "method", "smc");
657 	if (ret < 0)
658 		return -1;
659 
660 	if (CFG_CORE_ASYNC_NOTIF_GIC_INTID) {
661 		/*
662 		 * The format of the interrupt property is defined by the
663 		 * binding of the interrupt domain root. In this case it's
664 		 * an Arm GIC v1, v2 or v3 so we must be compatible with
665 		 * these.
666 		 *
667 		 * An SPI type of interrupt is indicated with a 0 in the
668 		 * first cell. A PPI type is indicated with value 1.
669 		 *
670 		 * The interrupt number goes in the second cell where
671 		 * SPIs range from 0 to 987 and PPIs range from 0 to 15.
672 		 *
673 		 * Flags are passed in the third cell.
674 		 */
675 		uint32_t itr_trigger = 0;
676 		uint32_t itr_type = 0;
677 		uint32_t itr_id = 0;
678 		uint32_t val[3] = { };
679 
680 		/* PPI are visible only in current CPU cluster */
681 		static_assert(IS_ENABLED(CFG_CORE_FFA) ||
682 			      !CFG_CORE_ASYNC_NOTIF_GIC_INTID ||
683 			      (CFG_CORE_ASYNC_NOTIF_GIC_INTID >=
684 			       GIC_SPI_BASE) ||
685 			      ((CFG_TEE_CORE_NB_CORE <= 8) &&
686 			       (CFG_CORE_ASYNC_NOTIF_GIC_INTID >=
687 				GIC_PPI_BASE)));
688 
689 		if (CFG_CORE_ASYNC_NOTIF_GIC_INTID >= GIC_SPI_BASE) {
690 			itr_type = GIC_SPI;
691 			itr_id = CFG_CORE_ASYNC_NOTIF_GIC_INTID - GIC_SPI_BASE;
692 			itr_trigger = IRQ_TYPE_EDGE_RISING;
693 		} else {
694 			itr_type = GIC_PPI;
695 			itr_id = CFG_CORE_ASYNC_NOTIF_GIC_INTID - GIC_PPI_BASE;
696 			itr_trigger = IRQ_TYPE_EDGE_RISING |
697 				      GIC_CPU_MASK_SIMPLE(CFG_TEE_CORE_NB_CORE);
698 		}
699 
700 		val[0] = TEE_U32_TO_BIG_ENDIAN(itr_type);
701 		val[1] = TEE_U32_TO_BIG_ENDIAN(itr_id);
702 		val[2] = TEE_U32_TO_BIG_ENDIAN(itr_trigger);
703 
704 		ret = fdt_setprop(dt->blob, offs, "interrupts", val,
705 				  sizeof(val));
706 		if (ret < 0)
707 			return -1;
708 	}
709 	return 0;
710 }
711 
712 #ifdef CFG_PSCI_ARM32
713 static int append_psci_compatible(void *fdt, int offs, const char *str)
714 {
715 	return fdt_appendprop(fdt, offs, "compatible", str, strlen(str) + 1);
716 }
717 
718 static int dt_add_psci_node(struct dt_descriptor *dt)
719 {
720 	int offs;
721 
722 	if (fdt_path_offset(dt->blob, "/psci") >= 0) {
723 		DMSG("PSCI Device Tree node already exists!");
724 		return 0;
725 	}
726 
727 	offs = add_dt_path_subnode(dt, "/", "psci");
728 	if (offs < 0)
729 		return -1;
730 	if (append_psci_compatible(dt->blob, offs, "arm,psci-1.0"))
731 		return -1;
732 	if (append_psci_compatible(dt->blob, offs, "arm,psci-0.2"))
733 		return -1;
734 	if (append_psci_compatible(dt->blob, offs, "arm,psci"))
735 		return -1;
736 	if (fdt_setprop_string(dt->blob, offs, "method", "smc"))
737 		return -1;
738 	if (fdt_setprop_u32(dt->blob, offs, "cpu_suspend", PSCI_CPU_SUSPEND))
739 		return -1;
740 	if (fdt_setprop_u32(dt->blob, offs, "cpu_off", PSCI_CPU_OFF))
741 		return -1;
742 	if (fdt_setprop_u32(dt->blob, offs, "cpu_on", PSCI_CPU_ON))
743 		return -1;
744 	if (fdt_setprop_u32(dt->blob, offs, "sys_poweroff", PSCI_SYSTEM_OFF))
745 		return -1;
746 	if (fdt_setprop_u32(dt->blob, offs, "sys_reset", PSCI_SYSTEM_RESET))
747 		return -1;
748 	return 0;
749 }
750 
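/*
 * Return 0 if any "compatible" string of the node at @offs starts with
 * @prefix, -1 otherwise.
 */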
751 static int check_node_compat_prefix(struct dt_descriptor *dt, int offs,
752 				    const char *prefix)
753 {
754 	const size_t prefix_len = strlen(prefix);
755 	size_t l;
756 	int plen;
757 	const char *prop;
758 
759 	prop = fdt_getprop(dt->blob, offs, "compatible", &plen);
760 	if (!prop)
761 		return -1;
762 
763 	while (plen > 0) {
764 		if (memcmp(prop, prefix, prefix_len) == 0)
765 			return 0; /* match */
766 
767 		l = strlen(prop) + 1;
768 		prop += l;
769 		plen -= l;
770 	}
771 
772 	return -1;
773 }
774 
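/*
 * Set enable-method = "psci" on every arm,cortex-a* CPU node that doesn't
 * already have one. Scanning restarts from the top after each update since
 * node offsets may have changed.
 */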
775 static int dt_add_psci_cpu_enable_methods(struct dt_descriptor *dt)
776 {
777 	int offs = 0;
778 
779 	while (1) {
780 		offs = fdt_next_node(dt->blob, offs, NULL);
781 		if (offs < 0)
782 			break;
783 		if (fdt_getprop(dt->blob, offs, "enable-method", NULL))
784 			continue; /* already set */
785 		if (check_node_compat_prefix(dt, offs, "arm,cortex-a"))
786 			continue; /* no compatible */
787 		if (fdt_setprop_string(dt->blob, offs, "enable-method", "psci"))
788 			return -1;
789 		/* Need to restart scanning as offsets may have changed */
790 		offs = 0;
791 	}
792 	return 0;
793 }
794 
795 static int config_psci(struct dt_descriptor *dt)
796 {
797 	if (dt_add_psci_node(dt))
798 		return -1;
799 	return dt_add_psci_cpu_enable_methods(dt);
800 }
801 #else
802 static int config_psci(struct dt_descriptor *dt __unused)
803 {
804 	return 0;
805 }
806 #endif /*CFG_PSCI_ARM32*/
807 
808 #ifdef CFG_CORE_DYN_SHM
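/*
 * Read one big-endian DT value of @cell_size cells (1 cell = 32-bit,
 * otherwise 64-bit) from @data at *@offs and advance the offset past it.
 */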
809 static uint64_t get_dt_val_and_advance(const void *data, size_t *offs,
810 				       uint32_t cell_size)
811 {
812 	uint64_t rv = 0;
813 
814 	if (cell_size == 1) {
815 		uint32_t v;
816 
817 		memcpy(&v, (const uint8_t *)data + *offs, sizeof(v));
818 		*offs += sizeof(v);
819 		rv = fdt32_to_cpu(v);
820 	} else {
821 		uint64_t v;
822 
823 		memcpy(&v, (const uint8_t *)data + *offs, sizeof(v));
824 		*offs += sizeof(v);
825 		rv = fdt64_to_cpu(v);
826 	}
827 
828 	return rv;
829 }
830 
831 /*
832  * Find all non-secure memory from DT. Memory marked inaccessible by Secure
833  * World is ignored since it cannot be mapped to be used as dynamic shared
834  * memory.
835  */
836 static int get_nsec_memory_helper(void *fdt, struct core_mmu_phys_mem *mem)
837 {
838 	const uint8_t *prop = NULL;
839 	uint64_t a = 0;
840 	uint64_t l = 0;
841 	size_t prop_offs = 0;
842 	size_t prop_len = 0;
843 	int elems_total = 0;
844 	int addr_size = 0;
845 	int len_size = 0;
846 	int offs = 0;
847 	size_t n = 0;
848 	int len = 0;
849 
850 	addr_size = fdt_address_cells(fdt, 0);
851 	if (addr_size < 0)
852 		return 0;
853 
854 	len_size = fdt_size_cells(fdt, 0);
855 	if (len_size < 0)
856 		return 0;
857 
858 	while (true) {
859 		offs = fdt_node_offset_by_prop_value(fdt, offs, "device_type",
860 						     "memory",
861 						     sizeof("memory"));
862 		if (offs < 0)
863 			break;
864 
865 		if (fdt_get_status(fdt, offs) != (DT_STATUS_OK_NSEC |
866 						   DT_STATUS_OK_SEC))
867 			continue;
868 
869 		prop = fdt_getprop(fdt, offs, "reg", &len);
870 		if (!prop)
871 			continue;
872 
873 		prop_len = len;
874 		for (n = 0, prop_offs = 0; prop_offs < prop_len; n++) {
875 			a = get_dt_val_and_advance(prop, &prop_offs, addr_size);
876 			if (prop_offs >= prop_len) {
877 				n--;
878 				break;
879 			}
880 
881 			l = get_dt_val_and_advance(prop, &prop_offs, len_size);
882 			if (mem) {
883 				mem->type = MEM_AREA_DDR_OVERALL;
884 				mem->addr = a;
885 				mem->size = l;
886 				mem++;
887 			}
888 		}
889 
890 		elems_total += n;
891 	}
892 
893 	return elems_total;
894 }
895 
896 static struct core_mmu_phys_mem *get_nsec_memory(void *fdt, size_t *nelems)
897 {
898 	struct core_mmu_phys_mem *mem = NULL;
899 	int elems_total = 0;
900 
901 	elems_total = get_nsec_memory_helper(fdt, NULL);
902 	if (elems_total <= 0)
903 		return NULL;
904 
905 	mem = nex_calloc(elems_total, sizeof(*mem));
906 	if (!mem)
907 		panic();
908 
909 	elems_total = get_nsec_memory_helper(fdt, mem);
910 	assert(elems_total > 0);
911 
912 	*nelems = elems_total;
913 
914 	return mem;
915 }
916 #endif /*CFG_CORE_DYN_SHM*/
917 
918 #ifdef CFG_CORE_RESERVED_SHM
919 static int mark_static_shm_as_reserved(struct dt_descriptor *dt)
920 {
921 	vaddr_t shm_start;
922 	vaddr_t shm_end;
923 
924 	core_mmu_get_mem_by_type(MEM_AREA_NSEC_SHM, &shm_start, &shm_end);
925 	if (shm_start != shm_end)
926 		return add_res_mem_dt_node(dt, "optee_shm",
927 					   virt_to_phys((void *)shm_start),
928 					   shm_end - shm_start);
929 
930 	DMSG("No SHM configured");
931 	return -1;
932 }
933 #endif /*CFG_CORE_RESERVED_SHM*/
934 
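/* Describe the TZDRAM range holding OP-TEE core as a reserved-memory node */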
935 static int mark_tzdram_as_reserved(struct dt_descriptor *dt)
936 {
937 	return add_res_mem_dt_node(dt, "optee_core", CFG_TZDRAM_START,
938 				   CFG_TZDRAM_SIZE);
939 }
940 
941 static void update_external_dt(void)
942 {
943 	struct dt_descriptor *dt = get_external_dt_desc();
944 
945 	if (!dt || !dt->blob)
946 		return;
947 
948 	if (!IS_ENABLED(CFG_CORE_FFA) && add_optee_dt_node(dt))
949 		panic("Failed to add OP-TEE Device Tree node");
950 
951 	if (config_psci(dt))
952 		panic("Failed to config PSCI");
953 
954 #ifdef CFG_CORE_RESERVED_SHM
955 	if (mark_static_shm_as_reserved(dt))
956 		panic("Failed to config non-secure memory");
957 #endif
958 
959 	if (mark_tzdram_as_reserved(dt))
960 		panic("Failed to config secure memory");
961 }
962 #else /*CFG_DT*/
963 static void update_external_dt(void)
964 {
965 }
966 
967 #ifdef CFG_CORE_DYN_SHM
968 static struct core_mmu_phys_mem *get_nsec_memory(void *fdt __unused,
969 						 size_t *nelems __unused)
970 {
971 	return NULL;
972 }
973 #endif /*CFG_CORE_DYN_SHM*/
974 #endif /*!CFG_DT*/
975 
976 #if defined(CFG_CORE_FFA)
977 void *get_manifest_dt(void)
978 {
979 	return manifest_dt;
980 }
981 
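/*
 * Map the manifest DT, saved earlier as a physical address, into
 * MEM_AREA_MANIFEST_DT and validate it.
 */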
982 static void reinit_manifest_dt(void)
983 {
984 	paddr_t pa = (unsigned long)manifest_dt;
985 	void *fdt = NULL;
986 	int ret = 0;
987 
988 	if (!pa) {
989 		EMSG("No manifest DT found");
990 		return;
991 	}
992 
993 	fdt = core_mmu_add_mapping(MEM_AREA_MANIFEST_DT, pa, CFG_DTB_MAX_SIZE);
994 	if (!fdt)
995 		panic("Failed to map manifest DT");
996 
997 	manifest_dt = fdt;
998 
999 	ret = fdt_check_full(fdt, CFG_DTB_MAX_SIZE);
1000 	if (ret < 0) {
1001 		EMSG("Invalid manifest Device Tree at %#lx: error %d", pa, ret);
1002 		panic();
1003 	}
1004 
1005 	IMSG("manifest DT found");
1006 }
1007 
1008 static TEE_Result release_manifest_dt(void)
1009 {
1010 	if (!manifest_dt)
1011 		return TEE_SUCCESS;
1012 
1013 	if (core_mmu_remove_mapping(MEM_AREA_MANIFEST_DT, manifest_dt,
1014 				    CFG_DTB_MAX_SIZE))
1015 		panic("Failed to remove temporary manifest DT mapping");
1016 	manifest_dt = NULL;
1017 
1018 	return TEE_SUCCESS;
1019 }
1020 
1021 boot_final(release_manifest_dt);
1022 #else
1023 void *get_manifest_dt(void)
1024 {
1025 	return NULL;
1026 }
1027 
1028 static void reinit_manifest_dt(void)
1029 {
1030 }
1031 #endif /*CFG_CORE_FFA*/
1032 
1033 #ifdef CFG_CORE_DYN_SHM
1034 static void discover_nsec_memory(void)
1035 {
1036 	struct core_mmu_phys_mem *mem;
1037 	const struct core_mmu_phys_mem *mem_begin = NULL;
1038 	const struct core_mmu_phys_mem *mem_end = NULL;
1039 	size_t nelems;
1040 	void *fdt = get_external_dt();
1041 
1042 	if (fdt) {
1043 		mem = get_nsec_memory(fdt, &nelems);
1044 		if (mem) {
1045 			core_mmu_set_discovered_nsec_ddr(mem, nelems);
1046 			return;
1047 		}
1048 
1049 		DMSG("No non-secure memory found in FDT");
1050 	}
1051 
1052 	mem_begin = phys_ddr_overall_begin;
1053 	mem_end = phys_ddr_overall_end;
1054 	nelems = mem_end - mem_begin;
1055 	if (nelems) {
1056 		/*
1057 		 * Platform cannot use both register_ddr() and the now
1058 		 * deprecated register_dynamic_shm().
1059 		 */
1060 		assert(phys_ddr_overall_compat_begin ==
1061 		       phys_ddr_overall_compat_end);
1062 	} else {
1063 		mem_begin = phys_ddr_overall_compat_begin;
1064 		mem_end = phys_ddr_overall_compat_end;
1065 		nelems = mem_end - mem_begin;
1066 		if (!nelems)
1067 			return;
1068 		DMSG("Warning register_dynamic_shm() is deprecated, please use register_ddr() instead");
1069 	}
1070 
1071 	mem = nex_calloc(nelems, sizeof(*mem));
1072 	if (!mem)
1073 		panic();
1074 
1075 	memcpy(mem, phys_ddr_overall_begin, sizeof(*mem) * nelems);
1076 	core_mmu_set_discovered_nsec_ddr(mem, nelems);
1077 }
1078 #else /*CFG_CORE_DYN_SHM*/
1079 static void discover_nsec_memory(void)
1080 {
1081 }
1082 #endif /*!CFG_CORE_DYN_SHM*/
1083 
1084 #ifdef CFG_NS_VIRTUALIZATION
1085 static TEE_Result virt_init_heap(void)
1086 {
1087 	/* We need to initialize the heap pool for every virtual guest partition */
1088 	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
1089 
1090 	return TEE_SUCCESS;
1091 }
1092 preinit_early(virt_init_heap);
1093 #endif
1094 
1095 void init_tee_runtime(void)
1096 {
1097 #ifndef CFG_WITH_PAGER
1098 	/* Pager initializes TA RAM early */
1099 	core_mmu_init_ta_ram();
1100 #endif
1101 	/*
1102 	 * With virtualization we call this function when creating the
1103 	 * OP-TEE partition instead.
1104 	 */
1105 	if (!IS_ENABLED(CFG_NS_VIRTUALIZATION))
1106 		call_preinitcalls();
1107 	call_initcalls();
1108 
1109 	/*
1110 	 * These two functions use crypto_rng_read() to initialize the
1111 	 * pauth keys. Once call_initcalls() returns we're guaranteed that
1112 	 * crypto_rng_read() is ready to be used.
1113 	 */
1114 	thread_init_core_local_pauth_keys();
1115 	thread_init_thread_pauth_keys();
1116 
1117 	/*
1118 	 * Reinitialize canaries around the stacks with crypto_rng_read().
1119 	 *
1120 	 * TODO: Updating canaries when CFG_NS_VIRTUALIZATION is enabled will
1121 	 * require synchronization between thread_check_canaries() and
1122 	 * thread_update_canaries().
1123 	 */
1124 	if (!IS_ENABLED(CFG_NS_VIRTUALIZATION))
1125 		thread_update_canaries();
1126 }
1127 
1128 static void init_primary(unsigned long pageable_part, unsigned long nsec_entry)
1129 {
1130 	thread_init_core_local_stacks();
1131 	/*
1132 	 * Mask asynchronous exceptions before switching to the thread vector
1133 	 * as the thread handler requires those to be masked while
1134 	 * executing with the temporary stack. The thread subsystem also
1135 	 * asserts that the foreign interrupts are blocked when using most of
1136 	 * its functions.
1137 	 */
1138 	thread_set_exceptions(THREAD_EXCP_ALL);
1139 	primary_save_cntfrq();
1140 	init_vfp_sec();
1141 	/*
1142 	 * Pager: init_runtime() calls thread_kernel_enable_vfp() so we must
1143 	 * set a current thread right now to avoid a chicken-and-egg problem
1144 	 * (thread_init_boot_thread() sets the current thread but needs
1145 	 * things set by init_runtime()).
1146 	 */
1147 	thread_get_core_local()->curr_thread = 0;
1148 	init_runtime(pageable_part);
1149 
1150 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
1151 		/*
1152 		 * Virtualization: We can't initialize threads right now because
1153 		 * threads belong to the "tee" part and will be initialized
1154 		 * separately for each new virtual guest. So, we'll clear
1155 		 * "curr_thread" and call it done.
1156 		 */
1157 		thread_get_core_local()->curr_thread = -1;
1158 	} else {
1159 		thread_init_boot_thread();
1160 	}
1161 	thread_init_primary();
1162 	thread_init_per_cpu();
1163 	init_sec_mon(nsec_entry);
1164 }
1165 
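/* Return true if the core runs with non-maskable FIQ (SCTLR.NMFI) enabled */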
1166 static bool cpu_nmfi_enabled(void)
1167 {
1168 #if defined(ARM32)
1169 	return read_sctlr() & SCTLR_NMFI;
1170 #else
1171 	/* Note: ARM64 does not feature non-maskable FIQ support. */
1172 	return false;
1173 #endif
1174 }
1175 
1176 /*
1177  * Note: this function is weak just to make it possible to exclude it from
1178  * the unpaged area.
1179  */
1180 void __weak boot_init_primary_late(unsigned long fdt __unused,
1181 				   unsigned long manifest __unused)
1182 {
1183 	size_t fdt_size = CFG_DTB_MAX_SIZE;
1184 
1185 	if (IS_ENABLED(CFG_TRANSFER_LIST) && mapped_tl) {
1186 		struct transfer_list_entry *tl_e = NULL;
1187 
1188 		tl_e = transfer_list_find(mapped_tl, TL_TAG_FDT);
1189 		if (tl_e)
1190 			fdt_size = tl_e->data_size;
1191 	}
1192 
1193 	init_external_dt(boot_arg_fdt, fdt_size);
1194 	reinit_manifest_dt();
1195 #ifdef CFG_CORE_SEL1_SPMC
1196 	tpm_map_log_area(get_manifest_dt());
1197 #else
1198 	tpm_map_log_area(get_external_dt());
1199 #endif
1200 	discover_nsec_memory();
1201 	update_external_dt();
1202 	configure_console_from_dt();
1203 
1204 	IMSG("OP-TEE version: %s", core_v_str);
1205 	if (IS_ENABLED(CFG_INSECURE)) {
1206 		IMSG("WARNING: This OP-TEE configuration might be insecure!");
1207 		IMSG("WARNING: Please check https://optee.readthedocs.io/en/latest/architecture/porting_guidelines.html");
1208 	}
1209 	IMSG("Primary CPU initializing");
1210 #ifdef CFG_CORE_ASLR
1211 	DMSG("Executing at offset %#lx with virtual load address %#"PRIxVA,
1212 	     (unsigned long)boot_mmu_config.map_offset, VCORE_START_VA);
1213 #endif
1214 	if (IS_ENABLED(CFG_MEMTAG))
1215 		DMSG("Memory tagging %s",
1216 		     memtag_is_enabled() ?  "enabled" : "disabled");
1217 
1218 	/* Check if platform needs NMFI workaround */
1219 	if (cpu_nmfi_enabled())	{
1220 		if (!IS_ENABLED(CFG_CORE_WORKAROUND_ARM_NMFI))
1221 			IMSG("WARNING: This ARM core has NMFI enabled, please apply workaround!");
1222 	} else {
1223 		if (IS_ENABLED(CFG_CORE_WORKAROUND_ARM_NMFI))
1224 			IMSG("WARNING: This ARM core does not have NMFI enabled, no need for workaround");
1225 	}
1226 
1227 	boot_primary_init_intc();
1228 	init_vfp_nsec();
1229 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
1230 		IMSG("Initializing virtualization support");
1231 		core_mmu_init_virtualization();
1232 	} else {
1233 		init_tee_runtime();
1234 	}
1235 	call_finalcalls();
1236 	IMSG("Primary CPU switching to normal world boot");
1237 }
1238 
1239 static void init_secondary_helper(unsigned long nsec_entry)
1240 {
1241 	IMSG("Secondary CPU %zu initializing", get_core_pos());
1242 
1243 	/*
1244 	 * Mask asynchronous exceptions before switching to the thread vector
1245 	 * as the thread handler requires those to be masked while
1246 	 * executing with the temporary stack. The thread subsystem also
1247 	 * asserts that the foreign interrupts are blocked when using most of
1248 	 * its functions.
1249 	 */
1250 	thread_set_exceptions(THREAD_EXCP_ALL);
1251 
1252 	secondary_init_cntfrq();
1253 	thread_init_per_cpu();
1254 	init_sec_mon(nsec_entry);
1255 	boot_secondary_init_intc();
1256 	init_vfp_sec();
1257 	init_vfp_nsec();
1258 
1259 	IMSG("Secondary CPU %zu switching to normal world boot", get_core_pos());
1260 }
1261 
1262 /*
1263  * Note: this function is weak just to make it possible to exclude it from
1264  * the unpaged area so that it lies in the init area.
1265  */
1266 void __weak boot_init_primary_early(void)
1267 {
1268 	unsigned long pageable_part = 0;
1269 	unsigned long e = PADDR_INVALID;
1270 	struct transfer_list_entry *tl_e = NULL;
1271 
1272 	if (!IS_ENABLED(CFG_WITH_ARM_TRUSTED_FW))
1273 		e = boot_arg_nsec_entry;
1274 
1275 	if (IS_ENABLED(CFG_TRANSFER_LIST) && boot_arg_transfer_list) {
1276 		/* map and save the TL */
1277 		mapped_tl = transfer_list_map(boot_arg_transfer_list);
1278 		if (!mapped_tl)
1279 			panic("Failed to map transfer list");
1280 
1281 		transfer_list_dump(mapped_tl);
1282 		tl_e = transfer_list_find(mapped_tl, TL_TAG_FDT);
1283 		if (tl_e) {
1284 			/*
1285 			 * Expand the data size of the DTB entry to the maximum
1286 			 * allocable mapped memory to reserve sufficient space
1287 			 * for inserting new nodes, avoiding potential corruption
1288 			 * of subsequent entries.
1289 			 */
1290 			uint32_t dtb_max_sz = mapped_tl->max_size -
1291 					      mapped_tl->size + tl_e->data_size;
1292 
1293 			if (!transfer_list_set_data_size(mapped_tl, tl_e,
1294 							 dtb_max_sz)) {
1295 				EMSG("Failed to extend DTB size to %#"PRIx32,
1296 				     dtb_max_sz);
1297 				panic();
1298 			}
1299 		}
1300 		tl_e = transfer_list_find(mapped_tl, TL_TAG_OPTEE_PAGABLE_PART);
1301 	}
1302 
1303 	if (IS_ENABLED(CFG_WITH_PAGER)) {
1304 		if (IS_ENABLED(CFG_TRANSFER_LIST) && tl_e)
1305 			pageable_part =
1306 				get_le64(transfer_list_entry_data(tl_e));
1307 		else
1308 			pageable_part = boot_arg_pageable_part;
1309 	}
1310 
1311 	init_primary(pageable_part, e);
1312 }
1313 
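/*
 * Sanity check the transfer list passed by the previous boot stage (register
 * convention, alignment, header, matching FDT entry) and record its address
 * for later mapping in boot_init_primary_early().
 */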
1314 static void boot_save_transfer_list(unsigned long zero_reg,
1315 				    unsigned long transfer_list,
1316 				    unsigned long fdt)
1317 {
1318 	struct transfer_list_header *tl = (void *)transfer_list;
1319 	struct transfer_list_entry *tl_e = NULL;
1320 
1321 	if (zero_reg != 0)
1322 		panic("Incorrect transfer list register convention");
1323 
1324 	if (!IS_ALIGNED_WITH_TYPE(transfer_list, struct transfer_list_header) ||
1325 	    !IS_ALIGNED(transfer_list, TL_ALIGNMENT_FROM_ORDER(tl->alignment)))
1326 		panic("Transfer list base address is not aligned");
1327 
1328 	if (transfer_list_check_header(tl) == TL_OPS_NONE)
1329 		panic("Invalid transfer list");
1330 
1331 	tl_e = transfer_list_find(tl, TL_TAG_FDT);
1332 	if (fdt != (unsigned long)transfer_list_entry_data(tl_e))
1333 		panic("DT does not match to the DT entry of the TL");
1334 
1335 	boot_arg_transfer_list = transfer_list;
1336 }
1337 
1338 #if defined(CFG_WITH_ARM_TRUSTED_FW)
1339 unsigned long boot_cpu_on_handler(unsigned long a0 __maybe_unused,
1340 				  unsigned long a1 __unused)
1341 {
1342 	init_secondary_helper(PADDR_INVALID);
1343 	return 0;
1344 }
1345 #else
1346 void boot_init_secondary(unsigned long nsec_entry)
1347 {
1348 	init_secondary_helper(nsec_entry);
1349 }
1350 #endif
1351 
1352 #if defined(CFG_BOOT_SECONDARY_REQUEST)
1353 void boot_set_core_ns_entry(size_t core_idx, uintptr_t entry,
1354 			    uintptr_t context_id)
1355 {
1356 	ns_entry_contexts[core_idx].entry_point = entry;
1357 	ns_entry_contexts[core_idx].context_id = context_id;
1358 	dsb_ishst();
1359 }
1360 
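/*
 * Release a held secondary core: publish its normal world entry point, set
 * its spin-table flag and sev() to wake it from wfe() in boot_core_hpen().
 */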
1361 int boot_core_release(size_t core_idx, paddr_t entry)
1362 {
1363 	if (!core_idx || core_idx >= CFG_TEE_CORE_NB_CORE)
1364 		return -1;
1365 
1366 	ns_entry_contexts[core_idx].entry_point = entry;
1367 	dmb();
1368 	spin_table[core_idx] = 1;
1369 	dsb();
1370 	sev();
1371 
1372 	return 0;
1373 }
1374 
1375 /*
1376  * Spin until a secondary boot request arrives, then return the
1377  * secondary core entry context.
1378  */
1379 struct ns_entry_context *boot_core_hpen(void)
1380 {
1381 #ifdef CFG_PSCI_ARM32
1382 	return &ns_entry_contexts[get_core_pos()];
1383 #else
1384 	do {
1385 		wfe();
1386 	} while (!spin_table[get_core_pos()]);
1387 	dmb();
1388 	return &ns_entry_contexts[get_core_pos()];
1389 #endif
1390 }
1391 #endif
1392 
1393 #if defined(CFG_CORE_ASLR)
1394 #if defined(CFG_DT)
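/*
 * Read the 64-bit kaslr-seed property from the /secure-chosen node of the
 * boot FDT, falling back to plat_get_aslr_seed() if it isn't available.
 */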
1395 unsigned long __weak get_aslr_seed(void)
1396 {
1397 	void *fdt = NULL;
1398 	int rc = 0;
1399 	const uint64_t *seed = NULL;
1400 	int offs = 0;
1401 	int len = 0;
1402 
1403 	if (!IS_ENABLED(CFG_CORE_SEL2_SPMC))
1404 		fdt = (void *)boot_arg_fdt;
1405 
1406 	if (!fdt) {
1407 		DMSG("No fdt");
1408 		goto err;
1409 	}
1410 
1411 	rc = fdt_check_header(fdt);
1412 	if (rc) {
1413 		DMSG("Bad fdt: %d", rc);
1414 		goto err;
1415 	}
1416 
1417 	offs = fdt_path_offset(fdt, "/secure-chosen");
1418 	if (offs < 0) {
1419 		DMSG("Cannot find /secure-chosen");
1420 		goto err;
1421 	}
1422 	seed = fdt_getprop(fdt, offs, "kaslr-seed", &len);
1423 	if (!seed || len != sizeof(*seed)) {
1424 		DMSG("Cannot find valid kaslr-seed");
1425 		goto err;
1426 	}
1427 
1428 	return fdt64_to_cpu(*seed);
1429 
1430 err:
1431 	/* Try platform implementation */
1432 	return plat_get_aslr_seed();
1433 }
1434 #else /*!CFG_DT*/
1435 unsigned long __weak get_aslr_seed(void)
1436 {
1437 	/* Try platform implementation */
1438 	return plat_get_aslr_seed();
1439 }
1440 #endif /*!CFG_DT*/
1441 #endif /*CFG_CORE_ASLR*/
1442 
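/*
 * Extract and validate the manifest DT address from the FF-A boot
 * information blob passed by an earlier boot stage.
 */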
1443 static void *get_fdt_from_boot_info(struct ffa_boot_info_header_1_1 *hdr)
1444 {
1445 	struct ffa_boot_info_1_1 *desc = NULL;
1446 	uint8_t content_fmt = 0;
1447 	uint8_t name_fmt = 0;
1448 	void *fdt = NULL;
1449 	int ret = 0;
1450 
1451 	if (hdr->signature != FFA_BOOT_INFO_SIGNATURE) {
1452 		EMSG("Bad boot info signature %#"PRIx32, hdr->signature);
1453 		panic();
1454 	}
1455 	if (hdr->version != FFA_BOOT_INFO_VERSION) {
1456 		EMSG("Bad boot info version %#"PRIx32, hdr->version);
1457 		panic();
1458 	}
1459 	if (hdr->desc_count != 1) {
1460 		EMSG("Bad boot info descriptor count %#"PRIx32,
1461 		     hdr->desc_count);
1462 		panic();
1463 	}
1464 	desc = (void *)((vaddr_t)hdr + hdr->desc_offset);
1465 	name_fmt = desc->flags & FFA_BOOT_INFO_FLAG_NAME_FORMAT_MASK;
1466 	if (name_fmt == FFA_BOOT_INFO_FLAG_NAME_FORMAT_STRING)
1467 		DMSG("Boot info descriptor name \"%16s\"", desc->name);
1468 	else if (name_fmt == FFA_BOOT_INFO_FLAG_NAME_FORMAT_UUID)
1469 		DMSG("Boot info descriptor UUID %pUl", (void *)desc->name);
1470 	else
1471 		DMSG("Boot info descriptor: unknown name format %"PRIu8,
1472 		     name_fmt);
1473 
1474 	content_fmt = (desc->flags & FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_MASK) >>
1475 		      FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_SHIFT;
1476 	if (content_fmt != FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_ADDR) {
1477 		EMSG("Bad boot info content format %"PRIu8", expected %u (address)",
1478 		     content_fmt, FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_ADDR);
1479 		panic();
1480 	}
1481 
1482 	fdt = (void *)(vaddr_t)desc->contents;
1483 	ret = fdt_check_full(fdt, desc->size);
1484 	if (ret < 0) {
1485 		EMSG("Invalid Device Tree at %p: error %d", fdt, ret);
1486 		panic();
1487 	}
1488 	return fdt;
1489 }
1490 
1491 static void get_sec_mem_from_manifest(void *fdt, paddr_t *base, size_t *size)
1492 {
1493 	int ret = 0;
1494 	uint64_t num = 0;
1495 
1496 	ret = fdt_node_check_compatible(fdt, 0, "arm,ffa-manifest-1.0");
1497 	if (ret < 0) {
1498 		EMSG("Invalid FF-A manifest at %p: error %d", fdt, ret);
1499 		panic();
1500 	}
1501 	ret = dt_getprop_as_number(fdt, 0, "load-address", &num);
1502 	if (ret < 0) {
1503 		EMSG("Can't read \"load-address\" from FF-A manifest at %p: error %d",
1504 		     fdt, ret);
1505 		panic();
1506 	}
1507 	*base = num;
1508 	/* "mem-size" is currently an undocumented extension to the spec. */
1509 	ret = dt_getprop_as_number(fdt, 0, "mem-size", &num);
1510 	if (ret < 0) {
1511 		EMSG("Can't read \"mem-size\" from FF-A manifest at %p: error %d",
1512 		     fdt, ret);
1513 		panic();
1514 	}
1515 	*size = num;
1516 }
1517 
1518 void __weak boot_save_args(unsigned long a0, unsigned long a1,
1519 			   unsigned long a2, unsigned long a3,
1520 			   unsigned long a4 __maybe_unused)
1521 {
1522 	/*
1523 	 * Register use:
1524 	 *
1525 	 * Scenario A: Default arguments
1526 	 * a0   - CFG_CORE_FFA=y && CFG_CORE_SEL2_SPMC=n:
1527 	 *        if non-NULL holds the TOS FW config [1] address
1528 	 *      - CFG_CORE_FFA=y &&
1529 		  (CFG_CORE_SEL2_SPMC=y || CFG_CORE_EL3_SPMC=y):
1530 	 *        (CFG_CORE_SEL2_SPMC=y || CFG_CORE_EL3_SPMC=y):
1531 	 *      - CFG_CORE_FFA=n:
1532 	 *        if non-NULL holds the pagable part address
1533 	 *        if non-NULL holds the pageable part address
1534 	 *	  Armv7 standard bootarg #1 (kept track of in entry_a32.S)
1535 	 * a2   - CFG_CORE_SEL2_SPMC=n:
1536 	 *        if non-NULL holds the system DTB address
1537 	 *	- CFG_WITH_ARM_TRUSTED_FW=n (Armv7):
1538 	 *	  Armv7 standard bootarg #2 (system DTB address, kept track
1539 	 *	  of in entry_a32.S)
1540 	 * a3	- Not used
1541 	 * a4	- CFG_WITH_ARM_TRUSTED_FW=n:
1542 	 *	  Non-secure entry address
1543 	 *
1544 	 * [1] A TF-A concept: TOS_FW_CONFIG - Trusted OS Firmware
1545 	 * configuration file. Used by Trusted OS (BL32), that is, OP-TEE
1546 	 * here. This is also called Manifest DT, related to the Manifest DT
1547 	 * passed in the FF-A Boot Information Blob, but with a different
1548 	 * compatible string.
1549 	 *
1550 	 * Scenario B: FW Handoff via Transfer List
1551 	 * Note: FF-A and non-secure entry are not yet supported with
1552 	 *       Transfer List
1553 	 * a0	- DTB address or 0 (AArch64)
1554 	 *	- must be 0 (AArch32)
1555 	 * a1	- TRANSFER_LIST_SIGNATURE | REG_CONVENTION_VER_MASK
1556 	 * a2	- must be 0 (AArch64)
1557 	 *	- DTB address or 0 (AArch32)
1558 	 * a3	- Transfer list base address
1559 	 * a4	- Not used
1560 	 */
1561 
1562 	if (IS_ENABLED(CFG_TRANSFER_LIST) &&
1563 	    a1 == (TRANSFER_LIST_SIGNATURE | REG_CONVENTION_VER_MASK)) {
1564 		if (IS_ENABLED(CFG_ARM64_core)) {
1565 			boot_save_transfer_list(a2, a3, a0);
1566 			boot_arg_fdt = a0;
1567 		} else {
1568 			boot_save_transfer_list(a0, a3, a2);
1569 			boot_arg_fdt = a2;
1570 		}
1571 		return;
1572 	}
1573 
1574 	if (!IS_ENABLED(CFG_CORE_SEL2_SPMC)) {
1575 #if defined(CFG_DT_ADDR)
1576 		boot_arg_fdt = CFG_DT_ADDR;
1577 #else
1578 		boot_arg_fdt = a2;
1579 #endif
1580 	}
1581 
1582 	if (IS_ENABLED(CFG_CORE_FFA)) {
1583 		if (IS_ENABLED(CFG_CORE_SEL2_SPMC) ||
1584 		    IS_ENABLED(CFG_CORE_EL3_SPMC))
1585 			manifest_dt = get_fdt_from_boot_info((void *)a0);
1586 		else
1587 			manifest_dt = (void *)a0;
1588 		if (IS_ENABLED(CFG_CORE_SEL2_SPMC) &&
1589 		    IS_ENABLED(CFG_CORE_PHYS_RELOCATABLE)) {
1590 			paddr_t base = 0;
1591 			size_t size = 0;
1592 
1593 			get_sec_mem_from_manifest(manifest_dt, &base, &size);
1594 			core_mmu_set_secure_memory(base, size);
1595 		}
1596 	} else {
1597 		if (IS_ENABLED(CFG_WITH_PAGER)) {
1598 #if defined(CFG_PAGEABLE_ADDR)
1599 			boot_arg_pageable_part = CFG_PAGEABLE_ADDR;
1600 #else
1601 			boot_arg_pageable_part = a0;
1602 #endif
1603 		}
1604 		if (!IS_ENABLED(CFG_WITH_ARM_TRUSTED_FW)) {
1605 #if defined(CFG_NS_ENTRY_ADDR)
1606 			boot_arg_nsec_entry = CFG_NS_ENTRY_ADDR;
1607 #else
1608 			boot_arg_nsec_entry = a4;
1609 #endif
1610 		}
1611 	}
1612 }
1613 
1614 #if defined(CFG_TRANSFER_LIST)
1615 static TEE_Result release_transfer_list(void)
1616 {
1617 	struct dt_descriptor *dt = get_external_dt_desc();
1618 
1619 	if (!mapped_tl)
1620 		return TEE_SUCCESS;
1621 
1622 	if (dt) {
1623 		int ret = 0;
1624 		struct transfer_list_entry *tl_e = NULL;
1625 
1626 		/*
1627 		 * Pack the DTB and update the transfer list before unmapping
1628 		 */
1629 		ret = fdt_pack(dt->blob);
1630 		if (ret < 0) {
1631 			EMSG("Failed to pack Device Tree at 0x%" PRIxPA
1632 			     ": error %d", virt_to_phys(dt->blob), ret);
1633 			panic();
1634 		}
1635 
1636 		tl_e = transfer_list_find(mapped_tl, TL_TAG_FDT);
1637 		assert(dt->blob == transfer_list_entry_data(tl_e));
1638 		transfer_list_set_data_size(mapped_tl, tl_e,
1639 					    fdt_totalsize(dt->blob));
1640 		dt->blob = NULL;
1641 	}
1642 
1643 	transfer_list_unmap_sync(mapped_tl);
1644 	mapped_tl = NULL;
1645 
1646 	return TEE_SUCCESS;
1647 }
1648 
1649 boot_final(release_transfer_list);
1650 #endif
1651