xref: /optee_os/core/arch/arm/kernel/boot.c (revision 62caa4d4f8a585269031e92a587cbdb349d10f15)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2015-2023, Linaro Limited
4  * Copyright (c) 2023, Arm Limited
5  * Copyright (c) 2025, NVIDIA Corporation & AFFILIATES.
6  */
7 
8 #include <arm.h>
9 #include <assert.h>
10 #include <compiler.h>
11 #include <config.h>
12 #include <console.h>
13 #include <crypto/crypto.h>
14 #include <drivers/gic.h>
15 #include <dt-bindings/interrupt-controller/arm-gic.h>
16 #include <ffa.h>
17 #include <initcall.h>
18 #include <inttypes.h>
19 #include <io.h>
20 #include <keep.h>
21 #include <kernel/asan.h>
22 #include <kernel/boot.h>
23 #include <kernel/dt.h>
24 #include <kernel/linker.h>
25 #include <kernel/misc.h>
26 #include <kernel/panic.h>
27 #include <kernel/tee_misc.h>
28 #include <kernel/thread.h>
29 #include <kernel/tpm.h>
30 #include <kernel/transfer_list.h>
31 #include <libfdt.h>
32 #include <malloc.h>
33 #include <memtag.h>
34 #include <mm/core_memprot.h>
35 #include <mm/core_mmu.h>
36 #include <mm/fobj.h>
37 #include <mm/page_alloc.h>
38 #include <mm/phys_mem.h>
39 #include <mm/tee_mm.h>
40 #include <mm/tee_pager.h>
41 #include <sm/psci.h>
42 #include <stdalign.h>
43 #include <trace.h>
44 #include <utee_defines.h>
45 #include <util.h>
46 
47 #include <platform_config.h>
48 
49 #if !defined(CFG_WITH_ARM_TRUSTED_FW)
50 #include <sm/sm.h>
51 #endif
52 
53 #if defined(CFG_WITH_VFP)
54 #include <kernel/vfp.h>
55 #endif
56 
57 /*
58  * In this file we're using unsigned long to represent physical pointers as
59  * they are received in a single register when OP-TEE is initially entered.
60  * This limits 32-bit systems to only make use of the lower 32 bits
61  * of a physical address for initial parameters.
62  *
63  * 64-bit systems on the other hand can use full 64-bit physical pointers.
64  */
65 #define PADDR_INVALID		ULONG_MAX
66 
67 #if defined(CFG_BOOT_SECONDARY_REQUEST)
68 struct ns_entry_context {
69 	uintptr_t entry_point;
70 	uintptr_t context_id;
71 };
72 struct ns_entry_context ns_entry_contexts[CFG_TEE_CORE_NB_CORE];
73 static uint32_t spin_table[CFG_TEE_CORE_NB_CORE];
74 #endif
75 
76 #ifdef CFG_BOOT_SYNC_CPU
77 /*
78  * Array used when booting, to synchronize the CPUs.
79  * When 0, the CPU has not started.
80  * When 1, it has started.
81  */
82 uint32_t sem_cpu_sync[CFG_TEE_CORE_NB_CORE];
83 DECLARE_KEEP_PAGER(sem_cpu_sync);
84 #endif
85 
86 /*
87  * Must not be in .bss since it's initialized and used from assembly before
88  * .bss is cleared.
89  */
90 vaddr_t boot_cached_mem_end __nex_data = 1;
91 
92 static unsigned long boot_arg_fdt __nex_bss;
93 unsigned long boot_arg_nsec_entry __nex_bss;
94 static unsigned long boot_arg_pageable_part __nex_bss;
95 static unsigned long boot_arg_transfer_list __nex_bss;
96 static struct transfer_list_header *mapped_tl __nex_bss;
97 
98 #ifdef CFG_SECONDARY_INIT_CNTFRQ
99 static uint32_t cntfrq;
100 #endif
101 
102 /* May be overridden in plat-$(PLATFORM)/main.c */
103 __weak void plat_primary_init_early(void)
104 {
105 }
106 DECLARE_KEEP_PAGER(plat_primary_init_early);
107 
108 /* May be overridden in plat-$(PLATFORM)/main.c */
109 __weak void boot_primary_init_intc(void)
110 {
111 }
112 
113 /* May be overridden in plat-$(PLATFORM)/main.c */
114 __weak void boot_secondary_init_intc(void)
115 {
116 }
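/*
 * Illustrative sketch only (not part of this file): a platform would
 * typically override the weak interrupt controller hooks above in
 * plat-$(PLATFORM)/main.c, for instance to probe a GIC. GICC_BASE and
 * GICD_BASE are hypothetical platform defines:
 *
 *	void boot_primary_init_intc(void)
 *	{
 *		gic_init(GICC_BASE, GICD_BASE);
 *	}
 */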
117 
118 /* May be overridden in plat-$(PLATFORM)/main.c */
119 __weak unsigned long plat_get_aslr_seed(void)
120 {
121 	DMSG("Warning: no ASLR seed");
122 
123 	return 0;
124 }
125 
126 /*
127  * This function is called as a guard after each SMC call that is not
128  * supposed to return.
129  */
130 void __panic_at_smc_return(void)
131 {
132 	panic();
133 }
134 
135 #if defined(CFG_WITH_ARM_TRUSTED_FW)
136 void init_sec_mon(unsigned long nsec_entry __maybe_unused)
137 {
138 	assert(nsec_entry == PADDR_INVALID);
139 	/* Do nothing as we don't have a secure monitor */
140 }
141 #else
142 /* May be overridden in plat-$(PLATFORM)/main.c */
143 __weak void init_sec_mon(unsigned long nsec_entry)
144 {
145 	struct sm_nsec_ctx *nsec_ctx;
146 
147 	assert(nsec_entry != PADDR_INVALID);
148 
149 	/* Initialize secure monitor */
150 	nsec_ctx = sm_get_nsec_ctx();
151 	nsec_ctx->mon_lr = nsec_entry;
152 	nsec_ctx->mon_spsr = CPSR_MODE_SVC | CPSR_I;
153 	if (nsec_entry & 1)
154 		nsec_ctx->mon_spsr |= CPSR_T;
155 }
156 #endif
157 
158 #if defined(CFG_WITH_ARM_TRUSTED_FW)
159 static void init_vfp_nsec(void)
160 {
161 }
162 #else
163 static void init_vfp_nsec(void)
164 {
165 	/* Normal world can use CP10 and CP11 (SIMD/VFP) */
166 	write_nsacr(read_nsacr() | NSACR_CP10 | NSACR_CP11);
167 }
168 #endif
169 
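/*
 * Panic unless every Crypto Extension instruction set enabled in the
 * configuration (CFG_CRYPTO_*_ARM_CE) is implemented by this CPU.
 */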
170 static void check_crypto_extensions(void)
171 {
172 	bool ce_supported = true;
173 
174 	if (!feat_aes_implemented() &&
175 	    IS_ENABLED(CFG_CRYPTO_AES_ARM_CE)) {
176 		EMSG("AES instructions are not supported");
177 		ce_supported = false;
178 	}
179 
180 	if (!feat_sha1_implemented() &&
181 	    IS_ENABLED(CFG_CRYPTO_SHA1_ARM_CE)) {
182 		EMSG("SHA1 instructions are not supported");
183 		ce_supported = false;
184 	}
185 
186 	if (!feat_sha256_implemented() &&
187 	    IS_ENABLED(CFG_CRYPTO_SHA256_ARM_CE)) {
188 		EMSG("SHA256 instructions are not supported");
189 		ce_supported = false;
190 	}
191 
192 	/* Check aarch64 specific instructions */
193 	if (IS_ENABLED(CFG_ARM64_core)) {
194 		if (!feat_sha512_implemented() &&
195 		    IS_ENABLED(CFG_CRYPTO_SHA512_ARM_CE)) {
196 			EMSG("SHA512 instructions are not supported");
197 			ce_supported = false;
198 		}
199 
200 		if (!feat_sha3_implemented() &&
201 		    IS_ENABLED(CFG_CRYPTO_SHA3_ARM_CE)) {
202 			EMSG("SHA3 instructions are not supported");
203 			ce_supported = false;
204 		}
205 
206 		if (!feat_sm3_implemented() &&
207 		    IS_ENABLED(CFG_CRYPTO_SM3_ARM_CE)) {
208 			EMSG("SM3 instructions are not supported");
209 			ce_supported = false;
210 		}
211 
212 		if (!feat_sm4_implemented() &&
213 		    IS_ENABLED(CFG_CRYPTO_SM4_ARM_CE)) {
214 			EMSG("SM4 instructions are not supported");
215 			ce_supported = false;
216 		}
217 	}
218 
219 	if (!ce_supported)
220 		panic("HW doesn't support CE instructions");
221 }
222 
223 #if defined(CFG_WITH_VFP)
224 
225 #ifdef ARM32
226 static void init_vfp_sec(void)
227 {
228 	uint32_t cpacr = read_cpacr();
229 
230 	/*
231 	 * Enable Advanced SIMD functionality.
232 	 * Enable use of D16-D31 of the Floating-point Extension register
233 	 * file.
234 	 */
235 	cpacr &= ~(CPACR_ASEDIS | CPACR_D32DIS);
236 	/*
237 	 * Enable usage of CP10 and CP11 (SIMD/VFP) in both kernel and user
238 	 * mode.
239 	 */
240 	cpacr |= CPACR_CP(10, CPACR_CP_ACCESS_FULL);
241 	cpacr |= CPACR_CP(11, CPACR_CP_ACCESS_FULL);
242 	write_cpacr(cpacr);
243 }
244 #endif /* ARM32 */
245 
246 #ifdef ARM64
247 static void init_vfp_sec(void)
248 {
249 	/* Not using VFP until thread_kernel_enable_vfp() */
250 	vfp_disable();
251 }
252 #endif /* ARM64 */
253 
254 #else /* CFG_WITH_VFP */
255 
256 static void init_vfp_sec(void)
257 {
258 	/* Not using VFP */
259 }
260 #endif
261 
262 #ifdef CFG_SECONDARY_INIT_CNTFRQ
263 static void primary_save_cntfrq(void)
264 {
265 	assert(cntfrq == 0);
266 
267 	/*
268 	 * CNTFRQ should be initialized on the primary CPU by a
269 	 * previous boot stage
270 	 */
271 	cntfrq = read_cntfrq();
272 }
273 
274 static void secondary_init_cntfrq(void)
275 {
276 	assert(cntfrq != 0);
277 	write_cntfrq(cntfrq);
278 }
279 #else /* CFG_SECONDARY_INIT_CNTFRQ */
280 static void primary_save_cntfrq(void)
281 {
282 }
283 
284 static void secondary_init_cntfrq(void)
285 {
286 }
287 #endif
288 
289 #ifdef CFG_CORE_SANITIZE_KADDRESS
290 static void init_run_constructors(void)
291 {
292 	const vaddr_t *ctor;
293 
294 	for (ctor = &__ctor_list; ctor < &__ctor_end; ctor++)
295 		((void (*)(void))(*ctor))();
296 }
297 
298 static void init_asan(void)
299 {
300 
301 	/*
302 	 * CFG_ASAN_SHADOW_OFFSET is also supplied as
303 	 * -fasan-shadow-offset=$(CFG_ASAN_SHADOW_OFFSET) to the compiler.
304 	 * Since all the values needed to calculate CFG_ASAN_SHADOW_OFFSET
305 	 * aren't available to make, it has to be calculated in advance and
306 	 * hard coded into the platform conf.mk. Here, where all the needed
307 	 * values are available, we double check that the compiler was
308 	 * supplied the correct value.
309 	 */
310 
311 #define __ASAN_SHADOW_START \
312 	ROUNDUP(TEE_RAM_START + (TEE_RAM_VA_SIZE * 8) / 9 - 8, 8)
313 	assert(__ASAN_SHADOW_START == (vaddr_t)&__asan_shadow_start);
314 #define __CFG_ASAN_SHADOW_OFFSET \
315 	(__ASAN_SHADOW_START - (TEE_RAM_START / 8))
316 	COMPILE_TIME_ASSERT(CFG_ASAN_SHADOW_OFFSET == __CFG_ASAN_SHADOW_OFFSET);
317 #undef __ASAN_SHADOW_START
318 #undef __CFG_ASAN_SHADOW_OFFSET
319 
320 	/*
321 	 * Assign area covered by the shadow area, everything from start up
322 	 * to the beginning of the shadow area.
323 	 */
324 	asan_set_shadowed((void *)TEE_LOAD_ADDR, &__asan_shadow_start);
325 
326 	/*
327 	 * Add access to areas that aren't opened automatically by a
328 	 * constructor.
329 	 */
330 	boot_mem_init_asan();
331 	asan_tag_access(&__ctor_list, &__ctor_end);
332 	asan_tag_access(__rodata_start, __rodata_end);
333 #ifdef CFG_WITH_PAGER
334 	asan_tag_access(__pageable_start, __pageable_end);
335 #endif /*CFG_WITH_PAGER*/
336 	asan_tag_access(__nozi_start, __nozi_end);
337 #ifdef ARM32
338 	asan_tag_access(__exidx_start, __exidx_end);
339 	asan_tag_access(__extab_start, __extab_end);
340 #endif
341 
342 	init_run_constructors();
343 
344 	/* Everything is tagged correctly, let's start address sanitizing. */
345 	asan_start();
346 }
347 #else /*CFG_CORE_SANITIZE_KADDRESS*/
348 static void init_asan(void)
349 {
350 }
351 #endif /*CFG_CORE_SANITIZE_KADDRESS*/
352 
353 #if defined(CFG_MEMTAG)
354 /* Called from entry_a64.S only when MEMTAG is configured */
355 void boot_init_memtag(void)
356 {
357 	memtag_init_ops(feat_mte_implemented());
358 }
359 
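/*
 * core_mmu_for_each_map() callback: zero the MTE allocation tags of the
 * covered core RAM areas so that later tagged accesses start from a
 * known state.
 */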
360 static TEE_Result mmap_clear_memtag(struct tee_mmap_region *map,
361 				    void *ptr __unused)
362 {
363 	switch (map->type) {
364 	case MEM_AREA_NEX_RAM_RO:
365 	case MEM_AREA_SEC_RAM_OVERALL:
366 		DMSG("Clearing tags for VA %#"PRIxVA"..%#"PRIxVA,
367 		     map->va, map->va + map->size - 1);
368 		memtag_set_tags((void *)map->va, map->size, 0);
369 		break;
370 	default:
371 		break;
372 	}
373 
374 	return TEE_SUCCESS;
375 }
376 
377 /* Called from entry_a64.S only when MEMTAG is configured */
378 void boot_clear_memtag(void)
379 {
380 	core_mmu_for_each_map(NULL, mmap_clear_memtag);
381 }
382 #endif
383 
384 #ifdef CFG_WITH_PAGER
385 
386 #ifdef CFG_CORE_SANITIZE_KADDRESS
387 static void carve_out_asan_mem(void)
388 {
389 	nex_phys_mem_partial_carve_out(ASAN_MAP_PA, ASAN_MAP_SZ);
390 }
391 #else
392 static void carve_out_asan_mem(void)
393 {
394 }
395 #endif
396 
397 static void print_pager_pool_size(void)
398 {
399 	struct tee_pager_stats __maybe_unused stats;
400 
401 	tee_pager_get_stats(&stats);
402 	IMSG("Pager pool size: %zukB",
403 		stats.npages_all * SMALL_PAGE_SIZE / 1024);
404 }
405 
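/*
 * Initialize the core virtual memory pool used below for the pager
 * alias area and the paged regions.
 */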
406 static void init_virt_pool(tee_mm_pool_t *virt_pool)
407 {
408 	const vaddr_t begin = VCORE_START_VA;
409 	size_t size = TEE_RAM_VA_SIZE;
410 
411 #ifdef CFG_CORE_SANITIZE_KADDRESS
412 	/* Carve out ASAN memory, flat mapped after core memory */
413 	if (begin + size > ASAN_SHADOW_PA)
414 		size = ASAN_MAP_PA - begin;
415 #endif
416 
417 	if (!tee_mm_init(virt_pool, begin, size, SMALL_PAGE_SHIFT,
418 			 TEE_MM_POOL_NO_FLAGS))
419 		panic("core_virt_mem_pool init failed");
420 }
421 
422 /*
423  * With CFG_CORE_ASLR=y the init part is relocated very early during boot.
424  * The init part is also paged just as the rest of the normal paged code, with
425  * the difference that it's preloaded during boot. When the backing store
426  * is configured the entire paged binary is copied in place and then also
427  * the init part. Since the init part has been relocated (references to
428  * addresses updated to compensate for the new load address) this has to be
429  * undone for the hashes of those pages to match with the original binary.
430  *
431  * If CFG_CORE_ASLR=n, nothing needs to be done as the code/ro pages are
432  * unchanged.
433  */
434 static void undo_init_relocation(uint8_t *paged_store __maybe_unused)
435 {
436 #ifdef CFG_CORE_ASLR
437 	unsigned long *ptr = NULL;
438 	const uint32_t *reloc = NULL;
439 	const uint32_t *reloc_end = NULL;
440 	unsigned long offs = boot_mmu_config.map_offset;
441 	const struct boot_embdata *embdata = (const void *)__init_end;
442 	vaddr_t addr_end = (vaddr_t)__init_end - offs - TEE_LOAD_ADDR;
443 	vaddr_t addr_start = (vaddr_t)__init_start - offs - TEE_LOAD_ADDR;
444 
445 	reloc = (const void *)((vaddr_t)embdata + embdata->reloc_offset);
446 	reloc_end = reloc + embdata->reloc_len / sizeof(*reloc);
447 
448 	for (; reloc < reloc_end; reloc++) {
449 		if (*reloc < addr_start)
450 			continue;
451 		if (*reloc >= addr_end)
452 			break;
453 		ptr = (void *)(paged_store + *reloc - addr_start);
454 		*ptr -= offs;
455 	}
456 #endif
457 }
458 
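/*
 * Allocate the read-only paged fobj backing the pageable area. With
 * CFG_CORE_ASLR the relocation entries are passed along so that pages
 * can be relocated again after their hashes have been verified.
 */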
459 static struct fobj *ro_paged_alloc(tee_mm_entry_t *mm, void *hashes,
460 				   void *store)
461 {
462 	const unsigned int num_pages = tee_mm_get_bytes(mm) / SMALL_PAGE_SIZE;
463 #ifdef CFG_CORE_ASLR
464 	unsigned int reloc_offs = (vaddr_t)__pageable_start - VCORE_START_VA;
465 	const struct boot_embdata *embdata = (const void *)__init_end;
466 	const void *reloc = __init_end + embdata->reloc_offset;
467 
468 	return fobj_ro_reloc_paged_alloc(num_pages, hashes, reloc_offs,
469 					 reloc, embdata->reloc_len, store);
470 #else
471 	return fobj_ro_paged_alloc(num_pages, hashes, store);
472 #endif
473 }
474 
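/*
 * Set up the pager: copy the pageable part into its backing store,
 * verify the SHA-256 hash of every page and hand the physical pages
 * over to the pager.
 */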
475 static void init_pager_runtime(unsigned long pageable_part)
476 {
477 	size_t n;
478 	size_t init_size = (size_t)(__init_end - __init_start);
479 	size_t pageable_start = (size_t)__pageable_start;
480 	size_t pageable_end = (size_t)__pageable_end;
481 	size_t pageable_size = pageable_end - pageable_start;
482 	vaddr_t tzsram_end = TZSRAM_BASE + TZSRAM_SIZE - TEE_LOAD_ADDR +
483 			     VCORE_START_VA;
484 	size_t hash_size = (pageable_size / SMALL_PAGE_SIZE) *
485 			   TEE_SHA256_HASH_SIZE;
486 	const struct boot_embdata *embdata = (const void *)__init_end;
487 	const void *tmp_hashes = NULL;
488 	tee_mm_entry_t *mm = NULL;
489 	struct fobj *fobj = NULL;
490 	uint8_t *paged_store = NULL;
491 	uint8_t *hashes = NULL;
492 
493 	assert(pageable_size % SMALL_PAGE_SIZE == 0);
494 	assert(embdata->total_len >= embdata->hashes_offset +
495 				     embdata->hashes_len);
496 	assert(hash_size == embdata->hashes_len);
497 
498 	tmp_hashes = __init_end + embdata->hashes_offset;
499 
500 	/*
501 	 * This needs to be initialized early to support address lookup
502 	 * in MEM_AREA_TEE_RAM
503 	 */
504 	tee_pager_early_init();
505 
506 	hashes = malloc(hash_size);
507 	IMSG_RAW("\n");
508 	IMSG("Pager is enabled. Hashes: %zu bytes", hash_size);
509 	assert(hashes);
510 	asan_memcpy_unchecked(hashes, tmp_hashes, hash_size);
511 
512 	/*
513 	 * The pager is about to be enabled below, any temporary boot
514 	 * memory allocations must be removed now.
515 	 */
516 	boot_mem_release_tmp_alloc();
517 
518 	carve_out_asan_mem();
519 
520 	mm = nex_phys_mem_ta_alloc(pageable_size);
521 	assert(mm);
522 	paged_store = phys_to_virt(tee_mm_get_smem(mm),
523 				   MEM_AREA_SEC_RAM_OVERALL, pageable_size);
524 	/*
525 	 * Load pageable part in the dedicated allocated area:
526 	 * - Move the pageable non-init part into the pageable area. Note that
527 	 *   the bootloader may have loaded it anywhere in TA RAM, hence the
528 	 *   use of memmove().
528 	 * - Copy pageable init part from current location into pageable area.
529 	 */
530 	memmove(paged_store + init_size,
531 		phys_to_virt(pageable_part,
532 			     core_mmu_get_type_by_pa(pageable_part),
533 			     __pageable_part_end - __pageable_part_start),
534 		__pageable_part_end - __pageable_part_start);
535 	asan_memcpy_unchecked(paged_store, __init_start, init_size);
536 	/*
537 	 * Undo eventual relocation for the init part so the hash checks
538 	 * can pass.
539 	 */
540 	undo_init_relocation(paged_store);
541 
542 	/* Check that the hashes of what's in the pageable area are OK */
543 	DMSG("Checking hashes of pageable area");
544 	for (n = 0; (n * SMALL_PAGE_SIZE) < pageable_size; n++) {
545 		const uint8_t *hash = hashes + n * TEE_SHA256_HASH_SIZE;
546 		const uint8_t *page = paged_store + n * SMALL_PAGE_SIZE;
547 		TEE_Result res;
548 
549 		DMSG("hash pg_idx %zu hash %p page %p", n, hash, page);
550 		res = hash_sha256_check(hash, page, SMALL_PAGE_SIZE);
551 		if (res != TEE_SUCCESS) {
552 			EMSG("Hash failed for page %zu at %p: res 0x%x",
553 			     n, (void *)page, res);
554 			panic();
555 		}
556 	}
557 
558 	/*
559 	 * Assert that the prepaged init sections are page aligned so that
560 	 * nothing trails uninitialized at the end of the premapped init area.
561 	 */
562 	assert(!(init_size & SMALL_PAGE_MASK));
563 
564 	/*
565 	 * Initialize the virtual memory pool used for main_mmu_l2_ttb which
566 	 * is supplied to tee_pager_init() below.
567 	 */
568 	init_virt_pool(&core_virt_mem_pool);
569 
570 	/*
571 	 * Assign the alias area for the pager at the end of the small page
572 	 * block that the rest of the binary is loaded into. We're taking
573 	 * more than needed, but we're guaranteed to not need more than the
574 	 * physical amount of TZSRAM.
575 	 */
576 	mm = tee_mm_alloc2(&core_virt_mem_pool,
577 			   (vaddr_t)core_virt_mem_pool.lo +
578 			   core_virt_mem_pool.size - TZSRAM_SIZE,
579 			   TZSRAM_SIZE);
580 	assert(mm);
581 	tee_pager_set_alias_area(mm);
582 
583 	/*
584 	 * Claim virtual memory which isn't paged.
585 	 * Linear memory (flat map core memory) ends there.
586 	 */
587 	mm = tee_mm_alloc2(&core_virt_mem_pool, VCORE_UNPG_RX_PA,
588 			   (vaddr_t)(__pageable_start - VCORE_UNPG_RX_PA));
589 	assert(mm);
590 
591 	/*
592 	 * Allocate virtual memory for the pageable area and let the pager
593 	 * take charge of all the pages already assigned to that memory.
594 	 */
595 	mm = tee_mm_alloc2(&core_virt_mem_pool, (vaddr_t)__pageable_start,
596 			   pageable_size);
597 	assert(mm);
598 	fobj = ro_paged_alloc(mm, hashes, paged_store);
599 	assert(fobj);
600 	tee_pager_add_core_region(tee_mm_get_smem(mm), PAGED_REGION_TYPE_RO,
601 				  fobj);
602 	fobj_put(fobj);
603 
604 	tee_pager_add_pages(pageable_start, init_size / SMALL_PAGE_SIZE, false);
605 	tee_pager_add_pages(pageable_start + init_size,
606 			    (pageable_size - init_size) / SMALL_PAGE_SIZE,
607 			    true);
608 	if (pageable_end < tzsram_end)
609 		tee_pager_add_pages(pageable_end, (tzsram_end - pageable_end) /
610 						   SMALL_PAGE_SIZE, true);
611 
612 	/*
613 	 * There may be physical pages in TZSRAM before the core load address.
614 	 * These pages can be added to the physical pages pool of the pager.
615 	 * This setup may happen when the secure bootloader runs in TZSRAM
616 	 * and its memory can be reused by OP-TEE once boot stages complete.
617 	 */
618 	tee_pager_add_pages(core_virt_mem_pool.lo,
619 			    (VCORE_UNPG_RX_PA - core_virt_mem_pool.lo) /
620 				SMALL_PAGE_SIZE,
621 			    true);
622 
623 	print_pager_pool_size();
624 }
625 #else /*!CFG_WITH_PAGER*/
626 static void init_pager_runtime(unsigned long pageable_part __unused)
627 {
628 }
629 #endif
630 
631 #if defined(CFG_DT)
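/*
 * Add a /firmware/optee node to the non-secure device tree so the
 * normal world can discover OP-TEE (SMC calling method and optional
 * asynchronous notification interrupt).
 */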
632 static int add_optee_dt_node(struct dt_descriptor *dt)
633 {
634 	int offs;
635 	int ret;
636 
637 	if (fdt_path_offset(dt->blob, "/firmware/optee") >= 0) {
638 		DMSG("OP-TEE Device Tree node already exists!");
639 		return 0;
640 	}
641 
642 	offs = fdt_path_offset(dt->blob, "/firmware");
643 	if (offs < 0) {
644 		offs = add_dt_path_subnode(dt, "/", "firmware");
645 		if (offs < 0)
646 			return -1;
647 	}
648 
649 	offs = fdt_add_subnode(dt->blob, offs, "optee");
650 	if (offs < 0)
651 		return -1;
652 
653 	ret = fdt_setprop_string(dt->blob, offs, "compatible",
654 				 "linaro,optee-tz");
655 	if (ret < 0)
656 		return -1;
657 	ret = fdt_setprop_string(dt->blob, offs, "method", "smc");
658 	if (ret < 0)
659 		return -1;
660 
661 	if (CFG_CORE_ASYNC_NOTIF_GIC_INTID) {
662 		/*
663 		 * The format of the interrupt property is defined by the
664 		 * binding of the interrupt domain root. In this case it's
665 		 * one Arm GIC v1, v2 or v3 so we must be compatible with
666 		 * these.
667 		 *
668 		 * An SPI type of interrupt is indicated with a 0 in the
669 		 * first cell. A PPI type is indicated with value 1.
670 		 *
671 		 * The interrupt number goes in the second cell where
672 		 * SPIs range from 0 to 987 and PPIs range from 0 to 15.
673 		 *
674 		 * Flags are passed in the third cell.
675 		 */
676 		uint32_t itr_trigger = 0;
677 		uint32_t itr_type = 0;
678 		uint32_t itr_id = 0;
679 		uint32_t val[3] = { };
680 
681 		/* PPI are visible only in current CPU cluster */
682 		static_assert(IS_ENABLED(CFG_CORE_FFA) ||
683 			      !CFG_CORE_ASYNC_NOTIF_GIC_INTID ||
684 			      (CFG_CORE_ASYNC_NOTIF_GIC_INTID >=
685 			       GIC_SPI_BASE) ||
686 			      ((CFG_TEE_CORE_NB_CORE <= 8) &&
687 			       (CFG_CORE_ASYNC_NOTIF_GIC_INTID >=
688 				GIC_PPI_BASE)));
689 
690 		if (CFG_CORE_ASYNC_NOTIF_GIC_INTID >= GIC_SPI_BASE) {
691 			itr_type = GIC_SPI;
692 			itr_id = CFG_CORE_ASYNC_NOTIF_GIC_INTID - GIC_SPI_BASE;
693 			itr_trigger = IRQ_TYPE_EDGE_RISING;
694 		} else {
695 			itr_type = GIC_PPI;
696 			itr_id = CFG_CORE_ASYNC_NOTIF_GIC_INTID - GIC_PPI_BASE;
697 			itr_trigger = IRQ_TYPE_EDGE_RISING |
698 				      GIC_CPU_MASK_SIMPLE(CFG_TEE_CORE_NB_CORE);
699 		}
700 
701 		val[0] = TEE_U32_TO_BIG_ENDIAN(itr_type);
702 		val[1] = TEE_U32_TO_BIG_ENDIAN(itr_id);
703 		val[2] = TEE_U32_TO_BIG_ENDIAN(itr_trigger);
704 
705 		ret = fdt_setprop(dt->blob, offs, "interrupts", val,
706 				  sizeof(val));
707 		if (ret < 0)
708 			return -1;
709 	}
710 	return 0;
711 }
712 
713 #ifdef CFG_PSCI_ARM32
714 static int append_psci_compatible(void *fdt, int offs, const char *str)
715 {
716 	return fdt_appendprop(fdt, offs, "compatible", str, strlen(str) + 1);
717 }
718 
719 static int dt_add_psci_node(struct dt_descriptor *dt)
720 {
721 	int offs;
722 
723 	if (fdt_path_offset(dt->blob, "/psci") >= 0) {
724 		DMSG("PSCI Device Tree node already exists!");
725 		return 0;
726 	}
727 
728 	offs = add_dt_path_subnode(dt, "/", "psci");
729 	if (offs < 0)
730 		return -1;
731 	if (append_psci_compatible(dt->blob, offs, "arm,psci-1.0"))
732 		return -1;
733 	if (append_psci_compatible(dt->blob, offs, "arm,psci-0.2"))
734 		return -1;
735 	if (append_psci_compatible(dt->blob, offs, "arm,psci"))
736 		return -1;
737 	if (fdt_setprop_string(dt->blob, offs, "method", "smc"))
738 		return -1;
739 	if (fdt_setprop_u32(dt->blob, offs, "cpu_suspend", PSCI_CPU_SUSPEND))
740 		return -1;
741 	if (fdt_setprop_u32(dt->blob, offs, "cpu_off", PSCI_CPU_OFF))
742 		return -1;
743 	if (fdt_setprop_u32(dt->blob, offs, "cpu_on", PSCI_CPU_ON))
744 		return -1;
745 	if (fdt_setprop_u32(dt->blob, offs, "sys_poweroff", PSCI_SYSTEM_OFF))
746 		return -1;
747 	if (fdt_setprop_u32(dt->blob, offs, "sys_reset", PSCI_SYSTEM_RESET))
748 		return -1;
749 	return 0;
750 }
751 
752 static int check_node_compat_prefix(struct dt_descriptor *dt, int offs,
753 				    const char *prefix)
754 {
755 	const size_t prefix_len = strlen(prefix);
756 	size_t l;
757 	int plen;
758 	const char *prop;
759 
760 	prop = fdt_getprop(dt->blob, offs, "compatible", &plen);
761 	if (!prop)
762 		return -1;
763 
764 	while (plen > 0) {
765 		if (memcmp(prop, prefix, prefix_len) == 0)
766 			return 0; /* match */
767 
768 		l = strlen(prop) + 1;
769 		prop += l;
770 		plen -= l;
771 	}
772 
773 	return -1;
774 }
775 
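/*
 * Set enable-method = "psci" on every Cortex-A CPU node that doesn't
 * already define an enable method.
 */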
776 static int dt_add_psci_cpu_enable_methods(struct dt_descriptor *dt)
777 {
778 	int offs = 0;
779 
780 	while (1) {
781 		offs = fdt_next_node(dt->blob, offs, NULL);
782 		if (offs < 0)
783 			break;
784 		if (fdt_getprop(dt->blob, offs, "enable-method", NULL))
785 			continue; /* already set */
786 		if (check_node_compat_prefix(dt, offs, "arm,cortex-a"))
787 			continue; /* no compatible */
788 		if (fdt_setprop_string(dt->blob, offs, "enable-method", "psci"))
789 			return -1;
790 		/* Need to restart scanning as offsets may have changed */
791 		offs = 0;
792 	}
793 	return 0;
794 }
795 
796 static int config_psci(struct dt_descriptor *dt)
797 {
798 	if (dt_add_psci_node(dt))
799 		return -1;
800 	return dt_add_psci_cpu_enable_methods(dt);
801 }
802 #else
803 static int config_psci(struct dt_descriptor *dt __unused)
804 {
805 	return 0;
806 }
807 #endif /*CFG_PSCI_ARM32*/
808 
809 static int mark_tzdram_as_reserved(struct dt_descriptor *dt)
810 {
811 	return add_res_mem_dt_node(dt, "optee_core", CFG_TZDRAM_START,
812 				   CFG_TZDRAM_SIZE);
813 }
814 
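/*
 * Patch the non-secure device tree: add the OP-TEE and PSCI nodes and
 * mark the memory used by OP-TEE as reserved.
 */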
815 static void update_external_dt(void)
816 {
817 	struct dt_descriptor *dt = get_external_dt_desc();
818 
819 	if (!dt || !dt->blob)
820 		return;
821 
822 	if (!IS_ENABLED(CFG_CORE_FFA) && add_optee_dt_node(dt))
823 		panic("Failed to add OP-TEE Device Tree node");
824 
825 	if (config_psci(dt))
826 		panic("Failed to config PSCI");
827 
828 #ifdef CFG_CORE_RESERVED_SHM
829 	if (mark_static_shm_as_reserved(dt))
830 		panic("Failed to config non-secure memory");
831 #endif
832 
833 	if (mark_tzdram_as_reserved(dt))
834 		panic("Failed to config secure memory");
835 }
836 #else /*CFG_DT*/
837 static void update_external_dt(void)
838 {
839 }
840 #endif /*!CFG_DT*/
841 
842 void init_tee_runtime(void)
843 {
844 	/*
845 	 * With virtualization we call this function when creating the
846 	 * OP-TEE partition instead.
847 	 */
848 	if (!IS_ENABLED(CFG_NS_VIRTUALIZATION))
849 		call_preinitcalls();
850 	call_early_initcalls();
851 	call_service_initcalls();
852 
853 	/*
854 	 * These two functions use crypto_rng_read() to initialize the
855 	 * pauth keys. Once call_initcalls() returns we're guaranteed that
856 	 * crypto_rng_read() is ready to be used.
857 	 */
858 	thread_init_core_local_pauth_keys();
859 	thread_init_thread_pauth_keys();
860 
861 	/*
862 	 * Reinitialize canaries around the stacks with crypto_rng_read().
863 	 *
864 	 * TODO: Updating canaries when CFG_NS_VIRTUALIZATION is enabled will
865 	 * require synchronization between thread_check_canaries() and
866 	 * thread_update_canaries().
867 	 */
868 	if (!IS_ENABLED(CFG_NS_VIRTUALIZATION))
869 		thread_update_canaries();
870 }
871 
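/*
 * boot_mem_foreach_padding() callback: feed otherwise unused boot
 * memory ranges into the (nexus) heap.
 */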
872 static bool add_padding_to_pool(vaddr_t va, size_t len, void *ptr __unused)
873 {
874 #ifdef CFG_NS_VIRTUALIZATION
875 	nex_malloc_add_pool((void *)va, len);
876 #else
877 	malloc_add_pool((void *)va, len);
878 #endif
879 	return true;
880 }
881 
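/*
 * Early primary CPU initialization: mask exceptions, set up the heap,
 * finalize the memory map and, with CFG_WITH_PAGER, bring up the pager.
 */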
882 static void init_primary(unsigned long pageable_part)
883 {
884 	vaddr_t va = 0;
885 
886 	/*
887 	 * Mask asynchronous exceptions before switching to the thread vector
888 	 * as the thread handler requires those to be masked while
889 	 * executing with the temporary stack. The thread subsystem also
890 	 * asserts that the foreign interrupts are blocked when using most of
891 	 * its functions.
892 	 */
893 	thread_set_exceptions(THREAD_EXCP_ALL);
894 	primary_save_cntfrq();
895 	init_vfp_sec();
896 
897 	if (IS_ENABLED(CFG_CRYPTO_WITH_CE))
898 		check_crypto_extensions();
899 
900 	init_asan();
901 
902 	/*
903 	 * By default the whole of OP-TEE uses malloc, so we need to
904 	 * initialize it early. But when virtualization is enabled, malloc
905 	 * is used only by the TEE runtime, so malloc should be initialized
906 	 * later, for each virtual partition separately. Core code uses
907 	 * nex_malloc instead.
908 	 */
909 #ifdef CFG_WITH_PAGER
910 	/* Add heap2 first as heap1 may be too small as initial bget pool */
911 	malloc_add_pool(__heap2_start, __heap2_end - __heap2_start);
912 #endif
913 #ifdef CFG_NS_VIRTUALIZATION
914 	nex_malloc_add_pool(__nex_heap_start, __nex_heap_end -
915 					      __nex_heap_start);
916 #else
917 	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
918 #endif
919 	IMSG_RAW("\n");
920 	if (IS_ENABLED(CFG_DYN_CONFIG)) {
921 		size_t sz = sizeof(struct thread_core_local) *
922 			    CFG_TEE_CORE_NB_CORE;
923 		void *p = boot_mem_alloc(sz, alignof(void *) * 2);
924 
925 #ifdef CFG_NS_VIRTUALIZATION
926 		nex_malloc_add_pool(p, sz);
927 #else
928 		malloc_add_pool(p, sz);
929 #endif
930 	}
931 
932 	core_mmu_save_mem_map();
933 	core_mmu_init_phys_mem();
934 	boot_mem_foreach_padding(add_padding_to_pool, NULL);
935 	va = boot_mem_release_unused();
936 	if (!IS_ENABLED(CFG_WITH_PAGER)) {
937 		/*
938 		 * We must update boot_cached_mem_end to reflect the memory
939 		 * just unmapped by boot_mem_release_unused().
940 		 */
941 		assert(va && va <= boot_cached_mem_end);
942 		boot_cached_mem_end = va;
943 	}
944 
945 	if (IS_ENABLED(CFG_DYN_CONFIG)) {
946 		/*
947 		 * This is needed to enable virt_page_alloc() now that
948 		 * boot_mem_alloc() can't be used any longer.
949 		 */
950 		if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
951 			nex_page_alloc_init();
952 		else
953 			page_alloc_init();
954 	}
955 
956 	if (IS_ENABLED(CFG_WITH_PAGER)) {
957 		/*
958 		 * Pager: init_runtime() calls thread_kernel_enable_vfp()
959 		 * so we must set a current thread right now to avoid a
960 		 * chicken-and-egg problem (thread_init_boot_thread() sets
961 		 * the current thread but needs things set by
962 		 * init_runtime()).
963 		 */
964 		thread_get_core_local()->curr_thread = 0;
965 		init_pager_runtime(pageable_part);
966 	}
967 
968 	/* Initialize canaries around the stacks */
969 	thread_init_canaries();
970 	thread_init_per_cpu();
971 }
972 
973 static bool cpu_nmfi_enabled(void)
974 {
975 #if defined(ARM32)
976 	return read_sctlr() & SCTLR_NMFI;
977 #else
978 	/* Note: ARM64 does not feature non-maskable FIQ support. */
979 	return false;
980 #endif
981 }
982 
983 /*
984  * Note: this function is weak just to make it possible to exclude it from
985  * the unpaged area.
986  */
987 void __weak boot_init_primary_late(unsigned long fdt __unused,
988 				   unsigned long manifest __unused)
989 {
990 	size_t fdt_size = CFG_DTB_MAX_SIZE;
991 
992 	if (IS_ENABLED(CFG_TRANSFER_LIST) && mapped_tl) {
993 		struct transfer_list_entry *tl_e = NULL;
994 
995 		tl_e = transfer_list_find(mapped_tl, TL_TAG_FDT);
996 		if (tl_e) {
997 			/*
998 			 * Expand the data size of the DTB entry to the maximum
999 			 * allocable mapped memory to reserve sufficient space
1000 			 * for inserting new nodes and to avoid potentially
1001 			 * corrupting the next entries.
1002 			 */
1003 			uint32_t dtb_max_sz = mapped_tl->max_size -
1004 					      mapped_tl->size + tl_e->data_size;
1005 
1006 			if (!transfer_list_set_data_size(mapped_tl, tl_e,
1007 							 dtb_max_sz)) {
1008 				EMSG("Failed to extend DTB size to %#"PRIx32,
1009 				     dtb_max_sz);
1010 				panic();
1011 			}
1012 			fdt_size = tl_e->data_size;
1013 		}
1014 	}
1015 
1016 	init_external_dt(boot_arg_fdt, fdt_size);
1017 	reinit_manifest_dt();
1018 #ifdef CFG_CORE_FFA
1019 	tpm_map_log_area(get_manifest_dt());
1020 #else
1021 	tpm_map_log_area(get_external_dt());
1022 #endif
1023 	discover_nsec_memory();
1024 	update_external_dt();
1025 	configure_console_from_dt();
1026 
1027 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
1028 		/*
1029 		 * Virtualization: We can't initialize threads right now because
1030 		 * threads belong to "tee" part and will be initialized
1031 		 * threads belong to the "tee" part and will be initialized
1032 		 * separately for each new virtual guest. So, we'll clear
1033 		 */
1034 		thread_get_core_local()->curr_thread = -1;
1035 	} else {
1036 		thread_init_threads(CFG_NUM_THREADS);
1037 		thread_init_boot_thread();
1038 	}
1039 	thread_init_thread_core_local(CFG_TEE_CORE_NB_CORE);
1040 }
1041 
1042 void __weak boot_init_primary_runtime(void)
1043 {
1044 	thread_init_primary();
1045 	IMSG("OP-TEE version: %s", core_v_str);
1046 	if (IS_ENABLED(CFG_INSECURE)) {
1047 		IMSG("WARNING: This OP-TEE configuration might be insecure!");
1048 		IMSG("WARNING: Please check https://optee.readthedocs.io/en/latest/architecture/porting_guidelines.html");
1049 	}
1050 	IMSG("Primary CPU initializing");
1051 #ifdef CFG_CORE_ASLR
1052 	DMSG("Executing at offset %#lx with virtual load address %#"PRIxVA,
1053 	     (unsigned long)boot_mmu_config.map_offset, VCORE_START_VA);
1054 #endif
1055 #ifdef CFG_NS_VIRTUALIZATION
1056 	DMSG("NS-virtualization enabled, supporting %u guests",
1057 	     CFG_VIRT_GUEST_COUNT);
1058 #endif
1059 	if (IS_ENABLED(CFG_MEMTAG))
1060 		DMSG("Memory tagging %s",
1061 		     memtag_is_enabled() ?  "enabled" : "disabled");
1062 
1063 	/* Check if platform needs NMFI workaround */
1064 	if (cpu_nmfi_enabled())	{
1065 		if (!IS_ENABLED(CFG_CORE_WORKAROUND_ARM_NMFI))
1066 			IMSG("WARNING: This ARM core has NMFI enabled, please apply workaround!");
1067 	} else {
1068 		if (IS_ENABLED(CFG_CORE_WORKAROUND_ARM_NMFI))
1069 			IMSG("WARNING: This ARM core does not have NMFI enabled, no need for workaround");
1070 	}
1071 
1072 	boot_primary_init_intc();
1073 	init_vfp_nsec();
1074 	if (!IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
1075 		/*
1076 		 * Unmask native interrupts during driver initcalls.
1077 		 *
1078 		 * NS-virtualization still uses the temporary stack also
1079 		 * used for exception handling so it must still have native
1080 		 * interrupts masked.
1081 		 */
1082 		thread_set_exceptions(thread_get_exceptions() &
1083 				      ~THREAD_EXCP_NATIVE_INTR);
1084 		init_tee_runtime();
1085 	}
1086 
1087 	if (!IS_ENABLED(CFG_WITH_PAGER))
1088 		boot_mem_release_tmp_alloc();
1089 }
1090 
1091 void __weak boot_init_primary_final(void)
1092 {
1093 	if (!IS_ENABLED(CFG_NS_VIRTUALIZATION))
1094 		call_driver_initcalls();
1095 
1096 	call_finalcalls();
1097 
1098 	IMSG("Primary CPU switching to normal world boot");
1099 
1100 	/* Mask native interrupts before switching to the normal world */
1101 	if (!IS_ENABLED(CFG_NS_VIRTUALIZATION))
1102 		thread_set_exceptions(thread_get_exceptions() |
1103 				      THREAD_EXCP_NATIVE_INTR);
1104 }
1105 
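/*
 * Common secondary CPU initialization, used both by the
 * CFG_WITH_ARM_TRUSTED_FW CPU_ON path and by boot_init_secondary().
 */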
1106 static void init_secondary_helper(void)
1107 {
1108 	IMSG("Secondary CPU %zu initializing", get_core_pos());
1109 
1110 	/*
1111 	 * Mask asynchronous exceptions before switching to the thread vector
1112 	 * as the thread handler requires those to be masked while
1113 	 * executing with the temporary stack. The thread subsystem also
1114 	 * asserts that the foreign interrupts are blocked when using most of
1115 	 * its functions.
1116 	 */
1117 	thread_set_exceptions(THREAD_EXCP_ALL);
1118 
1119 	secondary_init_cntfrq();
1120 	thread_init_per_cpu();
1121 	boot_secondary_init_intc();
1122 	init_vfp_sec();
1123 	init_vfp_nsec();
1124 
1125 	IMSG("Secondary CPU %zu switching to normal world boot", get_core_pos());
1126 }
1127 
1128 /*
1129  * Note: this function is weak just to make it possible to exclude it from
1130  * the unpaged area so that it lies in the init area.
1131  */
1132 void __weak boot_init_primary_early(void)
1133 {
1134 	unsigned long pageable_part = 0;
1135 	struct transfer_list_entry *tl_e = NULL;
1136 
1137 	if (IS_ENABLED(CFG_TRANSFER_LIST) && boot_arg_transfer_list) {
1138 		/* map and save the TL */
1139 		mapped_tl = transfer_list_map(boot_arg_transfer_list);
1140 		if (!mapped_tl)
1141 			panic("Failed to map transfer list");
1142 
1143 		transfer_list_dump(mapped_tl);
1144 		tl_e = transfer_list_find(mapped_tl, TL_TAG_OPTEE_PAGABLE_PART);
1145 	}
1146 
1147 	if (IS_ENABLED(CFG_WITH_PAGER)) {
1148 		if (IS_ENABLED(CFG_TRANSFER_LIST) && tl_e)
1149 			pageable_part =
1150 				get_le64(transfer_list_entry_data(tl_e));
1151 		else
1152 			pageable_part = boot_arg_pageable_part;
1153 	}
1154 
1155 	init_primary(pageable_part);
1156 }
1157 
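/*
 * Validate the firmware handoff transfer list passed by the previous
 * boot stage and record its address for later mapping.
 */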
1158 static void boot_save_transfer_list(unsigned long zero_reg,
1159 				    unsigned long transfer_list,
1160 				    unsigned long fdt)
1161 {
1162 	struct transfer_list_header *tl = (void *)transfer_list;
1163 	struct transfer_list_entry *tl_e = NULL;
1164 
1165 	if (zero_reg != 0)
1166 		panic("Incorrect transfer list register convention");
1167 
1168 	if (!IS_ALIGNED_WITH_TYPE(transfer_list, struct transfer_list_header) ||
1169 	    !IS_ALIGNED(transfer_list, TL_ALIGNMENT_FROM_ORDER(tl->alignment)))
1170 		panic("Transfer list base address is not aligned");
1171 
1172 	if (transfer_list_check_header(tl) == TL_OPS_NONE)
1173 		panic("Invalid transfer list");
1174 
1175 	tl_e = transfer_list_find(tl, TL_TAG_FDT);
1176 	if (fdt != (unsigned long)transfer_list_entry_data(tl_e))
1177 		panic("DT does not match to the DT entry of the TL");
1178 
1179 	boot_arg_transfer_list = transfer_list;
1180 }
1181 
1182 #if defined(CFG_WITH_ARM_TRUSTED_FW)
1183 unsigned long boot_cpu_on_handler(unsigned long a0 __maybe_unused,
1184 				  unsigned long a1 __unused)
1185 {
1186 	init_secondary_helper();
1187 	return 0;
1188 }
1189 #else
1190 void boot_init_secondary(unsigned long nsec_entry __unused)
1191 {
1192 	init_secondary_helper();
1193 }
1194 #endif
1195 
1196 #if defined(CFG_BOOT_SECONDARY_REQUEST)
1197 void boot_set_core_ns_entry(size_t core_idx, uintptr_t entry,
1198 			    uintptr_t context_id)
1199 {
1200 	ns_entry_contexts[core_idx].entry_point = entry;
1201 	ns_entry_contexts[core_idx].context_id = context_id;
1202 	dsb_ishst();
1203 }
1204 
1205 int boot_core_release(size_t core_idx, paddr_t entry)
1206 {
1207 	if (!core_idx || core_idx >= CFG_TEE_CORE_NB_CORE)
1208 		return -1;
1209 
1210 	ns_entry_contexts[core_idx].entry_point = entry;
1211 	dmb();
1212 	spin_table[core_idx] = 1;
1213 	dsb();
1214 	sev();
1215 
1216 	return 0;
1217 }
1218 
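/*
 * Illustrative sketch only (not part of this file): a platform PSCI
 * cpu_on handler could drive the spin table above roughly like this,
 * with the platform-specific power-up of the physical core omitted:
 *
 *	int psci_cpu_on(uint32_t core_id, uint32_t entry, uint32_t context_id)
 *	{
 *		size_t pos = get_core_pos_mpidr(core_id);
 *
 *		if (!pos || pos >= CFG_TEE_CORE_NB_CORE)
 *			return PSCI_RET_INVALID_PARAMETERS;
 *
 *		boot_set_core_ns_entry(pos, entry, context_id);
 *		if (boot_core_release(pos, entry))
 *			return PSCI_RET_DENIED;
 *
 *		return PSCI_RET_SUCCESS;
 *	}
 */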
1219 /*
1220  * Spin until a secondary boot request arrives, then return with
1221  * the secondary core entry address.
1222  */
1223 struct ns_entry_context *boot_core_hpen(void)
1224 {
1225 #ifdef CFG_PSCI_ARM32
1226 	return &ns_entry_contexts[get_core_pos()];
1227 #else
1228 	do {
1229 		wfe();
1230 	} while (!spin_table[get_core_pos()]);
1231 	dmb();
1232 	return &ns_entry_contexts[get_core_pos()];
1233 #endif
1234 }
1235 #endif
1236 
1237 #if defined(CFG_CORE_ASLR)
1238 #if defined(CFG_DT)
1239 unsigned long __weak get_aslr_seed(void)
1240 {
1241 	void *fdt = NULL;
1242 	int rc = 0;
1243 	const uint64_t *seed = NULL;
1244 	int offs = 0;
1245 	int len = 0;
1246 
1247 	if (!IS_ENABLED(CFG_CORE_SEL2_SPMC))
1248 		fdt = (void *)boot_arg_fdt;
1249 
1250 	if (!fdt) {
1251 		DMSG("No fdt");
1252 		goto err;
1253 	}
1254 
1255 	rc = fdt_check_header(fdt);
1256 	if (rc) {
1257 		DMSG("Bad fdt: %d", rc);
1258 		goto err;
1259 	}
1260 
1261 	offs = fdt_path_offset(fdt, "/secure-chosen");
1262 	if (offs < 0) {
1263 		DMSG("Cannot find /secure-chosen");
1264 		goto err;
1265 	}
1266 	seed = fdt_getprop(fdt, offs, "kaslr-seed", &len);
1267 	if (!seed || len != sizeof(*seed)) {
1268 		DMSG("Cannot find valid kaslr-seed");
1269 		goto err;
1270 	}
1271 
1272 	return fdt64_to_cpu(fdt64_ld(seed));
1273 
1274 err:
1275 	/* Try platform implementation */
1276 	return plat_get_aslr_seed();
1277 }
1278 #else /*!CFG_DT*/
1279 unsigned long __weak get_aslr_seed(void)
1280 {
1281 	/* Try platform implementation */
1282 	return plat_get_aslr_seed();
1283 }
1284 #endif /*!CFG_DT*/
1285 #endif /*CFG_CORE_ASLR*/
1286 
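/*
 * Locate and sanity check the manifest DT referenced by the FF-A boot
 * information blob handed over at boot.
 */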
1287 static void *get_fdt_from_boot_info(struct ffa_boot_info_header_1_1 *hdr)
1288 {
1289 	struct ffa_boot_info_1_1 *desc = NULL;
1290 	uint8_t content_fmt = 0;
1291 	uint8_t name_fmt = 0;
1292 	void *fdt = NULL;
1293 	int ret = 0;
1294 
1295 	if (hdr->signature != FFA_BOOT_INFO_SIGNATURE) {
1296 		EMSG("Bad boot info signature %#"PRIx32, hdr->signature);
1297 		panic();
1298 	}
1299 	if (hdr->version != FFA_BOOT_INFO_VERSION_1_1 &&
1300 	    hdr->version != FFA_BOOT_INFO_VERSION_1_2) {
1301 		EMSG("Bad boot info version %#"PRIx32, hdr->version);
1302 		panic();
1303 	}
1304 	if (hdr->desc_count != 1) {
1305 		EMSG("Bad boot info descriptor count %#"PRIx32,
1306 		     hdr->desc_count);
1307 		panic();
1308 	}
1309 	desc = (void *)((vaddr_t)hdr + hdr->desc_offset);
1310 	name_fmt = desc->flags & FFA_BOOT_INFO_FLAG_NAME_FORMAT_MASK;
1311 	if (name_fmt == FFA_BOOT_INFO_FLAG_NAME_FORMAT_STRING)
1312 		DMSG("Boot info descriptor name \"%16s\"", desc->name);
1313 	else if (name_fmt == FFA_BOOT_INFO_FLAG_NAME_FORMAT_UUID)
1314 		DMSG("Boot info descriptor UUID %pUl", (void *)desc->name);
1315 	else
1316 		DMSG("Boot info descriptor: unknown name format %"PRIu8,
1317 		     name_fmt);
1318 
1319 	content_fmt = (desc->flags & FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_MASK) >>
1320 		      FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_SHIFT;
1321 	if (content_fmt != FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_ADDR) {
1322 		EMSG("Bad boot info content format %"PRIu8", expected %u (address)",
1323 		     content_fmt, FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_ADDR);
1324 		panic();
1325 	}
1326 
1327 	fdt = (void *)(vaddr_t)desc->contents;
1328 	ret = fdt_check_full(fdt, desc->size);
1329 	if (ret < 0) {
1330 		EMSG("Invalid Device Tree at %p: error %d", fdt, ret);
1331 		panic();
1332 	}
1333 	return fdt;
1334 }
1335 
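/*
 * Read the secure memory range ("load-address" and "mem-size") from
 * the FF-A manifest.
 */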
1336 static void get_sec_mem_from_manifest(void *fdt, paddr_t *base,
1337 				      paddr_size_t *size)
1338 {
1339 	int ret = 0;
1340 	uint64_t num = 0;
1341 
1342 	ret = fdt_node_check_compatible(fdt, 0, "arm,ffa-manifest-1.0");
1343 	if (ret < 0) {
1344 		EMSG("Invalid FF-A manifest at %p: error %d", fdt, ret);
1345 		panic();
1346 	}
1347 	ret = dt_getprop_as_number(fdt, 0, "load-address", &num);
1348 	if (ret < 0) {
1349 		EMSG("Can't read \"load-address\" from FF-A manifest at %p: error %d",
1350 		     fdt, ret);
1351 		panic();
1352 	}
1353 	*base = num;
1354 	/* "mem-size" is currently an undocumented extension to the spec. */
1355 	ret = dt_getprop_as_number(fdt, 0, "mem-size", &num);
1356 	if (ret < 0) {
1357 		EMSG("Can't read \"mem-size\" from FF-A manifest at %p: error %d",
1358 		     fdt, ret);
1359 		panic();
1360 	}
1361 	*size = num;
1362 }
1363 
1364 void __weak boot_save_args(unsigned long a0, unsigned long a1,
1365 			   unsigned long a2, unsigned long a3,
1366 			   unsigned long a4 __maybe_unused)
1367 {
1368 	/*
1369 	 * Register use:
1370 	 *
1371 	 * Scenario A: Default arguments
1372 	 * a0   - CFG_CORE_FFA=y && CFG_CORE_SEL2_SPMC=n:
1373 	 *        if non-NULL holds the TOS FW config [1] address
1374 	 *      - CFG_CORE_FFA=y &&
	 *        (CFG_CORE_SEL2_SPMC=y || CFG_CORE_EL3_SPMC=y):
1376 	 *        address of FF-A Boot Information Blob
1377 	 *      - CFG_CORE_FFA=n:
1378 	 *        if non-NULL holds the pagable part address
1379 	 * a1	- CFG_WITH_ARM_TRUSTED_FW=n (Armv7):
1380 	 *	  Armv7 standard bootarg #1 (kept track of in entry_a32.S)
1381 	 * a2   - CFG_CORE_SEL2_SPMC=n:
1382 	 *        if non-NULL holds the system DTB address
1383 	 *	- CFG_WITH_ARM_TRUSTED_FW=n (Armv7):
1384 	 *	  Armv7 standard bootarg #2 (system DTB address, kept track
1385 	 *	  of in entry_a32.S)
1386 	 * a3	- Not used
1387 	 * a4	- CFG_WITH_ARM_TRUSTED_FW=n:
1388 	 *	  Non-secure entry address
1389 	 *
1390 	 * [1] A TF-A concept: TOS_FW_CONFIG - Trusted OS Firmware
1391 	 * configuration file. Used by Trusted OS (BL32), that is, OP-TEE
1392 	 * here. This is also called Manifest DT, related to the Manifest DT
1393 	 * passed in the FF-A Boot Information Blob, but with a different
1394 	 * compatible string.
1395 
1396 	 *
1397 	 * Note: FF-A and non-secure entry are not yet supported with
1398 	 *       Transfer List
1399 	 * a0	- DTB address or 0 (AArch64)
1400 	 *	- must be 0 (AArch32)
1401 	 * a1	- 1 << 32 | TRANSFER_LIST_SIGNATURE[0:31] (AArch64)
1402 	 *	- 1 << 24 | TRANSFER_LIST_SIGNATURE[0:23] (AArch32)
1403 	 * a2	- must be 0 (AArch64)
1404 	 *	- DTB address or 0 (AArch32)
1405 	 * a3	- Transfer list base address
1406 	 * a4	- Not used
1407 	 */
1408 
1409 	if (IS_ENABLED(CFG_TRANSFER_LIST)) {
1410 		if (IS_ENABLED(CFG_ARM64_core) &&
1411 		    a1 == TL_HANDOFF_X1_VALUE(TL_REG_CONVENTION_VER)) {
1412 			boot_save_transfer_list(a2, a3, a0);
1413 			boot_arg_fdt = a0;
1414 		} else if (IS_ENABLED(CFG_ARM32_core) &&
1415 			   a1 == TL_HANDOFF_R1_VALUE(TL_REG_CONVENTION_VER)) {
1416 			boot_save_transfer_list(a0, a3, a2);
1417 			boot_arg_fdt = a2;
1418 		}
1419 
1420 		return;
1421 	}
1422 
1423 	if (!IS_ENABLED(CFG_CORE_SEL2_SPMC)) {
1424 #if defined(CFG_DT_ADDR)
1425 		boot_arg_fdt = CFG_DT_ADDR;
1426 #else
1427 		boot_arg_fdt = a2;
1428 #endif
1429 	}
1430 
1431 	if (IS_ENABLED(CFG_CORE_FFA)) {
1432 		size_t fdt_max_size = CFG_DTB_MAX_SIZE;
1433 		void *fdt = NULL;
1434 
1435 		if (IS_ENABLED(CFG_CORE_SEL2_SPMC) ||
1436 		    IS_ENABLED(CFG_CORE_EL3_SPMC))
1437 			fdt = get_fdt_from_boot_info((void *)a0);
1438 		else
1439 			fdt = (void *)a0;
1440 		if (IS_ENABLED(CFG_CORE_SEL2_SPMC)) {
1441 			paddr_size_t size = 0;
1442 			paddr_t base = 0;
1443 
1444 			if (IS_ENABLED(CFG_CORE_PHYS_RELOCATABLE)) {
1445 				get_sec_mem_from_manifest(fdt, &base, &size);
1446 				core_mmu_set_secure_memory(base, size);
1447 			} else {
1448 				core_mmu_get_secure_memory(&base, &size);
1449 			}
1450 			assert((unsigned long)fdt >= base);
1451 			assert((unsigned long)fdt <= base + size);
1452 			assert((unsigned long)fdt < VCORE_START_VA);
1453 			fdt_max_size = VCORE_START_VA - (unsigned long)fdt;
1454 		}
1455 		init_manifest_dt(fdt, fdt_max_size);
1456 	} else {
1457 		if (IS_ENABLED(CFG_WITH_PAGER)) {
1458 #if defined(CFG_PAGEABLE_ADDR)
1459 			boot_arg_pageable_part = CFG_PAGEABLE_ADDR;
1460 #else
1461 			boot_arg_pageable_part = a0;
1462 #endif
1463 		}
1464 		if (!IS_ENABLED(CFG_WITH_ARM_TRUSTED_FW)) {
1465 #if defined(CFG_NS_ENTRY_ADDR)
1466 			boot_arg_nsec_entry = CFG_NS_ENTRY_ADDR;
1467 #else
1468 			boot_arg_nsec_entry = a4;
1469 #endif
1470 		}
1471 	}
1472 }
1473 
1474 #if defined(CFG_TRANSFER_LIST)
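/*
 * boot_final() hook: repack the external DTB into its transfer list
 * entry and unmap the transfer list before switching to the normal
 * world.
 */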
1475 static TEE_Result release_transfer_list(void)
1476 {
1477 	struct dt_descriptor *dt = get_external_dt_desc();
1478 
1479 	if (!mapped_tl)
1480 		return TEE_SUCCESS;
1481 
1482 	if (dt) {
1483 		int ret = 0;
1484 		struct transfer_list_entry *tl_e = NULL;
1485 
1486 		/*
1487 		 * Pack the DTB and update the transfer list before un-mapping
1488 		 */
1489 		ret = fdt_pack(dt->blob);
1490 		if (ret < 0) {
1491 			EMSG("Failed to pack Device Tree at 0x%" PRIxPA
1492 			     ": error %d", virt_to_phys(dt->blob), ret);
1493 			panic();
1494 		}
1495 
1496 		tl_e = transfer_list_find(mapped_tl, TL_TAG_FDT);
1497 		assert(dt->blob == transfer_list_entry_data(tl_e));
1498 		transfer_list_set_data_size(mapped_tl, tl_e,
1499 					    fdt_totalsize(dt->blob));
1500 		dt->blob = NULL;
1501 	}
1502 
1503 	transfer_list_unmap_sync(mapped_tl);
1504 	mapped_tl = NULL;
1505 
1506 	return TEE_SUCCESS;
1507 }
1508 
1509 boot_final(release_transfer_list);
1510 #endif
1511