xref: /optee_os/core/arch/arm/kernel/boot.c (revision a1d5c81f8834a9d2c6f4372cce2e59e70e709121)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2015-2020, Linaro Limited
4  */
5 
6 #include <arm.h>
7 #include <assert.h>
8 #include <compiler.h>
9 #include <config.h>
10 #include <console.h>
11 #include <crypto/crypto.h>
12 #include <initcall.h>
13 #include <inttypes.h>
14 #include <keep.h>
15 #include <kernel/asan.h>
16 #include <kernel/boot.h>
17 #include <kernel/linker.h>
18 #include <kernel/misc.h>
19 #include <kernel/panic.h>
20 #include <kernel/tee_misc.h>
21 #include <kernel/thread.h>
22 #include <kernel/tpm.h>
23 #include <libfdt.h>
24 #include <malloc.h>
25 #include <mm/core_memprot.h>
26 #include <mm/core_mmu.h>
27 #include <mm/fobj.h>
28 #include <mm/tee_mm.h>
29 #include <mm/tee_mmu.h>
30 #include <mm/tee_pager.h>
31 #include <sm/psci.h>
32 #include <stdio.h>
33 #include <trace.h>
34 #include <utee_defines.h>
35 #include <util.h>
36 
37 #include <platform_config.h>
38 
39 #if !defined(CFG_WITH_ARM_TRUSTED_FW)
40 #include <sm/sm.h>
41 #endif
42 
43 #if defined(CFG_WITH_VFP)
44 #include <kernel/vfp.h>
45 #endif
46 
47 /*
48  * In this file we're using unsigned long to represent physical pointers as
49  * they are received in a single register when OP-TEE is initially entered.
50  * This limits 32-bit systems to only make use of the lower 32 bits
51  * of a physical address for initial parameters.
52  *
53  * 64-bit systems on the other hand can use full 64-bit physical pointers.
54  */
55 #define PADDR_INVALID		ULONG_MAX
56 
57 #if defined(CFG_BOOT_SECONDARY_REQUEST)
58 struct ns_entry_context {
59 	uintptr_t entry_point;
60 	uintptr_t context_id;
61 };
62 struct ns_entry_context ns_entry_contexts[CFG_TEE_CORE_NB_CORE];
63 static uint32_t spin_table[CFG_TEE_CORE_NB_CORE];
64 #endif
65 
66 #ifdef CFG_BOOT_SYNC_CPU
67 /*
68  * Array used during boot to synchronize the CPUs.
69  * When 0, the CPU has not started.
70  * When 1, it has started.
71  */
72 uint32_t sem_cpu_sync[CFG_TEE_CORE_NB_CORE];
73 DECLARE_KEEP_PAGER(sem_cpu_sync);
74 #endif
75 
76 #ifdef CFG_DT
77 struct dt_descriptor {
78 	void *blob;
79 	int frag_id;
80 };
81 
82 static struct dt_descriptor external_dt __nex_bss;
83 #endif
84 
85 #ifdef CFG_SECONDARY_INIT_CNTFRQ
86 static uint32_t cntfrq;
87 #endif
88 
89 /* May be overridden in plat-$(PLATFORM)/main.c */
90 __weak void plat_primary_init_early(void)
91 {
92 }
93 DECLARE_KEEP_PAGER(plat_primary_init_early);
94 
95 /* May be overridden in plat-$(PLATFORM)/main.c */
96 __weak void main_init_gic(void)
97 {
98 }
99 
100 /* May be overridden in plat-$(PLATFORM)/main.c */
101 __weak void main_secondary_init_gic(void)
102 {
103 }
104 
105 #if defined(CFG_WITH_ARM_TRUSTED_FW)
106 void init_sec_mon(unsigned long nsec_entry __maybe_unused)
107 {
108 	assert(nsec_entry == PADDR_INVALID);
109 	/* Do nothing as we don't have a secure monitor */
110 }
111 #else
112 /* May be overridden in plat-$(PLATFORM)/main.c */
113 __weak void init_sec_mon(unsigned long nsec_entry)
114 {
115 	struct sm_nsec_ctx *nsec_ctx;
116 
117 	assert(nsec_entry != PADDR_INVALID);
118 
119 	/* Initialize secure monitor */
120 	nsec_ctx = sm_get_nsec_ctx();
121 	nsec_ctx->mon_lr = nsec_entry;
122 	nsec_ctx->mon_spsr = CPSR_MODE_SVC | CPSR_I;
123 	if (nsec_entry & 1)
124 		nsec_ctx->mon_spsr |= CPSR_T;
125 }
126 #endif
127 
128 #if defined(CFG_WITH_ARM_TRUSTED_FW)
129 static void init_vfp_nsec(void)
130 {
131 }
132 #else
133 static void init_vfp_nsec(void)
134 {
135 	/* Normal world can use CP10 and CP11 (SIMD/VFP) */
136 	write_nsacr(read_nsacr() | NSACR_CP10 | NSACR_CP11);
137 }
138 #endif
139 
140 #if defined(CFG_WITH_VFP)
141 
142 #ifdef ARM32
143 static void init_vfp_sec(void)
144 {
145 	uint32_t cpacr = read_cpacr();
146 
147 	/*
148 	 * Enable Advanced SIMD functionality.
149 	 * Enable use of D16-D31 of the Floating-point Extension register
150 	 * file.
151 	 */
152 	cpacr &= ~(CPACR_ASEDIS | CPACR_D32DIS);
153 	/*
154 	 * Enable usage of CP10 and CP11 (SIMD/VFP) in both kernel and user
155 	 * mode.
156 	 */
157 	cpacr |= CPACR_CP(10, CPACR_CP_ACCESS_FULL);
158 	cpacr |= CPACR_CP(11, CPACR_CP_ACCESS_FULL);
159 	write_cpacr(cpacr);
160 }
161 #endif /* ARM32 */
162 
163 #ifdef ARM64
164 static void init_vfp_sec(void)
165 {
166 	/* Not using VFP until thread_kernel_enable_vfp() */
167 	vfp_disable();
168 }
169 #endif /* ARM64 */
170 
171 #else /* CFG_WITH_VFP */
172 
173 static void init_vfp_sec(void)
174 {
175 	/* Not using VFP */
176 }
177 #endif
178 
179 #ifdef CFG_SECONDARY_INIT_CNTFRQ
180 static void primary_save_cntfrq(void)
181 {
182 	assert(cntfrq == 0);
183 
184 	/*
185 	 * CNTFRQ should be initialized on the primary CPU by a
186 	 * previous boot stage
187 	 */
188 	cntfrq = read_cntfrq();
189 }
190 
191 static void secondary_init_cntfrq(void)
192 {
193 	assert(cntfrq != 0);
194 	write_cntfrq(cntfrq);
195 }
196 #else /* CFG_SECONDARY_INIT_CNTFRQ */
197 static void primary_save_cntfrq(void)
198 {
199 }
200 
201 static void secondary_init_cntfrq(void)
202 {
203 }
204 #endif
205 
206 #ifdef CFG_CORE_SANITIZE_KADDRESS
207 static void init_run_constructors(void)
208 {
209 	const vaddr_t *ctor;
210 
211 	for (ctor = &__ctor_list; ctor < &__ctor_end; ctor++)
212 		((void (*)(void))(*ctor))();
213 }
214 
215 static void init_asan(void)
216 {
217 
218 	/*
219 	 * CFG_ASAN_SHADOW_OFFSET is also supplied as
220 	 * -fasan-shadow-offset=$(CFG_ASAN_SHADOW_OFFSET) to the compiler.
221 	 * Since all the values needed to calculate
222 	 * CFG_ASAN_SHADOW_OFFSET aren't available to make, it has to be
223 	 * calculated in advance and hard coded into the platform
224 	 * conf.mk. Here, where all the needed values are available, we
225 	 * double check that the compiler was supplied the correct value.
226 	 */
227 
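	/*
	 * For reference, and assuming the default KASan shadow mapping where
	 * one shadow byte covers eight bytes of memory:
	 *   shadow_addr = (addr / 8) + CFG_ASAN_SHADOW_OFFSET
	 * which is why the expected offset below is the shadow start minus
	 * TEE_RAM_VA_START / 8.
	 */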
228 #define __ASAN_SHADOW_START \
229 	ROUNDUP(TEE_RAM_VA_START + (TEE_RAM_VA_SIZE * 8) / 9 - 8, 8)
230 	assert(__ASAN_SHADOW_START == (vaddr_t)&__asan_shadow_start);
231 #define __CFG_ASAN_SHADOW_OFFSET \
232 	(__ASAN_SHADOW_START - (TEE_RAM_VA_START / 8))
233 	COMPILE_TIME_ASSERT(CFG_ASAN_SHADOW_OFFSET == __CFG_ASAN_SHADOW_OFFSET);
234 #undef __ASAN_SHADOW_START
235 #undef __CFG_ASAN_SHADOW_OFFSET
236 
237 	/*
238 	 * Assign the area covered by the shadow memory: everything from the
239 	 * start of the image up to the beginning of the shadow area.
240 	 */
241 	asan_set_shadowed((void *)TEE_TEXT_VA_START, &__asan_shadow_start);
242 
243 	/*
244 	 * Add access to areas that aren't opened automatically by a
245 	 * constructor.
246 	 */
247 	asan_tag_access(&__ctor_list, &__ctor_end);
248 	asan_tag_access(__rodata_start, __rodata_end);
249 #ifdef CFG_WITH_PAGER
250 	asan_tag_access(__pageable_start, __pageable_end);
251 #endif /*CFG_WITH_PAGER*/
252 	asan_tag_access(__nozi_start, __nozi_end);
253 	asan_tag_access(__exidx_start, __exidx_end);
254 	asan_tag_access(__extab_start, __extab_end);
255 
256 	init_run_constructors();
257 
258 	/* Everything is tagged correctly, let's start address sanitizing. */
259 	asan_start();
260 }
261 #else /*CFG_CORE_SANITIZE_KADDRESS*/
262 static void init_asan(void)
263 {
264 }
265 #endif /*CFG_CORE_SANITIZE_KADDRESS*/
266 
267 #ifdef CFG_WITH_PAGER
268 
269 #ifdef CFG_CORE_SANITIZE_KADDRESS
270 static void carve_out_asan_mem(tee_mm_pool_t *pool)
271 {
272 	const size_t s = pool->hi - pool->lo;
273 	tee_mm_entry_t *mm;
274 	paddr_t apa = ASAN_MAP_PA;
275 	size_t asz = ASAN_MAP_SZ;
276 
277 	if (core_is_buffer_outside(apa, asz, pool->lo, s))
278 		return;
279 
280 	/* Reserve the shadow area */
281 	if (!core_is_buffer_inside(apa, asz, pool->lo, s)) {
282 		if (apa < pool->lo) {
283 			/*
284 			 * ASAN buffer is overlapping with the beginning of
285 			 * the pool.
286 			 */
287 			asz -= pool->lo - apa;
288 			apa = pool->lo;
289 		} else {
290 			/*
291 			 * ASAN buffer is overlapping with the end of the
292 			 * pool.
293 			 */
294 			asz = pool->hi - apa;
295 		}
296 	}
297 	mm = tee_mm_alloc2(pool, apa, asz);
298 	assert(mm);
299 }
300 #else
301 static void carve_out_asan_mem(tee_mm_pool_t *pool __unused)
302 {
303 }
304 #endif
305 
306 static void print_pager_pool_size(void)
307 {
308 	struct tee_pager_stats __maybe_unused stats;
309 
310 	tee_pager_get_stats(&stats);
311 	IMSG("Pager pool size: %zukB",
312 		stats.npages_all * SMALL_PAGE_SIZE / 1024);
313 }
314 
315 static void init_vcore(tee_mm_pool_t *mm_vcore)
316 {
317 	const vaddr_t begin = VCORE_START_VA;
318 	vaddr_t end = begin + TEE_RAM_VA_SIZE;
319 
320 #ifdef CFG_CORE_SANITIZE_KADDRESS
321 	/* Carve out ASAN memory, flat mapped after core memory */
322 	if (end > ASAN_SHADOW_PA)
323 		end = ASAN_MAP_PA;
324 #endif
325 
326 	if (!tee_mm_init(mm_vcore, begin, end, SMALL_PAGE_SHIFT,
327 			 TEE_MM_POOL_NO_FLAGS))
328 		panic("tee_mm_vcore init failed");
329 }
330 
331 /*
332  * With CFG_CORE_ASLR=y the init part is relocated very early during boot.
333  * The init part is also paged just as the rest of the normal paged code, with
334  * the difference that it's preloaded during boot. When the backing store
335  * is configured, the entire paged binary is copied into place, followed by
336  * the init part. Since the init part has been relocated (references to
337  * addresses updated to compensate for the new load address) this has to be
338  * undone for the hashes of those pages to match the original binary.
339  *
340  * If CFG_CORE_ASLR=n, nothing needs to be done as the code/ro pages are
341  * unchanged.
342  */
343 static void undo_init_relocation(uint8_t *paged_store __maybe_unused)
344 {
345 #ifdef CFG_CORE_ASLR
346 	unsigned long *ptr = NULL;
347 	const uint32_t *reloc = NULL;
348 	const uint32_t *reloc_end = NULL;
349 	unsigned long offs = boot_mmu_config.load_offset;
350 	const struct boot_embdata *embdata = (const void *)__init_end;
351 	vaddr_t addr_end = (vaddr_t)__init_end - offs - TEE_RAM_START;
352 	vaddr_t addr_start = (vaddr_t)__init_start - offs - TEE_RAM_START;
353 
354 	reloc = (const void *)((vaddr_t)embdata + embdata->reloc_offset);
355 	reloc_end = reloc + embdata->reloc_len / sizeof(*reloc);
356 
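	/*
	 * Each relocation entry is an offset from the start of the core image
	 * (TEE_RAM_START) to a word holding an absolute address. Early boot
	 * added load_offset to those words; subtract it again for the words
	 * that fall inside the init part so the page hashes match the
	 * original binary.
	 */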
357 	for (; reloc < reloc_end; reloc++) {
358 		if (*reloc < addr_start)
359 			continue;
360 		if (*reloc >= addr_end)
361 			break;
362 		ptr = (void *)(paged_store + *reloc - addr_start);
363 		*ptr -= offs;
364 	}
365 #endif
366 }
367 
368 static struct fobj *ro_paged_alloc(tee_mm_entry_t *mm, void *hashes,
369 				   void *store)
370 {
371 	const unsigned int num_pages = tee_mm_get_bytes(mm) / SMALL_PAGE_SIZE;
372 #ifdef CFG_CORE_ASLR
373 	unsigned int reloc_offs = (vaddr_t)__pageable_start - VCORE_START_VA;
374 	const struct boot_embdata *embdata = (const void *)__init_end;
375 	const void *reloc = __init_end + embdata->reloc_offset;
376 
377 	return fobj_ro_reloc_paged_alloc(num_pages, hashes, reloc_offs,
378 					 reloc, embdata->reloc_len, store);
379 #else
380 	return fobj_ro_paged_alloc(num_pages, hashes, store);
381 #endif
382 }
383 
384 static void init_runtime(unsigned long pageable_part)
385 {
386 	size_t n;
387 	size_t init_size = (size_t)(__init_end - __init_start);
388 	size_t pageable_start = (size_t)__pageable_start;
389 	size_t pageable_end = (size_t)__pageable_end;
390 	size_t pageable_size = pageable_end - pageable_start;
391 	size_t tzsram_end = TZSRAM_BASE + TZSRAM_SIZE;
392 	size_t hash_size = (pageable_size / SMALL_PAGE_SIZE) *
393 			   TEE_SHA256_HASH_SIZE;
394 	const struct boot_embdata *embdata = (const void *)__init_end;
395 	const void *tmp_hashes = NULL;
396 	tee_mm_entry_t *mm = NULL;
397 	struct fobj *fobj = NULL;
398 	uint8_t *paged_store = NULL;
399 	uint8_t *hashes = NULL;
400 
401 	assert(pageable_size % SMALL_PAGE_SIZE == 0);
402 	assert(embdata->total_len >= embdata->hashes_offset +
403 				     embdata->hashes_len);
404 	assert(hash_size == embdata->hashes_len);
405 
406 	tmp_hashes = __init_end + embdata->hashes_offset;
407 
408 	init_asan();
409 
410 	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
411 	malloc_add_pool(__heap2_start, __heap2_end - __heap2_start);
412 
413 	/*
414 	 * This needs to be initialized early to support address lookup
415 	 * in MEM_AREA_TEE_RAM
416 	 */
417 	tee_pager_early_init();
418 
419 	hashes = malloc(hash_size);
420 	IMSG_RAW("\n");
421 	IMSG("Pager is enabled. Hashes: %zu bytes", hash_size);
422 	assert(hashes);
423 	asan_memcpy_unchecked(hashes, tmp_hashes, hash_size);
424 
425 	/*
426 	 * Need tee_mm_sec_ddr initialized to be able to allocate secure
427 	 * DDR below.
428 	 */
429 	teecore_init_ta_ram();
430 
431 	carve_out_asan_mem(&tee_mm_sec_ddr);
432 
433 	mm = tee_mm_alloc(&tee_mm_sec_ddr, pageable_size);
434 	assert(mm);
435 	paged_store = phys_to_virt(tee_mm_get_smem(mm), MEM_AREA_TA_RAM);
436 	/*
437 	 * Load pageable part in the dedicated allocated area:
438 	 * - Move pageable non-init part into pageable area. Note bootloader
439 	 *   may have loaded it anywhere in TA RAM hence use memmove().
440 	 * - Copy pageable init part from current location into pageable area.
441 	 */
442 	memmove(paged_store + init_size,
443 		phys_to_virt(pageable_part,
444 			     core_mmu_get_type_by_pa(pageable_part)),
445 		__pageable_part_end - __pageable_part_start);
446 	asan_memcpy_unchecked(paged_store, __init_start, init_size);
447 	/*
448 	 * Undo any relocation of the init part so the hash checks
449 	 * can pass.
450 	 */
451 	undo_init_relocation(paged_store);
452 
453 	/* Check that the hashes of what's in the pageable area are OK */
454 	DMSG("Checking hashes of pageable area");
455 	for (n = 0; (n * SMALL_PAGE_SIZE) < pageable_size; n++) {
456 		const uint8_t *hash = hashes + n * TEE_SHA256_HASH_SIZE;
457 		const uint8_t *page = paged_store + n * SMALL_PAGE_SIZE;
458 		TEE_Result res;
459 
460 		DMSG("hash pg_idx %zu hash %p page %p", n, hash, page);
461 		res = hash_sha256_check(hash, page, SMALL_PAGE_SIZE);
462 		if (res != TEE_SUCCESS) {
463 			EMSG("Hash failed for page %zu at %p: res 0x%x",
464 			     n, (void *)page, res);
465 			panic();
466 		}
467 	}
468 
469 	/*
470 	 * Assert that the prepaged init sections are page aligned so that
471 	 * nothing trails uninitialized at the end of the premapped init area.
472 	 */
473 	assert(!(init_size & SMALL_PAGE_MASK));
474 
475 	/*
476 	 * Initialize the virtual memory pool used for main_mmu_l2_ttb which
477 	 * is supplied to tee_pager_init() below.
478 	 */
479 	init_vcore(&tee_mm_vcore);
480 
481 	/*
482 	 * Assign the pager alias area at the end of the small page block that
483 	 * the rest of the binary is loaded into. We're taking more than
484 	 * needed, but we're guaranteed to not need more than the physical
485 	 * amount of TZSRAM.
486 	 */
487 	mm = tee_mm_alloc2(&tee_mm_vcore,
488 		(vaddr_t)tee_mm_vcore.hi - TZSRAM_SIZE, TZSRAM_SIZE);
489 	assert(mm);
490 	tee_pager_set_alias_area(mm);
491 
492 	/*
493 	 * Claim virtual memory which isn't paged.
494 	 * Linear memory (flat-mapped core memory) ends there.
495 	 */
496 	mm = tee_mm_alloc2(&tee_mm_vcore, VCORE_UNPG_RX_PA,
497 			   (vaddr_t)(__pageable_start - VCORE_UNPG_RX_PA));
498 	assert(mm);
499 
500 	/*
501 	 * Allocate virtual memory for the pageable area and let the pager
502 	 * take charge of all the pages already assigned to that memory.
503 	 */
504 	mm = tee_mm_alloc2(&tee_mm_vcore, (vaddr_t)__pageable_start,
505 			   pageable_size);
506 	assert(mm);
507 	fobj = ro_paged_alloc(mm, hashes, paged_store);
508 	assert(fobj);
509 	tee_pager_add_core_area(tee_mm_get_smem(mm), PAGER_AREA_TYPE_RO, fobj);
510 	fobj_put(fobj);
511 
512 	tee_pager_add_pages(pageable_start, init_size / SMALL_PAGE_SIZE, false);
513 	tee_pager_add_pages(pageable_start + init_size,
514 			    (pageable_size - init_size) / SMALL_PAGE_SIZE,
515 			    true);
516 	if (pageable_end < tzsram_end)
517 		tee_pager_add_pages(pageable_end, (tzsram_end - pageable_end) /
518 						   SMALL_PAGE_SIZE, true);
519 
520 	/*
521 	 * There may be physical pages in TZSRAM before the core load address.
522 	 * These pages can be added to the physical pages pool of the pager.
523 	 * This setup may happen when the secure bootloader runs in TZSRAM
524 	 * and its memory can be reused by OP-TEE once boot stages complete.
525 	 */
526 	tee_pager_add_pages(tee_mm_vcore.lo,
527 			(VCORE_UNPG_RX_PA - tee_mm_vcore.lo) / SMALL_PAGE_SIZE,
528 			true);
529 
530 	print_pager_pool_size();
531 }
532 #else
533 
534 static void init_runtime(unsigned long pageable_part __unused)
535 {
536 	init_asan();
537 
538 	/*
539 	 * By default the whole of OP-TEE uses malloc, so we need to initialize
540 	 * it early. But when virtualization is enabled, malloc is used
541 	 * only by the TEE runtime, so malloc should be initialized later,
542 	 * separately for each virtual partition. Core code uses nex_malloc
543 	 * instead.
544 	 */
545 #ifdef CFG_VIRTUALIZATION
546 	nex_malloc_add_pool(__nex_heap_start, __nex_heap_end -
547 					      __nex_heap_start);
548 #else
549 	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
550 #endif
551 
552 	IMSG_RAW("\n");
553 }
554 #endif
555 
556 void *get_dt(void)
557 {
558 	void *fdt = get_embedded_dt();
559 
560 	if (!fdt)
561 		fdt = get_external_dt();
562 
563 	return fdt;
564 }
565 
566 #if defined(CFG_EMBED_DTB)
567 void *get_embedded_dt(void)
568 {
569 	static bool checked;
570 
571 	assert(cpu_mmu_enabled());
572 
573 	if (!checked) {
574 		IMSG("Embedded DTB found");
575 
576 		if (fdt_check_header(embedded_secure_dtb))
577 			panic("Invalid embedded DTB");
578 
579 		checked = true;
580 	}
581 
582 	return embedded_secure_dtb;
583 }
584 #else
585 void *get_embedded_dt(void)
586 {
587 	return NULL;
588 }
589 #endif /*CFG_EMBED_DTB*/
590 
591 #if defined(CFG_DT)
592 void *get_external_dt(void)
593 {
594 	assert(cpu_mmu_enabled());
595 	return external_dt.blob;
596 }
597 
598 static TEE_Result release_external_dt(void)
599 {
600 	int ret = 0;
601 
602 	if (!external_dt.blob)
603 		return TEE_SUCCESS;
604 
605 	ret = fdt_pack(external_dt.blob);
606 	if (ret < 0) {
607 		EMSG("Failed to pack Device Tree at 0x%" PRIxPA ": error %d",
608 		     virt_to_phys(external_dt.blob), ret);
609 		panic();
610 	}
611 
612 	/* External DTB is no longer accessed, reset the pointer to invalid */
613 	external_dt.blob = NULL;
614 
615 	return TEE_SUCCESS;
616 }
617 boot_final(release_external_dt);
618 
619 #ifdef CFG_EXTERNAL_DTB_OVERLAY
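/*
 * For illustration only: each fragment added by the helper below ends up
 * roughly as
 *
 *	fragment@<n> {
 *		target-path = "/";
 *		__overlay__ {
 *			(new nodes are added here)
 *		};
 *	};
 */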
620 static int add_dt_overlay_fragment(struct dt_descriptor *dt, int ioffs)
621 {
622 	char frag[32];
623 	int offs;
624 	int ret;
625 
626 	snprintf(frag, sizeof(frag), "fragment@%d", dt->frag_id);
627 	offs = fdt_add_subnode(dt->blob, ioffs, frag);
628 	if (offs < 0)
629 		return offs;
630 
631 	dt->frag_id += 1;
632 
633 	ret = fdt_setprop_string(dt->blob, offs, "target-path", "/");
634 	if (ret < 0)
635 		return -1;
636 
637 	return fdt_add_subnode(dt->blob, offs, "__overlay__");
638 }
639 
640 static int init_dt_overlay(struct dt_descriptor *dt, int __maybe_unused dt_size)
641 {
642 	int fragment;
643 	int ret;
644 
645 	ret = fdt_check_header(dt->blob);
646 	if (!ret) {
647 		fdt_for_each_subnode(fragment, dt->blob, 0)
648 			dt->frag_id += 1;
649 		return ret;
650 	}
651 
652 #ifdef CFG_DT_ADDR
653 	return fdt_create_empty_tree(dt->blob, dt_size);
654 #else
655 	return -1;
656 #endif
657 }
658 #else
659 static int add_dt_overlay_fragment(struct dt_descriptor *dt __unused, int offs)
660 {
661 	return offs;
662 }
663 
664 static int init_dt_overlay(struct dt_descriptor *dt __unused,
665 			   int dt_size __unused)
666 {
667 	return 0;
668 }
669 #endif /* CFG_EXTERNAL_DTB_OVERLAY */
670 
671 static int add_dt_path_subnode(struct dt_descriptor *dt, const char *path,
672 			       const char *subnode)
673 {
674 	int offs;
675 
676 	offs = fdt_path_offset(dt->blob, path);
677 	if (offs < 0)
678 		return -1;
679 	offs = add_dt_overlay_fragment(dt, offs);
680 	if (offs < 0)
681 		return -1;
682 	offs = fdt_add_subnode(dt->blob, offs, subnode);
683 	if (offs < 0)
684 		return -1;
685 	return offs;
686 }
687 
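/*
 * For illustration only: the node added below ends up as
 *
 *	firmware {
 *		optee {
 *			compatible = "linaro,optee-tz";
 *			method = "smc";
 *		};
 *	};
 */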
688 static int add_optee_dt_node(struct dt_descriptor *dt)
689 {
690 	int offs;
691 	int ret;
692 
693 	if (fdt_path_offset(dt->blob, "/firmware/optee") >= 0) {
694 		DMSG("OP-TEE Device Tree node already exists!");
695 		return 0;
696 	}
697 
698 	offs = fdt_path_offset(dt->blob, "/firmware");
699 	if (offs < 0) {
700 		offs = add_dt_path_subnode(dt, "/", "firmware");
701 		if (offs < 0)
702 			return -1;
703 	}
704 
705 	offs = fdt_add_subnode(dt->blob, offs, "optee");
706 	if (offs < 0)
707 		return -1;
708 
709 	ret = fdt_setprop_string(dt->blob, offs, "compatible",
710 				 "linaro,optee-tz");
711 	if (ret < 0)
712 		return -1;
713 	ret = fdt_setprop_string(dt->blob, offs, "method", "smc");
714 	if (ret < 0)
715 		return -1;
716 	return 0;
717 }
718 
719 #ifdef CFG_PSCI_ARM32
720 static int append_psci_compatible(void *fdt, int offs, const char *str)
721 {
722 	return fdt_appendprop(fdt, offs, "compatible", str, strlen(str) + 1);
723 }
724 
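/*
 * For illustration only: the node added below ends up roughly as
 *
 *	psci {
 *		compatible = "arm,psci-1.0", "arm,psci-0.2", "arm,psci";
 *		method = "smc";
 *		cpu_suspend = <PSCI_CPU_SUSPEND>;
 *		cpu_off = <PSCI_CPU_OFF>;
 *		cpu_on = <PSCI_CPU_ON>;
 *		sys_poweroff = <PSCI_SYSTEM_OFF>;
 *		sys_reset = <PSCI_SYSTEM_RESET>;
 *	};
 */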
725 static int dt_add_psci_node(struct dt_descriptor *dt)
726 {
727 	int offs;
728 
729 	if (fdt_path_offset(dt->blob, "/psci") >= 0) {
730 		DMSG("PSCI Device Tree node already exists!");
731 		return 0;
732 	}
733 
734 	offs = add_dt_path_subnode(dt, "/", "psci");
735 	if (offs < 0)
736 		return -1;
737 	if (append_psci_compatible(dt->blob, offs, "arm,psci-1.0"))
738 		return -1;
739 	if (append_psci_compatible(dt->blob, offs, "arm,psci-0.2"))
740 		return -1;
741 	if (append_psci_compatible(dt->blob, offs, "arm,psci"))
742 		return -1;
743 	if (fdt_setprop_string(dt->blob, offs, "method", "smc"))
744 		return -1;
745 	if (fdt_setprop_u32(dt->blob, offs, "cpu_suspend", PSCI_CPU_SUSPEND))
746 		return -1;
747 	if (fdt_setprop_u32(dt->blob, offs, "cpu_off", PSCI_CPU_OFF))
748 		return -1;
749 	if (fdt_setprop_u32(dt->blob, offs, "cpu_on", PSCI_CPU_ON))
750 		return -1;
751 	if (fdt_setprop_u32(dt->blob, offs, "sys_poweroff", PSCI_SYSTEM_OFF))
752 		return -1;
753 	if (fdt_setprop_u32(dt->blob, offs, "sys_reset", PSCI_SYSTEM_RESET))
754 		return -1;
755 	return 0;
756 }
757 
758 static int check_node_compat_prefix(struct dt_descriptor *dt, int offs,
759 				    const char *prefix)
760 {
761 	const size_t prefix_len = strlen(prefix);
762 	size_t l;
763 	int plen;
764 	const char *prop;
765 
766 	prop = fdt_getprop(dt->blob, offs, "compatible", &plen);
767 	if (!prop)
768 		return -1;
769 
770 	while (plen > 0) {
771 		if (memcmp(prop, prefix, prefix_len) == 0)
772 			return 0; /* match */
773 
774 		l = strlen(prop) + 1;
775 		prop += l;
776 		plen -= l;
777 	}
778 
779 	return -1;
780 }
781 
782 static int dt_add_psci_cpu_enable_methods(struct dt_descriptor *dt)
783 {
784 	int offs = 0;
785 
786 	while (1) {
787 		offs = fdt_next_node(dt->blob, offs, NULL);
788 		if (offs < 0)
789 			break;
790 		if (fdt_getprop(dt->blob, offs, "enable-method", NULL))
791 			continue; /* already set */
792 		if (check_node_compat_prefix(dt, offs, "arm,cortex-a"))
793 			continue; /* no compatible */
794 		if (fdt_setprop_string(dt->blob, offs, "enable-method", "psci"))
795 			return -1;
796 		/* Need to restart scanning as offsets may have changed */
797 		offs = 0;
798 	}
799 	return 0;
800 }
801 
802 static int config_psci(struct dt_descriptor *dt)
803 {
804 	if (dt_add_psci_node(dt))
805 		return -1;
806 	return dt_add_psci_cpu_enable_methods(dt);
807 }
808 #else
809 static int config_psci(struct dt_descriptor *dt __unused)
810 {
811 	return 0;
812 }
813 #endif /*CFG_PSCI_ARM32*/
814 
815 static void set_dt_val(void *data, uint32_t cell_size, uint64_t val)
816 {
817 	if (cell_size == 1) {
818 		fdt32_t v = cpu_to_fdt32((uint32_t)val);
819 
820 		memcpy(data, &v, sizeof(v));
821 	} else {
822 		fdt64_t v = cpu_to_fdt64(val);
823 
824 		memcpy(data, &v, sizeof(v));
825 	}
826 }
827 
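/*
 * For illustration only: with no pre-existing /reserved-memory node and two
 * cells each for address and size, the helper below produces roughly
 *
 *	reserved-memory {
 *		#address-cells = <2>;
 *		#size-cells = <2>;
 *		ranges;
 *
 *		<name>@0x<pa> {
 *			reg = <pa size>;
 *			no-map;
 *		};
 *	};
 */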
828 static int add_res_mem_dt_node(struct dt_descriptor *dt, const char *name,
829 			       paddr_t pa, size_t size)
830 {
831 	int offs = 0;
832 	int ret = 0;
833 	int addr_size = -1;
834 	int len_size = -1;
835 	bool found = true;
836 	char subnode_name[80] = { 0 };
837 
838 	offs = fdt_path_offset(dt->blob, "/reserved-memory");
839 
840 	if (offs < 0) {
841 		found = false;
842 		offs = 0;
843 	}
844 
845 	if (IS_ENABLED(CFG_EXTERNAL_DTB_OVERLAY)) {
846 		len_size = sizeof(paddr_t) / sizeof(uint32_t);
847 		addr_size = sizeof(paddr_t) / sizeof(uint32_t);
848 	} else {
849 		len_size = fdt_size_cells(dt->blob, offs);
850 		if (len_size < 0)
851 			return -1;
852 		addr_size = fdt_address_cells(dt->blob, offs);
853 		if (addr_size < 0)
854 			return -1;
855 	}
856 
857 	if (!found) {
858 		offs = add_dt_path_subnode(dt, "/", "reserved-memory");
859 		if (offs < 0)
860 			return -1;
861 		ret = fdt_setprop_cell(dt->blob, offs, "#address-cells",
862 				       addr_size);
863 		if (ret < 0)
864 			return -1;
865 		ret = fdt_setprop_cell(dt->blob, offs, "#size-cells", len_size);
866 		if (ret < 0)
867 			return -1;
868 		ret = fdt_setprop(dt->blob, offs, "ranges", NULL, 0);
869 		if (ret < 0)
870 			return -1;
871 	}
872 
873 	snprintf(subnode_name, sizeof(subnode_name),
874 		 "%s@0x%" PRIxPA, name, pa);
875 	offs = fdt_add_subnode(dt->blob, offs, subnode_name);
876 	if (offs >= 0) {
877 		uint32_t data[FDT_MAX_NCELLS * 2];
878 
879 		set_dt_val(data, addr_size, pa);
880 		set_dt_val(data + addr_size, len_size, size);
881 		ret = fdt_setprop(dt->blob, offs, "reg", data,
882 				  sizeof(uint32_t) * (addr_size + len_size));
883 		if (ret < 0)
884 			return -1;
885 		ret = fdt_setprop(dt->blob, offs, "no-map", NULL, 0);
886 		if (ret < 0)
887 			return -1;
888 	} else {
889 		return -1;
890 	}
891 	return 0;
892 }
893 
894 #ifdef CFG_CORE_DYN_SHM
895 static uint64_t get_dt_val_and_advance(const void *data, size_t *offs,
896 				       uint32_t cell_size)
897 {
898 	uint64_t rv = 0;
899 
900 	if (cell_size == 1) {
901 		uint32_t v;
902 
903 		memcpy(&v, (const uint8_t *)data + *offs, sizeof(v));
904 		*offs += sizeof(v);
905 		rv = fdt32_to_cpu(v);
906 	} else {
907 		uint64_t v;
908 
909 		memcpy(&v, (const uint8_t *)data + *offs, sizeof(v));
910 		*offs += sizeof(v);
911 		rv = fdt64_to_cpu(v);
912 	}
913 
914 	return rv;
915 }
916 
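/*
 * For illustration only: get_memory() parses a node such as
 *
 *	memory@80000000 {
 *		device_type = "memory";
 *		reg = <0x80000000 0x40000000>;
 *	};
 *
 * where the number of cells in "reg" follows the root #address-cells and
 * #size-cells properties.
 */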
917 static struct core_mmu_phys_mem *get_memory(void *fdt, size_t *nelems)
918 {
919 	int offs = 0;
920 	int addr_size = 0;
921 	int len_size = 0;
922 	size_t prop_len = 0;
923 	const uint8_t *prop = NULL;
924 	size_t prop_offs = 0;
925 	size_t n = 0;
926 	struct core_mmu_phys_mem *mem = NULL;
927 
928 	offs = fdt_subnode_offset(fdt, 0, "memory");
929 	if (offs < 0)
930 		return NULL;
931 
932 	prop = fdt_getprop(fdt, offs, "reg", &addr_size);
933 	if (!prop)
934 		return NULL;
935 
936 	prop_len = addr_size;
937 	addr_size = fdt_address_cells(fdt, 0);
938 	if (addr_size < 0)
939 		return NULL;
940 
941 	len_size = fdt_size_cells(fdt, 0);
942 	if (len_size < 0)
943 		return NULL;
944 
945 	for (n = 0, prop_offs = 0; prop_offs < prop_len; n++) {
946 		get_dt_val_and_advance(prop, &prop_offs, addr_size);
947 		if (prop_offs >= prop_len) {
948 			n--;
949 			break;
950 		}
951 		get_dt_val_and_advance(prop, &prop_offs, len_size);
952 	}
953 
954 	if (!n)
955 		return NULL;
956 
957 	*nelems = n;
958 	mem = nex_calloc(n, sizeof(*mem));
959 	if (!mem)
960 		panic();
961 
962 	for (n = 0, prop_offs = 0; n < *nelems; n++) {
963 		mem[n].type = MEM_AREA_RAM_NSEC;
964 		mem[n].addr = get_dt_val_and_advance(prop, &prop_offs,
965 						     addr_size);
966 		mem[n].size = get_dt_val_and_advance(prop, &prop_offs,
967 						     len_size);
968 	}
969 
970 	return mem;
971 }
972 #endif /*CFG_CORE_DYN_SHM*/
973 
974 #ifdef CFG_CORE_RESERVED_SHM
975 static int mark_static_shm_as_reserved(struct dt_descriptor *dt)
976 {
977 	vaddr_t shm_start;
978 	vaddr_t shm_end;
979 
980 	core_mmu_get_mem_by_type(MEM_AREA_NSEC_SHM, &shm_start, &shm_end);
981 	if (shm_start != shm_end)
982 		return add_res_mem_dt_node(dt, "optee_shm",
983 					   virt_to_phys((void *)shm_start),
984 					   shm_end - shm_start);
985 
986 	DMSG("No SHM configured");
987 	return -1;
988 }
989 #endif /*CFG_CORE_RESERVED_SHM*/
990 
991 static void init_external_dt(unsigned long phys_dt)
992 {
993 	struct dt_descriptor *dt = &external_dt;
994 	void *fdt;
995 	int ret;
996 
997 	if (!phys_dt) {
998 		/*
999 		 * No need to panic as we're not using the DT in OP-TEE
1000 		 * yet, we're only adding some nodes for normal world use.
1001 		 * This makes the switch to using DT easier as we can boot
1002 		 * a newer OP-TEE with older boot loaders. Once we start to
1003 		 * initialize devices based on DT we'll likely panic
1004 		 * instead of returning here.
1005 		 */
1006 		IMSG("No non-secure external DT");
1007 		return;
1008 	}
1009 
1010 	if (!core_mmu_add_mapping(MEM_AREA_EXT_DT, phys_dt, CFG_DTB_MAX_SIZE))
1011 		panic("Failed to map external DTB");
1012 
1013 	fdt = phys_to_virt(phys_dt, MEM_AREA_EXT_DT);
1014 	if (!fdt)
1015 		panic();
1016 
1017 	dt->blob = fdt;
1018 
1019 	ret = init_dt_overlay(dt, CFG_DTB_MAX_SIZE);
1020 	if (ret < 0) {
1021 		EMSG("Device Tree Overlay init fail @ %#lx: error %d", phys_dt,
1022 		     ret);
1023 		panic();
1024 	}
1025 
1026 	ret = fdt_open_into(fdt, fdt, CFG_DTB_MAX_SIZE);
1027 	if (ret < 0) {
1028 		EMSG("Invalid Device Tree at %#lx: error %d", phys_dt, ret);
1029 		panic();
1030 	}
1031 
1032 	IMSG("Non-secure external DT found");
1033 }
1034 
1035 static int mark_tzdram_as_reserved(struct dt_descriptor *dt)
1036 {
1037 	return add_res_mem_dt_node(dt, "optee_core", CFG_TZDRAM_START,
1038 				   CFG_TZDRAM_SIZE);
1039 }
1040 
1041 static void update_external_dt(void)
1042 {
1043 	struct dt_descriptor *dt = &external_dt;
1044 
1045 	if (!dt->blob)
1046 		return;
1047 
1048 	if (add_optee_dt_node(dt))
1049 		panic("Failed to add OP-TEE Device Tree node");
1050 
1051 	if (config_psci(dt))
1052 		panic("Failed to config PSCI");
1053 
1054 #ifdef CFG_CORE_RESERVED_SHM
1055 	if (mark_static_shm_as_reserved(dt))
1056 		panic("Failed to config non-secure memory");
1057 #endif
1058 
1059 	if (mark_tzdram_as_reserved(dt))
1060 		panic("Failed to config secure memory");
1061 }
1062 #else /*CFG_DT*/
1063 void *get_external_dt(void)
1064 {
1065 	return NULL;
1066 }
1067 
1068 static void init_external_dt(unsigned long phys_dt __unused)
1069 {
1070 }
1071 
1072 static void update_external_dt(void)
1073 {
1074 }
1075 
1076 #ifdef CFG_CORE_DYN_SHM
1077 static struct core_mmu_phys_mem *get_memory(void *fdt __unused,
1078 					    size_t *nelems __unused)
1079 {
1080 	return NULL;
1081 }
1082 #endif /*CFG_CORE_DYN_SHM*/
1083 #endif /*!CFG_DT*/
1084 
1085 #ifdef CFG_CORE_DYN_SHM
1086 static void discover_nsec_memory(void)
1087 {
1088 	struct core_mmu_phys_mem *mem;
1089 	size_t nelems;
1090 	void *fdt = get_external_dt();
1091 
1092 	if (fdt) {
1093 		mem = get_memory(fdt, &nelems);
1094 		if (mem) {
1095 			core_mmu_set_discovered_nsec_ddr(mem, nelems);
1096 			return;
1097 		}
1098 
1099 		DMSG("No non-secure memory found in FDT");
1100 	}
1101 
1102 	nelems = phys_ddr_overall_end - phys_ddr_overall_begin;
1103 	if (!nelems)
1104 		return;
1105 
1106 	/* Platform cannot define nsec_ddr && overall_ddr */
1107 	assert(phys_nsec_ddr_begin == phys_nsec_ddr_end);
1108 
1109 	mem = nex_calloc(nelems, sizeof(*mem));
1110 	if (!mem)
1111 		panic();
1112 
1113 	memcpy(mem, phys_ddr_overall_begin, sizeof(*mem) * nelems);
1114 	core_mmu_set_discovered_nsec_ddr(mem, nelems);
1115 }
1116 #else /*CFG_CORE_DYN_SHM*/
1117 static void discover_nsec_memory(void)
1118 {
1119 }
1120 #endif /*!CFG_CORE_DYN_SHM*/
1121 
1122 void init_tee_runtime(void)
1123 {
1124 #ifdef CFG_VIRTUALIZATION
1125 	/* We need to initialize the pool for every virtual guest partition */
1126 	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
1127 #endif
1128 
1129 #ifndef CFG_WITH_PAGER
1130 	/* Pager initializes TA RAM early */
1131 	teecore_init_ta_ram();
1132 #endif
1133 	call_initcalls();
1134 }
1135 
1136 static void init_primary(unsigned long pageable_part, unsigned long nsec_entry)
1137 {
1138 	/*
1139 	 * Mask asynchronous exceptions before switching to the thread vector
1140 	 * as the thread handler requires those to be masked while
1141 	 * executing with the temporary stack. The thread subsystem also
1142 	 * asserts that the foreign interrupts are blocked when using most of
1143 	 * its functions.
1144 	 */
1145 	thread_set_exceptions(THREAD_EXCP_ALL);
1146 	primary_save_cntfrq();
1147 	init_vfp_sec();
1148 	/*
1149 	 * Pager: init_runtime() calls thread_kernel_enable_vfp() so we must
1150 	 * set a current thread right now to avoid a chicken-and-egg problem
1151 	 * (thread_init_boot_thread() sets the current thread but needs
1152 	 * things set by init_runtime()).
1153 	 */
1154 	thread_get_core_local()->curr_thread = 0;
1155 	init_runtime(pageable_part);
1156 
1157 	if (IS_ENABLED(CFG_VIRTUALIZATION)) {
1158 		/*
1159 		 * Virtualization: We can't initialize threads right now because
1160 		 * threads belong to the "tee" part and will be initialized
1161 		 * separately for each new virtual guest. So, we'll clear
1162 		 * "curr_thread" and call it done.
1163 		 */
1164 		thread_get_core_local()->curr_thread = -1;
1165 	} else {
1166 		thread_init_boot_thread();
1167 	}
1168 	thread_init_primary();
1169 	thread_init_per_cpu();
1170 	init_sec_mon(nsec_entry);
1171 }
1172 
1173 /*
1174  * Note: this function is weak just to make it possible to exclude it from
1175  * the unpaged area.
1176  */
1177 void __weak paged_init_primary(unsigned long fdt)
1178 {
1179 	init_external_dt(fdt);
1180 	tpm_map_log_area(get_external_dt());
1181 	discover_nsec_memory();
1182 	update_external_dt();
1183 	configure_console_from_dt();
1184 
1185 	IMSG("OP-TEE version: %s", core_v_str);
1186 	IMSG("Primary CPU initializing");
1187 #ifdef CFG_CORE_ASLR
1188 	DMSG("Executing at offset %#lx with virtual load address %#"PRIxVA,
1189 	     (unsigned long)boot_mmu_config.load_offset, VCORE_START_VA);
1190 #endif
1191 
1192 	main_init_gic();
1193 	init_vfp_nsec();
1194 #ifndef CFG_VIRTUALIZATION
1195 	init_tee_runtime();
1196 #endif
1197 #ifdef CFG_VIRTUALIZATION
1198 	IMSG("Initializing virtualization support");
1199 	core_mmu_init_virtualization();
1200 #endif
1201 	call_finalcalls();
1202 	IMSG("Primary CPU switching to normal world boot");
1203 }
1204 
1205 static void init_secondary_helper(unsigned long nsec_entry)
1206 {
1207 	IMSG("Secondary CPU %zu initializing", get_core_pos());
1208 
1209 	/*
1210 	 * Mask asynchronous exceptions before switching to the thread vector
1211 	 * as the thread handler requires those to be masked while
1212 	 * executing with the temporary stack. The thread subsystem also
1213 	 * asserts that the foreign interrupts are blocked when using most of
1214 	 * its functions.
1215 	 */
1216 	thread_set_exceptions(THREAD_EXCP_ALL);
1217 
1218 	secondary_init_cntfrq();
1219 	thread_init_per_cpu();
1220 	init_sec_mon(nsec_entry);
1221 	main_secondary_init_gic();
1222 	init_vfp_sec();
1223 	init_vfp_nsec();
1224 
1225 	IMSG("Secondary CPU %zu switching to normal world boot", get_core_pos());
1226 }
1227 
1228 /*
1229  * Note: this function is weak just to make it possible to exclude it from
1230  * the unpaged area so that it lies in the init area.
1231  */
1232 void __weak boot_init_primary(unsigned long pageable_part,
1233 			      unsigned long nsec_entry __maybe_unused,
1234 			      unsigned long fdt)
1235 {
1236 	unsigned long e = PADDR_INVALID;
1237 
1238 #if !defined(CFG_WITH_ARM_TRUSTED_FW)
1239 	e = nsec_entry;
1240 #endif
1241 
1242 	init_primary(pageable_part, e);
1243 	paged_init_primary(fdt);
1244 }
1245 
1246 #if defined(CFG_WITH_ARM_TRUSTED_FW)
1247 unsigned long boot_cpu_on_handler(unsigned long a0 __maybe_unused,
1248 				  unsigned long a1 __unused)
1249 {
1250 	init_secondary_helper(PADDR_INVALID);
1251 	return 0;
1252 }
1253 #else
1254 void boot_init_secondary(unsigned long nsec_entry)
1255 {
1256 	init_secondary_helper(nsec_entry);
1257 }
1258 #endif
1259 
1260 #if defined(CFG_BOOT_SECONDARY_REQUEST)
1261 void boot_set_core_ns_entry(size_t core_idx, uintptr_t entry,
1262 			    uintptr_t context_id)
1263 {
1264 	ns_entry_contexts[core_idx].entry_point = entry;
1265 	ns_entry_contexts[core_idx].context_id = context_id;
1266 	dsb_ishst();
1267 }
1268 
1269 int boot_core_release(size_t core_idx, paddr_t entry)
1270 {
1271 	if (!core_idx || core_idx >= CFG_TEE_CORE_NB_CORE)
1272 		return -1;
1273 
1274 	ns_entry_contexts[core_idx].entry_point = entry;
1275 	dmb();
1276 	spin_table[core_idx] = 1;
1277 	dsb();
1278 	sev();
1279 
1280 	return 0;
1281 }
1282 
1283 /*
1284  * Spin until a secondary boot request, then return with
1285  * the secondary core entry address.
1286  */
1287 struct ns_entry_context *boot_core_hpen(void)
1288 {
1289 #ifdef CFG_PSCI_ARM32
1290 	return &ns_entry_contexts[get_core_pos()];
1291 #else
1292 	do {
1293 		wfe();
1294 	} while (!spin_table[get_core_pos()]);
1295 	dmb();
1296 	return &ns_entry_contexts[get_core_pos()];
1297 #endif
1298 }
1299 #endif
1300 
1301 #if defined(CFG_CORE_ASLR)
1302 #if defined(CFG_DT)
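/*
 * For illustration only: the seed is expected in the secure DT as
 *
 *	secure-chosen {
 *		kaslr-seed = <0x12345678 0x9abcdef0>;
 *	};
 *
 * (the property holds a 64-bit value; the numbers above are examples).
 */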
1303 unsigned long __weak get_aslr_seed(void *fdt)
1304 {
1305 	int rc = fdt_check_header(fdt);
1306 	const uint64_t *seed = NULL;
1307 	int offs = 0;
1308 	int len = 0;
1309 
1310 	if (rc) {
1311 		DMSG("Bad fdt: %d", rc);
1312 		return 0;
1313 	}
1314 
1315 	offs =  fdt_path_offset(fdt, "/secure-chosen");
1316 	if (offs < 0) {
1317 		DMSG("Cannot find /secure-chosen");
1318 		return 0;
1319 	}
1320 	seed = fdt_getprop(fdt, offs, "kaslr-seed", &len);
1321 	if (!seed || len != sizeof(*seed)) {
1322 		DMSG("Cannot find valid kaslr-seed");
1323 		return 0;
1324 	}
1325 
1326 	return fdt64_to_cpu(*seed);
1327 }
1328 #else /*!CFG_DT*/
1329 unsigned long __weak get_aslr_seed(void *fdt __unused)
1330 {
1331 	DMSG("Warning: no ASLR seed");
1332 	return 0;
1333 }
1334 #endif /*!CFG_DT*/
1335 #endif /*CFG_CORE_ASLR*/
1336