xref: /optee_os/core/arch/arm/kernel/boot.c (revision 279bfce83bac403aa516516574af9ca403d31290)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2015-2021, Linaro Limited
4  */
5 
6 #include <arm.h>
7 #include <assert.h>
8 #include <compiler.h>
9 #include <config.h>
10 #include <console.h>
11 #include <crypto/crypto.h>
12 #include <initcall.h>
13 #include <inttypes.h>
14 #include <keep.h>
15 #include <kernel/asan.h>
16 #include <kernel/boot.h>
17 #include <kernel/linker.h>
18 #include <kernel/misc.h>
19 #include <kernel/panic.h>
20 #include <kernel/tee_misc.h>
21 #include <kernel/thread.h>
22 #include <kernel/tpm.h>
23 #include <libfdt.h>
24 #include <malloc.h>
25 #include <mm/core_memprot.h>
26 #include <mm/core_mmu.h>
27 #include <mm/fobj.h>
28 #include <mm/tee_mm.h>
29 #include <mm/tee_pager.h>
30 #include <sm/psci.h>
31 #include <stdio.h>
32 #include <trace.h>
33 #include <utee_defines.h>
34 #include <util.h>
35 
36 #include <platform_config.h>
37 
38 #if !defined(CFG_WITH_ARM_TRUSTED_FW)
39 #include <sm/sm.h>
40 #endif
41 
42 #if defined(CFG_WITH_VFP)
43 #include <kernel/vfp.h>
44 #endif
45 
46 /*
47  * In this file we're using unsigned long to represent physical pointers as
48  * they are received in a single register when OP-TEE is initially entered.
49  * This limits 32-bit systems to only make use of the lower 32 bits
50  * of a physical address for initial parameters.
51  *
52  * 64-bit systems on the other hand can use full 64-bit physical pointers.
53  */
54 #define PADDR_INVALID		ULONG_MAX
55 
56 #if defined(CFG_BOOT_SECONDARY_REQUEST)
57 struct ns_entry_context {
58 	uintptr_t entry_point;
59 	uintptr_t context_id;
60 };
61 struct ns_entry_context ns_entry_contexts[CFG_TEE_CORE_NB_CORE];
62 static uint32_t spin_table[CFG_TEE_CORE_NB_CORE];
63 #endif
64 
65 #ifdef CFG_BOOT_SYNC_CPU
66 /*
67  * Array used when booting to synchronize the CPUs.
68  * When 0, the CPU has not started.
69  * When 1, it has started.
70  */
71 uint32_t sem_cpu_sync[CFG_TEE_CORE_NB_CORE];
72 DECLARE_KEEP_PAGER(sem_cpu_sync);
73 #endif
74 
75 #ifdef CFG_DT
76 struct dt_descriptor {
77 	void *blob;
78 #ifdef _CFG_USE_DTB_OVERLAY
79 	int frag_id;
80 #endif
81 };
82 
83 static struct dt_descriptor external_dt __nex_bss;
84 #endif
85 
86 #ifdef CFG_SECONDARY_INIT_CNTFRQ
87 static uint32_t cntfrq;
88 #endif
89 
90 /* May be overridden in plat-$(PLATFORM)/main.c */
91 __weak void plat_primary_init_early(void)
92 {
93 }
94 DECLARE_KEEP_PAGER(plat_primary_init_early);
95 
96 /* May be overridden in plat-$(PLATFORM)/main.c */
97 __weak void main_init_gic(void)
98 {
99 }
100 
101 /* May be overridden in plat-$(PLATFORM)/main.c */
102 __weak void main_secondary_init_gic(void)
103 {
104 }
105 
106 /* May be overridden in plat-$(PLATFORM)/main.c */
107 __weak unsigned long plat_get_aslr_seed(void)
108 {
109 	DMSG("Warning: no ASLR seed");
110 
111 	return 0;
112 }
113 
114 #if defined(CFG_WITH_ARM_TRUSTED_FW)
115 void init_sec_mon(unsigned long nsec_entry __maybe_unused)
116 {
117 	assert(nsec_entry == PADDR_INVALID);
118 	/* Do nothing as we don't have a secure monitor */
119 }
120 #else
121 /* May be overridden in plat-$(PLATFORM)/main.c */
122 __weak void init_sec_mon(unsigned long nsec_entry)
123 {
124 	struct sm_nsec_ctx *nsec_ctx;
125 
126 	assert(nsec_entry != PADDR_INVALID);
127 
128 	/* Initialize secure monitor */
129 	nsec_ctx = sm_get_nsec_ctx();
130 	nsec_ctx->mon_lr = nsec_entry;
131 	nsec_ctx->mon_spsr = CPSR_MODE_SVC | CPSR_I;
132 	if (nsec_entry & 1)
133 		nsec_ctx->mon_spsr |= CPSR_T;
134 }
135 #endif
136 
137 #if defined(CFG_WITH_ARM_TRUSTED_FW)
138 static void init_vfp_nsec(void)
139 {
140 }
141 #else
142 static void init_vfp_nsec(void)
143 {
144 	/* Normal world can use CP10 and CP11 (SIMD/VFP) */
145 	write_nsacr(read_nsacr() | NSACR_CP10 | NSACR_CP11);
146 }
147 #endif
148 
149 #if defined(CFG_WITH_VFP)
150 
151 #ifdef ARM32
152 static void init_vfp_sec(void)
153 {
154 	uint32_t cpacr = read_cpacr();
155 
156 	/*
157 	 * Enable Advanced SIMD functionality.
158 	 * Enable use of D16-D31 of the Floating-point Extension register
159 	 * file.
160 	 */
161 	cpacr &= ~(CPACR_ASEDIS | CPACR_D32DIS);
162 	/*
163 	 * Enable usage of CP10 and CP11 (SIMD/VFP) in both kernel and user
164 	 * mode.
165 	 */
166 	cpacr |= CPACR_CP(10, CPACR_CP_ACCESS_FULL);
167 	cpacr |= CPACR_CP(11, CPACR_CP_ACCESS_FULL);
168 	write_cpacr(cpacr);
169 }
170 #endif /* ARM32 */
171 
172 #ifdef ARM64
173 static void init_vfp_sec(void)
174 {
175 	/* Not using VFP until thread_kernel_enable_vfp() */
176 	vfp_disable();
177 }
178 #endif /* ARM64 */
179 
180 #else /* CFG_WITH_VFP */
181 
182 static void init_vfp_sec(void)
183 {
184 	/* Not using VFP */
185 }
186 #endif
187 
188 #ifdef CFG_SECONDARY_INIT_CNTFRQ
189 static void primary_save_cntfrq(void)
190 {
191 	assert(cntfrq == 0);
192 
193 	/*
194 	 * CNTFRQ should be initialized on the primary CPU by a
195 	 * previous boot stage
196 	 */
197 	cntfrq = read_cntfrq();
198 }
199 
200 static void secondary_init_cntfrq(void)
201 {
202 	assert(cntfrq != 0);
203 	write_cntfrq(cntfrq);
204 }
205 #else /* CFG_SECONDARY_INIT_CNTFRQ */
206 static void primary_save_cntfrq(void)
207 {
208 }
209 
210 static void secondary_init_cntfrq(void)
211 {
212 }
213 #endif
214 
215 #ifdef CFG_CORE_SANITIZE_KADDRESS
216 static void init_run_constructors(void)
217 {
218 	const vaddr_t *ctor;
219 
220 	for (ctor = &__ctor_list; ctor < &__ctor_end; ctor++)
221 		((void (*)(void))(*ctor))();
222 }
223 
224 static void init_asan(void)
225 {
226 
227 	/*
228 	 * CFG_ASAN_SHADOW_OFFSET is also supplied as
229 	 * -fasan-shadow-offset=$(CFG_ASAN_SHADOW_OFFSET) to the compiler.
230 	 * Since all the values needed to calculate CFG_ASAN_SHADOW_OFFSET
231 	 * aren't available to make, it has to be calculated in advance and
232 	 * hard coded into the platform conf.mk. Here, where all the needed
233 	 * values are available, we double check that the compiler was
234 	 * supplied the correct value.
235 	 */
236 
237 #define __ASAN_SHADOW_START \
238 	ROUNDUP(TEE_RAM_VA_START + (TEE_RAM_VA_SIZE * 8) / 9 - 8, 8)
239 	assert(__ASAN_SHADOW_START == (vaddr_t)&__asan_shadow_start);
240 #define __CFG_ASAN_SHADOW_OFFSET \
241 	(__ASAN_SHADOW_START - (TEE_RAM_VA_START / 8))
242 	COMPILE_TIME_ASSERT(CFG_ASAN_SHADOW_OFFSET == __CFG_ASAN_SHADOW_OFFSET);
243 #undef __ASAN_SHADOW_START
244 #undef __CFG_ASAN_SHADOW_OFFSET
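
	/*
	 * For reference: KASan maps every 8 bytes of memory onto one shadow
	 * byte, i.e. shadow_addr = (addr >> 3) + CFG_ASAN_SHADOW_OFFSET, so
	 * the checks above verify that the shadow of TEE_RAM_VA_START lands
	 * exactly at __asan_shadow_start.
	 */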
245 
246 	/*
247 	 * Register the area covered by the shadow memory, everything from
248 	 * the start of the core up to the beginning of the shadow area.
249 	 */
250 	asan_set_shadowed((void *)TEE_TEXT_VA_START, &__asan_shadow_start);
251 
252 	/*
253 	 * Add access to areas that aren't opened automatically by a
254 	 * constructor.
255 	 */
256 	asan_tag_access(&__ctor_list, &__ctor_end);
257 	asan_tag_access(__rodata_start, __rodata_end);
258 #ifdef CFG_WITH_PAGER
259 	asan_tag_access(__pageable_start, __pageable_end);
260 #endif /*CFG_WITH_PAGER*/
261 	asan_tag_access(__nozi_start, __nozi_end);
262 	asan_tag_access(__exidx_start, __exidx_end);
263 	asan_tag_access(__extab_start, __extab_end);
264 
265 	init_run_constructors();
266 
267 	/* Everything is tagged correctly, let's start address sanitizing. */
268 	asan_start();
269 }
270 #else /*CFG_CORE_SANITIZE_KADDRESS*/
271 static void init_asan(void)
272 {
273 }
274 #endif /*CFG_CORE_SANITIZE_KADDRESS*/
275 
276 #ifdef CFG_WITH_PAGER
277 
278 #ifdef CFG_CORE_SANITIZE_KADDRESS
279 static void carve_out_asan_mem(tee_mm_pool_t *pool)
280 {
281 	const size_t s = pool->hi - pool->lo;
282 	tee_mm_entry_t *mm;
283 	paddr_t apa = ASAN_MAP_PA;
284 	size_t asz = ASAN_MAP_SZ;
285 
286 	if (core_is_buffer_outside(apa, asz, pool->lo, s))
287 		return;
288 
289 	/* Reserve the shadow area */
290 	if (!core_is_buffer_inside(apa, asz, pool->lo, s)) {
291 		if (apa < pool->lo) {
292 			/*
293 			 * ASAN buffer is overlapping with the beginning of
294 			 * the pool.
295 			 */
296 			asz -= pool->lo - apa;
297 			apa = pool->lo;
298 		} else {
299 			/*
300 			 * ASAN buffer is overlapping with the end of the
301 			 * pool.
302 			 */
303 			asz = pool->hi - apa;
304 		}
305 	}
306 	mm = tee_mm_alloc2(pool, apa, asz);
307 	assert(mm);
308 }
309 #else
310 static void carve_out_asan_mem(tee_mm_pool_t *pool __unused)
311 {
312 }
313 #endif
314 
315 static void print_pager_pool_size(void)
316 {
317 	struct tee_pager_stats __maybe_unused stats;
318 
319 	tee_pager_get_stats(&stats);
320 	IMSG("Pager pool size: %zukB",
321 		stats.npages_all * SMALL_PAGE_SIZE / 1024);
322 }
323 
324 static void init_vcore(tee_mm_pool_t *mm_vcore)
325 {
326 	const vaddr_t begin = VCORE_START_VA;
327 	size_t size = TEE_RAM_VA_SIZE;
328 
329 #ifdef CFG_CORE_SANITIZE_KADDRESS
330 	/* Carve out ASAN memory, flat mapped after core memory */
331 	if (begin + size > ASAN_SHADOW_PA)
332 		size = ASAN_MAP_PA - begin;
333 #endif
334 
335 	if (!tee_mm_init(mm_vcore, begin, size, SMALL_PAGE_SHIFT,
336 			 TEE_MM_POOL_NO_FLAGS))
337 		panic("tee_mm_vcore init failed");
338 }
339 
340 /*
341  * With CFG_CORE_ASLR=y the init part is relocated very early during boot.
342  * The init part is also paged just as the rest of the normal paged code, with
343  * the difference that it's preloaded during boot. When the backing store
344  * is configured the entire paged binary is copied in place and then also
345  * the init part. Since the init part has been relocated (references to
346  * addresses updated to compensate for the new load address) this has to be
347  * undone for the hashes of those pages to match with the original binary.
348  *
349  * If CFG_CORE_ASLR=n, nothing needs to be done as the code/ro pages are
350  * unchanged.
351  */
352 static void undo_init_relocation(uint8_t *paged_store __maybe_unused)
353 {
354 #ifdef CFG_CORE_ASLR
355 	unsigned long *ptr = NULL;
356 	const uint32_t *reloc = NULL;
357 	const uint32_t *reloc_end = NULL;
358 	unsigned long offs = boot_mmu_config.load_offset;
359 	const struct boot_embdata *embdata = (const void *)__init_end;
360 	vaddr_t addr_end = (vaddr_t)__init_end - offs - TEE_RAM_START;
361 	vaddr_t addr_start = (vaddr_t)__init_start - offs - TEE_RAM_START;
362 
363 	reloc = (const void *)((vaddr_t)embdata + embdata->reloc_offset);
364 	reloc_end = reloc + embdata->reloc_len / sizeof(*reloc);
365 
366 	for (; reloc < reloc_end; reloc++) {
367 		if (*reloc < addr_start)
368 			continue;
369 		if (*reloc >= addr_end)
370 			break;
371 		ptr = (void *)(paged_store + *reloc - addr_start);
372 		*ptr -= offs;
373 	}
374 #endif
375 }
376 
377 static struct fobj *ro_paged_alloc(tee_mm_entry_t *mm, void *hashes,
378 				   void *store)
379 {
380 	const unsigned int num_pages = tee_mm_get_bytes(mm) / SMALL_PAGE_SIZE;
381 #ifdef CFG_CORE_ASLR
382 	unsigned int reloc_offs = (vaddr_t)__pageable_start - VCORE_START_VA;
383 	const struct boot_embdata *embdata = (const void *)__init_end;
384 	const void *reloc = __init_end + embdata->reloc_offset;
385 
386 	return fobj_ro_reloc_paged_alloc(num_pages, hashes, reloc_offs,
387 					 reloc, embdata->reloc_len, store);
388 #else
389 	return fobj_ro_paged_alloc(num_pages, hashes, store);
390 #endif
391 }
392 
393 static void init_runtime(unsigned long pageable_part)
394 {
395 	size_t n;
396 	size_t init_size = (size_t)(__init_end - __init_start);
397 	size_t pageable_start = (size_t)__pageable_start;
398 	size_t pageable_end = (size_t)__pageable_end;
399 	size_t pageable_size = pageable_end - pageable_start;
400 	size_t tzsram_end = TZSRAM_BASE + TZSRAM_SIZE;
401 	size_t hash_size = (pageable_size / SMALL_PAGE_SIZE) *
402 			   TEE_SHA256_HASH_SIZE;
403 	const struct boot_embdata *embdata = (const void *)__init_end;
404 	const void *tmp_hashes = NULL;
405 	tee_mm_entry_t *mm = NULL;
406 	struct fobj *fobj = NULL;
407 	uint8_t *paged_store = NULL;
408 	uint8_t *hashes = NULL;
409 
410 	assert(pageable_size % SMALL_PAGE_SIZE == 0);
411 	assert(embdata->total_len >= embdata->hashes_offset +
412 				     embdata->hashes_len);
413 	assert(hash_size == embdata->hashes_len);
414 
415 	tmp_hashes = __init_end + embdata->hashes_offset;
416 
417 	init_asan();
418 
419 	/* Add heap2 first as heap1 may be too small for the initial bget pool */
420 	malloc_add_pool(__heap2_start, __heap2_end - __heap2_start);
421 	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
422 
423 	/*
424 	 * This needs to be initialized early to support address lookup
425 	 * in MEM_AREA_TEE_RAM
426 	 */
427 	tee_pager_early_init();
428 
429 	hashes = malloc(hash_size);
430 	IMSG_RAW("\n");
431 	IMSG("Pager is enabled. Hashes: %zu bytes", hash_size);
432 	assert(hashes);
433 	asan_memcpy_unchecked(hashes, tmp_hashes, hash_size);
434 
435 	/*
436 	 * Need tee_mm_sec_ddr initialized to be able to allocate secure
437 	 * DDR below.
438 	 */
439 	core_mmu_init_ta_ram();
440 
441 	carve_out_asan_mem(&tee_mm_sec_ddr);
442 
443 	mm = tee_mm_alloc(&tee_mm_sec_ddr, pageable_size);
444 	assert(mm);
445 	paged_store = phys_to_virt(tee_mm_get_smem(mm), MEM_AREA_TA_RAM,
446 				   pageable_size);
447 	/*
448 	 * Load pageable part in the dedicated allocated area:
449 	 * - Move pageable non-init part into pageable area. Note that the
450 	 *   bootloader may have loaded it anywhere in TA RAM, hence memmove().
451 	 * - Copy pageable init part from current location into pageable area.
452 	 */
453 	memmove(paged_store + init_size,
454 		phys_to_virt(pageable_part,
455 			     core_mmu_get_type_by_pa(pageable_part),
456 			     __pageable_part_end - __pageable_part_start),
457 		__pageable_part_end - __pageable_part_start);
458 	asan_memcpy_unchecked(paged_store, __init_start, init_size);
459 	/*
460 	 * Undo any relocation of the init part so the hash checks
461 	 * can pass.
462 	 */
463 	undo_init_relocation(paged_store);
464 
465 	/* Check that the hashes of what's in the pageable area are OK */
466 	DMSG("Checking hashes of pageable area");
467 	for (n = 0; (n * SMALL_PAGE_SIZE) < pageable_size; n++) {
468 		const uint8_t *hash = hashes + n * TEE_SHA256_HASH_SIZE;
469 		const uint8_t *page = paged_store + n * SMALL_PAGE_SIZE;
470 		TEE_Result res;
471 
472 		DMSG("hash pg_idx %zu hash %p page %p", n, hash, page);
473 		res = hash_sha256_check(hash, page, SMALL_PAGE_SIZE);
474 		if (res != TEE_SUCCESS) {
475 			EMSG("Hash failed for page %zu at %p: res 0x%x",
476 			     n, (void *)page, res);
477 			panic();
478 		}
479 	}
480 
481 	/*
482 	 * Assert that the prepaged init sections are page aligned so that
483 	 * nothing uninitialized trails at the end of the premapped init area.
484 	 */
485 	assert(!(init_size & SMALL_PAGE_MASK));
486 
487 	/*
488 	 * Initialize the virtual memory pool used for main_mmu_l2_ttb which
489 	 * is supplied to tee_pager_init() below.
490 	 */
491 	init_vcore(&tee_mm_vcore);
492 
493 	/*
494 	 * Assign the alias area for the pager at the end of the small page
495 	 * block that the rest of the binary is loaded into. We're taking more
496 	 * than needed, but we're guaranteed to not need more than the
497 	 * physical amount of TZSRAM.
498 	 */
499 	mm = tee_mm_alloc2(&tee_mm_vcore,
500 			   (vaddr_t)tee_mm_vcore.lo +
501 			   tee_mm_vcore.size - TZSRAM_SIZE,
502 			   TZSRAM_SIZE);
503 	assert(mm);
504 	tee_pager_set_alias_area(mm);
505 
506 	/*
507 	 * Claim virtual memory which isn't paged.
508 	 * Linear memory (flat-mapped core memory) ends there.
509 	 */
510 	mm = tee_mm_alloc2(&tee_mm_vcore, VCORE_UNPG_RX_PA,
511 			   (vaddr_t)(__pageable_start - VCORE_UNPG_RX_PA));
512 	assert(mm);
513 
514 	/*
515 	 * Allocate virtual memory for the pageable area and let the pager
516 	 * take charge of all the pages already assigned to that memory.
517 	 */
518 	mm = tee_mm_alloc2(&tee_mm_vcore, (vaddr_t)__pageable_start,
519 			   pageable_size);
520 	assert(mm);
521 	fobj = ro_paged_alloc(mm, hashes, paged_store);
522 	assert(fobj);
523 	tee_pager_add_core_region(tee_mm_get_smem(mm), PAGED_REGION_TYPE_RO,
524 				  fobj);
525 	fobj_put(fobj);
526 
527 	tee_pager_add_pages(pageable_start, init_size / SMALL_PAGE_SIZE, false);
528 	tee_pager_add_pages(pageable_start + init_size,
529 			    (pageable_size - init_size) / SMALL_PAGE_SIZE,
530 			    true);
531 	if (pageable_end < tzsram_end)
532 		tee_pager_add_pages(pageable_end, (tzsram_end - pageable_end) /
533 						   SMALL_PAGE_SIZE, true);
534 
535 	/*
536 	 * There may be physical pages in TZSRAM before the core load address.
537 	 * These pages can be added to the physical pages pool of the pager.
538 	 * This setup may happen when the secure bootloader runs in TZSRAM
539 	 * and its memory can be reused by OP-TEE once boot stages complete.
540 	 */
541 	tee_pager_add_pages(tee_mm_vcore.lo,
542 			(VCORE_UNPG_RX_PA - tee_mm_vcore.lo) / SMALL_PAGE_SIZE,
543 			true);
544 
545 	print_pager_pool_size();
546 }
547 #else
548 
549 static void init_runtime(unsigned long pageable_part __unused)
550 {
551 	init_asan();
552 
553 	/*
554 	 * By default whole OP-TEE uses malloc, so we need to initialize
555 	 * it early. But, when virtualization is enabled, malloc is used
556 	 * only by TEE runtime, so malloc should be initialized later, for
557 	 * every virtual partition separately. Core code uses nex_malloc
558 	 * instead.
559 	 */
560 #ifdef CFG_VIRTUALIZATION
561 	nex_malloc_add_pool(__nex_heap_start, __nex_heap_end -
562 					      __nex_heap_start);
563 #else
564 	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
565 #endif
566 
567 	IMSG_RAW("\n");
568 }
569 #endif
570 
571 void *get_dt(void)
572 {
573 	void *fdt = get_embedded_dt();
574 
575 	if (!fdt)
576 		fdt = get_external_dt();
577 
578 	return fdt;
579 }
580 
581 #if defined(CFG_EMBED_DTB)
582 void *get_embedded_dt(void)
583 {
584 	static bool checked;
585 
586 	assert(cpu_mmu_enabled());
587 
588 	if (!checked) {
589 		IMSG("Embedded DTB found");
590 
591 		if (fdt_check_header(embedded_secure_dtb))
592 			panic("Invalid embedded DTB");
593 
594 		checked = true;
595 	}
596 
597 	return embedded_secure_dtb;
598 }
599 #else
600 void *get_embedded_dt(void)
601 {
602 	return NULL;
603 }
604 #endif /*CFG_EMBED_DTB*/
605 
606 #if defined(CFG_DT)
607 void *get_external_dt(void)
608 {
609 	assert(cpu_mmu_enabled());
610 	return external_dt.blob;
611 }
612 
613 static TEE_Result release_external_dt(void)
614 {
615 	int ret = 0;
616 
617 	if (!external_dt.blob)
618 		return TEE_SUCCESS;
619 
620 	ret = fdt_pack(external_dt.blob);
621 	if (ret < 0) {
622 		EMSG("Failed to pack Device Tree at 0x%" PRIxPA ": error %d",
623 		     virt_to_phys(external_dt.blob), ret);
624 		panic();
625 	}
626 
627 	if (core_mmu_remove_mapping(MEM_AREA_EXT_DT, external_dt.blob,
628 				    CFG_DTB_MAX_SIZE))
629 		panic("Failed to remove temporary Device Tree mapping");
630 
631 	/* External DTB no longer reachable, reset the pointer */
632 	external_dt.blob = NULL;
633 
634 	return TEE_SUCCESS;
635 }
636 boot_final(release_external_dt);
637 
638 #ifdef _CFG_USE_DTB_OVERLAY
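/*
 * A fragment added by add_dt_overlay_fragment() below looks roughly like
 * this (the fragment number is just an example, the caller fills in the
 * __overlay__ node):
 *
 *	fragment@0 {
 *		target-path = "/";
 *		__overlay__ {
 *		};
 *	};
 */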
639 static int add_dt_overlay_fragment(struct dt_descriptor *dt, int ioffs)
640 {
641 	char frag[32];
642 	int offs;
643 	int ret;
644 
645 	snprintf(frag, sizeof(frag), "fragment@%d", dt->frag_id);
646 	offs = fdt_add_subnode(dt->blob, ioffs, frag);
647 	if (offs < 0)
648 		return offs;
649 
650 	dt->frag_id += 1;
651 
652 	ret = fdt_setprop_string(dt->blob, offs, "target-path", "/");
653 	if (ret < 0)
654 		return -1;
655 
656 	return fdt_add_subnode(dt->blob, offs, "__overlay__");
657 }
658 
659 static int init_dt_overlay(struct dt_descriptor *dt, int __maybe_unused dt_size)
660 {
661 	int fragment;
662 
663 	if (IS_ENABLED(CFG_EXTERNAL_DTB_OVERLAY)) {
664 		if (!fdt_check_header(dt->blob)) {
665 			fdt_for_each_subnode(fragment, dt->blob, 0)
666 				dt->frag_id += 1;
667 			return 0;
668 		}
669 	}
670 
671 	return fdt_create_empty_tree(dt->blob, dt_size);
672 }
673 #else
674 static int add_dt_overlay_fragment(struct dt_descriptor *dt __unused, int offs)
675 {
676 	return offs;
677 }
678 
679 static int init_dt_overlay(struct dt_descriptor *dt __unused,
680 			   int dt_size __unused)
681 {
682 	return 0;
683 }
684 #endif /* _CFG_USE_DTB_OVERLAY */
685 
686 static int add_dt_path_subnode(struct dt_descriptor *dt, const char *path,
687 			       const char *subnode)
688 {
689 	int offs;
690 
691 	offs = fdt_path_offset(dt->blob, path);
692 	if (offs < 0)
693 		return -1;
694 	offs = add_dt_overlay_fragment(dt, offs);
695 	if (offs < 0)
696 		return -1;
697 	offs = fdt_add_subnode(dt->blob, offs, subnode);
698 	if (offs < 0)
699 		return -1;
700 	return offs;
701 }
702 
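/*
 * add_optee_dt_node() below results in a node along these lines:
 *
 *	firmware {
 *		optee {
 *			compatible = "linaro,optee-tz";
 *			method = "smc";
 *		};
 *	};
 */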
703 static int add_optee_dt_node(struct dt_descriptor *dt)
704 {
705 	int offs;
706 	int ret;
707 
708 	if (fdt_path_offset(dt->blob, "/firmware/optee") >= 0) {
709 		DMSG("OP-TEE Device Tree node already exists!");
710 		return 0;
711 	}
712 
713 	offs = fdt_path_offset(dt->blob, "/firmware");
714 	if (offs < 0) {
715 		offs = add_dt_path_subnode(dt, "/", "firmware");
716 		if (offs < 0)
717 			return -1;
718 	}
719 
720 	offs = fdt_add_subnode(dt->blob, offs, "optee");
721 	if (offs < 0)
722 		return -1;
723 
724 	ret = fdt_setprop_string(dt->blob, offs, "compatible",
725 				 "linaro,optee-tz");
726 	if (ret < 0)
727 		return -1;
728 	ret = fdt_setprop_string(dt->blob, offs, "method", "smc");
729 	if (ret < 0)
730 		return -1;
731 	return 0;
732 }
733 
734 #ifdef CFG_PSCI_ARM32
735 static int append_psci_compatible(void *fdt, int offs, const char *str)
736 {
737 	return fdt_appendprop(fdt, offs, "compatible", str, strlen(str) + 1);
738 }
739 
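/*
 * dt_add_psci_node() below adds roughly the following node (the numeric
 * function IDs are taken from the PSCI_* constants):
 *
 *	psci {
 *		compatible = "arm,psci-1.0", "arm,psci-0.2", "arm,psci";
 *		method = "smc";
 *		cpu_suspend = <PSCI_CPU_SUSPEND>;
 *		cpu_off = <PSCI_CPU_OFF>;
 *		cpu_on = <PSCI_CPU_ON>;
 *		sys_poweroff = <PSCI_SYSTEM_OFF>;
 *		sys_reset = <PSCI_SYSTEM_RESET>;
 *	};
 */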
740 static int dt_add_psci_node(struct dt_descriptor *dt)
741 {
742 	int offs;
743 
744 	if (fdt_path_offset(dt->blob, "/psci") >= 0) {
745 		DMSG("PSCI Device Tree node already exists!");
746 		return 0;
747 	}
748 
749 	offs = add_dt_path_subnode(dt, "/", "psci");
750 	if (offs < 0)
751 		return -1;
752 	if (append_psci_compatible(dt->blob, offs, "arm,psci-1.0"))
753 		return -1;
754 	if (append_psci_compatible(dt->blob, offs, "arm,psci-0.2"))
755 		return -1;
756 	if (append_psci_compatible(dt->blob, offs, "arm,psci"))
757 		return -1;
758 	if (fdt_setprop_string(dt->blob, offs, "method", "smc"))
759 		return -1;
760 	if (fdt_setprop_u32(dt->blob, offs, "cpu_suspend", PSCI_CPU_SUSPEND))
761 		return -1;
762 	if (fdt_setprop_u32(dt->blob, offs, "cpu_off", PSCI_CPU_OFF))
763 		return -1;
764 	if (fdt_setprop_u32(dt->blob, offs, "cpu_on", PSCI_CPU_ON))
765 		return -1;
766 	if (fdt_setprop_u32(dt->blob, offs, "sys_poweroff", PSCI_SYSTEM_OFF))
767 		return -1;
768 	if (fdt_setprop_u32(dt->blob, offs, "sys_reset", PSCI_SYSTEM_RESET))
769 		return -1;
770 	return 0;
771 }
772 
773 static int check_node_compat_prefix(struct dt_descriptor *dt, int offs,
774 				    const char *prefix)
775 {
776 	const size_t prefix_len = strlen(prefix);
777 	size_t l;
778 	int plen;
779 	const char *prop;
780 
781 	prop = fdt_getprop(dt->blob, offs, "compatible", &plen);
782 	if (!prop)
783 		return -1;
784 
785 	while (plen > 0) {
786 		if (memcmp(prop, prefix, prefix_len) == 0)
787 			return 0; /* match */
788 
789 		l = strlen(prop) + 1;
790 		prop += l;
791 		plen -= l;
792 	}
793 
794 	return -1;
795 }
796 
797 static int dt_add_psci_cpu_enable_methods(struct dt_descriptor *dt)
798 {
799 	int offs = 0;
800 
801 	while (1) {
802 		offs = fdt_next_node(dt->blob, offs, NULL);
803 		if (offs < 0)
804 			break;
805 		if (fdt_getprop(dt->blob, offs, "enable-method", NULL))
806 			continue; /* already set */
807 		if (check_node_compat_prefix(dt, offs, "arm,cortex-a"))
808 			continue; /* no compatible */
809 		if (fdt_setprop_string(dt->blob, offs, "enable-method", "psci"))
810 			return -1;
811 		/* Need to restart scanning as offsets may have changed */
812 		offs = 0;
813 	}
814 	return 0;
815 }
816 
817 static int config_psci(struct dt_descriptor *dt)
818 {
819 	if (dt_add_psci_node(dt))
820 		return -1;
821 	return dt_add_psci_cpu_enable_methods(dt);
822 }
823 #else
824 static int config_psci(struct dt_descriptor *dt __unused)
825 {
826 	return 0;
827 }
828 #endif /*CFG_PSCI_ARM32*/
829 
830 static void set_dt_val(void *data, uint32_t cell_size, uint64_t val)
831 {
832 	if (cell_size == 1) {
833 		fdt32_t v = cpu_to_fdt32((uint32_t)val);
834 
835 		memcpy(data, &v, sizeof(v));
836 	} else {
837 		fdt64_t v = cpu_to_fdt64(val);
838 
839 		memcpy(data, &v, sizeof(v));
840 	}
841 }
842 
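/*
 * add_res_mem_dt_node() reserves a carveout for normal world, e.g. for
 * name "optee_core" the result looks roughly like this (cell counts and
 * addresses below are purely illustrative):
 *
 *	reserved-memory {
 *		#address-cells = <2>;
 *		#size-cells = <2>;
 *		ranges;
 *
 *		optee_core@de000000 {
 *			reg = <0x0 0xde000000 0x0 0x01000000>;
 *			no-map;
 *		};
 *	};
 */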
843 static int add_res_mem_dt_node(struct dt_descriptor *dt, const char *name,
844 			       paddr_t pa, size_t size)
845 {
846 	int offs = 0;
847 	int ret = 0;
848 	int addr_size = -1;
849 	int len_size = -1;
850 	bool found = true;
851 	char subnode_name[80] = { 0 };
852 
853 	offs = fdt_path_offset(dt->blob, "/reserved-memory");
854 
855 	if (offs < 0) {
856 		found = false;
857 		offs = 0;
858 	}
859 
860 	if (IS_ENABLED(_CFG_USE_DTB_OVERLAY)) {
861 		len_size = sizeof(paddr_t) / sizeof(uint32_t);
862 		addr_size = sizeof(paddr_t) / sizeof(uint32_t);
863 	} else {
864 		len_size = fdt_size_cells(dt->blob, offs);
865 		if (len_size < 0)
866 			return -1;
867 		addr_size = fdt_address_cells(dt->blob, offs);
868 		if (addr_size < 0)
869 			return -1;
870 	}
871 
872 	if (!found) {
873 		offs = add_dt_path_subnode(dt, "/", "reserved-memory");
874 		if (offs < 0)
875 			return -1;
876 		ret = fdt_setprop_cell(dt->blob, offs, "#address-cells",
877 				       addr_size);
878 		if (ret < 0)
879 			return -1;
880 		ret = fdt_setprop_cell(dt->blob, offs, "#size-cells", len_size);
881 		if (ret < 0)
882 			return -1;
883 		ret = fdt_setprop(dt->blob, offs, "ranges", NULL, 0);
884 		if (ret < 0)
885 			return -1;
886 	}
887 
888 	ret = snprintf(subnode_name, sizeof(subnode_name),
889 		       "%s@%" PRIxPA, name, pa);
890 	if (ret < 0 || ret >= (int)sizeof(subnode_name))
891 		DMSG("truncated node \"%s@%" PRIxPA"\"", name, pa);
892 	offs = fdt_add_subnode(dt->blob, offs, subnode_name);
893 	if (offs >= 0) {
894 		uint32_t data[FDT_MAX_NCELLS * 2];
895 
896 		set_dt_val(data, addr_size, pa);
897 		set_dt_val(data + addr_size, len_size, size);
898 		ret = fdt_setprop(dt->blob, offs, "reg", data,
899 				  sizeof(uint32_t) * (addr_size + len_size));
900 		if (ret < 0)
901 			return -1;
902 		ret = fdt_setprop(dt->blob, offs, "no-map", NULL, 0);
903 		if (ret < 0)
904 			return -1;
905 	} else {
906 		return -1;
907 	}
908 	return 0;
909 }
910 
911 #ifdef CFG_CORE_DYN_SHM
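/*
 * Helper to parse "reg" style properties cell by cell. For example, with
 * #address-cells = <2> and #size-cells = <2> a property
 *	reg = <0x00000008 0x80000000 0x00000000 0x40000000>;
 * is read back as address 0x880000000 and size 0x40000000.
 */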
912 static uint64_t get_dt_val_and_advance(const void *data, size_t *offs,
913 				       uint32_t cell_size)
914 {
915 	uint64_t rv = 0;
916 
917 	if (cell_size == 1) {
918 		uint32_t v;
919 
920 		memcpy(&v, (const uint8_t *)data + *offs, sizeof(v));
921 		*offs += sizeof(v);
922 		rv = fdt32_to_cpu(v);
923 	} else {
924 		uint64_t v;
925 
926 		memcpy(&v, (const uint8_t *)data + *offs, sizeof(v));
927 		*offs += sizeof(v);
928 		rv = fdt64_to_cpu(v);
929 	}
930 
931 	return rv;
932 }
933 
934 /*
935  * Find all non-secure memory in the DT. Memory marked inaccessible by Secure
936  * World is ignored since it cannot be mapped to be used as dynamic shared
937  * memory.
938  */
939 static int get_nsec_memory_helper(void *fdt, struct core_mmu_phys_mem *mem)
940 {
941 	const uint8_t *prop = NULL;
942 	uint64_t a = 0;
943 	uint64_t l = 0;
944 	size_t prop_offs = 0;
945 	size_t prop_len = 0;
946 	int elems_total = 0;
947 	int addr_size = 0;
948 	int len_size = 0;
949 	int offs = 0;
950 	size_t n = 0;
951 	int len = 0;
952 
953 	addr_size = fdt_address_cells(fdt, 0);
954 	if (addr_size < 0)
955 		return 0;
956 
957 	len_size = fdt_size_cells(fdt, 0);
958 	if (len_size < 0)
959 		return 0;
960 
961 	while (true) {
962 		offs = fdt_node_offset_by_prop_value(fdt, offs, "device_type",
963 						     "memory",
964 						     sizeof("memory"));
965 		if (offs < 0)
966 			break;
967 
968 		if (_fdt_get_status(fdt, offs) != (DT_STATUS_OK_NSEC |
969 						   DT_STATUS_OK_SEC))
970 			continue;
971 
972 		prop = fdt_getprop(fdt, offs, "reg", &len);
973 		if (!prop)
974 			continue;
975 
976 		prop_len = len;
977 		for (n = 0, prop_offs = 0; prop_offs < prop_len; n++) {
978 			a = get_dt_val_and_advance(prop, &prop_offs, addr_size);
979 			if (prop_offs >= prop_len) {
980 				n--;
981 				break;
982 			}
983 
984 			l = get_dt_val_and_advance(prop, &prop_offs, len_size);
985 			if (mem) {
986 				mem->type = MEM_AREA_DDR_OVERALL;
987 				mem->addr = a;
988 				mem->size = l;
989 				mem++;
990 			}
991 		}
992 
993 		elems_total += n;
994 	}
995 
996 	return elems_total;
997 }
998 
999 static struct core_mmu_phys_mem *get_nsec_memory(void *fdt, size_t *nelems)
1000 {
1001 	struct core_mmu_phys_mem *mem = NULL;
1002 	int elems_total = 0;
1003 
1004 	elems_total = get_nsec_memory_helper(fdt, NULL);
1005 	if (elems_total <= 0)
1006 		return NULL;
1007 
1008 	mem = nex_calloc(elems_total, sizeof(*mem));
1009 	if (!mem)
1010 		panic();
1011 
1012 	elems_total = get_nsec_memory_helper(fdt, mem);
1013 	assert(elems_total > 0);
1014 
1015 	*nelems = elems_total;
1016 
1017 	return mem;
1018 }
1019 #endif /*CFG_CORE_DYN_SHM*/
1020 
1021 #ifdef CFG_CORE_RESERVED_SHM
1022 static int mark_static_shm_as_reserved(struct dt_descriptor *dt)
1023 {
1024 	vaddr_t shm_start;
1025 	vaddr_t shm_end;
1026 
1027 	core_mmu_get_mem_by_type(MEM_AREA_NSEC_SHM, &shm_start, &shm_end);
1028 	if (shm_start != shm_end)
1029 		return add_res_mem_dt_node(dt, "optee_shm",
1030 					   virt_to_phys((void *)shm_start),
1031 					   shm_end - shm_start);
1032 
1033 	DMSG("No SHM configured");
1034 	return -1;
1035 }
1036 #endif /*CFG_CORE_RESERVED_SHM*/
1037 
1038 static void init_external_dt(unsigned long phys_dt)
1039 {
1040 	struct dt_descriptor *dt = &external_dt;
1041 	void *fdt;
1042 	int ret;
1043 
1044 	if (!phys_dt) {
1045 		/*
1046 		 * No need to panic as we're not using the DT in OP-TEE
1047 		 * yet, we're only adding some nodes for normal world use.
1048 		 * This makes the switch to using DT easier as we can boot
1049 		 * a newer OP-TEE with older boot loaders. Once we start to
1050 		 * initialize devices based on DT we'll likely panic
1051 		 * instead of returning here.
1052 		 */
1053 		IMSG("No non-secure external DT");
1054 		return;
1055 	}
1056 
1057 	fdt = core_mmu_add_mapping(MEM_AREA_EXT_DT, phys_dt, CFG_DTB_MAX_SIZE);
1058 	if (!fdt)
1059 		panic("Failed to map external DTB");
1060 
1061 	dt->blob = fdt;
1062 
1063 	ret = init_dt_overlay(dt, CFG_DTB_MAX_SIZE);
1064 	if (ret < 0) {
1065 		EMSG("Device Tree Overlay init fail @ %#lx: error %d", phys_dt,
1066 		     ret);
1067 		panic();
1068 	}
1069 
1070 	ret = fdt_open_into(fdt, fdt, CFG_DTB_MAX_SIZE);
1071 	if (ret < 0) {
1072 		EMSG("Invalid Device Tree at %#lx: error %d", phys_dt, ret);
1073 		panic();
1074 	}
1075 
1076 	IMSG("Non-secure external DT found");
1077 }
1078 
1079 static int mark_tzdram_as_reserved(struct dt_descriptor *dt)
1080 {
1081 	return add_res_mem_dt_node(dt, "optee_core", CFG_TZDRAM_START,
1082 				   CFG_TZDRAM_SIZE);
1083 }
1084 
1085 static void update_external_dt(void)
1086 {
1087 	struct dt_descriptor *dt = &external_dt;
1088 
1089 	if (!dt->blob)
1090 		return;
1091 
1092 	if (!IS_ENABLED(CFG_CORE_FFA) && add_optee_dt_node(dt))
1093 		panic("Failed to add OP-TEE Device Tree node");
1094 
1095 	if (config_psci(dt))
1096 		panic("Failed to config PSCI");
1097 
1098 #ifdef CFG_CORE_RESERVED_SHM
1099 	if (mark_static_shm_as_reserved(dt))
1100 		panic("Failed to config non-secure memory");
1101 #endif
1102 
1103 	if (mark_tzdram_as_reserved(dt))
1104 		panic("Failed to config secure memory");
1105 }
1106 #else /*CFG_DT*/
1107 void *get_external_dt(void)
1108 {
1109 	return NULL;
1110 }
1111 
1112 static void init_external_dt(unsigned long phys_dt __unused)
1113 {
1114 }
1115 
1116 static void update_external_dt(void)
1117 {
1118 }
1119 
1120 #ifdef CFG_CORE_DYN_SHM
1121 static struct core_mmu_phys_mem *get_nsec_memory(void *fdt __unused,
1122 						 size_t *nelems __unused)
1123 {
1124 	return NULL;
1125 }
1126 #endif /*CFG_CORE_DYN_SHM*/
1127 #endif /*!CFG_DT*/
1128 
1129 #ifdef CFG_CORE_DYN_SHM
1130 static void discover_nsec_memory(void)
1131 {
1132 	struct core_mmu_phys_mem *mem;
1133 	const struct core_mmu_phys_mem *mem_begin = NULL;
1134 	const struct core_mmu_phys_mem *mem_end = NULL;
1135 	size_t nelems;
1136 	void *fdt = get_external_dt();
1137 
1138 	if (fdt) {
1139 		mem = get_nsec_memory(fdt, &nelems);
1140 		if (mem) {
1141 			core_mmu_set_discovered_nsec_ddr(mem, nelems);
1142 			return;
1143 		}
1144 
1145 		DMSG("No non-secure memory found in FDT");
1146 	}
1147 
1148 	mem_begin = phys_ddr_overall_begin;
1149 	mem_end = phys_ddr_overall_end;
1150 	nelems = mem_end - mem_begin;
1151 	if (nelems) {
1152 		/*
1153 		 * Platform cannot use both register_ddr() and the now
1154 		 * deprecated register_dynamic_shm().
1155 		 */
1156 		assert(phys_ddr_overall_compat_begin ==
1157 		       phys_ddr_overall_compat_end);
1158 	} else {
1159 		mem_begin = phys_ddr_overall_compat_begin;
1160 		mem_end = phys_ddr_overall_compat_end;
1161 		nelems = mem_end - mem_begin;
1162 		if (!nelems)
1163 			return;
1164 		DMSG("Warning register_dynamic_shm() is deprecated, please use register_ddr() instead");
1165 	}
1166 
1167 	mem = nex_calloc(nelems, sizeof(*mem));
1168 	if (!mem)
1169 		panic();
1170 
1171 	memcpy(mem, phys_ddr_overall_begin, sizeof(*mem) * nelems);
1172 	core_mmu_set_discovered_nsec_ddr(mem, nelems);
1173 }
1174 #else /*CFG_CORE_DYN_SHM*/
1175 static void discover_nsec_memory(void)
1176 {
1177 }
1178 #endif /*!CFG_CORE_DYN_SHM*/
1179 
1180 #ifdef CFG_VIRTUALIZATION
1181 static TEE_Result virt_init_heap(void)
1182 {
1183 	/* We need to initialize a pool for every virtual guest partition */
1184 	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
1185 
1186 	return TEE_SUCCESS;
1187 }
1188 preinit_early(virt_init_heap);
1189 #endif
1190 
1191 void init_tee_runtime(void)
1192 {
1193 #ifndef CFG_WITH_PAGER
1194 	/* Pager initializes TA RAM early */
1195 	core_mmu_init_ta_ram();
1196 #endif
1197 	/*
1198 	 * With virtualization we call this function when creating the
1199 	 * OP-TEE partition instead.
1200 	 */
1201 	if (!IS_ENABLED(CFG_VIRTUALIZATION))
1202 		call_preinitcalls();
1203 	call_initcalls();
1204 }
1205 
1206 static void init_primary(unsigned long pageable_part, unsigned long nsec_entry)
1207 {
1208 	/*
1209 	 * Mask asynchronous exceptions before switching to the thread vector
1210 	 * as the thread handler requires those to be masked while
1211 	 * executing with the temporary stack. The thread subsystem also
1212 	 * asserts that the foreign interrupts are blocked when using most of
1213 	 * its functions.
1214 	 */
1215 	thread_set_exceptions(THREAD_EXCP_ALL);
1216 	primary_save_cntfrq();
1217 	init_vfp_sec();
1218 	/*
1219 	 * Pager: init_runtime() calls thread_kernel_enable_vfp() so we must
1220 	 * set a current thread right now to avoid a chicken-and-egg problem
1221 	 * (thread_init_boot_thread() sets the current thread but needs
1222 	 * things set by init_runtime()).
1223 	 */
1224 	thread_get_core_local()->curr_thread = 0;
1225 	init_runtime(pageable_part);
1226 
1227 	if (IS_ENABLED(CFG_VIRTUALIZATION)) {
1228 		/*
1229 		 * Virtualization: We can't initialize threads right now because
1230 		 * threads belong to the "tee" part and will be initialized
1231 		 * separately for each new virtual guest. So, we'll clear
1232 		 * "curr_thread" and call it done.
1233 		 */
1234 		thread_get_core_local()->curr_thread = -1;
1235 	} else {
1236 		thread_init_boot_thread();
1237 	}
1238 	thread_init_primary();
1239 	thread_init_per_cpu();
1240 	init_sec_mon(nsec_entry);
1241 }
1242 
1243 /*
1244  * Note: this function is weak just to make it possible to exclude it from
1245  * the unpaged area.
1246  */
1247 void __weak boot_init_primary_late(unsigned long fdt)
1248 {
1249 	init_external_dt(fdt);
1250 	tpm_map_log_area(get_external_dt());
1251 	discover_nsec_memory();
1252 	update_external_dt();
1253 	configure_console_from_dt();
1254 
1255 	IMSG("OP-TEE version: %s", core_v_str);
1256 	if (IS_ENABLED(CFG_WARN_INSECURE)) {
1257 		IMSG("WARNING: This OP-TEE configuration might be insecure!");
1258 		IMSG("WARNING: Please check https://optee.readthedocs.io/en/latest/architecture/porting_guidelines.html");
1259 	}
1260 	IMSG("Primary CPU initializing");
1261 #ifdef CFG_CORE_ASLR
1262 	DMSG("Executing at offset %#lx with virtual load address %#"PRIxVA,
1263 	     (unsigned long)boot_mmu_config.load_offset, VCORE_START_VA);
1264 #endif
1265 
1266 	main_init_gic();
1267 	init_vfp_nsec();
1268 	if (IS_ENABLED(CFG_VIRTUALIZATION)) {
1269 		IMSG("Initializing virtualization support");
1270 		core_mmu_init_virtualization();
1271 	} else {
1272 		init_tee_runtime();
1273 	}
1274 	call_finalcalls();
1275 	IMSG("Primary CPU switching to normal world boot");
1276 }
1277 
1278 static void init_secondary_helper(unsigned long nsec_entry)
1279 {
1280 	IMSG("Secondary CPU %zu initializing", get_core_pos());
1281 
1282 	/*
1283 	 * Mask asynchronous exceptions before switching to the thread vector
1284 	 * as the thread handler requires those to be masked while
1285 	 * executing with the temporary stack. The thread subsystem also
1286 	 * asserts that the foreign interrupts are blocked when using most of
1287 	 * its functions.
1288 	 */
1289 	thread_set_exceptions(THREAD_EXCP_ALL);
1290 
1291 	secondary_init_cntfrq();
1292 	thread_init_per_cpu();
1293 	init_sec_mon(nsec_entry);
1294 	main_secondary_init_gic();
1295 	init_vfp_sec();
1296 	init_vfp_nsec();
1297 
1298 	IMSG("Secondary CPU %zu switching to normal world boot", get_core_pos());
1299 }
1300 
1301 /*
1302  * Note: this function is weak just to make it possible to exclude it from
1303  * the unpaged area so that it lies in the init area.
1304  */
1305 void __weak boot_init_primary_early(unsigned long pageable_part,
1306 				    unsigned long nsec_entry __maybe_unused)
1307 {
1308 	unsigned long e = PADDR_INVALID;
1309 
1310 #if !defined(CFG_WITH_ARM_TRUSTED_FW)
1311 	e = nsec_entry;
1312 #endif
1313 
1314 	init_primary(pageable_part, e);
1315 }
1316 
1317 #if defined(CFG_WITH_ARM_TRUSTED_FW)
1318 unsigned long boot_cpu_on_handler(unsigned long a0 __maybe_unused,
1319 				  unsigned long a1 __unused)
1320 {
1321 	init_secondary_helper(PADDR_INVALID);
1322 	return 0;
1323 }
1324 #else
1325 void boot_init_secondary(unsigned long nsec_entry)
1326 {
1327 	init_secondary_helper(nsec_entry);
1328 }
1329 #endif
1330 
1331 #if defined(CFG_BOOT_SECONDARY_REQUEST)
1332 void boot_set_core_ns_entry(size_t core_idx, uintptr_t entry,
1333 			    uintptr_t context_id)
1334 {
1335 	ns_entry_contexts[core_idx].entry_point = entry;
1336 	ns_entry_contexts[core_idx].context_id = context_id;
1337 	dsb_ishst();
1338 }
1339 
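/*
 * boot_core_release() publishes the entry point before raising the
 * spin_table flag (ordering ensured by dmb()); dsb() + sev() then wake a
 * secondary core parked in wfe() in boot_core_hpen() below.
 */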
1340 int boot_core_release(size_t core_idx, paddr_t entry)
1341 {
1342 	if (!core_idx || core_idx >= CFG_TEE_CORE_NB_CORE)
1343 		return -1;
1344 
1345 	ns_entry_contexts[core_idx].entry_point = entry;
1346 	dmb();
1347 	spin_table[core_idx] = 1;
1348 	dsb();
1349 	sev();
1350 
1351 	return 0;
1352 }
1353 
1354 /*
1355  * Spin until a secondary boot request arrives, then return
1356  * the secondary core's entry context.
1357  */
1358 struct ns_entry_context *boot_core_hpen(void)
1359 {
1360 #ifdef CFG_PSCI_ARM32
1361 	return &ns_entry_contexts[get_core_pos()];
1362 #else
1363 	do {
1364 		wfe();
1365 	} while (!spin_table[get_core_pos()]);
1366 	dmb();
1367 	return &ns_entry_contexts[get_core_pos()];
1368 #endif
1369 }
1370 #endif
1371 
1372 #if defined(CFG_CORE_ASLR)
1373 #if defined(CFG_DT)
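/*
 * The seed is looked up in the secure view of the DT, e.g. (the value
 * shown is only an example):
 *
 *	secure-chosen {
 *		kaslr-seed = <0x01234567 0x89abcdef>;
 *	};
 */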
1374 unsigned long __weak get_aslr_seed(void *fdt)
1375 {
1376 	int rc = fdt_check_header(fdt);
1377 	const uint64_t *seed = NULL;
1378 	int offs = 0;
1379 	int len = 0;
1380 
1381 	if (rc) {
1382 		DMSG("Bad fdt: %d", rc);
1383 		goto err;
1384 	}
1385 
1386 	offs =  fdt_path_offset(fdt, "/secure-chosen");
1387 	if (offs < 0) {
1388 		DMSG("Cannot find /secure-chosen");
1389 		goto err;
1390 	}
1391 	seed = fdt_getprop(fdt, offs, "kaslr-seed", &len);
1392 	if (!seed || len != sizeof(*seed)) {
1393 		DMSG("Cannot find valid kaslr-seed");
1394 		goto err;
1395 	}
1396 
1397 	return fdt64_to_cpu(*seed);
1398 
1399 err:
1400 	/* Try platform implementation */
1401 	return plat_get_aslr_seed();
1402 }
1403 #else /*!CFG_DT*/
1404 unsigned long __weak get_aslr_seed(void *fdt __unused)
1405 {
1406 	/* Try platform implementation */
1407 	return plat_get_aslr_seed();
1408 }
1409 #endif /*!CFG_DT*/
1410 #endif /*CFG_CORE_ASLR*/
1411