xref: /optee_os/core/arch/arm/kernel/boot.c (revision c04a96a45ffe0e665a4d86e542ec921fae932aa8)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2015-2021, Linaro Limited
4  */
5 
6 #include <arm.h>
7 #include <assert.h>
8 #include <compiler.h>
9 #include <config.h>
10 #include <console.h>
11 #include <crypto/crypto.h>
12 #include <initcall.h>
13 #include <inttypes.h>
14 #include <keep.h>
15 #include <kernel/asan.h>
16 #include <kernel/boot.h>
17 #include <kernel/linker.h>
18 #include <kernel/misc.h>
19 #include <kernel/panic.h>
20 #include <kernel/tee_misc.h>
21 #include <kernel/thread.h>
22 #include <kernel/tpm.h>
23 #include <libfdt.h>
24 #include <malloc.h>
25 #include <mm/core_memprot.h>
26 #include <mm/core_mmu.h>
27 #include <mm/fobj.h>
28 #include <mm/tee_mm.h>
29 #include <mm/tee_pager.h>
30 #include <sm/psci.h>
31 #include <stdio.h>
32 #include <trace.h>
33 #include <utee_defines.h>
34 #include <util.h>
35 
36 #include <platform_config.h>
37 
38 #if !defined(CFG_WITH_ARM_TRUSTED_FW)
39 #include <sm/sm.h>
40 #endif
41 
42 #if defined(CFG_WITH_VFP)
43 #include <kernel/vfp.h>
44 #endif
45 
46 /*
47  * In this file we're using unsigned long to represent physical pointers as
48  * they are received in a single register when OP-TEE is initially entered.
49  * This limits 32-bit systems to only make use of the lower 32 bits
50  * of a physical address for initial parameters.
51  *
52  * 64-bit systems on the other hand can use full 64-bit physical pointers.
53  */
54 #define PADDR_INVALID		ULONG_MAX
55 
56 #if defined(CFG_BOOT_SECONDARY_REQUEST)
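/*
 * Per-core entry point and context ID that normal world has requested for
 * the secondary cores, plus the spin table flags used to release them
 * (see boot_set_core_ns_entry() and boot_core_release() below).
 */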
57 struct ns_entry_context {
58 	uintptr_t entry_point;
59 	uintptr_t context_id;
60 };
61 struct ns_entry_context ns_entry_contexts[CFG_TEE_CORE_NB_CORE];
62 static uint32_t spin_table[CFG_TEE_CORE_NB_CORE];
63 #endif
64 
65 #ifdef CFG_BOOT_SYNC_CPU
66 /*
67  * Array used during boot to synchronize the CPUs.
68  * When 0, the CPU has not started.
69  * When 1, it has started.
70  */
71 uint32_t sem_cpu_sync[CFG_TEE_CORE_NB_CORE];
72 DECLARE_KEEP_PAGER(sem_cpu_sync);
73 #endif
74 
75 #ifdef CFG_DT
76 struct dt_descriptor {
77 	void *blob;
78 #ifdef CFG_EXTERNAL_DTB_OVERLAY
79 	int frag_id;
80 #endif
81 };
82 
83 static struct dt_descriptor external_dt __nex_bss;
84 #endif
85 
86 #ifdef CFG_SECONDARY_INIT_CNTFRQ
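/*
 * Counter frequency saved on the primary CPU by primary_save_cntfrq() and
 * programmed into CNTFRQ on each secondary CPU by secondary_init_cntfrq().
 */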
87 static uint32_t cntfrq;
88 #endif
89 
90 /* May be overridden in plat-$(PLATFORM)/main.c */
91 __weak void plat_primary_init_early(void)
92 {
93 }
94 DECLARE_KEEP_PAGER(plat_primary_init_early);
95 
96 /* May be overridden in plat-$(PLATFORM)/main.c */
97 __weak void main_init_gic(void)
98 {
99 }
100 
101 /* May be overridden in plat-$(PLATFORM)/main.c */
102 __weak void main_secondary_init_gic(void)
103 {
104 }
105 
106 #if defined(CFG_WITH_ARM_TRUSTED_FW)
107 void init_sec_mon(unsigned long nsec_entry __maybe_unused)
108 {
109 	assert(nsec_entry == PADDR_INVALID);
110 	/* Do nothing as we don't have a secure monitor */
111 }
112 #else
113 /* May be overridden in plat-$(PLATFORM)/main.c */
114 __weak void init_sec_mon(unsigned long nsec_entry)
115 {
116 	struct sm_nsec_ctx *nsec_ctx;
117 
118 	assert(nsec_entry != PADDR_INVALID);
119 
120 	/* Initialize secure monitor */
121 	nsec_ctx = sm_get_nsec_ctx();
122 	nsec_ctx->mon_lr = nsec_entry;
123 	nsec_ctx->mon_spsr = CPSR_MODE_SVC | CPSR_I;
124 	if (nsec_entry & 1)
125 		nsec_ctx->mon_spsr |= CPSR_T;
126 }
127 #endif
128 
129 #if defined(CFG_WITH_ARM_TRUSTED_FW)
130 static void init_vfp_nsec(void)
131 {
132 }
133 #else
134 static void init_vfp_nsec(void)
135 {
136 	/* Normal world can use CP10 and CP11 (SIMD/VFP) */
137 	write_nsacr(read_nsacr() | NSACR_CP10 | NSACR_CP11);
138 }
139 #endif
140 
141 #if defined(CFG_WITH_VFP)
142 
143 #ifdef ARM32
144 static void init_vfp_sec(void)
145 {
146 	uint32_t cpacr = read_cpacr();
147 
148 	/*
149 	 * Enable Advanced SIMD functionality.
150 	 * Enable use of D16-D31 of the Floating-point Extension register
151 	 * file.
152 	 */
153 	cpacr &= ~(CPACR_ASEDIS | CPACR_D32DIS);
154 	/*
155 	 * Enable usage of CP10 and CP11 (SIMD/VFP) in both kernel and
156 	 * user mode.
157 	 */
158 	cpacr |= CPACR_CP(10, CPACR_CP_ACCESS_FULL);
159 	cpacr |= CPACR_CP(11, CPACR_CP_ACCESS_FULL);
160 	write_cpacr(cpacr);
161 }
162 #endif /* ARM32 */
163 
164 #ifdef ARM64
165 static void init_vfp_sec(void)
166 {
167 	/* Not using VFP until thread_kernel_enable_vfp() */
168 	vfp_disable();
169 }
170 #endif /* ARM64 */
171 
172 #else /* CFG_WITH_VFP */
173 
174 static void init_vfp_sec(void)
175 {
176 	/* Not using VFP */
177 }
178 #endif
179 
180 #ifdef CFG_SECONDARY_INIT_CNTFRQ
181 static void primary_save_cntfrq(void)
182 {
183 	assert(cntfrq == 0);
184 
185 	/*
186 	 * CNTFRQ should be initialized on the primary CPU by a
187 	 * previous boot stage
188 	 */
189 	cntfrq = read_cntfrq();
190 }
191 
192 static void secondary_init_cntfrq(void)
193 {
194 	assert(cntfrq != 0);
195 	write_cntfrq(cntfrq);
196 }
197 #else /* CFG_SECONDARY_INIT_CNTFRQ */
198 static void primary_save_cntfrq(void)
199 {
200 }
201 
202 static void secondary_init_cntfrq(void)
203 {
204 }
205 #endif
206 
207 #ifdef CFG_CORE_SANITIZE_KADDRESS
208 static void init_run_constructors(void)
209 {
210 	const vaddr_t *ctor;
211 
212 	for (ctor = &__ctor_list; ctor < &__ctor_end; ctor++)
213 		((void (*)(void))(*ctor))();
214 }
215 
216 static void init_asan(void)
217 {
218 
219 	/*
220 	 * CFG_ASAN_SHADOW_OFFSET is also supplied as
221 	 * -fasan-shadow-offset=$(CFG_ASAN_SHADOW_OFFSET) to the compiler.
222 	 * Since all the values needed to calculate CFG_ASAN_SHADOW_OFFSET
223 	 * aren't available to make, we calculate it in advance and hard
224 	 * code it into the platform conf.mk. Here, where we do have all
225 	 * the needed values, we double check that the compiler was
226 	 * supplied the correct value.
227 	 */
228 
229 #define __ASAN_SHADOW_START \
230 	ROUNDUP(TEE_RAM_VA_START + (TEE_RAM_VA_SIZE * 8) / 9 - 8, 8)
231 	assert(__ASAN_SHADOW_START == (vaddr_t)&__asan_shadow_start);
232 #define __CFG_ASAN_SHADOW_OFFSET \
233 	(__ASAN_SHADOW_START - (TEE_RAM_VA_START / 8))
234 	COMPILE_TIME_ASSERT(CFG_ASAN_SHADOW_OFFSET == __CFG_ASAN_SHADOW_OFFSET);
235 #undef __ASAN_SHADOW_START
236 #undef __CFG_ASAN_SHADOW_OFFSET
237 
238 	/*
239 	 * Assign the area covered by the shadow region: everything from the
240 	 * start of TEE text up to the beginning of the shadow area.
241 	 */
242 	asan_set_shadowed((void *)TEE_TEXT_VA_START, &__asan_shadow_start);
243 
244 	/*
245 	 * Add access to areas that aren't opened automatically by a
246 	 * constructor.
247 	 */
248 	asan_tag_access(&__ctor_list, &__ctor_end);
249 	asan_tag_access(__rodata_start, __rodata_end);
250 #ifdef CFG_WITH_PAGER
251 	asan_tag_access(__pageable_start, __pageable_end);
252 #endif /*CFG_WITH_PAGER*/
253 	asan_tag_access(__nozi_start, __nozi_end);
254 	asan_tag_access(__exidx_start, __exidx_end);
255 	asan_tag_access(__extab_start, __extab_end);
256 
257 	init_run_constructors();
258 
259 	/* Everything is tagged correctly, let's start address sanitizing. */
260 	asan_start();
261 }
262 #else /*CFG_CORE_SANITIZE_KADDRESS*/
263 static void init_asan(void)
264 {
265 }
266 #endif /*CFG_CORE_SANITIZE_KADDRESS*/
267 
268 #ifdef CFG_WITH_PAGER
269 
270 #ifdef CFG_CORE_SANITIZE_KADDRESS
271 static void carve_out_asan_mem(tee_mm_pool_t *pool)
272 {
273 	const size_t s = pool->hi - pool->lo;
274 	tee_mm_entry_t *mm;
275 	paddr_t apa = ASAN_MAP_PA;
276 	size_t asz = ASAN_MAP_SZ;
277 
278 	if (core_is_buffer_outside(apa, asz, pool->lo, s))
279 		return;
280 
281 	/* Reserve the shadow area */
282 	if (!core_is_buffer_inside(apa, asz, pool->lo, s)) {
283 		if (apa < pool->lo) {
284 			/*
285 			 * ASAN buffer is overlapping with the beginning of
286 			 * the pool.
287 			 */
288 			asz -= pool->lo - apa;
289 			apa = pool->lo;
290 		} else {
291 			/*
292 			 * ASAN buffer is overlapping with the end of the
293 			 * pool.
294 			 */
295 			asz = pool->hi - apa;
296 		}
297 	}
298 	mm = tee_mm_alloc2(pool, apa, asz);
299 	assert(mm);
300 }
301 #else
302 static void carve_out_asan_mem(tee_mm_pool_t *pool __unused)
303 {
304 }
305 #endif
306 
307 static void print_pager_pool_size(void)
308 {
309 	struct tee_pager_stats __maybe_unused stats;
310 
311 	tee_pager_get_stats(&stats);
312 	IMSG("Pager pool size: %zukB",
313 		stats.npages_all * SMALL_PAGE_SIZE / 1024);
314 }
315 
316 static void init_vcore(tee_mm_pool_t *mm_vcore)
317 {
318 	const vaddr_t begin = VCORE_START_VA;
319 	vaddr_t end = begin + TEE_RAM_VA_SIZE;
320 
321 #ifdef CFG_CORE_SANITIZE_KADDRESS
322 	/* Carve out asan memory, flat mapped after core memory */
323 	if (end > ASAN_SHADOW_PA)
324 		end = ASAN_MAP_PA;
325 #endif
326 
327 	if (!tee_mm_init(mm_vcore, begin, end, SMALL_PAGE_SHIFT,
328 			 TEE_MM_POOL_NO_FLAGS))
329 		panic("tee_mm_vcore init failed");
330 }
331 
332 /*
333  * With CFG_CORE_ASLR=y the init part is relocated very early during boot.
334  * The init part is also paged just as the rest of the normal paged code, with
335  * the difference that it's preloaded during boot. When the backing store
336  * is configured the entire paged binary is copied in place and then also
337  * the init part. Since the init part has been relocated (references to
338  * addresses updated to compensate for the new load address) this has to be
339  * undone for the hashes of those pages to match with the original binary.
340  *
341  * If CFG_CORE_ASLR=n, nothing needs to be done as the code/ro pages are
342  * unchanged.
343  */
344 static void undo_init_relocation(uint8_t *paged_store __maybe_unused)
345 {
346 #ifdef CFG_CORE_ASLR
347 	unsigned long *ptr = NULL;
348 	const uint32_t *reloc = NULL;
349 	const uint32_t *reloc_end = NULL;
350 	unsigned long offs = boot_mmu_config.load_offset;
351 	const struct boot_embdata *embdata = (const void *)__init_end;
352 	vaddr_t addr_end = (vaddr_t)__init_end - offs - TEE_RAM_START;
353 	vaddr_t addr_start = (vaddr_t)__init_start - offs - TEE_RAM_START;
354 
355 	reloc = (const void *)((vaddr_t)embdata + embdata->reloc_offset);
356 	reloc_end = reloc + embdata->reloc_len / sizeof(*reloc);
357 
358 	for (; reloc < reloc_end; reloc++) {
359 		if (*reloc < addr_start)
360 			continue;
361 		if (*reloc >= addr_end)
362 			break;
363 		ptr = (void *)(paged_store + *reloc - addr_start);
364 		*ptr -= offs;
365 	}
366 #endif
367 }
368 
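/*
 * Create the read-only fobj backing the pageable area. With CFG_CORE_ASLR
 * the relocation table is passed along so the relocations can be applied
 * again each time a page is paged back in.
 */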
369 static struct fobj *ro_paged_alloc(tee_mm_entry_t *mm, void *hashes,
370 				   void *store)
371 {
372 	const unsigned int num_pages = tee_mm_get_bytes(mm) / SMALL_PAGE_SIZE;
373 #ifdef CFG_CORE_ASLR
374 	unsigned int reloc_offs = (vaddr_t)__pageable_start - VCORE_START_VA;
375 	const struct boot_embdata *embdata = (const void *)__init_end;
376 	const void *reloc = __init_end + embdata->reloc_offset;
377 
378 	return fobj_ro_reloc_paged_alloc(num_pages, hashes, reloc_offs,
379 					 reloc, embdata->reloc_len, store);
380 #else
381 	return fobj_ro_paged_alloc(num_pages, hashes, store);
382 #endif
383 }
384 
385 static void init_runtime(unsigned long pageable_part)
386 {
387 	size_t n;
388 	size_t init_size = (size_t)(__init_end - __init_start);
389 	size_t pageable_start = (size_t)__pageable_start;
390 	size_t pageable_end = (size_t)__pageable_end;
391 	size_t pageable_size = pageable_end - pageable_start;
392 	size_t tzsram_end = TZSRAM_BASE + TZSRAM_SIZE;
393 	size_t hash_size = (pageable_size / SMALL_PAGE_SIZE) *
394 			   TEE_SHA256_HASH_SIZE;
395 	const struct boot_embdata *embdata = (const void *)__init_end;
396 	const void *tmp_hashes = NULL;
397 	tee_mm_entry_t *mm = NULL;
398 	struct fobj *fobj = NULL;
399 	uint8_t *paged_store = NULL;
400 	uint8_t *hashes = NULL;
401 
402 	assert(pageable_size % SMALL_PAGE_SIZE == 0);
403 	assert(embdata->total_len >= embdata->hashes_offset +
404 				     embdata->hashes_len);
405 	assert(hash_size == embdata->hashes_len);
406 
407 	tmp_hashes = __init_end + embdata->hashes_offset;
408 
409 	init_asan();
410 
411 	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
412 	malloc_add_pool(__heap2_start, __heap2_end - __heap2_start);
413 
414 	/*
415 	 * This needs to be initialized early to support address lookup
416 	 * in MEM_AREA_TEE_RAM
417 	 */
418 	tee_pager_early_init();
419 
420 	hashes = malloc(hash_size);
421 	IMSG_RAW("\n");
422 	IMSG("Pager is enabled. Hashes: %zu bytes", hash_size);
423 	assert(hashes);
424 	asan_memcpy_unchecked(hashes, tmp_hashes, hash_size);
425 
426 	/*
427 	 * Need tee_mm_sec_ddr initialized to be able to allocate secure
428 	 * DDR below.
429 	 */
430 	core_mmu_init_ta_ram();
431 
432 	carve_out_asan_mem(&tee_mm_sec_ddr);
433 
434 	mm = tee_mm_alloc(&tee_mm_sec_ddr, pageable_size);
435 	assert(mm);
436 	paged_store = phys_to_virt(tee_mm_get_smem(mm), MEM_AREA_TA_RAM);
437 	/*
438 	 * Load pageable part in the dedicated allocated area:
439 	 * - Move pageable non-init part into pageable area. Note bootloader
440 	 *   may have loaded it anywhere in TA RAM hence use memmove().
441 	 * - Copy pageable init part from current location into pageable area.
442 	 */
443 	memmove(paged_store + init_size,
444 		phys_to_virt(pageable_part,
445 			     core_mmu_get_type_by_pa(pageable_part)),
446 		__pageable_part_end - __pageable_part_start);
447 	asan_memcpy_unchecked(paged_store, __init_start, init_size);
448 	/*
449 	 * Undo any relocation of the init part so the hash checks
450 	 * can pass.
451 	 */
452 	undo_init_relocation(paged_store);
453 
454 	/* Check that the hashes of what's in the pageable area are OK */
455 	DMSG("Checking hashes of pageable area");
456 	for (n = 0; (n * SMALL_PAGE_SIZE) < pageable_size; n++) {
457 		const uint8_t *hash = hashes + n * TEE_SHA256_HASH_SIZE;
458 		const uint8_t *page = paged_store + n * SMALL_PAGE_SIZE;
459 		TEE_Result res;
460 
461 		DMSG("hash pg_idx %zu hash %p page %p", n, hash, page);
462 		res = hash_sha256_check(hash, page, SMALL_PAGE_SIZE);
463 		if (res != TEE_SUCCESS) {
464 			EMSG("Hash failed for page %zu at %p: res 0x%x",
465 			     n, (void *)page, res);
466 			panic();
467 		}
468 	}
469 
470 	/*
471 	 * Assert prepaged init sections are page aligned so that nothing
472 	 * trails uninitialized at the end of the premapped init area.
473 	 */
474 	assert(!(init_size & SMALL_PAGE_MASK));
475 
476 	/*
477 	 * Initialize the virtual memory pool used for main_mmu_l2_ttb which
478 	 * is supplied to tee_pager_init() below.
479 	 */
480 	init_vcore(&tee_mm_vcore);
481 
482 	/*
483 	 * Assign the pager's alias area at the end of the small page block
484 	 * that the rest of the binary is loaded into. We're taking more than
485 	 * needed, but we're guaranteed to not need more than the physical amount of
486 	 * TZSRAM.
487 	 */
488 	mm = tee_mm_alloc2(&tee_mm_vcore,
489 		(vaddr_t)tee_mm_vcore.hi - TZSRAM_SIZE, TZSRAM_SIZE);
490 	assert(mm);
491 	tee_pager_set_alias_area(mm);
492 
493 	/*
494 	 * Claim virtual memory which isn't paged.
495 	 * Linear memory (flat-mapped core memory) ends there.
496 	 */
497 	mm = tee_mm_alloc2(&tee_mm_vcore, VCORE_UNPG_RX_PA,
498 			   (vaddr_t)(__pageable_start - VCORE_UNPG_RX_PA));
499 	assert(mm);
500 
501 	/*
502 	 * Allocate virtual memory for the pageable area and let the pager
503 	 * take charge of all the pages already assigned to that memory.
504 	 */
505 	mm = tee_mm_alloc2(&tee_mm_vcore, (vaddr_t)__pageable_start,
506 			   pageable_size);
507 	assert(mm);
508 	fobj = ro_paged_alloc(mm, hashes, paged_store);
509 	assert(fobj);
510 	tee_pager_add_core_region(tee_mm_get_smem(mm), PAGED_REGION_TYPE_RO,
511 				  fobj);
512 	fobj_put(fobj);
513 
514 	tee_pager_add_pages(pageable_start, init_size / SMALL_PAGE_SIZE, false);
515 	tee_pager_add_pages(pageable_start + init_size,
516 			    (pageable_size - init_size) / SMALL_PAGE_SIZE,
517 			    true);
518 	if (pageable_end < tzsram_end)
519 		tee_pager_add_pages(pageable_end, (tzsram_end - pageable_end) /
520 						   SMALL_PAGE_SIZE, true);
521 
522 	/*
523 	 * There may be physical pages in TZSRAM before the core load address.
524 	 * These pages can be added to the physical pages pool of the pager.
525 	 * This setup may happen when the secure bootloader runs in TZSRAM
526 	 * and its memory can be reused by OP-TEE once boot stages complete.
527 	 */
528 	tee_pager_add_pages(tee_mm_vcore.lo,
529 			(VCORE_UNPG_RX_PA - tee_mm_vcore.lo) / SMALL_PAGE_SIZE,
530 			true);
531 
532 	print_pager_pool_size();
533 }
534 #else
535 
536 static void init_runtime(unsigned long pageable_part __unused)
537 {
538 	init_asan();
539 
540 	/*
541 	 * By default the whole of OP-TEE uses malloc, so we need to
542 	 * initialize it early. But, when virtualization is enabled, malloc
543 	 * is used only by the TEE runtime, so it should be initialized later, for
544 	 * every virtual partition separately. Core code uses nex_malloc
545 	 * instead.
546 	 */
547 #ifdef CFG_VIRTUALIZATION
548 	nex_malloc_add_pool(__nex_heap_start, __nex_heap_end -
549 					      __nex_heap_start);
550 #else
551 	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
552 #endif
553 
554 	IMSG_RAW("\n");
555 }
556 #endif
557 
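/* Return the embedded DT if there is one, otherwise the external DT. */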
558 void *get_dt(void)
559 {
560 	void *fdt = get_embedded_dt();
561 
562 	if (!fdt)
563 		fdt = get_external_dt();
564 
565 	return fdt;
566 }
567 
568 #if defined(CFG_EMBED_DTB)
569 void *get_embedded_dt(void)
570 {
571 	static bool checked;
572 
573 	assert(cpu_mmu_enabled());
574 
575 	if (!checked) {
576 		IMSG("Embedded DTB found");
577 
578 		if (fdt_check_header(embedded_secure_dtb))
579 			panic("Invalid embedded DTB");
580 
581 		checked = true;
582 	}
583 
584 	return embedded_secure_dtb;
585 }
586 #else
587 void *get_embedded_dt(void)
588 {
589 	return NULL;
590 }
591 #endif /*CFG_EMBED_DTB*/
592 
593 #if defined(CFG_DT)
594 void *get_external_dt(void)
595 {
596 	assert(cpu_mmu_enabled());
597 	return external_dt.blob;
598 }
599 
600 static TEE_Result release_external_dt(void)
601 {
602 	int ret = 0;
603 
604 	if (!external_dt.blob)
605 		return TEE_SUCCESS;
606 
607 	ret = fdt_pack(external_dt.blob);
608 	if (ret < 0) {
609 		EMSG("Failed to pack Device Tree at 0x%" PRIxPA ": error %d",
610 		     virt_to_phys(external_dt.blob), ret);
611 		panic();
612 	}
613 
614 	if (core_mmu_remove_mapping(MEM_AREA_EXT_DT, external_dt.blob,
615 				    CFG_DTB_MAX_SIZE))
616 		panic("Failed to remove temporary Device Tree mapping");
617 
618 	/* The external DTB can no longer be accessed, reset the pointer */
619 	external_dt.blob = NULL;
620 
621 	return TEE_SUCCESS;
622 }
623 boot_final(release_external_dt);
624 
625 #ifdef CFG_EXTERNAL_DTB_OVERLAY
626 static int add_dt_overlay_fragment(struct dt_descriptor *dt, int ioffs)
627 {
628 	char frag[32];
629 	int offs;
630 	int ret;
631 
632 	snprintf(frag, sizeof(frag), "fragment@%d", dt->frag_id);
633 	offs = fdt_add_subnode(dt->blob, ioffs, frag);
634 	if (offs < 0)
635 		return offs;
636 
637 	dt->frag_id += 1;
638 
639 	ret = fdt_setprop_string(dt->blob, offs, "target-path", "/");
640 	if (ret < 0)
641 		return -1;
642 
643 	return fdt_add_subnode(dt->blob, offs, "__overlay__");
644 }
645 
646 static int init_dt_overlay(struct dt_descriptor *dt, int __maybe_unused dt_size)
647 {
648 	int fragment;
649 	int ret;
650 
651 	ret = fdt_check_header(dt->blob);
652 	if (!ret) {
653 		fdt_for_each_subnode(fragment, dt->blob, 0)
654 			dt->frag_id += 1;
655 		return ret;
656 	}
657 
658 #ifdef CFG_DT_ADDR
659 	return fdt_create_empty_tree(dt->blob, dt_size);
660 #else
661 	return -1;
662 #endif
663 }
664 #else
665 static int add_dt_overlay_fragment(struct dt_descriptor *dt __unused, int offs)
666 {
667 	return offs;
668 }
669 
670 static int init_dt_overlay(struct dt_descriptor *dt __unused,
671 			   int dt_size __unused)
672 {
673 	return 0;
674 }
675 #endif /* CFG_EXTERNAL_DTB_OVERLAY */
676 
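/*
 * Add a subnode named @subnode under the node at @path, going through an
 * overlay fragment when CFG_EXTERNAL_DTB_OVERLAY is enabled.
 * Returns the offset of the new node or -1 on failure.
 */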
677 static int add_dt_path_subnode(struct dt_descriptor *dt, const char *path,
678 			       const char *subnode)
679 {
680 	int offs;
681 
682 	offs = fdt_path_offset(dt->blob, path);
683 	if (offs < 0)
684 		return -1;
685 	offs = add_dt_overlay_fragment(dt, offs);
686 	if (offs < 0)
687 		return -1;
688 	offs = fdt_add_subnode(dt->blob, offs, subnode);
689 	if (offs < 0)
690 		return -1;
691 	return offs;
692 }
693 
694 static int add_optee_dt_node(struct dt_descriptor *dt)
695 {
696 	int offs;
697 	int ret;
698 
699 	if (fdt_path_offset(dt->blob, "/firmware/optee") >= 0) {
700 		DMSG("OP-TEE Device Tree node already exists!");
701 		return 0;
702 	}
703 
704 	offs = fdt_path_offset(dt->blob, "/firmware");
705 	if (offs < 0) {
706 		offs = add_dt_path_subnode(dt, "/", "firmware");
707 		if (offs < 0)
708 			return -1;
709 	}
710 
711 	offs = fdt_add_subnode(dt->blob, offs, "optee");
712 	if (offs < 0)
713 		return -1;
714 
715 	ret = fdt_setprop_string(dt->blob, offs, "compatible",
716 				 "linaro,optee-tz");
717 	if (ret < 0)
718 		return -1;
719 	ret = fdt_setprop_string(dt->blob, offs, "method", "smc");
720 	if (ret < 0)
721 		return -1;
722 	return 0;
723 }
724 
725 #ifdef CFG_PSCI_ARM32
726 static int append_psci_compatible(void *fdt, int offs, const char *str)
727 {
728 	return fdt_appendprop(fdt, offs, "compatible", str, strlen(str) + 1);
729 }
730 
731 static int dt_add_psci_node(struct dt_descriptor *dt)
732 {
733 	int offs;
734 
735 	if (fdt_path_offset(dt->blob, "/psci") >= 0) {
736 		DMSG("PSCI Device Tree node already exists!");
737 		return 0;
738 	}
739 
740 	offs = add_dt_path_subnode(dt, "/", "psci");
741 	if (offs < 0)
742 		return -1;
743 	if (append_psci_compatible(dt->blob, offs, "arm,psci-1.0"))
744 		return -1;
745 	if (append_psci_compatible(dt->blob, offs, "arm,psci-0.2"))
746 		return -1;
747 	if (append_psci_compatible(dt->blob, offs, "arm,psci"))
748 		return -1;
749 	if (fdt_setprop_string(dt->blob, offs, "method", "smc"))
750 		return -1;
751 	if (fdt_setprop_u32(dt->blob, offs, "cpu_suspend", PSCI_CPU_SUSPEND))
752 		return -1;
753 	if (fdt_setprop_u32(dt->blob, offs, "cpu_off", PSCI_CPU_OFF))
754 		return -1;
755 	if (fdt_setprop_u32(dt->blob, offs, "cpu_on", PSCI_CPU_ON))
756 		return -1;
757 	if (fdt_setprop_u32(dt->blob, offs, "sys_poweroff", PSCI_SYSTEM_OFF))
758 		return -1;
759 	if (fdt_setprop_u32(dt->blob, offs, "sys_reset", PSCI_SYSTEM_RESET))
760 		return -1;
761 	return 0;
762 }
763 
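/*
 * Return 0 if one of the strings in the node's "compatible" property
 * starts with @prefix, -1 otherwise.
 */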
764 static int check_node_compat_prefix(struct dt_descriptor *dt, int offs,
765 				    const char *prefix)
766 {
767 	const size_t prefix_len = strlen(prefix);
768 	size_t l;
769 	int plen;
770 	const char *prop;
771 
772 	prop = fdt_getprop(dt->blob, offs, "compatible", &plen);
773 	if (!prop)
774 		return -1;
775 
776 	while (plen > 0) {
777 		if (memcmp(prop, prefix, prefix_len) == 0)
778 			return 0; /* match */
779 
780 		l = strlen(prop) + 1;
781 		prop += l;
782 		plen -= l;
783 	}
784 
785 	return -1;
786 }
787 
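/*
 * Set enable-method = "psci" on every node whose "compatible" string
 * starts with "arm,cortex-a" and that doesn't already define one.
 */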
788 static int dt_add_psci_cpu_enable_methods(struct dt_descriptor *dt)
789 {
790 	int offs = 0;
791 
792 	while (1) {
793 		offs = fdt_next_node(dt->blob, offs, NULL);
794 		if (offs < 0)
795 			break;
796 		if (fdt_getprop(dt->blob, offs, "enable-method", NULL))
797 			continue; /* already set */
798 		if (check_node_compat_prefix(dt, offs, "arm,cortex-a"))
799 			continue; /* no compatible */
800 		if (fdt_setprop_string(dt->blob, offs, "enable-method", "psci"))
801 			return -1;
802 		/* Need to restart scanning as offsets may have changed */
803 		offs = 0;
804 	}
805 	return 0;
806 }
807 
808 static int config_psci(struct dt_descriptor *dt)
809 {
810 	if (dt_add_psci_node(dt))
811 		return -1;
812 	return dt_add_psci_cpu_enable_methods(dt);
813 }
814 #else
815 static int config_psci(struct dt_descriptor *dt __unused)
816 {
817 	return 0;
818 }
819 #endif /*CFG_PSCI_ARM32*/
820 
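/*
 * Store @val as a single 32-bit cell or as a 64-bit value (two cells),
 * in DT (big-endian) byte order.
 */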
821 static void set_dt_val(void *data, uint32_t cell_size, uint64_t val)
822 {
823 	if (cell_size == 1) {
824 		fdt32_t v = cpu_to_fdt32((uint32_t)val);
825 
826 		memcpy(data, &v, sizeof(v));
827 	} else {
828 		fdt64_t v = cpu_to_fdt64(val);
829 
830 		memcpy(data, &v, sizeof(v));
831 	}
832 }
833 
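/*
 * Add a "no-map" node named "<name>@0x<pa>" under /reserved-memory
 * describing the range [@pa, @pa + @size). The /reserved-memory node is
 * created first if it doesn't already exist.
 */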
834 static int add_res_mem_dt_node(struct dt_descriptor *dt, const char *name,
835 			       paddr_t pa, size_t size)
836 {
837 	int offs = 0;
838 	int ret = 0;
839 	int addr_size = -1;
840 	int len_size = -1;
841 	bool found = true;
842 	char subnode_name[80] = { 0 };
843 
844 	offs = fdt_path_offset(dt->blob, "/reserved-memory");
845 
846 	if (offs < 0) {
847 		found = false;
848 		offs = 0;
849 	}
850 
851 	if (IS_ENABLED(CFG_EXTERNAL_DTB_OVERLAY)) {
852 		len_size = sizeof(paddr_t) / sizeof(uint32_t);
853 		addr_size = sizeof(paddr_t) / sizeof(uint32_t);
854 	} else {
855 		len_size = fdt_size_cells(dt->blob, offs);
856 		if (len_size < 0)
857 			return -1;
858 		addr_size = fdt_address_cells(dt->blob, offs);
859 		if (addr_size < 0)
860 			return -1;
861 	}
862 
863 	if (!found) {
864 		offs = add_dt_path_subnode(dt, "/", "reserved-memory");
865 		if (offs < 0)
866 			return -1;
867 		ret = fdt_setprop_cell(dt->blob, offs, "#address-cells",
868 				       addr_size);
869 		if (ret < 0)
870 			return -1;
871 		ret = fdt_setprop_cell(dt->blob, offs, "#size-cells", len_size);
872 		if (ret < 0)
873 			return -1;
874 		ret = fdt_setprop(dt->blob, offs, "ranges", NULL, 0);
875 		if (ret < 0)
876 			return -1;
877 	}
878 
879 	ret = snprintf(subnode_name, sizeof(subnode_name),
880 		       "%s@0x%" PRIxPA, name, pa);
881 	if (ret < 0 || ret >= (int)sizeof(subnode_name))
882 		DMSG("truncated node \"%s@0x%"PRIxPA"\"", name, pa);
883 	offs = fdt_add_subnode(dt->blob, offs, subnode_name);
884 	if (offs >= 0) {
885 		uint32_t data[FDT_MAX_NCELLS * 2];
886 
887 		set_dt_val(data, addr_size, pa);
888 		set_dt_val(data + addr_size, len_size, size);
889 		ret = fdt_setprop(dt->blob, offs, "reg", data,
890 				  sizeof(uint32_t) * (addr_size + len_size));
891 		if (ret < 0)
892 			return -1;
893 		ret = fdt_setprop(dt->blob, offs, "no-map", NULL, 0);
894 		if (ret < 0)
895 			return -1;
896 	} else {
897 		return -1;
898 	}
899 	return 0;
900 }
901 
902 #ifdef CFG_CORE_DYN_SHM
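/*
 * Read one DT value (a single 32-bit cell or a 64-bit cell pair) at *offs
 * and advance *offs past it.
 */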
903 static uint64_t get_dt_val_and_advance(const void *data, size_t *offs,
904 				       uint32_t cell_size)
905 {
906 	uint64_t rv = 0;
907 
908 	if (cell_size == 1) {
909 		uint32_t v;
910 
911 		memcpy(&v, (const uint8_t *)data + *offs, sizeof(v));
912 		*offs += sizeof(v);
913 		rv = fdt32_to_cpu(v);
914 	} else {
915 		uint64_t v;
916 
917 		memcpy(&v, (const uint8_t *)data + *offs, sizeof(v));
918 		*offs += sizeof(v);
919 		rv = fdt64_to_cpu(v);
920 	}
921 
922 	return rv;
923 }
924 
925 /*
926  * Find all non-secure memory from DT. Memory marked inaccessible by Secure
927  * World is ignored since it could not be mapped to be used as dynamic shared
928  * memory.
929  */
930 static int get_nsec_memory_helper(void *fdt, struct core_mmu_phys_mem *mem)
931 {
932 	const uint8_t *prop = NULL;
933 	uint64_t a = 0;
934 	uint64_t l = 0;
935 	size_t prop_offs = 0;
936 	size_t prop_len = 0;
937 	int elems_total = 0;
938 	int addr_size = 0;
939 	int len_size = 0;
940 	int offs = 0;
941 	size_t n = 0;
942 	int len = 0;
943 
944 	addr_size = fdt_address_cells(fdt, 0);
945 	if (addr_size < 0)
946 		return 0;
947 
948 	len_size = fdt_size_cells(fdt, 0);
949 	if (len_size < 0)
950 		return 0;
951 
952 	while (true) {
953 		offs = fdt_node_offset_by_prop_value(fdt, offs, "device_type",
954 						     "memory",
955 						     sizeof("memory"));
956 		if (offs < 0)
957 			break;
958 
959 		if (_fdt_get_status(fdt, offs) != (DT_STATUS_OK_NSEC |
960 						   DT_STATUS_OK_SEC))
961 			continue;
962 
963 		prop = fdt_getprop(fdt, offs, "reg", &len);
964 		if (!prop)
965 			continue;
966 
967 		prop_len = len;
968 		for (n = 0, prop_offs = 0; prop_offs < prop_len; n++) {
969 			a = get_dt_val_and_advance(prop, &prop_offs, addr_size);
970 			if (prop_offs >= prop_len) {
971 				n--;
972 				break;
973 			}
974 
975 			l = get_dt_val_and_advance(prop, &prop_offs, len_size);
976 			if (mem) {
977 				mem->type = MEM_AREA_DDR_OVERALL;
978 				mem->addr = a;
979 				mem->size = l;
980 				mem++;
981 			}
982 		}
983 
984 		elems_total += n;
985 	}
986 
987 	return elems_total;
988 }
989 
990 static struct core_mmu_phys_mem *get_nsec_memory(void *fdt, size_t *nelems)
991 {
992 	struct core_mmu_phys_mem *mem = NULL;
993 	int elems_total = 0;
994 
995 	elems_total = get_nsec_memory_helper(fdt, NULL);
996 	if (elems_total <= 0)
997 		return NULL;
998 
999 	mem = nex_calloc(elems_total, sizeof(*mem));
1000 	if (!mem)
1001 		panic();
1002 
1003 	elems_total = get_nsec_memory_helper(fdt, mem);
1004 	assert(elems_total > 0);
1005 
1006 	*nelems = elems_total;
1007 
1008 	return mem;
1009 }
1010 #endif /*CFG_CORE_DYN_SHM*/
1011 
1012 #ifdef CFG_CORE_RESERVED_SHM
1013 static int mark_static_shm_as_reserved(struct dt_descriptor *dt)
1014 {
1015 	vaddr_t shm_start;
1016 	vaddr_t shm_end;
1017 
1018 	core_mmu_get_mem_by_type(MEM_AREA_NSEC_SHM, &shm_start, &shm_end);
1019 	if (shm_start != shm_end)
1020 		return add_res_mem_dt_node(dt, "optee_shm",
1021 					   virt_to_phys((void *)shm_start),
1022 					   shm_end - shm_start);
1023 
1024 	DMSG("No SHM configured");
1025 	return -1;
1026 }
1027 #endif /*CFG_CORE_RESERVED_SHM*/
1028 
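/*
 * Map the device tree passed by an earlier boot stage at @phys_dt and
 * prepare it for modification (overlay bookkeeping and fdt_open_into()).
 */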
1029 static void init_external_dt(unsigned long phys_dt)
1030 {
1031 	struct dt_descriptor *dt = &external_dt;
1032 	void *fdt;
1033 	int ret;
1034 
1035 	if (!phys_dt) {
1036 		/*
1037 		 * No need to panic as we're not using the DT in OP-TEE
1038 		 * yet, we're only adding some nodes for normal world use.
1039 		 * This makes the switch to using DT easier as we can boot
1040 		 * a newer OP-TEE with older boot loaders. Once we start to
1041 		 * initialize devices based on DT we'll likely panic
1042 		 * instead of returning here.
1043 		 */
1044 		IMSG("No non-secure external DT");
1045 		return;
1046 	}
1047 
1048 	if (!core_mmu_add_mapping(MEM_AREA_EXT_DT, phys_dt, CFG_DTB_MAX_SIZE))
1049 		panic("Failed to map external DTB");
1050 
1051 	fdt = phys_to_virt(phys_dt, MEM_AREA_EXT_DT);
1052 	if (!fdt)
1053 		panic();
1054 
1055 	dt->blob = fdt;
1056 
1057 	ret = init_dt_overlay(dt, CFG_DTB_MAX_SIZE);
1058 	if (ret < 0) {
1059 		EMSG("Device Tree Overlay init fail @ %#lx: error %d", phys_dt,
1060 		     ret);
1061 		panic();
1062 	}
1063 
1064 	ret = fdt_open_into(fdt, fdt, CFG_DTB_MAX_SIZE);
1065 	if (ret < 0) {
1066 		EMSG("Invalid Device Tree at %#lx: error %d", phys_dt, ret);
1067 		panic();
1068 	}
1069 
1070 	IMSG("Non-secure external DT found");
1071 }
1072 
1073 static int mark_tzdram_as_reserved(struct dt_descriptor *dt)
1074 {
1075 	return add_res_mem_dt_node(dt, "optee_core", CFG_TZDRAM_START,
1076 				   CFG_TZDRAM_SIZE);
1077 }
1078 
1079 static void update_external_dt(void)
1080 {
1081 	struct dt_descriptor *dt = &external_dt;
1082 
1083 	if (!dt->blob)
1084 		return;
1085 
1086 	if (add_optee_dt_node(dt))
1087 		panic("Failed to add OP-TEE Device Tree node");
1088 
1089 	if (config_psci(dt))
1090 		panic("Failed to config PSCI");
1091 
1092 #ifdef CFG_CORE_RESERVED_SHM
1093 	if (mark_static_shm_as_reserved(dt))
1094 		panic("Failed to config non-secure memory");
1095 #endif
1096 
1097 	if (mark_tzdram_as_reserved(dt))
1098 		panic("Failed to config secure memory");
1099 }
1100 #else /*CFG_DT*/
1101 void *get_external_dt(void)
1102 {
1103 	return NULL;
1104 }
1105 
1106 static void init_external_dt(unsigned long phys_dt __unused)
1107 {
1108 }
1109 
1110 static void update_external_dt(void)
1111 {
1112 }
1113 
1114 #ifdef CFG_CORE_DYN_SHM
1115 static struct core_mmu_phys_mem *get_nsec_memory(void *fdt __unused,
1116 						 size_t *nelems __unused)
1117 {
1118 	return NULL;
1119 }
1120 #endif /*CFG_CORE_DYN_SHM*/
1121 #endif /*!CFG_DT*/
1122 
1123 #ifdef CFG_CORE_DYN_SHM
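/*
 * Register the non-secure DDR ranges usable as dynamic shared memory:
 * taken from the external DT when available, otherwise from the
 * platform's register_ddr()/register_dynamic_shm() tables.
 */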
1124 static void discover_nsec_memory(void)
1125 {
1126 	struct core_mmu_phys_mem *mem;
1127 	const struct core_mmu_phys_mem *mem_begin = NULL;
1128 	const struct core_mmu_phys_mem *mem_end = NULL;
1129 	size_t nelems;
1130 	void *fdt = get_external_dt();
1131 
1132 	if (fdt) {
1133 		mem = get_nsec_memory(fdt, &nelems);
1134 		if (mem) {
1135 			core_mmu_set_discovered_nsec_ddr(mem, nelems);
1136 			return;
1137 		}
1138 
1139 		DMSG("No non-secure memory found in FDT");
1140 	}
1141 
1142 	mem_begin = phys_ddr_overall_begin;
1143 	mem_end = phys_ddr_overall_end;
1144 	nelems = mem_end - mem_begin;
1145 	if (nelems) {
1146 		/*
1147 		 * Platform cannot use both register_ddr() and the now
1148 		 * deprecated register_dynamic_shm().
1149 		 */
1150 		assert(phys_ddr_overall_compat_begin ==
1151 		       phys_ddr_overall_compat_end);
1152 	} else {
1153 		mem_begin = phys_ddr_overall_compat_begin;
1154 		mem_end = phys_ddr_overall_compat_end;
1155 		nelems = mem_end - mem_begin;
1156 		if (!nelems)
1157 			return;
1158 		DMSG("Warning register_dynamic_shm() is deprecated, please use register_ddr() instead");
1159 	}
1160 
1161 	mem = nex_calloc(nelems, sizeof(*mem));
1162 	if (!mem)
1163 		panic();
1164 
1165 	memcpy(mem, phys_ddr_overall_begin, sizeof(*mem) * nelems);
1166 	core_mmu_set_discovered_nsec_ddr(mem, nelems);
1167 }
1168 #else /*CFG_CORE_DYN_SHM*/
1169 static void discover_nsec_memory(void)
1170 {
1171 }
1172 #endif /*!CFG_CORE_DYN_SHM*/
1173 
1174 void init_tee_runtime(void)
1175 {
1176 #ifdef CFG_VIRTUALIZATION
1177 	/* We need to initialize the pool for every virtual guest partition */
1178 	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
1179 #endif
1180 
1181 #ifndef CFG_WITH_PAGER
1182 	/* Pager initializes TA RAM early */
1183 	core_mmu_init_ta_ram();
1184 #endif
1185 	call_initcalls();
1186 }
1187 
1188 static void init_primary(unsigned long pageable_part, unsigned long nsec_entry)
1189 {
1190 	/*
1191 	 * Mask asynchronous exceptions before switching to the thread vector
1192 	 * as the thread handler requires those to be masked while
1193 	 * executing with the temporary stack. The thread subsystem also
1194 	 * asserts that the foreign interrupts are blocked when using most of
1195 	 * its functions.
1196 	 */
1197 	thread_set_exceptions(THREAD_EXCP_ALL);
1198 	primary_save_cntfrq();
1199 	init_vfp_sec();
1200 	/*
1201 	 * Pager: init_runtime() calls thread_kernel_enable_vfp() so we must
1202 	 * set a current thread right now to avoid a chicken-and-egg problem
1203 	 * (thread_init_boot_thread() sets the current thread but needs
1204 	 * things set by init_runtime()).
1205 	 */
1206 	thread_get_core_local()->curr_thread = 0;
1207 	init_runtime(pageable_part);
1208 
1209 	if (IS_ENABLED(CFG_VIRTUALIZATION)) {
1210 		/*
1211 		 * Virtualization: We can't initialize threads right now because
1212 		 * threads belong to the "tee" part and will be initialized
1213 		 * separately for each new virtual guest. So, we'll clear
1214 		 * "curr_thread" and call it done.
1215 		 */
1216 		thread_get_core_local()->curr_thread = -1;
1217 	} else {
1218 		thread_init_boot_thread();
1219 	}
1220 	thread_init_primary();
1221 	thread_init_per_cpu();
1222 	init_sec_mon(nsec_entry);
1223 }
1224 
1225 /*
1226  * Note: this function is weak just to make it possible to exclude it from
1227  * the unpaged area.
1228  */
1229 void __weak boot_init_primary_late(unsigned long fdt)
1230 {
1231 	init_external_dt(fdt);
1232 	tpm_map_log_area(get_external_dt());
1233 	discover_nsec_memory();
1234 	update_external_dt();
1235 	configure_console_from_dt();
1236 
1237 	IMSG("OP-TEE version: %s", core_v_str);
1238 	IMSG("Primary CPU initializing");
1239 #ifdef CFG_CORE_ASLR
1240 	DMSG("Executing at offset %#lx with virtual load address %#"PRIxVA,
1241 	     (unsigned long)boot_mmu_config.load_offset, VCORE_START_VA);
1242 #endif
1243 
1244 	main_init_gic();
1245 	init_vfp_nsec();
1246 #ifndef CFG_VIRTUALIZATION
1247 	init_tee_runtime();
1248 #endif
1249 #ifdef CFG_VIRTUALIZATION
1250 	IMSG("Initializing virtualization support");
1251 	core_mmu_init_virtualization();
1252 #endif
1253 	call_finalcalls();
1254 	IMSG("Primary CPU switching to normal world boot");
1255 }
1256 
1257 static void init_secondary_helper(unsigned long nsec_entry)
1258 {
1259 	IMSG("Secondary CPU %zu initializing", get_core_pos());
1260 
1261 	/*
1262 	 * Mask asynchronous exceptions before switching to the thread vector
1263 	 * as the thread handler requires those to be masked while
1264 	 * executing with the temporary stack. The thread subsystem also
1265 	 * asserts that the foreign interrupts are blocked when using most of
1266 	 * its functions.
1267 	 */
1268 	thread_set_exceptions(THREAD_EXCP_ALL);
1269 
1270 	secondary_init_cntfrq();
1271 	thread_init_per_cpu();
1272 	init_sec_mon(nsec_entry);
1273 	main_secondary_init_gic();
1274 	init_vfp_sec();
1275 	init_vfp_nsec();
1276 
1277 	IMSG("Secondary CPU %zu switching to normal world boot", get_core_pos());
1278 }
1279 
1280 /*
1281  * Note: this function is weak just to make it possible to exclude it from
1282  * the unpaged area so that it lies in the init area.
1283  */
1284 void __weak boot_init_primary_early(unsigned long pageable_part,
1285 				    unsigned long nsec_entry __maybe_unused)
1286 {
1287 	unsigned long e = PADDR_INVALID;
1288 
1289 #if !defined(CFG_WITH_ARM_TRUSTED_FW)
1290 	e = nsec_entry;
1291 #endif
1292 
1293 	init_primary(pageable_part, e);
1294 }
1295 
1296 #if defined(CFG_WITH_ARM_TRUSTED_FW)
1297 unsigned long boot_cpu_on_handler(unsigned long a0 __maybe_unused,
1298 				  unsigned long a1 __unused)
1299 {
1300 	init_secondary_helper(PADDR_INVALID);
1301 	return 0;
1302 }
1303 #else
1304 void boot_init_secondary(unsigned long nsec_entry)
1305 {
1306 	init_secondary_helper(nsec_entry);
1307 }
1308 #endif
1309 
1310 #if defined(CFG_BOOT_SECONDARY_REQUEST)
1311 void boot_set_core_ns_entry(size_t core_idx, uintptr_t entry,
1312 			    uintptr_t context_id)
1313 {
1314 	ns_entry_contexts[core_idx].entry_point = entry;
1315 	ns_entry_contexts[core_idx].context_id = context_id;
1316 	dsb_ishst();
1317 }
1318 
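/*
 * Release secondary core @core_idx held in the spin table: record its
 * entry point, set its spin table flag and wake the core with SEV.
 * The primary core (index 0) cannot be released this way.
 */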
1319 int boot_core_release(size_t core_idx, paddr_t entry)
1320 {
1321 	if (!core_idx || core_idx >= CFG_TEE_CORE_NB_CORE)
1322 		return -1;
1323 
1324 	ns_entry_contexts[core_idx].entry_point = entry;
1325 	dmb();
1326 	spin_table[core_idx] = 1;
1327 	dsb();
1328 	sev();
1329 
1330 	return 0;
1331 }
1332 
1333 /*
1334  * Spin until a secondary boot request arrives, then return the
1335  * secondary core's entry context.
1336  */
1337 struct ns_entry_context *boot_core_hpen(void)
1338 {
1339 #ifdef CFG_PSCI_ARM32
1340 	return &ns_entry_contexts[get_core_pos()];
1341 #else
1342 	do {
1343 		wfe();
1344 	} while (!spin_table[get_core_pos()]);
1345 	dmb();
1346 	return &ns_entry_contexts[get_core_pos()];
1347 #endif
1348 }
1349 #endif
1350 
1351 #if defined(CFG_CORE_ASLR)
1352 #if defined(CFG_DT)
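/*
 * Fetch the ASLR seed from the "kaslr-seed" property of the /secure-chosen
 * node. Returns 0 if the DT or the property is missing or malformed.
 */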
1353 unsigned long __weak get_aslr_seed(void *fdt)
1354 {
1355 	int rc = fdt_check_header(fdt);
1356 	const uint64_t *seed = NULL;
1357 	int offs = 0;
1358 	int len = 0;
1359 
1360 	if (rc) {
1361 		DMSG("Bad fdt: %d", rc);
1362 		return 0;
1363 	}
1364 
1365 	offs =  fdt_path_offset(fdt, "/secure-chosen");
1366 	if (offs < 0) {
1367 		DMSG("Cannot find /secure-chosen");
1368 		return 0;
1369 	}
1370 	seed = fdt_getprop(fdt, offs, "kaslr-seed", &len);
1371 	if (!seed || len != sizeof(*seed)) {
1372 		DMSG("Cannot find valid kaslr-seed");
1373 		return 0;
1374 	}
1375 
1376 	return fdt64_to_cpu(*seed);
1377 }
1378 #else /*!CFG_DT*/
1379 unsigned long __weak get_aslr_seed(void *fdt __unused)
1380 {
1381 	DMSG("Warning: no ASLR seed");
1382 	return 0;
1383 }
1384 #endif /*!CFG_DT*/
1385 #endif /*CFG_CORE_ASLR*/
1386