xref: /optee_os/core/arch/arm/kernel/boot.c (revision 5118efbe82358fd69fda6e0158a30e59f59ba09d)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2015-2020, Linaro Limited
4  */
5 
6 #include <arm.h>
7 #include <assert.h>
8 #include <compiler.h>
9 #include <config.h>
10 #include <console.h>
11 #include <crypto/crypto.h>
12 #include <initcall.h>
13 #include <inttypes.h>
14 #include <keep.h>
15 #include <kernel/asan.h>
16 #include <kernel/boot.h>
17 #include <kernel/linker.h>
18 #include <kernel/misc.h>
19 #include <kernel/panic.h>
20 #include <kernel/tee_misc.h>
21 #include <kernel/thread.h>
22 #include <kernel/tpm.h>
23 #include <libfdt.h>
24 #include <malloc.h>
25 #include <mm/core_memprot.h>
26 #include <mm/core_mmu.h>
27 #include <mm/fobj.h>
28 #include <mm/tee_mm.h>
29 #include <mm/tee_pager.h>
30 #include <sm/psci.h>
31 #include <stdio.h>
32 #include <trace.h>
33 #include <utee_defines.h>
34 #include <util.h>
35 
36 #include <platform_config.h>
37 
38 #if !defined(CFG_WITH_ARM_TRUSTED_FW)
39 #include <sm/sm.h>
40 #endif
41 
42 #if defined(CFG_WITH_VFP)
43 #include <kernel/vfp.h>
44 #endif
45 
46 /*
47  * In this file we're using unsigned long to represent physical pointers as
48  * they are received in a single register when OP-TEE is initially entered.
49  * This limits 32-bit systems to only make use of the lower 32 bits
50  * of a physical address for initial parameters.
51  *
52  * 64-bit systems on the other hand can use full 64-bit physical pointers.
53  */
54 #define PADDR_INVALID		ULONG_MAX
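/*
 * PADDR_INVALID flags that no normal world entry address was supplied, for
 * instance when TF-A acts as the secure monitor and handles the normal
 * world boot itself.
 */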
55 
56 #if defined(CFG_BOOT_SECONDARY_REQUEST)
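/*
 * Per-core normal world entry point and context ID for secondary cores,
 * written by boot_set_core_ns_entry()/boot_core_release() and consumed by
 * boot_core_hpen() further down in this file.
 */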
57 struct ns_entry_context {
58 	uintptr_t entry_point;
59 	uintptr_t context_id;
60 };
61 struct ns_entry_context ns_entry_contexts[CFG_TEE_CORE_NB_CORE];
62 static uint32_t spin_table[CFG_TEE_CORE_NB_CORE];
63 #endif
64 
65 #ifdef CFG_BOOT_SYNC_CPU
66 /*
67  * Array used when booting, to synchronize CPUs.
68  * When 0, the CPU has not started.
69  * When 1, it has started.
70  */
71 uint32_t sem_cpu_sync[CFG_TEE_CORE_NB_CORE];
72 DECLARE_KEEP_PAGER(sem_cpu_sync);
73 #endif
74 
75 #ifdef CFG_DT
76 struct dt_descriptor {
77 	void *blob;
78 	int frag_id;
79 };
80 
81 static struct dt_descriptor external_dt __nex_bss;
82 #endif
83 
84 #ifdef CFG_SECONDARY_INIT_CNTFRQ
85 static uint32_t cntfrq;
86 #endif
87 
88 /* May be overridden in plat-$(PLATFORM)/main.c */
89 __weak void plat_primary_init_early(void)
90 {
91 }
92 DECLARE_KEEP_PAGER(plat_primary_init_early);
93 
94 /* May be overridden in plat-$(PLATFORM)/main.c */
95 __weak void main_init_gic(void)
96 {
97 }
98 
99 /* May be overridden in plat-$(PLATFORM)/main.c */
100 __weak void main_secondary_init_gic(void)
101 {
102 }
103 
104 #if defined(CFG_WITH_ARM_TRUSTED_FW)
105 void init_sec_mon(unsigned long nsec_entry __maybe_unused)
106 {
107 	assert(nsec_entry == PADDR_INVALID);
108 	/* Do nothing as we don't have a secure monitor */
109 }
110 #else
111 /* May be overridden in plat-$(PLATFORM)/main.c */
112 __weak void init_sec_mon(unsigned long nsec_entry)
113 {
114 	struct sm_nsec_ctx *nsec_ctx;
115 
116 	assert(nsec_entry != PADDR_INVALID);
117 
118 	/* Initialize secure monitor */
119 	nsec_ctx = sm_get_nsec_ctx();
120 	nsec_ctx->mon_lr = nsec_entry;
121 	nsec_ctx->mon_spsr = CPSR_MODE_SVC | CPSR_I;
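	/*
	 * Bit 0 set in the entry address means the normal world is entered
	 * in Thumb state.
	 */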
122 	if (nsec_entry & 1)
123 		nsec_ctx->mon_spsr |= CPSR_T;
124 }
125 #endif
126 
127 #if defined(CFG_WITH_ARM_TRUSTED_FW)
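/*
 * With TF-A as secure monitor there is nothing to configure here: the
 * monitor, not OP-TEE, controls the normal world's access to VFP/SIMD.
 */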
128 static void init_vfp_nsec(void)
129 {
130 }
131 #else
132 static void init_vfp_nsec(void)
133 {
134 	/* Normal world can use CP10 and CP11 (SIMD/VFP) */
135 	write_nsacr(read_nsacr() | NSACR_CP10 | NSACR_CP11);
136 }
137 #endif
138 
139 #if defined(CFG_WITH_VFP)
140 
141 #ifdef ARM32
142 static void init_vfp_sec(void)
143 {
144 	uint32_t cpacr = read_cpacr();
145 
146 	/*
147 	 * Enable Advanced SIMD functionality.
148 	 * Enable use of D16-D31 of the Floating-point Extension register
149 	 * file.
150 	 */
151 	cpacr &= ~(CPACR_ASEDIS | CPACR_D32DIS);
152 	/*
153 	 * Enable usage of CP10 and CP11 (SIMD/VFP) in both kernel and user
154 	 * mode.
155 	 */
156 	cpacr |= CPACR_CP(10, CPACR_CP_ACCESS_FULL);
157 	cpacr |= CPACR_CP(11, CPACR_CP_ACCESS_FULL);
158 	write_cpacr(cpacr);
159 }
160 #endif /* ARM32 */
161 
162 #ifdef ARM64
163 static void init_vfp_sec(void)
164 {
165 	/* Not using VFP until thread_kernel_enable_vfp() */
166 	vfp_disable();
167 }
168 #endif /* ARM64 */
169 
170 #else /* CFG_WITH_VFP */
171 
172 static void init_vfp_sec(void)
173 {
174 	/* Not using VFP */
175 }
176 #endif
177 
178 #ifdef CFG_SECONDARY_INIT_CNTFRQ
179 static void primary_save_cntfrq(void)
180 {
181 	assert(cntfrq == 0);
182 
183 	/*
184 	 * CNTFRQ should be initialized on the primary CPU by a
185 	 * previous boot stage
186 	 */
187 	cntfrq = read_cntfrq();
188 }
189 
190 static void secondary_init_cntfrq(void)
191 {
192 	assert(cntfrq != 0);
193 	write_cntfrq(cntfrq);
194 }
195 #else /* CFG_SECONDARY_INIT_CNTFRQ */
196 static void primary_save_cntfrq(void)
197 {
198 }
199 
200 static void secondary_init_cntfrq(void)
201 {
202 }
203 #endif
204 
205 #ifdef CFG_CORE_SANITIZE_KADDRESS
206 static void init_run_constructors(void)
207 {
208 	const vaddr_t *ctor;
209 
210 	for (ctor = &__ctor_list; ctor < &__ctor_end; ctor++)
211 		((void (*)(void))(*ctor))();
212 }
213 
214 static void init_asan(void)
215 {
216 
217 	/*
218 	 * CFG_ASAN_SHADOW_OFFSET is also supplied as
219 	 * -fasan-shadow-offset=$(CFG_ASAN_SHADOW_OFFSET) to the compiler.
220 	 * Since all the values needed to calculate
221 	 * CFG_ASAN_SHADOW_OFFSET aren't available to make, we have to
222 	 * calculate it in advance and hard code it into the platform
223 	 * conf.mk. Here, where we do have all the needed values, we double
224 	 * check that the compiler was supplied the correct value.
225 	 */
226 
227 #define __ASAN_SHADOW_START \
228 	ROUNDUP(TEE_RAM_VA_START + (TEE_RAM_VA_SIZE * 8) / 9 - 8, 8)
229 	assert(__ASAN_SHADOW_START == (vaddr_t)&__asan_shadow_start);
230 #define __CFG_ASAN_SHADOW_OFFSET \
231 	(__ASAN_SHADOW_START - (TEE_RAM_VA_START / 8))
232 	COMPILE_TIME_ASSERT(CFG_ASAN_SHADOW_OFFSET == __CFG_ASAN_SHADOW_OFFSET);
233 #undef __ASAN_SHADOW_START
234 #undef __CFG_ASAN_SHADOW_OFFSET
235 
236 	/*
237 	 * Set the area covered by the shadow memory: everything from the
238 	 * start of the core image up to the beginning of the shadow area.
239 	 */
240 	asan_set_shadowed((void *)TEE_TEXT_VA_START, &__asan_shadow_start);
241 
242 	/*
243 	 * Add access to areas that aren't opened automatically by a
244 	 * constructor.
245 	 */
246 	asan_tag_access(&__ctor_list, &__ctor_end);
247 	asan_tag_access(__rodata_start, __rodata_end);
248 #ifdef CFG_WITH_PAGER
249 	asan_tag_access(__pageable_start, __pageable_end);
250 #endif /*CFG_WITH_PAGER*/
251 	asan_tag_access(__nozi_start, __nozi_end);
252 	asan_tag_access(__exidx_start, __exidx_end);
253 	asan_tag_access(__extab_start, __extab_end);
254 
255 	init_run_constructors();
256 
257 	/* Everything is tagged correctly, let's start address sanitizing. */
258 	asan_start();
259 }
260 #else /*CFG_CORE_SANITIZE_KADDRESS*/
261 static void init_asan(void)
262 {
263 }
264 #endif /*CFG_CORE_SANITIZE_KADDRESS*/
265 
266 #ifdef CFG_WITH_PAGER
267 
268 #ifdef CFG_CORE_SANITIZE_KADDRESS
269 static void carve_out_asan_mem(tee_mm_pool_t *pool)
270 {
271 	const size_t s = pool->hi - pool->lo;
272 	tee_mm_entry_t *mm;
273 	paddr_t apa = ASAN_MAP_PA;
274 	size_t asz = ASAN_MAP_SZ;
275 
276 	if (core_is_buffer_outside(apa, asz, pool->lo, s))
277 		return;
278 
279 	/* Reserve the shadow area */
280 	if (!core_is_buffer_inside(apa, asz, pool->lo, s)) {
281 		if (apa < pool->lo) {
282 			/*
283 			 * ASAN buffer is overlapping with the beginning of
284 			 * the pool.
285 			 */
286 			asz -= pool->lo - apa;
287 			apa = pool->lo;
288 		} else {
289 			/*
290 			 * ASAN buffer is overlapping with the end of the
291 			 * pool.
292 			 */
293 			asz = pool->hi - apa;
294 		}
295 	}
296 	mm = tee_mm_alloc2(pool, apa, asz);
297 	assert(mm);
298 }
299 #else
300 static void carve_out_asan_mem(tee_mm_pool_t *pool __unused)
301 {
302 }
303 #endif
304 
305 static void print_pager_pool_size(void)
306 {
307 	struct tee_pager_stats __maybe_unused stats;
308 
309 	tee_pager_get_stats(&stats);
310 	IMSG("Pager pool size: %zukB",
311 		stats.npages_all * SMALL_PAGE_SIZE / 1024);
312 }
313 
314 static void init_vcore(tee_mm_pool_t *mm_vcore)
315 {
316 	const vaddr_t begin = VCORE_START_VA;
317 	vaddr_t end = begin + TEE_RAM_VA_SIZE;
318 
319 #ifdef CFG_CORE_SANITIZE_KADDRESS
320 	/* Carve out ASAN memory, flat mapped after core memory */
321 	if (end > ASAN_SHADOW_PA)
322 		end = ASAN_MAP_PA;
323 #endif
324 
325 	if (!tee_mm_init(mm_vcore, begin, end, SMALL_PAGE_SHIFT,
326 			 TEE_MM_POOL_NO_FLAGS))
327 		panic("tee_mm_vcore init failed");
328 }
329 
330 /*
331  * With CFG_CORE_ASLR=y the init part is relocated very early during boot.
332  * The init part is also paged just as the rest of the normal paged code, with
333  * the difference that it's preloaded during boot. When the backing store
334  * is configured the entire paged binary is copied in place and then also
335  * the init part. Since the init part has been relocated (references to
336  * addresses updated to compensate for the new load address) this has to be
337  * undone for the hashes of those pages to match with the original binary.
338  *
339  * If CFG_CORE_ASLR=n, nothing needs to be done as the code/ro pages are
340  * unchanged.
341  */
342 static void undo_init_relocation(uint8_t *paged_store __maybe_unused)
343 {
344 #ifdef CFG_CORE_ASLR
345 	unsigned long *ptr = NULL;
346 	const uint32_t *reloc = NULL;
347 	const uint32_t *reloc_end = NULL;
348 	unsigned long offs = boot_mmu_config.load_offset;
349 	const struct boot_embdata *embdata = (const void *)__init_end;
350 	vaddr_t addr_end = (vaddr_t)__init_end - offs - TEE_RAM_START;
351 	vaddr_t addr_start = (vaddr_t)__init_start - offs - TEE_RAM_START;
352 
353 	reloc = (const void *)((vaddr_t)embdata + embdata->reloc_offset);
354 	reloc_end = reloc + embdata->reloc_len / sizeof(*reloc);
355 
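	/*
	 * Relocation entries are in ascending order (the early break below
	 * relies on this). Each entry is the offset of a pointer that had the
	 * ASLR load offset added; subtract the offset again so the page
	 * contents match the original binary and its hashes.
	 */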
356 	for (; reloc < reloc_end; reloc++) {
357 		if (*reloc < addr_start)
358 			continue;
359 		if (*reloc >= addr_end)
360 			break;
361 		ptr = (void *)(paged_store + *reloc - addr_start);
362 		*ptr -= offs;
363 	}
364 #endif
365 }
366 
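/*
 * Create the read-only paged fobj backing the pageable area. With ASLR
 * enabled a relocation-aware fobj is used so the pager can re-apply the
 * relocations each time a page is brought back in.
 */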
367 static struct fobj *ro_paged_alloc(tee_mm_entry_t *mm, void *hashes,
368 				   void *store)
369 {
370 	const unsigned int num_pages = tee_mm_get_bytes(mm) / SMALL_PAGE_SIZE;
371 #ifdef CFG_CORE_ASLR
372 	unsigned int reloc_offs = (vaddr_t)__pageable_start - VCORE_START_VA;
373 	const struct boot_embdata *embdata = (const void *)__init_end;
374 	const void *reloc = __init_end + embdata->reloc_offset;
375 
376 	return fobj_ro_reloc_paged_alloc(num_pages, hashes, reloc_offs,
377 					 reloc, embdata->reloc_len, store);
378 #else
379 	return fobj_ro_paged_alloc(num_pages, hashes, store);
380 #endif
381 }
382 
383 static void init_runtime(unsigned long pageable_part)
384 {
385 	size_t n;
386 	size_t init_size = (size_t)(__init_end - __init_start);
387 	size_t pageable_start = (size_t)__pageable_start;
388 	size_t pageable_end = (size_t)__pageable_end;
389 	size_t pageable_size = pageable_end - pageable_start;
390 	size_t tzsram_end = TZSRAM_BASE + TZSRAM_SIZE;
391 	size_t hash_size = (pageable_size / SMALL_PAGE_SIZE) *
392 			   TEE_SHA256_HASH_SIZE;
393 	const struct boot_embdata *embdata = (const void *)__init_end;
394 	const void *tmp_hashes = NULL;
395 	tee_mm_entry_t *mm = NULL;
396 	struct fobj *fobj = NULL;
397 	uint8_t *paged_store = NULL;
398 	uint8_t *hashes = NULL;
399 
400 	assert(pageable_size % SMALL_PAGE_SIZE == 0);
401 	assert(embdata->total_len >= embdata->hashes_offset +
402 				     embdata->hashes_len);
403 	assert(hash_size == embdata->hashes_len);
404 
405 	tmp_hashes = __init_end + embdata->hashes_offset;
406 
407 	init_asan();
408 
409 	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
410 	malloc_add_pool(__heap2_start, __heap2_end - __heap2_start);
411 
412 	/*
413 	 * This needs to be initialized early to support address lookup
414 	 * in MEM_AREA_TEE_RAM
415 	 */
416 	tee_pager_early_init();
417 
418 	hashes = malloc(hash_size);
419 	IMSG_RAW("\n");
420 	IMSG("Pager is enabled. Hashes: %zu bytes", hash_size);
421 	assert(hashes);
422 	asan_memcpy_unchecked(hashes, tmp_hashes, hash_size);
423 
424 	/*
425 	 * Need tee_mm_sec_ddr initialized to be able to allocate secure
426 	 * DDR below.
427 	 */
428 	core_mmu_init_ta_ram();
429 
430 	carve_out_asan_mem(&tee_mm_sec_ddr);
431 
432 	mm = tee_mm_alloc(&tee_mm_sec_ddr, pageable_size);
433 	assert(mm);
434 	paged_store = phys_to_virt(tee_mm_get_smem(mm), MEM_AREA_TA_RAM);
435 	/*
436 	 * Load pageable part in the dedicated allocated area:
437 	 * - Move the pageable non-init part into the pageable area. Note that
438 	 *   the bootloader may have loaded it anywhere in TA RAM, hence memmove().
439 	 * - Copy pageable init part from current location into pageable area.
440 	 */
441 	memmove(paged_store + init_size,
442 		phys_to_virt(pageable_part,
443 			     core_mmu_get_type_by_pa(pageable_part)),
444 		__pageable_part_end - __pageable_part_start);
445 	asan_memcpy_unchecked(paged_store, __init_start, init_size);
446 	/*
447 	 * Undo any relocation of the init part so the hash checks
448 	 * can pass.
449 	 */
450 	undo_init_relocation(paged_store);
451 
452 	/* Check that the hashes of what's in the pageable area are OK */
453 	DMSG("Checking hashes of pageable area");
454 	for (n = 0; (n * SMALL_PAGE_SIZE) < pageable_size; n++) {
455 		const uint8_t *hash = hashes + n * TEE_SHA256_HASH_SIZE;
456 		const uint8_t *page = paged_store + n * SMALL_PAGE_SIZE;
457 		TEE_Result res;
458 
459 		DMSG("hash pg_idx %zu hash %p page %p", n, hash, page);
460 		res = hash_sha256_check(hash, page, SMALL_PAGE_SIZE);
461 		if (res != TEE_SUCCESS) {
462 			EMSG("Hash failed for page %zu at %p: res 0x%x",
463 			     n, (void *)page, res);
464 			panic();
465 		}
466 	}
467 
468 	/*
469 	 * Assert that the prepaged init sections are page aligned so that
470 	 * nothing trails uninitialized at the end of the premapped init area.
471 	 */
472 	assert(!(init_size & SMALL_PAGE_MASK));
473 
474 	/*
475 	 * Initialize the virtual memory pool used for main_mmu_l2_ttb which
476 	 * is supplied to tee_pager_init() below.
477 	 */
478 	init_vcore(&tee_mm_vcore);
479 
480 	/*
481 	 * Assign the alias area for the pager at the end of the small page
482 	 * block that the rest of the binary is loaded into. We're taking
483 	 * more than needed, but we're guaranteed not to need more than the
484 	 * physical amount of TZSRAM.
485 	 */
486 	mm = tee_mm_alloc2(&tee_mm_vcore,
487 		(vaddr_t)tee_mm_vcore.hi - TZSRAM_SIZE, TZSRAM_SIZE);
488 	assert(mm);
489 	tee_pager_set_alias_area(mm);
490 
491 	/*
492 	 * Claim virtual memory which isn't paged.
493 	 * Linear memory (flat-mapped core memory) ends there.
494 	 */
495 	mm = tee_mm_alloc2(&tee_mm_vcore, VCORE_UNPG_RX_PA,
496 			   (vaddr_t)(__pageable_start - VCORE_UNPG_RX_PA));
497 	assert(mm);
498 
499 	/*
500 	 * Allocate virtual memory for the pageable area and let the pager
501 	 * take charge of all the pages already assigned to that memory.
502 	 */
503 	mm = tee_mm_alloc2(&tee_mm_vcore, (vaddr_t)__pageable_start,
504 			   pageable_size);
505 	assert(mm);
506 	fobj = ro_paged_alloc(mm, hashes, paged_store);
507 	assert(fobj);
508 	tee_pager_add_core_area(tee_mm_get_smem(mm), PAGER_AREA_TYPE_RO, fobj);
509 	fobj_put(fobj);
510 
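	/*
	 * The init part is currently executing, so its pages are added still
	 * mapped (last argument false), while the remaining pageable pages
	 * are unmapped right away and will be paged in on demand.
	 */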
511 	tee_pager_add_pages(pageable_start, init_size / SMALL_PAGE_SIZE, false);
512 	tee_pager_add_pages(pageable_start + init_size,
513 			    (pageable_size - init_size) / SMALL_PAGE_SIZE,
514 			    true);
515 	if (pageable_end < tzsram_end)
516 		tee_pager_add_pages(pageable_end, (tzsram_end - pageable_end) /
517 						   SMALL_PAGE_SIZE, true);
518 
519 	/*
520 	 * There may be physical pages in TZSRAM before the core load address.
521 	 * These pages can be added to the physical pages pool of the pager.
522 	 * This setup may happen when a secure bootloader runs in TZSRAM
523 	 * and its memory can be reused by OP-TEE once boot stages complete.
524 	 */
525 	tee_pager_add_pages(tee_mm_vcore.lo,
526 			(VCORE_UNPG_RX_PA - tee_mm_vcore.lo) / SMALL_PAGE_SIZE,
527 			true);
528 
529 	print_pager_pool_size();
530 }
531 #else
532 
533 static void init_runtime(unsigned long pageable_part __unused)
534 {
535 	init_asan();
536 
537 	/*
538 	 * By default the whole of OP-TEE uses malloc, so we need to initialize
539 	 * it early. But, when virtualization is enabled, malloc is used
540 	 * only by TEE runtime, so malloc should be initialized later, for
541 	 * every virtual partition separately. Core code uses nex_malloc
542 	 * instead.
543 	 */
544 #ifdef CFG_VIRTUALIZATION
545 	nex_malloc_add_pool(__nex_heap_start, __nex_heap_end -
546 					      __nex_heap_start);
547 #else
548 	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
549 #endif
550 
551 	IMSG_RAW("\n");
552 }
553 #endif
554 
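/*
 * Return the DT to use: prefer the embedded (secure) DT and fall back to
 * the external DT supplied at boot.
 */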
555 void *get_dt(void)
556 {
557 	void *fdt = get_embedded_dt();
558 
559 	if (!fdt)
560 		fdt = get_external_dt();
561 
562 	return fdt;
563 }
564 
565 #if defined(CFG_EMBED_DTB)
566 void *get_embedded_dt(void)
567 {
568 	static bool checked;
569 
570 	assert(cpu_mmu_enabled());
571 
572 	if (!checked) {
573 		IMSG("Embedded DTB found");
574 
575 		if (fdt_check_header(embedded_secure_dtb))
576 			panic("Invalid embedded DTB");
577 
578 		checked = true;
579 	}
580 
581 	return embedded_secure_dtb;
582 }
583 #else
584 void *get_embedded_dt(void)
585 {
586 	return NULL;
587 }
588 #endif /*CFG_EMBED_DTB*/
589 
590 #if defined(CFG_DT)
591 void *get_external_dt(void)
592 {
593 	assert(cpu_mmu_enabled());
594 	return external_dt.blob;
595 }
596 
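/*
 * boot_final() hook: pack the external DT again (it was expanded to
 * CFG_DTB_MAX_SIZE by fdt_open_into()) and drop our reference to it before
 * the normal world starts using it.
 */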
597 static TEE_Result release_external_dt(void)
598 {
599 	int ret = 0;
600 
601 	if (!external_dt.blob)
602 		return TEE_SUCCESS;
603 
604 	ret = fdt_pack(external_dt.blob);
605 	if (ret < 0) {
606 		EMSG("Failed to pack Device Tree at 0x%" PRIxPA ": error %d",
607 		     virt_to_phys(external_dt.blob), ret);
608 		panic();
609 	}
610 
611 	/* The external DTB can no longer be accessed, reset the pointer to invalid */
612 	external_dt.blob = NULL;
613 
614 	return TEE_SUCCESS;
615 }
616 boot_final(release_external_dt);
617 
618 #ifdef CFG_EXTERNAL_DTB_OVERLAY
619 static int add_dt_overlay_fragment(struct dt_descriptor *dt, int ioffs)
620 {
621 	char frag[32];
622 	int offs;
623 	int ret;
624 
625 	snprintf(frag, sizeof(frag), "fragment@%d", dt->frag_id);
626 	offs = fdt_add_subnode(dt->blob, ioffs, frag);
627 	if (offs < 0)
628 		return offs;
629 
630 	dt->frag_id += 1;
631 
632 	ret = fdt_setprop_string(dt->blob, offs, "target-path", "/");
633 	if (ret < 0)
634 		return -1;
635 
636 	return fdt_add_subnode(dt->blob, offs, "__overlay__");
637 }
638 
639 static int init_dt_overlay(struct dt_descriptor *dt, int __maybe_unused dt_size)
640 {
641 	int fragment;
642 	int ret;
643 
644 	ret = fdt_check_header(dt->blob);
645 	if (!ret) {
646 		fdt_for_each_subnode(fragment, dt->blob, 0)
647 			dt->frag_id += 1;
648 		return ret;
649 	}
650 
651 #ifdef CFG_DT_ADDR
652 	return fdt_create_empty_tree(dt->blob, dt_size);
653 #else
654 	return -1;
655 #endif
656 }
657 #else
658 static int add_dt_overlay_fragment(struct dt_descriptor *dt __unused, int offs)
659 {
660 	return offs;
661 }
662 
663 static int init_dt_overlay(struct dt_descriptor *dt __unused,
664 			   int dt_size __unused)
665 {
666 	return 0;
667 }
668 #endif /* CFG_EXTERNAL_DTB_OVERLAY */
669 
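/*
 * Add "subnode" under "path" in the external DT. When the DT is an overlay
 * the node is first wrapped in a new fragment@N/__overlay__ pair.
 */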
670 static int add_dt_path_subnode(struct dt_descriptor *dt, const char *path,
671 			       const char *subnode)
672 {
673 	int offs;
674 
675 	offs = fdt_path_offset(dt->blob, path);
676 	if (offs < 0)
677 		return -1;
678 	offs = add_dt_overlay_fragment(dt, offs);
679 	if (offs < 0)
680 		return -1;
681 	offs = fdt_add_subnode(dt->blob, offs, subnode);
682 	if (offs < 0)
683 		return -1;
684 	return offs;
685 }
686 
687 static int add_optee_dt_node(struct dt_descriptor *dt)
688 {
689 	int offs;
690 	int ret;
691 
692 	if (fdt_path_offset(dt->blob, "/firmware/optee") >= 0) {
693 		DMSG("OP-TEE Device Tree node already exists!");
694 		return 0;
695 	}
696 
697 	offs = fdt_path_offset(dt->blob, "/firmware");
698 	if (offs < 0) {
699 		offs = add_dt_path_subnode(dt, "/", "firmware");
700 		if (offs < 0)
701 			return -1;
702 	}
703 
704 	offs = fdt_add_subnode(dt->blob, offs, "optee");
705 	if (offs < 0)
706 		return -1;
707 
708 	ret = fdt_setprop_string(dt->blob, offs, "compatible",
709 				 "linaro,optee-tz");
710 	if (ret < 0)
711 		return -1;
712 	ret = fdt_setprop_string(dt->blob, offs, "method", "smc");
713 	if (ret < 0)
714 		return -1;
715 	return 0;
716 }
717 
718 #ifdef CFG_PSCI_ARM32
719 static int append_psci_compatible(void *fdt, int offs, const char *str)
720 {
721 	return fdt_appendprop(fdt, offs, "compatible", str, strlen(str) + 1);
722 }
723 
724 static int dt_add_psci_node(struct dt_descriptor *dt)
725 {
726 	int offs;
727 
728 	if (fdt_path_offset(dt->blob, "/psci") >= 0) {
729 		DMSG("PSCI Device Tree node already exists!");
730 		return 0;
731 	}
732 
733 	offs = add_dt_path_subnode(dt, "/", "psci");
734 	if (offs < 0)
735 		return -1;
736 	if (append_psci_compatible(dt->blob, offs, "arm,psci-1.0"))
737 		return -1;
738 	if (append_psci_compatible(dt->blob, offs, "arm,psci-0.2"))
739 		return -1;
740 	if (append_psci_compatible(dt->blob, offs, "arm,psci"))
741 		return -1;
742 	if (fdt_setprop_string(dt->blob, offs, "method", "smc"))
743 		return -1;
744 	if (fdt_setprop_u32(dt->blob, offs, "cpu_suspend", PSCI_CPU_SUSPEND))
745 		return -1;
746 	if (fdt_setprop_u32(dt->blob, offs, "cpu_off", PSCI_CPU_OFF))
747 		return -1;
748 	if (fdt_setprop_u32(dt->blob, offs, "cpu_on", PSCI_CPU_ON))
749 		return -1;
750 	if (fdt_setprop_u32(dt->blob, offs, "sys_poweroff", PSCI_SYSTEM_OFF))
751 		return -1;
752 	if (fdt_setprop_u32(dt->blob, offs, "sys_reset", PSCI_SYSTEM_RESET))
753 		return -1;
754 	return 0;
755 }
756 
757 static int check_node_compat_prefix(struct dt_descriptor *dt, int offs,
758 				    const char *prefix)
759 {
760 	const size_t prefix_len = strlen(prefix);
761 	size_t l;
762 	int plen;
763 	const char *prop;
764 
765 	prop = fdt_getprop(dt->blob, offs, "compatible", &plen);
766 	if (!prop)
767 		return -1;
768 
769 	while (plen > 0) {
770 		if (memcmp(prop, prefix, prefix_len) == 0)
771 			return 0; /* match */
772 
773 		l = strlen(prop) + 1;
774 		prop += l;
775 		plen -= l;
776 	}
777 
778 	return -1;
779 }
780 
781 static int dt_add_psci_cpu_enable_methods(struct dt_descriptor *dt)
782 {
783 	int offs = 0;
784 
785 	while (1) {
786 		offs = fdt_next_node(dt->blob, offs, NULL);
787 		if (offs < 0)
788 			break;
789 		if (fdt_getprop(dt->blob, offs, "enable-method", NULL))
790 			continue; /* already set */
791 		if (check_node_compat_prefix(dt, offs, "arm,cortex-a"))
792 			continue; /* no compatible */
793 		if (fdt_setprop_string(dt->blob, offs, "enable-method", "psci"))
794 			return -1;
795 		/* Need to restart scanning as offsets may have changed */
796 		offs = 0;
797 	}
798 	return 0;
799 }
800 
801 static int config_psci(struct dt_descriptor *dt)
802 {
803 	if (dt_add_psci_node(dt))
804 		return -1;
805 	return dt_add_psci_cpu_enable_methods(dt);
806 }
807 #else
808 static int config_psci(struct dt_descriptor *dt __unused)
809 {
810 	return 0;
811 }
812 #endif /*CFG_PSCI_ARM32*/
813 
814 static void set_dt_val(void *data, uint32_t cell_size, uint64_t val)
815 {
816 	if (cell_size == 1) {
817 		fdt32_t v = cpu_to_fdt32((uint32_t)val);
818 
819 		memcpy(data, &v, sizeof(v));
820 	} else {
821 		fdt64_t v = cpu_to_fdt64(val);
822 
823 		memcpy(data, &v, sizeof(v));
824 	}
825 }
826 
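/*
 * Describe a carved-out memory region to the normal world by adding a
 * "no-map" child named "name@<pa>" under /reserved-memory, creating that
 * parent node (with #address-cells/#size-cells and an empty ranges) if it
 * doesn't exist yet.
 */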
827 static int add_res_mem_dt_node(struct dt_descriptor *dt, const char *name,
828 			       paddr_t pa, size_t size)
829 {
830 	int offs = 0;
831 	int ret = 0;
832 	int addr_size = -1;
833 	int len_size = -1;
834 	bool found = true;
835 	char subnode_name[80] = { 0 };
836 
837 	offs = fdt_path_offset(dt->blob, "/reserved-memory");
838 
839 	if (offs < 0) {
840 		found = false;
841 		offs = 0;
842 	}
843 
844 	if (IS_ENABLED(CFG_EXTERNAL_DTB_OVERLAY)) {
845 		len_size = sizeof(paddr_t) / sizeof(uint32_t);
846 		addr_size = sizeof(paddr_t) / sizeof(uint32_t);
847 	} else {
848 		len_size = fdt_size_cells(dt->blob, offs);
849 		if (len_size < 0)
850 			return -1;
851 		addr_size = fdt_address_cells(dt->blob, offs);
852 		if (addr_size < 0)
853 			return -1;
854 	}
855 
856 	if (!found) {
857 		offs = add_dt_path_subnode(dt, "/", "reserved-memory");
858 		if (offs < 0)
859 			return -1;
860 		ret = fdt_setprop_cell(dt->blob, offs, "#address-cells",
861 				       addr_size);
862 		if (ret < 0)
863 			return -1;
864 		ret = fdt_setprop_cell(dt->blob, offs, "#size-cells", len_size);
865 		if (ret < 0)
866 			return -1;
867 		ret = fdt_setprop(dt->blob, offs, "ranges", NULL, 0);
868 		if (ret < 0)
869 			return -1;
870 	}
871 
872 	snprintf(subnode_name, sizeof(subnode_name),
873 		 "%s@0x%" PRIxPA, name, pa);
874 	offs = fdt_add_subnode(dt->blob, offs, subnode_name);
875 	if (offs >= 0) {
876 		uint32_t data[FDT_MAX_NCELLS * 2];
877 
878 		set_dt_val(data, addr_size, pa);
879 		set_dt_val(data + addr_size, len_size, size);
880 		ret = fdt_setprop(dt->blob, offs, "reg", data,
881 				  sizeof(uint32_t) * (addr_size + len_size));
882 		if (ret < 0)
883 			return -1;
884 		ret = fdt_setprop(dt->blob, offs, "no-map", NULL, 0);
885 		if (ret < 0)
886 			return -1;
887 	} else {
888 		return -1;
889 	}
890 	return 0;
891 }
892 
893 #ifdef CFG_CORE_DYN_SHM
894 static uint64_t get_dt_val_and_advance(const void *data, size_t *offs,
895 				       uint32_t cell_size)
896 {
897 	uint64_t rv = 0;
898 
899 	if (cell_size == 1) {
900 		uint32_t v;
901 
902 		memcpy(&v, (const uint8_t *)data + *offs, sizeof(v));
903 		*offs += sizeof(v);
904 		rv = fdt32_to_cpu(v);
905 	} else {
906 		uint64_t v;
907 
908 		memcpy(&v, (const uint8_t *)data + *offs, sizeof(v));
909 		*offs += sizeof(v);
910 		rv = fdt64_to_cpu(v);
911 	}
912 
913 	return rv;
914 }
915 
916 /*
917  * Find all non-secure memory from DT. Memory marked inaccessible by Secure
918  * World is ignored since it cannot be mapped for use as dynamic shared
919  * memory.
920  */
921 static int get_nsec_memory_helper(void *fdt, struct core_mmu_phys_mem *mem)
922 {
923 	const uint8_t *prop = NULL;
924 	uint64_t a = 0;
925 	uint64_t l = 0;
926 	size_t prop_offs = 0;
927 	size_t prop_len = 0;
928 	int elems_total = 0;
929 	int addr_size = 0;
930 	int len_size = 0;
931 	int offs = 0;
932 	size_t n = 0;
933 	int len = 0;
934 
935 	addr_size = fdt_address_cells(fdt, 0);
936 	if (addr_size < 0)
937 		return 0;
938 
939 	len_size = fdt_size_cells(fdt, 0);
940 	if (len_size < 0)
941 		return 0;
942 
943 	while (true) {
944 		offs = fdt_node_offset_by_prop_value(fdt, offs, "device_type",
945 						     "memory",
946 						     sizeof("memory"));
947 		if (offs < 0)
948 			break;
949 
950 		if (_fdt_get_status(fdt, offs) != (DT_STATUS_OK_NSEC |
951 						   DT_STATUS_OK_SEC))
952 			continue;
953 
954 		prop = fdt_getprop(fdt, offs, "reg", &len);
955 		if (!prop)
956 			continue;
957 
958 		prop_len = len;
959 		for (n = 0, prop_offs = 0; prop_offs < prop_len; n++) {
960 			a = get_dt_val_and_advance(prop, &prop_offs, addr_size);
961 			if (prop_offs >= prop_len) {
962 				n--;
963 				break;
964 			}
965 
966 			l = get_dt_val_and_advance(prop, &prop_offs, len_size);
967 			if (mem) {
968 				mem->type = MEM_AREA_DDR_OVERALL;
969 				mem->addr = a;
970 				mem->size = l;
971 				mem++;
972 			}
973 		}
974 
975 		elems_total += n;
976 	}
977 
978 	return elems_total;
979 }
980 
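/*
 * Two passes over the DT: first count the memory ranges with a NULL buffer,
 * then allocate the array from the nexus heap and fill it in.
 */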
981 static struct core_mmu_phys_mem *get_nsec_memory(void *fdt, size_t *nelems)
982 {
983 	struct core_mmu_phys_mem *mem = NULL;
984 	int elems_total = 0;
985 
986 	elems_total = get_nsec_memory_helper(fdt, NULL);
987 	if (elems_total <= 0)
988 		return NULL;
989 
990 	mem = nex_calloc(elems_total, sizeof(*mem));
991 	if (!mem)
992 		panic();
993 
994 	elems_total = get_nsec_memory_helper(fdt, mem);
995 	assert(elems_total > 0);
996 
997 	*nelems = elems_total;
998 
999 	return mem;
1000 }
1001 #endif /*CFG_CORE_DYN_SHM*/
1002 
1003 #ifdef CFG_CORE_RESERVED_SHM
1004 static int mark_static_shm_as_reserved(struct dt_descriptor *dt)
1005 {
1006 	vaddr_t shm_start;
1007 	vaddr_t shm_end;
1008 
1009 	core_mmu_get_mem_by_type(MEM_AREA_NSEC_SHM, &shm_start, &shm_end);
1010 	if (shm_start != shm_end)
1011 		return add_res_mem_dt_node(dt, "optee_shm",
1012 					   virt_to_phys((void *)shm_start),
1013 					   shm_end - shm_start);
1014 
1015 	DMSG("No SHM configured");
1016 	return -1;
1017 }
1018 #endif /*CFG_CORE_RESERVED_SHM*/
1019 
1020 static void init_external_dt(unsigned long phys_dt)
1021 {
1022 	struct dt_descriptor *dt = &external_dt;
1023 	void *fdt;
1024 	int ret;
1025 
1026 	if (!phys_dt) {
1027 		/*
1028 		 * No need to panic as we're not using the DT in OP-TEE
1029 		 * yet, we're only adding some nodes for normal world use.
1030 		 * This makes the switch to using DT easier as we can boot
1031 		 * a newer OP-TEE with older boot loaders. Once we start to
1032 		 * initialize devices based on DT we'll likely panic
1033 		 * instead of returning here.
1034 		 */
1035 		IMSG("No non-secure external DT");
1036 		return;
1037 	}
1038 
1039 	if (!core_mmu_add_mapping(MEM_AREA_EXT_DT, phys_dt, CFG_DTB_MAX_SIZE))
1040 		panic("Failed to map external DTB");
1041 
1042 	fdt = phys_to_virt(phys_dt, MEM_AREA_EXT_DT);
1043 	if (!fdt)
1044 		panic();
1045 
1046 	dt->blob = fdt;
1047 
1048 	ret = init_dt_overlay(dt, CFG_DTB_MAX_SIZE);
1049 	if (ret < 0) {
1050 		EMSG("Device Tree Overlay init fail @ %#lx: error %d", phys_dt,
1051 		     ret);
1052 		panic();
1053 	}
1054 
1055 	ret = fdt_open_into(fdt, fdt, CFG_DTB_MAX_SIZE);
1056 	if (ret < 0) {
1057 		EMSG("Invalid Device Tree at %#lx: error %d", phys_dt, ret);
1058 		panic();
1059 	}
1060 
1061 	IMSG("Non-secure external DT found");
1062 }
1063 
1064 static int mark_tzdram_as_reserved(struct dt_descriptor *dt)
1065 {
1066 	return add_res_mem_dt_node(dt, "optee_core", CFG_TZDRAM_START,
1067 				   CFG_TZDRAM_SIZE);
1068 }
1069 
1070 static void update_external_dt(void)
1071 {
1072 	struct dt_descriptor *dt = &external_dt;
1073 
1074 	if (!dt->blob)
1075 		return;
1076 
1077 	if (add_optee_dt_node(dt))
1078 		panic("Failed to add OP-TEE Device Tree node");
1079 
1080 	if (config_psci(dt))
1081 		panic("Failed to config PSCI");
1082 
1083 #ifdef CFG_CORE_RESERVED_SHM
1084 	if (mark_static_shm_as_reserved(dt))
1085 		panic("Failed to config non-secure memory");
1086 #endif
1087 
1088 	if (mark_tzdram_as_reserved(dt))
1089 		panic("Failed to config secure memory");
1090 }
1091 #else /*CFG_DT*/
1092 void *get_external_dt(void)
1093 {
1094 	return NULL;
1095 }
1096 
1097 static void init_external_dt(unsigned long phys_dt __unused)
1098 {
1099 }
1100 
1101 static void update_external_dt(void)
1102 {
1103 }
1104 
1105 #ifdef CFG_CORE_DYN_SHM
1106 static struct core_mmu_phys_mem *get_nsec_memory(void *fdt __unused,
1107 						 size_t *nelems __unused)
1108 {
1109 	return NULL;
1110 }
1111 #endif /*CFG_CORE_DYN_SHM*/
1112 #endif /*!CFG_DT*/
1113 
1114 #ifdef CFG_CORE_DYN_SHM
1115 static void discover_nsec_memory(void)
1116 {
1117 	struct core_mmu_phys_mem *mem;
1118 	const struct core_mmu_phys_mem *mem_begin = NULL;
1119 	const struct core_mmu_phys_mem *mem_end = NULL;
1120 	size_t nelems;
1121 	void *fdt = get_external_dt();
1122 
1123 	if (fdt) {
1124 		mem = get_nsec_memory(fdt, &nelems);
1125 		if (mem) {
1126 			core_mmu_set_discovered_nsec_ddr(mem, nelems);
1127 			return;
1128 		}
1129 
1130 		DMSG("No non-secure memory found in FDT");
1131 	}
1132 
1133 	mem_begin = phys_ddr_overall_begin;
1134 	mem_end = phys_ddr_overall_end;
1135 	nelems = mem_end - mem_begin;
1136 	if (nelems) {
1137 		/*
1138 		 * Platform cannot use both register_ddr() and the now
1139 		 * deprecated register_dynamic_shm().
1140 		 */
1141 		assert(phys_ddr_overall_compat_begin ==
1142 		       phys_ddr_overall_compat_end);
1143 	} else {
1144 		mem_begin = phys_ddr_overall_compat_begin;
1145 		mem_end = phys_ddr_overall_compat_end;
1146 		nelems = mem_end - mem_begin;
1147 		if (!nelems)
1148 			return;
1149 		DMSG("Warning register_dynamic_shm() is deprecated, please use register_ddr() instead");
1150 	}
1151 
1152 	mem = nex_calloc(nelems, sizeof(*mem));
1153 	if (!mem)
1154 		panic();
1155 
1156 	memcpy(mem, mem_begin, sizeof(*mem) * nelems);
1157 	core_mmu_set_discovered_nsec_ddr(mem, nelems);
1158 }
1159 #else /*CFG_CORE_DYN_SHM*/
1160 static void discover_nsec_memory(void)
1161 {
1162 }
1163 #endif /*!CFG_CORE_DYN_SHM*/
1164 
1165 void init_tee_runtime(void)
1166 {
1167 #ifdef CFG_VIRTUALIZATION
1168 	/* We need to initialize the pool for every virtual guest partition */
1169 	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
1170 #endif
1171 
1172 #ifndef CFG_WITH_PAGER
1173 	/* Pager initializes TA RAM early */
1174 	core_mmu_init_ta_ram();
1175 #endif
1176 	call_initcalls();
1177 }
1178 
1179 static void init_primary(unsigned long pageable_part, unsigned long nsec_entry)
1180 {
1181 	/*
1182 	 * Mask asynchronous exceptions before switching to the thread vector
1183 	 * as the thread handler requires those to be masked while
1184 	 * executing with the temporary stack. The thread subsystem also
1185 	 * asserts that the foreign interrupts are blocked when using most of
1186 	 * its functions.
1187 	 */
1188 	thread_set_exceptions(THREAD_EXCP_ALL);
1189 	primary_save_cntfrq();
1190 	init_vfp_sec();
1191 	/*
1192 	 * Pager: init_runtime() calls thread_kernel_enable_vfp() so we must
1193 	 * set a current thread right now to avoid a chicken-and-egg problem
1194 	 * (thread_init_boot_thread() sets the current thread but needs
1195 	 * things set by init_runtime()).
1196 	 */
1197 	thread_get_core_local()->curr_thread = 0;
1198 	init_runtime(pageable_part);
1199 
1200 	if (IS_ENABLED(CFG_VIRTUALIZATION)) {
1201 		/*
1202 		 * Virtualization: We can't initialize threads right now because
1203 		 * threads belong to the "tee" part and will be initialized
1204 		 * separately for each new virtual guest. So, we'll clear
1205 		 * "curr_thread" and call it done.
1206 		 */
1207 		thread_get_core_local()->curr_thread = -1;
1208 	} else {
1209 		thread_init_boot_thread();
1210 	}
1211 	thread_init_primary();
1212 	thread_init_per_cpu();
1213 	init_sec_mon(nsec_entry);
1214 }
1215 
1216 /*
1217  * Note: this function is weak just to make it possible to exclude it from
1218  * the unpaged area.
1219  */
1220 void __weak paged_init_primary(unsigned long fdt)
1221 {
1222 	init_external_dt(fdt);
1223 	tpm_map_log_area(get_external_dt());
1224 	discover_nsec_memory();
1225 	update_external_dt();
1226 	configure_console_from_dt();
1227 
1228 	IMSG("OP-TEE version: %s", core_v_str);
1229 	IMSG("Primary CPU initializing");
1230 #ifdef CFG_CORE_ASLR
1231 	DMSG("Executing at offset %#lx with virtual load address %#"PRIxVA,
1232 	     (unsigned long)boot_mmu_config.load_offset, VCORE_START_VA);
1233 #endif
1234 
1235 	main_init_gic();
1236 	init_vfp_nsec();
1237 #ifndef CFG_VIRTUALIZATION
1238 	init_tee_runtime();
1239 #endif
1240 #ifdef CFG_VIRTUALIZATION
1241 	IMSG("Initializing virtualization support");
1242 	core_mmu_init_virtualization();
1243 #endif
1244 	call_finalcalls();
1245 	IMSG("Primary CPU switching to normal world boot");
1246 }
1247 
1248 static void init_secondary_helper(unsigned long nsec_entry)
1249 {
1250 	IMSG("Secondary CPU %zu initializing", get_core_pos());
1251 
1252 	/*
1253 	 * Mask asynchronous exceptions before switching to the thread vector
1254 	 * as the thread handler requires those to be masked while
1255 	 * executing with the temporary stack. The thread subsystem also
1256 	 * asserts that the foreign interrupts are blocked when using most of
1257 	 * its functions.
1258 	 */
1259 	thread_set_exceptions(THREAD_EXCP_ALL);
1260 
1261 	secondary_init_cntfrq();
1262 	thread_init_per_cpu();
1263 	init_sec_mon(nsec_entry);
1264 	main_secondary_init_gic();
1265 	init_vfp_sec();
1266 	init_vfp_nsec();
1267 
1268 	IMSG("Secondary CPU %zu switching to normal world boot", get_core_pos());
1269 }
1270 
1271 /*
1272  * Note: this function is weak just to make it possible to exclude it from
1273  * the unpaged area so that it lies in the init area.
1274  */
1275 void __weak boot_init_primary(unsigned long pageable_part,
1276 			      unsigned long nsec_entry __maybe_unused,
1277 			      unsigned long fdt)
1278 {
1279 	unsigned long e = PADDR_INVALID;
1280 
1281 #if !defined(CFG_WITH_ARM_TRUSTED_FW)
1282 	e = nsec_entry;
1283 #endif
1284 
1285 	init_primary(pageable_part, e);
1286 	paged_init_primary(fdt);
1287 }
1288 
1289 #if defined(CFG_WITH_ARM_TRUSTED_FW)
1290 unsigned long boot_cpu_on_handler(unsigned long a0 __maybe_unused,
1291 				  unsigned long a1 __unused)
1292 {
1293 	init_secondary_helper(PADDR_INVALID);
1294 	return 0;
1295 }
1296 #else
1297 void boot_init_secondary(unsigned long nsec_entry)
1298 {
1299 	init_secondary_helper(nsec_entry);
1300 }
1301 #endif
1302 
1303 #if defined(CFG_BOOT_SECONDARY_REQUEST)
1304 void boot_set_core_ns_entry(size_t core_idx, uintptr_t entry,
1305 			    uintptr_t context_id)
1306 {
1307 	ns_entry_contexts[core_idx].entry_point = entry;
1308 	ns_entry_contexts[core_idx].context_id = context_id;
1309 	dsb_ishst();
1310 }
1311 
1312 int boot_core_release(size_t core_idx, paddr_t entry)
1313 {
1314 	if (!core_idx || core_idx >= CFG_TEE_CORE_NB_CORE)
1315 		return -1;
1316 
1317 	ns_entry_contexts[core_idx].entry_point = entry;
1318 	dmb();
1319 	spin_table[core_idx] = 1;
1320 	dsb();
1321 	sev();
1322 
1323 	return 0;
1324 }
1325 
1326 /*
1327  * Spin until a secondary boot request, then return with
1328  * the secondary core entry address.
1329  */
1330 struct ns_entry_context *boot_core_hpen(void)
1331 {
1332 #ifdef CFG_PSCI_ARM32
1333 	return &ns_entry_contexts[get_core_pos()];
1334 #else
1335 	do {
1336 		wfe();
1337 	} while (!spin_table[get_core_pos()]);
1338 	dmb();
1339 	return &ns_entry_contexts[get_core_pos()];
1340 #endif
1341 }
1342 #endif
1343 
1344 #if defined(CFG_CORE_ASLR)
1345 #if defined(CFG_DT)
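/*
 * The ASLR seed is read from the 64-bit "kaslr-seed" property of the
 * /secure-chosen node, typically provided by an earlier boot stage.
 */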
1346 unsigned long __weak get_aslr_seed(void *fdt)
1347 {
1348 	int rc = fdt_check_header(fdt);
1349 	const uint64_t *seed = NULL;
1350 	int offs = 0;
1351 	int len = 0;
1352 
1353 	if (rc) {
1354 		DMSG("Bad fdt: %d", rc);
1355 		return 0;
1356 	}
1357 
1358 	offs =  fdt_path_offset(fdt, "/secure-chosen");
1359 	if (offs < 0) {
1360 		DMSG("Cannot find /secure-chosen");
1361 		return 0;
1362 	}
1363 	seed = fdt_getprop(fdt, offs, "kaslr-seed", &len);
1364 	if (!seed || len != sizeof(*seed)) {
1365 		DMSG("Cannot find valid kaslr-seed");
1366 		return 0;
1367 	}
1368 
1369 	return fdt64_to_cpu(*seed);
1370 }
1371 #else /*!CFG_DT*/
1372 unsigned long __weak get_aslr_seed(void *fdt __unused)
1373 {
1374 	DMSG("Warning: no ASLR seed");
1375 	return 0;
1376 }
1377 #endif /*!CFG_DT*/
1378 #endif /*CFG_CORE_ASLR*/
1379