// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2015-2023, Linaro Limited
 * Copyright (c) 2023, Arm Limited
 */

#include <arm.h>
#include <assert.h>
#include <compiler.h>
#include <config.h>
#include <console.h>
#include <crypto/crypto.h>
#include <drivers/gic.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <ffa.h>
#include <initcall.h>
#include <inttypes.h>
#include <keep.h>
#include <kernel/asan.h>
#include <kernel/boot.h>
#include <kernel/dt.h>
#include <kernel/linker.h>
#include <kernel/misc.h>
#include <kernel/panic.h>
#include <kernel/tee_misc.h>
#include <kernel/thread.h>
#include <kernel/tpm.h>
#include <libfdt.h>
#include <malloc.h>
#include <memtag.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <mm/fobj.h>
#include <mm/tee_mm.h>
#include <mm/tee_pager.h>
#include <sm/psci.h>
#include <stdio.h>
#include <trace.h>
#include <utee_defines.h>
#include <util.h>

#include <platform_config.h>

#if !defined(CFG_WITH_ARM_TRUSTED_FW)
#include <sm/sm.h>
#endif

#if defined(CFG_WITH_VFP)
#include <kernel/vfp.h>
#endif

/*
 * In this file we're using unsigned long to represent physical pointers as
 * they are received in a single register when OP-TEE is initially entered.
 * This limits 32-bit systems to using only the lower 32 bits of a physical
 * address for initial parameters.
 *
 * 64-bit systems on the other hand can use full 64-bit physical pointers.
 */
#define PADDR_INVALID		ULONG_MAX

#if defined(CFG_BOOT_SECONDARY_REQUEST)
struct ns_entry_context {
	uintptr_t entry_point;
	uintptr_t context_id;
};
struct ns_entry_context ns_entry_contexts[CFG_TEE_CORE_NB_CORE];
static uint32_t spin_table[CFG_TEE_CORE_NB_CORE];
#endif

#ifdef CFG_BOOT_SYNC_CPU
/*
 * Array used when booting to synchronize the CPUs.
 * When 0, the CPU has not started.
 * When 1, it has started.
 */
uint32_t sem_cpu_sync[CFG_TEE_CORE_NB_CORE];
DECLARE_KEEP_PAGER(sem_cpu_sync);
#endif

#ifdef CFG_CORE_FFA
static void *manifest_dt __nex_bss;
#endif

#ifdef CFG_SECONDARY_INIT_CNTFRQ
static uint32_t cntfrq;
#endif

/* May be overridden in plat-$(PLATFORM)/main.c */
__weak void plat_primary_init_early(void)
{
}
DECLARE_KEEP_PAGER(plat_primary_init_early);

/* May be overridden in plat-$(PLATFORM)/main.c */
__weak void boot_primary_init_intc(void)
{
}

/* May be overridden in plat-$(PLATFORM)/main.c */
__weak void boot_secondary_init_intc(void)
{
}

/* May be overridden in plat-$(PLATFORM)/main.c */
__weak unsigned long plat_get_aslr_seed(void)
{
	DMSG("Warning: no ASLR seed");

	return 0;
}
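/*
 * Illustration only: a platform that has an always-on entropy register
 * available this early in boot could override the weak function above in
 * plat-$(PLATFORM)/main.c roughly as below. TRNG_SEED_PA is a made-up
 * physical address used purely as an example; a real implementation must
 * use whatever entropy source the SoC provides at this point in boot.
 *
 * unsigned long plat_get_aslr_seed(void)
 * {
 *	return io_read32((vaddr_t)TRNG_SEED_PA);
 * }
 */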

#if defined(_CFG_CORE_STACK_PROTECTOR) || defined(CFG_WITH_STACK_CANARIES)
/* Generate random stack canary values on boot up */
__weak void plat_get_random_stack_canaries(void *buf, size_t ncan, size_t size)
{
	TEE_Result ret = TEE_ERROR_GENERIC;
	size_t i = 0;

	assert(buf && ncan && size);

	/*
	 * With virtualization the RNG is not initialized in the Nexus core,
	 * so this needs to be overridden with a platform specific
	 * implementation.
	 */
	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
		IMSG("WARNING: Using fixed value for stack canary");
		memset(buf, 0xab, ncan * size);
		goto out;
	}

	ret = crypto_rng_read(buf, ncan * size);
	if (ret != TEE_SUCCESS)
		panic("Failed to generate random stack canary");

out:
	/* Leave a null byte in each canary to prevent string-based exploits */
	for (i = 0; i < ncan; i++)
		*((uint8_t *)buf + size * i) = 0;
}
#endif /* _CFG_CORE_STACK_PROTECTOR || CFG_WITH_STACK_CANARIES */

/*
 * This function is called as a guard after each smc call which is not
 * supposed to return.
 */
void __panic_at_smc_return(void)
{
	panic();
}

#if defined(CFG_WITH_ARM_TRUSTED_FW)
void init_sec_mon(unsigned long nsec_entry __maybe_unused)
{
	assert(nsec_entry == PADDR_INVALID);
	/* Do nothing as we don't have a secure monitor */
}
#else
/* May be overridden in plat-$(PLATFORM)/main.c */
__weak void init_sec_mon(unsigned long nsec_entry)
{
	struct sm_nsec_ctx *nsec_ctx;

	assert(nsec_entry != PADDR_INVALID);

	/* Initialize secure monitor */
	nsec_ctx = sm_get_nsec_ctx();
	nsec_ctx->mon_lr = nsec_entry;
	nsec_ctx->mon_spsr = CPSR_MODE_SVC | CPSR_I;
	if (nsec_entry & 1)
		nsec_ctx->mon_spsr |= CPSR_T;
}
#endif

#if defined(CFG_WITH_ARM_TRUSTED_FW)
static void init_vfp_nsec(void)
{
}
#else
static void init_vfp_nsec(void)
{
	/* Normal world can use CP10 and CP11 (SIMD/VFP) */
	write_nsacr(read_nsacr() | NSACR_CP10 | NSACR_CP11);
}
#endif

#if defined(CFG_WITH_VFP)

#ifdef ARM32
static void init_vfp_sec(void)
{
	uint32_t cpacr = read_cpacr();

	/*
	 * Enable Advanced SIMD functionality.
	 * Enable use of D16-D31 of the Floating-point Extension register
	 * file.
	 */
	cpacr &= ~(CPACR_ASEDIS | CPACR_D32DIS);
	/*
	 * Enable usage of CP10 and CP11 (SIMD/VFP) (both kernel and user
	 * mode).
	 */
	cpacr |= CPACR_CP(10, CPACR_CP_ACCESS_FULL);
	cpacr |= CPACR_CP(11, CPACR_CP_ACCESS_FULL);
	write_cpacr(cpacr);
}
#endif /* ARM32 */

#ifdef ARM64
static void init_vfp_sec(void)
{
	/* Not using VFP until thread_kernel_enable_vfp() */
	vfp_disable();
}
#endif /* ARM64 */

#else /* CFG_WITH_VFP */

static void init_vfp_sec(void)
{
	/* Not using VFP */
}
#endif

#ifdef CFG_SECONDARY_INIT_CNTFRQ
static void primary_save_cntfrq(void)
{
	assert(cntfrq == 0);

	/*
	 * CNTFRQ should be initialized on the primary CPU by a
	 * previous boot stage
	 */
	cntfrq = read_cntfrq();
}

static void secondary_init_cntfrq(void)
{
	assert(cntfrq != 0);
	write_cntfrq(cntfrq);
}
#else /* CFG_SECONDARY_INIT_CNTFRQ */
static void primary_save_cntfrq(void)
{
}

static void secondary_init_cntfrq(void)
{
}
#endif

#ifdef CFG_CORE_SANITIZE_KADDRESS
static void init_run_constructors(void)
{
	const vaddr_t *ctor;

	for (ctor = &__ctor_list; ctor < &__ctor_end; ctor++)
		((void (*)(void))(*ctor))();
}
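/*
 * Background for the checks in init_asan() below: OP-TEE uses the standard
 * AddressSanitizer mapping where one shadow byte covers eight bytes of
 * memory, so a (simplified) shadow address is computed as
 *
 *	shadow_addr = (addr / 8) + CFG_ASAN_SHADOW_OFFSET
 *
 * The shadow area is carved out of the top of TEE_RAM_VA_SIZE (hence the
 * 8/9 factor below: eight parts sanitized memory, one part shadow), which
 * is why the offset can be computed ahead of time from TEE_RAM_START and
 * TEE_RAM_VA_SIZE alone and hard coded in the platform conf.mk.
 */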

static void init_asan(void)
{
	/*
	 * CFG_ASAN_SHADOW_OFFSET is also supplied as
	 * -fasan-shadow-offset=$(CFG_ASAN_SHADOW_OFFSET) to the compiler.
	 * Since all the values needed to calculate CFG_ASAN_SHADOW_OFFSET
	 * aren't available to make, we have to calculate it in advance and
	 * hard code it into the platform conf.mk. Here, where we do have
	 * all the needed values, we double check that the compiler was
	 * supplied the correct value.
	 */

#define __ASAN_SHADOW_START \
	ROUNDUP(TEE_RAM_START + (TEE_RAM_VA_SIZE * 8) / 9 - 8, 8)
	assert(__ASAN_SHADOW_START == (vaddr_t)&__asan_shadow_start);
#define __CFG_ASAN_SHADOW_OFFSET \
	(__ASAN_SHADOW_START - (TEE_RAM_START / 8))
	COMPILE_TIME_ASSERT(CFG_ASAN_SHADOW_OFFSET == __CFG_ASAN_SHADOW_OFFSET);
#undef __ASAN_SHADOW_START
#undef __CFG_ASAN_SHADOW_OFFSET

	/*
	 * Assign area covered by the shadow area, everything from start up
	 * to the beginning of the shadow area.
	 */
	asan_set_shadowed((void *)TEE_LOAD_ADDR, &__asan_shadow_start);

	/*
	 * Add access to areas that aren't opened automatically by a
	 * constructor.
	 */
	asan_tag_access(&__ctor_list, &__ctor_end);
	asan_tag_access(__rodata_start, __rodata_end);
#ifdef CFG_WITH_PAGER
	asan_tag_access(__pageable_start, __pageable_end);
#endif /*CFG_WITH_PAGER*/
	asan_tag_access(__nozi_start, __nozi_end);
#ifdef ARM32
	asan_tag_access(__exidx_start, __exidx_end);
	asan_tag_access(__extab_start, __extab_end);
#endif

	init_run_constructors();

	/* Everything is tagged correctly, let's start address sanitizing. */
	asan_start();
}
#else /*CFG_CORE_SANITIZE_KADDRESS*/
static void init_asan(void)
{
}
#endif /*CFG_CORE_SANITIZE_KADDRESS*/

#if defined(CFG_MEMTAG)
/* Called from entry_a64.S only when MEMTAG is configured */
void boot_init_memtag(void)
{
	paddr_t base = 0;
	paddr_size_t size = 0;

	memtag_init_ops(feat_mte_implemented());
	core_mmu_get_secure_memory(&base, &size);
	memtag_set_tags((void *)(vaddr_t)base, size, 0);
}
#endif
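/*
 * A short note on what boot_init_memtag() sets up: with MTE the logical
 * tag of a pointer lives in bits [59:56] of the address, for example
 *
 *	uint8_t tag = ((vaddr_t)ptr >> 56) & 0xf;
 *
 * memtag_set_tags() above assigns tag 0 to all secure memory so that
 * accesses through untagged pointers keep matching until other components
 * start assigning their own tags. This is only an illustration of the
 * tagged address layout, not code used by the boot flow.
 */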

#ifdef CFG_WITH_PAGER

#ifdef CFG_CORE_SANITIZE_KADDRESS
static void carve_out_asan_mem(tee_mm_pool_t *pool)
{
	const size_t s = pool->hi - pool->lo;
	tee_mm_entry_t *mm;
	paddr_t apa = ASAN_MAP_PA;
	size_t asz = ASAN_MAP_SZ;

	if (core_is_buffer_outside(apa, asz, pool->lo, s))
		return;

	/* Reserve the shadow area */
	if (!core_is_buffer_inside(apa, asz, pool->lo, s)) {
		if (apa < pool->lo) {
			/*
			 * ASAN buffer is overlapping with the beginning of
			 * the pool.
			 */
			asz -= pool->lo - apa;
			apa = pool->lo;
		} else {
			/*
			 * ASAN buffer is overlapping with the end of the
			 * pool.
			 */
			asz = pool->hi - apa;
		}
	}
	mm = tee_mm_alloc2(pool, apa, asz);
	assert(mm);
}
#else
static void carve_out_asan_mem(tee_mm_pool_t *pool __unused)
{
}
#endif

static void print_pager_pool_size(void)
{
	struct tee_pager_stats __maybe_unused stats;

	tee_pager_get_stats(&stats);
	IMSG("Pager pool size: %zukB",
	     stats.npages_all * SMALL_PAGE_SIZE / 1024);
}

static void init_vcore(tee_mm_pool_t *mm_vcore)
{
	const vaddr_t begin = VCORE_START_VA;
	size_t size = TEE_RAM_VA_SIZE;

#ifdef CFG_CORE_SANITIZE_KADDRESS
	/* Carve out asan memory, flat mapped after core memory */
	if (begin + size > ASAN_SHADOW_PA)
		size = ASAN_MAP_PA - begin;
#endif

	if (!tee_mm_init(mm_vcore, begin, size, SMALL_PAGE_SHIFT,
			 TEE_MM_POOL_NO_FLAGS))
		panic("tee_mm_vcore init failed");
}

/*
 * With CFG_CORE_ASLR=y the init part is relocated very early during boot.
 * The init part is also paged just as the rest of the normal paged code, with
 * the difference that it's preloaded during boot. When the backing store
 * is configured the entire paged binary is copied in place and then also
 * the init part. Since the init part has been relocated (references to
 * addresses updated to compensate for the new load address) this has to be
 * undone for the hashes of those pages to match with the original binary.
 *
 * If CFG_CORE_ASLR=n, nothing needs to be done as the code/ro pages are
 * unchanged.
 */
static void undo_init_relocation(uint8_t *paged_store __maybe_unused)
{
#ifdef CFG_CORE_ASLR
	unsigned long *ptr = NULL;
	const uint32_t *reloc = NULL;
	const uint32_t *reloc_end = NULL;
	unsigned long offs = boot_mmu_config.map_offset;
	const struct boot_embdata *embdata = (const void *)__init_end;
	vaddr_t addr_end = (vaddr_t)__init_end - offs - TEE_LOAD_ADDR;
	vaddr_t addr_start = (vaddr_t)__init_start - offs - TEE_LOAD_ADDR;

	reloc = (const void *)((vaddr_t)embdata + embdata->reloc_offset);
	reloc_end = reloc + embdata->reloc_len / sizeof(*reloc);

	for (; reloc < reloc_end; reloc++) {
		if (*reloc < addr_start)
			continue;
		if (*reloc >= addr_end)
			break;
		ptr = (void *)(paged_store + *reloc - addr_start);
		*ptr -= offs;
	}
#endif
}
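/*
 * To spell out what the loop above reverts: each 32-bit relocation entry
 * is the offset, relative to the unrelocated load address, of a location
 * holding an absolute address. Applying ASLR added
 * boot_mmu_config.map_offset to every such location, so reverting one
 * entry is conceptually just
 *
 *	unsigned long *p = (void *)(paged_store + *reloc - addr_start);
 *
 *	*p -= boot_mmu_config.map_offset;
 *
 * which restores the bytes to what the hashed binary contains.
 */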

static struct fobj *ro_paged_alloc(tee_mm_entry_t *mm, void *hashes,
				   void *store)
{
	const unsigned int num_pages = tee_mm_get_bytes(mm) / SMALL_PAGE_SIZE;
#ifdef CFG_CORE_ASLR
	unsigned int reloc_offs = (vaddr_t)__pageable_start - VCORE_START_VA;
	const struct boot_embdata *embdata = (const void *)__init_end;
	const void *reloc = __init_end + embdata->reloc_offset;

	return fobj_ro_reloc_paged_alloc(num_pages, hashes, reloc_offs,
					 reloc, embdata->reloc_len, store);
#else
	return fobj_ro_paged_alloc(num_pages, hashes, store);
#endif
}

static void init_runtime(unsigned long pageable_part)
{
	size_t n;
	size_t init_size = (size_t)(__init_end - __init_start);
	size_t pageable_start = (size_t)__pageable_start;
	size_t pageable_end = (size_t)__pageable_end;
	size_t pageable_size = pageable_end - pageable_start;
	vaddr_t tzsram_end = TZSRAM_BASE + TZSRAM_SIZE - TEE_LOAD_ADDR +
			     VCORE_START_VA;
	size_t hash_size = (pageable_size / SMALL_PAGE_SIZE) *
			   TEE_SHA256_HASH_SIZE;
	const struct boot_embdata *embdata = (const void *)__init_end;
	const void *tmp_hashes = NULL;
	tee_mm_entry_t *mm = NULL;
	struct fobj *fobj = NULL;
	uint8_t *paged_store = NULL;
	uint8_t *hashes = NULL;

	assert(pageable_size % SMALL_PAGE_SIZE == 0);
	assert(embdata->total_len >= embdata->hashes_offset +
				     embdata->hashes_len);
	assert(hash_size == embdata->hashes_len);

	tmp_hashes = __init_end + embdata->hashes_offset;

	init_asan();

	/* Add heap2 first as heap1 may be too small as initial bget pool */
	malloc_add_pool(__heap2_start, __heap2_end - __heap2_start);
	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);

	/*
	 * This needs to be initialized early to support address lookup
	 * in MEM_AREA_TEE_RAM
	 */
	tee_pager_early_init();

	hashes = malloc(hash_size);
	IMSG_RAW("\n");
	IMSG("Pager is enabled. Hashes: %zu bytes", hash_size);
	assert(hashes);
	asan_memcpy_unchecked(hashes, tmp_hashes, hash_size);

	/*
	 * Need tee_mm_sec_ddr initialized to be able to allocate secure
	 * DDR below.
	 */
	core_mmu_init_ta_ram();

	carve_out_asan_mem(&tee_mm_sec_ddr);

	mm = tee_mm_alloc(&tee_mm_sec_ddr, pageable_size);
	assert(mm);
	paged_store = phys_to_virt(tee_mm_get_smem(mm), MEM_AREA_TA_RAM,
				   pageable_size);
	/*
	 * Load the pageable part in the dedicated allocated area:
	 * - Move the pageable non-init part into the pageable area. Note
	 *   that the bootloader may have loaded it anywhere in TA RAM,
	 *   hence use memmove().
	 * - Copy the pageable init part from its current location into the
	 *   pageable area.
	 */
	memmove(paged_store + init_size,
		phys_to_virt(pageable_part,
			     core_mmu_get_type_by_pa(pageable_part),
			     __pageable_part_end - __pageable_part_start),
		__pageable_part_end - __pageable_part_start);
	asan_memcpy_unchecked(paged_store, __init_start, init_size);
	/*
	 * Undo eventual relocation for the init part so the hash checks
	 * can pass.
	 */
	undo_init_relocation(paged_store);

	/* Check that the hashes of what's in the pageable area are OK */
	DMSG("Checking hashes of pageable area");
	for (n = 0; (n * SMALL_PAGE_SIZE) < pageable_size; n++) {
		const uint8_t *hash = hashes + n * TEE_SHA256_HASH_SIZE;
		const uint8_t *page = paged_store + n * SMALL_PAGE_SIZE;
		TEE_Result res;

		DMSG("hash pg_idx %zu hash %p page %p", n, hash, page);
		res = hash_sha256_check(hash, page, SMALL_PAGE_SIZE);
		if (res != TEE_SUCCESS) {
			EMSG("Hash failed for page %zu at %p: res 0x%x",
			     n, (void *)page, res);
			panic();
		}
	}

	/*
	 * Assert that the prepaged init sections are page aligned so that
	 * nothing trails uninitialized at the end of the premapped init
	 * area.
	 */
	assert(!(init_size & SMALL_PAGE_MASK));

	/*
	 * Initialize the virtual memory pool used for main_mmu_l2_ttb which
	 * is supplied to tee_pager_init() below.
	 */
	init_vcore(&tee_mm_vcore);

	/*
	 * Assign the alias area for the pager at the end of the small page
	 * block the rest of the binary is loaded into. We're taking more
	 * than needed, but we're guaranteed to not need more than the
	 * physical amount of TZSRAM.
	 */
	mm = tee_mm_alloc2(&tee_mm_vcore,
			   (vaddr_t)tee_mm_vcore.lo +
			   tee_mm_vcore.size - TZSRAM_SIZE,
			   TZSRAM_SIZE);
	assert(mm);
	tee_pager_set_alias_area(mm);

	/*
	 * Claim virtual memory which isn't paged.
	 * Linear memory (flat map core memory) ends there.
	 */
	mm = tee_mm_alloc2(&tee_mm_vcore, VCORE_UNPG_RX_PA,
			   (vaddr_t)__pageable_start - VCORE_UNPG_RX_PA);
	assert(mm);

	/*
	 * Allocate virtual memory for the pageable area and let the pager
	 * take charge of all the pages already assigned to that memory.
	 */
	mm = tee_mm_alloc2(&tee_mm_vcore, (vaddr_t)__pageable_start,
			   pageable_size);
	assert(mm);
	fobj = ro_paged_alloc(mm, hashes, paged_store);
	assert(fobj);
	tee_pager_add_core_region(tee_mm_get_smem(mm), PAGED_REGION_TYPE_RO,
				  fobj);
	fobj_put(fobj);

	tee_pager_add_pages(pageable_start, init_size / SMALL_PAGE_SIZE,
			    false);
	tee_pager_add_pages(pageable_start + init_size,
			    (pageable_size - init_size) / SMALL_PAGE_SIZE,
			    true);
	if (pageable_end < tzsram_end)
		tee_pager_add_pages(pageable_end, (tzsram_end - pageable_end) /
						  SMALL_PAGE_SIZE, true);

	/*
	 * There may be physical pages in TZSRAM before the core load address.
	 * These pages can be added to the physical pages pool of the pager.
	 * This setup may happen when the secure bootloader runs in TZRAM
	 * and its memory can be reused by OP-TEE once boot stages complete.
	 */
	tee_pager_add_pages(tee_mm_vcore.lo,
			    (VCORE_UNPG_RX_PA - tee_mm_vcore.lo) /
			    SMALL_PAGE_SIZE,
			    true);

	print_pager_pool_size();
}
#else

static void init_runtime(unsigned long pageable_part __unused)
{
	init_asan();

	/*
	 * By default, the whole of OP-TEE uses malloc, so we need to
	 * initialize it early. But when virtualization is enabled, malloc
	 * is used only by the TEE runtime, so malloc should be initialized
	 * later, for every virtual partition separately. Core code uses
	 * nex_malloc instead.
	 */
#ifdef CFG_NS_VIRTUALIZATION
	nex_malloc_add_pool(__nex_heap_start, __nex_heap_end -
					      __nex_heap_start);
#else
	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
#endif

	IMSG_RAW("\n");
}
#endif

#if defined(CFG_DT)
static int add_optee_dt_node(struct dt_descriptor *dt)
{
	int offs;
	int ret;

	if (fdt_path_offset(dt->blob, "/firmware/optee") >= 0) {
		DMSG("OP-TEE Device Tree node already exists!");
		return 0;
	}

	offs = fdt_path_offset(dt->blob, "/firmware");
	if (offs < 0) {
		offs = add_dt_path_subnode(dt, "/", "firmware");
		if (offs < 0)
			return -1;
	}

	offs = fdt_add_subnode(dt->blob, offs, "optee");
	if (offs < 0)
		return -1;

	ret = fdt_setprop_string(dt->blob, offs, "compatible",
				 "linaro,optee-tz");
	if (ret < 0)
		return -1;
	ret = fdt_setprop_string(dt->blob, offs, "method", "smc");
	if (ret < 0)
		return -1;

	if (CFG_CORE_ASYNC_NOTIF_GIC_INTID) {
		/*
		 * The format of the interrupt property is defined by the
		 * binding of the interrupt domain root. In this case it's
		 * an Arm GIC v1, v2 or v3, so we must be compatible with
		 * these.
		 *
		 * An SPI type of interrupt is indicated with a 0 in the
		 * first cell. A PPI type is indicated with the value 1.
		 *
		 * The interrupt number goes in the second cell where
		 * SPIs range from 0 to 987 and PPIs range from 0 to 15.
		 *
		 * Flags are passed in the third cell.
		 */
		uint32_t itr_trigger = 0;
		uint32_t itr_type = 0;
		uint32_t itr_id = 0;
		uint32_t val[3] = { };

		/* PPIs are visible only in the current CPU cluster */
		static_assert(!CFG_CORE_ASYNC_NOTIF_GIC_INTID ||
			      (CFG_CORE_ASYNC_NOTIF_GIC_INTID >=
			       GIC_SPI_BASE) ||
			      ((CFG_TEE_CORE_NB_CORE <= 8) &&
			       (CFG_CORE_ASYNC_NOTIF_GIC_INTID >=
				GIC_PPI_BASE)));

		if (CFG_CORE_ASYNC_NOTIF_GIC_INTID >= GIC_SPI_BASE) {
			itr_type = GIC_SPI;
			itr_id = CFG_CORE_ASYNC_NOTIF_GIC_INTID - GIC_SPI_BASE;
			itr_trigger = IRQ_TYPE_EDGE_RISING;
		} else {
			itr_type = GIC_PPI;
			itr_id = CFG_CORE_ASYNC_NOTIF_GIC_INTID - GIC_PPI_BASE;
			itr_trigger = IRQ_TYPE_EDGE_RISING |
				      GIC_CPU_MASK_SIMPLE(CFG_TEE_CORE_NB_CORE);
		}

		val[0] = TEE_U32_TO_BIG_ENDIAN(itr_type);
		val[1] = TEE_U32_TO_BIG_ENDIAN(itr_id);
		val[2] = TEE_U32_TO_BIG_ENDIAN(itr_trigger);

		ret = fdt_setprop(dt->blob, offs, "interrupts", val,
				  sizeof(val));
		if (ret < 0)
			return -1;
	}
	return 0;
}
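/*
 * For illustration only: assuming CFG_CORE_ASYNC_NOTIF_GIC_INTID selects
 * an SPI, the node created above is roughly equivalent to this device tree
 * fragment (the interrupt number 8 is just an example value):
 *
 *	firmware {
 *		optee {
 *			compatible = "linaro,optee-tz";
 *			method = "smc";
 *			interrupts = <GIC_SPI 8 IRQ_TYPE_EDGE_RISING>;
 *		};
 *	};
 */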

#ifdef CFG_PSCI_ARM32
static int append_psci_compatible(void *fdt, int offs, const char *str)
{
	return fdt_appendprop(fdt, offs, "compatible", str, strlen(str) + 1);
}

static int dt_add_psci_node(struct dt_descriptor *dt)
{
	int offs;

	if (fdt_path_offset(dt->blob, "/psci") >= 0) {
		DMSG("PSCI Device Tree node already exists!");
		return 0;
	}

	offs = add_dt_path_subnode(dt, "/", "psci");
	if (offs < 0)
		return -1;
	if (append_psci_compatible(dt->blob, offs, "arm,psci-1.0"))
		return -1;
	if (append_psci_compatible(dt->blob, offs, "arm,psci-0.2"))
		return -1;
	if (append_psci_compatible(dt->blob, offs, "arm,psci"))
		return -1;
	if (fdt_setprop_string(dt->blob, offs, "method", "smc"))
		return -1;
	if (fdt_setprop_u32(dt->blob, offs, "cpu_suspend", PSCI_CPU_SUSPEND))
		return -1;
	if (fdt_setprop_u32(dt->blob, offs, "cpu_off", PSCI_CPU_OFF))
		return -1;
	if (fdt_setprop_u32(dt->blob, offs, "cpu_on", PSCI_CPU_ON))
		return -1;
	if (fdt_setprop_u32(dt->blob, offs, "sys_poweroff", PSCI_SYSTEM_OFF))
		return -1;
	if (fdt_setprop_u32(dt->blob, offs, "sys_reset", PSCI_SYSTEM_RESET))
		return -1;
	return 0;
}

static int check_node_compat_prefix(struct dt_descriptor *dt, int offs,
				    const char *prefix)
{
	const size_t prefix_len = strlen(prefix);
	size_t l;
	int plen;
	const char *prop;

	prop = fdt_getprop(dt->blob, offs, "compatible", &plen);
	if (!prop)
		return -1;

	while (plen > 0) {
		if (memcmp(prop, prefix, prefix_len) == 0)
			return 0; /* match */

		l = strlen(prop) + 1;
		prop += l;
		plen -= l;
	}

	return -1;
}

static int dt_add_psci_cpu_enable_methods(struct dt_descriptor *dt)
{
	int offs = 0;

	while (1) {
		offs = fdt_next_node(dt->blob, offs, NULL);
		if (offs < 0)
			break;
		if (fdt_getprop(dt->blob, offs, "enable-method", NULL))
			continue; /* already set */
		if (check_node_compat_prefix(dt, offs, "arm,cortex-a"))
			continue; /* no compatible */
		if (fdt_setprop_string(dt->blob, offs, "enable-method", "psci"))
			return -1;
		/* Need to restart scanning as offsets may have changed */
		offs = 0;
	}
	return 0;
}

static int config_psci(struct dt_descriptor *dt)
{
	if (dt_add_psci_node(dt))
		return -1;
	return dt_add_psci_cpu_enable_methods(dt);
}
#else
static int config_psci(struct dt_descriptor *dt __unused)
{
	return 0;
}
#endif /*CFG_PSCI_ARM32*/

#ifdef CFG_CORE_DYN_SHM
static uint64_t get_dt_val_and_advance(const void *data, size_t *offs,
				       uint32_t cell_size)
{
	uint64_t rv = 0;

	if (cell_size == 1) {
		uint32_t v;

		memcpy(&v, (const uint8_t *)data + *offs, sizeof(v));
		*offs += sizeof(v);
		rv = fdt32_to_cpu(v);
	} else {
		uint64_t v;

		memcpy(&v, (const uint8_t *)data + *offs, sizeof(v));
		*offs += sizeof(v);
		rv = fdt64_to_cpu(v);
	}

	return rv;
}

/*
 * Find all non-secure memory from DT. Memory marked inaccessible by Secure
 * World is ignored since it could not be mapped to be used as dynamic shared
 * memory.
 */
static int get_nsec_memory_helper(void *fdt, struct core_mmu_phys_mem *mem)
{
	const uint8_t *prop = NULL;
	uint64_t a = 0;
	uint64_t l = 0;
	size_t prop_offs = 0;
	size_t prop_len = 0;
	int elems_total = 0;
	int addr_size = 0;
	int len_size = 0;
	int offs = 0;
	size_t n = 0;
	int len = 0;

	addr_size = fdt_address_cells(fdt, 0);
	if (addr_size < 0)
		return 0;

	len_size = fdt_size_cells(fdt, 0);
	if (len_size < 0)
		return 0;

	while (true) {
		offs = fdt_node_offset_by_prop_value(fdt, offs, "device_type",
						     "memory",
						     sizeof("memory"));
		if (offs < 0)
			break;

		if (fdt_get_status(fdt, offs) != (DT_STATUS_OK_NSEC |
						  DT_STATUS_OK_SEC))
			continue;

		prop = fdt_getprop(fdt, offs, "reg", &len);
		if (!prop)
			continue;

		prop_len = len;
		for (n = 0, prop_offs = 0; prop_offs < prop_len; n++) {
			a = get_dt_val_and_advance(prop, &prop_offs, addr_size);
			if (prop_offs >= prop_len) {
				n--;
				break;
			}

			l = get_dt_val_and_advance(prop, &prop_offs, len_size);
			if (mem) {
				mem->type = MEM_AREA_DDR_OVERALL;
				mem->addr = a;
				mem->size = l;
				mem++;
			}
		}

		elems_total += n;
	}

	return elems_total;
}

static struct core_mmu_phys_mem *get_nsec_memory(void *fdt, size_t *nelems)
{
	struct core_mmu_phys_mem *mem = NULL;
	int elems_total = 0;

	elems_total = get_nsec_memory_helper(fdt, NULL);
	if (elems_total <= 0)
		return NULL;

	mem = nex_calloc(elems_total, sizeof(*mem));
	if (!mem)
		panic();

	elems_total = get_nsec_memory_helper(fdt, mem);
	assert(elems_total > 0);

	*nelems = elems_total;

	return mem;
}
#endif /*CFG_CORE_DYN_SHM*/

#ifdef CFG_CORE_RESERVED_SHM
static int mark_static_shm_as_reserved(struct dt_descriptor *dt)
{
	vaddr_t shm_start;
	vaddr_t shm_end;

	core_mmu_get_mem_by_type(MEM_AREA_NSEC_SHM, &shm_start, &shm_end);
	if (shm_start != shm_end)
		return add_res_mem_dt_node(dt, "optee_shm",
					   virt_to_phys((void *)shm_start),
					   shm_end - shm_start);

	DMSG("No SHM configured");
	return -1;
}
#endif /*CFG_CORE_RESERVED_SHM*/

static int mark_tzdram_as_reserved(struct dt_descriptor *dt)
{
	return add_res_mem_dt_node(dt, "optee_core", CFG_TZDRAM_START,
				   CFG_TZDRAM_SIZE);
}

static void update_external_dt(void)
{
	struct dt_descriptor *dt = get_external_dt_desc();

	if (!dt || !dt->blob)
		return;

	if (!IS_ENABLED(CFG_CORE_FFA) && add_optee_dt_node(dt))
		panic("Failed to add OP-TEE Device Tree node");

	if (config_psci(dt))
		panic("Failed to config PSCI");

#ifdef CFG_CORE_RESERVED_SHM
	if (mark_static_shm_as_reserved(dt))
		panic("Failed to config non-secure memory");
#endif

	if (mark_tzdram_as_reserved(dt))
		panic("Failed to config secure memory");
}
#else /*CFG_DT*/
static void update_external_dt(void)
{
}

#ifdef CFG_CORE_DYN_SHM
static struct core_mmu_phys_mem *get_nsec_memory(void *fdt __unused,
						 size_t *nelems __unused)
{
	return NULL;
}
#endif /*CFG_CORE_DYN_SHM*/
#endif /*!CFG_DT*/

#if defined(CFG_CORE_FFA)
void *get_manifest_dt(void)
{
	return manifest_dt;
}

static void init_manifest_dt(unsigned long pa)
{
	void *fdt = NULL;
	int ret = 0;

	if (!pa) {
		EMSG("No manifest DT found");
		return;
	}

	fdt = core_mmu_add_mapping(MEM_AREA_MANIFEST_DT, pa, CFG_DTB_MAX_SIZE);
	if (!fdt)
		panic("Failed to map manifest DT");

	manifest_dt = fdt;

	ret = fdt_check_full(fdt, CFG_DTB_MAX_SIZE);
	if (ret < 0) {
		EMSG("Invalid manifest Device Tree at %#lx: error %d", pa,
		     ret);
		panic();
	}

	IMSG("manifest DT found");
}

static TEE_Result release_manifest_dt(void)
{
	if (!manifest_dt)
		return TEE_SUCCESS;

	if (core_mmu_remove_mapping(MEM_AREA_MANIFEST_DT, manifest_dt,
				    CFG_DTB_MAX_SIZE))
		panic("Failed to remove temporary manifest DT mapping");
	manifest_dt = NULL;

	return TEE_SUCCESS;
}

boot_final(release_manifest_dt);
#else
void *get_manifest_dt(void)
{
	return NULL;
}

static void init_manifest_dt(unsigned long pa __unused)
{
}
#endif /*CFG_CORE_FFA*/

#ifdef CFG_CORE_DYN_SHM
static void discover_nsec_memory(void)
{
	struct core_mmu_phys_mem *mem;
	const struct core_mmu_phys_mem *mem_begin = NULL;
	const struct core_mmu_phys_mem *mem_end = NULL;
	size_t nelems;
	void *fdt = get_external_dt();

	if (fdt) {
		mem = get_nsec_memory(fdt, &nelems);
		if (mem) {
			core_mmu_set_discovered_nsec_ddr(mem, nelems);
			return;
		}

		DMSG("No non-secure memory found in FDT");
	}

	mem_begin = phys_ddr_overall_begin;
	mem_end = phys_ddr_overall_end;
	nelems = mem_end - mem_begin;
	if (nelems) {
		/*
		 * Platform cannot use both register_ddr() and the now
		 * deprecated register_dynamic_shm().
		 */
		assert(phys_ddr_overall_compat_begin ==
		       phys_ddr_overall_compat_end);
	} else {
		mem_begin = phys_ddr_overall_compat_begin;
		mem_end = phys_ddr_overall_compat_end;
		nelems = mem_end - mem_begin;
		if (!nelems)
			return;
		DMSG("Warning register_dynamic_shm() is deprecated, please use register_ddr() instead");
	}

	mem = nex_calloc(nelems, sizeof(*mem));
	if (!mem)
		panic();

	memcpy(mem, mem_begin, sizeof(*mem) * nelems);
	core_mmu_set_discovered_nsec_ddr(mem, nelems);
}
#else /*CFG_CORE_DYN_SHM*/
static void discover_nsec_memory(void)
{
}
#endif /*!CFG_CORE_DYN_SHM*/

#ifdef CFG_NS_VIRTUALIZATION
static TEE_Result virt_init_heap(void)
{
	/* We need to initialize the pool for every virtual guest partition */
	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);

	return TEE_SUCCESS;
}
preinit_early(virt_init_heap);
#endif

void init_tee_runtime(void)
{
#ifndef CFG_WITH_PAGER
	/* Pager initializes TA RAM early */
	core_mmu_init_ta_ram();
#endif
	/*
	 * With virtualization we call this function when creating the
	 * OP-TEE partition instead.
	 */
	if (!IS_ENABLED(CFG_NS_VIRTUALIZATION))
		call_preinitcalls();
	call_initcalls();

	/*
	 * These two functions use crypto_rng_read() to initialize the
	 * pauth keys. Once call_initcalls() returns we're guaranteed that
	 * crypto_rng_read() is ready to be used.
	 */
	thread_init_core_local_pauth_keys();
	thread_init_thread_pauth_keys();

	/*
	 * Reinitialize canaries around the stacks with crypto_rng_read().
	 *
	 * TODO: Updating canaries when CFG_NS_VIRTUALIZATION is enabled will
	 * require synchronization between thread_check_canaries() and
	 * thread_update_canaries().
	 */
	if (!IS_ENABLED(CFG_NS_VIRTUALIZATION))
		thread_update_canaries();
}
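/*
 * For reference, the calls made from call_preinitcalls()/call_initcalls()
 * above are registered elsewhere in the core with the initcall macros from
 * <initcall.h>. A hypothetical driver, shown for illustration only, would
 * be hooked in like this:
 *
 * static TEE_Result example_driver_probe(void)
 * {
 *	// probe hardware, register services, ...
 *	return TEE_SUCCESS;
 * }
 * driver_init(example_driver_probe);
 */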

static void init_primary(unsigned long pageable_part, unsigned long nsec_entry)
{
	thread_init_core_local_stacks();
	/*
	 * Mask asynchronous exceptions before switching to the thread vector
	 * as the thread handler requires those to be masked while
	 * executing with the temporary stack. The thread subsystem also
	 * asserts that the foreign interrupts are blocked when using most of
	 * its functions.
	 */
	thread_set_exceptions(THREAD_EXCP_ALL);
	primary_save_cntfrq();
	init_vfp_sec();
	/*
	 * Pager: init_runtime() calls thread_kernel_enable_vfp() so we must
	 * set a current thread right now to avoid a chicken-and-egg problem
	 * (thread_init_boot_thread() sets the current thread but needs
	 * things set by init_runtime()).
	 */
	thread_get_core_local()->curr_thread = 0;
	init_runtime(pageable_part);

	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
		/*
		 * Virtualization: We can't initialize threads right now
		 * because threads belong to the "tee" part and will be
		 * initialized separately for each new virtual guest. So,
		 * we'll clear "curr_thread" and call it done.
		 */
		thread_get_core_local()->curr_thread = -1;
	} else {
		thread_init_boot_thread();
	}
	thread_init_primary();
	thread_init_per_cpu();
	init_sec_mon(nsec_entry);
}

static bool cpu_nmfi_enabled(void)
{
#if defined(ARM32)
	return read_sctlr() & SCTLR_NMFI;
#else
	/* Note: ARM64 does not feature non-maskable FIQ support. */
	return false;
#endif
}

/*
 * Note: this function is weak just to make it possible to exclude it from
 * the unpaged area.
 */
void __weak boot_init_primary_late(unsigned long fdt,
				   unsigned long manifest)
{
	init_external_dt(fdt);
	/*
	 * With an SPMC at S-EL2 we have saved the physical fdt address
	 * from the passed boot info.
	 */
	if (IS_ENABLED(CFG_CORE_SEL2_SPMC))
		manifest = (unsigned long)get_manifest_dt();
	init_manifest_dt(manifest);
#ifdef CFG_CORE_SEL1_SPMC
	tpm_map_log_area(get_manifest_dt());
#else
	tpm_map_log_area(get_external_dt());
#endif
	discover_nsec_memory();
	update_external_dt();
	configure_console_from_dt();

	IMSG("OP-TEE version: %s", core_v_str);
	if (IS_ENABLED(CFG_WARN_INSECURE)) {
		IMSG("WARNING: This OP-TEE configuration might be insecure!");
		IMSG("WARNING: Please check https://optee.readthedocs.io/en/latest/architecture/porting_guidelines.html");
	}
	IMSG("Primary CPU initializing");
#ifdef CFG_CORE_ASLR
	DMSG("Executing at offset %#lx with virtual load address %#"PRIxVA,
	     (unsigned long)boot_mmu_config.map_offset, VCORE_START_VA);
#endif
	if (IS_ENABLED(CFG_MEMTAG))
		DMSG("Memory tagging %s",
		     memtag_is_enabled() ? "enabled" : "disabled");

	/* Check if the platform needs the NMFI workaround */
	if (cpu_nmfi_enabled()) {
		if (!IS_ENABLED(CFG_CORE_WORKAROUND_ARM_NMFI))
			IMSG("WARNING: This ARM core has NMFI enabled, please apply workaround!");
	} else {
		if (IS_ENABLED(CFG_CORE_WORKAROUND_ARM_NMFI))
			IMSG("WARNING: This ARM core does not have NMFI enabled, no need for workaround");
	}

	boot_primary_init_intc();
	init_vfp_nsec();
	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
		IMSG("Initializing virtualization support");
		core_mmu_init_virtualization();
	} else {
		init_tee_runtime();
	}
	call_finalcalls();
	IMSG("Primary CPU switching to normal world boot");
}

static void init_secondary_helper(unsigned long nsec_entry)
{
	IMSG("Secondary CPU %zu initializing", get_core_pos());

	/*
	 * Mask asynchronous exceptions before switching to the thread vector
	 * as the thread handler requires those to be masked while
	 * executing with the temporary stack. The thread subsystem also
	 * asserts that the foreign interrupts are blocked when using most of
	 * its functions.
	 */
	thread_set_exceptions(THREAD_EXCP_ALL);

	secondary_init_cntfrq();
	thread_init_per_cpu();
	init_sec_mon(nsec_entry);
	boot_secondary_init_intc();
	init_vfp_sec();
	init_vfp_nsec();

	IMSG("Secondary CPU %zu switching to normal world boot",
	     get_core_pos());
}

/*
 * Note: this function is weak just to make it possible to exclude it from
 * the unpaged area so that it lies in the init area.
 */
void __weak boot_init_primary_early(unsigned long pageable_part,
				    unsigned long nsec_entry __maybe_unused)
{
	unsigned long e = PADDR_INVALID;

#if !defined(CFG_WITH_ARM_TRUSTED_FW)
	e = nsec_entry;
#endif

	init_primary(pageable_part, e);
}

#if defined(CFG_WITH_ARM_TRUSTED_FW)
unsigned long boot_cpu_on_handler(unsigned long a0 __maybe_unused,
				  unsigned long a1 __unused)
{
	init_secondary_helper(PADDR_INVALID);
	return 0;
}
#else
void boot_init_secondary(unsigned long nsec_entry)
{
	init_secondary_helper(nsec_entry);
}
#endif

#if defined(CFG_BOOT_SECONDARY_REQUEST)
void boot_set_core_ns_entry(size_t core_idx, uintptr_t entry,
			    uintptr_t context_id)
{
	ns_entry_contexts[core_idx].entry_point = entry;
	ns_entry_contexts[core_idx].context_id = context_id;
	dsb_ishst();
}

int boot_core_release(size_t core_idx, paddr_t entry)
{
	if (!core_idx || core_idx >= CFG_TEE_CORE_NB_CORE)
		return -1;

	ns_entry_contexts[core_idx].entry_point = entry;
	dmb();
	spin_table[core_idx] = 1;
	dsb();
	sev();

	return 0;
}

/*
 * Spin until a secondary boot request arrives, then return with the
 * secondary core entry address.
 */
struct ns_entry_context *boot_core_hpen(void)
{
#ifdef CFG_PSCI_ARM32
	return &ns_entry_contexts[get_core_pos()];
#else
	do {
		wfe();
	} while (!spin_table[get_core_pos()]);
	dmb();
	return &ns_entry_contexts[get_core_pos()];
#endif
}
#endif
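/*
 * Putting the pieces above together, as a simplified sketch rather than
 * the exact handler code: with CFG_BOOT_SECONDARY_REQUEST the normal world
 * asks OP-TEE to release a secondary core (for instance through the
 * OPTEE_SMC_BOOT_SECONDARY fast call). The handler then does, in essence,
 *
 *	boot_set_core_ns_entry(core_idx, entry, context_id);
 *	boot_core_release(core_idx, entry);
 *
 * and the woken core leaves the wfe() loop in boot_core_hpen(), picking up
 * its normal world entry point and context ID from ns_entry_contexts[].
 */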

#if defined(CFG_CORE_ASLR)
#if defined(CFG_DT)
unsigned long __weak get_aslr_seed(void *fdt)
{
	int rc = 0;
	const uint64_t *seed = NULL;
	int offs = 0;
	int len = 0;

	if (!fdt) {
		DMSG("No fdt");
		goto err;
	}

	rc = fdt_check_header(fdt);
	if (rc) {
		DMSG("Bad fdt: %d", rc);
		goto err;
	}

	offs = fdt_path_offset(fdt, "/secure-chosen");
	if (offs < 0) {
		DMSG("Cannot find /secure-chosen");
		goto err;
	}
	seed = fdt_getprop(fdt, offs, "kaslr-seed", &len);
	if (!seed || len != sizeof(*seed)) {
		DMSG("Cannot find valid kaslr-seed");
		goto err;
	}

	return fdt64_to_cpu(*seed);

err:
	/* Try platform implementation */
	return plat_get_aslr_seed();
}
#else /*!CFG_DT*/
unsigned long __weak get_aslr_seed(void *fdt __unused)
{
	/* Try platform implementation */
	return plat_get_aslr_seed();
}
#endif /*!CFG_DT*/
#endif /*CFG_CORE_ASLR*/

#if defined(CFG_CORE_FFA)
static void *get_fdt_from_boot_info(struct ffa_boot_info_header_1_1 *hdr)
{
	struct ffa_boot_info_1_1 *desc = NULL;
	uint8_t content_fmt = 0;
	uint8_t name_fmt = 0;
	void *fdt = NULL;
	int ret = 0;

	if (hdr->signature != FFA_BOOT_INFO_SIGNATURE) {
		EMSG("Bad boot info signature %#"PRIx32, hdr->signature);
		panic();
	}
	if (hdr->version != FFA_BOOT_INFO_VERSION) {
		EMSG("Bad boot info version %#"PRIx32, hdr->version);
		panic();
	}
	if (hdr->desc_count != 1) {
		EMSG("Bad boot info descriptor count %#"PRIx32,
		     hdr->desc_count);
		panic();
	}
	desc = (void *)((vaddr_t)hdr + hdr->desc_offset);
	name_fmt = desc->flags & FFA_BOOT_INFO_FLAG_NAME_FORMAT_MASK;
	if (name_fmt == FFA_BOOT_INFO_FLAG_NAME_FORMAT_STRING)
		DMSG("Boot info descriptor name \"%16s\"", desc->name);
	else if (name_fmt == FFA_BOOT_INFO_FLAG_NAME_FORMAT_UUID)
		DMSG("Boot info descriptor UUID %pUl", (void *)desc->name);
	else
		DMSG("Boot info descriptor: unknown name format %"PRIu8,
		     name_fmt);

	content_fmt = (desc->flags & FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_MASK) >>
		      FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_SHIFT;
	if (content_fmt != FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_ADDR) {
		EMSG("Bad boot info content format %"PRIu8", expected %u (address)",
		     content_fmt, FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_ADDR);
		panic();
	}

	fdt = (void *)(vaddr_t)desc->contents;
	ret = fdt_check_full(fdt, desc->size);
	if (ret < 0) {
		EMSG("Invalid Device Tree at %p: error %d", fdt, ret);
		panic();
	}
	return fdt;
}

static void get_sec_mem_from_manifest(void *fdt, paddr_t *base, size_t *size)
{
	int ret = 0;
	uint64_t num = 0;

	ret = fdt_node_check_compatible(fdt, 0, "arm,ffa-manifest-1.0");
	if (ret < 0) {
		EMSG("Invalid FF-A manifest at %p: error %d", fdt, ret);
		panic();
	}
	ret = dt_getprop_as_number(fdt, 0, "load-address", &num);
	if (ret < 0) {
		EMSG("Can't read \"load-address\" from FF-A manifest at %p: error %d",
		     fdt, ret);
		panic();
	}
	*base = num;
	/* "mem-size" is currently an undocumented extension to the spec. */
	ret = dt_getprop_as_number(fdt, 0, "mem-size", &num);
	if (ret < 0) {
		EMSG("Can't read \"mem-size\" from FF-A manifest at %p: error %d",
		     fdt, ret);
		panic();
	}
	*size = num;
}

void __weak boot_save_boot_info(void *boot_info)
{
	paddr_t base = 0;
	size_t size = 0;

	manifest_dt = get_fdt_from_boot_info(boot_info);
	if (IS_ENABLED(CFG_CORE_SEL2_SPMC) &&
	    IS_ENABLED(CFG_CORE_PHYS_RELOCATABLE)) {
		get_sec_mem_from_manifest(manifest_dt, &base, &size);
		core_mmu_set_secure_memory(base, size);
	}
}
#endif /*CFG_CORE_FFA*/
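
/*
 * For illustration, the properties read by get_sec_mem_from_manifest()
 * come from the root node of the SP's FF-A manifest, roughly like the
 * fragment below (the address and size are example values only):
 *
 *	/ {
 *		compatible = "arm,ffa-manifest-1.0";
 *		load-address = <0x0e100000>;
 *		mem-size = <0x00f00000>;
 *	};
 */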