// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2015-2023, Linaro Limited
 * Copyright (c) 2023, Arm Limited
 */

#include <arm.h>
#include <assert.h>
#include <compiler.h>
#include <config.h>
#include <console.h>
#include <crypto/crypto.h>
#include <drivers/gic.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <ffa.h>
#include <initcall.h>
#include <inttypes.h>
#include <io.h>
#include <keep.h>
#include <kernel/asan.h>
#include <kernel/boot.h>
#include <kernel/dt.h>
#include <kernel/linker.h>
#include <kernel/misc.h>
#include <kernel/panic.h>
#include <kernel/tee_misc.h>
#include <kernel/thread.h>
#include <kernel/tpm.h>
#include <kernel/transfer_list.h>
#include <libfdt.h>
#include <malloc.h>
#include <memtag.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <mm/fobj.h>
#include <mm/phys_mem.h>
#include <mm/tee_mm.h>
#include <mm/tee_pager.h>
#include <sm/psci.h>
#include <trace.h>
#include <utee_defines.h>
#include <util.h>

#include <platform_config.h>

#if !defined(CFG_WITH_ARM_TRUSTED_FW)
#include <sm/sm.h>
#endif

#if defined(CFG_WITH_VFP)
#include <kernel/vfp.h>
#endif

/*
 * In this file we're using unsigned long to represent physical pointers as
 * they are received in a single register when OP-TEE is initially entered.
 * This limits 32-bit systems to only make use of the lower 32 bits of a
 * physical address for initial parameters.
 *
 * 64-bit systems on the other hand can use full 64-bit physical pointers.
 */
#define PADDR_INVALID		ULONG_MAX

#if defined(CFG_BOOT_SECONDARY_REQUEST)
struct ns_entry_context {
	uintptr_t entry_point;
	uintptr_t context_id;
};
struct ns_entry_context ns_entry_contexts[CFG_TEE_CORE_NB_CORE];
static uint32_t spin_table[CFG_TEE_CORE_NB_CORE];
#endif

#ifdef CFG_BOOT_SYNC_CPU
/*
 * Array used when booting to synchronize the CPUs.
 * When 0, the CPU has not started.
 * When 1, it has started.
 */
uint32_t sem_cpu_sync[CFG_TEE_CORE_NB_CORE];
DECLARE_KEEP_PAGER(sem_cpu_sync);
#endif

static unsigned long boot_arg_fdt __nex_bss;
static unsigned long boot_arg_nsec_entry __nex_bss;
static unsigned long boot_arg_pageable_part __nex_bss;
static unsigned long boot_arg_transfer_list __nex_bss;
static struct transfer_list_header *mapped_tl __nex_bss;

#ifdef CFG_SECONDARY_INIT_CNTFRQ
static uint32_t cntfrq;
#endif

/* May be overridden in plat-$(PLATFORM)/main.c */
__weak void plat_primary_init_early(void)
{
}
DECLARE_KEEP_PAGER(plat_primary_init_early);

/* May be overridden in plat-$(PLATFORM)/main.c */
__weak void boot_primary_init_intc(void)
{
}

/* May be overridden in plat-$(PLATFORM)/main.c */
__weak void boot_secondary_init_intc(void)
{
}

/* May be overridden in plat-$(PLATFORM)/main.c */
__weak unsigned long plat_get_aslr_seed(void)
{
	DMSG("Warning: no ASLR seed");

	return 0;
}

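/*
 * Stack canary layout produced by plat_get_random_stack_canaries() below:
 * buf holds ncan canaries of size bytes each, canary i starting at
 * buf + i * size. Byte 0 of every canary is forced to zero so that string
 * functions running off the end of an adjacent buffer stop before they
 * can leak or overwrite the remaining random bytes.
 */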
#if defined(_CFG_CORE_STACK_PROTECTOR) || defined(CFG_WITH_STACK_CANARIES)
/* Generate random stack canary values on boot up */
__weak void plat_get_random_stack_canaries(void *buf, size_t ncan, size_t size)
{
	TEE_Result ret = TEE_ERROR_GENERIC;
	size_t i = 0;

	assert(buf && ncan && size);

	/*
	 * With virtualization the RNG is not initialized in the Nexus core,
	 * so this needs to be overridden with a platform specific
	 * implementation.
	 */
	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
		IMSG("WARNING: Using fixed value for stack canary");
		memset(buf, 0xab, ncan * size);
		goto out;
	}

	ret = crypto_rng_read(buf, ncan * size);
	if (ret != TEE_SUCCESS)
		panic("Failed to generate random stack canary");

out:
	/* Leave a null byte in each canary to prevent string-based exploits */
	for (i = 0; i < ncan; i++)
		*((uint8_t *)buf + size * i) = 0;
}
#endif /* _CFG_CORE_STACK_PROTECTOR || CFG_WITH_STACK_CANARIES */

/*
 * This function is called as a guard after each smc call which is not
 * supposed to return.
 */
void __panic_at_smc_return(void)
{
	panic();
}

#if defined(CFG_WITH_ARM_TRUSTED_FW)
void init_sec_mon(unsigned long nsec_entry __maybe_unused)
{
	assert(nsec_entry == PADDR_INVALID);
	/* Do nothing as we don't have a secure monitor */
}
#else
/* May be overridden in plat-$(PLATFORM)/main.c */
__weak void init_sec_mon(unsigned long nsec_entry)
{
	struct sm_nsec_ctx *nsec_ctx;

	assert(nsec_entry != PADDR_INVALID);

	/* Initialize secure monitor */
	nsec_ctx = sm_get_nsec_ctx();
	nsec_ctx->mon_lr = nsec_entry;
	nsec_ctx->mon_spsr = CPSR_MODE_SVC | CPSR_I;
	if (nsec_entry & 1)
		nsec_ctx->mon_spsr |= CPSR_T;
}
#endif

#if defined(CFG_WITH_ARM_TRUSTED_FW)
static void init_vfp_nsec(void)
{
}
#else
static void init_vfp_nsec(void)
{
	/* Normal world can use CP10 and CP11 (SIMD/VFP) */
	write_nsacr(read_nsacr() | NSACR_CP10 | NSACR_CP11);
}
#endif

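/*
 * Sanity check of the Cryptographic Extensions: every CFG_CRYPTO_*_ARM_CE
 * option selected at build time must be matched by the corresponding
 * instructions in the CPU, otherwise the core panics here instead of
 * taking an undefined instruction exception on the first accelerated
 * crypto operation.
 */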
static void check_crypto_extensions(void)
{
	bool ce_supported = true;

	if (!feat_aes_implemented() &&
	    IS_ENABLED(CFG_CRYPTO_AES_ARM_CE)) {
		EMSG("AES instructions are not supported");
		ce_supported = false;
	}

	if (!feat_sha1_implemented() &&
	    IS_ENABLED(CFG_CRYPTO_SHA1_ARM_CE)) {
		EMSG("SHA1 instructions are not supported");
		ce_supported = false;
	}

	if (!feat_sha256_implemented() &&
	    IS_ENABLED(CFG_CRYPTO_SHA256_ARM_CE)) {
		EMSG("SHA256 instructions are not supported");
		ce_supported = false;
	}

	/* Check aarch64 specific instructions */
	if (IS_ENABLED(CFG_ARM64_core)) {
		if (!feat_sha512_implemented() &&
		    IS_ENABLED(CFG_CRYPTO_SHA512_ARM_CE)) {
			EMSG("SHA512 instructions are not supported");
			ce_supported = false;
		}

		if (!feat_sha3_implemented() &&
		    IS_ENABLED(CFG_CRYPTO_SHA3_ARM_CE)) {
			EMSG("SHA3 instructions are not supported");
			ce_supported = false;
		}

		if (!feat_sm3_implemented() &&
		    IS_ENABLED(CFG_CRYPTO_SM3_ARM_CE)) {
			EMSG("SM3 instructions are not supported");
			ce_supported = false;
		}

		if (!feat_sm4_implemented() &&
		    IS_ENABLED(CFG_CRYPTO_SM4_ARM_CE)) {
			EMSG("SM4 instructions are not supported");
			ce_supported = false;
		}
	}

	if (!ce_supported)
		panic("HW doesn't support CE instructions");
}

#if defined(CFG_WITH_VFP)

#ifdef ARM32
static void init_vfp_sec(void)
{
	uint32_t cpacr = read_cpacr();

	/*
	 * Enable Advanced SIMD functionality.
	 * Enable use of D16-D31 of the Floating-point Extension register
	 * file.
	 */
	cpacr &= ~(CPACR_ASEDIS | CPACR_D32DIS);
	/*
	 * Enable usage of CP10 and CP11 (SIMD/VFP) (both kernel and user
	 * mode).
	 */
	cpacr |= CPACR_CP(10, CPACR_CP_ACCESS_FULL);
	cpacr |= CPACR_CP(11, CPACR_CP_ACCESS_FULL);
	write_cpacr(cpacr);
}
#endif /* ARM32 */

#ifdef ARM64
static void init_vfp_sec(void)
{
	/* Not using VFP until thread_kernel_enable_vfp() */
	vfp_disable();
}
#endif /* ARM64 */

#else /* CFG_WITH_VFP */

static void init_vfp_sec(void)
{
	/* Not using VFP */
}
#endif

#ifdef CFG_SECONDARY_INIT_CNTFRQ
static void primary_save_cntfrq(void)
{
	assert(cntfrq == 0);

	/*
	 * CNTFRQ should be initialized on the primary CPU by a
	 * previous boot stage
	 */
	cntfrq = read_cntfrq();
}

static void secondary_init_cntfrq(void)
{
	assert(cntfrq != 0);
	write_cntfrq(cntfrq);
}
#else /* CFG_SECONDARY_INIT_CNTFRQ */
static void primary_save_cntfrq(void)
{
}

static void secondary_init_cntfrq(void)
{
}
#endif

#ifdef CFG_CORE_SANITIZE_KADDRESS
static void init_run_constructors(void)
{
	const vaddr_t *ctor;

	for (ctor = &__ctor_list; ctor < &__ctor_end; ctor++)
		((void (*)(void))(*ctor))();
}

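/*
 * ASan uses the usual 1/8 shadow mapping: one shadow byte tracks the state
 * of eight bytes of core memory, i.e. roughly
 * shadow_addr = (addr / 8) + CFG_ASAN_SHADOW_OFFSET. init_asan() below
 * double checks that the offset hard coded in conf.mk matches the
 * linker-provided __asan_shadow_start before tagging the statically mapped
 * sections and running the constructors.
 */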
static void init_asan(void)
{

	/*
	 * CFG_ASAN_SHADOW_OFFSET is also supplied as
	 * -fasan-shadow-offset=$(CFG_ASAN_SHADOW_OFFSET) to the compiler.
	 * Since all the values needed to calculate CFG_ASAN_SHADOW_OFFSET
	 * aren't available to make, we have to calculate it in advance and
	 * hard code it into the platform conf.mk. Here, where we have all
	 * the needed values, we double check that the compiler was supplied
	 * the correct value.
	 */

#define __ASAN_SHADOW_START \
	ROUNDUP(TEE_RAM_START + (TEE_RAM_VA_SIZE * 8) / 9 - 8, 8)
	assert(__ASAN_SHADOW_START == (vaddr_t)&__asan_shadow_start);
#define __CFG_ASAN_SHADOW_OFFSET \
	(__ASAN_SHADOW_START - (TEE_RAM_START / 8))
	COMPILE_TIME_ASSERT(CFG_ASAN_SHADOW_OFFSET == __CFG_ASAN_SHADOW_OFFSET);
#undef __ASAN_SHADOW_START
#undef __CFG_ASAN_SHADOW_OFFSET

	/*
	 * Assign the area covered by the shadow memory: everything from
	 * the load address up to the beginning of the shadow area.
	 */
	asan_set_shadowed((void *)TEE_LOAD_ADDR, &__asan_shadow_start);

	/*
	 * Add access to areas that aren't opened automatically by a
	 * constructor.
	 */
	asan_tag_access(&__ctor_list, &__ctor_end);
	asan_tag_access(__rodata_start, __rodata_end);
#ifdef CFG_WITH_PAGER
	asan_tag_access(__pageable_start, __pageable_end);
#endif /*CFG_WITH_PAGER*/
	asan_tag_access(__nozi_start, __nozi_end);
#ifdef ARM32
	asan_tag_access(__exidx_start, __exidx_end);
	asan_tag_access(__extab_start, __extab_end);
#endif

	init_run_constructors();

	/* Everything is tagged correctly, let's start address sanitizing. */
	asan_start();
}
#else /*CFG_CORE_SANITIZE_KADDRESS*/
static void init_asan(void)
{
}
#endif /*CFG_CORE_SANITIZE_KADDRESS*/

#if defined(CFG_MEMTAG)
/* Called from entry_a64.S only when MEMTAG is configured */
void boot_init_memtag(void)
{
	memtag_init_ops(feat_mte_implemented());
}

static TEE_Result mmap_clear_memtag(struct tee_mmap_region *map,
				    void *ptr __unused)
{
	switch (map->type) {
	case MEM_AREA_TEE_RAM:
	case MEM_AREA_TEE_RAM_RW:
	case MEM_AREA_NEX_RAM_RO:
	case MEM_AREA_NEX_RAM_RW:
	case MEM_AREA_TEE_ASAN:
	case MEM_AREA_TA_RAM:
		DMSG("Clearing tags for VA %#"PRIxVA"..%#"PRIxVA,
		     map->va, map->va + map->size - 1);
		memtag_set_tags((void *)map->va, map->size, 0);
		break;
	default:
		break;
	}

	return TEE_SUCCESS;
}

/* Called from entry_a64.S only when MEMTAG is configured */
void boot_clear_memtag(void)
{
	core_mmu_for_each_map(NULL, mmap_clear_memtag);
}
#endif

#ifdef CFG_WITH_PAGER

#ifdef CFG_CORE_SANITIZE_KADDRESS
static void carve_out_asan_mem(void)
{
	nex_phys_mem_partial_carve_out(ASAN_MAP_PA, ASAN_MAP_SZ);
}
#else
static void carve_out_asan_mem(void)
{
}
#endif

static void print_pager_pool_size(void)
{
	struct tee_pager_stats __maybe_unused stats;

	tee_pager_get_stats(&stats);
	IMSG("Pager pool size: %zukB",
	     stats.npages_all * SMALL_PAGE_SIZE / 1024);
}

static void init_virt_pool(tee_mm_pool_t *virt_pool)
{
	const vaddr_t begin = VCORE_START_VA;
	size_t size = TEE_RAM_VA_SIZE;

#ifdef CFG_CORE_SANITIZE_KADDRESS
	/* Carve out ASan memory, flat mapped after core memory */
	if (begin + size > ASAN_SHADOW_PA)
		size = ASAN_MAP_PA - begin;
#endif

	if (!tee_mm_init(virt_pool, begin, size, SMALL_PAGE_SHIFT,
			 TEE_MM_POOL_NO_FLAGS))
		panic("core_virt_mem_pool init failed");
}

/*
 * With CFG_CORE_ASLR=y the init part is relocated very early during boot.
 * The init part is also paged just as the rest of the normal paged code, with
 * the difference that it's preloaded during boot. When the backing store
 * is configured the entire paged binary is copied in place and then also
 * the init part. Since the init part has been relocated (references to
 * addresses updated to compensate for the new load address) this has to be
 * undone for the hashes of those pages to match with the original binary.
 *
 * If CFG_CORE_ASLR=n, nothing needs to be done as the code/ro pages are
 * unchanged.
 */
static void undo_init_relocation(uint8_t *paged_store __maybe_unused)
{
#ifdef CFG_CORE_ASLR
	unsigned long *ptr = NULL;
	const uint32_t *reloc = NULL;
	const uint32_t *reloc_end = NULL;
	unsigned long offs = boot_mmu_config.map_offset;
	const struct boot_embdata *embdata = (const void *)__init_end;
	vaddr_t addr_end = (vaddr_t)__init_end - offs - TEE_LOAD_ADDR;
	vaddr_t addr_start = (vaddr_t)__init_start - offs - TEE_LOAD_ADDR;

	reloc = (const void *)((vaddr_t)embdata + embdata->reloc_offset);
	reloc_end = reloc + embdata->reloc_len / sizeof(*reloc);

	for (; reloc < reloc_end; reloc++) {
		if (*reloc < addr_start)
			continue;
		if (*reloc >= addr_end)
			break;
		ptr = (void *)(paged_store + *reloc - addr_start);
		*ptr -= offs;
	}
#endif
}

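/*
 * Helper for the read-only paged area: with CFG_CORE_ASLR the file object
 * also gets the relocation records embedded after __init_end, presumably so
 * that relocations can be reapplied after a page has been verified against
 * its SHA-256 hash when it is paged back in.
 */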
static struct fobj *ro_paged_alloc(tee_mm_entry_t *mm, void *hashes,
				   void *store)
{
	const unsigned int num_pages = tee_mm_get_bytes(mm) / SMALL_PAGE_SIZE;
#ifdef CFG_CORE_ASLR
	unsigned int reloc_offs = (vaddr_t)__pageable_start - VCORE_START_VA;
	const struct boot_embdata *embdata = (const void *)__init_end;
	const void *reloc = __init_end + embdata->reloc_offset;

	return fobj_ro_reloc_paged_alloc(num_pages, hashes, reloc_offs,
					 reloc, embdata->reloc_len, store);
#else
	return fobj_ro_paged_alloc(num_pages, hashes, store);
#endif
}

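/*
 * Pager flavour of init_runtime(): set up the heaps, copy the paged part
 * of the binary into its backing store, verify every page against the
 * embedded SHA-256 hashes and finally hand the remaining TZSRAM pages over
 * to the pager as its page pool.
 */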
static void init_runtime(unsigned long pageable_part)
{
	size_t n;
	size_t init_size = (size_t)(__init_end - __init_start);
	size_t pageable_start = (size_t)__pageable_start;
	size_t pageable_end = (size_t)__pageable_end;
	size_t pageable_size = pageable_end - pageable_start;
	vaddr_t tzsram_end = TZSRAM_BASE + TZSRAM_SIZE - TEE_LOAD_ADDR +
			     VCORE_START_VA;
	size_t hash_size = (pageable_size / SMALL_PAGE_SIZE) *
			   TEE_SHA256_HASH_SIZE;
	const struct boot_embdata *embdata = (const void *)__init_end;
	const void *tmp_hashes = NULL;
	tee_mm_entry_t *mm = NULL;
	struct fobj *fobj = NULL;
	uint8_t *paged_store = NULL;
	uint8_t *hashes = NULL;

	assert(pageable_size % SMALL_PAGE_SIZE == 0);
	assert(embdata->total_len >= embdata->hashes_offset +
				     embdata->hashes_len);
	assert(hash_size == embdata->hashes_len);

	tmp_hashes = __init_end + embdata->hashes_offset;

	init_asan();

	/* Add heap2 first as heap1 may be too small as initial bget pool */
	malloc_add_pool(__heap2_start, __heap2_end - __heap2_start);
	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);

	/*
	 * This needs to be initialized early to support address lookup
	 * in MEM_AREA_TEE_RAM
	 */
	tee_pager_early_init();

	hashes = malloc(hash_size);
	IMSG_RAW("\n");
	IMSG("Pager is enabled. Hashes: %zu bytes", hash_size);
	assert(hashes);
	asan_memcpy_unchecked(hashes, tmp_hashes, hash_size);

	/*
	 * Need physical memory pool initialized to be able to allocate
	 * secure physical memory below.
	 */
	core_mmu_init_phys_mem();

	carve_out_asan_mem();

	mm = nex_phys_mem_ta_alloc(pageable_size);
	assert(mm);
	paged_store = phys_to_virt(tee_mm_get_smem(mm), MEM_AREA_TA_RAM,
				   pageable_size);
	/*
	 * Load the pageable part in the dedicated allocated area:
	 * - Move the pageable non-init part into the pageable area. Note
	 *   that the bootloader may have loaded it anywhere in TA RAM,
	 *   hence use memmove().
	 * - Copy the pageable init part from its current location into the
	 *   pageable area.
	 */
	memmove(paged_store + init_size,
		phys_to_virt(pageable_part,
			     core_mmu_get_type_by_pa(pageable_part),
			     __pageable_part_end - __pageable_part_start),
		__pageable_part_end - __pageable_part_start);
	asan_memcpy_unchecked(paged_store, __init_start, init_size);
	/*
	 * Undo eventual relocation for the init part so the hash checks
	 * can pass.
	 */
	undo_init_relocation(paged_store);

	/* Check that the hashes of what's in the pageable area are OK */
	DMSG("Checking hashes of pageable area");
	for (n = 0; (n * SMALL_PAGE_SIZE) < pageable_size; n++) {
		const uint8_t *hash = hashes + n * TEE_SHA256_HASH_SIZE;
		const uint8_t *page = paged_store + n * SMALL_PAGE_SIZE;
		TEE_Result res;

		DMSG("hash pg_idx %zu hash %p page %p", n, hash, page);
		res = hash_sha256_check(hash, page, SMALL_PAGE_SIZE);
		if (res != TEE_SUCCESS) {
			EMSG("Hash failed for page %zu at %p: res 0x%x",
			     n, (void *)page, res);
			panic();
		}
	}

	/*
	 * Assert that the prepaged init sections are page aligned so that
	 * nothing trails uninitialized at the end of the premapped init
	 * area.
	 */
	assert(!(init_size & SMALL_PAGE_MASK));

	/*
	 * Initialize the virtual memory pool used for main_mmu_l2_ttb which
	 * is supplied to tee_pager_init() below.
	 */
	init_virt_pool(&core_virt_mem_pool);

	/*
	 * Assign the alias area for the pager at the end of the small page
	 * block the rest of the binary is loaded into. We're taking more
	 * than needed, but we're guaranteed to not need more than the
	 * physical amount of TZSRAM.
	 */
	mm = tee_mm_alloc2(&core_virt_mem_pool,
			   (vaddr_t)core_virt_mem_pool.lo +
			   core_virt_mem_pool.size - TZSRAM_SIZE,
			   TZSRAM_SIZE);
	assert(mm);
	tee_pager_set_alias_area(mm);

	/*
	 * Claim virtual memory which isn't paged.
	 * Linear memory (flat-mapped core memory) ends there.
	 */
	mm = tee_mm_alloc2(&core_virt_mem_pool, VCORE_UNPG_RX_PA,
			   (vaddr_t)(__pageable_start - VCORE_UNPG_RX_PA));
	assert(mm);

	/*
	 * Allocate virtual memory for the pageable area and let the pager
	 * take charge of all the pages already assigned to that memory.
	 */
	mm = tee_mm_alloc2(&core_virt_mem_pool, (vaddr_t)__pageable_start,
			   pageable_size);
	assert(mm);
	fobj = ro_paged_alloc(mm, hashes, paged_store);
	assert(fobj);
	tee_pager_add_core_region(tee_mm_get_smem(mm), PAGED_REGION_TYPE_RO,
				  fobj);
	fobj_put(fobj);

	tee_pager_add_pages(pageable_start, init_size / SMALL_PAGE_SIZE, false);
	tee_pager_add_pages(pageable_start + init_size,
			    (pageable_size - init_size) / SMALL_PAGE_SIZE,
			    true);
	if (pageable_end < tzsram_end)
		tee_pager_add_pages(pageable_end, (tzsram_end - pageable_end) /
						  SMALL_PAGE_SIZE, true);

	/*
	 * There may be physical pages in TZSRAM before the core load address.
	 * These pages can be added to the physical pages pool of the pager.
	 * This setup may happen when the secure bootloader runs in TZRAM
	 * and its memory can be reused by OP-TEE once boot stages complete.
	 */
	tee_pager_add_pages(core_virt_mem_pool.lo,
			    (VCORE_UNPG_RX_PA - core_virt_mem_pool.lo) /
			    SMALL_PAGE_SIZE,
			    true);

	print_pager_pool_size();
}
#else

static void init_runtime(unsigned long pageable_part __unused)
{
	init_asan();

	/*
	 * By default the whole of OP-TEE uses malloc, so we need to
	 * initialize it early. But when virtualization is enabled, malloc
	 * is used only by the TEE runtime, so malloc should be initialized
	 * later, for every virtual partition separately. Core code uses
	 * nex_malloc instead.
	 */
#ifdef CFG_NS_VIRTUALIZATION
	nex_malloc_add_pool(__nex_heap_start, __nex_heap_end -
					      __nex_heap_start);
#else
	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
#endif

	IMSG_RAW("\n");
}
#endif

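/*
 * add_optee_dt_node() below advertises OP-TEE to the normal world by adding
 * a node along the following lines to the external DT (the "interrupts"
 * property is only emitted when CFG_CORE_ASYNC_NOTIF_GIC_INTID is set; the
 * interrupt number here is just an illustration):
 *
 *	firmware {
 *		optee {
 *			compatible = "linaro,optee-tz";
 *			method = "smc";
 *			interrupts = <GIC_SPI 162 IRQ_TYPE_EDGE_RISING>;
 *		};
 *	};
 */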
#if defined(CFG_DT)
static int add_optee_dt_node(struct dt_descriptor *dt)
{
	int offs;
	int ret;

	if (fdt_path_offset(dt->blob, "/firmware/optee") >= 0) {
		DMSG("OP-TEE Device Tree node already exists!");
		return 0;
	}

	offs = fdt_path_offset(dt->blob, "/firmware");
	if (offs < 0) {
		offs = add_dt_path_subnode(dt, "/", "firmware");
		if (offs < 0)
			return -1;
	}

	offs = fdt_add_subnode(dt->blob, offs, "optee");
	if (offs < 0)
		return -1;

	ret = fdt_setprop_string(dt->blob, offs, "compatible",
				 "linaro,optee-tz");
	if (ret < 0)
		return -1;
	ret = fdt_setprop_string(dt->blob, offs, "method", "smc");
	if (ret < 0)
		return -1;

	if (CFG_CORE_ASYNC_NOTIF_GIC_INTID) {
		/*
		 * The format of the interrupt property is defined by the
		 * binding of the interrupt domain root. In this case it's
		 * one Arm GIC v1, v2 or v3 so we must be compatible with
		 * these.
		 *
		 * An SPI type of interrupt is indicated with a 0 in the
		 * first cell. A PPI type is indicated with value 1.
		 *
		 * The interrupt number goes in the second cell where
		 * SPIs range from 0 to 987 and PPIs range from 0 to 15.
		 *
		 * Flags are passed in the third cell.
		 */
		uint32_t itr_trigger = 0;
		uint32_t itr_type = 0;
		uint32_t itr_id = 0;
		uint32_t val[3] = { };

		/* PPIs are visible only in the current CPU cluster */
		static_assert(IS_ENABLED(CFG_CORE_FFA) ||
			      !CFG_CORE_ASYNC_NOTIF_GIC_INTID ||
			      (CFG_CORE_ASYNC_NOTIF_GIC_INTID >=
			       GIC_SPI_BASE) ||
			      ((CFG_TEE_CORE_NB_CORE <= 8) &&
			       (CFG_CORE_ASYNC_NOTIF_GIC_INTID >=
				GIC_PPI_BASE)));

		if (CFG_CORE_ASYNC_NOTIF_GIC_INTID >= GIC_SPI_BASE) {
			itr_type = GIC_SPI;
			itr_id = CFG_CORE_ASYNC_NOTIF_GIC_INTID - GIC_SPI_BASE;
			itr_trigger = IRQ_TYPE_EDGE_RISING;
		} else {
			itr_type = GIC_PPI;
			itr_id = CFG_CORE_ASYNC_NOTIF_GIC_INTID - GIC_PPI_BASE;
			itr_trigger = IRQ_TYPE_EDGE_RISING |
				      GIC_CPU_MASK_SIMPLE(CFG_TEE_CORE_NB_CORE);
		}

		val[0] = TEE_U32_TO_BIG_ENDIAN(itr_type);
		val[1] = TEE_U32_TO_BIG_ENDIAN(itr_id);
		val[2] = TEE_U32_TO_BIG_ENDIAN(itr_trigger);

		ret = fdt_setprop(dt->blob, offs, "interrupts", val,
				  sizeof(val));
		if (ret < 0)
			return -1;
	}
	return 0;
}

#ifdef CFG_PSCI_ARM32
static int append_psci_compatible(void *fdt, int offs, const char *str)
{
	return fdt_appendprop(fdt, offs, "compatible", str, strlen(str) + 1);
}

static int dt_add_psci_node(struct dt_descriptor *dt)
{
	int offs;

	if (fdt_path_offset(dt->blob, "/psci") >= 0) {
		DMSG("PSCI Device Tree node already exists!");
		return 0;
	}

	offs = add_dt_path_subnode(dt, "/", "psci");
	if (offs < 0)
		return -1;
	if (append_psci_compatible(dt->blob, offs, "arm,psci-1.0"))
		return -1;
	if (append_psci_compatible(dt->blob, offs, "arm,psci-0.2"))
		return -1;
	if (append_psci_compatible(dt->blob, offs, "arm,psci"))
		return -1;
	if (fdt_setprop_string(dt->blob, offs, "method", "smc"))
		return -1;
	if (fdt_setprop_u32(dt->blob, offs, "cpu_suspend", PSCI_CPU_SUSPEND))
		return -1;
	if (fdt_setprop_u32(dt->blob, offs, "cpu_off", PSCI_CPU_OFF))
		return -1;
	if (fdt_setprop_u32(dt->blob, offs, "cpu_on", PSCI_CPU_ON))
		return -1;
	if (fdt_setprop_u32(dt->blob, offs, "sys_poweroff", PSCI_SYSTEM_OFF))
		return -1;
	if (fdt_setprop_u32(dt->blob, offs, "sys_reset", PSCI_SYSTEM_RESET))
		return -1;
	return 0;
}

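/*
 * "compatible" is a list of NUL-terminated strings; walk the list and
 * report a match if any entry starts with the given prefix, e.g.
 * "arm,cortex-a" matches "arm,cortex-a15".
 */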
static int check_node_compat_prefix(struct dt_descriptor *dt, int offs,
				    const char *prefix)
{
	const size_t prefix_len = strlen(prefix);
	size_t l;
	int plen;
	const char *prop;

	prop = fdt_getprop(dt->blob, offs, "compatible", &plen);
	if (!prop)
		return -1;

	while (plen > 0) {
		if (memcmp(prop, prefix, prefix_len) == 0)
			return 0; /* match */

		l = strlen(prop) + 1;
		prop += l;
		plen -= l;
	}

	return -1;
}

static int dt_add_psci_cpu_enable_methods(struct dt_descriptor *dt)
{
	int offs = 0;

	while (1) {
		offs = fdt_next_node(dt->blob, offs, NULL);
		if (offs < 0)
			break;
		if (fdt_getprop(dt->blob, offs, "enable-method", NULL))
			continue; /* already set */
		if (check_node_compat_prefix(dt, offs, "arm,cortex-a"))
			continue; /* no compatible */
		if (fdt_setprop_string(dt->blob, offs, "enable-method", "psci"))
			return -1;
		/* Need to restart scanning as offsets may have changed */
		offs = 0;
	}
	return 0;
}

static int config_psci(struct dt_descriptor *dt)
{
	if (dt_add_psci_node(dt))
		return -1;
	return dt_add_psci_cpu_enable_methods(dt);
}
#else
static int config_psci(struct dt_descriptor *dt __unused)
{
	return 0;
}
#endif /*CFG_PSCI_ARM32*/

static int mark_tzdram_as_reserved(struct dt_descriptor *dt)
{
	return add_res_mem_dt_node(dt, "optee_core", CFG_TZDRAM_START,
				   CFG_TZDRAM_SIZE);
}

static void update_external_dt(void)
{
	struct dt_descriptor *dt = get_external_dt_desc();

	if (!dt || !dt->blob)
		return;

	if (!IS_ENABLED(CFG_CORE_FFA) && add_optee_dt_node(dt))
		panic("Failed to add OP-TEE Device Tree node");

	if (config_psci(dt))
		panic("Failed to config PSCI");

#ifdef CFG_CORE_RESERVED_SHM
	if (mark_static_shm_as_reserved(dt))
		panic("Failed to config non-secure memory");
#endif

	if (mark_tzdram_as_reserved(dt))
		panic("Failed to config secure memory");
}
#else /*CFG_DT*/
static void update_external_dt(void)
{
}
#endif /*!CFG_DT*/

void init_tee_runtime(void)
{
#ifndef CFG_WITH_PAGER
	/* Pager initializes TA RAM early */
	core_mmu_init_phys_mem();
#endif
	/*
	 * With virtualization we call this function when creating the
	 * OP-TEE partition instead.
	 */
	if (!IS_ENABLED(CFG_NS_VIRTUALIZATION))
		call_preinitcalls();
	call_early_initcalls();
	call_service_initcalls();

	/*
	 * These two functions use crypto_rng_read() to initialize the
	 * pauth keys. Once the initcalls above have returned we're
	 * guaranteed that crypto_rng_read() is ready to be used.
	 */
	thread_init_core_local_pauth_keys();
	thread_init_thread_pauth_keys();

	/*
	 * Reinitialize the canaries around the stacks with crypto_rng_read().
	 *
	 * TODO: Updating canaries when CFG_NS_VIRTUALIZATION is enabled will
	 * require synchronization between thread_check_canaries() and
	 * thread_update_canaries().
	 */
	if (!IS_ENABLED(CFG_NS_VIRTUALIZATION))
		thread_update_canaries();
}

static void init_primary(unsigned long pageable_part, unsigned long nsec_entry)
{
	thread_init_core_local_stacks();
	/*
	 * Mask asynchronous exceptions before switching to the thread vector
	 * as the thread handler requires those to be masked while
	 * executing with the temporary stack. The thread subsystem also
	 * asserts that the foreign interrupts are blocked when using most of
	 * its functions.
	 */
	thread_set_exceptions(THREAD_EXCP_ALL);
	primary_save_cntfrq();
	init_vfp_sec();

	if (IS_ENABLED(CFG_CRYPTO_WITH_CE))
		check_crypto_extensions();

	/*
	 * Pager: init_runtime() calls thread_kernel_enable_vfp() so we must
	 * set a current thread right now to avoid a chicken-and-egg problem
	 * (thread_init_boot_thread() sets the current thread but needs
	 * things set by init_runtime()).
	 */
	thread_get_core_local()->curr_thread = 0;
	init_runtime(pageable_part);

	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
		/*
		 * Virtualization: We can't initialize threads right now
		 * because threads belong to the "tee" part and will be
		 * initialized separately for each new virtual guest. So,
		 * we'll clear "curr_thread" and call it done.
		 */
		thread_get_core_local()->curr_thread = -1;
	} else {
		thread_init_boot_thread();
	}
	thread_init_primary();
	thread_init_per_cpu();
	init_sec_mon(nsec_entry);
}

static bool cpu_nmfi_enabled(void)
{
#if defined(ARM32)
	return read_sctlr() & SCTLR_NMFI;
#else
	/* Note: ARM64 does not feature non-maskable FIQ support. */
	return false;
#endif
}

/*
 * Note: this function is weak just to make it possible to exclude it from
 * the unpaged area.
 */
void __weak boot_init_primary_late(unsigned long fdt __unused,
				   unsigned long manifest __unused)
{
	size_t fdt_size = CFG_DTB_MAX_SIZE;

	if (IS_ENABLED(CFG_TRANSFER_LIST) && mapped_tl) {
		struct transfer_list_entry *tl_e = NULL;

		tl_e = transfer_list_find(mapped_tl, TL_TAG_FDT);
		if (tl_e)
			fdt_size = tl_e->data_size;
	}

	init_external_dt(boot_arg_fdt, fdt_size);
	reinit_manifest_dt();
#ifdef CFG_CORE_SEL1_SPMC
	tpm_map_log_area(get_manifest_dt());
#else
	tpm_map_log_area(get_external_dt());
#endif
	discover_nsec_memory();
	update_external_dt();
	configure_console_from_dt();

	IMSG("OP-TEE version: %s", core_v_str);
	if (IS_ENABLED(CFG_INSECURE)) {
		IMSG("WARNING: This OP-TEE configuration might be insecure!");
		IMSG("WARNING: Please check https://optee.readthedocs.io/en/latest/architecture/porting_guidelines.html");
	}
	IMSG("Primary CPU initializing");
#ifdef CFG_CORE_ASLR
	DMSG("Executing at offset %#lx with virtual load address %#"PRIxVA,
	     (unsigned long)boot_mmu_config.map_offset, VCORE_START_VA);
#endif
	if (IS_ENABLED(CFG_MEMTAG))
		DMSG("Memory tagging %s",
		     memtag_is_enabled() ? "enabled" : "disabled");

	/* Check if the platform needs the NMFI workaround */
	if (cpu_nmfi_enabled()) {
		if (!IS_ENABLED(CFG_CORE_WORKAROUND_ARM_NMFI))
			IMSG("WARNING: This ARM core has NMFI enabled, please apply workaround!");
	} else {
		if (IS_ENABLED(CFG_CORE_WORKAROUND_ARM_NMFI))
			IMSG("WARNING: This ARM core does not have NMFI enabled, no need for workaround");
	}

	boot_primary_init_intc();
	init_vfp_nsec();
	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
		IMSG("Initializing virtualization support");
		core_mmu_init_virtualization();
	} else {
		init_tee_runtime();
	}
}

/*
 * Note: this function is weak just to make it possible to exclude it from
 * the unpaged area.
 */
void __weak boot_init_primary_final(void)
{
	if (!IS_ENABLED(CFG_NS_VIRTUALIZATION))
		call_driver_initcalls();
	call_finalcalls();
	IMSG("Primary CPU switching to normal world boot");
}

static void init_secondary_helper(unsigned long nsec_entry)
{
	IMSG("Secondary CPU %zu initializing", get_core_pos());

	/*
	 * Mask asynchronous exceptions before switching to the thread vector
	 * as the thread handler requires those to be masked while
	 * executing with the temporary stack. The thread subsystem also
	 * asserts that the foreign interrupts are blocked when using most of
	 * its functions.
	 */
	thread_set_exceptions(THREAD_EXCP_ALL);

	secondary_init_cntfrq();
	thread_init_per_cpu();
	init_sec_mon(nsec_entry);
	boot_secondary_init_intc();
	init_vfp_sec();
	init_vfp_nsec();

	IMSG("Secondary CPU %zu switching to normal world boot", get_core_pos());
}

/*
 * Note: this function is weak just to make it possible to exclude it from
 * the unpaged area so that it lies in the init area.
 */
void __weak boot_init_primary_early(void)
{
	unsigned long pageable_part = 0;
	unsigned long e = PADDR_INVALID;
	struct transfer_list_entry *tl_e = NULL;

	if (!IS_ENABLED(CFG_WITH_ARM_TRUSTED_FW))
		e = boot_arg_nsec_entry;

	if (IS_ENABLED(CFG_TRANSFER_LIST) && boot_arg_transfer_list) {
		/* Map and save the TL */
		mapped_tl = transfer_list_map(boot_arg_transfer_list);
		if (!mapped_tl)
			panic("Failed to map transfer list");

		transfer_list_dump(mapped_tl);
		tl_e = transfer_list_find(mapped_tl, TL_TAG_FDT);
		if (tl_e) {
			/*
			 * Expand the data size of the DTB entry to the
			 * maximum allocable mapped memory to reserve
			 * sufficient space for inserting new nodes and avoid
			 * potentially corrupting the entries that follow.
			 */
			uint32_t dtb_max_sz = mapped_tl->max_size -
					      mapped_tl->size + tl_e->data_size;

			if (!transfer_list_set_data_size(mapped_tl, tl_e,
							 dtb_max_sz)) {
				EMSG("Failed to extend DTB size to %#"PRIx32,
				     dtb_max_sz);
				panic();
			}
		}
		tl_e = transfer_list_find(mapped_tl, TL_TAG_OPTEE_PAGABLE_PART);
	}

	if (IS_ENABLED(CFG_WITH_PAGER)) {
		if (IS_ENABLED(CFG_TRANSFER_LIST) && tl_e)
			pageable_part =
				get_le64(transfer_list_entry_data(tl_e));
		else
			pageable_part = boot_arg_pageable_part;
	}

	init_primary(pageable_part, e);
}

static void boot_save_transfer_list(unsigned long zero_reg,
				    unsigned long transfer_list,
				    unsigned long fdt)
{
	struct transfer_list_header *tl = (void *)transfer_list;
	struct transfer_list_entry *tl_e = NULL;

	if (zero_reg != 0)
		panic("Incorrect transfer list register convention");

	if (!IS_ALIGNED_WITH_TYPE(transfer_list, struct transfer_list_header) ||
	    !IS_ALIGNED(transfer_list, TL_ALIGNMENT_FROM_ORDER(tl->alignment)))
		panic("Transfer list base address is not aligned");

	if (transfer_list_check_header(tl) == TL_OPS_NONE)
		panic("Invalid transfer list");

	tl_e = transfer_list_find(tl, TL_TAG_FDT);
	if (fdt != (unsigned long)transfer_list_entry_data(tl_e))
		panic("DT does not match to the DT entry of the TL");

	boot_arg_transfer_list = transfer_list;
}

#if defined(CFG_WITH_ARM_TRUSTED_FW)
unsigned long boot_cpu_on_handler(unsigned long a0 __maybe_unused,
				  unsigned long a1 __unused)
{
	init_secondary_helper(PADDR_INVALID);
	return 0;
}
#else
void boot_init_secondary(unsigned long nsec_entry)
{
	init_secondary_helper(nsec_entry);
}
#endif

#if defined(CFG_BOOT_SECONDARY_REQUEST)
void boot_set_core_ns_entry(size_t core_idx, uintptr_t entry,
			    uintptr_t context_id)
{
	ns_entry_contexts[core_idx].entry_point = entry;
	ns_entry_contexts[core_idx].context_id = context_id;
	dsb_ishst();
}

int boot_core_release(size_t core_idx, paddr_t entry)
{
	if (!core_idx || core_idx >= CFG_TEE_CORE_NB_CORE)
		return -1;

	ns_entry_contexts[core_idx].entry_point = entry;
	dmb();
	spin_table[core_idx] = 1;
	dsb();
	sev();

	return 0;
}

/*
 * Spin until a secondary boot request arrives, then return the secondary
 * core entry address.
 */
struct ns_entry_context *boot_core_hpen(void)
{
#ifdef CFG_PSCI_ARM32
	return &ns_entry_contexts[get_core_pos()];
#else
	do {
		wfe();
	} while (!spin_table[get_core_pos()]);
	dmb();
	return &ns_entry_contexts[get_core_pos()];
#endif
}
#endif

#if defined(CFG_CORE_ASLR)
#if defined(CFG_DT)
unsigned long __weak get_aslr_seed(void)
{
	void *fdt = NULL;
	int rc = 0;
	const uint64_t *seed = NULL;
	int offs = 0;
	int len = 0;

	if (!IS_ENABLED(CFG_CORE_SEL2_SPMC))
		fdt = (void *)boot_arg_fdt;

	if (!fdt) {
		DMSG("No fdt");
		goto err;
	}

	rc = fdt_check_header(fdt);
	if (rc) {
		DMSG("Bad fdt: %d", rc);
		goto err;
	}

	offs = fdt_path_offset(fdt, "/secure-chosen");
	if (offs < 0) {
		DMSG("Cannot find /secure-chosen");
		goto err;
	}
	seed = fdt_getprop(fdt, offs, "kaslr-seed", &len);
	if (!seed || len != sizeof(*seed)) {
		DMSG("Cannot find valid kaslr-seed");
		goto err;
	}

	return fdt64_to_cpu(fdt64_ld(seed));

err:
	/* Try platform implementation */
	return plat_get_aslr_seed();
}
#else /*!CFG_DT*/
unsigned long __weak get_aslr_seed(void)
{
	/* Try platform implementation */
	return plat_get_aslr_seed();
}
#endif /*!CFG_DT*/
#endif /*CFG_CORE_ASLR*/

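/*
 * With an SPMC at S-EL2 or EL3 the manifest DT is not passed directly:
 * a0 carries an FF-A boot information blob and its single expected
 * descriptor holds the address of the manifest.
 */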
static void *get_fdt_from_boot_info(struct ffa_boot_info_header_1_1 *hdr)
{
	struct ffa_boot_info_1_1 *desc = NULL;
	uint8_t content_fmt = 0;
	uint8_t name_fmt = 0;
	void *fdt = NULL;
	int ret = 0;

	if (hdr->signature != FFA_BOOT_INFO_SIGNATURE) {
		EMSG("Bad boot info signature %#"PRIx32, hdr->signature);
		panic();
	}
	if (hdr->version != FFA_BOOT_INFO_VERSION) {
		EMSG("Bad boot info version %#"PRIx32, hdr->version);
		panic();
	}
	if (hdr->desc_count != 1) {
		EMSG("Bad boot info descriptor count %#"PRIx32,
		     hdr->desc_count);
		panic();
	}
	desc = (void *)((vaddr_t)hdr + hdr->desc_offset);
	name_fmt = desc->flags & FFA_BOOT_INFO_FLAG_NAME_FORMAT_MASK;
	if (name_fmt == FFA_BOOT_INFO_FLAG_NAME_FORMAT_STRING)
		DMSG("Boot info descriptor name \"%16s\"", desc->name);
	else if (name_fmt == FFA_BOOT_INFO_FLAG_NAME_FORMAT_UUID)
		DMSG("Boot info descriptor UUID %pUl", (void *)desc->name);
	else
		DMSG("Boot info descriptor: unknown name format %"PRIu8,
		     name_fmt);

	content_fmt = (desc->flags & FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_MASK) >>
		      FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_SHIFT;
	if (content_fmt != FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_ADDR) {
		EMSG("Bad boot info content format %"PRIu8", expected %u (address)",
		     content_fmt, FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_ADDR);
		panic();
	}

	fdt = (void *)(vaddr_t)desc->contents;
	ret = fdt_check_full(fdt, desc->size);
	if (ret < 0) {
		EMSG("Invalid Device Tree at %p: error %d", fdt, ret);
		panic();
	}
	return fdt;
}

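/*
 * Read the secure memory range OP-TEE may use from the FF-A manifest:
 * "load-address" gives the base and the (currently non-standard)
 * "mem-size" property gives the size.
 */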
static void get_sec_mem_from_manifest(void *fdt, paddr_t *base, size_t *size)
{
	int ret = 0;
	uint64_t num = 0;

	ret = fdt_node_check_compatible(fdt, 0, "arm,ffa-manifest-1.0");
	if (ret < 0) {
		EMSG("Invalid FF-A manifest at %p: error %d", fdt, ret);
		panic();
	}
	ret = dt_getprop_as_number(fdt, 0, "load-address", &num);
	if (ret < 0) {
		EMSG("Can't read \"load-address\" from FF-A manifest at %p: error %d",
		     fdt, ret);
		panic();
	}
	*base = num;
	/* "mem-size" is currently an undocumented extension to the spec. */
	ret = dt_getprop_as_number(fdt, 0, "mem-size", &num);
	if (ret < 0) {
		EMSG("Can't read \"mem-size\" from FF-A manifest at %p: error %d",
		     fdt, ret);
		panic();
	}
	*size = num;
}

void __weak boot_save_args(unsigned long a0, unsigned long a1,
			   unsigned long a2, unsigned long a3,
			   unsigned long a4 __maybe_unused)
{
	/*
	 * Register use:
	 *
	 * Scenario A: Default arguments
	 * a0 - CFG_CORE_FFA=y && CFG_CORE_SEL2_SPMC=n:
	 *      if non-NULL holds the TOS FW config [1] address
	 *    - CFG_CORE_FFA=y &&
	 *      (CFG_CORE_SEL2_SPMC=y || CFG_CORE_EL3_SPMC=y):
	 *      address of FF-A Boot Information Blob
	 *    - CFG_CORE_FFA=n:
	 *      if non-NULL holds the pageable part address
	 * a1 - CFG_WITH_ARM_TRUSTED_FW=n (Armv7):
	 *      Armv7 standard bootarg #1 (kept track of in entry_a32.S)
	 * a2 - CFG_CORE_SEL2_SPMC=n:
	 *      if non-NULL holds the system DTB address
	 *    - CFG_WITH_ARM_TRUSTED_FW=n (Armv7):
	 *      Armv7 standard bootarg #2 (system DTB address, kept track
	 *      of in entry_a32.S)
	 * a3 - Not used
	 * a4 - CFG_WITH_ARM_TRUSTED_FW=n:
	 *      Non-secure entry address
	 *
	 * [1] A TF-A concept: TOS_FW_CONFIG - Trusted OS Firmware
	 * configuration file. Used by the Trusted OS (BL32), that is, OP-TEE
	 * here. This is also called Manifest DT, related to the Manifest DT
	 * passed in the FF-A Boot Information Blob, but with a different
	 * compatible string.
	 *
	 * Scenario B: FW Handoff via Transfer List
	 * Note: FF-A and non-secure entry are not yet supported with
	 * Transfer List
	 * a0 - DTB address or 0 (AArch64)
	 *    - must be 0 (AArch32)
	 * a1 - 1 << 32 | TRANSFER_LIST_SIGNATURE[0:31] (AArch64)
	 *    - 1 << 24 | TRANSFER_LIST_SIGNATURE[0:23] (AArch32)
	 * a2 - must be 0 (AArch64)
	 *    - DTB address or 0 (AArch32)
	 * a3 - Transfer list base address
	 * a4 - Not used
	 */

	if (IS_ENABLED(CFG_TRANSFER_LIST)) {
		if (IS_ENABLED(CFG_ARM64_core) &&
		    a1 == TL_HANDOFF_X1_VALUE(TL_REG_CONVENTION_VER)) {
			boot_save_transfer_list(a2, a3, a0);
			boot_arg_fdt = a0;
		} else if (IS_ENABLED(CFG_ARM32_core) &&
			   a1 == TL_HANDOFF_R1_VALUE(TL_REG_CONVENTION_VER)) {
			boot_save_transfer_list(a0, a3, a2);
			boot_arg_fdt = a2;
		}

		return;
	}

	if (!IS_ENABLED(CFG_CORE_SEL2_SPMC)) {
#if defined(CFG_DT_ADDR)
		boot_arg_fdt = CFG_DT_ADDR;
#else
		boot_arg_fdt = a2;
#endif
	}

	if (IS_ENABLED(CFG_CORE_FFA)) {
		if (IS_ENABLED(CFG_CORE_SEL2_SPMC) ||
		    IS_ENABLED(CFG_CORE_EL3_SPMC))
			init_manifest_dt(get_fdt_from_boot_info((void *)a0));
		else
			init_manifest_dt((void *)a0);
		if (IS_ENABLED(CFG_CORE_SEL2_SPMC) &&
		    IS_ENABLED(CFG_CORE_PHYS_RELOCATABLE)) {
			paddr_t base = 0;
			size_t size = 0;

			get_sec_mem_from_manifest(get_manifest_dt(),
						  &base, &size);
			core_mmu_set_secure_memory(base, size);
		}
	} else {
		if (IS_ENABLED(CFG_WITH_PAGER)) {
#if defined(CFG_PAGEABLE_ADDR)
			boot_arg_pageable_part = CFG_PAGEABLE_ADDR;
#else
			boot_arg_pageable_part = a0;
#endif
		}
		if (!IS_ENABLED(CFG_WITH_ARM_TRUSTED_FW)) {
#if defined(CFG_NS_ENTRY_ADDR)
			boot_arg_nsec_entry = CFG_NS_ENTRY_ADDR;
#else
			boot_arg_nsec_entry = a4;
#endif
		}
	}
}

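/*
 * boot_final() hook: repack the possibly expanded DTB, shrink its transfer
 * list entry back to the packed size and unmap the transfer list before
 * the normal world takes over.
 */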
#if defined(CFG_TRANSFER_LIST)
static TEE_Result release_transfer_list(void)
{
	struct dt_descriptor *dt = get_external_dt_desc();

	if (!mapped_tl)
		return TEE_SUCCESS;

	if (dt) {
		int ret = 0;
		struct transfer_list_entry *tl_e = NULL;

		/*
		 * Pack the DTB and update the transfer list before
		 * un-mapping
		 */
		ret = fdt_pack(dt->blob);
		if (ret < 0) {
			EMSG("Failed to pack Device Tree at 0x%" PRIxPA
			     ": error %d", virt_to_phys(dt->blob), ret);
			panic();
		}

		tl_e = transfer_list_find(mapped_tl, TL_TAG_FDT);
		assert(dt->blob == transfer_list_entry_data(tl_e));
		transfer_list_set_data_size(mapped_tl, tl_e,
					    fdt_totalsize(dt->blob));
		dt->blob = NULL;
	}

	transfer_list_unmap_sync(mapped_tl);
	mapped_tl = NULL;

	return TEE_SUCCESS;
}

boot_final(release_transfer_list);
#endif