1 // SPDX-License-Identifier: BSD-2-Clause 2 /* 3 * Copyright (c) 2015-2023, Linaro Limited 4 * Copyright (c) 2023, Arm Limited 5 */ 6 7 #include <arm.h> 8 #include <assert.h> 9 #include <compiler.h> 10 #include <config.h> 11 #include <console.h> 12 #include <crypto/crypto.h> 13 #include <drivers/gic.h> 14 #include <dt-bindings/interrupt-controller/arm-gic.h> 15 #include <ffa.h> 16 #include <initcall.h> 17 #include <inttypes.h> 18 #include <io.h> 19 #include <keep.h> 20 #include <kernel/asan.h> 21 #include <kernel/boot.h> 22 #include <kernel/dt.h> 23 #include <kernel/linker.h> 24 #include <kernel/misc.h> 25 #include <kernel/panic.h> 26 #include <kernel/tee_misc.h> 27 #include <kernel/thread.h> 28 #include <kernel/tpm.h> 29 #include <kernel/transfer_list.h> 30 #include <libfdt.h> 31 #include <malloc.h> 32 #include <memtag.h> 33 #include <mm/core_memprot.h> 34 #include <mm/core_mmu.h> 35 #include <mm/fobj.h> 36 #include <mm/phys_mem.h> 37 #include <mm/tee_mm.h> 38 #include <mm/tee_pager.h> 39 #include <sm/psci.h> 40 #include <trace.h> 41 #include <utee_defines.h> 42 #include <util.h> 43 44 #include <platform_config.h> 45 46 #if !defined(CFG_WITH_ARM_TRUSTED_FW) 47 #include <sm/sm.h> 48 #endif 49 50 #if defined(CFG_WITH_VFP) 51 #include <kernel/vfp.h> 52 #endif 53 54 /* 55 * In this file we're using unsigned long to represent physical pointers as 56 * they are received in a single register when OP-TEE is initially entered. 57 * This limits 32-bit systems to only use make use of the lower 32 bits 58 * of a physical address for initial parameters. 59 * 60 * 64-bit systems on the other hand can use full 64-bit physical pointers. 61 */ 62 #define PADDR_INVALID ULONG_MAX 63 64 #if defined(CFG_BOOT_SECONDARY_REQUEST) 65 struct ns_entry_context { 66 uintptr_t entry_point; 67 uintptr_t context_id; 68 }; 69 struct ns_entry_context ns_entry_contexts[CFG_TEE_CORE_NB_CORE]; 70 static uint32_t spin_table[CFG_TEE_CORE_NB_CORE]; 71 #endif 72 73 #ifdef CFG_BOOT_SYNC_CPU 74 /* 75 * Array used when booting, to synchronize cpu. 76 * When 0, the cpu has not started. 77 * When 1, it has started 78 */ 79 uint32_t sem_cpu_sync[CFG_TEE_CORE_NB_CORE]; 80 DECLARE_KEEP_PAGER(sem_cpu_sync); 81 #endif 82 83 /* 84 * Must not be in .bss since it's initialized and used from assembly before 85 * .bss is cleared. 86 */ 87 vaddr_t boot_cached_mem_end __nex_data = 1; 88 89 static unsigned long boot_arg_fdt __nex_bss; 90 static unsigned long boot_arg_nsec_entry __nex_bss; 91 static unsigned long boot_arg_pageable_part __nex_bss; 92 static unsigned long boot_arg_transfer_list __nex_bss; 93 static struct transfer_list_header *mapped_tl __nex_bss; 94 95 #ifdef CFG_SECONDARY_INIT_CNTFRQ 96 static uint32_t cntfrq; 97 #endif 98 99 /* May be overridden in plat-$(PLATFORM)/main.c */ 100 __weak void plat_primary_init_early(void) 101 { 102 } 103 DECLARE_KEEP_PAGER(plat_primary_init_early); 104 105 /* May be overridden in plat-$(PLATFORM)/main.c */ 106 __weak void boot_primary_init_intc(void) 107 { 108 } 109 110 /* May be overridden in plat-$(PLATFORM)/main.c */ 111 __weak void boot_secondary_init_intc(void) 112 { 113 } 114 115 /* May be overridden in plat-$(PLATFORM)/main.c */ 116 __weak unsigned long plat_get_aslr_seed(void) 117 { 118 DMSG("Warning: no ASLR seed"); 119 120 return 0; 121 } 122 123 #if defined(_CFG_CORE_STACK_PROTECTOR) || defined(CFG_WITH_STACK_CANARIES) 124 /* Generate random stack canary value on boot up */ 125 __weak void plat_get_random_stack_canaries(void *buf, size_t ncan, size_t size) 126 { 127 TEE_Result ret = TEE_ERROR_GENERIC; 128 size_t i = 0; 129 130 assert(buf && ncan && size); 131 132 /* 133 * With virtualization the RNG is not initialized in Nexus core. 134 * Need to override with platform specific implementation. 135 */ 136 if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) { 137 IMSG("WARNING: Using fixed value for stack canary"); 138 memset(buf, 0xab, ncan * size); 139 goto out; 140 } 141 142 ret = crypto_rng_read(buf, ncan * size); 143 if (ret != TEE_SUCCESS) 144 panic("Failed to generate random stack canary"); 145 146 out: 147 /* Leave null byte in canary to prevent string base exploit */ 148 for (i = 0; i < ncan; i++) 149 *((uint8_t *)buf + size * i) = 0; 150 } 151 #endif /* _CFG_CORE_STACK_PROTECTOR || CFG_WITH_STACK_CANARIES */ 152 153 /* 154 * This function is called as a guard after each smc call which is not 155 * supposed to return. 156 */ 157 void __panic_at_smc_return(void) 158 { 159 panic(); 160 } 161 162 #if defined(CFG_WITH_ARM_TRUSTED_FW) 163 void init_sec_mon(unsigned long nsec_entry __maybe_unused) 164 { 165 assert(nsec_entry == PADDR_INVALID); 166 /* Do nothing as we don't have a secure monitor */ 167 } 168 #else 169 /* May be overridden in plat-$(PLATFORM)/main.c */ 170 __weak void init_sec_mon(unsigned long nsec_entry) 171 { 172 struct sm_nsec_ctx *nsec_ctx; 173 174 assert(nsec_entry != PADDR_INVALID); 175 176 /* Initialize secure monitor */ 177 nsec_ctx = sm_get_nsec_ctx(); 178 nsec_ctx->mon_lr = nsec_entry; 179 nsec_ctx->mon_spsr = CPSR_MODE_SVC | CPSR_I; 180 if (nsec_entry & 1) 181 nsec_ctx->mon_spsr |= CPSR_T; 182 } 183 #endif 184 185 #if defined(CFG_WITH_ARM_TRUSTED_FW) 186 static void init_vfp_nsec(void) 187 { 188 } 189 #else 190 static void init_vfp_nsec(void) 191 { 192 /* Normal world can use CP10 and CP11 (SIMD/VFP) */ 193 write_nsacr(read_nsacr() | NSACR_CP10 | NSACR_CP11); 194 } 195 #endif 196 197 static void check_crypto_extensions(void) 198 { 199 bool ce_supported = true; 200 201 if (!feat_aes_implemented() && 202 IS_ENABLED(CFG_CRYPTO_AES_ARM_CE)) { 203 EMSG("AES instructions are not supported"); 204 ce_supported = false; 205 } 206 207 if (!feat_sha1_implemented() && 208 IS_ENABLED(CFG_CRYPTO_SHA1_ARM_CE)) { 209 EMSG("SHA1 instructions are not supported"); 210 ce_supported = false; 211 } 212 213 if (!feat_sha256_implemented() && 214 IS_ENABLED(CFG_CRYPTO_SHA256_ARM_CE)) { 215 EMSG("SHA256 instructions are not supported"); 216 ce_supported = false; 217 } 218 219 /* Check aarch64 specific instructions */ 220 if (IS_ENABLED(CFG_ARM64_core)) { 221 if (!feat_sha512_implemented() && 222 IS_ENABLED(CFG_CRYPTO_SHA512_ARM_CE)) { 223 EMSG("SHA512 instructions are not supported"); 224 ce_supported = false; 225 } 226 227 if (!feat_sha3_implemented() && 228 IS_ENABLED(CFG_CRYPTO_SHA3_ARM_CE)) { 229 EMSG("SHA3 instructions are not supported"); 230 ce_supported = false; 231 } 232 233 if (!feat_sm3_implemented() && 234 IS_ENABLED(CFG_CRYPTO_SM3_ARM_CE)) { 235 EMSG("SM3 instructions are not supported"); 236 ce_supported = false; 237 } 238 239 if (!feat_sm4_implemented() && 240 IS_ENABLED(CFG_CRYPTO_SM4_ARM_CE)) { 241 EMSG("SM4 instructions are not supported"); 242 ce_supported = false; 243 } 244 } 245 246 if (!ce_supported) 247 panic("HW doesn't support CE instructions"); 248 } 249 250 #if defined(CFG_WITH_VFP) 251 252 #ifdef ARM32 253 static void init_vfp_sec(void) 254 { 255 uint32_t cpacr = read_cpacr(); 256 257 /* 258 * Enable Advanced SIMD functionality. 259 * Enable use of D16-D31 of the Floating-point Extension register 260 * file. 261 */ 262 cpacr &= ~(CPACR_ASEDIS | CPACR_D32DIS); 263 /* 264 * Enable usage of CP10 and CP11 (SIMD/VFP) (both kernel and user 265 * mode. 266 */ 267 cpacr |= CPACR_CP(10, CPACR_CP_ACCESS_FULL); 268 cpacr |= CPACR_CP(11, CPACR_CP_ACCESS_FULL); 269 write_cpacr(cpacr); 270 } 271 #endif /* ARM32 */ 272 273 #ifdef ARM64 274 static void init_vfp_sec(void) 275 { 276 /* Not using VFP until thread_kernel_enable_vfp() */ 277 vfp_disable(); 278 } 279 #endif /* ARM64 */ 280 281 #else /* CFG_WITH_VFP */ 282 283 static void init_vfp_sec(void) 284 { 285 /* Not using VFP */ 286 } 287 #endif 288 289 #ifdef CFG_SECONDARY_INIT_CNTFRQ 290 static void primary_save_cntfrq(void) 291 { 292 assert(cntfrq == 0); 293 294 /* 295 * CNTFRQ should be initialized on the primary CPU by a 296 * previous boot stage 297 */ 298 cntfrq = read_cntfrq(); 299 } 300 301 static void secondary_init_cntfrq(void) 302 { 303 assert(cntfrq != 0); 304 write_cntfrq(cntfrq); 305 } 306 #else /* CFG_SECONDARY_INIT_CNTFRQ */ 307 static void primary_save_cntfrq(void) 308 { 309 } 310 311 static void secondary_init_cntfrq(void) 312 { 313 } 314 #endif 315 316 #ifdef CFG_CORE_SANITIZE_KADDRESS 317 static void init_run_constructors(void) 318 { 319 const vaddr_t *ctor; 320 321 for (ctor = &__ctor_list; ctor < &__ctor_end; ctor++) 322 ((void (*)(void))(*ctor))(); 323 } 324 325 static void init_asan(void) 326 { 327 328 /* 329 * CFG_ASAN_SHADOW_OFFSET is also supplied as 330 * -fasan-shadow-offset=$(CFG_ASAN_SHADOW_OFFSET) to the compiler. 331 * Since all the needed values to calculate the value of 332 * CFG_ASAN_SHADOW_OFFSET isn't available in to make we need to 333 * calculate it in advance and hard code it into the platform 334 * conf.mk. Here where we have all the needed values we double 335 * check that the compiler is supplied the correct value. 336 */ 337 338 #define __ASAN_SHADOW_START \ 339 ROUNDUP(TEE_RAM_START + (TEE_RAM_VA_SIZE * 8) / 9 - 8, 8) 340 assert(__ASAN_SHADOW_START == (vaddr_t)&__asan_shadow_start); 341 #define __CFG_ASAN_SHADOW_OFFSET \ 342 (__ASAN_SHADOW_START - (TEE_RAM_START / 8)) 343 COMPILE_TIME_ASSERT(CFG_ASAN_SHADOW_OFFSET == __CFG_ASAN_SHADOW_OFFSET); 344 #undef __ASAN_SHADOW_START 345 #undef __CFG_ASAN_SHADOW_OFFSET 346 347 /* 348 * Assign area covered by the shadow area, everything from start up 349 * to the beginning of the shadow area. 350 */ 351 asan_set_shadowed((void *)TEE_LOAD_ADDR, &__asan_shadow_start); 352 353 /* 354 * Add access to areas that aren't opened automatically by a 355 * constructor. 356 */ 357 asan_tag_access(&__ctor_list, &__ctor_end); 358 asan_tag_access(__rodata_start, __rodata_end); 359 #ifdef CFG_WITH_PAGER 360 asan_tag_access(__pageable_start, __pageable_end); 361 #endif /*CFG_WITH_PAGER*/ 362 asan_tag_access(__nozi_start, __nozi_end); 363 #ifdef ARM32 364 asan_tag_access(__exidx_start, __exidx_end); 365 asan_tag_access(__extab_start, __extab_end); 366 #endif 367 368 init_run_constructors(); 369 370 /* Everything is tagged correctly, let's start address sanitizing. */ 371 asan_start(); 372 } 373 #else /*CFG_CORE_SANITIZE_KADDRESS*/ 374 static void init_asan(void) 375 { 376 } 377 #endif /*CFG_CORE_SANITIZE_KADDRESS*/ 378 379 #if defined(CFG_MEMTAG) 380 /* Called from entry_a64.S only when MEMTAG is configured */ 381 void boot_init_memtag(void) 382 { 383 memtag_init_ops(feat_mte_implemented()); 384 } 385 386 static TEE_Result mmap_clear_memtag(struct tee_mmap_region *map, 387 void *ptr __unused) 388 { 389 switch (map->type) { 390 case MEM_AREA_NEX_RAM_RO: 391 case MEM_AREA_SEC_RAM_OVERALL: 392 DMSG("Clearing tags for VA %#"PRIxVA"..%#"PRIxVA, 393 map->va, map->va + map->size - 1); 394 memtag_set_tags((void *)map->va, map->size, 0); 395 break; 396 default: 397 break; 398 } 399 400 return TEE_SUCCESS; 401 } 402 403 /* Called from entry_a64.S only when MEMTAG is configured */ 404 void boot_clear_memtag(void) 405 { 406 core_mmu_for_each_map(NULL, mmap_clear_memtag); 407 } 408 #endif 409 410 #ifdef CFG_WITH_PAGER 411 412 #ifdef CFG_CORE_SANITIZE_KADDRESS 413 static void carve_out_asan_mem(void) 414 { 415 nex_phys_mem_partial_carve_out(ASAN_MAP_PA, ASAN_MAP_SZ); 416 } 417 #else 418 static void carve_out_asan_mem(void) 419 { 420 } 421 #endif 422 423 static void print_pager_pool_size(void) 424 { 425 struct tee_pager_stats __maybe_unused stats; 426 427 tee_pager_get_stats(&stats); 428 IMSG("Pager pool size: %zukB", 429 stats.npages_all * SMALL_PAGE_SIZE / 1024); 430 } 431 432 static void init_virt_pool(tee_mm_pool_t *virt_pool) 433 { 434 const vaddr_t begin = VCORE_START_VA; 435 size_t size = TEE_RAM_VA_SIZE; 436 437 #ifdef CFG_CORE_SANITIZE_KADDRESS 438 /* Carve out asan memory, flat maped after core memory */ 439 if (begin + size > ASAN_SHADOW_PA) 440 size = ASAN_MAP_PA - begin; 441 #endif 442 443 if (!tee_mm_init(virt_pool, begin, size, SMALL_PAGE_SHIFT, 444 TEE_MM_POOL_NO_FLAGS)) 445 panic("core_virt_mem_pool init failed"); 446 } 447 448 /* 449 * With CFG_CORE_ASLR=y the init part is relocated very early during boot. 450 * The init part is also paged just as the rest of the normal paged code, with 451 * the difference that it's preloaded during boot. When the backing store 452 * is configured the entire paged binary is copied in place and then also 453 * the init part. Since the init part has been relocated (references to 454 * addresses updated to compensate for the new load address) this has to be 455 * undone for the hashes of those pages to match with the original binary. 456 * 457 * If CFG_CORE_ASLR=n, nothing needs to be done as the code/ro pages are 458 * unchanged. 459 */ 460 static void undo_init_relocation(uint8_t *paged_store __maybe_unused) 461 { 462 #ifdef CFG_CORE_ASLR 463 unsigned long *ptr = NULL; 464 const uint32_t *reloc = NULL; 465 const uint32_t *reloc_end = NULL; 466 unsigned long offs = boot_mmu_config.map_offset; 467 const struct boot_embdata *embdata = (const void *)__init_end; 468 vaddr_t addr_end = (vaddr_t)__init_end - offs - TEE_LOAD_ADDR; 469 vaddr_t addr_start = (vaddr_t)__init_start - offs - TEE_LOAD_ADDR; 470 471 reloc = (const void *)((vaddr_t)embdata + embdata->reloc_offset); 472 reloc_end = reloc + embdata->reloc_len / sizeof(*reloc); 473 474 for (; reloc < reloc_end; reloc++) { 475 if (*reloc < addr_start) 476 continue; 477 if (*reloc >= addr_end) 478 break; 479 ptr = (void *)(paged_store + *reloc - addr_start); 480 *ptr -= offs; 481 } 482 #endif 483 } 484 485 static struct fobj *ro_paged_alloc(tee_mm_entry_t *mm, void *hashes, 486 void *store) 487 { 488 const unsigned int num_pages = tee_mm_get_bytes(mm) / SMALL_PAGE_SIZE; 489 #ifdef CFG_CORE_ASLR 490 unsigned int reloc_offs = (vaddr_t)__pageable_start - VCORE_START_VA; 491 const struct boot_embdata *embdata = (const void *)__init_end; 492 const void *reloc = __init_end + embdata->reloc_offset; 493 494 return fobj_ro_reloc_paged_alloc(num_pages, hashes, reloc_offs, 495 reloc, embdata->reloc_len, store); 496 #else 497 return fobj_ro_paged_alloc(num_pages, hashes, store); 498 #endif 499 } 500 501 static void init_runtime(unsigned long pageable_part) 502 { 503 size_t n; 504 size_t init_size = (size_t)(__init_end - __init_start); 505 size_t pageable_start = (size_t)__pageable_start; 506 size_t pageable_end = (size_t)__pageable_end; 507 size_t pageable_size = pageable_end - pageable_start; 508 vaddr_t tzsram_end = TZSRAM_BASE + TZSRAM_SIZE - TEE_LOAD_ADDR + 509 VCORE_START_VA; 510 size_t hash_size = (pageable_size / SMALL_PAGE_SIZE) * 511 TEE_SHA256_HASH_SIZE; 512 const struct boot_embdata *embdata = (const void *)__init_end; 513 const void *tmp_hashes = NULL; 514 tee_mm_entry_t *mm = NULL; 515 struct fobj *fobj = NULL; 516 uint8_t *paged_store = NULL; 517 uint8_t *hashes = NULL; 518 519 assert(pageable_size % SMALL_PAGE_SIZE == 0); 520 assert(embdata->total_len >= embdata->hashes_offset + 521 embdata->hashes_len); 522 assert(hash_size == embdata->hashes_len); 523 524 tmp_hashes = __init_end + embdata->hashes_offset; 525 526 init_asan(); 527 528 /* Add heap2 first as heap1 may be too small as initial bget pool */ 529 malloc_add_pool(__heap2_start, __heap2_end - __heap2_start); 530 malloc_add_pool(__heap1_start, __heap1_end - __heap1_start); 531 532 /* 533 * This needs to be initialized early to support address lookup 534 * in MEM_AREA_TEE_RAM 535 */ 536 tee_pager_early_init(); 537 538 hashes = malloc(hash_size); 539 IMSG_RAW("\n"); 540 IMSG("Pager is enabled. Hashes: %zu bytes", hash_size); 541 assert(hashes); 542 asan_memcpy_unchecked(hashes, tmp_hashes, hash_size); 543 544 /* 545 * Need physical memory pool initialized to be able to allocate 546 * secure physical memory below. 547 */ 548 core_mmu_init_phys_mem(); 549 550 carve_out_asan_mem(); 551 552 mm = nex_phys_mem_ta_alloc(pageable_size); 553 assert(mm); 554 paged_store = phys_to_virt(tee_mm_get_smem(mm), 555 MEM_AREA_SEC_RAM_OVERALL, pageable_size); 556 /* 557 * Load pageable part in the dedicated allocated area: 558 * - Move pageable non-init part into pageable area. Note bootloader 559 * may have loaded it anywhere in TA RAM hence use memmove(). 560 * - Copy pageable init part from current location into pageable area. 561 */ 562 memmove(paged_store + init_size, 563 phys_to_virt(pageable_part, 564 core_mmu_get_type_by_pa(pageable_part), 565 __pageable_part_end - __pageable_part_start), 566 __pageable_part_end - __pageable_part_start); 567 asan_memcpy_unchecked(paged_store, __init_start, init_size); 568 /* 569 * Undo eventual relocation for the init part so the hash checks 570 * can pass. 571 */ 572 undo_init_relocation(paged_store); 573 574 /* Check that hashes of what's in pageable area is OK */ 575 DMSG("Checking hashes of pageable area"); 576 for (n = 0; (n * SMALL_PAGE_SIZE) < pageable_size; n++) { 577 const uint8_t *hash = hashes + n * TEE_SHA256_HASH_SIZE; 578 const uint8_t *page = paged_store + n * SMALL_PAGE_SIZE; 579 TEE_Result res; 580 581 DMSG("hash pg_idx %zu hash %p page %p", n, hash, page); 582 res = hash_sha256_check(hash, page, SMALL_PAGE_SIZE); 583 if (res != TEE_SUCCESS) { 584 EMSG("Hash failed for page %zu at %p: res 0x%x", 585 n, (void *)page, res); 586 panic(); 587 } 588 } 589 590 /* 591 * Assert prepaged init sections are page aligned so that nothing 592 * trails uninited at the end of the premapped init area. 593 */ 594 assert(!(init_size & SMALL_PAGE_MASK)); 595 596 /* 597 * Initialize the virtual memory pool used for main_mmu_l2_ttb which 598 * is supplied to tee_pager_init() below. 599 */ 600 init_virt_pool(&core_virt_mem_pool); 601 602 /* 603 * Assign alias area for pager end of the small page block the rest 604 * of the binary is loaded into. We're taking more than needed, but 605 * we're guaranteed to not need more than the physical amount of 606 * TZSRAM. 607 */ 608 mm = tee_mm_alloc2(&core_virt_mem_pool, 609 (vaddr_t)core_virt_mem_pool.lo + 610 core_virt_mem_pool.size - TZSRAM_SIZE, 611 TZSRAM_SIZE); 612 assert(mm); 613 tee_pager_set_alias_area(mm); 614 615 /* 616 * Claim virtual memory which isn't paged. 617 * Linear memory (flat map core memory) ends there. 618 */ 619 mm = tee_mm_alloc2(&core_virt_mem_pool, VCORE_UNPG_RX_PA, 620 (vaddr_t)(__pageable_start - VCORE_UNPG_RX_PA)); 621 assert(mm); 622 623 /* 624 * Allocate virtual memory for the pageable area and let the pager 625 * take charge of all the pages already assigned to that memory. 626 */ 627 mm = tee_mm_alloc2(&core_virt_mem_pool, (vaddr_t)__pageable_start, 628 pageable_size); 629 assert(mm); 630 fobj = ro_paged_alloc(mm, hashes, paged_store); 631 assert(fobj); 632 tee_pager_add_core_region(tee_mm_get_smem(mm), PAGED_REGION_TYPE_RO, 633 fobj); 634 fobj_put(fobj); 635 636 tee_pager_add_pages(pageable_start, init_size / SMALL_PAGE_SIZE, false); 637 tee_pager_add_pages(pageable_start + init_size, 638 (pageable_size - init_size) / SMALL_PAGE_SIZE, 639 true); 640 if (pageable_end < tzsram_end) 641 tee_pager_add_pages(pageable_end, (tzsram_end - pageable_end) / 642 SMALL_PAGE_SIZE, true); 643 644 /* 645 * There may be physical pages in TZSRAM before the core load address. 646 * These pages can be added to the physical pages pool of the pager. 647 * This setup may happen when a the secure bootloader runs in TZRAM 648 * and its memory can be reused by OP-TEE once boot stages complete. 649 */ 650 tee_pager_add_pages(core_virt_mem_pool.lo, 651 (VCORE_UNPG_RX_PA - core_virt_mem_pool.lo) / 652 SMALL_PAGE_SIZE, 653 true); 654 655 print_pager_pool_size(); 656 } 657 #else 658 659 static void init_runtime(unsigned long pageable_part __unused) 660 { 661 init_asan(); 662 663 /* 664 * By default whole OP-TEE uses malloc, so we need to initialize 665 * it early. But, when virtualization is enabled, malloc is used 666 * only by TEE runtime, so malloc should be initialized later, for 667 * every virtual partition separately. Core code uses nex_malloc 668 * instead. 669 */ 670 #ifdef CFG_NS_VIRTUALIZATION 671 nex_malloc_add_pool(__nex_heap_start, __nex_heap_end - 672 __nex_heap_start); 673 #else 674 malloc_add_pool(__heap1_start, __heap1_end - __heap1_start); 675 #endif 676 677 IMSG_RAW("\n"); 678 } 679 #endif 680 681 #if defined(CFG_DT) 682 static int add_optee_dt_node(struct dt_descriptor *dt) 683 { 684 int offs; 685 int ret; 686 687 if (fdt_path_offset(dt->blob, "/firmware/optee") >= 0) { 688 DMSG("OP-TEE Device Tree node already exists!"); 689 return 0; 690 } 691 692 offs = fdt_path_offset(dt->blob, "/firmware"); 693 if (offs < 0) { 694 offs = add_dt_path_subnode(dt, "/", "firmware"); 695 if (offs < 0) 696 return -1; 697 } 698 699 offs = fdt_add_subnode(dt->blob, offs, "optee"); 700 if (offs < 0) 701 return -1; 702 703 ret = fdt_setprop_string(dt->blob, offs, "compatible", 704 "linaro,optee-tz"); 705 if (ret < 0) 706 return -1; 707 ret = fdt_setprop_string(dt->blob, offs, "method", "smc"); 708 if (ret < 0) 709 return -1; 710 711 if (CFG_CORE_ASYNC_NOTIF_GIC_INTID) { 712 /* 713 * The format of the interrupt property is defined by the 714 * binding of the interrupt domain root. In this case it's 715 * one Arm GIC v1, v2 or v3 so we must be compatible with 716 * these. 717 * 718 * An SPI type of interrupt is indicated with a 0 in the 719 * first cell. A PPI type is indicated with value 1. 720 * 721 * The interrupt number goes in the second cell where 722 * SPIs ranges from 0 to 987 and PPI ranges from 0 to 15. 723 * 724 * Flags are passed in the third cells. 725 */ 726 uint32_t itr_trigger = 0; 727 uint32_t itr_type = 0; 728 uint32_t itr_id = 0; 729 uint32_t val[3] = { }; 730 731 /* PPI are visible only in current CPU cluster */ 732 static_assert(IS_ENABLED(CFG_CORE_FFA) || 733 !CFG_CORE_ASYNC_NOTIF_GIC_INTID || 734 (CFG_CORE_ASYNC_NOTIF_GIC_INTID >= 735 GIC_SPI_BASE) || 736 ((CFG_TEE_CORE_NB_CORE <= 8) && 737 (CFG_CORE_ASYNC_NOTIF_GIC_INTID >= 738 GIC_PPI_BASE))); 739 740 if (CFG_CORE_ASYNC_NOTIF_GIC_INTID >= GIC_SPI_BASE) { 741 itr_type = GIC_SPI; 742 itr_id = CFG_CORE_ASYNC_NOTIF_GIC_INTID - GIC_SPI_BASE; 743 itr_trigger = IRQ_TYPE_EDGE_RISING; 744 } else { 745 itr_type = GIC_PPI; 746 itr_id = CFG_CORE_ASYNC_NOTIF_GIC_INTID - GIC_PPI_BASE; 747 itr_trigger = IRQ_TYPE_EDGE_RISING | 748 GIC_CPU_MASK_SIMPLE(CFG_TEE_CORE_NB_CORE); 749 } 750 751 val[0] = TEE_U32_TO_BIG_ENDIAN(itr_type); 752 val[1] = TEE_U32_TO_BIG_ENDIAN(itr_id); 753 val[2] = TEE_U32_TO_BIG_ENDIAN(itr_trigger); 754 755 ret = fdt_setprop(dt->blob, offs, "interrupts", val, 756 sizeof(val)); 757 if (ret < 0) 758 return -1; 759 } 760 return 0; 761 } 762 763 #ifdef CFG_PSCI_ARM32 764 static int append_psci_compatible(void *fdt, int offs, const char *str) 765 { 766 return fdt_appendprop(fdt, offs, "compatible", str, strlen(str) + 1); 767 } 768 769 static int dt_add_psci_node(struct dt_descriptor *dt) 770 { 771 int offs; 772 773 if (fdt_path_offset(dt->blob, "/psci") >= 0) { 774 DMSG("PSCI Device Tree node already exists!"); 775 return 0; 776 } 777 778 offs = add_dt_path_subnode(dt, "/", "psci"); 779 if (offs < 0) 780 return -1; 781 if (append_psci_compatible(dt->blob, offs, "arm,psci-1.0")) 782 return -1; 783 if (append_psci_compatible(dt->blob, offs, "arm,psci-0.2")) 784 return -1; 785 if (append_psci_compatible(dt->blob, offs, "arm,psci")) 786 return -1; 787 if (fdt_setprop_string(dt->blob, offs, "method", "smc")) 788 return -1; 789 if (fdt_setprop_u32(dt->blob, offs, "cpu_suspend", PSCI_CPU_SUSPEND)) 790 return -1; 791 if (fdt_setprop_u32(dt->blob, offs, "cpu_off", PSCI_CPU_OFF)) 792 return -1; 793 if (fdt_setprop_u32(dt->blob, offs, "cpu_on", PSCI_CPU_ON)) 794 return -1; 795 if (fdt_setprop_u32(dt->blob, offs, "sys_poweroff", PSCI_SYSTEM_OFF)) 796 return -1; 797 if (fdt_setprop_u32(dt->blob, offs, "sys_reset", PSCI_SYSTEM_RESET)) 798 return -1; 799 return 0; 800 } 801 802 static int check_node_compat_prefix(struct dt_descriptor *dt, int offs, 803 const char *prefix) 804 { 805 const size_t prefix_len = strlen(prefix); 806 size_t l; 807 int plen; 808 const char *prop; 809 810 prop = fdt_getprop(dt->blob, offs, "compatible", &plen); 811 if (!prop) 812 return -1; 813 814 while (plen > 0) { 815 if (memcmp(prop, prefix, prefix_len) == 0) 816 return 0; /* match */ 817 818 l = strlen(prop) + 1; 819 prop += l; 820 plen -= l; 821 } 822 823 return -1; 824 } 825 826 static int dt_add_psci_cpu_enable_methods(struct dt_descriptor *dt) 827 { 828 int offs = 0; 829 830 while (1) { 831 offs = fdt_next_node(dt->blob, offs, NULL); 832 if (offs < 0) 833 break; 834 if (fdt_getprop(dt->blob, offs, "enable-method", NULL)) 835 continue; /* already set */ 836 if (check_node_compat_prefix(dt, offs, "arm,cortex-a")) 837 continue; /* no compatible */ 838 if (fdt_setprop_string(dt->blob, offs, "enable-method", "psci")) 839 return -1; 840 /* Need to restart scanning as offsets may have changed */ 841 offs = 0; 842 } 843 return 0; 844 } 845 846 static int config_psci(struct dt_descriptor *dt) 847 { 848 if (dt_add_psci_node(dt)) 849 return -1; 850 return dt_add_psci_cpu_enable_methods(dt); 851 } 852 #else 853 static int config_psci(struct dt_descriptor *dt __unused) 854 { 855 return 0; 856 } 857 #endif /*CFG_PSCI_ARM32*/ 858 859 static int mark_tzdram_as_reserved(struct dt_descriptor *dt) 860 { 861 return add_res_mem_dt_node(dt, "optee_core", CFG_TZDRAM_START, 862 CFG_TZDRAM_SIZE); 863 } 864 865 static void update_external_dt(void) 866 { 867 struct dt_descriptor *dt = get_external_dt_desc(); 868 869 if (!dt || !dt->blob) 870 return; 871 872 if (!IS_ENABLED(CFG_CORE_FFA) && add_optee_dt_node(dt)) 873 panic("Failed to add OP-TEE Device Tree node"); 874 875 if (config_psci(dt)) 876 panic("Failed to config PSCI"); 877 878 #ifdef CFG_CORE_RESERVED_SHM 879 if (mark_static_shm_as_reserved(dt)) 880 panic("Failed to config non-secure memory"); 881 #endif 882 883 if (mark_tzdram_as_reserved(dt)) 884 panic("Failed to config secure memory"); 885 } 886 #else /*CFG_DT*/ 887 static void update_external_dt(void) 888 { 889 } 890 #endif /*!CFG_DT*/ 891 892 void init_tee_runtime(void) 893 { 894 #ifndef CFG_WITH_PAGER 895 /* Pager initializes TA RAM early */ 896 core_mmu_init_phys_mem(); 897 #endif 898 /* 899 * With virtualization we call this function when creating the 900 * OP-TEE partition instead. 901 */ 902 if (!IS_ENABLED(CFG_NS_VIRTUALIZATION)) 903 call_preinitcalls(); 904 call_early_initcalls(); 905 call_service_initcalls(); 906 907 /* 908 * These two functions uses crypto_rng_read() to initialize the 909 * pauth keys. Once call_initcalls() returns we're guaranteed that 910 * crypto_rng_read() is ready to be used. 911 */ 912 thread_init_core_local_pauth_keys(); 913 thread_init_thread_pauth_keys(); 914 915 /* 916 * Reinitialize canaries around the stacks with crypto_rng_read(). 917 * 918 * TODO: Updating canaries when CFG_NS_VIRTUALIZATION is enabled will 919 * require synchronization between thread_check_canaries() and 920 * thread_update_canaries(). 921 */ 922 if (!IS_ENABLED(CFG_NS_VIRTUALIZATION)) 923 thread_update_canaries(); 924 } 925 926 static void init_primary(unsigned long pageable_part, unsigned long nsec_entry) 927 { 928 thread_init_core_local_stacks(); 929 /* 930 * Mask asynchronous exceptions before switch to the thread vector 931 * as the thread handler requires those to be masked while 932 * executing with the temporary stack. The thread subsystem also 933 * asserts that the foreign interrupts are blocked when using most of 934 * its functions. 935 */ 936 thread_set_exceptions(THREAD_EXCP_ALL); 937 primary_save_cntfrq(); 938 init_vfp_sec(); 939 940 if (IS_ENABLED(CFG_CRYPTO_WITH_CE)) 941 check_crypto_extensions(); 942 943 /* 944 * Pager: init_runtime() calls thread_kernel_enable_vfp() so we must 945 * set a current thread right now to avoid a chicken-and-egg problem 946 * (thread_init_boot_thread() sets the current thread but needs 947 * things set by init_runtime()). 948 */ 949 thread_get_core_local()->curr_thread = 0; 950 init_runtime(pageable_part); 951 952 if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) { 953 /* 954 * Virtualization: We can't initialize threads right now because 955 * threads belong to "tee" part and will be initialized 956 * separately per each new virtual guest. So, we'll clear 957 * "curr_thread" and call it done. 958 */ 959 thread_get_core_local()->curr_thread = -1; 960 } else { 961 thread_init_boot_thread(); 962 } 963 thread_init_primary(); 964 thread_init_per_cpu(); 965 init_sec_mon(nsec_entry); 966 } 967 968 static bool cpu_nmfi_enabled(void) 969 { 970 #if defined(ARM32) 971 return read_sctlr() & SCTLR_NMFI; 972 #else 973 /* Note: ARM64 does not feature non-maskable FIQ support. */ 974 return false; 975 #endif 976 } 977 978 /* 979 * Note: this function is weak just to make it possible to exclude it from 980 * the unpaged area. 981 */ 982 void __weak boot_init_primary_late(unsigned long fdt __unused, 983 unsigned long manifest __unused) 984 { 985 size_t fdt_size = CFG_DTB_MAX_SIZE; 986 987 if (IS_ENABLED(CFG_TRANSFER_LIST) && mapped_tl) { 988 struct transfer_list_entry *tl_e = NULL; 989 990 tl_e = transfer_list_find(mapped_tl, TL_TAG_FDT); 991 if (tl_e) 992 fdt_size = tl_e->data_size; 993 } 994 995 init_external_dt(boot_arg_fdt, fdt_size); 996 reinit_manifest_dt(); 997 #ifdef CFG_CORE_SEL1_SPMC 998 tpm_map_log_area(get_manifest_dt()); 999 #else 1000 tpm_map_log_area(get_external_dt()); 1001 #endif 1002 discover_nsec_memory(); 1003 update_external_dt(); 1004 configure_console_from_dt(); 1005 1006 IMSG("OP-TEE version: %s", core_v_str); 1007 if (IS_ENABLED(CFG_INSECURE)) { 1008 IMSG("WARNING: This OP-TEE configuration might be insecure!"); 1009 IMSG("WARNING: Please check https://optee.readthedocs.io/en/latest/architecture/porting_guidelines.html"); 1010 } 1011 IMSG("Primary CPU initializing"); 1012 #ifdef CFG_CORE_ASLR 1013 DMSG("Executing at offset %#lx with virtual load address %#"PRIxVA, 1014 (unsigned long)boot_mmu_config.map_offset, VCORE_START_VA); 1015 #endif 1016 if (IS_ENABLED(CFG_MEMTAG)) 1017 DMSG("Memory tagging %s", 1018 memtag_is_enabled() ? "enabled" : "disabled"); 1019 1020 /* Check if platform needs NMFI workaround */ 1021 if (cpu_nmfi_enabled()) { 1022 if (!IS_ENABLED(CFG_CORE_WORKAROUND_ARM_NMFI)) 1023 IMSG("WARNING: This ARM core has NMFI enabled, please apply workaround!"); 1024 } else { 1025 if (IS_ENABLED(CFG_CORE_WORKAROUND_ARM_NMFI)) 1026 IMSG("WARNING: This ARM core does not have NMFI enabled, no need for workaround"); 1027 } 1028 1029 boot_primary_init_intc(); 1030 init_vfp_nsec(); 1031 if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) { 1032 IMSG("Initializing virtualization support"); 1033 core_mmu_init_virtualization(); 1034 } else { 1035 init_tee_runtime(); 1036 } 1037 } 1038 1039 /* 1040 * Note: this function is weak just to make it possible to exclude it from 1041 * the unpaged area. 1042 */ 1043 void __weak boot_init_primary_final(void) 1044 { 1045 if (!IS_ENABLED(CFG_NS_VIRTUALIZATION)) 1046 call_driver_initcalls(); 1047 call_finalcalls(); 1048 IMSG("Primary CPU switching to normal world boot"); 1049 } 1050 1051 static void init_secondary_helper(unsigned long nsec_entry) 1052 { 1053 IMSG("Secondary CPU %zu initializing", get_core_pos()); 1054 1055 /* 1056 * Mask asynchronous exceptions before switch to the thread vector 1057 * as the thread handler requires those to be masked while 1058 * executing with the temporary stack. The thread subsystem also 1059 * asserts that the foreign interrupts are blocked when using most of 1060 * its functions. 1061 */ 1062 thread_set_exceptions(THREAD_EXCP_ALL); 1063 1064 secondary_init_cntfrq(); 1065 thread_init_per_cpu(); 1066 init_sec_mon(nsec_entry); 1067 boot_secondary_init_intc(); 1068 init_vfp_sec(); 1069 init_vfp_nsec(); 1070 1071 IMSG("Secondary CPU %zu switching to normal world boot", get_core_pos()); 1072 } 1073 1074 /* 1075 * Note: this function is weak just to make it possible to exclude it from 1076 * the unpaged area so that it lies in the init area. 1077 */ 1078 void __weak boot_init_primary_early(void) 1079 { 1080 unsigned long pageable_part = 0; 1081 unsigned long e = PADDR_INVALID; 1082 struct transfer_list_entry *tl_e = NULL; 1083 1084 if (!IS_ENABLED(CFG_WITH_ARM_TRUSTED_FW)) 1085 e = boot_arg_nsec_entry; 1086 1087 if (IS_ENABLED(CFG_TRANSFER_LIST) && boot_arg_transfer_list) { 1088 /* map and save the TL */ 1089 mapped_tl = transfer_list_map(boot_arg_transfer_list); 1090 if (!mapped_tl) 1091 panic("Failed to map transfer list"); 1092 1093 transfer_list_dump(mapped_tl); 1094 tl_e = transfer_list_find(mapped_tl, TL_TAG_FDT); 1095 if (tl_e) { 1096 /* 1097 * Expand the data size of the DTB entry to the maximum 1098 * allocable mapped memory to reserve sufficient space 1099 * for inserting new nodes, avoid potentially corrupting 1100 * next entries. 1101 */ 1102 uint32_t dtb_max_sz = mapped_tl->max_size - 1103 mapped_tl->size + tl_e->data_size; 1104 1105 if (!transfer_list_set_data_size(mapped_tl, tl_e, 1106 dtb_max_sz)) { 1107 EMSG("Failed to extend DTB size to %#"PRIx32, 1108 dtb_max_sz); 1109 panic(); 1110 } 1111 } 1112 tl_e = transfer_list_find(mapped_tl, TL_TAG_OPTEE_PAGABLE_PART); 1113 } 1114 1115 if (IS_ENABLED(CFG_WITH_PAGER)) { 1116 if (IS_ENABLED(CFG_TRANSFER_LIST) && tl_e) 1117 pageable_part = 1118 get_le64(transfer_list_entry_data(tl_e)); 1119 else 1120 pageable_part = boot_arg_pageable_part; 1121 } 1122 1123 init_primary(pageable_part, e); 1124 } 1125 1126 static void boot_save_transfer_list(unsigned long zero_reg, 1127 unsigned long transfer_list, 1128 unsigned long fdt) 1129 { 1130 struct transfer_list_header *tl = (void *)transfer_list; 1131 struct transfer_list_entry *tl_e = NULL; 1132 1133 if (zero_reg != 0) 1134 panic("Incorrect transfer list register convention"); 1135 1136 if (!IS_ALIGNED_WITH_TYPE(transfer_list, struct transfer_list_header) || 1137 !IS_ALIGNED(transfer_list, TL_ALIGNMENT_FROM_ORDER(tl->alignment))) 1138 panic("Transfer list base address is not aligned"); 1139 1140 if (transfer_list_check_header(tl) == TL_OPS_NONE) 1141 panic("Invalid transfer list"); 1142 1143 tl_e = transfer_list_find(tl, TL_TAG_FDT); 1144 if (fdt != (unsigned long)transfer_list_entry_data(tl_e)) 1145 panic("DT does not match to the DT entry of the TL"); 1146 1147 boot_arg_transfer_list = transfer_list; 1148 } 1149 1150 #if defined(CFG_WITH_ARM_TRUSTED_FW) 1151 unsigned long boot_cpu_on_handler(unsigned long a0 __maybe_unused, 1152 unsigned long a1 __unused) 1153 { 1154 init_secondary_helper(PADDR_INVALID); 1155 return 0; 1156 } 1157 #else 1158 void boot_init_secondary(unsigned long nsec_entry) 1159 { 1160 init_secondary_helper(nsec_entry); 1161 } 1162 #endif 1163 1164 #if defined(CFG_BOOT_SECONDARY_REQUEST) 1165 void boot_set_core_ns_entry(size_t core_idx, uintptr_t entry, 1166 uintptr_t context_id) 1167 { 1168 ns_entry_contexts[core_idx].entry_point = entry; 1169 ns_entry_contexts[core_idx].context_id = context_id; 1170 dsb_ishst(); 1171 } 1172 1173 int boot_core_release(size_t core_idx, paddr_t entry) 1174 { 1175 if (!core_idx || core_idx >= CFG_TEE_CORE_NB_CORE) 1176 return -1; 1177 1178 ns_entry_contexts[core_idx].entry_point = entry; 1179 dmb(); 1180 spin_table[core_idx] = 1; 1181 dsb(); 1182 sev(); 1183 1184 return 0; 1185 } 1186 1187 /* 1188 * spin until secondary boot request, then returns with 1189 * the secondary core entry address. 1190 */ 1191 struct ns_entry_context *boot_core_hpen(void) 1192 { 1193 #ifdef CFG_PSCI_ARM32 1194 return &ns_entry_contexts[get_core_pos()]; 1195 #else 1196 do { 1197 wfe(); 1198 } while (!spin_table[get_core_pos()]); 1199 dmb(); 1200 return &ns_entry_contexts[get_core_pos()]; 1201 #endif 1202 } 1203 #endif 1204 1205 #if defined(CFG_CORE_ASLR) 1206 #if defined(CFG_DT) 1207 unsigned long __weak get_aslr_seed(void) 1208 { 1209 void *fdt = NULL; 1210 int rc = 0; 1211 const uint64_t *seed = NULL; 1212 int offs = 0; 1213 int len = 0; 1214 1215 if (!IS_ENABLED(CFG_CORE_SEL2_SPMC)) 1216 fdt = (void *)boot_arg_fdt; 1217 1218 if (!fdt) { 1219 DMSG("No fdt"); 1220 goto err; 1221 } 1222 1223 rc = fdt_check_header(fdt); 1224 if (rc) { 1225 DMSG("Bad fdt: %d", rc); 1226 goto err; 1227 } 1228 1229 offs = fdt_path_offset(fdt, "/secure-chosen"); 1230 if (offs < 0) { 1231 DMSG("Cannot find /secure-chosen"); 1232 goto err; 1233 } 1234 seed = fdt_getprop(fdt, offs, "kaslr-seed", &len); 1235 if (!seed || len != sizeof(*seed)) { 1236 DMSG("Cannot find valid kaslr-seed"); 1237 goto err; 1238 } 1239 1240 return fdt64_to_cpu(fdt64_ld(seed)); 1241 1242 err: 1243 /* Try platform implementation */ 1244 return plat_get_aslr_seed(); 1245 } 1246 #else /*!CFG_DT*/ 1247 unsigned long __weak get_aslr_seed(void) 1248 { 1249 /* Try platform implementation */ 1250 return plat_get_aslr_seed(); 1251 } 1252 #endif /*!CFG_DT*/ 1253 #endif /*CFG_CORE_ASLR*/ 1254 1255 static void *get_fdt_from_boot_info(struct ffa_boot_info_header_1_1 *hdr) 1256 { 1257 struct ffa_boot_info_1_1 *desc = NULL; 1258 uint8_t content_fmt = 0; 1259 uint8_t name_fmt = 0; 1260 void *fdt = NULL; 1261 int ret = 0; 1262 1263 if (hdr->signature != FFA_BOOT_INFO_SIGNATURE) { 1264 EMSG("Bad boot info signature %#"PRIx32, hdr->signature); 1265 panic(); 1266 } 1267 if (hdr->version != FFA_BOOT_INFO_VERSION) { 1268 EMSG("Bad boot info version %#"PRIx32, hdr->version); 1269 panic(); 1270 } 1271 if (hdr->desc_count != 1) { 1272 EMSG("Bad boot info descriptor count %#"PRIx32, 1273 hdr->desc_count); 1274 panic(); 1275 } 1276 desc = (void *)((vaddr_t)hdr + hdr->desc_offset); 1277 name_fmt = desc->flags & FFA_BOOT_INFO_FLAG_NAME_FORMAT_MASK; 1278 if (name_fmt == FFA_BOOT_INFO_FLAG_NAME_FORMAT_STRING) 1279 DMSG("Boot info descriptor name \"%16s\"", desc->name); 1280 else if (name_fmt == FFA_BOOT_INFO_FLAG_NAME_FORMAT_UUID) 1281 DMSG("Boot info descriptor UUID %pUl", (void *)desc->name); 1282 else 1283 DMSG("Boot info descriptor: unknown name format %"PRIu8, 1284 name_fmt); 1285 1286 content_fmt = (desc->flags & FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_MASK) >> 1287 FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_SHIFT; 1288 if (content_fmt != FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_ADDR) { 1289 EMSG("Bad boot info content format %"PRIu8", expected %u (address)", 1290 content_fmt, FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_ADDR); 1291 panic(); 1292 } 1293 1294 fdt = (void *)(vaddr_t)desc->contents; 1295 ret = fdt_check_full(fdt, desc->size); 1296 if (ret < 0) { 1297 EMSG("Invalid Device Tree at %p: error %d", fdt, ret); 1298 panic(); 1299 } 1300 return fdt; 1301 } 1302 1303 static void get_sec_mem_from_manifest(void *fdt, paddr_t *base, size_t *size) 1304 { 1305 int ret = 0; 1306 uint64_t num = 0; 1307 1308 ret = fdt_node_check_compatible(fdt, 0, "arm,ffa-manifest-1.0"); 1309 if (ret < 0) { 1310 EMSG("Invalid FF-A manifest at %p: error %d", fdt, ret); 1311 panic(); 1312 } 1313 ret = dt_getprop_as_number(fdt, 0, "load-address", &num); 1314 if (ret < 0) { 1315 EMSG("Can't read \"load-address\" from FF-A manifest at %p: error %d", 1316 fdt, ret); 1317 panic(); 1318 } 1319 *base = num; 1320 /* "mem-size" is currently an undocumented extension to the spec. */ 1321 ret = dt_getprop_as_number(fdt, 0, "mem-size", &num); 1322 if (ret < 0) { 1323 EMSG("Can't read \"mem-size\" from FF-A manifest at %p: error %d", 1324 fdt, ret); 1325 panic(); 1326 } 1327 *size = num; 1328 } 1329 1330 void __weak boot_save_args(unsigned long a0, unsigned long a1, 1331 unsigned long a2, unsigned long a3, 1332 unsigned long a4 __maybe_unused) 1333 { 1334 /* 1335 * Register use: 1336 * 1337 * Scenario A: Default arguments 1338 * a0 - CFG_CORE_FFA=y && CFG_CORE_SEL2_SPMC=n: 1339 * if non-NULL holds the TOS FW config [1] address 1340 * - CFG_CORE_FFA=y && 1341 (CFG_CORE_SEL2_SPMC=y || CFG_CORE_EL3_SPMC=y): 1342 * address of FF-A Boot Information Blob 1343 * - CFG_CORE_FFA=n: 1344 * if non-NULL holds the pagable part address 1345 * a1 - CFG_WITH_ARM_TRUSTED_FW=n (Armv7): 1346 * Armv7 standard bootarg #1 (kept track of in entry_a32.S) 1347 * a2 - CFG_CORE_SEL2_SPMC=n: 1348 * if non-NULL holds the system DTB address 1349 * - CFG_WITH_ARM_TRUSTED_FW=n (Armv7): 1350 * Armv7 standard bootarg #2 (system DTB address, kept track 1351 * of in entry_a32.S) 1352 * a3 - Not used 1353 * a4 - CFG_WITH_ARM_TRUSTED_FW=n: 1354 * Non-secure entry address 1355 * 1356 * [1] A TF-A concept: TOS_FW_CONFIG - Trusted OS Firmware 1357 * configuration file. Used by Trusted OS (BL32), that is, OP-TEE 1358 * here. This is also called Manifest DT, related to the Manifest DT 1359 * passed in the FF-A Boot Information Blob, but with a different 1360 * compatible string. 1361 1362 * Scenario B: FW Handoff via Transfer List 1363 * Note: FF-A and non-secure entry are not yet supported with 1364 * Transfer List 1365 * a0 - DTB address or 0 (AArch64) 1366 * - must be 0 (AArch32) 1367 * a1 - 1 << 32 | TRANSFER_LIST_SIGNATURE[0:31] (AArch64) 1368 * - 1 << 24 | TRANSFER_LIST_SIGNATURE[0:23] (AArch32) 1369 * a2 - must be 0 (AArch64) 1370 * - DTB address or 0 (AArch32) 1371 * a3 - Transfer list base address 1372 * a4 - Not used 1373 */ 1374 1375 if (IS_ENABLED(CFG_TRANSFER_LIST)) { 1376 if (IS_ENABLED(CFG_ARM64_core) && 1377 a1 == TL_HANDOFF_X1_VALUE(TL_REG_CONVENTION_VER)) { 1378 boot_save_transfer_list(a2, a3, a0); 1379 boot_arg_fdt = a0; 1380 } else if (IS_ENABLED(CFG_ARM32_core) && 1381 a1 == TL_HANDOFF_R1_VALUE(TL_REG_CONVENTION_VER)) { 1382 boot_save_transfer_list(a0, a3, a2); 1383 boot_arg_fdt = a2; 1384 } 1385 1386 return; 1387 } 1388 1389 if (!IS_ENABLED(CFG_CORE_SEL2_SPMC)) { 1390 #if defined(CFG_DT_ADDR) 1391 boot_arg_fdt = CFG_DT_ADDR; 1392 #else 1393 boot_arg_fdt = a2; 1394 #endif 1395 } 1396 1397 if (IS_ENABLED(CFG_CORE_FFA)) { 1398 if (IS_ENABLED(CFG_CORE_SEL2_SPMC) || 1399 IS_ENABLED(CFG_CORE_EL3_SPMC)) 1400 init_manifest_dt(get_fdt_from_boot_info((void *)a0)); 1401 else 1402 init_manifest_dt((void *)a0); 1403 if (IS_ENABLED(CFG_CORE_SEL2_SPMC) && 1404 IS_ENABLED(CFG_CORE_PHYS_RELOCATABLE)) { 1405 paddr_t base = 0; 1406 size_t size = 0; 1407 1408 get_sec_mem_from_manifest(get_manifest_dt(), 1409 &base, &size); 1410 core_mmu_set_secure_memory(base, size); 1411 } 1412 } else { 1413 if (IS_ENABLED(CFG_WITH_PAGER)) { 1414 #if defined(CFG_PAGEABLE_ADDR) 1415 boot_arg_pageable_part = CFG_PAGEABLE_ADDR; 1416 #else 1417 boot_arg_pageable_part = a0; 1418 #endif 1419 } 1420 if (!IS_ENABLED(CFG_WITH_ARM_TRUSTED_FW)) { 1421 #if defined(CFG_NS_ENTRY_ADDR) 1422 boot_arg_nsec_entry = CFG_NS_ENTRY_ADDR; 1423 #else 1424 boot_arg_nsec_entry = a4; 1425 #endif 1426 } 1427 } 1428 } 1429 1430 #if defined(CFG_TRANSFER_LIST) 1431 static TEE_Result release_transfer_list(void) 1432 { 1433 struct dt_descriptor *dt = get_external_dt_desc(); 1434 1435 if (!mapped_tl) 1436 return TEE_SUCCESS; 1437 1438 if (dt) { 1439 int ret = 0; 1440 struct transfer_list_entry *tl_e = NULL; 1441 1442 /* 1443 * Pack the DTB and update the transfer list before un-mapping 1444 */ 1445 ret = fdt_pack(dt->blob); 1446 if (ret < 0) { 1447 EMSG("Failed to pack Device Tree at 0x%" PRIxPA 1448 ": error %d", virt_to_phys(dt->blob), ret); 1449 panic(); 1450 } 1451 1452 tl_e = transfer_list_find(mapped_tl, TL_TAG_FDT); 1453 assert(dt->blob == transfer_list_entry_data(tl_e)); 1454 transfer_list_set_data_size(mapped_tl, tl_e, 1455 fdt_totalsize(dt->blob)); 1456 dt->blob = NULL; 1457 } 1458 1459 transfer_list_unmap_sync(mapped_tl); 1460 mapped_tl = NULL; 1461 1462 return TEE_SUCCESS; 1463 } 1464 1465 boot_final(release_transfer_list); 1466 #endif 1467