1 // SPDX-License-Identifier: BSD-2-Clause 2 /* 3 * Copyright (c) 2015-2023, Linaro Limited 4 * Copyright (c) 2023, Arm Limited 5 */ 6 7 #include <arm.h> 8 #include <assert.h> 9 #include <compiler.h> 10 #include <config.h> 11 #include <console.h> 12 #include <crypto/crypto.h> 13 #include <drivers/gic.h> 14 #include <dt-bindings/interrupt-controller/arm-gic.h> 15 #include <ffa.h> 16 #include <initcall.h> 17 #include <inttypes.h> 18 #include <io.h> 19 #include <keep.h> 20 #include <kernel/asan.h> 21 #include <kernel/boot.h> 22 #include <kernel/dt.h> 23 #include <kernel/linker.h> 24 #include <kernel/misc.h> 25 #include <kernel/panic.h> 26 #include <kernel/tee_misc.h> 27 #include <kernel/thread.h> 28 #include <kernel/tpm.h> 29 #include <kernel/transfer_list.h> 30 #include <libfdt.h> 31 #include <malloc.h> 32 #include <memtag.h> 33 #include <mm/core_memprot.h> 34 #include <mm/core_mmu.h> 35 #include <mm/fobj.h> 36 #include <mm/tee_mm.h> 37 #include <mm/tee_pager.h> 38 #include <sm/psci.h> 39 #include <trace.h> 40 #include <utee_defines.h> 41 #include <util.h> 42 43 #include <platform_config.h> 44 45 #if !defined(CFG_WITH_ARM_TRUSTED_FW) 46 #include <sm/sm.h> 47 #endif 48 49 #if defined(CFG_WITH_VFP) 50 #include <kernel/vfp.h> 51 #endif 52 53 /* 54 * In this file we're using unsigned long to represent physical pointers as 55 * they are received in a single register when OP-TEE is initially entered. 56 * This limits 32-bit systems to only use make use of the lower 32 bits 57 * of a physical address for initial parameters. 58 * 59 * 64-bit systems on the other hand can use full 64-bit physical pointers. 
 */
#define PADDR_INVALID		ULONG_MAX

#if defined(CFG_BOOT_SECONDARY_REQUEST)
/*
 * Per-core non-secure entry point and context ID recorded for releasing
 * secondary cores.
 */
struct ns_entry_context {
	uintptr_t entry_point;
	uintptr_t context_id;
};
struct ns_entry_context ns_entry_contexts[CFG_TEE_CORE_NB_CORE];
static uint32_t spin_table[CFG_TEE_CORE_NB_CORE];
#endif

#ifdef CFG_BOOT_SYNC_CPU
/*
 * Array used when booting, to synchronize cpu.
 * When 0, the cpu has not started.
 * When 1, it has started
 */
uint32_t sem_cpu_sync[CFG_TEE_CORE_NB_CORE];
DECLARE_KEEP_PAGER(sem_cpu_sync);
#endif

/*
 * Boot arguments captured from the registers OP-TEE was entered with.
 * Kept in nexus BSS so they survive with virtualization enabled.
 */
static void *manifest_dt __nex_bss;
static unsigned long boot_arg_fdt __nex_bss;
static unsigned long boot_arg_nsec_entry __nex_bss;
static unsigned long boot_arg_pageable_part __nex_bss;
static unsigned long boot_arg_transfer_list __nex_bss;
static struct transfer_list_header *mapped_tl __nex_bss;

#ifdef CFG_SECONDARY_INIT_CNTFRQ
/* CNTFRQ value saved on the primary core, replayed on secondary cores */
static uint32_t cntfrq;
#endif

/* May be overridden in plat-$(PLATFORM)/main.c */
__weak void plat_primary_init_early(void)
{
}
DECLARE_KEEP_PAGER(plat_primary_init_early);

/* May be overridden in plat-$(PLATFORM)/main.c */
__weak void boot_primary_init_intc(void)
{
}

/* May be overridden in plat-$(PLATFORM)/main.c */
__weak void boot_secondary_init_intc(void)
{
}

/* May be overridden in plat-$(PLATFORM)/main.c */
__weak unsigned long plat_get_aslr_seed(void)
{
	DMSG("Warning: no ASLR seed");

	return 0;
}

#if defined(_CFG_CORE_STACK_PROTECTOR) || defined(CFG_WITH_STACK_CANARIES)
/*
 * Generate random stack canary value on boot up.
 *
 * Fills @buf with @ncan canaries of @size bytes each and then forces the
 * first byte of every canary to zero.
 */
__weak void plat_get_random_stack_canaries(void *buf, size_t ncan, size_t size)
{
	TEE_Result ret = TEE_ERROR_GENERIC;
	size_t i = 0;

	assert(buf && ncan && size);

	/*
	 * With virtualization the RNG is not initialized in Nexus core.
	 * Need to override with platform specific implementation.
	 */
	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
		IMSG("WARNING: Using fixed value for stack canary");
		memset(buf, 0xab, ncan * size);
		goto out;
	}

	ret = crypto_rng_read(buf, ncan * size);
	if (ret != TEE_SUCCESS)
		panic("Failed to generate random stack canary");

out:
	/* Leave null byte in canary to prevent string base exploit */
	for (i = 0; i < ncan; i++)
		*((uint8_t *)buf + size * i) = 0;
}
#endif /* _CFG_CORE_STACK_PROTECTOR || CFG_WITH_STACK_CANARIES */

/*
 * This function is called as a guard after each smc call which is not
 * supposed to return.
 */
void __panic_at_smc_return(void)
{
	panic();
}

#if defined(CFG_WITH_ARM_TRUSTED_FW)
void init_sec_mon(unsigned long nsec_entry __maybe_unused)
{
	assert(nsec_entry == PADDR_INVALID);
	/* Do nothing as we don't have a secure monitor */
}
#else
/* May be overridden in plat-$(PLATFORM)/main.c */
__weak void init_sec_mon(unsigned long nsec_entry)
{
	struct sm_nsec_ctx *nsec_ctx;

	assert(nsec_entry != PADDR_INVALID);

	/* Initialize secure monitor */
	nsec_ctx = sm_get_nsec_ctx();
	nsec_ctx->mon_lr = nsec_entry;
	nsec_ctx->mon_spsr = CPSR_MODE_SVC | CPSR_I;
	/* Bit 0 of the entry address selects Thumb state on return */
	if (nsec_entry & 1)
		nsec_ctx->mon_spsr |= CPSR_T;
}
#endif

#if defined(CFG_WITH_ARM_TRUSTED_FW)
static void init_vfp_nsec(void)
{
}
#else
static void init_vfp_nsec(void)
{
	/* Normal world can use CP10 and CP11 (SIMD/VFP) */
	write_nsacr(read_nsacr() | NSACR_CP10 | NSACR_CP11);
}
#endif

/*
 * Panic unless every Crypto Extension instruction group required by the
 * enabled CFG_CRYPTO_*_ARM_CE options is implemented by the hardware.
 * All missing groups are reported before panicking.
 */
static void check_crypto_extensions(void)
{
	bool ce_supported = true;

	if (!feat_aes_implemented() &&
	    IS_ENABLED(CFG_CRYPTO_AES_ARM_CE)) {
		EMSG("AES instructions are not supported");
		ce_supported = false;
	}

	if (!feat_sha1_implemented() &&
	    IS_ENABLED(CFG_CRYPTO_SHA1_ARM_CE)) {
		EMSG("SHA1 instructions are not supported");
		ce_supported = false;
	}

	if (!feat_sha256_implemented() &&
	    IS_ENABLED(CFG_CRYPTO_SHA256_ARM_CE)) {
		EMSG("SHA256 instructions are not supported");
		ce_supported = false;
	}

	/* Check aarch64 specific instructions */
	if (IS_ENABLED(CFG_ARM64_core)) {
		if (!feat_sha512_implemented() &&
		    IS_ENABLED(CFG_CRYPTO_SHA512_ARM_CE)) {
			EMSG("SHA512 instructions are not supported");
			ce_supported = false;
		}

		if (!feat_sha3_implemented() &&
		    IS_ENABLED(CFG_CRYPTO_SHA3_ARM_CE)) {
			EMSG("SHA3 instructions are not supported");
			ce_supported = false;
		}

		if (!feat_sm3_implemented() &&
		    IS_ENABLED(CFG_CRYPTO_SM3_ARM_CE)) {
			EMSG("SM3 instructions are not supported");
			ce_supported = false;
		}

		if (!feat_sm4_implemented() &&
		    IS_ENABLED(CFG_CRYPTO_SM4_ARM_CE)) {
			EMSG("SM4 instructions are not supported");
			ce_supported = false;
		}
	}

	if (!ce_supported)
		panic("HW doesn't support CE instructions");
}

#if defined(CFG_WITH_VFP)

#ifdef ARM32
static void init_vfp_sec(void)
{
	uint32_t cpacr = read_cpacr();

	/*
	 * Enable Advanced SIMD functionality.
	 * Enable use of D16-D31 of the Floating-point Extension register
	 * file.
	 */
	cpacr &= ~(CPACR_ASEDIS | CPACR_D32DIS);
	/*
	 * Enable usage of CP10 and CP11 (SIMD/VFP) (both kernel and user
	 * mode.
	 */
	cpacr |= CPACR_CP(10, CPACR_CP_ACCESS_FULL);
	cpacr |= CPACR_CP(11, CPACR_CP_ACCESS_FULL);
	write_cpacr(cpacr);
}
#endif /* ARM32 */

#ifdef ARM64
static void init_vfp_sec(void)
{
	/* Not using VFP until thread_kernel_enable_vfp() */
	vfp_disable();
}
#endif /* ARM64 */

#else /* CFG_WITH_VFP */

static void init_vfp_sec(void)
{
	/* Not using VFP */
}
#endif

#ifdef CFG_SECONDARY_INIT_CNTFRQ
static void primary_save_cntfrq(void)
{
	assert(cntfrq == 0);

	/*
	 * CNTFRQ should be initialized on the primary CPU by a
	 * previous boot stage
	 */
	cntfrq = read_cntfrq();
}

static void secondary_init_cntfrq(void)
{
	assert(cntfrq != 0);
	write_cntfrq(cntfrq);
}
#else /* CFG_SECONDARY_INIT_CNTFRQ */
static void primary_save_cntfrq(void)
{
}

static void secondary_init_cntfrq(void)
{
}
#endif

#ifdef CFG_CORE_SANITIZE_KADDRESS
/* Run the constructors collected between __ctor_list and __ctor_end */
static void init_run_constructors(void)
{
	const vaddr_t *ctor;

	for (ctor = &__ctor_list; ctor < &__ctor_end; ctor++)
		((void (*)(void))(*ctor))();
}

/*
 * Set up the KASAN shadow area, open access to the sections that no
 * constructor tags, then enable address sanitizing.
 */
static void init_asan(void)
{

	/*
	 * CFG_ASAN_SHADOW_OFFSET is also supplied as
	 * -fasan-shadow-offset=$(CFG_ASAN_SHADOW_OFFSET) to the compiler.
	 * Since all the needed values to calculate the value of
	 * CFG_ASAN_SHADOW_OFFSET isn't available in to make we need to
	 * calculate it in advance and hard code it into the platform
	 * conf.mk. Here where we have all the needed values we double
	 * check that the compiler is supplied the correct value.
	 */

#define __ASAN_SHADOW_START \
	ROUNDUP(TEE_RAM_START + (TEE_RAM_VA_SIZE * 8) / 9 - 8, 8)
	assert(__ASAN_SHADOW_START == (vaddr_t)&__asan_shadow_start);
#define __CFG_ASAN_SHADOW_OFFSET \
	(__ASAN_SHADOW_START - (TEE_RAM_START / 8))
	COMPILE_TIME_ASSERT(CFG_ASAN_SHADOW_OFFSET == __CFG_ASAN_SHADOW_OFFSET);
#undef __ASAN_SHADOW_START
#undef __CFG_ASAN_SHADOW_OFFSET

	/*
	 * Assign area covered by the shadow area, everything from start up
	 * to the beginning of the shadow area.
	 */
	asan_set_shadowed((void *)TEE_LOAD_ADDR, &__asan_shadow_start);

	/*
	 * Add access to areas that aren't opened automatically by a
	 * constructor.
	 */
	asan_tag_access(&__ctor_list, &__ctor_end);
	asan_tag_access(__rodata_start, __rodata_end);
#ifdef CFG_WITH_PAGER
	asan_tag_access(__pageable_start, __pageable_end);
#endif /*CFG_WITH_PAGER*/
	asan_tag_access(__nozi_start, __nozi_end);
#ifdef ARM32
	asan_tag_access(__exidx_start, __exidx_end);
	asan_tag_access(__extab_start, __extab_end);
#endif

	init_run_constructors();

	/* Everything is tagged correctly, let's start address sanitizing. */
	asan_start();
}
#else /*CFG_CORE_SANITIZE_KADDRESS*/
static void init_asan(void)
{
}
#endif /*CFG_CORE_SANITIZE_KADDRESS*/

#if defined(CFG_MEMTAG)
/* Called from entry_a64.S only when MEMTAG is configured */
void boot_init_memtag(void)
{
	paddr_t base = 0;
	paddr_size_t size = 0;

	memtag_init_ops(feat_mte_implemented());
	core_mmu_get_secure_memory(&base, &size);
	/* Clear the tags of all secure memory (tag value 0) */
	memtag_set_tags((void *)(vaddr_t)base, size, 0);
}
#endif

#ifdef CFG_WITH_PAGER

#ifdef CFG_CORE_SANITIZE_KADDRESS
/*
 * Reserve the part of @pool overlapping the ASAN shadow mapping so the
 * pager never hands those pages out.
 */
static void carve_out_asan_mem(tee_mm_pool_t *pool)
{
	const size_t s = pool->hi - pool->lo;
	tee_mm_entry_t *mm;
	paddr_t apa = ASAN_MAP_PA;
	size_t asz = ASAN_MAP_SZ;

	if (core_is_buffer_outside(apa, asz, pool->lo, s))
		return;

	/* Reserve the shadow area */
	if (!core_is_buffer_inside(apa, asz, pool->lo, s)) {
		if (apa < pool->lo) {
			/*
			 * ASAN buffer is overlapping with the beginning of
			 * the pool.
			 */
			asz -= pool->lo - apa;
			apa = pool->lo;
		} else {
			/*
			 * ASAN buffer is overlapping with the end of the
			 * pool.
			 */
			asz = pool->hi - apa;
		}
	}
	mm = tee_mm_alloc2(pool, apa, asz);
	assert(mm);
}
#else
static void carve_out_asan_mem(tee_mm_pool_t *pool __unused)
{
}
#endif

static void print_pager_pool_size(void)
{
	struct tee_pager_stats __maybe_unused stats;

	tee_pager_get_stats(&stats);
	IMSG("Pager pool size: %zukB",
	     stats.npages_all * SMALL_PAGE_SIZE / 1024);
}

/* Initialize the virtual address pool covering core's VA space */
static void init_vcore(tee_mm_pool_t *mm_vcore)
{
	const vaddr_t begin = VCORE_START_VA;
	size_t size = TEE_RAM_VA_SIZE;

#ifdef CFG_CORE_SANITIZE_KADDRESS
	/*
	 * Carve out asan memory, flat maped after core memory.
	 * NOTE(review): the condition tests ASAN_SHADOW_PA while the new
	 * size is derived from ASAN_MAP_PA — confirm the two are meant to
	 * differ here.
	 */
	if (begin + size > ASAN_SHADOW_PA)
		size = ASAN_MAP_PA - begin;
#endif

	if (!tee_mm_init(mm_vcore, begin, size, SMALL_PAGE_SHIFT,
			 TEE_MM_POOL_NO_FLAGS))
		panic("tee_mm_vcore init failed");
}

/*
 * With CFG_CORE_ASLR=y the init part is relocated very early during boot.
 * The init part is also paged just as the rest of the normal paged code, with
 * the difference that it's preloaded during boot. When the backing store
 * is configured the entire paged binary is copied in place and then also
 * the init part. Since the init part has been relocated (references to
 * addresses updated to compensate for the new load address) this has to be
 * undone for the hashes of those pages to match with the original binary.
 *
 * If CFG_CORE_ASLR=n, nothing needs to be done as the code/ro pages are
 * unchanged.
 */
static void undo_init_relocation(uint8_t *paged_store __maybe_unused)
{
#ifdef CFG_CORE_ASLR
	unsigned long *ptr = NULL;
	const uint32_t *reloc = NULL;
	const uint32_t *reloc_end = NULL;
	unsigned long offs = boot_mmu_config.map_offset;
	const struct boot_embdata *embdata = (const void *)__init_end;
	vaddr_t addr_end = (vaddr_t)__init_end - offs - TEE_LOAD_ADDR;
	vaddr_t addr_start = (vaddr_t)__init_start - offs - TEE_LOAD_ADDR;

	reloc = (const void *)((vaddr_t)embdata + embdata->reloc_offset);
	reloc_end = reloc + embdata->reloc_len / sizeof(*reloc);

	/*
	 * NOTE(review): the early break assumes the relocation table is
	 * sorted by address — confirm against the table generator.
	 */
	for (; reloc < reloc_end; reloc++) {
		if (*reloc < addr_start)
			continue;
		if (*reloc >= addr_end)
			break;
		ptr = (void *)(paged_store + *reloc - addr_start);
		*ptr -= offs;
	}
#endif
}

/*
 * Create the read-only paged fobj backing @mm, with relocation info
 * attached when ASLR is enabled.
 */
static struct fobj *ro_paged_alloc(tee_mm_entry_t *mm, void *hashes,
				   void *store)
{
	const unsigned int num_pages = tee_mm_get_bytes(mm) / SMALL_PAGE_SIZE;
#ifdef CFG_CORE_ASLR
	unsigned int reloc_offs = (vaddr_t)__pageable_start - VCORE_START_VA;
	const struct boot_embdata *embdata = (const void *)__init_end;
	const void *reloc = __init_end + embdata->reloc_offset;

	return fobj_ro_reloc_paged_alloc(num_pages, hashes, reloc_offs,
					 reloc, embdata->reloc_len, store);
#else
	return fobj_ro_paged_alloc(num_pages, hashes, store);
#endif
}

/*
 * Pager variant: set up heaps, copy the pageable binary into its backing
 * store, verify every page against the embedded SHA-256 hashes and hand
 * the pageable range over to the pager.
 */
static void init_runtime(unsigned long pageable_part)
{
	size_t n;
	size_t init_size = (size_t)(__init_end - __init_start);
	size_t pageable_start = (size_t)__pageable_start;
	size_t pageable_end = (size_t)__pageable_end;
	size_t pageable_size = pageable_end - pageable_start;
	vaddr_t tzsram_end = TZSRAM_BASE + TZSRAM_SIZE - TEE_LOAD_ADDR +
			     VCORE_START_VA;
	size_t hash_size = (pageable_size / SMALL_PAGE_SIZE) *
			   TEE_SHA256_HASH_SIZE;
	const struct boot_embdata *embdata = (const void *)__init_end;
	const void *tmp_hashes = NULL;
	tee_mm_entry_t *mm = NULL;
	struct fobj *fobj = NULL;
	uint8_t *paged_store = NULL;
	uint8_t *hashes = NULL;

	assert(pageable_size % SMALL_PAGE_SIZE == 0);
	assert(embdata->total_len >= embdata->hashes_offset +
				     embdata->hashes_len);
	assert(hash_size == embdata->hashes_len);

	tmp_hashes = __init_end + embdata->hashes_offset;

	init_asan();

	/* Add heap2 first as heap1 may be too small as initial bget pool */
	malloc_add_pool(__heap2_start, __heap2_end - __heap2_start);
	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);

	/*
	 * This needs to be initialized early to support address lookup
	 * in MEM_AREA_TEE_RAM
	 */
	tee_pager_early_init();

	hashes = malloc(hash_size);
	IMSG_RAW("\n");
	IMSG("Pager is enabled. Hashes: %zu bytes", hash_size);
	assert(hashes);
	asan_memcpy_unchecked(hashes, tmp_hashes, hash_size);

	/*
	 * Need tee_mm_sec_ddr initialized to be able to allocate secure
	 * DDR below.
	 */
	core_mmu_init_ta_ram();

	carve_out_asan_mem(&tee_mm_sec_ddr);

	mm = tee_mm_alloc(&tee_mm_sec_ddr, pageable_size);
	assert(mm);
	paged_store = phys_to_virt(tee_mm_get_smem(mm), MEM_AREA_TA_RAM,
				   pageable_size);
	/*
	 * Load pageable part in the dedicated allocated area:
	 * - Move pageable non-init part into pageable area. Note bootloader
	 *   may have loaded it anywhere in TA RAM hence use memmove().
	 * - Copy pageable init part from current location into pageable area.
	 */
	memmove(paged_store + init_size,
		phys_to_virt(pageable_part,
			     core_mmu_get_type_by_pa(pageable_part),
			     __pageable_part_end - __pageable_part_start),
		__pageable_part_end - __pageable_part_start);
	asan_memcpy_unchecked(paged_store, __init_start, init_size);
	/*
	 * Undo eventual relocation for the init part so the hash checks
	 * can pass.
	 */
	undo_init_relocation(paged_store);

	/* Check that hashes of what's in pageable area is OK */
	DMSG("Checking hashes of pageable area");
	for (n = 0; (n * SMALL_PAGE_SIZE) < pageable_size; n++) {
		const uint8_t *hash = hashes + n * TEE_SHA256_HASH_SIZE;
		const uint8_t *page = paged_store + n * SMALL_PAGE_SIZE;
		TEE_Result res;

		DMSG("hash pg_idx %zu hash %p page %p", n, hash, page);
		res = hash_sha256_check(hash, page, SMALL_PAGE_SIZE);
		if (res != TEE_SUCCESS) {
			EMSG("Hash failed for page %zu at %p: res 0x%x",
			     n, (void *)page, res);
			panic();
		}
	}

	/*
	 * Assert prepaged init sections are page aligned so that nothing
	 * trails uninited at the end of the premapped init area.
	 */
	assert(!(init_size & SMALL_PAGE_MASK));

	/*
	 * Initialize the virtual memory pool used for main_mmu_l2_ttb which
	 * is supplied to tee_pager_init() below.
	 */
	init_vcore(&tee_mm_vcore);

	/*
	 * Assign alias area for pager end of the small page block the rest
	 * of the binary is loaded into. We're taking more than needed, but
	 * we're guaranteed to not need more than the physical amount of
	 * TZSRAM.
	 */
	mm = tee_mm_alloc2(&tee_mm_vcore,
			   (vaddr_t)tee_mm_vcore.lo +
			   tee_mm_vcore.size - TZSRAM_SIZE,
			   TZSRAM_SIZE);
	assert(mm);
	tee_pager_set_alias_area(mm);

	/*
	 * Claim virtual memory which isn't paged.
	 * Linear memory (flat map core memory) ends there.
	 */
	mm = tee_mm_alloc2(&tee_mm_vcore, VCORE_UNPG_RX_PA,
			   (vaddr_t)(__pageable_start - VCORE_UNPG_RX_PA));
	assert(mm);

	/*
	 * Allocate virtual memory for the pageable area and let the pager
	 * take charge of all the pages already assigned to that memory.
	 */
	mm = tee_mm_alloc2(&tee_mm_vcore, (vaddr_t)__pageable_start,
			   pageable_size);
	assert(mm);
	fobj = ro_paged_alloc(mm, hashes, paged_store);
	assert(fobj);
	tee_pager_add_core_region(tee_mm_get_smem(mm), PAGED_REGION_TYPE_RO,
				  fobj);
	fobj_put(fobj);

	tee_pager_add_pages(pageable_start, init_size / SMALL_PAGE_SIZE, false);
	tee_pager_add_pages(pageable_start + init_size,
			    (pageable_size - init_size) / SMALL_PAGE_SIZE,
			    true);
	if (pageable_end < tzsram_end)
		tee_pager_add_pages(pageable_end, (tzsram_end - pageable_end) /
						  SMALL_PAGE_SIZE, true);

	/*
	 * There may be physical pages in TZSRAM before the core load address.
	 * These pages can be added to the physical pages pool of the pager.
	 * This setup may happen when a the secure bootloader runs in TZRAM
	 * and its memory can be reused by OP-TEE once boot stages complete.
	 */
	tee_pager_add_pages(tee_mm_vcore.lo,
			(VCORE_UNPG_RX_PA - tee_mm_vcore.lo) / SMALL_PAGE_SIZE,
			true);

	print_pager_pool_size();
}
#else

/* Non-pager variant: only ASAN and the heap(s) need to be set up */
static void init_runtime(unsigned long pageable_part __unused)
{
	init_asan();

	/*
	 * By default whole OP-TEE uses malloc, so we need to initialize
	 * it early. But, when virtualization is enabled, malloc is used
	 * only by TEE runtime, so malloc should be initialized later, for
	 * every virtual partition separately. Core code uses nex_malloc
	 * instead.
	 */
#ifdef CFG_NS_VIRTUALIZATION
	nex_malloc_add_pool(__nex_heap_start, __nex_heap_end -
					      __nex_heap_start);
#else
	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
#endif

	IMSG_RAW("\n");
}
#endif

#if defined(CFG_DT)
/*
 * Add /firmware/optee to the external DT so the normal world knows how to
 * reach OP-TEE (SMC method, optional async-notification interrupt).
 * Returns 0 on success or if the node already exists, -1 on error.
 */
static int add_optee_dt_node(struct dt_descriptor *dt)
{
	int offs;
	int ret;

	if (fdt_path_offset(dt->blob, "/firmware/optee") >= 0) {
		DMSG("OP-TEE Device Tree node already exists!");
		return 0;
	}

	offs = fdt_path_offset(dt->blob, "/firmware");
	if (offs < 0) {
		offs = add_dt_path_subnode(dt, "/", "firmware");
		if (offs < 0)
			return -1;
	}

	offs = fdt_add_subnode(dt->blob, offs, "optee");
	if (offs < 0)
		return -1;

	ret = fdt_setprop_string(dt->blob, offs, "compatible",
				 "linaro,optee-tz");
	if (ret < 0)
		return -1;
	ret = fdt_setprop_string(dt->blob, offs, "method", "smc");
	if (ret < 0)
		return -1;

	if (CFG_CORE_ASYNC_NOTIF_GIC_INTID) {
		/*
		 * The format of the interrupt property is defined by the
		 * binding of the interrupt domain root. In this case it's
		 * one Arm GIC v1, v2 or v3 so we must be compatible with
		 * these.
		 *
		 * An SPI type of interrupt is indicated with a 0 in the
		 * first cell. A PPI type is indicated with value 1.
		 *
		 * The interrupt number goes in the second cell where
		 * SPIs ranges from 0 to 987 and PPI ranges from 0 to 15.
		 *
		 * Flags are passed in the third cells.
		 */
		uint32_t itr_trigger = 0;
		uint32_t itr_type = 0;
		uint32_t itr_id = 0;
		uint32_t val[3] = { };

		/* PPI are visible only in current CPU cluster */
		static_assert(IS_ENABLED(CFG_CORE_FFA) ||
			      !CFG_CORE_ASYNC_NOTIF_GIC_INTID ||
			      (CFG_CORE_ASYNC_NOTIF_GIC_INTID >=
			       GIC_SPI_BASE) ||
			      ((CFG_TEE_CORE_NB_CORE <= 8) &&
			       (CFG_CORE_ASYNC_NOTIF_GIC_INTID >=
				GIC_PPI_BASE)));

		if (CFG_CORE_ASYNC_NOTIF_GIC_INTID >= GIC_SPI_BASE) {
			itr_type = GIC_SPI;
			itr_id = CFG_CORE_ASYNC_NOTIF_GIC_INTID - GIC_SPI_BASE;
			itr_trigger = IRQ_TYPE_EDGE_RISING;
		} else {
			itr_type = GIC_PPI;
			itr_id = CFG_CORE_ASYNC_NOTIF_GIC_INTID - GIC_PPI_BASE;
			itr_trigger = IRQ_TYPE_EDGE_RISING |
				      GIC_CPU_MASK_SIMPLE(CFG_TEE_CORE_NB_CORE);
		}

		/* DT property values are big-endian */
		val[0] = TEE_U32_TO_BIG_ENDIAN(itr_type);
		val[1] = TEE_U32_TO_BIG_ENDIAN(itr_id);
		val[2] = TEE_U32_TO_BIG_ENDIAN(itr_trigger);

		ret = fdt_setprop(dt->blob, offs, "interrupts", val,
				  sizeof(val));
		if (ret < 0)
			return -1;
	}
	return 0;
}

#ifdef CFG_PSCI_ARM32
static int append_psci_compatible(void *fdt, int offs, const char *str)
{
	return fdt_appendprop(fdt, offs, "compatible", str, strlen(str) + 1);
}

/* Add a /psci node advertising SMC-based PSCI 1.0/0.2 to the external DT */
static int dt_add_psci_node(struct dt_descriptor *dt)
{
	int offs;

	if (fdt_path_offset(dt->blob, "/psci") >= 0) {
		DMSG("PSCI Device Tree node already exists!");
		return 0;
	}

	offs = add_dt_path_subnode(dt, "/", "psci");
	if (offs < 0)
		return -1;
	if (append_psci_compatible(dt->blob, offs, "arm,psci-1.0"))
		return -1;
	if (append_psci_compatible(dt->blob, offs, "arm,psci-0.2"))
		return -1;
	if (append_psci_compatible(dt->blob, offs, "arm,psci"))
		return -1;
	if (fdt_setprop_string(dt->blob, offs, "method", "smc"))
		return -1;
	if (fdt_setprop_u32(dt->blob, offs, "cpu_suspend", PSCI_CPU_SUSPEND))
		return -1;
	if (fdt_setprop_u32(dt->blob, offs, "cpu_off", PSCI_CPU_OFF))
		return -1;
	if (fdt_setprop_u32(dt->blob, offs, "cpu_on", PSCI_CPU_ON))
		return -1;
	if (fdt_setprop_u32(dt->blob, offs, "sys_poweroff", PSCI_SYSTEM_OFF))
		return -1;
	if (fdt_setprop_u32(dt->blob, offs, "sys_reset", PSCI_SYSTEM_RESET))
		return -1;
	return 0;
}

/*
 * Return 0 if any string in the node's "compatible" list starts with
 * @prefix, -1 otherwise.
 */
static int check_node_compat_prefix(struct dt_descriptor *dt, int offs,
				    const char *prefix)
{
	const size_t prefix_len = strlen(prefix);
	size_t l;
	int plen;
	const char *prop;

	prop = fdt_getprop(dt->blob, offs, "compatible", &plen);
	if (!prop)
		return -1;

	while (plen > 0) {
		/*
		 * NOTE(review): memcmp() reads prefix_len bytes even when
		 * the remaining list entry is shorter — relies on the
		 * property being a well-formed NUL-separated string list;
		 * confirm this cannot over-read past the property.
		 */
		if (memcmp(prop, prefix, prefix_len) == 0)
			return 0; /* match */

		l = strlen(prop) + 1;
		prop += l;
		plen -= l;
	}

	return -1;
}

/* Set enable-method = "psci" on every arm,cortex-a* node lacking one */
static int dt_add_psci_cpu_enable_methods(struct dt_descriptor *dt)
{
	int offs = 0;

	while (1) {
		offs = fdt_next_node(dt->blob, offs, NULL);
		if (offs < 0)
			break;
		if (fdt_getprop(dt->blob, offs, "enable-method", NULL))
			continue; /* already set */
		if (check_node_compat_prefix(dt, offs, "arm,cortex-a"))
			continue; /* no compatible */
		if (fdt_setprop_string(dt->blob, offs, "enable-method", "psci"))
			return -1;
		/* Need to restart scanning as offsets may have changed */
		offs = 0;
	}
	return 0;
}

static int config_psci(struct dt_descriptor *dt)
{
	if (dt_add_psci_node(dt))
		return -1;
	return dt_add_psci_cpu_enable_methods(dt);
}
#else
static int config_psci(struct dt_descriptor *dt __unused)
{
	return 0;
}
#endif /*CFG_PSCI_ARM32*/

#ifdef CFG_CORE_DYN_SHM
/*
 * Read one big-endian DT cell value (1 or 2 cells wide) at *@offs and
 * advance *@offs past it.
 */
static uint64_t get_dt_val_and_advance(const void *data, size_t *offs,
				       uint32_t cell_size)
{
	uint64_t rv = 0;

	if (cell_size == 1) {
		uint32_t v;

		memcpy(&v, (const uint8_t *)data + *offs, sizeof(v));
		*offs += sizeof(v);
		rv = fdt32_to_cpu(v);
	} else {
		uint64_t v;

		memcpy(&v, (const uint8_t *)data + *offs, sizeof(v));
		*offs += sizeof(v);
		rv = fdt64_to_cpu(v);
	}

	return rv;
}

/*
 * Find all non-secure memory from DT. Memory marked inaccessible by Secure
 * World is ignored since it could not be mapped to be used as dynamic shared
 * memory.
 *
 * With @mem == NULL only counts the regions; otherwise also fills @mem.
 * Returns the number of regions found.
 */
static int get_nsec_memory_helper(void *fdt, struct core_mmu_phys_mem *mem)
{
	const uint8_t *prop = NULL;
	uint64_t a = 0;
	uint64_t l = 0;
	size_t prop_offs = 0;
	size_t prop_len = 0;
	int elems_total = 0;
	int addr_size = 0;
	int len_size = 0;
	int offs = 0;
	size_t n = 0;
	int len = 0;

	addr_size = fdt_address_cells(fdt, 0);
	if (addr_size < 0)
		return 0;

	len_size = fdt_size_cells(fdt, 0);
	if (len_size < 0)
		return 0;

	while (true) {
		offs = fdt_node_offset_by_prop_value(fdt, offs, "device_type",
						     "memory",
						     sizeof("memory"));
		if (offs < 0)
			break;

		if (fdt_get_status(fdt, offs) != (DT_STATUS_OK_NSEC |
						  DT_STATUS_OK_SEC))
			continue;

		prop = fdt_getprop(fdt, offs, "reg", &len);
		if (!prop)
			continue;

		prop_len = len;
		for (n = 0, prop_offs = 0; prop_offs < prop_len; n++) {
			a = get_dt_val_and_advance(prop, &prop_offs, addr_size);
			/* Truncated (addr, size) pair: don't count it */
			if (prop_offs >= prop_len) {
				n--;
				break;
			}

			l = get_dt_val_and_advance(prop, &prop_offs, len_size);
			if (mem) {
				mem->type = MEM_AREA_DDR_OVERALL;
				mem->addr = a;
				mem->size = l;
				mem++;
			}
		}

		elems_total += n;
	}

	return elems_total;
}

/*
 * Allocate and return the array of non-secure memory regions found in
 * @fdt, storing the element count in *@nelems. Returns NULL when none
 * are found; panics on allocation failure.
 */
static struct core_mmu_phys_mem *get_nsec_memory(void *fdt, size_t *nelems)
{
	struct core_mmu_phys_mem *mem = NULL;
	int elems_total = 0;

	/* First pass counts, second pass fills the allocated array */
	elems_total = get_nsec_memory_helper(fdt, NULL);
	if (elems_total <= 0)
		return NULL;

	mem = nex_calloc(elems_total, sizeof(*mem));
	if (!mem)
		panic();

	elems_total = get_nsec_memory_helper(fdt, mem);
	assert(elems_total > 0);

	*nelems = elems_total;

	return mem;
}
#endif /*CFG_CORE_DYN_SHM*/

#ifdef CFG_CORE_RESERVED_SHM
/* Describe the static SHM carve-out as a reserved-memory node in the DT */
static int mark_static_shm_as_reserved(struct dt_descriptor *dt)
{
	vaddr_t shm_start;
	vaddr_t shm_end;

	core_mmu_get_mem_by_type(MEM_AREA_NSEC_SHM, &shm_start, &shm_end);
	if (shm_start != shm_end)
		return add_res_mem_dt_node(dt, "optee_shm",
					   virt_to_phys((void *)shm_start),
					   shm_end - shm_start);

	DMSG("No SHM configured");
	return -1;
}
#endif /*CFG_CORE_RESERVED_SHM*/

/* Reserve the TZDRAM range in the DT so the normal world keeps off it */
static int mark_tzdram_as_reserved(struct dt_descriptor *dt)
{
	return add_res_mem_dt_node(dt, "optee_core", CFG_TZDRAM_START,
				   CFG_TZDRAM_SIZE);
}

/*
 * Patch the external (normal world) DT: OP-TEE node, PSCI config and
 * reserved-memory entries. Panics on failure; no-op without an external DT.
 */
static void update_external_dt(void)
{
	struct dt_descriptor *dt = get_external_dt_desc();

	if (!dt || !dt->blob)
		return;

	if (!IS_ENABLED(CFG_CORE_FFA) && add_optee_dt_node(dt))
		panic("Failed to add OP-TEE Device Tree node");

	if (config_psci(dt))
		panic("Failed to config PSCI");

#ifdef CFG_CORE_RESERVED_SHM
	if (mark_static_shm_as_reserved(dt))
		panic("Failed to config non-secure memory");
#endif

	if (mark_tzdram_as_reserved(dt))
		panic("Failed to config secure memory");
}
#else /*CFG_DT*/
static void update_external_dt(void)
{
}

#ifdef CFG_CORE_DYN_SHM
static struct core_mmu_phys_mem *get_nsec_memory(void *fdt __unused,
						 size_t *nelems __unused)
{
	return NULL;
}
#endif /*CFG_CORE_DYN_SHM*/
#endif /*!CFG_DT*/

#if defined(CFG_CORE_FFA)
void *get_manifest_dt(void)
{
	return manifest_dt;
}

/*
 * Map the SPMC manifest DT (manifest_dt initially holds its physical
 * address) and validate it. Panics on mapping or validation failure.
 */
static void reinit_manifest_dt(void)
{
	paddr_t pa = (unsigned long)manifest_dt;
	void *fdt = NULL;
	int ret = 0;

	if (!pa) {
		EMSG("No manifest DT found");
		return;
	}

	fdt = core_mmu_add_mapping(MEM_AREA_MANIFEST_DT, pa, CFG_DTB_MAX_SIZE);
	if (!fdt)
		panic("Failed to map manifest DT");

	/* From here on manifest_dt holds the virtual address */
	manifest_dt = fdt;

	ret = fdt_check_full(fdt, CFG_DTB_MAX_SIZE);
	if (ret < 0) {
		EMSG("Invalid manifest Device Tree at %#lx: error %d", pa, ret);
		panic();
	}

	IMSG("manifest DT found");
}

/* boot_final hook: drop the temporary manifest DT mapping */
static TEE_Result release_manifest_dt(void)
{
	if (!manifest_dt)
		return TEE_SUCCESS;

	if (core_mmu_remove_mapping(MEM_AREA_MANIFEST_DT, manifest_dt,
				    CFG_DTB_MAX_SIZE))
		panic("Failed to remove temporary manifest DT mapping");
	manifest_dt = NULL;

	return TEE_SUCCESS;
}

boot_final(release_manifest_dt);
#else
void *get_manifest_dt(void)
{
	return NULL;
}

static void reinit_manifest_dt(void)
{
}
#endif /*CFG_CORE_FFA*/

#ifdef CFG_CORE_DYN_SHM
/*
 * Register the non-secure DDR usable as dynamic shared memory, trying the
 * external DT, then the embedded DT, then platform register_ddr()/
 * register_dynamic_shm() tables.
 */
static void discover_nsec_memory(void)
{
	struct core_mmu_phys_mem *mem;
	const struct core_mmu_phys_mem *mem_begin = NULL;
	const struct core_mmu_phys_mem *mem_end = NULL;
	size_t nelems;
	void *fdt = get_external_dt();

	if (fdt) {
		mem = get_nsec_memory(fdt, &nelems);
		if (mem) {
			core_mmu_set_discovered_nsec_ddr(mem, nelems);
			return;
		}

		DMSG("No non-secure memory found in external DT");
	}

	fdt = get_embedded_dt();
	if (fdt) {
		mem = get_nsec_memory(fdt, &nelems);
		if (mem) {
			core_mmu_set_discovered_nsec_ddr(mem, nelems);
			return;
		}

		DMSG("No non-secure memory found in embedded DT");
	}

	mem_begin = phys_ddr_overall_begin;
	mem_end = phys_ddr_overall_end;
	nelems = mem_end - mem_begin;
	if (nelems) {
		/*
		 * Platform cannot use both register_ddr() and the now
		 * deprecated register_dynamic_shm().
		 */
		assert(phys_ddr_overall_compat_begin ==
		       phys_ddr_overall_compat_end);
	} else {
		mem_begin = phys_ddr_overall_compat_begin;
		mem_end = phys_ddr_overall_compat_end;
		nelems = mem_end - mem_begin;
		if (!nelems)
			return;
		DMSG("Warning register_dynamic_shm() is deprecated, please use register_ddr() instead");
	}

	mem = nex_calloc(nelems, sizeof(*mem));
	if (!mem)
		panic();

	memcpy(mem, phys_ddr_overall_begin, sizeof(*mem) * nelems);
	core_mmu_set_discovered_nsec_ddr(mem, nelems);
}
#else /*CFG_CORE_DYN_SHM*/
static void discover_nsec_memory(void)
{
}
#endif /*!CFG_CORE_DYN_SHM*/

#ifdef CFG_NS_VIRTUALIZATION
static TEE_Result virt_init_heap(void)
{
	/* We need to initialize pool for every virtual guest partition */
	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);

	return TEE_SUCCESS;
}
preinit_early(virt_init_heap);
#endif

/*
 * Run the preinit/init call chains and finish RNG-dependent setup (pauth
 * keys, stack canaries).
 */
void init_tee_runtime(void)
{
#ifndef CFG_WITH_PAGER
	/* Pager initializes TA RAM early */
	core_mmu_init_ta_ram();
#endif
	/*
	 * With virtualization we call this function when creating the
	 * OP-TEE partition instead.
	 */
	if (!IS_ENABLED(CFG_NS_VIRTUALIZATION))
		call_preinitcalls();
	call_initcalls();

	/*
	 * These two functions uses crypto_rng_read() to initialize the
	 * pauth keys. Once call_initcalls() returns we're guaranteed that
	 * crypto_rng_read() is ready to be used.
	 */
	thread_init_core_local_pauth_keys();
	thread_init_thread_pauth_keys();

	/*
	 * Reinitialize canaries around the stacks with crypto_rng_read().
	 *
	 * TODO: Updating canaries when CFG_NS_VIRTUALIZATION is enabled will
	 * require synchronization between thread_check_canaries() and
	 * thread_update_canaries().
	 */
	if (!IS_ENABLED(CFG_NS_VIRTUALIZATION))
		thread_update_canaries();
}

/*
 * Early primary-CPU bring-up: exceptions masked, VFP/CE checks, runtime
 * (heap/pager) init, thread subsystem init and secure monitor setup.
 */
static void init_primary(unsigned long pageable_part, unsigned long nsec_entry)
{
	thread_init_core_local_stacks();
	/*
	 * Mask asynchronous exceptions before switch to the thread vector
	 * as the thread handler requires those to be masked while
	 * executing with the temporary stack. The thread subsystem also
	 * asserts that the foreign interrupts are blocked when using most of
	 * its functions.
	 */
	thread_set_exceptions(THREAD_EXCP_ALL);
	primary_save_cntfrq();
	init_vfp_sec();

	if (IS_ENABLED(CFG_CRYPTO_WITH_CE))
		check_crypto_extensions();

	/*
	 * Pager: init_runtime() calls thread_kernel_enable_vfp() so we must
	 * set a current thread right now to avoid a chicken-and-egg problem
	 * (thread_init_boot_thread() sets the current thread but needs
	 * things set by init_runtime()).
	 */
	thread_get_core_local()->curr_thread = 0;
	init_runtime(pageable_part);

	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
		/*
		 * Virtualization: We can't initialize threads right now because
		 * threads belong to "tee" part and will be initialized
		 * separately per each new virtual guest. So, we'll clear
		 * "curr_thread" and call it done.
		 */
		thread_get_core_local()->curr_thread = -1;
	} else {
		thread_init_boot_thread();
	}
	thread_init_primary();
	thread_init_per_cpu();
	init_sec_mon(nsec_entry);
}

/* True when SCTLR.NMFI reports non-maskable FIQ (Armv7-A only) */
static bool cpu_nmfi_enabled(void)
{
#if defined(ARM32)
	return read_sctlr() & SCTLR_NMFI;
#else
	/* Note: ARM64 does not feature non-maskable FIQ support. */
	return false;
#endif
}

/*
 * Note: this function is weak just to make it possible to exclude it from
 * the unpaged area.
*/
/*
 * Late primary CPU boot: bring up the external DT, TPM log, non-secure
 * memory discovery, console, interrupt controller and (unless
 * virtualization defers it) the TEE runtime. Both parameters are unused
 * here; the boot args were already captured by boot_save_args().
 */
void __weak boot_init_primary_late(unsigned long fdt __unused,
				   unsigned long manifest __unused)
{
	size_t fdt_size = CFG_DTB_MAX_SIZE;

	/*
	 * With a transfer list, the DTB entry's data_size (expanded earlier
	 * in boot_init_primary_early()) bounds the external DT instead of
	 * the static CFG_DTB_MAX_SIZE.
	 */
	if (IS_ENABLED(CFG_TRANSFER_LIST) && mapped_tl) {
		struct transfer_list_entry *tl_e = NULL;

		tl_e = transfer_list_find(mapped_tl, TL_TAG_FDT);
		if (tl_e)
			fdt_size = tl_e->data_size;
	}

	init_external_dt(boot_arg_fdt, fdt_size);
	reinit_manifest_dt();
	/* SEL1 SPMC: TPM event log location comes from the manifest DT */
#ifdef CFG_CORE_SEL1_SPMC
	tpm_map_log_area(get_manifest_dt());
#else
	tpm_map_log_area(get_external_dt());
#endif
	discover_nsec_memory();
	update_external_dt();
	configure_console_from_dt();

	IMSG("OP-TEE version: %s", core_v_str);
	if (IS_ENABLED(CFG_INSECURE)) {
		IMSG("WARNING: This OP-TEE configuration might be insecure!");
		IMSG("WARNING: Please check https://optee.readthedocs.io/en/latest/architecture/porting_guidelines.html");
	}
	IMSG("Primary CPU initializing");
#ifdef CFG_CORE_ASLR
	DMSG("Executing at offset %#lx with virtual load address %#"PRIxVA,
	     (unsigned long)boot_mmu_config.map_offset, VCORE_START_VA);
#endif
	if (IS_ENABLED(CFG_MEMTAG))
		DMSG("Memory tagging %s",
		     memtag_is_enabled() ? "enabled" : "disabled");

	/* Check if platform needs NMFI workaround */
	if (cpu_nmfi_enabled()) {
		if (!IS_ENABLED(CFG_CORE_WORKAROUND_ARM_NMFI))
			IMSG("WARNING: This ARM core has NMFI enabled, please apply workaround!");
	} else {
		if (IS_ENABLED(CFG_CORE_WORKAROUND_ARM_NMFI))
			IMSG("WARNING: This ARM core does not have NMFI enabled, no need for workaround");
	}

	boot_primary_init_intc();
	init_vfp_nsec();
	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
		/*
		 * With virtualization init_tee_runtime() runs per-guest
		 * later, not at boot.
		 */
		IMSG("Initializing virtualization support");
		core_mmu_init_virtualization();
	} else {
		init_tee_runtime();
	}
	call_finalcalls();
	IMSG("Primary CPU switching to normal world boot");
}

/*
 * Per-secondary-CPU initialization shared by both the PSCI (TF-A) and
 * spin-table entry paths. @nsec_entry is the non-secure entry address, or
 * PADDR_INVALID when TF-A owns the non-secure world.
 */
static void init_secondary_helper(unsigned long nsec_entry)
{
	IMSG("Secondary CPU %zu initializing", get_core_pos());

	/*
	 * Mask asynchronous exceptions before switch to the thread vector
	 * as the thread handler requires those to be masked while
	 * executing with the temporary stack. The thread subsystem also
	 * asserts that the foreign interrupts are blocked when using most of
	 * its functions.
	 */
	thread_set_exceptions(THREAD_EXCP_ALL);

	secondary_init_cntfrq();
	thread_init_per_cpu();
	init_sec_mon(nsec_entry);
	boot_secondary_init_intc();
	init_vfp_sec();
	init_vfp_nsec();

	IMSG("Secondary CPU %zu switching to normal world boot", get_core_pos());
}

/*
 * Note: this function is weak just to make it possible to exclude it from
 * the unpaged area so that it lies in the init area.
*/
/*
 * Early primary CPU boot entry. Maps the transfer list (if any), works out
 * the pager's pageable part and the non-secure entry address from the saved
 * boot arguments, then runs init_primary().
 */
void __weak boot_init_primary_early(void)
{
	unsigned long pageable_part = 0;
	unsigned long e = PADDR_INVALID;
	struct transfer_list_entry *tl_e = NULL;

	if (!IS_ENABLED(CFG_WITH_ARM_TRUSTED_FW))
		e = boot_arg_nsec_entry;

	if (IS_ENABLED(CFG_TRANSFER_LIST) && boot_arg_transfer_list) {
		/* map and save the TL */
		mapped_tl = transfer_list_map(boot_arg_transfer_list);
		if (!mapped_tl)
			panic("Failed to map transfer list");

		transfer_list_dump(mapped_tl);
		tl_e = transfer_list_find(mapped_tl, TL_TAG_FDT);
		if (tl_e) {
			/*
			 * Expand the data size of the DTB entry to the maximum
			 * allocable mapped memory to reserve sufficient space
			 * for inserting new nodes, avoid potentially corrupting
			 * next entries.
			 */
			uint32_t dtb_max_sz = mapped_tl->max_size -
					      mapped_tl->size + tl_e->data_size;

			if (!transfer_list_set_data_size(mapped_tl, tl_e,
							 dtb_max_sz)) {
				EMSG("Failed to extend DTB size to %#"PRIx32,
				     dtb_max_sz);
				panic();
			}
		}
		/* Reuse tl_e to look for the pager's pageable part below */
		tl_e = transfer_list_find(mapped_tl, TL_TAG_OPTEE_PAGABLE_PART);
	}

	if (IS_ENABLED(CFG_WITH_PAGER)) {
		if (IS_ENABLED(CFG_TRANSFER_LIST) && tl_e)
			pageable_part =
				get_le64(transfer_list_entry_data(tl_e));
		else
			pageable_part = boot_arg_pageable_part;
	}

	init_primary(pageable_part, e);
}

/*
 * Record the transfer list address passed from the previous boot stage,
 * after validating the Firmware Handoff register convention: @zero_reg must
 * be 0, the list must be properly aligned with a valid header, and @fdt
 * must match the list's own FDT entry.
 */
static void boot_save_transfer_list(unsigned long zero_reg,
				    unsigned long transfer_list,
				    unsigned long fdt)
{
	struct transfer_list_header *tl = (void *)transfer_list;
	struct transfer_list_entry *tl_e = NULL;

	if (zero_reg != 0)
		panic("Incorrect transfer list register convention");

	if (!IS_ALIGNED_WITH_TYPE(transfer_list, struct transfer_list_header) ||
	    !IS_ALIGNED(transfer_list, TL_ALIGNMENT_FROM_ORDER(tl->alignment)))
		panic("Transfer list base address is not aligned");

	if (transfer_list_check_header(tl) == TL_OPS_NONE)
		panic("Invalid transfer list");

	tl_e = transfer_list_find(tl, TL_TAG_FDT);
	if (fdt != (unsigned long)transfer_list_entry_data(tl_e))
		panic("DT does not match to the DT entry of the TL");

	boot_arg_transfer_list = transfer_list;
}

#if defined(CFG_WITH_ARM_TRUSTED_FW)
/* PSCI CPU_ON entry for secondaries when TF-A is the secure monitor */
unsigned long boot_cpu_on_handler(unsigned long a0 __maybe_unused,
				  unsigned long a1 __unused)
{
	init_secondary_helper(PADDR_INVALID);
	return 0;
}
#else
/* Secondary CPU entry when OP-TEE provides its own secure monitor */
void boot_init_secondary(unsigned long nsec_entry)
{
	init_secondary_helper(nsec_entry);
}
#endif

#if defined(CFG_BOOT_SECONDARY_REQUEST)
/*
 * Store the non-secure entry point and context id for @core_idx before it
 * is released. dsb_ishst() publishes the stores to the other cores.
 */
void boot_set_core_ns_entry(size_t core_idx, uintptr_t entry,
			    uintptr_t context_id)
{
	ns_entry_contexts[core_idx].entry_point = entry;
	ns_entry_contexts[core_idx].context_id = context_id;
	dsb_ishst();
}

/*
 * Release secondary core @core_idx to start at @entry. Returns 0 on
 * success, -1 for an invalid index (core 0 cannot be released). The
 * dmb/dsb/sev sequence orders the entry-point store before the spin-table
 * flag and wakes cores waiting in wfe().
 */
int boot_core_release(size_t core_idx, paddr_t entry)
{
	if (!core_idx || core_idx >= CFG_TEE_CORE_NB_CORE)
		return -1;

	ns_entry_contexts[core_idx].entry_point = entry;
	dmb();
	spin_table[core_idx] = 1;
	dsb();
	sev();

	return 0;
}

/*
 * spin until secondary boot request, then returns with
 * the secondary core entry address.
*/
struct ns_entry_context *boot_core_hpen(void)
{
#ifdef CFG_PSCI_ARM32
	return &ns_entry_contexts[get_core_pos()];
#else
	/* Wait until boot_core_release() sets this core's spin-table flag */
	do {
		wfe();
	} while (!spin_table[get_core_pos()]);
	/* Order the flag read before reading the entry context */
	dmb();
	return &ns_entry_contexts[get_core_pos()];
#endif
}
#endif

#if defined(CFG_CORE_ASLR)
#if defined(CFG_DT)
/*
 * Fetch the ASLR seed from the "kaslr-seed" property of the
 * /secure-chosen node in the boot DT. Falls back to
 * plat_get_aslr_seed() if the DT or property is missing/invalid.
 */
unsigned long __weak get_aslr_seed(void)
{
	void *fdt = NULL;
	int rc = 0;
	const uint64_t *seed = NULL;
	int offs = 0;
	int len = 0;

	if (!IS_ENABLED(CFG_CORE_SEL2_SPMC))
		fdt = (void *)boot_arg_fdt;

	if (!fdt) {
		DMSG("No fdt");
		goto err;
	}

	rc = fdt_check_header(fdt);
	if (rc) {
		DMSG("Bad fdt: %d", rc);
		goto err;
	}

	offs = fdt_path_offset(fdt, "/secure-chosen");
	if (offs < 0) {
		DMSG("Cannot find /secure-chosen");
		goto err;
	}
	seed = fdt_getprop(fdt, offs, "kaslr-seed", &len);
	if (!seed || len != sizeof(*seed)) {
		DMSG("Cannot find valid kaslr-seed");
		goto err;
	}

	return fdt64_to_cpu(*seed);

err:
	/* Try platform implementation */
	return plat_get_aslr_seed();
}
#else /*!CFG_DT*/
unsigned long __weak get_aslr_seed(void)
{
	/* Try platform implementation */
	return plat_get_aslr_seed();
}
#endif /*!CFG_DT*/
#endif /*CFG_CORE_ASLR*/

/*
 * Extract and validate the manifest DT address from an FF-A Boot
 * Information Blob (FF-A v1.1). Panics on any malformed field; exactly one
 * descriptor, in "address" content format, is expected.
 */
static void *get_fdt_from_boot_info(struct ffa_boot_info_header_1_1 *hdr)
{
	struct ffa_boot_info_1_1 *desc = NULL;
	uint8_t content_fmt = 0;
	uint8_t name_fmt = 0;
	void *fdt = NULL;
	int ret = 0;

	if (hdr->signature != FFA_BOOT_INFO_SIGNATURE) {
		EMSG("Bad boot info signature %#"PRIx32, hdr->signature);
		panic();
	}
	if (hdr->version != FFA_BOOT_INFO_VERSION) {
		EMSG("Bad boot info version %#"PRIx32, hdr->version);
		panic();
	}
	if (hdr->desc_count != 1) {
		EMSG("Bad boot info descriptor count %#"PRIx32,
		     hdr->desc_count);
		panic();
	}
	desc = (void *)((vaddr_t)hdr + hdr->desc_offset);
	name_fmt = desc->flags & FFA_BOOT_INFO_FLAG_NAME_FORMAT_MASK;
	if (name_fmt == FFA_BOOT_INFO_FLAG_NAME_FORMAT_STRING)
		DMSG("Boot info descriptor name \"%16s\"", desc->name);
	else if (name_fmt == FFA_BOOT_INFO_FLAG_NAME_FORMAT_UUID)
		DMSG("Boot info descriptor UUID %pUl", (void *)desc->name);
	else
		DMSG("Boot info descriptor: unknown name format %"PRIu8,
		     name_fmt);

	content_fmt = (desc->flags & FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_MASK) >>
		      FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_SHIFT;
	if (content_fmt != FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_ADDR) {
		EMSG("Bad boot info content format %"PRIu8", expected %u (address)",
		     content_fmt, FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_ADDR);
		panic();
	}

	fdt = (void *)(vaddr_t)desc->contents;
	ret = fdt_check_full(fdt, desc->size);
	if (ret < 0) {
		EMSG("Invalid Device Tree at %p: error %d", fdt, ret);
		panic();
	}
	return fdt;
}

/*
 * Read the secure memory range ("load-address" and "mem-size") from an
 * FF-A manifest DT. Panics if the manifest is not compatible or either
 * property is missing.
 */
static void get_sec_mem_from_manifest(void *fdt, paddr_t *base, size_t *size)
{
	int ret = 0;
	uint64_t num = 0;

	ret = fdt_node_check_compatible(fdt, 0, "arm,ffa-manifest-1.0");
	if (ret < 0) {
		EMSG("Invalid FF-A manifest at %p: error %d", fdt, ret);
		panic();
	}
	ret = dt_getprop_as_number(fdt, 0, "load-address", &num);
	if (ret < 0) {
		EMSG("Can't read \"load-address\" from FF-A manifest at %p: error %d",
		     fdt, ret);
		panic();
	}
	*base = num;
	/* "mem-size" is currently an undocumented extension to the spec. */
	ret = dt_getprop_as_number(fdt, 0, "mem-size", &num);
	if (ret < 0) {
		EMSG("Can't read \"mem-size\" from FF-A manifest at %p: error %d",
		     fdt, ret);
		panic();
	}
	*size = num;
}

/*
 * Decode and stash the boot arguments (a0..a4) passed in registers by the
 * previous boot stage, for later use by boot_init_primary_early/late().
 */
void __weak boot_save_args(unsigned long a0, unsigned long a1,
			   unsigned long a2, unsigned long a3,
			   unsigned long a4 __maybe_unused)
{
	/*
	 * Register use:
	 *
	 * Scenario A: Default arguments
	 * a0	- CFG_CORE_FFA=y && CFG_CORE_SEL2_SPMC=n:
	 *	  if non-NULL holds the TOS FW config [1] address
	 *	- CFG_CORE_FFA=y &&
	 *	  (CFG_CORE_SEL2_SPMC=y || CFG_CORE_EL3_SPMC=y):
	 *	  address of FF-A Boot Information Blob
	 *	- CFG_CORE_FFA=n:
	 *	  if non-NULL holds the pagable part address
	 * a1	- CFG_WITH_ARM_TRUSTED_FW=n (Armv7):
	 *	  Armv7 standard bootarg #1 (kept track of in entry_a32.S)
	 * a2	- CFG_CORE_SEL2_SPMC=n:
	 *	  if non-NULL holds the system DTB address
	 *	- CFG_WITH_ARM_TRUSTED_FW=n (Armv7):
	 *	  Armv7 standard bootarg #2 (system DTB address, kept track
	 *	  of in entry_a32.S)
	 * a3	- Not used
	 * a4	- CFG_WITH_ARM_TRUSTED_FW=n:
	 *	  Non-secure entry address
	 *
	 * [1] A TF-A concept: TOS_FW_CONFIG - Trusted OS Firmware
	 * configuration file. Used by Trusted OS (BL32), that is, OP-TEE
	 * here. This is also called Manifest DT, related to the Manifest DT
	 * passed in the FF-A Boot Information Blob, but with a different
	 * compatible string.
	 *
	 * Scenario B: FW Handoff via Transfer List
	 * Note: FF-A and non-secure entry are not yet supported with
	 * Transfer List
	 * a0	- DTB address or 0 (AArch64)
	 *	- must be 0 (AArch32)
	 * a1	- TRANSFER_LIST_SIGNATURE | REG_CONVENTION_VER_MASK
	 * a2	- must be 0 (AArch64)
	 *	- DTB address or 0 (AArch32)
	 * a3	- Transfer list base address
	 * a4	- Not used
	 */

	if (IS_ENABLED(CFG_TRANSFER_LIST) &&
	    a1 == (TRANSFER_LIST_SIGNATURE | REG_CONVENTION_VER_MASK)) {
		/* AArch64 and AArch32 swap the DTB and zero registers */
		if (IS_ENABLED(CFG_ARM64_core)) {
			boot_save_transfer_list(a2, a3, a0);
			boot_arg_fdt = a0;
		} else {
			boot_save_transfer_list(a0, a3, a2);
			boot_arg_fdt = a2;
		}
		return;
	}

	if (!IS_ENABLED(CFG_CORE_SEL2_SPMC)) {
		/* Platform-fixed DTB address overrides the a2 register */
#if defined(CFG_DT_ADDR)
		boot_arg_fdt = CFG_DT_ADDR;
#else
		boot_arg_fdt = a2;
#endif
	}

	if (IS_ENABLED(CFG_CORE_FFA)) {
		if (IS_ENABLED(CFG_CORE_SEL2_SPMC) ||
		    IS_ENABLED(CFG_CORE_EL3_SPMC))
			manifest_dt = get_fdt_from_boot_info((void *)a0);
		else
			manifest_dt = (void *)a0;
		if (IS_ENABLED(CFG_CORE_SEL2_SPMC) &&
		    IS_ENABLED(CFG_CORE_PHYS_RELOCATABLE)) {
			paddr_t base = 0;
			size_t size = 0;

			get_sec_mem_from_manifest(manifest_dt, &base, &size);
			core_mmu_set_secure_memory(base, size);
		}
	} else {
		if (IS_ENABLED(CFG_WITH_PAGER)) {
#if defined(CFG_PAGEABLE_ADDR)
			boot_arg_pageable_part = CFG_PAGEABLE_ADDR;
#else
			boot_arg_pageable_part = a0;
#endif
		}
		if (!IS_ENABLED(CFG_WITH_ARM_TRUSTED_FW)) {
#if defined(CFG_NS_ENTRY_ADDR)
			boot_arg_nsec_entry = CFG_NS_ENTRY_ADDR;
#else
			boot_arg_nsec_entry = a4;
#endif
		}
	}
}

#if defined(CFG_TRANSFER_LIST)
/*
 * boot_final() hook: pack the (possibly modified) external DTB back into
 * its transfer list entry, then unmap and sync the transfer list.
 */
static TEE_Result release_transfer_list(void)
{
	struct dt_descriptor *dt = get_external_dt_desc();

	if (!mapped_tl)
		return TEE_SUCCESS;

	if (dt) {
		int ret = 0;
		struct transfer_list_entry *tl_e = NULL;

		/*
		 * Pack the DTB and update the transfer list before un-mapping
		 */
		ret = fdt_pack(dt->blob);
		if (ret < 0) {
			EMSG("Failed to pack Device Tree at 0x%" PRIxPA
			     ": error %d", virt_to_phys(dt->blob), ret);
			panic();
		}

		tl_e = transfer_list_find(mapped_tl, TL_TAG_FDT);
		assert(dt->blob == transfer_list_entry_data(tl_e));
		transfer_list_set_data_size(mapped_tl, tl_e,
					    fdt_totalsize(dt->blob));
		dt->blob = NULL;
	}

	transfer_list_unmap_sync(mapped_tl);
	mapped_tl = NULL;

	return TEE_SUCCESS;
}

boot_final(release_transfer_list);
#endif