// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2015-2023, Linaro Limited
 * Copyright (c) 2023, Arm Limited
 */

#include <arm.h>
#include <assert.h>
#include <compiler.h>
#include <config.h>
#include <console.h>
#include <crypto/crypto.h>
#include <drivers/gic.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <ffa.h>
#include <initcall.h>
#include <inttypes.h>
#include <io.h>
#include <keep.h>
#include <kernel/asan.h>
#include <kernel/boot.h>
#include <kernel/dt.h>
#include <kernel/linker.h>
#include <kernel/misc.h>
#include <kernel/panic.h>
#include <kernel/tee_misc.h>
#include <kernel/thread.h>
#include <kernel/tpm.h>
#include <kernel/transfer_list.h>
#include <libfdt.h>
#include <malloc.h>
#include <memtag.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <mm/fobj.h>
#include <mm/tee_mm.h>
#include <mm/tee_pager.h>
#include <sm/psci.h>
#include <stdio.h>
#include <trace.h>
#include <utee_defines.h>
#include <util.h>

#include <platform_config.h>

#if !defined(CFG_WITH_ARM_TRUSTED_FW)
#include <sm/sm.h>
#endif

#if defined(CFG_WITH_VFP)
#include <kernel/vfp.h>
#endif

/*
 * In this file we're using unsigned long to represent physical pointers as
 * they are received in a single register when OP-TEE is initially entered.
 * This limits 32-bit systems to only make use of the lower 32 bits of a
 * physical address for initial parameters.
 *
 * 64-bit systems on the other hand can use full 64-bit physical pointers.
 */
#define PADDR_INVALID		ULONG_MAX

#if defined(CFG_BOOT_SECONDARY_REQUEST)
struct ns_entry_context {
	uintptr_t entry_point;
	uintptr_t context_id;
};
struct ns_entry_context ns_entry_contexts[CFG_TEE_CORE_NB_CORE];
static uint32_t spin_table[CFG_TEE_CORE_NB_CORE];
#endif

#ifdef CFG_BOOT_SYNC_CPU
/*
 * Array used when booting, to synchronize cpu.
 * When 0, the cpu has not started.
 * When 1, it has started.
 */
uint32_t sem_cpu_sync[CFG_TEE_CORE_NB_CORE];
DECLARE_KEEP_PAGER(sem_cpu_sync);
#endif

static void *manifest_dt __nex_bss;
static unsigned long boot_arg_fdt __nex_bss;
static unsigned long boot_arg_nsec_entry __nex_bss;
static unsigned long boot_arg_pageable_part __nex_bss;
static unsigned long boot_arg_transfer_list __nex_bss;
static struct transfer_list_header *mapped_tl __nex_bss;

#ifdef CFG_SECONDARY_INIT_CNTFRQ
static uint32_t cntfrq;
#endif

/* May be overridden in plat-$(PLATFORM)/main.c */
__weak void plat_primary_init_early(void)
{
}
DECLARE_KEEP_PAGER(plat_primary_init_early);

/* May be overridden in plat-$(PLATFORM)/main.c */
__weak void boot_primary_init_intc(void)
{
}

/* May be overridden in plat-$(PLATFORM)/main.c */
__weak void boot_secondary_init_intc(void)
{
}

/* May be overridden in plat-$(PLATFORM)/main.c */
__weak unsigned long plat_get_aslr_seed(void)
{
	DMSG("Warning: no ASLR seed");

	return 0;
}

#if defined(_CFG_CORE_STACK_PROTECTOR) || defined(CFG_WITH_STACK_CANARIES)
/* Generate random stack canary value on boot up */
__weak void plat_get_random_stack_canaries(void *buf, size_t ncan, size_t size)
{
	TEE_Result ret = TEE_ERROR_GENERIC;
	size_t i = 0;

	assert(buf && ncan && size);

	/*
	 * With virtualization the RNG is not initialized in Nexus core.
	 * Need to override with a platform-specific implementation.
	 */
	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
		IMSG("WARNING: Using fixed value for stack canary");
		memset(buf, 0xab, ncan * size);
		goto out;
	}

	ret = crypto_rng_read(buf, ncan * size);
	if (ret != TEE_SUCCESS)
		panic("Failed to generate random stack canary");

out:
	/* Leave a null byte in each canary to prevent string-based exploits */
	for (i = 0; i < ncan; i++)
		*((uint8_t *)buf + size * i) = 0;
}
#endif /* _CFG_CORE_STACK_PROTECTOR || CFG_WITH_STACK_CANARIES */

/*
 * This function is called as a guard after each smc call which is not
 * supposed to return.
 */
void __panic_at_smc_return(void)
{
	panic();
}

#if defined(CFG_WITH_ARM_TRUSTED_FW)
void init_sec_mon(unsigned long nsec_entry __maybe_unused)
{
	assert(nsec_entry == PADDR_INVALID);
	/* Do nothing as we don't have a secure monitor */
}
#else
/* May be overridden in plat-$(PLATFORM)/main.c */
__weak void init_sec_mon(unsigned long nsec_entry)
{
	struct sm_nsec_ctx *nsec_ctx;

	assert(nsec_entry != PADDR_INVALID);

	/* Initialize secure monitor */
	nsec_ctx = sm_get_nsec_ctx();
	nsec_ctx->mon_lr = nsec_entry;
	nsec_ctx->mon_spsr = CPSR_MODE_SVC | CPSR_I;
	if (nsec_entry & 1)
		nsec_ctx->mon_spsr |= CPSR_T;
}
#endif

#if defined(CFG_WITH_ARM_TRUSTED_FW)
static void init_vfp_nsec(void)
{
}
#else
static void init_vfp_nsec(void)
{
	/* Normal world can use CP10 and CP11 (SIMD/VFP) */
	write_nsacr(read_nsacr() | NSACR_CP10 | NSACR_CP11);
}
#endif

#if defined(CFG_WITH_VFP)

#ifdef ARM32
static void init_vfp_sec(void)
{
	uint32_t cpacr = read_cpacr();

	/*
	 * Enable Advanced SIMD functionality.
	 * Enable use of D16-D31 of the Floating-point Extension register
	 * file.
	 */
	cpacr &= ~(CPACR_ASEDIS | CPACR_D32DIS);
	/*
	 * Enable usage of CP10 and CP11 (SIMD/VFP) (both kernel and user
	 * mode).
	 */
	cpacr |= CPACR_CP(10, CPACR_CP_ACCESS_FULL);
	cpacr |= CPACR_CP(11, CPACR_CP_ACCESS_FULL);
	write_cpacr(cpacr);
}
#endif /* ARM32 */

#ifdef ARM64
static void init_vfp_sec(void)
{
	/* Not using VFP until thread_kernel_enable_vfp() */
	vfp_disable();
}
#endif /* ARM64 */

#else /* CFG_WITH_VFP */

static void init_vfp_sec(void)
{
	/* Not using VFP */
}
#endif

#ifdef CFG_SECONDARY_INIT_CNTFRQ
static void primary_save_cntfrq(void)
{
	assert(cntfrq == 0);

	/*
	 * CNTFRQ should be initialized on the primary CPU by a
	 * previous boot stage
	 */
	cntfrq = read_cntfrq();
}

static void secondary_init_cntfrq(void)
{
	assert(cntfrq != 0);
	write_cntfrq(cntfrq);
}
#else /* CFG_SECONDARY_INIT_CNTFRQ */
static void primary_save_cntfrq(void)
{
}

static void secondary_init_cntfrq(void)
{
}
#endif

#ifdef CFG_CORE_SANITIZE_KADDRESS
static void init_run_constructors(void)
{
	const vaddr_t *ctor;

	for (ctor = &__ctor_list; ctor < &__ctor_end; ctor++)
		((void (*)(void))(*ctor))();
}

static void init_asan(void)
{
	/*
	 * CFG_ASAN_SHADOW_OFFSET is also supplied as
	 * -fasan-shadow-offset=$(CFG_ASAN_SHADOW_OFFSET) to the compiler.
	 * Since all the values needed to calculate CFG_ASAN_SHADOW_OFFSET
	 * aren't available to make, we have to calculate it in advance and
	 * hard code it into the platform conf.mk. Here, where we do have
	 * all the needed values, we double check that the compiler was
	 * supplied the correct value.
	 */

#define __ASAN_SHADOW_START \
	ROUNDUP(TEE_RAM_START + (TEE_RAM_VA_SIZE * 8) / 9 - 8, 8)
	assert(__ASAN_SHADOW_START == (vaddr_t)&__asan_shadow_start);
#define __CFG_ASAN_SHADOW_OFFSET \
	(__ASAN_SHADOW_START - (TEE_RAM_START / 8))
	COMPILE_TIME_ASSERT(CFG_ASAN_SHADOW_OFFSET == __CFG_ASAN_SHADOW_OFFSET);
#undef __ASAN_SHADOW_START
#undef __CFG_ASAN_SHADOW_OFFSET

	/*
	 * Assign area covered by the shadow area, everything from start up
	 * to the beginning of the shadow area.
	 */
	asan_set_shadowed((void *)TEE_LOAD_ADDR, &__asan_shadow_start);

	/*
	 * Add access to areas that aren't opened automatically by a
	 * constructor.
	 */
	asan_tag_access(&__ctor_list, &__ctor_end);
	asan_tag_access(__rodata_start, __rodata_end);
#ifdef CFG_WITH_PAGER
	asan_tag_access(__pageable_start, __pageable_end);
#endif /*CFG_WITH_PAGER*/
	asan_tag_access(__nozi_start, __nozi_end);
#ifdef ARM32
	asan_tag_access(__exidx_start, __exidx_end);
	asan_tag_access(__extab_start, __extab_end);
#endif

	init_run_constructors();

	/* Everything is tagged correctly, let's start address sanitizing. */
	asan_start();
}
#else /*CFG_CORE_SANITIZE_KADDRESS*/
static void init_asan(void)
{
}
#endif /*CFG_CORE_SANITIZE_KADDRESS*/

#if defined(CFG_MEMTAG)
/* Called from entry_a64.S only when MEMTAG is configured */
void boot_init_memtag(void)
{
	paddr_t base = 0;
	paddr_size_t size = 0;

	memtag_init_ops(feat_mte_implemented());
	core_mmu_get_secure_memory(&base, &size);
	memtag_set_tags((void *)(vaddr_t)base, size, 0);
}
#endif

#ifdef CFG_WITH_PAGER

#ifdef CFG_CORE_SANITIZE_KADDRESS
static void carve_out_asan_mem(tee_mm_pool_t *pool)
{
	const size_t s = pool->hi - pool->lo;
	tee_mm_entry_t *mm;
	paddr_t apa = ASAN_MAP_PA;
	size_t asz = ASAN_MAP_SZ;

	if (core_is_buffer_outside(apa, asz, pool->lo, s))
		return;

	/* Reserve the shadow area */
	if (!core_is_buffer_inside(apa, asz, pool->lo, s)) {
		if (apa < pool->lo) {
			/*
			 * ASAN buffer is overlapping with the beginning of
			 * the pool.
			 */
			asz -= pool->lo - apa;
			apa = pool->lo;
		} else {
			/*
			 * ASAN buffer is overlapping with the end of the
			 * pool.
			 */
			asz = pool->hi - apa;
		}
	}
	mm = tee_mm_alloc2(pool, apa, asz);
	assert(mm);
}
#else
static void carve_out_asan_mem(tee_mm_pool_t *pool __unused)
{
}
#endif

static void print_pager_pool_size(void)
{
	struct tee_pager_stats __maybe_unused stats;

	tee_pager_get_stats(&stats);
	IMSG("Pager pool size: %zukB",
	     stats.npages_all * SMALL_PAGE_SIZE / 1024);
}

static void init_vcore(tee_mm_pool_t *mm_vcore)
{
	const vaddr_t begin = VCORE_START_VA;
	size_t size = TEE_RAM_VA_SIZE;

#ifdef CFG_CORE_SANITIZE_KADDRESS
	/* Carve out asan memory, flat mapped after core memory */
	if (begin + size > ASAN_SHADOW_PA)
		size = ASAN_MAP_PA - begin;
#endif

	if (!tee_mm_init(mm_vcore, begin, size, SMALL_PAGE_SHIFT,
			 TEE_MM_POOL_NO_FLAGS))
		panic("tee_mm_vcore init failed");
}

/*
 * With CFG_CORE_ASLR=y the init part is relocated very early during boot.
 * The init part is also paged just as the rest of the normal paged code, with
 * the difference that it's preloaded during boot. When the backing store
 * is configured the entire paged binary is copied in place and then also
 * the init part. Since the init part has been relocated (references to
 * addresses updated to compensate for the new load address) this has to be
 * undone for the hashes of those pages to match with the original binary.
 *
 * If CFG_CORE_ASLR=n, nothing needs to be done as the code/ro pages are
 * unchanged.
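 *
 * Concretely, undo_init_relocation() below walks the relocation table that
 * is embedded after __init_end and subtracts boot_mmu_config.map_offset
 * from every relocated pointer falling within the init part, restoring the
 * values that the page hashes were originally computed over.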
 */
static void undo_init_relocation(uint8_t *paged_store __maybe_unused)
{
#ifdef CFG_CORE_ASLR
	unsigned long *ptr = NULL;
	const uint32_t *reloc = NULL;
	const uint32_t *reloc_end = NULL;
	unsigned long offs = boot_mmu_config.map_offset;
	const struct boot_embdata *embdata = (const void *)__init_end;
	vaddr_t addr_end = (vaddr_t)__init_end - offs - TEE_LOAD_ADDR;
	vaddr_t addr_start = (vaddr_t)__init_start - offs - TEE_LOAD_ADDR;

	reloc = (const void *)((vaddr_t)embdata + embdata->reloc_offset);
	reloc_end = reloc + embdata->reloc_len / sizeof(*reloc);

	for (; reloc < reloc_end; reloc++) {
		if (*reloc < addr_start)
			continue;
		if (*reloc >= addr_end)
			break;
		ptr = (void *)(paged_store + *reloc - addr_start);
		*ptr -= offs;
	}
#endif
}

static struct fobj *ro_paged_alloc(tee_mm_entry_t *mm, void *hashes,
				   void *store)
{
	const unsigned int num_pages = tee_mm_get_bytes(mm) / SMALL_PAGE_SIZE;
#ifdef CFG_CORE_ASLR
	unsigned int reloc_offs = (vaddr_t)__pageable_start - VCORE_START_VA;
	const struct boot_embdata *embdata = (const void *)__init_end;
	const void *reloc = __init_end + embdata->reloc_offset;

	return fobj_ro_reloc_paged_alloc(num_pages, hashes, reloc_offs,
					 reloc, embdata->reloc_len, store);
#else
	return fobj_ro_paged_alloc(num_pages, hashes, store);
#endif
}

static void init_runtime(unsigned long pageable_part)
{
	size_t n;
	size_t init_size = (size_t)(__init_end - __init_start);
	size_t pageable_start = (size_t)__pageable_start;
	size_t pageable_end = (size_t)__pageable_end;
	size_t pageable_size = pageable_end - pageable_start;
	vaddr_t tzsram_end = TZSRAM_BASE + TZSRAM_SIZE - TEE_LOAD_ADDR +
			     VCORE_START_VA;
	size_t hash_size = (pageable_size / SMALL_PAGE_SIZE) *
			   TEE_SHA256_HASH_SIZE;
	const struct boot_embdata *embdata = (const void *)__init_end;
	const void *tmp_hashes = NULL;
	tee_mm_entry_t *mm = NULL;
	struct fobj *fobj = NULL;
	uint8_t *paged_store = NULL;
	uint8_t *hashes = NULL;

	assert(pageable_size % SMALL_PAGE_SIZE == 0);
	assert(embdata->total_len >= embdata->hashes_offset +
				     embdata->hashes_len);
	assert(hash_size == embdata->hashes_len);

	tmp_hashes = __init_end + embdata->hashes_offset;

	init_asan();

	/* Add heap2 first as heap1 may be too small as initial bget pool */
	malloc_add_pool(__heap2_start, __heap2_end - __heap2_start);
	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);

	/*
	 * This needs to be initialized early to support address lookup
	 * in MEM_AREA_TEE_RAM
	 */
	tee_pager_early_init();

	hashes = malloc(hash_size);
	IMSG_RAW("\n");
	IMSG("Pager is enabled. Hashes: %zu bytes", hash_size);
	assert(hashes);
	asan_memcpy_unchecked(hashes, tmp_hashes, hash_size);

	/*
	 * Need tee_mm_sec_ddr initialized to be able to allocate secure
	 * DDR below.
	 */
	core_mmu_init_ta_ram();

	carve_out_asan_mem(&tee_mm_sec_ddr);

	mm = tee_mm_alloc(&tee_mm_sec_ddr, pageable_size);
	assert(mm);
	paged_store = phys_to_virt(tee_mm_get_smem(mm), MEM_AREA_TA_RAM,
				   pageable_size);
	/*
	 * Load pageable part in the dedicated allocated area:
	 * - Move pageable non-init part into pageable area. Note bootloader
	 *   may have loaded it anywhere in TA RAM hence use memmove().
	 * - Copy pageable init part from current location into pageable area.
	 */
	memmove(paged_store + init_size,
		phys_to_virt(pageable_part,
			     core_mmu_get_type_by_pa(pageable_part),
			     __pageable_part_end - __pageable_part_start),
		__pageable_part_end - __pageable_part_start);
	asan_memcpy_unchecked(paged_store, __init_start, init_size);
	/*
	 * Undo eventual relocation for the init part so the hash checks
	 * can pass.
	 */
	undo_init_relocation(paged_store);

	/* Check that hashes of what's in pageable area is OK */
	DMSG("Checking hashes of pageable area");
	for (n = 0; (n * SMALL_PAGE_SIZE) < pageable_size; n++) {
		const uint8_t *hash = hashes + n * TEE_SHA256_HASH_SIZE;
		const uint8_t *page = paged_store + n * SMALL_PAGE_SIZE;
		TEE_Result res;

		DMSG("hash pg_idx %zu hash %p page %p", n, hash, page);
		res = hash_sha256_check(hash, page, SMALL_PAGE_SIZE);
		if (res != TEE_SUCCESS) {
			EMSG("Hash failed for page %zu at %p: res 0x%x",
			     n, (void *)page, res);
			panic();
		}
	}

	/*
	 * Assert prepaged init sections are page aligned so that nothing
	 * trails uninited at the end of the premapped init area.
	 */
	assert(!(init_size & SMALL_PAGE_MASK));

	/*
	 * Initialize the virtual memory pool used for main_mmu_l2_ttb which
	 * is supplied to tee_pager_init() below.
	 */
	init_vcore(&tee_mm_vcore);

	/*
	 * Assign an alias area for the pager at the end of the small page
	 * block that the rest of the binary is loaded into. We're taking
	 * more than needed, but we're guaranteed to not need more than the
	 * physical amount of TZSRAM.
	 */
	mm = tee_mm_alloc2(&tee_mm_vcore,
			   (vaddr_t)tee_mm_vcore.lo +
			   tee_mm_vcore.size - TZSRAM_SIZE,
			   TZSRAM_SIZE);
	assert(mm);
	tee_pager_set_alias_area(mm);

	/*
	 * Claim virtual memory which isn't paged.
	 * Linear memory (flat map core memory) ends there.
	 */
	mm = tee_mm_alloc2(&tee_mm_vcore, VCORE_UNPG_RX_PA,
			   (vaddr_t)(__pageable_start - VCORE_UNPG_RX_PA));
	assert(mm);

	/*
	 * Allocate virtual memory for the pageable area and let the pager
	 * take charge of all the pages already assigned to that memory.
	 */
	mm = tee_mm_alloc2(&tee_mm_vcore, (vaddr_t)__pageable_start,
			   pageable_size);
	assert(mm);
	fobj = ro_paged_alloc(mm, hashes, paged_store);
	assert(fobj);
	tee_pager_add_core_region(tee_mm_get_smem(mm), PAGED_REGION_TYPE_RO,
				  fobj);
	fobj_put(fobj);

	tee_pager_add_pages(pageable_start, init_size / SMALL_PAGE_SIZE, false);
	tee_pager_add_pages(pageable_start + init_size,
			    (pageable_size - init_size) / SMALL_PAGE_SIZE,
			    true);
	if (pageable_end < tzsram_end)
		tee_pager_add_pages(pageable_end, (tzsram_end - pageable_end) /
						  SMALL_PAGE_SIZE, true);

	/*
	 * There may be physical pages in TZSRAM before the core load address.
	 * These pages can be added to the physical pages pool of the pager.
	 * This setup may happen when the secure bootloader runs in TZSRAM
	 * and its memory can be reused by OP-TEE once boot stages complete.
	 */
	tee_pager_add_pages(tee_mm_vcore.lo,
			    (VCORE_UNPG_RX_PA - tee_mm_vcore.lo) / SMALL_PAGE_SIZE,
			    true);

	print_pager_pool_size();
}
#else

static void init_runtime(unsigned long pageable_part __unused)
{
	init_asan();

	/*
	 * By default the whole of OP-TEE uses malloc, so we need to
	 * initialize it early.
	 * But, when virtualization is enabled, malloc is used only by the
	 * TEE runtime, so malloc should be initialized later, for every
	 * virtual partition separately. Core code uses nex_malloc instead.
	 */
#ifdef CFG_NS_VIRTUALIZATION
	nex_malloc_add_pool(__nex_heap_start, __nex_heap_end -
					      __nex_heap_start);
#else
	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
#endif

	IMSG_RAW("\n");
}
#endif

#if defined(CFG_DT)
static int add_optee_dt_node(struct dt_descriptor *dt)
{
	int offs;
	int ret;

	if (fdt_path_offset(dt->blob, "/firmware/optee") >= 0) {
		DMSG("OP-TEE Device Tree node already exists!");
		return 0;
	}

	offs = fdt_path_offset(dt->blob, "/firmware");
	if (offs < 0) {
		offs = add_dt_path_subnode(dt, "/", "firmware");
		if (offs < 0)
			return -1;
	}

	offs = fdt_add_subnode(dt->blob, offs, "optee");
	if (offs < 0)
		return -1;

	ret = fdt_setprop_string(dt->blob, offs, "compatible",
				 "linaro,optee-tz");
	if (ret < 0)
		return -1;
	ret = fdt_setprop_string(dt->blob, offs, "method", "smc");
	if (ret < 0)
		return -1;

	if (CFG_CORE_ASYNC_NOTIF_GIC_INTID) {
		/*
		 * The format of the interrupt property is defined by the
		 * binding of the interrupt domain root. In this case it's
		 * one Arm GIC v1, v2 or v3 so we must be compatible with
		 * these.
		 *
		 * An SPI type of interrupt is indicated with a 0 in the
		 * first cell. A PPI type is indicated with value 1.
		 *
		 * The interrupt number goes in the second cell where
		 * SPIs range from 0 to 987 and PPIs range from 0 to 15.
		 *
		 * Flags are passed in the third cell.
		 */
		uint32_t itr_trigger = 0;
		uint32_t itr_type = 0;
		uint32_t itr_id = 0;
		uint32_t val[3] = { };

		/* PPIs are visible only in the current CPU cluster */
		static_assert(IS_ENABLED(CFG_CORE_FFA) ||
			      !CFG_CORE_ASYNC_NOTIF_GIC_INTID ||
			      (CFG_CORE_ASYNC_NOTIF_GIC_INTID >=
			       GIC_SPI_BASE) ||
			      ((CFG_TEE_CORE_NB_CORE <= 8) &&
			       (CFG_CORE_ASYNC_NOTIF_GIC_INTID >=
				GIC_PPI_BASE)));

		if (CFG_CORE_ASYNC_NOTIF_GIC_INTID >= GIC_SPI_BASE) {
			itr_type = GIC_SPI;
			itr_id = CFG_CORE_ASYNC_NOTIF_GIC_INTID - GIC_SPI_BASE;
			itr_trigger = IRQ_TYPE_EDGE_RISING;
		} else {
			itr_type = GIC_PPI;
			itr_id = CFG_CORE_ASYNC_NOTIF_GIC_INTID - GIC_PPI_BASE;
			itr_trigger = IRQ_TYPE_EDGE_RISING |
				      GIC_CPU_MASK_SIMPLE(CFG_TEE_CORE_NB_CORE);
		}

		val[0] = TEE_U32_TO_BIG_ENDIAN(itr_type);
		val[1] = TEE_U32_TO_BIG_ENDIAN(itr_id);
		val[2] = TEE_U32_TO_BIG_ENDIAN(itr_trigger);

		ret = fdt_setprop(dt->blob, offs, "interrupts", val,
				  sizeof(val));
		if (ret < 0)
			return -1;
	}
	return 0;
}

#ifdef CFG_PSCI_ARM32
static int append_psci_compatible(void *fdt, int offs, const char *str)
{
	return fdt_appendprop(fdt, offs, "compatible", str, strlen(str) + 1);
}

static int dt_add_psci_node(struct dt_descriptor *dt)
{
	int offs;

	if (fdt_path_offset(dt->blob, "/psci") >= 0) {
		DMSG("PSCI Device Tree node already exists!");
		return 0;
	}

	offs = add_dt_path_subnode(dt, "/", "psci");
	if (offs < 0)
		return -1;
	if (append_psci_compatible(dt->blob, offs, "arm,psci-1.0"))
		return -1;
	if (append_psci_compatible(dt->blob, offs, "arm,psci-0.2"))
		return -1;
	if (append_psci_compatible(dt->blob, offs, "arm,psci"))
		return -1;
	if (fdt_setprop_string(dt->blob, offs, "method", "smc"))
		return -1;
	if (fdt_setprop_u32(dt->blob, offs, "cpu_suspend", PSCI_CPU_SUSPEND))
		return -1;
	if (fdt_setprop_u32(dt->blob, offs, "cpu_off", PSCI_CPU_OFF))
		return -1;
	if (fdt_setprop_u32(dt->blob, offs, "cpu_on", PSCI_CPU_ON))
		return -1;
	if (fdt_setprop_u32(dt->blob, offs, "sys_poweroff", PSCI_SYSTEM_OFF))
		return -1;
	if (fdt_setprop_u32(dt->blob, offs, "sys_reset", PSCI_SYSTEM_RESET))
		return -1;
	return 0;
}

static int check_node_compat_prefix(struct dt_descriptor *dt, int offs,
				    const char *prefix)
{
	const size_t prefix_len = strlen(prefix);
	size_t l;
	int plen;
	const char *prop;

	prop = fdt_getprop(dt->blob, offs, "compatible", &plen);
	if (!prop)
		return -1;

	while (plen > 0) {
		if (memcmp(prop, prefix, prefix_len) == 0)
			return 0; /* match */

		l = strlen(prop) + 1;
		prop += l;
		plen -= l;
	}

	return -1;
}

static int dt_add_psci_cpu_enable_methods(struct dt_descriptor *dt)
{
	int offs = 0;

	while (1) {
		offs = fdt_next_node(dt->blob, offs, NULL);
		if (offs < 0)
			break;
		if (fdt_getprop(dt->blob, offs, "enable-method", NULL))
			continue; /* already set */
		if (check_node_compat_prefix(dt, offs, "arm,cortex-a"))
			continue; /* no compatible */
		if (fdt_setprop_string(dt->blob, offs, "enable-method", "psci"))
			return -1;
		/* Need to restart scanning as offsets may have changed */
		offs = 0;
	}
	return 0;
}

static int config_psci(struct dt_descriptor *dt)
{
	if (dt_add_psci_node(dt))
		return -1;
	return dt_add_psci_cpu_enable_methods(dt);
}
#else
static int config_psci(struct dt_descriptor *dt __unused)
{
	return 0;
}
#endif /*CFG_PSCI_ARM32*/

#ifdef CFG_CORE_DYN_SHM
static uint64_t get_dt_val_and_advance(const void *data, size_t *offs,
				       uint32_t cell_size)
{
	uint64_t rv = 0;

	if (cell_size == 1) {
		uint32_t v;

		memcpy(&v, (const uint8_t *)data + *offs, sizeof(v));
		*offs += sizeof(v);
		rv = fdt32_to_cpu(v);
	} else {
		uint64_t v;

		memcpy(&v, (const uint8_t *)data + *offs, sizeof(v));
		*offs += sizeof(v);
		rv = fdt64_to_cpu(v);
	}

	return rv;
}

/*
 * Find all non-secure memory from DT. Memory marked inaccessible by Secure
 * World is ignored since it could not be mapped to be used as dynamic shared
 * memory.
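 *
 * Each "memory" node's "reg" property is parsed as (address, size) pairs
 * using the address and size cell counts of the root node.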
 */
static int get_nsec_memory_helper(void *fdt, struct core_mmu_phys_mem *mem)
{
	const uint8_t *prop = NULL;
	uint64_t a = 0;
	uint64_t l = 0;
	size_t prop_offs = 0;
	size_t prop_len = 0;
	int elems_total = 0;
	int addr_size = 0;
	int len_size = 0;
	int offs = 0;
	size_t n = 0;
	int len = 0;

	addr_size = fdt_address_cells(fdt, 0);
	if (addr_size < 0)
		return 0;

	len_size = fdt_size_cells(fdt, 0);
	if (len_size < 0)
		return 0;

	while (true) {
		offs = fdt_node_offset_by_prop_value(fdt, offs, "device_type",
						     "memory",
						     sizeof("memory"));
		if (offs < 0)
			break;

		if (fdt_get_status(fdt, offs) != (DT_STATUS_OK_NSEC |
						  DT_STATUS_OK_SEC))
			continue;

		prop = fdt_getprop(fdt, offs, "reg", &len);
		if (!prop)
			continue;

		prop_len = len;
		for (n = 0, prop_offs = 0; prop_offs < prop_len; n++) {
			a = get_dt_val_and_advance(prop, &prop_offs, addr_size);
			if (prop_offs >= prop_len) {
				n--;
				break;
			}

			l = get_dt_val_and_advance(prop, &prop_offs, len_size);
			if (mem) {
				mem->type = MEM_AREA_DDR_OVERALL;
				mem->addr = a;
				mem->size = l;
				mem++;
			}
		}

		elems_total += n;
	}

	return elems_total;
}

static struct core_mmu_phys_mem *get_nsec_memory(void *fdt, size_t *nelems)
{
	struct core_mmu_phys_mem *mem = NULL;
	int elems_total = 0;

	elems_total = get_nsec_memory_helper(fdt, NULL);
	if (elems_total <= 0)
		return NULL;

	mem = nex_calloc(elems_total, sizeof(*mem));
	if (!mem)
		panic();

	elems_total = get_nsec_memory_helper(fdt, mem);
	assert(elems_total > 0);

	*nelems = elems_total;

	return mem;
}
#endif /*CFG_CORE_DYN_SHM*/

#ifdef CFG_CORE_RESERVED_SHM
static int mark_static_shm_as_reserved(struct dt_descriptor *dt)
{
	vaddr_t shm_start;
	vaddr_t shm_end;

	core_mmu_get_mem_by_type(MEM_AREA_NSEC_SHM, &shm_start, &shm_end);
	if (shm_start != shm_end)
		return add_res_mem_dt_node(dt, "optee_shm",
					   virt_to_phys((void *)shm_start),
					   shm_end - shm_start);

	DMSG("No SHM configured");
	return -1;
}
#endif /*CFG_CORE_RESERVED_SHM*/

static int mark_tzdram_as_reserved(struct dt_descriptor *dt)
{
	return add_res_mem_dt_node(dt, "optee_core", CFG_TZDRAM_START,
				   CFG_TZDRAM_SIZE);
}

static void update_external_dt(void)
{
	struct dt_descriptor *dt = get_external_dt_desc();

	if (!dt || !dt->blob)
		return;

	if (!IS_ENABLED(CFG_CORE_FFA) && add_optee_dt_node(dt))
		panic("Failed to add OP-TEE Device Tree node");

	if (config_psci(dt))
		panic("Failed to config PSCI");

#ifdef CFG_CORE_RESERVED_SHM
	if (mark_static_shm_as_reserved(dt))
		panic("Failed to config non-secure memory");
#endif

	if (mark_tzdram_as_reserved(dt))
		panic("Failed to config secure memory");
}
#else /*CFG_DT*/
static void update_external_dt(void)
{
}

#ifdef CFG_CORE_DYN_SHM
static struct core_mmu_phys_mem *get_nsec_memory(void *fdt __unused,
						 size_t *nelems __unused)
{
	return NULL;
}
#endif /*CFG_CORE_DYN_SHM*/
#endif /*!CFG_DT*/

#if defined(CFG_CORE_FFA)
void *get_manifest_dt(void)
{
	return manifest_dt;
}

static void reinit_manifest_dt(void)
{
	paddr_t pa = (unsigned long)manifest_dt;
	void *fdt = NULL;
	int ret = 0;

	if (!pa) {
		EMSG("No manifest DT found");
		return;
	}

	fdt = core_mmu_add_mapping(MEM_AREA_MANIFEST_DT, pa, CFG_DTB_MAX_SIZE);
	if (!fdt)
		panic("Failed to map manifest DT");

	manifest_dt = fdt;

	ret = fdt_check_full(fdt, CFG_DTB_MAX_SIZE);
	if (ret < 0) {
		EMSG("Invalid manifest Device Tree at %#lx: error %d", pa, ret);
		panic();
	}

	IMSG("manifest DT found");
}

static TEE_Result release_manifest_dt(void)
{
	if (!manifest_dt)
		return TEE_SUCCESS;

	if (core_mmu_remove_mapping(MEM_AREA_MANIFEST_DT, manifest_dt,
				    CFG_DTB_MAX_SIZE))
		panic("Failed to remove temporary manifest DT mapping");
	manifest_dt = NULL;

	return TEE_SUCCESS;
}

boot_final(release_manifest_dt);
#else
void *get_manifest_dt(void)
{
	return NULL;
}

static void reinit_manifest_dt(void)
{
}
#endif /*CFG_CORE_FFA*/

#ifdef CFG_CORE_DYN_SHM
static void discover_nsec_memory(void)
{
	struct core_mmu_phys_mem *mem;
	const struct core_mmu_phys_mem *mem_begin = NULL;
	const struct core_mmu_phys_mem *mem_end = NULL;
	size_t nelems;
	void *fdt = get_external_dt();

	if (fdt) {
		mem = get_nsec_memory(fdt, &nelems);
		if (mem) {
			core_mmu_set_discovered_nsec_ddr(mem, nelems);
			return;
		}

		DMSG("No non-secure memory found in external DT");
	}

	fdt = get_embedded_dt();
	if (fdt) {
		mem = get_nsec_memory(fdt, &nelems);
		if (mem) {
			core_mmu_set_discovered_nsec_ddr(mem, nelems);
			return;
		}

		DMSG("No non-secure memory found in embedded DT");
	}

	mem_begin = phys_ddr_overall_begin;
	mem_end = phys_ddr_overall_end;
	nelems = mem_end - mem_begin;
	if (nelems) {
		/*
		 * Platform cannot use both register_ddr() and the now
		 * deprecated register_dynamic_shm().
		 */
		assert(phys_ddr_overall_compat_begin ==
		       phys_ddr_overall_compat_end);
	} else {
		mem_begin = phys_ddr_overall_compat_begin;
		mem_end = phys_ddr_overall_compat_end;
		nelems = mem_end - mem_begin;
		if (!nelems)
			return;
		DMSG("Warning register_dynamic_shm() is deprecated, please use register_ddr() instead");
	}

	mem = nex_calloc(nelems, sizeof(*mem));
	if (!mem)
		panic();

	memcpy(mem, phys_ddr_overall_begin, sizeof(*mem) * nelems);
	core_mmu_set_discovered_nsec_ddr(mem, nelems);
}
#else /*CFG_CORE_DYN_SHM*/
static void discover_nsec_memory(void)
{
}
#endif /*!CFG_CORE_DYN_SHM*/

#ifdef CFG_NS_VIRTUALIZATION
static TEE_Result virt_init_heap(void)
{
	/* We need to initialize the pool for every virtual guest partition */
	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);

	return TEE_SUCCESS;
}
preinit_early(virt_init_heap);
#endif

void init_tee_runtime(void)
{
#ifndef CFG_WITH_PAGER
	/* Pager initializes TA RAM early */
	core_mmu_init_ta_ram();
#endif
	/*
	 * With virtualization we call this function when creating the
	 * OP-TEE partition instead.
	 */
	if (!IS_ENABLED(CFG_NS_VIRTUALIZATION))
		call_preinitcalls();
	call_initcalls();

	/*
	 * These two functions use crypto_rng_read() to initialize the
	 * pauth keys. Once call_initcalls() returns we're guaranteed that
	 * crypto_rng_read() is ready to be used.
	 */
	thread_init_core_local_pauth_keys();
	thread_init_thread_pauth_keys();

	/*
	 * Reinitialize canaries around the stacks with crypto_rng_read().
	 *
	 * TODO: Updating canaries when CFG_NS_VIRTUALIZATION is enabled will
	 * require synchronization between thread_check_canaries() and
	 * thread_update_canaries().
	 */
	if (!IS_ENABLED(CFG_NS_VIRTUALIZATION))
		thread_update_canaries();
}

static void init_primary(unsigned long pageable_part, unsigned long nsec_entry)
{
	thread_init_core_local_stacks();
	/*
	 * Mask asynchronous exceptions before switching to the thread vector
	 * as the thread handler requires those to be masked while
	 * executing with the temporary stack. The thread subsystem also
	 * asserts that the foreign interrupts are blocked when using most of
	 * its functions.
	 */
	thread_set_exceptions(THREAD_EXCP_ALL);
	primary_save_cntfrq();
	init_vfp_sec();
	/*
	 * Pager: init_runtime() calls thread_kernel_enable_vfp() so we must
	 * set a current thread right now to avoid a chicken-and-egg problem
	 * (thread_init_boot_thread() sets the current thread but needs
	 * things set by init_runtime()).
	 */
	thread_get_core_local()->curr_thread = 0;
	init_runtime(pageable_part);

	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
		/*
		 * Virtualization: We can't initialize threads right now
		 * because threads belong to the "tee" part and will be
		 * initialized separately for each new virtual guest. So,
		 * we'll clear "curr_thread" and call it done.
		 */
		thread_get_core_local()->curr_thread = -1;
	} else {
		thread_init_boot_thread();
	}
	thread_init_primary();
	thread_init_per_cpu();
	init_sec_mon(nsec_entry);
}

static bool cpu_nmfi_enabled(void)
{
#if defined(ARM32)
	return read_sctlr() & SCTLR_NMFI;
#else
	/* Note: ARM64 does not feature non-maskable FIQ support. */
	return false;
#endif
}

/*
 * Note: this function is weak just to make it possible to exclude it from
 * the unpaged area.
 */
void __weak boot_init_primary_late(unsigned long fdt __unused,
				   unsigned long manifest __unused)
{
	size_t fdt_size = CFG_DTB_MAX_SIZE;

	if (IS_ENABLED(CFG_TRANSFER_LIST) && mapped_tl) {
		struct transfer_list_entry *tl_e = NULL;

		tl_e = transfer_list_find(mapped_tl, TL_TAG_FDT);
		if (tl_e)
			fdt_size = tl_e->data_size;
	}

	init_external_dt(boot_arg_fdt, fdt_size);
	reinit_manifest_dt();
#ifdef CFG_CORE_SEL1_SPMC
	tpm_map_log_area(get_manifest_dt());
#else
	tpm_map_log_area(get_external_dt());
#endif
	discover_nsec_memory();
	update_external_dt();
	configure_console_from_dt();

	IMSG("OP-TEE version: %s", core_v_str);
	if (IS_ENABLED(CFG_INSECURE)) {
		IMSG("WARNING: This OP-TEE configuration might be insecure!");
		IMSG("WARNING: Please check https://optee.readthedocs.io/en/latest/architecture/porting_guidelines.html");
	}
	IMSG("Primary CPU initializing");
#ifdef CFG_CORE_ASLR
	DMSG("Executing at offset %#lx with virtual load address %#"PRIxVA,
	     (unsigned long)boot_mmu_config.map_offset, VCORE_START_VA);
#endif
	if (IS_ENABLED(CFG_MEMTAG))
		DMSG("Memory tagging %s",
		     memtag_is_enabled() ? "enabled" : "disabled");

	/* Check if platform needs NMFI workaround */
	if (cpu_nmfi_enabled()) {
		if (!IS_ENABLED(CFG_CORE_WORKAROUND_ARM_NMFI))
			IMSG("WARNING: This ARM core has NMFI enabled, please apply workaround!");
	} else {
		if (IS_ENABLED(CFG_CORE_WORKAROUND_ARM_NMFI))
			IMSG("WARNING: This ARM core does not have NMFI enabled, no need for workaround");
	}

	boot_primary_init_intc();
	init_vfp_nsec();
	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
		IMSG("Initializing virtualization support");
		core_mmu_init_virtualization();
	} else {
		init_tee_runtime();
	}
	call_finalcalls();
	IMSG("Primary CPU switching to normal world boot");
}

static void init_secondary_helper(unsigned long nsec_entry)
{
	IMSG("Secondary CPU %zu initializing", get_core_pos());

	/*
	 * Mask asynchronous exceptions before switching to the thread vector
	 * as the thread handler requires those to be masked while
	 * executing with the temporary stack. The thread subsystem also
	 * asserts that the foreign interrupts are blocked when using most of
	 * its functions.
	 */
	thread_set_exceptions(THREAD_EXCP_ALL);

	secondary_init_cntfrq();
	thread_init_per_cpu();
	init_sec_mon(nsec_entry);
	boot_secondary_init_intc();
	init_vfp_sec();
	init_vfp_nsec();

	IMSG("Secondary CPU %zu switching to normal world boot", get_core_pos());
}

/*
 * Note: this function is weak just to make it possible to exclude it from
 * the unpaged area so that it lies in the init area.
 */
void __weak boot_init_primary_early(void)
{
	unsigned long pageable_part = 0;
	unsigned long e = PADDR_INVALID;
	struct transfer_list_entry *tl_e = NULL;

	if (!IS_ENABLED(CFG_WITH_ARM_TRUSTED_FW))
		e = boot_arg_nsec_entry;

	if (IS_ENABLED(CFG_TRANSFER_LIST) && boot_arg_transfer_list) {
		/* Map and save the TL */
		mapped_tl = transfer_list_map(boot_arg_transfer_list);
		if (!mapped_tl)
			panic("Failed to map transfer list");

		transfer_list_dump(mapped_tl);
		tl_e = transfer_list_find(mapped_tl, TL_TAG_FDT);
		if (tl_e) {
			/*
			 * Expand the data size of the DTB entry to the maximum
			 * allocatable mapped memory to reserve sufficient
			 * space for inserting new nodes and to avoid
			 * potentially corrupting the entries that follow.
			 */
			uint32_t dtb_max_sz = mapped_tl->max_size -
					      mapped_tl->size + tl_e->data_size;

			if (!transfer_list_set_data_size(mapped_tl, tl_e,
							 dtb_max_sz)) {
				EMSG("Failed to extend DTB size to %#"PRIx32,
				     dtb_max_sz);
				panic();
			}
		}
		tl_e = transfer_list_find(mapped_tl, TL_TAG_OPTEE_PAGABLE_PART);
	}

	if (IS_ENABLED(CFG_WITH_PAGER)) {
		if (IS_ENABLED(CFG_TRANSFER_LIST) && tl_e)
			pageable_part =
				get_le64(transfer_list_entry_data(tl_e));
		else
			pageable_part = boot_arg_pageable_part;
	}

	init_primary(pageable_part, e);
}

static void boot_save_transfer_list(unsigned long zero_reg,
				    unsigned long transfer_list,
				    unsigned long fdt)
{
	struct transfer_list_header *tl = (void *)transfer_list;
	struct transfer_list_entry *tl_e = NULL;

	if (zero_reg != 0)
		panic("Incorrect transfer list register convention");

	if (!IS_ALIGNED_WITH_TYPE(transfer_list, struct transfer_list_header) ||
	    !IS_ALIGNED(transfer_list, TL_ALIGNMENT_FROM_ORDER(tl->alignment)))
		panic("Transfer list base address is not aligned");

	if (transfer_list_check_header(tl) == TL_OPS_NONE)
		panic("Invalid transfer list");

	tl_e = transfer_list_find(tl, TL_TAG_FDT);
	if (fdt != (unsigned long)transfer_list_entry_data(tl_e))
		panic("DT does not match the DT entry of the TL");

	boot_arg_transfer_list = transfer_list;
}

#if defined(CFG_WITH_ARM_TRUSTED_FW)
unsigned long boot_cpu_on_handler(unsigned long a0 __maybe_unused,
				  unsigned long a1 __unused)
{
	init_secondary_helper(PADDR_INVALID);
	return 0;
}
#else
void boot_init_secondary(unsigned long nsec_entry)
{
	init_secondary_helper(nsec_entry);
}
#endif

#if defined(CFG_BOOT_SECONDARY_REQUEST)
void boot_set_core_ns_entry(size_t core_idx, uintptr_t entry,
			    uintptr_t context_id)
{
	ns_entry_contexts[core_idx].entry_point = entry;
	ns_entry_contexts[core_idx].context_id = context_id;
	dsb_ishst();
}

int boot_core_release(size_t core_idx, paddr_t entry)
{
	if (!core_idx || core_idx >= CFG_TEE_CORE_NB_CORE)
		return -1;

	ns_entry_contexts[core_idx].entry_point = entry;
	dmb();
	spin_table[core_idx] = 1;
	dsb();
	sev();

	return 0;
}

/*
 * Spin until a secondary boot request arrives, then return with
 * the secondary core entry address.
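 *
 * With CFG_PSCI_ARM32 there is no spin-table handshake; the stored entry
 * context is returned directly.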
 */
struct ns_entry_context *boot_core_hpen(void)
{
#ifdef CFG_PSCI_ARM32
	return &ns_entry_contexts[get_core_pos()];
#else
	do {
		wfe();
	} while (!spin_table[get_core_pos()]);
	dmb();
	return &ns_entry_contexts[get_core_pos()];
#endif
}
#endif

#if defined(CFG_CORE_ASLR)
#if defined(CFG_DT)
unsigned long __weak get_aslr_seed(void)
{
	void *fdt = NULL;
	int rc = 0;
	const uint64_t *seed = NULL;
	int offs = 0;
	int len = 0;

	if (!IS_ENABLED(CFG_CORE_SEL2_SPMC))
		fdt = (void *)boot_arg_fdt;

	if (!fdt) {
		DMSG("No fdt");
		goto err;
	}

	rc = fdt_check_header(fdt);
	if (rc) {
		DMSG("Bad fdt: %d", rc);
		goto err;
	}

	offs = fdt_path_offset(fdt, "/secure-chosen");
	if (offs < 0) {
		DMSG("Cannot find /secure-chosen");
		goto err;
	}
	seed = fdt_getprop(fdt, offs, "kaslr-seed", &len);
	if (!seed || len != sizeof(*seed)) {
		DMSG("Cannot find valid kaslr-seed");
		goto err;
	}

	return fdt64_to_cpu(*seed);

err:
	/* Try platform implementation */
	return plat_get_aslr_seed();
}
#else /*!CFG_DT*/
unsigned long __weak get_aslr_seed(void)
{
	/* Try platform implementation */
	return plat_get_aslr_seed();
}
#endif /*!CFG_DT*/
#endif /*CFG_CORE_ASLR*/

static void *get_fdt_from_boot_info(struct ffa_boot_info_header_1_1 *hdr)
{
	struct ffa_boot_info_1_1 *desc = NULL;
	uint8_t content_fmt = 0;
	uint8_t name_fmt = 0;
	void *fdt = NULL;
	int ret = 0;

	if (hdr->signature != FFA_BOOT_INFO_SIGNATURE) {
		EMSG("Bad boot info signature %#"PRIx32, hdr->signature);
		panic();
	}
	if (hdr->version != FFA_BOOT_INFO_VERSION) {
		EMSG("Bad boot info version %#"PRIx32, hdr->version);
		panic();
	}
	if (hdr->desc_count != 1) {
		EMSG("Bad boot info descriptor count %#"PRIx32,
		     hdr->desc_count);
		panic();
	}
	desc = (void *)((vaddr_t)hdr + hdr->desc_offset);
	name_fmt = desc->flags & FFA_BOOT_INFO_FLAG_NAME_FORMAT_MASK;
	if (name_fmt == FFA_BOOT_INFO_FLAG_NAME_FORMAT_STRING)
		DMSG("Boot info descriptor name \"%16s\"", desc->name);
	else if (name_fmt == FFA_BOOT_INFO_FLAG_NAME_FORMAT_UUID)
		DMSG("Boot info descriptor UUID %pUl", (void *)desc->name);
	else
		DMSG("Boot info descriptor: unknown name format %"PRIu8,
		     name_fmt);

	content_fmt = (desc->flags & FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_MASK) >>
		      FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_SHIFT;
	if (content_fmt != FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_ADDR) {
		EMSG("Bad boot info content format %"PRIu8", expected %u (address)",
		     content_fmt, FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_ADDR);
		panic();
	}

	fdt = (void *)(vaddr_t)desc->contents;
	ret = fdt_check_full(fdt, desc->size);
	if (ret < 0) {
		EMSG("Invalid Device Tree at %p: error %d", fdt, ret);
		panic();
	}
	return fdt;
}

static void get_sec_mem_from_manifest(void *fdt, paddr_t *base, size_t *size)
{
	int ret = 0;
	uint64_t num = 0;

	ret = fdt_node_check_compatible(fdt, 0, "arm,ffa-manifest-1.0");
	if (ret < 0) {
		EMSG("Invalid FF-A manifest at %p: error %d", fdt, ret);
		panic();
	}
	ret = dt_getprop_as_number(fdt, 0, "load-address", &num);
	if (ret < 0) {
		EMSG("Can't read \"load-address\" from FF-A manifest at %p: error %d",
		     fdt, ret);
		panic();
	}
	*base = num;
	/* "mem-size" is currently an undocumented extension to the spec. */
	ret = dt_getprop_as_number(fdt, 0, "mem-size", &num);
	if (ret < 0) {
		EMSG("Can't read \"mem-size\" from FF-A manifest at %p: error %d",
		     fdt, ret);
		panic();
	}
	*size = num;
}

void __weak boot_save_args(unsigned long a0, unsigned long a1,
			   unsigned long a2, unsigned long a3,
			   unsigned long a4 __maybe_unused)
{
	/*
	 * Register use:
	 *
	 * Scenario A: Default arguments
	 * a0	- CFG_CORE_FFA=y && CFG_CORE_SEL2_SPMC=n:
	 *	  if non-NULL holds the TOS FW config [1] address
	 *	- CFG_CORE_FFA=y &&
	 *	  (CFG_CORE_SEL2_SPMC=y || CFG_CORE_EL3_SPMC=y):
	 *	  address of FF-A Boot Information Blob
	 *	- CFG_CORE_FFA=n:
	 *	  if non-NULL holds the pageable part address
	 * a1	- CFG_WITH_ARM_TRUSTED_FW=n (Armv7):
	 *	  Armv7 standard bootarg #1 (kept track of in entry_a32.S)
	 * a2	- CFG_CORE_SEL2_SPMC=n:
	 *	  if non-NULL holds the system DTB address
	 *	- CFG_WITH_ARM_TRUSTED_FW=n (Armv7):
	 *	  Armv7 standard bootarg #2 (system DTB address, kept track
	 *	  of in entry_a32.S)
	 * a3	- Not used
	 * a4	- CFG_WITH_ARM_TRUSTED_FW=n:
	 *	  Non-secure entry address
	 *
	 * [1] A TF-A concept: TOS_FW_CONFIG - Trusted OS Firmware
	 * configuration file. Used by Trusted OS (BL32), that is, OP-TEE
	 * here. This is also called Manifest DT, related to the Manifest DT
	 * passed in the FF-A Boot Information Blob, but with a different
	 * compatible string.
	 *
	 * Scenario B: FW Handoff via Transfer List
	 * Note: FF-A and non-secure entry are not yet supported with
	 * Transfer List
	 * a0	- DTB address or 0 (AArch64)
	 *	- must be 0 (AArch32)
	 * a1	- TRANSFER_LIST_SIGNATURE | REG_CONVENTION_VER_MASK
	 * a2	- must be 0 (AArch64)
	 *	- DTB address or 0 (AArch32)
	 * a3	- Transfer list base address
	 * a4	- Not used
	 */

	if (IS_ENABLED(CFG_TRANSFER_LIST) &&
	    a1 == (TRANSFER_LIST_SIGNATURE | REG_CONVENTION_VER_MASK)) {
		if (IS_ENABLED(CFG_ARM64_core)) {
			boot_save_transfer_list(a2, a3, a0);
			boot_arg_fdt = a0;
		} else {
			boot_save_transfer_list(a0, a3, a2);
			boot_arg_fdt = a2;
		}
		return;
	}

	if (!IS_ENABLED(CFG_CORE_SEL2_SPMC)) {
#if defined(CFG_DT_ADDR)
		boot_arg_fdt = CFG_DT_ADDR;
#else
		boot_arg_fdt = a2;
#endif
	}

	if (IS_ENABLED(CFG_CORE_FFA)) {
		if (IS_ENABLED(CFG_CORE_SEL2_SPMC) ||
		    IS_ENABLED(CFG_CORE_EL3_SPMC))
			manifest_dt = get_fdt_from_boot_info((void *)a0);
		else
			manifest_dt = (void *)a0;
		if (IS_ENABLED(CFG_CORE_SEL2_SPMC) &&
		    IS_ENABLED(CFG_CORE_PHYS_RELOCATABLE)) {
			paddr_t base = 0;
			size_t size = 0;

			get_sec_mem_from_manifest(manifest_dt, &base, &size);
			core_mmu_set_secure_memory(base, size);
		}
	} else {
		if (IS_ENABLED(CFG_WITH_PAGER)) {
#if defined(CFG_PAGEABLE_ADDR)
			boot_arg_pageable_part = CFG_PAGEABLE_ADDR;
#else
			boot_arg_pageable_part = a0;
#endif
		}
		if (!IS_ENABLED(CFG_WITH_ARM_TRUSTED_FW)) {
#if defined(CFG_NS_ENTRY_ADDR)
			boot_arg_nsec_entry = CFG_NS_ENTRY_ADDR;
#else
			boot_arg_nsec_entry = a4;
#endif
		}
	}
}

#if defined(CFG_TRANSFER_LIST)
static TEE_Result release_transfer_list(void)
{
	struct dt_descriptor *dt = get_external_dt_desc();

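	/* Nothing to release if the transfer list was never mapped */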
	if (!mapped_tl)
		return TEE_SUCCESS;

	if (dt) {
		int ret = 0;
		struct transfer_list_entry *tl_e = NULL;

		/*
		 * Pack the DTB and update the transfer list before un-mapping
		 */
		ret = fdt_pack(dt->blob);
		if (ret < 0) {
			EMSG("Failed to pack Device Tree at 0x%" PRIxPA
			     ": error %d", virt_to_phys(dt->blob), ret);
			panic();
		}

		tl_e = transfer_list_find(mapped_tl, TL_TAG_FDT);
		assert(dt->blob == transfer_list_entry_data(tl_e));
		transfer_list_set_data_size(mapped_tl, tl_e,
					    fdt_totalsize(dt->blob));
		dt->blob = NULL;
	}

	transfer_list_unmap_sync(mapped_tl);
	mapped_tl = NULL;

	return TEE_SUCCESS;
}

boot_final(release_transfer_list);
#endif