// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2015-2022, Linaro Limited
 */

#include <arm.h>
#include <assert.h>
#include <compiler.h>
#include <config.h>
#include <console.h>
#include <crypto/crypto.h>
#include <drivers/gic.h>
#include <initcall.h>
#include <inttypes.h>
#include <keep.h>
#include <kernel/asan.h>
#include <kernel/boot.h>
#include <kernel/linker.h>
#include <kernel/misc.h>
#include <kernel/panic.h>
#include <kernel/tee_misc.h>
#include <kernel/thread.h>
#include <kernel/tpm.h>
#include <libfdt.h>
#include <malloc.h>
#include <memtag.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <mm/fobj.h>
#include <mm/tee_mm.h>
#include <mm/tee_pager.h>
#include <sm/psci.h>
#include <stdio.h>
#include <trace.h>
#include <utee_defines.h>
#include <util.h>

#include <platform_config.h>

#if !defined(CFG_WITH_ARM_TRUSTED_FW)
#include <sm/sm.h>
#endif

#if defined(CFG_WITH_VFP)
#include <kernel/vfp.h>
#endif

/*
 * In this file we're using unsigned long to represent physical pointers as
 * they are received in a single register when OP-TEE is initially entered.
 * This limits 32-bit systems to only make use of the lower 32 bits
 * of a physical address for initial parameters.
 *
 * 64-bit systems on the other hand can use full 64-bit physical pointers.
 */
#define PADDR_INVALID		ULONG_MAX

#if defined(CFG_BOOT_SECONDARY_REQUEST)
struct ns_entry_context {
	uintptr_t entry_point;
	uintptr_t context_id;
};
struct ns_entry_context ns_entry_contexts[CFG_TEE_CORE_NB_CORE];
static uint32_t spin_table[CFG_TEE_CORE_NB_CORE];
#endif

#ifdef CFG_BOOT_SYNC_CPU
/*
 * Array used when booting to synchronize the CPUs.
 * When 0, the CPU has not started.
 * When 1, it has started.
 */
uint32_t sem_cpu_sync[CFG_TEE_CORE_NB_CORE];
DECLARE_KEEP_PAGER(sem_cpu_sync);
#endif

#ifdef CFG_DT
struct dt_descriptor {
	void *blob;
#ifdef _CFG_USE_DTB_OVERLAY
	int frag_id;
#endif
};

static struct dt_descriptor external_dt __nex_bss;
#endif

#ifdef CFG_SECONDARY_INIT_CNTFRQ
static uint32_t cntfrq;
#endif

/* May be overridden in plat-$(PLATFORM)/main.c */
__weak void plat_primary_init_early(void)
{
}
DECLARE_KEEP_PAGER(plat_primary_init_early);

/* May be overridden in plat-$(PLATFORM)/main.c */
__weak void main_init_gic(void)
{
}

/* May be overridden in plat-$(PLATFORM)/main.c */
__weak void main_secondary_init_gic(void)
{
}

/* May be overridden in plat-$(PLATFORM)/main.c */
__weak unsigned long plat_get_aslr_seed(void)
{
	DMSG("Warning: no ASLR seed");

	return 0;
}

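/*
 * The weak hooks above are meant to be overridden from
 * plat-$(PLATFORM)/main.c when a platform needs them. For instance, a
 * platform deriving the ASLR seed from a hardware TRNG could provide
 * its own version (hypothetical sketch, read_platform_trng() is not a
 * real OP-TEE API):
 *
 *	unsigned long plat_get_aslr_seed(void)
 *	{
 *		return read_platform_trng();
 *	}
 *
 * Weak definitions that aren't overridden are harmless no-ops, except
 * plat_get_aslr_seed() which warns and returns 0 (no usable seed).
 */
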
/*
 * This function is called as a guard after each smc call which is not
 * supposed to return.
 */
void __panic_at_smc_return(void)
{
	panic();
}

#if defined(CFG_WITH_ARM_TRUSTED_FW)
void init_sec_mon(unsigned long nsec_entry __maybe_unused)
{
	assert(nsec_entry == PADDR_INVALID);
	/* Do nothing as we don't have a secure monitor */
}
#else
/* May be overridden in plat-$(PLATFORM)/main.c */
__weak void init_sec_mon(unsigned long nsec_entry)
{
	struct sm_nsec_ctx *nsec_ctx;

	assert(nsec_entry != PADDR_INVALID);

	/* Initialize secure monitor */
	nsec_ctx = sm_get_nsec_ctx();
	nsec_ctx->mon_lr = nsec_entry;
	nsec_ctx->mon_spsr = CPSR_MODE_SVC | CPSR_I;
	if (nsec_entry & 1)
		nsec_ctx->mon_spsr |= CPSR_T;
}
#endif

#if defined(CFG_WITH_ARM_TRUSTED_FW)
static void init_vfp_nsec(void)
{
}
#else
static void init_vfp_nsec(void)
{
	/* Normal world can use CP10 and CP11 (SIMD/VFP) */
	write_nsacr(read_nsacr() | NSACR_CP10 | NSACR_CP11);
}
#endif

#if defined(CFG_WITH_VFP)

#ifdef ARM32
static void init_vfp_sec(void)
{
	uint32_t cpacr = read_cpacr();

	/*
	 * Enable Advanced SIMD functionality.
	 * Enable use of D16-D31 of the Floating-point Extension register
	 * file.
	 */
	cpacr &= ~(CPACR_ASEDIS | CPACR_D32DIS);
	/*
	 * Enable usage of CP10 and CP11 (SIMD/VFP) in both kernel and
	 * user mode.
	 */
	cpacr |= CPACR_CP(10, CPACR_CP_ACCESS_FULL);
	cpacr |= CPACR_CP(11, CPACR_CP_ACCESS_FULL);
	write_cpacr(cpacr);
}
#endif /* ARM32 */

#ifdef ARM64
static void init_vfp_sec(void)
{
	/* Not using VFP until thread_kernel_enable_vfp() */
	vfp_disable();
}
#endif /* ARM64 */

#else /* CFG_WITH_VFP */

static void init_vfp_sec(void)
{
	/* Not using VFP */
}
#endif

#ifdef CFG_SECONDARY_INIT_CNTFRQ
static void primary_save_cntfrq(void)
{
	assert(cntfrq == 0);

	/*
	 * CNTFRQ should be initialized on the primary CPU by a
	 * previous boot stage
	 */
	cntfrq = read_cntfrq();
}

static void secondary_init_cntfrq(void)
{
	assert(cntfrq != 0);
	write_cntfrq(cntfrq);
}
#else /* CFG_SECONDARY_INIT_CNTFRQ */
static void primary_save_cntfrq(void)
{
}

static void secondary_init_cntfrq(void)
{
}
#endif

#ifdef CFG_CORE_SANITIZE_KADDRESS
static void init_run_constructors(void)
{
	const vaddr_t *ctor;

	for (ctor = &__ctor_list; ctor < &__ctor_end; ctor++)
		((void (*)(void))(*ctor))();
}

static void init_asan(void)
{

	/*
	 * CFG_ASAN_SHADOW_OFFSET is also supplied as
	 * -fasan-shadow-offset=$(CFG_ASAN_SHADOW_OFFSET) to the compiler.
	 * Since all the values needed to calculate CFG_ASAN_SHADOW_OFFSET
	 * aren't available to make, we calculate it in advance and hard
	 * code it into the platform conf.mk. Here, where all the needed
	 * values are available, we double check that the compiler was
	 * supplied the correct value.
	 */
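	/*
	 * For reference (a sketch of the usual ASan shadow encoding, not
	 * something computed in this file): each 8 bytes of address space
	 * are described by one shadow byte, so the compiler instruments
	 * accesses roughly as
	 *	shadow_byte = *(uint8_t *)((addr >> 3) + CFG_ASAN_SHADOW_OFFSET);
	 * The checks below only verify that the hard coded offset matches
	 * the linker-assigned __asan_shadow_start.
	 */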

#define __ASAN_SHADOW_START \
	ROUNDUP(TEE_RAM_VA_START + (TEE_RAM_VA_SIZE * 8) / 9 - 8, 8)
	assert(__ASAN_SHADOW_START == (vaddr_t)&__asan_shadow_start);
#define __CFG_ASAN_SHADOW_OFFSET \
	(__ASAN_SHADOW_START - (TEE_RAM_VA_START / 8))
	COMPILE_TIME_ASSERT(CFG_ASAN_SHADOW_OFFSET == __CFG_ASAN_SHADOW_OFFSET);
#undef __ASAN_SHADOW_START
#undef __CFG_ASAN_SHADOW_OFFSET

	/*
	 * Assign area covered by the shadow area, everything from start up
	 * to the beginning of the shadow area.
	 */
	asan_set_shadowed((void *)TEE_TEXT_VA_START, &__asan_shadow_start);

	/*
	 * Add access to areas that aren't opened automatically by a
	 * constructor.
	 */
	asan_tag_access(&__ctor_list, &__ctor_end);
	asan_tag_access(__rodata_start, __rodata_end);
#ifdef CFG_WITH_PAGER
	asan_tag_access(__pageable_start, __pageable_end);
#endif /*CFG_WITH_PAGER*/
	asan_tag_access(__nozi_start, __nozi_end);
	asan_tag_access(__exidx_start, __exidx_end);
	asan_tag_access(__extab_start, __extab_end);

	init_run_constructors();

	/* Everything is tagged correctly, let's start address sanitizing. */
	asan_start();
}
#else /*CFG_CORE_SANITIZE_KADDRESS*/
static void init_asan(void)
{
}
#endif /*CFG_CORE_SANITIZE_KADDRESS*/

#if defined(CFG_MEMTAG)
/* Called from entry_a64.S only when MEMTAG is configured */
void boot_init_memtag(void)
{
	memtag_init_ops(feat_mte_implemented());
	memtag_set_tags((void *)TEE_RAM_START, TEE_RAM_PH_SIZE, 0);
}
#endif

#ifdef CFG_WITH_PAGER

#ifdef CFG_CORE_SANITIZE_KADDRESS
static void carve_out_asan_mem(tee_mm_pool_t *pool)
{
	const size_t s = pool->hi - pool->lo;
	tee_mm_entry_t *mm;
	paddr_t apa = ASAN_MAP_PA;
	size_t asz = ASAN_MAP_SZ;

	if (core_is_buffer_outside(apa, asz, pool->lo, s))
		return;

	/* Reserve the shadow area */
	if (!core_is_buffer_inside(apa, asz, pool->lo, s)) {
		if (apa < pool->lo) {
			/*
			 * ASAN buffer is overlapping with the beginning of
			 * the pool.
			 */
			asz -= pool->lo - apa;
			apa = pool->lo;
		} else {
			/*
			 * ASAN buffer is overlapping with the end of the
			 * pool.
			 */
			asz = pool->hi - apa;
		}
	}
	mm = tee_mm_alloc2(pool, apa, asz);
	assert(mm);
}
#else
static void carve_out_asan_mem(tee_mm_pool_t *pool __unused)
{
}
#endif

static void print_pager_pool_size(void)
{
	struct tee_pager_stats __maybe_unused stats;

	tee_pager_get_stats(&stats);
	IMSG("Pager pool size: %zukB",
	     stats.npages_all * SMALL_PAGE_SIZE / 1024);
}

static void init_vcore(tee_mm_pool_t *mm_vcore)
{
	const vaddr_t begin = VCORE_START_VA;
	size_t size = TEE_RAM_VA_SIZE;

#ifdef CFG_CORE_SANITIZE_KADDRESS
	/* Carve out asan memory, flat mapped after core memory */
	if (begin + size > ASAN_SHADOW_PA)
		size = ASAN_MAP_PA - begin;
#endif

	if (!tee_mm_init(mm_vcore, begin, size, SMALL_PAGE_SHIFT,
			 TEE_MM_POOL_NO_FLAGS))
		panic("tee_mm_vcore init failed");
}

/*
 * With CFG_CORE_ASLR=y the init part is relocated very early during boot.
 * The init part is also paged just as the rest of the normal paged code, with
 * the difference that it's preloaded during boot. When the backing store
 * is configured the entire paged binary is copied in place and then also
 * the init part. Since the init part has been relocated (references to
 * addresses updated to compensate for the new load address) this has to be
 * undone for the hashes of those pages to match with the original binary.
 *
 * If CFG_CORE_ASLR=n, nothing needs to be done as the code/ro pages are
 * unchanged.
 */
static void undo_init_relocation(uint8_t *paged_store __maybe_unused)
{
#ifdef CFG_CORE_ASLR
	unsigned long *ptr = NULL;
	const uint32_t *reloc = NULL;
	const uint32_t *reloc_end = NULL;
	unsigned long offs = boot_mmu_config.load_offset;
	const struct boot_embdata *embdata = (const void *)__init_end;
	vaddr_t addr_end = (vaddr_t)__init_end - offs - TEE_RAM_START;
	vaddr_t addr_start = (vaddr_t)__init_start - offs - TEE_RAM_START;

	reloc = (const void *)((vaddr_t)embdata + embdata->reloc_offset);
	reloc_end = reloc + embdata->reloc_len / sizeof(*reloc);

	for (; reloc < reloc_end; reloc++) {
		if (*reloc < addr_start)
			continue;
		if (*reloc >= addr_end)
			break;
		ptr = (void *)(paged_store + *reloc - addr_start);
		*ptr -= offs;
	}
#endif
}

static struct fobj *ro_paged_alloc(tee_mm_entry_t *mm, void *hashes,
				   void *store)
{
	const unsigned int num_pages = tee_mm_get_bytes(mm) / SMALL_PAGE_SIZE;
#ifdef CFG_CORE_ASLR
	unsigned int reloc_offs = (vaddr_t)__pageable_start - VCORE_START_VA;
	const struct boot_embdata *embdata = (const void *)__init_end;
	const void *reloc = __init_end + embdata->reloc_offset;

	return fobj_ro_reloc_paged_alloc(num_pages, hashes, reloc_offs,
					 reloc, embdata->reloc_len, store);
#else
	return fobj_ro_paged_alloc(num_pages, hashes, store);
#endif
}

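/*
 * Rough sketch of the backing store that init_runtime() below assembles
 * in TA RAM (layout inferred from the code, offsets are relative to
 * paged_store):
 *
 *	offset 0 .............. init part, copied from __init_start with
 *	                        any ASLR relocation undone
 *	offset init_size ...... pageable non-init part, moved from wherever
 *	                        the bootloader left it in TA RAM
 *	offset pageable_size .. end of store
 *
 * Every SMALL_PAGE_SIZE page of this store is verified against the
 * SHA-256 hashes embedded after __init_end before the pager serves it.
 */
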
static void init_runtime(unsigned long pageable_part)
{
	size_t n;
	size_t init_size = (size_t)(__init_end - __init_start);
	size_t pageable_start = (size_t)__pageable_start;
	size_t pageable_end = (size_t)__pageable_end;
	size_t pageable_size = pageable_end - pageable_start;
	vaddr_t tzsram_end = TZSRAM_BASE + TZSRAM_SIZE - TEE_LOAD_ADDR +
			     VCORE_START_VA;
	size_t hash_size = (pageable_size / SMALL_PAGE_SIZE) *
			   TEE_SHA256_HASH_SIZE;
	const struct boot_embdata *embdata = (const void *)__init_end;
	const void *tmp_hashes = NULL;
	tee_mm_entry_t *mm = NULL;
	struct fobj *fobj = NULL;
	uint8_t *paged_store = NULL;
	uint8_t *hashes = NULL;

	assert(pageable_size % SMALL_PAGE_SIZE == 0);
	assert(embdata->total_len >= embdata->hashes_offset +
				     embdata->hashes_len);
	assert(hash_size == embdata->hashes_len);

	tmp_hashes = __init_end + embdata->hashes_offset;

	init_asan();

	/* Add heap2 first as heap1 may be too small as initial bget pool */
	malloc_add_pool(__heap2_start, __heap2_end - __heap2_start);
	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);

	/*
	 * This needs to be initialized early to support address lookup
	 * in MEM_AREA_TEE_RAM
	 */
	tee_pager_early_init();

	hashes = malloc(hash_size);
	IMSG_RAW("\n");
	IMSG("Pager is enabled. Hashes: %zu bytes", hash_size);
	assert(hashes);
	asan_memcpy_unchecked(hashes, tmp_hashes, hash_size);

	/*
	 * Need tee_mm_sec_ddr initialized to be able to allocate secure
	 * DDR below.
	 */
	core_mmu_init_ta_ram();

	carve_out_asan_mem(&tee_mm_sec_ddr);

	mm = tee_mm_alloc(&tee_mm_sec_ddr, pageable_size);
	assert(mm);
	paged_store = phys_to_virt(tee_mm_get_smem(mm), MEM_AREA_TA_RAM,
				   pageable_size);
	/*
	 * Load pageable part in the dedicated allocated area:
	 * - Move pageable non-init part into pageable area. Note bootloader
	 *   may have loaded it anywhere in TA RAM hence use memmove().
	 * - Copy pageable init part from current location into pageable area.
	 */
	memmove(paged_store + init_size,
		phys_to_virt(pageable_part,
			     core_mmu_get_type_by_pa(pageable_part),
			     __pageable_part_end - __pageable_part_start),
		__pageable_part_end - __pageable_part_start);
	asan_memcpy_unchecked(paged_store, __init_start, init_size);
	/*
	 * Undo eventual relocation for the init part so the hash checks
	 * can pass.
	 */
	undo_init_relocation(paged_store);

	/* Check that the hashes of what's in the pageable area are OK */
	DMSG("Checking hashes of pageable area");
	for (n = 0; (n * SMALL_PAGE_SIZE) < pageable_size; n++) {
		const uint8_t *hash = hashes + n * TEE_SHA256_HASH_SIZE;
		const uint8_t *page = paged_store + n * SMALL_PAGE_SIZE;
		TEE_Result res;

		DMSG("hash pg_idx %zu hash %p page %p", n, hash, page);
		res = hash_sha256_check(hash, page, SMALL_PAGE_SIZE);
		if (res != TEE_SUCCESS) {
			EMSG("Hash failed for page %zu at %p: res 0x%x",
			     n, (void *)page, res);
			panic();
		}
	}

	/*
	 * Assert that the prepaged init sections are page aligned so that
	 * nothing trails uninitialized at the end of the premapped init
	 * area.
	 */
	assert(!(init_size & SMALL_PAGE_MASK));

	/*
	 * Initialize the virtual memory pool used for main_mmu_l2_ttb which
	 * is supplied to tee_pager_init() below.
	 */
	init_vcore(&tee_mm_vcore);

	/*
	 * Assign the alias area for the pager at the end of the small page
	 * block that the rest of the binary is loaded into. We're taking
	 * more than needed, but we're guaranteed to not need more than the
	 * physical amount of TZSRAM.
	 */
	mm = tee_mm_alloc2(&tee_mm_vcore,
			   (vaddr_t)tee_mm_vcore.lo +
			   tee_mm_vcore.size - TZSRAM_SIZE,
			   TZSRAM_SIZE);
	assert(mm);
	tee_pager_set_alias_area(mm);

	/*
	 * Claim virtual memory which isn't paged.
	 * Linear memory (flat map core memory) ends there.
	 */
	mm = tee_mm_alloc2(&tee_mm_vcore, VCORE_UNPG_RX_PA,
			   (vaddr_t)(__pageable_start - VCORE_UNPG_RX_PA));
	assert(mm);

	/*
	 * Allocate virtual memory for the pageable area and let the pager
	 * take charge of all the pages already assigned to that memory.
	 */
	mm = tee_mm_alloc2(&tee_mm_vcore, (vaddr_t)__pageable_start,
			   pageable_size);
	assert(mm);
	fobj = ro_paged_alloc(mm, hashes, paged_store);
	assert(fobj);
	tee_pager_add_core_region(tee_mm_get_smem(mm), PAGED_REGION_TYPE_RO,
				  fobj);
	fobj_put(fobj);

	tee_pager_add_pages(pageable_start, init_size / SMALL_PAGE_SIZE, false);
	tee_pager_add_pages(pageable_start + init_size,
			    (pageable_size - init_size) / SMALL_PAGE_SIZE,
			    true);
	if (pageable_end < tzsram_end)
		tee_pager_add_pages(pageable_end, (tzsram_end - pageable_end) /
						  SMALL_PAGE_SIZE, true);

	/*
	 * There may be physical pages in TZSRAM before the core load address.
	 * These pages can be added to the physical pages pool of the pager.
	 * This setup may happen when the secure bootloader runs in TZSRAM
	 * and its memory can be reused by OP-TEE once boot stages complete.
	 */
	tee_pager_add_pages(tee_mm_vcore.lo,
			(VCORE_UNPG_RX_PA - tee_mm_vcore.lo) / SMALL_PAGE_SIZE,
			true);

	print_pager_pool_size();
}
#else

static void init_runtime(unsigned long pageable_part __unused)
{
	init_asan();

	/*
	 * By default the whole of OP-TEE uses malloc, so we need to
	 * initialize it early. But when virtualization is enabled, malloc
	 * is used only by the TEE runtime, so malloc should be initialized
	 * later, separately for every virtual partition. Core code uses
	 * nex_malloc instead.
	 */
#ifdef CFG_VIRTUALIZATION
	nex_malloc_add_pool(__nex_heap_start, __nex_heap_end -
					      __nex_heap_start);
#else
	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
#endif

	IMSG_RAW("\n");
}
#endif

void *get_dt(void)
{
	void *fdt = get_embedded_dt();

	if (!fdt)
		fdt = get_external_dt();

	return fdt;
}

#if defined(CFG_EMBED_DTB)
void *get_embedded_dt(void)
{
	static bool checked;

	assert(cpu_mmu_enabled());

	if (!checked) {
		IMSG("Embedded DTB found");

		if (fdt_check_header(embedded_secure_dtb))
			panic("Invalid embedded DTB");

		checked = true;
	}

	return embedded_secure_dtb;
}
#else
void *get_embedded_dt(void)
{
	return NULL;
}
#endif /*CFG_EMBED_DTB*/

#if defined(CFG_DT)
void *get_external_dt(void)
{
	if (!IS_ENABLED(CFG_EXTERNAL_DT))
		return NULL;

	assert(cpu_mmu_enabled());
	return external_dt.blob;
}

static TEE_Result release_external_dt(void)
{
	int ret = 0;

	if (!IS_ENABLED(CFG_EXTERNAL_DT))
		return TEE_SUCCESS;

	if (!external_dt.blob)
		return TEE_SUCCESS;

	ret = fdt_pack(external_dt.blob);
	if (ret < 0) {
		EMSG("Failed to pack Device Tree at 0x%" PRIxPA ": error %d",
		     virt_to_phys(external_dt.blob), ret);
		panic();
	}

	if (core_mmu_remove_mapping(MEM_AREA_EXT_DT, external_dt.blob,
				    CFG_DTB_MAX_SIZE))
		panic("Failed to remove temporary Device Tree mapping");

	/* External DTB is no longer accessed, reset the pointer */
	external_dt.blob = NULL;

	return TEE_SUCCESS;
}
boot_final(release_external_dt);

#ifdef _CFG_USE_DTB_OVERLAY
static int add_dt_overlay_fragment(struct dt_descriptor *dt, int ioffs)
{
	char frag[32];
	int offs;
	int ret;

	snprintf(frag, sizeof(frag), "fragment@%d", dt->frag_id);
	offs = fdt_add_subnode(dt->blob, ioffs, frag);
	if (offs < 0)
		return offs;

	dt->frag_id += 1;

	ret = fdt_setprop_string(dt->blob, offs, "target-path", "/");
	if (ret < 0)
		return -1;

	return fdt_add_subnode(dt->blob, offs, "__overlay__");
}

static int init_dt_overlay(struct dt_descriptor *dt, int __maybe_unused dt_size)
{
	int fragment;

	if (IS_ENABLED(CFG_EXTERNAL_DTB_OVERLAY)) {
		if (!fdt_check_header(dt->blob)) {
			fdt_for_each_subnode(fragment, dt->blob, 0)
				dt->frag_id += 1;
			return 0;
		}
	}

	return fdt_create_empty_tree(dt->blob, dt_size);
}
#else
static int add_dt_overlay_fragment(struct dt_descriptor *dt __unused, int offs)
{
	return offs;
}

static int init_dt_overlay(struct dt_descriptor *dt __unused,
			   int dt_size __unused)
{
	return 0;
}
#endif /* _CFG_USE_DTB_OVERLAY */
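
/*
 * For reference, when _CFG_USE_DTB_OVERLAY is enabled each node added by
 * the helpers below ends up wrapped in a standard devicetree overlay
 * fragment. A node "foo" added under "/" would look roughly like this
 * (illustrative only, the fragment number depends on dt->frag_id):
 *
 *	fragment@0 {
 *		target-path = "/";
 *		__overlay__ {
 *			foo {
 *				...
 *			};
 *		};
 *	};
 */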

static int add_dt_path_subnode(struct dt_descriptor *dt, const char *path,
			       const char *subnode)
{
	int offs;

	offs = fdt_path_offset(dt->blob, path);
	if (offs < 0)
		return -1;
	offs = add_dt_overlay_fragment(dt, offs);
	if (offs < 0)
		return -1;
	offs = fdt_add_subnode(dt->blob, offs, subnode);
	if (offs < 0)
		return -1;
	return offs;
}

static int add_optee_dt_node(struct dt_descriptor *dt)
{
	int offs;
	int ret;

	if (fdt_path_offset(dt->blob, "/firmware/optee") >= 0) {
		DMSG("OP-TEE Device Tree node already exists!");
		return 0;
	}

	offs = fdt_path_offset(dt->blob, "/firmware");
	if (offs < 0) {
		offs = add_dt_path_subnode(dt, "/", "firmware");
		if (offs < 0)
			return -1;
	}

	offs = fdt_add_subnode(dt->blob, offs, "optee");
	if (offs < 0)
		return -1;

	ret = fdt_setprop_string(dt->blob, offs, "compatible",
				 "linaro,optee-tz");
	if (ret < 0)
		return -1;
	ret = fdt_setprop_string(dt->blob, offs, "method", "smc");
	if (ret < 0)
		return -1;
	if (CFG_CORE_ASYNC_NOTIF_GIC_INTID) {
		/*
		 * The format of the interrupt property is defined by the
		 * binding of the interrupt domain root. In this case it's
		 * one Arm GIC v1, v2 or v3 so we must be compatible with
		 * these.
		 *
		 * An SPI type of interrupt is indicated with a 0 in the
		 * first cell.
		 *
		 * The interrupt number goes in the second cell where
		 * SPIs range from 0 to 987.
		 *
		 * Flags are passed in the third cell where a 1 means edge
		 * triggered.
		 */
		const uint32_t gic_spi = 0;
		const uint32_t irq_type_edge = 1;
		uint32_t val[] = {
			TEE_U32_TO_BIG_ENDIAN(gic_spi),
			TEE_U32_TO_BIG_ENDIAN(CFG_CORE_ASYNC_NOTIF_GIC_INTID -
					      GIC_SPI_BASE),
			TEE_U32_TO_BIG_ENDIAN(irq_type_edge),
		};

		ret = fdt_setprop(dt->blob, offs, "interrupts", val,
				  sizeof(val));
		if (ret < 0)
			return -1;
	}
	return 0;
}

#ifdef CFG_PSCI_ARM32
static int append_psci_compatible(void *fdt, int offs, const char *str)
{
	return fdt_appendprop(fdt, offs, "compatible", str, strlen(str) + 1);
}

static int dt_add_psci_node(struct dt_descriptor *dt)
{
	int offs;

	if (fdt_path_offset(dt->blob, "/psci") >= 0) {
		DMSG("PSCI Device Tree node already exists!");
		return 0;
	}

	offs = add_dt_path_subnode(dt, "/", "psci");
	if (offs < 0)
		return -1;
	if (append_psci_compatible(dt->blob, offs, "arm,psci-1.0"))
		return -1;
	if (append_psci_compatible(dt->blob, offs, "arm,psci-0.2"))
		return -1;
	if (append_psci_compatible(dt->blob, offs, "arm,psci"))
		return -1;
	if (fdt_setprop_string(dt->blob, offs, "method", "smc"))
		return -1;
	if (fdt_setprop_u32(dt->blob, offs, "cpu_suspend", PSCI_CPU_SUSPEND))
		return -1;
	if (fdt_setprop_u32(dt->blob, offs, "cpu_off", PSCI_CPU_OFF))
		return -1;
	if (fdt_setprop_u32(dt->blob, offs, "cpu_on", PSCI_CPU_ON))
		return -1;
	if (fdt_setprop_u32(dt->blob, offs, "sys_poweroff", PSCI_SYSTEM_OFF))
		return -1;
	if (fdt_setprop_u32(dt->blob, offs, "sys_reset", PSCI_SYSTEM_RESET))
		return -1;
	return 0;
}

static int check_node_compat_prefix(struct dt_descriptor *dt, int offs,
				    const char *prefix)
{
	const size_t prefix_len = strlen(prefix);
	size_t l;
	int plen;
	const char *prop;

	prop = fdt_getprop(dt->blob, offs, "compatible", &plen);
	if (!prop)
		return -1;

	while (plen > 0) {
		if (memcmp(prop, prefix, prefix_len) == 0)
			return 0; /* match */

		l = strlen(prop) + 1;
		prop += l;
		plen -= l;
	}

	return -1;
}

static int dt_add_psci_cpu_enable_methods(struct dt_descriptor *dt)
{
	int offs = 0;

	while (1) {
		offs = fdt_next_node(dt->blob, offs, NULL);
		if (offs < 0)
			break;
		if (fdt_getprop(dt->blob, offs, "enable-method", NULL))
			continue; /* already set */
		if (check_node_compat_prefix(dt, offs, "arm,cortex-a"))
			continue; /* no compatible */
		if (fdt_setprop_string(dt->blob, offs, "enable-method", "psci"))
			return -1;
		/* Need to restart scanning as offsets may have changed */
		offs = 0;
	}
	return 0;
}

static int config_psci(struct dt_descriptor *dt)
{
	if (dt_add_psci_node(dt))
		return -1;
	return dt_add_psci_cpu_enable_methods(dt);
}
#else
static int config_psci(struct dt_descriptor *dt __unused)
{
	return 0;
}
#endif /*CFG_PSCI_ARM32*/

static void set_dt_val(void *data, uint32_t cell_size, uint64_t val)
{
	if (cell_size == 1) {
		fdt32_t v = cpu_to_fdt32((uint32_t)val);

		memcpy(data, &v, sizeof(v));
	} else {
		fdt64_t v = cpu_to_fdt64(val);

		memcpy(data, &v, sizeof(v));
	}
}

static int add_res_mem_dt_node(struct dt_descriptor *dt, const char *name,
			       paddr_t pa, size_t size)
{
	int offs = 0;
	int ret = 0;
	int addr_size = -1;
	int len_size = -1;
	bool found = true;
	char subnode_name[80] = { 0 };

	offs = fdt_path_offset(dt->blob, "/reserved-memory");

	if (offs < 0) {
		found = false;
		offs = 0;
	}

	if (IS_ENABLED(_CFG_USE_DTB_OVERLAY)) {
		len_size = sizeof(paddr_t) / sizeof(uint32_t);
		addr_size = sizeof(paddr_t) / sizeof(uint32_t);
	} else {
		len_size = fdt_size_cells(dt->blob, offs);
		if (len_size < 0)
			return -1;
		addr_size = fdt_address_cells(dt->blob, offs);
		if (addr_size < 0)
			return -1;
	}

	if (!found) {
		offs = add_dt_path_subnode(dt, "/", "reserved-memory");
		if (offs < 0)
			return -1;
		ret = fdt_setprop_cell(dt->blob, offs, "#address-cells",
				       addr_size);
		if (ret < 0)
			return -1;
		ret = fdt_setprop_cell(dt->blob, offs, "#size-cells", len_size);
		if (ret < 0)
			return -1;
		ret = fdt_setprop(dt->blob, offs, "ranges", NULL, 0);
		if (ret < 0)
			return -1;
	}

	ret = snprintf(subnode_name, sizeof(subnode_name),
		       "%s@%" PRIxPA, name, pa);
	if (ret < 0 || ret >= (int)sizeof(subnode_name))
		DMSG("truncated node \"%s@%" PRIxPA"\"", name, pa);
	offs = fdt_add_subnode(dt->blob, offs, subnode_name);
	if (offs >= 0) {
		uint32_t data[FDT_MAX_NCELLS * 2];

		set_dt_val(data, addr_size, pa);
		set_dt_val(data + addr_size, len_size, size);
		ret = fdt_setprop(dt->blob, offs, "reg", data,
				  sizeof(uint32_t) * (addr_size + len_size));
		if (ret < 0)
			return -1;
		ret = fdt_setprop(dt->blob, offs, "no-map", NULL, 0);
		if (ret < 0)
			return -1;
	} else {
		return -1;
	}
	return 0;
}

#ifdef CFG_CORE_DYN_SHM
static uint64_t get_dt_val_and_advance(const void *data, size_t *offs,
				       uint32_t cell_size)
{
	uint64_t rv = 0;

	if (cell_size == 1) {
		uint32_t v;

		memcpy(&v, (const uint8_t *)data + *offs, sizeof(v));
		*offs += sizeof(v);
		rv = fdt32_to_cpu(v);
	} else {
		uint64_t v;

		memcpy(&v, (const uint8_t *)data + *offs, sizeof(v));
		*offs += sizeof(v);
		rv = fdt64_to_cpu(v);
	}

	return rv;
}

/*
 * Find all non-secure memory from DT. Memory marked inaccessible by Secure
 * World is ignored since it could not be mapped to be used as dynamic shared
 * memory.
 */
static int get_nsec_memory_helper(void *fdt, struct core_mmu_phys_mem *mem)
{
	const uint8_t *prop = NULL;
	uint64_t a = 0;
	uint64_t l = 0;
	size_t prop_offs = 0;
	size_t prop_len = 0;
	int elems_total = 0;
	int addr_size = 0;
	int len_size = 0;
	int offs = 0;
	size_t n = 0;
	int len = 0;

	addr_size = fdt_address_cells(fdt, 0);
	if (addr_size < 0)
		return 0;

	len_size = fdt_size_cells(fdt, 0);
	if (len_size < 0)
		return 0;

	while (true) {
		offs = fdt_node_offset_by_prop_value(fdt, offs, "device_type",
						     "memory",
						     sizeof("memory"));
		if (offs < 0)
			break;

		if (_fdt_get_status(fdt, offs) != (DT_STATUS_OK_NSEC |
						   DT_STATUS_OK_SEC))
			continue;

		prop = fdt_getprop(fdt, offs, "reg", &len);
		if (!prop)
			continue;

		prop_len = len;
		for (n = 0, prop_offs = 0; prop_offs < prop_len; n++) {
			a = get_dt_val_and_advance(prop, &prop_offs, addr_size);
			if (prop_offs >= prop_len) {
				n--;
				break;
			}

			l = get_dt_val_and_advance(prop, &prop_offs, len_size);
			if (mem) {
				mem->type = MEM_AREA_DDR_OVERALL;
				mem->addr = a;
				mem->size = l;
				mem++;
			}
		}

		elems_total += n;
	}

	return elems_total;
}

static struct core_mmu_phys_mem *get_nsec_memory(void *fdt, size_t *nelems)
{
	struct core_mmu_phys_mem *mem = NULL;
	int elems_total = 0;

	elems_total = get_nsec_memory_helper(fdt, NULL);
	if (elems_total <= 0)
		return NULL;

	mem = nex_calloc(elems_total, sizeof(*mem));
	if (!mem)
		panic();

	elems_total = get_nsec_memory_helper(fdt, mem);
	assert(elems_total > 0);

	*nelems = elems_total;

	return mem;
}
#endif /*CFG_CORE_DYN_SHM*/

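/*
 * The helpers above scan DT nodes of the following shape, taking
 * #address-cells/#size-cells from the root node (example values only):
 *
 *	memory@80000000 {
 *		device_type = "memory";
 *		reg = <0x0 0x80000000 0x0 0x40000000>;
 *	};
 *
 * Each (address, size) pair found in "reg" becomes one
 * MEM_AREA_DDR_OVERALL entry usable for dynamic shared memory.
 */
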
#ifdef CFG_CORE_RESERVED_SHM
static int mark_static_shm_as_reserved(struct dt_descriptor *dt)
{
	vaddr_t shm_start;
	vaddr_t shm_end;

	core_mmu_get_mem_by_type(MEM_AREA_NSEC_SHM, &shm_start, &shm_end);
	if (shm_start != shm_end)
		return add_res_mem_dt_node(dt, "optee_shm",
					   virt_to_phys((void *)shm_start),
					   shm_end - shm_start);

	DMSG("No SHM configured");
	return -1;
}
#endif /*CFG_CORE_RESERVED_SHM*/

static void init_external_dt(unsigned long phys_dt)
{
	struct dt_descriptor *dt = &external_dt;
	void *fdt;
	int ret;

	if (!IS_ENABLED(CFG_EXTERNAL_DT))
		return;

	if (!phys_dt) {
		/*
		 * No need to panic as we're not using the DT in OP-TEE
		 * yet, we're only adding some nodes for normal world use.
		 * This makes the switch to using DT easier as we can boot
		 * a newer OP-TEE with older boot loaders. Once we start to
		 * initialize devices based on DT we'll likely panic
		 * instead of returning here.
		 */
		IMSG("No non-secure external DT");
		return;
	}

	fdt = core_mmu_add_mapping(MEM_AREA_EXT_DT, phys_dt, CFG_DTB_MAX_SIZE);
	if (!fdt)
		panic("Failed to map external DTB");

	dt->blob = fdt;

	ret = init_dt_overlay(dt, CFG_DTB_MAX_SIZE);
	if (ret < 0) {
		EMSG("Device Tree Overlay init fail @ %#lx: error %d", phys_dt,
		     ret);
		panic();
	}

	ret = fdt_open_into(fdt, fdt, CFG_DTB_MAX_SIZE);
	if (ret < 0) {
		EMSG("Invalid Device Tree at %#lx: error %d", phys_dt, ret);
		panic();
	}

	IMSG("Non-secure external DT found");
}

static int mark_tzdram_as_reserved(struct dt_descriptor *dt)
{
	return add_res_mem_dt_node(dt, "optee_core", CFG_TZDRAM_START,
				   CFG_TZDRAM_SIZE);
}

static void update_external_dt(void)
{
	struct dt_descriptor *dt = &external_dt;

	if (!IS_ENABLED(CFG_EXTERNAL_DT))
		return;

	if (!dt->blob)
		return;

	if (!IS_ENABLED(CFG_CORE_FFA) && add_optee_dt_node(dt))
		panic("Failed to add OP-TEE Device Tree node");

	if (config_psci(dt))
		panic("Failed to config PSCI");

#ifdef CFG_CORE_RESERVED_SHM
	if (mark_static_shm_as_reserved(dt))
		panic("Failed to config non-secure memory");
#endif

	if (mark_tzdram_as_reserved(dt))
		panic("Failed to config secure memory");
}
#else /*CFG_DT*/
void *get_external_dt(void)
{
	return NULL;
}

static void init_external_dt(unsigned long phys_dt __unused)
{
}

static void update_external_dt(void)
{
}

#ifdef CFG_CORE_DYN_SHM
static struct core_mmu_phys_mem *get_nsec_memory(void *fdt __unused,
						 size_t *nelems __unused)
{
	return NULL;
}
#endif /*CFG_CORE_DYN_SHM*/
#endif /*!CFG_DT*/

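/*
 * discover_nsec_memory() below prefers the ranges found in the external
 * DT. When the DT provides nothing, it falls back to the ranges the
 * platform registered at compile time, typically with something like
 * (hypothetical platform constants):
 *
 *	register_ddr(DRAM0_BASE, DRAM0_SIZE);
 *
 * in plat-$(PLATFORM)/main.c, which populates phys_ddr_overall_begin/end.
 */
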
#ifdef CFG_CORE_DYN_SHM
static void discover_nsec_memory(void)
{
	struct core_mmu_phys_mem *mem;
	const struct core_mmu_phys_mem *mem_begin = NULL;
	const struct core_mmu_phys_mem *mem_end = NULL;
	size_t nelems;
	void *fdt = get_external_dt();

	if (fdt) {
		mem = get_nsec_memory(fdt, &nelems);
		if (mem) {
			core_mmu_set_discovered_nsec_ddr(mem, nelems);
			return;
		}

		DMSG("No non-secure memory found in FDT");
	}

	mem_begin = phys_ddr_overall_begin;
	mem_end = phys_ddr_overall_end;
	nelems = mem_end - mem_begin;
	if (nelems) {
		/*
		 * A platform cannot use both register_ddr() and the now
		 * deprecated register_dynamic_shm().
		 */
		assert(phys_ddr_overall_compat_begin ==
		       phys_ddr_overall_compat_end);
	} else {
		mem_begin = phys_ddr_overall_compat_begin;
		mem_end = phys_ddr_overall_compat_end;
		nelems = mem_end - mem_begin;
		if (!nelems)
			return;
		DMSG("Warning register_dynamic_shm() is deprecated, please use register_ddr() instead");
	}

	mem = nex_calloc(nelems, sizeof(*mem));
	if (!mem)
		panic();

	memcpy(mem, phys_ddr_overall_begin, sizeof(*mem) * nelems);
	core_mmu_set_discovered_nsec_ddr(mem, nelems);
}
#else /*CFG_CORE_DYN_SHM*/
static void discover_nsec_memory(void)
{
}
#endif /*!CFG_CORE_DYN_SHM*/

#ifdef CFG_VIRTUALIZATION
static TEE_Result virt_init_heap(void)
{
	/* We need to initialize the pool for every virtual guest partition */
	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);

	return TEE_SUCCESS;
}
preinit_early(virt_init_heap);
#endif

void init_tee_runtime(void)
{
#ifndef CFG_WITH_PAGER
	/* Pager initializes TA RAM early */
	core_mmu_init_ta_ram();
#endif
	/*
	 * With virtualization we call this function when creating the
	 * OP-TEE partition instead.
	 */
	if (!IS_ENABLED(CFG_VIRTUALIZATION))
		call_preinitcalls();
	call_initcalls();

	/*
	 * These two functions use crypto_rng_read() to initialize the
	 * pauth keys. Once call_initcalls() returns we're guaranteed that
	 * crypto_rng_read() is ready to be used.
	 */
	thread_init_core_local_pauth_keys();
	thread_init_thread_pauth_keys();
}

static void init_primary(unsigned long pageable_part, unsigned long nsec_entry)
{
	thread_init_core_local_stacks();
	/*
	 * Mask asynchronous exceptions before switching to the thread vector
	 * as the thread handler requires those to be masked while
	 * executing with the temporary stack. The thread subsystem also
	 * asserts that the foreign interrupts are blocked when using most of
	 * its functions.
	 */
	thread_set_exceptions(THREAD_EXCP_ALL);
	primary_save_cntfrq();
	init_vfp_sec();
	/*
	 * Pager: init_runtime() calls thread_kernel_enable_vfp() so we must
	 * set a current thread right now to avoid a chicken-and-egg problem
	 * (thread_init_boot_thread() sets the current thread but needs
	 * things set by init_runtime()).
	 */
	thread_get_core_local()->curr_thread = 0;
	init_runtime(pageable_part);

	if (IS_ENABLED(CFG_VIRTUALIZATION)) {
		/*
		 * Virtualization: We can't initialize threads right now
		 * because threads belong to the "tee" part and will be
		 * initialized separately for each new virtual guest. So
		 * we'll clear "curr_thread" and call it done.
		 */
		thread_get_core_local()->curr_thread = -1;
	} else {
		thread_init_boot_thread();
	}
	thread_init_primary();
	thread_init_per_cpu();
	init_sec_mon(nsec_entry);
}

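/*
 * Rough primary-CPU boot flow for orientation (assuming the usual
 * entry_a32.S/entry_a64.S entry code, which is not part of this file):
 *
 *	assembly entry
 *	  -> boot_init_primary_early()  (init_primary(): pager, threads,
 *	                                 secure monitor)
 *	  -> boot_init_primary_late()   (external DT, GIC, TEE runtime)
 *	  -> switch to normal world boot
 *
 * Secondary CPUs enter through boot_cpu_on_handler() or
 * boot_init_secondary() further down.
 */
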
/*
 * Note: this function is weak just to make it possible to exclude it from
 * the unpaged area.
 */
void __weak boot_init_primary_late(unsigned long fdt)
{
	init_external_dt(fdt);
	tpm_map_log_area(get_external_dt());
	discover_nsec_memory();
	update_external_dt();
	configure_console_from_dt();

	IMSG("OP-TEE version: %s", core_v_str);
	if (IS_ENABLED(CFG_WARN_INSECURE)) {
		IMSG("WARNING: This OP-TEE configuration might be insecure!");
		IMSG("WARNING: Please check https://optee.readthedocs.io/en/latest/architecture/porting_guidelines.html");
	}
	IMSG("Primary CPU initializing");
#ifdef CFG_CORE_ASLR
	DMSG("Executing at offset %#lx with virtual load address %#"PRIxVA,
	     (unsigned long)boot_mmu_config.load_offset, VCORE_START_VA);
#endif
	if (IS_ENABLED(CFG_MEMTAG))
		DMSG("Memory tagging %s",
		     memtag_is_enabled() ? "enabled" : "disabled");

	main_init_gic();
	init_vfp_nsec();
	if (IS_ENABLED(CFG_VIRTUALIZATION)) {
		IMSG("Initializing virtualization support");
		core_mmu_init_virtualization();
	} else {
		init_tee_runtime();
	}
	call_finalcalls();
	IMSG("Primary CPU switching to normal world boot");
}

static void init_secondary_helper(unsigned long nsec_entry)
{
	IMSG("Secondary CPU %zu initializing", get_core_pos());

	/*
	 * Mask asynchronous exceptions before switching to the thread vector
	 * as the thread handler requires those to be masked while
	 * executing with the temporary stack. The thread subsystem also
	 * asserts that the foreign interrupts are blocked when using most of
	 * its functions.
	 */
	thread_set_exceptions(THREAD_EXCP_ALL);

	secondary_init_cntfrq();
	thread_init_per_cpu();
	init_sec_mon(nsec_entry);
	main_secondary_init_gic();
	init_vfp_sec();
	init_vfp_nsec();

	IMSG("Secondary CPU %zu switching to normal world boot", get_core_pos());
}

/*
 * Note: this function is weak just to make it possible to exclude it from
 * the unpaged area so that it lies in the init area.
 */
void __weak boot_init_primary_early(unsigned long pageable_part,
				    unsigned long nsec_entry __maybe_unused)
{
	unsigned long e = PADDR_INVALID;

#if !defined(CFG_WITH_ARM_TRUSTED_FW)
	e = nsec_entry;
#endif

	init_primary(pageable_part, e);
}

#if defined(CFG_WITH_ARM_TRUSTED_FW)
unsigned long boot_cpu_on_handler(unsigned long a0 __maybe_unused,
				  unsigned long a1 __unused)
{
	init_secondary_helper(PADDR_INVALID);
	return 0;
}
#else
void boot_init_secondary(unsigned long nsec_entry)
{
	init_secondary_helper(nsec_entry);
}
#endif

#if defined(CFG_BOOT_SECONDARY_REQUEST)
void boot_set_core_ns_entry(size_t core_idx, uintptr_t entry,
			    uintptr_t context_id)
{
	ns_entry_contexts[core_idx].entry_point = entry;
	ns_entry_contexts[core_idx].context_id = context_id;
	dsb_ishst();
}

int boot_core_release(size_t core_idx, paddr_t entry)
{
	if (!core_idx || core_idx >= CFG_TEE_CORE_NB_CORE)
		return -1;

	ns_entry_contexts[core_idx].entry_point = entry;
	dmb();
	spin_table[core_idx] = 1;
	dsb();
	sev();

	return 0;
}

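/*
 * The pair above and boot_core_hpen() below implement a simple spin-table
 * protocol: the releasing CPU stores the entry for a secondary core and
 * then flips spin_table[core_idx], while the secondary waits in wfe()
 * until its slot becomes non-zero. The dmb()/dsb()/sev() sequence makes
 * sure the entry_point store is visible before the release is observed
 * and the waiting core is woken up.
 */
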
/*
 * Spin until a secondary boot request arrives, then return with the
 * secondary core entry address.
 */
struct ns_entry_context *boot_core_hpen(void)
{
#ifdef CFG_PSCI_ARM32
	return &ns_entry_contexts[get_core_pos()];
#else
	do {
		wfe();
	} while (!spin_table[get_core_pos()]);
	dmb();
	return &ns_entry_contexts[get_core_pos()];
#endif
}
#endif

#if defined(CFG_CORE_ASLR)
#if defined(CFG_DT)
unsigned long __weak get_aslr_seed(void *fdt)
{
	int rc = fdt_check_header(fdt);
	const uint64_t *seed = NULL;
	int offs = 0;
	int len = 0;

	if (rc) {
		DMSG("Bad fdt: %d", rc);
		goto err;
	}

	offs = fdt_path_offset(fdt, "/secure-chosen");
	if (offs < 0) {
		DMSG("Cannot find /secure-chosen");
		goto err;
	}
	seed = fdt_getprop(fdt, offs, "kaslr-seed", &len);
	if (!seed || len != sizeof(*seed)) {
		DMSG("Cannot find valid kaslr-seed");
		goto err;
	}

	return fdt64_to_cpu(*seed);

err:
	/* Try platform implementation */
	return plat_get_aslr_seed();
}
#else /*!CFG_DT*/
unsigned long __weak get_aslr_seed(void *fdt __unused)
{
	/* Try platform implementation */
	return plat_get_aslr_seed();
}
#endif /*!CFG_DT*/
#endif /*CFG_CORE_ASLR*/
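
/*
 * get_aslr_seed() expects the loader to pass the seed as a 64-bit
 * property in the secure view of the DT, along the lines of
 * (value is illustrative only):
 *
 *	secure-chosen {
 *		kaslr-seed = <0x12345678 0x9abcdef0>;
 *	};
 *
 * If the property is absent or malformed, plat_get_aslr_seed() is used
 * as a fallback.
 */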