// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2015-2022, Linaro Limited
 */

#include <arm.h>
#include <assert.h>
#include <compiler.h>
#include <config.h>
#include <console.h>
#include <crypto/crypto.h>
#include <drivers/gic.h>
#include <initcall.h>
#include <inttypes.h>
#include <keep.h>
#include <kernel/asan.h>
#include <kernel/boot.h>
#include <kernel/linker.h>
#include <kernel/misc.h>
#include <kernel/panic.h>
#include <kernel/tee_misc.h>
#include <kernel/thread.h>
#include <kernel/tpm.h>
#include <libfdt.h>
#include <malloc.h>
#include <memtag.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <mm/fobj.h>
#include <mm/tee_mm.h>
#include <mm/tee_pager.h>
#include <sm/psci.h>
#include <stdio.h>
#include <trace.h>
#include <utee_defines.h>
#include <util.h>

#include <platform_config.h>

#if !defined(CFG_WITH_ARM_TRUSTED_FW)
#include <sm/sm.h>
#endif

#if defined(CFG_WITH_VFP)
#include <kernel/vfp.h>
#endif

/*
 * In this file we're using unsigned long to represent physical pointers as
 * they are received in a single register when OP-TEE is initially entered.
 * This limits 32-bit systems to only make use of the lower 32 bits
 * of a physical address for initial parameters.
 *
 * 64-bit systems on the other hand can use full 64-bit physical pointers.
 */
#define PADDR_INVALID		ULONG_MAX

#if defined(CFG_BOOT_SECONDARY_REQUEST)
struct ns_entry_context {
	uintptr_t entry_point;
	uintptr_t context_id;
};
struct ns_entry_context ns_entry_contexts[CFG_TEE_CORE_NB_CORE];
static uint32_t spin_table[CFG_TEE_CORE_NB_CORE];
#endif

#ifdef CFG_BOOT_SYNC_CPU
/*
 * Array used when booting to synchronize the CPUs.
 * When 0, the CPU has not started.
 * When 1, it has started.
 */
uint32_t sem_cpu_sync[CFG_TEE_CORE_NB_CORE];
DECLARE_KEEP_PAGER(sem_cpu_sync);
#endif

#ifdef CFG_DT
struct dt_descriptor {
	void *blob;
#ifdef _CFG_USE_DTB_OVERLAY
	int frag_id;
#endif
};

static struct dt_descriptor external_dt __nex_bss;
#endif

#ifdef CFG_SECONDARY_INIT_CNTFRQ
static uint32_t cntfrq;
#endif

/* May be overridden in plat-$(PLATFORM)/main.c */
__weak void plat_primary_init_early(void)
{
}
DECLARE_KEEP_PAGER(plat_primary_init_early);

/* May be overridden in plat-$(PLATFORM)/main.c */
__weak void main_init_gic(void)
{
}

/* May be overridden in plat-$(PLATFORM)/main.c */
__weak void main_secondary_init_gic(void)
{
}

/* May be overridden in plat-$(PLATFORM)/main.c */
__weak unsigned long plat_get_aslr_seed(void)
{
	DMSG("Warning: no ASLR seed");

	return 0;
}

/*
 * This function is called as a guard after each SMC call which is not
 * supposed to return.
 */
void __panic_at_smc_return(void)
{
	panic();
}

#if defined(CFG_WITH_ARM_TRUSTED_FW)
void init_sec_mon(unsigned long nsec_entry __maybe_unused)
{
	assert(nsec_entry == PADDR_INVALID);
	/* Do nothing as we don't have a secure monitor */
}
#else
/* May be overridden in plat-$(PLATFORM)/main.c */
__weak void init_sec_mon(unsigned long nsec_entry)
{
	struct sm_nsec_ctx *nsec_ctx;

	assert(nsec_entry != PADDR_INVALID);

	/* Initialize secure monitor */
	nsec_ctx = sm_get_nsec_ctx();
	nsec_ctx->mon_lr = nsec_entry;
	nsec_ctx->mon_spsr = CPSR_MODE_SVC | CPSR_I;
	if (nsec_entry & 1)
		nsec_ctx->mon_spsr |= CPSR_T;
}
#endif

#if defined(CFG_WITH_ARM_TRUSTED_FW)
static void init_vfp_nsec(void)
{
}
#else
static void init_vfp_nsec(void)
{
	/* Normal world can use CP10 and CP11 (SIMD/VFP) */
	write_nsacr(read_nsacr() | NSACR_CP10 | NSACR_CP11);
}
#endif

#if defined(CFG_WITH_VFP)

#ifdef ARM32
static void init_vfp_sec(void)
{
	uint32_t cpacr = read_cpacr();

	/*
	 * Enable Advanced SIMD functionality.
	 * Enable use of D16-D31 of the Floating-point Extension register
	 * file.
	 */
	cpacr &= ~(CPACR_ASEDIS | CPACR_D32DIS);
	/*
	 * Enable usage of CP10 and CP11 (SIMD/VFP) (both kernel and user
	 * mode).
	 */
	cpacr |= CPACR_CP(10, CPACR_CP_ACCESS_FULL);
	cpacr |= CPACR_CP(11, CPACR_CP_ACCESS_FULL);
	write_cpacr(cpacr);
}
#endif /* ARM32 */

#ifdef ARM64
static void init_vfp_sec(void)
{
	/* Not using VFP until thread_kernel_enable_vfp() */
	vfp_disable();
}
#endif /* ARM64 */

#else /* CFG_WITH_VFP */

static void init_vfp_sec(void)
{
	/* Not using VFP */
}
#endif

#ifdef CFG_SECONDARY_INIT_CNTFRQ
static void primary_save_cntfrq(void)
{
	assert(cntfrq == 0);

	/*
	 * CNTFRQ should be initialized on the primary CPU by a
	 * previous boot stage
	 */
	cntfrq = read_cntfrq();
}

static void secondary_init_cntfrq(void)
{
	assert(cntfrq != 0);
	write_cntfrq(cntfrq);
}
#else /* CFG_SECONDARY_INIT_CNTFRQ */
static void primary_save_cntfrq(void)
{
}

static void secondary_init_cntfrq(void)
{
}
#endif

#ifdef CFG_CORE_SANITIZE_KADDRESS
static void init_run_constructors(void)
{
	const vaddr_t *ctor;

	for (ctor = &__ctor_list; ctor < &__ctor_end; ctor++)
		((void (*)(void))(*ctor))();
}

static void init_asan(void)
{
	/*
	 * CFG_ASAN_SHADOW_OFFSET is also supplied as
	 * -fasan-shadow-offset=$(CFG_ASAN_SHADOW_OFFSET) to the compiler.
	 * Since not all the values needed to calculate
	 * CFG_ASAN_SHADOW_OFFSET are available to make, we have to
	 * calculate it in advance and hard code it into the platform
	 * conf.mk. Here, where all the needed values are available, we
	 * double check that the compiler was supplied the correct value.
	 */
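	/*
	 * Purely illustrative example with hypothetical values (not taken
	 * from any platform): with TEE_RAM_VA_START = 0x20000000 and
	 * TEE_RAM_VA_SIZE = 0x200000 (2 MiB), the formulas below give
	 *
	 *   __ASAN_SHADOW_START = ROUNDUP(0x20000000 + 0x1000000 / 9 - 8, 8)
	 *                       = 0x201c71c0
	 *   __CFG_ASAN_SHADOW_OFFSET = 0x201c71c0 - 0x20000000 / 8
	 *                            = 0x1c1c71c0
	 *
	 * i.e. roughly the last 1/9 of the core VA window is reserved as
	 * ASAN shadow (one shadow byte covers 8 bytes), and that offset is
	 * what conf.mk must pass to the compiler.
	 */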
#define __ASAN_SHADOW_START \
	ROUNDUP(TEE_RAM_VA_START + (TEE_RAM_VA_SIZE * 8) / 9 - 8, 8)
	assert(__ASAN_SHADOW_START == (vaddr_t)&__asan_shadow_start);
#define __CFG_ASAN_SHADOW_OFFSET \
	(__ASAN_SHADOW_START - (TEE_RAM_VA_START / 8))
	COMPILE_TIME_ASSERT(CFG_ASAN_SHADOW_OFFSET == __CFG_ASAN_SHADOW_OFFSET);
#undef __ASAN_SHADOW_START
#undef __CFG_ASAN_SHADOW_OFFSET

	/*
	 * Assign the area covered by the shadow area, everything from start
	 * up to the beginning of the shadow area.
	 */
	asan_set_shadowed((void *)TEE_TEXT_VA_START, &__asan_shadow_start);

	/*
	 * Add access to areas that aren't opened automatically by a
	 * constructor.
	 */
	asan_tag_access(&__ctor_list, &__ctor_end);
	asan_tag_access(__rodata_start, __rodata_end);
#ifdef CFG_WITH_PAGER
	asan_tag_access(__pageable_start, __pageable_end);
#endif /*CFG_WITH_PAGER*/
	asan_tag_access(__nozi_start, __nozi_end);
	asan_tag_access(__exidx_start, __exidx_end);
	asan_tag_access(__extab_start, __extab_end);

	init_run_constructors();

	/* Everything is tagged correctly, let's start address sanitizing. */
	asan_start();
}
#else /*CFG_CORE_SANITIZE_KADDRESS*/
static void init_asan(void)
{
}
#endif /*CFG_CORE_SANITIZE_KADDRESS*/

#if defined(CFG_MEMTAG)
/* Called from entry_a64.S only when MEMTAG is configured */
void boot_init_memtag(void)
{
	memtag_init_ops(feat_mte_implemented());
	memtag_set_tags((void *)TEE_RAM_START, TEE_RAM_PH_SIZE, 0);
}
#endif

#ifdef CFG_WITH_PAGER

#ifdef CFG_CORE_SANITIZE_KADDRESS
static void carve_out_asan_mem(tee_mm_pool_t *pool)
{
	const size_t s = pool->hi - pool->lo;
	tee_mm_entry_t *mm;
	paddr_t apa = ASAN_MAP_PA;
	size_t asz = ASAN_MAP_SZ;

	if (core_is_buffer_outside(apa, asz, pool->lo, s))
		return;

	/* Reserve the shadow area */
	if (!core_is_buffer_inside(apa, asz, pool->lo, s)) {
		if (apa < pool->lo) {
			/*
			 * ASAN buffer is overlapping with the beginning of
			 * the pool.
			 */
			asz -= pool->lo - apa;
			apa = pool->lo;
		} else {
			/*
			 * ASAN buffer is overlapping with the end of the
			 * pool.
			 */
			asz = pool->hi - apa;
		}
	}
	mm = tee_mm_alloc2(pool, apa, asz);
	assert(mm);
}
#else
static void carve_out_asan_mem(tee_mm_pool_t *pool __unused)
{
}
#endif

static void print_pager_pool_size(void)
{
	struct tee_pager_stats __maybe_unused stats;

	tee_pager_get_stats(&stats);
	IMSG("Pager pool size: %zukB",
	     stats.npages_all * SMALL_PAGE_SIZE / 1024);
}

static void init_vcore(tee_mm_pool_t *mm_vcore)
{
	const vaddr_t begin = VCORE_START_VA;
	size_t size = TEE_RAM_VA_SIZE;

#ifdef CFG_CORE_SANITIZE_KADDRESS
	/* Carve out ASAN memory, flat mapped after core memory */
	if (begin + size > ASAN_SHADOW_PA)
		size = ASAN_MAP_PA - begin;
#endif

	if (!tee_mm_init(mm_vcore, begin, size, SMALL_PAGE_SHIFT,
			 TEE_MM_POOL_NO_FLAGS))
		panic("tee_mm_vcore init failed");
}

/*
 * With CFG_CORE_ASLR=y the init part is relocated very early during boot.
 * The init part is also paged just as the rest of the normal paged code,
 * with the difference that it's preloaded during boot. When the backing
 * store is configured the entire paged binary is copied in place and then
 * also the init part. Since the init part has been relocated (references to
 * addresses updated to compensate for the new load address) this has to be
 * undone for the hashes of those pages to match the original binary.
 *
 * If CFG_CORE_ASLR=n, nothing needs to be done as the code/ro pages are
 * unchanged.
 */
static void undo_init_relocation(uint8_t *paged_store __maybe_unused)
{
#ifdef CFG_CORE_ASLR
	unsigned long *ptr = NULL;
	const uint32_t *reloc = NULL;
	const uint32_t *reloc_end = NULL;
	unsigned long offs = boot_mmu_config.load_offset;
	const struct boot_embdata *embdata = (const void *)__init_end;
	vaddr_t addr_end = (vaddr_t)__init_end - offs - TEE_RAM_START;
	vaddr_t addr_start = (vaddr_t)__init_start - offs - TEE_RAM_START;

	reloc = (const void *)((vaddr_t)embdata + embdata->reloc_offset);
	reloc_end = reloc + embdata->reloc_len / sizeof(*reloc);

	for (; reloc < reloc_end; reloc++) {
		if (*reloc < addr_start)
			continue;
		if (*reloc >= addr_end)
			break;
		ptr = (void *)(paged_store + *reloc - addr_start);
		*ptr -= offs;
	}
#endif
}
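
/*
 * A minimal illustration of the table walked above (values are made up):
 * each 32-bit entry in the embedded relocation table is the offset, relative
 * to the start of the binary, of an unsigned long that had
 * boot_mmu_config.load_offset added to it when ASLR relocated the image.
 * So with load_offset == 0x04000000 and an entry that points into the init
 * part, the pointer stored there is simply decremented by 0x04000000 so
 * that the page content again hashes to the value recorded at build time.
 */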

static struct fobj *ro_paged_alloc(tee_mm_entry_t *mm, void *hashes,
				   void *store)
{
	const unsigned int num_pages = tee_mm_get_bytes(mm) / SMALL_PAGE_SIZE;
#ifdef CFG_CORE_ASLR
	unsigned int reloc_offs = (vaddr_t)__pageable_start - VCORE_START_VA;
	const struct boot_embdata *embdata = (const void *)__init_end;
	const void *reloc = __init_end + embdata->reloc_offset;

	return fobj_ro_reloc_paged_alloc(num_pages, hashes, reloc_offs,
					 reloc, embdata->reloc_len, store);
#else
	return fobj_ro_paged_alloc(num_pages, hashes, store);
#endif
}

static void init_runtime(unsigned long pageable_part)
{
	size_t n;
	size_t init_size = (size_t)(__init_end - __init_start);
	size_t pageable_start = (size_t)__pageable_start;
	size_t pageable_end = (size_t)__pageable_end;
	size_t pageable_size = pageable_end - pageable_start;
	vaddr_t tzsram_end = TZSRAM_BASE + TZSRAM_SIZE - TEE_LOAD_ADDR +
			     VCORE_START_VA;
	size_t hash_size = (pageable_size / SMALL_PAGE_SIZE) *
			   TEE_SHA256_HASH_SIZE;
	const struct boot_embdata *embdata = (const void *)__init_end;
	const void *tmp_hashes = NULL;
	tee_mm_entry_t *mm = NULL;
	struct fobj *fobj = NULL;
	uint8_t *paged_store = NULL;
	uint8_t *hashes = NULL;

	assert(pageable_size % SMALL_PAGE_SIZE == 0);
	assert(embdata->total_len >= embdata->hashes_offset +
				     embdata->hashes_len);
	assert(hash_size == embdata->hashes_len);

	tmp_hashes = __init_end + embdata->hashes_offset;

	init_asan();

	/* Add heap2 first as heap1 may be too small as initial bget pool */
	malloc_add_pool(__heap2_start, __heap2_end - __heap2_start);
	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);

	/*
	 * This needs to be initialized early to support address lookup
	 * in MEM_AREA_TEE_RAM
	 */
	tee_pager_early_init();

	hashes = malloc(hash_size);
	IMSG_RAW("\n");
	IMSG("Pager is enabled. Hashes: %zu bytes", hash_size);
	assert(hashes);
	asan_memcpy_unchecked(hashes, tmp_hashes, hash_size);

	/*
	 * Need tee_mm_sec_ddr initialized to be able to allocate secure
	 * DDR below.
	 */
	core_mmu_init_ta_ram();

	carve_out_asan_mem(&tee_mm_sec_ddr);

	mm = tee_mm_alloc(&tee_mm_sec_ddr, pageable_size);
	assert(mm);
	paged_store = phys_to_virt(tee_mm_get_smem(mm), MEM_AREA_TA_RAM,
				   pageable_size);
	/*
	 * Load the pageable part into the dedicated allocated area:
	 * - Move the pageable non-init part into the pageable area. Note
	 *   that the bootloader may have loaded it anywhere in TA RAM,
	 *   hence the use of memmove().
	 * - Copy the pageable init part from its current location into the
	 *   pageable area.
	 */
	memmove(paged_store + init_size,
		phys_to_virt(pageable_part,
			     core_mmu_get_type_by_pa(pageable_part),
			     __pageable_part_end - __pageable_part_start),
		__pageable_part_end - __pageable_part_start);
	asan_memcpy_unchecked(paged_store, __init_start, init_size);
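
	/*
	 * At this point the backing store in secure DDR is laid out as
	 * (sketch, not to scale):
	 *
	 *   paged_store:  [ init part (init_size) | non-init pageable part ]
	 *   hashes:       one SHA-256 digest per 4 KiB page of paged_store
	 *
	 * so page n of the pageable area is verified against
	 * hashes + n * TEE_SHA256_HASH_SIZE in the loop further down.
	 */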
	/*
	 * Undo any relocation of the init part so the hash checks
	 * can pass.
	 */
	undo_init_relocation(paged_store);

	/* Check that hashes of what's in the pageable area are OK */
	DMSG("Checking hashes of pageable area");
	for (n = 0; (n * SMALL_PAGE_SIZE) < pageable_size; n++) {
		const uint8_t *hash = hashes + n * TEE_SHA256_HASH_SIZE;
		const uint8_t *page = paged_store + n * SMALL_PAGE_SIZE;
		TEE_Result res;

		DMSG("hash pg_idx %zu hash %p page %p", n, hash, page);
		res = hash_sha256_check(hash, page, SMALL_PAGE_SIZE);
		if (res != TEE_SUCCESS) {
			EMSG("Hash failed for page %zu at %p: res 0x%x",
			     n, (void *)page, res);
			panic();
		}
	}

	/*
	 * Assert that the prepaged init sections are page aligned so that
	 * nothing trails uninitialized at the end of the premapped init
	 * area.
	 */
	assert(!(init_size & SMALL_PAGE_MASK));

	/*
	 * Initialize the virtual memory pool used for main_mmu_l2_ttb which
	 * is supplied to tee_pager_init() below.
	 */
	init_vcore(&tee_mm_vcore);

	/*
	 * Assign the alias area for the pager at the end of the small page
	 * block the rest of the binary is loaded into. We're taking more
	 * than needed, but we're guaranteed to not need more than the
	 * physical amount of TZSRAM.
	 */
	mm = tee_mm_alloc2(&tee_mm_vcore,
			   (vaddr_t)tee_mm_vcore.lo +
			   tee_mm_vcore.size - TZSRAM_SIZE,
			   TZSRAM_SIZE);
	assert(mm);
	tee_pager_set_alias_area(mm);

	/*
	 * Claim virtual memory which isn't paged.
	 * Linear memory (flat mapped core memory) ends there.
	 */
	mm = tee_mm_alloc2(&tee_mm_vcore, VCORE_UNPG_RX_PA,
			   (vaddr_t)(__pageable_start - VCORE_UNPG_RX_PA));
	assert(mm);

	/*
	 * Allocate virtual memory for the pageable area and let the pager
	 * take charge of all the pages already assigned to that memory.
	 */
	mm = tee_mm_alloc2(&tee_mm_vcore, (vaddr_t)__pageable_start,
			   pageable_size);
	assert(mm);
	fobj = ro_paged_alloc(mm, hashes, paged_store);
	assert(fobj);
	tee_pager_add_core_region(tee_mm_get_smem(mm), PAGED_REGION_TYPE_RO,
				  fobj);
	fobj_put(fobj);

	tee_pager_add_pages(pageable_start, init_size / SMALL_PAGE_SIZE, false);
	tee_pager_add_pages(pageable_start + init_size,
			    (pageable_size - init_size) / SMALL_PAGE_SIZE,
			    true);
	if (pageable_end < tzsram_end)
		tee_pager_add_pages(pageable_end, (tzsram_end - pageable_end) /
				    SMALL_PAGE_SIZE, true);

	/*
	 * There may be physical pages in TZSRAM before the core load address.
	 * These pages can be added to the physical pages pool of the pager.
	 * This setup may happen when the secure bootloader runs in TZSRAM
	 * and its memory can be reused by OP-TEE once boot stages complete.
	 */
	tee_pager_add_pages(tee_mm_vcore.lo,
			    (VCORE_UNPG_RX_PA - tee_mm_vcore.lo) / SMALL_PAGE_SIZE,
			    true);

	print_pager_pool_size();
}
#else

static void init_runtime(unsigned long pageable_part __unused)
{
	init_asan();

	/*
	 * By default the whole of OP-TEE uses malloc, so we need to
	 * initialize it early. But, when virtualization is enabled, malloc
	 * is used only by the TEE runtime, so malloc should be initialized
	 * later, for every virtual partition separately. Core code uses
	 * nex_malloc instead.
	 */
#ifdef CFG_VIRTUALIZATION
	nex_malloc_add_pool(__nex_heap_start, __nex_heap_end -
					      __nex_heap_start);
#else
	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
#endif

	IMSG_RAW("\n");
}
#endif

void *get_dt(void)
{
	void *fdt = get_embedded_dt();

	if (!fdt)
		fdt = get_external_dt();

	return fdt;
}

#if defined(CFG_EMBED_DTB)
void *get_embedded_dt(void)
{
	static bool checked;

	assert(cpu_mmu_enabled());

	if (!checked) {
		IMSG("Embedded DTB found");

		if (fdt_check_header(embedded_secure_dtb))
			panic("Invalid embedded DTB");

		checked = true;
	}

	return embedded_secure_dtb;
}
#else
void *get_embedded_dt(void)
{
	return NULL;
}
#endif /*CFG_EMBED_DTB*/

#if defined(CFG_DT)
void *get_external_dt(void)
{
	assert(cpu_mmu_enabled());
	return external_dt.blob;
}

static TEE_Result release_external_dt(void)
{
	int ret = 0;

	if (!external_dt.blob)
		return TEE_SUCCESS;

	ret = fdt_pack(external_dt.blob);
	if (ret < 0) {
		EMSG("Failed to pack Device Tree at 0x%" PRIxPA ": error %d",
		     virt_to_phys(external_dt.blob), ret);
		panic();
	}

	if (core_mmu_remove_mapping(MEM_AREA_EXT_DT, external_dt.blob,
				    CFG_DTB_MAX_SIZE))
		panic("Failed to remove temporary Device Tree mapping");

	/* The external DTB is no longer accessed, reset pointer to invalid */
	external_dt.blob = NULL;

	return TEE_SUCCESS;
}
boot_final(release_external_dt);

#ifdef _CFG_USE_DTB_OVERLAY
static int add_dt_overlay_fragment(struct dt_descriptor *dt, int ioffs)
{
	char frag[32];
	int offs;
	int ret;

	snprintf(frag, sizeof(frag), "fragment@%d", dt->frag_id);
	offs = fdt_add_subnode(dt->blob, ioffs, frag);
	if (offs < 0)
		return offs;

	dt->frag_id += 1;

	ret = fdt_setprop_string(dt->blob, offs, "target-path", "/");
	if (ret < 0)
		return -1;

	return fdt_add_subnode(dt->blob, offs, "__overlay__");
}

static int init_dt_overlay(struct dt_descriptor *dt, int __maybe_unused dt_size)
{
	int fragment;

	if (IS_ENABLED(CFG_EXTERNAL_DTB_OVERLAY)) {
		if (!fdt_check_header(dt->blob)) {
			fdt_for_each_subnode(fragment, dt->blob, 0)
				dt->frag_id += 1;
			return 0;
		}
	}

	return fdt_create_empty_tree(dt->blob, dt_size);
}
#else
static int add_dt_overlay_fragment(struct dt_descriptor *dt __unused, int offs)
{
	return offs;
}

static int init_dt_overlay(struct dt_descriptor *dt __unused,
			   int dt_size __unused)
{
	return 0;
}
#endif /* _CFG_USE_DTB_OVERLAY */
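
/*
 * Roughly, add_dt_overlay_fragment() produces a fragment like the following
 * when the external DTB is handled as an overlay (the fragment number
 * depends on dt->frag_id, nodes added by the callers go inside __overlay__):
 *
 *	fragment@0 {
 *		target-path = "/";
 *		__overlay__ {
 *			...
 *		};
 *	};
 */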

static int add_dt_path_subnode(struct dt_descriptor *dt, const char *path,
			       const char *subnode)
{
	int offs;

	offs = fdt_path_offset(dt->blob, path);
	if (offs < 0)
		return -1;
	offs = add_dt_overlay_fragment(dt, offs);
	if (offs < 0)
		return -1;
	offs = fdt_add_subnode(dt->blob, offs, subnode);
	if (offs < 0)
		return -1;
	return offs;
}

static int add_optee_dt_node(struct dt_descriptor *dt)
{
	int offs;
	int ret;

	if (fdt_path_offset(dt->blob, "/firmware/optee") >= 0) {
		DMSG("OP-TEE Device Tree node already exists!");
		return 0;
	}

	offs = fdt_path_offset(dt->blob, "/firmware");
	if (offs < 0) {
		offs = add_dt_path_subnode(dt, "/", "firmware");
		if (offs < 0)
			return -1;
	}

	offs = fdt_add_subnode(dt->blob, offs, "optee");
	if (offs < 0)
		return -1;

	ret = fdt_setprop_string(dt->blob, offs, "compatible",
				 "linaro,optee-tz");
	if (ret < 0)
		return -1;
	ret = fdt_setprop_string(dt->blob, offs, "method", "smc");
	if (ret < 0)
		return -1;
	if (CFG_CORE_ASYNC_NOTIF_GIC_INTID) {
		/*
		 * The format of the interrupt property is defined by the
		 * binding of the interrupt domain root. In this case it's
		 * an Arm GIC v1, v2 or v3, so we must be compatible with
		 * these.
		 *
		 * An SPI type of interrupt is indicated with a 0 in the
		 * first cell.
		 *
		 * The interrupt number goes in the second cell, where
		 * SPIs range from 0 to 987.
		 *
		 * Flags are passed in the third cell, where a 1 means edge
		 * triggered.
		 */
		const uint32_t gic_spi = 0;
		const uint32_t irq_type_edge = 1;
		uint32_t val[] = {
			TEE_U32_TO_BIG_ENDIAN(gic_spi),
			TEE_U32_TO_BIG_ENDIAN(CFG_CORE_ASYNC_NOTIF_GIC_INTID -
					      GIC_SPI_BASE),
			TEE_U32_TO_BIG_ENDIAN(irq_type_edge),
		};

		ret = fdt_setprop(dt->blob, offs, "interrupts", val,
				  sizeof(val));
		if (ret < 0)
			return -1;
	}
	return 0;
}
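
/*
 * Illustration only: with the properties set above, the normal world ends up
 * seeing a node along these lines (the interrupts cells are only present
 * when CFG_CORE_ASYNC_NOTIF_GIC_INTID is non-zero; the SPI number shown is
 * an arbitrary example):
 *
 *	firmware {
 *		optee {
 *			compatible = "linaro,optee-tz";
 *			method = "smc";
 *			interrupts = <0 12 1>;
 *		};
 *	};
 */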

#ifdef CFG_PSCI_ARM32
static int append_psci_compatible(void *fdt, int offs, const char *str)
{
	return fdt_appendprop(fdt, offs, "compatible", str, strlen(str) + 1);
}

static int dt_add_psci_node(struct dt_descriptor *dt)
{
	int offs;

	if (fdt_path_offset(dt->blob, "/psci") >= 0) {
		DMSG("PSCI Device Tree node already exists!");
		return 0;
	}

	offs = add_dt_path_subnode(dt, "/", "psci");
	if (offs < 0)
		return -1;
	if (append_psci_compatible(dt->blob, offs, "arm,psci-1.0"))
		return -1;
	if (append_psci_compatible(dt->blob, offs, "arm,psci-0.2"))
		return -1;
	if (append_psci_compatible(dt->blob, offs, "arm,psci"))
		return -1;
	if (fdt_setprop_string(dt->blob, offs, "method", "smc"))
		return -1;
	if (fdt_setprop_u32(dt->blob, offs, "cpu_suspend", PSCI_CPU_SUSPEND))
		return -1;
	if (fdt_setprop_u32(dt->blob, offs, "cpu_off", PSCI_CPU_OFF))
		return -1;
	if (fdt_setprop_u32(dt->blob, offs, "cpu_on", PSCI_CPU_ON))
		return -1;
	if (fdt_setprop_u32(dt->blob, offs, "sys_poweroff", PSCI_SYSTEM_OFF))
		return -1;
	if (fdt_setprop_u32(dt->blob, offs, "sys_reset", PSCI_SYSTEM_RESET))
		return -1;
	return 0;
}

static int check_node_compat_prefix(struct dt_descriptor *dt, int offs,
				    const char *prefix)
{
	const size_t prefix_len = strlen(prefix);
	size_t l;
	int plen;
	const char *prop;

	prop = fdt_getprop(dt->blob, offs, "compatible", &plen);
	if (!prop)
		return -1;

	while (plen > 0) {
		if (memcmp(prop, prefix, prefix_len) == 0)
			return 0; /* match */

		l = strlen(prop) + 1;
		prop += l;
		plen -= l;
	}

	return -1;
}

static int dt_add_psci_cpu_enable_methods(struct dt_descriptor *dt)
{
	int offs = 0;

	while (1) {
		offs = fdt_next_node(dt->blob, offs, NULL);
		if (offs < 0)
			break;
		if (fdt_getprop(dt->blob, offs, "enable-method", NULL))
			continue; /* already set */
		if (check_node_compat_prefix(dt, offs, "arm,cortex-a"))
			continue; /* no compatible */
		if (fdt_setprop_string(dt->blob, offs, "enable-method", "psci"))
			return -1;
		/* Need to restart scanning as offsets may have changed */
		offs = 0;
	}
	return 0;
}

static int config_psci(struct dt_descriptor *dt)
{
	if (dt_add_psci_node(dt))
		return -1;
	return dt_add_psci_cpu_enable_methods(dt);
}
#else
static int config_psci(struct dt_descriptor *dt __unused)
{
	return 0;
}
#endif /*CFG_PSCI_ARM32*/

static void set_dt_val(void *data, uint32_t cell_size, uint64_t val)
{
	if (cell_size == 1) {
		fdt32_t v = cpu_to_fdt32((uint32_t)val);

		memcpy(data, &v, sizeof(v));
	} else {
		fdt64_t v = cpu_to_fdt64(val);

		memcpy(data, &v, sizeof(v));
	}
}

static int add_res_mem_dt_node(struct dt_descriptor *dt, const char *name,
			       paddr_t pa, size_t size)
{
	int offs = 0;
	int ret = 0;
	int addr_size = -1;
	int len_size = -1;
	bool found = true;
	char subnode_name[80] = { 0 };

	offs = fdt_path_offset(dt->blob, "/reserved-memory");

	if (offs < 0) {
		found = false;
		offs = 0;
	}

	if (IS_ENABLED(_CFG_USE_DTB_OVERLAY)) {
		len_size = sizeof(paddr_t) / sizeof(uint32_t);
		addr_size = sizeof(paddr_t) / sizeof(uint32_t);
	} else {
		len_size = fdt_size_cells(dt->blob, offs);
		if (len_size < 0)
			return -1;
		addr_size = fdt_address_cells(dt->blob, offs);
		if (addr_size < 0)
			return -1;
	}

	if (!found) {
		offs = add_dt_path_subnode(dt, "/", "reserved-memory");
		if (offs < 0)
			return -1;
		ret = fdt_setprop_cell(dt->blob, offs, "#address-cells",
				       addr_size);
		if (ret < 0)
			return -1;
		ret = fdt_setprop_cell(dt->blob, offs, "#size-cells", len_size);
		if (ret < 0)
			return -1;
		ret = fdt_setprop(dt->blob, offs, "ranges", NULL, 0);
		if (ret < 0)
			return -1;
	}

	ret = snprintf(subnode_name, sizeof(subnode_name),
		       "%s@%" PRIxPA, name, pa);
	if (ret < 0 || ret >= (int)sizeof(subnode_name))
		DMSG("truncated node \"%s@%" PRIxPA"\"", name, pa);
	offs = fdt_add_subnode(dt->blob, offs, subnode_name);
	if (offs >= 0) {
		uint32_t data[FDT_MAX_NCELLS * 2];

		set_dt_val(data, addr_size, pa);
		set_dt_val(data + addr_size, len_size, size);
		ret = fdt_setprop(dt->blob, offs, "reg", data,
				  sizeof(uint32_t) * (addr_size + len_size));
		if (ret < 0)
			return -1;
		ret = fdt_setprop(dt->blob, offs, "no-map", NULL, 0);
		if (ret < 0)
			return -1;
	} else {
		return -1;
	}
	return 0;
}
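
/*
 * Illustration only: for a call such as
 * add_res_mem_dt_node(dt, "optee_core", 0x0e100000, 0x00f00000), where the
 * addresses are arbitrary examples and the cell counts depend on the parent
 * node, the normal world DT gains roughly:
 *
 *	reserved-memory {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges;
 *
 *		optee_core@e100000 {
 *			reg = <0x0e100000 0x00f00000>;
 *			no-map;
 *		};
 *	};
 */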

#ifdef CFG_CORE_DYN_SHM
static uint64_t get_dt_val_and_advance(const void *data, size_t *offs,
				       uint32_t cell_size)
{
	uint64_t rv = 0;

	if (cell_size == 1) {
		uint32_t v;

		memcpy(&v, (const uint8_t *)data + *offs, sizeof(v));
		*offs += sizeof(v);
		rv = fdt32_to_cpu(v);
	} else {
		uint64_t v;

		memcpy(&v, (const uint8_t *)data + *offs, sizeof(v));
		*offs += sizeof(v);
		rv = fdt64_to_cpu(v);
	}

	return rv;
}

/*
 * Find all non-secure memory from DT. Memory marked inaccessible by Secure
 * World is ignored since it cannot be mapped and used as dynamic shared
 * memory.
 */
static int get_nsec_memory_helper(void *fdt, struct core_mmu_phys_mem *mem)
{
	const uint8_t *prop = NULL;
	uint64_t a = 0;
	uint64_t l = 0;
	size_t prop_offs = 0;
	size_t prop_len = 0;
	int elems_total = 0;
	int addr_size = 0;
	int len_size = 0;
	int offs = 0;
	size_t n = 0;
	int len = 0;

	addr_size = fdt_address_cells(fdt, 0);
	if (addr_size < 0)
		return 0;

	len_size = fdt_size_cells(fdt, 0);
	if (len_size < 0)
		return 0;

	while (true) {
		offs = fdt_node_offset_by_prop_value(fdt, offs, "device_type",
						     "memory",
						     sizeof("memory"));
		if (offs < 0)
			break;

		if (_fdt_get_status(fdt, offs) != (DT_STATUS_OK_NSEC |
						   DT_STATUS_OK_SEC))
			continue;

		prop = fdt_getprop(fdt, offs, "reg", &len);
		if (!prop)
			continue;

		prop_len = len;
		for (n = 0, prop_offs = 0; prop_offs < prop_len; n++) {
			a = get_dt_val_and_advance(prop, &prop_offs, addr_size);
			if (prop_offs >= prop_len) {
				n--;
				break;
			}

			l = get_dt_val_and_advance(prop, &prop_offs, len_size);
			if (mem) {
				mem->type = MEM_AREA_DDR_OVERALL;
				mem->addr = a;
				mem->size = l;
				mem++;
			}
		}

		elems_total += n;
	}

	return elems_total;
}
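
/*
 * Illustration only: the loop above collects ranges from standard memory
 * nodes, e.g. a (made-up) node such as
 *
 *	memory@80000000 {
 *		device_type = "memory";
 *		reg = <0x80000000 0x40000000>;
 *	};
 *
 * would be recorded as one MEM_AREA_DDR_OVERALL entry with addr 0x80000000
 * and size 0x40000000, provided its status allows both secure and
 * non-secure access.
 */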

static struct core_mmu_phys_mem *get_nsec_memory(void *fdt, size_t *nelems)
{
	struct core_mmu_phys_mem *mem = NULL;
	int elems_total = 0;

	elems_total = get_nsec_memory_helper(fdt, NULL);
	if (elems_total <= 0)
		return NULL;

	mem = nex_calloc(elems_total, sizeof(*mem));
	if (!mem)
		panic();

	elems_total = get_nsec_memory_helper(fdt, mem);
	assert(elems_total > 0);

	*nelems = elems_total;

	return mem;
}
#endif /*CFG_CORE_DYN_SHM*/

#ifdef CFG_CORE_RESERVED_SHM
static int mark_static_shm_as_reserved(struct dt_descriptor *dt)
{
	vaddr_t shm_start;
	vaddr_t shm_end;

	core_mmu_get_mem_by_type(MEM_AREA_NSEC_SHM, &shm_start, &shm_end);
	if (shm_start != shm_end)
		return add_res_mem_dt_node(dt, "optee_shm",
					   virt_to_phys((void *)shm_start),
					   shm_end - shm_start);

	DMSG("No SHM configured");
	return -1;
}
#endif /*CFG_CORE_RESERVED_SHM*/

static void init_external_dt(unsigned long phys_dt)
{
	struct dt_descriptor *dt = &external_dt;
	void *fdt;
	int ret;

	if (!phys_dt) {
		/*
		 * No need to panic as we're not using the DT in OP-TEE
		 * yet, we're only adding some nodes for normal world use.
		 * This makes the switch to using DT easier as we can boot
		 * a newer OP-TEE with older boot loaders. Once we start to
		 * initialize devices based on DT we'll likely panic
		 * instead of returning here.
		 */
		IMSG("No non-secure external DT");
		return;
	}

	fdt = core_mmu_add_mapping(MEM_AREA_EXT_DT, phys_dt, CFG_DTB_MAX_SIZE);
	if (!fdt)
		panic("Failed to map external DTB");

	dt->blob = fdt;

	ret = init_dt_overlay(dt, CFG_DTB_MAX_SIZE);
	if (ret < 0) {
		EMSG("Device Tree Overlay init fail @ %#lx: error %d", phys_dt,
		     ret);
		panic();
	}

	ret = fdt_open_into(fdt, fdt, CFG_DTB_MAX_SIZE);
	if (ret < 0) {
		EMSG("Invalid Device Tree at %#lx: error %d", phys_dt, ret);
		panic();
	}

	IMSG("Non-secure external DT found");
}

static int mark_tzdram_as_reserved(struct dt_descriptor *dt)
{
	return add_res_mem_dt_node(dt, "optee_core", CFG_TZDRAM_START,
				   CFG_TZDRAM_SIZE);
}

static void update_external_dt(void)
{
	struct dt_descriptor *dt = &external_dt;

	if (!dt->blob)
		return;

	if (!IS_ENABLED(CFG_CORE_FFA) && add_optee_dt_node(dt))
		panic("Failed to add OP-TEE Device Tree node");

	if (config_psci(dt))
		panic("Failed to config PSCI");

#ifdef CFG_CORE_RESERVED_SHM
	if (mark_static_shm_as_reserved(dt))
		panic("Failed to config non-secure memory");
#endif

	if (mark_tzdram_as_reserved(dt))
		panic("Failed to config secure memory");
}
#else /*CFG_DT*/
void *get_external_dt(void)
{
	return NULL;
}

static void init_external_dt(unsigned long phys_dt __unused)
{
}

static void update_external_dt(void)
{
}

#ifdef CFG_CORE_DYN_SHM
static struct core_mmu_phys_mem *get_nsec_memory(void *fdt __unused,
						 size_t *nelems __unused)
{
	return NULL;
}
#endif /*CFG_CORE_DYN_SHM*/
#endif /*!CFG_DT*/

#ifdef CFG_CORE_DYN_SHM
static void discover_nsec_memory(void)
{
	struct core_mmu_phys_mem *mem;
	const struct core_mmu_phys_mem *mem_begin = NULL;
	const struct core_mmu_phys_mem *mem_end = NULL;
	size_t nelems;
	void *fdt = get_external_dt();

	if (fdt) {
		mem = get_nsec_memory(fdt, &nelems);
		if (mem) {
			core_mmu_set_discovered_nsec_ddr(mem, nelems);
			return;
		}

		DMSG("No non-secure memory found in FDT");
	}

	mem_begin = phys_ddr_overall_begin;
	mem_end = phys_ddr_overall_end;
	nelems = mem_end - mem_begin;
	if (nelems) {
		/*
		 * A platform cannot use both register_ddr() and the now
		 * deprecated register_dynamic_shm().
		 */
		assert(phys_ddr_overall_compat_begin ==
		       phys_ddr_overall_compat_end);
	} else {
		mem_begin = phys_ddr_overall_compat_begin;
		mem_end = phys_ddr_overall_compat_end;
		nelems = mem_end - mem_begin;
		if (!nelems)
			return;
		DMSG("Warning register_dynamic_shm() is deprecated, please use register_ddr() instead");
	}

	mem = nex_calloc(nelems, sizeof(*mem));
	if (!mem)
		panic();

	memcpy(mem, phys_ddr_overall_begin, sizeof(*mem) * nelems);
	core_mmu_set_discovered_nsec_ddr(mem, nelems);
}
#else /*CFG_CORE_DYN_SHM*/
static void discover_nsec_memory(void)
{
}
#endif /*!CFG_CORE_DYN_SHM*/

#ifdef CFG_VIRTUALIZATION
static TEE_Result virt_init_heap(void)
{
	/* We need to initialize the pool for every virtual guest partition */
	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);

	return TEE_SUCCESS;
}
preinit_early(virt_init_heap);
#endif

void init_tee_runtime(void)
{
#ifndef CFG_WITH_PAGER
	/* Pager initializes TA RAM early */
	core_mmu_init_ta_ram();
#endif
	/*
	 * With virtualization we call this function when creating the
	 * OP-TEE partition instead.
	 */
	if (!IS_ENABLED(CFG_VIRTUALIZATION))
		call_preinitcalls();
	call_initcalls();
}

static void init_primary(unsigned long pageable_part, unsigned long nsec_entry)
{
	thread_init_core_local_stacks();
	/*
	 * Mask asynchronous exceptions before switching to the thread vector
	 * as the thread handler requires those to be masked while
	 * executing with the temporary stack. The thread subsystem also
	 * asserts that the foreign interrupts are blocked when using most of
	 * its functions.
	 */
	thread_set_exceptions(THREAD_EXCP_ALL);
	primary_save_cntfrq();
	init_vfp_sec();
	/*
	 * Pager: init_runtime() calls thread_kernel_enable_vfp() so we must
	 * set a current thread right now to avoid a chicken-and-egg problem
	 * (thread_init_boot_thread() sets the current thread but needs
	 * things set by init_runtime()).
	 */
	thread_get_core_local()->curr_thread = 0;
	init_runtime(pageable_part);

	if (IS_ENABLED(CFG_VIRTUALIZATION)) {
		/*
		 * Virtualization: we can't initialize threads right now
		 * because threads belong to the "tee" part and will be
		 * initialized separately for each new virtual guest. So,
		 * we'll clear "curr_thread" and call it done.
		 */
		thread_get_core_local()->curr_thread = -1;
	} else {
		thread_init_boot_thread();
	}
	thread_init_primary();
	thread_init_per_cpu();
	init_sec_mon(nsec_entry);
}

/*
 * Note: this function is weak just to make it possible to exclude it from
 * the unpaged area.
 */
void __weak boot_init_primary_late(unsigned long fdt)
{
	init_external_dt(fdt);
	tpm_map_log_area(get_external_dt());
	discover_nsec_memory();
	update_external_dt();
	configure_console_from_dt();

	IMSG("OP-TEE version: %s", core_v_str);
	if (IS_ENABLED(CFG_WARN_INSECURE)) {
		IMSG("WARNING: This OP-TEE configuration might be insecure!");
		IMSG("WARNING: Please check https://optee.readthedocs.io/en/latest/architecture/porting_guidelines.html");
	}
	IMSG("Primary CPU initializing");
#ifdef CFG_CORE_ASLR
	DMSG("Executing at offset %#lx with virtual load address %#"PRIxVA,
	     (unsigned long)boot_mmu_config.load_offset, VCORE_START_VA);
#endif
	if (IS_ENABLED(CFG_MEMTAG))
		DMSG("Memory tagging %s",
		     memtag_is_enabled() ? "enabled" : "disabled");

	main_init_gic();
	init_vfp_nsec();
	if (IS_ENABLED(CFG_VIRTUALIZATION)) {
		IMSG("Initializing virtualization support");
		core_mmu_init_virtualization();
	} else {
		init_tee_runtime();
	}
	call_finalcalls();
	IMSG("Primary CPU switching to normal world boot");
}

static void init_secondary_helper(unsigned long nsec_entry)
{
	IMSG("Secondary CPU %zu initializing", get_core_pos());

	/*
	 * Mask asynchronous exceptions before switching to the thread vector
	 * as the thread handler requires those to be masked while
	 * executing with the temporary stack. The thread subsystem also
	 * asserts that the foreign interrupts are blocked when using most of
	 * its functions.
	 */
	thread_set_exceptions(THREAD_EXCP_ALL);

	secondary_init_cntfrq();
	thread_init_per_cpu();
	init_sec_mon(nsec_entry);
	main_secondary_init_gic();
	init_vfp_sec();
	init_vfp_nsec();

	IMSG("Secondary CPU %zu switching to normal world boot", get_core_pos());
}

/*
 * Note: this function is weak just to make it possible to exclude it from
 * the unpaged area so that it lies in the init area.
 */
void __weak boot_init_primary_early(unsigned long pageable_part,
				    unsigned long nsec_entry __maybe_unused)
{
	unsigned long e = PADDR_INVALID;

#if !defined(CFG_WITH_ARM_TRUSTED_FW)
	e = nsec_entry;
#endif

	init_primary(pageable_part, e);
}

#if defined(CFG_WITH_ARM_TRUSTED_FW)
unsigned long boot_cpu_on_handler(unsigned long a0 __maybe_unused,
				  unsigned long a1 __unused)
{
	init_secondary_helper(PADDR_INVALID);
	return 0;
}
#else
void boot_init_secondary(unsigned long nsec_entry)
{
	init_secondary_helper(nsec_entry);
}
#endif

#if defined(CFG_BOOT_SECONDARY_REQUEST)
void boot_set_core_ns_entry(size_t core_idx, uintptr_t entry,
			    uintptr_t context_id)
{
	ns_entry_contexts[core_idx].entry_point = entry;
	ns_entry_contexts[core_idx].context_id = context_id;
	dsb_ishst();
}

int boot_core_release(size_t core_idx, paddr_t entry)
{
	if (!core_idx || core_idx >= CFG_TEE_CORE_NB_CORE)
		return -1;

	ns_entry_contexts[core_idx].entry_point = entry;
	dmb();
	spin_table[core_idx] = 1;
	dsb();
	sev();

	return 0;
}

/*
 * Spin until a secondary boot request arrives, then return the
 * secondary core's entry context.
 */
struct ns_entry_context *boot_core_hpen(void)
{
#ifdef CFG_PSCI_ARM32
	return &ns_entry_contexts[get_core_pos()];
#else
	do {
		wfe();
	} while (!spin_table[get_core_pos()]);
	dmb();
	return &ns_entry_contexts[get_core_pos()];
#endif
}
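
/*
 * A short note on the handshake above, describing only what the code already
 * does: boot_core_release() publishes the entry point, the dmb() orders that
 * store before the spin_table[] flag is set, and dsb()/sev() wake any core
 * sleeping in wfe(). boot_core_hpen() re-checks the flag after each wfe(),
 * and its dmb() ensures the entry point written before the flag is observed
 * before the context is returned to the caller.
 */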
#endif

#if defined(CFG_CORE_ASLR)
#if defined(CFG_DT)
unsigned long __weak get_aslr_seed(void *fdt)
{
	int rc = fdt_check_header(fdt);
	const uint64_t *seed = NULL;
	int offs = 0;
	int len = 0;

	if (rc) {
		DMSG("Bad fdt: %d", rc);
		goto err;
	}

	offs = fdt_path_offset(fdt, "/secure-chosen");
	if (offs < 0) {
		DMSG("Cannot find /secure-chosen");
		goto err;
	}
	seed = fdt_getprop(fdt, offs, "kaslr-seed", &len);
	if (!seed || len != sizeof(*seed)) {
		DMSG("Cannot find valid kaslr-seed");
		goto err;
	}

	return fdt64_to_cpu(*seed);

err:
	/* Try platform implementation */
	return plat_get_aslr_seed();
}
#else /*!CFG_DT*/
unsigned long __weak get_aslr_seed(void *fdt __unused)
{
	/* Try platform implementation */
	return plat_get_aslr_seed();
}
#endif /*!CFG_DT*/
#endif /*CFG_CORE_ASLR*/
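
/*
 * Illustration only: get_aslr_seed() looks for a property like the following
 * in the secure DTB (the value shown is an arbitrary example, the property
 * is a single 64-bit cell pair):
 *
 *	secure-chosen {
 *		kaslr-seed = <0x01234567 0x89abcdef>;
 *	};
 */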