// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2015-2022, Linaro Limited
 */

#include <arm.h>
#include <assert.h>
#include <compiler.h>
#include <config.h>
#include <console.h>
#include <crypto/crypto.h>
#include <drivers/gic.h>
#include <initcall.h>
#include <inttypes.h>
#include <keep.h>
#include <kernel/asan.h>
#include <kernel/boot.h>
#include <kernel/linker.h>
#include <kernel/misc.h>
#include <kernel/panic.h>
#include <kernel/tee_misc.h>
#include <kernel/thread.h>
#include <kernel/tpm.h>
#include <libfdt.h>
#include <malloc.h>
#include <memtag.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <mm/fobj.h>
#include <mm/tee_mm.h>
#include <mm/tee_pager.h>
#include <sm/psci.h>
#include <stdio.h>
#include <trace.h>
#include <utee_defines.h>
#include <util.h>

#include <platform_config.h>

#if !defined(CFG_WITH_ARM_TRUSTED_FW)
#include <sm/sm.h>
#endif

#if defined(CFG_WITH_VFP)
#include <kernel/vfp.h>
#endif

/*
 * In this file we're using unsigned long to represent physical pointers as
 * they are received in a single register when OP-TEE is initially entered.
 * This limits 32-bit systems to only make use of the lower 32 bits of a
 * physical address for initial parameters.
 *
 * 64-bit systems on the other hand can use full 64-bit physical pointers.
 */
#define PADDR_INVALID		ULONG_MAX

#if defined(CFG_BOOT_SECONDARY_REQUEST)
struct ns_entry_context {
	uintptr_t entry_point;
	uintptr_t context_id;
};
struct ns_entry_context ns_entry_contexts[CFG_TEE_CORE_NB_CORE];
static uint32_t spin_table[CFG_TEE_CORE_NB_CORE];
#endif

#ifdef CFG_BOOT_SYNC_CPU
/*
 * Array used when booting to synchronize the CPUs:
 * when 0, the CPU has not started,
 * when 1, it has started.
 */
uint32_t sem_cpu_sync[CFG_TEE_CORE_NB_CORE];
DECLARE_KEEP_PAGER(sem_cpu_sync);
#endif

#ifdef CFG_DT
struct dt_descriptor {
	void *blob;
#ifdef _CFG_USE_DTB_OVERLAY
	int frag_id;
#endif
};

static struct dt_descriptor external_dt __nex_bss;
#endif

#ifdef CFG_SECONDARY_INIT_CNTFRQ
static uint32_t cntfrq;
#endif

/* May be overridden in plat-$(PLATFORM)/main.c */
__weak void plat_primary_init_early(void)
{
}
DECLARE_KEEP_PAGER(plat_primary_init_early);

/* May be overridden in plat-$(PLATFORM)/main.c */
__weak void main_init_gic(void)
{
}

/* May be overridden in plat-$(PLATFORM)/main.c */
__weak void main_secondary_init_gic(void)
{
}

/* May be overridden in plat-$(PLATFORM)/main.c */
__weak unsigned long plat_get_aslr_seed(void)
{
	DMSG("Warning: no ASLR seed");

	return 0;
}
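
/*
 * Illustrative sketch (not part of this file): a platform may override the
 * weak hooks above from its plat-$(PLATFORM)/main.c. As a hypothetical
 * example (TRNG_BASE and TRNG_SEED_OFFSET are made-up names), an ASLR seed
 * could be sourced from a platform TRNG; this hook runs very early in boot,
 * typically before the MMU is enabled, so the device would be accessed by
 * physical address:
 *
 *	unsigned long plat_get_aslr_seed(void)
 *	{
 *		return io_read64(TRNG_BASE + TRNG_SEED_OFFSET);
 *	}
 */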

/*
 * This function is called as a guard after each SMC call which is not
 * supposed to return.
 */
void __panic_at_smc_return(void)
{
	panic();
}

#if defined(CFG_WITH_ARM_TRUSTED_FW)
void init_sec_mon(unsigned long nsec_entry __maybe_unused)
{
	assert(nsec_entry == PADDR_INVALID);
	/* Do nothing as we don't have a secure monitor */
}
#else
/* May be overridden in plat-$(PLATFORM)/main.c */
__weak void init_sec_mon(unsigned long nsec_entry)
{
	struct sm_nsec_ctx *nsec_ctx;

	assert(nsec_entry != PADDR_INVALID);

	/* Initialize secure monitor */
	nsec_ctx = sm_get_nsec_ctx();
	nsec_ctx->mon_lr = nsec_entry;
	nsec_ctx->mon_spsr = CPSR_MODE_SVC | CPSR_I;
	if (nsec_entry & 1)
		nsec_ctx->mon_spsr |= CPSR_T;
}
#endif

#if defined(CFG_WITH_ARM_TRUSTED_FW)
static void init_vfp_nsec(void)
{
}
#else
static void init_vfp_nsec(void)
{
	/* Normal world can use CP10 and CP11 (SIMD/VFP) */
	write_nsacr(read_nsacr() | NSACR_CP10 | NSACR_CP11);
}
#endif

#if defined(CFG_WITH_VFP)

#ifdef ARM32
static void init_vfp_sec(void)
{
	uint32_t cpacr = read_cpacr();

	/*
	 * Enable Advanced SIMD functionality.
	 * Enable use of D16-D31 of the Floating-point Extension register
	 * file.
	 */
	cpacr &= ~(CPACR_ASEDIS | CPACR_D32DIS);
	/*
	 * Enable usage of CP10 and CP11 (SIMD/VFP) (both kernel and user
	 * mode).
	 */
	cpacr |= CPACR_CP(10, CPACR_CP_ACCESS_FULL);
	cpacr |= CPACR_CP(11, CPACR_CP_ACCESS_FULL);
	write_cpacr(cpacr);
}
#endif /* ARM32 */

#ifdef ARM64
static void init_vfp_sec(void)
{
	/* Not using VFP until thread_kernel_enable_vfp() */
	vfp_disable();
}
#endif /* ARM64 */

#else /* CFG_WITH_VFP */

static void init_vfp_sec(void)
{
	/* Not using VFP */
}
#endif

#ifdef CFG_SECONDARY_INIT_CNTFRQ
static void primary_save_cntfrq(void)
{
	assert(cntfrq == 0);

	/*
	 * CNTFRQ should be initialized on the primary CPU by a
	 * previous boot stage
	 */
	cntfrq = read_cntfrq();
}

static void secondary_init_cntfrq(void)
{
	assert(cntfrq != 0);
	write_cntfrq(cntfrq);
}
#else /* CFG_SECONDARY_INIT_CNTFRQ */
static void primary_save_cntfrq(void)
{
}

static void secondary_init_cntfrq(void)
{
}
#endif

#ifdef CFG_CORE_SANITIZE_KADDRESS
static void init_run_constructors(void)
{
	const vaddr_t *ctor;

	for (ctor = &__ctor_list; ctor < &__ctor_end; ctor++)
		((void (*)(void))(*ctor))();
}

static void init_asan(void)
{
	/*
	 * CFG_ASAN_SHADOW_OFFSET is also supplied as
	 * -fasan-shadow-offset=$(CFG_ASAN_SHADOW_OFFSET) to the compiler.
	 * Since all the values needed to calculate CFG_ASAN_SHADOW_OFFSET
	 * aren't available to make, we calculate it in advance and hard
	 * code it into the platform conf.mk. Here, where all the needed
	 * values are available, we double check that the compiler was
	 * supplied the correct value.
	 */
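
	/*
	 * As a reminder of what the offset feeds into: with the standard
	 * ASAN scheme of one shadow byte per 8 bytes of memory, the
	 * compiler emits checks against
	 *
	 *	shadow_addr = (addr >> 3) + CFG_ASAN_SHADOW_OFFSET
	 *
	 * so for addr == TEE_RAM_VA_START the shadow byte lands exactly at
	 * __asan_shadow_start, which is what the assertions below verify.
	 */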
#define __ASAN_SHADOW_START \
	ROUNDUP(TEE_RAM_VA_START + (TEE_RAM_VA_SIZE * 8) / 9 - 8, 8)
	assert(__ASAN_SHADOW_START == (vaddr_t)&__asan_shadow_start);
#define __CFG_ASAN_SHADOW_OFFSET \
	(__ASAN_SHADOW_START - (TEE_RAM_VA_START / 8))
	COMPILE_TIME_ASSERT(CFG_ASAN_SHADOW_OFFSET == __CFG_ASAN_SHADOW_OFFSET);
#undef __ASAN_SHADOW_START
#undef __CFG_ASAN_SHADOW_OFFSET

	/*
	 * Assign area covered by the shadow area, everything from start up
	 * to the beginning of the shadow area.
	 */
	asan_set_shadowed((void *)TEE_TEXT_VA_START, &__asan_shadow_start);

	/*
	 * Add access to areas that aren't opened automatically by a
	 * constructor.
	 */
	asan_tag_access(&__ctor_list, &__ctor_end);
	asan_tag_access(__rodata_start, __rodata_end);
#ifdef CFG_WITH_PAGER
	asan_tag_access(__pageable_start, __pageable_end);
#endif /*CFG_WITH_PAGER*/
	asan_tag_access(__nozi_start, __nozi_end);
	asan_tag_access(__exidx_start, __exidx_end);
	asan_tag_access(__extab_start, __extab_end);

	init_run_constructors();

	/* Everything is tagged correctly, let's start address sanitizing. */
	asan_start();
}
#else /*CFG_CORE_SANITIZE_KADDRESS*/
static void init_asan(void)
{
}
#endif /*CFG_CORE_SANITIZE_KADDRESS*/

#if defined(CFG_MEMTAG)
/* Called from entry_a64.S only when MEMTAG is configured */
void boot_init_memtag(void)
{
	memtag_init_ops(feat_mte_implemented());
	memtag_set_tags((void *)TEE_RAM_START, TEE_RAM_PH_SIZE, 0);
}
#endif

#ifdef CFG_WITH_PAGER

#ifdef CFG_CORE_SANITIZE_KADDRESS
static void carve_out_asan_mem(tee_mm_pool_t *pool)
{
	const size_t s = pool->hi - pool->lo;
	tee_mm_entry_t *mm;
	paddr_t apa = ASAN_MAP_PA;
	size_t asz = ASAN_MAP_SZ;

	if (core_is_buffer_outside(apa, asz, pool->lo, s))
		return;

	/* Reserve the shadow area */
	if (!core_is_buffer_inside(apa, asz, pool->lo, s)) {
		if (apa < pool->lo) {
			/*
			 * ASAN buffer is overlapping with the beginning of
			 * the pool.
			 */
			asz -= pool->lo - apa;
			apa = pool->lo;
		} else {
			/*
			 * ASAN buffer is overlapping with the end of the
			 * pool.
			 */
			asz = pool->hi - apa;
		}
	}
	mm = tee_mm_alloc2(pool, apa, asz);
	assert(mm);
}
#else
static void carve_out_asan_mem(tee_mm_pool_t *pool __unused)
{
}
#endif

static void print_pager_pool_size(void)
{
	struct tee_pager_stats __maybe_unused stats;

	tee_pager_get_stats(&stats);
	IMSG("Pager pool size: %zukB",
	     stats.npages_all * SMALL_PAGE_SIZE / 1024);
}

static void init_vcore(tee_mm_pool_t *mm_vcore)
{
	const vaddr_t begin = VCORE_START_VA;
	size_t size = TEE_RAM_VA_SIZE;

#ifdef CFG_CORE_SANITIZE_KADDRESS
	/* Carve out asan memory, flat mapped after core memory */
	if (begin + size > ASAN_SHADOW_PA)
		size = ASAN_MAP_PA - begin;
#endif

	if (!tee_mm_init(mm_vcore, begin, size, SMALL_PAGE_SHIFT,
			 TEE_MM_POOL_NO_FLAGS))
		panic("tee_mm_vcore init failed");
}

/*
 * With CFG_CORE_ASLR=y the init part is relocated very early during boot.
 * The init part is also paged just as the rest of the normal paged code, with
 * the difference that it's preloaded during boot. When the backing store
 * is configured the entire paged binary is copied in place and then also
 * the init part.
 * Since the init part has been relocated (references to addresses updated
 * to compensate for the new load address) this has to be undone for the
 * hashes of those pages to match with the original binary.
 *
 * If CFG_CORE_ASLR=n, nothing needs to be done as the code/ro pages are
 * unchanged.
 */
static void undo_init_relocation(uint8_t *paged_store __maybe_unused)
{
#ifdef CFG_CORE_ASLR
	unsigned long *ptr = NULL;
	const uint32_t *reloc = NULL;
	const uint32_t *reloc_end = NULL;
	unsigned long offs = boot_mmu_config.load_offset;
	const struct boot_embdata *embdata = (const void *)__init_end;
	vaddr_t addr_end = (vaddr_t)__init_end - offs - TEE_RAM_START;
	vaddr_t addr_start = (vaddr_t)__init_start - offs - TEE_RAM_START;

	reloc = (const void *)((vaddr_t)embdata + embdata->reloc_offset);
	reloc_end = reloc + embdata->reloc_len / sizeof(*reloc);

	for (; reloc < reloc_end; reloc++) {
		if (*reloc < addr_start)
			continue;
		if (*reloc >= addr_end)
			break;
		ptr = (void *)(paged_store + *reloc - addr_start);
		*ptr -= offs;
	}
#endif
}

static struct fobj *ro_paged_alloc(tee_mm_entry_t *mm, void *hashes,
				   void *store)
{
	const unsigned int num_pages = tee_mm_get_bytes(mm) / SMALL_PAGE_SIZE;
#ifdef CFG_CORE_ASLR
	unsigned int reloc_offs = (vaddr_t)__pageable_start - VCORE_START_VA;
	const struct boot_embdata *embdata = (const void *)__init_end;
	const void *reloc = __init_end + embdata->reloc_offset;

	return fobj_ro_reloc_paged_alloc(num_pages, hashes, reloc_offs,
					 reloc, embdata->reloc_len, store);
#else
	return fobj_ro_paged_alloc(num_pages, hashes, store);
#endif
}

static void init_runtime(unsigned long pageable_part)
{
	size_t n;
	size_t init_size = (size_t)(__init_end - __init_start);
	size_t pageable_start = (size_t)__pageable_start;
	size_t pageable_end = (size_t)__pageable_end;
	size_t pageable_size = pageable_end - pageable_start;
	size_t tzsram_end = TZSRAM_BASE + TZSRAM_SIZE;
	size_t hash_size = (pageable_size / SMALL_PAGE_SIZE) *
			   TEE_SHA256_HASH_SIZE;
	const struct boot_embdata *embdata = (const void *)__init_end;
	const void *tmp_hashes = NULL;
	tee_mm_entry_t *mm = NULL;
	struct fobj *fobj = NULL;
	uint8_t *paged_store = NULL;
	uint8_t *hashes = NULL;

	assert(pageable_size % SMALL_PAGE_SIZE == 0);
	assert(embdata->total_len >= embdata->hashes_offset +
				     embdata->hashes_len);
	assert(hash_size == embdata->hashes_len);

	tmp_hashes = __init_end + embdata->hashes_offset;

	init_asan();

	/* Add heap2 first as heap1 may be too small as initial bget pool */
	malloc_add_pool(__heap2_start, __heap2_end - __heap2_start);
	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);

	/*
	 * This needs to be initialized early to support address lookup
	 * in MEM_AREA_TEE_RAM
	 */
	tee_pager_early_init();

	hashes = malloc(hash_size);
	IMSG_RAW("\n");
	IMSG("Pager is enabled. Hashes: %zu bytes", hash_size);
	assert(hashes);
	asan_memcpy_unchecked(hashes, tmp_hashes, hash_size);

	/*
	 * Need tee_mm_sec_ddr initialized to be able to allocate secure
	 * DDR below.
	 */
	core_mmu_init_ta_ram();

	carve_out_asan_mem(&tee_mm_sec_ddr);

	mm = tee_mm_alloc(&tee_mm_sec_ddr, pageable_size);
	assert(mm);
	paged_store = phys_to_virt(tee_mm_get_smem(mm), MEM_AREA_TA_RAM,
				   pageable_size);
	/*
	 * Load pageable part in the dedicated allocated area:
	 * - Move pageable non-init part into pageable area. Note bootloader
	 *   may have loaded it anywhere in TA RAM hence use memmove().
	 * - Copy pageable init part from current location into pageable area.
	 */
	memmove(paged_store + init_size,
		phys_to_virt(pageable_part,
			     core_mmu_get_type_by_pa(pageable_part),
			     __pageable_part_end - __pageable_part_start),
		__pageable_part_end - __pageable_part_start);
	asan_memcpy_unchecked(paged_store, __init_start, init_size);
	/*
	 * Undo any relocation of the init part so the hash checks
	 * can pass.
	 */
	undo_init_relocation(paged_store);

	/* Check that the hashes of what's in the pageable area are OK */
	DMSG("Checking hashes of pageable area");
	for (n = 0; (n * SMALL_PAGE_SIZE) < pageable_size; n++) {
		const uint8_t *hash = hashes + n * TEE_SHA256_HASH_SIZE;
		const uint8_t *page = paged_store + n * SMALL_PAGE_SIZE;
		TEE_Result res;

		DMSG("hash pg_idx %zu hash %p page %p", n, hash, page);
		res = hash_sha256_check(hash, page, SMALL_PAGE_SIZE);
		if (res != TEE_SUCCESS) {
			EMSG("Hash failed for page %zu at %p: res 0x%x",
			     n, (void *)page, res);
			panic();
		}
	}

	/*
	 * Assert that the prepaged init sections are page aligned so that
	 * nothing trails uninitialized at the end of the premapped init area.
	 */
	assert(!(init_size & SMALL_PAGE_MASK));

	/*
	 * Initialize the virtual memory pool used for main_mmu_l2_ttb which
	 * is supplied to tee_pager_init() below.
	 */
	init_vcore(&tee_mm_vcore);

	/*
	 * Assign the pager's alias area at the end of the small page block
	 * that the rest of the binary is loaded into. We're taking more
	 * than needed, but we're guaranteed to not need more than the
	 * physical amount of TZSRAM.
	 */
	mm = tee_mm_alloc2(&tee_mm_vcore,
			   (vaddr_t)tee_mm_vcore.lo +
			   tee_mm_vcore.size - TZSRAM_SIZE,
			   TZSRAM_SIZE);
	assert(mm);
	tee_pager_set_alias_area(mm);

	/*
	 * Claim virtual memory which isn't paged.
	 * Linear memory (flat mapped core memory) ends there.
	 */
	mm = tee_mm_alloc2(&tee_mm_vcore, VCORE_UNPG_RX_PA,
			   (vaddr_t)(__pageable_start - VCORE_UNPG_RX_PA));
	assert(mm);

	/*
	 * Allocate virtual memory for the pageable area and let the pager
	 * take charge of all the pages already assigned to that memory.
	 */
	mm = tee_mm_alloc2(&tee_mm_vcore, (vaddr_t)__pageable_start,
			   pageable_size);
	assert(mm);
	fobj = ro_paged_alloc(mm, hashes, paged_store);
	assert(fobj);
	tee_pager_add_core_region(tee_mm_get_smem(mm), PAGED_REGION_TYPE_RO,
				  fobj);
	fobj_put(fobj);

	tee_pager_add_pages(pageable_start, init_size / SMALL_PAGE_SIZE, false);
	tee_pager_add_pages(pageable_start + init_size,
			    (pageable_size - init_size) / SMALL_PAGE_SIZE,
			    true);
	if (pageable_end < tzsram_end)
		tee_pager_add_pages(pageable_end, (tzsram_end - pageable_end) /
						  SMALL_PAGE_SIZE, true);

	/*
	 * There may be physical pages in TZSRAM before the core load address.
	 * These pages can be added to the physical pages pool of the pager.
	 * This setup may happen when the secure bootloader runs in TZSRAM
	 * and its memory can be reused by OP-TEE once boot stages complete.
	 */
	tee_pager_add_pages(tee_mm_vcore.lo,
			    (VCORE_UNPG_RX_PA - tee_mm_vcore.lo) /
			    SMALL_PAGE_SIZE, true);

	print_pager_pool_size();
}
#else

static void init_runtime(unsigned long pageable_part __unused)
{
	init_asan();

	/*
	 * By default the whole of OP-TEE uses malloc, so we need to
	 * initialize it early. But, when virtualization is enabled, malloc
	 * is used only by the TEE runtime, so malloc should be initialized
	 * later, for each virtual partition separately. Core code uses
	 * nex_malloc instead.
	 */
#ifdef CFG_VIRTUALIZATION
	nex_malloc_add_pool(__nex_heap_start, __nex_heap_end -
					      __nex_heap_start);
#else
	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
#endif

	IMSG_RAW("\n");
}
#endif

void *get_dt(void)
{
	void *fdt = get_embedded_dt();

	if (!fdt)
		fdt = get_external_dt();

	return fdt;
}

#if defined(CFG_EMBED_DTB)
void *get_embedded_dt(void)
{
	static bool checked;

	assert(cpu_mmu_enabled());

	if (!checked) {
		IMSG("Embedded DTB found");

		if (fdt_check_header(embedded_secure_dtb))
			panic("Invalid embedded DTB");

		checked = true;
	}

	return embedded_secure_dtb;
}
#else
void *get_embedded_dt(void)
{
	return NULL;
}
#endif /*CFG_EMBED_DTB*/

#if defined(CFG_DT)
void *get_external_dt(void)
{
	assert(cpu_mmu_enabled());
	return external_dt.blob;
}

static TEE_Result release_external_dt(void)
{
	int ret = 0;

	if (!external_dt.blob)
		return TEE_SUCCESS;

	ret = fdt_pack(external_dt.blob);
	if (ret < 0) {
		EMSG("Failed to pack Device Tree at 0x%" PRIxPA ": error %d",
		     virt_to_phys(external_dt.blob), ret);
		panic();
	}

	if (core_mmu_remove_mapping(MEM_AREA_EXT_DT, external_dt.blob,
				    CFG_DTB_MAX_SIZE))
		panic("Failed to remove temporary Device Tree mapping");

	/* The external DTB can no longer be reached, reset the pointer */
	external_dt.blob = NULL;

	return TEE_SUCCESS;
}
boot_final(release_external_dt);

#ifdef _CFG_USE_DTB_OVERLAY
static int add_dt_overlay_fragment(struct dt_descriptor *dt, int ioffs)
{
	char frag[32];
	int offs;
	int ret;

	snprintf(frag, sizeof(frag), "fragment@%d", dt->frag_id);
	offs = fdt_add_subnode(dt->blob, ioffs, frag);
	if (offs < 0)
		return offs;

	dt->frag_id += 1;

	ret = fdt_setprop_string(dt->blob, offs, "target-path", "/");
	if (ret < 0)
		return -1;

	return fdt_add_subnode(dt->blob, offs, "__overlay__");
}
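
/*
 * For reference, each call to the helper above adds an overlay fragment
 * shaped roughly like this (DTS notation), and returns the offset of the
 * __overlay__ node so that the caller can populate it:
 *
 *	fragment@0 {
 *		target-path = "/";
 *		__overlay__ {
 *		};
 *	};
 */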

static int init_dt_overlay(struct dt_descriptor *dt, int __maybe_unused dt_size)
{
	int fragment;

	if (IS_ENABLED(CFG_EXTERNAL_DTB_OVERLAY)) {
		if (!fdt_check_header(dt->blob)) {
			fdt_for_each_subnode(fragment, dt->blob, 0)
				dt->frag_id += 1;
			return 0;
		}
	}

	return fdt_create_empty_tree(dt->blob, dt_size);
}
#else
static int add_dt_overlay_fragment(struct dt_descriptor *dt __unused, int offs)
{
	return offs;
}

static int init_dt_overlay(struct dt_descriptor *dt __unused,
			   int dt_size __unused)
{
	return 0;
}
#endif /* _CFG_USE_DTB_OVERLAY */

static int add_dt_path_subnode(struct dt_descriptor *dt, const char *path,
			       const char *subnode)
{
	int offs;

	offs = fdt_path_offset(dt->blob, path);
	if (offs < 0)
		return -1;
	offs = add_dt_overlay_fragment(dt, offs);
	if (offs < 0)
		return -1;
	offs = fdt_add_subnode(dt->blob, offs, subnode);
	if (offs < 0)
		return -1;
	return offs;
}

static int add_optee_dt_node(struct dt_descriptor *dt)
{
	int offs;
	int ret;

	if (fdt_path_offset(dt->blob, "/firmware/optee") >= 0) {
		DMSG("OP-TEE Device Tree node already exists!");
		return 0;
	}

	offs = fdt_path_offset(dt->blob, "/firmware");
	if (offs < 0) {
		offs = add_dt_path_subnode(dt, "/", "firmware");
		if (offs < 0)
			return -1;
	}

	offs = fdt_add_subnode(dt->blob, offs, "optee");
	if (offs < 0)
		return -1;

	ret = fdt_setprop_string(dt->blob, offs, "compatible",
				 "linaro,optee-tz");
	if (ret < 0)
		return -1;
	ret = fdt_setprop_string(dt->blob, offs, "method", "smc");
	if (ret < 0)
		return -1;
	if (CFG_CORE_ASYNC_NOTIF_GIC_INTID) {
		/*
		 * The format of the interrupt property is defined by the
		 * binding of the interrupt domain root. In this case it's
		 * an Arm GIC v1, v2 or v3, so we must be compatible with
		 * these.
		 *
		 * An SPI type of interrupt is indicated with a 0 in the
		 * first cell.
		 *
		 * The interrupt number goes in the second cell where
		 * SPIs range from 0 to 987.
		 *
		 * Flags are passed in the third cell where a 1 means edge
		 * triggered.
		 */
		const uint32_t gic_spi = 0;
		const uint32_t irq_type_edge = 1;
		uint32_t val[] = {
			TEE_U32_TO_BIG_ENDIAN(gic_spi),
			TEE_U32_TO_BIG_ENDIAN(CFG_CORE_ASYNC_NOTIF_GIC_INTID -
					      GIC_SPI_BASE),
			TEE_U32_TO_BIG_ENDIAN(irq_type_edge),
		};

		ret = fdt_setprop(dt->blob, offs, "interrupts", val,
				  sizeof(val));
		if (ret < 0)
			return -1;
	}
	return 0;
}
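
/*
 * For reference, with CFG_CORE_ASYNC_NOTIF_GIC_INTID configured the node
 * added above ends up roughly as follows in the normal world device tree
 * (DTS notation, the SPI number 12 is only an example):
 *
 *	firmware {
 *		optee {
 *			compatible = "linaro,optee-tz";
 *			method = "smc";
 *			interrupts = <0 12 1>;
 *		};
 *	};
 */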

#ifdef CFG_PSCI_ARM32
static int append_psci_compatible(void *fdt, int offs, const char *str)
{
	return fdt_appendprop(fdt, offs, "compatible", str, strlen(str) + 1);
}

static int dt_add_psci_node(struct dt_descriptor *dt)
{
	int offs;

	if (fdt_path_offset(dt->blob, "/psci") >= 0) {
		DMSG("PSCI Device Tree node already exists!");
		return 0;
	}

	offs = add_dt_path_subnode(dt, "/", "psci");
	if (offs < 0)
		return -1;
	if (append_psci_compatible(dt->blob, offs, "arm,psci-1.0"))
		return -1;
	if (append_psci_compatible(dt->blob, offs, "arm,psci-0.2"))
		return -1;
	if (append_psci_compatible(dt->blob, offs, "arm,psci"))
		return -1;
	if (fdt_setprop_string(dt->blob, offs, "method", "smc"))
		return -1;
	if (fdt_setprop_u32(dt->blob, offs, "cpu_suspend", PSCI_CPU_SUSPEND))
		return -1;
	if (fdt_setprop_u32(dt->blob, offs, "cpu_off", PSCI_CPU_OFF))
		return -1;
	if (fdt_setprop_u32(dt->blob, offs, "cpu_on", PSCI_CPU_ON))
		return -1;
	if (fdt_setprop_u32(dt->blob, offs, "sys_poweroff", PSCI_SYSTEM_OFF))
		return -1;
	if (fdt_setprop_u32(dt->blob, offs, "sys_reset", PSCI_SYSTEM_RESET))
		return -1;
	return 0;
}

static int check_node_compat_prefix(struct dt_descriptor *dt, int offs,
				    const char *prefix)
{
	const size_t prefix_len = strlen(prefix);
	size_t l;
	int plen;
	const char *prop;

	prop = fdt_getprop(dt->blob, offs, "compatible", &plen);
	if (!prop)
		return -1;

	while (plen > 0) {
		if (memcmp(prop, prefix, prefix_len) == 0)
			return 0; /* match */

		l = strlen(prop) + 1;
		prop += l;
		plen -= l;
	}

	return -1;
}

static int dt_add_psci_cpu_enable_methods(struct dt_descriptor *dt)
{
	int offs = 0;

	while (1) {
		offs = fdt_next_node(dt->blob, offs, NULL);
		if (offs < 0)
			break;
		if (fdt_getprop(dt->blob, offs, "enable-method", NULL))
			continue; /* already set */
		if (check_node_compat_prefix(dt, offs, "arm,cortex-a"))
			continue; /* no compatible */
		if (fdt_setprop_string(dt->blob, offs, "enable-method", "psci"))
			return -1;
		/* Need to restart scanning as offsets may have changed */
		offs = 0;
	}
	return 0;
}

static int config_psci(struct dt_descriptor *dt)
{
	if (dt_add_psci_node(dt))
		return -1;
	return dt_add_psci_cpu_enable_methods(dt);
}
#else
static int config_psci(struct dt_descriptor *dt __unused)
{
	return 0;
}
#endif /*CFG_PSCI_ARM32*/

static void set_dt_val(void *data, uint32_t cell_size, uint64_t val)
{
	if (cell_size == 1) {
		fdt32_t v = cpu_to_fdt32((uint32_t)val);

		memcpy(data, &v, sizeof(v));
	} else {
		fdt64_t v = cpu_to_fdt64(val);

		memcpy(data, &v, sizeof(v));
	}
}

static int add_res_mem_dt_node(struct dt_descriptor *dt, const char *name,
			       paddr_t pa, size_t size)
{
	int offs = 0;
	int ret = 0;
	int addr_size = -1;
	int len_size = -1;
	bool found = true;
	char subnode_name[80] = { 0 };

	offs = fdt_path_offset(dt->blob, "/reserved-memory");

	if (offs < 0) {
		found = false;
		offs = 0;
	}

	if (IS_ENABLED(_CFG_USE_DTB_OVERLAY)) {
		len_size = sizeof(paddr_t) / sizeof(uint32_t);
		addr_size = sizeof(paddr_t) / sizeof(uint32_t);
	} else {
		len_size = fdt_size_cells(dt->blob, offs);
		if (len_size < 0)
			return -1;
		addr_size = fdt_address_cells(dt->blob, offs);
		if (addr_size < 0)
			return -1;
	}

	if (!found) {
		offs = add_dt_path_subnode(dt, "/", "reserved-memory");
		if (offs < 0)
			return -1;
		ret = fdt_setprop_cell(dt->blob, offs, "#address-cells",
				       addr_size);
		if (ret < 0)
			return -1;
		ret = fdt_setprop_cell(dt->blob, offs, "#size-cells", len_size);
		if (ret < 0)
			return -1;
		ret = fdt_setprop(dt->blob, offs, "ranges", NULL, 0);
		if (ret < 0)
			return -1;
	}

	ret = snprintf(subnode_name, sizeof(subnode_name),
		       "%s@%" PRIxPA, name, pa);
	if (ret < 0 || ret >= (int)sizeof(subnode_name))
		DMSG("truncated node \"%s@%" PRIxPA "\"", name, pa);
	offs = fdt_add_subnode(dt->blob, offs, subnode_name);
	if (offs >= 0) {
		uint32_t data[FDT_MAX_NCELLS * 2];

		set_dt_val(data, addr_size, pa);
		set_dt_val(data + addr_size, len_size, size);
		ret = fdt_setprop(dt->blob, offs, "reg", data,
				  sizeof(uint32_t) * (addr_size + len_size));
		if (ret < 0)
			return -1;
		ret = fdt_setprop(dt->blob, offs, "no-map", NULL, 0);
		if (ret < 0)
			return -1;
	} else {
		return -1;
	}
	return 0;
}
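
/*
 * For reference, mark_tzdram_as_reserved() and mark_static_shm_as_reserved()
 * below use this helper to produce nodes roughly like the following (DTS
 * notation, address and size are only examples):
 *
 *	reserved-memory {
 *		#address-cells = <2>;
 *		#size-cells = <2>;
 *		ranges;
 *
 *		optee_core@de000000 {
 *			reg = <0x0 0xde000000 0x0 0x01000000>;
 *			no-map;
 *		};
 *	};
 */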

#ifdef CFG_CORE_DYN_SHM
static uint64_t get_dt_val_and_advance(const void *data, size_t *offs,
				       uint32_t cell_size)
{
	uint64_t rv = 0;

	if (cell_size == 1) {
		uint32_t v;

		memcpy(&v, (const uint8_t *)data + *offs, sizeof(v));
		*offs += sizeof(v);
		rv = fdt32_to_cpu(v);
	} else {
		uint64_t v;

		memcpy(&v, (const uint8_t *)data + *offs, sizeof(v));
		*offs += sizeof(v);
		rv = fdt64_to_cpu(v);
	}

	return rv;
}

/*
 * Find all non-secure memory from DT. Memory marked inaccessible by Secure
 * World is ignored since it could not be mapped to be used as dynamic shared
 * memory.
 */
static int get_nsec_memory_helper(void *fdt, struct core_mmu_phys_mem *mem)
{
	const uint8_t *prop = NULL;
	uint64_t a = 0;
	uint64_t l = 0;
	size_t prop_offs = 0;
	size_t prop_len = 0;
	int elems_total = 0;
	int addr_size = 0;
	int len_size = 0;
	int offs = 0;
	size_t n = 0;
	int len = 0;

	addr_size = fdt_address_cells(fdt, 0);
	if (addr_size < 0)
		return 0;

	len_size = fdt_size_cells(fdt, 0);
	if (len_size < 0)
		return 0;

	while (true) {
		offs = fdt_node_offset_by_prop_value(fdt, offs, "device_type",
						     "memory",
						     sizeof("memory"));
		if (offs < 0)
			break;

		if (_fdt_get_status(fdt, offs) != (DT_STATUS_OK_NSEC |
						   DT_STATUS_OK_SEC))
			continue;

		prop = fdt_getprop(fdt, offs, "reg", &len);
		if (!prop)
			continue;

		prop_len = len;
		for (n = 0, prop_offs = 0; prop_offs < prop_len; n++) {
			a = get_dt_val_and_advance(prop, &prop_offs, addr_size);
			if (prop_offs >= prop_len) {
				n--;
				break;
			}

			l = get_dt_val_and_advance(prop, &prop_offs, len_size);
			if (mem) {
				mem->type = MEM_AREA_DDR_OVERALL;
				mem->addr = a;
				mem->size = l;
				mem++;
			}
		}

		elems_total += n;
	}

	return elems_total;
}
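
/*
 * Illustrative example of a node matched by the helper above (DTS notation,
 * address and size are only examples). The "reg" property is a list of
 * (address, size) pairs encoded with the root node's #address-cells and
 * #size-cells, which is why the cells are read one by one:
 *
 *	memory@80000000 {
 *		device_type = "memory";
 *		reg = <0x0 0x80000000 0x0 0x40000000>;
 *	};
 */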

static struct core_mmu_phys_mem *get_nsec_memory(void *fdt, size_t *nelems)
{
	struct core_mmu_phys_mem *mem = NULL;
	int elems_total = 0;

	elems_total = get_nsec_memory_helper(fdt, NULL);
	if (elems_total <= 0)
		return NULL;

	mem = nex_calloc(elems_total, sizeof(*mem));
	if (!mem)
		panic();

	elems_total = get_nsec_memory_helper(fdt, mem);
	assert(elems_total > 0);

	*nelems = elems_total;

	return mem;
}
#endif /*CFG_CORE_DYN_SHM*/

#ifdef CFG_CORE_RESERVED_SHM
static int mark_static_shm_as_reserved(struct dt_descriptor *dt)
{
	vaddr_t shm_start;
	vaddr_t shm_end;

	core_mmu_get_mem_by_type(MEM_AREA_NSEC_SHM, &shm_start, &shm_end);
	if (shm_start != shm_end)
		return add_res_mem_dt_node(dt, "optee_shm",
					   virt_to_phys((void *)shm_start),
					   shm_end - shm_start);

	DMSG("No SHM configured");
	return -1;
}
#endif /*CFG_CORE_RESERVED_SHM*/

static void init_external_dt(unsigned long phys_dt)
{
	struct dt_descriptor *dt = &external_dt;
	void *fdt;
	int ret;

	if (!phys_dt) {
		/*
		 * No need to panic as we're not using the DT in OP-TEE
		 * yet, we're only adding some nodes for normal world use.
		 * This makes the switch to using DT easier as we can boot
		 * a newer OP-TEE with older boot loaders. Once we start to
		 * initialize devices based on DT we'll likely panic
		 * instead of returning here.
		 */
		IMSG("No non-secure external DT");
		return;
	}

	fdt = core_mmu_add_mapping(MEM_AREA_EXT_DT, phys_dt, CFG_DTB_MAX_SIZE);
	if (!fdt)
		panic("Failed to map external DTB");

	dt->blob = fdt;

	ret = init_dt_overlay(dt, CFG_DTB_MAX_SIZE);
	if (ret < 0) {
		EMSG("Device Tree Overlay init failed at %#lx: error %d",
		     phys_dt, ret);
		panic();
	}

	ret = fdt_open_into(fdt, fdt, CFG_DTB_MAX_SIZE);
	if (ret < 0) {
		EMSG("Invalid Device Tree at %#lx: error %d", phys_dt, ret);
		panic();
	}

	IMSG("Non-secure external DT found");
}

static int mark_tzdram_as_reserved(struct dt_descriptor *dt)
{
	return add_res_mem_dt_node(dt, "optee_core", CFG_TZDRAM_START,
				   CFG_TZDRAM_SIZE);
}

static void update_external_dt(void)
{
	struct dt_descriptor *dt = &external_dt;

	if (!dt->blob)
		return;

	if (!IS_ENABLED(CFG_CORE_FFA) && add_optee_dt_node(dt))
		panic("Failed to add OP-TEE Device Tree node");

	if (config_psci(dt))
		panic("Failed to config PSCI");

#ifdef CFG_CORE_RESERVED_SHM
	if (mark_static_shm_as_reserved(dt))
		panic("Failed to config non-secure memory");
#endif

	if (mark_tzdram_as_reserved(dt))
		panic("Failed to config secure memory");
}
#else /*CFG_DT*/
void *get_external_dt(void)
{
	return NULL;
}

static void init_external_dt(unsigned long phys_dt __unused)
{
}

static void update_external_dt(void)
{
}

#ifdef CFG_CORE_DYN_SHM
static struct core_mmu_phys_mem *get_nsec_memory(void *fdt __unused,
						 size_t *nelems __unused)
{
	return NULL;
}
#endif /*CFG_CORE_DYN_SHM*/
#endif /*!CFG_DT*/

#ifdef CFG_CORE_DYN_SHM
static void discover_nsec_memory(void)
{
	struct core_mmu_phys_mem *mem;
	const struct core_mmu_phys_mem *mem_begin = NULL;
	const struct core_mmu_phys_mem *mem_end = NULL;
	size_t nelems;
	void *fdt = get_external_dt();

	if (fdt) {
		mem = get_nsec_memory(fdt, &nelems);
		if (mem) {
			core_mmu_set_discovered_nsec_ddr(mem, nelems);
			return;
		}

		DMSG("No non-secure memory found in FDT");
	}

	mem_begin = phys_ddr_overall_begin;
	mem_end = phys_ddr_overall_end;
	nelems = mem_end - mem_begin;
	if (nelems) {
		/*
		 * A platform cannot use both register_ddr() and the now
		 * deprecated register_dynamic_shm().
		 */
		assert(phys_ddr_overall_compat_begin ==
		       phys_ddr_overall_compat_end);
	} else {
		mem_begin = phys_ddr_overall_compat_begin;
		mem_end = phys_ddr_overall_compat_end;
		nelems = mem_end - mem_begin;
		if (!nelems)
			return;
		DMSG("Warning: register_dynamic_shm() is deprecated, please use register_ddr() instead");
	}

	mem = nex_calloc(nelems, sizeof(*mem));
	if (!mem)
		panic();

	memcpy(mem, phys_ddr_overall_begin, sizeof(*mem) * nelems);
	core_mmu_set_discovered_nsec_ddr(mem, nelems);
}
#else /*CFG_CORE_DYN_SHM*/
static void discover_nsec_memory(void)
{
}
#endif /*!CFG_CORE_DYN_SHM*/

#ifdef CFG_VIRTUALIZATION
static TEE_Result virt_init_heap(void)
{
	/* We need to initialize the pool for every virtual guest partition */
	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);

	return TEE_SUCCESS;
}
preinit_early(virt_init_heap);
#endif

void init_tee_runtime(void)
{
#ifndef CFG_WITH_PAGER
	/* Pager initializes TA RAM early */
	core_mmu_init_ta_ram();
#endif
	/*
	 * With virtualization we call this function when creating the
	 * OP-TEE partition instead.
	 */
	if (!IS_ENABLED(CFG_VIRTUALIZATION))
		call_preinitcalls();
	call_initcalls();
}

static void init_primary(unsigned long pageable_part, unsigned long nsec_entry)
{
	thread_init_core_local_stacks();
	/*
	 * Mask asynchronous exceptions before switching to the thread vector
	 * as the thread handler requires those to be masked while
	 * executing with the temporary stack. The thread subsystem also
	 * asserts that the foreign interrupts are blocked when using most of
	 * its functions.
	 */
	thread_set_exceptions(THREAD_EXCP_ALL);
	primary_save_cntfrq();
	init_vfp_sec();
	/*
	 * Pager: init_runtime() calls thread_kernel_enable_vfp() so we must
	 * set a current thread right now to avoid a chicken-and-egg problem
	 * (thread_init_boot_thread() sets the current thread but needs
	 * things set by init_runtime()).
	 */
	thread_get_core_local()->curr_thread = 0;
	init_runtime(pageable_part);

	if (IS_ENABLED(CFG_VIRTUALIZATION)) {
		/*
		 * Virtualization: We can't initialize threads right now
		 * because threads belong to the "tee" part and will be
		 * initialized separately for each new virtual guest. So,
		 * we'll clear "curr_thread" and call it done.
		 */
		thread_get_core_local()->curr_thread = -1;
	} else {
		thread_init_boot_thread();
	}
	thread_init_primary();
	thread_init_per_cpu();
	init_sec_mon(nsec_entry);
}

/*
 * Note: this function is weak just to make it possible to exclude it from
 * the unpaged area.
 */
void __weak boot_init_primary_late(unsigned long fdt)
{
	init_external_dt(fdt);
	tpm_map_log_area(get_external_dt());
	discover_nsec_memory();
	update_external_dt();
	configure_console_from_dt();

	IMSG("OP-TEE version: %s", core_v_str);
	if (IS_ENABLED(CFG_WARN_INSECURE)) {
		IMSG("WARNING: This OP-TEE configuration might be insecure!");
		IMSG("WARNING: Please check https://optee.readthedocs.io/en/latest/architecture/porting_guidelines.html");
	}
	IMSG("Primary CPU initializing");
#ifdef CFG_CORE_ASLR
	DMSG("Executing at offset %#lx with virtual load address %#"PRIxVA,
	     (unsigned long)boot_mmu_config.load_offset, VCORE_START_VA);
#endif
	if (IS_ENABLED(CFG_MEMTAG))
		DMSG("Memory tagging %s",
		     memtag_is_enabled() ? "enabled" : "disabled");

	main_init_gic();
	init_vfp_nsec();
	if (IS_ENABLED(CFG_VIRTUALIZATION)) {
		IMSG("Initializing virtualization support");
		core_mmu_init_virtualization();
	} else {
		init_tee_runtime();
	}
	call_finalcalls();
	IMSG("Primary CPU switching to normal world boot");
}

static void init_secondary_helper(unsigned long nsec_entry)
{
	IMSG("Secondary CPU %zu initializing", get_core_pos());

	/*
	 * Mask asynchronous exceptions before switching to the thread vector
	 * as the thread handler requires those to be masked while
	 * executing with the temporary stack. The thread subsystem also
	 * asserts that the foreign interrupts are blocked when using most of
	 * its functions.
	 */
	thread_set_exceptions(THREAD_EXCP_ALL);

	secondary_init_cntfrq();
	thread_init_per_cpu();
	init_sec_mon(nsec_entry);
	main_secondary_init_gic();
	init_vfp_sec();
	init_vfp_nsec();

	IMSG("Secondary CPU %zu switching to normal world boot",
	     get_core_pos());
}

/*
 * Note: this function is weak just to make it possible to exclude it from
 * the unpaged area so that it lies in the init area.
 */
void __weak boot_init_primary_early(unsigned long pageable_part,
				    unsigned long nsec_entry __maybe_unused)
{
	unsigned long e = PADDR_INVALID;

#if !defined(CFG_WITH_ARM_TRUSTED_FW)
	e = nsec_entry;
#endif

	init_primary(pageable_part, e);
}

#if defined(CFG_WITH_ARM_TRUSTED_FW)
unsigned long boot_cpu_on_handler(unsigned long a0 __maybe_unused,
				  unsigned long a1 __unused)
{
	init_secondary_helper(PADDR_INVALID);
	return 0;
}
#else
void boot_init_secondary(unsigned long nsec_entry)
{
	init_secondary_helper(nsec_entry);
}
#endif

#if defined(CFG_BOOT_SECONDARY_REQUEST)
void boot_set_core_ns_entry(size_t core_idx, uintptr_t entry,
			    uintptr_t context_id)
{
	ns_entry_contexts[core_idx].entry_point = entry;
	ns_entry_contexts[core_idx].context_id = context_id;
	dsb_ishst();
}

int boot_core_release(size_t core_idx, paddr_t entry)
{
	if (!core_idx || core_idx >= CFG_TEE_CORE_NB_CORE)
		return -1;

	ns_entry_contexts[core_idx].entry_point = entry;
	dmb();
	spin_table[core_idx] = 1;
	dsb();
	sev();

	return 0;
}
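
/*
 * Note on ordering: boot_core_release() above publishes the entry point
 * with a dmb() before setting the spin table flag, and the spin-table path
 * of boot_core_hpen() below issues a dmb() after observing the flag, so a
 * released core is guaranteed to read the updated ns_entry_contexts[]
 * entry.
 */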

/*
 * Spin until a secondary boot request arrives, then return the entry
 * context for the secondary core.
 */
struct ns_entry_context *boot_core_hpen(void)
{
#ifdef CFG_PSCI_ARM32
	return &ns_entry_contexts[get_core_pos()];
#else
	do {
		wfe();
	} while (!spin_table[get_core_pos()]);
	dmb();
	return &ns_entry_contexts[get_core_pos()];
#endif
}
#endif

#if defined(CFG_CORE_ASLR)
#if defined(CFG_DT)
unsigned long __weak get_aslr_seed(void *fdt)
{
	int rc = fdt_check_header(fdt);
	const uint64_t *seed = NULL;
	int offs = 0;
	int len = 0;

	if (rc) {
		DMSG("Bad fdt: %d", rc);
		goto err;
	}

	offs = fdt_path_offset(fdt, "/secure-chosen");
	if (offs < 0) {
		DMSG("Cannot find /secure-chosen");
		goto err;
	}
	seed = fdt_getprop(fdt, offs, "kaslr-seed", &len);
	if (!seed || len != sizeof(*seed)) {
		DMSG("Cannot find valid kaslr-seed");
		goto err;
	}

	return fdt64_to_cpu(*seed);

err:
	/* Try platform implementation */
	return plat_get_aslr_seed();
}
#else /*!CFG_DT*/
unsigned long __weak get_aslr_seed(void *fdt __unused)
{
	/* Try platform implementation */
	return plat_get_aslr_seed();
}
#endif /*!CFG_DT*/
#endif /*CFG_CORE_ASLR*/