// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2015-2020, Linaro Limited
 */

#include <arm.h>
#include <assert.h>
#include <compiler.h>
#include <config.h>
#include <console.h>
#include <crypto/crypto.h>
#include <initcall.h>
#include <inttypes.h>
#include <keep.h>
#include <kernel/asan.h>
#include <kernel/boot.h>
#include <kernel/linker.h>
#include <kernel/misc.h>
#include <kernel/panic.h>
#include <kernel/tee_misc.h>
#include <kernel/thread.h>
#include <kernel/tpm.h>
#include <libfdt.h>
#include <malloc.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <mm/fobj.h>
#include <mm/tee_mm.h>
#include <mm/tee_mmu.h>
#include <mm/tee_pager.h>
#include <sm/psci.h>
#include <stdio.h>
#include <trace.h>
#include <utee_defines.h>
#include <util.h>

#include <platform_config.h>

#if !defined(CFG_WITH_ARM_TRUSTED_FW)
#include <sm/sm.h>
#endif

#if defined(CFG_WITH_VFP)
#include <kernel/vfp.h>
#endif

/*
 * In this file we're using unsigned long to represent physical pointers as
 * they are received in a single register when OP-TEE is initially entered.
 * This limits 32-bit systems to only make use of the lower 32 bits of a
 * physical address for initial parameters.
 *
 * 64-bit systems on the other hand can use full 64-bit physical pointers.
 */
#define PADDR_INVALID		ULONG_MAX

#if defined(CFG_BOOT_SECONDARY_REQUEST)
struct ns_entry_context {
	uintptr_t entry_point;
	uintptr_t context_id;
};
struct ns_entry_context ns_entry_contexts[CFG_TEE_CORE_NB_CORE];
static uint32_t spin_table[CFG_TEE_CORE_NB_CORE];
#endif

#ifdef CFG_BOOT_SYNC_CPU
/*
 * Array used when booting, to synchronize CPUs.
 * When 0, the CPU has not started.
 * When 1, it has started.
 */
uint32_t sem_cpu_sync[CFG_TEE_CORE_NB_CORE];
DECLARE_KEEP_PAGER(sem_cpu_sync);
#endif

#ifdef CFG_DT
struct dt_descriptor {
	void *blob;
	int frag_id;
};

static struct dt_descriptor external_dt __nex_bss;
#endif

#ifdef CFG_SECONDARY_INIT_CNTFRQ
static uint32_t cntfrq;
#endif

/* May be overridden in plat-$(PLATFORM)/main.c */
__weak void plat_primary_init_early(void)
{
}
DECLARE_KEEP_PAGER(plat_primary_init_early);

/* May be overridden in plat-$(PLATFORM)/main.c */
__weak void main_init_gic(void)
{
}

/* May be overridden in plat-$(PLATFORM)/main.c */
__weak void main_secondary_init_gic(void)
{
}

#if defined(CFG_WITH_ARM_TRUSTED_FW)
void init_sec_mon(unsigned long nsec_entry __maybe_unused)
{
	assert(nsec_entry == PADDR_INVALID);
	/* Do nothing as we don't have a secure monitor */
}
#else
/* May be overridden in plat-$(PLATFORM)/main.c */
__weak void init_sec_mon(unsigned long nsec_entry)
{
	struct sm_nsec_ctx *nsec_ctx;

	assert(nsec_entry != PADDR_INVALID);

	/* Initialize secure monitor */
	nsec_ctx = sm_get_nsec_ctx();
	nsec_ctx->mon_lr = nsec_entry;
	nsec_ctx->mon_spsr = CPSR_MODE_SVC | CPSR_I;
	if (nsec_entry & 1)
		nsec_ctx->mon_spsr |= CPSR_T;
}
#endif

#if defined(CFG_WITH_ARM_TRUSTED_FW)
static void init_vfp_nsec(void)
{
}
#else
static void init_vfp_nsec(void)
{
	/* Normal world can use CP10 and CP11 (SIMD/VFP) */
	write_nsacr(read_nsacr() | NSACR_CP10 | NSACR_CP11);
}
#endif

#if defined(CFG_WITH_VFP)

#ifdef ARM32
static void init_vfp_sec(void)
{
	uint32_t cpacr = read_cpacr();

	/*
	 * Enable Advanced SIMD functionality.
	 * Enable use of D16-D31 of the Floating-point Extension register
	 * file.
	 */
	cpacr &= ~(CPACR_ASEDIS | CPACR_D32DIS);
	/*
	 * Enable usage of CP10 and CP11 (SIMD/VFP) (both kernel and user
	 * mode).
	 */
	cpacr |= CPACR_CP(10, CPACR_CP_ACCESS_FULL);
	cpacr |= CPACR_CP(11, CPACR_CP_ACCESS_FULL);
	write_cpacr(cpacr);
}
#endif /* ARM32 */

#ifdef ARM64
static void init_vfp_sec(void)
{
	/* Not using VFP until thread_kernel_enable_vfp() */
	vfp_disable();
}
#endif /* ARM64 */

#else /* CFG_WITH_VFP */

static void init_vfp_sec(void)
{
	/* Not using VFP */
}
#endif

#ifdef CFG_SECONDARY_INIT_CNTFRQ
static void primary_save_cntfrq(void)
{
	assert(cntfrq == 0);

	/*
	 * CNTFRQ should be initialized on the primary CPU by a
	 * previous boot stage
	 */
	cntfrq = read_cntfrq();
}

static void secondary_init_cntfrq(void)
{
	assert(cntfrq != 0);
	write_cntfrq(cntfrq);
}
#else /* CFG_SECONDARY_INIT_CNTFRQ */
static void primary_save_cntfrq(void)
{
}

static void secondary_init_cntfrq(void)
{
}
#endif

#ifdef CFG_CORE_SANITIZE_KADDRESS
static void init_run_constructors(void)
{
	const vaddr_t *ctor;

	for (ctor = &__ctor_list; ctor < &__ctor_end; ctor++)
		((void (*)(void))(*ctor))();
}

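/*
 * Orientation note on the shadow layout (standard ASan mapping, assumed
 * here): each 8 bytes of covered memory maps to one shadow byte at
 * shadow_addr = (addr / 8) + CFG_ASAN_SHADOW_OFFSET. That is why
 * __ASAN_SHADOW_START below is placed at roughly 8/9 of TEE_RAM_VA_SIZE
 * (the last 1/9 shadows the first 8/9) and why the offset must equal
 * __ASAN_SHADOW_START - TEE_RAM_VA_START / 8.
 */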
static void init_asan(void)
{

	/*
	 * CFG_ASAN_SHADOW_OFFSET is also supplied as
	 * -fasan-shadow-offset=$(CFG_ASAN_SHADOW_OFFSET) to the compiler.
	 * Since all the values needed to calculate CFG_ASAN_SHADOW_OFFSET
	 * aren't available to make, it has to be calculated in advance and
	 * hard coded into the platform conf.mk. Here, where all the needed
	 * values are available, we double check that the compiler was
	 * supplied the correct value.
	 */

#define __ASAN_SHADOW_START \
	ROUNDUP(TEE_RAM_VA_START + (TEE_RAM_VA_SIZE * 8) / 9 - 8, 8)
	assert(__ASAN_SHADOW_START == (vaddr_t)&__asan_shadow_start);
#define __CFG_ASAN_SHADOW_OFFSET \
	(__ASAN_SHADOW_START - (TEE_RAM_VA_START / 8))
	COMPILE_TIME_ASSERT(CFG_ASAN_SHADOW_OFFSET == __CFG_ASAN_SHADOW_OFFSET);
#undef __ASAN_SHADOW_START
#undef __CFG_ASAN_SHADOW_OFFSET

	/*
	 * Assign area covered by the shadow area, everything from start up
	 * to the beginning of the shadow area.
	 */
	asan_set_shadowed((void *)TEE_TEXT_VA_START, &__asan_shadow_start);

	/*
	 * Add access to areas that aren't opened automatically by a
	 * constructor.
	 */
	asan_tag_access(&__ctor_list, &__ctor_end);
	asan_tag_access(__rodata_start, __rodata_end);
#ifdef CFG_WITH_PAGER
	asan_tag_access(__pageable_start, __pageable_end);
#endif /*CFG_WITH_PAGER*/
	asan_tag_access(__nozi_start, __nozi_end);
	asan_tag_access(__exidx_start, __exidx_end);
	asan_tag_access(__extab_start, __extab_end);

	init_run_constructors();

	/* Everything is tagged correctly, let's start address sanitizing. */
	asan_start();
}
#else /*CFG_CORE_SANITIZE_KADDRESS*/
static void init_asan(void)
{
}
#endif /*CFG_CORE_SANITIZE_KADDRESS*/

#ifdef CFG_WITH_PAGER

#ifdef CFG_CORE_SANITIZE_KADDRESS
static void carve_out_asan_mem(tee_mm_pool_t *pool)
{
	const size_t s = pool->hi - pool->lo;
	tee_mm_entry_t *mm;
	paddr_t apa = ASAN_MAP_PA;
	size_t asz = ASAN_MAP_SZ;

	if (core_is_buffer_outside(apa, asz, pool->lo, s))
		return;

	/* Reserve the shadow area */
	if (!core_is_buffer_inside(apa, asz, pool->lo, s)) {
		if (apa < pool->lo) {
			/*
			 * ASAN buffer is overlapping with the beginning of
			 * the pool.
			 */
			asz -= pool->lo - apa;
			apa = pool->lo;
		} else {
			/*
			 * ASAN buffer is overlapping with the end of the
			 * pool.
			 */
			asz = pool->hi - apa;
		}
	}
	mm = tee_mm_alloc2(pool, apa, asz);
	assert(mm);
}
#else
static void carve_out_asan_mem(tee_mm_pool_t *pool __unused)
{
}
#endif

static void print_pager_pool_size(void)
{
	struct tee_pager_stats __maybe_unused stats;

	tee_pager_get_stats(&stats);
	IMSG("Pager pool size: %zukB",
	     stats.npages_all * SMALL_PAGE_SIZE / 1024);
}

static void init_vcore(tee_mm_pool_t *mm_vcore)
{
	const vaddr_t begin = VCORE_START_VA;
	vaddr_t end = begin + TEE_RAM_VA_SIZE;

#ifdef CFG_CORE_SANITIZE_KADDRESS
	/* Carve out asan memory, flat mapped after core memory */
	if (end > ASAN_SHADOW_PA)
		end = ASAN_MAP_PA;
#endif

	if (!tee_mm_init(mm_vcore, begin, end, SMALL_PAGE_SHIFT,
			 TEE_MM_POOL_NO_FLAGS))
		panic("tee_mm_vcore init failed");
}

/*
 * With CFG_CORE_ASLR=y the init part is relocated very early during boot.
 * The init part is also paged just as the rest of the normal paged code, with
 * the difference that it's preloaded during boot. When the backing store
 * is configured the entire paged binary is copied in place and then also
 * the init part.
 * Since the init part has been relocated (references to addresses updated
 * to compensate for the new load address) this has to be undone for the
 * hashes of those pages to match with the original binary.
 *
 * If CFG_CORE_ASLR=n, nothing needs to be done as the code/ro pages are
 * unchanged.
 */
static void undo_init_relocation(uint8_t *paged_store __maybe_unused)
{
#ifdef CFG_CORE_ASLR
	unsigned long *ptr = NULL;
	const uint32_t *reloc = NULL;
	const uint32_t *reloc_end = NULL;
	unsigned long offs = boot_mmu_config.load_offset;
	const struct boot_embdata *embdata = (const void *)__init_end;
	vaddr_t addr_end = (vaddr_t)__init_end - offs - TEE_RAM_START;
	vaddr_t addr_start = (vaddr_t)__init_start - offs - TEE_RAM_START;

	reloc = (const void *)((vaddr_t)embdata + embdata->reloc_offset);
	reloc_end = reloc + embdata->reloc_len / sizeof(*reloc);

	for (; reloc < reloc_end; reloc++) {
		if (*reloc < addr_start)
			continue;
		if (*reloc >= addr_end)
			break;
		ptr = (void *)(paged_store + *reloc - addr_start);
		*ptr -= offs;
	}
#endif
}

static struct fobj *ro_paged_alloc(tee_mm_entry_t *mm, void *hashes,
				   void *store)
{
	const unsigned int num_pages = tee_mm_get_bytes(mm) / SMALL_PAGE_SIZE;
#ifdef CFG_CORE_ASLR
	unsigned int reloc_offs = (vaddr_t)__pageable_start - VCORE_START_VA;
	const struct boot_embdata *embdata = (const void *)__init_end;
	const void *reloc = __init_end + embdata->reloc_offset;

	return fobj_ro_reloc_paged_alloc(num_pages, hashes, reloc_offs,
					 reloc, embdata->reloc_len, store);
#else
	return fobj_ro_paged_alloc(num_pages, hashes, store);
#endif
}

static void init_runtime(unsigned long pageable_part)
{
	size_t n;
	size_t init_size = (size_t)(__init_end - __init_start);
	size_t pageable_start = (size_t)__pageable_start;
	size_t pageable_end = (size_t)__pageable_end;
	size_t pageable_size = pageable_end - pageable_start;
	size_t tzsram_end = TZSRAM_BASE + TZSRAM_SIZE;
	size_t hash_size = (pageable_size / SMALL_PAGE_SIZE) *
			   TEE_SHA256_HASH_SIZE;
	const struct boot_embdata *embdata = (const void *)__init_end;
	const void *tmp_hashes = NULL;
	tee_mm_entry_t *mm = NULL;
	struct fobj *fobj = NULL;
	uint8_t *paged_store = NULL;
	uint8_t *hashes = NULL;

	assert(pageable_size % SMALL_PAGE_SIZE == 0);
	assert(embdata->total_len >= embdata->hashes_offset +
				     embdata->hashes_len);
	assert(hash_size == embdata->hashes_len);

	tmp_hashes = __init_end + embdata->hashes_offset;

	init_asan();

	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
	malloc_add_pool(__heap2_start, __heap2_end - __heap2_start);

	/*
	 * This needs to be initialized early to support address lookup
	 * in MEM_AREA_TEE_RAM
	 */
	tee_pager_early_init();

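	/*
	 * hash_size above is one SHA-256 digest (TEE_SHA256_HASH_SIZE, 32
	 * bytes) per 4 KiB pageable page. Illustrative figure: a 1 MiB
	 * pageable area is 256 pages and thus needs 256 * 32 = 8 KiB of
	 * hashes.
	 */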
	hashes = malloc(hash_size);
	IMSG_RAW("\n");
	IMSG("Pager is enabled. Hashes: %zu bytes", hash_size);
	assert(hashes);
	asan_memcpy_unchecked(hashes, tmp_hashes, hash_size);

	/*
	 * Need tee_mm_sec_ddr initialized to be able to allocate secure
	 * DDR below.
	 */
	teecore_init_ta_ram();

	carve_out_asan_mem(&tee_mm_sec_ddr);

	mm = tee_mm_alloc(&tee_mm_sec_ddr, pageable_size);
	assert(mm);
	paged_store = phys_to_virt(tee_mm_get_smem(mm), MEM_AREA_TA_RAM);
	/*
	 * Load the pageable part in the dedicated allocated area:
	 * - Move the pageable non-init part into the pageable area. Note
	 *   that the bootloader may have loaded it anywhere in TA RAM,
	 *   hence the use of memmove().
	 * - Copy the pageable init part from its current location into the
	 *   pageable area.
	 */
	memmove(paged_store + init_size,
		phys_to_virt(pageable_part,
			     core_mmu_get_type_by_pa(pageable_part)),
		__pageable_part_end - __pageable_part_start);
	asan_memcpy_unchecked(paged_store, __init_start, init_size);
	/*
	 * Undo eventual relocation for the init part so the hash checks
	 * can pass.
	 */
	undo_init_relocation(paged_store);

	/* Check that the hashes of what's in the pageable area are OK */
	DMSG("Checking hashes of pageable area");
	for (n = 0; (n * SMALL_PAGE_SIZE) < pageable_size; n++) {
		const uint8_t *hash = hashes + n * TEE_SHA256_HASH_SIZE;
		const uint8_t *page = paged_store + n * SMALL_PAGE_SIZE;
		TEE_Result res;

		DMSG("hash pg_idx %zu hash %p page %p", n, hash, page);
		res = hash_sha256_check(hash, page, SMALL_PAGE_SIZE);
		if (res != TEE_SUCCESS) {
			EMSG("Hash failed for page %zu at %p: res 0x%x",
			     n, (void *)page, res);
			panic();
		}
	}

	/*
	 * Assert that the prepaged init sections are page aligned so that
	 * nothing trails uninitialized at the end of the premapped init
	 * area.
	 */
	assert(!(init_size & SMALL_PAGE_MASK));

	/*
	 * Initialize the virtual memory pool used for main_mmu_l2_ttb which
	 * is supplied to tee_pager_init() below.
	 */
	init_vcore(&tee_mm_vcore);

	/*
	 * Assign the pager alias area at the end of the small page block
	 * that the rest of the binary is loaded into. We're taking more
	 * than needed, but we're guaranteed to not need more than the
	 * physical amount of TZSRAM.
	 */
	mm = tee_mm_alloc2(&tee_mm_vcore,
			   (vaddr_t)tee_mm_vcore.hi - TZSRAM_SIZE,
			   TZSRAM_SIZE);
	assert(mm);
	tee_pager_set_alias_area(mm);

	/*
	 * Claim virtual memory which isn't paged.
	 * Linear memory (flat map core memory) ends there.
	 */
	mm = tee_mm_alloc2(&tee_mm_vcore, VCORE_UNPG_RX_PA,
			   (vaddr_t)(__pageable_start - VCORE_UNPG_RX_PA));
	assert(mm);

	/*
	 * Allocate virtual memory for the pageable area and let the pager
	 * take charge of all the pages already assigned to that memory.
	 */
	mm = tee_mm_alloc2(&tee_mm_vcore, (vaddr_t)__pageable_start,
			   pageable_size);
	assert(mm);
	fobj = ro_paged_alloc(mm, hashes, paged_store);
	assert(fobj);
	tee_pager_add_core_area(tee_mm_get_smem(mm), PAGER_AREA_TYPE_RO, fobj);
	fobj_put(fobj);

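	/*
	 * Hand the physical pages backing the pageable area over to the
	 * pager: first the preloaded init part (already mapped and in use),
	 * then the not yet loaded remainder of the pageable area, and
	 * finally any TZSRAM left beyond the end of the pageable area. The
	 * boolean argument tells the pager whether the pages can be
	 * unmapped right away; the init part has to stay mapped while it's
	 * still executing.
	 */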
	tee_pager_add_pages(pageable_start, init_size / SMALL_PAGE_SIZE,
			    false);
	tee_pager_add_pages(pageable_start + init_size,
			    (pageable_size - init_size) / SMALL_PAGE_SIZE,
			    true);
	if (pageable_end < tzsram_end)
		tee_pager_add_pages(pageable_end, (tzsram_end - pageable_end) /
						  SMALL_PAGE_SIZE, true);

	/*
	 * There may be physical pages in TZSRAM before the core load
	 * address. These pages can be added to the physical pages pool of
	 * the pager. This setup may happen when the secure bootloader runs
	 * in TZRAM and its memory can be reused by OP-TEE once boot stages
	 * complete.
	 */
	tee_pager_add_pages(tee_mm_vcore.lo,
			    (VCORE_UNPG_RX_PA - tee_mm_vcore.lo) /
			    SMALL_PAGE_SIZE, true);

	print_pager_pool_size();
}
#else

static void init_runtime(unsigned long pageable_part __unused)
{
	init_asan();

	/*
	 * By default the whole of OP-TEE uses malloc, so we need to
	 * initialize it early. But when virtualization is enabled, malloc
	 * is used only by the TEE runtime, so it should be initialized
	 * later, for each virtual partition separately. Core code uses
	 * nex_malloc instead.
	 */
#ifdef CFG_VIRTUALIZATION
	nex_malloc_add_pool(__nex_heap_start, __nex_heap_end -
					      __nex_heap_start);
#else
	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
#endif

	IMSG_RAW("\n");
}
#endif

void *get_dt(void)
{
	void *fdt = get_embedded_dt();

	if (!fdt)
		fdt = get_external_dt();

	return fdt;
}

#if defined(CFG_EMBED_DTB)
void *get_embedded_dt(void)
{
	static bool checked;

	assert(cpu_mmu_enabled());

	if (!checked) {
		IMSG("Embedded DTB found");

		if (fdt_check_header(embedded_secure_dtb))
			panic("Invalid embedded DTB");

		checked = true;
	}

	return embedded_secure_dtb;
}
#else
void *get_embedded_dt(void)
{
	return NULL;
}
#endif /*CFG_EMBED_DTB*/

#if defined(CFG_DT)
void *get_external_dt(void)
{
	assert(cpu_mmu_enabled());
	return external_dt.blob;
}

static TEE_Result release_external_dt(void)
{
	int ret = 0;

	if (!external_dt.blob)
		return TEE_SUCCESS;

	ret = fdt_pack(external_dt.blob);
	if (ret < 0) {
		EMSG("Failed to pack Device Tree at 0x%" PRIxPA ": error %d",
		     virt_to_phys(external_dt.blob), ret);
		panic();
	}

	/* External DTB is not accessed anymore, reset the pointer */
	external_dt.blob = NULL;

	return TEE_SUCCESS;
}
boot_final(release_external_dt);

#ifdef CFG_EXTERNAL_DTB_OVERLAY
static int add_dt_overlay_fragment(struct dt_descriptor *dt, int ioffs)
{
	char frag[32];
	int offs;
	int ret;

	snprintf(frag, sizeof(frag), "fragment@%d", dt->frag_id);
	offs = fdt_add_subnode(dt->blob, ioffs, frag);
	if (offs < 0)
		return offs;

	dt->frag_id += 1;

	ret = fdt_setprop_string(dt->blob, offs, "target-path", "/");
	if (ret < 0)
		return -1;

	return fdt_add_subnode(dt->blob, offs, "__overlay__");
}

static int init_dt_overlay(struct dt_descriptor *dt, int __maybe_unused dt_size)
{
	int fragment;
	int ret;

	ret = fdt_check_header(dt->blob);
	if (!ret) {
		fdt_for_each_subnode(fragment, dt->blob, 0)
			dt->frag_id += 1;
		return ret;
	}

#ifdef CFG_DT_ADDR
	return fdt_create_empty_tree(dt->blob, dt_size);
#else
	return -1;
#endif
}
#else
static int add_dt_overlay_fragment(struct dt_descriptor *dt __unused, int offs)
{
	return offs;
}

static int init_dt_overlay(struct dt_descriptor *dt __unused,
			   int dt_size __unused)
{
	return 0;
}
#endif /* CFG_EXTERNAL_DTB_OVERLAY */

static int add_dt_path_subnode(struct dt_descriptor *dt, const char *path,
			       const char *subnode)
{
	int offs;

	offs = fdt_path_offset(dt->blob, path);
	if (offs < 0)
		return -1;
	offs = add_dt_overlay_fragment(dt, offs);
	if (offs < 0)
		return -1;
	offs = fdt_add_subnode(dt->blob, offs,
			       subnode);
	if (offs < 0)
		return -1;
	return offs;
}

static int add_optee_dt_node(struct dt_descriptor *dt)
{
	int offs;
	int ret;

	if (fdt_path_offset(dt->blob, "/firmware/optee") >= 0) {
		DMSG("OP-TEE Device Tree node already exists!");
		return 0;
	}

	offs = fdt_path_offset(dt->blob, "/firmware");
	if (offs < 0) {
		offs = add_dt_path_subnode(dt, "/", "firmware");
		if (offs < 0)
			return -1;
	}

	offs = fdt_add_subnode(dt->blob, offs, "optee");
	if (offs < 0)
		return -1;

	ret = fdt_setprop_string(dt->blob, offs, "compatible",
				 "linaro,optee-tz");
	if (ret < 0)
		return -1;
	ret = fdt_setprop_string(dt->blob, offs, "method", "smc");
	if (ret < 0)
		return -1;
	return 0;
}

#ifdef CFG_PSCI_ARM32
static int append_psci_compatible(void *fdt, int offs, const char *str)
{
	return fdt_appendprop(fdt, offs, "compatible", str, strlen(str) + 1);
}

static int dt_add_psci_node(struct dt_descriptor *dt)
{
	int offs;

	if (fdt_path_offset(dt->blob, "/psci") >= 0) {
		DMSG("PSCI Device Tree node already exists!");
		return 0;
	}

	offs = add_dt_path_subnode(dt, "/", "psci");
	if (offs < 0)
		return -1;
	if (append_psci_compatible(dt->blob, offs, "arm,psci-1.0"))
		return -1;
	if (append_psci_compatible(dt->blob, offs, "arm,psci-0.2"))
		return -1;
	if (append_psci_compatible(dt->blob, offs, "arm,psci"))
		return -1;
	if (fdt_setprop_string(dt->blob, offs, "method", "smc"))
		return -1;
	if (fdt_setprop_u32(dt->blob, offs, "cpu_suspend", PSCI_CPU_SUSPEND))
		return -1;
	if (fdt_setprop_u32(dt->blob, offs, "cpu_off", PSCI_CPU_OFF))
		return -1;
	if (fdt_setprop_u32(dt->blob, offs, "cpu_on", PSCI_CPU_ON))
		return -1;
	if (fdt_setprop_u32(dt->blob, offs, "sys_poweroff", PSCI_SYSTEM_OFF))
		return -1;
	if (fdt_setprop_u32(dt->blob, offs, "sys_reset", PSCI_SYSTEM_RESET))
		return -1;
	return 0;
}

static int check_node_compat_prefix(struct dt_descriptor *dt, int offs,
				    const char *prefix)
{
	const size_t prefix_len = strlen(prefix);
	size_t l;
	int plen;
	const char *prop;

	prop = fdt_getprop(dt->blob, offs, "compatible", &plen);
	if (!prop)
		return -1;

	while (plen > 0) {
		if (memcmp(prop, prefix, prefix_len) == 0)
			return 0; /* match */

		l = strlen(prop) + 1;
		prop += l;
		plen -= l;
	}

	return -1;
}

static int dt_add_psci_cpu_enable_methods(struct dt_descriptor *dt)
{
	int offs = 0;

	while (1) {
		offs = fdt_next_node(dt->blob, offs, NULL);
		if (offs < 0)
			break;
		if (fdt_getprop(dt->blob, offs, "enable-method", NULL))
			continue; /* already set */
		if (check_node_compat_prefix(dt, offs, "arm,cortex-a"))
			continue; /* no compatible */
		if (fdt_setprop_string(dt->blob, offs, "enable-method", "psci"))
			return -1;
		/* Need to restart scanning as offsets may have changed */
		offs = 0;
	}
	return 0;
}

static int config_psci(struct dt_descriptor *dt)
{
	if (dt_add_psci_node(dt))
		return -1;
	return dt_add_psci_cpu_enable_methods(dt);
}
#else
static int config_psci(struct dt_descriptor *dt __unused)
{
	return 0;
}
#endif /*CFG_PSCI_ARM32*/

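/*
 * set_dt_val() and add_res_mem_dt_node() below emit "reg" properties using
 * the enclosing node's #address-cells/#size-cells: a cell count of 1 stores
 * a 32-bit big-endian value, 2 stores a 64-bit one. Illustrative result
 * (addresses made up) on a platform using two cells for both:
 *
 *	reserved-memory {
 *		#address-cells = <2>;
 *		#size-cells = <2>;
 *		ranges;
 *
 *		optee_core@0xe100000 {
 *			reg = <0x0 0x0e100000 0x0 0x00f00000>;
 *			no-map;
 *		};
 *	};
 */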
static void set_dt_val(void *data, uint32_t cell_size, uint64_t val)
{
	if (cell_size == 1) {
		fdt32_t v = cpu_to_fdt32((uint32_t)val);

		memcpy(data, &v, sizeof(v));
	} else {
		fdt64_t v = cpu_to_fdt64(val);

		memcpy(data, &v, sizeof(v));
	}
}

static int add_res_mem_dt_node(struct dt_descriptor *dt, const char *name,
			       paddr_t pa, size_t size)
{
	int offs = 0;
	int ret = 0;
	int addr_size = -1;
	int len_size = -1;
	bool found = true;
	char subnode_name[80] = { 0 };

	offs = fdt_path_offset(dt->blob, "/reserved-memory");

	if (offs < 0) {
		found = false;
		offs = 0;
	}

	if (IS_ENABLED(CFG_EXTERNAL_DTB_OVERLAY)) {
		len_size = sizeof(paddr_t) / sizeof(uint32_t);
		addr_size = sizeof(paddr_t) / sizeof(uint32_t);
	} else {
		len_size = fdt_size_cells(dt->blob, offs);
		if (len_size < 0)
			return -1;
		addr_size = fdt_address_cells(dt->blob, offs);
		if (addr_size < 0)
			return -1;
	}

	if (!found) {
		offs = add_dt_path_subnode(dt, "/", "reserved-memory");
		if (offs < 0)
			return -1;
		ret = fdt_setprop_cell(dt->blob, offs, "#address-cells",
				       addr_size);
		if (ret < 0)
			return -1;
		ret = fdt_setprop_cell(dt->blob, offs, "#size-cells",
				       len_size);
		if (ret < 0)
			return -1;
		ret = fdt_setprop(dt->blob, offs, "ranges", NULL, 0);
		if (ret < 0)
			return -1;
	}

	snprintf(subnode_name, sizeof(subnode_name),
		 "%s@0x%" PRIxPA, name, pa);
	offs = fdt_add_subnode(dt->blob, offs, subnode_name);
	if (offs >= 0) {
		uint32_t data[FDT_MAX_NCELLS * 2];

		set_dt_val(data, addr_size, pa);
		set_dt_val(data + addr_size, len_size, size);
		ret = fdt_setprop(dt->blob, offs, "reg", data,
				  sizeof(uint32_t) * (addr_size + len_size));
		if (ret < 0)
			return -1;
		ret = fdt_setprop(dt->blob, offs, "no-map", NULL, 0);
		if (ret < 0)
			return -1;
	} else {
		return -1;
	}
	return 0;
}

#ifdef CFG_CORE_DYN_SHM
static uint64_t get_dt_val_and_advance(const void *data, size_t *offs,
				       uint32_t cell_size)
{
	uint64_t rv = 0;

	if (cell_size == 1) {
		uint32_t v;

		memcpy(&v, (const uint8_t *)data + *offs, sizeof(v));
		*offs += sizeof(v);
		rv = fdt32_to_cpu(v);
	} else {
		uint64_t v;

		memcpy(&v, (const uint8_t *)data + *offs, sizeof(v));
		*offs += sizeof(v);
		rv = fdt64_to_cpu(v);
	}

	return rv;
}

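/*
 * Illustrative example (no particular platform): with two cells for both
 * address and size, a memory node such as
 *
 *	memory@80000000 {
 *		device_type = "memory";
 *		reg = <0x0 0x80000000 0x0 0x40000000>;
 *	};
 *
 * is picked up by get_nsec_memory_helper() below as a single
 * MEM_AREA_DDR_OVERALL entry with addr 0x80000000 and size 0x40000000.
 */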
/*
 * Find all non-secure memory from DT. Memory marked inaccessible by Secure
 * World is ignored since it could not be mapped to be used as dynamic shared
 * memory.
 */
static int get_nsec_memory_helper(void *fdt, struct core_mmu_phys_mem *mem)
{
	const uint8_t *prop = NULL;
	uint64_t a = 0;
	uint64_t l = 0;
	size_t prop_offs = 0;
	size_t prop_len = 0;
	int elems_total = 0;
	int addr_size = 0;
	int len_size = 0;
	int offs = 0;
	size_t n = 0;
	int len = 0;

	addr_size = fdt_address_cells(fdt, 0);
	if (addr_size < 0)
		return 0;

	len_size = fdt_size_cells(fdt, 0);
	if (len_size < 0)
		return 0;

	while (true) {
		offs = fdt_node_offset_by_prop_value(fdt, offs, "device_type",
						     "memory",
						     sizeof("memory"));
		if (offs < 0)
			break;

		if (_fdt_get_status(fdt, offs) != (DT_STATUS_OK_NSEC |
						   DT_STATUS_OK_SEC))
			continue;

		prop = fdt_getprop(fdt, offs, "reg", &len);
		if (!prop)
			continue;

		prop_len = len;
		for (n = 0, prop_offs = 0; prop_offs < prop_len; n++) {
			a = get_dt_val_and_advance(prop, &prop_offs,
						   addr_size);
			if (prop_offs >= prop_len) {
				n--;
				break;
			}

			l = get_dt_val_and_advance(prop, &prop_offs, len_size);
			if (mem) {
				mem->type = MEM_AREA_DDR_OVERALL;
				mem->addr = a;
				mem->size = l;
				mem++;
			}
		}

		elems_total += n;
	}

	return elems_total;
}

static struct core_mmu_phys_mem *get_nsec_memory(void *fdt, size_t *nelems)
{
	struct core_mmu_phys_mem *mem = NULL;
	int elems_total = 0;

	elems_total = get_nsec_memory_helper(fdt, NULL);
	if (elems_total <= 0)
		return NULL;

	mem = nex_calloc(elems_total, sizeof(*mem));
	if (!mem)
		panic();

	elems_total = get_nsec_memory_helper(fdt, mem);
	assert(elems_total > 0);

	*nelems = elems_total;

	return mem;
}
#endif /*CFG_CORE_DYN_SHM*/

#ifdef CFG_CORE_RESERVED_SHM
static int mark_static_shm_as_reserved(struct dt_descriptor *dt)
{
	vaddr_t shm_start;
	vaddr_t shm_end;

	core_mmu_get_mem_by_type(MEM_AREA_NSEC_SHM, &shm_start, &shm_end);
	if (shm_start != shm_end)
		return add_res_mem_dt_node(dt, "optee_shm",
					   virt_to_phys((void *)shm_start),
					   shm_end - shm_start);

	DMSG("No SHM configured");
	return -1;
}
#endif /*CFG_CORE_RESERVED_SHM*/

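/*
 * Lifecycle of the external (normal world) DT in this file:
 * init_external_dt() maps the blob and opens it for modification,
 * update_external_dt() adds the OP-TEE, PSCI and reserved-memory nodes, and
 * release_external_dt() (registered with boot_final() above) packs it and
 * drops the reference once the boot sequence completes.
 */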
static void init_external_dt(unsigned long phys_dt)
{
	struct dt_descriptor *dt = &external_dt;
	void *fdt;
	int ret;

	if (!phys_dt) {
		/*
		 * No need to panic as we're not using the DT in OP-TEE
		 * yet, we're only adding some nodes for normal world use.
		 * This makes the switch to using DT easier as we can boot
		 * a newer OP-TEE with older boot loaders. Once we start to
		 * initialize devices based on DT we'll likely panic
		 * instead of returning here.
		 */
		IMSG("No non-secure external DT");
		return;
	}

	if (!core_mmu_add_mapping(MEM_AREA_EXT_DT, phys_dt, CFG_DTB_MAX_SIZE))
		panic("Failed to map external DTB");

	fdt = phys_to_virt(phys_dt, MEM_AREA_EXT_DT);
	if (!fdt)
		panic();

	dt->blob = fdt;

	ret = init_dt_overlay(dt, CFG_DTB_MAX_SIZE);
	if (ret < 0) {
		EMSG("Device Tree Overlay init fail @ %#lx: error %d",
		     phys_dt, ret);
		panic();
	}

	ret = fdt_open_into(fdt, fdt, CFG_DTB_MAX_SIZE);
	if (ret < 0) {
		EMSG("Invalid Device Tree at %#lx: error %d", phys_dt, ret);
		panic();
	}

	IMSG("Non-secure external DT found");
}

static int mark_tzdram_as_reserved(struct dt_descriptor *dt)
{
	return add_res_mem_dt_node(dt, "optee_core", CFG_TZDRAM_START,
				   CFG_TZDRAM_SIZE);
}

static void update_external_dt(void)
{
	struct dt_descriptor *dt = &external_dt;

	if (!dt->blob)
		return;

	if (add_optee_dt_node(dt))
		panic("Failed to add OP-TEE Device Tree node");

	if (config_psci(dt))
		panic("Failed to config PSCI");

#ifdef CFG_CORE_RESERVED_SHM
	if (mark_static_shm_as_reserved(dt))
		panic("Failed to config non-secure memory");
#endif

	if (mark_tzdram_as_reserved(dt))
		panic("Failed to config secure memory");
}
#else /*CFG_DT*/
void *get_external_dt(void)
{
	return NULL;
}

static void init_external_dt(unsigned long phys_dt __unused)
{
}

static void update_external_dt(void)
{
}

#ifdef CFG_CORE_DYN_SHM
static struct core_mmu_phys_mem *get_nsec_memory(void *fdt __unused,
						 size_t *nelems __unused)
{
	return NULL;
}
#endif /*CFG_CORE_DYN_SHM*/
#endif /*!CFG_DT*/

#ifdef CFG_CORE_DYN_SHM
static void discover_nsec_memory(void)
{
	struct core_mmu_phys_mem *mem;
	const struct core_mmu_phys_mem *mem_begin = NULL;
	const struct core_mmu_phys_mem *mem_end = NULL;
	size_t nelems;
	void *fdt = get_external_dt();

	if (fdt) {
		mem = get_nsec_memory(fdt, &nelems);
		if (mem) {
			core_mmu_set_discovered_nsec_ddr(mem, nelems);
			return;
		}

		DMSG("No non-secure memory found in FDT");
	}

	mem_begin = phys_ddr_overall_begin;
	mem_end = phys_ddr_overall_end;
	nelems = mem_end - mem_begin;
	if (nelems) {
		/*
		 * A platform cannot use both register_ddr() and the now
		 * deprecated register_dynamic_shm().
		 */
		assert(phys_ddr_overall_compat_begin ==
		       phys_ddr_overall_compat_end);
	} else {
		mem_begin = phys_ddr_overall_compat_begin;
		mem_end = phys_ddr_overall_compat_end;
		nelems = mem_end - mem_begin;
		if (!nelems)
			return;
		DMSG("Warning register_dynamic_shm() is deprecated, please use register_ddr() instead");
	}

	mem = nex_calloc(nelems, sizeof(*mem));
	if (!mem)
		panic();

	memcpy(mem, phys_ddr_overall_begin, sizeof(*mem) * nelems);
	core_mmu_set_discovered_nsec_ddr(mem, nelems);
}
#else /*CFG_CORE_DYN_SHM*/
static void discover_nsec_memory(void)
{
}
#endif /*!CFG_CORE_DYN_SHM*/

void init_tee_runtime(void)
{
#ifdef CFG_VIRTUALIZATION
	/* We need to initialize the pool for every virtual guest partition */
	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
#endif

#ifndef CFG_WITH_PAGER
	/* Pager initializes TA RAM early */
	teecore_init_ta_ram();
#endif
	call_initcalls();
}

static void init_primary(unsigned long pageable_part, unsigned long nsec_entry)
{
	/*
	 * Mask asynchronous exceptions before switching to the thread vector
	 * as the thread handler requires those to be masked while
	 * executing with the temporary stack. The thread subsystem also
	 * asserts that the foreign interrupts are blocked when using most of
	 * its functions.
	 */
	thread_set_exceptions(THREAD_EXCP_ALL);
	primary_save_cntfrq();
	init_vfp_sec();
	/*
	 * Pager: init_runtime() calls thread_kernel_enable_vfp() so we must
	 * set a current thread right now to avoid a chicken-and-egg problem
	 * (thread_init_boot_thread() sets the current thread but needs
	 * things set by init_runtime()).
	 */
	thread_get_core_local()->curr_thread = 0;
	init_runtime(pageable_part);

	if (IS_ENABLED(CFG_VIRTUALIZATION)) {
		/*
		 * Virtualization: We can't initialize threads right now
		 * because threads belong to the "tee" part and will be
		 * initialized separately for each new virtual guest. So,
		 * we'll clear "curr_thread" and call it done.
		 */
		thread_get_core_local()->curr_thread = -1;
	} else {
		thread_init_boot_thread();
	}
	thread_init_primary();
	thread_init_per_cpu();
	init_sec_mon(nsec_entry);
}

/*
 * Note: this function is weak just to make it possible to exclude it from
 * the unpaged area.
 */
void __weak paged_init_primary(unsigned long fdt)
{
	init_external_dt(fdt);
	tpm_map_log_area(get_external_dt());
	discover_nsec_memory();
	update_external_dt();
	configure_console_from_dt();

	IMSG("OP-TEE version: %s", core_v_str);
	IMSG("Primary CPU initializing");
#ifdef CFG_CORE_ASLR
	DMSG("Executing at offset %#lx with virtual load address %#"PRIxVA,
	     (unsigned long)boot_mmu_config.load_offset, VCORE_START_VA);
#endif

	main_init_gic();
	init_vfp_nsec();
#ifndef CFG_VIRTUALIZATION
	init_tee_runtime();
#endif
#ifdef CFG_VIRTUALIZATION
	IMSG("Initializing virtualization support");
	core_mmu_init_virtualization();
#endif
	call_finalcalls();
	IMSG("Primary CPU switching to normal world boot");
}

static void init_secondary_helper(unsigned long nsec_entry)
{
	IMSG("Secondary CPU %zu initializing", get_core_pos());

	/*
	 * Mask asynchronous exceptions before switching to the thread vector
	 * as the thread handler requires those to be masked while
	 * executing with the temporary stack. The thread subsystem also
	 * asserts that the foreign interrupts are blocked when using most of
	 * its functions.
	 */
	thread_set_exceptions(THREAD_EXCP_ALL);

	secondary_init_cntfrq();
	thread_init_per_cpu();
	init_sec_mon(nsec_entry);
	main_secondary_init_gic();
	init_vfp_sec();
	init_vfp_nsec();

	IMSG("Secondary CPU %zu switching to normal world boot",
	     get_core_pos());
}

/*
 * Note: this function is weak just to make it possible to exclude it from
 * the unpaged area so that it lies in the init area.
 */
void __weak boot_init_primary(unsigned long pageable_part,
			      unsigned long nsec_entry __maybe_unused,
			      unsigned long fdt)
{
	unsigned long e = PADDR_INVALID;

#if !defined(CFG_WITH_ARM_TRUSTED_FW)
	e = nsec_entry;
#endif

	init_primary(pageable_part, e);
	paged_init_primary(fdt);
}

#if defined(CFG_WITH_ARM_TRUSTED_FW)
unsigned long boot_cpu_on_handler(unsigned long a0 __maybe_unused,
				  unsigned long a1 __unused)
{
	init_secondary_helper(PADDR_INVALID);
	return 0;
}
#else
void boot_init_secondary(unsigned long nsec_entry)
{
	init_secondary_helper(nsec_entry);
}
#endif

#if defined(CFG_BOOT_SECONDARY_REQUEST)
void boot_set_core_ns_entry(size_t core_idx, uintptr_t entry,
			    uintptr_t context_id)
{
	ns_entry_contexts[core_idx].entry_point = entry;
	ns_entry_contexts[core_idx].context_id = context_id;
	dsb_ishst();
}

int boot_core_release(size_t core_idx, paddr_t entry)
{
	if (!core_idx || core_idx >= CFG_TEE_CORE_NB_CORE)
		return -1;

	ns_entry_contexts[core_idx].entry_point = entry;
	dmb();
	spin_table[core_idx] = 1;
	dsb();
	sev();

	return 0;
}

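/*
 * Release/entry handshake: boot_core_release() above publishes the entry
 * point before setting the spin flag (dmb() orders the two stores) and then
 * wakes the core with dsb() + sev(). boot_core_hpen() below waits in wfe()
 * until its spin_table entry goes non-zero and issues dmb() before the
 * entry context is read, so the published entry point is observed only
 * after the flag.
 */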
/*
 * Spin until a secondary boot request arrives, then return with the
 * secondary core entry address.
 */
struct ns_entry_context *boot_core_hpen(void)
{
#ifdef CFG_PSCI_ARM32
	return &ns_entry_contexts[get_core_pos()];
#else
	do {
		wfe();
	} while (!spin_table[get_core_pos()]);
	dmb();
	return &ns_entry_contexts[get_core_pos()];
#endif
}
#endif

#if defined(CFG_CORE_ASLR)
#if defined(CFG_DT)
unsigned long __weak get_aslr_seed(void *fdt)
{
	int rc = fdt_check_header(fdt);
	const uint64_t *seed = NULL;
	int offs = 0;
	int len = 0;

	if (rc) {
		DMSG("Bad fdt: %d", rc);
		return 0;
	}

	offs = fdt_path_offset(fdt, "/secure-chosen");
	if (offs < 0) {
		DMSG("Cannot find /secure-chosen");
		return 0;
	}
	seed = fdt_getprop(fdt, offs, "kaslr-seed", &len);
	if (!seed || len != sizeof(*seed)) {
		DMSG("Cannot find valid kaslr-seed");
		return 0;
	}

	return fdt64_to_cpu(*seed);
}
#else /*!CFG_DT*/
unsigned long __weak get_aslr_seed(void *fdt __unused)
{
	DMSG("Warning: no ASLR seed");
	return 0;
}
#endif /*!CFG_DT*/
#endif /*CFG_CORE_ASLR*/