1 // SPDX-License-Identifier: BSD-2-Clause 2 /* 3 * Copyright (c) 2020-2024, Arm Limited. 4 */ 5 #include <crypto/crypto.h> 6 #include <initcall.h> 7 #include <kernel/boot.h> 8 #include <kernel/embedded_ts.h> 9 #include <kernel/ldelf_loader.h> 10 #include <kernel/secure_partition.h> 11 #include <kernel/spinlock.h> 12 #include <kernel/spmc_sp_handler.h> 13 #include <kernel/thread_private.h> 14 #include <kernel/thread_spmc.h> 15 #include <kernel/tpm.h> 16 #include <kernel/ts_store.h> 17 #include <ldelf.h> 18 #include <libfdt.h> 19 #include <mm/core_mmu.h> 20 #include <mm/fobj.h> 21 #include <mm/mobj.h> 22 #include <mm/vm.h> 23 #include <optee_ffa.h> 24 #include <stdio.h> 25 #include <string.h> 26 #include <tee_api_types.h> 27 #include <tee/uuid.h> 28 #include <trace.h> 29 #include <types_ext.h> 30 #include <utee_defines.h> 31 #include <util.h> 32 #include <zlib.h> 33 34 #define BOUNCE_BUFFER_SIZE 4096 35 36 #define SP_MANIFEST_ATTR_READ BIT(0) 37 #define SP_MANIFEST_ATTR_WRITE BIT(1) 38 #define SP_MANIFEST_ATTR_EXEC BIT(2) 39 #define SP_MANIFEST_ATTR_NSEC BIT(3) 40 #define SP_MANIFEST_ATTR_GP BIT(4) 41 42 #define SP_MANIFEST_ATTR_RO (SP_MANIFEST_ATTR_READ) 43 #define SP_MANIFEST_ATTR_RW (SP_MANIFEST_ATTR_READ | \ 44 SP_MANIFEST_ATTR_WRITE) 45 #define SP_MANIFEST_ATTR_RX (SP_MANIFEST_ATTR_READ | \ 46 SP_MANIFEST_ATTR_EXEC) 47 #define SP_MANIFEST_ATTR_RWX (SP_MANIFEST_ATTR_READ | \ 48 SP_MANIFEST_ATTR_WRITE | \ 49 SP_MANIFEST_ATTR_EXEC) 50 51 #define SP_MANIFEST_FLAG_NOBITS BIT(0) 52 53 #define SP_MANIFEST_NS_INT_QUEUED (0x0) 54 #define SP_MANIFEST_NS_INT_MANAGED_EXIT (0x1) 55 #define SP_MANIFEST_NS_INT_SIGNALED (0x2) 56 57 #define SP_MANIFEST_EXEC_STATE_AARCH64 (0x0) 58 #define SP_MANIFEST_EXEC_STATE_AARCH32 (0x1) 59 60 #define SP_MANIFEST_DIRECT_REQ_RECEIVE BIT(0) 61 #define SP_MANIFEST_DIRECT_REQ_SEND BIT(1) 62 #define SP_MANIFEST_INDIRECT_REQ BIT(2) 63 64 #define SP_MANIFEST_VM_CREATED_MSG BIT(0) 65 #define SP_MANIFEST_VM_DESTROYED_MSG BIT(1) 66 67 #define SP_PKG_HEADER_MAGIC (0x474b5053) 68 #define SP_PKG_HEADER_VERSION_V1 (0x1) 69 #define SP_PKG_HEADER_VERSION_V2 (0x2) 70 71 struct sp_pkg_header { 72 uint32_t magic; 73 uint32_t version; 74 uint32_t pm_offset; 75 uint32_t pm_size; 76 uint32_t img_offset; 77 uint32_t img_size; 78 }; 79 80 struct fip_sp_head fip_sp_list = STAILQ_HEAD_INITIALIZER(fip_sp_list); 81 82 static const struct ts_ops sp_ops; 83 84 /* List that holds all of the loaded SP's */ 85 static struct sp_sessions_head open_sp_sessions = 86 TAILQ_HEAD_INITIALIZER(open_sp_sessions); 87 88 static const struct embedded_ts *find_secure_partition(const TEE_UUID *uuid) 89 { 90 const struct sp_image *sp = NULL; 91 const struct fip_sp *fip_sp = NULL; 92 93 for_each_secure_partition(sp) { 94 if (!memcmp(&sp->image.uuid, uuid, sizeof(*uuid))) 95 return &sp->image; 96 } 97 98 for_each_fip_sp(fip_sp) { 99 if (!memcmp(&fip_sp->sp_img.image.uuid, uuid, sizeof(*uuid))) 100 return &fip_sp->sp_img.image; 101 } 102 103 return NULL; 104 } 105 106 bool is_sp_ctx(struct ts_ctx *ctx) 107 { 108 return ctx && (ctx->ops == &sp_ops); 109 } 110 111 static void set_sp_ctx_ops(struct ts_ctx *ctx) 112 { 113 ctx->ops = &sp_ops; 114 } 115 116 struct sp_session *sp_get_session(uint32_t session_id) 117 { 118 struct sp_session *s = NULL; 119 120 TAILQ_FOREACH(s, &open_sp_sessions, link) { 121 if (s->endpoint_id == session_id) 122 return s; 123 } 124 125 return NULL; 126 } 127 128 TEE_Result sp_partition_info_get(uint32_t ffa_vers, void *buf, size_t buf_size, 129 const TEE_UUID *ffa_uuid, size_t *elem_count, 130 bool count_only) 131 { 132 TEE_Result res = TEE_SUCCESS; 133 struct sp_session *s = NULL; 134 135 TAILQ_FOREACH(s, &open_sp_sessions, link) { 136 if (ffa_uuid && 137 memcmp(&s->ffa_uuid, ffa_uuid, sizeof(*ffa_uuid))) 138 continue; 139 140 if (s->state == sp_dead) 141 continue; 142 if (!count_only && !res) { 143 uint32_t uuid_words[4] = { 0 }; 144 145 tee_uuid_to_octets((uint8_t *)uuid_words, &s->ffa_uuid); 146 res = spmc_fill_partition_entry(ffa_vers, buf, buf_size, 147 *elem_count, 148 s->endpoint_id, 1, 149 s->props, uuid_words); 150 } 151 *elem_count += 1; 152 } 153 154 return res; 155 } 156 157 bool sp_has_exclusive_access(struct sp_mem_map_region *mem, 158 struct user_mode_ctx *uctx) 159 { 160 /* 161 * Check that we have access to the region if it is supposed to be 162 * mapped to the current context. 163 */ 164 if (uctx) { 165 struct vm_region *region = NULL; 166 167 /* Make sure that each mobj belongs to the SP */ 168 TAILQ_FOREACH(region, &uctx->vm_info.regions, link) { 169 if (region->mobj == mem->mobj) 170 break; 171 } 172 173 if (!region) 174 return false; 175 } 176 177 /* Check that it is not shared with another SP */ 178 return !sp_mem_is_shared(mem); 179 } 180 181 static bool endpoint_id_is_valid(uint32_t id) 182 { 183 /* 184 * These IDs are assigned at the SPMC init so already have valid values 185 * by the time this function gets first called 186 */ 187 return id != spmd_id && id != spmc_id && id != optee_endpoint_id && 188 id >= FFA_SWD_ID_MIN && id <= FFA_SWD_ID_MAX; 189 } 190 191 static TEE_Result new_session_id(uint16_t *endpoint_id) 192 { 193 uint32_t id = 0; 194 195 /* Find the first available endpoint id */ 196 for (id = FFA_SWD_ID_MIN; id <= FFA_SWD_ID_MAX; id++) { 197 if (endpoint_id_is_valid(id) && !sp_get_session(id)) { 198 *endpoint_id = id; 199 return TEE_SUCCESS; 200 } 201 } 202 203 return TEE_ERROR_BAD_FORMAT; 204 } 205 206 static TEE_Result sp_create_ctx(const TEE_UUID *bin_uuid, struct sp_session *s) 207 { 208 TEE_Result res = TEE_SUCCESS; 209 struct sp_ctx *spc = NULL; 210 211 /* Register context */ 212 spc = calloc(1, sizeof(struct sp_ctx)); 213 if (!spc) 214 return TEE_ERROR_OUT_OF_MEMORY; 215 216 spc->open_session = s; 217 s->ts_sess.ctx = &spc->ts_ctx; 218 spc->ts_ctx.uuid = *bin_uuid; 219 220 res = vm_info_init(&spc->uctx, &spc->ts_ctx); 221 if (res) 222 goto err; 223 224 set_sp_ctx_ops(&spc->ts_ctx); 225 226 #ifdef CFG_TA_PAUTH 227 crypto_rng_read(&spc->uctx.keys, sizeof(spc->uctx.keys)); 228 #endif 229 230 return TEE_SUCCESS; 231 232 err: 233 free(spc); 234 return res; 235 } 236 237 /* 238 * Insert a new sp_session to the sessions list, so that it is ordered 239 * by boot_order. 240 */ 241 static void insert_session_ordered(struct sp_sessions_head *open_sessions, 242 struct sp_session *session) 243 { 244 struct sp_session *s = NULL; 245 246 if (!open_sessions || !session) 247 return; 248 249 TAILQ_FOREACH(s, &open_sp_sessions, link) { 250 if (s->boot_order > session->boot_order) 251 break; 252 } 253 254 if (!s) 255 TAILQ_INSERT_TAIL(open_sessions, session, link); 256 else 257 TAILQ_INSERT_BEFORE(s, session, link); 258 } 259 260 static TEE_Result sp_create_session(struct sp_sessions_head *open_sessions, 261 const TEE_UUID *bin_uuid, 262 const uint32_t boot_order, 263 struct sp_session **sess) 264 { 265 TEE_Result res = TEE_SUCCESS; 266 struct sp_session *s = calloc(1, sizeof(struct sp_session)); 267 268 if (!s) 269 return TEE_ERROR_OUT_OF_MEMORY; 270 271 s->boot_order = boot_order; 272 273 /* Other properties are filled later, based on the SP's manifest */ 274 s->props = FFA_PART_PROP_IS_PE_ID; 275 276 res = new_session_id(&s->endpoint_id); 277 if (res) 278 goto err; 279 280 DMSG("Loading Secure Partition %pUl", (void *)bin_uuid); 281 res = sp_create_ctx(bin_uuid, s); 282 if (res) 283 goto err; 284 285 insert_session_ordered(open_sessions, s); 286 *sess = s; 287 return TEE_SUCCESS; 288 289 err: 290 free(s); 291 return res; 292 } 293 294 static TEE_Result sp_init_set_registers(struct sp_ctx *ctx) 295 { 296 struct thread_ctx_regs *sp_regs = &ctx->sp_regs; 297 298 memset(sp_regs, 0, sizeof(*sp_regs)); 299 sp_regs->sp = ctx->uctx.stack_ptr; 300 sp_regs->pc = ctx->uctx.entry_func; 301 302 return TEE_SUCCESS; 303 } 304 305 TEE_Result sp_map_shared(struct sp_session *s, 306 struct sp_mem_receiver *receiver, 307 struct sp_mem *smem, 308 uint64_t *va) 309 { 310 TEE_Result res = TEE_SUCCESS; 311 struct sp_ctx *ctx = NULL; 312 uint32_t perm = TEE_MATTR_UR; 313 struct sp_mem_map_region *reg = NULL; 314 315 ctx = to_sp_ctx(s->ts_sess.ctx); 316 317 /* Get the permission */ 318 if (receiver->perm.perm & FFA_MEM_ACC_EXE) 319 perm |= TEE_MATTR_UX; 320 321 if (receiver->perm.perm & FFA_MEM_ACC_RW) { 322 if (receiver->perm.perm & FFA_MEM_ACC_EXE) 323 return TEE_ERROR_ACCESS_CONFLICT; 324 325 perm |= TEE_MATTR_UW; 326 } 327 /* 328 * Currently we don't support passing a va. We can't guarantee that the 329 * full region will be mapped in a contiguous region. A smem->region can 330 * have multiple mobj for one share. Currently there doesn't seem to be 331 * an option to guarantee that these will be mapped in a contiguous va 332 * space. 333 */ 334 if (*va) 335 return TEE_ERROR_NOT_SUPPORTED; 336 337 SLIST_FOREACH(reg, &smem->regions, link) { 338 res = vm_map(&ctx->uctx, va, reg->page_count * SMALL_PAGE_SIZE, 339 perm, 0, reg->mobj, reg->page_offset); 340 341 if (res != TEE_SUCCESS) { 342 EMSG("Failed to map memory region %#"PRIx32, res); 343 return res; 344 } 345 } 346 return TEE_SUCCESS; 347 } 348 349 TEE_Result sp_unmap_ffa_regions(struct sp_session *s, struct sp_mem *smem) 350 { 351 TEE_Result res = TEE_SUCCESS; 352 vaddr_t vaddr = 0; 353 size_t len = 0; 354 struct sp_ctx *ctx = to_sp_ctx(s->ts_sess.ctx); 355 struct sp_mem_map_region *reg = NULL; 356 357 SLIST_FOREACH(reg, &smem->regions, link) { 358 vaddr = (vaddr_t)sp_mem_get_va(&ctx->uctx, reg->page_offset, 359 reg->mobj); 360 len = reg->page_count * SMALL_PAGE_SIZE; 361 362 res = vm_unmap(&ctx->uctx, vaddr, len); 363 if (res != TEE_SUCCESS) 364 return res; 365 } 366 367 return TEE_SUCCESS; 368 } 369 370 static TEE_Result sp_dt_get_u64(const void *fdt, int node, const char *property, 371 uint64_t *value) 372 { 373 const fdt64_t *p = NULL; 374 int len = 0; 375 376 p = fdt_getprop(fdt, node, property, &len); 377 if (!p) 378 return TEE_ERROR_ITEM_NOT_FOUND; 379 380 if (len != sizeof(*p)) 381 return TEE_ERROR_BAD_FORMAT; 382 383 *value = fdt64_ld(p); 384 385 return TEE_SUCCESS; 386 } 387 388 static TEE_Result sp_dt_get_u32(const void *fdt, int node, const char *property, 389 uint32_t *value) 390 { 391 const fdt32_t *p = NULL; 392 int len = 0; 393 394 p = fdt_getprop(fdt, node, property, &len); 395 if (!p) 396 return TEE_ERROR_ITEM_NOT_FOUND; 397 398 if (len != sizeof(*p)) 399 return TEE_ERROR_BAD_FORMAT; 400 401 *value = fdt32_to_cpu(*p); 402 403 return TEE_SUCCESS; 404 } 405 406 static TEE_Result sp_dt_get_u16(const void *fdt, int node, const char *property, 407 uint16_t *value) 408 { 409 const fdt16_t *p = NULL; 410 int len = 0; 411 412 p = fdt_getprop(fdt, node, property, &len); 413 if (!p) 414 return TEE_ERROR_ITEM_NOT_FOUND; 415 416 if (len != sizeof(*p)) 417 return TEE_ERROR_BAD_FORMAT; 418 419 *value = fdt16_to_cpu(*p); 420 421 return TEE_SUCCESS; 422 } 423 424 static TEE_Result sp_dt_get_uuid(const void *fdt, int node, 425 const char *property, TEE_UUID *uuid) 426 { 427 uint32_t uuid_array[4] = { 0 }; 428 const fdt32_t *p = NULL; 429 int len = 0; 430 int i = 0; 431 432 p = fdt_getprop(fdt, node, property, &len); 433 if (!p) 434 return TEE_ERROR_ITEM_NOT_FOUND; 435 436 if (len != sizeof(TEE_UUID)) 437 return TEE_ERROR_BAD_FORMAT; 438 439 for (i = 0; i < 4; i++) 440 uuid_array[i] = fdt32_to_cpu(p[i]); 441 442 tee_uuid_from_octets(uuid, (uint8_t *)uuid_array); 443 444 return TEE_SUCCESS; 445 } 446 447 static TEE_Result sp_is_elf_format(const void *fdt, int sp_node, 448 bool *is_elf_format) 449 { 450 TEE_Result res = TEE_SUCCESS; 451 uint32_t elf_format = 0; 452 453 res = sp_dt_get_u32(fdt, sp_node, "elf-format", &elf_format); 454 if (res != TEE_SUCCESS && res != TEE_ERROR_ITEM_NOT_FOUND) 455 return res; 456 457 *is_elf_format = (elf_format != 0); 458 459 return TEE_SUCCESS; 460 } 461 462 static TEE_Result sp_binary_open(const TEE_UUID *uuid, 463 const struct ts_store_ops **ops, 464 struct ts_store_handle **handle) 465 { 466 TEE_Result res = TEE_ERROR_ITEM_NOT_FOUND; 467 468 SCATTERED_ARRAY_FOREACH(*ops, sp_stores, struct ts_store_ops) { 469 res = (*ops)->open(uuid, handle); 470 if (res != TEE_ERROR_ITEM_NOT_FOUND && 471 res != TEE_ERROR_STORAGE_NOT_AVAILABLE) 472 break; 473 } 474 475 return res; 476 } 477 478 static TEE_Result load_binary_sp(struct ts_session *s, 479 struct user_mode_ctx *uctx) 480 { 481 size_t bin_size = 0, bin_size_rounded = 0, bin_page_count = 0; 482 size_t bb_size = ROUNDUP(BOUNCE_BUFFER_SIZE, SMALL_PAGE_SIZE); 483 size_t bb_num_pages = bb_size / SMALL_PAGE_SIZE; 484 const struct ts_store_ops *store_ops = NULL; 485 struct ts_store_handle *handle = NULL; 486 TEE_Result res = TEE_SUCCESS; 487 tee_mm_entry_t *mm = NULL; 488 struct fobj *fobj = NULL; 489 struct mobj *mobj = NULL; 490 uaddr_t base_addr = 0; 491 uint32_t vm_flags = 0; 492 unsigned int idx = 0; 493 vaddr_t va = 0; 494 495 if (!s || !uctx) 496 return TEE_ERROR_BAD_PARAMETERS; 497 498 DMSG("Loading raw binary format SP %pUl", &uctx->ts_ctx->uuid); 499 500 /* Initialize the bounce buffer */ 501 fobj = fobj_sec_mem_alloc(bb_num_pages); 502 mobj = mobj_with_fobj_alloc(fobj, NULL, TEE_MATTR_MEM_TYPE_TAGGED); 503 fobj_put(fobj); 504 if (!mobj) 505 return TEE_ERROR_OUT_OF_MEMORY; 506 507 res = vm_map(uctx, &va, bb_size, TEE_MATTR_PRW, 0, mobj, 0); 508 mobj_put(mobj); 509 if (res) 510 return res; 511 512 uctx->bbuf = (uint8_t *)va; 513 uctx->bbuf_size = BOUNCE_BUFFER_SIZE; 514 515 vm_set_ctx(uctx->ts_ctx); 516 517 /* Find TS store and open SP binary */ 518 res = sp_binary_open(&uctx->ts_ctx->uuid, &store_ops, &handle); 519 if (res != TEE_SUCCESS) { 520 EMSG("Failed to open SP binary"); 521 return res; 522 } 523 524 /* Query binary size and calculate page count */ 525 res = store_ops->get_size(handle, &bin_size); 526 if (res != TEE_SUCCESS) 527 goto err; 528 529 if (ROUNDUP_OVERFLOW(bin_size, SMALL_PAGE_SIZE, &bin_size_rounded)) { 530 res = TEE_ERROR_OVERFLOW; 531 goto err; 532 } 533 534 bin_page_count = bin_size_rounded / SMALL_PAGE_SIZE; 535 536 /* Allocate memory */ 537 mm = tee_mm_alloc(&tee_mm_sec_ddr, bin_size_rounded); 538 if (!mm) { 539 res = TEE_ERROR_OUT_OF_MEMORY; 540 goto err; 541 } 542 543 base_addr = tee_mm_get_smem(mm); 544 545 /* Create mobj */ 546 mobj = sp_mem_new_mobj(bin_page_count, TEE_MATTR_MEM_TYPE_CACHED, true); 547 if (!mobj) { 548 res = TEE_ERROR_OUT_OF_MEMORY; 549 goto err_free_tee_mm; 550 } 551 552 res = sp_mem_add_pages(mobj, &idx, base_addr, bin_page_count); 553 if (res) 554 goto err_free_mobj; 555 556 /* Map memory area for the SP binary */ 557 va = 0; 558 res = vm_map(uctx, &va, bin_size_rounded, TEE_MATTR_URWX, 559 vm_flags, mobj, 0); 560 if (res) 561 goto err_free_mobj; 562 563 /* Read SP binary into the previously mapped memory area */ 564 res = store_ops->read(handle, NULL, (void *)va, bin_size); 565 if (res) 566 goto err_unmap; 567 568 /* Set memory protection to allow execution */ 569 res = vm_set_prot(uctx, va, bin_size_rounded, TEE_MATTR_UX); 570 if (res) 571 goto err_unmap; 572 573 mobj_put(mobj); 574 store_ops->close(handle); 575 576 /* The entry point must be at the beginning of the SP binary. */ 577 uctx->entry_func = va; 578 uctx->load_addr = va; 579 uctx->is_32bit = false; 580 581 s->handle_scall = s->ctx->ops->handle_scall; 582 583 return TEE_SUCCESS; 584 585 err_unmap: 586 vm_unmap(uctx, va, bin_size_rounded); 587 588 err_free_mobj: 589 mobj_put(mobj); 590 591 err_free_tee_mm: 592 tee_mm_free(mm); 593 594 err: 595 store_ops->close(handle); 596 597 return res; 598 } 599 600 static TEE_Result sp_open_session(struct sp_session **sess, 601 struct sp_sessions_head *open_sessions, 602 const TEE_UUID *ffa_uuid, 603 const TEE_UUID *bin_uuid, 604 const uint32_t boot_order, 605 const void *fdt) 606 { 607 TEE_Result res = TEE_SUCCESS; 608 struct sp_session *s = NULL; 609 struct sp_ctx *ctx = NULL; 610 bool is_elf_format = false; 611 612 if (!find_secure_partition(bin_uuid)) 613 return TEE_ERROR_ITEM_NOT_FOUND; 614 615 res = sp_create_session(open_sessions, bin_uuid, boot_order, &s); 616 if (res != TEE_SUCCESS) { 617 DMSG("sp_create_session failed %#"PRIx32, res); 618 return res; 619 } 620 621 ctx = to_sp_ctx(s->ts_sess.ctx); 622 assert(ctx); 623 if (!ctx) 624 return TEE_ERROR_TARGET_DEAD; 625 *sess = s; 626 627 ts_push_current_session(&s->ts_sess); 628 629 res = sp_is_elf_format(fdt, 0, &is_elf_format); 630 if (res == TEE_SUCCESS) { 631 if (is_elf_format) { 632 /* Load the SP using ldelf. */ 633 ldelf_load_ldelf(&ctx->uctx); 634 res = ldelf_init_with_ldelf(&s->ts_sess, &ctx->uctx); 635 } else { 636 /* Raw binary format SP */ 637 res = load_binary_sp(&s->ts_sess, &ctx->uctx); 638 } 639 } else { 640 EMSG("Failed to detect SP format"); 641 } 642 643 if (res != TEE_SUCCESS) { 644 EMSG("Failed loading SP %#"PRIx32, res); 645 ts_pop_current_session(); 646 return TEE_ERROR_TARGET_DEAD; 647 } 648 649 /* 650 * Make the SP ready for its first run. 651 * Set state to busy to prevent other endpoints from sending messages to 652 * the SP before its boot phase is done. 653 */ 654 s->state = sp_busy; 655 s->caller_id = 0; 656 sp_init_set_registers(ctx); 657 memcpy(&s->ffa_uuid, ffa_uuid, sizeof(*ffa_uuid)); 658 ts_pop_current_session(); 659 660 return TEE_SUCCESS; 661 } 662 663 static TEE_Result fdt_get_uuid(const void * const fdt, TEE_UUID *uuid) 664 { 665 const struct fdt_property *description = NULL; 666 int description_name_len = 0; 667 668 if (fdt_node_check_compatible(fdt, 0, "arm,ffa-manifest-1.0")) { 669 EMSG("Failed loading SP, manifest not found"); 670 return TEE_ERROR_BAD_PARAMETERS; 671 } 672 673 description = fdt_get_property(fdt, 0, "description", 674 &description_name_len); 675 if (description) 676 DMSG("Loading SP: %s", description->data); 677 678 if (sp_dt_get_uuid(fdt, 0, "uuid", uuid)) { 679 EMSG("Missing or invalid UUID in SP manifest"); 680 return TEE_ERROR_BAD_FORMAT; 681 } 682 683 return TEE_SUCCESS; 684 } 685 686 static TEE_Result copy_and_map_fdt(struct sp_ctx *ctx, const void * const fdt, 687 void **fdt_copy, size_t *mapped_size) 688 { 689 size_t total_size = ROUNDUP(fdt_totalsize(fdt), SMALL_PAGE_SIZE); 690 size_t num_pages = total_size / SMALL_PAGE_SIZE; 691 uint32_t perm = TEE_MATTR_UR | TEE_MATTR_PRW; 692 TEE_Result res = TEE_SUCCESS; 693 struct mobj *m = NULL; 694 struct fobj *f = NULL; 695 vaddr_t va = 0; 696 697 f = fobj_sec_mem_alloc(num_pages); 698 m = mobj_with_fobj_alloc(f, NULL, TEE_MATTR_MEM_TYPE_TAGGED); 699 fobj_put(f); 700 if (!m) 701 return TEE_ERROR_OUT_OF_MEMORY; 702 703 res = vm_map(&ctx->uctx, &va, total_size, perm, 0, m, 0); 704 mobj_put(m); 705 if (res) 706 return res; 707 708 if (fdt_open_into(fdt, (void *)va, total_size)) 709 return TEE_ERROR_GENERIC; 710 711 *fdt_copy = (void *)va; 712 *mapped_size = total_size; 713 714 return res; 715 } 716 717 static void fill_boot_info_1_0(vaddr_t buf, const void *fdt) 718 { 719 struct ffa_boot_info_1_0 *info = (struct ffa_boot_info_1_0 *)buf; 720 static const char fdt_name[16] = "TYPE_DT\0\0\0\0\0\0\0\0"; 721 722 memcpy(&info->magic, "FF-A", 4); 723 info->count = 1; 724 725 COMPILE_TIME_ASSERT(sizeof(info->nvp[0].name) == sizeof(fdt_name)); 726 memcpy(info->nvp[0].name, fdt_name, sizeof(fdt_name)); 727 info->nvp[0].value = (uintptr_t)fdt; 728 info->nvp[0].size = fdt_totalsize(fdt); 729 } 730 731 static void fill_boot_info_1_1(vaddr_t buf, const void *fdt) 732 { 733 size_t desc_offs = ROUNDUP(sizeof(struct ffa_boot_info_header_1_1), 8); 734 struct ffa_boot_info_header_1_1 *header = 735 (struct ffa_boot_info_header_1_1 *)buf; 736 struct ffa_boot_info_1_1 *desc = 737 (struct ffa_boot_info_1_1 *)(buf + desc_offs); 738 739 header->signature = FFA_BOOT_INFO_SIGNATURE; 740 header->version = FFA_BOOT_INFO_VERSION; 741 header->blob_size = desc_offs + sizeof(struct ffa_boot_info_1_1); 742 header->desc_size = sizeof(struct ffa_boot_info_1_1); 743 header->desc_count = 1; 744 header->desc_offset = desc_offs; 745 746 memset(&desc[0].name, 0, sizeof(desc[0].name)); 747 /* Type: Standard boot info (bit[7] == 0), FDT type */ 748 desc[0].type = FFA_BOOT_INFO_TYPE_ID_FDT; 749 /* Flags: Contents field contains an address */ 750 desc[0].flags = FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_ADDR << 751 FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_SHIFT; 752 desc[0].size = fdt_totalsize(fdt); 753 desc[0].contents = (uintptr_t)fdt; 754 } 755 756 static TEE_Result create_and_map_boot_info(struct sp_ctx *ctx, const void *fdt, 757 struct thread_smc_args *args, 758 vaddr_t *va, size_t *mapped_size, 759 uint32_t sp_ffa_version) 760 { 761 size_t total_size = ROUNDUP(CFG_SP_INIT_INFO_MAX_SIZE, SMALL_PAGE_SIZE); 762 size_t num_pages = total_size / SMALL_PAGE_SIZE; 763 uint32_t perm = TEE_MATTR_UR | TEE_MATTR_PRW; 764 TEE_Result res = TEE_SUCCESS; 765 struct fobj *f = NULL; 766 struct mobj *m = NULL; 767 uint32_t info_reg = 0; 768 769 f = fobj_sec_mem_alloc(num_pages); 770 m = mobj_with_fobj_alloc(f, NULL, TEE_MATTR_MEM_TYPE_TAGGED); 771 fobj_put(f); 772 if (!m) 773 return TEE_ERROR_OUT_OF_MEMORY; 774 775 res = vm_map(&ctx->uctx, va, total_size, perm, 0, m, 0); 776 mobj_put(m); 777 if (res) 778 return res; 779 780 *mapped_size = total_size; 781 782 switch (sp_ffa_version) { 783 case MAKE_FFA_VERSION(1, 0): 784 fill_boot_info_1_0(*va, fdt); 785 break; 786 case MAKE_FFA_VERSION(1, 1): 787 fill_boot_info_1_1(*va, fdt); 788 break; 789 default: 790 EMSG("Unknown FF-A version: %#"PRIx32, sp_ffa_version); 791 return TEE_ERROR_NOT_SUPPORTED; 792 } 793 794 res = sp_dt_get_u32(fdt, 0, "gp-register-num", &info_reg); 795 if (res) { 796 if (res == TEE_ERROR_ITEM_NOT_FOUND) { 797 /* If the property is not present, set default to x0 */ 798 info_reg = 0; 799 } else { 800 return TEE_ERROR_BAD_FORMAT; 801 } 802 } 803 804 switch (info_reg) { 805 case 0: 806 args->a0 = *va; 807 break; 808 case 1: 809 args->a1 = *va; 810 break; 811 case 2: 812 args->a2 = *va; 813 break; 814 case 3: 815 args->a3 = *va; 816 break; 817 default: 818 EMSG("Invalid register selected for passing boot info"); 819 return TEE_ERROR_BAD_FORMAT; 820 } 821 822 return TEE_SUCCESS; 823 } 824 825 static TEE_Result handle_fdt_load_relative_mem_regions(struct sp_ctx *ctx, 826 const void *fdt) 827 { 828 int node = 0; 829 int subnode = 0; 830 tee_mm_entry_t *mm = NULL; 831 TEE_Result res = TEE_SUCCESS; 832 833 /* 834 * Memory regions are optional in the SP manifest, it's not an error if 835 * we don't find any. 836 */ 837 node = fdt_node_offset_by_compatible(fdt, 0, 838 "arm,ffa-manifest-memory-regions"); 839 if (node < 0) 840 return TEE_SUCCESS; 841 842 fdt_for_each_subnode(subnode, fdt, node) { 843 uint64_t load_rel_offset = 0; 844 uint32_t attributes = 0; 845 uint64_t base_addr = 0; 846 uint32_t pages_cnt = 0; 847 uint32_t flags = 0; 848 uint32_t perm = 0; 849 size_t size = 0; 850 vaddr_t va = 0; 851 852 mm = NULL; 853 854 /* Load address relative offset of a memory region */ 855 if (!sp_dt_get_u64(fdt, subnode, "load-address-relative-offset", 856 &load_rel_offset)) { 857 va = ctx->uctx.load_addr + load_rel_offset; 858 } else { 859 /* Skip non load address relative memory regions */ 860 continue; 861 } 862 863 if (!sp_dt_get_u64(fdt, subnode, "base-address", &base_addr)) { 864 EMSG("Both base-address and load-address-relative-offset fields are set"); 865 return TEE_ERROR_BAD_FORMAT; 866 } 867 868 /* Size of memory region as count of 4K pages */ 869 if (sp_dt_get_u32(fdt, subnode, "pages-count", &pages_cnt)) { 870 EMSG("Mandatory field is missing: pages-count"); 871 return TEE_ERROR_BAD_FORMAT; 872 } 873 874 if (MUL_OVERFLOW(pages_cnt, SMALL_PAGE_SIZE, &size)) 875 return TEE_ERROR_OVERFLOW; 876 877 /* Memory region attributes */ 878 if (sp_dt_get_u32(fdt, subnode, "attributes", &attributes)) { 879 EMSG("Mandatory field is missing: attributes"); 880 return TEE_ERROR_BAD_FORMAT; 881 } 882 883 /* Check instruction and data access permissions */ 884 switch (attributes & SP_MANIFEST_ATTR_RWX) { 885 case SP_MANIFEST_ATTR_RO: 886 perm = TEE_MATTR_UR; 887 break; 888 case SP_MANIFEST_ATTR_RW: 889 perm = TEE_MATTR_URW; 890 break; 891 case SP_MANIFEST_ATTR_RX: 892 perm = TEE_MATTR_URX; 893 break; 894 default: 895 EMSG("Invalid memory access permissions"); 896 return TEE_ERROR_BAD_FORMAT; 897 } 898 899 if (IS_ENABLED(CFG_TA_BTI) && 900 attributes & SP_MANIFEST_ATTR_GP) { 901 if (!(attributes & SP_MANIFEST_ATTR_RX)) { 902 EMSG("Guard only executable region"); 903 return TEE_ERROR_BAD_FORMAT; 904 } 905 perm |= TEE_MATTR_GUARDED; 906 } 907 908 res = sp_dt_get_u32(fdt, subnode, "load-flags", &flags); 909 if (res != TEE_SUCCESS && res != TEE_ERROR_ITEM_NOT_FOUND) { 910 EMSG("Optional field with invalid value: flags"); 911 return TEE_ERROR_BAD_FORMAT; 912 } 913 914 /* Load relative regions must be secure */ 915 if (attributes & SP_MANIFEST_ATTR_NSEC) { 916 EMSG("Invalid memory security attribute"); 917 return TEE_ERROR_BAD_FORMAT; 918 } 919 920 if (flags & SP_MANIFEST_FLAG_NOBITS) { 921 /* 922 * NOBITS flag is set, which means that loaded binary 923 * doesn't contain this area, so it's need to be 924 * allocated. 925 */ 926 struct mobj *m = NULL; 927 unsigned int idx = 0; 928 929 mm = tee_mm_alloc(&tee_mm_sec_ddr, size); 930 if (!mm) 931 return TEE_ERROR_OUT_OF_MEMORY; 932 933 base_addr = tee_mm_get_smem(mm); 934 935 m = sp_mem_new_mobj(pages_cnt, 936 TEE_MATTR_MEM_TYPE_CACHED, true); 937 if (!m) { 938 res = TEE_ERROR_OUT_OF_MEMORY; 939 goto err_mm_free; 940 } 941 942 res = sp_mem_add_pages(m, &idx, base_addr, pages_cnt); 943 if (res) { 944 mobj_put(m); 945 goto err_mm_free; 946 } 947 948 res = vm_map(&ctx->uctx, &va, size, perm, 0, m, 0); 949 mobj_put(m); 950 if (res) 951 goto err_mm_free; 952 } else { 953 /* 954 * If NOBITS is not present the memory area is already 955 * mapped and only need to set the correct permissions. 956 */ 957 res = vm_set_prot(&ctx->uctx, va, size, perm); 958 if (res) 959 return res; 960 } 961 } 962 963 return TEE_SUCCESS; 964 965 err_mm_free: 966 tee_mm_free(mm); 967 return res; 968 } 969 970 static TEE_Result handle_fdt_dev_regions(struct sp_ctx *ctx, void *fdt) 971 { 972 int node = 0; 973 int subnode = 0; 974 TEE_Result res = TEE_SUCCESS; 975 const char *dt_device_match_table = { 976 "arm,ffa-manifest-device-regions", 977 }; 978 979 /* 980 * Device regions are optional in the SP manifest, it's not an error if 981 * we don't find any 982 */ 983 node = fdt_node_offset_by_compatible(fdt, 0, dt_device_match_table); 984 if (node < 0) 985 return TEE_SUCCESS; 986 987 fdt_for_each_subnode(subnode, fdt, node) { 988 uint64_t base_addr = 0; 989 uint32_t pages_cnt = 0; 990 uint32_t attributes = 0; 991 struct mobj *m = NULL; 992 bool is_secure = true; 993 uint32_t perm = 0; 994 vaddr_t va = 0; 995 unsigned int idx = 0; 996 997 /* 998 * Physical base address of a device MMIO region. 999 * Currently only physically contiguous region is supported. 1000 */ 1001 if (sp_dt_get_u64(fdt, subnode, "base-address", &base_addr)) { 1002 EMSG("Mandatory field is missing: base-address"); 1003 return TEE_ERROR_BAD_FORMAT; 1004 } 1005 1006 /* Total size of MMIO region as count of 4K pages */ 1007 if (sp_dt_get_u32(fdt, subnode, "pages-count", &pages_cnt)) { 1008 EMSG("Mandatory field is missing: pages-count"); 1009 return TEE_ERROR_BAD_FORMAT; 1010 } 1011 1012 /* Data access, instruction access and security attributes */ 1013 if (sp_dt_get_u32(fdt, subnode, "attributes", &attributes)) { 1014 EMSG("Mandatory field is missing: attributes"); 1015 return TEE_ERROR_BAD_FORMAT; 1016 } 1017 1018 /* Check instruction and data access permissions */ 1019 switch (attributes & SP_MANIFEST_ATTR_RWX) { 1020 case SP_MANIFEST_ATTR_RO: 1021 perm = TEE_MATTR_UR; 1022 break; 1023 case SP_MANIFEST_ATTR_RW: 1024 perm = TEE_MATTR_URW; 1025 break; 1026 default: 1027 EMSG("Invalid memory access permissions"); 1028 return TEE_ERROR_BAD_FORMAT; 1029 } 1030 1031 /* 1032 * The SP is a secure endpoint, security attribute can be 1033 * secure or non-secure 1034 */ 1035 if (attributes & SP_MANIFEST_ATTR_NSEC) 1036 is_secure = false; 1037 1038 /* Memory attributes must be Device-nGnRnE */ 1039 m = sp_mem_new_mobj(pages_cnt, TEE_MATTR_MEM_TYPE_STRONGLY_O, 1040 is_secure); 1041 if (!m) 1042 return TEE_ERROR_OUT_OF_MEMORY; 1043 1044 res = sp_mem_add_pages(m, &idx, (paddr_t)base_addr, pages_cnt); 1045 if (res) { 1046 mobj_put(m); 1047 return res; 1048 } 1049 1050 res = vm_map(&ctx->uctx, &va, pages_cnt * SMALL_PAGE_SIZE, 1051 perm, 0, m, 0); 1052 mobj_put(m); 1053 if (res) 1054 return res; 1055 1056 /* 1057 * Overwrite the device region's PA in the fdt with the VA. This 1058 * fdt will be passed to the SP. 1059 */ 1060 res = fdt_setprop_u64(fdt, subnode, "base-address", va); 1061 1062 /* 1063 * Unmap the region if the overwrite failed since the SP won't 1064 * be able to access it without knowing the VA. 1065 */ 1066 if (res) { 1067 vm_unmap(&ctx->uctx, va, pages_cnt * SMALL_PAGE_SIZE); 1068 return res; 1069 } 1070 } 1071 1072 return TEE_SUCCESS; 1073 } 1074 1075 static TEE_Result swap_sp_endpoints(uint32_t endpoint_id, 1076 uint32_t new_endpoint_id) 1077 { 1078 struct sp_session *session = sp_get_session(endpoint_id); 1079 uint32_t manifest_endpoint_id = 0; 1080 1081 /* 1082 * We don't know in which order the SPs are loaded. The endpoint ID 1083 * defined in the manifest could already be generated by 1084 * new_session_id() and used by another SP. If this is the case, we swap 1085 * the ID's of the two SPs. We also have to make sure that the ID's are 1086 * not defined twice in the manifest. 1087 */ 1088 1089 /* The endpoint ID was not assigned yet */ 1090 if (!session) 1091 return TEE_SUCCESS; 1092 1093 /* 1094 * Read the manifest file from the SP who originally had the endpoint. 1095 * We can safely swap the endpoint ID's if the manifest file doesn't 1096 * have an endpoint ID defined. 1097 */ 1098 if (!sp_dt_get_u32(session->fdt, 0, "id", &manifest_endpoint_id)) { 1099 assert(manifest_endpoint_id == endpoint_id); 1100 EMSG("SP: Found duplicated endpoint ID %#"PRIx32, endpoint_id); 1101 return TEE_ERROR_ACCESS_CONFLICT; 1102 } 1103 1104 session->endpoint_id = new_endpoint_id; 1105 1106 return TEE_SUCCESS; 1107 } 1108 1109 static TEE_Result read_manifest_endpoint_id(struct sp_session *s) 1110 { 1111 uint32_t endpoint_id = 0; 1112 1113 /* 1114 * The endpoint ID can be optionally defined in the manifest file. We 1115 * have to map the ID inside the manifest to the SP if it's defined. 1116 * If not, the endpoint ID generated inside new_session_id() will be 1117 * used. 1118 */ 1119 if (!sp_dt_get_u32(s->fdt, 0, "id", &endpoint_id)) { 1120 TEE_Result res = TEE_ERROR_GENERIC; 1121 1122 if (!endpoint_id_is_valid(endpoint_id)) { 1123 EMSG("Invalid endpoint ID 0x%"PRIx32, endpoint_id); 1124 return TEE_ERROR_BAD_FORMAT; 1125 } 1126 1127 res = swap_sp_endpoints(endpoint_id, s->endpoint_id); 1128 if (res) 1129 return res; 1130 1131 DMSG("SP: endpoint ID (0x%"PRIx32") found in manifest", 1132 endpoint_id); 1133 /* Assign the endpoint ID to the current SP */ 1134 s->endpoint_id = endpoint_id; 1135 } 1136 return TEE_SUCCESS; 1137 } 1138 1139 static TEE_Result handle_fdt_mem_regions(struct sp_ctx *ctx, void *fdt) 1140 { 1141 int node = 0; 1142 int subnode = 0; 1143 tee_mm_entry_t *mm = NULL; 1144 TEE_Result res = TEE_SUCCESS; 1145 1146 /* 1147 * Memory regions are optional in the SP manifest, it's not an error if 1148 * we don't find any. 1149 */ 1150 node = fdt_node_offset_by_compatible(fdt, 0, 1151 "arm,ffa-manifest-memory-regions"); 1152 if (node < 0) 1153 return TEE_SUCCESS; 1154 1155 fdt_for_each_subnode(subnode, fdt, node) { 1156 uint64_t load_rel_offset = 0; 1157 bool alloc_needed = false; 1158 uint32_t attributes = 0; 1159 uint64_t base_addr = 0; 1160 uint32_t pages_cnt = 0; 1161 bool is_secure = true; 1162 struct mobj *m = NULL; 1163 unsigned int idx = 0; 1164 uint32_t perm = 0; 1165 size_t size = 0; 1166 vaddr_t va = 0; 1167 1168 mm = NULL; 1169 1170 /* Load address relative offset of a memory region */ 1171 if (!sp_dt_get_u64(fdt, subnode, "load-address-relative-offset", 1172 &load_rel_offset)) { 1173 /* 1174 * At this point the memory region is already mapped by 1175 * handle_fdt_load_relative_mem_regions. 1176 * Only need to set the base-address in the manifest and 1177 * then skip the rest of the mapping process. 1178 */ 1179 va = ctx->uctx.load_addr + load_rel_offset; 1180 res = fdt_setprop_u64(fdt, subnode, "base-address", va); 1181 if (res) 1182 return res; 1183 1184 continue; 1185 } 1186 1187 /* 1188 * Base address of a memory region. 1189 * If not present, we have to allocate the specified memory. 1190 * If present, this field could specify a PA or VA. Currently 1191 * only a PA is supported. 1192 */ 1193 if (sp_dt_get_u64(fdt, subnode, "base-address", &base_addr)) 1194 alloc_needed = true; 1195 1196 /* Size of memory region as count of 4K pages */ 1197 if (sp_dt_get_u32(fdt, subnode, "pages-count", &pages_cnt)) { 1198 EMSG("Mandatory field is missing: pages-count"); 1199 return TEE_ERROR_BAD_FORMAT; 1200 } 1201 1202 if (MUL_OVERFLOW(pages_cnt, SMALL_PAGE_SIZE, &size)) 1203 return TEE_ERROR_OVERFLOW; 1204 1205 /* 1206 * Memory region attributes: 1207 * - Instruction/data access permissions 1208 * - Cacheability/shareability attributes 1209 * - Security attributes 1210 * 1211 * Cacheability/shareability attributes can be ignored for now. 1212 * OP-TEE only supports a single type for normal cached memory 1213 * and currently there is no use case that would require to 1214 * change this. 1215 */ 1216 if (sp_dt_get_u32(fdt, subnode, "attributes", &attributes)) { 1217 EMSG("Mandatory field is missing: attributes"); 1218 return TEE_ERROR_BAD_FORMAT; 1219 } 1220 1221 /* Check instruction and data access permissions */ 1222 switch (attributes & SP_MANIFEST_ATTR_RWX) { 1223 case SP_MANIFEST_ATTR_RO: 1224 perm = TEE_MATTR_UR; 1225 break; 1226 case SP_MANIFEST_ATTR_RW: 1227 perm = TEE_MATTR_URW; 1228 break; 1229 case SP_MANIFEST_ATTR_RX: 1230 perm = TEE_MATTR_URX; 1231 break; 1232 default: 1233 EMSG("Invalid memory access permissions"); 1234 return TEE_ERROR_BAD_FORMAT; 1235 } 1236 1237 if (IS_ENABLED(CFG_TA_BTI) && 1238 attributes & SP_MANIFEST_ATTR_GP) { 1239 if (!(attributes & SP_MANIFEST_ATTR_RX)) { 1240 EMSG("Guard only executable region"); 1241 return TEE_ERROR_BAD_FORMAT; 1242 } 1243 perm |= TEE_MATTR_GUARDED; 1244 } 1245 1246 /* 1247 * The SP is a secure endpoint, security attribute can be 1248 * secure or non-secure. 1249 * The SPMC cannot allocate non-secure memory, i.e. if the base 1250 * address is missing this attribute must be secure. 1251 */ 1252 if (attributes & SP_MANIFEST_ATTR_NSEC) { 1253 if (alloc_needed) { 1254 EMSG("Invalid memory security attribute"); 1255 return TEE_ERROR_BAD_FORMAT; 1256 } 1257 is_secure = false; 1258 } 1259 1260 if (alloc_needed) { 1261 /* Base address is missing, we have to allocate */ 1262 mm = tee_mm_alloc(&tee_mm_sec_ddr, size); 1263 if (!mm) 1264 return TEE_ERROR_OUT_OF_MEMORY; 1265 1266 base_addr = tee_mm_get_smem(mm); 1267 } 1268 1269 m = sp_mem_new_mobj(pages_cnt, TEE_MATTR_MEM_TYPE_CACHED, 1270 is_secure); 1271 if (!m) { 1272 res = TEE_ERROR_OUT_OF_MEMORY; 1273 goto err_mm_free; 1274 } 1275 1276 res = sp_mem_add_pages(m, &idx, base_addr, pages_cnt); 1277 if (res) { 1278 mobj_put(m); 1279 goto err_mm_free; 1280 } 1281 1282 res = vm_map(&ctx->uctx, &va, size, perm, 0, m, 0); 1283 mobj_put(m); 1284 if (res) 1285 goto err_mm_free; 1286 1287 /* 1288 * Overwrite the memory region's base address in the fdt with 1289 * the VA. This fdt will be passed to the SP. 1290 * If the base-address field was not present in the original 1291 * fdt, this function will create it. This doesn't cause issues 1292 * since the necessary extra space has been allocated when 1293 * opening the fdt. 1294 */ 1295 res = fdt_setprop_u64(fdt, subnode, "base-address", va); 1296 1297 /* 1298 * Unmap the region if the overwrite failed since the SP won't 1299 * be able to access it without knowing the VA. 1300 */ 1301 if (res) { 1302 vm_unmap(&ctx->uctx, va, size); 1303 goto err_mm_free; 1304 } 1305 } 1306 1307 return TEE_SUCCESS; 1308 1309 err_mm_free: 1310 tee_mm_free(mm); 1311 return res; 1312 } 1313 1314 static TEE_Result handle_tpm_event_log(struct sp_ctx *ctx, void *fdt) 1315 { 1316 uint32_t perm = TEE_MATTR_URW | TEE_MATTR_PRW; 1317 uint32_t dummy_size __maybe_unused = 0; 1318 TEE_Result res = TEE_SUCCESS; 1319 size_t page_count = 0; 1320 struct fobj *f = NULL; 1321 struct mobj *m = NULL; 1322 vaddr_t log_addr = 0; 1323 size_t log_size = 0; 1324 int node = 0; 1325 1326 node = fdt_node_offset_by_compatible(fdt, 0, "arm,tpm_event_log"); 1327 if (node < 0) 1328 return TEE_SUCCESS; 1329 1330 /* Checking the existence and size of the event log properties */ 1331 if (sp_dt_get_u64(fdt, node, "tpm_event_log_addr", &log_addr)) { 1332 EMSG("tpm_event_log_addr not found or has invalid size"); 1333 return TEE_ERROR_BAD_FORMAT; 1334 } 1335 1336 if (sp_dt_get_u32(fdt, node, "tpm_event_log_size", &dummy_size)) { 1337 EMSG("tpm_event_log_size not found or has invalid size"); 1338 return TEE_ERROR_BAD_FORMAT; 1339 } 1340 1341 /* Validating event log */ 1342 res = tpm_get_event_log_size(&log_size); 1343 if (res) 1344 return res; 1345 1346 if (!log_size) { 1347 EMSG("Empty TPM event log was provided"); 1348 return TEE_ERROR_ITEM_NOT_FOUND; 1349 } 1350 1351 /* Allocating memory area for the event log to share with the SP */ 1352 page_count = ROUNDUP_DIV(log_size, SMALL_PAGE_SIZE); 1353 1354 f = fobj_sec_mem_alloc(page_count); 1355 m = mobj_with_fobj_alloc(f, NULL, TEE_MATTR_MEM_TYPE_TAGGED); 1356 fobj_put(f); 1357 if (!m) 1358 return TEE_ERROR_OUT_OF_MEMORY; 1359 1360 res = vm_map(&ctx->uctx, &log_addr, log_size, perm, 0, m, 0); 1361 mobj_put(m); 1362 if (res) 1363 return res; 1364 1365 /* Copy event log */ 1366 res = tpm_get_event_log((void *)log_addr, &log_size); 1367 if (res) 1368 goto err_unmap; 1369 1370 /* Setting event log details in the manifest */ 1371 res = fdt_setprop_u64(fdt, node, "tpm_event_log_addr", log_addr); 1372 if (res) 1373 goto err_unmap; 1374 1375 res = fdt_setprop_u32(fdt, node, "tpm_event_log_size", log_size); 1376 if (res) 1377 goto err_unmap; 1378 1379 return TEE_SUCCESS; 1380 1381 err_unmap: 1382 vm_unmap(&ctx->uctx, log_addr, log_size); 1383 1384 return res; 1385 } 1386 1387 /* 1388 * Note: this function is called only on the primary CPU. It assumes that the 1389 * features present on the primary CPU are available on all of the secondary 1390 * CPUs as well. 1391 */ 1392 static TEE_Result handle_hw_features(void *fdt) 1393 { 1394 uint32_t val __maybe_unused = 0; 1395 TEE_Result res = TEE_SUCCESS; 1396 int node = 0; 1397 1398 /* 1399 * HW feature descriptions are optional in the SP manifest, it's not an 1400 * error if we don't find any. 1401 */ 1402 node = fdt_node_offset_by_compatible(fdt, 0, "arm,hw-features"); 1403 if (node < 0) 1404 return TEE_SUCCESS; 1405 1406 /* Modify the crc32 property only if it's already present */ 1407 if (!sp_dt_get_u32(fdt, node, "crc32", &val)) { 1408 res = fdt_setprop_u32(fdt, node, "crc32", 1409 feat_crc32_implemented()); 1410 if (res) 1411 return res; 1412 } 1413 1414 return TEE_SUCCESS; 1415 } 1416 1417 static TEE_Result read_ns_interrupts_action(const void *fdt, 1418 struct sp_session *s) 1419 { 1420 TEE_Result res = TEE_ERROR_BAD_PARAMETERS; 1421 1422 res = sp_dt_get_u32(fdt, 0, "ns-interrupts-action", &s->ns_int_mode); 1423 1424 if (res) { 1425 EMSG("Mandatory property is missing: ns-interrupts-action"); 1426 return res; 1427 } 1428 1429 switch (s->ns_int_mode) { 1430 case SP_MANIFEST_NS_INT_QUEUED: 1431 case SP_MANIFEST_NS_INT_SIGNALED: 1432 /* OK */ 1433 break; 1434 1435 case SP_MANIFEST_NS_INT_MANAGED_EXIT: 1436 EMSG("Managed exit is not implemented"); 1437 return TEE_ERROR_NOT_IMPLEMENTED; 1438 1439 default: 1440 EMSG("Invalid ns-interrupts-action value: %"PRIu32, 1441 s->ns_int_mode); 1442 return TEE_ERROR_BAD_PARAMETERS; 1443 } 1444 1445 return TEE_SUCCESS; 1446 } 1447 1448 static TEE_Result read_ffa_version(const void *fdt, struct sp_session *s) 1449 { 1450 TEE_Result res = TEE_ERROR_BAD_PARAMETERS; 1451 uint32_t ffa_version = 0; 1452 1453 res = sp_dt_get_u32(fdt, 0, "ffa-version", &ffa_version); 1454 if (res) { 1455 EMSG("Mandatory property is missing: ffa-version"); 1456 return res; 1457 } 1458 1459 if (ffa_version != FFA_VERSION_1_0 && ffa_version != FFA_VERSION_1_1) { 1460 EMSG("Invalid FF-A version value: 0x%08"PRIx32, ffa_version); 1461 return TEE_ERROR_BAD_PARAMETERS; 1462 } 1463 1464 s->rxtx.ffa_vers = ffa_version; 1465 1466 return TEE_SUCCESS; 1467 } 1468 1469 static TEE_Result read_sp_exec_state(const void *fdt, struct sp_session *s) 1470 { 1471 TEE_Result res = TEE_ERROR_BAD_PARAMETERS; 1472 uint32_t exec_state = 0; 1473 1474 res = sp_dt_get_u32(fdt, 0, "execution-state", &exec_state); 1475 if (res) { 1476 EMSG("Mandatory property is missing: execution-state"); 1477 return res; 1478 } 1479 1480 /* Currently only AArch64 SPs are supported */ 1481 if (exec_state == SP_MANIFEST_EXEC_STATE_AARCH64) { 1482 s->props |= FFA_PART_PROP_AARCH64_STATE; 1483 } else { 1484 EMSG("Invalid execution-state value: %"PRIu32, exec_state); 1485 return TEE_ERROR_BAD_PARAMETERS; 1486 } 1487 1488 return TEE_SUCCESS; 1489 } 1490 1491 static TEE_Result read_sp_msg_types(const void *fdt, struct sp_session *s) 1492 { 1493 TEE_Result res = TEE_ERROR_BAD_PARAMETERS; 1494 uint32_t msg_method = 0; 1495 1496 res = sp_dt_get_u32(fdt, 0, "messaging-method", &msg_method); 1497 if (res) { 1498 EMSG("Mandatory property is missing: messaging-method"); 1499 return res; 1500 } 1501 1502 if (msg_method & SP_MANIFEST_DIRECT_REQ_RECEIVE) 1503 s->props |= FFA_PART_PROP_DIRECT_REQ_RECV; 1504 1505 if (msg_method & SP_MANIFEST_DIRECT_REQ_SEND) 1506 s->props |= FFA_PART_PROP_DIRECT_REQ_SEND; 1507 1508 if (msg_method & SP_MANIFEST_INDIRECT_REQ) 1509 IMSG("Indirect messaging is not supported"); 1510 1511 return TEE_SUCCESS; 1512 } 1513 1514 static TEE_Result read_vm_availability_msg(const void *fdt, 1515 struct sp_session *s) 1516 { 1517 TEE_Result res = TEE_ERROR_BAD_PARAMETERS; 1518 uint32_t v = 0; 1519 1520 res = sp_dt_get_u32(fdt, 0, "vm-availability-messages", &v); 1521 1522 /* This field in the manifest is optional */ 1523 if (res == TEE_ERROR_ITEM_NOT_FOUND) 1524 return TEE_SUCCESS; 1525 1526 if (res) 1527 return res; 1528 1529 if (v & ~(SP_MANIFEST_VM_CREATED_MSG | SP_MANIFEST_VM_DESTROYED_MSG)) { 1530 EMSG("Invalid vm-availability-messages value: %"PRIu32, v); 1531 return TEE_ERROR_BAD_PARAMETERS; 1532 } 1533 1534 if (v & SP_MANIFEST_VM_CREATED_MSG) 1535 s->props |= FFA_PART_PROP_NOTIF_CREATED; 1536 1537 if (v & SP_MANIFEST_VM_DESTROYED_MSG) 1538 s->props |= FFA_PART_PROP_NOTIF_DESTROYED; 1539 1540 return TEE_SUCCESS; 1541 } 1542 1543 static TEE_Result sp_init_uuid(const TEE_UUID *bin_uuid, const void * const fdt) 1544 { 1545 TEE_Result res = TEE_SUCCESS; 1546 struct sp_session *sess = NULL; 1547 TEE_UUID ffa_uuid = {}; 1548 uint16_t boot_order = 0; 1549 uint32_t boot_order_arg = 0; 1550 1551 res = fdt_get_uuid(fdt, &ffa_uuid); 1552 if (res) 1553 return res; 1554 1555 res = sp_dt_get_u16(fdt, 0, "boot-order", &boot_order); 1556 if (res == TEE_SUCCESS) { 1557 boot_order_arg = boot_order; 1558 } else if (res == TEE_ERROR_ITEM_NOT_FOUND) { 1559 boot_order_arg = UINT32_MAX; 1560 } else { 1561 EMSG("Failed reading boot-order property err:%#"PRIx32, res); 1562 return res; 1563 } 1564 1565 res = sp_open_session(&sess, 1566 &open_sp_sessions, 1567 &ffa_uuid, bin_uuid, boot_order_arg, fdt); 1568 if (res) 1569 return res; 1570 1571 sess->fdt = fdt; 1572 1573 res = read_manifest_endpoint_id(sess); 1574 if (res) 1575 return res; 1576 DMSG("endpoint is 0x%"PRIx16, sess->endpoint_id); 1577 1578 res = read_ns_interrupts_action(fdt, sess); 1579 if (res) 1580 return res; 1581 1582 res = read_ffa_version(fdt, sess); 1583 if (res) 1584 return res; 1585 1586 res = read_sp_exec_state(fdt, sess); 1587 if (res) 1588 return res; 1589 1590 res = read_sp_msg_types(fdt, sess); 1591 if (res) 1592 return res; 1593 1594 res = read_vm_availability_msg(fdt, sess); 1595 if (res) 1596 return res; 1597 1598 return TEE_SUCCESS; 1599 } 1600 1601 static TEE_Result sp_first_run(struct sp_session *sess) 1602 { 1603 TEE_Result res = TEE_SUCCESS; 1604 struct thread_smc_args args = { }; 1605 struct sp_ctx *ctx = NULL; 1606 vaddr_t boot_info_va = 0; 1607 size_t boot_info_size = 0; 1608 void *fdt_copy = NULL; 1609 size_t fdt_size = 0; 1610 1611 ctx = to_sp_ctx(sess->ts_sess.ctx); 1612 ts_push_current_session(&sess->ts_sess); 1613 sess->is_initialized = false; 1614 1615 /* 1616 * Load relative memory regions must be handled before doing any other 1617 * mapping to prevent conflicts in the VA space. 1618 */ 1619 res = handle_fdt_load_relative_mem_regions(ctx, sess->fdt); 1620 if (res) { 1621 ts_pop_current_session(); 1622 return res; 1623 } 1624 1625 res = copy_and_map_fdt(ctx, sess->fdt, &fdt_copy, &fdt_size); 1626 if (res) 1627 goto out; 1628 1629 res = handle_fdt_dev_regions(ctx, fdt_copy); 1630 if (res) 1631 goto out; 1632 1633 res = handle_fdt_mem_regions(ctx, fdt_copy); 1634 if (res) 1635 goto out; 1636 1637 if (IS_ENABLED(CFG_CORE_TPM_EVENT_LOG)) { 1638 res = handle_tpm_event_log(ctx, fdt_copy); 1639 if (res) 1640 goto out; 1641 } 1642 1643 res = handle_hw_features(fdt_copy); 1644 if (res) 1645 goto out; 1646 1647 res = create_and_map_boot_info(ctx, fdt_copy, &args, &boot_info_va, 1648 &boot_info_size, sess->rxtx.ffa_vers); 1649 if (res) 1650 goto out; 1651 1652 ts_pop_current_session(); 1653 1654 res = sp_enter(&args, sess); 1655 if (res) { 1656 ts_push_current_session(&sess->ts_sess); 1657 goto out; 1658 } 1659 1660 spmc_sp_msg_handler(&args, sess); 1661 1662 ts_push_current_session(&sess->ts_sess); 1663 sess->is_initialized = true; 1664 1665 out: 1666 /* Free the boot info page from the SP memory */ 1667 vm_unmap(&ctx->uctx, boot_info_va, boot_info_size); 1668 vm_unmap(&ctx->uctx, (vaddr_t)fdt_copy, fdt_size); 1669 ts_pop_current_session(); 1670 1671 return res; 1672 } 1673 1674 TEE_Result sp_enter(struct thread_smc_args *args, struct sp_session *sp) 1675 { 1676 TEE_Result res = TEE_SUCCESS; 1677 struct sp_ctx *ctx = to_sp_ctx(sp->ts_sess.ctx); 1678 1679 ctx->sp_regs.x[0] = args->a0; 1680 ctx->sp_regs.x[1] = args->a1; 1681 ctx->sp_regs.x[2] = args->a2; 1682 ctx->sp_regs.x[3] = args->a3; 1683 ctx->sp_regs.x[4] = args->a4; 1684 ctx->sp_regs.x[5] = args->a5; 1685 ctx->sp_regs.x[6] = args->a6; 1686 ctx->sp_regs.x[7] = args->a7; 1687 #ifdef CFG_TA_PAUTH 1688 ctx->sp_regs.apiakey_hi = ctx->uctx.keys.apia_hi; 1689 ctx->sp_regs.apiakey_lo = ctx->uctx.keys.apia_lo; 1690 #endif 1691 1692 res = sp->ts_sess.ctx->ops->enter_invoke_cmd(&sp->ts_sess, 0); 1693 1694 args->a0 = ctx->sp_regs.x[0]; 1695 args->a1 = ctx->sp_regs.x[1]; 1696 args->a2 = ctx->sp_regs.x[2]; 1697 args->a3 = ctx->sp_regs.x[3]; 1698 args->a4 = ctx->sp_regs.x[4]; 1699 args->a5 = ctx->sp_regs.x[5]; 1700 args->a6 = ctx->sp_regs.x[6]; 1701 args->a7 = ctx->sp_regs.x[7]; 1702 1703 return res; 1704 } 1705 1706 /* 1707 * According to FF-A v1.1 section 8.3.1.4 if a caller requires less permissive 1708 * active on NS interrupt than the callee, the callee must inherit the caller's 1709 * configuration. 1710 * Each SP's own NS action setting is stored in ns_int_mode. The effective 1711 * action will be MIN([self action], [caller's action]) which is stored in the 1712 * ns_int_mode_inherited field. 1713 */ 1714 static void sp_cpsr_configure_foreign_interrupts(struct sp_session *s, 1715 struct ts_session *caller, 1716 uint64_t *cpsr) 1717 { 1718 if (caller) { 1719 struct sp_session *caller_sp = to_sp_session(caller); 1720 1721 s->ns_int_mode_inherited = MIN(caller_sp->ns_int_mode_inherited, 1722 s->ns_int_mode); 1723 } else { 1724 s->ns_int_mode_inherited = s->ns_int_mode; 1725 } 1726 1727 if (s->ns_int_mode_inherited == SP_MANIFEST_NS_INT_QUEUED) 1728 *cpsr |= SHIFT_U32(THREAD_EXCP_FOREIGN_INTR, 1729 ARM32_CPSR_F_SHIFT); 1730 else 1731 *cpsr &= ~SHIFT_U32(THREAD_EXCP_FOREIGN_INTR, 1732 ARM32_CPSR_F_SHIFT); 1733 } 1734 1735 static TEE_Result sp_enter_invoke_cmd(struct ts_session *s, 1736 uint32_t cmd __unused) 1737 { 1738 struct sp_ctx *ctx = to_sp_ctx(s->ctx); 1739 TEE_Result res = TEE_SUCCESS; 1740 uint32_t exceptions = 0; 1741 struct sp_session *sp_s = to_sp_session(s); 1742 struct ts_session *sess = NULL; 1743 struct thread_ctx_regs *sp_regs = NULL; 1744 uint32_t thread_id = THREAD_ID_INVALID; 1745 struct ts_session *caller = NULL; 1746 uint32_t rpc_target_info = 0; 1747 uint32_t panicked = false; 1748 uint32_t panic_code = 0; 1749 1750 sp_regs = &ctx->sp_regs; 1751 ts_push_current_session(s); 1752 1753 exceptions = thread_mask_exceptions(THREAD_EXCP_ALL); 1754 1755 /* Enable/disable foreign interrupts in CPSR/SPSR */ 1756 caller = ts_get_calling_session(); 1757 sp_cpsr_configure_foreign_interrupts(sp_s, caller, &sp_regs->cpsr); 1758 1759 /* 1760 * Store endpoint ID and thread ID in rpc_target_info. This will be used 1761 * as w1 in FFA_INTERRUPT in case of a foreign interrupt. 1762 */ 1763 rpc_target_info = thread_get_tsd()->rpc_target_info; 1764 thread_id = thread_get_id(); 1765 assert(thread_id <= UINT16_MAX); 1766 thread_get_tsd()->rpc_target_info = 1767 FFA_TARGET_INFO_SET(sp_s->endpoint_id, thread_id); 1768 1769 __thread_enter_user_mode(sp_regs, &panicked, &panic_code); 1770 1771 /* Restore rpc_target_info */ 1772 thread_get_tsd()->rpc_target_info = rpc_target_info; 1773 1774 thread_unmask_exceptions(exceptions); 1775 1776 thread_user_clear_vfp(&ctx->uctx); 1777 1778 if (panicked) { 1779 DMSG("SP panicked with code %#"PRIx32, panic_code); 1780 abort_print_current_ts(); 1781 1782 sess = ts_pop_current_session(); 1783 cpu_spin_lock(&sp_s->spinlock); 1784 sp_s->state = sp_dead; 1785 cpu_spin_unlock(&sp_s->spinlock); 1786 1787 return TEE_ERROR_TARGET_DEAD; 1788 } 1789 1790 sess = ts_pop_current_session(); 1791 assert(sess == s); 1792 1793 return res; 1794 } 1795 1796 /* We currently don't support 32 bits */ 1797 #ifdef ARM64 1798 static void sp_svc_store_registers(struct thread_scall_regs *regs, 1799 struct thread_ctx_regs *sp_regs) 1800 { 1801 COMPILE_TIME_ASSERT(sizeof(sp_regs->x[0]) == sizeof(regs->x0)); 1802 memcpy(sp_regs->x, ®s->x0, 31 * sizeof(regs->x0)); 1803 sp_regs->pc = regs->elr; 1804 sp_regs->sp = regs->sp_el0; 1805 } 1806 #endif 1807 1808 static bool sp_handle_scall(struct thread_scall_regs *regs) 1809 { 1810 struct ts_session *ts = ts_get_current_session(); 1811 struct sp_ctx *uctx = to_sp_ctx(ts->ctx); 1812 struct sp_session *s = uctx->open_session; 1813 1814 assert(s); 1815 1816 sp_svc_store_registers(regs, &uctx->sp_regs); 1817 1818 regs->x0 = 0; 1819 regs->x1 = 0; /* panic */ 1820 regs->x2 = 0; /* panic code */ 1821 1822 /* 1823 * All the registers of the SP are saved in the SP session by the SVC 1824 * handler. 1825 * We always return to S-El1 after handling the SVC. We will continue 1826 * in sp_enter_invoke_cmd() (return from __thread_enter_user_mode). 1827 * The sp_enter() function copies the FF-A parameters (a0-a7) from the 1828 * saved registers to the thread_smc_args. The thread_smc_args object is 1829 * afterward used by the spmc_sp_msg_handler() to handle the 1830 * FF-A message send by the SP. 1831 */ 1832 return false; 1833 } 1834 1835 static void sp_dump_state(struct ts_ctx *ctx) 1836 { 1837 struct sp_ctx *utc = to_sp_ctx(ctx); 1838 1839 if (utc->uctx.dump_entry_func) { 1840 TEE_Result res = ldelf_dump_state(&utc->uctx); 1841 1842 if (!res || res == TEE_ERROR_TARGET_DEAD) 1843 return; 1844 } 1845 1846 user_mode_ctx_print_mappings(&utc->uctx); 1847 } 1848 1849 static const struct ts_ops sp_ops = { 1850 .enter_invoke_cmd = sp_enter_invoke_cmd, 1851 .handle_scall = sp_handle_scall, 1852 .dump_state = sp_dump_state, 1853 }; 1854 1855 static TEE_Result process_sp_pkg(uint64_t sp_pkg_pa, TEE_UUID *sp_uuid) 1856 { 1857 enum teecore_memtypes mtype = MEM_AREA_TA_RAM; 1858 struct sp_pkg_header *sp_pkg_hdr = NULL; 1859 struct fip_sp *sp = NULL; 1860 uint64_t sp_fdt_end = 0; 1861 size_t sp_pkg_size = 0; 1862 vaddr_t sp_pkg_va = 0; 1863 1864 /* Process the first page which contains the SP package header */ 1865 sp_pkg_va = (vaddr_t)phys_to_virt(sp_pkg_pa, mtype, SMALL_PAGE_SIZE); 1866 if (!sp_pkg_va) { 1867 EMSG("Cannot find mapping for PA %#" PRIxPA, sp_pkg_pa); 1868 return TEE_ERROR_GENERIC; 1869 } 1870 1871 sp_pkg_hdr = (struct sp_pkg_header *)sp_pkg_va; 1872 1873 if (sp_pkg_hdr->magic != SP_PKG_HEADER_MAGIC) { 1874 EMSG("Invalid SP package magic"); 1875 return TEE_ERROR_BAD_FORMAT; 1876 } 1877 1878 if (sp_pkg_hdr->version != SP_PKG_HEADER_VERSION_V1 && 1879 sp_pkg_hdr->version != SP_PKG_HEADER_VERSION_V2) { 1880 EMSG("Invalid SP header version"); 1881 return TEE_ERROR_BAD_FORMAT; 1882 } 1883 1884 if (ADD_OVERFLOW(sp_pkg_hdr->img_offset, sp_pkg_hdr->img_size, 1885 &sp_pkg_size)) { 1886 EMSG("Invalid SP package size"); 1887 return TEE_ERROR_BAD_FORMAT; 1888 } 1889 1890 if (ADD_OVERFLOW(sp_pkg_hdr->pm_offset, sp_pkg_hdr->pm_size, 1891 &sp_fdt_end) || sp_fdt_end > sp_pkg_hdr->img_offset) { 1892 EMSG("Invalid SP manifest size"); 1893 return TEE_ERROR_BAD_FORMAT; 1894 } 1895 1896 /* Process the whole SP package now that the size is known */ 1897 sp_pkg_va = (vaddr_t)phys_to_virt(sp_pkg_pa, mtype, sp_pkg_size); 1898 if (!sp_pkg_va) { 1899 EMSG("Cannot find mapping for PA %#" PRIxPA, sp_pkg_pa); 1900 return TEE_ERROR_GENERIC; 1901 } 1902 1903 sp_pkg_hdr = (struct sp_pkg_header *)sp_pkg_va; 1904 1905 sp = calloc(1, sizeof(struct fip_sp)); 1906 if (!sp) 1907 return TEE_ERROR_OUT_OF_MEMORY; 1908 1909 memcpy(&sp->sp_img.image.uuid, sp_uuid, sizeof(*sp_uuid)); 1910 sp->sp_img.image.ts = (uint8_t *)(sp_pkg_va + sp_pkg_hdr->img_offset); 1911 sp->sp_img.image.size = sp_pkg_hdr->img_size; 1912 sp->sp_img.image.flags = 0; 1913 sp->sp_img.fdt = (uint8_t *)(sp_pkg_va + sp_pkg_hdr->pm_offset); 1914 1915 STAILQ_INSERT_TAIL(&fip_sp_list, sp, link); 1916 1917 return TEE_SUCCESS; 1918 } 1919 1920 static TEE_Result fip_sp_init_all(void) 1921 { 1922 TEE_Result res = TEE_SUCCESS; 1923 uint64_t sp_pkg_addr = 0; 1924 const void *fdt = NULL; 1925 TEE_UUID sp_uuid = { }; 1926 int sp_pkgs_node = 0; 1927 int subnode = 0; 1928 int root = 0; 1929 1930 fdt = get_manifest_dt(); 1931 if (!fdt) { 1932 EMSG("No SPMC manifest found"); 1933 return TEE_ERROR_GENERIC; 1934 } 1935 1936 root = fdt_path_offset(fdt, "/"); 1937 if (root < 0) 1938 return TEE_ERROR_BAD_FORMAT; 1939 1940 if (fdt_node_check_compatible(fdt, root, "arm,ffa-core-manifest-1.0")) 1941 return TEE_ERROR_BAD_FORMAT; 1942 1943 /* SP packages are optional, it's not an error if we don't find any */ 1944 sp_pkgs_node = fdt_node_offset_by_compatible(fdt, root, "arm,sp_pkg"); 1945 if (sp_pkgs_node < 0) 1946 return TEE_SUCCESS; 1947 1948 fdt_for_each_subnode(subnode, fdt, sp_pkgs_node) { 1949 res = sp_dt_get_u64(fdt, subnode, "load-address", &sp_pkg_addr); 1950 if (res) { 1951 EMSG("Invalid FIP SP load address"); 1952 return res; 1953 } 1954 1955 res = sp_dt_get_uuid(fdt, subnode, "uuid", &sp_uuid); 1956 if (res) { 1957 EMSG("Invalid FIP SP uuid"); 1958 return res; 1959 } 1960 1961 res = process_sp_pkg(sp_pkg_addr, &sp_uuid); 1962 if (res) { 1963 EMSG("Invalid FIP SP package"); 1964 return res; 1965 } 1966 } 1967 1968 return TEE_SUCCESS; 1969 } 1970 1971 static void fip_sp_deinit_all(void) 1972 { 1973 while (!STAILQ_EMPTY(&fip_sp_list)) { 1974 struct fip_sp *sp = STAILQ_FIRST(&fip_sp_list); 1975 1976 STAILQ_REMOVE_HEAD(&fip_sp_list, link); 1977 free(sp); 1978 } 1979 } 1980 1981 static TEE_Result sp_init_all(void) 1982 { 1983 TEE_Result res = TEE_SUCCESS; 1984 const struct sp_image *sp = NULL; 1985 const struct fip_sp *fip_sp = NULL; 1986 char __maybe_unused msg[60] = { '\0', }; 1987 struct sp_session *s = NULL; 1988 struct sp_session *prev_sp = NULL; 1989 1990 for_each_secure_partition(sp) { 1991 if (sp->image.uncompressed_size) 1992 snprintf(msg, sizeof(msg), 1993 " (compressed, uncompressed %u)", 1994 sp->image.uncompressed_size); 1995 else 1996 msg[0] = '\0'; 1997 DMSG("SP %pUl size %u%s", (void *)&sp->image.uuid, 1998 sp->image.size, msg); 1999 2000 res = sp_init_uuid(&sp->image.uuid, sp->fdt); 2001 2002 if (res != TEE_SUCCESS) { 2003 EMSG("Failed initializing SP(%pUl) err:%#"PRIx32, 2004 &sp->image.uuid, res); 2005 if (!IS_ENABLED(CFG_SP_SKIP_FAILED)) 2006 panic(); 2007 } 2008 } 2009 2010 res = fip_sp_init_all(); 2011 if (res) 2012 panic("Failed initializing FIP SPs"); 2013 2014 for_each_fip_sp(fip_sp) { 2015 sp = &fip_sp->sp_img; 2016 2017 DMSG("SP %pUl size %u", (void *)&sp->image.uuid, 2018 sp->image.size); 2019 2020 res = sp_init_uuid(&sp->image.uuid, sp->fdt); 2021 2022 if (res != TEE_SUCCESS) { 2023 EMSG("Failed initializing SP(%pUl) err:%#"PRIx32, 2024 &sp->image.uuid, res); 2025 if (!IS_ENABLED(CFG_SP_SKIP_FAILED)) 2026 panic(); 2027 } 2028 } 2029 2030 /* 2031 * At this point all FIP SPs are loaded by ldelf or by the raw binary SP 2032 * loader, so the original images (loaded by BL2) are not needed anymore 2033 */ 2034 fip_sp_deinit_all(); 2035 2036 /* 2037 * Now that all SPs are loaded, check through the boot order values, 2038 * and warn in case there is a non-unique value. 2039 */ 2040 TAILQ_FOREACH(s, &open_sp_sessions, link) { 2041 /* User specified boot-order values are uint16 */ 2042 if (s->boot_order > UINT16_MAX) 2043 break; 2044 2045 if (prev_sp && prev_sp->boot_order == s->boot_order) 2046 IMSG("WARNING: duplicated boot-order (%pUl vs %pUl)", 2047 &prev_sp->ts_sess.ctx->uuid, 2048 &s->ts_sess.ctx->uuid); 2049 2050 prev_sp = s; 2051 } 2052 2053 /* Continue the initialization and run the SP */ 2054 TAILQ_FOREACH(s, &open_sp_sessions, link) { 2055 DMSG("Starting SP: 0x%"PRIx16, s->endpoint_id); 2056 2057 res = sp_first_run(s); 2058 if (res != TEE_SUCCESS) { 2059 EMSG("Failed starting SP(0x%"PRIx16") err:%#"PRIx32, 2060 s->endpoint_id, res); 2061 if (!IS_ENABLED(CFG_SP_SKIP_FAILED)) 2062 panic(); 2063 } 2064 } 2065 2066 return TEE_SUCCESS; 2067 } 2068 2069 boot_final(sp_init_all); 2070 2071 static TEE_Result secure_partition_open(const TEE_UUID *uuid, 2072 struct ts_store_handle **h) 2073 { 2074 return emb_ts_open(uuid, h, find_secure_partition); 2075 } 2076 2077 REGISTER_SP_STORE(2) = { 2078 .description = "SP store", 2079 .open = secure_partition_open, 2080 .get_size = emb_ts_get_size, 2081 .get_tag = emb_ts_get_tag, 2082 .read = emb_ts_read, 2083 .close = emb_ts_close, 2084 }; 2085