1 // SPDX-License-Identifier: BSD-2-Clause 2 /* 3 * Copyright (c) 2020-2024, Arm Limited. 4 */ 5 #include <crypto/crypto.h> 6 #include <initcall.h> 7 #include <kernel/boot.h> 8 #include <kernel/embedded_ts.h> 9 #include <kernel/ldelf_loader.h> 10 #include <kernel/secure_partition.h> 11 #include <kernel/spinlock.h> 12 #include <kernel/spmc_sp_handler.h> 13 #include <kernel/thread_private.h> 14 #include <kernel/thread_spmc.h> 15 #include <kernel/tpm.h> 16 #include <kernel/ts_store.h> 17 #include <ldelf.h> 18 #include <libfdt.h> 19 #include <mm/core_mmu.h> 20 #include <mm/fobj.h> 21 #include <mm/mobj.h> 22 #include <mm/vm.h> 23 #include <optee_ffa.h> 24 #include <stdio.h> 25 #include <string.h> 26 #include <tee_api_types.h> 27 #include <tee/uuid.h> 28 #include <trace.h> 29 #include <types_ext.h> 30 #include <utee_defines.h> 31 #include <util.h> 32 #include <zlib.h> 33 34 #define BOUNCE_BUFFER_SIZE 4096 35 36 #define SP_MANIFEST_ATTR_READ BIT(0) 37 #define SP_MANIFEST_ATTR_WRITE BIT(1) 38 #define SP_MANIFEST_ATTR_EXEC BIT(2) 39 #define SP_MANIFEST_ATTR_NSEC BIT(3) 40 41 #define SP_MANIFEST_ATTR_RO (SP_MANIFEST_ATTR_READ) 42 #define SP_MANIFEST_ATTR_RW (SP_MANIFEST_ATTR_READ | \ 43 SP_MANIFEST_ATTR_WRITE) 44 #define SP_MANIFEST_ATTR_RX (SP_MANIFEST_ATTR_READ | \ 45 SP_MANIFEST_ATTR_EXEC) 46 #define SP_MANIFEST_ATTR_RWX (SP_MANIFEST_ATTR_READ | \ 47 SP_MANIFEST_ATTR_WRITE | \ 48 SP_MANIFEST_ATTR_EXEC) 49 50 #define SP_MANIFEST_FLAG_NOBITS BIT(0) 51 52 #define SP_MANIFEST_NS_INT_QUEUED (0x0) 53 #define SP_MANIFEST_NS_INT_MANAGED_EXIT (0x1) 54 #define SP_MANIFEST_NS_INT_SIGNALED (0x2) 55 56 #define SP_MANIFEST_EXEC_STATE_AARCH64 (0x0) 57 #define SP_MANIFEST_EXEC_STATE_AARCH32 (0x1) 58 59 #define SP_MANIFEST_DIRECT_REQ_RECEIVE BIT(0) 60 #define SP_MANIFEST_DIRECT_REQ_SEND BIT(1) 61 #define SP_MANIFEST_INDIRECT_REQ BIT(2) 62 63 #define SP_PKG_HEADER_MAGIC (0x474b5053) 64 #define SP_PKG_HEADER_VERSION_V1 (0x1) 65 #define SP_PKG_HEADER_VERSION_V2 (0x2) 66 67 struct 
sp_pkg_header { 68 uint32_t magic; 69 uint32_t version; 70 uint32_t pm_offset; 71 uint32_t pm_size; 72 uint32_t img_offset; 73 uint32_t img_size; 74 }; 75 76 struct fip_sp_head fip_sp_list = STAILQ_HEAD_INITIALIZER(fip_sp_list); 77 78 static const struct ts_ops sp_ops; 79 80 /* List that holds all of the loaded SP's */ 81 static struct sp_sessions_head open_sp_sessions = 82 TAILQ_HEAD_INITIALIZER(open_sp_sessions); 83 84 static const struct embedded_ts *find_secure_partition(const TEE_UUID *uuid) 85 { 86 const struct sp_image *sp = NULL; 87 const struct fip_sp *fip_sp = NULL; 88 89 for_each_secure_partition(sp) { 90 if (!memcmp(&sp->image.uuid, uuid, sizeof(*uuid))) 91 return &sp->image; 92 } 93 94 for_each_fip_sp(fip_sp) { 95 if (!memcmp(&fip_sp->sp_img.image.uuid, uuid, sizeof(*uuid))) 96 return &fip_sp->sp_img.image; 97 } 98 99 return NULL; 100 } 101 102 bool is_sp_ctx(struct ts_ctx *ctx) 103 { 104 return ctx && (ctx->ops == &sp_ops); 105 } 106 107 static void set_sp_ctx_ops(struct ts_ctx *ctx) 108 { 109 ctx->ops = &sp_ops; 110 } 111 112 struct sp_session *sp_get_session(uint32_t session_id) 113 { 114 struct sp_session *s = NULL; 115 116 TAILQ_FOREACH(s, &open_sp_sessions, link) { 117 if (s->endpoint_id == session_id) 118 return s; 119 } 120 121 return NULL; 122 } 123 124 TEE_Result sp_partition_info_get(uint32_t ffa_vers, void *buf, size_t buf_size, 125 const TEE_UUID *ffa_uuid, size_t *elem_count, 126 bool count_only) 127 { 128 TEE_Result res = TEE_SUCCESS; 129 struct sp_session *s = NULL; 130 131 TAILQ_FOREACH(s, &open_sp_sessions, link) { 132 if (ffa_uuid && 133 memcmp(&s->ffa_uuid, ffa_uuid, sizeof(*ffa_uuid))) 134 continue; 135 136 if (s->state == sp_dead) 137 continue; 138 if (!count_only && !res) { 139 uint32_t uuid_words[4] = { 0 }; 140 141 tee_uuid_to_octets((uint8_t *)uuid_words, &s->ffa_uuid); 142 res = spmc_fill_partition_entry(ffa_vers, buf, buf_size, 143 *elem_count, 144 s->endpoint_id, 1, 145 s->props, uuid_words); 146 } 147 *elem_count += 1; 148 
} 149 150 return res; 151 } 152 153 bool sp_has_exclusive_access(struct sp_mem_map_region *mem, 154 struct user_mode_ctx *uctx) 155 { 156 /* 157 * Check that we have access to the region if it is supposed to be 158 * mapped to the current context. 159 */ 160 if (uctx) { 161 struct vm_region *region = NULL; 162 163 /* Make sure that each mobj belongs to the SP */ 164 TAILQ_FOREACH(region, &uctx->vm_info.regions, link) { 165 if (region->mobj == mem->mobj) 166 break; 167 } 168 169 if (!region) 170 return false; 171 } 172 173 /* Check that it is not shared with another SP */ 174 return !sp_mem_is_shared(mem); 175 } 176 177 static bool endpoint_id_is_valid(uint32_t id) 178 { 179 /* 180 * These IDs are assigned at the SPMC init so already have valid values 181 * by the time this function gets first called 182 */ 183 return id != spmd_id && id != spmc_id && id != optee_endpoint_id && 184 id >= FFA_SWD_ID_MIN && id <= FFA_SWD_ID_MAX; 185 } 186 187 static TEE_Result new_session_id(uint16_t *endpoint_id) 188 { 189 uint32_t id = 0; 190 191 /* Find the first available endpoint id */ 192 for (id = FFA_SWD_ID_MIN; id <= FFA_SWD_ID_MAX; id++) { 193 if (endpoint_id_is_valid(id) && !sp_get_session(id)) { 194 *endpoint_id = id; 195 return TEE_SUCCESS; 196 } 197 } 198 199 return TEE_ERROR_BAD_FORMAT; 200 } 201 202 static TEE_Result sp_create_ctx(const TEE_UUID *bin_uuid, struct sp_session *s) 203 { 204 TEE_Result res = TEE_SUCCESS; 205 struct sp_ctx *spc = NULL; 206 207 /* Register context */ 208 spc = calloc(1, sizeof(struct sp_ctx)); 209 if (!spc) 210 return TEE_ERROR_OUT_OF_MEMORY; 211 212 spc->open_session = s; 213 s->ts_sess.ctx = &spc->ts_ctx; 214 spc->ts_ctx.uuid = *bin_uuid; 215 216 res = vm_info_init(&spc->uctx, &spc->ts_ctx); 217 if (res) 218 goto err; 219 220 set_sp_ctx_ops(&spc->ts_ctx); 221 222 return TEE_SUCCESS; 223 224 err: 225 free(spc); 226 return res; 227 } 228 229 /* 230 * Insert a new sp_session to the sessions list, so that it is ordered 231 * by boot_order. 
232 */ 233 static void insert_session_ordered(struct sp_sessions_head *open_sessions, 234 struct sp_session *session) 235 { 236 struct sp_session *s = NULL; 237 238 if (!open_sessions || !session) 239 return; 240 241 TAILQ_FOREACH(s, &open_sp_sessions, link) { 242 if (s->boot_order > session->boot_order) 243 break; 244 } 245 246 if (!s) 247 TAILQ_INSERT_TAIL(open_sessions, session, link); 248 else 249 TAILQ_INSERT_BEFORE(s, session, link); 250 } 251 252 static TEE_Result sp_create_session(struct sp_sessions_head *open_sessions, 253 const TEE_UUID *bin_uuid, 254 const uint32_t boot_order, 255 struct sp_session **sess) 256 { 257 TEE_Result res = TEE_SUCCESS; 258 struct sp_session *s = calloc(1, sizeof(struct sp_session)); 259 260 if (!s) 261 return TEE_ERROR_OUT_OF_MEMORY; 262 263 s->boot_order = boot_order; 264 265 /* Other properties are filled later, based on the SP's manifest */ 266 s->props = FFA_PART_PROP_IS_PE_ID; 267 268 res = new_session_id(&s->endpoint_id); 269 if (res) 270 goto err; 271 272 DMSG("Loading Secure Partition %pUl", (void *)bin_uuid); 273 res = sp_create_ctx(bin_uuid, s); 274 if (res) 275 goto err; 276 277 insert_session_ordered(open_sessions, s); 278 *sess = s; 279 return TEE_SUCCESS; 280 281 err: 282 free(s); 283 return res; 284 } 285 286 static TEE_Result sp_init_set_registers(struct sp_ctx *ctx) 287 { 288 struct thread_ctx_regs *sp_regs = &ctx->sp_regs; 289 290 memset(sp_regs, 0, sizeof(*sp_regs)); 291 sp_regs->sp = ctx->uctx.stack_ptr; 292 sp_regs->pc = ctx->uctx.entry_func; 293 294 return TEE_SUCCESS; 295 } 296 297 TEE_Result sp_map_shared(struct sp_session *s, 298 struct sp_mem_receiver *receiver, 299 struct sp_mem *smem, 300 uint64_t *va) 301 { 302 TEE_Result res = TEE_SUCCESS; 303 struct sp_ctx *ctx = NULL; 304 uint32_t perm = TEE_MATTR_UR; 305 struct sp_mem_map_region *reg = NULL; 306 307 ctx = to_sp_ctx(s->ts_sess.ctx); 308 309 /* Get the permission */ 310 if (receiver->perm.perm & FFA_MEM_ACC_EXE) 311 perm |= TEE_MATTR_UX; 312 313 
if (receiver->perm.perm & FFA_MEM_ACC_RW) { 314 if (receiver->perm.perm & FFA_MEM_ACC_EXE) 315 return TEE_ERROR_ACCESS_CONFLICT; 316 317 perm |= TEE_MATTR_UW; 318 } 319 /* 320 * Currently we don't support passing a va. We can't guarantee that the 321 * full region will be mapped in a contiguous region. A smem->region can 322 * have multiple mobj for one share. Currently there doesn't seem to be 323 * an option to guarantee that these will be mapped in a contiguous va 324 * space. 325 */ 326 if (*va) 327 return TEE_ERROR_NOT_SUPPORTED; 328 329 SLIST_FOREACH(reg, &smem->regions, link) { 330 res = vm_map(&ctx->uctx, va, reg->page_count * SMALL_PAGE_SIZE, 331 perm, 0, reg->mobj, reg->page_offset); 332 333 if (res != TEE_SUCCESS) { 334 EMSG("Failed to map memory region %#"PRIx32, res); 335 return res; 336 } 337 } 338 return TEE_SUCCESS; 339 } 340 341 TEE_Result sp_unmap_ffa_regions(struct sp_session *s, struct sp_mem *smem) 342 { 343 TEE_Result res = TEE_SUCCESS; 344 vaddr_t vaddr = 0; 345 size_t len = 0; 346 struct sp_ctx *ctx = to_sp_ctx(s->ts_sess.ctx); 347 struct sp_mem_map_region *reg = NULL; 348 349 SLIST_FOREACH(reg, &smem->regions, link) { 350 vaddr = (vaddr_t)sp_mem_get_va(&ctx->uctx, reg->page_offset, 351 reg->mobj); 352 len = reg->page_count * SMALL_PAGE_SIZE; 353 354 res = vm_unmap(&ctx->uctx, vaddr, len); 355 if (res != TEE_SUCCESS) 356 return res; 357 } 358 359 return TEE_SUCCESS; 360 } 361 362 static TEE_Result sp_dt_get_u64(const void *fdt, int node, const char *property, 363 uint64_t *value) 364 { 365 const fdt64_t *p = NULL; 366 int len = 0; 367 368 p = fdt_getprop(fdt, node, property, &len); 369 if (!p) 370 return TEE_ERROR_ITEM_NOT_FOUND; 371 372 if (len != sizeof(*p)) 373 return TEE_ERROR_BAD_FORMAT; 374 375 *value = fdt64_ld(p); 376 377 return TEE_SUCCESS; 378 } 379 380 static TEE_Result sp_dt_get_u32(const void *fdt, int node, const char *property, 381 uint32_t *value) 382 { 383 const fdt32_t *p = NULL; 384 int len = 0; 385 386 p = 
fdt_getprop(fdt, node, property, &len); 387 if (!p) 388 return TEE_ERROR_ITEM_NOT_FOUND; 389 390 if (len != sizeof(*p)) 391 return TEE_ERROR_BAD_FORMAT; 392 393 *value = fdt32_to_cpu(*p); 394 395 return TEE_SUCCESS; 396 } 397 398 static TEE_Result sp_dt_get_u16(const void *fdt, int node, const char *property, 399 uint16_t *value) 400 { 401 const fdt16_t *p = NULL; 402 int len = 0; 403 404 p = fdt_getprop(fdt, node, property, &len); 405 if (!p) 406 return TEE_ERROR_ITEM_NOT_FOUND; 407 408 if (len != sizeof(*p)) 409 return TEE_ERROR_BAD_FORMAT; 410 411 *value = fdt16_to_cpu(*p); 412 413 return TEE_SUCCESS; 414 } 415 416 static TEE_Result sp_dt_get_uuid(const void *fdt, int node, 417 const char *property, TEE_UUID *uuid) 418 { 419 uint32_t uuid_array[4] = { 0 }; 420 const fdt32_t *p = NULL; 421 int len = 0; 422 int i = 0; 423 424 p = fdt_getprop(fdt, node, property, &len); 425 if (!p) 426 return TEE_ERROR_ITEM_NOT_FOUND; 427 428 if (len != sizeof(TEE_UUID)) 429 return TEE_ERROR_BAD_FORMAT; 430 431 for (i = 0; i < 4; i++) 432 uuid_array[i] = fdt32_to_cpu(p[i]); 433 434 tee_uuid_from_octets(uuid, (uint8_t *)uuid_array); 435 436 return TEE_SUCCESS; 437 } 438 439 static TEE_Result sp_is_elf_format(const void *fdt, int sp_node, 440 bool *is_elf_format) 441 { 442 TEE_Result res = TEE_SUCCESS; 443 uint32_t elf_format = 0; 444 445 res = sp_dt_get_u32(fdt, sp_node, "elf-format", &elf_format); 446 if (res != TEE_SUCCESS && res != TEE_ERROR_ITEM_NOT_FOUND) 447 return res; 448 449 *is_elf_format = (elf_format != 0); 450 451 return TEE_SUCCESS; 452 } 453 454 static TEE_Result sp_binary_open(const TEE_UUID *uuid, 455 const struct ts_store_ops **ops, 456 struct ts_store_handle **handle) 457 { 458 TEE_Result res = TEE_ERROR_ITEM_NOT_FOUND; 459 460 SCATTERED_ARRAY_FOREACH(*ops, sp_stores, struct ts_store_ops) { 461 res = (*ops)->open(uuid, handle); 462 if (res != TEE_ERROR_ITEM_NOT_FOUND && 463 res != TEE_ERROR_STORAGE_NOT_AVAILABLE) 464 break; 465 } 466 467 return res; 468 } 469 470 
/*
 * Load a raw (non-ELF) SP binary: set up the bounce buffer, allocate
 * secure memory for the image, copy it in and drop write permission so
 * the final mapping is RX. The entry point is the image start address.
 */
static TEE_Result load_binary_sp(struct ts_session *s,
				 struct user_mode_ctx *uctx)
{
	size_t bin_size = 0, bin_size_rounded = 0, bin_page_count = 0;
	size_t bb_size = ROUNDUP(BOUNCE_BUFFER_SIZE, SMALL_PAGE_SIZE);
	size_t bb_num_pages = bb_size / SMALL_PAGE_SIZE;
	const struct ts_store_ops *store_ops = NULL;
	struct ts_store_handle *handle = NULL;
	TEE_Result res = TEE_SUCCESS;
	tee_mm_entry_t *mm = NULL;
	struct fobj *fobj = NULL;
	struct mobj *mobj = NULL;
	uaddr_t base_addr = 0;
	uint32_t vm_flags = 0;
	unsigned int idx = 0;
	vaddr_t va = 0;

	if (!s || !uctx)
		return TEE_ERROR_BAD_PARAMETERS;

	DMSG("Loading raw binary format SP %pUl", &uctx->ts_ctx->uuid);

	/* Initialize the bounce buffer */
	fobj = fobj_sec_mem_alloc(bb_num_pages);
	/* mobj takes over the fobj reference; fobj_put() drops ours */
	mobj = mobj_with_fobj_alloc(fobj, NULL, TEE_MATTR_MEM_TYPE_TAGGED);
	fobj_put(fobj);
	if (!mobj)
		return TEE_ERROR_OUT_OF_MEMORY;

	res = vm_map(uctx, &va, bb_size, TEE_MATTR_PRW, 0, mobj, 0);
	mobj_put(mobj);
	if (res)
		return res;

	uctx->bbuf = (uint8_t *)va;
	uctx->bbuf_size = BOUNCE_BUFFER_SIZE;

	vm_set_ctx(uctx->ts_ctx);

	/* Find TS store and open SP binary */
	res = sp_binary_open(&uctx->ts_ctx->uuid, &store_ops, &handle);
	if (res != TEE_SUCCESS) {
		EMSG("Failed to open SP binary");
		return res;
	}

	/* Query binary size and calculate page count */
	res = store_ops->get_size(handle, &bin_size);
	if (res != TEE_SUCCESS)
		goto err;

	if (ROUNDUP_OVERFLOW(bin_size, SMALL_PAGE_SIZE, &bin_size_rounded)) {
		res = TEE_ERROR_OVERFLOW;
		goto err;
	}

	bin_page_count = bin_size_rounded / SMALL_PAGE_SIZE;

	/* Allocate memory */
	mm = tee_mm_alloc(&tee_mm_sec_ddr, bin_size_rounded);
	if (!mm) {
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto err;
	}

	base_addr = tee_mm_get_smem(mm);

	/* Create mobj */
	mobj = sp_mem_new_mobj(bin_page_count, TEE_MATTR_MEM_TYPE_CACHED, true);
	if (!mobj) {
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto err_free_tee_mm;
	}

	res = sp_mem_add_pages(mobj, &idx, base_addr, bin_page_count);
	if (res)
		goto err_free_mobj;

	/* Map memory area for the SP binary */
	va = 0;
	res = vm_map(uctx, &va, bin_size_rounded, TEE_MATTR_URWX,
		     vm_flags, mobj, 0);
	if (res)
		goto err_free_mobj;

	/* Read SP binary into the previously mapped memory area */
	res = store_ops->read(handle, NULL, (void *)va, bin_size);
	if (res)
		goto err_unmap;

	/* Set memory protection to allow execution */
	res = vm_set_prot(uctx, va, bin_size_rounded, TEE_MATTR_UX);
	if (res)
		goto err_unmap;

	mobj_put(mobj);
	store_ops->close(handle);

	/* The entry point must be at the beginning of the SP binary. */
	uctx->entry_func = va;
	uctx->load_addr = va;
	uctx->is_32bit = false;

	s->handle_scall = s->ctx->ops->handle_scall;

	return TEE_SUCCESS;

err_unmap:
	vm_unmap(uctx, va, bin_size_rounded);

err_free_mobj:
	mobj_put(mobj);

err_free_tee_mm:
	tee_mm_free(mm);

err:
	store_ops->close(handle);

	return res;
}

/*
 * Create an SP session for @bin_uuid and load the SP (via ldelf for ELF
 * payloads, or load_binary_sp() for raw binaries) based on manifest @fdt.
 * On success *@sess is left in sp_busy state so no endpoint can message
 * the SP before its boot phase completes.
 */
static TEE_Result sp_open_session(struct sp_session **sess,
				  struct sp_sessions_head *open_sessions,
				  const TEE_UUID *ffa_uuid,
				  const TEE_UUID *bin_uuid,
				  const uint32_t boot_order,
				  const void *fdt)
{
	TEE_Result res = TEE_SUCCESS;
	struct sp_session *s = NULL;
	struct sp_ctx *ctx = NULL;
	bool is_elf_format = false;

	if (!find_secure_partition(bin_uuid))
		return TEE_ERROR_ITEM_NOT_FOUND;

	res = sp_create_session(open_sessions, bin_uuid, boot_order, &s);
	if (res != TEE_SUCCESS) {
		DMSG("sp_create_session failed %#"PRIx32, res);
		return res;
	}

	ctx = to_sp_ctx(s->ts_sess.ctx);
	/*
	 * NOTE(review): the if below is dead when assertions are enabled
	 * and leaks @s when they are not — confirm intended defensiveness.
	 */
	assert(ctx);
	if (!ctx)
		return TEE_ERROR_TARGET_DEAD;
	*sess = s;

	ts_push_current_session(&s->ts_sess);

	res = sp_is_elf_format(fdt, 0, &is_elf_format);
	if (res == TEE_SUCCESS) {
		if (is_elf_format) {
			/* Load the SP using ldelf. */
			ldelf_load_ldelf(&ctx->uctx);
			res = ldelf_init_with_ldelf(&s->ts_sess, &ctx->uctx);
		} else {
			/* Raw binary format SP */
			res = load_binary_sp(&s->ts_sess, &ctx->uctx);
		}
	} else {
		EMSG("Failed to detect SP format");
	}

	if (res != TEE_SUCCESS) {
		EMSG("Failed loading SP %#"PRIx32, res);
		ts_pop_current_session();
		return TEE_ERROR_TARGET_DEAD;
	}

	/*
	 * Make the SP ready for its first run.
	 * Set state to busy to prevent other endpoints from sending messages to
	 * the SP before its boot phase is done.
	 */
	s->state = sp_busy;
	s->caller_id = 0;
	sp_init_set_registers(ctx);
	memcpy(&s->ffa_uuid, ffa_uuid, sizeof(*ffa_uuid));
	ts_pop_current_session();

	return TEE_SUCCESS;
}

/*
 * Validate that @fdt is an FF-A manifest and extract its mandatory
 * "uuid" property. Logs the optional "description" when present.
 */
static TEE_Result fdt_get_uuid(const void * const fdt, TEE_UUID *uuid)
{
	const struct fdt_property *description = NULL;
	int description_name_len = 0;

	if (fdt_node_check_compatible(fdt, 0, "arm,ffa-manifest-1.0")) {
		EMSG("Failed loading SP, manifest not found");
		return TEE_ERROR_BAD_PARAMETERS;
	}

	description = fdt_get_property(fdt, 0, "description",
				       &description_name_len);
	if (description)
		DMSG("Loading SP: %s", description->data);

	if (sp_dt_get_uuid(fdt, 0, "uuid", uuid)) {
		EMSG("Missing or invalid UUID in SP manifest");
		return TEE_ERROR_BAD_FORMAT;
	}

	return TEE_SUCCESS;
}

/*
 * Copy the manifest @fdt into freshly allocated secure memory mapped
 * into the SP context (user-readable, kernel-writable) so it can be
 * edited and later handed to the SP. Returns the mapped copy and size.
 */
static TEE_Result copy_and_map_fdt(struct sp_ctx *ctx, const void * const fdt,
				   void **fdt_copy, size_t *mapped_size)
{
	size_t total_size = ROUNDUP(fdt_totalsize(fdt), SMALL_PAGE_SIZE);
	size_t num_pages = total_size / SMALL_PAGE_SIZE;
	uint32_t perm = TEE_MATTR_UR | TEE_MATTR_PRW;
	TEE_Result res = TEE_SUCCESS;
	struct mobj *m = NULL;
	struct fobj *f = NULL;
	vaddr_t va = 0;

	f = fobj_sec_mem_alloc(num_pages);
	m = mobj_with_fobj_alloc(f, NULL, TEE_MATTR_MEM_TYPE_TAGGED);
	fobj_put(f);
	if (!m)
		return TEE_ERROR_OUT_OF_MEMORY;

	res = vm_map(&ctx->uctx, &va, total_size, perm, 0, m, 0);
	mobj_put(m);
	if (res)
		return res;

	/* fdt_open_into() also grows the blob into the rounded-up space */
	if (fdt_open_into(fdt, (void *)va, total_size))
		return TEE_ERROR_GENERIC;

	*fdt_copy = (void *)va;
	*mapped_size = total_size;

	return res;
}

/* Fill an FF-A v1.0 boot information blob pointing at the manifest @fdt */
static void fill_boot_info_1_0(vaddr_t buf, const void *fdt)
{
	struct ffa_boot_info_1_0 *info = (struct ffa_boot_info_1_0 *)buf;
	static const char fdt_name[16] = "TYPE_DT\0\0\0\0\0\0\0\0";

	memcpy(&info->magic, "FF-A", 4);
	info->count = 1;

	COMPILE_TIME_ASSERT(sizeof(info->nvp[0].name) == sizeof(fdt_name));
	memcpy(info->nvp[0].name, fdt_name, sizeof(fdt_name));
	info->nvp[0].value = (uintptr_t)fdt;
	info->nvp[0].size = fdt_totalsize(fdt);
}

/* Fill an FF-A v1.1 boot information header + single FDT descriptor */
static void fill_boot_info_1_1(vaddr_t buf, const void *fdt)
{
	/* Descriptor array starts 8-byte aligned after the header */
	size_t desc_offs = ROUNDUP(sizeof(struct ffa_boot_info_header_1_1), 8);
	struct ffa_boot_info_header_1_1 *header =
		(struct ffa_boot_info_header_1_1 *)buf;
	struct ffa_boot_info_1_1 *desc =
		(struct ffa_boot_info_1_1 *)(buf + desc_offs);

	header->signature = FFA_BOOT_INFO_SIGNATURE;
	header->version = FFA_BOOT_INFO_VERSION;
	header->blob_size = desc_offs + sizeof(struct ffa_boot_info_1_1);
	header->desc_size = sizeof(struct ffa_boot_info_1_1);
	header->desc_count = 1;
	header->desc_offset = desc_offs;

	memset(&desc[0].name, 0, sizeof(desc[0].name));
	/* Type: Standard boot info (bit[7] == 0), FDT type */
	desc[0].type = FFA_BOOT_INFO_TYPE_ID_FDT;
	/* Flags: Contents field contains an address */
	desc[0].flags = FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_ADDR <<
			FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_SHIFT;
	desc[0].size = fdt_totalsize(fdt);
	desc[0].contents = (uintptr_t)fdt;
}

/*
 * Allocate and map the boot info blob for the SP, fill it in the format
 * matching @sp_ffa_version and place its address in the boot argument
 * register chosen by the manifest's optional "gp-register-num" (x0-x3,
 * default x0).
 */
static TEE_Result create_and_map_boot_info(struct sp_ctx *ctx, const void *fdt,
					   struct thread_smc_args *args,
					   vaddr_t *va, size_t *mapped_size,
					   uint32_t sp_ffa_version)
{
	size_t total_size = ROUNDUP(CFG_SP_INIT_INFO_MAX_SIZE, SMALL_PAGE_SIZE);
	size_t num_pages = total_size / SMALL_PAGE_SIZE;
	uint32_t perm = TEE_MATTR_UR | TEE_MATTR_PRW;
	TEE_Result res = TEE_SUCCESS;
	struct fobj *f = NULL;
	struct mobj *m = NULL;
	uint32_t info_reg = 0;

	f = fobj_sec_mem_alloc(num_pages);
	m = mobj_with_fobj_alloc(f, NULL, TEE_MATTR_MEM_TYPE_TAGGED);
	fobj_put(f);
	if (!m)
		return TEE_ERROR_OUT_OF_MEMORY;

	res = vm_map(&ctx->uctx, va, total_size, perm, 0, m, 0);
	mobj_put(m);
	if (res)
		return res;

	*mapped_size = total_size;

	switch (sp_ffa_version) {
	case MAKE_FFA_VERSION(1, 0):
		fill_boot_info_1_0(*va, fdt);
		break;
	case MAKE_FFA_VERSION(1, 1):
		fill_boot_info_1_1(*va, fdt);
		break;
	default:
		EMSG("Unknown FF-A version: %#"PRIx32, sp_ffa_version);
		return TEE_ERROR_NOT_SUPPORTED;
	}

	res = sp_dt_get_u32(fdt, 0, "gp-register-num", &info_reg);
	if (res) {
		if (res == TEE_ERROR_ITEM_NOT_FOUND) {
			/* If the property is not present, set default to x0 */
			info_reg = 0;
		} else {
			return TEE_ERROR_BAD_FORMAT;
		}
	}

	switch (info_reg) {
	case 0:
		args->a0 = *va;
		break;
	case 1:
		args->a1 = *va;
		break;
	case 2:
		args->a2 = *va;
		break;
	case 3:
		args->a3 = *va;
		break;
	default:
		EMSG("Invalid register selected for passing boot info");
		return TEE_ERROR_BAD_FORMAT;
	}

	return TEE_SUCCESS;
}

/*
 * First pass over manifest memory regions: handle only regions placed
 * relative to the SP load address ("load-address-relative-offset").
 * NOBITS regions get fresh secure memory allocated and mapped at the
 * computed VA; others are already mapped by the image load and only get
 * their permissions adjusted.
 */
static TEE_Result handle_fdt_load_relative_mem_regions(struct sp_ctx *ctx,
						       const void *fdt)
{
	int node = 0;
	int subnode = 0;
	tee_mm_entry_t *mm = NULL;
	TEE_Result res = TEE_SUCCESS;

	/*
	 * Memory regions are optional in the SP manifest, it's not an error if
	 * we don't find any.
	 */
	node = fdt_node_offset_by_compatible(fdt, 0,
					     "arm,ffa-manifest-memory-regions");
	if (node < 0)
		return TEE_SUCCESS;

	fdt_for_each_subnode(subnode, fdt, node) {
		uint64_t load_rel_offset = 0;
		uint32_t attributes = 0;
		uint64_t base_addr = 0;
		uint32_t pages_cnt = 0;
		uint32_t flags = 0;
		uint32_t perm = 0;
		size_t size = 0;
		vaddr_t va = 0;

		mm = NULL;

		/* Load address relative offset of a memory region */
		if (!sp_dt_get_u64(fdt, subnode, "load-address-relative-offset",
				   &load_rel_offset)) {
			va = ctx->uctx.load_addr + load_rel_offset;
		} else {
			/* Skip non load address relative memory regions */
			continue;
		}

		/* The two placement properties are mutually exclusive */
		if (!sp_dt_get_u64(fdt, subnode, "base-address", &base_addr)) {
			EMSG("Both base-address and load-address-relative-offset fields are set");
			return TEE_ERROR_BAD_FORMAT;
		}

		/* Size of memory region as count of 4K pages */
		if (sp_dt_get_u32(fdt, subnode, "pages-count", &pages_cnt)) {
			EMSG("Mandatory field is missing: pages-count");
			return TEE_ERROR_BAD_FORMAT;
		}

		if (MUL_OVERFLOW(pages_cnt, SMALL_PAGE_SIZE, &size))
			return TEE_ERROR_OVERFLOW;

		/* Memory region attributes */
		if (sp_dt_get_u32(fdt, subnode, "attributes", &attributes)) {
			EMSG("Mandatory field is missing: attributes");
			return TEE_ERROR_BAD_FORMAT;
		}

		/* Check instruction and data access permissions */
		switch (attributes & SP_MANIFEST_ATTR_RWX) {
		case SP_MANIFEST_ATTR_RO:
			perm = TEE_MATTR_UR;
			break;
		case SP_MANIFEST_ATTR_RW:
			perm = TEE_MATTR_URW;
			break;
		case SP_MANIFEST_ATTR_RX:
			perm = TEE_MATTR_URX;
			break;
		default:
			EMSG("Invalid memory access permissions");
			return TEE_ERROR_BAD_FORMAT;
		}

		res = sp_dt_get_u32(fdt, subnode, "load-flags", &flags);
		if (res != TEE_SUCCESS && res != TEE_ERROR_ITEM_NOT_FOUND) {
			EMSG("Optional field with invalid value: flags");
			return TEE_ERROR_BAD_FORMAT;
		}

		/* Load relative regions must be secure */
		if (attributes & SP_MANIFEST_ATTR_NSEC) {
			EMSG("Invalid memory security attribute");
			return TEE_ERROR_BAD_FORMAT;
		}

		if (flags & SP_MANIFEST_FLAG_NOBITS) {
			/*
			 * NOBITS flag is set, which means that loaded binary
			 * doesn't contain this area, so it's need to be
			 * allocated.
			 */
			struct mobj *m = NULL;
			unsigned int idx = 0;

			mm = tee_mm_alloc(&tee_mm_sec_ddr, size);
			if (!mm)
				return TEE_ERROR_OUT_OF_MEMORY;

			base_addr = tee_mm_get_smem(mm);

			m = sp_mem_new_mobj(pages_cnt,
					    TEE_MATTR_MEM_TYPE_CACHED, true);
			if (!m) {
				res = TEE_ERROR_OUT_OF_MEMORY;
				goto err_mm_free;
			}

			res = sp_mem_add_pages(m, &idx, base_addr, pages_cnt);
			if (res) {
				mobj_put(m);
				goto err_mm_free;
			}

			res = vm_map(&ctx->uctx, &va, size, perm, 0, m, 0);
			mobj_put(m);
			if (res)
				goto err_mm_free;
		} else {
			/*
			 * If NOBITS is not present the memory area is already
			 * mapped and only need to set the correct permissions.
			 */
			res = vm_set_prot(&ctx->uctx, va, size, perm);
			if (res)
				return res;
		}
	}

	return TEE_SUCCESS;

err_mm_free:
	tee_mm_free(mm);
	return res;
}

/*
 * Map each device MMIO region described in the manifest into the SP and
 * rewrite the region's "base-address" in the fdt copy with the VA the
 * SP will actually see.
 */
static TEE_Result handle_fdt_dev_regions(struct sp_ctx *ctx, void *fdt)
{
	int node = 0;
	int subnode = 0;
	TEE_Result res = TEE_SUCCESS;
	const char *dt_device_match_table = {
		"arm,ffa-manifest-device-regions",
	};

	/*
	 * Device regions are optional in the SP manifest, it's not an error if
	 * we don't find any
	 */
	node = fdt_node_offset_by_compatible(fdt, 0, dt_device_match_table);
	if (node < 0)
		return TEE_SUCCESS;

	fdt_for_each_subnode(subnode, fdt, node) {
		uint64_t base_addr = 0;
		uint32_t pages_cnt = 0;
		uint32_t attributes = 0;
		struct mobj *m = NULL;
		bool is_secure = true;
		uint32_t perm = 0;
		vaddr_t va = 0;
		unsigned int idx = 0;

		/*
		 * Physical base address of a device MMIO region.
		 * Currently only physically contiguous region is supported.
		 */
		if (sp_dt_get_u64(fdt, subnode, "base-address", &base_addr)) {
			EMSG("Mandatory field is missing: base-address");
			return TEE_ERROR_BAD_FORMAT;
		}

		/* Total size of MMIO region as count of 4K pages */
		if (sp_dt_get_u32(fdt, subnode, "pages-count", &pages_cnt)) {
			EMSG("Mandatory field is missing: pages-count");
			return TEE_ERROR_BAD_FORMAT;
		}

		/* Data access, instruction access and security attributes */
		if (sp_dt_get_u32(fdt, subnode, "attributes", &attributes)) {
			EMSG("Mandatory field is missing: attributes");
			return TEE_ERROR_BAD_FORMAT;
		}

		/* Check instruction and data access permissions */
		switch (attributes & SP_MANIFEST_ATTR_RWX) {
		case SP_MANIFEST_ATTR_RO:
			perm = TEE_MATTR_UR;
			break;
		case SP_MANIFEST_ATTR_RW:
			perm = TEE_MATTR_URW;
			break;
		default:
			/* Executable device regions are rejected */
			EMSG("Invalid memory access permissions");
			return TEE_ERROR_BAD_FORMAT;
		}

		/*
		 * The SP is a secure endpoint, security attribute can be
		 * secure or non-secure
		 */
		if (attributes & SP_MANIFEST_ATTR_NSEC)
			is_secure = false;

		/* Memory attributes must be Device-nGnRnE */
		m = sp_mem_new_mobj(pages_cnt, TEE_MATTR_MEM_TYPE_STRONGLY_O,
				    is_secure);
		if (!m)
			return TEE_ERROR_OUT_OF_MEMORY;

		res = sp_mem_add_pages(m, &idx, (paddr_t)base_addr, pages_cnt);
		if (res) {
			mobj_put(m);
			return res;
		}

		res = vm_map(&ctx->uctx, &va, pages_cnt * SMALL_PAGE_SIZE,
			     perm, 0, m, 0);
		mobj_put(m);
		if (res)
			return res;

		/*
		 * Overwrite the device region's PA in the fdt with the VA. This
		 * fdt will be passed to the SP.
		 */
		res = fdt_setprop_u64(fdt, subnode, "base-address", va);

		/*
		 * Unmap the region if the overwrite failed since the SP won't
		 * be able to access it without knowing the VA.
		 */
		if (res) {
			vm_unmap(&ctx->uctx, va, pages_cnt * SMALL_PAGE_SIZE);
			return res;
		}
	}

	return TEE_SUCCESS;
}

/*
 * Give @new_endpoint_id to the already-loaded SP currently holding
 * @endpoint_id, provided that SP did not claim the ID in its own
 * manifest (which would mean two manifests declare the same "id").
 */
static TEE_Result swap_sp_endpoints(uint32_t endpoint_id,
				    uint32_t new_endpoint_id)
{
	struct sp_session *session = sp_get_session(endpoint_id);
	uint32_t manifest_endpoint_id = 0;

	/*
	 * We don't know in which order the SPs are loaded. The endpoint ID
	 * defined in the manifest could already be generated by
	 * new_session_id() and used by another SP. If this is the case, we swap
	 * the ID's of the two SPs. We also have to make sure that the ID's are
	 * not defined twice in the manifest.
	 */

	/* The endpoint ID was not assigned yet */
	if (!session)
		return TEE_SUCCESS;

	/*
	 * Read the manifest file from the SP who originally had the endpoint.
	 * We can safely swap the endpoint ID's if the manifest file doesn't
	 * have an endpoint ID defined.
	 */
	if (!sp_dt_get_u32(session->fdt, 0, "id", &manifest_endpoint_id)) {
		assert(manifest_endpoint_id == endpoint_id);
		EMSG("SP: Found duplicated endpoint ID %#"PRIx32, endpoint_id);
		return TEE_ERROR_ACCESS_CONFLICT;
	}

	session->endpoint_id = new_endpoint_id;

	return TEE_SUCCESS;
}

/*
 * Honor the optional "id" property in the manifest of session @s: if
 * present and valid, take that endpoint ID over the auto-generated one,
 * swapping with any SP that happens to hold it already.
 */
static TEE_Result read_manifest_endpoint_id(struct sp_session *s)
{
	uint32_t endpoint_id = 0;

	/*
	 * The endpoint ID can be optionally defined in the manifest file. We
	 * have to map the ID inside the manifest to the SP if it's defined.
	 * If not, the endpoint ID generated inside new_session_id() will be
	 * used.
	 */
	if (!sp_dt_get_u32(s->fdt, 0, "id", &endpoint_id)) {
		TEE_Result res = TEE_ERROR_GENERIC;

		if (!endpoint_id_is_valid(endpoint_id)) {
			EMSG("Invalid endpoint ID 0x%"PRIx32, endpoint_id);
			return TEE_ERROR_BAD_FORMAT;
		}

		res = swap_sp_endpoints(endpoint_id, s->endpoint_id);
		if (res)
			return res;

		DMSG("SP: endpoint ID (0x%"PRIx32") found in manifest",
		     endpoint_id);
		/* Assign the endpoint ID to the current SP */
		s->endpoint_id = endpoint_id;
	}
	return TEE_SUCCESS;
}

/*
 * Second pass over manifest memory regions: map all regions that are
 * not load-address-relative. A missing "base-address" means the region
 * must be allocated from secure memory (and therefore must be secure);
 * the resulting VA is written back into the fdt copy for the SP.
 */
static TEE_Result handle_fdt_mem_regions(struct sp_ctx *ctx, void *fdt)
{
	int node = 0;
	int subnode = 0;
	tee_mm_entry_t *mm = NULL;
	TEE_Result res = TEE_SUCCESS;

	/*
	 * Memory regions are optional in the SP manifest, it's not an error if
	 * we don't find any.
	 */
	node = fdt_node_offset_by_compatible(fdt, 0,
					     "arm,ffa-manifest-memory-regions");
	if (node < 0)
		return TEE_SUCCESS;

	fdt_for_each_subnode(subnode, fdt, node) {
		uint64_t load_rel_offset = 0;
		bool alloc_needed = false;
		uint32_t attributes = 0;
		uint64_t base_addr = 0;
		uint32_t pages_cnt = 0;
		bool is_secure = true;
		struct mobj *m = NULL;
		unsigned int idx = 0;
		uint32_t perm = 0;
		size_t size = 0;
		vaddr_t va = 0;

		mm = NULL;

		/* Load address relative offset of a memory region */
		if (!sp_dt_get_u64(fdt, subnode, "load-address-relative-offset",
				   &load_rel_offset)) {
			/*
			 * At this point the memory region is already mapped by
			 * handle_fdt_load_relative_mem_regions.
			 * Only need to set the base-address in the manifest and
			 * then skip the rest of the mapping process.
			 */
			va = ctx->uctx.load_addr + load_rel_offset;
			res = fdt_setprop_u64(fdt, subnode, "base-address", va);
			if (res)
				return res;

			continue;
		}

		/*
		 * Base address of a memory region.
		 * If not present, we have to allocate the specified memory.
		 * If present, this field could specify a PA or VA. Currently
		 * only a PA is supported.
		 */
		if (sp_dt_get_u64(fdt, subnode, "base-address", &base_addr))
			alloc_needed = true;

		/* Size of memory region as count of 4K pages */
		if (sp_dt_get_u32(fdt, subnode, "pages-count", &pages_cnt)) {
			EMSG("Mandatory field is missing: pages-count");
			return TEE_ERROR_BAD_FORMAT;
		}

		if (MUL_OVERFLOW(pages_cnt, SMALL_PAGE_SIZE, &size))
			return TEE_ERROR_OVERFLOW;

		/*
		 * Memory region attributes:
		 * - Instruction/data access permissions
		 * - Cacheability/shareability attributes
		 * - Security attributes
		 *
		 * Cacheability/shareability attributes can be ignored for now.
		 * OP-TEE only supports a single type for normal cached memory
		 * and currently there is no use case that would require to
		 * change this.
		 */
		if (sp_dt_get_u32(fdt, subnode, "attributes", &attributes)) {
			EMSG("Mandatory field is missing: attributes");
			return TEE_ERROR_BAD_FORMAT;
		}

		/* Check instruction and data access permissions */
		switch (attributes & SP_MANIFEST_ATTR_RWX) {
		case SP_MANIFEST_ATTR_RO:
			perm = TEE_MATTR_UR;
			break;
		case SP_MANIFEST_ATTR_RW:
			perm = TEE_MATTR_URW;
			break;
		case SP_MANIFEST_ATTR_RX:
			perm = TEE_MATTR_URX;
			break;
		default:
			EMSG("Invalid memory access permissions");
			return TEE_ERROR_BAD_FORMAT;
		}

		/*
		 * The SP is a secure endpoint, security attribute can be
		 * secure or non-secure.
		 * The SPMC cannot allocate non-secure memory, i.e. if the base
		 * address is missing this attribute must be secure.
		 */
		if (attributes & SP_MANIFEST_ATTR_NSEC) {
			if (alloc_needed) {
				EMSG("Invalid memory security attribute");
				return TEE_ERROR_BAD_FORMAT;
			}
			is_secure = false;
		}

		if (alloc_needed) {
			/* Base address is missing, we have to allocate */
			mm = tee_mm_alloc(&tee_mm_sec_ddr, size);
			if (!mm)
				return TEE_ERROR_OUT_OF_MEMORY;

			base_addr = tee_mm_get_smem(mm);
		}

		m = sp_mem_new_mobj(pages_cnt, TEE_MATTR_MEM_TYPE_CACHED,
				    is_secure);
		if (!m) {
			res = TEE_ERROR_OUT_OF_MEMORY;
			goto err_mm_free;
		}

		res = sp_mem_add_pages(m, &idx, base_addr, pages_cnt);
		if (res) {
			mobj_put(m);
			goto err_mm_free;
		}

		res = vm_map(&ctx->uctx, &va, size, perm, 0, m, 0);
		mobj_put(m);
		if (res)
			goto err_mm_free;

		/*
		 * Overwrite the memory region's base address in the fdt with
		 * the VA. This fdt will be passed to the SP.
		 * If the base-address field was not present in the original
		 * fdt, this function will create it. This doesn't cause issues
		 * since the necessary extra space has been allocated when
		 * opening the fdt.
		 */
		res = fdt_setprop_u64(fdt, subnode, "base-address", va);

		/*
		 * Unmap the region if the overwrite failed since the SP won't
		 * be able to access it without knowing the VA.
		 */
		if (res) {
			vm_unmap(&ctx->uctx, va, size);
			goto err_mm_free;
		}
	}

	return TEE_SUCCESS;

err_mm_free:
	tee_mm_free(mm);
	return res;
}

/*
 * Share the TPM event log with the SP: validate the manifest properties,
 * map a fresh buffer into the SP, copy the log into it and (continuing
 * past this view) patch the manifest with the mapping details.
 */
static TEE_Result handle_tpm_event_log(struct sp_ctx *ctx, void *fdt)
{
	uint32_t perm = TEE_MATTR_URW | TEE_MATTR_PRW;
	uint32_t dummy_size __maybe_unused = 0;
	TEE_Result res = TEE_SUCCESS;
	size_t page_count = 0;
	struct fobj *f = NULL;
	struct mobj *m = NULL;
	vaddr_t log_addr = 0;
	size_t log_size = 0;
	int node = 0;

	node = fdt_node_offset_by_compatible(fdt, 0, "arm,tpm_event_log");
	if (node < 0)
		return TEE_SUCCESS;

	/* Checking the existence and size of the event log properties */
	/*
	 * NOTE(review): sp_dt_get_u64() takes uint64_t * while log_addr is
	 * vaddr_t — confirm this builds cleanly on 32-bit configurations.
	 */
	if (sp_dt_get_u64(fdt, node, "tpm_event_log_addr", &log_addr)) {
		EMSG("tpm_event_log_addr not found or has invalid size");
		return TEE_ERROR_BAD_FORMAT;
	}

	if (sp_dt_get_u32(fdt, node, "tpm_event_log_size", &dummy_size)) {
		EMSG("tpm_event_log_size not found or has invalid size");
		return TEE_ERROR_BAD_FORMAT;
	}

	/* Validating event log */
	res = tpm_get_event_log_size(&log_size);
	if (res)
		return res;

	if (!log_size) {
		EMSG("Empty TPM event log was provided");
		return TEE_ERROR_ITEM_NOT_FOUND;
	}

	/* Allocating memory area for the event log to share with the SP */
	page_count = ROUNDUP_DIV(log_size, SMALL_PAGE_SIZE);

	f = fobj_sec_mem_alloc(page_count);
	m = mobj_with_fobj_alloc(f, NULL, TEE_MATTR_MEM_TYPE_TAGGED);
	fobj_put(f);
	if (!m)
		return TEE_ERROR_OUT_OF_MEMORY;

	res = vm_map(&ctx->uctx, &log_addr, log_size, perm, 0, m, 0);
	mobj_put(m);
	if (res)
		return res;

	/* Copy event log */
	res = tpm_get_event_log((void *)log_addr, &log_size);
	if (res)
		goto err_unmap;

	/* Setting event log details in the manifest */
	res =
fdt_setprop_u64(fdt, node, "tpm_event_log_addr", log_addr); 1346 if (res) 1347 goto err_unmap; 1348 1349 res = fdt_setprop_u32(fdt, node, "tpm_event_log_size", log_size); 1350 if (res) 1351 goto err_unmap; 1352 1353 return TEE_SUCCESS; 1354 1355 err_unmap: 1356 vm_unmap(&ctx->uctx, log_addr, log_size); 1357 1358 return res; 1359 } 1360 1361 /* 1362 * Note: this function is called only on the primary CPU. It assumes that the 1363 * features present on the primary CPU are available on all of the secondary 1364 * CPUs as well. 1365 */ 1366 static TEE_Result handle_hw_features(void *fdt) 1367 { 1368 uint32_t val __maybe_unused = 0; 1369 TEE_Result res = TEE_SUCCESS; 1370 int node = 0; 1371 1372 /* 1373 * HW feature descriptions are optional in the SP manifest, it's not an 1374 * error if we don't find any. 1375 */ 1376 node = fdt_node_offset_by_compatible(fdt, 0, "arm,hw-features"); 1377 if (node < 0) 1378 return TEE_SUCCESS; 1379 1380 /* Modify the crc32 property only if it's already present */ 1381 if (!sp_dt_get_u32(fdt, node, "crc32", &val)) { 1382 res = fdt_setprop_u32(fdt, node, "crc32", 1383 feat_crc32_implemented()); 1384 if (res) 1385 return res; 1386 } 1387 1388 return TEE_SUCCESS; 1389 } 1390 1391 static TEE_Result read_ns_interrupts_action(const void *fdt, 1392 struct sp_session *s) 1393 { 1394 TEE_Result res = TEE_ERROR_BAD_PARAMETERS; 1395 1396 res = sp_dt_get_u32(fdt, 0, "ns-interrupts-action", &s->ns_int_mode); 1397 1398 if (res) { 1399 EMSG("Mandatory property is missing: ns-interrupts-action"); 1400 return res; 1401 } 1402 1403 switch (s->ns_int_mode) { 1404 case SP_MANIFEST_NS_INT_QUEUED: 1405 case SP_MANIFEST_NS_INT_SIGNALED: 1406 /* OK */ 1407 break; 1408 1409 case SP_MANIFEST_NS_INT_MANAGED_EXIT: 1410 EMSG("Managed exit is not implemented"); 1411 return TEE_ERROR_NOT_IMPLEMENTED; 1412 1413 default: 1414 EMSG("Invalid ns-interrupts-action value: %"PRIu32, 1415 s->ns_int_mode); 1416 return TEE_ERROR_BAD_PARAMETERS; 1417 } 1418 1419 return 
TEE_SUCCESS; 1420 } 1421 1422 static TEE_Result read_ffa_version(const void *fdt, struct sp_session *s) 1423 { 1424 TEE_Result res = TEE_ERROR_BAD_PARAMETERS; 1425 uint32_t ffa_version = 0; 1426 1427 res = sp_dt_get_u32(fdt, 0, "ffa-version", &ffa_version); 1428 if (res) { 1429 EMSG("Mandatory property is missing: ffa-version"); 1430 return res; 1431 } 1432 1433 if (ffa_version != FFA_VERSION_1_0 && ffa_version != FFA_VERSION_1_1) { 1434 EMSG("Invalid FF-A version value: 0x%08"PRIx32, ffa_version); 1435 return TEE_ERROR_BAD_PARAMETERS; 1436 } 1437 1438 s->rxtx.ffa_vers = ffa_version; 1439 1440 return TEE_SUCCESS; 1441 } 1442 1443 static TEE_Result read_sp_exec_state(const void *fdt, struct sp_session *s) 1444 { 1445 TEE_Result res = TEE_ERROR_BAD_PARAMETERS; 1446 uint32_t exec_state = 0; 1447 1448 res = sp_dt_get_u32(fdt, 0, "execution-state", &exec_state); 1449 if (res) { 1450 EMSG("Mandatory property is missing: execution-state"); 1451 return res; 1452 } 1453 1454 /* Currently only AArch64 SPs are supported */ 1455 if (exec_state == SP_MANIFEST_EXEC_STATE_AARCH64) { 1456 s->props |= FFA_PART_PROP_AARCH64_STATE; 1457 } else { 1458 EMSG("Invalid execution-state value: %"PRIu32, exec_state); 1459 return TEE_ERROR_BAD_PARAMETERS; 1460 } 1461 1462 return TEE_SUCCESS; 1463 } 1464 1465 static TEE_Result read_sp_msg_types(const void *fdt, struct sp_session *s) 1466 { 1467 TEE_Result res = TEE_ERROR_BAD_PARAMETERS; 1468 uint32_t msg_method = 0; 1469 1470 res = sp_dt_get_u32(fdt, 0, "messaging-method", &msg_method); 1471 if (res) { 1472 EMSG("Mandatory property is missing: messaging-method"); 1473 return res; 1474 } 1475 1476 if (msg_method & SP_MANIFEST_DIRECT_REQ_RECEIVE) 1477 s->props |= FFA_PART_PROP_DIRECT_REQ_RECV; 1478 1479 if (msg_method & SP_MANIFEST_DIRECT_REQ_SEND) 1480 s->props |= FFA_PART_PROP_DIRECT_REQ_SEND; 1481 1482 if (msg_method & SP_MANIFEST_INDIRECT_REQ) 1483 IMSG("Indirect messaging is not supported"); 1484 1485 return TEE_SUCCESS; 1486 } 1487 1488 
/*
 * Create an SP session for the binary identified by bin_uuid using its
 * manifest fdt: reads the FF-A UUID and optional boot-order, opens the
 * session and parses the mandatory manifest properties into it.
 *
 * @bin_uuid: UUID of the SP image in the TS store
 * @fdt:      SP manifest device tree (kept referenced in sess->fdt)
 */
static TEE_Result sp_init_uuid(const TEE_UUID *bin_uuid, const void * const fdt)
{
	TEE_Result res = TEE_SUCCESS;
	struct sp_session *sess = NULL;
	TEE_UUID ffa_uuid = {};
	uint16_t boot_order = 0;
	uint32_t boot_order_arg = 0;

	res = fdt_get_uuid(fdt, &ffa_uuid);
	if (res)
		return res;

	res = sp_dt_get_u16(fdt, 0, "boot-order", &boot_order);
	if (res == TEE_SUCCESS) {
		boot_order_arg = boot_order;
	} else if (res == TEE_ERROR_ITEM_NOT_FOUND) {
		/* No boot-order: sort after all SPs that specify one */
		boot_order_arg = UINT32_MAX;
	} else {
		EMSG("Failed reading boot-order property err:%#"PRIx32, res);
		return res;
	}

	res = sp_open_session(&sess,
			      &open_sp_sessions,
			      &ffa_uuid, bin_uuid, boot_order_arg, fdt);
	if (res)
		return res;

	sess->fdt = fdt;

	res = read_manifest_endpoint_id(sess);
	if (res)
		return res;
	DMSG("endpoint is 0x%"PRIx16, sess->endpoint_id);

	res = read_ns_interrupts_action(fdt, sess);
	if (res)
		return res;

	res = read_ffa_version(fdt, sess);
	if (res)
		return res;

	res = read_sp_exec_state(fdt, sess);
	if (res)
		return res;

	res = read_sp_msg_types(fdt, sess);
	if (res)
		return res;

	return TEE_SUCCESS;
}

/*
 * Perform the first entry into an SP: map the manifest-described regions
 * into its VA space, build the FF-A boot info, enter the SP for its
 * initialization run and handle the FF-A message it returns with.
 * Marks the session initialized only when all of that succeeded.
 */
static TEE_Result sp_first_run(struct sp_session *sess)
{
	TEE_Result res = TEE_SUCCESS;
	struct thread_smc_args args = { };
	struct sp_ctx *ctx = NULL;
	vaddr_t boot_info_va = 0;
	size_t boot_info_size = 0;
	void *fdt_copy = NULL;
	size_t fdt_size = 0;

	ctx = to_sp_ctx(sess->ts_sess.ctx);
	ts_push_current_session(&sess->ts_sess);
	sess->is_initialized = false;

	/*
	 * Load relative memory regions must be handled before doing any other
	 * mapping to prevent conflicts in the VA space.
	 */
	res = handle_fdt_load_relative_mem_regions(ctx, sess->fdt);
	if (res) {
		/* Nothing mapped yet, so skip the unmapping in "out" */
		ts_pop_current_session();
		return res;
	}

	res = copy_and_map_fdt(ctx, sess->fdt, &fdt_copy, &fdt_size);
	if (res)
		goto out;

	res = handle_fdt_dev_regions(ctx, fdt_copy);
	if (res)
		goto out;

	res = handle_fdt_mem_regions(ctx, fdt_copy);
	if (res)
		goto out;

	if (IS_ENABLED(CFG_CORE_TPM_EVENT_LOG)) {
		res = handle_tpm_event_log(ctx, fdt_copy);
		if (res)
			goto out;
	}

	res = handle_hw_features(fdt_copy);
	if (res)
		goto out;

	res = create_and_map_boot_info(ctx, fdt_copy, &args, &boot_info_va,
				       &boot_info_size, sess->rxtx.ffa_vers);
	if (res)
		goto out;

	/* sp_enter()/the message handler manage the session themselves */
	ts_pop_current_session();

	res = sp_enter(&args, sess);
	if (res) {
		ts_push_current_session(&sess->ts_sess);
		goto out;
	}

	/* Handle the FF-A call the SP made when returning from init */
	spmc_sp_msg_handler(&args, sess);

	ts_push_current_session(&sess->ts_sess);
	sess->is_initialized = true;

out:
	/* Free the boot info page from the SP memory */
	vm_unmap(&ctx->uctx, boot_info_va, boot_info_size);
	vm_unmap(&ctx->uctx, (vaddr_t)fdt_copy, fdt_size);
	ts_pop_current_session();

	return res;
}

/*
 * Enter the SP: marshal the FF-A arguments (a0-a7) into the SP's saved
 * x0-x7, run it via the ts_ops enter_invoke_cmd hook, then copy the SP's
 * resulting x0-x7 back into args for the caller (typically the SPMC message
 * dispatch).
 */
TEE_Result sp_enter(struct thread_smc_args *args, struct sp_session *sp)
{
	TEE_Result res = TEE_SUCCESS;
	struct sp_ctx *ctx = to_sp_ctx(sp->ts_sess.ctx);

	ctx->sp_regs.x[0] = args->a0;
	ctx->sp_regs.x[1] = args->a1;
	ctx->sp_regs.x[2] = args->a2;
	ctx->sp_regs.x[3] = args->a3;
	ctx->sp_regs.x[4] = args->a4;
	ctx->sp_regs.x[5] = args->a5;
	ctx->sp_regs.x[6] = args->a6;
	ctx->sp_regs.x[7] = args->a7;

	res = sp->ts_sess.ctx->ops->enter_invoke_cmd(&sp->ts_sess, 0);

	args->a0 = ctx->sp_regs.x[0];
	args->a1 = ctx->sp_regs.x[1];
	args->a2 = ctx->sp_regs.x[2];
	args->a3 = ctx->sp_regs.x[3];
	args->a4 = ctx->sp_regs.x[4];
	args->a5 = ctx->sp_regs.x[5];
	args->a6 = ctx->sp_regs.x[6];
	args->a7 = ctx->sp_regs.x[7];

	return res;
}

/*
 * According to FF-A v1.1 section 8.3.1.4 if a caller requires less permissive
 * active on NS interrupt than the callee, the callee must inherit the caller's
 * configuration.
 * Each SP's own NS action setting is stored in ns_int_mode. The effective
 * action will be MIN([self action], [caller's action]) which is stored in the
 * ns_int_mode_inherited field.
 */
static void sp_cpsr_configure_foreign_interrupts(struct sp_session *s,
						 struct ts_session *caller,
						 uint64_t *cpsr)
{
	if (caller) {
		struct sp_session *caller_sp = to_sp_session(caller);

		s->ns_int_mode_inherited = MIN(caller_sp->ns_int_mode_inherited,
					       s->ns_int_mode);
	} else {
		s->ns_int_mode_inherited = s->ns_int_mode;
	}

	/* Queued mode masks foreign interrupts while the SP runs */
	if (s->ns_int_mode_inherited == SP_MANIFEST_NS_INT_QUEUED)
		*cpsr |= SHIFT_U32(THREAD_EXCP_FOREIGN_INTR,
				   ARM32_CPSR_F_SHIFT);
	else
		*cpsr &= ~SHIFT_U32(THREAD_EXCP_FOREIGN_INTR,
				    ARM32_CPSR_F_SHIFT);
}

/*
 * ts_ops hook that actually runs the SP in user mode with its saved
 * register file, after configuring NS interrupt masking and retargeting
 * RPC info to this endpoint/thread. A panicking SP is marked sp_dead and
 * reported as TEE_ERROR_TARGET_DEAD.
 */
static TEE_Result sp_enter_invoke_cmd(struct ts_session *s,
				      uint32_t cmd __unused)
{
	struct sp_ctx *ctx = to_sp_ctx(s->ctx);
	TEE_Result res = TEE_SUCCESS;
	uint32_t exceptions = 0;
	struct sp_session *sp_s = to_sp_session(s);
	struct ts_session *sess = NULL;
	struct thread_ctx_regs *sp_regs = NULL;
	uint32_t thread_id = THREAD_ID_INVALID;
	struct ts_session *caller = NULL;
	uint32_t rpc_target_info = 0;
	uint32_t panicked = false;
	uint32_t panic_code = 0;

	sp_regs = &ctx->sp_regs;
	ts_push_current_session(s);

	exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);

	/* Enable/disable foreign interrupts in CPSR/SPSR */
	caller = ts_get_calling_session();
	sp_cpsr_configure_foreign_interrupts(sp_s, caller, &sp_regs->cpsr);

	/*
	 * Store endpoint ID and thread ID in rpc_target_info. This will be used
	 * as w1 in FFA_INTERRUPT in case of a foreign interrupt.
	 */
	rpc_target_info = thread_get_tsd()->rpc_target_info;
	thread_id = thread_get_id();
	assert(thread_id <= UINT16_MAX);
	thread_get_tsd()->rpc_target_info =
		FFA_TARGET_INFO_SET(sp_s->endpoint_id, thread_id);

	__thread_enter_user_mode(sp_regs, &panicked, &panic_code);

	/* Restore rpc_target_info */
	thread_get_tsd()->rpc_target_info = rpc_target_info;

	thread_unmask_exceptions(exceptions);

	thread_user_clear_vfp(&ctx->uctx);

	if (panicked) {
		DMSG("SP panicked with code %#"PRIx32, panic_code);
		abort_print_current_ts();

		sess = ts_pop_current_session();
		cpu_spin_lock(&sp_s->spinlock);
		sp_s->state = sp_dead;
		cpu_spin_unlock(&sp_s->spinlock);

		return TEE_ERROR_TARGET_DEAD;
	}

	sess = ts_pop_current_session();
	assert(sess == s);

	return res;
}

/* We currently don't support 32 bits */
#ifdef ARM64
/*
 * Save the SP's full register file (x0-x30, PC, SP_EL0) from the SVC trap
 * frame into the session's saved context.
 */
static void sp_svc_store_registers(struct thread_scall_regs *regs,
				   struct thread_ctx_regs *sp_regs)
{
	COMPILE_TIME_ASSERT(sizeof(sp_regs->x[0]) == sizeof(regs->x0));
	memcpy(sp_regs->x, &regs->x0, 31 * sizeof(regs->x0));
	sp_regs->pc = regs->elr;
	sp_regs->sp = regs->sp_el0;
}
#endif

/*
 * ts_ops SVC handler for SPs: snapshot the SP's registers and arrange a
 * clean (non-panic) return to S-EL1 so the FF-A call can be dispatched.
 */
static bool sp_handle_scall(struct thread_scall_regs *regs)
{
	struct ts_session *ts = ts_get_current_session();
	struct sp_ctx *uctx = to_sp_ctx(ts->ctx);
	struct sp_session *s = uctx->open_session;

	assert(s);

	sp_svc_store_registers(regs, &uctx->sp_regs);

	regs->x0 = 0;
	regs->x1 = 0; /* panic */
	regs->x2 = 0; /* panic code */

	/*
	 * All the registers of the SP are saved in the SP session by the SVC
	 * handler.
	 * We always return to S-El1 after handling the SVC. We will continue
	 * in sp_enter_invoke_cmd() (return from __thread_enter_user_mode).
	 * The sp_enter() function copies the FF-A parameters (a0-a7) from the
	 * saved registers to the thread_smc_args. The thread_smc_args object is
	 * afterward used by the spmc_sp_msg_handler() to handle the
	 * FF-A message send by the SP.
	 */
	return false;
}

/*
 * ts_ops dump hook: prefer the ldelf-provided dump entry; fall back to
 * printing the VA mappings when ldelf cannot do it.
 */
static void sp_dump_state(struct ts_ctx *ctx)
{
	struct sp_ctx *utc = to_sp_ctx(ctx);

	if (utc->uctx.dump_entry_func) {
		TEE_Result res = ldelf_dump_state(&utc->uctx);

		if (!res || res == TEE_ERROR_TARGET_DEAD)
			return;
	}

	user_mode_ctx_print_mappings(&utc->uctx);
}

/* ts_ops vtable identifying a context as an SP (see is_sp_ctx()) */
static const struct ts_ops sp_ops = {
	.enter_invoke_cmd = sp_enter_invoke_cmd,
	.handle_scall = sp_handle_scall,
	.dump_state = sp_dump_state,
};

/*
 * Parse an SP package (header + manifest + image) loaded from FIP at
 * sp_pkg_pa, validate its header and sizes, and append a fip_sp entry
 * referencing the in-place manifest and image to fip_sp_list.
 *
 * The header page is mapped first to learn the package size, then the whole
 * package is mapped before the image/fdt pointers are taken.
 */
static TEE_Result process_sp_pkg(uint64_t sp_pkg_pa, TEE_UUID *sp_uuid)
{
	enum teecore_memtypes mtype = MEM_AREA_TA_RAM;
	struct sp_pkg_header *sp_pkg_hdr = NULL;
	struct fip_sp *sp = NULL;
	uint64_t sp_fdt_end = 0;
	size_t sp_pkg_size = 0;
	vaddr_t sp_pkg_va = 0;

	/* Process the first page which contains the SP package header */
	sp_pkg_va = (vaddr_t)phys_to_virt(sp_pkg_pa, mtype, SMALL_PAGE_SIZE);
	if (!sp_pkg_va) {
		EMSG("Cannot find mapping for PA %#" PRIxPA, sp_pkg_pa);
		return TEE_ERROR_GENERIC;
	}

	sp_pkg_hdr = (struct sp_pkg_header *)sp_pkg_va;

	if (sp_pkg_hdr->magic != SP_PKG_HEADER_MAGIC) {
		EMSG("Invalid SP package magic");
		return TEE_ERROR_BAD_FORMAT;
	}

	if (sp_pkg_hdr->version != SP_PKG_HEADER_VERSION_V1 &&
	    sp_pkg_hdr->version != SP_PKG_HEADER_VERSION_V2) {
		EMSG("Invalid SP header version");
		return TEE_ERROR_BAD_FORMAT;
	}

	/* The image is last in the package, so its end is the package size */
	if (ADD_OVERFLOW(sp_pkg_hdr->img_offset, sp_pkg_hdr->img_size,
			 &sp_pkg_size)) {
		EMSG("Invalid SP package size");
		return TEE_ERROR_BAD_FORMAT;
	}

	/* The manifest must fully precede the image */
	if (ADD_OVERFLOW(sp_pkg_hdr->pm_offset, sp_pkg_hdr->pm_size,
			 &sp_fdt_end) || sp_fdt_end > sp_pkg_hdr->img_offset) {
		EMSG("Invalid SP manifest size");
		return TEE_ERROR_BAD_FORMAT;
	}

	/* Process the whole SP package now that the size is known */
	sp_pkg_va = (vaddr_t)phys_to_virt(sp_pkg_pa, mtype, sp_pkg_size);
	if (!sp_pkg_va) {
		EMSG("Cannot find mapping for PA %#" PRIxPA, sp_pkg_pa);
		return TEE_ERROR_GENERIC;
	}

	sp_pkg_hdr = (struct sp_pkg_header *)sp_pkg_va;

	sp = calloc(1, sizeof(struct fip_sp));
	if (!sp)
		return TEE_ERROR_OUT_OF_MEMORY;

	memcpy(&sp->sp_img.image.uuid, sp_uuid, sizeof(*sp_uuid));
	sp->sp_img.image.ts = (uint8_t *)(sp_pkg_va + sp_pkg_hdr->img_offset);
	sp->sp_img.image.size = sp_pkg_hdr->img_size;
	sp->sp_img.image.flags = 0;
	sp->sp_img.fdt = (uint8_t *)(sp_pkg_va + sp_pkg_hdr->pm_offset);

	STAILQ_INSERT_TAIL(&fip_sp_list, sp, link);

	return TEE_SUCCESS;
}

/*
 * Walk the SPMC manifest for "arm,sp_pkg" nodes describing SP packages
 * loaded from FIP (by BL2) and process each one into fip_sp_list.
 */
static TEE_Result fip_sp_init_all(void)
{
	TEE_Result res = TEE_SUCCESS;
	uint64_t sp_pkg_addr = 0;
	const void *fdt = NULL;
	TEE_UUID sp_uuid = { };
	int sp_pkgs_node = 0;
	int subnode = 0;
	int root = 0;

	fdt = get_manifest_dt();
	if (!fdt) {
		EMSG("No SPMC manifest found");
		return TEE_ERROR_GENERIC;
	}

	root = fdt_path_offset(fdt, "/");
	if (root < 0)
		return TEE_ERROR_BAD_FORMAT;

	if (fdt_node_check_compatible(fdt, root, "arm,ffa-core-manifest-1.0"))
		return TEE_ERROR_BAD_FORMAT;

	/* SP packages are optional, it's not an error if we don't find any */
	sp_pkgs_node = fdt_node_offset_by_compatible(fdt, root, "arm,sp_pkg");
	if (sp_pkgs_node < 0)
		return TEE_SUCCESS;

	fdt_for_each_subnode(subnode, fdt, sp_pkgs_node) {
		res = sp_dt_get_u64(fdt, subnode, "load-address", &sp_pkg_addr);
		if (res) {
			EMSG("Invalid FIP SP load address");
			return res;
		}

		res = sp_dt_get_uuid(fdt, subnode, "uuid", &sp_uuid);
		if (res) {
			EMSG("Invalid FIP SP uuid");
			return res;
		}

		res = process_sp_pkg(sp_pkg_addr, &sp_uuid);
		if (res) {
			EMSG("Invalid FIP SP package");
			return res;
		}
	}

	return TEE_SUCCESS;
}

/* Release all fip_sp list entries (the package memory itself stays mapped) */
static void fip_sp_deinit_all(void)
{
	while (!STAILQ_EMPTY(&fip_sp_list)) {
		struct fip_sp *sp = STAILQ_FIRST(&fip_sp_list);

		STAILQ_REMOVE_HEAD(&fip_sp_list, link);
		free(sp);
	}
}

/*
 * Boot-time (boot_final) initialization of all SPs: create sessions for
 * embedded and FIP-packaged SPs, warn about duplicated boot-order values,
 * then run each SP once in session order. Failures panic unless
 * CFG_SP_SKIP_FAILED is enabled.
 */
static TEE_Result sp_init_all(void)
{
	TEE_Result res = TEE_SUCCESS;
	const struct sp_image *sp = NULL;
	const struct fip_sp *fip_sp = NULL;
	char __maybe_unused msg[60] = { '\0', };
	struct sp_session *s = NULL;
	struct sp_session *prev_sp = NULL;

	for_each_secure_partition(sp) {
		if (sp->image.uncompressed_size)
			snprintf(msg, sizeof(msg),
				 " (compressed, uncompressed %u)",
				 sp->image.uncompressed_size);
		else
			msg[0] = '\0';
		DMSG("SP %pUl size %u%s", (void *)&sp->image.uuid,
		     sp->image.size, msg);

		res = sp_init_uuid(&sp->image.uuid, sp->fdt);

		if (res != TEE_SUCCESS) {
			EMSG("Failed initializing SP(%pUl) err:%#"PRIx32,
			     &sp->image.uuid, res);
			if (!IS_ENABLED(CFG_SP_SKIP_FAILED))
				panic();
		}
	}

	res = fip_sp_init_all();
	if (res)
		panic("Failed initializing FIP SPs");

	for_each_fip_sp(fip_sp) {
		sp = &fip_sp->sp_img;

		DMSG("SP %pUl size %u", (void *)&sp->image.uuid,
		     sp->image.size);

		res = sp_init_uuid(&sp->image.uuid, sp->fdt);

		if (res != TEE_SUCCESS) {
			EMSG("Failed initializing SP(%pUl) err:%#"PRIx32,
			     &sp->image.uuid, res);
			if (!IS_ENABLED(CFG_SP_SKIP_FAILED))
				panic();
		}
	}

	/*
	 * At this point all FIP SPs are loaded by ldelf or by the raw binary SP
	 * loader, so the original images (loaded by BL2) are not needed anymore
	 */
	fip_sp_deinit_all();

	/*
	 * Now that all SPs are loaded, check through the boot order values,
	 * and warn in case there is a non-unique value.
	 */
	TAILQ_FOREACH(s, &open_sp_sessions, link) {
		/* User specified boot-order values are uint16 */
		if (s->boot_order > UINT16_MAX)
			break;

		/* The list is sorted, so duplicates are adjacent */
		if (prev_sp && prev_sp->boot_order == s->boot_order)
			IMSG("WARNING: duplicated boot-order (%pUl vs %pUl)",
			     &prev_sp->ts_sess.ctx->uuid,
			     &s->ts_sess.ctx->uuid);

		prev_sp = s;
	}

	/* Continue the initialization and run the SP */
	TAILQ_FOREACH(s, &open_sp_sessions, link) {
		DMSG("Starting SP: 0x%"PRIx16, s->endpoint_id);

		res = sp_first_run(s);
		if (res != TEE_SUCCESS) {
			EMSG("Failed starting SP(0x%"PRIx16") err:%#"PRIx32,
			     s->endpoint_id, res);
			if (!IS_ENABLED(CFG_SP_SKIP_FAILED))
				panic();
		}
	}

	return TEE_SUCCESS;
}

boot_final(sp_init_all);

/* ts_store open hook: look the SP up among embedded and FIP SP images */
static TEE_Result secure_partition_open(const TEE_UUID *uuid,
					struct ts_store_handle **h)
{
	return emb_ts_open(uuid, h, find_secure_partition);
}

REGISTER_SP_STORE(2) = {
	.description = "SP store",
	.open = secure_partition_open,
	.get_size = emb_ts_get_size,
	.get_tag = emb_ts_get_tag,
	.read = emb_ts_read,
	.close = emb_ts_close,
};