1 // SPDX-License-Identifier: BSD-2-Clause 2 /* 3 * Copyright (c) 2020-2024, Arm Limited. 4 */ 5 #include <crypto/crypto.h> 6 #include <initcall.h> 7 #include <kernel/boot.h> 8 #include <kernel/embedded_ts.h> 9 #include <kernel/ldelf_loader.h> 10 #include <kernel/secure_partition.h> 11 #include <kernel/spinlock.h> 12 #include <kernel/spmc_sp_handler.h> 13 #include <kernel/thread_private.h> 14 #include <kernel/thread_spmc.h> 15 #include <kernel/tpm.h> 16 #include <kernel/ts_store.h> 17 #include <ldelf.h> 18 #include <libfdt.h> 19 #include <mm/core_mmu.h> 20 #include <mm/fobj.h> 21 #include <mm/mobj.h> 22 #include <mm/vm.h> 23 #include <optee_ffa.h> 24 #include <stdio.h> 25 #include <string.h> 26 #include <tee_api_types.h> 27 #include <tee/uuid.h> 28 #include <trace.h> 29 #include <types_ext.h> 30 #include <utee_defines.h> 31 #include <util.h> 32 #include <zlib.h> 33 34 #define BOUNCE_BUFFER_SIZE 4096 35 36 #define SP_MANIFEST_ATTR_READ BIT(0) 37 #define SP_MANIFEST_ATTR_WRITE BIT(1) 38 #define SP_MANIFEST_ATTR_EXEC BIT(2) 39 #define SP_MANIFEST_ATTR_NSEC BIT(3) 40 #define SP_MANIFEST_ATTR_GP BIT(4) 41 42 #define SP_MANIFEST_ATTR_RO (SP_MANIFEST_ATTR_READ) 43 #define SP_MANIFEST_ATTR_RW (SP_MANIFEST_ATTR_READ | \ 44 SP_MANIFEST_ATTR_WRITE) 45 #define SP_MANIFEST_ATTR_RX (SP_MANIFEST_ATTR_READ | \ 46 SP_MANIFEST_ATTR_EXEC) 47 #define SP_MANIFEST_ATTR_RWX (SP_MANIFEST_ATTR_READ | \ 48 SP_MANIFEST_ATTR_WRITE | \ 49 SP_MANIFEST_ATTR_EXEC) 50 51 #define SP_MANIFEST_FLAG_NOBITS BIT(0) 52 53 #define SP_MANIFEST_NS_INT_QUEUED (0x0) 54 #define SP_MANIFEST_NS_INT_MANAGED_EXIT (0x1) 55 #define SP_MANIFEST_NS_INT_SIGNALED (0x2) 56 57 #define SP_MANIFEST_EXEC_STATE_AARCH64 (0x0) 58 #define SP_MANIFEST_EXEC_STATE_AARCH32 (0x1) 59 60 #define SP_MANIFEST_DIRECT_REQ_RECEIVE BIT(0) 61 #define SP_MANIFEST_DIRECT_REQ_SEND BIT(1) 62 #define SP_MANIFEST_INDIRECT_REQ BIT(2) 63 64 #define SP_MANIFEST_VM_CREATED_MSG BIT(0) 65 #define SP_MANIFEST_VM_DESTROYED_MSG BIT(1) 66 67 #define SP_PKG_HEADER_MAGIC (0x474b5053) 68 #define SP_PKG_HEADER_VERSION_V1 (0x1) 69 #define SP_PKG_HEADER_VERSION_V2 (0x2) 70 71 struct sp_pkg_header { 72 uint32_t magic; 73 uint32_t version; 74 uint32_t pm_offset; 75 uint32_t pm_size; 76 uint32_t img_offset; 77 uint32_t img_size; 78 }; 79 80 struct fip_sp_head fip_sp_list = STAILQ_HEAD_INITIALIZER(fip_sp_list); 81 82 static const struct ts_ops sp_ops; 83 84 /* List that holds all of the loaded SP's */ 85 static struct sp_sessions_head open_sp_sessions = 86 TAILQ_HEAD_INITIALIZER(open_sp_sessions); 87 88 static const struct embedded_ts *find_secure_partition(const TEE_UUID *uuid) 89 { 90 const struct sp_image *sp = NULL; 91 const struct fip_sp *fip_sp = NULL; 92 93 for_each_secure_partition(sp) { 94 if (!memcmp(&sp->image.uuid, uuid, sizeof(*uuid))) 95 return &sp->image; 96 } 97 98 for_each_fip_sp(fip_sp) { 99 if (!memcmp(&fip_sp->sp_img.image.uuid, uuid, sizeof(*uuid))) 100 return &fip_sp->sp_img.image; 101 } 102 103 return NULL; 104 } 105 106 bool is_sp_ctx(struct ts_ctx *ctx) 107 { 108 return ctx && (ctx->ops == &sp_ops); 109 } 110 111 static void set_sp_ctx_ops(struct ts_ctx *ctx) 112 { 113 ctx->ops = &sp_ops; 114 } 115 116 struct sp_session *sp_get_session(uint32_t session_id) 117 { 118 struct sp_session *s = NULL; 119 120 TAILQ_FOREACH(s, &open_sp_sessions, link) { 121 if (s->endpoint_id == session_id) 122 return s; 123 } 124 125 return NULL; 126 } 127 128 TEE_Result sp_partition_info_get(uint32_t ffa_vers, void *buf, size_t buf_size, 129 const TEE_UUID *ffa_uuid, size_t *elem_count, 130 bool count_only) 131 { 132 TEE_Result res = TEE_SUCCESS; 133 struct sp_session *s = NULL; 134 135 TAILQ_FOREACH(s, &open_sp_sessions, link) { 136 if (ffa_uuid && 137 memcmp(&s->ffa_uuid, ffa_uuid, sizeof(*ffa_uuid))) 138 continue; 139 140 if (s->state == sp_dead) 141 continue; 142 if (!count_only && !res) { 143 uint32_t uuid_words[4] = { 0 }; 144 145 tee_uuid_to_octets((uint8_t *)uuid_words, &s->ffa_uuid); 146 res = spmc_fill_partition_entry(ffa_vers, buf, buf_size, 147 *elem_count, 148 s->endpoint_id, 1, 149 s->props, uuid_words); 150 } 151 *elem_count += 1; 152 } 153 154 return res; 155 } 156 157 bool sp_has_exclusive_access(struct sp_mem_map_region *mem, 158 struct user_mode_ctx *uctx) 159 { 160 /* 161 * Check that we have access to the region if it is supposed to be 162 * mapped to the current context. 163 */ 164 if (uctx) { 165 struct vm_region *region = NULL; 166 167 /* Make sure that each mobj belongs to the SP */ 168 TAILQ_FOREACH(region, &uctx->vm_info.regions, link) { 169 if (region->mobj == mem->mobj) 170 break; 171 } 172 173 if (!region) 174 return false; 175 } 176 177 /* Check that it is not shared with another SP */ 178 return !sp_mem_is_shared(mem); 179 } 180 181 static bool endpoint_id_is_valid(uint32_t id) 182 { 183 /* 184 * These IDs are assigned at the SPMC init so already have valid values 185 * by the time this function gets first called 186 */ 187 return id != spmd_id && id != spmc_id && id != optee_endpoint_id && 188 id >= FFA_SWD_ID_MIN && id <= FFA_SWD_ID_MAX; 189 } 190 191 static TEE_Result new_session_id(uint16_t *endpoint_id) 192 { 193 uint32_t id = 0; 194 195 /* Find the first available endpoint id */ 196 for (id = FFA_SWD_ID_MIN; id <= FFA_SWD_ID_MAX; id++) { 197 if (endpoint_id_is_valid(id) && !sp_get_session(id)) { 198 *endpoint_id = id; 199 return TEE_SUCCESS; 200 } 201 } 202 203 return TEE_ERROR_BAD_FORMAT; 204 } 205 206 static TEE_Result sp_create_ctx(const TEE_UUID *bin_uuid, struct sp_session *s) 207 { 208 TEE_Result res = TEE_SUCCESS; 209 struct sp_ctx *spc = NULL; 210 211 /* Register context */ 212 spc = calloc(1, sizeof(struct sp_ctx)); 213 if (!spc) 214 return TEE_ERROR_OUT_OF_MEMORY; 215 216 spc->open_session = s; 217 s->ts_sess.ctx = &spc->ts_ctx; 218 spc->ts_ctx.uuid = *bin_uuid; 219 220 res = vm_info_init(&spc->uctx, &spc->ts_ctx); 221 if (res) 222 goto err; 223 224 set_sp_ctx_ops(&spc->ts_ctx); 225 226 return TEE_SUCCESS; 227 228 err: 229 free(spc); 230 return res; 231 } 232 233 /* 234 * Insert a new sp_session to the sessions list, so that it is ordered 235 * by boot_order. 236 */ 237 static void insert_session_ordered(struct sp_sessions_head *open_sessions, 238 struct sp_session *session) 239 { 240 struct sp_session *s = NULL; 241 242 if (!open_sessions || !session) 243 return; 244 245 TAILQ_FOREACH(s, &open_sp_sessions, link) { 246 if (s->boot_order > session->boot_order) 247 break; 248 } 249 250 if (!s) 251 TAILQ_INSERT_TAIL(open_sessions, session, link); 252 else 253 TAILQ_INSERT_BEFORE(s, session, link); 254 } 255 256 static TEE_Result sp_create_session(struct sp_sessions_head *open_sessions, 257 const TEE_UUID *bin_uuid, 258 const uint32_t boot_order, 259 struct sp_session **sess) 260 { 261 TEE_Result res = TEE_SUCCESS; 262 struct sp_session *s = calloc(1, sizeof(struct sp_session)); 263 264 if (!s) 265 return TEE_ERROR_OUT_OF_MEMORY; 266 267 s->boot_order = boot_order; 268 269 /* Other properties are filled later, based on the SP's manifest */ 270 s->props = FFA_PART_PROP_IS_PE_ID; 271 272 res = new_session_id(&s->endpoint_id); 273 if (res) 274 goto err; 275 276 DMSG("Loading Secure Partition %pUl", (void *)bin_uuid); 277 res = sp_create_ctx(bin_uuid, s); 278 if (res) 279 goto err; 280 281 insert_session_ordered(open_sessions, s); 282 *sess = s; 283 return TEE_SUCCESS; 284 285 err: 286 free(s); 287 return res; 288 } 289 290 static TEE_Result sp_init_set_registers(struct sp_ctx *ctx) 291 { 292 struct thread_ctx_regs *sp_regs = &ctx->sp_regs; 293 294 memset(sp_regs, 0, sizeof(*sp_regs)); 295 sp_regs->sp = ctx->uctx.stack_ptr; 296 sp_regs->pc = ctx->uctx.entry_func; 297 298 return TEE_SUCCESS; 299 } 300 301 TEE_Result sp_map_shared(struct sp_session *s, 302 struct sp_mem_receiver *receiver, 303 struct sp_mem *smem, 304 uint64_t *va) 305 { 306 TEE_Result res = TEE_SUCCESS; 307 struct sp_ctx *ctx = NULL; 308 uint32_t perm = TEE_MATTR_UR; 309 struct sp_mem_map_region *reg = NULL; 310 311 ctx = to_sp_ctx(s->ts_sess.ctx); 312 313 /* Get the permission */ 314 if (receiver->perm.perm & FFA_MEM_ACC_EXE) 315 perm |= TEE_MATTR_UX; 316 317 if (receiver->perm.perm & FFA_MEM_ACC_RW) { 318 if (receiver->perm.perm & FFA_MEM_ACC_EXE) 319 return TEE_ERROR_ACCESS_CONFLICT; 320 321 perm |= TEE_MATTR_UW; 322 } 323 /* 324 * Currently we don't support passing a va. We can't guarantee that the 325 * full region will be mapped in a contiguous region. A smem->region can 326 * have multiple mobj for one share. Currently there doesn't seem to be 327 * an option to guarantee that these will be mapped in a contiguous va 328 * space. 329 */ 330 if (*va) 331 return TEE_ERROR_NOT_SUPPORTED; 332 333 SLIST_FOREACH(reg, &smem->regions, link) { 334 res = vm_map(&ctx->uctx, va, reg->page_count * SMALL_PAGE_SIZE, 335 perm, 0, reg->mobj, reg->page_offset); 336 337 if (res != TEE_SUCCESS) { 338 EMSG("Failed to map memory region %#"PRIx32, res); 339 return res; 340 } 341 } 342 return TEE_SUCCESS; 343 } 344 345 TEE_Result sp_unmap_ffa_regions(struct sp_session *s, struct sp_mem *smem) 346 { 347 TEE_Result res = TEE_SUCCESS; 348 vaddr_t vaddr = 0; 349 size_t len = 0; 350 struct sp_ctx *ctx = to_sp_ctx(s->ts_sess.ctx); 351 struct sp_mem_map_region *reg = NULL; 352 353 SLIST_FOREACH(reg, &smem->regions, link) { 354 vaddr = (vaddr_t)sp_mem_get_va(&ctx->uctx, reg->page_offset, 355 reg->mobj); 356 len = reg->page_count * SMALL_PAGE_SIZE; 357 358 res = vm_unmap(&ctx->uctx, vaddr, len); 359 if (res != TEE_SUCCESS) 360 return res; 361 } 362 363 return TEE_SUCCESS; 364 } 365 366 static TEE_Result sp_dt_get_u64(const void *fdt, int node, const char *property, 367 uint64_t *value) 368 { 369 const fdt64_t *p = NULL; 370 int len = 0; 371 372 p = fdt_getprop(fdt, node, property, &len); 373 if (!p) 374 return TEE_ERROR_ITEM_NOT_FOUND; 375 376 if (len != sizeof(*p)) 377 return TEE_ERROR_BAD_FORMAT; 378 379 *value = fdt64_ld(p); 380 381 return TEE_SUCCESS; 382 } 383 384 static TEE_Result sp_dt_get_u32(const void *fdt, int node, const char *property, 385 uint32_t *value) 386 { 387 const fdt32_t *p = NULL; 388 int len = 0; 389 390 p = fdt_getprop(fdt, node, property, &len); 391 if (!p) 392 return TEE_ERROR_ITEM_NOT_FOUND; 393 394 if (len != sizeof(*p)) 395 return TEE_ERROR_BAD_FORMAT; 396 397 *value = fdt32_to_cpu(*p); 398 399 return TEE_SUCCESS; 400 } 401 402 static TEE_Result sp_dt_get_u16(const void *fdt, int node, const char *property, 403 uint16_t *value) 404 { 405 const fdt16_t *p = NULL; 406 int len = 0; 407 408 p = fdt_getprop(fdt, node, property, &len); 409 if (!p) 410 return TEE_ERROR_ITEM_NOT_FOUND; 411 412 if (len != sizeof(*p)) 413 return TEE_ERROR_BAD_FORMAT; 414 415 *value = fdt16_to_cpu(*p); 416 417 return TEE_SUCCESS; 418 } 419 420 static TEE_Result sp_dt_get_uuid(const void *fdt, int node, 421 const char *property, TEE_UUID *uuid) 422 { 423 uint32_t uuid_array[4] = { 0 }; 424 const fdt32_t *p = NULL; 425 int len = 0; 426 int i = 0; 427 428 p = fdt_getprop(fdt, node, property, &len); 429 if (!p) 430 return TEE_ERROR_ITEM_NOT_FOUND; 431 432 if (len != sizeof(TEE_UUID)) 433 return TEE_ERROR_BAD_FORMAT; 434 435 for (i = 0; i < 4; i++) 436 uuid_array[i] = fdt32_to_cpu(p[i]); 437 438 tee_uuid_from_octets(uuid, (uint8_t *)uuid_array); 439 440 return TEE_SUCCESS; 441 } 442 443 static TEE_Result sp_is_elf_format(const void *fdt, int sp_node, 444 bool *is_elf_format) 445 { 446 TEE_Result res = TEE_SUCCESS; 447 uint32_t elf_format = 0; 448 449 res = sp_dt_get_u32(fdt, sp_node, "elf-format", &elf_format); 450 if (res != TEE_SUCCESS && res != TEE_ERROR_ITEM_NOT_FOUND) 451 return res; 452 453 *is_elf_format = (elf_format != 0); 454 455 return TEE_SUCCESS; 456 } 457 458 static TEE_Result sp_binary_open(const TEE_UUID *uuid, 459 const struct ts_store_ops **ops, 460 struct ts_store_handle **handle) 461 { 462 TEE_Result res = TEE_ERROR_ITEM_NOT_FOUND; 463 464 SCATTERED_ARRAY_FOREACH(*ops, sp_stores, struct ts_store_ops) { 465 res = (*ops)->open(uuid, handle); 466 if (res != TEE_ERROR_ITEM_NOT_FOUND && 467 res != TEE_ERROR_STORAGE_NOT_AVAILABLE) 468 break; 469 } 470 471 return res; 472 } 473 474 static TEE_Result load_binary_sp(struct ts_session *s, 475 struct user_mode_ctx *uctx) 476 { 477 size_t bin_size = 0, bin_size_rounded = 0, bin_page_count = 0; 478 size_t bb_size = ROUNDUP(BOUNCE_BUFFER_SIZE, SMALL_PAGE_SIZE); 479 size_t bb_num_pages = bb_size / SMALL_PAGE_SIZE; 480 const struct ts_store_ops *store_ops = NULL; 481 struct ts_store_handle *handle = NULL; 482 TEE_Result res = TEE_SUCCESS; 483 tee_mm_entry_t *mm = NULL; 484 struct fobj *fobj = NULL; 485 struct mobj *mobj = NULL; 486 uaddr_t base_addr = 0; 487 uint32_t vm_flags = 0; 488 unsigned int idx = 0; 489 vaddr_t va = 0; 490 491 if (!s || !uctx) 492 return TEE_ERROR_BAD_PARAMETERS; 493 494 DMSG("Loading raw binary format SP %pUl", &uctx->ts_ctx->uuid); 495 496 /* Initialize the bounce buffer */ 497 fobj = fobj_sec_mem_alloc(bb_num_pages); 498 mobj = mobj_with_fobj_alloc(fobj, NULL, TEE_MATTR_MEM_TYPE_TAGGED); 499 fobj_put(fobj); 500 if (!mobj) 501 return TEE_ERROR_OUT_OF_MEMORY; 502 503 res = vm_map(uctx, &va, bb_size, TEE_MATTR_PRW, 0, mobj, 0); 504 mobj_put(mobj); 505 if (res) 506 return res; 507 508 uctx->bbuf = (uint8_t *)va; 509 uctx->bbuf_size = BOUNCE_BUFFER_SIZE; 510 511 vm_set_ctx(uctx->ts_ctx); 512 513 /* Find TS store and open SP binary */ 514 res = sp_binary_open(&uctx->ts_ctx->uuid, &store_ops, &handle); 515 if (res != TEE_SUCCESS) { 516 EMSG("Failed to open SP binary"); 517 return res; 518 } 519 520 /* Query binary size and calculate page count */ 521 res = store_ops->get_size(handle, &bin_size); 522 if (res != TEE_SUCCESS) 523 goto err; 524 525 if (ROUNDUP_OVERFLOW(bin_size, SMALL_PAGE_SIZE, &bin_size_rounded)) { 526 res = TEE_ERROR_OVERFLOW; 527 goto err; 528 } 529 530 bin_page_count = bin_size_rounded / SMALL_PAGE_SIZE; 531 532 /* Allocate memory */ 533 mm = tee_mm_alloc(&tee_mm_sec_ddr, bin_size_rounded); 534 if (!mm) { 535 res = TEE_ERROR_OUT_OF_MEMORY; 536 goto err; 537 } 538 539 base_addr = tee_mm_get_smem(mm); 540 541 /* Create mobj */ 542 mobj = sp_mem_new_mobj(bin_page_count, TEE_MATTR_MEM_TYPE_CACHED, true); 543 if (!mobj) { 544 res = TEE_ERROR_OUT_OF_MEMORY; 545 goto err_free_tee_mm; 546 } 547 548 res = sp_mem_add_pages(mobj, &idx, base_addr, bin_page_count); 549 if (res) 550 goto err_free_mobj; 551 552 /* Map memory area for the SP binary */ 553 va = 0; 554 res = vm_map(uctx, &va, bin_size_rounded, TEE_MATTR_URWX, 555 vm_flags, mobj, 0); 556 if (res) 557 goto err_free_mobj; 558 559 /* Read SP binary into the previously mapped memory area */ 560 res = store_ops->read(handle, NULL, (void *)va, bin_size); 561 if (res) 562 goto err_unmap; 563 564 /* Set memory protection to allow execution */ 565 res = vm_set_prot(uctx, va, bin_size_rounded, TEE_MATTR_UX); 566 if (res) 567 goto err_unmap; 568 569 mobj_put(mobj); 570 store_ops->close(handle); 571 572 /* The entry point must be at the beginning of the SP binary. */ 573 uctx->entry_func = va; 574 uctx->load_addr = va; 575 uctx->is_32bit = false; 576 577 s->handle_scall = s->ctx->ops->handle_scall; 578 579 return TEE_SUCCESS; 580 581 err_unmap: 582 vm_unmap(uctx, va, bin_size_rounded); 583 584 err_free_mobj: 585 mobj_put(mobj); 586 587 err_free_tee_mm: 588 tee_mm_free(mm); 589 590 err: 591 store_ops->close(handle); 592 593 return res; 594 } 595 596 static TEE_Result sp_open_session(struct sp_session **sess, 597 struct sp_sessions_head *open_sessions, 598 const TEE_UUID *ffa_uuid, 599 const TEE_UUID *bin_uuid, 600 const uint32_t boot_order, 601 const void *fdt) 602 { 603 TEE_Result res = TEE_SUCCESS; 604 struct sp_session *s = NULL; 605 struct sp_ctx *ctx = NULL; 606 bool is_elf_format = false; 607 608 if (!find_secure_partition(bin_uuid)) 609 return TEE_ERROR_ITEM_NOT_FOUND; 610 611 res = sp_create_session(open_sessions, bin_uuid, boot_order, &s); 612 if (res != TEE_SUCCESS) { 613 DMSG("sp_create_session failed %#"PRIx32, res); 614 return res; 615 } 616 617 ctx = to_sp_ctx(s->ts_sess.ctx); 618 assert(ctx); 619 if (!ctx) 620 return TEE_ERROR_TARGET_DEAD; 621 *sess = s; 622 623 ts_push_current_session(&s->ts_sess); 624 625 res = sp_is_elf_format(fdt, 0, &is_elf_format); 626 if (res == TEE_SUCCESS) { 627 if (is_elf_format) { 628 /* Load the SP using ldelf. */ 629 ldelf_load_ldelf(&ctx->uctx); 630 res = ldelf_init_with_ldelf(&s->ts_sess, &ctx->uctx); 631 } else { 632 /* Raw binary format SP */ 633 res = load_binary_sp(&s->ts_sess, &ctx->uctx); 634 } 635 } else { 636 EMSG("Failed to detect SP format"); 637 } 638 639 if (res != TEE_SUCCESS) { 640 EMSG("Failed loading SP %#"PRIx32, res); 641 ts_pop_current_session(); 642 return TEE_ERROR_TARGET_DEAD; 643 } 644 645 /* 646 * Make the SP ready for its first run. 647 * Set state to busy to prevent other endpoints from sending messages to 648 * the SP before its boot phase is done. 649 */ 650 s->state = sp_busy; 651 s->caller_id = 0; 652 sp_init_set_registers(ctx); 653 memcpy(&s->ffa_uuid, ffa_uuid, sizeof(*ffa_uuid)); 654 ts_pop_current_session(); 655 656 return TEE_SUCCESS; 657 } 658 659 static TEE_Result fdt_get_uuid(const void * const fdt, TEE_UUID *uuid) 660 { 661 const struct fdt_property *description = NULL; 662 int description_name_len = 0; 663 664 if (fdt_node_check_compatible(fdt, 0, "arm,ffa-manifest-1.0")) { 665 EMSG("Failed loading SP, manifest not found"); 666 return TEE_ERROR_BAD_PARAMETERS; 667 } 668 669 description = fdt_get_property(fdt, 0, "description", 670 &description_name_len); 671 if (description) 672 DMSG("Loading SP: %s", description->data); 673 674 if (sp_dt_get_uuid(fdt, 0, "uuid", uuid)) { 675 EMSG("Missing or invalid UUID in SP manifest"); 676 return TEE_ERROR_BAD_FORMAT; 677 } 678 679 return TEE_SUCCESS; 680 } 681 682 static TEE_Result copy_and_map_fdt(struct sp_ctx *ctx, const void * const fdt, 683 void **fdt_copy, size_t *mapped_size) 684 { 685 size_t total_size = ROUNDUP(fdt_totalsize(fdt), SMALL_PAGE_SIZE); 686 size_t num_pages = total_size / SMALL_PAGE_SIZE; 687 uint32_t perm = TEE_MATTR_UR | TEE_MATTR_PRW; 688 TEE_Result res = TEE_SUCCESS; 689 struct mobj *m = NULL; 690 struct fobj *f = NULL; 691 vaddr_t va = 0; 692 693 f = fobj_sec_mem_alloc(num_pages); 694 m = mobj_with_fobj_alloc(f, NULL, TEE_MATTR_MEM_TYPE_TAGGED); 695 fobj_put(f); 696 if (!m) 697 return TEE_ERROR_OUT_OF_MEMORY; 698 699 res = vm_map(&ctx->uctx, &va, total_size, perm, 0, m, 0); 700 mobj_put(m); 701 if (res) 702 return res; 703 704 if (fdt_open_into(fdt, (void *)va, total_size)) 705 return TEE_ERROR_GENERIC; 706 707 *fdt_copy = (void *)va; 708 *mapped_size = total_size; 709 710 return res; 711 } 712 713 static void fill_boot_info_1_0(vaddr_t buf, const void *fdt) 714 { 715 struct ffa_boot_info_1_0 *info = (struct ffa_boot_info_1_0 *)buf; 716 static const char fdt_name[16] = "TYPE_DT\0\0\0\0\0\0\0\0"; 717 718 memcpy(&info->magic, "FF-A", 4); 719 info->count = 1; 720 721 COMPILE_TIME_ASSERT(sizeof(info->nvp[0].name) == sizeof(fdt_name)); 722 memcpy(info->nvp[0].name, fdt_name, sizeof(fdt_name)); 723 info->nvp[0].value = (uintptr_t)fdt; 724 info->nvp[0].size = fdt_totalsize(fdt); 725 } 726 727 static void fill_boot_info_1_1(vaddr_t buf, const void *fdt) 728 { 729 size_t desc_offs = ROUNDUP(sizeof(struct ffa_boot_info_header_1_1), 8); 730 struct ffa_boot_info_header_1_1 *header = 731 (struct ffa_boot_info_header_1_1 *)buf; 732 struct ffa_boot_info_1_1 *desc = 733 (struct ffa_boot_info_1_1 *)(buf + desc_offs); 734 735 header->signature = FFA_BOOT_INFO_SIGNATURE; 736 header->version = FFA_BOOT_INFO_VERSION; 737 header->blob_size = desc_offs + sizeof(struct ffa_boot_info_1_1); 738 header->desc_size = sizeof(struct ffa_boot_info_1_1); 739 header->desc_count = 1; 740 header->desc_offset = desc_offs; 741 742 memset(&desc[0].name, 0, sizeof(desc[0].name)); 743 /* Type: Standard boot info (bit[7] == 0), FDT type */ 744 desc[0].type = FFA_BOOT_INFO_TYPE_ID_FDT; 745 /* Flags: Contents field contains an address */ 746 desc[0].flags = FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_ADDR << 747 FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_SHIFT; 748 desc[0].size = fdt_totalsize(fdt); 749 desc[0].contents = (uintptr_t)fdt; 750 } 751 752 static TEE_Result create_and_map_boot_info(struct sp_ctx *ctx, const void *fdt, 753 struct thread_smc_args *args, 754 vaddr_t *va, size_t *mapped_size, 755 uint32_t sp_ffa_version) 756 { 757 size_t total_size = ROUNDUP(CFG_SP_INIT_INFO_MAX_SIZE, SMALL_PAGE_SIZE); 758 size_t num_pages = total_size / SMALL_PAGE_SIZE; 759 uint32_t perm = TEE_MATTR_UR | TEE_MATTR_PRW; 760 TEE_Result res = TEE_SUCCESS; 761 struct fobj *f = NULL; 762 struct mobj *m = NULL; 763 uint32_t info_reg = 0; 764 765 f = fobj_sec_mem_alloc(num_pages); 766 m = mobj_with_fobj_alloc(f, NULL, TEE_MATTR_MEM_TYPE_TAGGED); 767 fobj_put(f); 768 if (!m) 769 return TEE_ERROR_OUT_OF_MEMORY; 770 771 res = vm_map(&ctx->uctx, va, total_size, perm, 0, m, 0); 772 mobj_put(m); 773 if (res) 774 return res; 775 776 *mapped_size = total_size; 777 778 switch (sp_ffa_version) { 779 case MAKE_FFA_VERSION(1, 0): 780 fill_boot_info_1_0(*va, fdt); 781 break; 782 case MAKE_FFA_VERSION(1, 1): 783 fill_boot_info_1_1(*va, fdt); 784 break; 785 default: 786 EMSG("Unknown FF-A version: %#"PRIx32, sp_ffa_version); 787 return TEE_ERROR_NOT_SUPPORTED; 788 } 789 790 res = sp_dt_get_u32(fdt, 0, "gp-register-num", &info_reg); 791 if (res) { 792 if (res == TEE_ERROR_ITEM_NOT_FOUND) { 793 /* If the property is not present, set default to x0 */ 794 info_reg = 0; 795 } else { 796 return TEE_ERROR_BAD_FORMAT; 797 } 798 } 799 800 switch (info_reg) { 801 case 0: 802 args->a0 = *va; 803 break; 804 case 1: 805 args->a1 = *va; 806 break; 807 case 2: 808 args->a2 = *va; 809 break; 810 case 3: 811 args->a3 = *va; 812 break; 813 default: 814 EMSG("Invalid register selected for passing boot info"); 815 return TEE_ERROR_BAD_FORMAT; 816 } 817 818 return TEE_SUCCESS; 819 } 820 821 static TEE_Result handle_fdt_load_relative_mem_regions(struct sp_ctx *ctx, 822 const void *fdt) 823 { 824 int node = 0; 825 int subnode = 0; 826 tee_mm_entry_t *mm = NULL; 827 TEE_Result res = TEE_SUCCESS; 828 829 /* 830 * Memory regions are optional in the SP manifest, it's not an error if 831 * we don't find any. 832 */ 833 node = fdt_node_offset_by_compatible(fdt, 0, 834 "arm,ffa-manifest-memory-regions"); 835 if (node < 0) 836 return TEE_SUCCESS; 837 838 fdt_for_each_subnode(subnode, fdt, node) { 839 uint64_t load_rel_offset = 0; 840 uint32_t attributes = 0; 841 uint64_t base_addr = 0; 842 uint32_t pages_cnt = 0; 843 uint32_t flags = 0; 844 uint32_t perm = 0; 845 size_t size = 0; 846 vaddr_t va = 0; 847 848 mm = NULL; 849 850 /* Load address relative offset of a memory region */ 851 if (!sp_dt_get_u64(fdt, subnode, "load-address-relative-offset", 852 &load_rel_offset)) { 853 va = ctx->uctx.load_addr + load_rel_offset; 854 } else { 855 /* Skip non load address relative memory regions */ 856 continue; 857 } 858 859 if (!sp_dt_get_u64(fdt, subnode, "base-address", &base_addr)) { 860 EMSG("Both base-address and load-address-relative-offset fields are set"); 861 return TEE_ERROR_BAD_FORMAT; 862 } 863 864 /* Size of memory region as count of 4K pages */ 865 if (sp_dt_get_u32(fdt, subnode, "pages-count", &pages_cnt)) { 866 EMSG("Mandatory field is missing: pages-count"); 867 return TEE_ERROR_BAD_FORMAT; 868 } 869 870 if (MUL_OVERFLOW(pages_cnt, SMALL_PAGE_SIZE, &size)) 871 return TEE_ERROR_OVERFLOW; 872 873 /* Memory region attributes */ 874 if (sp_dt_get_u32(fdt, subnode, "attributes", &attributes)) { 875 EMSG("Mandatory field is missing: attributes"); 876 return TEE_ERROR_BAD_FORMAT; 877 } 878 879 /* Check instruction and data access permissions */ 880 switch (attributes & SP_MANIFEST_ATTR_RWX) { 881 case SP_MANIFEST_ATTR_RO: 882 perm = TEE_MATTR_UR; 883 break; 884 case SP_MANIFEST_ATTR_RW: 885 perm = TEE_MATTR_URW; 886 break; 887 case SP_MANIFEST_ATTR_RX: 888 perm = TEE_MATTR_URX; 889 break; 890 default: 891 EMSG("Invalid memory access permissions"); 892 return TEE_ERROR_BAD_FORMAT; 893 } 894 895 if (IS_ENABLED(CFG_TA_BTI) && 896 attributes & SP_MANIFEST_ATTR_GP) { 897 if (!(attributes & SP_MANIFEST_ATTR_RX)) { 898 EMSG("Guard only executable region"); 899 return TEE_ERROR_BAD_FORMAT; 900 } 901 perm |= TEE_MATTR_GUARDED; 902 } 903 904 res = sp_dt_get_u32(fdt, subnode, "load-flags", &flags); 905 if (res != TEE_SUCCESS && res != TEE_ERROR_ITEM_NOT_FOUND) { 906 EMSG("Optional field with invalid value: flags"); 907 return TEE_ERROR_BAD_FORMAT; 908 } 909 910 /* Load relative regions must be secure */ 911 if (attributes & SP_MANIFEST_ATTR_NSEC) { 912 EMSG("Invalid memory security attribute"); 913 return TEE_ERROR_BAD_FORMAT; 914 } 915 916 if (flags & SP_MANIFEST_FLAG_NOBITS) { 917 /* 918 * NOBITS flag is set, which means that loaded binary 919 * doesn't contain this area, so it's need to be 920 * allocated. 921 */ 922 struct mobj *m = NULL; 923 unsigned int idx = 0; 924 925 mm = tee_mm_alloc(&tee_mm_sec_ddr, size); 926 if (!mm) 927 return TEE_ERROR_OUT_OF_MEMORY; 928 929 base_addr = tee_mm_get_smem(mm); 930 931 m = sp_mem_new_mobj(pages_cnt, 932 TEE_MATTR_MEM_TYPE_CACHED, true); 933 if (!m) { 934 res = TEE_ERROR_OUT_OF_MEMORY; 935 goto err_mm_free; 936 } 937 938 res = sp_mem_add_pages(m, &idx, base_addr, pages_cnt); 939 if (res) { 940 mobj_put(m); 941 goto err_mm_free; 942 } 943 944 res = vm_map(&ctx->uctx, &va, size, perm, 0, m, 0); 945 mobj_put(m); 946 if (res) 947 goto err_mm_free; 948 } else { 949 /* 950 * If NOBITS is not present the memory area is already 951 * mapped and only need to set the correct permissions. 952 */ 953 res = vm_set_prot(&ctx->uctx, va, size, perm); 954 if (res) 955 return res; 956 } 957 } 958 959 return TEE_SUCCESS; 960 961 err_mm_free: 962 tee_mm_free(mm); 963 return res; 964 } 965 966 static TEE_Result handle_fdt_dev_regions(struct sp_ctx *ctx, void *fdt) 967 { 968 int node = 0; 969 int subnode = 0; 970 TEE_Result res = TEE_SUCCESS; 971 const char *dt_device_match_table = { 972 "arm,ffa-manifest-device-regions", 973 }; 974 975 /* 976 * Device regions are optional in the SP manifest, it's not an error if 977 * we don't find any 978 */ 979 node = fdt_node_offset_by_compatible(fdt, 0, dt_device_match_table); 980 if (node < 0) 981 return TEE_SUCCESS; 982 983 fdt_for_each_subnode(subnode, fdt, node) { 984 uint64_t base_addr = 0; 985 uint32_t pages_cnt = 0; 986 uint32_t attributes = 0; 987 struct mobj *m = NULL; 988 bool is_secure = true; 989 uint32_t perm = 0; 990 vaddr_t va = 0; 991 unsigned int idx = 0; 992 993 /* 994 * Physical base address of a device MMIO region. 995 * Currently only physically contiguous region is supported. 996 */ 997 if (sp_dt_get_u64(fdt, subnode, "base-address", &base_addr)) { 998 EMSG("Mandatory field is missing: base-address"); 999 return TEE_ERROR_BAD_FORMAT; 1000 } 1001 1002 /* Total size of MMIO region as count of 4K pages */ 1003 if (sp_dt_get_u32(fdt, subnode, "pages-count", &pages_cnt)) { 1004 EMSG("Mandatory field is missing: pages-count"); 1005 return TEE_ERROR_BAD_FORMAT; 1006 } 1007 1008 /* Data access, instruction access and security attributes */ 1009 if (sp_dt_get_u32(fdt, subnode, "attributes", &attributes)) { 1010 EMSG("Mandatory field is missing: attributes"); 1011 return TEE_ERROR_BAD_FORMAT; 1012 } 1013 1014 /* Check instruction and data access permissions */ 1015 switch (attributes & SP_MANIFEST_ATTR_RWX) { 1016 case SP_MANIFEST_ATTR_RO: 1017 perm = TEE_MATTR_UR; 1018 break; 1019 case SP_MANIFEST_ATTR_RW: 1020 perm = TEE_MATTR_URW; 1021 break; 1022 default: 1023 EMSG("Invalid memory access permissions"); 1024 return TEE_ERROR_BAD_FORMAT; 1025 } 1026 1027 /* 1028 * The SP is a secure endpoint, security attribute can be 1029 * secure or non-secure 1030 */ 1031 if (attributes & SP_MANIFEST_ATTR_NSEC) 1032 is_secure = false; 1033 1034 /* Memory attributes must be Device-nGnRnE */ 1035 m = sp_mem_new_mobj(pages_cnt, TEE_MATTR_MEM_TYPE_STRONGLY_O, 1036 is_secure); 1037 if (!m) 1038 return TEE_ERROR_OUT_OF_MEMORY; 1039 1040 res = sp_mem_add_pages(m, &idx, (paddr_t)base_addr, pages_cnt); 1041 if (res) { 1042 mobj_put(m); 1043 return res; 1044 } 1045 1046 res = vm_map(&ctx->uctx, &va, pages_cnt * SMALL_PAGE_SIZE, 1047 perm, 0, m, 0); 1048 mobj_put(m); 1049 if (res) 1050 return res; 1051 1052 /* 1053 * Overwrite the device region's PA in the fdt with the VA. This 1054 * fdt will be passed to the SP. 1055 */ 1056 res = fdt_setprop_u64(fdt, subnode, "base-address", va); 1057 1058 /* 1059 * Unmap the region if the overwrite failed since the SP won't 1060 * be able to access it without knowing the VA. 1061 */ 1062 if (res) { 1063 vm_unmap(&ctx->uctx, va, pages_cnt * SMALL_PAGE_SIZE); 1064 return res; 1065 } 1066 } 1067 1068 return TEE_SUCCESS; 1069 } 1070 1071 static TEE_Result swap_sp_endpoints(uint32_t endpoint_id, 1072 uint32_t new_endpoint_id) 1073 { 1074 struct sp_session *session = sp_get_session(endpoint_id); 1075 uint32_t manifest_endpoint_id = 0; 1076 1077 /* 1078 * We don't know in which order the SPs are loaded. The endpoint ID 1079 * defined in the manifest could already be generated by 1080 * new_session_id() and used by another SP. If this is the case, we swap 1081 * the ID's of the two SPs. We also have to make sure that the ID's are 1082 * not defined twice in the manifest. 1083 */ 1084 1085 /* The endpoint ID was not assigned yet */ 1086 if (!session) 1087 return TEE_SUCCESS; 1088 1089 /* 1090 * Read the manifest file from the SP who originally had the endpoint. 1091 * We can safely swap the endpoint ID's if the manifest file doesn't 1092 * have an endpoint ID defined. 1093 */ 1094 if (!sp_dt_get_u32(session->fdt, 0, "id", &manifest_endpoint_id)) { 1095 assert(manifest_endpoint_id == endpoint_id); 1096 EMSG("SP: Found duplicated endpoint ID %#"PRIx32, endpoint_id); 1097 return TEE_ERROR_ACCESS_CONFLICT; 1098 } 1099 1100 session->endpoint_id = new_endpoint_id; 1101 1102 return TEE_SUCCESS; 1103 } 1104 1105 static TEE_Result read_manifest_endpoint_id(struct sp_session *s) 1106 { 1107 uint32_t endpoint_id = 0; 1108 1109 /* 1110 * The endpoint ID can be optionally defined in the manifest file. We 1111 * have to map the ID inside the manifest to the SP if it's defined. 1112 * If not, the endpoint ID generated inside new_session_id() will be 1113 * used. 1114 */ 1115 if (!sp_dt_get_u32(s->fdt, 0, "id", &endpoint_id)) { 1116 TEE_Result res = TEE_ERROR_GENERIC; 1117 1118 if (!endpoint_id_is_valid(endpoint_id)) { 1119 EMSG("Invalid endpoint ID 0x%"PRIx32, endpoint_id); 1120 return TEE_ERROR_BAD_FORMAT; 1121 } 1122 1123 res = swap_sp_endpoints(endpoint_id, s->endpoint_id); 1124 if (res) 1125 return res; 1126 1127 DMSG("SP: endpoint ID (0x%"PRIx32") found in manifest", 1128 endpoint_id); 1129 /* Assign the endpoint ID to the current SP */ 1130 s->endpoint_id = endpoint_id; 1131 } 1132 return TEE_SUCCESS; 1133 } 1134 1135 static TEE_Result handle_fdt_mem_regions(struct sp_ctx *ctx, void *fdt) 1136 { 1137 int node = 0; 1138 int subnode = 0; 1139 tee_mm_entry_t *mm = NULL; 1140 TEE_Result res = TEE_SUCCESS; 1141 1142 /* 1143 * Memory regions are optional in the SP manifest, it's not an error if 1144 * we don't find any. 1145 */ 1146 node = fdt_node_offset_by_compatible(fdt, 0, 1147 "arm,ffa-manifest-memory-regions"); 1148 if (node < 0) 1149 return TEE_SUCCESS; 1150 1151 fdt_for_each_subnode(subnode, fdt, node) { 1152 uint64_t load_rel_offset = 0; 1153 bool alloc_needed = false; 1154 uint32_t attributes = 0; 1155 uint64_t base_addr = 0; 1156 uint32_t pages_cnt = 0; 1157 bool is_secure = true; 1158 struct mobj *m = NULL; 1159 unsigned int idx = 0; 1160 uint32_t perm = 0; 1161 size_t size = 0; 1162 vaddr_t va = 0; 1163 1164 mm = NULL; 1165 1166 /* Load address relative offset of a memory region */ 1167 if (!sp_dt_get_u64(fdt, subnode, "load-address-relative-offset", 1168 &load_rel_offset)) { 1169 /* 1170 * At this point the memory region is already mapped by 1171 * handle_fdt_load_relative_mem_regions. 1172 * Only need to set the base-address in the manifest and 1173 * then skip the rest of the mapping process. 1174 */ 1175 va = ctx->uctx.load_addr + load_rel_offset; 1176 res = fdt_setprop_u64(fdt, subnode, "base-address", va); 1177 if (res) 1178 return res; 1179 1180 continue; 1181 } 1182 1183 /* 1184 * Base address of a memory region. 1185 * If not present, we have to allocate the specified memory. 1186 * If present, this field could specify a PA or VA. Currently 1187 * only a PA is supported. 1188 */ 1189 if (sp_dt_get_u64(fdt, subnode, "base-address", &base_addr)) 1190 alloc_needed = true; 1191 1192 /* Size of memory region as count of 4K pages */ 1193 if (sp_dt_get_u32(fdt, subnode, "pages-count", &pages_cnt)) { 1194 EMSG("Mandatory field is missing: pages-count"); 1195 return TEE_ERROR_BAD_FORMAT; 1196 } 1197 1198 if (MUL_OVERFLOW(pages_cnt, SMALL_PAGE_SIZE, &size)) 1199 return TEE_ERROR_OVERFLOW; 1200 1201 /* 1202 * Memory region attributes: 1203 * - Instruction/data access permissions 1204 * - Cacheability/shareability attributes 1205 * - Security attributes 1206 * 1207 * Cacheability/shareability attributes can be ignored for now. 1208 * OP-TEE only supports a single type for normal cached memory 1209 * and currently there is no use case that would require to 1210 * change this. 1211 */ 1212 if (sp_dt_get_u32(fdt, subnode, "attributes", &attributes)) { 1213 EMSG("Mandatory field is missing: attributes"); 1214 return TEE_ERROR_BAD_FORMAT; 1215 } 1216 1217 /* Check instruction and data access permissions */ 1218 switch (attributes & SP_MANIFEST_ATTR_RWX) { 1219 case SP_MANIFEST_ATTR_RO: 1220 perm = TEE_MATTR_UR; 1221 break; 1222 case SP_MANIFEST_ATTR_RW: 1223 perm = TEE_MATTR_URW; 1224 break; 1225 case SP_MANIFEST_ATTR_RX: 1226 perm = TEE_MATTR_URX; 1227 break; 1228 default: 1229 EMSG("Invalid memory access permissions"); 1230 return TEE_ERROR_BAD_FORMAT; 1231 } 1232 1233 if (IS_ENABLED(CFG_TA_BTI) && 1234 attributes & SP_MANIFEST_ATTR_GP) { 1235 if (!(attributes & SP_MANIFEST_ATTR_RX)) { 1236 EMSG("Guard only executable region"); 1237 return TEE_ERROR_BAD_FORMAT; 1238 } 1239 perm |= TEE_MATTR_GUARDED; 1240 } 1241 1242 /* 1243 * The SP is a secure endpoint, security attribute can be 1244 * secure or non-secure. 1245 * The SPMC cannot allocate non-secure memory, i.e. if the base 1246 * address is missing this attribute must be secure. 1247 */ 1248 if (attributes & SP_MANIFEST_ATTR_NSEC) { 1249 if (alloc_needed) { 1250 EMSG("Invalid memory security attribute"); 1251 return TEE_ERROR_BAD_FORMAT; 1252 } 1253 is_secure = false; 1254 } 1255 1256 if (alloc_needed) { 1257 /* Base address is missing, we have to allocate */ 1258 mm = tee_mm_alloc(&tee_mm_sec_ddr, size); 1259 if (!mm) 1260 return TEE_ERROR_OUT_OF_MEMORY; 1261 1262 base_addr = tee_mm_get_smem(mm); 1263 } 1264 1265 m = sp_mem_new_mobj(pages_cnt, TEE_MATTR_MEM_TYPE_CACHED, 1266 is_secure); 1267 if (!m) { 1268 res = TEE_ERROR_OUT_OF_MEMORY; 1269 goto err_mm_free; 1270 } 1271 1272 res = sp_mem_add_pages(m, &idx, base_addr, pages_cnt); 1273 if (res) { 1274 mobj_put(m); 1275 goto err_mm_free; 1276 } 1277 1278 res = vm_map(&ctx->uctx, &va, size, perm, 0, m, 0); 1279 mobj_put(m); 1280 if (res) 1281 goto err_mm_free; 1282 1283 /* 1284 * Overwrite the memory region's base address in the fdt with 1285 * the VA. This fdt will be passed to the SP. 1286 * If the base-address field was not present in the original 1287 * fdt, this function will create it. This doesn't cause issues 1288 * since the necessary extra space has been allocated when 1289 * opening the fdt. 1290 */ 1291 res = fdt_setprop_u64(fdt, subnode, "base-address", va); 1292 1293 /* 1294 * Unmap the region if the overwrite failed since the SP won't 1295 * be able to access it without knowing the VA. 1296 */ 1297 if (res) { 1298 vm_unmap(&ctx->uctx, va, size); 1299 goto err_mm_free; 1300 } 1301 } 1302 1303 return TEE_SUCCESS; 1304 1305 err_mm_free: 1306 tee_mm_free(mm); 1307 return res; 1308 } 1309 1310 static TEE_Result handle_tpm_event_log(struct sp_ctx *ctx, void *fdt) 1311 { 1312 uint32_t perm = TEE_MATTR_URW | TEE_MATTR_PRW; 1313 uint32_t dummy_size __maybe_unused = 0; 1314 TEE_Result res = TEE_SUCCESS; 1315 size_t page_count = 0; 1316 struct fobj *f = NULL; 1317 struct mobj *m = NULL; 1318 vaddr_t log_addr = 0; 1319 size_t log_size = 0; 1320 int node = 0; 1321 1322 node = fdt_node_offset_by_compatible(fdt, 0, "arm,tpm_event_log"); 1323 if (node < 0) 1324 return TEE_SUCCESS; 1325 1326 /* Checking the existence and size of the event log properties */ 1327 if (sp_dt_get_u64(fdt, node, "tpm_event_log_addr", &log_addr)) { 1328 EMSG("tpm_event_log_addr not found or has invalid size"); 1329 return TEE_ERROR_BAD_FORMAT; 1330 } 1331 1332 if (sp_dt_get_u32(fdt, node, "tpm_event_log_size", &dummy_size)) { 1333 EMSG("tpm_event_log_size not found or has invalid size"); 1334 return TEE_ERROR_BAD_FORMAT; 1335 } 1336 1337 /* Validating event log */ 1338 res = tpm_get_event_log_size(&log_size); 1339 if (res) 1340 return res; 1341 1342 if (!log_size) { 1343 EMSG("Empty TPM event log was provided"); 1344 return TEE_ERROR_ITEM_NOT_FOUND; 1345 } 1346 1347 /* Allocating memory area for the event log to share with the SP */ 1348 page_count = ROUNDUP_DIV(log_size, SMALL_PAGE_SIZE); 1349 1350 f = fobj_sec_mem_alloc(page_count); 1351 m = mobj_with_fobj_alloc(f, NULL, TEE_MATTR_MEM_TYPE_TAGGED); 1352 fobj_put(f); 1353 if (!m) 1354 return TEE_ERROR_OUT_OF_MEMORY; 1355 1356 res = vm_map(&ctx->uctx, &log_addr, log_size, perm, 0, m, 0); 1357 mobj_put(m); 1358 if (res) 1359 return res; 1360 1361 /* Copy event log */ 1362 res = tpm_get_event_log((void *)log_addr, &log_size); 1363 if (res) 1364 goto err_unmap; 1365 1366 /* Setting event log details in the manifest */ 1367 res = fdt_setprop_u64(fdt, node, "tpm_event_log_addr", log_addr); 1368 if (res) 1369 goto err_unmap; 1370 1371 res = fdt_setprop_u32(fdt, node, "tpm_event_log_size", log_size); 1372 if (res) 1373 goto err_unmap; 1374 1375 return TEE_SUCCESS; 1376 1377 err_unmap: 1378 vm_unmap(&ctx->uctx, log_addr, log_size); 1379 1380 return res; 1381 } 1382 1383 /* 1384 * Note: this function is called only on the primary CPU. It assumes that the 1385 * features present on the primary CPU are available on all of the secondary 1386 * CPUs as well. 1387 */ 1388 static TEE_Result handle_hw_features(void *fdt) 1389 { 1390 uint32_t val __maybe_unused = 0; 1391 TEE_Result res = TEE_SUCCESS; 1392 int node = 0; 1393 1394 /* 1395 * HW feature descriptions are optional in the SP manifest, it's not an 1396 * error if we don't find any. 1397 */ 1398 node = fdt_node_offset_by_compatible(fdt, 0, "arm,hw-features"); 1399 if (node < 0) 1400 return TEE_SUCCESS; 1401 1402 /* Modify the crc32 property only if it's already present */ 1403 if (!sp_dt_get_u32(fdt, node, "crc32", &val)) { 1404 res = fdt_setprop_u32(fdt, node, "crc32", 1405 feat_crc32_implemented()); 1406 if (res) 1407 return res; 1408 } 1409 1410 return TEE_SUCCESS; 1411 } 1412 1413 static TEE_Result read_ns_interrupts_action(const void *fdt, 1414 struct sp_session *s) 1415 { 1416 TEE_Result res = TEE_ERROR_BAD_PARAMETERS; 1417 1418 res = sp_dt_get_u32(fdt, 0, "ns-interrupts-action", &s->ns_int_mode); 1419 1420 if (res) { 1421 EMSG("Mandatory property is missing: ns-interrupts-action"); 1422 return res; 1423 } 1424 1425 switch (s->ns_int_mode) { 1426 case SP_MANIFEST_NS_INT_QUEUED: 1427 case SP_MANIFEST_NS_INT_SIGNALED: 1428 /* OK */ 1429 break; 1430 1431 case SP_MANIFEST_NS_INT_MANAGED_EXIT: 1432 EMSG("Managed exit is not implemented"); 1433 return TEE_ERROR_NOT_IMPLEMENTED; 1434 1435 default: 1436 EMSG("Invalid ns-interrupts-action value: %"PRIu32, 1437 s->ns_int_mode); 1438 return TEE_ERROR_BAD_PARAMETERS; 1439 } 1440 1441 return TEE_SUCCESS; 1442 } 1443 1444 static TEE_Result read_ffa_version(const void *fdt, struct sp_session *s) 1445 { 1446 TEE_Result res = TEE_ERROR_BAD_PARAMETERS; 1447 uint32_t ffa_version = 0; 1448 1449 res = sp_dt_get_u32(fdt, 0, "ffa-version", &ffa_version); 1450 if (res) { 1451 EMSG("Mandatory property is missing: ffa-version"); 1452 return res; 1453 } 1454 1455 if (ffa_version != FFA_VERSION_1_0 && ffa_version != FFA_VERSION_1_1) { 1456 EMSG("Invalid FF-A version value: 0x%08"PRIx32, ffa_version); 1457 return TEE_ERROR_BAD_PARAMETERS; 1458 } 1459 1460 s->rxtx.ffa_vers = ffa_version; 1461 1462 return TEE_SUCCESS; 1463 } 1464 1465 static TEE_Result read_sp_exec_state(const void *fdt, struct sp_session *s) 1466 { 1467 TEE_Result res = TEE_ERROR_BAD_PARAMETERS; 1468 uint32_t exec_state = 0; 1469 1470 res = sp_dt_get_u32(fdt, 0, "execution-state", &exec_state); 1471 if (res) { 1472 EMSG("Mandatory property is missing: execution-state"); 1473 return res; 1474 } 1475 1476 /* Currently only AArch64 SPs are supported */ 1477 if (exec_state == SP_MANIFEST_EXEC_STATE_AARCH64) { 1478 s->props |= FFA_PART_PROP_AARCH64_STATE; 1479 } else { 1480 EMSG("Invalid execution-state value: %"PRIu32, exec_state); 1481 return TEE_ERROR_BAD_PARAMETERS; 1482 } 1483 1484 return TEE_SUCCESS; 1485 } 1486 1487 static TEE_Result read_sp_msg_types(const void *fdt, struct sp_session *s) 1488 { 1489 TEE_Result res = TEE_ERROR_BAD_PARAMETERS; 1490 uint32_t msg_method = 0; 1491 1492 res = sp_dt_get_u32(fdt, 0, "messaging-method", &msg_method); 1493 if (res) { 1494 EMSG("Mandatory property is missing: messaging-method"); 1495 return res; 1496 } 1497 1498 if (msg_method & SP_MANIFEST_DIRECT_REQ_RECEIVE) 1499 s->props |= FFA_PART_PROP_DIRECT_REQ_RECV; 1500 1501 if (msg_method & SP_MANIFEST_DIRECT_REQ_SEND) 1502 s->props |= FFA_PART_PROP_DIRECT_REQ_SEND; 1503 1504 if (msg_method & SP_MANIFEST_INDIRECT_REQ) 1505 IMSG("Indirect messaging is not supported"); 1506 1507 return TEE_SUCCESS; 1508 } 1509 1510 static TEE_Result read_vm_availability_msg(const void *fdt, 1511 struct sp_session *s) 1512 { 1513 TEE_Result res = TEE_ERROR_BAD_PARAMETERS; 1514 uint32_t v = 0; 1515 1516 res = sp_dt_get_u32(fdt, 0, "vm-availability-messages", &v); 1517 1518 /* This field in the manifest is optional */ 1519 if (res == TEE_ERROR_ITEM_NOT_FOUND) 1520 return TEE_SUCCESS; 1521 1522 if (res) 1523 return res; 1524 1525 if (v & ~(SP_MANIFEST_VM_CREATED_MSG | SP_MANIFEST_VM_DESTROYED_MSG)) { 1526 EMSG("Invalid vm-availability-messages value: %"PRIu32, v); 1527 return TEE_ERROR_BAD_PARAMETERS; 1528 } 1529 1530 if (v & SP_MANIFEST_VM_CREATED_MSG) 1531 s->props |= FFA_PART_PROP_NOTIF_CREATED; 1532 1533 if (v & SP_MANIFEST_VM_DESTROYED_MSG) 1534 s->props |= FFA_PART_PROP_NOTIF_DESTROYED; 1535 1536 return TEE_SUCCESS; 1537 } 1538 1539 static TEE_Result sp_init_uuid(const TEE_UUID *bin_uuid, const void * const fdt) 1540 { 1541 TEE_Result res = TEE_SUCCESS; 1542 struct sp_session *sess = NULL; 1543 TEE_UUID ffa_uuid = {}; 1544 uint16_t boot_order = 0; 1545 uint32_t boot_order_arg = 0; 1546 1547 res = fdt_get_uuid(fdt, &ffa_uuid); 1548 if (res) 1549 return res; 1550 1551 res = sp_dt_get_u16(fdt, 0, "boot-order", &boot_order); 1552 if (res == TEE_SUCCESS) { 1553 boot_order_arg = boot_order; 1554 } else if (res == TEE_ERROR_ITEM_NOT_FOUND) { 1555 boot_order_arg = UINT32_MAX; 1556 } else { 1557 EMSG("Failed reading boot-order property err:%#"PRIx32, res); 1558 return res; 1559 } 1560 1561 res = sp_open_session(&sess, 1562 &open_sp_sessions, 1563 &ffa_uuid, bin_uuid, boot_order_arg, fdt); 1564 if (res) 1565 return res; 1566 1567 sess->fdt = fdt; 1568 1569 res = read_manifest_endpoint_id(sess); 1570 if (res) 1571 return res; 1572 DMSG("endpoint is 0x%"PRIx16, sess->endpoint_id); 1573 1574 res = read_ns_interrupts_action(fdt, sess); 1575 if (res) 1576 return res; 1577 1578 res = read_ffa_version(fdt, sess); 1579 if (res) 1580 return res; 1581 1582 res = read_sp_exec_state(fdt, sess); 1583 if (res) 1584 return res; 1585 1586 res = read_sp_msg_types(fdt, sess); 1587 if (res) 1588 return res; 1589 1590 res = read_vm_availability_msg(fdt, sess); 1591 if (res) 1592 return res; 1593 1594 return TEE_SUCCESS; 1595 } 1596 1597 static TEE_Result sp_first_run(struct sp_session *sess) 1598 { 1599 TEE_Result res = TEE_SUCCESS; 1600 struct thread_smc_args args = { }; 1601 struct sp_ctx *ctx = NULL; 1602 vaddr_t boot_info_va = 0; 1603 size_t boot_info_size = 0; 1604 void *fdt_copy = NULL; 1605 size_t fdt_size = 0; 1606 1607 ctx = to_sp_ctx(sess->ts_sess.ctx); 1608 ts_push_current_session(&sess->ts_sess); 1609 sess->is_initialized = false; 1610 1611 /* 1612 * Load relative memory regions must be handled before doing any other 1613 * mapping to prevent conflicts in the VA space. 1614 */ 1615 res = handle_fdt_load_relative_mem_regions(ctx, sess->fdt); 1616 if (res) { 1617 ts_pop_current_session(); 1618 return res; 1619 } 1620 1621 res = copy_and_map_fdt(ctx, sess->fdt, &fdt_copy, &fdt_size); 1622 if (res) 1623 goto out; 1624 1625 res = handle_fdt_dev_regions(ctx, fdt_copy); 1626 if (res) 1627 goto out; 1628 1629 res = handle_fdt_mem_regions(ctx, fdt_copy); 1630 if (res) 1631 goto out; 1632 1633 if (IS_ENABLED(CFG_CORE_TPM_EVENT_LOG)) { 1634 res = handle_tpm_event_log(ctx, fdt_copy); 1635 if (res) 1636 goto out; 1637 } 1638 1639 res = handle_hw_features(fdt_copy); 1640 if (res) 1641 goto out; 1642 1643 res = create_and_map_boot_info(ctx, fdt_copy, &args, &boot_info_va, 1644 &boot_info_size, sess->rxtx.ffa_vers); 1645 if (res) 1646 goto out; 1647 1648 ts_pop_current_session(); 1649 1650 res = sp_enter(&args, sess); 1651 if (res) { 1652 ts_push_current_session(&sess->ts_sess); 1653 goto out; 1654 } 1655 1656 spmc_sp_msg_handler(&args, sess); 1657 1658 ts_push_current_session(&sess->ts_sess); 1659 sess->is_initialized = true; 1660 1661 out: 1662 /* Free the boot info page from the SP memory */ 1663 vm_unmap(&ctx->uctx, boot_info_va, boot_info_size); 1664 vm_unmap(&ctx->uctx, (vaddr_t)fdt_copy, fdt_size); 1665 ts_pop_current_session(); 1666 1667 return res; 1668 } 1669 1670 TEE_Result sp_enter(struct thread_smc_args *args, struct sp_session *sp) 1671 { 1672 TEE_Result res = TEE_SUCCESS; 1673 struct sp_ctx *ctx = to_sp_ctx(sp->ts_sess.ctx); 1674 1675 ctx->sp_regs.x[0] = args->a0; 1676 ctx->sp_regs.x[1] = args->a1; 1677 ctx->sp_regs.x[2] = args->a2; 1678 ctx->sp_regs.x[3] = args->a3; 1679 ctx->sp_regs.x[4] = args->a4; 1680 ctx->sp_regs.x[5] = args->a5; 1681 ctx->sp_regs.x[6] = args->a6; 1682 ctx->sp_regs.x[7] = args->a7; 1683 1684 res = sp->ts_sess.ctx->ops->enter_invoke_cmd(&sp->ts_sess, 0); 1685 1686 args->a0 = ctx->sp_regs.x[0]; 1687 args->a1 = ctx->sp_regs.x[1]; 1688 args->a2 = ctx->sp_regs.x[2]; 1689 args->a3 = ctx->sp_regs.x[3]; 1690 args->a4 = ctx->sp_regs.x[4]; 1691 args->a5 = ctx->sp_regs.x[5]; 1692 args->a6 = ctx->sp_regs.x[6]; 1693 args->a7 = ctx->sp_regs.x[7]; 1694 1695 return res; 1696 } 1697 1698 /* 1699 * According to FF-A v1.1 section 8.3.1.4 if a caller requires less permissive 1700 * active on NS interrupt than the callee, the callee must inherit the caller's 1701 * configuration. 1702 * Each SP's own NS action setting is stored in ns_int_mode. The effective 1703 * action will be MIN([self action], [caller's action]) which is stored in the 1704 * ns_int_mode_inherited field. 1705 */ 1706 static void sp_cpsr_configure_foreign_interrupts(struct sp_session *s, 1707 struct ts_session *caller, 1708 uint64_t *cpsr) 1709 { 1710 if (caller) { 1711 struct sp_session *caller_sp = to_sp_session(caller); 1712 1713 s->ns_int_mode_inherited = MIN(caller_sp->ns_int_mode_inherited, 1714 s->ns_int_mode); 1715 } else { 1716 s->ns_int_mode_inherited = s->ns_int_mode; 1717 } 1718 1719 if (s->ns_int_mode_inherited == SP_MANIFEST_NS_INT_QUEUED) 1720 *cpsr |= SHIFT_U32(THREAD_EXCP_FOREIGN_INTR, 1721 ARM32_CPSR_F_SHIFT); 1722 else 1723 *cpsr &= ~SHIFT_U32(THREAD_EXCP_FOREIGN_INTR, 1724 ARM32_CPSR_F_SHIFT); 1725 } 1726 1727 static TEE_Result sp_enter_invoke_cmd(struct ts_session *s, 1728 uint32_t cmd __unused) 1729 { 1730 struct sp_ctx *ctx = to_sp_ctx(s->ctx); 1731 TEE_Result res = TEE_SUCCESS; 1732 uint32_t exceptions = 0; 1733 struct sp_session *sp_s = to_sp_session(s); 1734 struct ts_session *sess = NULL; 1735 struct thread_ctx_regs *sp_regs = NULL; 1736 uint32_t thread_id = THREAD_ID_INVALID; 1737 struct ts_session *caller = NULL; 1738 uint32_t rpc_target_info = 0; 1739 uint32_t panicked = false; 1740 uint32_t panic_code = 0; 1741 1742 sp_regs = &ctx->sp_regs; 1743 ts_push_current_session(s); 1744 1745 exceptions = thread_mask_exceptions(THREAD_EXCP_ALL); 1746 1747 /* Enable/disable foreign interrupts in CPSR/SPSR */ 1748 caller = ts_get_calling_session(); 1749 sp_cpsr_configure_foreign_interrupts(sp_s, caller, &sp_regs->cpsr); 1750 1751 /* 1752 * Store endpoint ID and thread ID in rpc_target_info. This will be used 1753 * as w1 in FFA_INTERRUPT in case of a foreign interrupt. 1754 */ 1755 rpc_target_info = thread_get_tsd()->rpc_target_info; 1756 thread_id = thread_get_id(); 1757 assert(thread_id <= UINT16_MAX); 1758 thread_get_tsd()->rpc_target_info = 1759 FFA_TARGET_INFO_SET(sp_s->endpoint_id, thread_id); 1760 1761 __thread_enter_user_mode(sp_regs, &panicked, &panic_code); 1762 1763 /* Restore rpc_target_info */ 1764 thread_get_tsd()->rpc_target_info = rpc_target_info; 1765 1766 thread_unmask_exceptions(exceptions); 1767 1768 thread_user_clear_vfp(&ctx->uctx); 1769 1770 if (panicked) { 1771 DMSG("SP panicked with code %#"PRIx32, panic_code); 1772 abort_print_current_ts(); 1773 1774 sess = ts_pop_current_session(); 1775 cpu_spin_lock(&sp_s->spinlock); 1776 sp_s->state = sp_dead; 1777 cpu_spin_unlock(&sp_s->spinlock); 1778 1779 return TEE_ERROR_TARGET_DEAD; 1780 } 1781 1782 sess = ts_pop_current_session(); 1783 assert(sess == s); 1784 1785 return res; 1786 } 1787 1788 /* We currently don't support 32 bits */ 1789 #ifdef ARM64 1790 static void sp_svc_store_registers(struct thread_scall_regs *regs, 1791 struct thread_ctx_regs *sp_regs) 1792 { 1793 COMPILE_TIME_ASSERT(sizeof(sp_regs->x[0]) == sizeof(regs->x0)); 1794 memcpy(sp_regs->x, ®s->x0, 31 * sizeof(regs->x0)); 1795 sp_regs->pc = regs->elr; 1796 sp_regs->sp = regs->sp_el0; 1797 } 1798 #endif 1799 1800 static bool sp_handle_scall(struct thread_scall_regs *regs) 1801 { 1802 struct ts_session *ts = ts_get_current_session(); 1803 struct sp_ctx *uctx = to_sp_ctx(ts->ctx); 1804 struct sp_session *s = uctx->open_session; 1805 1806 assert(s); 1807 1808 sp_svc_store_registers(regs, &uctx->sp_regs); 1809 1810 regs->x0 = 0; 1811 regs->x1 = 0; /* panic */ 1812 regs->x2 = 0; /* panic code */ 1813 1814 /* 1815 * All the registers of the SP are saved in the SP session by the SVC 1816 * handler. 1817 * We always return to S-El1 after handling the SVC. We will continue 1818 * in sp_enter_invoke_cmd() (return from __thread_enter_user_mode). 1819 * The sp_enter() function copies the FF-A parameters (a0-a7) from the 1820 * saved registers to the thread_smc_args. The thread_smc_args object is 1821 * afterward used by the spmc_sp_msg_handler() to handle the 1822 * FF-A message send by the SP. 1823 */ 1824 return false; 1825 } 1826 1827 static void sp_dump_state(struct ts_ctx *ctx) 1828 { 1829 struct sp_ctx *utc = to_sp_ctx(ctx); 1830 1831 if (utc->uctx.dump_entry_func) { 1832 TEE_Result res = ldelf_dump_state(&utc->uctx); 1833 1834 if (!res || res == TEE_ERROR_TARGET_DEAD) 1835 return; 1836 } 1837 1838 user_mode_ctx_print_mappings(&utc->uctx); 1839 } 1840 1841 static const struct ts_ops sp_ops = { 1842 .enter_invoke_cmd = sp_enter_invoke_cmd, 1843 .handle_scall = sp_handle_scall, 1844 .dump_state = sp_dump_state, 1845 }; 1846 1847 static TEE_Result process_sp_pkg(uint64_t sp_pkg_pa, TEE_UUID *sp_uuid) 1848 { 1849 enum teecore_memtypes mtype = MEM_AREA_TA_RAM; 1850 struct sp_pkg_header *sp_pkg_hdr = NULL; 1851 struct fip_sp *sp = NULL; 1852 uint64_t sp_fdt_end = 0; 1853 size_t sp_pkg_size = 0; 1854 vaddr_t sp_pkg_va = 0; 1855 1856 /* Process the first page which contains the SP package header */ 1857 sp_pkg_va = (vaddr_t)phys_to_virt(sp_pkg_pa, mtype, SMALL_PAGE_SIZE); 1858 if (!sp_pkg_va) { 1859 EMSG("Cannot find mapping for PA %#" PRIxPA, sp_pkg_pa); 1860 return TEE_ERROR_GENERIC; 1861 } 1862 1863 sp_pkg_hdr = (struct sp_pkg_header *)sp_pkg_va; 1864 1865 if (sp_pkg_hdr->magic != SP_PKG_HEADER_MAGIC) { 1866 EMSG("Invalid SP package magic"); 1867 return TEE_ERROR_BAD_FORMAT; 1868 } 1869 1870 if (sp_pkg_hdr->version != SP_PKG_HEADER_VERSION_V1 && 1871 sp_pkg_hdr->version != SP_PKG_HEADER_VERSION_V2) { 1872 EMSG("Invalid SP header version"); 1873 return TEE_ERROR_BAD_FORMAT; 1874 } 1875 1876 if (ADD_OVERFLOW(sp_pkg_hdr->img_offset, sp_pkg_hdr->img_size, 1877 &sp_pkg_size)) { 1878 EMSG("Invalid SP package size"); 1879 return TEE_ERROR_BAD_FORMAT; 1880 } 1881 1882 if (ADD_OVERFLOW(sp_pkg_hdr->pm_offset, sp_pkg_hdr->pm_size, 1883 &sp_fdt_end) || sp_fdt_end > sp_pkg_hdr->img_offset) { 1884 EMSG("Invalid SP manifest size"); 1885 return TEE_ERROR_BAD_FORMAT; 1886 } 1887 1888 /* Process the whole SP package now that the size is known */ 1889 sp_pkg_va = (vaddr_t)phys_to_virt(sp_pkg_pa, mtype, sp_pkg_size); 1890 if (!sp_pkg_va) { 1891 EMSG("Cannot find mapping for PA %#" PRIxPA, sp_pkg_pa); 1892 return TEE_ERROR_GENERIC; 1893 } 1894 1895 sp_pkg_hdr = (struct sp_pkg_header *)sp_pkg_va; 1896 1897 sp = calloc(1, sizeof(struct fip_sp)); 1898 if (!sp) 1899 return TEE_ERROR_OUT_OF_MEMORY; 1900 1901 memcpy(&sp->sp_img.image.uuid, sp_uuid, sizeof(*sp_uuid)); 1902 sp->sp_img.image.ts = (uint8_t *)(sp_pkg_va + sp_pkg_hdr->img_offset); 1903 sp->sp_img.image.size = sp_pkg_hdr->img_size; 1904 sp->sp_img.image.flags = 0; 1905 sp->sp_img.fdt = (uint8_t *)(sp_pkg_va + sp_pkg_hdr->pm_offset); 1906 1907 STAILQ_INSERT_TAIL(&fip_sp_list, sp, link); 1908 1909 return TEE_SUCCESS; 1910 } 1911 1912 static TEE_Result fip_sp_init_all(void) 1913 { 1914 TEE_Result res = TEE_SUCCESS; 1915 uint64_t sp_pkg_addr = 0; 1916 const void *fdt = NULL; 1917 TEE_UUID sp_uuid = { }; 1918 int sp_pkgs_node = 0; 1919 int subnode = 0; 1920 int root = 0; 1921 1922 fdt = get_manifest_dt(); 1923 if (!fdt) { 1924 EMSG("No SPMC manifest found"); 1925 return TEE_ERROR_GENERIC; 1926 } 1927 1928 root = fdt_path_offset(fdt, "/"); 1929 if (root < 0) 1930 return TEE_ERROR_BAD_FORMAT; 1931 1932 if (fdt_node_check_compatible(fdt, root, "arm,ffa-core-manifest-1.0")) 1933 return TEE_ERROR_BAD_FORMAT; 1934 1935 /* SP packages are optional, it's not an error if we don't find any */ 1936 sp_pkgs_node = fdt_node_offset_by_compatible(fdt, root, "arm,sp_pkg"); 1937 if (sp_pkgs_node < 0) 1938 return TEE_SUCCESS; 1939 1940 fdt_for_each_subnode(subnode, fdt, sp_pkgs_node) { 1941 res = sp_dt_get_u64(fdt, subnode, "load-address", &sp_pkg_addr); 1942 if (res) { 1943 EMSG("Invalid FIP SP load address"); 1944 return res; 1945 } 1946 1947 res = sp_dt_get_uuid(fdt, subnode, "uuid", &sp_uuid); 1948 if (res) { 1949 EMSG("Invalid FIP SP uuid"); 1950 return res; 1951 } 1952 1953 res = process_sp_pkg(sp_pkg_addr, &sp_uuid); 1954 if (res) { 1955 EMSG("Invalid FIP SP package"); 1956 return res; 1957 } 1958 } 1959 1960 return TEE_SUCCESS; 1961 } 1962 1963 static void fip_sp_deinit_all(void) 1964 { 1965 while (!STAILQ_EMPTY(&fip_sp_list)) { 1966 struct fip_sp *sp = STAILQ_FIRST(&fip_sp_list); 1967 1968 STAILQ_REMOVE_HEAD(&fip_sp_list, link); 1969 free(sp); 1970 } 1971 } 1972 1973 static TEE_Result sp_init_all(void) 1974 { 1975 TEE_Result res = TEE_SUCCESS; 1976 const struct sp_image *sp = NULL; 1977 const struct fip_sp *fip_sp = NULL; 1978 char __maybe_unused msg[60] = { '\0', }; 1979 struct sp_session *s = NULL; 1980 struct sp_session *prev_sp = NULL; 1981 1982 for_each_secure_partition(sp) { 1983 if (sp->image.uncompressed_size) 1984 snprintf(msg, sizeof(msg), 1985 " (compressed, uncompressed %u)", 1986 sp->image.uncompressed_size); 1987 else 1988 msg[0] = '\0'; 1989 DMSG("SP %pUl size %u%s", (void *)&sp->image.uuid, 1990 sp->image.size, msg); 1991 1992 res = sp_init_uuid(&sp->image.uuid, sp->fdt); 1993 1994 if (res != TEE_SUCCESS) { 1995 EMSG("Failed initializing SP(%pUl) err:%#"PRIx32, 1996 &sp->image.uuid, res); 1997 if (!IS_ENABLED(CFG_SP_SKIP_FAILED)) 1998 panic(); 1999 } 2000 } 2001 2002 res = fip_sp_init_all(); 2003 if (res) 2004 panic("Failed initializing FIP SPs"); 2005 2006 for_each_fip_sp(fip_sp) { 2007 sp = &fip_sp->sp_img; 2008 2009 DMSG("SP %pUl size %u", (void *)&sp->image.uuid, 2010 sp->image.size); 2011 2012 res = sp_init_uuid(&sp->image.uuid, sp->fdt); 2013 2014 if (res != TEE_SUCCESS) { 2015 EMSG("Failed initializing SP(%pUl) err:%#"PRIx32, 2016 &sp->image.uuid, res); 2017 if (!IS_ENABLED(CFG_SP_SKIP_FAILED)) 2018 panic(); 2019 } 2020 } 2021 2022 /* 2023 * At this point all FIP SPs are loaded by ldelf or by the raw binary SP 2024 * loader, so the original images (loaded by BL2) are not needed anymore 2025 */ 2026 fip_sp_deinit_all(); 2027 2028 /* 2029 * Now that all SPs are loaded, check through the boot order values, 2030 * and warn in case there is a non-unique value. 2031 */ 2032 TAILQ_FOREACH(s, &open_sp_sessions, link) { 2033 /* User specified boot-order values are uint16 */ 2034 if (s->boot_order > UINT16_MAX) 2035 break; 2036 2037 if (prev_sp && prev_sp->boot_order == s->boot_order) 2038 IMSG("WARNING: duplicated boot-order (%pUl vs %pUl)", 2039 &prev_sp->ts_sess.ctx->uuid, 2040 &s->ts_sess.ctx->uuid); 2041 2042 prev_sp = s; 2043 } 2044 2045 /* Continue the initialization and run the SP */ 2046 TAILQ_FOREACH(s, &open_sp_sessions, link) { 2047 DMSG("Starting SP: 0x%"PRIx16, s->endpoint_id); 2048 2049 res = sp_first_run(s); 2050 if (res != TEE_SUCCESS) { 2051 EMSG("Failed starting SP(0x%"PRIx16") err:%#"PRIx32, 2052 s->endpoint_id, res); 2053 if (!IS_ENABLED(CFG_SP_SKIP_FAILED)) 2054 panic(); 2055 } 2056 } 2057 2058 return TEE_SUCCESS; 2059 } 2060 2061 boot_final(sp_init_all); 2062 2063 static TEE_Result secure_partition_open(const TEE_UUID *uuid, 2064 struct ts_store_handle **h) 2065 { 2066 return emb_ts_open(uuid, h, find_secure_partition); 2067 } 2068 2069 REGISTER_SP_STORE(2) = { 2070 .description = "SP store", 2071 .open = secure_partition_open, 2072 .get_size = emb_ts_get_size, 2073 .get_tag = emb_ts_get_tag, 2074 .read = emb_ts_read, 2075 .close = emb_ts_close, 2076 }; 2077