1 // SPDX-License-Identifier: BSD-2-Clause 2 /* 3 * Copyright (c) 2020-2024, Arm Limited. 4 */ 5 #include <crypto/crypto.h> 6 #include <initcall.h> 7 #include <kernel/boot.h> 8 #include <kernel/embedded_ts.h> 9 #include <kernel/ldelf_loader.h> 10 #include <kernel/secure_partition.h> 11 #include <kernel/spinlock.h> 12 #include <kernel/spmc_sp_handler.h> 13 #include <kernel/thread_private.h> 14 #include <kernel/thread_spmc.h> 15 #include <kernel/tpm.h> 16 #include <kernel/ts_store.h> 17 #include <ldelf.h> 18 #include <libfdt.h> 19 #include <mm/core_mmu.h> 20 #include <mm/fobj.h> 21 #include <mm/mobj.h> 22 #include <mm/phys_mem.h> 23 #include <mm/vm.h> 24 #include <optee_ffa.h> 25 #include <stdio.h> 26 #include <string.h> 27 #include <tee/uuid.h> 28 #include <tee_api_types.h> 29 #include <trace.h> 30 #include <types_ext.h> 31 #include <utee_defines.h> 32 #include <util.h> 33 #include <zlib.h> 34 35 #define BOUNCE_BUFFER_SIZE 4096 36 37 #define SP_MANIFEST_ATTR_READ BIT(0) 38 #define SP_MANIFEST_ATTR_WRITE BIT(1) 39 #define SP_MANIFEST_ATTR_EXEC BIT(2) 40 #define SP_MANIFEST_ATTR_NSEC BIT(3) 41 #define SP_MANIFEST_ATTR_GP BIT(4) 42 43 #define SP_MANIFEST_ATTR_RO (SP_MANIFEST_ATTR_READ) 44 #define SP_MANIFEST_ATTR_RW (SP_MANIFEST_ATTR_READ | \ 45 SP_MANIFEST_ATTR_WRITE) 46 #define SP_MANIFEST_ATTR_RX (SP_MANIFEST_ATTR_READ | \ 47 SP_MANIFEST_ATTR_EXEC) 48 #define SP_MANIFEST_ATTR_RWX (SP_MANIFEST_ATTR_READ | \ 49 SP_MANIFEST_ATTR_WRITE | \ 50 SP_MANIFEST_ATTR_EXEC) 51 52 #define SP_MANIFEST_FLAG_NOBITS BIT(0) 53 54 #define SP_MANIFEST_NS_INT_QUEUED (0x0) 55 #define SP_MANIFEST_NS_INT_MANAGED_EXIT (0x1) 56 #define SP_MANIFEST_NS_INT_SIGNALED (0x2) 57 58 #define SP_MANIFEST_EXEC_STATE_AARCH64 (0x0) 59 #define SP_MANIFEST_EXEC_STATE_AARCH32 (0x1) 60 61 #define SP_MANIFEST_DIRECT_REQ_RECEIVE BIT(0) 62 #define SP_MANIFEST_DIRECT_REQ_SEND BIT(1) 63 #define SP_MANIFEST_INDIRECT_REQ BIT(2) 64 65 #define SP_MANIFEST_VM_CREATED_MSG BIT(0) 66 #define SP_MANIFEST_VM_DESTROYED_MSG 
BIT(1) 67 68 #define SP_PKG_HEADER_MAGIC (0x474b5053) 69 #define SP_PKG_HEADER_VERSION_V1 (0x1) 70 #define SP_PKG_HEADER_VERSION_V2 (0x2) 71 72 struct sp_pkg_header { 73 uint32_t magic; 74 uint32_t version; 75 uint32_t pm_offset; 76 uint32_t pm_size; 77 uint32_t img_offset; 78 uint32_t img_size; 79 }; 80 81 struct fip_sp_head fip_sp_list = STAILQ_HEAD_INITIALIZER(fip_sp_list); 82 83 static const struct ts_ops sp_ops; 84 85 /* List that holds all of the loaded SP's */ 86 static struct sp_sessions_head open_sp_sessions = 87 TAILQ_HEAD_INITIALIZER(open_sp_sessions); 88 89 static const struct embedded_ts *find_secure_partition(const TEE_UUID *uuid) 90 { 91 const struct sp_image *sp = NULL; 92 const struct fip_sp *fip_sp = NULL; 93 94 for_each_secure_partition(sp) { 95 if (!memcmp(&sp->image.uuid, uuid, sizeof(*uuid))) 96 return &sp->image; 97 } 98 99 for_each_fip_sp(fip_sp) { 100 if (!memcmp(&fip_sp->sp_img.image.uuid, uuid, sizeof(*uuid))) 101 return &fip_sp->sp_img.image; 102 } 103 104 return NULL; 105 } 106 107 bool is_sp_ctx(struct ts_ctx *ctx) 108 { 109 return ctx && (ctx->ops == &sp_ops); 110 } 111 112 static void set_sp_ctx_ops(struct ts_ctx *ctx) 113 { 114 ctx->ops = &sp_ops; 115 } 116 117 struct sp_session *sp_get_session(uint32_t session_id) 118 { 119 struct sp_session *s = NULL; 120 121 TAILQ_FOREACH(s, &open_sp_sessions, link) { 122 if (s->endpoint_id == session_id) 123 return s; 124 } 125 126 return NULL; 127 } 128 129 TEE_Result sp_partition_info_get(uint32_t ffa_vers, void *buf, size_t buf_size, 130 const uint32_t ffa_uuid_words[4], 131 size_t *elem_count, bool count_only) 132 { 133 TEE_Result res = TEE_SUCCESS; 134 struct sp_session *s = NULL; 135 TEE_UUID uuid = { }; 136 TEE_UUID *ffa_uuid = NULL; 137 138 if (ffa_uuid_words) { 139 tee_uuid_from_octets(&uuid, (void *)ffa_uuid_words); 140 ffa_uuid = &uuid; 141 } 142 143 TAILQ_FOREACH(s, &open_sp_sessions, link) { 144 if (ffa_uuid && 145 memcmp(&s->ffa_uuid, ffa_uuid, sizeof(*ffa_uuid))) 146 continue; 147 
148 if (s->state == sp_dead) 149 continue; 150 if (!count_only && !res) { 151 uint32_t uuid_words[4] = { 0 }; 152 153 tee_uuid_to_octets((uint8_t *)uuid_words, &s->ffa_uuid); 154 res = spmc_fill_partition_entry(ffa_vers, buf, buf_size, 155 *elem_count, 156 s->endpoint_id, 1, 157 s->props, uuid_words); 158 } 159 *elem_count += 1; 160 } 161 162 return res; 163 } 164 165 bool sp_has_exclusive_access(struct sp_mem_map_region *mem, 166 struct user_mode_ctx *uctx) 167 { 168 /* 169 * Check that we have access to the region if it is supposed to be 170 * mapped to the current context. 171 */ 172 if (uctx) { 173 struct vm_region *region = NULL; 174 175 /* Make sure that each mobj belongs to the SP */ 176 TAILQ_FOREACH(region, &uctx->vm_info.regions, link) { 177 if (region->mobj == mem->mobj) 178 break; 179 } 180 181 if (!region) 182 return false; 183 } 184 185 /* Check that it is not shared with another SP */ 186 return !sp_mem_is_shared(mem); 187 } 188 189 static bool endpoint_id_is_valid(uint32_t id) 190 { 191 /* 192 * These IDs are assigned at the SPMC init so already have valid values 193 * by the time this function gets first called 194 */ 195 return !spmc_is_reserved_id(id) && !spmc_find_lsp_by_sp_id(id) && 196 id >= FFA_SWD_ID_MIN && id <= FFA_SWD_ID_MAX; 197 } 198 199 static TEE_Result new_session_id(uint16_t *endpoint_id) 200 { 201 uint32_t id = 0; 202 203 /* Find the first available endpoint id */ 204 for (id = FFA_SWD_ID_MIN; id <= FFA_SWD_ID_MAX; id++) { 205 if (endpoint_id_is_valid(id) && !sp_get_session(id)) { 206 *endpoint_id = id; 207 return TEE_SUCCESS; 208 } 209 } 210 211 return TEE_ERROR_BAD_FORMAT; 212 } 213 214 static TEE_Result sp_create_ctx(const TEE_UUID *bin_uuid, struct sp_session *s) 215 { 216 TEE_Result res = TEE_SUCCESS; 217 struct sp_ctx *spc = NULL; 218 219 /* Register context */ 220 spc = calloc(1, sizeof(struct sp_ctx)); 221 if (!spc) 222 return TEE_ERROR_OUT_OF_MEMORY; 223 224 spc->open_session = s; 225 s->ts_sess.ctx = &spc->ts_ctx; 226 
spc->ts_ctx.uuid = *bin_uuid; 227 228 res = vm_info_init(&spc->uctx, &spc->ts_ctx); 229 if (res) 230 goto err; 231 232 set_sp_ctx_ops(&spc->ts_ctx); 233 234 #ifdef CFG_TA_PAUTH 235 crypto_rng_read(&spc->uctx.keys, sizeof(spc->uctx.keys)); 236 #endif 237 238 return TEE_SUCCESS; 239 240 err: 241 free(spc); 242 return res; 243 } 244 245 /* 246 * Insert a new sp_session to the sessions list, so that it is ordered 247 * by boot_order. 248 */ 249 static void insert_session_ordered(struct sp_sessions_head *open_sessions, 250 struct sp_session *session) 251 { 252 struct sp_session *s = NULL; 253 254 if (!open_sessions || !session) 255 return; 256 257 TAILQ_FOREACH(s, &open_sp_sessions, link) { 258 if (s->boot_order > session->boot_order) 259 break; 260 } 261 262 if (!s) 263 TAILQ_INSERT_TAIL(open_sessions, session, link); 264 else 265 TAILQ_INSERT_BEFORE(s, session, link); 266 } 267 268 static TEE_Result sp_create_session(struct sp_sessions_head *open_sessions, 269 const TEE_UUID *bin_uuid, 270 const uint32_t boot_order, 271 struct sp_session **sess) 272 { 273 TEE_Result res = TEE_SUCCESS; 274 struct sp_session *s = calloc(1, sizeof(struct sp_session)); 275 276 if (!s) 277 return TEE_ERROR_OUT_OF_MEMORY; 278 279 s->boot_order = boot_order; 280 281 /* Other properties are filled later, based on the SP's manifest */ 282 s->props = FFA_PART_PROP_IS_PE_ID; 283 284 res = new_session_id(&s->endpoint_id); 285 if (res) 286 goto err; 287 288 DMSG("Loading Secure Partition %pUl", (void *)bin_uuid); 289 res = sp_create_ctx(bin_uuid, s); 290 if (res) 291 goto err; 292 293 insert_session_ordered(open_sessions, s); 294 *sess = s; 295 return TEE_SUCCESS; 296 297 err: 298 free(s); 299 return res; 300 } 301 302 static TEE_Result sp_init_set_registers(struct sp_ctx *ctx) 303 { 304 struct thread_ctx_regs *sp_regs = &ctx->sp_regs; 305 306 memset(sp_regs, 0, sizeof(*sp_regs)); 307 sp_regs->sp = ctx->uctx.stack_ptr; 308 sp_regs->pc = ctx->uctx.entry_func; 309 310 return TEE_SUCCESS; 311 } 312 
/*
 * Map an FF-A shared memory object @smem into SP session @s with the access
 * permissions requested by @receiver.
 *
 * @va must point to 0 on entry: passing a caller-chosen VA is rejected with
 * TEE_ERROR_NOT_SUPPORTED because a share may consist of several mobjs and
 * contiguous mapping cannot be guaranteed. On return *@va holds the VA
 * chosen by vm_map() (for the last region mapped).
 */
TEE_Result sp_map_shared(struct sp_session *s,
			 struct sp_mem_receiver *receiver,
			 struct sp_mem *smem,
			 uint64_t *va)
{
	TEE_Result res = TEE_SUCCESS;
	struct sp_ctx *ctx = NULL;
	uint32_t perm = TEE_MATTR_UR;
	struct sp_mem_map_region *reg = NULL;

	ctx = to_sp_ctx(s->ts_sess.ctx);

	/* Get the permission */
	if (receiver->perm.perm & FFA_MEM_ACC_EXE)
		perm |= TEE_MATTR_UX;

	if (receiver->perm.perm & FFA_MEM_ACC_RW) {
		/* Write and execute access may not be combined */
		if (receiver->perm.perm & FFA_MEM_ACC_EXE)
			return TEE_ERROR_ACCESS_CONFLICT;

		perm |= TEE_MATTR_UW;
	}
	/*
	 * Currently we don't support passing a va. We can't guarantee that the
	 * full region will be mapped in a contiguous region. A smem->region can
	 * have multiple mobj for one share. Currently there doesn't seem to be
	 * an option to guarantee that these will be mapped in a contiguous va
	 * space.
	 */
	if (*va)
		return TEE_ERROR_NOT_SUPPORTED;

	SLIST_FOREACH(reg, &smem->regions, link) {
		res = vm_map(&ctx->uctx, va, reg->page_count * SMALL_PAGE_SIZE,
			     perm, 0, reg->mobj, reg->page_offset);

		if (res != TEE_SUCCESS) {
			EMSG("Failed to map memory region %#"PRIx32, res);
			return res;
		}
	}
	return TEE_SUCCESS;
}

/*
 * Unmap every region of share @smem from SP session @s. Stops and returns
 * the error on the first region that fails to unmap.
 */
TEE_Result sp_unmap_ffa_regions(struct sp_session *s, struct sp_mem *smem)
{
	TEE_Result res = TEE_SUCCESS;
	vaddr_t vaddr = 0;
	size_t len = 0;
	struct sp_ctx *ctx = to_sp_ctx(s->ts_sess.ctx);
	struct sp_mem_map_region *reg = NULL;

	SLIST_FOREACH(reg, &smem->regions, link) {
		vaddr = (vaddr_t)sp_mem_get_va(&ctx->uctx, reg->page_offset,
					       reg->mobj);
		len = reg->page_count * SMALL_PAGE_SIZE;

		res = vm_unmap(&ctx->uctx, vaddr, len);
		if (res != TEE_SUCCESS)
			return res;
	}

	return TEE_SUCCESS;
}

/*
 * Read a 64-bit big-endian property @property of @node from @fdt.
 * Returns TEE_ERROR_ITEM_NOT_FOUND when absent and TEE_ERROR_BAD_FORMAT
 * when the property length isn't exactly 8 bytes.
 */
static TEE_Result sp_dt_get_u64(const void *fdt, int node, const char *property,
				uint64_t *value)
{
	const fdt64_t *p = NULL;
	int len = 0;

	p = fdt_getprop(fdt, node, property, &len);
	if (!p)
		return TEE_ERROR_ITEM_NOT_FOUND;

	if (len != sizeof(*p))
		return TEE_ERROR_BAD_FORMAT;

	*value = fdt64_ld(p);

	return TEE_SUCCESS;
}

/*
 * Read a 32-bit big-endian property @property of @node from @fdt.
 * Same error semantics as sp_dt_get_u64().
 */
static TEE_Result sp_dt_get_u32(const void *fdt, int node, const char *property,
				uint32_t *value)
{
	const fdt32_t *p = NULL;
	int len = 0;

	p = fdt_getprop(fdt, node, property, &len);
	if (!p)
		return TEE_ERROR_ITEM_NOT_FOUND;

	if (len != sizeof(*p))
		return TEE_ERROR_BAD_FORMAT;

	*value = fdt32_to_cpu(*p);

	return TEE_SUCCESS;
}

/*
 * Read a 16-bit big-endian property @property of @node from @fdt.
 * Same error semantics as sp_dt_get_u64().
 */
static TEE_Result sp_dt_get_u16(const void *fdt, int node, const char *property,
				uint16_t *value)
{
	const fdt16_t *p = NULL;
	int len = 0;

	p = fdt_getprop(fdt, node, property, &len);
	if (!p)
		return TEE_ERROR_ITEM_NOT_FOUND;

	if (len != sizeof(*p))
		return TEE_ERROR_BAD_FORMAT;

	*value = fdt16_to_cpu(*p);

	return TEE_SUCCESS;
}

/*
 * Read a 16-byte UUID property stored as four 32-bit big-endian words and
 * convert it (via octet order) into a TEE_UUID.
 */
static TEE_Result sp_dt_get_uuid(const void *fdt, int node,
				 const char *property, TEE_UUID *uuid)
{
	uint32_t uuid_array[4] = { 0 };
	const fdt32_t *p = NULL;
	int len = 0;
	int i = 0;

	p = fdt_getprop(fdt, node, property, &len);
	if (!p)
		return TEE_ERROR_ITEM_NOT_FOUND;

	if (len != sizeof(TEE_UUID))
		return TEE_ERROR_BAD_FORMAT;

	for (i = 0; i < 4; i++)
		uuid_array[i] = fdt32_to_cpu(p[i]);

	tee_uuid_from_octets(uuid, (uint8_t *)uuid_array);

	return TEE_SUCCESS;
}

/*
 * Determine from the manifest whether the SP image is an ELF ("elf-format"
 * property non-zero) or a raw binary. A missing property means raw binary
 * and is not an error.
 */
static TEE_Result sp_is_elf_format(const void *fdt, int sp_node,
				   bool *is_elf_format)
{
	TEE_Result res = TEE_SUCCESS;
	uint32_t elf_format = 0;

	res = sp_dt_get_u32(fdt, sp_node, "elf-format", &elf_format);
	if (res != TEE_SUCCESS && res != TEE_ERROR_ITEM_NOT_FOUND)
		return res;

	*is_elf_format = (elf_format != 0);

	return TEE_SUCCESS;
}

/*
 * Try each registered SP store in turn until one can open the binary for
 * @uuid. On success *@ops/*@handle identify the store and open handle.
 */
static TEE_Result sp_binary_open(const TEE_UUID *uuid,
				 const struct ts_store_ops **ops,
				 struct ts_store_handle **handle)
{
	TEE_Result res = TEE_ERROR_ITEM_NOT_FOUND;

	SCATTERED_ARRAY_FOREACH(*ops, sp_stores, struct ts_store_ops) {
		res = (*ops)->open(uuid, handle);
		/* Try the next store only on "not found"-style errors */
		if (res != TEE_ERROR_ITEM_NOT_FOUND &&
		    res != TEE_ERROR_STORAGE_NOT_AVAILABLE)
			break;
	}

	return res;
}

/*
 * Load a raw (non-ELF) SP binary into @uctx:
 * - allocate and map the bounce buffer,
 * - open the binary in its store and query its size,
 * - allocate physical memory, wrap it in a mobj and map it RWX,
 * - read the image, then drop the mapping to execute-only,
 * - set entry point/load address (entry is at offset 0 of the image).
 *
 * On error the goto chain unwinds in reverse order of acquisition:
 * mapping -> mobj -> tee_mm allocation -> store handle. On success the
 * tee_mm entry is intentionally kept: it tracks the physical memory now
 * owned by the SP.
 */
static TEE_Result load_binary_sp(struct ts_session *s,
				 struct user_mode_ctx *uctx)
{
	size_t bin_size = 0, bin_size_rounded = 0, bin_page_count = 0;
	size_t bb_size = ROUNDUP(BOUNCE_BUFFER_SIZE, SMALL_PAGE_SIZE);
	size_t bb_num_pages = bb_size / SMALL_PAGE_SIZE;
	const struct ts_store_ops *store_ops = NULL;
	struct ts_store_handle *handle = NULL;
	TEE_Result res = TEE_SUCCESS;
	tee_mm_entry_t *mm = NULL;
	struct fobj *fobj = NULL;
	struct mobj *mobj = NULL;
	uaddr_t base_addr = 0;
	uint32_t vm_flags = 0;
	unsigned int idx = 0;
	vaddr_t va = 0;

	if (!s || !uctx)
		return TEE_ERROR_BAD_PARAMETERS;

	DMSG("Loading raw binary format SP %pUl", &uctx->ts_ctx->uuid);

	/* Initialize the bounce buffer */
	fobj = fobj_sec_mem_alloc(bb_num_pages);
	/* mobj takes its own reference on fobj; drop ours right away */
	mobj = mobj_with_fobj_alloc(fobj, NULL, TEE_MATTR_MEM_TYPE_TAGGED);
	fobj_put(fobj);
	if (!mobj)
		return TEE_ERROR_OUT_OF_MEMORY;

	res = vm_map(uctx, &va, bb_size, TEE_MATTR_PRW, 0, mobj, 0);
	mobj_put(mobj);
	if (res)
		return res;

	uctx->bbuf = (uint8_t *)va;
	uctx->bbuf_size = BOUNCE_BUFFER_SIZE;

	vm_set_ctx(uctx->ts_ctx);

	/* Find TS store and open SP binary */
	res = sp_binary_open(&uctx->ts_ctx->uuid, &store_ops, &handle);
	if (res != TEE_SUCCESS) {
		EMSG("Failed to open SP binary");
		return res;
	}

	/* Query binary size and calculate page count */
	res = store_ops->get_size(handle, &bin_size);
	if (res != TEE_SUCCESS)
		goto err;

	if (ROUNDUP_OVERFLOW(bin_size, SMALL_PAGE_SIZE, &bin_size_rounded)) {
		res = TEE_ERROR_OVERFLOW;
		goto err;
	}

	bin_page_count = bin_size_rounded / SMALL_PAGE_SIZE;

	/* Allocate memory */
	mm = phys_mem_ta_alloc(bin_size_rounded);
	if (!mm) {
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto err;
	}

	base_addr = tee_mm_get_smem(mm);

	/* Create mobj */
	mobj = sp_mem_new_mobj(bin_page_count, TEE_MATTR_MEM_TYPE_CACHED, true);
	if (!mobj) {
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto err_free_tee_mm;
	}

	res = sp_mem_add_pages(mobj, &idx, base_addr, bin_page_count);
	if (res)
		goto err_free_mobj;

	/* Map memory area for the SP binary */
	va = 0;
	res = vm_map(uctx, &va, bin_size_rounded, TEE_MATTR_URWX,
		     vm_flags, mobj, 0);
	if (res)
		goto err_free_mobj;

	/* Read SP binary into the previously mapped memory area */
	res = store_ops->read(handle, NULL, (void *)va, bin_size);
	if (res)
		goto err_unmap;

	/* Set memory protection to allow execution */
	res = vm_set_prot(uctx, va, bin_size_rounded, TEE_MATTR_UX);
	if (res)
		goto err_unmap;

	mobj_put(mobj);
	store_ops->close(handle);

	/* The entry point must be at the beginning of the SP binary. */
	uctx->entry_func = va;
	uctx->load_addr = va;
	uctx->is_32bit = false;

	s->handle_scall = s->ctx->ops->handle_scall;

	return TEE_SUCCESS;

err_unmap:
	vm_unmap(uctx, va, bin_size_rounded);

err_free_mobj:
	mobj_put(mobj);

err_free_tee_mm:
	tee_mm_free(mm);

err:
	store_ops->close(handle);

	return res;
}

/*
 * Create a session for the SP identified by @bin_uuid and load its image
 * (via ldelf for ELF, or directly for raw binaries). On success the session
 * is left in sp_busy state so no endpoint can message it before its boot
 * phase completes, and *@sess is set.
 *
 * Returns TEE_ERROR_TARGET_DEAD when loading fails; the session itself is
 * not torn down here.
 */
static TEE_Result sp_open_session(struct sp_session **sess,
				  struct sp_sessions_head *open_sessions,
				  const TEE_UUID *ffa_uuid,
				  const TEE_UUID *bin_uuid,
				  const uint32_t boot_order,
				  const void *fdt)
{
	TEE_Result res = TEE_SUCCESS;
	struct sp_session *s = NULL;
	struct sp_ctx *ctx = NULL;
	bool is_elf_format = false;

	if (!find_secure_partition(bin_uuid))
		return TEE_ERROR_ITEM_NOT_FOUND;

	res = sp_create_session(open_sessions, bin_uuid, boot_order, &s);
	if (res != TEE_SUCCESS) {
		DMSG("sp_create_session failed %#"PRIx32, res);
		return res;
	}

	ctx = to_sp_ctx(s->ts_sess.ctx);
	assert(ctx);
	if (!ctx)
		return TEE_ERROR_TARGET_DEAD;
	*sess = s;

	ts_push_current_session(&s->ts_sess);

	res = sp_is_elf_format(fdt, 0, &is_elf_format);
	if (res == TEE_SUCCESS) {
		if (is_elf_format) {
			/* Load the SP using ldelf. */
			ldelf_load_ldelf(&ctx->uctx);
			res = ldelf_init_with_ldelf(&s->ts_sess, &ctx->uctx);
		} else {
			/* Raw binary format SP */
			res = load_binary_sp(&s->ts_sess, &ctx->uctx);
		}
	} else {
		EMSG("Failed to detect SP format");
	}

	if (res != TEE_SUCCESS) {
		EMSG("Failed loading SP %#"PRIx32, res);
		ts_pop_current_session();
		return TEE_ERROR_TARGET_DEAD;
	}

	/*
	 * Make the SP ready for its first run.
	 * Set state to busy to prevent other endpoints from sending messages to
	 * the SP before its boot phase is done.
	 */
	s->state = sp_busy;
	s->caller_id = 0;
	sp_init_set_registers(ctx);
	memcpy(&s->ffa_uuid, ffa_uuid, sizeof(*ffa_uuid));
	ts_pop_current_session();

	return TEE_SUCCESS;
}

/*
 * Validate that @fdt is an FF-A 1.0 manifest and extract its mandatory
 * "uuid" property into @uuid. Logs the optional "description" property.
 */
static TEE_Result fdt_get_uuid(const void * const fdt, TEE_UUID *uuid)
{
	const struct fdt_property *description = NULL;
	int description_name_len = 0;

	if (fdt_node_check_compatible(fdt, 0, "arm,ffa-manifest-1.0")) {
		EMSG("Failed loading SP, manifest not found");
		return TEE_ERROR_BAD_PARAMETERS;
	}

	description = fdt_get_property(fdt, 0, "description",
				       &description_name_len);
	if (description)
		DMSG("Loading SP: %s", description->data);

	if (sp_dt_get_uuid(fdt, 0, "uuid", uuid)) {
		EMSG("Missing or invalid UUID in SP manifest");
		return TEE_ERROR_BAD_FORMAT;
	}

	return TEE_SUCCESS;
}

/*
 * Copy the manifest @fdt into freshly allocated secure memory mapped into
 * the SP (user-read, kernel-read/write) so the SP gets its own writable
 * view. On success *@fdt_copy is the SP-visible copy and *@mapped_size the
 * page-rounded size of the mapping.
 */
static TEE_Result copy_and_map_fdt(struct sp_ctx *ctx, const void * const fdt,
				   void **fdt_copy, size_t *mapped_size)
{
	size_t total_size = ROUNDUP(fdt_totalsize(fdt), SMALL_PAGE_SIZE);
	size_t num_pages = total_size / SMALL_PAGE_SIZE;
	uint32_t perm = TEE_MATTR_UR | TEE_MATTR_PRW;
	TEE_Result res = TEE_SUCCESS;
	struct mobj *m = NULL;
	struct fobj *f = NULL;
	vaddr_t va = 0;

	f = fobj_sec_mem_alloc(num_pages);
	m = mobj_with_fobj_alloc(f, NULL, TEE_MATTR_MEM_TYPE_TAGGED);
	fobj_put(f);
	if (!m)
		return TEE_ERROR_OUT_OF_MEMORY;

	res = vm_map(&ctx->uctx, &va, total_size, perm, 0, m, 0);
	mobj_put(m);
	if (res)
		return res;

	if (fdt_open_into(fdt, (void *)va, total_size))
		return TEE_ERROR_GENERIC;

	*fdt_copy = (void *)va;
	*mapped_size = total_size;

	return res;
}

/*
 * Fill an FF-A v1.0 boot information blob at @buf with a single entry
 * describing the manifest device tree @fdt (passed by address).
 */
static void fill_boot_info_1_0(vaddr_t buf, const void *fdt)
{
	struct ffa_boot_info_1_0 *info = (struct ffa_boot_info_1_0 *)buf;
	/* Name field is fixed-size and NUL-padded */
	static const char fdt_name[16] = "TYPE_DT\0\0\0\0\0\0\0\0";

	memcpy(&info->magic, "FF-A", 4);
	info->count = 1;

	COMPILE_TIME_ASSERT(sizeof(info->nvp[0].name) == sizeof(fdt_name));
	memcpy(info->nvp[0].name, fdt_name, sizeof(fdt_name));
	info->nvp[0].value = (uintptr_t)fdt;
	info->nvp[0].size = fdt_totalsize(fdt);
}

/*
 * Fill an FF-A v1.1+ boot information header and single FDT descriptor at
 * @buf. The descriptor's contents field carries the address of @fdt and
 * @vers is echoed in the header version field.
 */
static void fill_boot_info_1_1(vaddr_t buf, const void *fdt, uint32_t vers)
{
	/* Descriptors follow the header at the next 8-byte boundary */
	size_t desc_offs = ROUNDUP(sizeof(struct ffa_boot_info_header_1_1), 8);
	struct ffa_boot_info_header_1_1 *header =
		(struct ffa_boot_info_header_1_1 *)buf;
	struct ffa_boot_info_1_1 *desc =
		(struct ffa_boot_info_1_1 *)(buf + desc_offs);

	header->signature = FFA_BOOT_INFO_SIGNATURE;
	header->version = vers;
	header->blob_size = desc_offs + sizeof(struct ffa_boot_info_1_1);
	header->desc_size = sizeof(struct ffa_boot_info_1_1);
	header->desc_count = 1;
	header->desc_offset = desc_offs;

	memset(&desc[0].name, 0, sizeof(desc[0].name));
	/* Type: Standard boot info (bit[7] == 0), FDT type */
	desc[0].type = FFA_BOOT_INFO_TYPE_ID_FDT;
	/* Flags: Contents field contains an address */
	desc[0].flags = FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_ADDR <<
			FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_SHIFT;
	desc[0].size = fdt_totalsize(fdt);
	desc[0].contents = (uintptr_t)fdt;
}
/*
 * Allocate and map the FF-A boot information area for the SP, fill it in
 * the format matching @sp_ffa_version, and pass its address to the SP in
 * the register selected by the manifest's optional "gp-register-num"
 * property (default x0; only x0-x3 are allowed).
 *
 * On success *@va/*@mapped_size describe the mapping and the chosen entry
 * in @args holds the boot info address.
 */
static TEE_Result create_and_map_boot_info(struct sp_ctx *ctx, const void *fdt,
					   struct thread_smc_1_2_regs *args,
					   vaddr_t *va, size_t *mapped_size,
					   uint32_t sp_ffa_version)
{
	size_t total_size = ROUNDUP(CFG_SP_INIT_INFO_MAX_SIZE, SMALL_PAGE_SIZE);
	size_t num_pages = total_size / SMALL_PAGE_SIZE;
	uint32_t perm = TEE_MATTR_UR | TEE_MATTR_PRW;
	TEE_Result res = TEE_SUCCESS;
	struct fobj *f = NULL;
	struct mobj *m = NULL;
	uint32_t info_reg = 0;

	f = fobj_sec_mem_alloc(num_pages);
	/* mobj takes its own reference on the fobj; drop ours */
	m = mobj_with_fobj_alloc(f, NULL, TEE_MATTR_MEM_TYPE_TAGGED);
	fobj_put(f);
	if (!m)
		return TEE_ERROR_OUT_OF_MEMORY;

	res = vm_map(&ctx->uctx, va, total_size, perm, 0, m, 0);
	mobj_put(m);
	if (res)
		return res;

	*mapped_size = total_size;

	switch (sp_ffa_version) {
	case MAKE_FFA_VERSION(1, 0):
		fill_boot_info_1_0(*va, fdt);
		break;
	case MAKE_FFA_VERSION(1, 1):
	case MAKE_FFA_VERSION(1, 2):
		fill_boot_info_1_1(*va, fdt, sp_ffa_version);
		break;
	default:
		EMSG("Unknown FF-A version: %#"PRIx32, sp_ffa_version);
		return TEE_ERROR_NOT_SUPPORTED;
	}

	res = sp_dt_get_u32(fdt, 0, "gp-register-num", &info_reg);
	if (res) {
		if (res == TEE_ERROR_ITEM_NOT_FOUND) {
			/* If the property is not present, set default to x0 */
			info_reg = 0;
		} else {
			return TEE_ERROR_BAD_FORMAT;
		}
	}

	switch (info_reg) {
	case 0:
		args->a0 = *va;
		break;
	case 1:
		args->a1 = *va;
		break;
	case 2:
		args->a2 = *va;
		break;
	case 3:
		args->a3 = *va;
		break;
	default:
		EMSG("Invalid register selected for passing boot info");
		return TEE_ERROR_BAD_FORMAT;
	}

	return TEE_SUCCESS;
}

/*
 * Process manifest memory regions that carry a
 * "load-address-relative-offset" property, i.e. regions placed relative to
 * the SP's load address. Other regions are skipped (handled elsewhere).
 *
 * For each such region: validate attributes (must be secure, RO/RW/RX,
 * optional guarded-page bit), then either allocate and map fresh memory
 * (NOBITS regions, not backed by the binary) or just adjust the
 * permissions of the already-loaded range.
 */
static TEE_Result handle_fdt_load_relative_mem_regions(struct sp_ctx *ctx,
						       const void *fdt)
{
	int node = 0;
	int subnode = 0;
	tee_mm_entry_t *mm = NULL;
	TEE_Result res = TEE_SUCCESS;

	/*
	 * Memory regions are optional in the SP manifest, it's not an error if
	 * we don't find any.
	 */
	node = fdt_node_offset_by_compatible(fdt, 0,
					     "arm,ffa-manifest-memory-regions");
	if (node < 0)
		return TEE_SUCCESS;

	fdt_for_each_subnode(subnode, fdt, node) {
		uint64_t load_rel_offset = 0;
		uint32_t attributes = 0;
		uint64_t base_addr = 0;
		uint32_t pages_cnt = 0;
		uint32_t flags = 0;
		uint32_t perm = 0;
		size_t size = 0;
		vaddr_t va = 0;

		mm = NULL;

		/* Load address relative offset of a memory region */
		if (!sp_dt_get_u64(fdt, subnode, "load-address-relative-offset",
				   &load_rel_offset)) {
			va = ctx->uctx.load_addr + load_rel_offset;
		} else {
			/* Skip non load address relative memory regions */
			continue;
		}

		/* The two placement properties are mutually exclusive */
		if (!sp_dt_get_u64(fdt, subnode, "base-address", &base_addr)) {
			EMSG("Both base-address and load-address-relative-offset fields are set");
			return TEE_ERROR_BAD_FORMAT;
		}

		/* Size of memory region as count of 4K pages */
		if (sp_dt_get_u32(fdt, subnode, "pages-count", &pages_cnt)) {
			EMSG("Mandatory field is missing: pages-count");
			return TEE_ERROR_BAD_FORMAT;
		}

		if (MUL_OVERFLOW(pages_cnt, SMALL_PAGE_SIZE, &size))
			return TEE_ERROR_OVERFLOW;

		/* Memory region attributes */
		if (sp_dt_get_u32(fdt, subnode, "attributes", &attributes)) {
			EMSG("Mandatory field is missing: attributes");
			return TEE_ERROR_BAD_FORMAT;
		}

		/* Check instruction and data access permissions */
		switch (attributes & SP_MANIFEST_ATTR_RWX) {
		case SP_MANIFEST_ATTR_RO:
			perm = TEE_MATTR_UR;
			break;
		case SP_MANIFEST_ATTR_RW:
			perm = TEE_MATTR_URW;
			break;
		case SP_MANIFEST_ATTR_RX:
			perm = TEE_MATTR_URX;
			break;
		default:
			EMSG("Invalid memory access permissions");
			return TEE_ERROR_BAD_FORMAT;
		}

		/* BTI guarded pages only make sense on executable regions */
		if (IS_ENABLED(CFG_TA_BTI) &&
		    attributes & SP_MANIFEST_ATTR_GP) {
			if (!(attributes & SP_MANIFEST_ATTR_RX)) {
				EMSG("Guard only executable region");
				return TEE_ERROR_BAD_FORMAT;
			}
			perm |= TEE_MATTR_GUARDED;
		}

		res = sp_dt_get_u32(fdt, subnode, "load-flags", &flags);
		if (res != TEE_SUCCESS && res != TEE_ERROR_ITEM_NOT_FOUND) {
			EMSG("Optional field with invalid value: flags");
			return TEE_ERROR_BAD_FORMAT;
		}

		/* Load relative regions must be secure */
		if (attributes & SP_MANIFEST_ATTR_NSEC) {
			EMSG("Invalid memory security attribute");
			return TEE_ERROR_BAD_FORMAT;
		}

		if (flags & SP_MANIFEST_FLAG_NOBITS) {
			/*
			 * NOBITS flag is set, which means that loaded binary
			 * doesn't contain this area, so it's need to be
			 * allocated.
			 */
			struct mobj *m = NULL;
			unsigned int idx = 0;

			mm = phys_mem_ta_alloc(size);
			if (!mm)
				return TEE_ERROR_OUT_OF_MEMORY;

			base_addr = tee_mm_get_smem(mm);

			m = sp_mem_new_mobj(pages_cnt,
					    TEE_MATTR_MEM_TYPE_CACHED, true);
			if (!m) {
				res = TEE_ERROR_OUT_OF_MEMORY;
				goto err_mm_free;
			}

			res = sp_mem_add_pages(m, &idx, base_addr, pages_cnt);
			if (res) {
				mobj_put(m);
				goto err_mm_free;
			}

			res = vm_map(&ctx->uctx, &va, size, perm, 0, m, 0);
			mobj_put(m);
			if (res)
				goto err_mm_free;
		} else {
			/*
			 * If NOBITS is not present the memory area is already
			 * mapped and only need to set the correct permissions.
			 */
			res = vm_set_prot(&ctx->uctx, va, size, perm);
			if (res)
				return res;
		}
	}

	return TEE_SUCCESS;

err_mm_free:
	tee_mm_free(mm);
	return res;
}

/*
 * Map the device (MMIO) regions listed in the SP manifest into the SP with
 * the requested permissions (RO or RW, never executable) and Device-nGnRnE
 * memory attributes, then rewrite each node's "base-address" property with
 * the VA the SP should use. The region is unmapped again if the property
 * rewrite fails.
 */
static TEE_Result handle_fdt_dev_regions(struct sp_ctx *ctx, void *fdt)
{
	int node = 0;
	int subnode = 0;
	TEE_Result res = TEE_SUCCESS;
	const char *dt_device_match_table = {
		"arm,ffa-manifest-device-regions",
	};

	/*
	 * Device regions are optional in the SP manifest, it's not an error if
	 * we don't find any
	 */
	node = fdt_node_offset_by_compatible(fdt, 0, dt_device_match_table);
	if (node < 0)
		return TEE_SUCCESS;

	fdt_for_each_subnode(subnode, fdt, node) {
		uint64_t base_addr = 0;
		uint32_t pages_cnt = 0;
		uint32_t attributes = 0;
		struct mobj *m = NULL;
		bool is_secure = true;
		uint32_t perm = 0;
		vaddr_t va = 0;
		unsigned int idx = 0;

		/*
		 * Physical base address of a device MMIO region.
		 * Currently only physically contiguous region is supported.
		 */
		if (sp_dt_get_u64(fdt, subnode, "base-address", &base_addr)) {
			EMSG("Mandatory field is missing: base-address");
			return TEE_ERROR_BAD_FORMAT;
		}

		/* Total size of MMIO region as count of 4K pages */
		if (sp_dt_get_u32(fdt, subnode, "pages-count", &pages_cnt)) {
			EMSG("Mandatory field is missing: pages-count");
			return TEE_ERROR_BAD_FORMAT;
		}

		/* Data access, instruction access and security attributes */
		if (sp_dt_get_u32(fdt, subnode, "attributes", &attributes)) {
			EMSG("Mandatory field is missing: attributes");
			return TEE_ERROR_BAD_FORMAT;
		}

		/* Check instruction and data access permissions */
		switch (attributes & SP_MANIFEST_ATTR_RWX) {
		case SP_MANIFEST_ATTR_RO:
			perm = TEE_MATTR_UR;
			break;
		case SP_MANIFEST_ATTR_RW:
			perm = TEE_MATTR_URW;
			break;
		default:
			EMSG("Invalid memory access permissions");
			return TEE_ERROR_BAD_FORMAT;
		}

		/*
		 * The SP is a secure endpoint, security attribute can be
		 * secure or non-secure
		 */
		if (attributes & SP_MANIFEST_ATTR_NSEC)
			is_secure = false;

		/* Memory attributes must be Device-nGnRnE */
		m = sp_mem_new_mobj(pages_cnt, TEE_MATTR_MEM_TYPE_STRONGLY_O,
				    is_secure);
		if (!m)
			return TEE_ERROR_OUT_OF_MEMORY;

		res = sp_mem_add_pages(m, &idx, (paddr_t)base_addr, pages_cnt);
		if (res) {
			mobj_put(m);
			return res;
		}

		res = vm_map(&ctx->uctx, &va, pages_cnt * SMALL_PAGE_SIZE,
			     perm, 0, m, 0);
		mobj_put(m);
		if (res)
			return res;

		/*
		 * Overwrite the device region's PA in the fdt with the VA. This
		 * fdt will be passed to the SP.
		 */
		res = fdt_setprop_u64(fdt, subnode, "base-address", va);

		/*
		 * Unmap the region if the overwrite failed since the SP won't
		 * be able to access it without knowing the VA.
		 */
		if (res) {
			vm_unmap(&ctx->uctx, va, pages_cnt * SMALL_PAGE_SIZE);
			return res;
		}
	}

	return TEE_SUCCESS;
}

/*
 * Resolve a collision between a manifest-requested endpoint ID and an
 * already-loaded SP that was auto-assigned that ID: give the other SP
 * @new_endpoint_id instead. Fails with TEE_ERROR_ACCESS_CONFLICT if the
 * other SP's manifest explicitly claims the same ID (true duplicate).
 */
static TEE_Result swap_sp_endpoints(uint32_t endpoint_id,
				    uint32_t new_endpoint_id)
{
	struct sp_session *session = sp_get_session(endpoint_id);
	uint32_t manifest_endpoint_id = 0;

	/*
	 * We don't know in which order the SPs are loaded. The endpoint ID
	 * defined in the manifest could already be generated by
	 * new_session_id() and used by another SP. If this is the case, we swap
	 * the ID's of the two SPs. We also have to make sure that the ID's are
	 * not defined twice in the manifest.
	 */

	/* The endpoint ID was not assigned yet */
	if (!session)
		return TEE_SUCCESS;

	/*
	 * Read the manifest file from the SP who originally had the endpoint.
	 * We can safely swap the endpoint ID's if the manifest file doesn't
	 * have an endpoint ID defined.
	 */
	if (!sp_dt_get_u32(session->fdt, 0, "id", &manifest_endpoint_id)) {
		assert(manifest_endpoint_id == endpoint_id);
		EMSG("SP: Found duplicated endpoint ID %#"PRIx32, endpoint_id);
		return TEE_ERROR_ACCESS_CONFLICT;
	}

	session->endpoint_id = new_endpoint_id;

	return TEE_SUCCESS;
}

/*
 * Apply the optional "id" property of the SP manifest: validate it, free it
 * up if another SP was auto-assigned it (swap_sp_endpoints()) and assign it
 * to session @s. Without the property the auto-generated ID stays.
 */
static TEE_Result read_manifest_endpoint_id(struct sp_session *s)
{
	uint32_t endpoint_id = 0;

	/*
	 * The endpoint ID can be optionally defined in the manifest file. We
	 * have to map the ID inside the manifest to the SP if it's defined.
	 * If not, the endpoint ID generated inside new_session_id() will be
	 * used.
	 */
	if (!sp_dt_get_u32(s->fdt, 0, "id", &endpoint_id)) {
		TEE_Result res = TEE_ERROR_GENERIC;

		if (!endpoint_id_is_valid(endpoint_id)) {
			EMSG("Invalid endpoint ID 0x%"PRIx32, endpoint_id);
			return TEE_ERROR_BAD_FORMAT;
		}

		res = swap_sp_endpoints(endpoint_id, s->endpoint_id);
		if (res)
			return res;

		DMSG("SP: endpoint ID (0x%"PRIx32") found in manifest",
		     endpoint_id);
		/* Assign the endpoint ID to the current SP */
		s->endpoint_id = endpoint_id;
	}
	return TEE_SUCCESS;
}
1187 */ 1188 va = ctx->uctx.load_addr + load_rel_offset; 1189 res = fdt_setprop_u64(fdt, subnode, "base-address", va); 1190 if (res) 1191 return res; 1192 1193 continue; 1194 } 1195 1196 /* 1197 * Base address of a memory region. 1198 * If not present, we have to allocate the specified memory. 1199 * If present, this field could specify a PA or VA. Currently 1200 * only a PA is supported. 1201 */ 1202 if (sp_dt_get_u64(fdt, subnode, "base-address", &base_addr)) 1203 alloc_needed = true; 1204 1205 /* Size of memory region as count of 4K pages */ 1206 if (sp_dt_get_u32(fdt, subnode, "pages-count", &pages_cnt)) { 1207 EMSG("Mandatory field is missing: pages-count"); 1208 return TEE_ERROR_BAD_FORMAT; 1209 } 1210 1211 if (MUL_OVERFLOW(pages_cnt, SMALL_PAGE_SIZE, &size)) 1212 return TEE_ERROR_OVERFLOW; 1213 1214 /* 1215 * Memory region attributes: 1216 * - Instruction/data access permissions 1217 * - Cacheability/shareability attributes 1218 * - Security attributes 1219 * 1220 * Cacheability/shareability attributes can be ignored for now. 1221 * OP-TEE only supports a single type for normal cached memory 1222 * and currently there is no use case that would require to 1223 * change this. 
1224 */ 1225 if (sp_dt_get_u32(fdt, subnode, "attributes", &attributes)) { 1226 EMSG("Mandatory field is missing: attributes"); 1227 return TEE_ERROR_BAD_FORMAT; 1228 } 1229 1230 /* Check instruction and data access permissions */ 1231 switch (attributes & SP_MANIFEST_ATTR_RWX) { 1232 case SP_MANIFEST_ATTR_RO: 1233 perm = TEE_MATTR_UR; 1234 break; 1235 case SP_MANIFEST_ATTR_RW: 1236 perm = TEE_MATTR_URW; 1237 break; 1238 case SP_MANIFEST_ATTR_RX: 1239 perm = TEE_MATTR_URX; 1240 break; 1241 default: 1242 EMSG("Invalid memory access permissions"); 1243 return TEE_ERROR_BAD_FORMAT; 1244 } 1245 1246 if (IS_ENABLED(CFG_TA_BTI) && 1247 attributes & SP_MANIFEST_ATTR_GP) { 1248 if (!(attributes & SP_MANIFEST_ATTR_RX)) { 1249 EMSG("Guard only executable region"); 1250 return TEE_ERROR_BAD_FORMAT; 1251 } 1252 perm |= TEE_MATTR_GUARDED; 1253 } 1254 1255 /* 1256 * The SP is a secure endpoint, security attribute can be 1257 * secure or non-secure. 1258 * The SPMC cannot allocate non-secure memory, i.e. if the base 1259 * address is missing this attribute must be secure. 
1260 */ 1261 if (attributes & SP_MANIFEST_ATTR_NSEC) { 1262 if (alloc_needed) { 1263 EMSG("Invalid memory security attribute"); 1264 return TEE_ERROR_BAD_FORMAT; 1265 } 1266 is_secure = false; 1267 } 1268 1269 if (alloc_needed) { 1270 /* Base address is missing, we have to allocate */ 1271 mm = phys_mem_ta_alloc(size); 1272 if (!mm) 1273 return TEE_ERROR_OUT_OF_MEMORY; 1274 1275 base_addr = tee_mm_get_smem(mm); 1276 } 1277 1278 m = sp_mem_new_mobj(pages_cnt, TEE_MATTR_MEM_TYPE_CACHED, 1279 is_secure); 1280 if (!m) { 1281 res = TEE_ERROR_OUT_OF_MEMORY; 1282 goto err_mm_free; 1283 } 1284 1285 res = sp_mem_add_pages(m, &idx, base_addr, pages_cnt); 1286 if (res) { 1287 mobj_put(m); 1288 goto err_mm_free; 1289 } 1290 1291 res = vm_map(&ctx->uctx, &va, size, perm, 0, m, 0); 1292 mobj_put(m); 1293 if (res) 1294 goto err_mm_free; 1295 1296 /* 1297 * Overwrite the memory region's base address in the fdt with 1298 * the VA. This fdt will be passed to the SP. 1299 * If the base-address field was not present in the original 1300 * fdt, this function will create it. This doesn't cause issues 1301 * since the necessary extra space has been allocated when 1302 * opening the fdt. 1303 */ 1304 res = fdt_setprop_u64(fdt, subnode, "base-address", va); 1305 1306 /* 1307 * Unmap the region if the overwrite failed since the SP won't 1308 * be able to access it without knowing the VA. 
1309 */ 1310 if (res) { 1311 vm_unmap(&ctx->uctx, va, size); 1312 goto err_mm_free; 1313 } 1314 } 1315 1316 return TEE_SUCCESS; 1317 1318 err_mm_free: 1319 tee_mm_free(mm); 1320 return res; 1321 } 1322 1323 static TEE_Result handle_tpm_event_log(struct sp_ctx *ctx, void *fdt) 1324 { 1325 uint32_t perm = TEE_MATTR_URW | TEE_MATTR_PRW; 1326 uint32_t dummy_size __maybe_unused = 0; 1327 TEE_Result res = TEE_SUCCESS; 1328 size_t page_count = 0; 1329 struct fobj *f = NULL; 1330 struct mobj *m = NULL; 1331 vaddr_t log_addr = 0; 1332 size_t log_size = 0; 1333 int node = 0; 1334 1335 node = fdt_node_offset_by_compatible(fdt, 0, "arm,tpm_event_log"); 1336 if (node < 0) 1337 return TEE_SUCCESS; 1338 1339 /* Checking the existence and size of the event log properties */ 1340 if (sp_dt_get_u64(fdt, node, "tpm_event_log_addr", &log_addr)) { 1341 EMSG("tpm_event_log_addr not found or has invalid size"); 1342 return TEE_ERROR_BAD_FORMAT; 1343 } 1344 1345 if (sp_dt_get_u32(fdt, node, "tpm_event_log_size", &dummy_size)) { 1346 EMSG("tpm_event_log_size not found or has invalid size"); 1347 return TEE_ERROR_BAD_FORMAT; 1348 } 1349 1350 /* Validating event log */ 1351 res = tpm_get_event_log_size(&log_size); 1352 if (res) 1353 return res; 1354 1355 if (!log_size) { 1356 EMSG("Empty TPM event log was provided"); 1357 return TEE_ERROR_ITEM_NOT_FOUND; 1358 } 1359 1360 /* Allocating memory area for the event log to share with the SP */ 1361 page_count = ROUNDUP_DIV(log_size, SMALL_PAGE_SIZE); 1362 1363 f = fobj_sec_mem_alloc(page_count); 1364 m = mobj_with_fobj_alloc(f, NULL, TEE_MATTR_MEM_TYPE_TAGGED); 1365 fobj_put(f); 1366 if (!m) 1367 return TEE_ERROR_OUT_OF_MEMORY; 1368 1369 res = vm_map(&ctx->uctx, &log_addr, log_size, perm, 0, m, 0); 1370 mobj_put(m); 1371 if (res) 1372 return res; 1373 1374 /* Copy event log */ 1375 res = tpm_get_event_log((void *)log_addr, &log_size); 1376 if (res) 1377 goto err_unmap; 1378 1379 /* Setting event log details in the manifest */ 1380 res = 
fdt_setprop_u64(fdt, node, "tpm_event_log_addr", log_addr); 1381 if (res) 1382 goto err_unmap; 1383 1384 res = fdt_setprop_u32(fdt, node, "tpm_event_log_size", log_size); 1385 if (res) 1386 goto err_unmap; 1387 1388 return TEE_SUCCESS; 1389 1390 err_unmap: 1391 vm_unmap(&ctx->uctx, log_addr, log_size); 1392 1393 return res; 1394 } 1395 1396 /* 1397 * Note: this function is called only on the primary CPU. It assumes that the 1398 * features present on the primary CPU are available on all of the secondary 1399 * CPUs as well. 1400 */ 1401 static TEE_Result handle_hw_features(void *fdt) 1402 { 1403 uint32_t val __maybe_unused = 0; 1404 TEE_Result res = TEE_SUCCESS; 1405 int node = 0; 1406 1407 /* 1408 * HW feature descriptions are optional in the SP manifest, it's not an 1409 * error if we don't find any. 1410 */ 1411 node = fdt_node_offset_by_compatible(fdt, 0, "arm,hw-features"); 1412 if (node < 0) 1413 return TEE_SUCCESS; 1414 1415 /* Modify the crc32 property only if it's already present */ 1416 if (!sp_dt_get_u32(fdt, node, "crc32", &val)) { 1417 res = fdt_setprop_u32(fdt, node, "crc32", 1418 feat_crc32_implemented()); 1419 if (res) 1420 return res; 1421 } 1422 1423 /* Modify the property only if it's already present */ 1424 if (!sp_dt_get_u32(fdt, node, "bti", &val)) { 1425 res = fdt_setprop_u32(fdt, node, "bti", 1426 feat_bti_is_implemented()); 1427 if (res) 1428 return res; 1429 } 1430 1431 /* Modify the property only if it's already present */ 1432 if (!sp_dt_get_u32(fdt, node, "pauth", &val)) { 1433 res = fdt_setprop_u32(fdt, node, "pauth", 1434 feat_pauth_is_implemented()); 1435 if (res) 1436 return res; 1437 } 1438 1439 return TEE_SUCCESS; 1440 } 1441 1442 static TEE_Result read_ns_interrupts_action(const void *fdt, 1443 struct sp_session *s) 1444 { 1445 TEE_Result res = TEE_ERROR_BAD_PARAMETERS; 1446 1447 res = sp_dt_get_u32(fdt, 0, "ns-interrupts-action", &s->ns_int_mode); 1448 1449 if (res) { 1450 EMSG("Mandatory property is missing: 
ns-interrupts-action"); 1451 return res; 1452 } 1453 1454 switch (s->ns_int_mode) { 1455 case SP_MANIFEST_NS_INT_QUEUED: 1456 case SP_MANIFEST_NS_INT_SIGNALED: 1457 /* OK */ 1458 break; 1459 1460 case SP_MANIFEST_NS_INT_MANAGED_EXIT: 1461 EMSG("Managed exit is not implemented"); 1462 return TEE_ERROR_NOT_IMPLEMENTED; 1463 1464 default: 1465 EMSG("Invalid ns-interrupts-action value: %"PRIu32, 1466 s->ns_int_mode); 1467 return TEE_ERROR_BAD_PARAMETERS; 1468 } 1469 1470 return TEE_SUCCESS; 1471 } 1472 1473 static TEE_Result read_ffa_version(const void *fdt, struct sp_session *s) 1474 { 1475 TEE_Result res = TEE_ERROR_BAD_PARAMETERS; 1476 uint32_t ffa_version = 0; 1477 1478 res = sp_dt_get_u32(fdt, 0, "ffa-version", &ffa_version); 1479 if (res) { 1480 EMSG("Mandatory property is missing: ffa-version"); 1481 return res; 1482 } 1483 1484 if (ffa_version != FFA_VERSION_1_0 && ffa_version != FFA_VERSION_1_1) { 1485 EMSG("Invalid FF-A version value: 0x%08"PRIx32, ffa_version); 1486 return TEE_ERROR_BAD_PARAMETERS; 1487 } 1488 1489 s->rxtx.ffa_vers = ffa_version; 1490 1491 return TEE_SUCCESS; 1492 } 1493 1494 static TEE_Result read_sp_exec_state(const void *fdt, struct sp_session *s) 1495 { 1496 TEE_Result res = TEE_ERROR_BAD_PARAMETERS; 1497 uint32_t exec_state = 0; 1498 1499 res = sp_dt_get_u32(fdt, 0, "execution-state", &exec_state); 1500 if (res) { 1501 EMSG("Mandatory property is missing: execution-state"); 1502 return res; 1503 } 1504 1505 /* Currently only AArch64 SPs are supported */ 1506 if (exec_state == SP_MANIFEST_EXEC_STATE_AARCH64) { 1507 s->props |= FFA_PART_PROP_AARCH64_STATE; 1508 } else { 1509 EMSG("Invalid execution-state value: %"PRIu32, exec_state); 1510 return TEE_ERROR_BAD_PARAMETERS; 1511 } 1512 1513 return TEE_SUCCESS; 1514 } 1515 1516 static TEE_Result read_sp_msg_types(const void *fdt, struct sp_session *s) 1517 { 1518 TEE_Result res = TEE_ERROR_BAD_PARAMETERS; 1519 uint32_t msg_method = 0; 1520 1521 res = sp_dt_get_u32(fdt, 0, "messaging-method", 
&msg_method); 1522 if (res) { 1523 EMSG("Mandatory property is missing: messaging-method"); 1524 return res; 1525 } 1526 1527 if (msg_method & SP_MANIFEST_DIRECT_REQ_RECEIVE) 1528 s->props |= FFA_PART_PROP_DIRECT_REQ_RECV; 1529 1530 if (msg_method & SP_MANIFEST_DIRECT_REQ_SEND) 1531 s->props |= FFA_PART_PROP_DIRECT_REQ_SEND; 1532 1533 if (msg_method & SP_MANIFEST_INDIRECT_REQ) 1534 IMSG("Indirect messaging is not supported"); 1535 1536 return TEE_SUCCESS; 1537 } 1538 1539 static TEE_Result read_vm_availability_msg(const void *fdt, 1540 struct sp_session *s) 1541 { 1542 TEE_Result res = TEE_ERROR_BAD_PARAMETERS; 1543 uint32_t v = 0; 1544 1545 res = sp_dt_get_u32(fdt, 0, "vm-availability-messages", &v); 1546 1547 /* This field in the manifest is optional */ 1548 if (res == TEE_ERROR_ITEM_NOT_FOUND) 1549 return TEE_SUCCESS; 1550 1551 if (res) 1552 return res; 1553 1554 if (v & ~(SP_MANIFEST_VM_CREATED_MSG | SP_MANIFEST_VM_DESTROYED_MSG)) { 1555 EMSG("Invalid vm-availability-messages value: %"PRIu32, v); 1556 return TEE_ERROR_BAD_PARAMETERS; 1557 } 1558 1559 if (v & SP_MANIFEST_VM_CREATED_MSG) 1560 s->props |= FFA_PART_PROP_NOTIF_CREATED; 1561 1562 if (v & SP_MANIFEST_VM_DESTROYED_MSG) 1563 s->props |= FFA_PART_PROP_NOTIF_DESTROYED; 1564 1565 return TEE_SUCCESS; 1566 } 1567 1568 static TEE_Result sp_init_uuid(const TEE_UUID *bin_uuid, const void * const fdt) 1569 { 1570 TEE_Result res = TEE_SUCCESS; 1571 struct sp_session *sess = NULL; 1572 TEE_UUID ffa_uuid = {}; 1573 uint16_t boot_order = 0; 1574 uint32_t boot_order_arg = 0; 1575 1576 res = fdt_get_uuid(fdt, &ffa_uuid); 1577 if (res) 1578 return res; 1579 1580 res = sp_dt_get_u16(fdt, 0, "boot-order", &boot_order); 1581 if (res == TEE_SUCCESS) { 1582 boot_order_arg = boot_order; 1583 } else if (res == TEE_ERROR_ITEM_NOT_FOUND) { 1584 boot_order_arg = UINT32_MAX; 1585 } else { 1586 EMSG("Failed reading boot-order property err:%#"PRIx32, res); 1587 return res; 1588 } 1589 1590 res = sp_open_session(&sess, 1591 
&open_sp_sessions, 1592 &ffa_uuid, bin_uuid, boot_order_arg, fdt); 1593 if (res) 1594 return res; 1595 1596 sess->fdt = fdt; 1597 1598 res = read_manifest_endpoint_id(sess); 1599 if (res) 1600 return res; 1601 DMSG("endpoint is 0x%"PRIx16, sess->endpoint_id); 1602 1603 res = read_ns_interrupts_action(fdt, sess); 1604 if (res) 1605 return res; 1606 1607 res = read_ffa_version(fdt, sess); 1608 if (res) 1609 return res; 1610 1611 res = read_sp_exec_state(fdt, sess); 1612 if (res) 1613 return res; 1614 1615 res = read_sp_msg_types(fdt, sess); 1616 if (res) 1617 return res; 1618 1619 res = read_vm_availability_msg(fdt, sess); 1620 if (res) 1621 return res; 1622 1623 return TEE_SUCCESS; 1624 } 1625 1626 static TEE_Result sp_first_run(struct sp_session *sess) 1627 { 1628 TEE_Result res = TEE_SUCCESS; 1629 struct thread_smc_1_2_regs args = { }; 1630 struct sp_ctx *ctx = NULL; 1631 vaddr_t boot_info_va = 0; 1632 size_t boot_info_size = 0; 1633 void *fdt_copy = NULL; 1634 size_t fdt_size = 0; 1635 1636 ctx = to_sp_ctx(sess->ts_sess.ctx); 1637 ts_push_current_session(&sess->ts_sess); 1638 sess->is_initialized = false; 1639 1640 /* 1641 * Load relative memory regions must be handled before doing any other 1642 * mapping to prevent conflicts in the VA space. 
1643 */ 1644 res = handle_fdt_load_relative_mem_regions(ctx, sess->fdt); 1645 if (res) { 1646 ts_pop_current_session(); 1647 return res; 1648 } 1649 1650 res = copy_and_map_fdt(ctx, sess->fdt, &fdt_copy, &fdt_size); 1651 if (res) 1652 goto out; 1653 1654 res = handle_fdt_dev_regions(ctx, fdt_copy); 1655 if (res) 1656 goto out; 1657 1658 res = handle_fdt_mem_regions(ctx, fdt_copy); 1659 if (res) 1660 goto out; 1661 1662 if (IS_ENABLED(CFG_CORE_TPM_EVENT_LOG)) { 1663 res = handle_tpm_event_log(ctx, fdt_copy); 1664 if (res) 1665 goto out; 1666 } 1667 1668 res = handle_hw_features(fdt_copy); 1669 if (res) 1670 goto out; 1671 1672 res = create_and_map_boot_info(ctx, fdt_copy, &args, &boot_info_va, 1673 &boot_info_size, sess->rxtx.ffa_vers); 1674 if (res) 1675 goto out; 1676 1677 ts_pop_current_session(); 1678 1679 res = sp_enter(&args, sess); 1680 if (res) { 1681 ts_push_current_session(&sess->ts_sess); 1682 goto out; 1683 } 1684 1685 spmc_sp_msg_handler(&args, sess); 1686 1687 ts_push_current_session(&sess->ts_sess); 1688 sess->is_initialized = true; 1689 1690 out: 1691 /* Free the boot info page from the SP memory */ 1692 vm_unmap(&ctx->uctx, boot_info_va, boot_info_size); 1693 vm_unmap(&ctx->uctx, (vaddr_t)fdt_copy, fdt_size); 1694 ts_pop_current_session(); 1695 1696 return res; 1697 } 1698 1699 TEE_Result sp_enter(struct thread_smc_1_2_regs *args, struct sp_session *sp) 1700 { 1701 TEE_Result res = TEE_SUCCESS; 1702 struct sp_ctx *ctx = to_sp_ctx(sp->ts_sess.ctx); 1703 1704 ctx->sp_regs.x[0] = args->a0; 1705 ctx->sp_regs.x[1] = args->a1; 1706 ctx->sp_regs.x[2] = args->a2; 1707 ctx->sp_regs.x[3] = args->a3; 1708 ctx->sp_regs.x[4] = args->a4; 1709 ctx->sp_regs.x[5] = args->a5; 1710 ctx->sp_regs.x[6] = args->a6; 1711 ctx->sp_regs.x[7] = args->a7; 1712 #ifdef CFG_TA_PAUTH 1713 ctx->sp_regs.apiakey_hi = ctx->uctx.keys.apia_hi; 1714 ctx->sp_regs.apiakey_lo = ctx->uctx.keys.apia_lo; 1715 #endif 1716 1717 res = sp->ts_sess.ctx->ops->enter_invoke_cmd(&sp->ts_sess, 0); 1718 
1719 args->a0 = ctx->sp_regs.x[0]; 1720 args->a1 = ctx->sp_regs.x[1]; 1721 args->a2 = ctx->sp_regs.x[2]; 1722 args->a3 = ctx->sp_regs.x[3]; 1723 args->a4 = ctx->sp_regs.x[4]; 1724 args->a5 = ctx->sp_regs.x[5]; 1725 args->a6 = ctx->sp_regs.x[6]; 1726 args->a7 = ctx->sp_regs.x[7]; 1727 1728 return res; 1729 } 1730 1731 /* 1732 * According to FF-A v1.1 section 8.3.1.4 if a caller requires less permissive 1733 * active on NS interrupt than the callee, the callee must inherit the caller's 1734 * configuration. 1735 * Each SP's own NS action setting is stored in ns_int_mode. The effective 1736 * action will be MIN([self action], [caller's action]) which is stored in the 1737 * ns_int_mode_inherited field. 1738 */ 1739 static void sp_cpsr_configure_foreign_interrupts(struct sp_session *s, 1740 struct ts_session *caller, 1741 uint64_t *cpsr) 1742 { 1743 if (caller) { 1744 struct sp_session *caller_sp = to_sp_session(caller); 1745 1746 s->ns_int_mode_inherited = MIN(caller_sp->ns_int_mode_inherited, 1747 s->ns_int_mode); 1748 } else { 1749 s->ns_int_mode_inherited = s->ns_int_mode; 1750 } 1751 1752 if (s->ns_int_mode_inherited == SP_MANIFEST_NS_INT_QUEUED) 1753 *cpsr |= SHIFT_U32(THREAD_EXCP_FOREIGN_INTR, 1754 ARM32_CPSR_F_SHIFT); 1755 else 1756 *cpsr &= ~SHIFT_U32(THREAD_EXCP_FOREIGN_INTR, 1757 ARM32_CPSR_F_SHIFT); 1758 } 1759 1760 static TEE_Result sp_enter_invoke_cmd(struct ts_session *s, 1761 uint32_t cmd __unused) 1762 { 1763 struct sp_ctx *ctx = to_sp_ctx(s->ctx); 1764 TEE_Result res = TEE_SUCCESS; 1765 uint32_t exceptions = 0; 1766 struct sp_session *sp_s = to_sp_session(s); 1767 struct ts_session *sess = NULL; 1768 struct thread_ctx_regs *sp_regs = NULL; 1769 uint32_t thread_id = THREAD_ID_INVALID; 1770 struct ts_session *caller = NULL; 1771 uint32_t rpc_target_info = 0; 1772 uint32_t panicked = false; 1773 uint32_t panic_code = 0; 1774 1775 sp_regs = &ctx->sp_regs; 1776 ts_push_current_session(s); 1777 1778 exceptions = thread_mask_exceptions(THREAD_EXCP_ALL); 1779 
1780 /* Enable/disable foreign interrupts in CPSR/SPSR */ 1781 caller = ts_get_calling_session(); 1782 sp_cpsr_configure_foreign_interrupts(sp_s, caller, &sp_regs->cpsr); 1783 1784 /* 1785 * Store endpoint ID and thread ID in rpc_target_info. This will be used 1786 * as w1 in FFA_INTERRUPT in case of a foreign interrupt. 1787 */ 1788 rpc_target_info = thread_get_tsd()->rpc_target_info; 1789 thread_id = thread_get_id(); 1790 assert(thread_id <= UINT16_MAX); 1791 thread_get_tsd()->rpc_target_info = 1792 FFA_TARGET_INFO_SET(sp_s->endpoint_id, thread_id); 1793 1794 __thread_enter_user_mode(sp_regs, &panicked, &panic_code); 1795 1796 /* Restore rpc_target_info */ 1797 thread_get_tsd()->rpc_target_info = rpc_target_info; 1798 1799 thread_unmask_exceptions(exceptions); 1800 1801 thread_user_clear_vfp(&ctx->uctx); 1802 1803 if (panicked) { 1804 DMSG("SP panicked with code %#"PRIx32, panic_code); 1805 abort_print_current_ts(); 1806 1807 sess = ts_pop_current_session(); 1808 cpu_spin_lock(&sp_s->spinlock); 1809 sp_s->state = sp_dead; 1810 cpu_spin_unlock(&sp_s->spinlock); 1811 1812 return TEE_ERROR_TARGET_DEAD; 1813 } 1814 1815 sess = ts_pop_current_session(); 1816 assert(sess == s); 1817 1818 return res; 1819 } 1820 1821 /* We currently don't support 32 bits */ 1822 #ifdef ARM64 1823 static void sp_svc_store_registers(struct thread_scall_regs *regs, 1824 struct thread_ctx_regs *sp_regs) 1825 { 1826 COMPILE_TIME_ASSERT(sizeof(sp_regs->x[0]) == sizeof(regs->x0)); 1827 memcpy(sp_regs->x, ®s->x0, 31 * sizeof(regs->x0)); 1828 sp_regs->pc = regs->elr; 1829 sp_regs->sp = regs->sp_el0; 1830 } 1831 #endif 1832 1833 static bool sp_handle_scall(struct thread_scall_regs *regs) 1834 { 1835 struct ts_session *ts = ts_get_current_session(); 1836 struct sp_ctx *uctx = to_sp_ctx(ts->ctx); 1837 struct sp_session *s = uctx->open_session; 1838 1839 assert(s); 1840 1841 sp_svc_store_registers(regs, &uctx->sp_regs); 1842 1843 regs->x0 = 0; 1844 regs->x1 = 0; /* panic */ 1845 regs->x2 = 0; /* 
panic code */ 1846 1847 /* 1848 * All the registers of the SP are saved in the SP session by the SVC 1849 * handler. 1850 * We always return to S-El1 after handling the SVC. We will continue 1851 * in sp_enter_invoke_cmd() (return from __thread_enter_user_mode). 1852 * The sp_enter() function copies the FF-A parameters (a0-a7) from the 1853 * saved registers to the thread_smc_args. The thread_smc_args object is 1854 * afterward used by the spmc_sp_msg_handler() to handle the 1855 * FF-A message send by the SP. 1856 */ 1857 return false; 1858 } 1859 1860 static void sp_dump_state(struct ts_ctx *ctx) 1861 { 1862 struct sp_ctx *utc = to_sp_ctx(ctx); 1863 1864 if (utc->uctx.dump_entry_func) { 1865 TEE_Result res = ldelf_dump_state(&utc->uctx); 1866 1867 if (!res || res == TEE_ERROR_TARGET_DEAD) 1868 return; 1869 } 1870 1871 user_mode_ctx_print_mappings(&utc->uctx); 1872 } 1873 1874 static const struct ts_ops sp_ops = { 1875 .enter_invoke_cmd = sp_enter_invoke_cmd, 1876 .handle_scall = sp_handle_scall, 1877 .dump_state = sp_dump_state, 1878 }; 1879 1880 static TEE_Result process_sp_pkg(uint64_t sp_pkg_pa, TEE_UUID *sp_uuid) 1881 { 1882 enum teecore_memtypes mtype = MEM_AREA_SEC_RAM_OVERALL; 1883 struct sp_pkg_header *sp_pkg_hdr = NULL; 1884 struct fip_sp *sp = NULL; 1885 uint64_t sp_fdt_end = 0; 1886 size_t sp_pkg_size = 0; 1887 vaddr_t sp_pkg_va = 0; 1888 1889 /* Process the first page which contains the SP package header */ 1890 sp_pkg_va = (vaddr_t)phys_to_virt(sp_pkg_pa, mtype, SMALL_PAGE_SIZE); 1891 if (!sp_pkg_va) { 1892 EMSG("Cannot find mapping for PA %#" PRIxPA, sp_pkg_pa); 1893 return TEE_ERROR_GENERIC; 1894 } 1895 1896 sp_pkg_hdr = (struct sp_pkg_header *)sp_pkg_va; 1897 1898 if (sp_pkg_hdr->magic != SP_PKG_HEADER_MAGIC) { 1899 EMSG("Invalid SP package magic"); 1900 return TEE_ERROR_BAD_FORMAT; 1901 } 1902 1903 if (sp_pkg_hdr->version != SP_PKG_HEADER_VERSION_V1 && 1904 sp_pkg_hdr->version != SP_PKG_HEADER_VERSION_V2) { 1905 EMSG("Invalid SP header version"); 
1906 return TEE_ERROR_BAD_FORMAT; 1907 } 1908 1909 if (ADD_OVERFLOW(sp_pkg_hdr->img_offset, sp_pkg_hdr->img_size, 1910 &sp_pkg_size)) { 1911 EMSG("Invalid SP package size"); 1912 return TEE_ERROR_BAD_FORMAT; 1913 } 1914 1915 if (ADD_OVERFLOW(sp_pkg_hdr->pm_offset, sp_pkg_hdr->pm_size, 1916 &sp_fdt_end) || sp_fdt_end > sp_pkg_hdr->img_offset) { 1917 EMSG("Invalid SP manifest size"); 1918 return TEE_ERROR_BAD_FORMAT; 1919 } 1920 1921 /* Process the whole SP package now that the size is known */ 1922 sp_pkg_va = (vaddr_t)phys_to_virt(sp_pkg_pa, mtype, sp_pkg_size); 1923 if (!sp_pkg_va) { 1924 EMSG("Cannot find mapping for PA %#" PRIxPA, sp_pkg_pa); 1925 return TEE_ERROR_GENERIC; 1926 } 1927 1928 sp_pkg_hdr = (struct sp_pkg_header *)sp_pkg_va; 1929 1930 sp = calloc(1, sizeof(struct fip_sp)); 1931 if (!sp) 1932 return TEE_ERROR_OUT_OF_MEMORY; 1933 1934 memcpy(&sp->sp_img.image.uuid, sp_uuid, sizeof(*sp_uuid)); 1935 sp->sp_img.image.ts = (uint8_t *)(sp_pkg_va + sp_pkg_hdr->img_offset); 1936 sp->sp_img.image.size = sp_pkg_hdr->img_size; 1937 sp->sp_img.image.flags = 0; 1938 sp->sp_img.fdt = (uint8_t *)(sp_pkg_va + sp_pkg_hdr->pm_offset); 1939 1940 STAILQ_INSERT_TAIL(&fip_sp_list, sp, link); 1941 1942 return TEE_SUCCESS; 1943 } 1944 1945 static TEE_Result fip_sp_init_all(void) 1946 { 1947 TEE_Result res = TEE_SUCCESS; 1948 uint64_t sp_pkg_addr = 0; 1949 const void *fdt = NULL; 1950 TEE_UUID sp_uuid = { }; 1951 int sp_pkgs_node = 0; 1952 int subnode = 0; 1953 int root = 0; 1954 1955 fdt = get_manifest_dt(); 1956 if (!fdt) { 1957 EMSG("No SPMC manifest found"); 1958 return TEE_ERROR_GENERIC; 1959 } 1960 1961 root = fdt_path_offset(fdt, "/"); 1962 if (root < 0) 1963 return TEE_ERROR_BAD_FORMAT; 1964 1965 if (fdt_node_check_compatible(fdt, root, "arm,ffa-core-manifest-1.0")) 1966 return TEE_ERROR_BAD_FORMAT; 1967 1968 /* SP packages are optional, it's not an error if we don't find any */ 1969 sp_pkgs_node = fdt_node_offset_by_compatible(fdt, root, "arm,sp_pkg"); 1970 if 
(sp_pkgs_node < 0) 1971 return TEE_SUCCESS; 1972 1973 fdt_for_each_subnode(subnode, fdt, sp_pkgs_node) { 1974 res = sp_dt_get_u64(fdt, subnode, "load-address", &sp_pkg_addr); 1975 if (res) { 1976 EMSG("Invalid FIP SP load address"); 1977 return res; 1978 } 1979 1980 res = sp_dt_get_uuid(fdt, subnode, "uuid", &sp_uuid); 1981 if (res) { 1982 EMSG("Invalid FIP SP uuid"); 1983 return res; 1984 } 1985 1986 res = process_sp_pkg(sp_pkg_addr, &sp_uuid); 1987 if (res) { 1988 EMSG("Invalid FIP SP package"); 1989 return res; 1990 } 1991 } 1992 1993 return TEE_SUCCESS; 1994 } 1995 1996 static void fip_sp_deinit_all(void) 1997 { 1998 while (!STAILQ_EMPTY(&fip_sp_list)) { 1999 struct fip_sp *sp = STAILQ_FIRST(&fip_sp_list); 2000 2001 STAILQ_REMOVE_HEAD(&fip_sp_list, link); 2002 free(sp); 2003 } 2004 } 2005 2006 static TEE_Result sp_init_all(void) 2007 { 2008 TEE_Result res = TEE_SUCCESS; 2009 const struct sp_image *sp = NULL; 2010 const struct fip_sp *fip_sp = NULL; 2011 char __maybe_unused msg[60] = { '\0', }; 2012 struct sp_session *s = NULL; 2013 struct sp_session *prev_sp = NULL; 2014 2015 for_each_secure_partition(sp) { 2016 if (sp->image.uncompressed_size) 2017 snprintf(msg, sizeof(msg), 2018 " (compressed, uncompressed %u)", 2019 sp->image.uncompressed_size); 2020 else 2021 msg[0] = '\0'; 2022 DMSG("SP %pUl size %u%s", (void *)&sp->image.uuid, 2023 sp->image.size, msg); 2024 2025 res = sp_init_uuid(&sp->image.uuid, sp->fdt); 2026 2027 if (res != TEE_SUCCESS) { 2028 EMSG("Failed initializing SP(%pUl) err:%#"PRIx32, 2029 &sp->image.uuid, res); 2030 if (!IS_ENABLED(CFG_SP_SKIP_FAILED)) 2031 panic(); 2032 } 2033 } 2034 2035 res = fip_sp_init_all(); 2036 if (res) 2037 panic("Failed initializing FIP SPs"); 2038 2039 for_each_fip_sp(fip_sp) { 2040 sp = &fip_sp->sp_img; 2041 2042 DMSG("SP %pUl size %u", (void *)&sp->image.uuid, 2043 sp->image.size); 2044 2045 res = sp_init_uuid(&sp->image.uuid, sp->fdt); 2046 2047 if (res != TEE_SUCCESS) { 2048 EMSG("Failed initializing SP(%pUl) 
err:%#"PRIx32, 2049 &sp->image.uuid, res); 2050 if (!IS_ENABLED(CFG_SP_SKIP_FAILED)) 2051 panic(); 2052 } 2053 } 2054 2055 /* 2056 * At this point all FIP SPs are loaded by ldelf or by the raw binary SP 2057 * loader, so the original images (loaded by BL2) are not needed anymore 2058 */ 2059 fip_sp_deinit_all(); 2060 2061 /* 2062 * Now that all SPs are loaded, check through the boot order values, 2063 * and warn in case there is a non-unique value. 2064 */ 2065 TAILQ_FOREACH(s, &open_sp_sessions, link) { 2066 /* User specified boot-order values are uint16 */ 2067 if (s->boot_order > UINT16_MAX) 2068 break; 2069 2070 if (prev_sp && prev_sp->boot_order == s->boot_order) 2071 IMSG("WARNING: duplicated boot-order (%pUl vs %pUl)", 2072 &prev_sp->ts_sess.ctx->uuid, 2073 &s->ts_sess.ctx->uuid); 2074 2075 prev_sp = s; 2076 } 2077 2078 /* Continue the initialization and run the SP */ 2079 TAILQ_FOREACH(s, &open_sp_sessions, link) { 2080 DMSG("Starting SP: 0x%"PRIx16, s->endpoint_id); 2081 2082 res = sp_first_run(s); 2083 if (res != TEE_SUCCESS) { 2084 EMSG("Failed starting SP(0x%"PRIx16") err:%#"PRIx32, 2085 s->endpoint_id, res); 2086 if (!IS_ENABLED(CFG_SP_SKIP_FAILED)) 2087 panic(); 2088 } 2089 } 2090 2091 return TEE_SUCCESS; 2092 } 2093 2094 boot_final(sp_init_all); 2095 2096 static TEE_Result secure_partition_open(const TEE_UUID *uuid, 2097 struct ts_store_handle **h) 2098 { 2099 return emb_ts_open(uuid, h, find_secure_partition); 2100 } 2101 2102 REGISTER_SP_STORE(2) = { 2103 .description = "SP store", 2104 .open = secure_partition_open, 2105 .get_size = emb_ts_get_size, 2106 .get_tag = emb_ts_get_tag, 2107 .read = emb_ts_read, 2108 .close = emb_ts_close, 2109 }; 2110