// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2020-2024, Arm Limited.
 */
#include <crypto/crypto.h>
#include <initcall.h>
#include <kernel/boot.h>
#include <kernel/embedded_ts.h>
#include <kernel/ldelf_loader.h>
#include <kernel/secure_partition.h>
#include <kernel/spinlock.h>
#include <kernel/spmc_sp_handler.h>
#include <kernel/thread_private.h>
#include <kernel/thread_spmc.h>
#include <kernel/tpm.h>
#include <kernel/ts_store.h>
#include <ldelf.h>
#include <libfdt.h>
#include <mm/core_mmu.h>
#include <mm/fobj.h>
#include <mm/mobj.h>
#include <mm/vm.h>
#include <optee_ffa.h>
#include <stdio.h>
#include <string.h>
#include <tee_api_types.h>
#include <tee/uuid.h>
#include <trace.h>
#include <types_ext.h>
#include <utee_defines.h>
#include <util.h>
#include <zlib.h>

/* Size of the bounce buffer mapped into each SP for kernel<->SP copies */
#define BOUNCE_BUFFER_SIZE 4096

/* Access/security attribute bits parsed from SP manifest region nodes */
#define SP_MANIFEST_ATTR_READ	BIT(0)
#define SP_MANIFEST_ATTR_WRITE	BIT(1)
#define SP_MANIFEST_ATTR_EXEC	BIT(2)
#define SP_MANIFEST_ATTR_NSEC	BIT(3)
#define SP_MANIFEST_ATTR_GP	BIT(4)

/* Convenience combinations of the attribute bits above */
#define SP_MANIFEST_ATTR_RO	(SP_MANIFEST_ATTR_READ)
#define SP_MANIFEST_ATTR_RW	(SP_MANIFEST_ATTR_READ | \
				 SP_MANIFEST_ATTR_WRITE)
#define SP_MANIFEST_ATTR_RX	(SP_MANIFEST_ATTR_READ | \
				 SP_MANIFEST_ATTR_EXEC)
#define SP_MANIFEST_ATTR_RWX	(SP_MANIFEST_ATTR_READ | \
				 SP_MANIFEST_ATTR_WRITE | \
				 SP_MANIFEST_ATTR_EXEC)

/* "load-flags" bit: region is not present in the binary, must be allocated */
#define SP_MANIFEST_FLAG_NOBITS	BIT(0)

/* Manifest values for the NS interrupt handling model */
#define SP_MANIFEST_NS_INT_QUEUED	(0x0)
#define SP_MANIFEST_NS_INT_MANAGED_EXIT	(0x1)
#define SP_MANIFEST_NS_INT_SIGNALED	(0x2)

/* Manifest values for the SP execution state */
#define SP_MANIFEST_EXEC_STATE_AARCH64	(0x0)
#define SP_MANIFEST_EXEC_STATE_AARCH32	(0x1)

/* Manifest messaging-method bits */
#define SP_MANIFEST_DIRECT_REQ_RECEIVE	BIT(0)
#define SP_MANIFEST_DIRECT_REQ_SEND	BIT(1)
#define SP_MANIFEST_INDIRECT_REQ	BIT(2)

/* Manifest VM availability message bits */
#define SP_MANIFEST_VM_CREATED_MSG	BIT(0)
#define SP_MANIFEST_VM_DESTROYED_MSG	BIT(1)

/* SP package magic: "SPKG" as a little-endian 32-bit word */
#define SP_PKG_HEADER_MAGIC (0x474b5053)
#define SP_PKG_HEADER_VERSION_V1 (0x1)
#define SP_PKG_HEADER_VERSION_V2 (0x2)

/*
 * Header at the start of an SP package: locates the partition manifest
 * (pm_*) and the SP image (img_*) inside the package blob.
 */
struct sp_pkg_header {
	uint32_t magic;
	uint32_t version;
	uint32_t pm_offset;
	uint32_t pm_size;
	uint32_t img_offset;
	uint32_t img_size;
};

struct fip_sp_head fip_sp_list = STAILQ_HEAD_INITIALIZER(fip_sp_list);

static const struct ts_ops sp_ops;

/* List that holds all of the loaded SP's */
static struct sp_sessions_head open_sp_sessions =
	TAILQ_HEAD_INITIALIZER(open_sp_sessions);

/*
 * Look up an SP image by binary UUID, first among embedded SPs and then
 * among SPs loaded from FIP. Returns NULL when no match is found.
 */
static const struct embedded_ts *find_secure_partition(const TEE_UUID *uuid)
{
	const struct sp_image *sp = NULL;
	const struct fip_sp *fip_sp = NULL;

	for_each_secure_partition(sp) {
		if (!memcmp(&sp->image.uuid, uuid, sizeof(*uuid)))
			return &sp->image;
	}

	for_each_fip_sp(fip_sp) {
		if (!memcmp(&fip_sp->sp_img.image.uuid, uuid, sizeof(*uuid)))
			return &fip_sp->sp_img.image;
	}

	return NULL;
}

/* Return true when @ctx is a secure partition context (uses sp_ops) */
bool is_sp_ctx(struct ts_ctx *ctx)
{
	return ctx && (ctx->ops == &sp_ops);
}

/* Mark @ctx as a secure partition context */
static void set_sp_ctx_ops(struct ts_ctx *ctx)
{
	ctx->ops = &sp_ops;
}

/* Find an open SP session by its FF-A endpoint ID, or NULL */
struct sp_session *sp_get_session(uint32_t session_id)
{
	struct sp_session *s = NULL;

	TAILQ_FOREACH(s, &open_sp_sessions, link) {
		if (s->endpoint_id == session_id)
			return s;
	}

	return NULL;
}

/*
 * Implement FFA_PARTITION_INFO_GET for the SPs managed here.
 * Counts (and optionally fills descriptors for) live SP sessions matching
 * @ffa_uuid (or all sessions when @ffa_uuid is NULL). Dead SPs are skipped.
 * *elem_count is incremented for each match even in count-only mode; once a
 * fill fails the error is sticky but counting continues.
 */
TEE_Result sp_partition_info_get(uint32_t ffa_vers, void *buf, size_t buf_size,
				 const TEE_UUID *ffa_uuid, size_t *elem_count,
				 bool count_only)
{
	TEE_Result res = TEE_SUCCESS;
	struct sp_session *s = NULL;

	TAILQ_FOREACH(s, &open_sp_sessions, link) {
		if (ffa_uuid &&
		    memcmp(&s->ffa_uuid, ffa_uuid, sizeof(*ffa_uuid)))
			continue;

		if (s->state == sp_dead)
			continue;
		if (!count_only && !res) {
			uint32_t uuid_words[4] = { 0 };

			tee_uuid_to_octets((uint8_t *)uuid_words, &s->ffa_uuid);
			res = spmc_fill_partition_entry(ffa_vers, buf, buf_size,
							*elem_count,
							s->endpoint_id, 1,
							s->props, uuid_words);
		}
		*elem_count += 1;
	}

	return res;
}

/*
 * Return true when @mem is accessible by the SP owning @uctx (when given)
 * and is not currently shared with any other SP.
 */
bool sp_has_exclusive_access(struct sp_mem_map_region *mem,
			     struct user_mode_ctx *uctx)
{
	/*
	 * Check that we have access to the region if it is supposed to be
	 * mapped to the current context.
	 */
	if (uctx) {
		struct vm_region *region = NULL;

		/* Make sure that each mobj belongs to the SP */
		TAILQ_FOREACH(region, &uctx->vm_info.regions, link) {
			if (region->mobj == mem->mobj)
				break;
		}

		if (!region)
			return false;
	}

	/* Check that it is not shared with another SP */
	return !sp_mem_is_shared(mem);
}

/* Check that @id is in the secure-world range and not a reserved endpoint */
static bool endpoint_id_is_valid(uint32_t id)
{
	/*
	 * These IDs are assigned at the SPMC init so already have valid values
	 * by the time this function gets first called
	 */
	return id != spmd_id && id != spmc_id && id != optee_endpoint_id &&
	       id >= FFA_SWD_ID_MIN && id <= FFA_SWD_ID_MAX;
}

/*
 * Allocate the first free, valid endpoint ID into *endpoint_id.
 * Returns TEE_ERROR_BAD_FORMAT when the whole SWD range is exhausted.
 */
static TEE_Result new_session_id(uint16_t *endpoint_id)
{
	uint32_t id = 0;

	/* Find the first available endpoint id */
	for (id = FFA_SWD_ID_MIN; id <= FFA_SWD_ID_MAX; id++) {
		if (endpoint_id_is_valid(id) && !sp_get_session(id)) {
			*endpoint_id = id;
			return TEE_SUCCESS;
		}
	}

	return TEE_ERROR_BAD_FORMAT;
}

/*
 * Allocate and initialize the sp_ctx for session @s, wiring it to the
 * ts_sess and setting up the user-mode VM info. On success the context is
 * owned by the session; on failure the partial context is freed here.
 */
static TEE_Result sp_create_ctx(const TEE_UUID *bin_uuid, struct sp_session *s)
{
	TEE_Result res = TEE_SUCCESS;
	struct sp_ctx *spc = NULL;

	/* Register context */
	spc = calloc(1, sizeof(struct sp_ctx));
	if (!spc)
		return TEE_ERROR_OUT_OF_MEMORY;

	spc->open_session = s;
	s->ts_sess.ctx = &spc->ts_ctx;
	spc->ts_ctx.uuid = *bin_uuid;

	res = vm_info_init(&spc->uctx, &spc->ts_ctx);
	if (res)
		goto err;

	set_sp_ctx_ops(&spc->ts_ctx);

#ifdef CFG_TA_PAUTH
	/* Seed per-SP pointer authentication keys */
	crypto_rng_read(&spc->uctx.keys, sizeof(spc->uctx.keys));
#endif

	return TEE_SUCCESS;

err:
	free(spc);
	return res;
}

/*
 * Insert a new sp_session to the sessions list, so that it is ordered
 * by boot_order.
 */
static void insert_session_ordered(struct sp_sessions_head *open_sessions,
				   struct sp_session *session)
{
	struct sp_session *s = NULL;

	if (!open_sessions || !session)
		return;

	/*
	 * NOTE(review): this iterates the global open_sp_sessions, not the
	 * open_sessions parameter. Harmless for the single current caller
	 * (which passes the global) but inconsistent with inserting into
	 * open_sessions below — confirm intent.
	 */
	TAILQ_FOREACH(s, &open_sp_sessions, link) {
		if (s->boot_order > session->boot_order)
			break;
	}

	if (!s)
		TAILQ_INSERT_TAIL(open_sessions, session, link);
	else
		TAILQ_INSERT_BEFORE(s, session, link);
}

/*
 * Allocate a new SP session with a fresh endpoint ID and context and insert
 * it into @open_sessions ordered by @boot_order. On success *sess points to
 * the new session; on failure everything allocated here is released.
 */
static TEE_Result sp_create_session(struct sp_sessions_head *open_sessions,
				    const TEE_UUID *bin_uuid,
				    const uint32_t boot_order,
				    struct sp_session **sess)
{
	TEE_Result res = TEE_SUCCESS;
	struct sp_session *s = calloc(1, sizeof(struct sp_session));

	if (!s)
		return TEE_ERROR_OUT_OF_MEMORY;

	s->boot_order = boot_order;

	/* Other properties are filled later, based on the SP's manifest */
	s->props = FFA_PART_PROP_IS_PE_ID;

	res = new_session_id(&s->endpoint_id);
	if (res)
		goto err;

	DMSG("Loading Secure Partition %pUl", (void *)bin_uuid);
	res = sp_create_ctx(bin_uuid, s);
	if (res)
		goto err;

	insert_session_ordered(open_sessions, s);
	*sess = s;
	return TEE_SUCCESS;

err:
	free(s);
	return res;
}

/* Prepare the register state for the SP's first run: only SP and PC are set */
static TEE_Result sp_init_set_registers(struct sp_ctx *ctx)
{
	struct thread_ctx_regs *sp_regs = &ctx->sp_regs;

	memset(sp_regs, 0, sizeof(*sp_regs));
	sp_regs->sp = ctx->uctx.stack_ptr;
	sp_regs->pc = ctx->uctx.entry_func;

	return TEE_SUCCESS;
}

/*
 * Map the regions of shared memory @smem into SP @s with the permissions
 * requested by @receiver. A caller-specified VA (*va != 0) is rejected since
 * contiguous mapping of multi-mobj shares cannot be guaranteed. RW+X is
 * rejected as an access conflict (W^X).
 */
TEE_Result sp_map_shared(struct sp_session *s,
			 struct sp_mem_receiver *receiver,
			 struct sp_mem *smem,
			 uint64_t *va)
{
	TEE_Result res = TEE_SUCCESS;
	struct sp_ctx *ctx = NULL;
	uint32_t perm = TEE_MATTR_UR;
	struct sp_mem_map_region *reg = NULL;

	ctx = to_sp_ctx(s->ts_sess.ctx);

	/* Get the permission */
	if (receiver->perm.perm & FFA_MEM_ACC_EXE)
		perm |= TEE_MATTR_UX;

	if (receiver->perm.perm & FFA_MEM_ACC_RW) {
		if (receiver->perm.perm & FFA_MEM_ACC_EXE)
			return TEE_ERROR_ACCESS_CONFLICT;

		perm |= TEE_MATTR_UW;
	}
	/*
	 * Currently we don't support passing a va. We can't guarantee that the
	 * full region will be mapped in a contiguous region. A smem->region can
	 * have multiple mobj for one share. Currently there doesn't seem to be
	 * an option to guarantee that these will be mapped in a contiguous va
	 * space.
	 */
	if (*va)
		return TEE_ERROR_NOT_SUPPORTED;

	SLIST_FOREACH(reg, &smem->regions, link) {
		res = vm_map(&ctx->uctx, va, reg->page_count * SMALL_PAGE_SIZE,
			     perm, 0, reg->mobj, reg->page_offset);

		if (res != TEE_SUCCESS) {
			EMSG("Failed to map memory region %#"PRIx32, res);
			return res;
		}
	}
	return TEE_SUCCESS;
}

/* Unmap all regions of FF-A share @smem from SP @s's address space */
TEE_Result sp_unmap_ffa_regions(struct sp_session *s, struct sp_mem *smem)
{
	TEE_Result res = TEE_SUCCESS;
	vaddr_t vaddr = 0;
	size_t len = 0;
	struct sp_ctx *ctx = to_sp_ctx(s->ts_sess.ctx);
	struct sp_mem_map_region *reg = NULL;

	SLIST_FOREACH(reg, &smem->regions, link) {
		vaddr = (vaddr_t)sp_mem_get_va(&ctx->uctx, reg->page_offset,
					       reg->mobj);
		len = reg->page_count * SMALL_PAGE_SIZE;

		res = vm_unmap(&ctx->uctx, vaddr, len);
		if (res != TEE_SUCCESS)
			return res;
	}

	return TEE_SUCCESS;
}

/*
 * Read a 64-bit big-endian property from the FDT.
 * Returns ITEM_NOT_FOUND if absent, BAD_FORMAT if not exactly 8 bytes.
 */
static TEE_Result sp_dt_get_u64(const void *fdt, int node, const char *property,
				uint64_t *value)
{
	const fdt64_t *p = NULL;
	int len = 0;

	p = fdt_getprop(fdt, node, property, &len);
	if (!p)
		return TEE_ERROR_ITEM_NOT_FOUND;

	if (len != sizeof(*p))
		return TEE_ERROR_BAD_FORMAT;

	*value = fdt64_ld(p);

	return TEE_SUCCESS;
}

/*
 * Read a 32-bit big-endian property from the FDT.
 * Returns ITEM_NOT_FOUND if absent, BAD_FORMAT if not exactly 4 bytes.
 */
static TEE_Result sp_dt_get_u32(const void *fdt, int node, const char *property,
				uint32_t *value)
{
	const fdt32_t *p = NULL;
	int len = 0;

	p = fdt_getprop(fdt, node, property, &len);
	if (!p)
		return TEE_ERROR_ITEM_NOT_FOUND;

	if (len != sizeof(*p))
		return TEE_ERROR_BAD_FORMAT;

	*value = fdt32_to_cpu(*p);

	return TEE_SUCCESS;
}

/*
 * Read a 16-bit big-endian property from the FDT.
 * Returns ITEM_NOT_FOUND if absent, BAD_FORMAT if not exactly 2 bytes.
 */
static TEE_Result sp_dt_get_u16(const void *fdt, int node, const char *property,
				uint16_t *value)
{
	const fdt16_t *p = NULL;
	int len = 0;

	p = fdt_getprop(fdt, node, property, &len);
	if (!p)
		return TEE_ERROR_ITEM_NOT_FOUND;

	if (len != sizeof(*p))
		return TEE_ERROR_BAD_FORMAT;

	*value = fdt16_to_cpu(*p);

	return TEE_SUCCESS;
}

/*
 * Read a UUID property stored as four 32-bit big-endian words and convert
 * it to a TEE_UUID via its octet representation.
 */
static TEE_Result sp_dt_get_uuid(const void *fdt, int node,
				 const char *property, TEE_UUID *uuid)
{
	uint32_t uuid_array[4] = { 0 };
	const fdt32_t *p = NULL;
	int len = 0;
	int i = 0;

	p = fdt_getprop(fdt, node, property, &len);
	if (!p)
		return TEE_ERROR_ITEM_NOT_FOUND;

	if (len != sizeof(TEE_UUID))
		return TEE_ERROR_BAD_FORMAT;

	for (i = 0; i < 4; i++)
		uuid_array[i] = fdt32_to_cpu(p[i]);

	tee_uuid_from_octets(uuid, (uint8_t *)uuid_array);

	return TEE_SUCCESS;
}

/*
 * Determine from the manifest whether the SP binary is in ELF format.
 * A missing "elf-format" property means raw binary (*is_elf_format = false).
 */
static TEE_Result sp_is_elf_format(const void *fdt, int sp_node,
				   bool *is_elf_format)
{
	TEE_Result res = TEE_SUCCESS;
	uint32_t elf_format = 0;

	res = sp_dt_get_u32(fdt, sp_node, "elf-format", &elf_format);
	if (res != TEE_SUCCESS && res != TEE_ERROR_ITEM_NOT_FOUND)
		return res;

	*is_elf_format = (elf_format != 0);

	return TEE_SUCCESS;
}

/*
 * Try each registered SP store until one can open the binary for @uuid.
 * On success *ops/*handle identify the store and the open handle.
 */
static TEE_Result sp_binary_open(const TEE_UUID *uuid,
				 const struct ts_store_ops **ops,
				 struct ts_store_handle **handle)
{
	TEE_Result res = TEE_ERROR_ITEM_NOT_FOUND;

	SCATTERED_ARRAY_FOREACH(*ops, sp_stores, struct ts_store_ops) {
		res = (*ops)->open(uuid, handle);
		if (res != TEE_ERROR_ITEM_NOT_FOUND &&
		    res != TEE_ERROR_STORAGE_NOT_AVAILABLE)
			break;
	}

	return res;
}

/*
 * Load a raw (non-ELF) binary SP into @uctx: set up the bounce buffer,
 * allocate secure memory for the image, map it RWX, copy the binary in,
 * then restrict the mapping to execute-only. The entry point is assumed to
 * be at the start of the binary (uctx->entry_func == load address).
 */
static TEE_Result load_binary_sp(struct ts_session *s,
				 struct user_mode_ctx *uctx)
{
	size_t bin_size = 0, bin_size_rounded = 0, bin_page_count = 0;
	size_t bb_size = ROUNDUP(BOUNCE_BUFFER_SIZE, SMALL_PAGE_SIZE);
	size_t bb_num_pages = bb_size / SMALL_PAGE_SIZE;
	const struct ts_store_ops *store_ops = NULL;
	struct ts_store_handle *handle = NULL;
	TEE_Result res = TEE_SUCCESS;
	tee_mm_entry_t *mm = NULL;
	struct fobj *fobj = NULL;
	struct mobj *mobj = NULL;
	uaddr_t base_addr = 0;
	uint32_t vm_flags = 0;
	unsigned int idx = 0;
	vaddr_t va = 0;

	if (!s || !uctx)
		return TEE_ERROR_BAD_PARAMETERS;

	DMSG("Loading raw binary format SP %pUl", &uctx->ts_ctx->uuid);

	/* Initialize the bounce buffer */
	fobj = fobj_sec_mem_alloc(bb_num_pages);
	mobj = mobj_with_fobj_alloc(fobj, NULL, TEE_MATTR_MEM_TYPE_TAGGED);
	fobj_put(fobj);
	if (!mobj)
		return TEE_ERROR_OUT_OF_MEMORY;

	res = vm_map(uctx, &va, bb_size, TEE_MATTR_PRW, 0, mobj, 0);
	mobj_put(mobj);
	if (res)
		return res;

	uctx->bbuf = (uint8_t *)va;
	uctx->bbuf_size = BOUNCE_BUFFER_SIZE;

	vm_set_ctx(uctx->ts_ctx);

	/* Find TS store and open SP binary */
	res = sp_binary_open(&uctx->ts_ctx->uuid, &store_ops, &handle);
	if (res != TEE_SUCCESS) {
		EMSG("Failed to open SP binary");
		return res;
	}

	/* Query binary size and calculate page count */
	res = store_ops->get_size(handle, &bin_size);
	if (res != TEE_SUCCESS)
		goto err;

	if (ROUNDUP_OVERFLOW(bin_size, SMALL_PAGE_SIZE, &bin_size_rounded)) {
		res = TEE_ERROR_OVERFLOW;
		goto err;
	}

	bin_page_count = bin_size_rounded / SMALL_PAGE_SIZE;

	/* Allocate memory */
	mm = tee_mm_alloc(&tee_mm_sec_ddr, bin_size_rounded);
	if (!mm) {
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto err;
	}

	base_addr = tee_mm_get_smem(mm);

	/* Create mobj */
	mobj = sp_mem_new_mobj(bin_page_count, TEE_MATTR_MEM_TYPE_CACHED, true);
	if (!mobj) {
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto err_free_tee_mm;
	}

	res = sp_mem_add_pages(mobj, &idx, base_addr, bin_page_count);
	if (res)
		goto err_free_mobj;

	/* Map memory area for the SP binary */
	va = 0;
	res = vm_map(uctx, &va, bin_size_rounded, TEE_MATTR_URWX,
		     vm_flags, mobj, 0);
	if (res)
		goto err_free_mobj;

	/* Read SP binary into the previously mapped memory area */
	res = store_ops->read(handle, NULL, (void *)va, bin_size);
	if (res)
		goto err_unmap;

	/* Set memory protection to allow execution */
	res = vm_set_prot(uctx, va, bin_size_rounded, TEE_MATTR_UX);
	if (res)
		goto err_unmap;

	mobj_put(mobj);
	store_ops->close(handle);

	/* The entry point must be at the beginning of the SP binary. */
	uctx->entry_func = va;
	uctx->load_addr = va;
	uctx->is_32bit = false;

	s->handle_scall = s->ctx->ops->handle_scall;

	return TEE_SUCCESS;

err_unmap:
	vm_unmap(uctx, va, bin_size_rounded);

err_free_mobj:
	mobj_put(mobj);

err_free_tee_mm:
	tee_mm_free(mm);

err:
	store_ops->close(handle);

	return res;
}

/*
 * Create a session for SP @bin_uuid, load the SP (ELF via ldelf or raw
 * binary depending on the manifest), and prepare it for its first run.
 * On load failure the session is left in the list but the function returns
 * TEE_ERROR_TARGET_DEAD.
 */
static TEE_Result sp_open_session(struct sp_session **sess,
				  struct sp_sessions_head *open_sessions,
				  const TEE_UUID *ffa_uuid,
				  const TEE_UUID *bin_uuid,
				  const uint32_t boot_order,
				  const void *fdt)
{
	TEE_Result res = TEE_SUCCESS;
	struct sp_session *s = NULL;
	struct sp_ctx *ctx = NULL;
	bool is_elf_format = false;

	if (!find_secure_partition(bin_uuid))
		return TEE_ERROR_ITEM_NOT_FOUND;

	res = sp_create_session(open_sessions, bin_uuid, boot_order, &s);
	if (res != TEE_SUCCESS) {
		DMSG("sp_create_session failed %#"PRIx32, res);
		return res;
	}

	ctx = to_sp_ctx(s->ts_sess.ctx);
	/*
	 * NOTE(review): the assert and the NULL check below are redundant
	 * with each other (the if is dead in debug builds, the assert absent
	 * in release builds) — presumably belt-and-braces, confirm.
	 */
	assert(ctx);
	if (!ctx)
		return TEE_ERROR_TARGET_DEAD;
	*sess = s;

	ts_push_current_session(&s->ts_sess);

	res = sp_is_elf_format(fdt, 0, &is_elf_format);
	if (res == TEE_SUCCESS) {
		if (is_elf_format) {
			/* Load the SP using ldelf. */
			ldelf_load_ldelf(&ctx->uctx);
			res = ldelf_init_with_ldelf(&s->ts_sess, &ctx->uctx);
		} else {
			/* Raw binary format SP */
			res = load_binary_sp(&s->ts_sess, &ctx->uctx);
		}
	} else {
		EMSG("Failed to detect SP format");
	}

	if (res != TEE_SUCCESS) {
		EMSG("Failed loading SP %#"PRIx32, res);
		ts_pop_current_session();
		return TEE_ERROR_TARGET_DEAD;
	}

	/*
	 * Make the SP ready for its first run.
	 * Set state to busy to prevent other endpoints from sending messages to
	 * the SP before its boot phase is done.
	 */
	s->state = sp_busy;
	s->caller_id = 0;
	sp_init_set_registers(ctx);
	memcpy(&s->ffa_uuid, ffa_uuid, sizeof(*ffa_uuid));
	ts_pop_current_session();

	return TEE_SUCCESS;
}

/*
 * Validate that @fdt is an FF-A 1.0 manifest and extract its "uuid"
 * property. Logs the optional "description" property when present.
 */
static TEE_Result fdt_get_uuid(const void * const fdt, TEE_UUID *uuid)
{
	const struct fdt_property *description = NULL;
	int description_name_len = 0;

	if (fdt_node_check_compatible(fdt, 0, "arm,ffa-manifest-1.0")) {
		EMSG("Failed loading SP, manifest not found");
		return TEE_ERROR_BAD_PARAMETERS;
	}

	description = fdt_get_property(fdt, 0, "description",
				       &description_name_len);
	if (description)
		DMSG("Loading SP: %s", description->data);

	if (sp_dt_get_uuid(fdt, 0, "uuid", uuid)) {
		EMSG("Missing or invalid UUID in SP manifest");
		return TEE_ERROR_BAD_FORMAT;
	}

	return TEE_SUCCESS;
}

/*
 * Copy the manifest FDT into freshly allocated secure memory mapped into
 * the SP (user-read, kernel-read/write) so the SP can consume it and the
 * SPMC can patch it. Outputs the SP-visible copy and the mapped size.
 */
static TEE_Result copy_and_map_fdt(struct sp_ctx *ctx, const void * const fdt,
				   void **fdt_copy, size_t *mapped_size)
{
	size_t total_size = ROUNDUP(fdt_totalsize(fdt), SMALL_PAGE_SIZE);
	size_t num_pages = total_size / SMALL_PAGE_SIZE;
	uint32_t perm = TEE_MATTR_UR | TEE_MATTR_PRW;
	TEE_Result res = TEE_SUCCESS;
	struct mobj *m = NULL;
	struct fobj *f = NULL;
	vaddr_t va = 0;

	f = fobj_sec_mem_alloc(num_pages);
	m = mobj_with_fobj_alloc(f, NULL, TEE_MATTR_MEM_TYPE_TAGGED);
	fobj_put(f);
	if (!m)
		return TEE_ERROR_OUT_OF_MEMORY;

	res = vm_map(&ctx->uctx, &va, total_size, perm, 0, m, 0);
	mobj_put(m);
	if (res)
		return res;

	if (fdt_open_into(fdt, (void *)va, total_size))
		return TEE_ERROR_GENERIC;

	*fdt_copy = (void *)va;
	*mapped_size = total_size;

	return res;
}

/* Fill an FF-A v1.0 boot information blob at @buf pointing at @fdt */
static void fill_boot_info_1_0(vaddr_t buf, const void *fdt)
{
	struct ffa_boot_info_1_0 *info = (struct ffa_boot_info_1_0 *)buf;
	static const char fdt_name[16] = "TYPE_DT\0\0\0\0\0\0\0\0";

	memcpy(&info->magic, "FF-A", 4);
	info->count = 1;

	COMPILE_TIME_ASSERT(sizeof(info->nvp[0].name) == sizeof(fdt_name));
	memcpy(info->nvp[0].name, fdt_name, sizeof(fdt_name));
	info->nvp[0].value = (uintptr_t)fdt;
	info->nvp[0].size = fdt_totalsize(fdt);
}

/* Fill an FF-A v1.1 boot information header + FDT descriptor at @buf */
static void fill_boot_info_1_1(vaddr_t buf, const void *fdt)
{
	/* Descriptor array is 8-byte aligned after the header */
	size_t desc_offs = ROUNDUP(sizeof(struct ffa_boot_info_header_1_1), 8);
	struct ffa_boot_info_header_1_1 *header =
		(struct ffa_boot_info_header_1_1 *)buf;
	struct ffa_boot_info_1_1 *desc =
		(struct ffa_boot_info_1_1 *)(buf + desc_offs);

	header->signature = FFA_BOOT_INFO_SIGNATURE;
	header->version = FFA_BOOT_INFO_VERSION;
	header->blob_size = desc_offs + sizeof(struct ffa_boot_info_1_1);
	header->desc_size = sizeof(struct ffa_boot_info_1_1);
	header->desc_count = 1;
	header->desc_offset = desc_offs;

	memset(&desc[0].name, 0, sizeof(desc[0].name));
	/* Type: Standard boot info (bit[7] == 0), FDT type */
	desc[0].type = FFA_BOOT_INFO_TYPE_ID_FDT;
	/* Flags: Contents field contains an address */
	desc[0].flags = FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_ADDR <<
			FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_SHIFT;
	desc[0].size = fdt_totalsize(fdt);
	desc[0].contents = (uintptr_t)fdt;
}

/*
 * Allocate and map a boot information area for the SP, fill it in the
 * format matching @sp_ffa_version, and place its address in the register
 * (a0..a3) selected by the manifest's "gp-register-num" (default x0).
 */
static TEE_Result create_and_map_boot_info(struct sp_ctx *ctx, const void *fdt,
					   struct thread_smc_args *args,
					   vaddr_t *va, size_t *mapped_size,
					   uint32_t sp_ffa_version)
{
	size_t total_size = ROUNDUP(CFG_SP_INIT_INFO_MAX_SIZE, SMALL_PAGE_SIZE);
	size_t num_pages = total_size / SMALL_PAGE_SIZE;
	uint32_t perm = TEE_MATTR_UR | TEE_MATTR_PRW;
	TEE_Result res = TEE_SUCCESS;
	struct fobj *f = NULL;
	struct mobj *m = NULL;
	uint32_t info_reg = 0;

	f = fobj_sec_mem_alloc(num_pages);
	m = mobj_with_fobj_alloc(f, NULL, TEE_MATTR_MEM_TYPE_TAGGED);
	fobj_put(f);
	if (!m)
		return TEE_ERROR_OUT_OF_MEMORY;

	res = vm_map(&ctx->uctx, va, total_size, perm, 0, m, 0);
	mobj_put(m);
	if (res)
		return res;

	*mapped_size = total_size;

	switch (sp_ffa_version) {
	case MAKE_FFA_VERSION(1, 0):
		fill_boot_info_1_0(*va, fdt);
		break;
	case MAKE_FFA_VERSION(1, 1):
		fill_boot_info_1_1(*va, fdt);
		break;
	default:
		EMSG("Unknown FF-A version: %#"PRIx32, sp_ffa_version);
		return TEE_ERROR_NOT_SUPPORTED;
	}

	res = sp_dt_get_u32(fdt, 0, "gp-register-num", &info_reg);
	if (res) {
		if (res == TEE_ERROR_ITEM_NOT_FOUND) {
			/* If the property is not present, set default to x0 */
			info_reg = 0;
		} else {
			return TEE_ERROR_BAD_FORMAT;
		}
	}

	switch (info_reg) {
	case 0:
		args->a0 = *va;
		break;
	case 1:
		args->a1 = *va;
		break;
	case 2:
		args->a2 = *va;
		break;
	case 3:
		args->a3 = *va;
		break;
	default:
		EMSG("Invalid register selected for passing boot info");
		return TEE_ERROR_BAD_FORMAT;
	}

	return TEE_SUCCESS;
}

/*
 * First pass over the manifest's memory regions: handle only regions with
 * "load-address-relative-offset" (placed relative to the SP load address).
 * NOBITS regions get fresh secure memory allocated and mapped; others must
 * already be mapped by the image load and only get their permissions set.
 * Such regions must be secure and must not also specify "base-address".
 */
static TEE_Result handle_fdt_load_relative_mem_regions(struct sp_ctx *ctx,
						       const void *fdt)
{
	int node = 0;
	int subnode = 0;
	tee_mm_entry_t *mm = NULL;
	TEE_Result res = TEE_SUCCESS;

	/*
	 * Memory regions are optional in the SP manifest, it's not an error if
	 * we don't find any.
	 */
	node = fdt_node_offset_by_compatible(fdt, 0,
					     "arm,ffa-manifest-memory-regions");
	if (node < 0)
		return TEE_SUCCESS;

	fdt_for_each_subnode(subnode, fdt, node) {
		uint64_t load_rel_offset = 0;
		uint32_t attributes = 0;
		uint64_t base_addr = 0;
		uint32_t pages_cnt = 0;
		uint32_t flags = 0;
		uint32_t perm = 0;
		size_t size = 0;
		vaddr_t va = 0;

		mm = NULL;

		/* Load address relative offset of a memory region */
		if (!sp_dt_get_u64(fdt, subnode, "load-address-relative-offset",
				   &load_rel_offset)) {
			va = ctx->uctx.load_addr + load_rel_offset;
		} else {
			/* Skip non load address relative memory regions */
			continue;
		}

		if (!sp_dt_get_u64(fdt, subnode, "base-address", &base_addr)) {
			EMSG("Both base-address and load-address-relative-offset fields are set");
			return TEE_ERROR_BAD_FORMAT;
		}

		/* Size of memory region as count of 4K pages */
		if (sp_dt_get_u32(fdt, subnode, "pages-count", &pages_cnt)) {
			EMSG("Mandatory field is missing: pages-count");
			return TEE_ERROR_BAD_FORMAT;
		}

		if (MUL_OVERFLOW(pages_cnt, SMALL_PAGE_SIZE, &size))
			return TEE_ERROR_OVERFLOW;

		/* Memory region attributes */
		if (sp_dt_get_u32(fdt, subnode, "attributes", &attributes)) {
			EMSG("Mandatory field is missing: attributes");
			return TEE_ERROR_BAD_FORMAT;
		}

		/* Check instruction and data access permissions */
		switch (attributes & SP_MANIFEST_ATTR_RWX) {
		case SP_MANIFEST_ATTR_RO:
			perm = TEE_MATTR_UR;
			break;
		case SP_MANIFEST_ATTR_RW:
			perm = TEE_MATTR_URW;
			break;
		case SP_MANIFEST_ATTR_RX:
			perm = TEE_MATTR_URX;
			break;
		default:
			EMSG("Invalid memory access permissions");
			return TEE_ERROR_BAD_FORMAT;
		}

		/* BTI guarded pages only make sense on executable regions */
		if (IS_ENABLED(CFG_TA_BTI) &&
		    attributes & SP_MANIFEST_ATTR_GP) {
			if (!(attributes & SP_MANIFEST_ATTR_RX)) {
				EMSG("Guard only executable region");
				return TEE_ERROR_BAD_FORMAT;
			}
			perm |= TEE_MATTR_GUARDED;
		}

		res = sp_dt_get_u32(fdt, subnode, "load-flags", &flags);
		if (res != TEE_SUCCESS && res != TEE_ERROR_ITEM_NOT_FOUND) {
			EMSG("Optional field with invalid value: flags");
			return TEE_ERROR_BAD_FORMAT;
		}

		/* Load relative regions must be secure */
		if (attributes & SP_MANIFEST_ATTR_NSEC) {
			EMSG("Invalid memory security attribute");
			return TEE_ERROR_BAD_FORMAT;
		}

		if (flags & SP_MANIFEST_FLAG_NOBITS) {
			/*
			 * NOBITS flag is set, which means that loaded binary
			 * doesn't contain this area, so it's need to be
			 * allocated.
			 */
			struct mobj *m = NULL;
			unsigned int idx = 0;

			mm = tee_mm_alloc(&tee_mm_sec_ddr, size);
			if (!mm)
				return TEE_ERROR_OUT_OF_MEMORY;

			base_addr = tee_mm_get_smem(mm);

			m = sp_mem_new_mobj(pages_cnt,
					    TEE_MATTR_MEM_TYPE_CACHED, true);
			if (!m) {
				res = TEE_ERROR_OUT_OF_MEMORY;
				goto err_mm_free;
			}

			res = sp_mem_add_pages(m, &idx, base_addr, pages_cnt);
			if (res) {
				mobj_put(m);
				goto err_mm_free;
			}

			res = vm_map(&ctx->uctx, &va, size, perm, 0, m, 0);
			mobj_put(m);
			if (res)
				goto err_mm_free;
		} else {
			/*
			 * If NOBITS is not present the memory area is already
			 * mapped and only need to set the correct permissions.
			 */
			res = vm_set_prot(&ctx->uctx, va, size, perm);
			if (res)
				return res;
		}
	}

	return TEE_SUCCESS;

err_mm_free:
	tee_mm_free(mm);
	return res;
}

/*
 * Map the device (MMIO) regions declared in the SP manifest into the SP as
 * strongly-ordered memory and patch each node's "base-address" in the FDT
 * copy with the SP-visible VA. Regions may be secure or non-secure.
 */
static TEE_Result handle_fdt_dev_regions(struct sp_ctx *ctx, void *fdt)
{
	int node = 0;
	int subnode = 0;
	TEE_Result res = TEE_SUCCESS;
	/*
	 * NOTE(review): scalar pointer initialized with braces — legal C but
	 * reads like an aborted array declaration; presumably intentional
	 * single compatible string.
	 */
	const char *dt_device_match_table = {
		"arm,ffa-manifest-device-regions",
	};

	/*
	 * Device regions are optional in the SP manifest, it's not an error if
	 * we don't find any
	 */
	node = fdt_node_offset_by_compatible(fdt, 0, dt_device_match_table);
	if (node < 0)
		return TEE_SUCCESS;

	fdt_for_each_subnode(subnode, fdt, node) {
		uint64_t base_addr = 0;
		uint32_t pages_cnt = 0;
		uint32_t attributes = 0;
		struct mobj *m = NULL;
		bool is_secure = true;
		uint32_t perm = 0;
		vaddr_t va = 0;
		unsigned int idx = 0;

		/*
		 * Physical base address of a device MMIO region.
		 * Currently only physically contiguous region is supported.
		 */
		if (sp_dt_get_u64(fdt, subnode, "base-address", &base_addr)) {
			EMSG("Mandatory field is missing: base-address");
			return TEE_ERROR_BAD_FORMAT;
		}

		/* Total size of MMIO region as count of 4K pages */
		if (sp_dt_get_u32(fdt, subnode, "pages-count", &pages_cnt)) {
			EMSG("Mandatory field is missing: pages-count");
			return TEE_ERROR_BAD_FORMAT;
		}

		/* Data access, instruction access and security attributes */
		if (sp_dt_get_u32(fdt, subnode, "attributes", &attributes)) {
			EMSG("Mandatory field is missing: attributes");
			return TEE_ERROR_BAD_FORMAT;
		}

		/* Check instruction and data access permissions */
		switch (attributes & SP_MANIFEST_ATTR_RWX) {
		case SP_MANIFEST_ATTR_RO:
			perm = TEE_MATTR_UR;
			break;
		case SP_MANIFEST_ATTR_RW:
			perm = TEE_MATTR_URW;
			break;
		default:
			EMSG("Invalid memory access permissions");
			return TEE_ERROR_BAD_FORMAT;
		}

		/*
		 * The SP is a secure endpoint, security attribute can be
		 * secure or non-secure
		 */
		if (attributes & SP_MANIFEST_ATTR_NSEC)
			is_secure = false;

		/* Memory attributes must be Device-nGnRnE */
		m = sp_mem_new_mobj(pages_cnt, TEE_MATTR_MEM_TYPE_STRONGLY_O,
				    is_secure);
		if (!m)
			return TEE_ERROR_OUT_OF_MEMORY;

		res = sp_mem_add_pages(m, &idx, (paddr_t)base_addr, pages_cnt);
		if (res) {
			mobj_put(m);
			return res;
		}

		res = vm_map(&ctx->uctx, &va, pages_cnt * SMALL_PAGE_SIZE,
			     perm, 0, m, 0);
		mobj_put(m);
		if (res)
			return res;

		/*
		 * Overwrite the device region's PA in the fdt with the VA. This
		 * fdt will be passed to the SP.
		 */
		res = fdt_setprop_u64(fdt, subnode, "base-address", va);

		/*
		 * Unmap the region if the overwrite failed since the SP won't
		 * be able to access it without knowing the VA.
		 */
		if (res) {
			vm_unmap(&ctx->uctx, va, pages_cnt * SMALL_PAGE_SIZE);
			return res;
		}
	}

	return TEE_SUCCESS;
}

/*
 * Resolve a clash between a manifest-requested endpoint ID and an
 * auto-assigned one: if the SP currently holding @endpoint_id got it from
 * new_session_id() (no "id" in its manifest), give it @new_endpoint_id
 * instead. Two manifests claiming the same ID is an error.
 */
static TEE_Result swap_sp_endpoints(uint32_t endpoint_id,
				    uint32_t new_endpoint_id)
{
	struct sp_session *session = sp_get_session(endpoint_id);
	uint32_t manifest_endpoint_id = 0;

	/*
	 * We don't know in which order the SPs are loaded. The endpoint ID
	 * defined in the manifest could already be generated by
	 * new_session_id() and used by another SP. If this is the case, we swap
	 * the ID's of the two SPs. We also have to make sure that the ID's are
	 * not defined twice in the manifest.
	 */

	/* The endpoint ID was not assigned yet */
	if (!session)
		return TEE_SUCCESS;

	/*
	 * Read the manifest file from the SP who originally had the endpoint.
	 * We can safely swap the endpoint ID's if the manifest file doesn't
	 * have an endpoint ID defined.
	 */
	if (!sp_dt_get_u32(session->fdt, 0, "id", &manifest_endpoint_id)) {
		assert(manifest_endpoint_id == endpoint_id);
		EMSG("SP: Found duplicated endpoint ID %#"PRIx32, endpoint_id);
		return TEE_ERROR_ACCESS_CONFLICT;
	}

	session->endpoint_id = new_endpoint_id;

	return TEE_SUCCESS;
}

/*
 * Apply the optional "id" property from the SP's manifest: validate it,
 * reclaim it from any SP that was auto-assigned it, then assign it to @s.
 * Without the property the auto-generated endpoint ID stays in effect.
 */
static TEE_Result read_manifest_endpoint_id(struct sp_session *s)
{
	uint32_t endpoint_id = 0;

	/*
	 * The endpoint ID can be optionally defined in the manifest file. We
	 * have to map the ID inside the manifest to the SP if it's defined.
	 * If not, the endpoint ID generated inside new_session_id() will be
	 * used.
	 */
	if (!sp_dt_get_u32(s->fdt, 0, "id", &endpoint_id)) {
		TEE_Result res = TEE_ERROR_GENERIC;

		if (!endpoint_id_is_valid(endpoint_id)) {
			EMSG("Invalid endpoint ID 0x%"PRIx32, endpoint_id);
			return TEE_ERROR_BAD_FORMAT;
		}

		res = swap_sp_endpoints(endpoint_id, s->endpoint_id);
		if (res)
			return res;

		DMSG("SP: endpoint ID (0x%"PRIx32") found in manifest",
		     endpoint_id);
		/* Assign the endpoint ID to the current SP */
		s->endpoint_id = endpoint_id;
	}
	return TEE_SUCCESS;
}

/*
 * Second pass over the manifest's memory regions: map (or allocate and map)
 * every region not handled by handle_fdt_load_relative_mem_regions.
 * NOTE(review): this function is truncated at the end of the visible chunk;
 * the remainder continues past this excerpt.
 */
static TEE_Result handle_fdt_mem_regions(struct sp_ctx *ctx, void *fdt)
{
	int node = 0;
	int subnode = 0;
	tee_mm_entry_t *mm = NULL;
	TEE_Result res = TEE_SUCCESS;

	/*
	 * Memory regions are optional in the SP manifest, it's not an error if
	 * we don't find any.
	 */
	node = fdt_node_offset_by_compatible(fdt, 0,
					     "arm,ffa-manifest-memory-regions");
	if (node < 0)
		return TEE_SUCCESS;

	fdt_for_each_subnode(subnode, fdt, node) {
		uint64_t load_rel_offset = 0;
		bool alloc_needed = false;
		uint32_t attributes = 0;
		uint64_t base_addr = 0;
		uint32_t pages_cnt = 0;
		bool is_secure = true;
		struct mobj *m = NULL;
		unsigned int idx = 0;
		uint32_t perm = 0;
		size_t size = 0;
		vaddr_t va = 0;

		mm = NULL;

		/* Load address relative offset of a memory region */
		if (!sp_dt_get_u64(fdt, subnode, "load-address-relative-offset",
				   &load_rel_offset)) {
			/*
			 * At this point the memory region is already mapped by
			 * handle_fdt_load_relative_mem_regions.
			 * Only need to set the base-address in the manifest and
			 * then skip the rest of the mapping process.
			 */
			va = ctx->uctx.load_addr + load_rel_offset;
			res = fdt_setprop_u64(fdt, subnode, "base-address", va);
			if (res)
				return res;

			continue;
		}

		/*
		 * Base address of a memory region.
		 * If not present, we have to allocate the specified memory.
		 * If present, this field could specify a PA or VA. Currently
		 * only a PA is supported.
		 */
		if (sp_dt_get_u64(fdt, subnode, "base-address", &base_addr))
			alloc_needed = true;

		/* Size of memory region as count of 4K pages */
		if (sp_dt_get_u32(fdt, subnode, "pages-count", &pages_cnt)) {
			EMSG("Mandatory field is missing: pages-count");
			return TEE_ERROR_BAD_FORMAT;
		}

		if (MUL_OVERFLOW(pages_cnt, SMALL_PAGE_SIZE, &size))
			return TEE_ERROR_OVERFLOW;

		/*
		 * Memory region attributes:
		 * - Instruction/data access permissions
		 * - Cacheability/shareability attributes
		 * - Security attributes
		 *
		 * Cacheability/shareability attributes can be ignored for now.
		 * OP-TEE only supports a single type for normal cached memory
		 * and currently there is no use case that would require to
		 * change this.
		 */
		if (sp_dt_get_u32(fdt, subnode, "attributes", &attributes)) {
			EMSG("Mandatory field is missing: attributes");
			return TEE_ERROR_BAD_FORMAT;
		}

		/* Check instruction and data access permissions */
		switch (attributes & SP_MANIFEST_ATTR_RWX) {
		case SP_MANIFEST_ATTR_RO:
			perm = TEE_MATTR_UR;
			break;
		case SP_MANIFEST_ATTR_RW:
			perm = TEE_MATTR_URW;
			break;
		case SP_MANIFEST_ATTR_RX:
			perm = TEE_MATTR_URX;
			break;
		default:
			EMSG("Invalid memory access permissions");
			return TEE_ERROR_BAD_FORMAT;
		}

		if (IS_ENABLED(CFG_TA_BTI) &&
		    attributes & SP_MANIFEST_ATTR_GP) {
			if (!(attributes & SP_MANIFEST_ATTR_RX)) {
				EMSG("Guard only executable region");
				return TEE_ERROR_BAD_FORMAT;
			}
			perm |= TEE_MATTR_GUARDED;
		}

		/*
		 * The SP is a secure endpoint, security attribute can be
		 * secure or non-secure.
		 * The SPMC cannot allocate non-secure memory, i.e. if the base
		 * address is missing this attribute must be secure.
1251 */ 1252 if (attributes & SP_MANIFEST_ATTR_NSEC) { 1253 if (alloc_needed) { 1254 EMSG("Invalid memory security attribute"); 1255 return TEE_ERROR_BAD_FORMAT; 1256 } 1257 is_secure = false; 1258 } 1259 1260 if (alloc_needed) { 1261 /* Base address is missing, we have to allocate */ 1262 mm = tee_mm_alloc(&tee_mm_sec_ddr, size); 1263 if (!mm) 1264 return TEE_ERROR_OUT_OF_MEMORY; 1265 1266 base_addr = tee_mm_get_smem(mm); 1267 } 1268 1269 m = sp_mem_new_mobj(pages_cnt, TEE_MATTR_MEM_TYPE_CACHED, 1270 is_secure); 1271 if (!m) { 1272 res = TEE_ERROR_OUT_OF_MEMORY; 1273 goto err_mm_free; 1274 } 1275 1276 res = sp_mem_add_pages(m, &idx, base_addr, pages_cnt); 1277 if (res) { 1278 mobj_put(m); 1279 goto err_mm_free; 1280 } 1281 1282 res = vm_map(&ctx->uctx, &va, size, perm, 0, m, 0); 1283 mobj_put(m); 1284 if (res) 1285 goto err_mm_free; 1286 1287 /* 1288 * Overwrite the memory region's base address in the fdt with 1289 * the VA. This fdt will be passed to the SP. 1290 * If the base-address field was not present in the original 1291 * fdt, this function will create it. This doesn't cause issues 1292 * since the necessary extra space has been allocated when 1293 * opening the fdt. 1294 */ 1295 res = fdt_setprop_u64(fdt, subnode, "base-address", va); 1296 1297 /* 1298 * Unmap the region if the overwrite failed since the SP won't 1299 * be able to access it without knowing the VA. 
1300 */ 1301 if (res) { 1302 vm_unmap(&ctx->uctx, va, size); 1303 goto err_mm_free; 1304 } 1305 } 1306 1307 return TEE_SUCCESS; 1308 1309 err_mm_free: 1310 tee_mm_free(mm); 1311 return res; 1312 } 1313 1314 static TEE_Result handle_tpm_event_log(struct sp_ctx *ctx, void *fdt) 1315 { 1316 uint32_t perm = TEE_MATTR_URW | TEE_MATTR_PRW; 1317 uint32_t dummy_size __maybe_unused = 0; 1318 TEE_Result res = TEE_SUCCESS; 1319 size_t page_count = 0; 1320 struct fobj *f = NULL; 1321 struct mobj *m = NULL; 1322 vaddr_t log_addr = 0; 1323 size_t log_size = 0; 1324 int node = 0; 1325 1326 node = fdt_node_offset_by_compatible(fdt, 0, "arm,tpm_event_log"); 1327 if (node < 0) 1328 return TEE_SUCCESS; 1329 1330 /* Checking the existence and size of the event log properties */ 1331 if (sp_dt_get_u64(fdt, node, "tpm_event_log_addr", &log_addr)) { 1332 EMSG("tpm_event_log_addr not found or has invalid size"); 1333 return TEE_ERROR_BAD_FORMAT; 1334 } 1335 1336 if (sp_dt_get_u32(fdt, node, "tpm_event_log_size", &dummy_size)) { 1337 EMSG("tpm_event_log_size not found or has invalid size"); 1338 return TEE_ERROR_BAD_FORMAT; 1339 } 1340 1341 /* Validating event log */ 1342 res = tpm_get_event_log_size(&log_size); 1343 if (res) 1344 return res; 1345 1346 if (!log_size) { 1347 EMSG("Empty TPM event log was provided"); 1348 return TEE_ERROR_ITEM_NOT_FOUND; 1349 } 1350 1351 /* Allocating memory area for the event log to share with the SP */ 1352 page_count = ROUNDUP_DIV(log_size, SMALL_PAGE_SIZE); 1353 1354 f = fobj_sec_mem_alloc(page_count); 1355 m = mobj_with_fobj_alloc(f, NULL, TEE_MATTR_MEM_TYPE_TAGGED); 1356 fobj_put(f); 1357 if (!m) 1358 return TEE_ERROR_OUT_OF_MEMORY; 1359 1360 res = vm_map(&ctx->uctx, &log_addr, log_size, perm, 0, m, 0); 1361 mobj_put(m); 1362 if (res) 1363 return res; 1364 1365 /* Copy event log */ 1366 res = tpm_get_event_log((void *)log_addr, &log_size); 1367 if (res) 1368 goto err_unmap; 1369 1370 /* Setting event log details in the manifest */ 1371 res = 
fdt_setprop_u64(fdt, node, "tpm_event_log_addr", log_addr); 1372 if (res) 1373 goto err_unmap; 1374 1375 res = fdt_setprop_u32(fdt, node, "tpm_event_log_size", log_size); 1376 if (res) 1377 goto err_unmap; 1378 1379 return TEE_SUCCESS; 1380 1381 err_unmap: 1382 vm_unmap(&ctx->uctx, log_addr, log_size); 1383 1384 return res; 1385 } 1386 1387 /* 1388 * Note: this function is called only on the primary CPU. It assumes that the 1389 * features present on the primary CPU are available on all of the secondary 1390 * CPUs as well. 1391 */ 1392 static TEE_Result handle_hw_features(void *fdt) 1393 { 1394 uint32_t val __maybe_unused = 0; 1395 TEE_Result res = TEE_SUCCESS; 1396 int node = 0; 1397 1398 /* 1399 * HW feature descriptions are optional in the SP manifest, it's not an 1400 * error if we don't find any. 1401 */ 1402 node = fdt_node_offset_by_compatible(fdt, 0, "arm,hw-features"); 1403 if (node < 0) 1404 return TEE_SUCCESS; 1405 1406 /* Modify the crc32 property only if it's already present */ 1407 if (!sp_dt_get_u32(fdt, node, "crc32", &val)) { 1408 res = fdt_setprop_u32(fdt, node, "crc32", 1409 feat_crc32_implemented()); 1410 if (res) 1411 return res; 1412 } 1413 1414 /* Modify the property only if it's already present */ 1415 if (!sp_dt_get_u32(fdt, node, "bti", &val)) { 1416 res = fdt_setprop_u32(fdt, node, "bti", 1417 feat_bti_is_implemented()); 1418 if (res) 1419 return res; 1420 } 1421 1422 /* Modify the property only if it's already present */ 1423 if (!sp_dt_get_u32(fdt, node, "pauth", &val)) { 1424 res = fdt_setprop_u32(fdt, node, "pauth", 1425 feat_pauth_is_implemented()); 1426 if (res) 1427 return res; 1428 } 1429 1430 return TEE_SUCCESS; 1431 } 1432 1433 static TEE_Result read_ns_interrupts_action(const void *fdt, 1434 struct sp_session *s) 1435 { 1436 TEE_Result res = TEE_ERROR_BAD_PARAMETERS; 1437 1438 res = sp_dt_get_u32(fdt, 0, "ns-interrupts-action", &s->ns_int_mode); 1439 1440 if (res) { 1441 EMSG("Mandatory property is missing: 
ns-interrupts-action"); 1442 return res; 1443 } 1444 1445 switch (s->ns_int_mode) { 1446 case SP_MANIFEST_NS_INT_QUEUED: 1447 case SP_MANIFEST_NS_INT_SIGNALED: 1448 /* OK */ 1449 break; 1450 1451 case SP_MANIFEST_NS_INT_MANAGED_EXIT: 1452 EMSG("Managed exit is not implemented"); 1453 return TEE_ERROR_NOT_IMPLEMENTED; 1454 1455 default: 1456 EMSG("Invalid ns-interrupts-action value: %"PRIu32, 1457 s->ns_int_mode); 1458 return TEE_ERROR_BAD_PARAMETERS; 1459 } 1460 1461 return TEE_SUCCESS; 1462 } 1463 1464 static TEE_Result read_ffa_version(const void *fdt, struct sp_session *s) 1465 { 1466 TEE_Result res = TEE_ERROR_BAD_PARAMETERS; 1467 uint32_t ffa_version = 0; 1468 1469 res = sp_dt_get_u32(fdt, 0, "ffa-version", &ffa_version); 1470 if (res) { 1471 EMSG("Mandatory property is missing: ffa-version"); 1472 return res; 1473 } 1474 1475 if (ffa_version != FFA_VERSION_1_0 && ffa_version != FFA_VERSION_1_1) { 1476 EMSG("Invalid FF-A version value: 0x%08"PRIx32, ffa_version); 1477 return TEE_ERROR_BAD_PARAMETERS; 1478 } 1479 1480 s->rxtx.ffa_vers = ffa_version; 1481 1482 return TEE_SUCCESS; 1483 } 1484 1485 static TEE_Result read_sp_exec_state(const void *fdt, struct sp_session *s) 1486 { 1487 TEE_Result res = TEE_ERROR_BAD_PARAMETERS; 1488 uint32_t exec_state = 0; 1489 1490 res = sp_dt_get_u32(fdt, 0, "execution-state", &exec_state); 1491 if (res) { 1492 EMSG("Mandatory property is missing: execution-state"); 1493 return res; 1494 } 1495 1496 /* Currently only AArch64 SPs are supported */ 1497 if (exec_state == SP_MANIFEST_EXEC_STATE_AARCH64) { 1498 s->props |= FFA_PART_PROP_AARCH64_STATE; 1499 } else { 1500 EMSG("Invalid execution-state value: %"PRIu32, exec_state); 1501 return TEE_ERROR_BAD_PARAMETERS; 1502 } 1503 1504 return TEE_SUCCESS; 1505 } 1506 1507 static TEE_Result read_sp_msg_types(const void *fdt, struct sp_session *s) 1508 { 1509 TEE_Result res = TEE_ERROR_BAD_PARAMETERS; 1510 uint32_t msg_method = 0; 1511 1512 res = sp_dt_get_u32(fdt, 0, "messaging-method", 
&msg_method); 1513 if (res) { 1514 EMSG("Mandatory property is missing: messaging-method"); 1515 return res; 1516 } 1517 1518 if (msg_method & SP_MANIFEST_DIRECT_REQ_RECEIVE) 1519 s->props |= FFA_PART_PROP_DIRECT_REQ_RECV; 1520 1521 if (msg_method & SP_MANIFEST_DIRECT_REQ_SEND) 1522 s->props |= FFA_PART_PROP_DIRECT_REQ_SEND; 1523 1524 if (msg_method & SP_MANIFEST_INDIRECT_REQ) 1525 IMSG("Indirect messaging is not supported"); 1526 1527 return TEE_SUCCESS; 1528 } 1529 1530 static TEE_Result read_vm_availability_msg(const void *fdt, 1531 struct sp_session *s) 1532 { 1533 TEE_Result res = TEE_ERROR_BAD_PARAMETERS; 1534 uint32_t v = 0; 1535 1536 res = sp_dt_get_u32(fdt, 0, "vm-availability-messages", &v); 1537 1538 /* This field in the manifest is optional */ 1539 if (res == TEE_ERROR_ITEM_NOT_FOUND) 1540 return TEE_SUCCESS; 1541 1542 if (res) 1543 return res; 1544 1545 if (v & ~(SP_MANIFEST_VM_CREATED_MSG | SP_MANIFEST_VM_DESTROYED_MSG)) { 1546 EMSG("Invalid vm-availability-messages value: %"PRIu32, v); 1547 return TEE_ERROR_BAD_PARAMETERS; 1548 } 1549 1550 if (v & SP_MANIFEST_VM_CREATED_MSG) 1551 s->props |= FFA_PART_PROP_NOTIF_CREATED; 1552 1553 if (v & SP_MANIFEST_VM_DESTROYED_MSG) 1554 s->props |= FFA_PART_PROP_NOTIF_DESTROYED; 1555 1556 return TEE_SUCCESS; 1557 } 1558 1559 static TEE_Result sp_init_uuid(const TEE_UUID *bin_uuid, const void * const fdt) 1560 { 1561 TEE_Result res = TEE_SUCCESS; 1562 struct sp_session *sess = NULL; 1563 TEE_UUID ffa_uuid = {}; 1564 uint16_t boot_order = 0; 1565 uint32_t boot_order_arg = 0; 1566 1567 res = fdt_get_uuid(fdt, &ffa_uuid); 1568 if (res) 1569 return res; 1570 1571 res = sp_dt_get_u16(fdt, 0, "boot-order", &boot_order); 1572 if (res == TEE_SUCCESS) { 1573 boot_order_arg = boot_order; 1574 } else if (res == TEE_ERROR_ITEM_NOT_FOUND) { 1575 boot_order_arg = UINT32_MAX; 1576 } else { 1577 EMSG("Failed reading boot-order property err:%#"PRIx32, res); 1578 return res; 1579 } 1580 1581 res = sp_open_session(&sess, 1582 
&open_sp_sessions, 1583 &ffa_uuid, bin_uuid, boot_order_arg, fdt); 1584 if (res) 1585 return res; 1586 1587 sess->fdt = fdt; 1588 1589 res = read_manifest_endpoint_id(sess); 1590 if (res) 1591 return res; 1592 DMSG("endpoint is 0x%"PRIx16, sess->endpoint_id); 1593 1594 res = read_ns_interrupts_action(fdt, sess); 1595 if (res) 1596 return res; 1597 1598 res = read_ffa_version(fdt, sess); 1599 if (res) 1600 return res; 1601 1602 res = read_sp_exec_state(fdt, sess); 1603 if (res) 1604 return res; 1605 1606 res = read_sp_msg_types(fdt, sess); 1607 if (res) 1608 return res; 1609 1610 res = read_vm_availability_msg(fdt, sess); 1611 if (res) 1612 return res; 1613 1614 return TEE_SUCCESS; 1615 } 1616 1617 static TEE_Result sp_first_run(struct sp_session *sess) 1618 { 1619 TEE_Result res = TEE_SUCCESS; 1620 struct thread_smc_args args = { }; 1621 struct sp_ctx *ctx = NULL; 1622 vaddr_t boot_info_va = 0; 1623 size_t boot_info_size = 0; 1624 void *fdt_copy = NULL; 1625 size_t fdt_size = 0; 1626 1627 ctx = to_sp_ctx(sess->ts_sess.ctx); 1628 ts_push_current_session(&sess->ts_sess); 1629 sess->is_initialized = false; 1630 1631 /* 1632 * Load relative memory regions must be handled before doing any other 1633 * mapping to prevent conflicts in the VA space. 
1634 */ 1635 res = handle_fdt_load_relative_mem_regions(ctx, sess->fdt); 1636 if (res) { 1637 ts_pop_current_session(); 1638 return res; 1639 } 1640 1641 res = copy_and_map_fdt(ctx, sess->fdt, &fdt_copy, &fdt_size); 1642 if (res) 1643 goto out; 1644 1645 res = handle_fdt_dev_regions(ctx, fdt_copy); 1646 if (res) 1647 goto out; 1648 1649 res = handle_fdt_mem_regions(ctx, fdt_copy); 1650 if (res) 1651 goto out; 1652 1653 if (IS_ENABLED(CFG_CORE_TPM_EVENT_LOG)) { 1654 res = handle_tpm_event_log(ctx, fdt_copy); 1655 if (res) 1656 goto out; 1657 } 1658 1659 res = handle_hw_features(fdt_copy); 1660 if (res) 1661 goto out; 1662 1663 res = create_and_map_boot_info(ctx, fdt_copy, &args, &boot_info_va, 1664 &boot_info_size, sess->rxtx.ffa_vers); 1665 if (res) 1666 goto out; 1667 1668 ts_pop_current_session(); 1669 1670 res = sp_enter(&args, sess); 1671 if (res) { 1672 ts_push_current_session(&sess->ts_sess); 1673 goto out; 1674 } 1675 1676 spmc_sp_msg_handler(&args, sess); 1677 1678 ts_push_current_session(&sess->ts_sess); 1679 sess->is_initialized = true; 1680 1681 out: 1682 /* Free the boot info page from the SP memory */ 1683 vm_unmap(&ctx->uctx, boot_info_va, boot_info_size); 1684 vm_unmap(&ctx->uctx, (vaddr_t)fdt_copy, fdt_size); 1685 ts_pop_current_session(); 1686 1687 return res; 1688 } 1689 1690 TEE_Result sp_enter(struct thread_smc_args *args, struct sp_session *sp) 1691 { 1692 TEE_Result res = TEE_SUCCESS; 1693 struct sp_ctx *ctx = to_sp_ctx(sp->ts_sess.ctx); 1694 1695 ctx->sp_regs.x[0] = args->a0; 1696 ctx->sp_regs.x[1] = args->a1; 1697 ctx->sp_regs.x[2] = args->a2; 1698 ctx->sp_regs.x[3] = args->a3; 1699 ctx->sp_regs.x[4] = args->a4; 1700 ctx->sp_regs.x[5] = args->a5; 1701 ctx->sp_regs.x[6] = args->a6; 1702 ctx->sp_regs.x[7] = args->a7; 1703 #ifdef CFG_TA_PAUTH 1704 ctx->sp_regs.apiakey_hi = ctx->uctx.keys.apia_hi; 1705 ctx->sp_regs.apiakey_lo = ctx->uctx.keys.apia_lo; 1706 #endif 1707 1708 res = sp->ts_sess.ctx->ops->enter_invoke_cmd(&sp->ts_sess, 0); 1709 1710 
args->a0 = ctx->sp_regs.x[0]; 1711 args->a1 = ctx->sp_regs.x[1]; 1712 args->a2 = ctx->sp_regs.x[2]; 1713 args->a3 = ctx->sp_regs.x[3]; 1714 args->a4 = ctx->sp_regs.x[4]; 1715 args->a5 = ctx->sp_regs.x[5]; 1716 args->a6 = ctx->sp_regs.x[6]; 1717 args->a7 = ctx->sp_regs.x[7]; 1718 1719 return res; 1720 } 1721 1722 /* 1723 * According to FF-A v1.1 section 8.3.1.4 if a caller requires less permissive 1724 * active on NS interrupt than the callee, the callee must inherit the caller's 1725 * configuration. 1726 * Each SP's own NS action setting is stored in ns_int_mode. The effective 1727 * action will be MIN([self action], [caller's action]) which is stored in the 1728 * ns_int_mode_inherited field. 1729 */ 1730 static void sp_cpsr_configure_foreign_interrupts(struct sp_session *s, 1731 struct ts_session *caller, 1732 uint64_t *cpsr) 1733 { 1734 if (caller) { 1735 struct sp_session *caller_sp = to_sp_session(caller); 1736 1737 s->ns_int_mode_inherited = MIN(caller_sp->ns_int_mode_inherited, 1738 s->ns_int_mode); 1739 } else { 1740 s->ns_int_mode_inherited = s->ns_int_mode; 1741 } 1742 1743 if (s->ns_int_mode_inherited == SP_MANIFEST_NS_INT_QUEUED) 1744 *cpsr |= SHIFT_U32(THREAD_EXCP_FOREIGN_INTR, 1745 ARM32_CPSR_F_SHIFT); 1746 else 1747 *cpsr &= ~SHIFT_U32(THREAD_EXCP_FOREIGN_INTR, 1748 ARM32_CPSR_F_SHIFT); 1749 } 1750 1751 static TEE_Result sp_enter_invoke_cmd(struct ts_session *s, 1752 uint32_t cmd __unused) 1753 { 1754 struct sp_ctx *ctx = to_sp_ctx(s->ctx); 1755 TEE_Result res = TEE_SUCCESS; 1756 uint32_t exceptions = 0; 1757 struct sp_session *sp_s = to_sp_session(s); 1758 struct ts_session *sess = NULL; 1759 struct thread_ctx_regs *sp_regs = NULL; 1760 uint32_t thread_id = THREAD_ID_INVALID; 1761 struct ts_session *caller = NULL; 1762 uint32_t rpc_target_info = 0; 1763 uint32_t panicked = false; 1764 uint32_t panic_code = 0; 1765 1766 sp_regs = &ctx->sp_regs; 1767 ts_push_current_session(s); 1768 1769 exceptions = thread_mask_exceptions(THREAD_EXCP_ALL); 1770 1771 
/* Enable/disable foreign interrupts in CPSR/SPSR */ 1772 caller = ts_get_calling_session(); 1773 sp_cpsr_configure_foreign_interrupts(sp_s, caller, &sp_regs->cpsr); 1774 1775 /* 1776 * Store endpoint ID and thread ID in rpc_target_info. This will be used 1777 * as w1 in FFA_INTERRUPT in case of a foreign interrupt. 1778 */ 1779 rpc_target_info = thread_get_tsd()->rpc_target_info; 1780 thread_id = thread_get_id(); 1781 assert(thread_id <= UINT16_MAX); 1782 thread_get_tsd()->rpc_target_info = 1783 FFA_TARGET_INFO_SET(sp_s->endpoint_id, thread_id); 1784 1785 __thread_enter_user_mode(sp_regs, &panicked, &panic_code); 1786 1787 /* Restore rpc_target_info */ 1788 thread_get_tsd()->rpc_target_info = rpc_target_info; 1789 1790 thread_unmask_exceptions(exceptions); 1791 1792 thread_user_clear_vfp(&ctx->uctx); 1793 1794 if (panicked) { 1795 DMSG("SP panicked with code %#"PRIx32, panic_code); 1796 abort_print_current_ts(); 1797 1798 sess = ts_pop_current_session(); 1799 cpu_spin_lock(&sp_s->spinlock); 1800 sp_s->state = sp_dead; 1801 cpu_spin_unlock(&sp_s->spinlock); 1802 1803 return TEE_ERROR_TARGET_DEAD; 1804 } 1805 1806 sess = ts_pop_current_session(); 1807 assert(sess == s); 1808 1809 return res; 1810 } 1811 1812 /* We currently don't support 32 bits */ 1813 #ifdef ARM64 1814 static void sp_svc_store_registers(struct thread_scall_regs *regs, 1815 struct thread_ctx_regs *sp_regs) 1816 { 1817 COMPILE_TIME_ASSERT(sizeof(sp_regs->x[0]) == sizeof(regs->x0)); 1818 memcpy(sp_regs->x, ®s->x0, 31 * sizeof(regs->x0)); 1819 sp_regs->pc = regs->elr; 1820 sp_regs->sp = regs->sp_el0; 1821 } 1822 #endif 1823 1824 static bool sp_handle_scall(struct thread_scall_regs *regs) 1825 { 1826 struct ts_session *ts = ts_get_current_session(); 1827 struct sp_ctx *uctx = to_sp_ctx(ts->ctx); 1828 struct sp_session *s = uctx->open_session; 1829 1830 assert(s); 1831 1832 sp_svc_store_registers(regs, &uctx->sp_regs); 1833 1834 regs->x0 = 0; 1835 regs->x1 = 0; /* panic */ 1836 regs->x2 = 0; /* panic 
code */ 1837 1838 /* 1839 * All the registers of the SP are saved in the SP session by the SVC 1840 * handler. 1841 * We always return to S-El1 after handling the SVC. We will continue 1842 * in sp_enter_invoke_cmd() (return from __thread_enter_user_mode). 1843 * The sp_enter() function copies the FF-A parameters (a0-a7) from the 1844 * saved registers to the thread_smc_args. The thread_smc_args object is 1845 * afterward used by the spmc_sp_msg_handler() to handle the 1846 * FF-A message send by the SP. 1847 */ 1848 return false; 1849 } 1850 1851 static void sp_dump_state(struct ts_ctx *ctx) 1852 { 1853 struct sp_ctx *utc = to_sp_ctx(ctx); 1854 1855 if (utc->uctx.dump_entry_func) { 1856 TEE_Result res = ldelf_dump_state(&utc->uctx); 1857 1858 if (!res || res == TEE_ERROR_TARGET_DEAD) 1859 return; 1860 } 1861 1862 user_mode_ctx_print_mappings(&utc->uctx); 1863 } 1864 1865 static const struct ts_ops sp_ops = { 1866 .enter_invoke_cmd = sp_enter_invoke_cmd, 1867 .handle_scall = sp_handle_scall, 1868 .dump_state = sp_dump_state, 1869 }; 1870 1871 static TEE_Result process_sp_pkg(uint64_t sp_pkg_pa, TEE_UUID *sp_uuid) 1872 { 1873 enum teecore_memtypes mtype = MEM_AREA_TA_RAM; 1874 struct sp_pkg_header *sp_pkg_hdr = NULL; 1875 struct fip_sp *sp = NULL; 1876 uint64_t sp_fdt_end = 0; 1877 size_t sp_pkg_size = 0; 1878 vaddr_t sp_pkg_va = 0; 1879 1880 /* Process the first page which contains the SP package header */ 1881 sp_pkg_va = (vaddr_t)phys_to_virt(sp_pkg_pa, mtype, SMALL_PAGE_SIZE); 1882 if (!sp_pkg_va) { 1883 EMSG("Cannot find mapping for PA %#" PRIxPA, sp_pkg_pa); 1884 return TEE_ERROR_GENERIC; 1885 } 1886 1887 sp_pkg_hdr = (struct sp_pkg_header *)sp_pkg_va; 1888 1889 if (sp_pkg_hdr->magic != SP_PKG_HEADER_MAGIC) { 1890 EMSG("Invalid SP package magic"); 1891 return TEE_ERROR_BAD_FORMAT; 1892 } 1893 1894 if (sp_pkg_hdr->version != SP_PKG_HEADER_VERSION_V1 && 1895 sp_pkg_hdr->version != SP_PKG_HEADER_VERSION_V2) { 1896 EMSG("Invalid SP header version"); 1897 return 
TEE_ERROR_BAD_FORMAT; 1898 } 1899 1900 if (ADD_OVERFLOW(sp_pkg_hdr->img_offset, sp_pkg_hdr->img_size, 1901 &sp_pkg_size)) { 1902 EMSG("Invalid SP package size"); 1903 return TEE_ERROR_BAD_FORMAT; 1904 } 1905 1906 if (ADD_OVERFLOW(sp_pkg_hdr->pm_offset, sp_pkg_hdr->pm_size, 1907 &sp_fdt_end) || sp_fdt_end > sp_pkg_hdr->img_offset) { 1908 EMSG("Invalid SP manifest size"); 1909 return TEE_ERROR_BAD_FORMAT; 1910 } 1911 1912 /* Process the whole SP package now that the size is known */ 1913 sp_pkg_va = (vaddr_t)phys_to_virt(sp_pkg_pa, mtype, sp_pkg_size); 1914 if (!sp_pkg_va) { 1915 EMSG("Cannot find mapping for PA %#" PRIxPA, sp_pkg_pa); 1916 return TEE_ERROR_GENERIC; 1917 } 1918 1919 sp_pkg_hdr = (struct sp_pkg_header *)sp_pkg_va; 1920 1921 sp = calloc(1, sizeof(struct fip_sp)); 1922 if (!sp) 1923 return TEE_ERROR_OUT_OF_MEMORY; 1924 1925 memcpy(&sp->sp_img.image.uuid, sp_uuid, sizeof(*sp_uuid)); 1926 sp->sp_img.image.ts = (uint8_t *)(sp_pkg_va + sp_pkg_hdr->img_offset); 1927 sp->sp_img.image.size = sp_pkg_hdr->img_size; 1928 sp->sp_img.image.flags = 0; 1929 sp->sp_img.fdt = (uint8_t *)(sp_pkg_va + sp_pkg_hdr->pm_offset); 1930 1931 STAILQ_INSERT_TAIL(&fip_sp_list, sp, link); 1932 1933 return TEE_SUCCESS; 1934 } 1935 1936 static TEE_Result fip_sp_init_all(void) 1937 { 1938 TEE_Result res = TEE_SUCCESS; 1939 uint64_t sp_pkg_addr = 0; 1940 const void *fdt = NULL; 1941 TEE_UUID sp_uuid = { }; 1942 int sp_pkgs_node = 0; 1943 int subnode = 0; 1944 int root = 0; 1945 1946 fdt = get_manifest_dt(); 1947 if (!fdt) { 1948 EMSG("No SPMC manifest found"); 1949 return TEE_ERROR_GENERIC; 1950 } 1951 1952 root = fdt_path_offset(fdt, "/"); 1953 if (root < 0) 1954 return TEE_ERROR_BAD_FORMAT; 1955 1956 if (fdt_node_check_compatible(fdt, root, "arm,ffa-core-manifest-1.0")) 1957 return TEE_ERROR_BAD_FORMAT; 1958 1959 /* SP packages are optional, it's not an error if we don't find any */ 1960 sp_pkgs_node = fdt_node_offset_by_compatible(fdt, root, "arm,sp_pkg"); 1961 if (sp_pkgs_node < 0) 
1962 return TEE_SUCCESS; 1963 1964 fdt_for_each_subnode(subnode, fdt, sp_pkgs_node) { 1965 res = sp_dt_get_u64(fdt, subnode, "load-address", &sp_pkg_addr); 1966 if (res) { 1967 EMSG("Invalid FIP SP load address"); 1968 return res; 1969 } 1970 1971 res = sp_dt_get_uuid(fdt, subnode, "uuid", &sp_uuid); 1972 if (res) { 1973 EMSG("Invalid FIP SP uuid"); 1974 return res; 1975 } 1976 1977 res = process_sp_pkg(sp_pkg_addr, &sp_uuid); 1978 if (res) { 1979 EMSG("Invalid FIP SP package"); 1980 return res; 1981 } 1982 } 1983 1984 return TEE_SUCCESS; 1985 } 1986 1987 static void fip_sp_deinit_all(void) 1988 { 1989 while (!STAILQ_EMPTY(&fip_sp_list)) { 1990 struct fip_sp *sp = STAILQ_FIRST(&fip_sp_list); 1991 1992 STAILQ_REMOVE_HEAD(&fip_sp_list, link); 1993 free(sp); 1994 } 1995 } 1996 1997 static TEE_Result sp_init_all(void) 1998 { 1999 TEE_Result res = TEE_SUCCESS; 2000 const struct sp_image *sp = NULL; 2001 const struct fip_sp *fip_sp = NULL; 2002 char __maybe_unused msg[60] = { '\0', }; 2003 struct sp_session *s = NULL; 2004 struct sp_session *prev_sp = NULL; 2005 2006 for_each_secure_partition(sp) { 2007 if (sp->image.uncompressed_size) 2008 snprintf(msg, sizeof(msg), 2009 " (compressed, uncompressed %u)", 2010 sp->image.uncompressed_size); 2011 else 2012 msg[0] = '\0'; 2013 DMSG("SP %pUl size %u%s", (void *)&sp->image.uuid, 2014 sp->image.size, msg); 2015 2016 res = sp_init_uuid(&sp->image.uuid, sp->fdt); 2017 2018 if (res != TEE_SUCCESS) { 2019 EMSG("Failed initializing SP(%pUl) err:%#"PRIx32, 2020 &sp->image.uuid, res); 2021 if (!IS_ENABLED(CFG_SP_SKIP_FAILED)) 2022 panic(); 2023 } 2024 } 2025 2026 res = fip_sp_init_all(); 2027 if (res) 2028 panic("Failed initializing FIP SPs"); 2029 2030 for_each_fip_sp(fip_sp) { 2031 sp = &fip_sp->sp_img; 2032 2033 DMSG("SP %pUl size %u", (void *)&sp->image.uuid, 2034 sp->image.size); 2035 2036 res = sp_init_uuid(&sp->image.uuid, sp->fdt); 2037 2038 if (res != TEE_SUCCESS) { 2039 EMSG("Failed initializing SP(%pUl) err:%#"PRIx32, 2040 
&sp->image.uuid, res); 2041 if (!IS_ENABLED(CFG_SP_SKIP_FAILED)) 2042 panic(); 2043 } 2044 } 2045 2046 /* 2047 * At this point all FIP SPs are loaded by ldelf or by the raw binary SP 2048 * loader, so the original images (loaded by BL2) are not needed anymore 2049 */ 2050 fip_sp_deinit_all(); 2051 2052 /* 2053 * Now that all SPs are loaded, check through the boot order values, 2054 * and warn in case there is a non-unique value. 2055 */ 2056 TAILQ_FOREACH(s, &open_sp_sessions, link) { 2057 /* User specified boot-order values are uint16 */ 2058 if (s->boot_order > UINT16_MAX) 2059 break; 2060 2061 if (prev_sp && prev_sp->boot_order == s->boot_order) 2062 IMSG("WARNING: duplicated boot-order (%pUl vs %pUl)", 2063 &prev_sp->ts_sess.ctx->uuid, 2064 &s->ts_sess.ctx->uuid); 2065 2066 prev_sp = s; 2067 } 2068 2069 /* Continue the initialization and run the SP */ 2070 TAILQ_FOREACH(s, &open_sp_sessions, link) { 2071 DMSG("Starting SP: 0x%"PRIx16, s->endpoint_id); 2072 2073 res = sp_first_run(s); 2074 if (res != TEE_SUCCESS) { 2075 EMSG("Failed starting SP(0x%"PRIx16") err:%#"PRIx32, 2076 s->endpoint_id, res); 2077 if (!IS_ENABLED(CFG_SP_SKIP_FAILED)) 2078 panic(); 2079 } 2080 } 2081 2082 return TEE_SUCCESS; 2083 } 2084 2085 boot_final(sp_init_all); 2086 2087 static TEE_Result secure_partition_open(const TEE_UUID *uuid, 2088 struct ts_store_handle **h) 2089 { 2090 return emb_ts_open(uuid, h, find_secure_partition); 2091 } 2092 2093 REGISTER_SP_STORE(2) = { 2094 .description = "SP store", 2095 .open = secure_partition_open, 2096 .get_size = emb_ts_get_size, 2097 .get_tag = emb_ts_get_tag, 2098 .read = emb_ts_read, 2099 .close = emb_ts_close, 2100 }; 2101