1 // SPDX-License-Identifier: BSD-2-Clause 2 /* 3 * Copyright (c) 2020-2024, Arm Limited. 4 */ 5 #include <crypto/crypto.h> 6 #include <initcall.h> 7 #include <kernel/boot.h> 8 #include <kernel/embedded_ts.h> 9 #include <kernel/ldelf_loader.h> 10 #include <kernel/secure_partition.h> 11 #include <kernel/spinlock.h> 12 #include <kernel/spmc_sp_handler.h> 13 #include <kernel/thread_private.h> 14 #include <kernel/thread_spmc.h> 15 #include <kernel/tpm.h> 16 #include <kernel/ts_store.h> 17 #include <ldelf.h> 18 #include <libfdt.h> 19 #include <mm/core_mmu.h> 20 #include <mm/fobj.h> 21 #include <mm/mobj.h> 22 #include <mm/phys_mem.h> 23 #include <mm/vm.h> 24 #include <optee_ffa.h> 25 #include <stdio.h> 26 #include <string.h> 27 #include <tee/uuid.h> 28 #include <tee_api_types.h> 29 #include <trace.h> 30 #include <types_ext.h> 31 #include <utee_defines.h> 32 #include <util.h> 33 #include <zlib.h> 34 35 #define BOUNCE_BUFFER_SIZE 4096 36 37 #define UNDEFINED_BOOT_ORDER_VALUE UINT32_MAX 38 39 #define SP_MANIFEST_ATTR_READ BIT(0) 40 #define SP_MANIFEST_ATTR_WRITE BIT(1) 41 #define SP_MANIFEST_ATTR_EXEC BIT(2) 42 #define SP_MANIFEST_ATTR_NSEC BIT(3) 43 #define SP_MANIFEST_ATTR_GP BIT(4) 44 45 #define SP_MANIFEST_ATTR_RO (SP_MANIFEST_ATTR_READ) 46 #define SP_MANIFEST_ATTR_RW (SP_MANIFEST_ATTR_READ | \ 47 SP_MANIFEST_ATTR_WRITE) 48 #define SP_MANIFEST_ATTR_RX (SP_MANIFEST_ATTR_READ | \ 49 SP_MANIFEST_ATTR_EXEC) 50 #define SP_MANIFEST_ATTR_RWX (SP_MANIFEST_ATTR_READ | \ 51 SP_MANIFEST_ATTR_WRITE | \ 52 SP_MANIFEST_ATTR_EXEC) 53 54 #define SP_MANIFEST_FLAG_NOBITS BIT(0) 55 56 #define SP_MANIFEST_NS_INT_QUEUED (0x0) 57 #define SP_MANIFEST_NS_INT_MANAGED_EXIT (0x1) 58 #define SP_MANIFEST_NS_INT_SIGNALED (0x2) 59 60 #define SP_MANIFEST_EXEC_STATE_AARCH64 (0x0) 61 #define SP_MANIFEST_EXEC_STATE_AARCH32 (0x1) 62 63 #define SP_MANIFEST_DIRECT_REQ_RECEIVE BIT(0) 64 #define SP_MANIFEST_DIRECT_REQ_SEND BIT(1) 65 #define SP_MANIFEST_INDIRECT_REQ BIT(2) 66 67 #define 
SP_MANIFEST_VM_CREATED_MSG BIT(0) 68 #define SP_MANIFEST_VM_DESTROYED_MSG BIT(1) 69 70 #define SP_PKG_HEADER_MAGIC (0x474b5053) 71 #define SP_PKG_HEADER_VERSION_V1 (0x1) 72 #define SP_PKG_HEADER_VERSION_V2 (0x2) 73 74 struct sp_pkg_header { 75 uint32_t magic; 76 uint32_t version; 77 uint32_t pm_offset; 78 uint32_t pm_size; 79 uint32_t img_offset; 80 uint32_t img_size; 81 }; 82 83 struct fip_sp_head fip_sp_list = STAILQ_HEAD_INITIALIZER(fip_sp_list); 84 85 static const struct ts_ops sp_ops; 86 87 /* List that holds all of the loaded SP's */ 88 static struct sp_sessions_head open_sp_sessions = 89 TAILQ_HEAD_INITIALIZER(open_sp_sessions); 90 91 static const struct embedded_ts *find_secure_partition(const TEE_UUID *uuid) 92 { 93 const struct sp_image *sp = NULL; 94 const struct fip_sp *fip_sp = NULL; 95 96 for_each_secure_partition(sp) { 97 if (!memcmp(&sp->image.uuid, uuid, sizeof(*uuid))) 98 return &sp->image; 99 } 100 101 for_each_fip_sp(fip_sp) { 102 if (!memcmp(&fip_sp->sp_img.image.uuid, uuid, sizeof(*uuid))) 103 return &fip_sp->sp_img.image; 104 } 105 106 return NULL; 107 } 108 109 bool is_sp_ctx(struct ts_ctx *ctx) 110 { 111 return ctx && (ctx->ops == &sp_ops); 112 } 113 114 static void set_sp_ctx_ops(struct ts_ctx *ctx) 115 { 116 ctx->ops = &sp_ops; 117 } 118 119 struct sp_session *sp_get_session(uint32_t session_id) 120 { 121 struct sp_session *s = NULL; 122 123 TAILQ_FOREACH(s, &open_sp_sessions, link) { 124 if (s->endpoint_id == session_id) 125 return s; 126 } 127 128 return NULL; 129 } 130 131 TEE_Result sp_partition_info_get(uint32_t ffa_vers, void *buf, size_t buf_size, 132 const uint32_t ffa_uuid_words[4], 133 size_t *elem_count, bool count_only) 134 { 135 TEE_Result res = TEE_SUCCESS; 136 struct sp_session *s = NULL; 137 TEE_UUID uuid = { }; 138 TEE_UUID *ffa_uuid = NULL; 139 140 if (ffa_uuid_words) { 141 tee_uuid_from_octets(&uuid, (void *)ffa_uuid_words); 142 ffa_uuid = &uuid; 143 } 144 145 TAILQ_FOREACH(s, &open_sp_sessions, link) { 146 if (ffa_uuid 
&& 147 memcmp(&s->ffa_uuid, ffa_uuid, sizeof(*ffa_uuid))) 148 continue; 149 150 if (s->state == sp_dead) 151 continue; 152 if (!count_only && !res) { 153 uint32_t uuid_words[4] = { 0 }; 154 155 tee_uuid_to_octets((uint8_t *)uuid_words, &s->ffa_uuid); 156 res = spmc_fill_partition_entry(ffa_vers, buf, buf_size, 157 *elem_count, 158 s->endpoint_id, 1, 159 s->props, uuid_words); 160 } 161 *elem_count += 1; 162 } 163 164 return res; 165 } 166 167 bool sp_has_exclusive_access(struct sp_mem_map_region *mem, 168 struct user_mode_ctx *uctx) 169 { 170 /* 171 * Check that we have access to the region if it is supposed to be 172 * mapped to the current context. 173 */ 174 if (uctx) { 175 struct vm_region *region = NULL; 176 177 /* Make sure that each mobj belongs to the SP */ 178 TAILQ_FOREACH(region, &uctx->vm_info.regions, link) { 179 if (region->mobj == mem->mobj) 180 break; 181 } 182 183 if (!region) 184 return false; 185 } 186 187 /* Check that it is not shared with another SP */ 188 return !sp_mem_is_shared(mem); 189 } 190 191 static bool endpoint_id_is_valid(uint32_t id) 192 { 193 /* 194 * These IDs are assigned at the SPMC init so already have valid values 195 * by the time this function gets first called 196 */ 197 return !spmc_is_reserved_id(id) && !spmc_find_lsp_by_sp_id(id) && 198 id >= FFA_SWD_ID_MIN && id <= FFA_SWD_ID_MAX; 199 } 200 201 static TEE_Result new_session_id(uint16_t *endpoint_id) 202 { 203 uint32_t id = 0; 204 205 /* Find the first available endpoint id */ 206 for (id = FFA_SWD_ID_MIN; id <= FFA_SWD_ID_MAX; id++) { 207 if (endpoint_id_is_valid(id) && !sp_get_session(id)) { 208 *endpoint_id = id; 209 return TEE_SUCCESS; 210 } 211 } 212 213 return TEE_ERROR_BAD_FORMAT; 214 } 215 216 static TEE_Result sp_create_ctx(const TEE_UUID *bin_uuid, struct sp_session *s) 217 { 218 TEE_Result res = TEE_SUCCESS; 219 struct sp_ctx *spc = NULL; 220 221 /* Register context */ 222 spc = calloc(1, sizeof(struct sp_ctx)); 223 if (!spc) 224 return 
TEE_ERROR_OUT_OF_MEMORY; 225 226 spc->open_session = s; 227 s->ts_sess.ctx = &spc->ts_ctx; 228 spc->ts_ctx.uuid = *bin_uuid; 229 230 res = vm_info_init(&spc->uctx, &spc->ts_ctx); 231 if (res) 232 goto err; 233 234 set_sp_ctx_ops(&spc->ts_ctx); 235 236 #ifdef CFG_TA_PAUTH 237 crypto_rng_read(&spc->uctx.keys, sizeof(spc->uctx.keys)); 238 #endif 239 240 return TEE_SUCCESS; 241 242 err: 243 free(spc); 244 return res; 245 } 246 247 /* 248 * Insert a new sp_session to the sessions list, so that it is ordered 249 * by boot_order. 250 */ 251 static void insert_session_ordered(struct sp_sessions_head *open_sessions, 252 struct sp_session *session) 253 { 254 struct sp_session *s = NULL; 255 256 if (!open_sessions || !session) 257 return; 258 259 TAILQ_FOREACH(s, &open_sp_sessions, link) { 260 if (s->boot_order > session->boot_order) 261 break; 262 } 263 264 if (!s) 265 TAILQ_INSERT_TAIL(open_sessions, session, link); 266 else 267 TAILQ_INSERT_BEFORE(s, session, link); 268 } 269 270 static TEE_Result sp_create_session(struct sp_sessions_head *open_sessions, 271 const TEE_UUID *bin_uuid, 272 const uint32_t boot_order, 273 struct sp_session **sess) 274 { 275 TEE_Result res = TEE_SUCCESS; 276 struct sp_session *s = calloc(1, sizeof(struct sp_session)); 277 278 if (!s) 279 return TEE_ERROR_OUT_OF_MEMORY; 280 281 s->boot_order = boot_order; 282 283 /* Other properties are filled later, based on the SP's manifest */ 284 s->props = FFA_PART_PROP_IS_PE_ID; 285 286 res = new_session_id(&s->endpoint_id); 287 if (res) 288 goto err; 289 290 DMSG("Loading Secure Partition %pUl", (void *)bin_uuid); 291 res = sp_create_ctx(bin_uuid, s); 292 if (res) 293 goto err; 294 295 insert_session_ordered(open_sessions, s); 296 *sess = s; 297 return TEE_SUCCESS; 298 299 err: 300 free(s); 301 return res; 302 } 303 304 static TEE_Result sp_init_set_registers(struct sp_ctx *ctx) 305 { 306 struct thread_ctx_regs *sp_regs = &ctx->sp_regs; 307 308 memset(sp_regs, 0, sizeof(*sp_regs)); 309 sp_regs->sp = 
ctx->uctx.stack_ptr; 310 sp_regs->pc = ctx->uctx.entry_func; 311 312 return TEE_SUCCESS; 313 } 314 315 TEE_Result sp_map_shared(struct sp_session *s, 316 struct sp_mem_receiver *receiver, 317 struct sp_mem *smem, 318 uint64_t *va) 319 { 320 TEE_Result res = TEE_SUCCESS; 321 struct sp_ctx *ctx = NULL; 322 uint32_t perm = TEE_MATTR_UR; 323 struct sp_mem_map_region *reg = NULL; 324 325 ctx = to_sp_ctx(s->ts_sess.ctx); 326 327 /* Get the permission */ 328 if (receiver->perm.perm & FFA_MEM_ACC_EXE) 329 perm |= TEE_MATTR_UX; 330 331 if (receiver->perm.perm & FFA_MEM_ACC_RW) { 332 if (receiver->perm.perm & FFA_MEM_ACC_EXE) 333 return TEE_ERROR_ACCESS_CONFLICT; 334 335 perm |= TEE_MATTR_UW; 336 } 337 /* 338 * Currently we don't support passing a va. We can't guarantee that the 339 * full region will be mapped in a contiguous region. A smem->region can 340 * have multiple mobj for one share. Currently there doesn't seem to be 341 * an option to guarantee that these will be mapped in a contiguous va 342 * space. 
343 */ 344 if (*va) 345 return TEE_ERROR_NOT_SUPPORTED; 346 347 SLIST_FOREACH(reg, &smem->regions, link) { 348 res = vm_map(&ctx->uctx, va, reg->page_count * SMALL_PAGE_SIZE, 349 perm, 0, reg->mobj, reg->page_offset); 350 351 if (res != TEE_SUCCESS) { 352 EMSG("Failed to map memory region %#"PRIx32, res); 353 return res; 354 } 355 } 356 return TEE_SUCCESS; 357 } 358 359 TEE_Result sp_unmap_ffa_regions(struct sp_session *s, struct sp_mem *smem) 360 { 361 TEE_Result res = TEE_SUCCESS; 362 vaddr_t vaddr = 0; 363 size_t len = 0; 364 struct sp_ctx *ctx = to_sp_ctx(s->ts_sess.ctx); 365 struct sp_mem_map_region *reg = NULL; 366 367 SLIST_FOREACH(reg, &smem->regions, link) { 368 vaddr = (vaddr_t)sp_mem_get_va(&ctx->uctx, reg->page_offset, 369 reg->mobj); 370 len = reg->page_count * SMALL_PAGE_SIZE; 371 372 res = vm_unmap(&ctx->uctx, vaddr, len); 373 if (res != TEE_SUCCESS) 374 return res; 375 } 376 377 return TEE_SUCCESS; 378 } 379 380 static TEE_Result sp_dt_get_u64(const void *fdt, int node, const char *property, 381 uint64_t *value) 382 { 383 const fdt64_t *p = NULL; 384 int len = 0; 385 386 p = fdt_getprop(fdt, node, property, &len); 387 if (!p) 388 return TEE_ERROR_ITEM_NOT_FOUND; 389 390 if (len != sizeof(*p)) 391 return TEE_ERROR_BAD_FORMAT; 392 393 *value = fdt64_ld(p); 394 395 return TEE_SUCCESS; 396 } 397 398 static TEE_Result sp_dt_get_u32(const void *fdt, int node, const char *property, 399 uint32_t *value) 400 { 401 const fdt32_t *p = NULL; 402 int len = 0; 403 404 p = fdt_getprop(fdt, node, property, &len); 405 if (!p) 406 return TEE_ERROR_ITEM_NOT_FOUND; 407 408 if (len != sizeof(*p)) 409 return TEE_ERROR_BAD_FORMAT; 410 411 *value = fdt32_to_cpu(*p); 412 413 return TEE_SUCCESS; 414 } 415 416 static TEE_Result sp_dt_get_u16(const void *fdt, int node, const char *property, 417 uint16_t *value) 418 { 419 const fdt16_t *p = NULL; 420 int len = 0; 421 422 p = fdt_getprop(fdt, node, property, &len); 423 if (!p) 424 return TEE_ERROR_ITEM_NOT_FOUND; 425 426 if (len 
!= sizeof(*p)) 427 return TEE_ERROR_BAD_FORMAT; 428 429 *value = fdt16_to_cpu(*p); 430 431 return TEE_SUCCESS; 432 } 433 434 static TEE_Result sp_dt_get_uuid(const void *fdt, int node, 435 const char *property, TEE_UUID *uuid) 436 { 437 uint32_t uuid_array[4] = { 0 }; 438 const fdt32_t *p = NULL; 439 int len = 0; 440 int i = 0; 441 442 p = fdt_getprop(fdt, node, property, &len); 443 if (!p) 444 return TEE_ERROR_ITEM_NOT_FOUND; 445 446 if (len != sizeof(TEE_UUID)) 447 return TEE_ERROR_BAD_FORMAT; 448 449 for (i = 0; i < 4; i++) 450 uuid_array[i] = fdt32_to_cpu(p[i]); 451 452 tee_uuid_from_octets(uuid, (uint8_t *)uuid_array); 453 454 return TEE_SUCCESS; 455 } 456 457 static TEE_Result sp_is_elf_format(const void *fdt, int sp_node, 458 bool *is_elf_format) 459 { 460 TEE_Result res = TEE_SUCCESS; 461 uint32_t elf_format = 0; 462 463 res = sp_dt_get_u32(fdt, sp_node, "elf-format", &elf_format); 464 if (res != TEE_SUCCESS && res != TEE_ERROR_ITEM_NOT_FOUND) 465 return res; 466 467 *is_elf_format = (elf_format != 0); 468 469 return TEE_SUCCESS; 470 } 471 472 static TEE_Result sp_binary_open(const TEE_UUID *uuid, 473 const struct ts_store_ops **ops, 474 struct ts_store_handle **handle) 475 { 476 TEE_Result res = TEE_ERROR_ITEM_NOT_FOUND; 477 478 SCATTERED_ARRAY_FOREACH(*ops, sp_stores, struct ts_store_ops) { 479 res = (*ops)->open(uuid, handle); 480 if (res != TEE_ERROR_ITEM_NOT_FOUND && 481 res != TEE_ERROR_STORAGE_NOT_AVAILABLE) 482 break; 483 } 484 485 return res; 486 } 487 488 static TEE_Result load_binary_sp(struct ts_session *s, 489 struct user_mode_ctx *uctx) 490 { 491 size_t bin_size = 0, bin_size_rounded = 0, bin_page_count = 0; 492 size_t bb_size = ROUNDUP(BOUNCE_BUFFER_SIZE, SMALL_PAGE_SIZE); 493 size_t bb_num_pages = bb_size / SMALL_PAGE_SIZE; 494 const struct ts_store_ops *store_ops = NULL; 495 struct ts_store_handle *handle = NULL; 496 TEE_Result res = TEE_SUCCESS; 497 tee_mm_entry_t *mm = NULL; 498 struct fobj *fobj = NULL; 499 struct mobj *mobj = NULL; 500 
uaddr_t base_addr = 0; 501 uint32_t vm_flags = 0; 502 unsigned int idx = 0; 503 vaddr_t va = 0; 504 505 if (!s || !uctx) 506 return TEE_ERROR_BAD_PARAMETERS; 507 508 DMSG("Loading raw binary format SP %pUl", &uctx->ts_ctx->uuid); 509 510 /* Initialize the bounce buffer */ 511 fobj = fobj_sec_mem_alloc(bb_num_pages); 512 mobj = mobj_with_fobj_alloc(fobj, NULL, TEE_MATTR_MEM_TYPE_TAGGED); 513 fobj_put(fobj); 514 if (!mobj) 515 return TEE_ERROR_OUT_OF_MEMORY; 516 517 res = vm_map(uctx, &va, bb_size, TEE_MATTR_PRW, 0, mobj, 0); 518 mobj_put(mobj); 519 if (res) 520 return res; 521 522 uctx->bbuf = (uint8_t *)va; 523 uctx->bbuf_size = BOUNCE_BUFFER_SIZE; 524 525 vm_set_ctx(uctx->ts_ctx); 526 527 /* Find TS store and open SP binary */ 528 res = sp_binary_open(&uctx->ts_ctx->uuid, &store_ops, &handle); 529 if (res != TEE_SUCCESS) { 530 EMSG("Failed to open SP binary"); 531 return res; 532 } 533 534 /* Query binary size and calculate page count */ 535 res = store_ops->get_size(handle, &bin_size); 536 if (res != TEE_SUCCESS) 537 goto err; 538 539 if (ROUNDUP_OVERFLOW(bin_size, SMALL_PAGE_SIZE, &bin_size_rounded)) { 540 res = TEE_ERROR_OVERFLOW; 541 goto err; 542 } 543 544 bin_page_count = bin_size_rounded / SMALL_PAGE_SIZE; 545 546 /* Allocate memory */ 547 mm = phys_mem_ta_alloc(bin_size_rounded); 548 if (!mm) { 549 res = TEE_ERROR_OUT_OF_MEMORY; 550 goto err; 551 } 552 553 base_addr = tee_mm_get_smem(mm); 554 555 /* Create mobj */ 556 mobj = sp_mem_new_mobj(bin_page_count, TEE_MATTR_MEM_TYPE_CACHED, true); 557 if (!mobj) { 558 res = TEE_ERROR_OUT_OF_MEMORY; 559 goto err_free_tee_mm; 560 } 561 562 res = sp_mem_add_pages(mobj, &idx, base_addr, bin_page_count); 563 if (res) 564 goto err_free_mobj; 565 566 /* Map memory area for the SP binary */ 567 va = 0; 568 res = vm_map(uctx, &va, bin_size_rounded, TEE_MATTR_URWX, 569 vm_flags, mobj, 0); 570 if (res) 571 goto err_free_mobj; 572 573 /* Read SP binary into the previously mapped memory area */ 574 res = 
store_ops->read(handle, NULL, (void *)va, bin_size); 575 if (res) 576 goto err_unmap; 577 578 /* Set memory protection to allow execution */ 579 res = vm_set_prot(uctx, va, bin_size_rounded, TEE_MATTR_UX); 580 if (res) 581 goto err_unmap; 582 583 mobj_put(mobj); 584 store_ops->close(handle); 585 586 /* The entry point must be at the beginning of the SP binary. */ 587 uctx->entry_func = va; 588 uctx->load_addr = va; 589 uctx->is_32bit = false; 590 591 s->handle_scall = s->ctx->ops->handle_scall; 592 593 return TEE_SUCCESS; 594 595 err_unmap: 596 vm_unmap(uctx, va, bin_size_rounded); 597 598 err_free_mobj: 599 mobj_put(mobj); 600 601 err_free_tee_mm: 602 tee_mm_free(mm); 603 604 err: 605 store_ops->close(handle); 606 607 return res; 608 } 609 610 static TEE_Result sp_open_session(struct sp_session **sess, 611 struct sp_sessions_head *open_sessions, 612 const TEE_UUID *ffa_uuid, 613 const TEE_UUID *bin_uuid, 614 const uint32_t boot_order, 615 const void *fdt) 616 { 617 TEE_Result res = TEE_SUCCESS; 618 struct sp_session *s = NULL; 619 struct sp_ctx *ctx = NULL; 620 bool is_elf_format = false; 621 622 if (!find_secure_partition(bin_uuid)) 623 return TEE_ERROR_ITEM_NOT_FOUND; 624 625 res = sp_create_session(open_sessions, bin_uuid, boot_order, &s); 626 if (res != TEE_SUCCESS) { 627 DMSG("sp_create_session failed %#"PRIx32, res); 628 return res; 629 } 630 631 ctx = to_sp_ctx(s->ts_sess.ctx); 632 assert(ctx); 633 if (!ctx) 634 return TEE_ERROR_TARGET_DEAD; 635 *sess = s; 636 637 ts_push_current_session(&s->ts_sess); 638 639 res = sp_is_elf_format(fdt, 0, &is_elf_format); 640 if (res == TEE_SUCCESS) { 641 if (is_elf_format) { 642 /* Load the SP using ldelf. 
*/ 643 ldelf_load_ldelf(&ctx->uctx); 644 res = ldelf_init_with_ldelf(&s->ts_sess, &ctx->uctx); 645 } else { 646 /* Raw binary format SP */ 647 res = load_binary_sp(&s->ts_sess, &ctx->uctx); 648 } 649 } else { 650 EMSG("Failed to detect SP format"); 651 } 652 653 if (res != TEE_SUCCESS) { 654 EMSG("Failed loading SP %#"PRIx32, res); 655 ts_pop_current_session(); 656 return TEE_ERROR_TARGET_DEAD; 657 } 658 659 /* 660 * Make the SP ready for its first run. 661 * Set state to busy to prevent other endpoints from sending messages to 662 * the SP before its boot phase is done. 663 */ 664 s->state = sp_busy; 665 s->caller_id = 0; 666 sp_init_set_registers(ctx); 667 memcpy(&s->ffa_uuid, ffa_uuid, sizeof(*ffa_uuid)); 668 ts_pop_current_session(); 669 670 return TEE_SUCCESS; 671 } 672 673 static TEE_Result fdt_get_uuid(const void * const fdt, TEE_UUID *uuid) 674 { 675 const struct fdt_property *description = NULL; 676 int description_name_len = 0; 677 678 if (fdt_node_check_compatible(fdt, 0, "arm,ffa-manifest-1.0")) { 679 EMSG("Failed loading SP, manifest not found"); 680 return TEE_ERROR_BAD_PARAMETERS; 681 } 682 683 description = fdt_get_property(fdt, 0, "description", 684 &description_name_len); 685 if (description) 686 DMSG("Loading SP: %s", description->data); 687 688 if (sp_dt_get_uuid(fdt, 0, "uuid", uuid)) { 689 EMSG("Missing or invalid UUID in SP manifest"); 690 return TEE_ERROR_BAD_FORMAT; 691 } 692 693 return TEE_SUCCESS; 694 } 695 696 static TEE_Result copy_and_map_fdt(struct sp_ctx *ctx, const void * const fdt, 697 void **fdt_copy, size_t *mapped_size) 698 { 699 size_t total_size = ROUNDUP(fdt_totalsize(fdt), SMALL_PAGE_SIZE); 700 size_t num_pages = total_size / SMALL_PAGE_SIZE; 701 uint32_t perm = TEE_MATTR_UR | TEE_MATTR_PRW; 702 TEE_Result res = TEE_SUCCESS; 703 struct mobj *m = NULL; 704 struct fobj *f = NULL; 705 vaddr_t va = 0; 706 707 f = fobj_sec_mem_alloc(num_pages); 708 m = mobj_with_fobj_alloc(f, NULL, TEE_MATTR_MEM_TYPE_TAGGED); 709 fobj_put(f); 710 
if (!m) 711 return TEE_ERROR_OUT_OF_MEMORY; 712 713 res = vm_map(&ctx->uctx, &va, total_size, perm, 0, m, 0); 714 mobj_put(m); 715 if (res) 716 return res; 717 718 if (fdt_open_into(fdt, (void *)va, total_size)) 719 return TEE_ERROR_GENERIC; 720 721 *fdt_copy = (void *)va; 722 *mapped_size = total_size; 723 724 return res; 725 } 726 727 static void fill_boot_info_1_0(vaddr_t buf, const void *fdt) 728 { 729 struct ffa_boot_info_1_0 *info = (struct ffa_boot_info_1_0 *)buf; 730 static const char fdt_name[16] = "TYPE_DT\0\0\0\0\0\0\0\0"; 731 732 memcpy(&info->magic, "FF-A", 4); 733 info->count = 1; 734 735 COMPILE_TIME_ASSERT(sizeof(info->nvp[0].name) == sizeof(fdt_name)); 736 memcpy(info->nvp[0].name, fdt_name, sizeof(fdt_name)); 737 info->nvp[0].value = (uintptr_t)fdt; 738 info->nvp[0].size = fdt_totalsize(fdt); 739 } 740 741 static void fill_boot_info_1_1(vaddr_t buf, const void *fdt, uint32_t vers) 742 { 743 size_t desc_offs = ROUNDUP(sizeof(struct ffa_boot_info_header_1_1), 8); 744 struct ffa_boot_info_header_1_1 *header = 745 (struct ffa_boot_info_header_1_1 *)buf; 746 struct ffa_boot_info_1_1 *desc = 747 (struct ffa_boot_info_1_1 *)(buf + desc_offs); 748 749 header->signature = FFA_BOOT_INFO_SIGNATURE; 750 header->version = vers; 751 header->blob_size = desc_offs + sizeof(struct ffa_boot_info_1_1); 752 header->desc_size = sizeof(struct ffa_boot_info_1_1); 753 header->desc_count = 1; 754 header->desc_offset = desc_offs; 755 756 memset(&desc[0].name, 0, sizeof(desc[0].name)); 757 /* Type: Standard boot info (bit[7] == 0), FDT type */ 758 desc[0].type = FFA_BOOT_INFO_TYPE_ID_FDT; 759 /* Flags: Contents field contains an address */ 760 desc[0].flags = FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_ADDR << 761 FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_SHIFT; 762 desc[0].size = fdt_totalsize(fdt); 763 desc[0].contents = (uintptr_t)fdt; 764 } 765 766 static TEE_Result create_and_map_boot_info(struct sp_ctx *ctx, const void *fdt, 767 struct thread_smc_1_2_regs *args, 768 vaddr_t *va, size_t 
*mapped_size, 769 uint32_t sp_ffa_version) 770 { 771 size_t total_size = ROUNDUP(CFG_SP_INIT_INFO_MAX_SIZE, SMALL_PAGE_SIZE); 772 size_t num_pages = total_size / SMALL_PAGE_SIZE; 773 uint32_t perm = TEE_MATTR_UR | TEE_MATTR_PRW; 774 TEE_Result res = TEE_SUCCESS; 775 struct fobj *f = NULL; 776 struct mobj *m = NULL; 777 uint32_t info_reg = 0; 778 779 f = fobj_sec_mem_alloc(num_pages); 780 m = mobj_with_fobj_alloc(f, NULL, TEE_MATTR_MEM_TYPE_TAGGED); 781 fobj_put(f); 782 if (!m) 783 return TEE_ERROR_OUT_OF_MEMORY; 784 785 res = vm_map(&ctx->uctx, va, total_size, perm, 0, m, 0); 786 mobj_put(m); 787 if (res) 788 return res; 789 790 *mapped_size = total_size; 791 792 switch (sp_ffa_version) { 793 case MAKE_FFA_VERSION(1, 0): 794 fill_boot_info_1_0(*va, fdt); 795 break; 796 case MAKE_FFA_VERSION(1, 1): 797 case MAKE_FFA_VERSION(1, 2): 798 fill_boot_info_1_1(*va, fdt, sp_ffa_version); 799 break; 800 default: 801 EMSG("Unknown FF-A version: %#"PRIx32, sp_ffa_version); 802 return TEE_ERROR_NOT_SUPPORTED; 803 } 804 805 res = sp_dt_get_u32(fdt, 0, "gp-register-num", &info_reg); 806 if (res) { 807 if (res == TEE_ERROR_ITEM_NOT_FOUND) { 808 /* If the property is not present, set default to x0 */ 809 info_reg = 0; 810 } else { 811 return TEE_ERROR_BAD_FORMAT; 812 } 813 } 814 815 switch (info_reg) { 816 case 0: 817 args->a0 = *va; 818 break; 819 case 1: 820 args->a1 = *va; 821 break; 822 case 2: 823 args->a2 = *va; 824 break; 825 case 3: 826 args->a3 = *va; 827 break; 828 default: 829 EMSG("Invalid register selected for passing boot info"); 830 return TEE_ERROR_BAD_FORMAT; 831 } 832 833 return TEE_SUCCESS; 834 } 835 836 static TEE_Result handle_fdt_load_relative_mem_regions(struct sp_ctx *ctx, 837 const void *fdt) 838 { 839 int node = 0; 840 int subnode = 0; 841 tee_mm_entry_t *mm = NULL; 842 TEE_Result res = TEE_SUCCESS; 843 844 /* 845 * Memory regions are optional in the SP manifest, it's not an error if 846 * we don't find any. 
847 */ 848 node = fdt_node_offset_by_compatible(fdt, 0, 849 "arm,ffa-manifest-memory-regions"); 850 if (node < 0) 851 return TEE_SUCCESS; 852 853 fdt_for_each_subnode(subnode, fdt, node) { 854 uint64_t load_rel_offset = 0; 855 uint32_t attributes = 0; 856 uint64_t base_addr = 0; 857 uint32_t pages_cnt = 0; 858 uint32_t flags = 0; 859 uint32_t perm = 0; 860 size_t size = 0; 861 vaddr_t va = 0; 862 863 mm = NULL; 864 865 /* Load address relative offset of a memory region */ 866 if (!sp_dt_get_u64(fdt, subnode, "load-address-relative-offset", 867 &load_rel_offset)) { 868 va = ctx->uctx.load_addr + load_rel_offset; 869 } else { 870 /* Skip non load address relative memory regions */ 871 continue; 872 } 873 874 if (!sp_dt_get_u64(fdt, subnode, "base-address", &base_addr)) { 875 EMSG("Both base-address and load-address-relative-offset fields are set"); 876 return TEE_ERROR_BAD_FORMAT; 877 } 878 879 /* Size of memory region as count of 4K pages */ 880 if (sp_dt_get_u32(fdt, subnode, "pages-count", &pages_cnt)) { 881 EMSG("Mandatory field is missing: pages-count"); 882 return TEE_ERROR_BAD_FORMAT; 883 } 884 885 if (MUL_OVERFLOW(pages_cnt, SMALL_PAGE_SIZE, &size)) 886 return TEE_ERROR_OVERFLOW; 887 888 /* Memory region attributes */ 889 if (sp_dt_get_u32(fdt, subnode, "attributes", &attributes)) { 890 EMSG("Mandatory field is missing: attributes"); 891 return TEE_ERROR_BAD_FORMAT; 892 } 893 894 /* Check instruction and data access permissions */ 895 switch (attributes & SP_MANIFEST_ATTR_RWX) { 896 case SP_MANIFEST_ATTR_RO: 897 perm = TEE_MATTR_UR; 898 break; 899 case SP_MANIFEST_ATTR_RW: 900 perm = TEE_MATTR_URW; 901 break; 902 case SP_MANIFEST_ATTR_RX: 903 perm = TEE_MATTR_URX; 904 break; 905 default: 906 EMSG("Invalid memory access permissions"); 907 return TEE_ERROR_BAD_FORMAT; 908 } 909 910 if (IS_ENABLED(CFG_TA_BTI) && 911 attributes & SP_MANIFEST_ATTR_GP) { 912 if (!(attributes & SP_MANIFEST_ATTR_RX)) { 913 EMSG("Guard only executable region"); 914 return 
TEE_ERROR_BAD_FORMAT; 915 } 916 perm |= TEE_MATTR_GUARDED; 917 } 918 919 res = sp_dt_get_u32(fdt, subnode, "load-flags", &flags); 920 if (res != TEE_SUCCESS && res != TEE_ERROR_ITEM_NOT_FOUND) { 921 EMSG("Optional field with invalid value: flags"); 922 return TEE_ERROR_BAD_FORMAT; 923 } 924 925 /* Load relative regions must be secure */ 926 if (attributes & SP_MANIFEST_ATTR_NSEC) { 927 EMSG("Invalid memory security attribute"); 928 return TEE_ERROR_BAD_FORMAT; 929 } 930 931 if (flags & SP_MANIFEST_FLAG_NOBITS) { 932 /* 933 * NOBITS flag is set, which means that loaded binary 934 * doesn't contain this area, so it's need to be 935 * allocated. 936 */ 937 struct mobj *m = NULL; 938 unsigned int idx = 0; 939 940 mm = phys_mem_ta_alloc(size); 941 if (!mm) 942 return TEE_ERROR_OUT_OF_MEMORY; 943 944 base_addr = tee_mm_get_smem(mm); 945 946 m = sp_mem_new_mobj(pages_cnt, 947 TEE_MATTR_MEM_TYPE_CACHED, true); 948 if (!m) { 949 res = TEE_ERROR_OUT_OF_MEMORY; 950 goto err_mm_free; 951 } 952 953 res = sp_mem_add_pages(m, &idx, base_addr, pages_cnt); 954 if (res) { 955 mobj_put(m); 956 goto err_mm_free; 957 } 958 959 res = vm_map(&ctx->uctx, &va, size, perm, 0, m, 0); 960 mobj_put(m); 961 if (res) 962 goto err_mm_free; 963 } else { 964 /* 965 * If NOBITS is not present the memory area is already 966 * mapped and only need to set the correct permissions. 
967 */ 968 res = vm_set_prot(&ctx->uctx, va, size, perm); 969 if (res) 970 return res; 971 } 972 } 973 974 return TEE_SUCCESS; 975 976 err_mm_free: 977 tee_mm_free(mm); 978 return res; 979 } 980 981 static TEE_Result handle_fdt_dev_regions(struct sp_ctx *ctx, void *fdt) 982 { 983 int node = 0; 984 int subnode = 0; 985 TEE_Result res = TEE_SUCCESS; 986 const char *dt_device_match_table = { 987 "arm,ffa-manifest-device-regions", 988 }; 989 990 /* 991 * Device regions are optional in the SP manifest, it's not an error if 992 * we don't find any 993 */ 994 node = fdt_node_offset_by_compatible(fdt, 0, dt_device_match_table); 995 if (node < 0) 996 return TEE_SUCCESS; 997 998 fdt_for_each_subnode(subnode, fdt, node) { 999 uint64_t base_addr = 0; 1000 uint32_t pages_cnt = 0; 1001 uint32_t attributes = 0; 1002 struct mobj *m = NULL; 1003 bool is_secure = true; 1004 uint32_t perm = 0; 1005 vaddr_t va = 0; 1006 unsigned int idx = 0; 1007 1008 /* 1009 * Physical base address of a device MMIO region. 1010 * Currently only physically contiguous region is supported. 
1011 */ 1012 if (sp_dt_get_u64(fdt, subnode, "base-address", &base_addr)) { 1013 EMSG("Mandatory field is missing: base-address"); 1014 return TEE_ERROR_BAD_FORMAT; 1015 } 1016 1017 /* Total size of MMIO region as count of 4K pages */ 1018 if (sp_dt_get_u32(fdt, subnode, "pages-count", &pages_cnt)) { 1019 EMSG("Mandatory field is missing: pages-count"); 1020 return TEE_ERROR_BAD_FORMAT; 1021 } 1022 1023 /* Data access, instruction access and security attributes */ 1024 if (sp_dt_get_u32(fdt, subnode, "attributes", &attributes)) { 1025 EMSG("Mandatory field is missing: attributes"); 1026 return TEE_ERROR_BAD_FORMAT; 1027 } 1028 1029 /* Check instruction and data access permissions */ 1030 switch (attributes & SP_MANIFEST_ATTR_RWX) { 1031 case SP_MANIFEST_ATTR_RO: 1032 perm = TEE_MATTR_UR; 1033 break; 1034 case SP_MANIFEST_ATTR_RW: 1035 perm = TEE_MATTR_URW; 1036 break; 1037 default: 1038 EMSG("Invalid memory access permissions"); 1039 return TEE_ERROR_BAD_FORMAT; 1040 } 1041 1042 /* 1043 * The SP is a secure endpoint, security attribute can be 1044 * secure or non-secure 1045 */ 1046 if (attributes & SP_MANIFEST_ATTR_NSEC) 1047 is_secure = false; 1048 1049 /* Memory attributes must be Device-nGnRnE */ 1050 m = sp_mem_new_mobj(pages_cnt, TEE_MATTR_MEM_TYPE_STRONGLY_O, 1051 is_secure); 1052 if (!m) 1053 return TEE_ERROR_OUT_OF_MEMORY; 1054 1055 res = sp_mem_add_pages(m, &idx, (paddr_t)base_addr, pages_cnt); 1056 if (res) { 1057 mobj_put(m); 1058 return res; 1059 } 1060 1061 res = vm_map(&ctx->uctx, &va, pages_cnt * SMALL_PAGE_SIZE, 1062 perm, 0, m, 0); 1063 mobj_put(m); 1064 if (res) 1065 return res; 1066 1067 /* 1068 * Overwrite the device region's PA in the fdt with the VA. This 1069 * fdt will be passed to the SP. 1070 */ 1071 res = fdt_setprop_u64(fdt, subnode, "base-address", va); 1072 1073 /* 1074 * Unmap the region if the overwrite failed since the SP won't 1075 * be able to access it without knowing the VA. 
1076 */ 1077 if (res) { 1078 vm_unmap(&ctx->uctx, va, pages_cnt * SMALL_PAGE_SIZE); 1079 return res; 1080 } 1081 } 1082 1083 return TEE_SUCCESS; 1084 } 1085 1086 static TEE_Result swap_sp_endpoints(uint32_t endpoint_id, 1087 uint32_t new_endpoint_id) 1088 { 1089 struct sp_session *session = sp_get_session(endpoint_id); 1090 uint32_t manifest_endpoint_id = 0; 1091 1092 /* 1093 * We don't know in which order the SPs are loaded. The endpoint ID 1094 * defined in the manifest could already be generated by 1095 * new_session_id() and used by another SP. If this is the case, we swap 1096 * the ID's of the two SPs. We also have to make sure that the ID's are 1097 * not defined twice in the manifest. 1098 */ 1099 1100 /* The endpoint ID was not assigned yet */ 1101 if (!session) 1102 return TEE_SUCCESS; 1103 1104 /* 1105 * Read the manifest file from the SP who originally had the endpoint. 1106 * We can safely swap the endpoint ID's if the manifest file doesn't 1107 * have an endpoint ID defined. 1108 */ 1109 if (!sp_dt_get_u32(session->fdt, 0, "id", &manifest_endpoint_id)) { 1110 assert(manifest_endpoint_id == endpoint_id); 1111 EMSG("SP: Found duplicated endpoint ID %#"PRIx32, endpoint_id); 1112 return TEE_ERROR_ACCESS_CONFLICT; 1113 } 1114 1115 session->endpoint_id = new_endpoint_id; 1116 1117 return TEE_SUCCESS; 1118 } 1119 1120 static TEE_Result read_manifest_endpoint_id(struct sp_session *s) 1121 { 1122 uint32_t endpoint_id = 0; 1123 1124 /* 1125 * The endpoint ID can be optionally defined in the manifest file. We 1126 * have to map the ID inside the manifest to the SP if it's defined. 1127 * If not, the endpoint ID generated inside new_session_id() will be 1128 * used. 
1129 */ 1130 if (!sp_dt_get_u32(s->fdt, 0, "id", &endpoint_id)) { 1131 TEE_Result res = TEE_ERROR_GENERIC; 1132 1133 if (!endpoint_id_is_valid(endpoint_id)) { 1134 EMSG("Invalid endpoint ID 0x%"PRIx32, endpoint_id); 1135 return TEE_ERROR_BAD_FORMAT; 1136 } 1137 1138 res = swap_sp_endpoints(endpoint_id, s->endpoint_id); 1139 if (res) 1140 return res; 1141 1142 DMSG("SP: endpoint ID (0x%"PRIx32") found in manifest", 1143 endpoint_id); 1144 /* Assign the endpoint ID to the current SP */ 1145 s->endpoint_id = endpoint_id; 1146 } 1147 return TEE_SUCCESS; 1148 } 1149 1150 static TEE_Result handle_fdt_mem_regions(struct sp_ctx *ctx, void *fdt) 1151 { 1152 int node = 0; 1153 int subnode = 0; 1154 tee_mm_entry_t *mm = NULL; 1155 TEE_Result res = TEE_SUCCESS; 1156 1157 /* 1158 * Memory regions are optional in the SP manifest, it's not an error if 1159 * we don't find any. 1160 */ 1161 node = fdt_node_offset_by_compatible(fdt, 0, 1162 "arm,ffa-manifest-memory-regions"); 1163 if (node < 0) 1164 return TEE_SUCCESS; 1165 1166 fdt_for_each_subnode(subnode, fdt, node) { 1167 uint64_t load_rel_offset = 0; 1168 bool alloc_needed = false; 1169 uint32_t attributes = 0; 1170 uint64_t base_addr = 0; 1171 uint32_t pages_cnt = 0; 1172 bool is_secure = true; 1173 struct mobj *m = NULL; 1174 unsigned int idx = 0; 1175 uint32_t perm = 0; 1176 size_t size = 0; 1177 vaddr_t va = 0; 1178 1179 mm = NULL; 1180 1181 /* Load address relative offset of a memory region */ 1182 if (!sp_dt_get_u64(fdt, subnode, "load-address-relative-offset", 1183 &load_rel_offset)) { 1184 /* 1185 * At this point the memory region is already mapped by 1186 * handle_fdt_load_relative_mem_regions. 1187 * Only need to set the base-address in the manifest and 1188 * then skip the rest of the mapping process. 
			 */
			va = ctx->uctx.load_addr + load_rel_offset;
			res = fdt_setprop_u64(fdt, subnode, "base-address", va);
			if (res)
				return res;

			continue;
		}

		/*
		 * Base address of a memory region.
		 * If not present, we have to allocate the specified memory.
		 * If present, this field could specify a PA or VA. Currently
		 * only a PA is supported.
		 */
		if (sp_dt_get_u64(fdt, subnode, "base-address", &base_addr))
			alloc_needed = true;

		/* Size of memory region as count of 4K pages */
		if (sp_dt_get_u32(fdt, subnode, "pages-count", &pages_cnt)) {
			EMSG("Mandatory field is missing: pages-count");
			return TEE_ERROR_BAD_FORMAT;
		}

		if (MUL_OVERFLOW(pages_cnt, SMALL_PAGE_SIZE, &size))
			return TEE_ERROR_OVERFLOW;

		/*
		 * Memory region attributes:
		 * - Instruction/data access permissions
		 * - Cacheability/shareability attributes
		 * - Security attributes
		 *
		 * Cacheability/shareability attributes can be ignored for now.
		 * OP-TEE only supports a single type for normal cached memory
		 * and currently there is no use case that would require to
		 * change this.
		 */
		if (sp_dt_get_u32(fdt, subnode, "attributes", &attributes)) {
			EMSG("Mandatory field is missing: attributes");
			return TEE_ERROR_BAD_FORMAT;
		}

		/* Check instruction and data access permissions */
		switch (attributes & SP_MANIFEST_ATTR_RWX) {
		case SP_MANIFEST_ATTR_RO:
			perm = TEE_MATTR_UR;
			break;
		case SP_MANIFEST_ATTR_RW:
			perm = TEE_MATTR_URW;
			break;
		case SP_MANIFEST_ATTR_RX:
			perm = TEE_MATTR_URX;
			break;
		default:
			/* W^X: RWX (and WX) combinations are rejected */
			EMSG("Invalid memory access permissions");
			return TEE_ERROR_BAD_FORMAT;
		}

		/* BTI guard pages only make sense on executable regions */
		if (IS_ENABLED(CFG_TA_BTI) &&
		    attributes & SP_MANIFEST_ATTR_GP) {
			if (!(attributes & SP_MANIFEST_ATTR_RX)) {
				EMSG("Guard only executable region");
				return TEE_ERROR_BAD_FORMAT;
			}
			perm |= TEE_MATTR_GUARDED;
		}

		/*
		 * The SP is a secure endpoint, security attribute can be
		 * secure or non-secure.
		 * The SPMC cannot allocate non-secure memory, i.e. if the base
		 * address is missing this attribute must be secure.
		 */
		if (attributes & SP_MANIFEST_ATTR_NSEC) {
			if (alloc_needed) {
				EMSG("Invalid memory security attribute");
				return TEE_ERROR_BAD_FORMAT;
			}
			is_secure = false;
		}

		if (alloc_needed) {
			/* Base address is missing, we have to allocate */
			mm = phys_mem_ta_alloc(size);
			if (!mm)
				return TEE_ERROR_OUT_OF_MEMORY;

			base_addr = tee_mm_get_smem(mm);
		}

		m = sp_mem_new_mobj(pages_cnt, TEE_MATTR_MEM_TYPE_CACHED,
				    is_secure);
		if (!m) {
			res = TEE_ERROR_OUT_OF_MEMORY;
			goto err_mm_free;
		}

		res = sp_mem_add_pages(m, &idx, base_addr, pages_cnt);
		if (res) {
			mobj_put(m);
			goto err_mm_free;
		}

		/* vm_map() takes its own reference; drop ours unconditionally */
		res = vm_map(&ctx->uctx, &va, size, perm, 0, m, 0);
		mobj_put(m);
		if (res)
			goto err_mm_free;

		/*
		 * Overwrite the memory region's base address in the fdt with
		 * the VA. This fdt will be passed to the SP.
		 * If the base-address field was not present in the original
		 * fdt, this function will create it. This doesn't cause issues
		 * since the necessary extra space has been allocated when
		 * opening the fdt.
		 */
		res = fdt_setprop_u64(fdt, subnode, "base-address", va);

		/*
		 * Unmap the region if the overwrite failed since the SP won't
		 * be able to access it without knowing the VA.
		 */
		if (res) {
			vm_unmap(&ctx->uctx, va, size);
			goto err_mm_free;
		}
	}

	return TEE_SUCCESS;

err_mm_free:
	/* Release this node's physical allocation (no-op when mm is NULL) */
	tee_mm_free(mm);
	return res;
}

/*
 * If the manifest has an "arm,tpm_event_log" node, allocate a secure buffer,
 * copy the TPM event log into it, map it into @ctx's VA space and patch the
 * node's tpm_event_log_addr/tpm_event_log_size properties in @fdt so the SP
 * can find the log.
 */
static TEE_Result handle_tpm_event_log(struct sp_ctx *ctx, void *fdt)
{
	uint32_t perm = TEE_MATTR_URW | TEE_MATTR_PRW;
	uint32_t dummy_size __maybe_unused = 0;
	TEE_Result res = TEE_SUCCESS;
	size_t page_count = 0;
	struct fobj *f = NULL;
	struct mobj *m = NULL;
	vaddr_t log_addr = 0;
	size_t log_size = 0;
	int node = 0;

	node = fdt_node_offset_by_compatible(fdt, 0, "arm,tpm_event_log");
	if (node < 0)
		return TEE_SUCCESS;

	/* Checking the existence and size of the event log properties */
	if (sp_dt_get_u64(fdt, node, "tpm_event_log_addr", &log_addr)) {
		EMSG("tpm_event_log_addr not found or has invalid size");
		return TEE_ERROR_BAD_FORMAT;
	}

	if (sp_dt_get_u32(fdt, node, "tpm_event_log_size", &dummy_size)) {
		EMSG("tpm_event_log_size not found or has invalid size");
		return TEE_ERROR_BAD_FORMAT;
	}

	/* Validating event log */
	res = tpm_get_event_log_size(&log_size);
	if (res)
		return res;

	if (!log_size) {
		EMSG("Empty TPM event log was provided");
		return TEE_ERROR_ITEM_NOT_FOUND;
	}

	/* Allocating memory area for the event log to share with the SP */
	page_count = ROUNDUP_DIV(log_size, SMALL_PAGE_SIZE);

	/* mobj_with_fobj_alloc(NULL, ...) fails, so a failed fobj alloc is caught below */
	f = fobj_sec_mem_alloc(page_count);
	m = mobj_with_fobj_alloc(f, NULL, TEE_MATTR_MEM_TYPE_TAGGED);
	fobj_put(f);
	if (!m)
		return TEE_ERROR_OUT_OF_MEMORY;

	res = vm_map(&ctx->uctx, &log_addr, log_size, perm, 0, m, 0);
	mobj_put(m);
	if (res)
		return res;

	/* Copy event log */
	res = tpm_get_event_log((void *)log_addr, &log_size);
	if (res)
		goto err_unmap;

	/* Setting event log details in the manifest */
	res =
	      fdt_setprop_u64(fdt, node, "tpm_event_log_addr", log_addr);
	if (res)
		goto err_unmap;

	res = fdt_setprop_u32(fdt, node, "tpm_event_log_size", log_size);
	if (res)
		goto err_unmap;

	return TEE_SUCCESS;

err_unmap:
	/* The SP never learned the VA, so the mapping would be unreachable */
	vm_unmap(&ctx->uctx, log_addr, log_size);

	return res;
}

/*
 * Fill in the optional "arm,hw-features" manifest node with the CPU features
 * actually implemented (crc32, bti, pauth). Each property is only written if
 * the manifest already declares it.
 *
 * Note: this function is called only on the primary CPU. It assumes that the
 * features present on the primary CPU are available on all of the secondary
 * CPUs as well.
 */
static TEE_Result handle_hw_features(void *fdt)
{
	uint32_t val __maybe_unused = 0;
	TEE_Result res = TEE_SUCCESS;
	int node = 0;

	/*
	 * HW feature descriptions are optional in the SP manifest, it's not an
	 * error if we don't find any.
	 */
	node = fdt_node_offset_by_compatible(fdt, 0, "arm,hw-features");
	if (node < 0)
		return TEE_SUCCESS;

	/* Modify the crc32 property only if it's already present */
	if (!sp_dt_get_u32(fdt, node, "crc32", &val)) {
		res = fdt_setprop_u32(fdt, node, "crc32",
				      feat_crc32_implemented());
		if (res)
			return res;
	}

	/* Modify the property only if it's already present */
	if (!sp_dt_get_u32(fdt, node, "bti", &val)) {
		res = fdt_setprop_u32(fdt, node, "bti",
				      feat_bti_is_implemented());
		if (res)
			return res;
	}

	/* Modify the property only if it's already present */
	if (!sp_dt_get_u32(fdt, node, "pauth", &val)) {
		res = fdt_setprop_u32(fdt, node, "pauth",
				      feat_pauth_is_implemented());
		if (res)
			return res;
	}

	return TEE_SUCCESS;
}

/*
 * Read the mandatory "ns-interrupts-action" property into @s->ns_int_mode.
 * Only the "queued" and "signaled" actions are supported; "managed exit"
 * is rejected as not implemented.
 */
static TEE_Result read_ns_interrupts_action(const void *fdt,
					    struct sp_session *s)
{
	TEE_Result res = TEE_ERROR_BAD_PARAMETERS;

	res = sp_dt_get_u32(fdt, 0, "ns-interrupts-action", &s->ns_int_mode);

	if (res) {
		EMSG("Mandatory property is missing: ns-interrupts-action");
		return res;
	}

	switch (s->ns_int_mode) {
	case SP_MANIFEST_NS_INT_QUEUED:
	case SP_MANIFEST_NS_INT_SIGNALED:
		/* OK */
		break;

	case SP_MANIFEST_NS_INT_MANAGED_EXIT:
		EMSG("Managed exit is not implemented");
		return TEE_ERROR_NOT_IMPLEMENTED;

	default:
		EMSG("Invalid ns-interrupts-action value: %"PRIu32,
		     s->ns_int_mode);
		return TEE_ERROR_BAD_PARAMETERS;
	}

	return TEE_SUCCESS;
}

/*
 * Read the mandatory "ffa-version" property into @s->rxtx.ffa_vers.
 * Only FF-A v1.0 and v1.1 are accepted.
 */
static TEE_Result read_ffa_version(const void *fdt, struct sp_session *s)
{
	TEE_Result res = TEE_ERROR_BAD_PARAMETERS;
	uint32_t ffa_version = 0;

	res = sp_dt_get_u32(fdt, 0, "ffa-version", &ffa_version);
	if (res) {
		EMSG("Mandatory property is missing: ffa-version");
		return res;
	}

	if (ffa_version != FFA_VERSION_1_0 && ffa_version != FFA_VERSION_1_1) {
		EMSG("Invalid FF-A version value: 0x%08"PRIx32, ffa_version);
		return TEE_ERROR_BAD_PARAMETERS;
	}

	s->rxtx.ffa_vers = ffa_version;

	return TEE_SUCCESS;
}

/*
 * Read the mandatory "execution-state" property and record it in @s->props.
 * Only AArch64 SPs are accepted.
 */
static TEE_Result read_sp_exec_state(const void *fdt, struct sp_session *s)
{
	TEE_Result res = TEE_ERROR_BAD_PARAMETERS;
	uint32_t exec_state = 0;

	res = sp_dt_get_u32(fdt, 0, "execution-state", &exec_state);
	if (res) {
		EMSG("Mandatory property is missing: execution-state");
		return res;
	}

	/* Currently only AArch64 SPs are supported */
	if (exec_state == SP_MANIFEST_EXEC_STATE_AARCH64) {
		s->props |= FFA_PART_PROP_AARCH64_STATE;
	} else {
		EMSG("Invalid execution-state value: %"PRIu32, exec_state);
		return TEE_ERROR_BAD_PARAMETERS;
	}

	return TEE_SUCCESS;
}

/*
 * Read the mandatory "messaging-method" bitmap and translate it into the
 * FFA_PART_PROP_DIRECT_REQ_* partition properties in @s->props. Indirect
 * messaging is reported but not supported.
 */
static TEE_Result read_sp_msg_types(const void *fdt, struct sp_session *s)
{
	TEE_Result res = TEE_ERROR_BAD_PARAMETERS;
	uint32_t msg_method = 0;

	res = sp_dt_get_u32(fdt, 0, "messaging-method", &msg_method);
	if (res) {
		EMSG("Mandatory property is missing: messaging-method");
		return res;
	}

	if (msg_method & SP_MANIFEST_DIRECT_REQ_RECEIVE)
		s->props |= FFA_PART_PROP_DIRECT_REQ_RECV;

	if (msg_method & SP_MANIFEST_DIRECT_REQ_SEND)
		s->props |= FFA_PART_PROP_DIRECT_REQ_SEND;

	if (msg_method & SP_MANIFEST_INDIRECT_REQ)
		IMSG("Indirect messaging is not supported");

	return TEE_SUCCESS;
}

/*
 * Read the optional "vm-availability-messages" bitmap and translate it into
 * the FFA_PART_PROP_NOTIF_{CREATED,DESTROYED} properties in @s->props.
 * An absent property is not an error; unknown bits are.
 */
static TEE_Result read_vm_availability_msg(const void *fdt,
					   struct sp_session *s)
{
	TEE_Result res = TEE_ERROR_BAD_PARAMETERS;
	uint32_t v = 0;

	res = sp_dt_get_u32(fdt, 0, "vm-availability-messages", &v);

	/* This field in the manifest is optional */
	if (res == TEE_ERROR_ITEM_NOT_FOUND)
		return TEE_SUCCESS;

	if (res)
		return res;

	if (v & ~(SP_MANIFEST_VM_CREATED_MSG | SP_MANIFEST_VM_DESTROYED_MSG)) {
		EMSG("Invalid vm-availability-messages value: %"PRIu32, v);
		return TEE_ERROR_BAD_PARAMETERS;
	}

	if (v & SP_MANIFEST_VM_CREATED_MSG)
		s->props |= FFA_PART_PROP_NOTIF_CREATED;

	if (v & SP_MANIFEST_VM_DESTROYED_MSG)
		s->props |= FFA_PART_PROP_NOTIF_DESTROYED;

	return TEE_SUCCESS;
}

/*
 * Read the optional "boot-order" property into @boot_order, accepting both
 * the u32 (FF-A v1.1) and the legacy u16 cell size. An absent property maps
 * to UNDEFINED_BOOT_ORDER_VALUE; a u32 value above UINT16_MAX is rejected.
 */
static TEE_Result get_boot_order(const void *fdt, uint32_t *boot_order)
{
	TEE_Result res = TEE_SUCCESS;

	res = sp_dt_get_u32(fdt, 0, "boot-order", boot_order);

	if (res == TEE_SUCCESS) {
		if (*boot_order > UINT16_MAX) {
			EMSG("Value of boot-order property (%"PRIu32") is out of range",
			     *boot_order);
			res = TEE_ERROR_BAD_FORMAT;
		}
	} else if (res == TEE_ERROR_BAD_FORMAT) {
		/* Wrong cell size for u32: retry as a legacy 16-bit value */
		uint16_t boot_order_u16 = 0;

		res = sp_dt_get_u16(fdt, 0, "boot-order", &boot_order_u16);
		if (res == TEE_SUCCESS)
			*boot_order = boot_order_u16;
	}

	if (res == TEE_ERROR_ITEM_NOT_FOUND)
		*boot_order = UNDEFINED_BOOT_ORDER_VALUE;
	else if (res != TEE_SUCCESS)
		EMSG("Failed reading boot-order property err: %#"PRIx32, res);

	return res;
}

/*
 * Create an SP session for the binary identified by @bin_uuid and parse all
 * the session-level properties (endpoint ID, NS interrupt action, FF-A
 * version, execution state, messaging methods, VM availability messages)
 * from the manifest @fdt.
 */
static TEE_Result sp_init_uuid(const TEE_UUID *bin_uuid, const void * const fdt)
{
	TEE_Result res = TEE_SUCCESS;
	struct sp_session *sess = NULL;
	TEE_UUID ffa_uuid = {};
	uint32_t boot_order = 0;

	res = fdt_get_uuid(fdt, &ffa_uuid);
	if (res)
		return res;

	res = get_boot_order(fdt, &boot_order);
	if (res)
		return res;

	res = sp_open_session(&sess,
			      &open_sp_sessions,
			      &ffa_uuid, bin_uuid, boot_order, fdt);
	if (res)
		return res;

	sess->fdt = fdt;

	res = read_manifest_endpoint_id(sess);
	if (res)
		return res;
	DMSG("endpoint is 0x%"PRIx16, sess->endpoint_id);

	res = read_ns_interrupts_action(fdt, sess);
	if (res)
		return res;

	res = read_ffa_version(fdt, sess);
	if (res)
		return res;

	res = read_sp_exec_state(fdt, sess);
	if (res)
		return res;

	res = read_sp_msg_types(fdt, sess);
	if (res)
		return res;

	res = read_vm_availability_msg(fdt, sess);
	if (res)
		return res;

	return TEE_SUCCESS;
}

/*
 * Perform the first entry into @sess: map the manifest-described regions
 * into the SP's address space, pass it a copy of its manifest and the FF-A
 * boot info, then run the SP's initialization entry point. The boot info and
 * manifest copy are unmapped again regardless of the outcome.
 */
static TEE_Result sp_first_run(struct sp_session *sess)
{
	TEE_Result res = TEE_SUCCESS;
	struct thread_smc_1_2_regs args = { };
	struct sp_ctx *ctx = NULL;
	vaddr_t boot_info_va = 0;
	size_t boot_info_size = 0;
	void *fdt_copy = NULL;
	size_t fdt_size = 0;

	ctx = to_sp_ctx(sess->ts_sess.ctx);
	ts_push_current_session(&sess->ts_sess);
	sess->is_initialized = false;

	/*
	 * Load relative memory regions must be handled before doing any other
	 * mapping to prevent conflicts in the VA space.
	 */
	res = handle_fdt_load_relative_mem_regions(ctx, sess->fdt);
	if (res) {
		ts_pop_current_session();
		return res;
	}

	res = copy_and_map_fdt(ctx, sess->fdt, &fdt_copy, &fdt_size);
	if (res)
		goto out;

	res = handle_fdt_dev_regions(ctx, fdt_copy);
	if (res)
		goto out;

	res = handle_fdt_mem_regions(ctx, fdt_copy);
	if (res)
		goto out;

	if (IS_ENABLED(CFG_CORE_TPM_EVENT_LOG)) {
		res = handle_tpm_event_log(ctx, fdt_copy);
		if (res)
			goto out;
	}

	res = handle_hw_features(fdt_copy);
	if (res)
		goto out;

	res = create_and_map_boot_info(ctx, fdt_copy, &args, &boot_info_va,
				       &boot_info_size, sess->rxtx.ffa_vers);
	if (res)
		goto out;

	/* sp_enter()/spmc_sp_msg_handler() manage the current session themselves */
	ts_pop_current_session();

	res = sp_enter(&args, sess);
	if (res) {
		ts_push_current_session(&sess->ts_sess);
		goto out;
	}

	/* Handle the FF-A message the SP returned from its init run */
	spmc_sp_msg_handler(&args, sess);

	ts_push_current_session(&sess->ts_sess);
	sess->is_initialized = true;

out:
	/* Free the boot info page from the SP memory */
	vm_unmap(&ctx->uctx, boot_info_va, boot_info_size);
	vm_unmap(&ctx->uctx, (vaddr_t)fdt_copy, fdt_size);
	ts_pop_current_session();

	return res;
}

/*
 * Enter SP @sp with the FF-A arguments in @args: load a0-a7 into the SP's
 * saved x0-x7, run the SP via its enter_invoke_cmd hook, then copy the SP's
 * resulting x0-x7 back into @args for the caller.
 */
TEE_Result sp_enter(struct thread_smc_1_2_regs *args, struct sp_session *sp)
{
	TEE_Result res = TEE_SUCCESS;
	struct sp_ctx *ctx = to_sp_ctx(sp->ts_sess.ctx);

	ctx->sp_regs.x[0] = args->a0;
	ctx->sp_regs.x[1] = args->a1;
	ctx->sp_regs.x[2] = args->a2;
	ctx->sp_regs.x[3] = args->a3;
	ctx->sp_regs.x[4] = args->a4;
	ctx->sp_regs.x[5] = args->a5;
	ctx->sp_regs.x[6] = args->a6;
	ctx->sp_regs.x[7] = args->a7;
#ifdef CFG_TA_PAUTH
	/* Install the SP's pointer authentication key before entry */
	ctx->sp_regs.apiakey_hi = ctx->uctx.keys.apia_hi;
	ctx->sp_regs.apiakey_lo = ctx->uctx.keys.apia_lo;
#endif

	res = sp->ts_sess.ctx->ops->enter_invoke_cmd(&sp->ts_sess, 0);

	/* Copy the SP's returned register state back to the FF-A arguments */
	args->a0 = ctx->sp_regs.x[0];
	args->a1 = ctx->sp_regs.x[1];
	args->a2 = ctx->sp_regs.x[2];
	args->a3 = ctx->sp_regs.x[3];
	args->a4 = ctx->sp_regs.x[4];
	args->a5 = ctx->sp_regs.x[5];
	args->a6 = ctx->sp_regs.x[6];
	args->a7 = ctx->sp_regs.x[7];

	return res;
}

/*
 * According to FF-A v1.1 section 8.3.1.4 if a caller requires a less
 * permissive action on NS interrupt than the callee, the callee must inherit
 * the caller's configuration.
 * Each SP's own NS action setting is stored in ns_int_mode. The effective
 * action will be MIN([self action], [caller's action]) which is stored in the
 * ns_int_mode_inherited field.
 */
static void sp_cpsr_configure_foreign_interrupts(struct sp_session *s,
						 struct ts_session *caller,
						 uint64_t *cpsr)
{
	if (caller) {
		struct sp_session *caller_sp = to_sp_session(caller);

		s->ns_int_mode_inherited = MIN(caller_sp->ns_int_mode_inherited,
					       s->ns_int_mode);
	} else {
		s->ns_int_mode_inherited = s->ns_int_mode;
	}

	/* QUEUED: mask foreign interrupts while the SP runs; otherwise unmask */
	if (s->ns_int_mode_inherited == SP_MANIFEST_NS_INT_QUEUED)
		*cpsr |= SHIFT_U32(THREAD_EXCP_FOREIGN_INTR,
				   ARM32_CPSR_F_SHIFT);
	else
		*cpsr &= ~SHIFT_U32(THREAD_EXCP_FOREIGN_INTR,
				    ARM32_CPSR_F_SHIFT);
}

/*
 * ts_ops::enter_invoke_cmd hook for SPs: switch to the SP's user mode
 * context with the register state saved in its sp_ctx and run it until it
 * returns (via SVC) or panics. Returns TEE_ERROR_TARGET_DEAD and marks the
 * session sp_dead on panic.
 */
static TEE_Result sp_enter_invoke_cmd(struct ts_session *s,
				      uint32_t cmd __unused)
{
	struct sp_ctx *ctx = to_sp_ctx(s->ctx);
	TEE_Result res = TEE_SUCCESS;
	uint32_t exceptions = 0;
	struct sp_session *sp_s = to_sp_session(s);
	struct ts_session *sess = NULL;
	struct thread_ctx_regs *sp_regs = NULL;
	uint32_t thread_id = THREAD_ID_INVALID;
	struct ts_session *caller = NULL;
	uint32_t rpc_target_info = 0;
	uint32_t panicked = false;
	uint32_t panic_code = 0;

	sp_regs = &ctx->sp_regs;
	ts_push_current_session(s);

	exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);

	/* Enable/disable foreign interrupts in CPSR/SPSR */
	caller = ts_get_calling_session();
	sp_cpsr_configure_foreign_interrupts(sp_s, caller, &sp_regs->cpsr);

	/*
	 * Store endpoint ID and thread ID in rpc_target_info. This will be used
	 * as w1 in FFA_INTERRUPT in case of a foreign interrupt.
	 */
	rpc_target_info = thread_get_tsd()->rpc_target_info;
	thread_id = thread_get_id();
	assert(thread_id <= UINT16_MAX);
	thread_get_tsd()->rpc_target_info =
		FFA_TARGET_INFO_SET(sp_s->endpoint_id, thread_id);

	__thread_enter_user_mode(sp_regs, &panicked, &panic_code);

	/* Restore rpc_target_info */
	thread_get_tsd()->rpc_target_info = rpc_target_info;

	thread_unmask_exceptions(exceptions);

	thread_user_clear_vfp(&ctx->uctx);

	if (panicked) {
		DMSG("SP panicked with code %#"PRIx32, panic_code);
		abort_print_current_ts();

		sess = ts_pop_current_session();
		cpu_spin_lock(&sp_s->spinlock);
		sp_s->state = sp_dead;
		cpu_spin_unlock(&sp_s->spinlock);

		return TEE_ERROR_TARGET_DEAD;
	}

	sess = ts_pop_current_session();
	assert(sess == s);

	return res;
}

/* We currently don't support 32 bits */
#ifdef ARM64
/* Save the SP's x0-x30, PC (ELR) and SP_EL0 from the SVC frame into @sp_regs */
static void sp_svc_store_registers(struct thread_scall_regs *regs,
				   struct thread_ctx_regs *sp_regs)
{
	COMPILE_TIME_ASSERT(sizeof(sp_regs->x[0]) == sizeof(regs->x0));
	memcpy(sp_regs->x, &regs->x0, 31 * sizeof(regs->x0));
	sp_regs->pc = regs->elr;
	sp_regs->sp = regs->sp_el0;
}
#endif

/*
 * ts_ops::handle_scall hook for SPs: an SVC from an SP means it is returning
 * to the SPMC, so capture its full register state and return false to leave
 * user mode (back into sp_enter_invoke_cmd()).
 */
static bool sp_handle_scall(struct thread_scall_regs *regs)
{
	struct ts_session *ts = ts_get_current_session();
	struct sp_ctx *uctx = to_sp_ctx(ts->ctx);
	struct sp_session *s = uctx->open_session;

	assert(s);

	sp_svc_store_registers(regs, &uctx->sp_regs);

	regs->x0 = 0;
	regs->x1 = 0; /* panic */
	regs->x2 = 0; /* 
panic code */

	/*
	 * All the registers of the SP are saved in the SP session by the SVC
	 * handler.
	 * We always return to S-El1 after handling the SVC. We will continue
	 * in sp_enter_invoke_cmd() (return from __thread_enter_user_mode).
	 * The sp_enter() function copies the FF-A parameters (a0-a7) from the
	 * saved registers to the thread_smc_args. The thread_smc_args object is
	 * afterward used by the spmc_sp_msg_handler() to handle the
	 * FF-A message sent by the SP.
	 */
	return false;
}

/*
 * ts_ops::dump_state hook for SPs: let ldelf produce the dump when it is
 * available, otherwise fall back to printing the context's mappings.
 */
static void sp_dump_state(struct ts_ctx *ctx)
{
	struct sp_ctx *utc = to_sp_ctx(ctx);

	if (utc->uctx.dump_entry_func) {
		TEE_Result res = ldelf_dump_state(&utc->uctx);

		if (!res || res == TEE_ERROR_TARGET_DEAD)
			return;
	}

	user_mode_ctx_print_mappings(&utc->uctx);
}

static const struct ts_ops sp_ops = {
	.enter_invoke_cmd = sp_enter_invoke_cmd,
	.handle_scall = sp_handle_scall,
	.dump_state = sp_dump_state,
};

/*
 * Parse the FIP SP package at physical address @sp_pkg_pa (header, manifest,
 * image), validate its magic/version/layout, and append a matching fip_sp
 * entry (tagged with @sp_uuid) to fip_sp_list. The package is accessed via
 * the already-mapped secure RAM view; no copy of the image is made.
 */
static TEE_Result process_sp_pkg(uint64_t sp_pkg_pa, TEE_UUID *sp_uuid)
{
	enum teecore_memtypes mtype = MEM_AREA_SEC_RAM_OVERALL;
	struct sp_pkg_header *sp_pkg_hdr = NULL;
	struct fip_sp *sp = NULL;
	uint64_t sp_fdt_end = 0;
	size_t sp_pkg_size = 0;
	vaddr_t sp_pkg_va = 0;

	/* Process the first page which contains the SP package header */
	sp_pkg_va = (vaddr_t)phys_to_virt(sp_pkg_pa, mtype, SMALL_PAGE_SIZE);
	if (!sp_pkg_va) {
		EMSG("Cannot find mapping for PA %#" PRIxPA, sp_pkg_pa);
		return TEE_ERROR_GENERIC;
	}

	sp_pkg_hdr = (struct sp_pkg_header *)sp_pkg_va;

	if (sp_pkg_hdr->magic != SP_PKG_HEADER_MAGIC) {
		EMSG("Invalid SP package magic");
		return TEE_ERROR_BAD_FORMAT;
	}

	if (sp_pkg_hdr->version != SP_PKG_HEADER_VERSION_V1 &&
	    sp_pkg_hdr->version != SP_PKG_HEADER_VERSION_V2) {
		EMSG("Invalid SP header version");
		return TEE_ERROR_BAD_FORMAT;
	}

	/* Overflow-checked: total size = image offset + image size */
	if (ADD_OVERFLOW(sp_pkg_hdr->img_offset, sp_pkg_hdr->img_size,
			 &sp_pkg_size)) {
		EMSG("Invalid SP package size");
		return TEE_ERROR_BAD_FORMAT;
	}

	/* The manifest (pm) must fit entirely before the image */
	if (ADD_OVERFLOW(sp_pkg_hdr->pm_offset, sp_pkg_hdr->pm_size,
			 &sp_fdt_end) || sp_fdt_end > sp_pkg_hdr->img_offset) {
		EMSG("Invalid SP manifest size");
		return TEE_ERROR_BAD_FORMAT;
	}

	/* Process the whole SP package now that the size is known */
	sp_pkg_va = (vaddr_t)phys_to_virt(sp_pkg_pa, mtype, sp_pkg_size);
	if (!sp_pkg_va) {
		EMSG("Cannot find mapping for PA %#" PRIxPA, sp_pkg_pa);
		return TEE_ERROR_GENERIC;
	}

	sp_pkg_hdr = (struct sp_pkg_header *)sp_pkg_va;

	sp = calloc(1, sizeof(struct fip_sp));
	if (!sp)
		return TEE_ERROR_OUT_OF_MEMORY;

	memcpy(&sp->sp_img.image.uuid, sp_uuid, sizeof(*sp_uuid));
	sp->sp_img.image.ts = (uint8_t *)(sp_pkg_va + sp_pkg_hdr->img_offset);
	sp->sp_img.image.size = sp_pkg_hdr->img_size;
	sp->sp_img.image.flags = 0;
	sp->sp_img.fdt = (uint8_t *)(sp_pkg_va + sp_pkg_hdr->pm_offset);

	STAILQ_INSERT_TAIL(&fip_sp_list, sp, link);

	return TEE_SUCCESS;
}

/*
 * Walk the "arm,sp_pkg" node of the SPMC manifest and register every listed
 * FIP SP package (load-address + uuid per subnode) via process_sp_pkg().
 * A missing sp_pkg node is not an error - FIP SPs are optional.
 */
static TEE_Result fip_sp_init_all(void)
{
	TEE_Result res = TEE_SUCCESS;
	uint64_t sp_pkg_addr = 0;
	const void *fdt = NULL;
	TEE_UUID sp_uuid = { };
	int sp_pkgs_node = 0;
	int subnode = 0;
	int root = 0;

	fdt = get_manifest_dt();
	if (!fdt) {
		EMSG("No SPMC manifest found");
		return TEE_ERROR_GENERIC;
	}

	root = fdt_path_offset(fdt, "/");
	if (root < 0)
		return TEE_ERROR_BAD_FORMAT;

	if (fdt_node_check_compatible(fdt, root, "arm,ffa-core-manifest-1.0"))
		return TEE_ERROR_BAD_FORMAT;

	/* SP packages are optional, it's not an error if we don't find any */
	sp_pkgs_node = fdt_node_offset_by_compatible(fdt, root, "arm,sp_pkg");
	if
	   (sp_pkgs_node < 0)
		return TEE_SUCCESS;

	fdt_for_each_subnode(subnode, fdt, sp_pkgs_node) {
		res = sp_dt_get_u64(fdt, subnode, "load-address", &sp_pkg_addr);
		if (res) {
			EMSG("Invalid FIP SP load address");
			return res;
		}

		res = sp_dt_get_uuid(fdt, subnode, "uuid", &sp_uuid);
		if (res) {
			EMSG("Invalid FIP SP uuid");
			return res;
		}

		res = process_sp_pkg(sp_pkg_addr, &sp_uuid);
		if (res) {
			EMSG("Invalid FIP SP package");
			return res;
		}
	}

	return TEE_SUCCESS;
}

/* Release all fip_sp bookkeeping entries (the package memory itself stays put) */
static void fip_sp_deinit_all(void)
{
	while (!STAILQ_EMPTY(&fip_sp_list)) {
		struct fip_sp *sp = STAILQ_FIRST(&fip_sp_list);

		STAILQ_REMOVE_HEAD(&fip_sp_list, link);
		free(sp);
	}
}

/*
 * Boot-time entry point (via boot_final()): open a session for every
 * embedded and FIP SP, warn about duplicated boot-order values, then run
 * each SP's initialization in session order. Failures panic unless
 * CFG_SP_SKIP_FAILED is enabled.
 */
static TEE_Result sp_init_all(void)
{
	TEE_Result res = TEE_SUCCESS;
	const struct sp_image *sp = NULL;
	const struct fip_sp *fip_sp = NULL;
	char __maybe_unused msg[60] = { '\0', };
	struct sp_session *s = NULL;
	struct sp_session *prev_sp = NULL;

	for_each_secure_partition(sp) {
		if (sp->image.uncompressed_size)
			snprintf(msg, sizeof(msg),
				 " (compressed, uncompressed %u)",
				 sp->image.uncompressed_size);
		else
			msg[0] = '\0';
		DMSG("SP %pUl size %u%s", (void *)&sp->image.uuid,
		     sp->image.size, msg);

		res = sp_init_uuid(&sp->image.uuid, sp->fdt);

		if (res != TEE_SUCCESS) {
			EMSG("Failed initializing SP(%pUl) err:%#"PRIx32,
			     &sp->image.uuid, res);
			if (!IS_ENABLED(CFG_SP_SKIP_FAILED))
				panic();
		}
	}

	res = fip_sp_init_all();
	if (res)
		panic("Failed initializing FIP SPs");

	for_each_fip_sp(fip_sp) {
		sp = &fip_sp->sp_img;

		DMSG("SP %pUl size %u", (void *)&sp->image.uuid,
		     sp->image.size);

		res = sp_init_uuid(&sp->image.uuid, sp->fdt);

		if (res != TEE_SUCCESS) {
			EMSG("Failed initializing SP(%pUl) err:%#"PRIx32,
			     &sp->image.uuid, res);
			if (!IS_ENABLED(CFG_SP_SKIP_FAILED))
				panic();
		}
	}

	/*
	 * At this point all FIP SPs are loaded by ldelf or by the raw binary SP
	 * loader, so the original images (loaded by BL2) are not needed anymore
	 */
	fip_sp_deinit_all();

	/*
	 * Now that all SPs are loaded, check through the boot order values,
	 * and warn in case there is a non-unique value.
	 */
	TAILQ_FOREACH(s, &open_sp_sessions, link) {
		/* Avoid warnings if multiple SP have undefined boot-order. */
		if (s->boot_order == UNDEFINED_BOOT_ORDER_VALUE)
			break;

		if (prev_sp && prev_sp->boot_order == s->boot_order)
			IMSG("WARNING: duplicated boot-order (%pUl vs %pUl)",
			     &prev_sp->ts_sess.ctx->uuid,
			     &s->ts_sess.ctx->uuid);

		prev_sp = s;
	}

	/* Continue the initialization and run the SP */
	TAILQ_FOREACH(s, &open_sp_sessions, link) {
		DMSG("Starting SP: 0x%"PRIx16, s->endpoint_id);

		res = sp_first_run(s);
		if (res != TEE_SUCCESS) {
			EMSG("Failed starting SP(0x%"PRIx16") err:%#"PRIx32,
			     s->endpoint_id, res);
			if (!IS_ENABLED(CFG_SP_SKIP_FAILED))
				panic();
		}
	}

	return TEE_SUCCESS;
}

boot_final(sp_init_all);

/* ts_store open hook: look the SP image up among embedded and FIP SPs */
static TEE_Result secure_partition_open(const TEE_UUID *uuid,
					struct ts_store_handle **h)
{
	return emb_ts_open(uuid, h, find_secure_partition);
}

REGISTER_SP_STORE(2) = {
	.description = "SP store",
	.open = secure_partition_open,
	.get_size = emb_ts_get_size,
	.get_tag = emb_ts_get_tag,
	.read = emb_ts_read,
	.close = emb_ts_close,
};