1 // SPDX-License-Identifier: BSD-2-Clause 2 /* 3 * Copyright (c) 2020-2023, Arm Limited. 4 */ 5 #include <bench.h> 6 #include <crypto/crypto.h> 7 #include <initcall.h> 8 #include <kernel/boot.h> 9 #include <kernel/embedded_ts.h> 10 #include <kernel/ldelf_loader.h> 11 #include <kernel/secure_partition.h> 12 #include <kernel/spinlock.h> 13 #include <kernel/spmc_sp_handler.h> 14 #include <kernel/thread_private.h> 15 #include <kernel/thread_spmc.h> 16 #include <kernel/tpm.h> 17 #include <kernel/ts_store.h> 18 #include <ldelf.h> 19 #include <libfdt.h> 20 #include <mm/core_mmu.h> 21 #include <mm/fobj.h> 22 #include <mm/mobj.h> 23 #include <mm/vm.h> 24 #include <optee_ffa.h> 25 #include <stdio.h> 26 #include <string.h> 27 #include <tee_api_types.h> 28 #include <tee/uuid.h> 29 #include <trace.h> 30 #include <types_ext.h> 31 #include <utee_defines.h> 32 #include <util.h> 33 #include <zlib.h> 34 35 #define SP_MANIFEST_ATTR_READ BIT(0) 36 #define SP_MANIFEST_ATTR_WRITE BIT(1) 37 #define SP_MANIFEST_ATTR_EXEC BIT(2) 38 #define SP_MANIFEST_ATTR_NSEC BIT(3) 39 40 #define SP_MANIFEST_ATTR_RO (SP_MANIFEST_ATTR_READ) 41 #define SP_MANIFEST_ATTR_RW (SP_MANIFEST_ATTR_READ | \ 42 SP_MANIFEST_ATTR_WRITE) 43 #define SP_MANIFEST_ATTR_RX (SP_MANIFEST_ATTR_READ | \ 44 SP_MANIFEST_ATTR_EXEC) 45 #define SP_MANIFEST_ATTR_RWX (SP_MANIFEST_ATTR_READ | \ 46 SP_MANIFEST_ATTR_WRITE | \ 47 SP_MANIFEST_ATTR_EXEC) 48 49 #define SP_MANIFEST_FLAG_NOBITS BIT(0) 50 51 #define SP_PKG_HEADER_MAGIC (0x474b5053) 52 #define SP_PKG_HEADER_VERSION_V1 (0x1) 53 #define SP_PKG_HEADER_VERSION_V2 (0x2) 54 55 struct sp_pkg_header { 56 uint32_t magic; 57 uint32_t version; 58 uint32_t pm_offset; 59 uint32_t pm_size; 60 uint32_t img_offset; 61 uint32_t img_size; 62 }; 63 64 struct fip_sp_head fip_sp_list = STAILQ_HEAD_INITIALIZER(fip_sp_list); 65 66 static const struct ts_ops sp_ops; 67 68 /* List that holds all of the loaded SP's */ 69 static struct sp_sessions_head open_sp_sessions = 70 TAILQ_HEAD_INITIALIZER(open_sp_sessions); 71 72 static const struct embedded_ts *find_secure_partition(const TEE_UUID *uuid) 73 { 74 const struct sp_image *sp = NULL; 75 const struct fip_sp *fip_sp = NULL; 76 77 for_each_secure_partition(sp) { 78 if (!memcmp(&sp->image.uuid, uuid, sizeof(*uuid))) 79 return &sp->image; 80 } 81 82 for_each_fip_sp(fip_sp) { 83 if (!memcmp(&fip_sp->sp_img.image.uuid, uuid, sizeof(*uuid))) 84 return &fip_sp->sp_img.image; 85 } 86 87 return NULL; 88 } 89 90 bool is_sp_ctx(struct ts_ctx *ctx) 91 { 92 return ctx && (ctx->ops == &sp_ops); 93 } 94 95 static void set_sp_ctx_ops(struct ts_ctx *ctx) 96 { 97 ctx->ops = &sp_ops; 98 } 99 100 struct sp_session *sp_get_session(uint32_t session_id) 101 { 102 struct sp_session *s = NULL; 103 104 TAILQ_FOREACH(s, &open_sp_sessions, link) { 105 if (s->endpoint_id == session_id) 106 return s; 107 } 108 109 return NULL; 110 } 111 112 TEE_Result sp_partition_info_get(uint32_t ffa_vers, void *buf, size_t buf_size, 113 const TEE_UUID *ffa_uuid, size_t *elem_count, 114 bool count_only) 115 { 116 TEE_Result res = TEE_SUCCESS; 117 uint32_t part_props = FFA_PART_PROP_DIRECT_REQ_RECV | 118 FFA_PART_PROP_DIRECT_REQ_SEND; 119 struct sp_session *s = NULL; 120 121 TAILQ_FOREACH(s, &open_sp_sessions, link) { 122 if (ffa_uuid && 123 memcmp(&s->ffa_uuid, ffa_uuid, sizeof(*ffa_uuid))) 124 continue; 125 126 if (s->state == sp_dead) 127 continue; 128 if (!count_only && !res) { 129 uint32_t uuid_words[4] = { 0 }; 130 131 tee_uuid_to_octets((uint8_t *)uuid_words, &s->ffa_uuid); 132 res = spmc_fill_partition_entry(ffa_vers, buf, buf_size, 133 *elem_count, 134 s->endpoint_id, 1, 135 part_props, uuid_words); 136 } 137 *elem_count += 1; 138 } 139 140 return res; 141 } 142 143 bool sp_has_exclusive_access(struct sp_mem_map_region *mem, 144 struct user_mode_ctx *uctx) 145 { 146 /* 147 * Check that we have access to the region if it is supposed to be 148 * mapped to the current context. 149 */ 150 if (uctx) { 151 struct vm_region *region = NULL; 152 153 /* Make sure that each mobj belongs to the SP */ 154 TAILQ_FOREACH(region, &uctx->vm_info.regions, link) { 155 if (region->mobj == mem->mobj) 156 break; 157 } 158 159 if (!region) 160 return false; 161 } 162 163 /* Check that it is not shared with another SP */ 164 return !sp_mem_is_shared(mem); 165 } 166 167 static uint16_t new_session_id(struct sp_sessions_head *open_sessions) 168 { 169 struct sp_session *last = NULL; 170 uint16_t id = SPMC_ENDPOINT_ID + 1; 171 172 last = TAILQ_LAST(open_sessions, sp_sessions_head); 173 if (last) 174 id = last->endpoint_id + 1; 175 176 assert(id > SPMC_ENDPOINT_ID); 177 return id; 178 } 179 180 static TEE_Result sp_create_ctx(const TEE_UUID *bin_uuid, struct sp_session *s) 181 { 182 TEE_Result res = TEE_SUCCESS; 183 struct sp_ctx *spc = NULL; 184 185 /* Register context */ 186 spc = calloc(1, sizeof(struct sp_ctx)); 187 if (!spc) 188 return TEE_ERROR_OUT_OF_MEMORY; 189 190 spc->open_session = s; 191 s->ts_sess.ctx = &spc->ts_ctx; 192 spc->ts_ctx.uuid = *bin_uuid; 193 194 res = vm_info_init(&spc->uctx, &spc->ts_ctx); 195 if (res) 196 goto err; 197 198 set_sp_ctx_ops(&spc->ts_ctx); 199 200 return TEE_SUCCESS; 201 202 err: 203 free(spc); 204 return res; 205 } 206 207 static TEE_Result sp_create_session(struct sp_sessions_head *open_sessions, 208 const TEE_UUID *bin_uuid, 209 struct sp_session **sess) 210 { 211 TEE_Result res = TEE_SUCCESS; 212 struct sp_session *s = calloc(1, sizeof(struct sp_session)); 213 214 if (!s) 215 return TEE_ERROR_OUT_OF_MEMORY; 216 217 s->endpoint_id = new_session_id(open_sessions); 218 if (!s->endpoint_id) { 219 res = TEE_ERROR_OVERFLOW; 220 goto err; 221 } 222 223 DMSG("Loading Secure Partition %pUl", (void *)bin_uuid); 224 res = sp_create_ctx(bin_uuid, s); 225 if (res) 226 goto err; 227 228 TAILQ_INSERT_TAIL(open_sessions, s, link); 229 *sess = s; 230 return TEE_SUCCESS; 231 232 err: 233 free(s); 234 return res; 235 } 236 237 static TEE_Result sp_init_set_registers(struct sp_ctx *ctx) 238 { 239 struct thread_ctx_regs *sp_regs = &ctx->sp_regs; 240 241 memset(sp_regs, 0, sizeof(*sp_regs)); 242 sp_regs->sp = ctx->uctx.stack_ptr; 243 sp_regs->pc = ctx->uctx.entry_func; 244 245 return TEE_SUCCESS; 246 } 247 248 TEE_Result sp_map_shared(struct sp_session *s, 249 struct sp_mem_receiver *receiver, 250 struct sp_mem *smem, 251 uint64_t *va) 252 { 253 TEE_Result res = TEE_SUCCESS; 254 struct sp_ctx *ctx = NULL; 255 uint32_t perm = TEE_MATTR_UR; 256 struct sp_mem_map_region *reg = NULL; 257 258 ctx = to_sp_ctx(s->ts_sess.ctx); 259 260 /* Get the permission */ 261 if (receiver->perm.perm & FFA_MEM_ACC_EXE) 262 perm |= TEE_MATTR_UX; 263 264 if (receiver->perm.perm & FFA_MEM_ACC_RW) { 265 if (receiver->perm.perm & FFA_MEM_ACC_EXE) 266 return TEE_ERROR_ACCESS_CONFLICT; 267 268 perm |= TEE_MATTR_UW; 269 } 270 /* 271 * Currently we don't support passing a va. We can't guarantee that the 272 * full region will be mapped in a contiguous region. A smem->region can 273 * have multiple mobj for one share. Currently there doesn't seem to be 274 * an option to guarantee that these will be mapped in a contiguous va 275 * space. 276 */ 277 if (*va) 278 return TEE_ERROR_NOT_SUPPORTED; 279 280 SLIST_FOREACH(reg, &smem->regions, link) { 281 res = vm_map(&ctx->uctx, va, reg->page_count * SMALL_PAGE_SIZE, 282 perm, 0, reg->mobj, reg->page_offset); 283 284 if (res != TEE_SUCCESS) { 285 EMSG("Failed to map memory region %#"PRIx32, res); 286 return res; 287 } 288 } 289 return TEE_SUCCESS; 290 } 291 292 TEE_Result sp_unmap_ffa_regions(struct sp_session *s, struct sp_mem *smem) 293 { 294 TEE_Result res = TEE_SUCCESS; 295 vaddr_t vaddr = 0; 296 size_t len = 0; 297 struct sp_ctx *ctx = to_sp_ctx(s->ts_sess.ctx); 298 struct sp_mem_map_region *reg = NULL; 299 300 SLIST_FOREACH(reg, &smem->regions, link) { 301 vaddr = (vaddr_t)sp_mem_get_va(&ctx->uctx, reg->page_offset, 302 reg->mobj); 303 len = reg->page_count * SMALL_PAGE_SIZE; 304 305 res = vm_unmap(&ctx->uctx, vaddr, len); 306 if (res != TEE_SUCCESS) 307 return res; 308 } 309 310 return TEE_SUCCESS; 311 } 312 313 static TEE_Result sp_dt_get_u64(const void *fdt, int node, const char *property, 314 uint64_t *value) 315 { 316 const fdt64_t *p = NULL; 317 int len = 0; 318 319 p = fdt_getprop(fdt, node, property, &len); 320 if (!p) 321 return TEE_ERROR_ITEM_NOT_FOUND; 322 323 if (len != sizeof(*p)) 324 return TEE_ERROR_BAD_FORMAT; 325 326 *value = fdt64_ld(p); 327 328 return TEE_SUCCESS; 329 } 330 331 static TEE_Result sp_dt_get_u32(const void *fdt, int node, const char *property, 332 uint32_t *value) 333 { 334 const fdt32_t *p = NULL; 335 int len = 0; 336 337 p = fdt_getprop(fdt, node, property, &len); 338 if (!p) 339 return TEE_ERROR_ITEM_NOT_FOUND; 340 341 if (len != sizeof(*p)) 342 return TEE_ERROR_BAD_FORMAT; 343 344 *value = fdt32_to_cpu(*p); 345 346 return TEE_SUCCESS; 347 } 348 349 static TEE_Result sp_dt_get_uuid(const void *fdt, int node, 350 const char *property, TEE_UUID *uuid) 351 { 352 uint32_t uuid_array[4] = { 0 }; 353 const fdt32_t *p = NULL; 354 int len = 0; 355 int i = 0; 356 357 p = fdt_getprop(fdt, node, property, &len); 358 if (!p) 359 return TEE_ERROR_ITEM_NOT_FOUND; 360 361 if (len != sizeof(TEE_UUID)) 362 return TEE_ERROR_BAD_FORMAT; 363 364 for (i = 0; i < 4; i++) 365 uuid_array[i] = fdt32_to_cpu(p[i]); 366 367 tee_uuid_from_octets(uuid, (uint8_t *)uuid_array); 368 369 return TEE_SUCCESS; 370 } 371 372 static TEE_Result sp_is_elf_format(const void *fdt, int sp_node, 373 bool *is_elf_format) 374 { 375 TEE_Result res = TEE_SUCCESS; 376 uint32_t elf_format = 0; 377 378 res = sp_dt_get_u32(fdt, sp_node, "elf-format", &elf_format); 379 if (res != TEE_SUCCESS && res != TEE_ERROR_ITEM_NOT_FOUND) 380 return res; 381 382 *is_elf_format = (elf_format != 0); 383 384 return TEE_SUCCESS; 385 } 386 387 static TEE_Result sp_binary_open(const TEE_UUID *uuid, 388 const struct ts_store_ops **ops, 389 struct ts_store_handle **handle) 390 { 391 TEE_Result res = TEE_ERROR_ITEM_NOT_FOUND; 392 393 SCATTERED_ARRAY_FOREACH(*ops, sp_stores, struct ts_store_ops) { 394 res = (*ops)->open(uuid, handle); 395 if (res != TEE_ERROR_ITEM_NOT_FOUND && 396 res != TEE_ERROR_STORAGE_NOT_AVAILABLE) 397 break; 398 } 399 400 return res; 401 } 402 403 static TEE_Result load_binary_sp(struct ts_session *s, 404 struct user_mode_ctx *uctx) 405 { 406 size_t bin_size = 0, bin_size_rounded = 0, bin_page_count = 0; 407 const struct ts_store_ops *store_ops = NULL; 408 struct ts_store_handle *handle = NULL; 409 TEE_Result res = TEE_SUCCESS; 410 tee_mm_entry_t *mm = NULL; 411 struct mobj *mobj = NULL; 412 uaddr_t base_addr = 0; 413 uint32_t vm_flags = 0; 414 unsigned int idx = 0; 415 vaddr_t va = 0; 416 417 if (!s || !uctx) 418 return TEE_ERROR_BAD_PARAMETERS; 419 420 DMSG("Loading raw binary format SP %pUl", &uctx->ts_ctx->uuid); 421 422 vm_set_ctx(uctx->ts_ctx); 423 424 /* Find TS store and open SP binary */ 425 res = sp_binary_open(&uctx->ts_ctx->uuid, &store_ops, &handle); 426 if (res != TEE_SUCCESS) { 427 EMSG("Failed to open SP binary"); 428 return res; 429 } 430 431 /* Query binary size and calculate page count */ 432 res = store_ops->get_size(handle, &bin_size); 433 if (res != TEE_SUCCESS) 434 goto err; 435 436 if (ROUNDUP_OVERFLOW(bin_size, SMALL_PAGE_SIZE, &bin_size_rounded)) { 437 res = TEE_ERROR_OVERFLOW; 438 goto err; 439 } 440 441 bin_page_count = bin_size_rounded / SMALL_PAGE_SIZE; 442 443 /* Allocate memory */ 444 mm = tee_mm_alloc(&tee_mm_sec_ddr, bin_size_rounded); 445 if (!mm) { 446 res = TEE_ERROR_OUT_OF_MEMORY; 447 goto err; 448 } 449 450 base_addr = tee_mm_get_smem(mm); 451 452 /* Create mobj */ 453 mobj = sp_mem_new_mobj(bin_page_count, TEE_MATTR_MEM_TYPE_CACHED, true); 454 if (!mobj) { 455 res = TEE_ERROR_OUT_OF_MEMORY; 456 goto err_free_tee_mm; 457 } 458 459 res = sp_mem_add_pages(mobj, &idx, base_addr, bin_page_count); 460 if (res) 461 goto err_free_mobj; 462 463 /* Map memory area for the SP binary */ 464 res = vm_map(uctx, &va, bin_size_rounded, TEE_MATTR_URWX, 465 vm_flags, mobj, 0); 466 if (res) 467 goto err_free_mobj; 468 469 /* Read SP binary into the previously mapped memory area */ 470 res = store_ops->read(handle, (void *)va, bin_size); 471 if (res) 472 goto err_unmap; 473 474 /* Set memory protection to allow execution */ 475 res = vm_set_prot(uctx, va, bin_size_rounded, TEE_MATTR_UX); 476 if (res) 477 goto err_unmap; 478 479 mobj_put(mobj); 480 store_ops->close(handle); 481 482 /* The entry point must be at the beginning of the SP binary. */ 483 uctx->entry_func = va; 484 uctx->load_addr = va; 485 uctx->is_32bit = false; 486 487 s->handle_scall = s->ctx->ops->handle_scall; 488 489 return TEE_SUCCESS; 490 491 err_unmap: 492 vm_unmap(uctx, va, bin_size_rounded); 493 494 err_free_mobj: 495 mobj_put(mobj); 496 497 err_free_tee_mm: 498 tee_mm_free(mm); 499 500 err: 501 store_ops->close(handle); 502 503 return res; 504 } 505 506 static TEE_Result sp_open_session(struct sp_session **sess, 507 struct sp_sessions_head *open_sessions, 508 const TEE_UUID *ffa_uuid, 509 const TEE_UUID *bin_uuid, 510 const void *fdt) 511 { 512 TEE_Result res = TEE_SUCCESS; 513 struct sp_session *s = NULL; 514 struct sp_ctx *ctx = NULL; 515 bool is_elf_format = false; 516 517 if (!find_secure_partition(bin_uuid)) 518 return TEE_ERROR_ITEM_NOT_FOUND; 519 520 res = sp_create_session(open_sessions, bin_uuid, &s); 521 if (res != TEE_SUCCESS) { 522 DMSG("sp_create_session failed %#"PRIx32, res); 523 return res; 524 } 525 526 ctx = to_sp_ctx(s->ts_sess.ctx); 527 assert(ctx); 528 if (!ctx) 529 return TEE_ERROR_TARGET_DEAD; 530 *sess = s; 531 532 ts_push_current_session(&s->ts_sess); 533 534 res = sp_is_elf_format(fdt, 0, &is_elf_format); 535 if (res == TEE_SUCCESS) { 536 if (is_elf_format) { 537 /* Load the SP using ldelf. */ 538 ldelf_load_ldelf(&ctx->uctx); 539 res = ldelf_init_with_ldelf(&s->ts_sess, &ctx->uctx); 540 } else { 541 /* Raw binary format SP */ 542 res = load_binary_sp(&s->ts_sess, &ctx->uctx); 543 } 544 } else { 545 EMSG("Failed to detect SP format"); 546 } 547 548 if (res != TEE_SUCCESS) { 549 EMSG("Failed loading SP %#"PRIx32, res); 550 ts_pop_current_session(); 551 return TEE_ERROR_TARGET_DEAD; 552 } 553 554 /* Make the SP ready for its first run */ 555 s->state = sp_idle; 556 s->caller_id = 0; 557 sp_init_set_registers(ctx); 558 memcpy(&s->ffa_uuid, ffa_uuid, sizeof(*ffa_uuid)); 559 ts_pop_current_session(); 560 561 return TEE_SUCCESS; 562 } 563 564 static TEE_Result fdt_get_uuid(const void * const fdt, TEE_UUID *uuid) 565 { 566 const struct fdt_property *description = NULL; 567 int description_name_len = 0; 568 569 if (fdt_node_check_compatible(fdt, 0, "arm,ffa-manifest-1.0")) { 570 EMSG("Failed loading SP, manifest not found"); 571 return TEE_ERROR_BAD_PARAMETERS; 572 } 573 574 description = fdt_get_property(fdt, 0, "description", 575 &description_name_len); 576 if (description) 577 DMSG("Loading SP: %s", description->data); 578 579 if (sp_dt_get_uuid(fdt, 0, "uuid", uuid)) { 580 EMSG("Missing or invalid UUID in SP manifest"); 581 return TEE_ERROR_BAD_FORMAT; 582 } 583 584 return TEE_SUCCESS; 585 } 586 587 /* 588 * sp_init_info allocates and maps the sp_ffa_init_info for the SP. It will copy 589 * the fdt into the allocated page(s) and return a pointer to the new location 590 * of the fdt. This pointer can be used to update data inside the fdt. 591 */ 592 static TEE_Result sp_init_info(struct sp_ctx *ctx, struct thread_smc_args *args, 593 const void * const input_fdt, vaddr_t *va, 594 size_t *num_pgs, void **fdt_copy) 595 { 596 struct sp_ffa_init_info *info = NULL; 597 int nvp_count = 1; 598 size_t total_size = ROUNDUP(CFG_SP_INIT_INFO_MAX_SIZE, SMALL_PAGE_SIZE); 599 size_t nvp_size = sizeof(struct sp_name_value_pair) * nvp_count; 600 size_t info_size = sizeof(*info) + nvp_size; 601 size_t fdt_size = total_size - info_size; 602 TEE_Result res = TEE_SUCCESS; 603 uint32_t perm = TEE_MATTR_URW | TEE_MATTR_PRW; 604 struct fobj *f = NULL; 605 struct mobj *m = NULL; 606 static const char fdt_name[16] = "TYPE_DT\0\0\0\0\0\0\0\0"; 607 608 *num_pgs = total_size / SMALL_PAGE_SIZE; 609 610 f = fobj_sec_mem_alloc(*num_pgs); 611 m = mobj_with_fobj_alloc(f, NULL, TEE_MATTR_MEM_TYPE_TAGGED); 612 613 fobj_put(f); 614 if (!m) 615 return TEE_ERROR_OUT_OF_MEMORY; 616 617 res = vm_map(&ctx->uctx, va, total_size, perm, 0, m, 0); 618 mobj_put(m); 619 if (res) 620 return res; 621 622 info = (struct sp_ffa_init_info *)*va; 623 624 /* magic field is 4 bytes, we don't copy /0 byte. */ 625 memcpy(&info->magic, "FF-A", 4); 626 info->count = nvp_count; 627 args->a0 = (vaddr_t)info; 628 629 /* 630 * Store the fdt after the boot_info and store the pointer in the 631 * first element. 632 */ 633 COMPILE_TIME_ASSERT(sizeof(info->nvp[0].name) == sizeof(fdt_name)); 634 memcpy(info->nvp[0].name, fdt_name, sizeof(fdt_name)); 635 info->nvp[0].value = *va + info_size; 636 info->nvp[0].size = fdt_size; 637 *fdt_copy = (void *)info->nvp[0].value; 638 639 if (fdt_open_into(input_fdt, *fdt_copy, fdt_size)) 640 return TEE_ERROR_GENERIC; 641 642 return TEE_SUCCESS; 643 } 644 645 static TEE_Result handle_fdt_load_relative_mem_regions(struct sp_ctx *ctx, 646 const void *fdt) 647 { 648 int node = 0; 649 int subnode = 0; 650 tee_mm_entry_t *mm = NULL; 651 TEE_Result res = TEE_SUCCESS; 652 653 /* 654 * Memory regions are optional in the SP manifest, it's not an error if 655 * we don't find any. 656 */ 657 node = fdt_node_offset_by_compatible(fdt, 0, 658 "arm,ffa-manifest-memory-regions"); 659 if (node < 0) 660 return TEE_SUCCESS; 661 662 fdt_for_each_subnode(subnode, fdt, node) { 663 uint64_t load_rel_offset = 0; 664 uint32_t attributes = 0; 665 uint64_t base_addr = 0; 666 uint32_t pages_cnt = 0; 667 uint32_t flags = 0; 668 uint32_t perm = 0; 669 size_t size = 0; 670 vaddr_t va = 0; 671 672 mm = NULL; 673 674 /* Load address relative offset of a memory region */ 675 if (!sp_dt_get_u64(fdt, subnode, "load-address-relative-offset", 676 &load_rel_offset)) { 677 va = ctx->uctx.load_addr + load_rel_offset; 678 } else { 679 /* Skip non load address relative memory regions */ 680 continue; 681 } 682 683 if (!sp_dt_get_u64(fdt, subnode, "base-address", &base_addr)) { 684 EMSG("Both base-address and load-address-relative-offset fields are set"); 685 return TEE_ERROR_BAD_FORMAT; 686 } 687 688 /* Size of memory region as count of 4K pages */ 689 if (sp_dt_get_u32(fdt, subnode, "pages-count", &pages_cnt)) { 690 EMSG("Mandatory field is missing: pages-count"); 691 return TEE_ERROR_BAD_FORMAT; 692 } 693 694 if (MUL_OVERFLOW(pages_cnt, SMALL_PAGE_SIZE, &size)) 695 return TEE_ERROR_OVERFLOW; 696 697 /* Memory region attributes */ 698 if (sp_dt_get_u32(fdt, subnode, "attributes", &attributes)) { 699 EMSG("Mandatory field is missing: attributes"); 700 return TEE_ERROR_BAD_FORMAT; 701 } 702 703 /* Check instruction and data access permissions */ 704 switch (attributes & SP_MANIFEST_ATTR_RWX) { 705 case SP_MANIFEST_ATTR_RO: 706 perm = TEE_MATTR_UR; 707 break; 708 case SP_MANIFEST_ATTR_RW: 709 perm = TEE_MATTR_URW; 710 break; 711 case SP_MANIFEST_ATTR_RX: 712 perm = TEE_MATTR_URX; 713 break; 714 default: 715 EMSG("Invalid memory access permissions"); 716 return TEE_ERROR_BAD_FORMAT; 717 } 718 719 res = sp_dt_get_u32(fdt, subnode, "load-flags", &flags); 720 if (res != TEE_SUCCESS && res != TEE_ERROR_ITEM_NOT_FOUND) { 721 EMSG("Optional field with invalid value: flags"); 722 return TEE_ERROR_BAD_FORMAT; 723 } 724 725 /* Load relative regions must be secure */ 726 if (attributes & SP_MANIFEST_ATTR_NSEC) { 727 EMSG("Invalid memory security attribute"); 728 return TEE_ERROR_BAD_FORMAT; 729 } 730 731 if (flags & SP_MANIFEST_FLAG_NOBITS) { 732 /* 733 * NOBITS flag is set, which means that loaded binary 734 * doesn't contain this area, so it's need to be 735 * allocated. 736 */ 737 struct mobj *m = NULL; 738 unsigned int idx = 0; 739 740 mm = tee_mm_alloc(&tee_mm_sec_ddr, size); 741 if (!mm) 742 return TEE_ERROR_OUT_OF_MEMORY; 743 744 base_addr = tee_mm_get_smem(mm); 745 746 m = sp_mem_new_mobj(pages_cnt, 747 TEE_MATTR_MEM_TYPE_CACHED, true); 748 if (!m) { 749 res = TEE_ERROR_OUT_OF_MEMORY; 750 goto err_mm_free; 751 } 752 753 res = sp_mem_add_pages(m, &idx, base_addr, pages_cnt); 754 if (res) { 755 mobj_put(m); 756 goto err_mm_free; 757 } 758 759 res = vm_map(&ctx->uctx, &va, size, perm, 0, m, 0); 760 mobj_put(m); 761 if (res) 762 goto err_mm_free; 763 } else { 764 /* 765 * If NOBITS is not present the memory area is already 766 * mapped and only need to set the correct permissions. 767 */ 768 res = vm_set_prot(&ctx->uctx, va, size, perm); 769 if (res) 770 return res; 771 } 772 } 773 774 return TEE_SUCCESS; 775 776 err_mm_free: 777 tee_mm_free(mm); 778 return res; 779 } 780 781 static TEE_Result handle_fdt_dev_regions(struct sp_ctx *ctx, void *fdt) 782 { 783 int node = 0; 784 int subnode = 0; 785 TEE_Result res = TEE_SUCCESS; 786 const char *dt_device_match_table = { 787 "arm,ffa-manifest-device-regions", 788 }; 789 790 /* 791 * Device regions are optional in the SP manifest, it's not an error if 792 * we don't find any 793 */ 794 node = fdt_node_offset_by_compatible(fdt, 0, dt_device_match_table); 795 if (node < 0) 796 return TEE_SUCCESS; 797 798 fdt_for_each_subnode(subnode, fdt, node) { 799 uint64_t base_addr = 0; 800 uint32_t pages_cnt = 0; 801 uint32_t attributes = 0; 802 struct mobj *m = NULL; 803 bool is_secure = true; 804 uint32_t perm = 0; 805 vaddr_t va = 0; 806 unsigned int idx = 0; 807 808 /* 809 * Physical base address of a device MMIO region. 810 * Currently only physically contiguous region is supported. 811 */ 812 if (sp_dt_get_u64(fdt, subnode, "base-address", &base_addr)) { 813 EMSG("Mandatory field is missing: base-address"); 814 return TEE_ERROR_BAD_FORMAT; 815 } 816 817 /* Total size of MMIO region as count of 4K pages */ 818 if (sp_dt_get_u32(fdt, subnode, "pages-count", &pages_cnt)) { 819 EMSG("Mandatory field is missing: pages-count"); 820 return TEE_ERROR_BAD_FORMAT; 821 } 822 823 /* Data access, instruction access and security attributes */ 824 if (sp_dt_get_u32(fdt, subnode, "attributes", &attributes)) { 825 EMSG("Mandatory field is missing: attributes"); 826 return TEE_ERROR_BAD_FORMAT; 827 } 828 829 /* Check instruction and data access permissions */ 830 switch (attributes & SP_MANIFEST_ATTR_RWX) { 831 case SP_MANIFEST_ATTR_RO: 832 perm = TEE_MATTR_UR; 833 break; 834 case SP_MANIFEST_ATTR_RW: 835 perm = TEE_MATTR_URW; 836 break; 837 default: 838 EMSG("Invalid memory access permissions"); 839 return TEE_ERROR_BAD_FORMAT; 840 } 841 842 /* 843 * The SP is a secure endpoint, security attribute can be 844 * secure or non-secure 845 */ 846 if (attributes & SP_MANIFEST_ATTR_NSEC) 847 is_secure = false; 848 849 /* Memory attributes must be Device-nGnRnE */ 850 m = sp_mem_new_mobj(pages_cnt, TEE_MATTR_MEM_TYPE_STRONGLY_O, 851 is_secure); 852 if (!m) 853 return TEE_ERROR_OUT_OF_MEMORY; 854 855 res = sp_mem_add_pages(m, &idx, (paddr_t)base_addr, pages_cnt); 856 if (res) { 857 mobj_put(m); 858 return res; 859 } 860 861 res = vm_map(&ctx->uctx, &va, pages_cnt * SMALL_PAGE_SIZE, 862 perm, 0, m, 0); 863 mobj_put(m); 864 if (res) 865 return res; 866 867 /* 868 * Overwrite the device region's PA in the fdt with the VA. This 869 * fdt will be passed to the SP. 870 */ 871 res = fdt_setprop_u64(fdt, subnode, "base-address", va); 872 873 /* 874 * Unmap the region if the overwrite failed since the SP won't 875 * be able to access it without knowing the VA. 876 */ 877 if (res) { 878 vm_unmap(&ctx->uctx, va, pages_cnt * SMALL_PAGE_SIZE); 879 return res; 880 } 881 } 882 883 return TEE_SUCCESS; 884 } 885 886 static TEE_Result swap_sp_endpoints(uint32_t endpoint_id, 887 uint32_t new_endpoint_id) 888 { 889 struct sp_session *session = sp_get_session(endpoint_id); 890 uint32_t manifest_endpoint_id = 0; 891 892 /* 893 * We don't know in which order the SPs are loaded. The endpoint ID 894 * defined in the manifest could already be generated by 895 * new_session_id() and used by another SP. If this is the case, we swap 896 * the ID's of the two SPs. We also have to make sure that the ID's are 897 * not defined twice in the manifest. 898 */ 899 900 /* The endpoint ID was not assigned yet */ 901 if (!session) 902 return TEE_SUCCESS; 903 904 /* 905 * Read the manifest file from the SP who originally had the endpoint. 906 * We can safely swap the endpoint ID's if the manifest file doesn't 907 * have an endpoint ID defined. 908 */ 909 if (!sp_dt_get_u32(session->fdt, 0, "id", &manifest_endpoint_id)) { 910 assert(manifest_endpoint_id == endpoint_id); 911 EMSG("SP: Found duplicated endpoint ID %#"PRIx32, endpoint_id); 912 return TEE_ERROR_ACCESS_CONFLICT; 913 } 914 915 session->endpoint_id = new_endpoint_id; 916 917 return TEE_SUCCESS; 918 } 919 920 static TEE_Result read_manifest_endpoint_id(struct sp_session *s) 921 { 922 uint32_t endpoint_id = 0; 923 924 /* 925 * The endpoint ID can be optionally defined in the manifest file. We 926 * have to map the ID inside the manifest to the SP if it's defined. 927 * If not, the endpoint ID generated inside new_session_id() will be 928 * used. 929 */ 930 if (!sp_dt_get_u32(s->fdt, 0, "id", &endpoint_id)) { 931 TEE_Result res = TEE_ERROR_GENERIC; 932 933 if (endpoint_id <= SPMC_ENDPOINT_ID) 934 return TEE_ERROR_BAD_FORMAT; 935 936 res = swap_sp_endpoints(endpoint_id, s->endpoint_id); 937 if (res) 938 return res; 939 940 DMSG("SP: endpoint ID (0x%"PRIx32") found in manifest", 941 endpoint_id); 942 /* Assign the endpoint ID to the current SP */ 943 s->endpoint_id = endpoint_id; 944 } 945 return TEE_SUCCESS; 946 } 947 948 static TEE_Result handle_fdt_mem_regions(struct sp_ctx *ctx, void *fdt) 949 { 950 int node = 0; 951 int subnode = 0; 952 tee_mm_entry_t *mm = NULL; 953 TEE_Result res = TEE_SUCCESS; 954 955 /* 956 * Memory regions are optional in the SP manifest, it's not an error if 957 * we don't find any. 958 */ 959 node = fdt_node_offset_by_compatible(fdt, 0, 960 "arm,ffa-manifest-memory-regions"); 961 if (node < 0) 962 return TEE_SUCCESS; 963 964 fdt_for_each_subnode(subnode, fdt, node) { 965 uint64_t load_rel_offset = 0; 966 bool alloc_needed = false; 967 uint32_t attributes = 0; 968 uint64_t base_addr = 0; 969 uint32_t pages_cnt = 0; 970 bool is_secure = true; 971 struct mobj *m = NULL; 972 unsigned int idx = 0; 973 uint32_t perm = 0; 974 size_t size = 0; 975 vaddr_t va = 0; 976 977 mm = NULL; 978 979 /* Load address relative offset of a memory region */ 980 if (!sp_dt_get_u64(fdt, subnode, "load-address-relative-offset", 981 &load_rel_offset)) { 982 /* 983 * At this point the memory region is already mapped by 984 * handle_fdt_load_relative_mem_regions. 985 * Only need to set the base-address in the manifest and 986 * then skip the rest of the mapping process. 987 */ 988 va = ctx->uctx.load_addr + load_rel_offset; 989 res = fdt_setprop_u64(fdt, subnode, "base-address", va); 990 if (res) 991 return res; 992 993 continue; 994 } 995 996 /* 997 * Base address of a memory region. 998 * If not present, we have to allocate the specified memory. 999 * If present, this field could specify a PA or VA. Currently 1000 * only a PA is supported. 1001 */ 1002 if (sp_dt_get_u64(fdt, subnode, "base-address", &base_addr)) 1003 alloc_needed = true; 1004 1005 /* Size of memory region as count of 4K pages */ 1006 if (sp_dt_get_u32(fdt, subnode, "pages-count", &pages_cnt)) { 1007 EMSG("Mandatory field is missing: pages-count"); 1008 return TEE_ERROR_BAD_FORMAT; 1009 } 1010 1011 if (MUL_OVERFLOW(pages_cnt, SMALL_PAGE_SIZE, &size)) 1012 return TEE_ERROR_OVERFLOW; 1013 1014 /* 1015 * Memory region attributes: 1016 * - Instruction/data access permissions 1017 * - Cacheability/shareability attributes 1018 * - Security attributes 1019 * 1020 * Cacheability/shareability attributes can be ignored for now. 1021 * OP-TEE only supports a single type for normal cached memory 1022 * and currently there is no use case that would require to 1023 * change this. 1024 */ 1025 if (sp_dt_get_u32(fdt, subnode, "attributes", &attributes)) { 1026 EMSG("Mandatory field is missing: attributes"); 1027 return TEE_ERROR_BAD_FORMAT; 1028 } 1029 1030 /* Check instruction and data access permissions */ 1031 switch (attributes & SP_MANIFEST_ATTR_RWX) { 1032 case SP_MANIFEST_ATTR_RO: 1033 perm = TEE_MATTR_UR; 1034 break; 1035 case SP_MANIFEST_ATTR_RW: 1036 perm = TEE_MATTR_URW; 1037 break; 1038 case SP_MANIFEST_ATTR_RX: 1039 perm = TEE_MATTR_URX; 1040 break; 1041 default: 1042 EMSG("Invalid memory access permissions"); 1043 return TEE_ERROR_BAD_FORMAT; 1044 } 1045 1046 /* 1047 * The SP is a secure endpoint, security attribute can be 1048 * secure or non-secure. 1049 * The SPMC cannot allocate non-secure memory, i.e. if the base 1050 * address is missing this attribute must be secure. 1051 */ 1052 if (attributes & SP_MANIFEST_ATTR_NSEC) { 1053 if (alloc_needed) { 1054 EMSG("Invalid memory security attribute"); 1055 return TEE_ERROR_BAD_FORMAT; 1056 } 1057 is_secure = false; 1058 } 1059 1060 if (alloc_needed) { 1061 /* Base address is missing, we have to allocate */ 1062 mm = tee_mm_alloc(&tee_mm_sec_ddr, size); 1063 if (!mm) 1064 return TEE_ERROR_OUT_OF_MEMORY; 1065 1066 base_addr = tee_mm_get_smem(mm); 1067 } 1068 1069 m = sp_mem_new_mobj(pages_cnt, TEE_MATTR_MEM_TYPE_CACHED, 1070 is_secure); 1071 if (!m) { 1072 res = TEE_ERROR_OUT_OF_MEMORY; 1073 goto err_mm_free; 1074 } 1075 1076 res = sp_mem_add_pages(m, &idx, base_addr, pages_cnt); 1077 if (res) { 1078 mobj_put(m); 1079 goto err_mm_free; 1080 } 1081 1082 res = vm_map(&ctx->uctx, &va, size, perm, 0, m, 0); 1083 mobj_put(m); 1084 if (res) 1085 goto err_mm_free; 1086 1087 /* 1088 * Overwrite the memory region's base address in the fdt with 1089 * the VA. This fdt will be passed to the SP. 1090 * If the base-address field was not present in the original 1091 * fdt, this function will create it. This doesn't cause issues 1092 * since the necessary extra space has been allocated when 1093 * opening the fdt. 1094 */ 1095 res = fdt_setprop_u64(fdt, subnode, "base-address", va); 1096 1097 /* 1098 * Unmap the region if the overwrite failed since the SP won't 1099 * be able to access it without knowing the VA. 1100 */ 1101 if (res) { 1102 vm_unmap(&ctx->uctx, va, size); 1103 goto err_mm_free; 1104 } 1105 } 1106 1107 return TEE_SUCCESS; 1108 1109 err_mm_free: 1110 tee_mm_free(mm); 1111 return res; 1112 } 1113 1114 static TEE_Result handle_tpm_event_log(struct sp_ctx *ctx, void *fdt) 1115 { 1116 uint32_t perm = TEE_MATTR_URW | TEE_MATTR_PRW; 1117 uint32_t dummy_size __maybe_unused = 0; 1118 TEE_Result res = TEE_SUCCESS; 1119 size_t page_count = 0; 1120 struct fobj *f = NULL; 1121 struct mobj *m = NULL; 1122 vaddr_t log_addr = 0; 1123 size_t log_size = 0; 1124 int node = 0; 1125 1126 node = fdt_node_offset_by_compatible(fdt, 0, "arm,tpm_event_log"); 1127 if (node < 0) 1128 return TEE_SUCCESS; 1129 1130 /* Checking the existence and size of the event log properties */ 1131 if (sp_dt_get_u64(fdt, node, "tpm_event_log_addr", &log_addr)) { 1132 EMSG("tpm_event_log_addr not found or has invalid size"); 1133 return TEE_ERROR_BAD_FORMAT; 1134 } 1135 1136 if (sp_dt_get_u32(fdt, node, "tpm_event_log_size", &dummy_size)) { 1137 EMSG("tpm_event_log_size not found or has invalid size"); 1138 return TEE_ERROR_BAD_FORMAT; 1139 } 1140 1141 /* Validating event log */ 1142 res = tpm_get_event_log_size(&log_size); 1143 if (res) 1144 return res; 1145 1146 if (!log_size) { 1147 EMSG("Empty TPM event log was provided"); 1148 return TEE_ERROR_ITEM_NOT_FOUND; 1149 } 1150 1151 /* Allocating memory area for the event log to share with the SP */ 1152 page_count = ROUNDUP_DIV(log_size, SMALL_PAGE_SIZE); 1153 1154 f = fobj_sec_mem_alloc(page_count); 1155 m = mobj_with_fobj_alloc(f, NULL, TEE_MATTR_MEM_TYPE_TAGGED); 1156 fobj_put(f); 1157 if (!m) 1158 return TEE_ERROR_OUT_OF_MEMORY; 1159 1160 res = vm_map(&ctx->uctx, &log_addr, log_size, perm, 0, m, 0); 1161 mobj_put(m); 1162 if (res) 1163 return res; 1164 1165 /* Copy event log */ 1166 res = tpm_get_event_log((void *)log_addr, &log_size); 1167 if (res) 1168 goto err_unmap; 1169 1170 /* Setting event log details in the manifest */ 1171 res = fdt_setprop_u64(fdt, node, "tpm_event_log_addr", log_addr); 1172 if (res) 1173 goto err_unmap; 1174 1175 res = fdt_setprop_u32(fdt, node, "tpm_event_log_size", log_size); 1176 if (res) 1177 goto err_unmap; 1178 1179 return TEE_SUCCESS; 1180 1181 err_unmap: 1182 vm_unmap(&ctx->uctx, log_addr, log_size); 1183 1184 return res; 1185 } 1186 1187 /* 1188 * Note: this function is called only on the primary CPU. It assumes that the 1189 * features present on the primary CPU are available on all of the secondary 1190 * CPUs as well. 1191 */ 1192 static TEE_Result handle_hw_features(void *fdt) 1193 { 1194 uint32_t val __maybe_unused = 0; 1195 TEE_Result res = TEE_SUCCESS; 1196 int node = 0; 1197 1198 /* 1199 * HW feature descriptions are optional in the SP manifest, it's not an 1200 * error if we don't find any. 1201 */ 1202 node = fdt_node_offset_by_compatible(fdt, 0, "arm,hw-features"); 1203 if (node < 0) 1204 return TEE_SUCCESS; 1205 1206 /* Modify the crc32 property only if it's already present */ 1207 if (!sp_dt_get_u32(fdt, node, "crc32", &val)) { 1208 res = fdt_setprop_u32(fdt, node, "crc32", 1209 feat_crc32_implemented()); 1210 if (res) 1211 return res; 1212 } 1213 1214 return TEE_SUCCESS; 1215 } 1216 1217 static TEE_Result sp_init_uuid(const TEE_UUID *bin_uuid, const void * const fdt) 1218 { 1219 TEE_Result res = TEE_SUCCESS; 1220 struct sp_session *sess = NULL; 1221 TEE_UUID ffa_uuid = {}; 1222 1223 res = fdt_get_uuid(fdt, &ffa_uuid); 1224 if (res) 1225 return res; 1226 1227 res = sp_open_session(&sess, 1228 &open_sp_sessions, 1229 &ffa_uuid, bin_uuid, fdt); 1230 if (res) 1231 return res; 1232 1233 sess->fdt = fdt; 1234 res = read_manifest_endpoint_id(sess); 1235 if (res) 1236 return res; 1237 DMSG("endpoint is 0x%"PRIx16, sess->endpoint_id); 1238 1239 return TEE_SUCCESS; 1240 } 1241 1242 static TEE_Result sp_first_run(struct sp_session *sess) 1243 { 1244 TEE_Result res = TEE_SUCCESS; 1245 struct thread_smc_args args = { }; 1246 vaddr_t va = 0; 1247 size_t num_pgs = 0; 1248 struct sp_ctx *ctx = NULL; 1249 void *fdt_copy = NULL; 1250 1251 ctx = to_sp_ctx(sess->ts_sess.ctx); 1252 ts_push_current_session(&sess->ts_sess); 1253 1254 /* 1255 * Load relative memory regions must be handled before doing any other 1256 * mapping to prevent conflicts in the VA space. 1257 */ 1258 res = handle_fdt_load_relative_mem_regions(ctx, sess->fdt); 1259 if (res) { 1260 ts_pop_current_session(); 1261 return res; 1262 } 1263 1264 res = sp_init_info(ctx, &args, sess->fdt, &va, &num_pgs, &fdt_copy); 1265 if (res) 1266 goto out; 1267 1268 res = handle_fdt_dev_regions(ctx, fdt_copy); 1269 if (res) 1270 goto out; 1271 1272 res = handle_fdt_mem_regions(ctx, fdt_copy); 1273 if (res) 1274 goto out; 1275 1276 if (IS_ENABLED(CFG_CORE_TPM_EVENT_LOG)) { 1277 res = handle_tpm_event_log(ctx, fdt_copy); 1278 if (res) 1279 goto out; 1280 } 1281 1282 res = handle_hw_features(fdt_copy); 1283 if (res) 1284 goto out; 1285 1286 ts_pop_current_session(); 1287 1288 sess->is_initialized = false; 1289 if (sp_enter(&args, sess)) { 1290 vm_unmap(&ctx->uctx, va, num_pgs); 1291 return FFA_ABORTED; 1292 } 1293 1294 spmc_sp_msg_handler(&args, sess); 1295 1296 sess->is_initialized = true; 1297 1298 ts_push_current_session(&sess->ts_sess); 1299 out: 1300 /* Free the boot info page from the SP memory */ 1301 vm_unmap(&ctx->uctx, va, num_pgs); 1302 ts_pop_current_session(); 1303 1304 return res; 1305 } 1306 1307 TEE_Result sp_enter(struct thread_smc_args *args, struct sp_session *sp) 1308 { 1309 TEE_Result res = FFA_OK; 1310 struct sp_ctx *ctx = to_sp_ctx(sp->ts_sess.ctx); 1311 1312 ctx->sp_regs.x[0] = args->a0; 1313 ctx->sp_regs.x[1] = args->a1; 1314 ctx->sp_regs.x[2] = args->a2; 1315 ctx->sp_regs.x[3] = args->a3; 1316 ctx->sp_regs.x[4] = args->a4; 1317 ctx->sp_regs.x[5] = args->a5; 1318 ctx->sp_regs.x[6] = args->a6; 1319 ctx->sp_regs.x[7] = args->a7; 1320 1321 res = sp->ts_sess.ctx->ops->enter_invoke_cmd(&sp->ts_sess, 0); 1322 1323 args->a0 = ctx->sp_regs.x[0]; 1324 args->a1 = ctx->sp_regs.x[1]; 1325 args->a2 = ctx->sp_regs.x[2]; 1326 args->a3 = ctx->sp_regs.x[3]; 1327 args->a4 = ctx->sp_regs.x[4]; 1328 args->a5 = ctx->sp_regs.x[5]; 1329 args->a6 = ctx->sp_regs.x[6]; 1330 args->a7 = ctx->sp_regs.x[7]; 1331 1332 return res; 1333 } 1334 1335 static TEE_Result sp_enter_invoke_cmd(struct ts_session *s, 1336 uint32_t cmd __unused) 1337 { 1338 struct sp_ctx *ctx = to_sp_ctx(s->ctx); 1339 TEE_Result res = TEE_SUCCESS; 1340 uint32_t exceptions = 0; 1341 uint64_t cpsr = 0; 1342 struct sp_session *sp_s = to_sp_session(s); 1343 struct ts_session *sess = NULL; 1344 struct thread_ctx_regs *sp_regs = NULL; 1345 uint32_t thread_id = THREAD_ID_INVALID; 1346 uint32_t rpc_target_info = 0; 1347 uint32_t panicked = false; 1348 uint32_t panic_code = 0; 1349 1350 bm_timestamp(); 1351 1352 sp_regs = &ctx->sp_regs; 1353 ts_push_current_session(s); 1354 1355 cpsr = sp_regs->cpsr; 1356 sp_regs->cpsr = read_daif() & (SPSR_64_DAIF_MASK << SPSR_64_DAIF_SHIFT); 1357 1358 exceptions = thread_mask_exceptions(THREAD_EXCP_ALL); 1359 1360 /* 1361 * Store endpoint ID and thread ID in rpc_target_info. This will be used 1362 * as w1 in FFA_INTERRUPT in case of a foreign interrupt. 1363 */ 1364 rpc_target_info = thread_get_tsd()->rpc_target_info; 1365 thread_id = thread_get_id(); 1366 assert(thread_id <= UINT16_MAX); 1367 thread_get_tsd()->rpc_target_info = 1368 FFA_TARGET_INFO_SET(sp_s->endpoint_id, thread_id); 1369 1370 __thread_enter_user_mode(sp_regs, &panicked, &panic_code); 1371 1372 sp_regs->cpsr = cpsr; 1373 1374 /* Restore rpc_target_info */ 1375 thread_get_tsd()->rpc_target_info = rpc_target_info; 1376 1377 thread_unmask_exceptions(exceptions); 1378 1379 thread_user_clear_vfp(&ctx->uctx); 1380 1381 if (panicked) { 1382 DMSG("SP panicked with code %#"PRIx32, panic_code); 1383 abort_print_current_ts(); 1384 1385 sess = ts_pop_current_session(); 1386 cpu_spin_lock(&sp_s->spinlock); 1387 sp_s->state = sp_dead; 1388 cpu_spin_unlock(&sp_s->spinlock); 1389 1390 return TEE_ERROR_TARGET_DEAD; 1391 } 1392 1393 sess = ts_pop_current_session(); 1394 assert(sess == s); 1395 1396 bm_timestamp(); 1397 1398 return res; 1399 } 1400 1401 /* We currently don't support 32 bits */ 1402 #ifdef ARM64 1403 static void sp_svc_store_registers(struct thread_scall_regs *regs, 1404 struct thread_ctx_regs *sp_regs) 1405 { 1406 COMPILE_TIME_ASSERT(sizeof(sp_regs->x[0]) == sizeof(regs->x0)); 1407 memcpy(sp_regs->x, ®s->x0, 31 * sizeof(regs->x0)); 1408 sp_regs->pc = regs->elr; 1409 sp_regs->sp = regs->sp_el0; 1410 } 1411 #endif 1412 1413 static bool sp_handle_scall(struct thread_scall_regs *regs) 1414 { 1415 struct ts_session *ts = ts_get_current_session(); 1416 struct sp_ctx *uctx = to_sp_ctx(ts->ctx); 1417 struct sp_session *s = uctx->open_session; 1418 1419 assert(s); 1420 1421 sp_svc_store_registers(regs, &uctx->sp_regs); 1422 1423 regs->x0 = 0; 1424 regs->x1 = 0; /* panic */ 1425 regs->x2 = 0; /* panic code */ 1426 1427 /* 1428 * All the registers of the SP are saved in the SP session by the SVC 1429 * handler. 1430 * We always return to S-El1 after handling the SVC. We will continue 1431 * in sp_enter_invoke_cmd() (return from __thread_enter_user_mode). 1432 * The sp_enter() function copies the FF-A parameters (a0-a7) from the 1433 * saved registers to the thread_smc_args. The thread_smc_args object is 1434 * afterward used by the spmc_sp_msg_handler() to handle the 1435 * FF-A message send by the SP. 1436 */ 1437 return false; 1438 } 1439 1440 static void sp_dump_state(struct ts_ctx *ctx) 1441 { 1442 struct sp_ctx *utc = to_sp_ctx(ctx); 1443 1444 if (utc->uctx.dump_entry_func) { 1445 TEE_Result res = ldelf_dump_state(&utc->uctx); 1446 1447 if (!res || res == TEE_ERROR_TARGET_DEAD) 1448 return; 1449 } 1450 1451 user_mode_ctx_print_mappings(&utc->uctx); 1452 } 1453 1454 static const struct ts_ops sp_ops = { 1455 .enter_invoke_cmd = sp_enter_invoke_cmd, 1456 .handle_scall = sp_handle_scall, 1457 .dump_state = sp_dump_state, 1458 }; 1459 1460 static TEE_Result process_sp_pkg(uint64_t sp_pkg_pa, TEE_UUID *sp_uuid) 1461 { 1462 enum teecore_memtypes mtype = MEM_AREA_TA_RAM; 1463 struct sp_pkg_header *sp_pkg_hdr = NULL; 1464 struct fip_sp *sp = NULL; 1465 uint64_t sp_fdt_end = 0; 1466 size_t sp_pkg_size = 0; 1467 vaddr_t sp_pkg_va = 0; 1468 1469 /* Process the first page which contains the SP package header */ 1470 sp_pkg_va = (vaddr_t)phys_to_virt(sp_pkg_pa, mtype, SMALL_PAGE_SIZE); 1471 if (!sp_pkg_va) { 1472 EMSG("Cannot find mapping for PA %#" PRIxPA, sp_pkg_pa); 1473 return TEE_ERROR_GENERIC; 1474 } 1475 1476 sp_pkg_hdr = (struct sp_pkg_header *)sp_pkg_va; 1477 1478 if (sp_pkg_hdr->magic != SP_PKG_HEADER_MAGIC) { 1479 EMSG("Invalid SP package magic"); 1480 return TEE_ERROR_BAD_FORMAT; 1481 } 1482 1483 if (sp_pkg_hdr->version != SP_PKG_HEADER_VERSION_V1 && 1484 sp_pkg_hdr->version != SP_PKG_HEADER_VERSION_V2) { 1485 EMSG("Invalid SP header version"); 1486 return TEE_ERROR_BAD_FORMAT; 1487 } 1488 1489 if (ADD_OVERFLOW(sp_pkg_hdr->img_offset, sp_pkg_hdr->img_size, 1490 &sp_pkg_size)) { 1491 EMSG("Invalid SP package size"); 1492 return TEE_ERROR_BAD_FORMAT; 1493 } 1494 1495 if (ADD_OVERFLOW(sp_pkg_hdr->pm_offset, sp_pkg_hdr->pm_size, 1496 &sp_fdt_end) || sp_fdt_end > sp_pkg_hdr->img_offset) { 1497 EMSG("Invalid SP manifest size"); 1498 return TEE_ERROR_BAD_FORMAT; 1499 } 1500 1501 /* Process the whole SP package now that the size is known */ 1502 sp_pkg_va = (vaddr_t)phys_to_virt(sp_pkg_pa, mtype, sp_pkg_size); 1503 if (!sp_pkg_va) { 1504 EMSG("Cannot find mapping for PA %#" PRIxPA, sp_pkg_pa); 1505 return TEE_ERROR_GENERIC; 1506 } 1507 1508 sp_pkg_hdr = (struct sp_pkg_header *)sp_pkg_va; 1509 1510 sp = calloc(1, sizeof(struct fip_sp)); 1511 if (!sp) 1512 return TEE_ERROR_OUT_OF_MEMORY; 1513 1514 memcpy(&sp->sp_img.image.uuid, sp_uuid, sizeof(*sp_uuid)); 1515 sp->sp_img.image.ts = (uint8_t *)(sp_pkg_va + sp_pkg_hdr->img_offset); 1516 sp->sp_img.image.size = sp_pkg_hdr->img_size; 1517 sp->sp_img.image.flags = 0; 1518 sp->sp_img.fdt = (uint8_t *)(sp_pkg_va + sp_pkg_hdr->pm_offset); 1519 1520 STAILQ_INSERT_TAIL(&fip_sp_list, sp, link); 1521 1522 return TEE_SUCCESS; 1523 } 1524 1525 static TEE_Result fip_sp_init_all(void) 1526 { 1527 TEE_Result res = TEE_SUCCESS; 1528 uint64_t sp_pkg_addr = 0; 1529 const void *fdt = NULL; 1530 TEE_UUID sp_uuid = { }; 1531 int sp_pkgs_node = 0; 1532 int subnode = 0; 1533 int root = 0; 1534 1535 fdt = get_tos_fw_config_dt(); 1536 if (!fdt) { 1537 EMSG("No SPMC manifest found"); 1538 return TEE_ERROR_GENERIC; 1539 } 1540 1541 root = fdt_path_offset(fdt, "/"); 1542 if (root < 0) 1543 return TEE_ERROR_BAD_FORMAT; 1544 1545 if (fdt_node_check_compatible(fdt, root, "arm,ffa-core-manifest-1.0")) 1546 return TEE_ERROR_BAD_FORMAT; 1547 1548 /* SP packages are optional, it's not an error if we don't find any */ 1549 sp_pkgs_node = fdt_node_offset_by_compatible(fdt, root, "arm,sp_pkg"); 1550 if (sp_pkgs_node < 0) 1551 return TEE_SUCCESS; 1552 1553 fdt_for_each_subnode(subnode, fdt, sp_pkgs_node) { 1554 res = sp_dt_get_u64(fdt, subnode, "load-address", &sp_pkg_addr); 1555 if (res) { 1556 EMSG("Invalid FIP SP load address"); 1557 return res; 1558 } 1559 1560 res = sp_dt_get_uuid(fdt, subnode, "uuid", &sp_uuid); 1561 if (res) { 1562 EMSG("Invalid FIP SP uuid"); 1563 return res; 1564 } 1565 1566 res = process_sp_pkg(sp_pkg_addr, &sp_uuid); 1567 if (res) { 1568 EMSG("Invalid FIP SP package"); 1569 return res; 1570 } 1571 } 1572 1573 return TEE_SUCCESS; 1574 } 1575 1576 static void fip_sp_deinit_all(void) 1577 { 1578 while (!STAILQ_EMPTY(&fip_sp_list)) { 1579 struct fip_sp *sp = STAILQ_FIRST(&fip_sp_list); 1580 1581 STAILQ_REMOVE_HEAD(&fip_sp_list, link); 1582 free(sp); 1583 } 1584 } 1585 1586 static TEE_Result sp_init_all(void) 1587 { 1588 TEE_Result res = TEE_SUCCESS; 1589 const struct sp_image *sp = NULL; 1590 const struct fip_sp *fip_sp = NULL; 1591 char __maybe_unused msg[60] = { '\0', }; 1592 struct sp_session *s = NULL; 1593 1594 for_each_secure_partition(sp) { 1595 if (sp->image.uncompressed_size) 1596 snprintf(msg, sizeof(msg), 1597 " (compressed, uncompressed %u)", 1598 sp->image.uncompressed_size); 1599 else 1600 msg[0] = '\0'; 1601 DMSG("SP %pUl size %u%s", (void *)&sp->image.uuid, 1602 sp->image.size, msg); 1603 1604 res = sp_init_uuid(&sp->image.uuid, sp->fdt); 1605 1606 if (res != TEE_SUCCESS) { 1607 EMSG("Failed initializing SP(%pUl) err:%#"PRIx32, 1608 &sp->image.uuid, res); 1609 if (!IS_ENABLED(CFG_SP_SKIP_FAILED)) 1610 panic(); 1611 } 1612 } 1613 1614 res = fip_sp_init_all(); 1615 if (res) 1616 panic("Failed initializing FIP SPs"); 1617 1618 for_each_fip_sp(fip_sp) { 1619 sp = &fip_sp->sp_img; 1620 1621 DMSG("SP %pUl size %u", (void *)&sp->image.uuid, 1622 sp->image.size); 1623 1624 res = sp_init_uuid(&sp->image.uuid, sp->fdt); 1625 1626 if (res != TEE_SUCCESS) { 1627 EMSG("Failed initializing SP(%pUl) err:%#"PRIx32, 1628 &sp->image.uuid, res); 1629 if (!IS_ENABLED(CFG_SP_SKIP_FAILED)) 1630 panic(); 1631 } 1632 } 1633 1634 /* 1635 * At this point all FIP SPs are loaded by ldelf or by the raw binary SP 1636 * loader, so the original images (loaded by BL2) are not needed anymore 1637 */ 1638 fip_sp_deinit_all(); 1639 1640 /* Continue the initialization and run the SP */ 1641 TAILQ_FOREACH(s, &open_sp_sessions, link) { 1642 res = sp_first_run(s); 1643 if (res != TEE_SUCCESS) { 1644 EMSG("Failed starting SP(0x%"PRIx16") err:%#"PRIx32, 1645 s->endpoint_id, res); 1646 if (!IS_ENABLED(CFG_SP_SKIP_FAILED)) 1647 panic(); 1648 } 1649 } 1650 1651 return TEE_SUCCESS; 1652 } 1653 1654 boot_final(sp_init_all); 1655 1656 static TEE_Result secure_partition_open(const TEE_UUID *uuid, 1657 struct ts_store_handle **h) 1658 { 1659 return emb_ts_open(uuid, h, find_secure_partition); 1660 } 1661 1662 REGISTER_SP_STORE(2) = { 1663 .description = "SP store", 1664 .open = secure_partition_open, 1665 .get_size = emb_ts_get_size, 1666 .get_tag = emb_ts_get_tag, 1667 .read = emb_ts_read, 1668 .close = emb_ts_close, 1669 }; 1670