1 // SPDX-License-Identifier: BSD-2-Clause 2 /* 3 * Copyright (c) 2020-2023, Arm Limited. 4 */ 5 #include <bench.h> 6 #include <crypto/crypto.h> 7 #include <initcall.h> 8 #include <kernel/boot.h> 9 #include <kernel/embedded_ts.h> 10 #include <kernel/ldelf_loader.h> 11 #include <kernel/secure_partition.h> 12 #include <kernel/spinlock.h> 13 #include <kernel/spmc_sp_handler.h> 14 #include <kernel/thread_private.h> 15 #include <kernel/thread_spmc.h> 16 #include <kernel/tpm.h> 17 #include <kernel/ts_store.h> 18 #include <ldelf.h> 19 #include <libfdt.h> 20 #include <mm/core_mmu.h> 21 #include <mm/fobj.h> 22 #include <mm/mobj.h> 23 #include <mm/vm.h> 24 #include <optee_ffa.h> 25 #include <stdio.h> 26 #include <string.h> 27 #include <tee_api_types.h> 28 #include <tee/uuid.h> 29 #include <trace.h> 30 #include <types_ext.h> 31 #include <utee_defines.h> 32 #include <util.h> 33 #include <zlib.h> 34 35 #define BOUNCE_BUFFER_SIZE 4096 36 37 #define SP_MANIFEST_ATTR_READ BIT(0) 38 #define SP_MANIFEST_ATTR_WRITE BIT(1) 39 #define SP_MANIFEST_ATTR_EXEC BIT(2) 40 #define SP_MANIFEST_ATTR_NSEC BIT(3) 41 42 #define SP_MANIFEST_ATTR_RO (SP_MANIFEST_ATTR_READ) 43 #define SP_MANIFEST_ATTR_RW (SP_MANIFEST_ATTR_READ | \ 44 SP_MANIFEST_ATTR_WRITE) 45 #define SP_MANIFEST_ATTR_RX (SP_MANIFEST_ATTR_READ | \ 46 SP_MANIFEST_ATTR_EXEC) 47 #define SP_MANIFEST_ATTR_RWX (SP_MANIFEST_ATTR_READ | \ 48 SP_MANIFEST_ATTR_WRITE | \ 49 SP_MANIFEST_ATTR_EXEC) 50 51 #define SP_MANIFEST_FLAG_NOBITS BIT(0) 52 53 #define SP_MANIFEST_NS_INT_QUEUED (0x0) 54 #define SP_MANIFEST_NS_INT_MANAGED_EXIT (0x1) 55 #define SP_MANIFEST_NS_INT_SIGNALED (0x2) 56 57 #define SP_PKG_HEADER_MAGIC (0x474b5053) 58 #define SP_PKG_HEADER_VERSION_V1 (0x1) 59 #define SP_PKG_HEADER_VERSION_V2 (0x2) 60 61 struct sp_pkg_header { 62 uint32_t magic; 63 uint32_t version; 64 uint32_t pm_offset; 65 uint32_t pm_size; 66 uint32_t img_offset; 67 uint32_t img_size; 68 }; 69 70 struct fip_sp_head fip_sp_list = STAILQ_HEAD_INITIALIZER(fip_sp_list); 71 72 static const struct ts_ops sp_ops; 73 74 /* List that holds all of the loaded SP's */ 75 static struct sp_sessions_head open_sp_sessions = 76 TAILQ_HEAD_INITIALIZER(open_sp_sessions); 77 78 static const struct embedded_ts *find_secure_partition(const TEE_UUID *uuid) 79 { 80 const struct sp_image *sp = NULL; 81 const struct fip_sp *fip_sp = NULL; 82 83 for_each_secure_partition(sp) { 84 if (!memcmp(&sp->image.uuid, uuid, sizeof(*uuid))) 85 return &sp->image; 86 } 87 88 for_each_fip_sp(fip_sp) { 89 if (!memcmp(&fip_sp->sp_img.image.uuid, uuid, sizeof(*uuid))) 90 return &fip_sp->sp_img.image; 91 } 92 93 return NULL; 94 } 95 96 bool is_sp_ctx(struct ts_ctx *ctx) 97 { 98 return ctx && (ctx->ops == &sp_ops); 99 } 100 101 static void set_sp_ctx_ops(struct ts_ctx *ctx) 102 { 103 ctx->ops = &sp_ops; 104 } 105 106 struct sp_session *sp_get_session(uint32_t session_id) 107 { 108 struct sp_session *s = NULL; 109 110 TAILQ_FOREACH(s, &open_sp_sessions, link) { 111 if (s->endpoint_id == session_id) 112 return s; 113 } 114 115 return NULL; 116 } 117 118 TEE_Result sp_partition_info_get(uint32_t ffa_vers, void *buf, size_t buf_size, 119 const TEE_UUID *ffa_uuid, size_t *elem_count, 120 bool count_only) 121 { 122 TEE_Result res = TEE_SUCCESS; 123 uint32_t part_props = FFA_PART_PROP_DIRECT_REQ_RECV | 124 FFA_PART_PROP_DIRECT_REQ_SEND; 125 struct sp_session *s = NULL; 126 127 TAILQ_FOREACH(s, &open_sp_sessions, link) { 128 if (ffa_uuid && 129 memcmp(&s->ffa_uuid, ffa_uuid, sizeof(*ffa_uuid))) 130 continue; 131 132 if (s->state == sp_dead) 133 continue; 134 if (!count_only && !res) { 135 uint32_t uuid_words[4] = { 0 }; 136 137 tee_uuid_to_octets((uint8_t *)uuid_words, &s->ffa_uuid); 138 res = spmc_fill_partition_entry(ffa_vers, buf, buf_size, 139 *elem_count, 140 s->endpoint_id, 1, 141 part_props, uuid_words); 142 } 143 *elem_count += 1; 144 } 145 146 return res; 147 } 148 149 bool sp_has_exclusive_access(struct sp_mem_map_region *mem, 150 struct user_mode_ctx *uctx) 151 { 152 /* 153 * Check that we have access to the region if it is supposed to be 154 * mapped to the current context. 155 */ 156 if (uctx) { 157 struct vm_region *region = NULL; 158 159 /* Make sure that each mobj belongs to the SP */ 160 TAILQ_FOREACH(region, &uctx->vm_info.regions, link) { 161 if (region->mobj == mem->mobj) 162 break; 163 } 164 165 if (!region) 166 return false; 167 } 168 169 /* Check that it is not shared with another SP */ 170 return !sp_mem_is_shared(mem); 171 } 172 173 static uint16_t new_session_id(struct sp_sessions_head *open_sessions) 174 { 175 struct sp_session *last = NULL; 176 uint16_t id = SPMC_ENDPOINT_ID + 1; 177 178 last = TAILQ_LAST(open_sessions, sp_sessions_head); 179 if (last) 180 id = last->endpoint_id + 1; 181 182 assert(id > SPMC_ENDPOINT_ID); 183 return id; 184 } 185 186 static TEE_Result sp_create_ctx(const TEE_UUID *bin_uuid, struct sp_session *s) 187 { 188 TEE_Result res = TEE_SUCCESS; 189 struct sp_ctx *spc = NULL; 190 191 /* Register context */ 192 spc = calloc(1, sizeof(struct sp_ctx)); 193 if (!spc) 194 return TEE_ERROR_OUT_OF_MEMORY; 195 196 spc->open_session = s; 197 s->ts_sess.ctx = &spc->ts_ctx; 198 spc->ts_ctx.uuid = *bin_uuid; 199 200 res = vm_info_init(&spc->uctx, &spc->ts_ctx); 201 if (res) 202 goto err; 203 204 set_sp_ctx_ops(&spc->ts_ctx); 205 206 return TEE_SUCCESS; 207 208 err: 209 free(spc); 210 return res; 211 } 212 213 static TEE_Result sp_create_session(struct sp_sessions_head *open_sessions, 214 const TEE_UUID *bin_uuid, 215 struct sp_session **sess) 216 { 217 TEE_Result res = TEE_SUCCESS; 218 struct sp_session *s = calloc(1, sizeof(struct sp_session)); 219 220 if (!s) 221 return TEE_ERROR_OUT_OF_MEMORY; 222 223 s->endpoint_id = new_session_id(open_sessions); 224 if (!s->endpoint_id) { 225 res = TEE_ERROR_OVERFLOW; 226 goto err; 227 } 228 229 DMSG("Loading Secure Partition %pUl", (void *)bin_uuid); 230 res = sp_create_ctx(bin_uuid, s); 231 if (res) 232 goto err; 233 234 TAILQ_INSERT_TAIL(open_sessions, s, link); 235 *sess = s; 236 return TEE_SUCCESS; 237 238 err: 239 free(s); 240 return res; 241 } 242 243 static TEE_Result sp_init_set_registers(struct sp_ctx *ctx) 244 { 245 struct thread_ctx_regs *sp_regs = &ctx->sp_regs; 246 247 memset(sp_regs, 0, sizeof(*sp_regs)); 248 sp_regs->sp = ctx->uctx.stack_ptr; 249 sp_regs->pc = ctx->uctx.entry_func; 250 251 return TEE_SUCCESS; 252 } 253 254 TEE_Result sp_map_shared(struct sp_session *s, 255 struct sp_mem_receiver *receiver, 256 struct sp_mem *smem, 257 uint64_t *va) 258 { 259 TEE_Result res = TEE_SUCCESS; 260 struct sp_ctx *ctx = NULL; 261 uint32_t perm = TEE_MATTR_UR; 262 struct sp_mem_map_region *reg = NULL; 263 264 ctx = to_sp_ctx(s->ts_sess.ctx); 265 266 /* Get the permission */ 267 if (receiver->perm.perm & FFA_MEM_ACC_EXE) 268 perm |= TEE_MATTR_UX; 269 270 if (receiver->perm.perm & FFA_MEM_ACC_RW) { 271 if (receiver->perm.perm & FFA_MEM_ACC_EXE) 272 return TEE_ERROR_ACCESS_CONFLICT; 273 274 perm |= TEE_MATTR_UW; 275 } 276 /* 277 * Currently we don't support passing a va. We can't guarantee that the 278 * full region will be mapped in a contiguous region. A smem->region can 279 * have multiple mobj for one share. Currently there doesn't seem to be 280 * an option to guarantee that these will be mapped in a contiguous va 281 * space. 282 */ 283 if (*va) 284 return TEE_ERROR_NOT_SUPPORTED; 285 286 SLIST_FOREACH(reg, &smem->regions, link) { 287 res = vm_map(&ctx->uctx, va, reg->page_count * SMALL_PAGE_SIZE, 288 perm, 0, reg->mobj, reg->page_offset); 289 290 if (res != TEE_SUCCESS) { 291 EMSG("Failed to map memory region %#"PRIx32, res); 292 return res; 293 } 294 } 295 return TEE_SUCCESS; 296 } 297 298 TEE_Result sp_unmap_ffa_regions(struct sp_session *s, struct sp_mem *smem) 299 { 300 TEE_Result res = TEE_SUCCESS; 301 vaddr_t vaddr = 0; 302 size_t len = 0; 303 struct sp_ctx *ctx = to_sp_ctx(s->ts_sess.ctx); 304 struct sp_mem_map_region *reg = NULL; 305 306 SLIST_FOREACH(reg, &smem->regions, link) { 307 vaddr = (vaddr_t)sp_mem_get_va(&ctx->uctx, reg->page_offset, 308 reg->mobj); 309 len = reg->page_count * SMALL_PAGE_SIZE; 310 311 res = vm_unmap(&ctx->uctx, vaddr, len); 312 if (res != TEE_SUCCESS) 313 return res; 314 } 315 316 return TEE_SUCCESS; 317 } 318 319 static TEE_Result sp_dt_get_u64(const void *fdt, int node, const char *property, 320 uint64_t *value) 321 { 322 const fdt64_t *p = NULL; 323 int len = 0; 324 325 p = fdt_getprop(fdt, node, property, &len); 326 if (!p) 327 return TEE_ERROR_ITEM_NOT_FOUND; 328 329 if (len != sizeof(*p)) 330 return TEE_ERROR_BAD_FORMAT; 331 332 *value = fdt64_ld(p); 333 334 return TEE_SUCCESS; 335 } 336 337 static TEE_Result sp_dt_get_u32(const void *fdt, int node, const char *property, 338 uint32_t *value) 339 { 340 const fdt32_t *p = NULL; 341 int len = 0; 342 343 p = fdt_getprop(fdt, node, property, &len); 344 if (!p) 345 return TEE_ERROR_ITEM_NOT_FOUND; 346 347 if (len != sizeof(*p)) 348 return TEE_ERROR_BAD_FORMAT; 349 350 *value = fdt32_to_cpu(*p); 351 352 return TEE_SUCCESS; 353 } 354 355 static TEE_Result sp_dt_get_uuid(const void *fdt, int node, 356 const char *property, TEE_UUID *uuid) 357 { 358 uint32_t uuid_array[4] = { 0 }; 359 const fdt32_t *p = NULL; 360 int len = 0; 361 int i = 0; 362 363 p = fdt_getprop(fdt, node, property, &len); 364 if (!p) 365 return TEE_ERROR_ITEM_NOT_FOUND; 366 367 if (len != sizeof(TEE_UUID)) 368 return TEE_ERROR_BAD_FORMAT; 369 370 for (i = 0; i < 4; i++) 371 uuid_array[i] = fdt32_to_cpu(p[i]); 372 373 tee_uuid_from_octets(uuid, (uint8_t *)uuid_array); 374 375 return TEE_SUCCESS; 376 } 377 378 static TEE_Result sp_is_elf_format(const void *fdt, int sp_node, 379 bool *is_elf_format) 380 { 381 TEE_Result res = TEE_SUCCESS; 382 uint32_t elf_format = 0; 383 384 res = sp_dt_get_u32(fdt, sp_node, "elf-format", &elf_format); 385 if (res != TEE_SUCCESS && res != TEE_ERROR_ITEM_NOT_FOUND) 386 return res; 387 388 *is_elf_format = (elf_format != 0); 389 390 return TEE_SUCCESS; 391 } 392 393 static TEE_Result sp_binary_open(const TEE_UUID *uuid, 394 const struct ts_store_ops **ops, 395 struct ts_store_handle **handle) 396 { 397 TEE_Result res = TEE_ERROR_ITEM_NOT_FOUND; 398 399 SCATTERED_ARRAY_FOREACH(*ops, sp_stores, struct ts_store_ops) { 400 res = (*ops)->open(uuid, handle); 401 if (res != TEE_ERROR_ITEM_NOT_FOUND && 402 res != TEE_ERROR_STORAGE_NOT_AVAILABLE) 403 break; 404 } 405 406 return res; 407 } 408 409 static TEE_Result load_binary_sp(struct ts_session *s, 410 struct user_mode_ctx *uctx) 411 { 412 size_t bin_size = 0, bin_size_rounded = 0, bin_page_count = 0; 413 size_t bb_size = ROUNDUP(BOUNCE_BUFFER_SIZE, SMALL_PAGE_SIZE); 414 size_t bb_num_pages = bb_size / SMALL_PAGE_SIZE; 415 const struct ts_store_ops *store_ops = NULL; 416 struct ts_store_handle *handle = NULL; 417 TEE_Result res = TEE_SUCCESS; 418 tee_mm_entry_t *mm = NULL; 419 struct fobj *fobj = NULL; 420 struct mobj *mobj = NULL; 421 uaddr_t base_addr = 0; 422 uint32_t vm_flags = 0; 423 unsigned int idx = 0; 424 vaddr_t va = 0; 425 426 if (!s || !uctx) 427 return TEE_ERROR_BAD_PARAMETERS; 428 429 DMSG("Loading raw binary format SP %pUl", &uctx->ts_ctx->uuid); 430 431 /* Initialize the bounce buffer */ 432 fobj = fobj_sec_mem_alloc(bb_num_pages); 433 mobj = mobj_with_fobj_alloc(fobj, NULL, TEE_MATTR_MEM_TYPE_TAGGED); 434 fobj_put(fobj); 435 if (!mobj) 436 return TEE_ERROR_OUT_OF_MEMORY; 437 438 res = vm_map(uctx, &va, bb_size, TEE_MATTR_PRW, 0, mobj, 0); 439 mobj_put(mobj); 440 if (res) 441 return res; 442 443 uctx->bbuf = (uint8_t *)va; 444 uctx->bbuf_size = BOUNCE_BUFFER_SIZE; 445 446 vm_set_ctx(uctx->ts_ctx); 447 448 /* Find TS store and open SP binary */ 449 res = sp_binary_open(&uctx->ts_ctx->uuid, &store_ops, &handle); 450 if (res != TEE_SUCCESS) { 451 EMSG("Failed to open SP binary"); 452 return res; 453 } 454 455 /* Query binary size and calculate page count */ 456 res = store_ops->get_size(handle, &bin_size); 457 if (res != TEE_SUCCESS) 458 goto err; 459 460 if (ROUNDUP_OVERFLOW(bin_size, SMALL_PAGE_SIZE, &bin_size_rounded)) { 461 res = TEE_ERROR_OVERFLOW; 462 goto err; 463 } 464 465 bin_page_count = bin_size_rounded / SMALL_PAGE_SIZE; 466 467 /* Allocate memory */ 468 mm = tee_mm_alloc(&tee_mm_sec_ddr, bin_size_rounded); 469 if (!mm) { 470 res = TEE_ERROR_OUT_OF_MEMORY; 471 goto err; 472 } 473 474 base_addr = tee_mm_get_smem(mm); 475 476 /* Create mobj */ 477 mobj = sp_mem_new_mobj(bin_page_count, TEE_MATTR_MEM_TYPE_CACHED, true); 478 if (!mobj) { 479 res = TEE_ERROR_OUT_OF_MEMORY; 480 goto err_free_tee_mm; 481 } 482 483 res = sp_mem_add_pages(mobj, &idx, base_addr, bin_page_count); 484 if (res) 485 goto err_free_mobj; 486 487 /* Map memory area for the SP binary */ 488 va = 0; 489 res = vm_map(uctx, &va, bin_size_rounded, TEE_MATTR_URWX, 490 vm_flags, mobj, 0); 491 if (res) 492 goto err_free_mobj; 493 494 /* Read SP binary into the previously mapped memory area */ 495 res = store_ops->read(handle, NULL, (void *)va, bin_size); 496 if (res) 497 goto err_unmap; 498 499 /* Set memory protection to allow execution */ 500 res = vm_set_prot(uctx, va, bin_size_rounded, TEE_MATTR_UX); 501 if (res) 502 goto err_unmap; 503 504 mobj_put(mobj); 505 store_ops->close(handle); 506 507 /* The entry point must be at the beginning of the SP binary. */ 508 uctx->entry_func = va; 509 uctx->load_addr = va; 510 uctx->is_32bit = false; 511 512 s->handle_scall = s->ctx->ops->handle_scall; 513 514 return TEE_SUCCESS; 515 516 err_unmap: 517 vm_unmap(uctx, va, bin_size_rounded); 518 519 err_free_mobj: 520 mobj_put(mobj); 521 522 err_free_tee_mm: 523 tee_mm_free(mm); 524 525 err: 526 store_ops->close(handle); 527 528 return res; 529 } 530 531 static TEE_Result sp_open_session(struct sp_session **sess, 532 struct sp_sessions_head *open_sessions, 533 const TEE_UUID *ffa_uuid, 534 const TEE_UUID *bin_uuid, 535 const void *fdt) 536 { 537 TEE_Result res = TEE_SUCCESS; 538 struct sp_session *s = NULL; 539 struct sp_ctx *ctx = NULL; 540 bool is_elf_format = false; 541 542 if (!find_secure_partition(bin_uuid)) 543 return TEE_ERROR_ITEM_NOT_FOUND; 544 545 res = sp_create_session(open_sessions, bin_uuid, &s); 546 if (res != TEE_SUCCESS) { 547 DMSG("sp_create_session failed %#"PRIx32, res); 548 return res; 549 } 550 551 ctx = to_sp_ctx(s->ts_sess.ctx); 552 assert(ctx); 553 if (!ctx) 554 return TEE_ERROR_TARGET_DEAD; 555 *sess = s; 556 557 ts_push_current_session(&s->ts_sess); 558 559 res = sp_is_elf_format(fdt, 0, &is_elf_format); 560 if (res == TEE_SUCCESS) { 561 if (is_elf_format) { 562 /* Load the SP using ldelf. */ 563 ldelf_load_ldelf(&ctx->uctx); 564 res = ldelf_init_with_ldelf(&s->ts_sess, &ctx->uctx); 565 } else { 566 /* Raw binary format SP */ 567 res = load_binary_sp(&s->ts_sess, &ctx->uctx); 568 } 569 } else { 570 EMSG("Failed to detect SP format"); 571 } 572 573 if (res != TEE_SUCCESS) { 574 EMSG("Failed loading SP %#"PRIx32, res); 575 ts_pop_current_session(); 576 return TEE_ERROR_TARGET_DEAD; 577 } 578 579 /* 580 * Make the SP ready for its first run. 581 * Set state to busy to prevent other endpoints from sending messages to 582 * the SP before its boot phase is done. 583 */ 584 s->state = sp_busy; 585 s->caller_id = 0; 586 sp_init_set_registers(ctx); 587 memcpy(&s->ffa_uuid, ffa_uuid, sizeof(*ffa_uuid)); 588 ts_pop_current_session(); 589 590 return TEE_SUCCESS; 591 } 592 593 static TEE_Result fdt_get_uuid(const void * const fdt, TEE_UUID *uuid) 594 { 595 const struct fdt_property *description = NULL; 596 int description_name_len = 0; 597 598 if (fdt_node_check_compatible(fdt, 0, "arm,ffa-manifest-1.0")) { 599 EMSG("Failed loading SP, manifest not found"); 600 return TEE_ERROR_BAD_PARAMETERS; 601 } 602 603 description = fdt_get_property(fdt, 0, "description", 604 &description_name_len); 605 if (description) 606 DMSG("Loading SP: %s", description->data); 607 608 if (sp_dt_get_uuid(fdt, 0, "uuid", uuid)) { 609 EMSG("Missing or invalid UUID in SP manifest"); 610 return TEE_ERROR_BAD_FORMAT; 611 } 612 613 return TEE_SUCCESS; 614 } 615 616 static TEE_Result copy_and_map_fdt(struct sp_ctx *ctx, const void * const fdt, 617 void **fdt_copy, size_t *mapped_size) 618 { 619 size_t total_size = ROUNDUP(fdt_totalsize(fdt), SMALL_PAGE_SIZE); 620 size_t num_pages = total_size / SMALL_PAGE_SIZE; 621 uint32_t perm = TEE_MATTR_UR | TEE_MATTR_PRW; 622 TEE_Result res = TEE_SUCCESS; 623 struct mobj *m = NULL; 624 struct fobj *f = NULL; 625 vaddr_t va = 0; 626 627 f = fobj_sec_mem_alloc(num_pages); 628 m = mobj_with_fobj_alloc(f, NULL, TEE_MATTR_MEM_TYPE_TAGGED); 629 fobj_put(f); 630 if (!m) 631 return TEE_ERROR_OUT_OF_MEMORY; 632 633 res = vm_map(&ctx->uctx, &va, total_size, perm, 0, m, 0); 634 mobj_put(m); 635 if (res) 636 return res; 637 638 if (fdt_open_into(fdt, (void *)va, total_size)) 639 return TEE_ERROR_GENERIC; 640 641 *fdt_copy = (void *)va; 642 *mapped_size = total_size; 643 644 return res; 645 } 646 647 static void fill_boot_info_1_0(vaddr_t buf, const void *fdt) 648 { 649 struct ffa_boot_info_1_0 *info = (struct ffa_boot_info_1_0 *)buf; 650 static const char fdt_name[16] = "TYPE_DT\0\0\0\0\0\0\0\0"; 651 652 memcpy(&info->magic, "FF-A", 4); 653 info->count = 1; 654 655 COMPILE_TIME_ASSERT(sizeof(info->nvp[0].name) == sizeof(fdt_name)); 656 memcpy(info->nvp[0].name, fdt_name, sizeof(fdt_name)); 657 info->nvp[0].value = (uintptr_t)fdt; 658 info->nvp[0].size = fdt_totalsize(fdt); 659 } 660 661 static void fill_boot_info_1_1(vaddr_t buf, const void *fdt) 662 { 663 size_t desc_offs = ROUNDUP(sizeof(struct ffa_boot_info_header_1_1), 8); 664 struct ffa_boot_info_header_1_1 *header = 665 (struct ffa_boot_info_header_1_1 *)buf; 666 struct ffa_boot_info_1_1 *desc = 667 (struct ffa_boot_info_1_1 *)(buf + desc_offs); 668 669 header->signature = FFA_BOOT_INFO_SIGNATURE; 670 header->version = FFA_BOOT_INFO_VERSION; 671 header->blob_size = desc_offs + sizeof(struct ffa_boot_info_1_1); 672 header->desc_size = sizeof(struct ffa_boot_info_1_1); 673 header->desc_count = 1; 674 header->desc_offset = desc_offs; 675 676 memset(&desc[0].name, 0, sizeof(desc[0].name)); 677 /* Type: Standard boot info (bit[7] == 0), FDT type */ 678 desc[0].type = FFA_BOOT_INFO_TYPE_ID_FDT; 679 /* Flags: Contents field contains an address */ 680 desc[0].flags = FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_ADDR << 681 FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_SHIFT; 682 desc[0].size = fdt_totalsize(fdt); 683 desc[0].contents = (uintptr_t)fdt; 684 } 685 686 static TEE_Result create_and_map_boot_info(struct sp_ctx *ctx, const void *fdt, 687 struct thread_smc_args *args, 688 vaddr_t *va, size_t *mapped_size) 689 { 690 size_t total_size = ROUNDUP(CFG_SP_INIT_INFO_MAX_SIZE, SMALL_PAGE_SIZE); 691 size_t num_pages = total_size / SMALL_PAGE_SIZE; 692 uint32_t perm = TEE_MATTR_UR | TEE_MATTR_PRW; 693 TEE_Result res = TEE_SUCCESS; 694 uint32_t sp_ffa_version = 0; 695 struct fobj *f = NULL; 696 struct mobj *m = NULL; 697 uint32_t info_reg = 0; 698 699 res = sp_dt_get_u32(fdt, 0, "ffa-version", &sp_ffa_version); 700 if (res) 701 return res; 702 703 f = fobj_sec_mem_alloc(num_pages); 704 m = mobj_with_fobj_alloc(f, NULL, TEE_MATTR_MEM_TYPE_TAGGED); 705 fobj_put(f); 706 if (!m) 707 return TEE_ERROR_OUT_OF_MEMORY; 708 709 res = vm_map(&ctx->uctx, va, total_size, perm, 0, m, 0); 710 mobj_put(m); 711 if (res) 712 return res; 713 714 *mapped_size = total_size; 715 716 switch (sp_ffa_version) { 717 case MAKE_FFA_VERSION(1, 0): 718 fill_boot_info_1_0(*va, fdt); 719 break; 720 case MAKE_FFA_VERSION(1, 1): 721 fill_boot_info_1_1(*va, fdt); 722 break; 723 default: 724 EMSG("Unknown FF-A version: %#"PRIx32, sp_ffa_version); 725 return TEE_ERROR_NOT_SUPPORTED; 726 } 727 728 res = sp_dt_get_u32(fdt, 0, "gp-register-num", &info_reg); 729 if (res) { 730 if (res == TEE_ERROR_ITEM_NOT_FOUND) { 731 /* If the property is not present, set default to x0 */ 732 info_reg = 0; 733 } else { 734 return TEE_ERROR_BAD_FORMAT; 735 } 736 } 737 738 switch (info_reg) { 739 case 0: 740 args->a0 = *va; 741 break; 742 case 1: 743 args->a1 = *va; 744 break; 745 case 2: 746 args->a2 = *va; 747 break; 748 case 3: 749 args->a3 = *va; 750 break; 751 default: 752 EMSG("Invalid register selected for passing boot info"); 753 return TEE_ERROR_BAD_FORMAT; 754 } 755 756 return TEE_SUCCESS; 757 } 758 759 static TEE_Result handle_fdt_load_relative_mem_regions(struct sp_ctx *ctx, 760 const void *fdt) 761 { 762 int node = 0; 763 int subnode = 0; 764 tee_mm_entry_t *mm = NULL; 765 TEE_Result res = TEE_SUCCESS; 766 767 /* 768 * Memory regions are optional in the SP manifest, it's not an error if 769 * we don't find any. 770 */ 771 node = fdt_node_offset_by_compatible(fdt, 0, 772 "arm,ffa-manifest-memory-regions"); 773 if (node < 0) 774 return TEE_SUCCESS; 775 776 fdt_for_each_subnode(subnode, fdt, node) { 777 uint64_t load_rel_offset = 0; 778 uint32_t attributes = 0; 779 uint64_t base_addr = 0; 780 uint32_t pages_cnt = 0; 781 uint32_t flags = 0; 782 uint32_t perm = 0; 783 size_t size = 0; 784 vaddr_t va = 0; 785 786 mm = NULL; 787 788 /* Load address relative offset of a memory region */ 789 if (!sp_dt_get_u64(fdt, subnode, "load-address-relative-offset", 790 &load_rel_offset)) { 791 va = ctx->uctx.load_addr + load_rel_offset; 792 } else { 793 /* Skip non load address relative memory regions */ 794 continue; 795 } 796 797 if (!sp_dt_get_u64(fdt, subnode, "base-address", &base_addr)) { 798 EMSG("Both base-address and load-address-relative-offset fields are set"); 799 return TEE_ERROR_BAD_FORMAT; 800 } 801 802 /* Size of memory region as count of 4K pages */ 803 if (sp_dt_get_u32(fdt, subnode, "pages-count", &pages_cnt)) { 804 EMSG("Mandatory field is missing: pages-count"); 805 return TEE_ERROR_BAD_FORMAT; 806 } 807 808 if (MUL_OVERFLOW(pages_cnt, SMALL_PAGE_SIZE, &size)) 809 return TEE_ERROR_OVERFLOW; 810 811 /* Memory region attributes */ 812 if (sp_dt_get_u32(fdt, subnode, "attributes", &attributes)) { 813 EMSG("Mandatory field is missing: attributes"); 814 return TEE_ERROR_BAD_FORMAT; 815 } 816 817 /* Check instruction and data access permissions */ 818 switch (attributes & SP_MANIFEST_ATTR_RWX) { 819 case SP_MANIFEST_ATTR_RO: 820 perm = TEE_MATTR_UR; 821 break; 822 case SP_MANIFEST_ATTR_RW: 823 perm = TEE_MATTR_URW; 824 break; 825 case SP_MANIFEST_ATTR_RX: 826 perm = TEE_MATTR_URX; 827 break; 828 default: 829 EMSG("Invalid memory access permissions"); 830 return TEE_ERROR_BAD_FORMAT; 831 } 832 833 res = sp_dt_get_u32(fdt, subnode, "load-flags", &flags); 834 if (res != TEE_SUCCESS && res != TEE_ERROR_ITEM_NOT_FOUND) { 835 EMSG("Optional field with invalid value: flags"); 836 return TEE_ERROR_BAD_FORMAT; 837 } 838 839 /* Load relative regions must be secure */ 840 if (attributes & SP_MANIFEST_ATTR_NSEC) { 841 EMSG("Invalid memory security attribute"); 842 return TEE_ERROR_BAD_FORMAT; 843 } 844 845 if (flags & SP_MANIFEST_FLAG_NOBITS) { 846 /* 847 * NOBITS flag is set, which means that loaded binary 848 * doesn't contain this area, so it's need to be 849 * allocated. 850 */ 851 struct mobj *m = NULL; 852 unsigned int idx = 0; 853 854 mm = tee_mm_alloc(&tee_mm_sec_ddr, size); 855 if (!mm) 856 return TEE_ERROR_OUT_OF_MEMORY; 857 858 base_addr = tee_mm_get_smem(mm); 859 860 m = sp_mem_new_mobj(pages_cnt, 861 TEE_MATTR_MEM_TYPE_CACHED, true); 862 if (!m) { 863 res = TEE_ERROR_OUT_OF_MEMORY; 864 goto err_mm_free; 865 } 866 867 res = sp_mem_add_pages(m, &idx, base_addr, pages_cnt); 868 if (res) { 869 mobj_put(m); 870 goto err_mm_free; 871 } 872 873 res = vm_map(&ctx->uctx, &va, size, perm, 0, m, 0); 874 mobj_put(m); 875 if (res) 876 goto err_mm_free; 877 } else { 878 /* 879 * If NOBITS is not present the memory area is already 880 * mapped and only need to set the correct permissions. 881 */ 882 res = vm_set_prot(&ctx->uctx, va, size, perm); 883 if (res) 884 return res; 885 } 886 } 887 888 return TEE_SUCCESS; 889 890 err_mm_free: 891 tee_mm_free(mm); 892 return res; 893 } 894 895 static TEE_Result handle_fdt_dev_regions(struct sp_ctx *ctx, void *fdt) 896 { 897 int node = 0; 898 int subnode = 0; 899 TEE_Result res = TEE_SUCCESS; 900 const char *dt_device_match_table = { 901 "arm,ffa-manifest-device-regions", 902 }; 903 904 /* 905 * Device regions are optional in the SP manifest, it's not an error if 906 * we don't find any 907 */ 908 node = fdt_node_offset_by_compatible(fdt, 0, dt_device_match_table); 909 if (node < 0) 910 return TEE_SUCCESS; 911 912 fdt_for_each_subnode(subnode, fdt, node) { 913 uint64_t base_addr = 0; 914 uint32_t pages_cnt = 0; 915 uint32_t attributes = 0; 916 struct mobj *m = NULL; 917 bool is_secure = true; 918 uint32_t perm = 0; 919 vaddr_t va = 0; 920 unsigned int idx = 0; 921 922 /* 923 * Physical base address of a device MMIO region. 924 * Currently only physically contiguous region is supported. 925 */ 926 if (sp_dt_get_u64(fdt, subnode, "base-address", &base_addr)) { 927 EMSG("Mandatory field is missing: base-address"); 928 return TEE_ERROR_BAD_FORMAT; 929 } 930 931 /* Total size of MMIO region as count of 4K pages */ 932 if (sp_dt_get_u32(fdt, subnode, "pages-count", &pages_cnt)) { 933 EMSG("Mandatory field is missing: pages-count"); 934 return TEE_ERROR_BAD_FORMAT; 935 } 936 937 /* Data access, instruction access and security attributes */ 938 if (sp_dt_get_u32(fdt, subnode, "attributes", &attributes)) { 939 EMSG("Mandatory field is missing: attributes"); 940 return TEE_ERROR_BAD_FORMAT; 941 } 942 943 /* Check instruction and data access permissions */ 944 switch (attributes & SP_MANIFEST_ATTR_RWX) { 945 case SP_MANIFEST_ATTR_RO: 946 perm = TEE_MATTR_UR; 947 break; 948 case SP_MANIFEST_ATTR_RW: 949 perm = TEE_MATTR_URW; 950 break; 951 default: 952 EMSG("Invalid memory access permissions"); 953 return TEE_ERROR_BAD_FORMAT; 954 } 955 956 /* 957 * The SP is a secure endpoint, security attribute can be 958 * secure or non-secure 959 */ 960 if (attributes & SP_MANIFEST_ATTR_NSEC) 961 is_secure = false; 962 963 /* Memory attributes must be Device-nGnRnE */ 964 m = sp_mem_new_mobj(pages_cnt, TEE_MATTR_MEM_TYPE_STRONGLY_O, 965 is_secure); 966 if (!m) 967 return TEE_ERROR_OUT_OF_MEMORY; 968 969 res = sp_mem_add_pages(m, &idx, (paddr_t)base_addr, pages_cnt); 970 if (res) { 971 mobj_put(m); 972 return res; 973 } 974 975 res = vm_map(&ctx->uctx, &va, pages_cnt * SMALL_PAGE_SIZE, 976 perm, 0, m, 0); 977 mobj_put(m); 978 if (res) 979 return res; 980 981 /* 982 * Overwrite the device region's PA in the fdt with the VA. This 983 * fdt will be passed to the SP. 984 */ 985 res = fdt_setprop_u64(fdt, subnode, "base-address", va); 986 987 /* 988 * Unmap the region if the overwrite failed since the SP won't 989 * be able to access it without knowing the VA. 990 */ 991 if (res) { 992 vm_unmap(&ctx->uctx, va, pages_cnt * SMALL_PAGE_SIZE); 993 return res; 994 } 995 } 996 997 return TEE_SUCCESS; 998 } 999 1000 static TEE_Result swap_sp_endpoints(uint32_t endpoint_id, 1001 uint32_t new_endpoint_id) 1002 { 1003 struct sp_session *session = sp_get_session(endpoint_id); 1004 uint32_t manifest_endpoint_id = 0; 1005 1006 /* 1007 * We don't know in which order the SPs are loaded. The endpoint ID 1008 * defined in the manifest could already be generated by 1009 * new_session_id() and used by another SP. If this is the case, we swap 1010 * the ID's of the two SPs. We also have to make sure that the ID's are 1011 * not defined twice in the manifest. 1012 */ 1013 1014 /* The endpoint ID was not assigned yet */ 1015 if (!session) 1016 return TEE_SUCCESS; 1017 1018 /* 1019 * Read the manifest file from the SP who originally had the endpoint. 1020 * We can safely swap the endpoint ID's if the manifest file doesn't 1021 * have an endpoint ID defined. 1022 */ 1023 if (!sp_dt_get_u32(session->fdt, 0, "id", &manifest_endpoint_id)) { 1024 assert(manifest_endpoint_id == endpoint_id); 1025 EMSG("SP: Found duplicated endpoint ID %#"PRIx32, endpoint_id); 1026 return TEE_ERROR_ACCESS_CONFLICT; 1027 } 1028 1029 session->endpoint_id = new_endpoint_id; 1030 1031 return TEE_SUCCESS; 1032 } 1033 1034 static TEE_Result read_manifest_endpoint_id(struct sp_session *s) 1035 { 1036 uint32_t endpoint_id = 0; 1037 1038 /* 1039 * The endpoint ID can be optionally defined in the manifest file. We 1040 * have to map the ID inside the manifest to the SP if it's defined. 1041 * If not, the endpoint ID generated inside new_session_id() will be 1042 * used. 1043 */ 1044 if (!sp_dt_get_u32(s->fdt, 0, "id", &endpoint_id)) { 1045 TEE_Result res = TEE_ERROR_GENERIC; 1046 1047 if (endpoint_id <= SPMC_ENDPOINT_ID) 1048 return TEE_ERROR_BAD_FORMAT; 1049 1050 res = swap_sp_endpoints(endpoint_id, s->endpoint_id); 1051 if (res) 1052 return res; 1053 1054 DMSG("SP: endpoint ID (0x%"PRIx32") found in manifest", 1055 endpoint_id); 1056 /* Assign the endpoint ID to the current SP */ 1057 s->endpoint_id = endpoint_id; 1058 } 1059 return TEE_SUCCESS; 1060 } 1061 1062 static TEE_Result handle_fdt_mem_regions(struct sp_ctx *ctx, void *fdt) 1063 { 1064 int node = 0; 1065 int subnode = 0; 1066 tee_mm_entry_t *mm = NULL; 1067 TEE_Result res = TEE_SUCCESS; 1068 1069 /* 1070 * Memory regions are optional in the SP manifest, it's not an error if 1071 * we don't find any. 1072 */ 1073 node = fdt_node_offset_by_compatible(fdt, 0, 1074 "arm,ffa-manifest-memory-regions"); 1075 if (node < 0) 1076 return TEE_SUCCESS; 1077 1078 fdt_for_each_subnode(subnode, fdt, node) { 1079 uint64_t load_rel_offset = 0; 1080 bool alloc_needed = false; 1081 uint32_t attributes = 0; 1082 uint64_t base_addr = 0; 1083 uint32_t pages_cnt = 0; 1084 bool is_secure = true; 1085 struct mobj *m = NULL; 1086 unsigned int idx = 0; 1087 uint32_t perm = 0; 1088 size_t size = 0; 1089 vaddr_t va = 0; 1090 1091 mm = NULL; 1092 1093 /* Load address relative offset of a memory region */ 1094 if (!sp_dt_get_u64(fdt, subnode, "load-address-relative-offset", 1095 &load_rel_offset)) { 1096 /* 1097 * At this point the memory region is already mapped by 1098 * handle_fdt_load_relative_mem_regions. 1099 * Only need to set the base-address in the manifest and 1100 * then skip the rest of the mapping process. 1101 */ 1102 va = ctx->uctx.load_addr + load_rel_offset; 1103 res = fdt_setprop_u64(fdt, subnode, "base-address", va); 1104 if (res) 1105 return res; 1106 1107 continue; 1108 } 1109 1110 /* 1111 * Base address of a memory region. 1112 * If not present, we have to allocate the specified memory. 1113 * If present, this field could specify a PA or VA. Currently 1114 * only a PA is supported. 1115 */ 1116 if (sp_dt_get_u64(fdt, subnode, "base-address", &base_addr)) 1117 alloc_needed = true; 1118 1119 /* Size of memory region as count of 4K pages */ 1120 if (sp_dt_get_u32(fdt, subnode, "pages-count", &pages_cnt)) { 1121 EMSG("Mandatory field is missing: pages-count"); 1122 return TEE_ERROR_BAD_FORMAT; 1123 } 1124 1125 if (MUL_OVERFLOW(pages_cnt, SMALL_PAGE_SIZE, &size)) 1126 return TEE_ERROR_OVERFLOW; 1127 1128 /* 1129 * Memory region attributes: 1130 * - Instruction/data access permissions 1131 * - Cacheability/shareability attributes 1132 * - Security attributes 1133 * 1134 * Cacheability/shareability attributes can be ignored for now. 1135 * OP-TEE only supports a single type for normal cached memory 1136 * and currently there is no use case that would require to 1137 * change this. 1138 */ 1139 if (sp_dt_get_u32(fdt, subnode, "attributes", &attributes)) { 1140 EMSG("Mandatory field is missing: attributes"); 1141 return TEE_ERROR_BAD_FORMAT; 1142 } 1143 1144 /* Check instruction and data access permissions */ 1145 switch (attributes & SP_MANIFEST_ATTR_RWX) { 1146 case SP_MANIFEST_ATTR_RO: 1147 perm = TEE_MATTR_UR; 1148 break; 1149 case SP_MANIFEST_ATTR_RW: 1150 perm = TEE_MATTR_URW; 1151 break; 1152 case SP_MANIFEST_ATTR_RX: 1153 perm = TEE_MATTR_URX; 1154 break; 1155 default: 1156 EMSG("Invalid memory access permissions"); 1157 return TEE_ERROR_BAD_FORMAT; 1158 } 1159 1160 /* 1161 * The SP is a secure endpoint, security attribute can be 1162 * secure or non-secure. 1163 * The SPMC cannot allocate non-secure memory, i.e. if the base 1164 * address is missing this attribute must be secure. 1165 */ 1166 if (attributes & SP_MANIFEST_ATTR_NSEC) { 1167 if (alloc_needed) { 1168 EMSG("Invalid memory security attribute"); 1169 return TEE_ERROR_BAD_FORMAT; 1170 } 1171 is_secure = false; 1172 } 1173 1174 if (alloc_needed) { 1175 /* Base address is missing, we have to allocate */ 1176 mm = tee_mm_alloc(&tee_mm_sec_ddr, size); 1177 if (!mm) 1178 return TEE_ERROR_OUT_OF_MEMORY; 1179 1180 base_addr = tee_mm_get_smem(mm); 1181 } 1182 1183 m = sp_mem_new_mobj(pages_cnt, TEE_MATTR_MEM_TYPE_CACHED, 1184 is_secure); 1185 if (!m) { 1186 res = TEE_ERROR_OUT_OF_MEMORY; 1187 goto err_mm_free; 1188 } 1189 1190 res = sp_mem_add_pages(m, &idx, base_addr, pages_cnt); 1191 if (res) { 1192 mobj_put(m); 1193 goto err_mm_free; 1194 } 1195 1196 res = vm_map(&ctx->uctx, &va, size, perm, 0, m, 0); 1197 mobj_put(m); 1198 if (res) 1199 goto err_mm_free; 1200 1201 /* 1202 * Overwrite the memory region's base address in the fdt with 1203 * the VA. This fdt will be passed to the SP. 1204 * If the base-address field was not present in the original 1205 * fdt, this function will create it. This doesn't cause issues 1206 * since the necessary extra space has been allocated when 1207 * opening the fdt. 1208 */ 1209 res = fdt_setprop_u64(fdt, subnode, "base-address", va); 1210 1211 /* 1212 * Unmap the region if the overwrite failed since the SP won't 1213 * be able to access it without knowing the VA. 1214 */ 1215 if (res) { 1216 vm_unmap(&ctx->uctx, va, size); 1217 goto err_mm_free; 1218 } 1219 } 1220 1221 return TEE_SUCCESS; 1222 1223 err_mm_free: 1224 tee_mm_free(mm); 1225 return res; 1226 } 1227 1228 static TEE_Result handle_tpm_event_log(struct sp_ctx *ctx, void *fdt) 1229 { 1230 uint32_t perm = TEE_MATTR_URW | TEE_MATTR_PRW; 1231 uint32_t dummy_size __maybe_unused = 0; 1232 TEE_Result res = TEE_SUCCESS; 1233 size_t page_count = 0; 1234 struct fobj *f = NULL; 1235 struct mobj *m = NULL; 1236 vaddr_t log_addr = 0; 1237 size_t log_size = 0; 1238 int node = 0; 1239 1240 node = fdt_node_offset_by_compatible(fdt, 0, "arm,tpm_event_log"); 1241 if (node < 0) 1242 return TEE_SUCCESS; 1243 1244 /* Checking the existence and size of the event log properties */ 1245 if (sp_dt_get_u64(fdt, node, "tpm_event_log_addr", &log_addr)) { 1246 EMSG("tpm_event_log_addr not found or has invalid size"); 1247 return TEE_ERROR_BAD_FORMAT; 1248 } 1249 1250 if (sp_dt_get_u32(fdt, node, "tpm_event_log_size", &dummy_size)) { 1251 EMSG("tpm_event_log_size not found or has invalid size"); 1252 return TEE_ERROR_BAD_FORMAT; 1253 } 1254 1255 /* Validating event log */ 1256 res = tpm_get_event_log_size(&log_size); 1257 if (res) 1258 return res; 1259 1260 if (!log_size) { 1261 EMSG("Empty TPM event log was provided"); 1262 return TEE_ERROR_ITEM_NOT_FOUND; 1263 } 1264 1265 /* Allocating memory area for the event log to share with the SP */ 1266 page_count = ROUNDUP_DIV(log_size, SMALL_PAGE_SIZE); 1267 1268 f = fobj_sec_mem_alloc(page_count); 1269 m = mobj_with_fobj_alloc(f, NULL, TEE_MATTR_MEM_TYPE_TAGGED); 1270 fobj_put(f); 1271 if (!m) 1272 return TEE_ERROR_OUT_OF_MEMORY; 1273 1274 res = vm_map(&ctx->uctx, &log_addr, log_size, perm, 0, m, 0); 1275 mobj_put(m); 1276 if (res) 1277 return res; 1278 1279 /* Copy event log */ 1280 res = tpm_get_event_log((void *)log_addr, &log_size); 1281 if (res) 1282 goto err_unmap; 1283 1284 /* Setting event log details in the manifest */ 1285 res = fdt_setprop_u64(fdt, node, "tpm_event_log_addr", log_addr); 1286 if (res) 1287 goto err_unmap; 1288 1289 res = fdt_setprop_u32(fdt, node, "tpm_event_log_size", log_size); 1290 if (res) 1291 goto err_unmap; 1292 1293 return TEE_SUCCESS; 1294 1295 err_unmap: 1296 vm_unmap(&ctx->uctx, log_addr, log_size); 1297 1298 return res; 1299 } 1300 1301 /* 1302 * Note: this function is called only on the primary CPU. It assumes that the 1303 * features present on the primary CPU are available on all of the secondary 1304 * CPUs as well. 1305 */ 1306 static TEE_Result handle_hw_features(void *fdt) 1307 { 1308 uint32_t val __maybe_unused = 0; 1309 TEE_Result res = TEE_SUCCESS; 1310 int node = 0; 1311 1312 /* 1313 * HW feature descriptions are optional in the SP manifest, it's not an 1314 * error if we don't find any. 1315 */ 1316 node = fdt_node_offset_by_compatible(fdt, 0, "arm,hw-features"); 1317 if (node < 0) 1318 return TEE_SUCCESS; 1319 1320 /* Modify the crc32 property only if it's already present */ 1321 if (!sp_dt_get_u32(fdt, node, "crc32", &val)) { 1322 res = fdt_setprop_u32(fdt, node, "crc32", 1323 feat_crc32_implemented()); 1324 if (res) 1325 return res; 1326 } 1327 1328 return TEE_SUCCESS; 1329 } 1330 1331 static TEE_Result read_ns_interrupts_action(const void *fdt, 1332 struct sp_session *s) 1333 { 1334 TEE_Result res = TEE_ERROR_BAD_PARAMETERS; 1335 1336 res = sp_dt_get_u32(fdt, 0, "ns-interrupts-action", &s->ns_int_mode); 1337 1338 if (res) { 1339 EMSG("Mandatory property is missing: ns-interrupts-action"); 1340 return res; 1341 } 1342 1343 switch (s->ns_int_mode) { 1344 case SP_MANIFEST_NS_INT_QUEUED: 1345 case SP_MANIFEST_NS_INT_SIGNALED: 1346 /* OK */ 1347 break; 1348 1349 case SP_MANIFEST_NS_INT_MANAGED_EXIT: 1350 EMSG("Managed exit is not implemented"); 1351 return TEE_ERROR_NOT_IMPLEMENTED; 1352 1353 default: 1354 EMSG("Invalid ns-interrupts-action value: %"PRIu32, 1355 s->ns_int_mode); 1356 return TEE_ERROR_BAD_PARAMETERS; 1357 } 1358 1359 return TEE_SUCCESS; 1360 } 1361 1362 static TEE_Result sp_init_uuid(const TEE_UUID *bin_uuid, const void * const fdt) 1363 { 1364 TEE_Result res = TEE_SUCCESS; 1365 struct sp_session *sess = NULL; 1366 TEE_UUID ffa_uuid = {}; 1367 1368 res = fdt_get_uuid(fdt, &ffa_uuid); 1369 if (res) 1370 return res; 1371 1372 res = sp_open_session(&sess, 1373 &open_sp_sessions, 1374 &ffa_uuid, bin_uuid, fdt); 1375 if (res) 1376 return res; 1377 1378 sess->fdt = fdt; 1379 res = read_manifest_endpoint_id(sess); 1380 if (res) 1381 return res; 1382 DMSG("endpoint is 0x%"PRIx16, sess->endpoint_id); 1383 1384 res = read_ns_interrupts_action(fdt, sess); 1385 if (res) 1386 return res; 1387 1388 return TEE_SUCCESS; 1389 } 1390 1391 static TEE_Result sp_first_run(struct sp_session *sess) 1392 { 1393 TEE_Result res = TEE_SUCCESS; 1394 struct thread_smc_args args = { }; 1395 struct sp_ctx *ctx = NULL; 1396 vaddr_t boot_info_va = 0; 1397 size_t boot_info_size = 0; 1398 void *fdt_copy = NULL; 1399 size_t fdt_size = 0; 1400 1401 ctx = to_sp_ctx(sess->ts_sess.ctx); 1402 ts_push_current_session(&sess->ts_sess); 1403 sess->is_initialized = false; 1404 1405 /* 1406 * Load relative memory regions must be handled before doing any other 1407 * mapping to prevent conflicts in the VA space. 1408 */ 1409 res = handle_fdt_load_relative_mem_regions(ctx, sess->fdt); 1410 if (res) { 1411 ts_pop_current_session(); 1412 return res; 1413 } 1414 1415 res = copy_and_map_fdt(ctx, sess->fdt, &fdt_copy, &fdt_size); 1416 if (res) 1417 goto out; 1418 1419 res = handle_fdt_dev_regions(ctx, fdt_copy); 1420 if (res) 1421 goto out; 1422 1423 res = handle_fdt_mem_regions(ctx, fdt_copy); 1424 if (res) 1425 goto out; 1426 1427 if (IS_ENABLED(CFG_CORE_TPM_EVENT_LOG)) { 1428 res = handle_tpm_event_log(ctx, fdt_copy); 1429 if (res) 1430 goto out; 1431 } 1432 1433 res = handle_hw_features(fdt_copy); 1434 if (res) 1435 goto out; 1436 1437 res = create_and_map_boot_info(ctx, fdt_copy, &args, &boot_info_va, 1438 &boot_info_size); 1439 if (res) 1440 goto out; 1441 1442 ts_pop_current_session(); 1443 1444 res = sp_enter(&args, sess); 1445 if (res) { 1446 ts_push_current_session(&sess->ts_sess); 1447 goto out; 1448 } 1449 1450 spmc_sp_msg_handler(&args, sess); 1451 1452 ts_push_current_session(&sess->ts_sess); 1453 sess->is_initialized = true; 1454 1455 out: 1456 /* Free the boot info page from the SP memory */ 1457 vm_unmap(&ctx->uctx, boot_info_va, boot_info_size); 1458 vm_unmap(&ctx->uctx, (vaddr_t)fdt_copy, fdt_size); 1459 ts_pop_current_session(); 1460 1461 return res; 1462 } 1463 1464 TEE_Result sp_enter(struct thread_smc_args *args, struct sp_session *sp) 1465 { 1466 TEE_Result res = TEE_SUCCESS; 1467 struct sp_ctx *ctx = to_sp_ctx(sp->ts_sess.ctx); 1468 1469 ctx->sp_regs.x[0] = args->a0; 1470 ctx->sp_regs.x[1] = args->a1; 1471 ctx->sp_regs.x[2] = args->a2; 1472 ctx->sp_regs.x[3] = args->a3; 1473 ctx->sp_regs.x[4] = args->a4; 1474 ctx->sp_regs.x[5] = args->a5; 1475 ctx->sp_regs.x[6] = args->a6; 1476 ctx->sp_regs.x[7] = args->a7; 1477 1478 res = sp->ts_sess.ctx->ops->enter_invoke_cmd(&sp->ts_sess, 0); 1479 1480 args->a0 = ctx->sp_regs.x[0]; 1481 args->a1 = ctx->sp_regs.x[1]; 1482 args->a2 = ctx->sp_regs.x[2]; 1483 args->a3 = ctx->sp_regs.x[3]; 1484 args->a4 = ctx->sp_regs.x[4]; 1485 args->a5 = ctx->sp_regs.x[5]; 1486 args->a6 = ctx->sp_regs.x[6]; 1487 args->a7 = ctx->sp_regs.x[7]; 1488 1489 return res; 1490 } 1491 1492 /* 1493 * According to FF-A v1.1 section 8.3.1.4 if a caller requires less permissive 1494 * active on NS interrupt than the callee, the callee must inherit the caller's 1495 * configuration. 1496 * Each SP's own NS action setting is stored in ns_int_mode. The effective 1497 * action will be MIN([self action], [caller's action]) which is stored in the 1498 * ns_int_mode_inherited field. 1499 */ 1500 static void sp_cpsr_configure_foreign_interrupts(struct sp_session *s, 1501 struct ts_session *caller, 1502 uint64_t *cpsr) 1503 { 1504 if (caller) { 1505 struct sp_session *caller_sp = to_sp_session(caller); 1506 1507 s->ns_int_mode_inherited = MIN(caller_sp->ns_int_mode_inherited, 1508 s->ns_int_mode); 1509 } else { 1510 s->ns_int_mode_inherited = s->ns_int_mode; 1511 } 1512 1513 if (s->ns_int_mode_inherited == SP_MANIFEST_NS_INT_QUEUED) 1514 *cpsr |= SHIFT_U32(THREAD_EXCP_FOREIGN_INTR, 1515 ARM32_CPSR_F_SHIFT); 1516 else 1517 *cpsr &= ~SHIFT_U32(THREAD_EXCP_FOREIGN_INTR, 1518 ARM32_CPSR_F_SHIFT); 1519 } 1520 1521 static TEE_Result sp_enter_invoke_cmd(struct ts_session *s, 1522 uint32_t cmd __unused) 1523 { 1524 struct sp_ctx *ctx = to_sp_ctx(s->ctx); 1525 TEE_Result res = TEE_SUCCESS; 1526 uint32_t exceptions = 0; 1527 struct sp_session *sp_s = to_sp_session(s); 1528 struct ts_session *sess = NULL; 1529 struct thread_ctx_regs *sp_regs = NULL; 1530 uint32_t thread_id = THREAD_ID_INVALID; 1531 struct ts_session *caller = NULL; 1532 uint32_t rpc_target_info = 0; 1533 uint32_t panicked = false; 1534 uint32_t panic_code = 0; 1535 1536 bm_timestamp(); 1537 1538 sp_regs = &ctx->sp_regs; 1539 ts_push_current_session(s); 1540 1541 exceptions = thread_mask_exceptions(THREAD_EXCP_ALL); 1542 1543 /* Enable/disable foreign interrupts in CPSR/SPSR */ 1544 caller = ts_get_calling_session(); 1545 sp_cpsr_configure_foreign_interrupts(sp_s, caller, &sp_regs->cpsr); 1546 1547 /* 1548 * Store endpoint ID and thread ID in rpc_target_info. This will be used 1549 * as w1 in FFA_INTERRUPT in case of a foreign interrupt. 1550 */ 1551 rpc_target_info = thread_get_tsd()->rpc_target_info; 1552 thread_id = thread_get_id(); 1553 assert(thread_id <= UINT16_MAX); 1554 thread_get_tsd()->rpc_target_info = 1555 FFA_TARGET_INFO_SET(sp_s->endpoint_id, thread_id); 1556 1557 __thread_enter_user_mode(sp_regs, &panicked, &panic_code); 1558 1559 /* Restore rpc_target_info */ 1560 thread_get_tsd()->rpc_target_info = rpc_target_info; 1561 1562 thread_unmask_exceptions(exceptions); 1563 1564 thread_user_clear_vfp(&ctx->uctx); 1565 1566 if (panicked) { 1567 DMSG("SP panicked with code %#"PRIx32, panic_code); 1568 abort_print_current_ts(); 1569 1570 sess = ts_pop_current_session(); 1571 cpu_spin_lock(&sp_s->spinlock); 1572 sp_s->state = sp_dead; 1573 cpu_spin_unlock(&sp_s->spinlock); 1574 1575 return TEE_ERROR_TARGET_DEAD; 1576 } 1577 1578 sess = ts_pop_current_session(); 1579 assert(sess == s); 1580 1581 bm_timestamp(); 1582 1583 return res; 1584 } 1585 1586 /* We currently don't support 32 bits */ 1587 #ifdef ARM64 1588 static void sp_svc_store_registers(struct thread_scall_regs *regs, 1589 struct thread_ctx_regs *sp_regs) 1590 { 1591 COMPILE_TIME_ASSERT(sizeof(sp_regs->x[0]) == sizeof(regs->x0)); 1592 memcpy(sp_regs->x, ®s->x0, 31 * sizeof(regs->x0)); 1593 sp_regs->pc = regs->elr; 1594 sp_regs->sp = regs->sp_el0; 1595 } 1596 #endif 1597 1598 static bool sp_handle_scall(struct thread_scall_regs *regs) 1599 { 1600 struct ts_session *ts = ts_get_current_session(); 1601 struct sp_ctx *uctx = to_sp_ctx(ts->ctx); 1602 struct sp_session *s = uctx->open_session; 1603 1604 assert(s); 1605 1606 sp_svc_store_registers(regs, &uctx->sp_regs); 1607 1608 regs->x0 = 0; 1609 regs->x1 = 0; /* panic */ 1610 regs->x2 = 0; /* panic code */ 1611 1612 /* 1613 * All the registers of the SP are saved in the SP session by the SVC 1614 * handler. 1615 * We always return to S-El1 after handling the SVC. We will continue 1616 * in sp_enter_invoke_cmd() (return from __thread_enter_user_mode). 1617 * The sp_enter() function copies the FF-A parameters (a0-a7) from the 1618 * saved registers to the thread_smc_args. The thread_smc_args object is 1619 * afterward used by the spmc_sp_msg_handler() to handle the 1620 * FF-A message send by the SP. 1621 */ 1622 return false; 1623 } 1624 1625 static void sp_dump_state(struct ts_ctx *ctx) 1626 { 1627 struct sp_ctx *utc = to_sp_ctx(ctx); 1628 1629 if (utc->uctx.dump_entry_func) { 1630 TEE_Result res = ldelf_dump_state(&utc->uctx); 1631 1632 if (!res || res == TEE_ERROR_TARGET_DEAD) 1633 return; 1634 } 1635 1636 user_mode_ctx_print_mappings(&utc->uctx); 1637 } 1638 1639 static const struct ts_ops sp_ops = { 1640 .enter_invoke_cmd = sp_enter_invoke_cmd, 1641 .handle_scall = sp_handle_scall, 1642 .dump_state = sp_dump_state, 1643 }; 1644 1645 static TEE_Result process_sp_pkg(uint64_t sp_pkg_pa, TEE_UUID *sp_uuid) 1646 { 1647 enum teecore_memtypes mtype = MEM_AREA_TA_RAM; 1648 struct sp_pkg_header *sp_pkg_hdr = NULL; 1649 struct fip_sp *sp = NULL; 1650 uint64_t sp_fdt_end = 0; 1651 size_t sp_pkg_size = 0; 1652 vaddr_t sp_pkg_va = 0; 1653 1654 /* Process the first page which contains the SP package header */ 1655 sp_pkg_va = (vaddr_t)phys_to_virt(sp_pkg_pa, mtype, SMALL_PAGE_SIZE); 1656 if (!sp_pkg_va) { 1657 EMSG("Cannot find mapping for PA %#" PRIxPA, sp_pkg_pa); 1658 return TEE_ERROR_GENERIC; 1659 } 1660 1661 sp_pkg_hdr = (struct sp_pkg_header *)sp_pkg_va; 1662 1663 if (sp_pkg_hdr->magic != SP_PKG_HEADER_MAGIC) { 1664 EMSG("Invalid SP package magic"); 1665 return TEE_ERROR_BAD_FORMAT; 1666 } 1667 1668 if (sp_pkg_hdr->version != SP_PKG_HEADER_VERSION_V1 && 1669 sp_pkg_hdr->version != SP_PKG_HEADER_VERSION_V2) { 1670 EMSG("Invalid SP header version"); 1671 return TEE_ERROR_BAD_FORMAT; 1672 } 1673 1674 if (ADD_OVERFLOW(sp_pkg_hdr->img_offset, sp_pkg_hdr->img_size, 1675 &sp_pkg_size)) { 1676 EMSG("Invalid SP package size"); 1677 return TEE_ERROR_BAD_FORMAT; 1678 } 1679 1680 if (ADD_OVERFLOW(sp_pkg_hdr->pm_offset, sp_pkg_hdr->pm_size, 1681 &sp_fdt_end) || sp_fdt_end > sp_pkg_hdr->img_offset) { 1682 EMSG("Invalid SP manifest size"); 1683 return TEE_ERROR_BAD_FORMAT; 1684 } 1685 1686 /* Process the whole SP package now that the size is known */ 1687 sp_pkg_va = (vaddr_t)phys_to_virt(sp_pkg_pa, mtype, sp_pkg_size); 1688 if (!sp_pkg_va) { 1689 EMSG("Cannot find mapping for PA %#" PRIxPA, sp_pkg_pa); 1690 return TEE_ERROR_GENERIC; 1691 } 1692 1693 sp_pkg_hdr = (struct sp_pkg_header *)sp_pkg_va; 1694 1695 sp = calloc(1, sizeof(struct fip_sp)); 1696 if (!sp) 1697 return TEE_ERROR_OUT_OF_MEMORY; 1698 1699 memcpy(&sp->sp_img.image.uuid, sp_uuid, sizeof(*sp_uuid)); 1700 sp->sp_img.image.ts = (uint8_t *)(sp_pkg_va + sp_pkg_hdr->img_offset); 1701 sp->sp_img.image.size = sp_pkg_hdr->img_size; 1702 sp->sp_img.image.flags = 0; 1703 sp->sp_img.fdt = (uint8_t *)(sp_pkg_va + sp_pkg_hdr->pm_offset); 1704 1705 STAILQ_INSERT_TAIL(&fip_sp_list, sp, link); 1706 1707 return TEE_SUCCESS; 1708 } 1709 1710 static TEE_Result fip_sp_init_all(void) 1711 { 1712 TEE_Result res = TEE_SUCCESS; 1713 uint64_t sp_pkg_addr = 0; 1714 const void *fdt = NULL; 1715 TEE_UUID sp_uuid = { }; 1716 int sp_pkgs_node = 0; 1717 int subnode = 0; 1718 int root = 0; 1719 1720 fdt = get_manifest_dt(); 1721 if (!fdt) { 1722 EMSG("No SPMC manifest found"); 1723 return TEE_ERROR_GENERIC; 1724 } 1725 1726 root = fdt_path_offset(fdt, "/"); 1727 if (root < 0) 1728 return TEE_ERROR_BAD_FORMAT; 1729 1730 if (fdt_node_check_compatible(fdt, root, "arm,ffa-core-manifest-1.0")) 1731 return TEE_ERROR_BAD_FORMAT; 1732 1733 /* SP packages are optional, it's not an error if we don't find any */ 1734 sp_pkgs_node = fdt_node_offset_by_compatible(fdt, root, "arm,sp_pkg"); 1735 if (sp_pkgs_node < 0) 1736 return TEE_SUCCESS; 1737 1738 fdt_for_each_subnode(subnode, fdt, sp_pkgs_node) { 1739 res = sp_dt_get_u64(fdt, subnode, "load-address", &sp_pkg_addr); 1740 if (res) { 1741 EMSG("Invalid FIP SP load address"); 1742 return res; 1743 } 1744 1745 res = sp_dt_get_uuid(fdt, subnode, "uuid", &sp_uuid); 1746 if (res) { 1747 EMSG("Invalid FIP SP uuid"); 1748 return res; 1749 } 1750 1751 res = process_sp_pkg(sp_pkg_addr, &sp_uuid); 1752 if (res) { 1753 EMSG("Invalid FIP SP package"); 1754 return res; 1755 } 1756 } 1757 1758 return TEE_SUCCESS; 1759 } 1760 1761 static void fip_sp_deinit_all(void) 1762 { 1763 while (!STAILQ_EMPTY(&fip_sp_list)) { 1764 struct fip_sp *sp = STAILQ_FIRST(&fip_sp_list); 1765 1766 STAILQ_REMOVE_HEAD(&fip_sp_list, link); 1767 free(sp); 1768 } 1769 } 1770 1771 static TEE_Result sp_init_all(void) 1772 { 1773 TEE_Result res = TEE_SUCCESS; 1774 const struct sp_image *sp = NULL; 1775 const struct fip_sp *fip_sp = NULL; 1776 char __maybe_unused msg[60] = { '\0', }; 1777 struct sp_session *s = NULL; 1778 1779 for_each_secure_partition(sp) { 1780 if (sp->image.uncompressed_size) 1781 snprintf(msg, sizeof(msg), 1782 " (compressed, uncompressed %u)", 1783 sp->image.uncompressed_size); 1784 else 1785 msg[0] = '\0'; 1786 DMSG("SP %pUl size %u%s", (void *)&sp->image.uuid, 1787 sp->image.size, msg); 1788 1789 res = sp_init_uuid(&sp->image.uuid, sp->fdt); 1790 1791 if (res != TEE_SUCCESS) { 1792 EMSG("Failed initializing SP(%pUl) err:%#"PRIx32, 1793 &sp->image.uuid, res); 1794 if (!IS_ENABLED(CFG_SP_SKIP_FAILED)) 1795 panic(); 1796 } 1797 } 1798 1799 res = fip_sp_init_all(); 1800 if (res) 1801 panic("Failed initializing FIP SPs"); 1802 1803 for_each_fip_sp(fip_sp) { 1804 sp = &fip_sp->sp_img; 1805 1806 DMSG("SP %pUl size %u", (void *)&sp->image.uuid, 1807 sp->image.size); 1808 1809 res = sp_init_uuid(&sp->image.uuid, sp->fdt); 1810 1811 if (res != TEE_SUCCESS) { 1812 EMSG("Failed initializing SP(%pUl) err:%#"PRIx32, 1813 &sp->image.uuid, res); 1814 if (!IS_ENABLED(CFG_SP_SKIP_FAILED)) 1815 panic(); 1816 } 1817 } 1818 1819 /* 1820 * At this point all FIP SPs are loaded by ldelf or by the raw binary SP 1821 * loader, so the original images (loaded by BL2) are not needed anymore 1822 */ 1823 fip_sp_deinit_all(); 1824 1825 /* Continue the initialization and run the SP */ 1826 TAILQ_FOREACH(s, &open_sp_sessions, link) { 1827 res = sp_first_run(s); 1828 if (res != TEE_SUCCESS) { 1829 EMSG("Failed starting SP(0x%"PRIx16") err:%#"PRIx32, 1830 s->endpoint_id, res); 1831 if (!IS_ENABLED(CFG_SP_SKIP_FAILED)) 1832 panic(); 1833 } 1834 } 1835 1836 return TEE_SUCCESS; 1837 } 1838 1839 boot_final(sp_init_all); 1840 1841 static TEE_Result secure_partition_open(const TEE_UUID *uuid, 1842 struct ts_store_handle **h) 1843 { 1844 return emb_ts_open(uuid, h, find_secure_partition); 1845 } 1846 1847 REGISTER_SP_STORE(2) = { 1848 .description = "SP store", 1849 .open = secure_partition_open, 1850 .get_size = emb_ts_get_size, 1851 .get_tag = emb_ts_get_tag, 1852 .read = emb_ts_read, 1853 .close = emb_ts_close, 1854 }; 1855