// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2020-2022, Arm Limited.
 */
#include <bench.h>
#include <crypto/crypto.h>
#include <initcall.h>
#include <kernel/boot.h>
#include <kernel/embedded_ts.h>
#include <kernel/ldelf_loader.h>
#include <kernel/secure_partition.h>
#include <kernel/spinlock.h>
#include <kernel/spmc_sp_handler.h>
#include <kernel/thread_private.h>
#include <kernel/thread_spmc.h>
#include <kernel/tpm.h>
#include <kernel/ts_store.h>
#include <ldelf.h>
#include <libfdt.h>
#include <mm/core_mmu.h>
#include <mm/fobj.h>
#include <mm/mobj.h>
#include <mm/vm.h>
#include <optee_ffa.h>
#include <stdio.h>
#include <string.h>
#include <tee_api_types.h>
#include <tee/uuid.h>
#include <trace.h>
#include <types_ext.h>
#include <utee_defines.h>
#include <util.h>
#include <zlib.h>

#define SP_MANIFEST_ATTR_READ	BIT(0)
#define SP_MANIFEST_ATTR_WRITE	BIT(1)
#define SP_MANIFEST_ATTR_EXEC	BIT(2)
#define SP_MANIFEST_ATTR_NSEC	BIT(3)

#define SP_MANIFEST_ATTR_RO	(SP_MANIFEST_ATTR_READ)
#define SP_MANIFEST_ATTR_RW	(SP_MANIFEST_ATTR_READ | \
				 SP_MANIFEST_ATTR_WRITE)
#define SP_MANIFEST_ATTR_RX	(SP_MANIFEST_ATTR_READ | \
				 SP_MANIFEST_ATTR_EXEC)
#define SP_MANIFEST_ATTR_RWX	(SP_MANIFEST_ATTR_READ | \
				 SP_MANIFEST_ATTR_WRITE | \
				 SP_MANIFEST_ATTR_EXEC)
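
/*
 * Example (illustrative): a manifest "attributes" value of 0x3 decodes to
 * SP_MANIFEST_ATTR_RW in secure memory, while 0xb (0x3 with
 * SP_MANIFEST_ATTR_NSEC set) requests the same access with the non-secure
 * attribute. The handle_fdt_*_regions() parsers below only accept RO, RW
 * (and, for memory regions, RX); a mapping that is both writable and
 * executable is always rejected.
 */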

#define SP_PKG_HEADER_MAGIC (0x474b5053)
#define SP_PKG_HEADER_VERSION (0x1)

struct sp_pkg_header {
	uint32_t magic;
	uint32_t version;
	uint32_t pm_offset;
	uint32_t pm_size;
	uint32_t img_offset;
	uint32_t img_size;
};
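
/*
 * Layout of an SP package as validated by process_sp_pkg() below (offsets
 * are relative to the start of the package; pm_offset + pm_size must not
 * run past img_offset):
 *
 *	+----------------------+ 0x0
 *	| struct sp_pkg_header |
 *	+----------------------+ pm_offset
 *	| partition manifest   | pm_size bytes
 *	+----------------------+ img_offset
 *	| SP image             | img_size bytes
 *	+----------------------+
 *
 * SP_PKG_HEADER_MAGIC is "SPKG" in little-endian byte order.
 */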

struct fip_sp_head fip_sp_list = STAILQ_HEAD_INITIALIZER(fip_sp_list);

const struct ts_ops sp_ops;

/* List that holds all of the loaded SPs */
static struct sp_sessions_head open_sp_sessions =
	TAILQ_HEAD_INITIALIZER(open_sp_sessions);

static const struct embedded_ts *find_secure_partition(const TEE_UUID *uuid)
{
	const struct sp_image *sp = NULL;
	const struct fip_sp *fip_sp = NULL;

	for_each_secure_partition(sp) {
		if (!memcmp(&sp->image.uuid, uuid, sizeof(*uuid)))
			return &sp->image;
	}

	for_each_fip_sp(fip_sp) {
		if (!memcmp(&fip_sp->sp_img.image.uuid, uuid, sizeof(*uuid)))
			return &fip_sp->sp_img.image;
	}

	return NULL;
}

bool is_sp_ctx(struct ts_ctx *ctx)
{
	return ctx && (ctx->ops == &sp_ops);
}

static void set_sp_ctx_ops(struct ts_ctx *ctx)
{
	ctx->ops = &sp_ops;
}

TEE_Result sp_find_session_id(const TEE_UUID *uuid, uint32_t *session_id)
{
	struct sp_session *s = NULL;

	TAILQ_FOREACH(s, &open_sp_sessions, link) {
		if (!memcmp(&s->ts_sess.ctx->uuid, uuid, sizeof(*uuid))) {
			if (s->state == sp_dead)
				return TEE_ERROR_TARGET_DEAD;

			*session_id = s->endpoint_id;
			return TEE_SUCCESS;
		}
	}

	return TEE_ERROR_ITEM_NOT_FOUND;
}

struct sp_session *sp_get_session(uint32_t session_id)
{
	struct sp_session *s = NULL;

	TAILQ_FOREACH(s, &open_sp_sessions, link) {
		if (s->endpoint_id == session_id)
			return s;
	}

	return NULL;
}

TEE_Result sp_partition_info_get_all(struct ffa_partition_info *fpi,
				     size_t *elem_count)
{
	size_t in_count = *elem_count;
	struct sp_session *s = NULL;
	size_t count = 0;

	TAILQ_FOREACH(s, &open_sp_sessions, link) {
		if (s->state == sp_dead)
			continue;
		if (count < in_count) {
			spmc_fill_partition_entry(fpi, s->endpoint_id, 1);
			fpi++;
		}
		count++;
	}

	*elem_count = count;
	if (count > in_count)
		return TEE_ERROR_SHORT_BUFFER;

	return TEE_SUCCESS;
}

bool sp_has_exclusive_access(struct sp_mem_map_region *mem,
			     struct user_mode_ctx *uctx)
{
	/*
	 * Check that we have access to the region if it is supposed to be
	 * mapped to the current context.
	 */
	if (uctx) {
		struct vm_region *region = NULL;

		/* Make sure that each mobj belongs to the SP */
		TAILQ_FOREACH(region, &uctx->vm_info.regions, link) {
			if (region->mobj == mem->mobj)
				break;
		}

		if (!region)
			return false;
	}

	/* Check that it is not shared with another SP */
	return !sp_mem_is_shared(mem);
}
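
/*
 * Endpoint IDs are handed out monotonically: the first session gets
 * SPMC_ENDPOINT_ID + 1 and each following one gets the ID of the newest
 * session plus one. The assert() below catches a wrap-around of the 16-bit
 * ID space.
 */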
static uint16_t new_session_id(struct sp_sessions_head *open_sessions)
{
	struct sp_session *last = NULL;
	uint16_t id = SPMC_ENDPOINT_ID + 1;

	last = TAILQ_LAST(open_sessions, sp_sessions_head);
	if (last)
		id = last->endpoint_id + 1;

	assert(id > SPMC_ENDPOINT_ID);
	return id;
}

static TEE_Result sp_create_ctx(const TEE_UUID *uuid, struct sp_session *s)
{
	TEE_Result res = TEE_SUCCESS;
	struct sp_ctx *spc = NULL;

	/* Register context */
	spc = calloc(1, sizeof(struct sp_ctx));
	if (!spc)
		return TEE_ERROR_OUT_OF_MEMORY;

	spc->uctx.ts_ctx = &spc->ts_ctx;
	spc->open_session = s;
	s->ts_sess.ctx = &spc->ts_ctx;
	spc->ts_ctx.uuid = *uuid;

	res = vm_info_init(&spc->uctx);
	if (res)
		goto err;

	set_sp_ctx_ops(&spc->ts_ctx);

	return TEE_SUCCESS;

err:
	free(spc);
	return res;
}

static TEE_Result sp_create_session(struct sp_sessions_head *open_sessions,
				    const TEE_UUID *uuid,
				    struct sp_session **sess)
{
	TEE_Result res = TEE_SUCCESS;
	struct sp_session *s = calloc(1, sizeof(struct sp_session));

	if (!s)
		return TEE_ERROR_OUT_OF_MEMORY;

	s->endpoint_id = new_session_id(open_sessions);
	if (!s->endpoint_id) {
		res = TEE_ERROR_OVERFLOW;
		goto err;
	}

	DMSG("Loading Secure Partition %pUl", (void *)uuid);
	res = sp_create_ctx(uuid, s);
	if (res)
		goto err;

	TAILQ_INSERT_TAIL(open_sessions, s, link);
	*sess = s;
	return TEE_SUCCESS;

err:
	free(s);
	return res;
}

static TEE_Result sp_init_set_registers(struct sp_ctx *ctx)
{
	struct thread_ctx_regs *sp_regs = &ctx->sp_regs;

	memset(sp_regs, 0, sizeof(*sp_regs));
	sp_regs->sp = ctx->uctx.stack_ptr;
	sp_regs->pc = ctx->uctx.entry_func;

	return TEE_SUCCESS;
}
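
/*
 * Map an FF-A memory share into the receiving SP. A share may consist of
 * several regions backed by different mobjs, so the caller cannot request a
 * fixed virtual address for the whole share (*va must be 0 on entry, see the
 * comment in the function body).
 */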
TEE_Result sp_map_shared(struct sp_session *s,
			 struct sp_mem_receiver *receiver,
			 struct sp_mem *smem,
			 uint64_t *va)
{
	TEE_Result res = TEE_SUCCESS;
	struct sp_ctx *ctx = NULL;
	uint32_t perm = TEE_MATTR_UR;
	struct sp_mem_map_region *reg = NULL;

	ctx = to_sp_ctx(s->ts_sess.ctx);

	/* Get the permission */
	if (receiver->perm.perm & FFA_MEM_ACC_EXE)
		perm |= TEE_MATTR_UX;

	if (receiver->perm.perm & FFA_MEM_ACC_RW) {
		if (receiver->perm.perm & FFA_MEM_ACC_EXE)
			return TEE_ERROR_ACCESS_CONFLICT;

		perm |= TEE_MATTR_UW;
	}
	/*
	 * Currently we don't support passing a va. We can't guarantee that
	 * the full region will be mapped in a contiguous region. A
	 * smem->region can have multiple mobjs for one share and currently
	 * there doesn't seem to be an option to guarantee that these will be
	 * mapped in a contiguous va space.
	 */
	if (*va)
		return TEE_ERROR_NOT_SUPPORTED;

	SLIST_FOREACH(reg, &smem->regions, link) {
		res = vm_map(&ctx->uctx, va, reg->page_count * SMALL_PAGE_SIZE,
			     perm, 0, reg->mobj, reg->page_offset);
		if (res != TEE_SUCCESS) {
			EMSG("Failed to map memory region %#"PRIx32, res);
			return res;
		}
	}
	return TEE_SUCCESS;
}

TEE_Result sp_unmap_ffa_regions(struct sp_session *s, struct sp_mem *smem)
{
	TEE_Result res = TEE_SUCCESS;
	vaddr_t vaddr = 0;
	size_t len = 0;
	struct sp_ctx *ctx = to_sp_ctx(s->ts_sess.ctx);
	struct sp_mem_map_region *reg = NULL;

	SLIST_FOREACH(reg, &smem->regions, link) {
		vaddr = (vaddr_t)sp_mem_get_va(&ctx->uctx, reg->page_offset,
					       reg->mobj);
		len = reg->page_count * SMALL_PAGE_SIZE;

		res = vm_unmap(&ctx->uctx, vaddr, len);
		if (res != TEE_SUCCESS)
			return res;
	}

	return TEE_SUCCESS;
}

static TEE_Result sp_open_session(struct sp_session **sess,
				  struct sp_sessions_head *open_sessions,
				  const TEE_UUID *uuid)
{
	TEE_Result res = TEE_SUCCESS;
	struct sp_session *s = NULL;
	struct sp_ctx *ctx = NULL;

	if (!find_secure_partition(uuid))
		return TEE_ERROR_ITEM_NOT_FOUND;

	res = sp_create_session(open_sessions, uuid, &s);
	if (res != TEE_SUCCESS) {
		DMSG("sp_create_session failed %#"PRIx32, res);
		return res;
	}

	ctx = to_sp_ctx(s->ts_sess.ctx);
	assert(ctx);
	if (!ctx)
		return TEE_ERROR_TARGET_DEAD;
	*sess = s;

	ts_push_current_session(&s->ts_sess);
	/* Load the SP using ldelf. */
	ldelf_load_ldelf(&ctx->uctx);
	res = ldelf_init_with_ldelf(&s->ts_sess, &ctx->uctx);
	if (res != TEE_SUCCESS) {
		EMSG("Failed loading SP using ldelf %#"PRIx32, res);
		ts_pop_current_session();
		return TEE_ERROR_TARGET_DEAD;
	}

	/* Make the SP ready for its first run */
	s->state = sp_idle;
	s->caller_id = 0;
	sp_init_set_registers(ctx);
	ts_pop_current_session();

	return TEE_SUCCESS;
}

static TEE_Result sp_dt_get_u64(const void *fdt, int node,
				const char *property, uint64_t *value)
{
	const fdt64_t *p = NULL;
	int len = 0;

	p = fdt_getprop(fdt, node, property, &len);
	if (!p || len != sizeof(*p))
		return TEE_ERROR_ITEM_NOT_FOUND;

	*value = fdt64_ld(p);

	return TEE_SUCCESS;
}

static TEE_Result sp_dt_get_u32(const void *fdt, int node,
				const char *property, uint32_t *value)
{
	const fdt32_t *p = NULL;
	int len = 0;

	p = fdt_getprop(fdt, node, property, &len);
	if (!p || len != sizeof(*p))
		return TEE_ERROR_ITEM_NOT_FOUND;

	*value = fdt32_to_cpu(*p);

	return TEE_SUCCESS;
}

static TEE_Result sp_dt_get_uuid(const void *fdt, int node,
				 const char *property, TEE_UUID *uuid)
{
	uint32_t uuid_array[4] = { 0 };
	const fdt32_t *p = NULL;
	int len = 0;
	int i = 0;

	p = fdt_getprop(fdt, node, property, &len);
	if (!p || len != sizeof(TEE_UUID))
		return TEE_ERROR_ITEM_NOT_FOUND;

	for (i = 0; i < 4; i++)
		uuid_array[i] = fdt32_to_cpu(p[i]);

	tee_uuid_from_octets(uuid, (uint8_t *)uuid_array);

	return TEE_SUCCESS;
}
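
/*
 * A minimal SP manifest accepted by check_fdt() looks roughly like this
 * (illustrative values; only "compatible" and "uuid" are required here,
 * "description" is optional):
 *
 *	/ {
 *		compatible = "arm,ffa-manifest-1.0";
 *		description = "example-sp";
 *		uuid = <0x486178e0 0xe7f811e3 0xbc5e0002 0xa5d5c51b>;
 *	};
 */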
static TEE_Result check_fdt(const void * const fdt, const TEE_UUID *uuid)
{
	const struct fdt_property *description = NULL;
	int description_name_len = 0;
	TEE_UUID fdt_uuid = { };

	if (fdt_node_check_compatible(fdt, 0, "arm,ffa-manifest-1.0")) {
		EMSG("Failed loading SP, manifest not found");
		return TEE_ERROR_BAD_PARAMETERS;
	}

	description = fdt_get_property(fdt, 0, "description",
				       &description_name_len);
	if (description)
		DMSG("Loading SP: %s", description->data);

	if (sp_dt_get_uuid(fdt, 0, "uuid", &fdt_uuid)) {
		EMSG("Missing or invalid UUID in SP manifest");
		return TEE_ERROR_BAD_FORMAT;
	}

	if (memcmp(uuid, &fdt_uuid, sizeof(fdt_uuid))) {
		EMSG("Failed loading SP, UUID mismatch");
		return TEE_ERROR_BAD_FORMAT;
	}

	return TEE_SUCCESS;
}

/*
 * sp_init_info allocates and maps the sp_ffa_init_info for the SP. It copies
 * the fdt into the allocated page(s) and returns a pointer to the new
 * location of the fdt. This pointer can be used to update data inside the
 * fdt.
 */
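/*
 * Resulting layout of the mapped area (illustrative; total_size is
 * CFG_SP_INIT_INFO_MAX_SIZE rounded up to whole small pages):
 *
 *	*va             -> struct sp_ffa_init_info ("FF-A" magic, count = 1)
 *	                   nvp[0] = { "TYPE_DT", value, size }
 *	*va + info_size -> copy of the SP manifest fdt (nvp[0].value points
 *	                   here, nvp[0].size = fdt_size)
 */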
static TEE_Result sp_init_info(struct sp_ctx *ctx, struct thread_smc_args *args,
			       const void * const input_fdt, vaddr_t *va,
			       size_t *num_pgs, void **fdt_copy)
{
	struct sp_ffa_init_info *info = NULL;
	int nvp_count = 1;
	size_t total_size = ROUNDUP(CFG_SP_INIT_INFO_MAX_SIZE, SMALL_PAGE_SIZE);
	size_t nvp_size = sizeof(struct sp_name_value_pair) * nvp_count;
	size_t info_size = sizeof(*info) + nvp_size;
	size_t fdt_size = total_size - info_size;
	TEE_Result res = TEE_SUCCESS;
	uint32_t perm = TEE_MATTR_URW | TEE_MATTR_PRW;
	struct fobj *f = NULL;
	struct mobj *m = NULL;
	static const char fdt_name[16] = "TYPE_DT\0\0\0\0\0\0\0\0";

	*num_pgs = total_size / SMALL_PAGE_SIZE;

	f = fobj_sec_mem_alloc(*num_pgs);
	m = mobj_with_fobj_alloc(f, NULL, TEE_MATTR_MEM_TYPE_TAGGED);

	fobj_put(f);
	if (!m)
		return TEE_ERROR_OUT_OF_MEMORY;

	res = vm_map(&ctx->uctx, va, total_size, perm, 0, m, 0);
	mobj_put(m);
	if (res)
		return res;

	info = (struct sp_ffa_init_info *)*va;

	/* The magic field is 4 bytes, we don't copy the '\0' byte. */
	memcpy(&info->magic, "FF-A", 4);
	info->count = nvp_count;
	args->a0 = (vaddr_t)info;

	/*
	 * Store the fdt after the boot_info and store the pointer in the
	 * first element.
	 */
	COMPILE_TIME_ASSERT(sizeof(info->nvp[0].name) == sizeof(fdt_name));
	memcpy(info->nvp[0].name, fdt_name, sizeof(fdt_name));
	info->nvp[0].value = *va + info_size;
	info->nvp[0].size = fdt_size;
	*fdt_copy = (void *)info->nvp[0].value;

	if (fdt_open_into(input_fdt, *fdt_copy, fdt_size))
		return TEE_ERROR_GENERIC;

	return TEE_SUCCESS;
}
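
/*
 * Example device region node as parsed below (illustrative values; the
 * subnode lives under a node compatible with
 * "arm,ffa-manifest-device-regions"):
 *
 *	device-regions {
 *		compatible = "arm,ffa-manifest-device-regions";
 *		uart {
 *			base-address = <0x0 0x1c0b0000>;
 *			pages-count = <1>;
 *			attributes = <0x3>;	// read-write, secure
 *		};
 *	};
 */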
static TEE_Result handle_fdt_dev_regions(struct sp_ctx *ctx, void *fdt)
{
	int node = 0;
	int subnode = 0;
	TEE_Result res = TEE_SUCCESS;
	const char *dt_device_match_table = {
		"arm,ffa-manifest-device-regions",
	};

	/*
	 * Device regions are optional in the SP manifest, it's not an error
	 * if we don't find any.
	 */
	node = fdt_node_offset_by_compatible(fdt, 0, dt_device_match_table);
	if (node < 0)
		return TEE_SUCCESS;

	fdt_for_each_subnode(subnode, fdt, node) {
		uint64_t base_addr = 0;
		uint32_t pages_cnt = 0;
		uint32_t attributes = 0;
		struct mobj *m = NULL;
		bool is_secure = true;
		uint32_t perm = 0;
		vaddr_t va = 0;
		unsigned int idx = 0;

		/*
		 * Physical base address of a device MMIO region.
		 * Currently only a physically contiguous region is supported.
		 */
		if (sp_dt_get_u64(fdt, subnode, "base-address", &base_addr)) {
			EMSG("Mandatory field is missing: base-address");
			return TEE_ERROR_BAD_FORMAT;
		}

		/* Total size of the MMIO region as a count of 4K pages */
		if (sp_dt_get_u32(fdt, subnode, "pages-count", &pages_cnt)) {
			EMSG("Mandatory field is missing: pages-count");
			return TEE_ERROR_BAD_FORMAT;
		}

		/* Data access, instruction access and security attributes */
		if (sp_dt_get_u32(fdt, subnode, "attributes", &attributes)) {
			EMSG("Mandatory field is missing: attributes");
			return TEE_ERROR_BAD_FORMAT;
		}

		/* Check instruction and data access permissions */
		switch (attributes & SP_MANIFEST_ATTR_RWX) {
		case SP_MANIFEST_ATTR_RO:
			perm = TEE_MATTR_UR;
			break;
		case SP_MANIFEST_ATTR_RW:
			perm = TEE_MATTR_URW;
			break;
		default:
			EMSG("Invalid memory access permissions");
			return TEE_ERROR_BAD_FORMAT;
		}

		/*
		 * The SP is a secure endpoint, the security attribute can be
		 * secure or non-secure.
		 */
		if (attributes & SP_MANIFEST_ATTR_NSEC)
			is_secure = false;

		/* Memory attributes must be Device-nGnRnE */
		m = sp_mem_new_mobj(pages_cnt, TEE_MATTR_MEM_TYPE_STRONGLY_O,
				    is_secure);
		if (!m)
			return TEE_ERROR_OUT_OF_MEMORY;

		res = sp_mem_add_pages(m, &idx, (paddr_t)base_addr, pages_cnt);
		if (res) {
			mobj_put(m);
			return res;
		}

		res = vm_map(&ctx->uctx, &va, pages_cnt * SMALL_PAGE_SIZE,
			     perm, 0, m, 0);
		mobj_put(m);
		if (res)
			return res;

		/*
		 * Overwrite the device region's PA in the fdt with the VA.
		 * This fdt will be passed to the SP.
		 */
		res = fdt_setprop_u64(fdt, subnode, "base-address", va);

		/*
		 * Unmap the region if the overwrite failed since the SP won't
		 * be able to access it without knowing the VA.
		 */
		if (res) {
			vm_unmap(&ctx->uctx, va, pages_cnt * SMALL_PAGE_SIZE);
			return res;
		}
	}

	return TEE_SUCCESS;
}
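
/*
 * Example memory region node as parsed below (illustrative values). When
 * base-address is omitted, the region is allocated from secure DDR on the
 * SP's behalf:
 *
 *	memory-regions {
 *		compatible = "arm,ffa-manifest-memory-regions";
 *		heap {
 *			pages-count = <4>;
 *			attributes = <0x3>;	// read-write, secure
 *		};
 *	};
 */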
static TEE_Result handle_fdt_mem_regions(struct sp_ctx *ctx, void *fdt)
{
	int node = 0;
	int subnode = 0;
	tee_mm_entry_t *mm = NULL;
	TEE_Result res = TEE_SUCCESS;

	/*
	 * Memory regions are optional in the SP manifest, it's not an error
	 * if we don't find any.
	 */
	node = fdt_node_offset_by_compatible(fdt, 0,
					     "arm,ffa-manifest-memory-regions");
	if (node < 0)
		return TEE_SUCCESS;

	fdt_for_each_subnode(subnode, fdt, node) {
		bool alloc_needed = false;
		uint32_t attributes = 0;
		uint64_t base_addr = 0;
		uint32_t pages_cnt = 0;
		bool is_secure = true;
		struct mobj *m = NULL;
		unsigned int idx = 0;
		uint32_t perm = 0;
		size_t size = 0;
		vaddr_t va = 0;

		mm = NULL;

		/*
		 * Base address of a memory region.
		 * If not present, we have to allocate the specified memory.
		 * If present, this field could specify a PA or VA. Currently
		 * only a PA is supported.
		 */
		if (sp_dt_get_u64(fdt, subnode, "base-address", &base_addr))
			alloc_needed = true;

		/* Size of the memory region as a count of 4K pages */
		if (sp_dt_get_u32(fdt, subnode, "pages-count", &pages_cnt)) {
			EMSG("Mandatory field is missing: pages-count");
			return TEE_ERROR_BAD_FORMAT;
		}

		if (MUL_OVERFLOW(pages_cnt, SMALL_PAGE_SIZE, &size))
			return TEE_ERROR_OVERFLOW;

		/*
		 * Memory region attributes:
		 * - Instruction/data access permissions
		 * - Cacheability/shareability attributes
		 * - Security attributes
		 *
		 * Cacheability/shareability attributes can be ignored for
		 * now. OP-TEE only supports a single type for normal cached
		 * memory and currently there is no use case that would
		 * require changing this.
		 */
		if (sp_dt_get_u32(fdt, subnode, "attributes", &attributes)) {
			EMSG("Mandatory field is missing: attributes");
			return TEE_ERROR_BAD_FORMAT;
		}

		/* Check instruction and data access permissions */
		switch (attributes & SP_MANIFEST_ATTR_RWX) {
		case SP_MANIFEST_ATTR_RO:
			perm = TEE_MATTR_UR;
			break;
		case SP_MANIFEST_ATTR_RW:
			perm = TEE_MATTR_URW;
			break;
		case SP_MANIFEST_ATTR_RX:
			perm = TEE_MATTR_URX;
			break;
		default:
			EMSG("Invalid memory access permissions");
			return TEE_ERROR_BAD_FORMAT;
		}

		/*
		 * The SP is a secure endpoint, the security attribute can be
		 * secure or non-secure.
		 * The SPMC cannot allocate non-secure memory, i.e. if the
		 * base address is missing this attribute must be secure.
		 */
		if (attributes & SP_MANIFEST_ATTR_NSEC) {
			if (alloc_needed) {
				EMSG("Invalid memory security attribute");
				return TEE_ERROR_BAD_FORMAT;
			}
			is_secure = false;
		}

		if (alloc_needed) {
			/* Base address is missing, we have to allocate */
			mm = tee_mm_alloc(&tee_mm_sec_ddr, size);
			if (!mm)
				return TEE_ERROR_OUT_OF_MEMORY;

			base_addr = tee_mm_get_smem(mm);
		}

		m = sp_mem_new_mobj(pages_cnt, TEE_MATTR_MEM_TYPE_CACHED,
				    is_secure);
		if (!m) {
			res = TEE_ERROR_OUT_OF_MEMORY;
			goto err_mm_free;
		}

		res = sp_mem_add_pages(m, &idx, base_addr, pages_cnt);
		if (res) {
			mobj_put(m);
			goto err_mm_free;
		}

		res = vm_map(&ctx->uctx, &va, size, perm, 0, m, 0);
		mobj_put(m);
		if (res)
			goto err_mm_free;

		/*
		 * Overwrite the memory region's base address in the fdt with
		 * the VA. This fdt will be passed to the SP.
		 * If the base-address field was not present in the original
		 * fdt, this function will create it. This doesn't cause
		 * issues since the necessary extra space has been allocated
		 * when opening the fdt.
		 */
		res = fdt_setprop_u64(fdt, subnode, "base-address", va);

		/*
		 * Unmap the region if the overwrite failed since the SP won't
		 * be able to access it without knowing the VA.
		 */
		if (res) {
			vm_unmap(&ctx->uctx, va, size);
			goto err_mm_free;
		}
	}

	return TEE_SUCCESS;

err_mm_free:
	tee_mm_free(mm);
	return res;
}
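
/*
 * Example TPM event log node as parsed below (illustrative; the property
 * values act as placeholders and are overwritten with the real VA and size
 * before the fdt is passed to the SP):
 *
 *	tpm_event_log {
 *		compatible = "arm,tpm_event_log";
 *		tpm_event_log_addr = <0x0 0x0>;
 *		tpm_event_log_size = <0x0>;
 *	};
 */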
static TEE_Result handle_tpm_event_log(struct sp_ctx *ctx, void *fdt)
{
	uint32_t perm = TEE_MATTR_URW | TEE_MATTR_PRW;
	uint32_t dummy_size __maybe_unused = 0;
	TEE_Result res = TEE_SUCCESS;
	size_t page_count = 0;
	struct fobj *f = NULL;
	struct mobj *m = NULL;
	vaddr_t log_addr = 0;
	size_t log_size = 0;
	int node = 0;

	node = fdt_node_offset_by_compatible(fdt, 0, "arm,tpm_event_log");
	if (node < 0)
		return TEE_SUCCESS;

	/* Check the existence and size of the event log properties */
	if (sp_dt_get_u64(fdt, node, "tpm_event_log_addr", &log_addr)) {
		EMSG("tpm_event_log_addr not found or has invalid size");
		return TEE_ERROR_BAD_FORMAT;
	}

	if (sp_dt_get_u32(fdt, node, "tpm_event_log_size", &dummy_size)) {
		EMSG("tpm_event_log_size not found or has invalid size");
		return TEE_ERROR_BAD_FORMAT;
	}

	/* Validate the event log */
	res = tpm_get_event_log_size(&log_size);
	if (res)
		return res;

	if (!log_size) {
		EMSG("Empty TPM event log was provided");
		return TEE_ERROR_ITEM_NOT_FOUND;
	}

	/* Allocate a memory area for the event log to share with the SP */
	page_count = ROUNDUP_DIV(log_size, SMALL_PAGE_SIZE);

	f = fobj_sec_mem_alloc(page_count);
	m = mobj_with_fobj_alloc(f, NULL, TEE_MATTR_MEM_TYPE_TAGGED);
	fobj_put(f);
	if (!m)
		return TEE_ERROR_OUT_OF_MEMORY;

	res = vm_map(&ctx->uctx, &log_addr, log_size, perm, 0, m, 0);
	mobj_put(m);
	if (res)
		return res;

	/* Copy the event log */
	res = tpm_get_event_log((void *)log_addr, &log_size);
	if (res)
		goto err_unmap;

	/* Set the event log details in the manifest */
	res = fdt_setprop_u64(fdt, node, "tpm_event_log_addr", log_addr);
	if (res)
		goto err_unmap;

	res = fdt_setprop_u32(fdt, node, "tpm_event_log_size", log_size);
	if (res)
		goto err_unmap;

	return TEE_SUCCESS;

err_unmap:
	vm_unmap(&ctx->uctx, log_addr, log_size);

	return res;
}

static TEE_Result sp_init_uuid(const TEE_UUID *uuid, const void * const fdt)
{
	TEE_Result res = TEE_SUCCESS;
	struct sp_session *sess = NULL;
	struct thread_smc_args args = { };
	vaddr_t va = 0;
	size_t num_pgs = 0;
	struct sp_ctx *ctx = NULL;
	void *fdt_copy = NULL;

	res = sp_open_session(&sess, &open_sp_sessions, uuid);
	if (res)
		return res;

	res = check_fdt(fdt, uuid);
	if (res)
		return res;

	ctx = to_sp_ctx(sess->ts_sess.ctx);
	ts_push_current_session(&sess->ts_sess);

	res = sp_init_info(ctx, &args, fdt, &va, &num_pgs, &fdt_copy);
	if (res)
		goto out;

	res = handle_fdt_dev_regions(ctx, fdt_copy);
	if (res)
		goto out;

	res = handle_fdt_mem_regions(ctx, fdt_copy);
	if (res)
		goto out;

	if (IS_ENABLED(CFG_CORE_TPM_EVENT_LOG)) {
		res = handle_tpm_event_log(ctx, fdt_copy);
		if (res)
			goto out;
	}

	ts_pop_current_session();

	if (sp_enter(&args, sess)) {
		vm_unmap(&ctx->uctx, va, num_pgs * SMALL_PAGE_SIZE);
		return FFA_ABORTED;
	}

	spmc_sp_msg_handler(&args, sess);

	ts_push_current_session(&sess->ts_sess);
out:
	/* Free the boot info pages from the SP memory */
	vm_unmap(&ctx->uctx, va, num_pgs * SMALL_PAGE_SIZE);
	ts_pop_current_session();

	return res;
}
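
/*
 * Enter the SP with the FF-A arguments carried in args: a0-a7 are copied
 * into the SP's saved x0-x7 before the switch to S-EL0 and copied back on
 * return, so args holds the SP's FF-A response when this function returns.
 */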
TEE_Result sp_enter(struct thread_smc_args *args, struct sp_session *sp)
{
	TEE_Result res = FFA_OK;
	struct sp_ctx *ctx = to_sp_ctx(sp->ts_sess.ctx);

	ctx->sp_regs.x[0] = args->a0;
	ctx->sp_regs.x[1] = args->a1;
	ctx->sp_regs.x[2] = args->a2;
	ctx->sp_regs.x[3] = args->a3;
	ctx->sp_regs.x[4] = args->a4;
	ctx->sp_regs.x[5] = args->a5;
	ctx->sp_regs.x[6] = args->a6;
	ctx->sp_regs.x[7] = args->a7;

	res = sp->ts_sess.ctx->ops->enter_invoke_cmd(&sp->ts_sess, 0);

	args->a0 = ctx->sp_regs.x[0];
	args->a1 = ctx->sp_regs.x[1];
	args->a2 = ctx->sp_regs.x[2];
	args->a3 = ctx->sp_regs.x[3];
	args->a4 = ctx->sp_regs.x[4];
	args->a5 = ctx->sp_regs.x[5];
	args->a6 = ctx->sp_regs.x[6];
	args->a7 = ctx->sp_regs.x[7];

	return res;
}

static TEE_Result sp_enter_invoke_cmd(struct ts_session *s,
				      uint32_t cmd __unused)
{
	struct sp_ctx *ctx = to_sp_ctx(s->ctx);
	TEE_Result res = TEE_SUCCESS;
	uint32_t exceptions = 0;
	uint64_t cpsr = 0;
	struct sp_session *sp_s = to_sp_session(s);
	struct ts_session *sess = NULL;
	struct thread_ctx_regs *sp_regs = NULL;
	uint32_t panicked = false;
	uint32_t panic_code = 0;

	bm_timestamp();

	sp_regs = &ctx->sp_regs;
	ts_push_current_session(s);

	cpsr = sp_regs->cpsr;
	sp_regs->cpsr = read_daif() & (SPSR_64_DAIF_MASK << SPSR_64_DAIF_SHIFT);

	exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
	__thread_enter_user_mode(sp_regs, &panicked, &panic_code);
	sp_regs->cpsr = cpsr;
	thread_unmask_exceptions(exceptions);

	thread_user_clear_vfp(&ctx->uctx);

	if (panicked) {
		DMSG("SP panicked with code %#"PRIx32, panic_code);
		abort_print_current_ts();

		sess = ts_pop_current_session();
		cpu_spin_lock(&sp_s->spinlock);
		sp_s->state = sp_dead;
		cpu_spin_unlock(&sp_s->spinlock);

		return TEE_ERROR_TARGET_DEAD;
	}

	sess = ts_pop_current_session();
	assert(sess == s);

	bm_timestamp();

	return res;
}

/* We currently don't support 32-bit SPs */
#ifdef ARM64
static void sp_svc_store_registers(struct thread_svc_regs *regs,
				   struct thread_ctx_regs *sp_regs)
{
	COMPILE_TIME_ASSERT(sizeof(sp_regs->x[0]) == sizeof(regs->x0));
	memcpy(sp_regs->x, &regs->x0, 31 * sizeof(regs->x0));
	sp_regs->pc = regs->elr;
	sp_regs->sp = regs->sp_el0;
}
#endif

static bool sp_handle_svc(struct thread_svc_regs *regs)
{
	struct ts_session *ts = ts_get_current_session();
	struct sp_ctx *uctx = to_sp_ctx(ts->ctx);
	struct sp_session *s = uctx->open_session;

	assert(s);

	sp_svc_store_registers(regs, &uctx->sp_regs);

	regs->x0 = 0;
	regs->x1 = 0; /* panic */
	regs->x2 = 0; /* panic code */

	/*
	 * All the registers of the SP are saved in the SP session by the SVC
	 * handler.
	 * We always return to S-EL1 after handling the SVC. We will continue
	 * in sp_enter_invoke_cmd() (return from __thread_enter_user_mode).
	 * The sp_enter() function copies the FF-A parameters (a0-a7) from
	 * the saved registers to the thread_smc_args. The thread_smc_args
	 * object is afterwards used by spmc_sp_msg_handler() to handle the
	 * FF-A message sent by the SP.
	 */
	return false;
}

static void sp_dump_state(struct ts_ctx *ctx)
{
	struct sp_ctx *utc = to_sp_ctx(ctx);

	if (utc->uctx.dump_entry_func) {
		TEE_Result res = ldelf_dump_state(&utc->uctx);

		if (!res || res == TEE_ERROR_TARGET_DEAD)
			return;
	}

	user_mode_ctx_print_mappings(&utc->uctx);
}

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct ts_ops sp_ops __weak __relrodata_unpaged("sp_ops") = {
	.enter_invoke_cmd = sp_enter_invoke_cmd,
	.handle_svc = sp_handle_svc,
	.dump_state = sp_dump_state,
};

static TEE_Result process_sp_pkg(uint64_t sp_pkg_pa, TEE_UUID *sp_uuid)
{
	enum teecore_memtypes mtype = MEM_AREA_RAM_SEC;
	struct sp_pkg_header *sp_pkg_hdr = NULL;
	TEE_Result res = TEE_SUCCESS;
	tee_mm_entry_t *mm = NULL;
	struct fip_sp *sp = NULL;
	uint64_t sp_fdt_end = 0;
	size_t sp_pkg_size = 0;
	vaddr_t sp_pkg_va = 0;
	size_t num_pages = 0;

	/* Map only the first page of the SP package to parse the header */
	if (!tee_pbuf_is_sec(sp_pkg_pa, SMALL_PAGE_SIZE))
		return TEE_ERROR_GENERIC;

	mm = tee_mm_alloc(&tee_mm_sec_ddr, SMALL_PAGE_SIZE);
	if (!mm)
		return TEE_ERROR_OUT_OF_MEMORY;

	sp_pkg_va = tee_mm_get_smem(mm);

	if (core_mmu_map_contiguous_pages(sp_pkg_va, sp_pkg_pa, 1, mtype)) {
		res = TEE_ERROR_GENERIC;
		goto err;
	}

	sp_pkg_hdr = (struct sp_pkg_header *)sp_pkg_va;

	if (sp_pkg_hdr->magic != SP_PKG_HEADER_MAGIC) {
		EMSG("Invalid SP package magic");
		res = TEE_ERROR_BAD_FORMAT;
		goto err_unmap;
	}

	if (sp_pkg_hdr->version != SP_PKG_HEADER_VERSION) {
		EMSG("Invalid SP header version");
		res = TEE_ERROR_BAD_FORMAT;
		goto err_unmap;
	}

	if (ADD_OVERFLOW(sp_pkg_hdr->img_offset, sp_pkg_hdr->img_size,
			 &sp_pkg_size)) {
		EMSG("Invalid SP package size");
		res = TEE_ERROR_BAD_FORMAT;
		goto err_unmap;
	}

	if (ADD_OVERFLOW(sp_pkg_hdr->pm_offset, sp_pkg_hdr->pm_size,
			 &sp_fdt_end) || sp_fdt_end > sp_pkg_hdr->img_offset) {
		EMSG("Invalid SP manifest size");
		res = TEE_ERROR_BAD_FORMAT;
		goto err_unmap;
	}

	core_mmu_unmap_pages(sp_pkg_va, 1);
	tee_mm_free(mm);

	/* Map the whole package */
	if (!tee_pbuf_is_sec(sp_pkg_pa, sp_pkg_size))
		return TEE_ERROR_GENERIC;

	num_pages = ROUNDUP_DIV(sp_pkg_size, SMALL_PAGE_SIZE);

	mm = tee_mm_alloc(&tee_mm_sec_ddr, sp_pkg_size);
	if (!mm)
		return TEE_ERROR_OUT_OF_MEMORY;

	sp_pkg_va = tee_mm_get_smem(mm);

	if (core_mmu_map_contiguous_pages(sp_pkg_va, sp_pkg_pa, num_pages,
					  mtype)) {
		res = TEE_ERROR_GENERIC;
		goto err;
	}

	sp_pkg_hdr = (struct sp_pkg_header *)tee_mm_get_smem(mm);

	sp = calloc(1, sizeof(struct fip_sp));
	if (!sp) {
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto err_unmap;
	}

	memcpy(&sp->sp_img.image.uuid, sp_uuid, sizeof(*sp_uuid));
	sp->sp_img.image.ts = (uint8_t *)(sp_pkg_va + sp_pkg_hdr->img_offset);
	sp->sp_img.image.size = sp_pkg_hdr->img_size;
	sp->sp_img.image.flags = 0;
	sp->sp_img.fdt = (uint8_t *)(sp_pkg_va + sp_pkg_hdr->pm_offset);
	sp->mm = mm;

	STAILQ_INSERT_TAIL(&fip_sp_list, sp, link);

	return TEE_SUCCESS;

err_unmap:
	core_mmu_unmap_pages(tee_mm_get_smem(mm),
			     ROUNDUP_DIV(tee_mm_get_bytes(mm),
					 SMALL_PAGE_SIZE));
err:
	tee_mm_free(mm);

	return res;
}
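
/*
 * Example SP package node in the SPMC manifest as parsed below (illustrative
 * values and node names; the root node must be compatible with
 * "arm,ffa-core-manifest-1.0"):
 *
 *	sp-packages {
 *		compatible = "arm,sp_pkg";
 *		example-sp {
 *			load-address = <0x0 0x06000000>;
 *			uuid = <0x486178e0 0xe7f811e3 0xbc5e0002 0xa5d5c51b>;
 *		};
 *	};
 */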
static TEE_Result fip_sp_map_all(void)
{
	TEE_Result res = TEE_SUCCESS;
	uint64_t sp_pkg_addr = 0;
	const void *fdt = NULL;
	TEE_UUID sp_uuid = { };
	int sp_pkgs_node = 0;
	int subnode = 0;
	int root = 0;

	fdt = get_external_dt();
	if (!fdt) {
		EMSG("No SPMC manifest found");
		return TEE_ERROR_GENERIC;
	}

	root = fdt_path_offset(fdt, "/");
	if (root < 0)
		return TEE_ERROR_BAD_FORMAT;

	if (fdt_node_check_compatible(fdt, root, "arm,ffa-core-manifest-1.0"))
		return TEE_ERROR_BAD_FORMAT;

	/* SP packages are optional, it's not an error if we don't find any */
	sp_pkgs_node = fdt_node_offset_by_compatible(fdt, root, "arm,sp_pkg");
	if (sp_pkgs_node < 0)
		return TEE_SUCCESS;

	fdt_for_each_subnode(subnode, fdt, sp_pkgs_node) {
		res = sp_dt_get_u64(fdt, subnode, "load-address",
				    &sp_pkg_addr);
		if (res) {
			EMSG("Invalid FIP SP load address");
			return res;
		}

		res = sp_dt_get_uuid(fdt, subnode, "uuid", &sp_uuid);
		if (res) {
			EMSG("Invalid FIP SP uuid");
			return res;
		}

		res = process_sp_pkg(sp_pkg_addr, &sp_uuid);
		if (res) {
			EMSG("Invalid FIP SP package");
			return res;
		}
	}

	return TEE_SUCCESS;
}

static void fip_sp_unmap_all(void)
{
	while (!STAILQ_EMPTY(&fip_sp_list)) {
		struct fip_sp *sp = STAILQ_FIRST(&fip_sp_list);

		STAILQ_REMOVE_HEAD(&fip_sp_list, link);
		core_mmu_unmap_pages(tee_mm_get_smem(sp->mm),
				     ROUNDUP_DIV(tee_mm_get_bytes(sp->mm),
						 SMALL_PAGE_SIZE));
		tee_mm_free(sp->mm);
		free(sp);
	}
}

static TEE_Result sp_init_all(void)
{
	TEE_Result res = TEE_SUCCESS;
	const struct sp_image *sp = NULL;
	const struct fip_sp *fip_sp = NULL;
	char __maybe_unused msg[60] = { '\0', };

	for_each_secure_partition(sp) {
		if (sp->image.uncompressed_size)
			snprintf(msg, sizeof(msg),
				 " (compressed, uncompressed %u)",
				 sp->image.uncompressed_size);
		else
			msg[0] = '\0';
		DMSG("SP %pUl size %u%s", (void *)&sp->image.uuid,
		     sp->image.size, msg);

		res = sp_init_uuid(&sp->image.uuid, sp->fdt);
		if (res != TEE_SUCCESS) {
			EMSG("Failed initializing SP(%pUl) err:%#"PRIx32,
			     &sp->image.uuid, res);
			if (!IS_ENABLED(CFG_SP_SKIP_FAILED))
				panic();
		}
	}

	res = fip_sp_map_all();
	if (res)
		panic("Failed mapping FIP SPs");

	for_each_fip_sp(fip_sp) {
		sp = &fip_sp->sp_img;

		DMSG("SP %pUl size %u", (void *)&sp->image.uuid,
		     sp->image.size);

		res = sp_init_uuid(&sp->image.uuid, sp->fdt);
		if (res != TEE_SUCCESS) {
			EMSG("Failed initializing SP(%pUl) err:%#"PRIx32,
			     &sp->image.uuid, res);
			if (!IS_ENABLED(CFG_SP_SKIP_FAILED))
				panic();
		}
	}

	/*
	 * At this point all FIP SPs have been loaded by ldelf, so the
	 * original images (loaded by BL2 earlier) can be unmapped.
	 */
	fip_sp_unmap_all();

	return TEE_SUCCESS;
}

boot_final(sp_init_all);

static TEE_Result secure_partition_open(const TEE_UUID *uuid,
					struct ts_store_handle **h)
{
	return emb_ts_open(uuid, h, find_secure_partition);
}

REGISTER_SP_STORE(2) = {
	.description = "SP store",
	.open = secure_partition_open,
	.get_size = emb_ts_get_size,
	.get_tag = emb_ts_get_tag,
	.read = emb_ts_read,
	.close = emb_ts_close,
};