// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2019, Linaro Limited
 */

#include <assert.h>
#include <ctype.h>
#include <elf32.h>
#include <elf64.h>
#include <elf_common.h>
#include <ldelf.h>
#include <pta_system.h>
#include <stdio.h>
#include <stdlib.h>
#include <string_ext.h>
#include <string.h>
#include <tee_api_types.h>
#include <tee_internal_api_extensions.h>
#include <user_ta_header.h>
#include <utee_syscalls.h>

#include "sys.h"
#include "ta_elf.h"
#include "unwind.h"

static vaddr_t ta_stack;
static vaddr_t ta_stack_size;

struct ta_elf_queue main_elf_queue = TAILQ_HEAD_INITIALIZER(main_elf_queue);

static struct ta_elf *queue_elf_helper(const TEE_UUID *uuid)
{
	struct ta_elf *elf = calloc(1, sizeof(*elf));

	if (!elf)
		return NULL;

	TAILQ_INIT(&elf->segs);

	elf->uuid = *uuid;
	TAILQ_INSERT_TAIL(&main_elf_queue, elf, link);
	return elf;
}

static struct ta_elf *queue_elf(const TEE_UUID *uuid)
{
	struct ta_elf *elf = ta_elf_find_elf(uuid);

	if (elf)
		return NULL;

	elf = queue_elf_helper(uuid);
	if (!elf)
		err(TEE_ERROR_OUT_OF_MEMORY, "queue_elf_helper");

	return elf;
}

struct ta_elf *ta_elf_find_elf(const TEE_UUID *uuid)
{
	struct ta_elf *elf = NULL;

	TAILQ_FOREACH(elf, &main_elf_queue, link)
		if (!memcmp(uuid, &elf->uuid, sizeof(*uuid)))
			return elf;

	return NULL;
}

static TEE_Result e32_parse_ehdr(struct ta_elf *elf, Elf32_Ehdr *ehdr)
{
	if (ehdr->e_ident[EI_VERSION] != EV_CURRENT ||
	    ehdr->e_ident[EI_CLASS] != ELFCLASS32 ||
	    ehdr->e_ident[EI_DATA] != ELFDATA2LSB ||
	    ehdr->e_ident[EI_OSABI] != ELFOSABI_NONE ||
	    ehdr->e_type != ET_DYN || ehdr->e_machine != EM_ARM ||
	    (ehdr->e_flags & EF_ARM_ABIMASK) != EF_ARM_ABI_VERSION ||
#ifndef CFG_WITH_VFP
	    (ehdr->e_flags & EF_ARM_ABI_FLOAT_HARD) ||
#endif
	    ehdr->e_phentsize != sizeof(Elf32_Phdr) ||
	    ehdr->e_shentsize != sizeof(Elf32_Shdr))
		return TEE_ERROR_BAD_FORMAT;

	elf->is_32bit = true;
	elf->e_entry = ehdr->e_entry;
	elf->e_phoff = ehdr->e_phoff;
	elf->e_shoff = ehdr->e_shoff;
	elf->e_phnum = ehdr->e_phnum;
	elf->e_shnum = ehdr->e_shnum;
	elf->e_phentsize = ehdr->e_phentsize;
	elf->e_shentsize = ehdr->e_shentsize;

	return TEE_SUCCESS;
}

#ifdef ARM64
static TEE_Result e64_parse_ehdr(struct ta_elf *elf, Elf64_Ehdr *ehdr)
{
	if (ehdr->e_ident[EI_VERSION] != EV_CURRENT ||
	    ehdr->e_ident[EI_CLASS] != ELFCLASS64 ||
	    ehdr->e_ident[EI_DATA] != ELFDATA2LSB ||
	    ehdr->e_ident[EI_OSABI] != ELFOSABI_NONE ||
	    ehdr->e_type != ET_DYN || ehdr->e_machine != EM_AARCH64 ||
	    ehdr->e_flags || ehdr->e_phentsize != sizeof(Elf64_Phdr) ||
	    ehdr->e_shentsize != sizeof(Elf64_Shdr))
		return TEE_ERROR_BAD_FORMAT;

	elf->is_32bit = false;
	elf->e_entry = ehdr->e_entry;
	elf->e_phoff = ehdr->e_phoff;
	elf->e_shoff = ehdr->e_shoff;
	elf->e_phnum = ehdr->e_phnum;
	elf->e_shnum = ehdr->e_shnum;
	elf->e_phentsize = ehdr->e_phentsize;
	elf->e_shentsize = ehdr->e_shentsize;

	return TEE_SUCCESS;
}
#else /*ARM64*/
static TEE_Result e64_parse_ehdr(struct ta_elf *elf __unused,
				 Elf64_Ehdr *ehdr __unused)
{
	return TEE_ERROR_NOT_SUPPORTED;
}
#endif /*ARM64*/
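
/*
 * Reads the dynamic section entry at index @idx. @addr is the unrelocated
 * virtual address of the PT_DYNAMIC segment, so the load address is added
 * before dereferencing. Note that d_tag is narrowed to an unsigned int,
 * which is sufficient for the DT_* tags used below.
 */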
static void read_dyn(struct ta_elf *elf, vaddr_t addr,
		     size_t idx, unsigned int *tag, size_t *val)
{
	if (elf->is_32bit) {
		Elf32_Dyn *dyn = (Elf32_Dyn *)(addr + elf->load_addr);

		*tag = dyn[idx].d_tag;
		*val = dyn[idx].d_un.d_val;
	} else {
		Elf64_Dyn *dyn = (Elf64_Dyn *)(addr + elf->load_addr);

		*tag = dyn[idx].d_tag;
		*val = dyn[idx].d_un.d_val;
	}
}

static void save_hashtab_from_segment(struct ta_elf *elf, unsigned int type,
				      vaddr_t addr, size_t memsz)
{
	size_t dyn_entsize = 0;
	size_t num_dyns = 0;
	size_t n = 0;
	unsigned int tag = 0;
	size_t val = 0;

	if (type != PT_DYNAMIC)
		return;

	if (elf->is_32bit)
		dyn_entsize = sizeof(Elf32_Dyn);
	else
		dyn_entsize = sizeof(Elf64_Dyn);

	assert(!(memsz % dyn_entsize));
	num_dyns = memsz / dyn_entsize;

	for (n = 0; n < num_dyns; n++) {
		read_dyn(elf, addr, n, &tag, &val);
		if (tag == DT_HASH) {
			elf->hashtab = (void *)(val + elf->load_addr);
			break;
		}
	}
}

static void save_hashtab(struct ta_elf *elf)
{
	size_t n = 0;

	if (elf->is_32bit) {
		Elf32_Phdr *phdr = elf->phdr;

		for (n = 0; n < elf->e_phnum; n++)
			save_hashtab_from_segment(elf, phdr[n].p_type,
						  phdr[n].p_vaddr,
						  phdr[n].p_memsz);
	} else {
		Elf64_Phdr *phdr = elf->phdr;

		for (n = 0; n < elf->e_phnum; n++)
			save_hashtab_from_segment(elf, phdr[n].p_type,
						  phdr[n].p_vaddr,
						  phdr[n].p_memsz);
	}
	assert(elf->hashtab);
}

static void e32_save_symtab(struct ta_elf *elf, size_t tab_idx)
{
	Elf32_Shdr *shdr = elf->shdr;
	size_t str_idx = shdr[tab_idx].sh_link;

	elf->dynsymtab = (void *)(shdr[tab_idx].sh_addr + elf->load_addr);
	assert(!(shdr[tab_idx].sh_size % sizeof(Elf32_Sym)));
	elf->num_dynsyms = shdr[tab_idx].sh_size / sizeof(Elf32_Sym);

	elf->dynstr = (void *)(shdr[str_idx].sh_addr + elf->load_addr);
	elf->dynstr_size = shdr[str_idx].sh_size;
}

static void e64_save_symtab(struct ta_elf *elf, size_t tab_idx)
{
	Elf64_Shdr *shdr = elf->shdr;
	size_t str_idx = shdr[tab_idx].sh_link;

	elf->dynsymtab = (void *)(vaddr_t)(shdr[tab_idx].sh_addr +
					   elf->load_addr);
	assert(!(shdr[tab_idx].sh_size % sizeof(Elf64_Sym)));
	elf->num_dynsyms = shdr[tab_idx].sh_size / sizeof(Elf64_Sym);

	elf->dynstr = (void *)(vaddr_t)(shdr[str_idx].sh_addr + elf->load_addr);
	elf->dynstr_size = shdr[str_idx].sh_size;
}

static void save_symtab(struct ta_elf *elf)
{
	size_t n = 0;

	if (elf->is_32bit) {
		Elf32_Shdr *shdr = elf->shdr;

		for (n = 0; n < elf->e_shnum; n++) {
			if (shdr[n].sh_type == SHT_DYNSYM) {
				e32_save_symtab(elf, n);
				break;
			}
		}
	} else {
		Elf64_Shdr *shdr = elf->shdr;

		for (n = 0; n < elf->e_shnum; n++) {
			if (shdr[n].sh_type == SHT_DYNSYM) {
				e64_save_symtab(elf, n);
				break;
			}
		}
	}

	save_hashtab(elf);
}
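
/*
 * Opens the TA binary and maps its first page so that the ELF header and
 * the program headers can be parsed. For libraries the header page is part
 * of the first load segment and is therefore mapped read-only executable;
 * for the main TA it is a separate mapping used only for parsing.
 */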
static void init_elf(struct ta_elf *elf)
{
	TEE_Result res = TEE_SUCCESS;
	vaddr_t va = 0;
	uint32_t flags = PTA_SYSTEM_MAP_FLAG_SHAREABLE;

	res = sys_open_ta_bin(&elf->uuid, &elf->handle);
	if (res)
		err(res, "sys_open_ta_bin(%pUl)", (void *)&elf->uuid);

	/*
	 * Map it read-only executable when we're loading a library where
	 * the ELF header is included in a load segment.
	 */
	if (!elf->is_main)
		flags |= PTA_SYSTEM_MAP_FLAG_EXECUTABLE;
	res = sys_map_ta_bin(&va, SMALL_PAGE_SIZE, flags, elf->handle, 0, 0, 0);
	if (res)
		err(res, "sys_map_ta_bin");
	elf->ehdr_addr = va;
	if (!elf->is_main) {
		elf->load_addr = va;
		elf->max_addr = va + SMALL_PAGE_SIZE;
		elf->max_offs = SMALL_PAGE_SIZE;
	}

	if (!IS_ELF(*(Elf32_Ehdr *)va))
		err(TEE_ERROR_BAD_FORMAT, "TA is not an ELF");

	res = e32_parse_ehdr(elf, (void *)va);
	if (res == TEE_ERROR_BAD_FORMAT)
		res = e64_parse_ehdr(elf, (void *)va);
	if (res)
		err(res, "Cannot parse ELF");

	if (elf->e_phoff + elf->e_phnum * elf->e_phentsize > SMALL_PAGE_SIZE)
		err(TEE_ERROR_NOT_SUPPORTED, "Cannot read program headers");

	elf->phdr = (void *)(va + elf->e_phoff);
}

static size_t roundup(size_t v)
{
	return ROUNDUP(v, SMALL_PAGE_SIZE);
}

static size_t rounddown(size_t v)
{
	return ROUNDDOWN(v, SMALL_PAGE_SIZE);
}

static void add_segment(struct ta_elf *elf, size_t offset, size_t vaddr,
			size_t filesz, size_t memsz, size_t flags, size_t align)
{
	struct segment *seg = calloc(1, sizeof(*seg));

	if (!seg)
		err(TEE_ERROR_OUT_OF_MEMORY, "calloc");

	seg->offset = offset;
	seg->vaddr = vaddr;
	seg->filesz = filesz;
	seg->memsz = memsz;
	seg->flags = flags;
	seg->align = align;

	TAILQ_INSERT_TAIL(&elf->segs, seg, link);
}

static void parse_load_segments(struct ta_elf *elf)
{
	size_t n = 0;

	if (elf->is_32bit) {
		Elf32_Phdr *phdr = elf->phdr;

		for (n = 0; n < elf->e_phnum; n++)
			if (phdr[n].p_type == PT_LOAD) {
				add_segment(elf, phdr[n].p_offset,
					    phdr[n].p_vaddr, phdr[n].p_filesz,
					    phdr[n].p_memsz, phdr[n].p_flags,
					    phdr[n].p_align);
			} else if (phdr[n].p_type == PT_ARM_EXIDX) {
				elf->exidx_start = phdr[n].p_vaddr;
				elf->exidx_size = phdr[n].p_filesz;
			}
	} else {
		Elf64_Phdr *phdr = elf->phdr;

		for (n = 0; n < elf->e_phnum; n++)
			if (phdr[n].p_type == PT_LOAD)
				add_segment(elf, phdr[n].p_offset,
					    phdr[n].p_vaddr, phdr[n].p_filesz,
					    phdr[n].p_memsz, phdr[n].p_flags,
					    phdr[n].p_align);
	}
}

static void copy_remapped_to(struct ta_elf *elf, const struct segment *seg)
{
	uint8_t *dst = (void *)(seg->vaddr + elf->load_addr);
	size_t n = 0;
	size_t offs = seg->offset;
	size_t num_bytes = seg->filesz;

	if (offs < elf->max_offs) {
		n = MIN(elf->max_offs - offs, num_bytes);
		memcpy(dst, (void *)(elf->max_addr + offs - elf->max_offs), n);
		dst += n;
		offs += n;
		num_bytes -= n;
	}

	if (num_bytes) {
		TEE_Result res = sys_copy_from_ta_bin(dst, num_bytes,
						      elf->handle, offs);

		if (res)
			err(res, "sys_copy_from_ta_bin");
		elf->max_offs += offs;
	}
}
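
/*
 * Normalizes the parsed load segments so that each one can be mapped with
 * page granularity. As a hypothetical example with 4 KiB pages, a pair of
 * PT_LOAD entries such as
 *
 *   LOAD off 0x0000 vaddr 0x0000 filesz 0x0df0 memsz 0x0df0 flags r-x
 *   LOAD off 0x0df0 vaddr 0x0df0 filesz 0x0100 memsz 0x0180 flags rw-
 *
 * hits case 1 below: rounding the second vaddr down to 0x0000 overlaps
 * the first segment, so the two are merged into one segment carrying the
 * union of the permissions.
 */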
static void adjust_segments(struct ta_elf *elf)
{
	struct segment *seg = NULL;
	struct segment *prev_seg = NULL;
	size_t prev_end_addr = 0;
	size_t align = 0;
	size_t mask = 0;

	/* Sanity check */
	TAILQ_FOREACH(seg, &elf->segs, link) {
		size_t dummy __maybe_unused = 0;

		assert(seg->align >= SMALL_PAGE_SIZE);
		assert(!ADD_OVERFLOW(seg->vaddr, seg->memsz, &dummy));
		assert(seg->filesz <= seg->memsz);
		assert((seg->offset & SMALL_PAGE_MASK) ==
		       (seg->vaddr & SMALL_PAGE_MASK));

		prev_seg = TAILQ_PREV(seg, segment_head, link);
		if (prev_seg) {
			assert(seg->vaddr >= prev_seg->vaddr +
					     prev_seg->memsz);
			assert(seg->offset >=
			       prev_seg->offset + prev_seg->filesz);
		}
		if (!align)
			align = seg->align;
		assert(align == seg->align);
	}

	mask = align - 1;

	seg = TAILQ_FIRST(&elf->segs);
	if (seg)
		seg = TAILQ_NEXT(seg, link);
	while (seg) {
		prev_seg = TAILQ_PREV(seg, segment_head, link);
		prev_end_addr = prev_seg->vaddr + prev_seg->memsz;

		/*
		 * This segment may overlap with the last "page" in the
		 * previous segment in two different ways:
		 * 1. Virtual address (and offset) overlaps =>
		 *    Permissions need to be merged. The offset must have
		 *    the same SMALL_PAGE_MASK bits as the vaddr, and vaddr
		 *    and offset must line up with the previous segment.
		 *
		 * 2. Only offset overlaps =>
		 *    The same page in the ELF is mapped at two different
		 *    virtual addresses. As a limitation this segment must
		 *    be mapped as writeable.
		 */

		/* Case 1. */
		if (rounddown(seg->vaddr) < prev_end_addr) {
			assert((seg->vaddr & mask) == (seg->offset & mask));
			assert(prev_seg->memsz == prev_seg->filesz);

			/*
			 * Merge the segments and their permissions.
			 * Note that there may be a small hole between
			 * the two segments.
			 */
			prev_seg->filesz = seg->vaddr + seg->filesz -
					   prev_seg->vaddr;
			prev_seg->memsz = seg->vaddr + seg->memsz -
					  prev_seg->vaddr;
			prev_seg->flags |= seg->flags;

			TAILQ_REMOVE(&elf->segs, seg, link);
			free(seg);
			seg = TAILQ_NEXT(prev_seg, link);
			continue;
		}

		/* Case 2. */
		if ((seg->offset & mask) &&
		    rounddown(seg->offset) <
		    (prev_seg->offset + prev_seg->filesz)) {
			assert(seg->flags & PF_W);
			seg->remapped_writeable = true;
		}

		/*
		 * No overlap, but we may need to align address, offset
		 * and size.
		 */
		seg->filesz += seg->vaddr - rounddown(seg->vaddr);
		seg->memsz += seg->vaddr - rounddown(seg->vaddr);
		seg->vaddr = rounddown(seg->vaddr);
		seg->offset = rounddown(seg->offset);
		seg = TAILQ_NEXT(seg, link);
	}
}
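
/*
 * Maps the segments of a legacy TA. Every segment is backed by anonymous
 * zero-initialized memory filled with sys_copy_from_ta_bin(), so nothing
 * is shared with other instances. The final per-segment permissions are
 * applied later by ta_elf_finalize_mappings().
 */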
static void populate_segments_legacy(struct ta_elf *elf)
{
	TEE_Result res = TEE_SUCCESS;
	struct segment *seg = NULL;
	vaddr_t va = 0;

	assert(elf->is_legacy);
	TAILQ_FOREACH(seg, &elf->segs, link) {
		struct segment *last_seg = TAILQ_LAST(&elf->segs, segment_head);
		size_t pad_end = roundup(last_seg->vaddr + last_seg->memsz -
					 seg->vaddr - seg->memsz);
		size_t num_bytes = roundup(seg->memsz);

		if (!elf->load_addr)
			va = 0;
		else
			va = seg->vaddr + elf->load_addr;

		if (!(seg->flags & PF_R))
			err(TEE_ERROR_NOT_SUPPORTED,
			    "Segment must be readable");

		res = sys_map_zi(num_bytes, 0, &va, 0, pad_end);
		if (res)
			err(res, "sys_map_zi");
		res = sys_copy_from_ta_bin((void *)va, seg->filesz,
					   elf->handle, seg->offset);
		if (res)
			err(res, "sys_copy_from_ta_bin");

		if (!elf->load_addr)
			elf->load_addr = va;
		elf->max_addr = va + num_bytes;
		elf->max_offs = seg->offset + seg->filesz;
	}
}

static size_t get_pad_begin(void)
{
#ifdef CFG_TA_ASLR
	size_t min = CFG_TA_ASLR_MIN_OFFSET_PAGES;
	size_t max = CFG_TA_ASLR_MAX_OFFSET_PAGES;
	TEE_Result res = TEE_SUCCESS;
	uint32_t rnd32 = 0;
	size_t rnd = 0;

	COMPILE_TIME_ASSERT(CFG_TA_ASLR_MIN_OFFSET_PAGES <
			    CFG_TA_ASLR_MAX_OFFSET_PAGES);
	if (max > min) {
		res = utee_cryp_random_number_generate(&rnd32, sizeof(rnd32));
		if (res) {
			DMSG("Random read failed: %#"PRIx32, res);
			return min * SMALL_PAGE_SIZE;
		}
		rnd = rnd32 % (max - min);
	}

	return (min + rnd) * SMALL_PAGE_SIZE;
#else /*!CFG_TA_ASLR*/
	return 0;
#endif /*!CFG_TA_ASLR*/
}
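
/*
 * Maps the segments of a non-legacy ELF. Read-only segments are mapped
 * directly from the open TA binary (and can thus be shared), while
 * writeable segments get a zero-initialized mapping that is then filled
 * from the binary. For libraries the first page is already mapped by
 * init_elf(), which is why offsets below max_offs need special treatment.
 * ASLR padding (pad_begin) is only requested for the first mapping of
 * the ELF.
 */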
static void populate_segments(struct ta_elf *elf)
{
	TEE_Result res = TEE_SUCCESS;
	struct segment *seg = NULL;
	vaddr_t va = 0;
	size_t pad_begin = 0;

	assert(!elf->is_legacy);
	TAILQ_FOREACH(seg, &elf->segs, link) {
		struct segment *last_seg = TAILQ_LAST(&elf->segs, segment_head);
		size_t pad_end = roundup(last_seg->vaddr + last_seg->memsz -
					 seg->vaddr - seg->memsz);

		if (seg->remapped_writeable) {
			size_t num_bytes = roundup(seg->vaddr + seg->memsz) -
					   rounddown(seg->vaddr);

			assert(elf->load_addr);
			va = rounddown(elf->load_addr + seg->vaddr);
			assert(va >= elf->max_addr);
			res = sys_map_zi(num_bytes, 0, &va, 0, pad_end);
			if (res)
				err(res, "sys_map_zi");

			copy_remapped_to(elf, seg);
			elf->max_addr = va + num_bytes;
		} else {
			uint32_t flags = 0;
			size_t filesz = seg->filesz;
			size_t memsz = seg->memsz;
			size_t offset = seg->offset;
			size_t vaddr = seg->vaddr;

			if (offset < elf->max_offs) {
				/*
				 * We're in a load segment which overlaps
				 * with (or is covered by) the first page
				 * of a shared library.
				 */
				if (vaddr + filesz < SMALL_PAGE_SIZE) {
					size_t num_bytes = 0;

					/*
					 * If this segment is completely
					 * covered, take next.
					 */
					if (vaddr + memsz <= SMALL_PAGE_SIZE)
						continue;

					/*
					 * All data of the segment is
					 * loaded, but we need to zero
					 * extend it.
					 */
					va = elf->max_addr;
					num_bytes = roundup(vaddr + memsz) -
						    roundup(vaddr) -
						    SMALL_PAGE_SIZE;
					assert(num_bytes);
					res = sys_map_zi(num_bytes, 0, &va, 0,
							 0);
					if (res)
						err(res, "sys_map_zi");
					elf->max_addr = roundup(va + num_bytes);
					continue;
				}

				/* Partial overlap, remove the first page. */
				vaddr += SMALL_PAGE_SIZE;
				filesz -= SMALL_PAGE_SIZE;
				memsz -= SMALL_PAGE_SIZE;
				offset += SMALL_PAGE_SIZE;
			}

			if (!elf->load_addr) {
				va = 0;
				pad_begin = get_pad_begin();
				/*
				 * If mapping with pad_begin fails we'll
				 * retry without pad_begin, effectively
				 * disabling ASLR for the current ELF file.
				 */
			} else {
				va = vaddr + elf->load_addr;
				pad_begin = 0;
			}

			if (seg->flags & PF_W)
				flags |= PTA_SYSTEM_MAP_FLAG_WRITEABLE;
			else
				flags |= PTA_SYSTEM_MAP_FLAG_SHAREABLE;
			if (seg->flags & PF_X)
				flags |= PTA_SYSTEM_MAP_FLAG_EXECUTABLE;
			if (!(seg->flags & PF_R))
				err(TEE_ERROR_NOT_SUPPORTED,
				    "Segment must be readable");
			if (flags & PTA_SYSTEM_MAP_FLAG_WRITEABLE) {
				res = sys_map_zi(memsz, 0, &va, pad_begin,
						 pad_end);
				if (pad_begin && res == TEE_ERROR_OUT_OF_MEMORY)
					res = sys_map_zi(memsz, 0, &va, 0,
							 pad_end);
				if (res)
					err(res, "sys_map_zi");
				res = sys_copy_from_ta_bin((void *)va, filesz,
							   elf->handle, offset);
				if (res)
					err(res, "sys_copy_from_ta_bin");
			} else {
				res = sys_map_ta_bin(&va, filesz, flags,
						     elf->handle, offset,
						     pad_begin, pad_end);
				if (pad_begin && res == TEE_ERROR_OUT_OF_MEMORY)
					res = sys_map_ta_bin(&va, filesz, flags,
							     elf->handle,
							     offset, 0,
							     pad_end);
				if (res)
					err(res, "sys_map_ta_bin");
			}

			if (!elf->load_addr)
				elf->load_addr = va;
			elf->max_addr = roundup(va + filesz);
			elf->max_offs += filesz;
		}
	}
}
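
/*
 * Builds the segment list and reserves one contiguous virtual address
 * range for the whole image. If the first load segment starts within the
 * first page, the ELF header page mapped by init_elf() is part of the
 * image, so that mapping is moved with sys_remap(), padded to leave room
 * for all remaining segments.
 */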
static void map_segments(struct ta_elf *elf)
{
	TEE_Result res = TEE_SUCCESS;

	parse_load_segments(elf);
	adjust_segments(elf);
	if (TAILQ_FIRST(&elf->segs)->offset < SMALL_PAGE_SIZE) {
		vaddr_t va = 0;
		size_t sz = elf->max_addr - elf->load_addr;
		struct segment *seg = TAILQ_LAST(&elf->segs, segment_head);
		size_t pad_begin = get_pad_begin();

		/*
		 * We're loading a library; if not, other parts of the
		 * code need to be updated too.
		 */
		assert(!elf->is_main);

		/*
		 * Now that we know how much virtual memory is needed, move
		 * the already mapped part to a location which can
		 * accommodate us.
		 */
		res = sys_remap(elf->load_addr, &va, sz, pad_begin,
				roundup(seg->vaddr + seg->memsz));
		if (res == TEE_ERROR_OUT_OF_MEMORY)
			res = sys_remap(elf->load_addr, &va, sz, 0,
					roundup(seg->vaddr + seg->memsz));
		if (res)
			err(res, "sys_remap");
		elf->ehdr_addr = va;
		elf->load_addr = va;
		elf->max_addr = va + sz;
		elf->phdr = (void *)(va + elf->e_phoff);
	}
}

static void add_deps_from_segment(struct ta_elf *elf, unsigned int type,
				  vaddr_t addr, size_t memsz)
{
	size_t dyn_entsize = 0;
	size_t num_dyns = 0;
	size_t n = 0;
	unsigned int tag = 0;
	size_t val = 0;
	TEE_UUID uuid = { };
	char *str_tab = NULL;

	if (type != PT_DYNAMIC)
		return;

	if (elf->is_32bit)
		dyn_entsize = sizeof(Elf32_Dyn);
	else
		dyn_entsize = sizeof(Elf64_Dyn);

	assert(!(memsz % dyn_entsize));
	num_dyns = memsz / dyn_entsize;

	for (n = 0; n < num_dyns; n++) {
		read_dyn(elf, addr, n, &tag, &val);
		if (tag == DT_STRTAB) {
			str_tab = (char *)(val + elf->load_addr);
			break;
		}
	}

	for (n = 0; n < num_dyns; n++) {
		read_dyn(elf, addr, n, &tag, &val);
		if (tag != DT_NEEDED)
			continue;
		tee_uuid_from_str(&uuid, str_tab + val);
		queue_elf(&uuid);
	}
}

static void add_dependencies(struct ta_elf *elf)
{
	size_t n = 0;

	if (elf->is_32bit) {
		Elf32_Phdr *phdr = elf->phdr;

		for (n = 0; n < elf->e_phnum; n++)
			add_deps_from_segment(elf, phdr[n].p_type,
					      phdr[n].p_vaddr,
					      phdr[n].p_memsz);
	} else {
		Elf64_Phdr *phdr = elf->phdr;

		for (n = 0; n < elf->e_phnum; n++)
			add_deps_from_segment(elf, phdr[n].p_type,
					      phdr[n].p_vaddr,
					      phdr[n].p_memsz);
	}
}
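
/*
 * Section headers aren't part of any load segment, so a heap copy is kept
 * instead: whatever happens to lie in the already mapped first page is
 * copied from there, the rest directly from the TA binary.
 */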
static void copy_section_headers(struct ta_elf *elf)
{
	TEE_Result res = TEE_SUCCESS;
	size_t sz = elf->e_shnum * elf->e_shentsize;
	size_t offs = 0;

	elf->shdr = malloc(sz);
	if (!elf->shdr)
		err(TEE_ERROR_OUT_OF_MEMORY, "malloc");

	/*
	 * We're assuming that the section headers come after the load
	 * segments, but if it's a very small dynamically linked library
	 * the section headers can still end up (partially?) in the first
	 * mapped page.
	 */
	if (elf->e_shoff < SMALL_PAGE_SIZE) {
		assert(!elf->is_main);
		offs = MIN(SMALL_PAGE_SIZE - elf->e_shoff, sz);
		memcpy(elf->shdr, (void *)(elf->load_addr + elf->e_shoff),
		       offs);
	}

	if (offs < sz) {
		res = sys_copy_from_ta_bin((uint8_t *)elf->shdr + offs,
					   sz - offs, elf->handle,
					   elf->e_shoff + offs);
		if (res)
			err(res, "sys_copy_from_ta_bin");
	}
}

static void close_handle(struct ta_elf *elf)
{
	TEE_Result res = sys_close_ta_bin(elf->handle);

	if (res)
		err(res, "sys_close_ta_bin");
	elf->handle = -1;
}

static void clean_elf_load_main(struct ta_elf *elf)
{
	TEE_Result res = TEE_SUCCESS;

	/*
	 * Clean up from the last attempt to load
	 */
	res = sys_unmap(elf->ehdr_addr, SMALL_PAGE_SIZE);
	if (res)
		err(res, "sys_unmap");

	while (!TAILQ_EMPTY(&elf->segs)) {
		struct segment *seg = TAILQ_FIRST(&elf->segs);
		vaddr_t va = 0;
		size_t num_bytes = 0;

		va = rounddown(elf->load_addr + seg->vaddr);
		if (seg->remapped_writeable)
			num_bytes = roundup(seg->vaddr + seg->memsz) -
				    rounddown(seg->vaddr);
		else
			num_bytes = seg->memsz;

		res = sys_unmap(va, num_bytes);
		if (res)
			err(res, "sys_unmap");

		TAILQ_REMOVE(&elf->segs, seg, link);
		free(seg);
	}

	free(elf->shdr);
	memset(&elf->is_32bit, 0,
	       (vaddr_t)&elf->uuid - (vaddr_t)&elf->is_32bit);

	TAILQ_INIT(&elf->segs);
}
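
/*
 * Loads the main TA: maps and populates the segments, queues the
 * dependencies, and saves the section headers and dynamic symbol table.
 * If ta_head turns out to contain a deprecated entry point the TA is a
 * legacy TA, so everything is unmapped and reloaded without shared
 * segments.
 */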
static void load_main(struct ta_elf *elf)
{
	init_elf(elf);
	map_segments(elf);
	populate_segments(elf);
	add_dependencies(elf);
	copy_section_headers(elf);
	save_symtab(elf);
	close_handle(elf);

	elf->head = (struct ta_head *)elf->load_addr;
	if (elf->head->depr_entry != UINT64_MAX) {
		/*
		 * Legacy TAs set their entry point in ta_head. For
		 * non-legacy TAs the entry point of the ELF is used
		 * instead, and the ta_head entry point is left set to
		 * UINT64_MAX to indicate that it's not used.
		 *
		 * NB, everything before the commit a73b5878c89d ("Replace
		 * ta_head.entry with elf entry") is considered a legacy
		 * TA by ldelf.
		 *
		 * Legacy TAs cannot be mapped with shared memory segments,
		 * so restart the mapping if it turned out we're loading a
		 * legacy TA.
		 */
		DMSG("Reloading TA %pUl as legacy TA", (void *)&elf->uuid);
		clean_elf_load_main(elf);
		elf->is_legacy = true;
		init_elf(elf);
		map_segments(elf);
		populate_segments_legacy(elf);
		add_dependencies(elf);
		copy_section_headers(elf);
		save_symtab(elf);
		close_handle(elf);
		elf->head = (struct ta_head *)elf->load_addr;
		/*
		 * Check that the TA is still a legacy TA; if it isn't,
		 * give up now since we're likely under attack.
		 */
		if (elf->head->depr_entry == UINT64_MAX)
			err(TEE_ERROR_GENERIC,
			    "TA %pUl was changed on disk to non-legacy",
			    (void *)&elf->uuid);
	}
}

void ta_elf_load_main(const TEE_UUID *uuid, uint32_t *is_32bit, uint64_t *sp,
		      uint32_t *ta_flags)
{
	struct ta_elf *elf = queue_elf(uuid);
	vaddr_t va = 0;
	TEE_Result res = TEE_SUCCESS;

	assert(elf);
	elf->is_main = true;

	load_main(elf);

	*is_32bit = elf->is_32bit;
	res = sys_map_zi(elf->head->stack_size, 0, &va, 0, 0);
	if (res)
		err(res, "sys_map_zi stack");

	if (elf->head->flags & ~TA_FLAGS_MASK)
		err(TEE_ERROR_BAD_FORMAT, "Invalid TA flag(s) %#"PRIx32,
		    elf->head->flags & ~TA_FLAGS_MASK);

	*ta_flags = elf->head->flags;
	*sp = va + elf->head->stack_size;
	ta_stack = va;
	ta_stack_size = elf->head->stack_size;
}

void ta_elf_finalize_load_main(uint64_t *entry)
{
	struct ta_elf *elf = TAILQ_FIRST(&main_elf_queue);

	assert(elf->is_main);

	if (elf->is_legacy)
		*entry = elf->head->depr_entry;
	else
		*entry = elf->e_entry + elf->load_addr;
}

void ta_elf_load_dependency(struct ta_elf *elf, bool is_32bit)
{
	if (elf->is_main)
		return;

	init_elf(elf);
	if (elf->is_32bit != is_32bit)
		err(TEE_ERROR_BAD_FORMAT, "ELF %pUl is %sbit (expected %sbit)",
		    (void *)&elf->uuid, elf->is_32bit ? "32" : "64",
		    is_32bit ? "32" : "64");

	map_segments(elf);
	populate_segments(elf);
	add_dependencies(elf);
	copy_section_headers(elf);
	save_symtab(elf);
	close_handle(elf);
}

void ta_elf_finalize_mappings(struct ta_elf *elf)
{
	TEE_Result res = TEE_SUCCESS;
	struct segment *seg = NULL;

	if (!elf->is_legacy)
		return;

	TAILQ_FOREACH(seg, &elf->segs, link) {
		vaddr_t va = elf->load_addr + seg->vaddr;
		uint32_t flags = 0;

		if (seg->flags & PF_W)
			flags |= PTA_SYSTEM_MAP_FLAG_WRITEABLE;
		if (seg->flags & PF_X)
			flags |= PTA_SYSTEM_MAP_FLAG_EXECUTABLE;

		res = sys_set_prot(va, seg->memsz, flags);
		if (res)
			err(res, "sys_set_prot");
	}
}
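
/*
 * Adapts a printf-style variadic call to the va_list-based print_func_t
 * callback; used by print_seg() below so that the format string can be
 * checked by the compiler via __printf().
 */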
static void __printf(3, 4) print_wrapper(void *pctx, print_func_t print_func,
					 const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	print_func(pctx, fmt, ap);
	va_end(ap);
}

static void print_seg(void *pctx, print_func_t print_func,
		      size_t idx __maybe_unused, int elf_idx __maybe_unused,
		      vaddr_t va __maybe_unused, paddr_t pa __maybe_unused,
		      size_t sz __maybe_unused, uint32_t flags)
{
	int width __maybe_unused = 8;
	char desc[14] __maybe_unused = "";
	char flags_str[] __maybe_unused = "----";

	if (elf_idx > -1) {
		snprintf(desc, sizeof(desc), " [%d]", elf_idx);
	} else {
		if (flags & DUMP_MAP_EPHEM)
			snprintf(desc, sizeof(desc), " (param)");
		if (flags & DUMP_MAP_LDELF)
			snprintf(desc, sizeof(desc), " (ldelf)");
		if (va == ta_stack)
			snprintf(desc, sizeof(desc), " (stack)");
	}

	if (flags & DUMP_MAP_READ)
		flags_str[0] = 'r';
	if (flags & DUMP_MAP_WRITE)
		flags_str[1] = 'w';
	if (flags & DUMP_MAP_EXEC)
		flags_str[2] = 'x';
	if (flags & DUMP_MAP_SECURE)
		flags_str[3] = 's';

	print_wrapper(pctx, print_func,
		      "region %2zu: va 0x%0*"PRIxVA" pa 0x%0*"PRIxPA" size 0x%06zx flags %s%s\n",
		      idx, width, va, width, pa, sz, flags_str, desc);
}

static bool get_next_in_order(struct ta_elf_queue *elf_queue,
			      struct ta_elf **elf, struct segment **seg,
			      size_t *elf_idx)
{
	struct ta_elf *e = NULL;
	struct segment *s = NULL;
	size_t idx = 0;
	vaddr_t va = 0;
	struct ta_elf *e2 = NULL;
	size_t i2 = 0;

	assert(elf && seg && elf_idx);
	e = *elf;
	s = *seg;
	assert((e == NULL && s == NULL) || (e != NULL && s != NULL));

	if (s) {
		s = TAILQ_NEXT(s, link);
		if (s) {
			*seg = s;
			return true;
		}
	}

	if (e)
		va = e->load_addr;

	/* Find the ELF with the next load address */
	e = NULL;
	TAILQ_FOREACH(e2, elf_queue, link) {
		if (e2->load_addr > va) {
			if (!e || e2->load_addr < e->load_addr) {
				e = e2;
				idx = i2;
			}
		}
		i2++;
	}
	if (!e)
		return false;

	*elf = e;
	*seg = TAILQ_FIRST(&e->segs);
	*elf_idx = idx;
	return true;
}
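
/*
 * Prints one line per mapped region, merging the kernel-provided dump_map
 * entries with the ELF segments in ascending virtual address order, for
 * example (hypothetical addresses):
 *
 *  region  0: va 0x40014000 pa 0x00000001 size 0x002000 flags rw-s (ldelf)
 *  region  1: va 0x40040000 pa 0x00000000 size 0x001000 flags rw-s (stack)
 *  region  2: va 0x40066000 pa 0x00001000 size 0x00f000 flags r-xs [0]
 *   [0] 8aaaf200-2450-11e4-abe2-0002a5d5c51b @ 0x40066000
 */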
void ta_elf_print_mappings(void *pctx, print_func_t print_func,
			   struct ta_elf_queue *elf_queue, size_t num_maps,
			   struct dump_map *maps, vaddr_t mpool_base)
{
	struct segment *seg = NULL;
	struct ta_elf *elf = NULL;
	size_t elf_idx = 0;
	size_t idx = 0;
	size_t map_idx = 0;

	/*
	 * Loop over all segments and maps, printing virtual addresses in
	 * order. Segments take priority if a virtual address is present
	 * in both a map and a segment.
	 */
	get_next_in_order(elf_queue, &elf, &seg, &elf_idx);
	while (true) {
		vaddr_t va = -1;
		size_t sz = 0;
		uint32_t flags = DUMP_MAP_SECURE;
		size_t offs = 0;

		if (seg) {
			va = rounddown(seg->vaddr + elf->load_addr);
			sz = roundup(seg->vaddr + seg->memsz) -
			     rounddown(seg->vaddr);
		}

		while (map_idx < num_maps && maps[map_idx].va <= va) {
			uint32_t f = 0;

			/* If there's a match, it should be the same map */
			if (maps[map_idx].va == va) {
				/*
				 * In shared libraries the first page is
				 * mapped separately, with the rest of the
				 * segment following back to back in a
				 * separate entry.
				 */
				if (map_idx + 1 < num_maps &&
				    maps[map_idx].sz == SMALL_PAGE_SIZE) {
					vaddr_t next_va = maps[map_idx].va +
							  maps[map_idx].sz;
					size_t comb_sz = maps[map_idx].sz +
							 maps[map_idx + 1].sz;

					if (next_va == maps[map_idx + 1].va &&
					    comb_sz == sz &&
					    maps[map_idx].flags ==
					    maps[map_idx + 1].flags) {
						/* Skip this and next entry */
						map_idx += 2;
						continue;
					}
				}
				assert(maps[map_idx].sz == sz);
			} else if (maps[map_idx].va < va) {
				if (maps[map_idx].va == mpool_base)
					f |= DUMP_MAP_LDELF;
				print_seg(pctx, print_func, idx, -1,
					  maps[map_idx].va, maps[map_idx].pa,
					  maps[map_idx].sz,
					  maps[map_idx].flags | f);
				idx++;
			}
			map_idx++;
		}

		if (!seg)
			break;

		offs = rounddown(seg->offset);
		if (seg->flags & PF_R)
			flags |= DUMP_MAP_READ;
		if (seg->flags & PF_W)
			flags |= DUMP_MAP_WRITE;
		if (seg->flags & PF_X)
			flags |= DUMP_MAP_EXEC;

		print_seg(pctx, print_func, idx, elf_idx, va, offs, sz, flags);
		idx++;

		if (!get_next_in_order(elf_queue, &elf, &seg, &elf_idx))
			seg = NULL;
	}

	elf_idx = 0;
	TAILQ_FOREACH(elf, elf_queue, link) {
		print_wrapper(pctx, print_func,
			      " [%zu] %pUl @ 0x%0*"PRIxVA"\n",
			      elf_idx, (void *)&elf->uuid, 8, elf->load_addr);
		elf_idx++;
	}
}

#ifdef CFG_UNWIND
void ta_elf_stack_trace_a32(uint32_t regs[16])
{
	struct unwind_state_arm32 state = { };

	memcpy(state.registers, regs, sizeof(state.registers));
	print_stack_arm32(&state, ta_stack, ta_stack_size);
}

void ta_elf_stack_trace_a64(uint64_t fp, uint64_t sp, uint64_t pc)
{
	struct unwind_state_arm64 state = { .fp = fp, .sp = sp, .pc = pc };

	print_stack_arm64(&state, ta_stack, ta_stack_size);
}
#endif

TEE_Result ta_elf_add_library(const TEE_UUID *uuid)
{
	struct ta_elf *ta = TAILQ_FIRST(&main_elf_queue);
	struct ta_elf *lib = ta_elf_find_elf(uuid);
	struct ta_elf *elf = NULL;

	if (lib)
		return TEE_SUCCESS; /* Already mapped */

	lib = queue_elf_helper(uuid);
	if (!lib)
		return TEE_ERROR_OUT_OF_MEMORY;

	for (elf = lib; elf; elf = TAILQ_NEXT(elf, link))
		ta_elf_load_dependency(elf, ta->is_32bit);

	for (elf = lib; elf; elf = TAILQ_NEXT(elf, link)) {
		ta_elf_relocate(elf);
		ta_elf_finalize_mappings(elf);
	}

	for (elf = lib; elf; elf = TAILQ_NEXT(elf, link))
		DMSG("ELF (%pUl) at %#"PRIxVA,
		     (void *)&elf->uuid, elf->load_addr);

	return TEE_SUCCESS;
}