// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2019, Linaro Limited
 */

#include <assert.h>
#include <ctype.h>
#include <elf32.h>
#include <elf64.h>
#include <elf_common.h>
#include <ldelf.h>
#include <pta_system.h>
#include <stdio.h>
#include <stdlib.h>
#include <string_ext.h>
#include <string.h>
#include <tee_api_types.h>
#include <tee_internal_api_extensions.h>
#include <user_ta_header.h>
#include <utee_syscalls.h>
#include <util.h>

#include "sys.h"
#include "ta_elf.h"
#include "unwind.h"

static vaddr_t ta_stack;
static vaddr_t ta_stack_size;

struct ta_elf_queue main_elf_queue = TAILQ_HEAD_INITIALIZER(main_elf_queue);

static struct ta_elf *queue_elf_helper(const TEE_UUID *uuid)
{
	struct ta_elf *elf = calloc(1, sizeof(*elf));

	if (!elf)
		return NULL;

	TAILQ_INIT(&elf->segs);

	elf->uuid = *uuid;
	TAILQ_INSERT_TAIL(&main_elf_queue, elf, link);
	return elf;
}

static struct ta_elf *queue_elf(const TEE_UUID *uuid)
{
	struct ta_elf *elf = ta_elf_find_elf(uuid);

	if (elf)
		return NULL;

	elf = queue_elf_helper(uuid);
	if (!elf)
		err(TEE_ERROR_OUT_OF_MEMORY, "queue_elf_helper");

	return elf;
}

struct ta_elf *ta_elf_find_elf(const TEE_UUID *uuid)
{
	struct ta_elf *elf = NULL;

	TAILQ_FOREACH(elf, &main_elf_queue, link)
		if (!memcmp(uuid, &elf->uuid, sizeof(*uuid)))
			return elf;

	return NULL;
}

static TEE_Result e32_parse_ehdr(struct ta_elf *elf, Elf32_Ehdr *ehdr)
{
	if (ehdr->e_ident[EI_VERSION] != EV_CURRENT ||
	    ehdr->e_ident[EI_CLASS] != ELFCLASS32 ||
	    ehdr->e_ident[EI_DATA] != ELFDATA2LSB ||
	    ehdr->e_ident[EI_OSABI] != ELFOSABI_NONE ||
	    ehdr->e_type != ET_DYN || ehdr->e_machine != EM_ARM ||
	    (ehdr->e_flags & EF_ARM_ABIMASK) != EF_ARM_ABI_VERSION ||
#ifndef CFG_WITH_VFP
	    (ehdr->e_flags & EF_ARM_ABI_FLOAT_HARD) ||
#endif
	    ehdr->e_phentsize != sizeof(Elf32_Phdr) ||
	    ehdr->e_shentsize != sizeof(Elf32_Shdr))
		return TEE_ERROR_BAD_FORMAT;

	elf->is_32bit = true;
	elf->e_entry = ehdr->e_entry;
	elf->e_phoff = ehdr->e_phoff;
	elf->e_shoff = ehdr->e_shoff;
	elf->e_phnum = ehdr->e_phnum;
	elf->e_shnum = ehdr->e_shnum;
	elf->e_phentsize = ehdr->e_phentsize;
	elf->e_shentsize = ehdr->e_shentsize;

	return TEE_SUCCESS;
}

#ifdef ARM64
static TEE_Result e64_parse_ehdr(struct ta_elf *elf, Elf64_Ehdr *ehdr)
{
	if (ehdr->e_ident[EI_VERSION] != EV_CURRENT ||
	    ehdr->e_ident[EI_CLASS] != ELFCLASS64 ||
	    ehdr->e_ident[EI_DATA] != ELFDATA2LSB ||
	    ehdr->e_ident[EI_OSABI] != ELFOSABI_NONE ||
	    ehdr->e_type != ET_DYN || ehdr->e_machine != EM_AARCH64 ||
	    ehdr->e_flags || ehdr->e_phentsize != sizeof(Elf64_Phdr) ||
	    ehdr->e_shentsize != sizeof(Elf64_Shdr))
		return TEE_ERROR_BAD_FORMAT;

	elf->is_32bit = false;
	elf->e_entry = ehdr->e_entry;
	elf->e_phoff = ehdr->e_phoff;
	elf->e_shoff = ehdr->e_shoff;
	elf->e_phnum = ehdr->e_phnum;
	elf->e_shnum = ehdr->e_shnum;
	elf->e_phentsize = ehdr->e_phentsize;
	elf->e_shentsize = ehdr->e_shentsize;

	return TEE_SUCCESS;
}
#else /*ARM64*/
static TEE_Result e64_parse_ehdr(struct ta_elf *elf __unused,
				 Elf64_Ehdr *ehdr __unused)
{
	return TEE_ERROR_NOT_SUPPORTED;
}
#endif /*ARM64*/

static void check_phdr_in_range(struct ta_elf *elf, unsigned int type,
				vaddr_t addr, size_t memsz)
{
	vaddr_t max_addr = 0;

	if (ADD_OVERFLOW(addr, memsz, &max_addr))
		err(TEE_ERROR_BAD_FORMAT, "Program header %#x overflow", type);

	/*
	 * elf->load_addr and elf->max_addr are both using the
	 * final virtual addresses, while this program header is
	 * relative to 0.
	 */
	if (max_addr > elf->max_addr - elf->load_addr)
		err(TEE_ERROR_BAD_FORMAT, "Program header %#x out of bounds",
		    type);
}
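
/*
 * Bounds-checking pattern used throughout this file: ADD_OVERFLOW() and
 * MUL_OVERFLOW() (from <util.h>) perform the operation and report whether
 * it wrapped. A minimal sketch of the idiom, with illustrative names:
 *
 *	vaddr_t end = 0;
 *
 *	if (ADD_OVERFLOW(start, size, &end))
 *		return TEE_ERROR_BAD_FORMAT;	// sum wrapped around
 *	if (end > limit)
 *		return TEE_ERROR_BAD_FORMAT;	// past the mapped area
 *
 * Checking the sum for overflow before comparing it against the limit is
 * what keeps these range checks safe for attacker-controlled ELF headers.
 */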
static void read_dyn(struct ta_elf *elf, vaddr_t addr,
		     size_t idx, unsigned int *tag, size_t *val)
{
	if (elf->is_32bit) {
		Elf32_Dyn *dyn = (Elf32_Dyn *)(addr + elf->load_addr);

		*tag = dyn[idx].d_tag;
		*val = dyn[idx].d_un.d_val;
	} else {
		Elf64_Dyn *dyn = (Elf64_Dyn *)(addr + elf->load_addr);

		*tag = dyn[idx].d_tag;
		*val = dyn[idx].d_un.d_val;
	}
}

static void save_hashtab_from_segment(struct ta_elf *elf, unsigned int type,
				      vaddr_t addr, size_t memsz)
{
	size_t dyn_entsize = 0;
	size_t num_dyns = 0;
	size_t n = 0;
	unsigned int tag = 0;
	size_t val = 0;

	if (type != PT_DYNAMIC)
		return;

	check_phdr_in_range(elf, type, addr, memsz);

	if (elf->is_32bit)
		dyn_entsize = sizeof(Elf32_Dyn);
	else
		dyn_entsize = sizeof(Elf64_Dyn);

	assert(!(memsz % dyn_entsize));
	num_dyns = memsz / dyn_entsize;

	for (n = 0; n < num_dyns; n++) {
		read_dyn(elf, addr, n, &tag, &val);
		if (tag == DT_HASH) {
			elf->hashtab = (void *)(val + elf->load_addr);
			break;
		}
	}
}

static void check_range(struct ta_elf *elf, const char *name, const void *ptr,
			size_t sz)
{
	size_t max_addr = 0;

	if ((vaddr_t)ptr < elf->load_addr)
		err(TEE_ERROR_BAD_FORMAT, "%s %p out of range", name, ptr);

	if (ADD_OVERFLOW((vaddr_t)ptr, sz, &max_addr))
		err(TEE_ERROR_BAD_FORMAT, "%s range overflow", name);

	if (max_addr > elf->max_addr)
		err(TEE_ERROR_BAD_FORMAT,
		    "%s %p..%#zx out of range", name, ptr, max_addr);
}

static void check_hashtab(struct ta_elf *elf, void *ptr, size_t num_buckets,
			  size_t num_chains)
{
	/*
	 * Starting from 2 as the first two words are mandatory and hold
	 * num_buckets and num_chains. So this function is called twice,
	 * first to see that there's indeed room for num_buckets and
	 * num_chains and then to see that all of it fits.
	 * See http://www.sco.com/developers/gabi/latest/ch5.dynamic.html#hash
	 */
	size_t num_words = 2;
	size_t sz = 0;

	if (!ALIGNMENT_IS_OK(ptr, uint32_t))
		err(TEE_ERROR_BAD_FORMAT, "Bad alignment of DT_HASH %p", ptr);

	if (ADD_OVERFLOW(num_words, num_buckets, &num_words) ||
	    ADD_OVERFLOW(num_words, num_chains, &num_words) ||
	    MUL_OVERFLOW(num_words, sizeof(uint32_t), &sz))
		err(TEE_ERROR_BAD_FORMAT, "DT_HASH overflow");

	check_range(elf, "DT_HASH", ptr, sz);
}
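
/*
 * For reference, the SysV hash table validated by check_hashtab() has the
 * following layout (pseudo-C, the flexible sizes are not valid C; see the
 * gABI link above):
 *
 *	uint32_t nbucket;
 *	uint32_t nchain;
 *	uint32_t bucket[nbucket];
 *	uint32_t chain[nchain];
 *
 * This is why the function is called twice from save_hashtab() below:
 * first with (0, 0) to verify that the two header words are in range,
 * then with the values read from those words to verify that the whole
 * table fits.
 */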
static void save_hashtab(struct ta_elf *elf)
{
	uint32_t *hashtab = NULL;
	size_t n = 0;

	if (elf->is_32bit) {
		Elf32_Phdr *phdr = elf->phdr;

		for (n = 0; n < elf->e_phnum; n++)
			save_hashtab_from_segment(elf, phdr[n].p_type,
						  phdr[n].p_vaddr,
						  phdr[n].p_memsz);
	} else {
		Elf64_Phdr *phdr = elf->phdr;

		for (n = 0; n < elf->e_phnum; n++)
			save_hashtab_from_segment(elf, phdr[n].p_type,
						  phdr[n].p_vaddr,
						  phdr[n].p_memsz);
	}

	check_hashtab(elf, elf->hashtab, 0, 0);
	hashtab = elf->hashtab;
	check_hashtab(elf, elf->hashtab, hashtab[0], hashtab[1]);
}

static void e32_save_symtab(struct ta_elf *elf, size_t tab_idx)
{
	Elf32_Shdr *shdr = elf->shdr;
	size_t str_idx = shdr[tab_idx].sh_link;

	elf->dynsymtab = (void *)(shdr[tab_idx].sh_addr + elf->load_addr);
	if (!ALIGNMENT_IS_OK(elf->dynsymtab, Elf32_Sym))
		err(TEE_ERROR_BAD_FORMAT, "Bad alignment of dynsymtab %p",
		    elf->dynsymtab);
	check_range(elf, "Dynsymtab", elf->dynsymtab, shdr[tab_idx].sh_size);

	if (shdr[tab_idx].sh_size % sizeof(Elf32_Sym))
		err(TEE_ERROR_BAD_FORMAT,
		    "Size of dynsymtab not an even multiple of Elf32_Sym");
	elf->num_dynsyms = shdr[tab_idx].sh_size / sizeof(Elf32_Sym);

	if (str_idx >= elf->e_shnum)
		err(TEE_ERROR_BAD_FORMAT, "Dynstr section index out of range");
	elf->dynstr = (void *)(shdr[str_idx].sh_addr + elf->load_addr);
	check_range(elf, "Dynstr", elf->dynstr, shdr[str_idx].sh_size);

	elf->dynstr_size = shdr[str_idx].sh_size;
}

static void e64_save_symtab(struct ta_elf *elf, size_t tab_idx)
{
	Elf64_Shdr *shdr = elf->shdr;
	size_t str_idx = shdr[tab_idx].sh_link;

	elf->dynsymtab = (void *)(vaddr_t)(shdr[tab_idx].sh_addr +
					   elf->load_addr);

	if (!ALIGNMENT_IS_OK(elf->dynsymtab, Elf64_Sym))
		err(TEE_ERROR_BAD_FORMAT, "Bad alignment of .dynsym/DYNSYM %p",
		    elf->dynsymtab);
	check_range(elf, ".dynsym/DYNSYM", elf->dynsymtab,
		    shdr[tab_idx].sh_size);

	if (shdr[tab_idx].sh_size % sizeof(Elf64_Sym))
		err(TEE_ERROR_BAD_FORMAT,
		    "Size of .dynsym/DYNSYM not an even multiple of Elf64_Sym");
	elf->num_dynsyms = shdr[tab_idx].sh_size / sizeof(Elf64_Sym);

	if (str_idx >= elf->e_shnum)
		err(TEE_ERROR_BAD_FORMAT,
		    ".dynstr/STRTAB section index out of range");
	elf->dynstr = (void *)(vaddr_t)(shdr[str_idx].sh_addr + elf->load_addr);
	check_range(elf, ".dynstr/STRTAB", elf->dynstr, shdr[str_idx].sh_size);

	elf->dynstr_size = shdr[str_idx].sh_size;
}

static void save_symtab(struct ta_elf *elf)
{
	size_t n = 0;

	if (elf->is_32bit) {
		Elf32_Shdr *shdr = elf->shdr;

		for (n = 0; n < elf->e_shnum; n++) {
			if (shdr[n].sh_type == SHT_DYNSYM) {
				e32_save_symtab(elf, n);
				break;
			}
		}
	} else {
		Elf64_Shdr *shdr = elf->shdr;

		for (n = 0; n < elf->e_shnum; n++) {
			if (shdr[n].sh_type == SHT_DYNSYM) {
				e64_save_symtab(elf, n);
				break;
			}
		}
	}

	save_hashtab(elf);
}
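
/*
 * A hedged sketch of how the tables saved above are typically consumed
 * when resolving a symbol name with the SysV hash (the actual resolver
 * lives elsewhere in ldelf, e.g. ta_elf_rel.c; the names below are
 * illustrative):
 *
 *	uint32_t *htab = elf->hashtab;
 *	uint32_t nbucket = htab[0];
 *	uint32_t *bucket = htab + 2;
 *	uint32_t *chain = bucket + nbucket;
 *	size_t n = 0;
 *
 *	for (n = bucket[hash(name) % nbucket]; n; n = chain[n])
 *		if (!strcmp(name, elf->dynstr + sym[n].st_name))
 *			break;	// sym[n] is the wanted dynsym entry
 */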
static void init_elf(struct ta_elf *elf)
{
	TEE_Result res = TEE_SUCCESS;
	vaddr_t va = 0;
	uint32_t flags = PTA_SYSTEM_MAP_FLAG_SHAREABLE;
	size_t sz = 0;

	res = sys_open_ta_bin(&elf->uuid, &elf->handle);
	if (res)
		err(res, "sys_open_ta_bin(%pUl)", (void *)&elf->uuid);

	/*
	 * Map it read-only executable when we're loading a library where
	 * the ELF header is included in a load segment.
	 */
	if (!elf->is_main)
		flags |= PTA_SYSTEM_MAP_FLAG_EXECUTABLE;
	res = sys_map_ta_bin(&va, SMALL_PAGE_SIZE, flags, elf->handle, 0, 0, 0);
	if (res)
		err(res, "sys_map_ta_bin");
	elf->ehdr_addr = va;
	if (!elf->is_main) {
		elf->load_addr = va;
		elf->max_addr = va + SMALL_PAGE_SIZE;
		elf->max_offs = SMALL_PAGE_SIZE;
	}

	if (!IS_ELF(*(Elf32_Ehdr *)va))
		err(TEE_ERROR_BAD_FORMAT, "TA is not an ELF");

	res = e32_parse_ehdr(elf, (void *)va);
	if (res == TEE_ERROR_BAD_FORMAT)
		res = e64_parse_ehdr(elf, (void *)va);
	if (res)
		err(res, "Cannot parse ELF");

	if (MUL_OVERFLOW(elf->e_phnum, elf->e_phentsize, &sz) ||
	    ADD_OVERFLOW(sz, elf->e_phoff, &sz))
		err(TEE_ERROR_BAD_FORMAT, "Program headers size overflow");

	if (sz > SMALL_PAGE_SIZE)
		err(TEE_ERROR_NOT_SUPPORTED, "Cannot read program headers");

	elf->phdr = (void *)(va + elf->e_phoff);
}

static size_t roundup(size_t v)
{
	return ROUNDUP(v, SMALL_PAGE_SIZE);
}

static size_t rounddown(size_t v)
{
	return ROUNDDOWN(v, SMALL_PAGE_SIZE);
}

static void add_segment(struct ta_elf *elf, size_t offset, size_t vaddr,
			size_t filesz, size_t memsz, size_t flags, size_t align)
{
	struct segment *seg = calloc(1, sizeof(*seg));

	if (!seg)
		err(TEE_ERROR_OUT_OF_MEMORY, "calloc");

	if (memsz < filesz)
		err(TEE_ERROR_BAD_FORMAT, "Memsz smaller than filesz");

	seg->offset = offset;
	seg->vaddr = vaddr;
	seg->filesz = filesz;
	seg->memsz = memsz;
	seg->flags = flags;
	seg->align = align;

	TAILQ_INSERT_TAIL(&elf->segs, seg, link);
}

static void parse_load_segments(struct ta_elf *elf)
{
	size_t n = 0;

	if (elf->is_32bit) {
		Elf32_Phdr *phdr = elf->phdr;

		for (n = 0; n < elf->e_phnum; n++)
			if (phdr[n].p_type == PT_LOAD) {
				add_segment(elf, phdr[n].p_offset,
					    phdr[n].p_vaddr, phdr[n].p_filesz,
					    phdr[n].p_memsz, phdr[n].p_flags,
					    phdr[n].p_align);
			} else if (phdr[n].p_type == PT_ARM_EXIDX) {
				elf->exidx_start = phdr[n].p_vaddr;
				elf->exidx_size = phdr[n].p_filesz;
			}
	} else {
		Elf64_Phdr *phdr = elf->phdr;

		for (n = 0; n < elf->e_phnum; n++)
			if (phdr[n].p_type == PT_LOAD)
				add_segment(elf, phdr[n].p_offset,
					    phdr[n].p_vaddr, phdr[n].p_filesz,
					    phdr[n].p_memsz, phdr[n].p_flags,
					    phdr[n].p_align);
	}
}

static void copy_remapped_to(struct ta_elf *elf, const struct segment *seg)
{
	uint8_t *dst = (void *)(seg->vaddr + elf->load_addr);
	size_t n = 0;
	size_t offs = seg->offset;
	size_t num_bytes = seg->filesz;

	if (offs < elf->max_offs) {
		n = MIN(elf->max_offs - offs, num_bytes);
		memcpy(dst, (void *)(elf->max_addr + offs - elf->max_offs), n);
		dst += n;
		offs += n;
		num_bytes -= n;
	}

	if (num_bytes) {
		TEE_Result res = sys_copy_from_ta_bin(dst, num_bytes,
						      elf->handle, offs);

		if (res)
			err(res, "sys_copy_from_ta_bin");
		elf->max_offs += offs;
	}
}
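
/*
 * Worked example for copy_remapped_to(), with illustrative numbers and
 * 4 KiB pages: assume elf->max_offs == 0x1000 and a segment with
 * offset == 0xe00 and filesz == 0x400. The first 0x200 bytes already sit
 * at the end of the previously mapped page and are copied with memcpy()
 * from just below elf->max_addr; the remaining 0x200 bytes are fetched
 * from the TA binary with sys_copy_from_ta_bin() starting at offset
 * 0x1000.
 */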
"sys_copy_from_ta_bin"); 468 elf->max_offs += offs; 469 } 470 } 471 472 static void adjust_segments(struct ta_elf *elf) 473 { 474 struct segment *seg = NULL; 475 struct segment *prev_seg = NULL; 476 size_t prev_end_addr = 0; 477 size_t align = 0; 478 size_t mask = 0; 479 480 /* Sanity check */ 481 TAILQ_FOREACH(seg, &elf->segs, link) { 482 size_t dummy __maybe_unused = 0; 483 484 assert(seg->align >= SMALL_PAGE_SIZE); 485 assert(!ADD_OVERFLOW(seg->vaddr, seg->memsz, &dummy)); 486 assert(seg->filesz <= seg->memsz); 487 assert((seg->offset & SMALL_PAGE_MASK) == 488 (seg->vaddr & SMALL_PAGE_MASK)); 489 490 prev_seg = TAILQ_PREV(seg, segment_head, link); 491 if (prev_seg) { 492 assert(seg->vaddr >= prev_seg->vaddr + prev_seg->memsz); 493 assert(seg->offset >= 494 prev_seg->offset + prev_seg->filesz); 495 } 496 if (!align) 497 align = seg->align; 498 assert(align == seg->align); 499 } 500 501 mask = align - 1; 502 503 seg = TAILQ_FIRST(&elf->segs); 504 if (seg) 505 seg = TAILQ_NEXT(seg, link); 506 while (seg) { 507 prev_seg = TAILQ_PREV(seg, segment_head, link); 508 prev_end_addr = prev_seg->vaddr + prev_seg->memsz; 509 510 /* 511 * This segment may overlap with the last "page" in the 512 * previous segment in two different ways: 513 * 1. Virtual address (and offset) overlaps => 514 * Permissions needs to be merged. The offset must have 515 * the SMALL_PAGE_MASK bits set as vaddr and offset must 516 * add up with prevsion segment. 517 * 518 * 2. Only offset overlaps => 519 * The same page in the ELF is mapped at two different 520 * virtual addresses. As a limitation this segment must 521 * be mapped as writeable. 522 */ 523 524 /* Case 1. */ 525 if (rounddown(seg->vaddr) < prev_end_addr) { 526 assert((seg->vaddr & mask) == (seg->offset & mask)); 527 assert(prev_seg->memsz == prev_seg->filesz); 528 529 /* 530 * Merge the segments and their permissions. 531 * Note that the may be a small hole between the 532 * two sections. 533 */ 534 prev_seg->filesz = seg->vaddr + seg->filesz - 535 prev_seg->vaddr; 536 prev_seg->memsz = seg->vaddr + seg->memsz - 537 prev_seg->vaddr; 538 prev_seg->flags |= seg->flags; 539 540 TAILQ_REMOVE(&elf->segs, seg, link); 541 free(seg); 542 seg = TAILQ_NEXT(prev_seg, link); 543 continue; 544 } 545 546 /* Case 2. */ 547 if ((seg->offset & mask) && 548 rounddown(seg->offset) < 549 (prev_seg->offset + prev_seg->filesz)) { 550 551 assert(seg->flags & PF_W); 552 seg->remapped_writeable = true; 553 } 554 555 /* 556 * No overlap, but we may need to align address, offset and 557 * size. 
static void populate_segments_legacy(struct ta_elf *elf)
{
	TEE_Result res = TEE_SUCCESS;
	struct segment *seg = NULL;
	vaddr_t va = 0;

	assert(elf->is_legacy);
	TAILQ_FOREACH(seg, &elf->segs, link) {
		struct segment *last_seg = TAILQ_LAST(&elf->segs, segment_head);
		size_t pad_end = roundup(last_seg->vaddr + last_seg->memsz -
					 seg->vaddr - seg->memsz);
		size_t num_bytes = roundup(seg->memsz);

		if (!elf->load_addr)
			va = 0;
		else
			va = seg->vaddr + elf->load_addr;

		if (!(seg->flags & PF_R))
			err(TEE_ERROR_NOT_SUPPORTED,
			    "Segment must be readable");

		res = sys_map_zi(num_bytes, 0, &va, 0, pad_end);
		if (res)
			err(res, "sys_map_zi");
		res = sys_copy_from_ta_bin((void *)va, seg->filesz,
					   elf->handle, seg->offset);
		if (res)
			err(res, "sys_copy_from_ta_bin");

		if (!elf->load_addr)
			elf->load_addr = va;
		elf->max_addr = va + num_bytes;
		elf->max_offs = seg->offset + seg->filesz;
	}
}

static size_t get_pad_begin(void)
{
#ifdef CFG_TA_ASLR
	size_t min = CFG_TA_ASLR_MIN_OFFSET_PAGES;
	size_t max = CFG_TA_ASLR_MAX_OFFSET_PAGES;
	TEE_Result res = TEE_SUCCESS;
	uint32_t rnd32 = 0;
	size_t rnd = 0;

	COMPILE_TIME_ASSERT(CFG_TA_ASLR_MIN_OFFSET_PAGES <
			    CFG_TA_ASLR_MAX_OFFSET_PAGES);
	if (max > min) {
		res = utee_cryp_random_number_generate(&rnd32, sizeof(rnd32));
		if (res) {
			DMSG("Random read failed: %#"PRIx32, res);
			return min * SMALL_PAGE_SIZE;
		}
		rnd = rnd32 % (max - min);
	}

	return (min + rnd) * SMALL_PAGE_SIZE;
#else /*!CFG_TA_ASLR*/
	return 0;
#endif /*!CFG_TA_ASLR*/
}

static void populate_segments(struct ta_elf *elf)
{
	TEE_Result res = TEE_SUCCESS;
	struct segment *seg = NULL;
	vaddr_t va = 0;
	size_t pad_begin = 0;

	assert(!elf->is_legacy);
	TAILQ_FOREACH(seg, &elf->segs, link) {
		struct segment *last_seg = TAILQ_LAST(&elf->segs, segment_head);
		size_t pad_end = roundup(last_seg->vaddr + last_seg->memsz -
					 seg->vaddr - seg->memsz);

		if (seg->remapped_writeable) {
			size_t num_bytes = roundup(seg->vaddr + seg->memsz) -
					   rounddown(seg->vaddr);

			assert(elf->load_addr);
			va = rounddown(elf->load_addr + seg->vaddr);
			assert(va >= elf->max_addr);
			res = sys_map_zi(num_bytes, 0, &va, 0, pad_end);
			if (res)
				err(res, "sys_map_zi");

			copy_remapped_to(elf, seg);
			elf->max_addr = va + num_bytes;
		} else {
			uint32_t flags = 0;
			size_t filesz = seg->filesz;
			size_t memsz = seg->memsz;
			size_t offset = seg->offset;
			size_t vaddr = seg->vaddr;

			if (offset < elf->max_offs) {
				/*
				 * We're in a load segment which overlaps
				 * with (or is covered by) the first page
				 * of a shared library.
				 */
				if (vaddr + filesz < SMALL_PAGE_SIZE) {
					size_t num_bytes = 0;

					/*
					 * If this segment is completely
					 * covered, take next.
					 */
					if (vaddr + memsz <= SMALL_PAGE_SIZE)
						continue;

					/*
					 * All data of the segment is
					 * loaded, but we need to zero
					 * extend it.
					 */
					va = elf->max_addr;
					num_bytes = roundup(vaddr + memsz) -
						    roundup(vaddr) -
						    SMALL_PAGE_SIZE;
					assert(num_bytes);
					res = sys_map_zi(num_bytes, 0, &va, 0,
							 0);
					if (res)
						err(res, "sys_map_zi");
					elf->max_addr = roundup(va + num_bytes);
					continue;
				}

				/* Partial overlap, remove the first page. */
				vaddr += SMALL_PAGE_SIZE;
				filesz -= SMALL_PAGE_SIZE;
				memsz -= SMALL_PAGE_SIZE;
				offset += SMALL_PAGE_SIZE;
			}

			if (!elf->load_addr) {
				va = 0;
				pad_begin = get_pad_begin();
				/*
				 * If mapping with pad_begin fails we'll
				 * retry without pad_begin, effectively
				 * disabling ASLR for the current ELF file.
				 */
			} else {
				va = vaddr + elf->load_addr;
				pad_begin = 0;
			}

			if (seg->flags & PF_W)
				flags |= PTA_SYSTEM_MAP_FLAG_WRITEABLE;
			else
				flags |= PTA_SYSTEM_MAP_FLAG_SHAREABLE;
			if (seg->flags & PF_X)
				flags |= PTA_SYSTEM_MAP_FLAG_EXECUTABLE;
			if (!(seg->flags & PF_R))
				err(TEE_ERROR_NOT_SUPPORTED,
				    "Segment must be readable");
			if (flags & PTA_SYSTEM_MAP_FLAG_WRITEABLE) {
				res = sys_map_zi(memsz, 0, &va, pad_begin,
						 pad_end);
				if (pad_begin && res == TEE_ERROR_OUT_OF_MEMORY)
					res = sys_map_zi(memsz, 0, &va, 0,
							 pad_end);
				if (res)
					err(res, "sys_map_zi");
				res = sys_copy_from_ta_bin((void *)va, filesz,
							   elf->handle, offset);
				if (res)
					err(res, "sys_copy_from_ta_bin");
			} else {
				if (filesz != memsz)
					err(TEE_ERROR_BAD_FORMAT,
					    "Filesz and memsz mismatch");
				res = sys_map_ta_bin(&va, filesz, flags,
						     elf->handle, offset,
						     pad_begin, pad_end);
				if (pad_begin && res == TEE_ERROR_OUT_OF_MEMORY)
					res = sys_map_ta_bin(&va, filesz, flags,
							     elf->handle,
							     offset, 0,
							     pad_end);
				if (res)
					err(res, "sys_map_ta_bin");
			}

			if (!elf->load_addr)
				elf->load_addr = va;
			elf->max_addr = roundup(va + memsz);
			elf->max_offs += filesz;
		}
	}
}
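
/*
 * Summary of the mapping strategy in populate_segments(): writeable
 * segments are backed by zero-initialized anonymous memory (sys_map_zi())
 * and then filled from the TA binary, so memsz > filesz is naturally
 * zero-extended. Read-only segments are mapped directly from the
 * (shareable) TA binary with sys_map_ta_bin(), which is why filesz must
 * equal memsz for them: there is no private copy that could be
 * zero-extended.
 */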
static void map_segments(struct ta_elf *elf)
{
	TEE_Result res = TEE_SUCCESS;

	parse_load_segments(elf);
	adjust_segments(elf);
	if (TAILQ_FIRST(&elf->segs)->offset < SMALL_PAGE_SIZE) {
		vaddr_t va = 0;
		size_t sz = elf->max_addr - elf->load_addr;
		struct segment *seg = TAILQ_LAST(&elf->segs, segment_head);
		size_t pad_begin = get_pad_begin();

		/*
		 * We're loading a library; if that's not the case, other
		 * parts of the code need to be updated too.
		 */
		assert(!elf->is_main);

		/*
		 * Now that we know how much virtual memory is needed, move
		 * the already mapped part to a location which can
		 * accommodate us.
		 */
		res = sys_remap(elf->load_addr, &va, sz, pad_begin,
				roundup(seg->vaddr + seg->memsz));
		if (res == TEE_ERROR_OUT_OF_MEMORY)
			res = sys_remap(elf->load_addr, &va, sz, 0,
					roundup(seg->vaddr + seg->memsz));
		if (res)
			err(res, "sys_remap");
		elf->ehdr_addr = va;
		elf->load_addr = va;
		elf->max_addr = va + sz;
		elf->phdr = (void *)(va + elf->e_phoff);
	}
}
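
/*
 * Note on map_segments(): a first-segment file offset below
 * SMALL_PAGE_SIZE means the ELF header shares a page with the first load
 * segment, which is how shared libraries are laid out here. Since
 * init_elf() only speculatively mapped one page of the binary, the
 * mapping is moved with sys_remap() once the full size is known; as in
 * populate_segments(), the ASLR pad_begin is dropped and the call retried
 * if the padded request runs out of address space.
 */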
static void add_deps_from_segment(struct ta_elf *elf, unsigned int type,
				  vaddr_t addr, size_t memsz)
{
	size_t dyn_entsize = 0;
	size_t num_dyns = 0;
	size_t n = 0;
	unsigned int tag = 0;
	size_t val = 0;
	TEE_UUID uuid = { };
	char *str_tab = NULL;
	size_t str_tab_sz = 0;

	if (type != PT_DYNAMIC)
		return;

	check_phdr_in_range(elf, type, addr, memsz);

	if (elf->is_32bit)
		dyn_entsize = sizeof(Elf32_Dyn);
	else
		dyn_entsize = sizeof(Elf64_Dyn);

	assert(!(memsz % dyn_entsize));
	num_dyns = memsz / dyn_entsize;

	for (n = 0; n < num_dyns && !(str_tab && str_tab_sz); n++) {
		read_dyn(elf, addr, n, &tag, &val);
		if (tag == DT_STRTAB)
			str_tab = (char *)(val + elf->load_addr);
		else if (tag == DT_STRSZ)
			str_tab_sz = val;
	}
	check_range(elf, ".dynstr/STRTAB", str_tab, str_tab_sz);

	for (n = 0; n < num_dyns; n++) {
		read_dyn(elf, addr, n, &tag, &val);
		if (tag != DT_NEEDED)
			continue;
		if (val >= str_tab_sz)
			err(TEE_ERROR_BAD_FORMAT,
			    "Offset into .dynstr/STRTAB out of range");
		tee_uuid_from_str(&uuid, str_tab + val);
		queue_elf(&uuid);
	}
}

static void add_dependencies(struct ta_elf *elf)
{
	size_t n = 0;

	if (elf->is_32bit) {
		Elf32_Phdr *phdr = elf->phdr;

		for (n = 0; n < elf->e_phnum; n++)
			add_deps_from_segment(elf, phdr[n].p_type,
					      phdr[n].p_vaddr, phdr[n].p_memsz);
	} else {
		Elf64_Phdr *phdr = elf->phdr;

		for (n = 0; n < elf->e_phnum; n++)
			add_deps_from_segment(elf, phdr[n].p_type,
					      phdr[n].p_vaddr, phdr[n].p_memsz);
	}
}

static void copy_section_headers(struct ta_elf *elf)
{
	TEE_Result res = TEE_SUCCESS;
	size_t sz = 0;
	size_t offs = 0;

	if (MUL_OVERFLOW(elf->e_shnum, elf->e_shentsize, &sz))
		err(TEE_ERROR_BAD_FORMAT, "Section headers size overflow");

	elf->shdr = malloc(sz);
	if (!elf->shdr)
		err(TEE_ERROR_OUT_OF_MEMORY, "malloc");

	/*
	 * We're assuming that the section headers come after the load
	 * segments, but if it's a very small dynamically linked library
	 * the section headers can still end up (partially?) in the first
	 * mapped page.
	 */
	if (elf->e_shoff < SMALL_PAGE_SIZE) {
		assert(!elf->is_main);
		offs = MIN(SMALL_PAGE_SIZE - elf->e_shoff, sz);
		memcpy(elf->shdr, (void *)(elf->load_addr + elf->e_shoff),
		       offs);
	}

	if (offs < sz) {
		res = sys_copy_from_ta_bin((uint8_t *)elf->shdr + offs,
					   sz - offs, elf->handle,
					   elf->e_shoff + offs);
		if (res)
			err(res, "sys_copy_from_ta_bin");
	}
}
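
/*
 * Example of the .dynamic entries consumed by add_deps_from_segment()
 * (illustrative offsets; the UUID below is a made-up placeholder):
 *
 *	DT_STRTAB	0x5000	// vaddr of .dynstr
 *	DT_STRSZ	0x80	// size of .dynstr in bytes
 *	DT_NEEDED	0x1	// offset of "aabbccdd-0000-1111-2222-..."
 *
 * Dependencies are named by the string form of their UUID, so each
 * DT_NEEDED string is parsed with tee_uuid_from_str() and the resulting
 * UUID is queued for loading.
 */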
static void close_handle(struct ta_elf *elf)
{
	TEE_Result res = sys_close_ta_bin(elf->handle);

	if (res)
		err(res, "sys_close_ta_bin");
	elf->handle = -1;
}

static void clean_elf_load_main(struct ta_elf *elf)
{
	TEE_Result res = TEE_SUCCESS;

	/*
	 * Clean up from the last attempt to load
	 */
	res = sys_unmap(elf->ehdr_addr, SMALL_PAGE_SIZE);
	if (res)
		err(res, "sys_unmap");

	while (!TAILQ_EMPTY(&elf->segs)) {
		struct segment *seg = TAILQ_FIRST(&elf->segs);
		vaddr_t va = 0;
		size_t num_bytes = 0;

		va = rounddown(elf->load_addr + seg->vaddr);
		if (seg->remapped_writeable)
			num_bytes = roundup(seg->vaddr + seg->memsz) -
				    rounddown(seg->vaddr);
		else
			num_bytes = seg->memsz;

		res = sys_unmap(va, num_bytes);
		if (res)
			err(res, "sys_unmap");

		TAILQ_REMOVE(&elf->segs, seg, link);
		free(seg);
	}

	free(elf->shdr);
	memset(&elf->is_32bit, 0,
	       (vaddr_t)&elf->uuid - (vaddr_t)&elf->is_32bit);

	TAILQ_INIT(&elf->segs);
}

static void load_main(struct ta_elf *elf)
{
	init_elf(elf);
	map_segments(elf);
	populate_segments(elf);
	add_dependencies(elf);
	copy_section_headers(elf);
	save_symtab(elf);
	close_handle(elf);

	elf->head = (struct ta_head *)elf->load_addr;
	if (elf->head->depr_entry != UINT64_MAX) {
		/*
		 * Legacy TAs set their entry point in ta_head. For
		 * non-legacy TAs the entry point of the ELF is used
		 * instead, leaving the ta_head entry point set to
		 * UINT64_MAX to indicate that it's not used.
		 *
		 * NB, everything before the commit a73b5878c89d ("Replace
		 * ta_head.entry with elf entry") is considered legacy TAs
		 * for ldelf.
		 *
		 * Legacy TAs cannot be mapped with shared memory segments
		 * so restart the mapping if it turned out we're loading a
		 * legacy TA.
		 */

		DMSG("Reloading TA %pUl as legacy TA", (void *)&elf->uuid);
		clean_elf_load_main(elf);
		elf->is_legacy = true;
		init_elf(elf);
		map_segments(elf);
		populate_segments_legacy(elf);
		add_dependencies(elf);
		copy_section_headers(elf);
		save_symtab(elf);
		close_handle(elf);
		elf->head = (struct ta_head *)elf->load_addr;
		/*
		 * Check that the TA is still a legacy TA, if it isn't
		 * give up now since we're likely under attack.
		 */
		if (elf->head->depr_entry == UINT64_MAX)
			err(TEE_ERROR_GENERIC,
			    "TA %pUl was changed on disk to non-legacy",
			    (void *)&elf->uuid);
	}
}
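
/*
 * For reference, the ta_head fields consulted by load_main() and
 * ta_elf_load_main() (the authoritative definition is in
 * user_ta_header.h; only the fields used here are listed):
 *
 *	struct ta_head {
 *		TEE_UUID uuid;
 *		uint32_t stack_size;
 *		uint32_t flags;
 *		uint64_t depr_entry;	// UINT64_MAX unless legacy TA
 *	};
 */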
void ta_elf_load_main(const TEE_UUID *uuid, uint32_t *is_32bit, uint64_t *sp,
		      uint32_t *ta_flags)
{
	struct ta_elf *elf = queue_elf(uuid);
	vaddr_t va = 0;
	TEE_Result res = TEE_SUCCESS;

	assert(elf);
	elf->is_main = true;

	load_main(elf);

	*is_32bit = elf->is_32bit;
	res = sys_map_zi(elf->head->stack_size, 0, &va, 0, 0);
	if (res)
		err(res, "sys_map_zi stack");

	if (elf->head->flags & ~TA_FLAGS_MASK)
		err(TEE_ERROR_BAD_FORMAT, "Invalid TA flag(s) %#"PRIx32,
		    elf->head->flags & ~TA_FLAGS_MASK);

	*ta_flags = elf->head->flags;
	*sp = va + elf->head->stack_size;
	ta_stack = va;
	ta_stack_size = elf->head->stack_size;
}

void ta_elf_finalize_load_main(uint64_t *entry)
{
	struct ta_elf *elf = TAILQ_FIRST(&main_elf_queue);
	TEE_Result res = TEE_SUCCESS;

	assert(elf->is_main);

	res = ta_elf_set_init_fini_info(elf->is_32bit);
	if (res)
		err(res, "ta_elf_set_init_fini_info");

	if (elf->is_legacy)
		*entry = elf->head->depr_entry;
	else
		*entry = elf->e_entry + elf->load_addr;
}

void ta_elf_load_dependency(struct ta_elf *elf, bool is_32bit)
{
	if (elf->is_main)
		return;

	init_elf(elf);
	if (elf->is_32bit != is_32bit)
		err(TEE_ERROR_BAD_FORMAT, "ELF %pUl is %sbit (expected %sbit)",
		    (void *)&elf->uuid, elf->is_32bit ? "32" : "64",
		    is_32bit ? "32" : "64");

	map_segments(elf);
	populate_segments(elf);
	add_dependencies(elf);
	copy_section_headers(elf);
	save_symtab(elf);
	close_handle(elf);
}

void ta_elf_finalize_mappings(struct ta_elf *elf)
{
	TEE_Result res = TEE_SUCCESS;
	struct segment *seg = NULL;

	if (!elf->is_legacy)
		return;

	TAILQ_FOREACH(seg, &elf->segs, link) {
		vaddr_t va = elf->load_addr + seg->vaddr;
		uint32_t flags = 0;

		if (seg->flags & PF_W)
			flags |= PTA_SYSTEM_MAP_FLAG_WRITEABLE;
		if (seg->flags & PF_X)
			flags |= PTA_SYSTEM_MAP_FLAG_EXECUTABLE;

		res = sys_set_prot(va, seg->memsz, flags);
		if (res)
			err(res, "sys_set_prot");
	}
}

static void __printf(3, 4) print_wrapper(void *pctx, print_func_t print_func,
					 const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	print_func(pctx, fmt, ap);
	va_end(ap);
}
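
/*
 * Sample lines produced by print_seg() below (illustrative values):
 *
 *	region  0: va 0x40014000 pa 0x00001000 size 0x002000 flags rw-s (stack)
 *	region  1: va 0x40055000 pa 0x00000000 size 0x001000 flags r-xs [0]
 *
 * The [N] suffix indexes the ELF list printed at the end of
 * ta_elf_print_mappings(), and for ELF segments the "pa" column is reused
 * to show the file offset rather than a physical address.
 */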
static void print_seg(void *pctx, print_func_t print_func,
		      size_t idx __maybe_unused, int elf_idx __maybe_unused,
		      vaddr_t va __maybe_unused, paddr_t pa __maybe_unused,
		      size_t sz __maybe_unused, uint32_t flags)
{
	int width __maybe_unused = 8;
	char desc[14] __maybe_unused = "";
	char flags_str[] __maybe_unused = "----";

	if (elf_idx > -1) {
		snprintf(desc, sizeof(desc), " [%d]", elf_idx);
	} else {
		if (flags & DUMP_MAP_EPHEM)
			snprintf(desc, sizeof(desc), " (param)");
		if (flags & DUMP_MAP_LDELF)
			snprintf(desc, sizeof(desc), " (ldelf)");
		if (va == ta_stack)
			snprintf(desc, sizeof(desc), " (stack)");
	}

	if (flags & DUMP_MAP_READ)
		flags_str[0] = 'r';
	if (flags & DUMP_MAP_WRITE)
		flags_str[1] = 'w';
	if (flags & DUMP_MAP_EXEC)
		flags_str[2] = 'x';
	if (flags & DUMP_MAP_SECURE)
		flags_str[3] = 's';

	print_wrapper(pctx, print_func,
		      "region %2zu: va 0x%0*"PRIxVA" pa 0x%0*"PRIxPA" size 0x%06zx flags %s%s\n",
		      idx, width, va, width, pa, sz, flags_str, desc);
}

static bool get_next_in_order(struct ta_elf_queue *elf_queue,
			      struct ta_elf **elf, struct segment **seg,
			      size_t *elf_idx)
{
	struct ta_elf *e = NULL;
	struct segment *s = NULL;
	size_t idx = 0;
	vaddr_t va = 0;
	struct ta_elf *e2 = NULL;
	size_t i2 = 0;

	assert(elf && seg && elf_idx);
	e = *elf;
	s = *seg;
	assert((e == NULL && s == NULL) || (e != NULL && s != NULL));

	if (s) {
		s = TAILQ_NEXT(s, link);
		if (s) {
			*seg = s;
			return true;
		}
	}

	if (e)
		va = e->load_addr;

	/* Find the ELF with next load address */
	e = NULL;
	TAILQ_FOREACH(e2, elf_queue, link) {
		if (e2->load_addr > va) {
			if (!e || e2->load_addr < e->load_addr) {
				e = e2;
				idx = i2;
			}
		}
		i2++;
	}
	if (!e)
		return false;

	*elf = e;
	*seg = TAILQ_FIRST(&e->segs);
	*elf_idx = idx;
	return true;
}

void ta_elf_print_mappings(void *pctx, print_func_t print_func,
			   struct ta_elf_queue *elf_queue, size_t num_maps,
			   struct dump_map *maps, vaddr_t mpool_base)
{
	struct segment *seg = NULL;
	struct ta_elf *elf = NULL;
	size_t elf_idx = 0;
	size_t idx = 0;
	size_t map_idx = 0;

	/*
	 * Loop over all segments and maps, printing virtual addresses in
	 * order. Segment has priority if the virtual address is present
	 * in both map and segment.
	 */
	get_next_in_order(elf_queue, &elf, &seg, &elf_idx);
	while (true) {
		vaddr_t va = -1;
		size_t sz = 0;
		uint32_t flags = DUMP_MAP_SECURE;
		size_t offs = 0;

		if (seg) {
			va = rounddown(seg->vaddr + elf->load_addr);
			sz = roundup(seg->vaddr + seg->memsz) -
			     rounddown(seg->vaddr);
		}

		while (map_idx < num_maps && maps[map_idx].va <= va) {
			uint32_t f = 0;

			/* If there's a match, it should be the same map */
			if (maps[map_idx].va == va) {
				/*
				 * In shared libraries the first page is
				 * mapped separately with the rest of that
				 * segment following back to back in a
				 * separate entry.
				 */
				if (map_idx + 1 < num_maps &&
				    maps[map_idx].sz == SMALL_PAGE_SIZE) {
					vaddr_t next_va = maps[map_idx].va +
							  maps[map_idx].sz;
					size_t comb_sz = maps[map_idx].sz +
							 maps[map_idx + 1].sz;

					if (next_va == maps[map_idx + 1].va &&
					    comb_sz == sz &&
					    maps[map_idx].flags ==
					    maps[map_idx + 1].flags) {
						/* Skip this and next entry */
						map_idx += 2;
						continue;
					}
				}
				assert(maps[map_idx].sz == sz);
			} else if (maps[map_idx].va < va) {
				if (maps[map_idx].va == mpool_base)
					f |= DUMP_MAP_LDELF;
				print_seg(pctx, print_func, idx, -1,
					  maps[map_idx].va, maps[map_idx].pa,
					  maps[map_idx].sz,
					  maps[map_idx].flags | f);
				idx++;
			}
			map_idx++;
		}

		if (!seg)
			break;

		offs = rounddown(seg->offset);
		if (seg->flags & PF_R)
			flags |= DUMP_MAP_READ;
		if (seg->flags & PF_W)
			flags |= DUMP_MAP_WRITE;
		if (seg->flags & PF_X)
			flags |= DUMP_MAP_EXEC;

		print_seg(pctx, print_func, idx, elf_idx, va, offs, sz, flags);
		idx++;

		if (!get_next_in_order(elf_queue, &elf, &seg, &elf_idx))
			seg = NULL;
	}

	elf_idx = 0;
	TAILQ_FOREACH(elf, elf_queue, link) {
		print_wrapper(pctx, print_func,
			      " [%zu] %pUl @ 0x%0*"PRIxVA"\n",
			      elf_idx, (void *)&elf->uuid, 8, elf->load_addr);
		elf_idx++;
	}
}
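
/*
 * The region dump above is followed by one line per loaded ELF, e.g.
 * (made-up UUIDs and addresses):
 *
 *	 [0] 2a0b1aa9-0000-0000-0000-000000000000 @ 0x40055000
 *	 [1] 6ce85888-0000-0000-0000-000000000000 @ 0x40080000
 */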
#ifdef CFG_UNWIND
void ta_elf_stack_trace_a32(uint32_t regs[16])
{
	struct unwind_state_arm32 state = { };

	memcpy(state.registers, regs, sizeof(state.registers));
	print_stack_arm32(&state, ta_stack, ta_stack_size);
}

void ta_elf_stack_trace_a64(uint64_t fp, uint64_t sp, uint64_t pc)
{
	struct unwind_state_arm64 state = { .fp = fp, .sp = sp, .pc = pc };

	print_stack_arm64(&state, ta_stack, ta_stack_size);
}
#endif

TEE_Result ta_elf_add_library(const TEE_UUID *uuid)
{
	struct ta_elf *ta = TAILQ_FIRST(&main_elf_queue);
	struct ta_elf *lib = ta_elf_find_elf(uuid);
	struct ta_elf *elf = NULL;

	if (lib)
		return TEE_SUCCESS; /* Already mapped */

	lib = queue_elf_helper(uuid);
	if (!lib)
		return TEE_ERROR_OUT_OF_MEMORY;

	for (elf = lib; elf; elf = TAILQ_NEXT(elf, link))
		ta_elf_load_dependency(elf, ta->is_32bit);

	for (elf = lib; elf; elf = TAILQ_NEXT(elf, link)) {
		ta_elf_relocate(elf);
		ta_elf_finalize_mappings(elf);
	}

	for (elf = lib; elf; elf = TAILQ_NEXT(elf, link))
		DMSG("ELF (%pUl) at %#"PRIxVA,
		     (void *)&elf->uuid, elf->load_addr);

	return ta_elf_set_init_fini_info(ta->is_32bit);
}

/* Get address/size of .init_array and .fini_array from the dynamic segment */
static void get_init_fini_array(struct ta_elf *elf, unsigned int type,
				vaddr_t addr, size_t memsz, vaddr_t *init,
				size_t *init_cnt, vaddr_t *fini,
				size_t *fini_cnt)
{
	size_t addrsz = 0;
	size_t dyn_entsize = 0;
	size_t num_dyns = 0;
	size_t n = 0;
	unsigned int tag = 0;
	size_t val = 0;

	assert(type == PT_DYNAMIC);

	check_phdr_in_range(elf, type, addr, memsz);

	if (elf->is_32bit) {
		dyn_entsize = sizeof(Elf32_Dyn);
		addrsz = 4;
	} else {
		dyn_entsize = sizeof(Elf64_Dyn);
		addrsz = 8;
	}

	assert(!(memsz % dyn_entsize));
	num_dyns = memsz / dyn_entsize;

	for (n = 0; n < num_dyns; n++) {
		read_dyn(elf, addr, n, &tag, &val);
		if (tag == DT_INIT_ARRAY)
			*init = val + elf->load_addr;
		else if (tag == DT_FINI_ARRAY)
			*fini = val + elf->load_addr;
		else if (tag == DT_INIT_ARRAYSZ)
			*init_cnt = val / addrsz;
		else if (tag == DT_FINI_ARRAYSZ)
			*fini_cnt = val / addrsz;
	}
}
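
/*
 * Note: DT_INIT_ARRAYSZ and DT_FINI_ARRAYSZ hold sizes in bytes, not
 * entry counts, which is why get_init_fini_array() divides by the address
 * size (4 for 32-bit ELFs, 8 for 64-bit) to get the number of function
 * pointers.
 */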
/* Get address/size of .init_array and .fini_array in @elf (if present) */
static void elf_get_init_fini_array(struct ta_elf *elf, vaddr_t *init,
				    size_t *init_cnt, vaddr_t *fini,
				    size_t *fini_cnt)
{
	size_t n = 0;

	if (elf->is_32bit) {
		Elf32_Phdr *phdr = elf->phdr;

		for (n = 0; n < elf->e_phnum; n++) {
			if (phdr[n].p_type == PT_DYNAMIC) {
				get_init_fini_array(elf, phdr[n].p_type,
						    phdr[n].p_vaddr,
						    phdr[n].p_memsz,
						    init, init_cnt, fini,
						    fini_cnt);
				return;
			}
		}
	} else {
		Elf64_Phdr *phdr = elf->phdr;

		for (n = 0; n < elf->e_phnum; n++) {
			if (phdr[n].p_type == PT_DYNAMIC) {
				get_init_fini_array(elf, phdr[n].p_type,
						    phdr[n].p_vaddr,
						    phdr[n].p_memsz,
						    init, init_cnt, fini,
						    fini_cnt);
				return;
			}
		}
	}
}

static TEE_Result realloc_ifs(vaddr_t va, size_t cnt, bool is_32bit)
{
	struct __init_fini_info32 *info32 = (struct __init_fini_info32 *)va;
	struct __init_fini_info *info = (struct __init_fini_info *)va;
	struct __init_fini32 *ifs32 = NULL;
	struct __init_fini *ifs = NULL;
	size_t prev_cnt = 0;
	void *ptr = NULL;

	if (is_32bit) {
		ptr = (void *)(vaddr_t)info32->ifs;
		ptr = realloc(ptr, cnt * sizeof(struct __init_fini32));
		if (!ptr)
			return TEE_ERROR_OUT_OF_MEMORY;
		ifs32 = ptr;
		prev_cnt = info32->size;
		if (cnt > prev_cnt)
			memset(ifs32 + prev_cnt, 0,
			       (cnt - prev_cnt) * sizeof(*ifs32));
		info32->ifs = (uint32_t)(vaddr_t)ifs32;
		info32->size = cnt;
	} else {
		ptr = realloc(info->ifs, cnt * sizeof(struct __init_fini));
		if (!ptr)
			return TEE_ERROR_OUT_OF_MEMORY;
		ifs = ptr;
		prev_cnt = info->size;
		if (cnt > prev_cnt)
			memset(ifs + prev_cnt, 0,
			       (cnt - prev_cnt) * sizeof(*ifs));
		info->ifs = ifs;
		info->size = cnt;
	}

	return TEE_SUCCESS;
}

static void fill_ifs(vaddr_t va, size_t idx, struct ta_elf *elf, bool is_32bit)
{
	struct __init_fini_info32 *info32 = (struct __init_fini_info32 *)va;
	struct __init_fini_info *info = (struct __init_fini_info *)va;
	struct __init_fini32 *ifs32 = NULL;
	struct __init_fini *ifs = NULL;
	size_t init_cnt = 0;
	size_t fini_cnt = 0;
	vaddr_t init = 0;
	vaddr_t fini = 0;

	if (is_32bit) {
		assert(idx < info32->size);
		ifs32 = &((struct __init_fini32 *)(vaddr_t)info32->ifs)[idx];

		if (ifs32->flags & __IFS_VALID)
			return;

		elf_get_init_fini_array(elf, &init, &init_cnt, &fini,
					&fini_cnt);

		ifs32->init = (uint32_t)init;
		ifs32->init_size = init_cnt;

		ifs32->fini = (uint32_t)fini;
		ifs32->fini_size = fini_cnt;

		ifs32->flags |= __IFS_VALID;
	} else {
		assert(idx < info->size);
		ifs = &info->ifs[idx];

		if (ifs->flags & __IFS_VALID)
			return;

		elf_get_init_fini_array(elf, &init, &init_cnt, &fini,
					&fini_cnt);

		ifs->init = (void (**)(void))init;
		ifs->init_size = init_cnt;

		ifs->fini = (void (**)(void))fini;
		ifs->fini_size = fini_cnt;

		ifs->flags |= __IFS_VALID;
	}
}
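
/*
 * A hedged sketch of how the information filled in by fill_ifs() is meant
 * to be consumed on the TA side (the authoritative loop lives in libutee;
 * this only illustrates the data layout used in this file):
 *
 *	for (i = 0; i < info->size; i++) {
 *		struct __init_fini *ifs = &info->ifs[i];
 *
 *		if (!(ifs->flags & __IFS_VALID))
 *			continue;
 *		for (j = 0; j < ifs->init_size; j++)
 *			ifs->init[j]();	// run constructors in order
 *	}
 */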
/*
 * Set or update __init_fini_info in the TA with information from the ELF
 * queue
 */
TEE_Result ta_elf_set_init_fini_info(bool is_32bit)
{
	struct __init_fini_info *info = NULL;
	TEE_Result res = TEE_SUCCESS;
	struct ta_elf *elf = NULL;
	vaddr_t info_va = 0;
	size_t cnt = 0;

	res = ta_elf_resolve_sym("__init_fini_info", &info_va, NULL);
	if (res) {
		if (res == TEE_ERROR_ITEM_NOT_FOUND) {
			/* Older TA */
			return TEE_SUCCESS;
		}
		return res;
	}
	assert(info_va);

	info = (struct __init_fini_info *)info_va;
	if (info->reserved)
		return TEE_ERROR_NOT_SUPPORTED;

	TAILQ_FOREACH(elf, &main_elf_queue, link)
		cnt++;

	/* Queue has at least one file (main) */
	assert(cnt);

	res = realloc_ifs(info_va, cnt, is_32bit);
	if (res)
		goto err;

	cnt = 0;
	TAILQ_FOREACH(elf, &main_elf_queue, link) {
		fill_ifs(info_va, cnt, elf, is_32bit);
		cnt++;
	}

	return TEE_SUCCESS;
err:
	free(info);
	return res;
}
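
/*
 * ta_elf_set_init_fini_info() is called from both
 * ta_elf_finalize_load_main() and ta_elf_add_library() above, so the
 * array is rebuilt whenever the set of loaded ELFs grows. realloc_ifs()
 * keeps existing entries and zeroes only the new slots, and fill_ifs()
 * skips entries already marked __IFS_VALID, so previously recorded
 * init/fini information is preserved across updates.
 */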