// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2019, Linaro Limited
 */

#include <assert.h>
#include <ctype.h>
#include <elf32.h>
#include <elf64.h>
#include <elf_common.h>
#include <ldelf.h>
#include <pta_system.h>
#include <stdio.h>
#include <stdlib.h>
#include <string_ext.h>
#include <string.h>
#include <tee_api_types.h>
#include <tee_internal_api_extensions.h>
#include <user_ta_header.h>
#include <utee_syscalls.h>
#include <util.h>

#include "sys.h"
#include "ta_elf.h"
#include "unwind.h"

static vaddr_t ta_stack;
static vaddr_t ta_stack_size;

struct ta_elf_queue main_elf_queue = TAILQ_HEAD_INITIALIZER(main_elf_queue);

/*
 * Main application is always ID 1, shared libraries with TLS take IDs 2 and
 * above
 */
static void assign_tls_mod_id(struct ta_elf *elf)
{
	static size_t last_tls_mod_id = 1;

	if (elf->is_main)
		assert(last_tls_mod_id == 1); /* Main always comes first */
	elf->tls_mod_id = last_tls_mod_id++;
}

static struct ta_elf *queue_elf_helper(const TEE_UUID *uuid)
{
	struct ta_elf *elf = calloc(1, sizeof(*elf));

	if (!elf)
		return NULL;

	TAILQ_INIT(&elf->segs);

	elf->uuid = *uuid;
	TAILQ_INSERT_TAIL(&main_elf_queue, elf, link);
	return elf;
}

static struct ta_elf *queue_elf(const TEE_UUID *uuid)
{
	struct ta_elf *elf = ta_elf_find_elf(uuid);

	if (elf)
		return NULL;

	elf = queue_elf_helper(uuid);
	if (!elf)
		err(TEE_ERROR_OUT_OF_MEMORY, "queue_elf_helper");

	return elf;
}

struct ta_elf *ta_elf_find_elf(const TEE_UUID *uuid)
{
	struct ta_elf *elf = NULL;

	TAILQ_FOREACH(elf, &main_elf_queue, link)
		if (!memcmp(uuid, &elf->uuid, sizeof(*uuid)))
			return elf;

	return NULL;
}

static TEE_Result e32_parse_ehdr(struct ta_elf *elf, Elf32_Ehdr *ehdr)
{
	if (ehdr->e_ident[EI_VERSION] != EV_CURRENT ||
	    ehdr->e_ident[EI_CLASS] != ELFCLASS32 ||
	    ehdr->e_ident[EI_DATA] != ELFDATA2LSB ||
	    ehdr->e_ident[EI_OSABI] != ELFOSABI_NONE ||
	    ehdr->e_type != ET_DYN || ehdr->e_machine != EM_ARM ||
	    (ehdr->e_flags & EF_ARM_ABIMASK) != EF_ARM_ABI_VERSION ||
#ifndef CFG_WITH_VFP
	    (ehdr->e_flags & EF_ARM_ABI_FLOAT_HARD) ||
#endif
	    ehdr->e_phentsize != sizeof(Elf32_Phdr) ||
	    ehdr->e_shentsize != sizeof(Elf32_Shdr))
		return TEE_ERROR_BAD_FORMAT;

	elf->is_32bit = true;
	elf->e_entry = ehdr->e_entry;
	elf->e_phoff = ehdr->e_phoff;
	elf->e_shoff = ehdr->e_shoff;
	elf->e_phnum = ehdr->e_phnum;
	elf->e_shnum = ehdr->e_shnum;
	elf->e_phentsize = ehdr->e_phentsize;
	elf->e_shentsize = ehdr->e_shentsize;

	return TEE_SUCCESS;
}

#ifdef ARM64
static TEE_Result e64_parse_ehdr(struct ta_elf *elf, Elf64_Ehdr *ehdr)
{
	if (ehdr->e_ident[EI_VERSION] != EV_CURRENT ||
	    ehdr->e_ident[EI_CLASS] != ELFCLASS64 ||
	    ehdr->e_ident[EI_DATA] != ELFDATA2LSB ||
	    ehdr->e_ident[EI_OSABI] != ELFOSABI_NONE ||
	    ehdr->e_type != ET_DYN || ehdr->e_machine != EM_AARCH64 ||
	    ehdr->e_flags || ehdr->e_phentsize != sizeof(Elf64_Phdr) ||
	    ehdr->e_shentsize != sizeof(Elf64_Shdr))
		return TEE_ERROR_BAD_FORMAT;

	elf->is_32bit = false;
	elf->e_entry = ehdr->e_entry;
	elf->e_phoff = ehdr->e_phoff;
	elf->e_shoff = ehdr->e_shoff;
	elf->e_phnum = ehdr->e_phnum;
	elf->e_shnum = ehdr->e_shnum;
	elf->e_phentsize = ehdr->e_phentsize;
	elf->e_shentsize = ehdr->e_shentsize;

	return TEE_SUCCESS;
}
#else /*ARM64*/
static TEE_Result e64_parse_ehdr(struct ta_elf *elf __unused,
				 Elf64_Ehdr *ehdr __unused)
{
	return TEE_ERROR_NOT_SUPPORTED;
}
#endif /*ARM64*/

static void check_phdr_in_range(struct ta_elf *elf, unsigned int type,
				vaddr_t addr, size_t memsz)
{
	vaddr_t max_addr = 0;

	if (ADD_OVERFLOW(addr, memsz, &max_addr))
		err(TEE_ERROR_BAD_FORMAT, "Program header %#x overflow", type);

	/*
	 * elf->load_addr and elf->max_addr are both using the
	 * final virtual addresses, while this program header is
	 * relative to 0.
	 */
	if (max_addr > elf->max_addr - elf->load_addr)
		err(TEE_ERROR_BAD_FORMAT, "Program header %#x out of bounds",
		    type);
}

static void read_dyn(struct ta_elf *elf, vaddr_t addr,
		     size_t idx, unsigned int *tag, size_t *val)
{
	if (elf->is_32bit) {
		Elf32_Dyn *dyn = (Elf32_Dyn *)(addr + elf->load_addr);

		*tag = dyn[idx].d_tag;
		*val = dyn[idx].d_un.d_val;
	} else {
		Elf64_Dyn *dyn = (Elf64_Dyn *)(addr + elf->load_addr);

		*tag = dyn[idx].d_tag;
		*val = dyn[idx].d_un.d_val;
	}
}

static void save_hashtab_from_segment(struct ta_elf *elf, unsigned int type,
				      vaddr_t addr, size_t memsz)
{
	size_t dyn_entsize = 0;
	size_t num_dyns = 0;
	size_t n = 0;
	unsigned int tag = 0;
	size_t val = 0;

	if (type != PT_DYNAMIC)
		return;

	check_phdr_in_range(elf, type, addr, memsz);

	if (elf->is_32bit)
		dyn_entsize = sizeof(Elf32_Dyn);
	else
		dyn_entsize = sizeof(Elf64_Dyn);

	assert(!(memsz % dyn_entsize));
	num_dyns = memsz / dyn_entsize;

	for (n = 0; n < num_dyns; n++) {
		read_dyn(elf, addr, n, &tag, &val);
		if (tag == DT_HASH) {
			elf->hashtab = (void *)(val + elf->load_addr);
			break;
		}
	}
}

static void check_range(struct ta_elf *elf, const char *name, const void *ptr,
			size_t sz)
{
	size_t max_addr = 0;

	if ((vaddr_t)ptr < elf->load_addr)
		err(TEE_ERROR_BAD_FORMAT, "%s %p out of range", name, ptr);

	if (ADD_OVERFLOW((vaddr_t)ptr, sz, &max_addr))
		err(TEE_ERROR_BAD_FORMAT, "%s range overflow", name);

	if (max_addr > elf->max_addr)
		err(TEE_ERROR_BAD_FORMAT,
		    "%s %p..%#zx out of range", name, ptr, max_addr);
}

static void check_hashtab(struct ta_elf *elf, void *ptr, size_t num_buckets,
			  size_t num_chains)
{
	/*
	 * Starting from 2 as the first two words are mandatory and hold
	 * num_buckets and num_chains. So this function is called twice,
	 * first to see that there's indeed room for num_buckets and
	 * num_chains and then to see that all of it fits.
	 * See http://www.sco.com/developers/gabi/latest/ch5.dynamic.html#hash
	 */
	size_t num_words = 2;
	size_t sz = 0;

	if (!ALIGNMENT_IS_OK(ptr, uint32_t))
		err(TEE_ERROR_BAD_FORMAT, "Bad alignment of DT_HASH %p", ptr);

	if (ADD_OVERFLOW(num_words, num_buckets, &num_words) ||
	    ADD_OVERFLOW(num_words, num_chains, &num_words) ||
	    MUL_OVERFLOW(num_words, sizeof(uint32_t), &sz))
		err(TEE_ERROR_BAD_FORMAT, "DT_HASH overflow");

	check_range(elf, "DT_HASH", ptr, sz);
}

static void save_hashtab(struct ta_elf *elf)
{
	uint32_t *hashtab = NULL;
	size_t n = 0;

	if (elf->is_32bit) {
		Elf32_Phdr *phdr = elf->phdr;

		for (n = 0; n < elf->e_phnum; n++)
			save_hashtab_from_segment(elf, phdr[n].p_type,
						  phdr[n].p_vaddr,
						  phdr[n].p_memsz);
	} else {
		Elf64_Phdr *phdr = elf->phdr;

		for (n = 0; n < elf->e_phnum; n++)
			save_hashtab_from_segment(elf, phdr[n].p_type,
						  phdr[n].p_vaddr,
						  phdr[n].p_memsz);
	}

	check_hashtab(elf, elf->hashtab, 0, 0);
	hashtab = elf->hashtab;
	check_hashtab(elf, elf->hashtab, hashtab[0], hashtab[1]);
}

static void e32_save_symtab(struct ta_elf *elf, size_t tab_idx)
{
	Elf32_Shdr *shdr = elf->shdr;
	size_t str_idx = shdr[tab_idx].sh_link;

	elf->dynsymtab = (void *)(shdr[tab_idx].sh_addr + elf->load_addr);
	if (!ALIGNMENT_IS_OK(elf->dynsymtab, Elf32_Sym))
		err(TEE_ERROR_BAD_FORMAT, "Bad alignment of dynsymtab %p",
		    elf->dynsymtab);
	check_range(elf, "Dynsymtab", elf->dynsymtab, shdr[tab_idx].sh_size);

	if (shdr[tab_idx].sh_size % sizeof(Elf32_Sym))
		err(TEE_ERROR_BAD_FORMAT,
		    "Size of dynsymtab not an even multiple of Elf32_Sym");
	elf->num_dynsyms = shdr[tab_idx].sh_size / sizeof(Elf32_Sym);

	if (str_idx >= elf->e_shnum)
		err(TEE_ERROR_BAD_FORMAT, "Dynstr section index out of range");
	elf->dynstr = (void *)(shdr[str_idx].sh_addr + elf->load_addr);
	check_range(elf, "Dynstr", elf->dynstr, shdr[str_idx].sh_size);

	elf->dynstr_size = shdr[str_idx].sh_size;
}

static void e64_save_symtab(struct ta_elf *elf, size_t tab_idx)
{
	Elf64_Shdr *shdr = elf->shdr;
	size_t str_idx = shdr[tab_idx].sh_link;

	elf->dynsymtab = (void *)(vaddr_t)(shdr[tab_idx].sh_addr +
					   elf->load_addr);

	if (!ALIGNMENT_IS_OK(elf->dynsymtab, Elf64_Sym))
		err(TEE_ERROR_BAD_FORMAT, "Bad alignment of .dynsym/DYNSYM %p",
		    elf->dynsymtab);
	check_range(elf, ".dynsym/DYNSYM", elf->dynsymtab,
		    shdr[tab_idx].sh_size);

	if (shdr[tab_idx].sh_size % sizeof(Elf64_Sym))
		err(TEE_ERROR_BAD_FORMAT,
		    "Size of .dynsym/DYNSYM not an even multiple of Elf64_Sym");
	elf->num_dynsyms = shdr[tab_idx].sh_size / sizeof(Elf64_Sym);

	if (str_idx >= elf->e_shnum)
		err(TEE_ERROR_BAD_FORMAT,
		    ".dynstr/STRTAB section index out of range");
	elf->dynstr = (void *)(vaddr_t)(shdr[str_idx].sh_addr + elf->load_addr);
	check_range(elf, ".dynstr/STRTAB", elf->dynstr, shdr[str_idx].sh_size);

	elf->dynstr_size = shdr[str_idx].sh_size;
}

static void save_symtab(struct ta_elf *elf)
{
	size_t n = 0;

	if (elf->is_32bit) {
		Elf32_Shdr *shdr = elf->shdr;

		for (n = 0; n < elf->e_shnum; n++) {
			if (shdr[n].sh_type == SHT_DYNSYM) {
				e32_save_symtab(elf, n);
				break;
			}
		}
	} else {
		Elf64_Shdr *shdr = elf->shdr;

		for (n = 0; n < elf->e_shnum; n++) {
			if (shdr[n].sh_type == SHT_DYNSYM) {
				e64_save_symtab(elf, n);
				break;
			}
		}
	}

	save_hashtab(elf);
}

static void init_elf(struct ta_elf *elf)
{
	TEE_Result res = TEE_SUCCESS;
	vaddr_t va = 0;
	uint32_t flags = PTA_SYSTEM_MAP_FLAG_SHAREABLE;
	size_t sz = 0;

	res = sys_open_ta_bin(&elf->uuid, &elf->handle);
	if (res)
		err(res, "sys_open_ta_bin(%pUl)", (void *)&elf->uuid);

	/*
	 * Map it read-only executable when we're loading a library where
	 * the ELF header is included in a load segment.
	 */
	if (!elf->is_main)
		flags |= PTA_SYSTEM_MAP_FLAG_EXECUTABLE;
	res = sys_map_ta_bin(&va, SMALL_PAGE_SIZE, flags, elf->handle, 0, 0, 0);
	if (res)
		err(res, "sys_map_ta_bin");
	elf->ehdr_addr = va;
	if (!elf->is_main) {
		elf->load_addr = va;
		elf->max_addr = va + SMALL_PAGE_SIZE;
		elf->max_offs = SMALL_PAGE_SIZE;
	}

	if (!IS_ELF(*(Elf32_Ehdr *)va))
		err(TEE_ERROR_BAD_FORMAT, "TA is not an ELF");

	res = e32_parse_ehdr(elf, (void *)va);
	if (res == TEE_ERROR_BAD_FORMAT)
		res = e64_parse_ehdr(elf, (void *)va);
	if (res)
		err(res, "Cannot parse ELF");

	if (MUL_OVERFLOW(elf->e_phnum, elf->e_phentsize, &sz) ||
	    ADD_OVERFLOW(sz, elf->e_phoff, &sz))
		err(TEE_ERROR_BAD_FORMAT, "Program headers size overflow");

	if (sz > SMALL_PAGE_SIZE)
		err(TEE_ERROR_NOT_SUPPORTED, "Cannot read program headers");

	elf->phdr = (void *)(va + elf->e_phoff);
}

static size_t roundup(size_t v)
{
	return ROUNDUP(v, SMALL_PAGE_SIZE);
}

static size_t rounddown(size_t v)
{
	return ROUNDDOWN(v, SMALL_PAGE_SIZE);
}

static void add_segment(struct ta_elf *elf, size_t offset, size_t vaddr,
			size_t filesz, size_t memsz, size_t flags, size_t align)
{
	struct segment *seg = calloc(1, sizeof(*seg));

	if (!seg)
		err(TEE_ERROR_OUT_OF_MEMORY, "calloc");

	if (memsz < filesz)
		err(TEE_ERROR_BAD_FORMAT, "Memsz smaller than filesz");

	seg->offset = offset;
	seg->vaddr = vaddr;
	seg->filesz = filesz;
	seg->memsz = memsz;
	seg->flags = flags;
	seg->align = align;

	TAILQ_INSERT_TAIL(&elf->segs, seg, link);
}

static void parse_load_segments(struct ta_elf *elf)
{
	size_t n = 0;

	if (elf->is_32bit) {
		Elf32_Phdr *phdr = elf->phdr;

		for (n = 0; n < elf->e_phnum; n++)
			if (phdr[n].p_type == PT_LOAD) {
				add_segment(elf, phdr[n].p_offset,
					    phdr[n].p_vaddr, phdr[n].p_filesz,
					    phdr[n].p_memsz, phdr[n].p_flags,
					    phdr[n].p_align);
			} else if (phdr[n].p_type == PT_ARM_EXIDX) {
				elf->exidx_start = phdr[n].p_vaddr;
				elf->exidx_size = phdr[n].p_filesz;
			} else if (phdr[n].p_type == PT_TLS) {
				assign_tls_mod_id(elf);
			}
	} else {
		Elf64_Phdr *phdr = elf->phdr;

		for (n = 0; n < elf->e_phnum; n++)
			if (phdr[n].p_type == PT_LOAD)
				add_segment(elf, phdr[n].p_offset,
					    phdr[n].p_vaddr, phdr[n].p_filesz,
					    phdr[n].p_memsz, phdr[n].p_flags,
					    phdr[n].p_align);
	}
}

static void copy_remapped_to(struct ta_elf *elf, const struct segment *seg)
{
	uint8_t *dst = (void *)(seg->vaddr + elf->load_addr);
	size_t n = 0;
	size_t offs = seg->offset;
	size_t num_bytes = seg->filesz;

	if (offs < elf->max_offs) {
		n = MIN(elf->max_offs - offs, num_bytes);
		memcpy(dst, (void *)(elf->max_addr + offs - elf->max_offs), n);
		dst += n;
		offs += n;
		num_bytes -= n;
	}

	if (num_bytes) {
		TEE_Result res = sys_copy_from_ta_bin(dst, num_bytes,
						      elf->handle, offs);

		if (res)
			err(res, "sys_copy_from_ta_bin");
		elf->max_offs += offs;
	}
}

static void adjust_segments(struct ta_elf *elf)
{
	struct segment *seg = NULL;
	struct segment *prev_seg = NULL;
	size_t prev_end_addr = 0;
	size_t align = 0;
	size_t mask = 0;

	/* Sanity check */
	TAILQ_FOREACH(seg, &elf->segs, link) {
		size_t dummy __maybe_unused = 0;

		assert(seg->align >= SMALL_PAGE_SIZE);
		assert(!ADD_OVERFLOW(seg->vaddr, seg->memsz, &dummy));
		assert(seg->filesz <= seg->memsz);
		assert((seg->offset & SMALL_PAGE_MASK) ==
		       (seg->vaddr & SMALL_PAGE_MASK));

		prev_seg = TAILQ_PREV(seg, segment_head, link);
		if (prev_seg) {
			assert(seg->vaddr >= prev_seg->vaddr + prev_seg->memsz);
			assert(seg->offset >=
			       prev_seg->offset + prev_seg->filesz);
		}
		if (!align)
			align = seg->align;
		assert(align == seg->align);
	}

	mask = align - 1;

	seg = TAILQ_FIRST(&elf->segs);
	if (seg)
		seg = TAILQ_NEXT(seg, link);
	while (seg) {
		prev_seg = TAILQ_PREV(seg, segment_head, link);
		prev_end_addr = prev_seg->vaddr + prev_seg->memsz;

		/*
		 * This segment may overlap with the last "page" in the
		 * previous segment in two different ways:
		 * 1. Virtual address (and offset) overlaps =>
		 *    Permissions need to be merged. The offset must have
		 *    the same SMALL_PAGE_MASK bits as vaddr and must line
		 *    up with the previous segment.
		 *
		 * 2. Only offset overlaps =>
		 *    The same page in the ELF is mapped at two different
		 *    virtual addresses. As a limitation this segment must
		 *    be mapped as writeable.
		 */

		/* Case 1. */
		if (rounddown(seg->vaddr) < prev_end_addr) {
			assert((seg->vaddr & mask) == (seg->offset & mask));
			assert(prev_seg->memsz == prev_seg->filesz);

			/*
			 * Merge the segments and their permissions.
			 * Note that there may be a small hole between the
			 * two sections.
			 */
			prev_seg->filesz = seg->vaddr + seg->filesz -
					   prev_seg->vaddr;
			prev_seg->memsz = seg->vaddr + seg->memsz -
					  prev_seg->vaddr;
			prev_seg->flags |= seg->flags;

			TAILQ_REMOVE(&elf->segs, seg, link);
			free(seg);
			seg = TAILQ_NEXT(prev_seg, link);
			continue;
		}

		/* Case 2. */
		if ((seg->offset & mask) &&
		    rounddown(seg->offset) <
		    (prev_seg->offset + prev_seg->filesz)) {
			assert(seg->flags & PF_W);
			seg->remapped_writeable = true;
		}

		/*
		 * No overlap, but we may need to align address, offset and
		 * size.
		 */
		seg->filesz += seg->vaddr - rounddown(seg->vaddr);
		seg->memsz += seg->vaddr - rounddown(seg->vaddr);
		seg->vaddr = rounddown(seg->vaddr);
		seg->offset = rounddown(seg->offset);
		seg = TAILQ_NEXT(seg, link);
	}
}

static void populate_segments_legacy(struct ta_elf *elf)
{
	TEE_Result res = TEE_SUCCESS;
	struct segment *seg = NULL;
	vaddr_t va = 0;

	assert(elf->is_legacy);
	TAILQ_FOREACH(seg, &elf->segs, link) {
		struct segment *last_seg = TAILQ_LAST(&elf->segs, segment_head);
		size_t pad_end = roundup(last_seg->vaddr + last_seg->memsz -
					 seg->vaddr - seg->memsz);
		size_t num_bytes = roundup(seg->memsz);

		if (!elf->load_addr)
			va = 0;
		else
			va = seg->vaddr + elf->load_addr;

		if (!(seg->flags & PF_R))
			err(TEE_ERROR_NOT_SUPPORTED,
			    "Segment must be readable");

		res = sys_map_zi(num_bytes, 0, &va, 0, pad_end);
		if (res)
			err(res, "sys_map_zi");
		res = sys_copy_from_ta_bin((void *)va, seg->filesz,
					   elf->handle, seg->offset);
		if (res)
			err(res, "sys_copy_from_ta_bin");

		if (!elf->load_addr)
			elf->load_addr = va;
		elf->max_addr = va + num_bytes;
		elf->max_offs = seg->offset + seg->filesz;
	}
}

static size_t get_pad_begin(void)
{
#ifdef CFG_TA_ASLR
	size_t min = CFG_TA_ASLR_MIN_OFFSET_PAGES;
	size_t max = CFG_TA_ASLR_MAX_OFFSET_PAGES;
	TEE_Result res = TEE_SUCCESS;
	uint32_t rnd32 = 0;
	size_t rnd = 0;

	COMPILE_TIME_ASSERT(CFG_TA_ASLR_MIN_OFFSET_PAGES <
			    CFG_TA_ASLR_MAX_OFFSET_PAGES);
	if (max > min) {
		res = _utee_cryp_random_number_generate(&rnd32, sizeof(rnd32));
		if (res) {
			DMSG("Random read failed: %#"PRIx32, res);
			return min * SMALL_PAGE_SIZE;
		}
		rnd = rnd32 % (max - min);
	}

	return (min + rnd) * SMALL_PAGE_SIZE;
#else /*!CFG_TA_ASLR*/
	return 0;
#endif /*!CFG_TA_ASLR*/
}

static void populate_segments(struct ta_elf *elf)
{
	TEE_Result res = TEE_SUCCESS;
	struct segment *seg = NULL;
	vaddr_t va = 0;
	size_t pad_begin = 0;

	assert(!elf->is_legacy);
	TAILQ_FOREACH(seg, &elf->segs, link) {
		struct segment *last_seg = TAILQ_LAST(&elf->segs, segment_head);
		size_t pad_end = roundup(last_seg->vaddr + last_seg->memsz -
					 seg->vaddr - seg->memsz);

		if (seg->remapped_writeable) {
			size_t num_bytes = roundup(seg->vaddr + seg->memsz) -
					   rounddown(seg->vaddr);

			assert(elf->load_addr);
			va = rounddown(elf->load_addr + seg->vaddr);
			assert(va >= elf->max_addr);
			res = sys_map_zi(num_bytes, 0, &va, 0, pad_end);
			if (res)
				err(res, "sys_map_zi");

			copy_remapped_to(elf, seg);
			elf->max_addr = va + num_bytes;
		} else {
			uint32_t flags = 0;
			size_t filesz = seg->filesz;
			size_t memsz = seg->memsz;
			size_t offset = seg->offset;
			size_t vaddr = seg->vaddr;

			if (offset < elf->max_offs) {
				/*
				 * We're in a load segment which overlaps
				 * with (or is covered by) the first page
				 * of a shared library.
				 */
				if (vaddr + filesz < SMALL_PAGE_SIZE) {
					size_t num_bytes = 0;

					/*
					 * If this segment is completely
					 * covered, take next.
					 */
					if (vaddr + memsz <= SMALL_PAGE_SIZE)
						continue;

					/*
					 * All data of the segment is
					 * loaded, but we need to zero
					 * extend it.
					 */
					va = elf->max_addr;
					num_bytes = roundup(vaddr + memsz) -
						    roundup(vaddr) -
						    SMALL_PAGE_SIZE;
					assert(num_bytes);
					res = sys_map_zi(num_bytes, 0, &va, 0,
							 0);
					if (res)
						err(res, "sys_map_zi");
					elf->max_addr = roundup(va + num_bytes);
					continue;
				}

				/* Partial overlap, remove the first page. */
				vaddr += SMALL_PAGE_SIZE;
				filesz -= SMALL_PAGE_SIZE;
				memsz -= SMALL_PAGE_SIZE;
				offset += SMALL_PAGE_SIZE;
			}

			if (!elf->load_addr) {
				va = 0;
				pad_begin = get_pad_begin();
				/*
				 * If mapping with pad_begin fails we'll
				 * retry without pad_begin, effectively
				 * disabling ASLR for the current ELF file.
				 */
			} else {
				va = vaddr + elf->load_addr;
				pad_begin = 0;
			}

			if (seg->flags & PF_W)
				flags |= PTA_SYSTEM_MAP_FLAG_WRITEABLE;
			else
				flags |= PTA_SYSTEM_MAP_FLAG_SHAREABLE;
			if (seg->flags & PF_X)
				flags |= PTA_SYSTEM_MAP_FLAG_EXECUTABLE;
			if (!(seg->flags & PF_R))
				err(TEE_ERROR_NOT_SUPPORTED,
				    "Segment must be readable");
			if (flags & PTA_SYSTEM_MAP_FLAG_WRITEABLE) {
				res = sys_map_zi(memsz, 0, &va, pad_begin,
						 pad_end);
				if (pad_begin && res == TEE_ERROR_OUT_OF_MEMORY)
					res = sys_map_zi(memsz, 0, &va, 0,
							 pad_end);
				if (res)
					err(res, "sys_map_zi");
				res = sys_copy_from_ta_bin((void *)va, filesz,
							   elf->handle, offset);
				if (res)
					err(res, "sys_copy_from_ta_bin");
			} else {
				if (filesz != memsz)
					err(TEE_ERROR_BAD_FORMAT,
					    "Filesz and memsz mismatch");
				res = sys_map_ta_bin(&va, filesz, flags,
						     elf->handle, offset,
						     pad_begin, pad_end);
				if (pad_begin && res == TEE_ERROR_OUT_OF_MEMORY)
					res = sys_map_ta_bin(&va, filesz, flags,
							     elf->handle,
							     offset, 0,
							     pad_end);
				if (res)
					err(res, "sys_map_ta_bin");
			}

			if (!elf->load_addr)
				elf->load_addr = va;
			elf->max_addr = roundup(va + memsz);
			elf->max_offs += filesz;
		}
	}
}

static void map_segments(struct ta_elf *elf)
{
	TEE_Result res = TEE_SUCCESS;

	parse_load_segments(elf);
	adjust_segments(elf);
	if (TAILQ_FIRST(&elf->segs)->offset < SMALL_PAGE_SIZE) {
		vaddr_t va = 0;
		size_t sz = elf->max_addr - elf->load_addr;
		struct segment *seg = TAILQ_LAST(&elf->segs, segment_head);
		size_t pad_begin = get_pad_begin();

		/*
		 * We're loading a library; if not, other parts of the code
		 * need to be updated too.
		 */
		assert(!elf->is_main);

		/*
		 * Now that we know how much virtual memory is needed, move
		 * the already mapped part to a location which can
		 * accommodate us.
		 */
		res = sys_remap(elf->load_addr, &va, sz, pad_begin,
				roundup(seg->vaddr + seg->memsz));
		if (res == TEE_ERROR_OUT_OF_MEMORY)
			res = sys_remap(elf->load_addr, &va, sz, 0,
					roundup(seg->vaddr + seg->memsz));
		if (res)
			err(res, "sys_remap");
		elf->ehdr_addr = va;
		elf->load_addr = va;
		elf->max_addr = va + sz;
		elf->phdr = (void *)(va + elf->e_phoff);
	}
}

static void add_deps_from_segment(struct ta_elf *elf, unsigned int type,
				  vaddr_t addr, size_t memsz)
{
	size_t dyn_entsize = 0;
	size_t num_dyns = 0;
	size_t n = 0;
	unsigned int tag = 0;
	size_t val = 0;
	TEE_UUID uuid = { };
	char *str_tab = NULL;
	size_t str_tab_sz = 0;

	if (type != PT_DYNAMIC)
		return;

	check_phdr_in_range(elf, type, addr, memsz);

	if (elf->is_32bit)
		dyn_entsize = sizeof(Elf32_Dyn);
	else
		dyn_entsize = sizeof(Elf64_Dyn);

	assert(!(memsz % dyn_entsize));
	num_dyns = memsz / dyn_entsize;

	for (n = 0; n < num_dyns && !(str_tab && str_tab_sz); n++) {
		read_dyn(elf, addr, n, &tag, &val);
		if (tag == DT_STRTAB)
			str_tab = (char *)(val + elf->load_addr);
		else if (tag == DT_STRSZ)
			str_tab_sz = val;
	}
	check_range(elf, ".dynstr/STRTAB", str_tab, str_tab_sz);

	for (n = 0; n < num_dyns; n++) {
		read_dyn(elf, addr, n, &tag, &val);
		if (tag != DT_NEEDED)
			continue;
		if (val >= str_tab_sz)
			err(TEE_ERROR_BAD_FORMAT,
			    "Offset into .dynstr/STRTAB out of range");
		tee_uuid_from_str(&uuid, str_tab + val);
		queue_elf(&uuid);
	}
}

static void add_dependencies(struct ta_elf *elf)
{
	size_t n = 0;

	if (elf->is_32bit) {
		Elf32_Phdr *phdr = elf->phdr;

		for (n = 0; n < elf->e_phnum; n++)
			add_deps_from_segment(elf, phdr[n].p_type,
					      phdr[n].p_vaddr, phdr[n].p_memsz);
	} else {
		Elf64_Phdr *phdr = elf->phdr;

		for (n = 0; n < elf->e_phnum; n++)
			add_deps_from_segment(elf, phdr[n].p_type,
					      phdr[n].p_vaddr, phdr[n].p_memsz);
	}
}

static void copy_section_headers(struct ta_elf *elf)
{
	TEE_Result res = TEE_SUCCESS;
	size_t sz = 0;
	size_t offs = 0;

	if (MUL_OVERFLOW(elf->e_shnum, elf->e_shentsize, &sz))
		err(TEE_ERROR_BAD_FORMAT, "Section headers size overflow");

	elf->shdr = malloc(sz);
	if (!elf->shdr)
		err(TEE_ERROR_OUT_OF_MEMORY, "malloc");

	/*
	 * We're assuming that the section headers come after the load
	 * segments, but if it's a very small dynamically linked library
	 * the section headers can still end up (at least partially) in
	 * the first mapped page.
	 */
	if (elf->e_shoff < SMALL_PAGE_SIZE) {
		assert(!elf->is_main);
		offs = MIN(SMALL_PAGE_SIZE - elf->e_shoff, sz);
		memcpy(elf->shdr, (void *)(elf->load_addr + elf->e_shoff),
		       offs);
	}

	if (offs < sz) {
		res = sys_copy_from_ta_bin((uint8_t *)elf->shdr + offs,
					   sz - offs, elf->handle,
					   elf->e_shoff + offs);
		if (res)
			err(res, "sys_copy_from_ta_bin");
	}
}

static void close_handle(struct ta_elf *elf)
{
	TEE_Result res = sys_close_ta_bin(elf->handle);

	if (res)
		err(res, "sys_close_ta_bin");
	elf->handle = -1;
}

static void clean_elf_load_main(struct ta_elf *elf)
{
	TEE_Result res = TEE_SUCCESS;

	/*
	 * Clean up from the last attempt to load
	 */
	res = sys_unmap(elf->ehdr_addr, SMALL_PAGE_SIZE);
	if (res)
		err(res, "sys_unmap");

	while (!TAILQ_EMPTY(&elf->segs)) {
		struct segment *seg = TAILQ_FIRST(&elf->segs);
		vaddr_t va = 0;
		size_t num_bytes = 0;

		va = rounddown(elf->load_addr + seg->vaddr);
		if (seg->remapped_writeable)
			num_bytes = roundup(seg->vaddr + seg->memsz) -
				    rounddown(seg->vaddr);
		else
			num_bytes = seg->memsz;

		res = sys_unmap(va, num_bytes);
		if (res)
			err(res, "sys_unmap");

		TAILQ_REMOVE(&elf->segs, seg, link);
		free(seg);
	}

	free(elf->shdr);
	memset(&elf->is_32bit, 0,
	       (vaddr_t)&elf->uuid - (vaddr_t)&elf->is_32bit);

	TAILQ_INIT(&elf->segs);
}

static void load_main(struct ta_elf *elf)
{
	init_elf(elf);
	map_segments(elf);
	populate_segments(elf);
	add_dependencies(elf);
	copy_section_headers(elf);
	save_symtab(elf);
	close_handle(elf);

	elf->head = (struct ta_head *)elf->load_addr;
	if (elf->head->depr_entry != UINT64_MAX) {
		/*
		 * Legacy TAs set their entry point in ta_head. For
		 * non-legacy TAs the ELF entry point is used instead, and
		 * the ta_head entry point is left set to UINT64_MAX to
		 * indicate that it's not used.
		 *
		 * NB, everything before the commit a73b5878c89d ("Replace
		 * ta_head.entry with elf entry") is considered a legacy TA
		 * by ldelf.
		 *
		 * Legacy TAs cannot be mapped with shared memory segments,
		 * so restart the mapping if it turns out we're loading a
		 * legacy TA.
		 */

		DMSG("Reloading TA %pUl as legacy TA", (void *)&elf->uuid);
		clean_elf_load_main(elf);
		elf->is_legacy = true;
		init_elf(elf);
		map_segments(elf);
		populate_segments_legacy(elf);
		add_dependencies(elf);
		copy_section_headers(elf);
		save_symtab(elf);
		close_handle(elf);
		elf->head = (struct ta_head *)elf->load_addr;
		/*
		 * Check that the TA is still a legacy TA; if it isn't, give
		 * up now since we're likely under attack.
		 */
		if (elf->head->depr_entry == UINT64_MAX)
			err(TEE_ERROR_GENERIC,
			    "TA %pUl was changed on disk to non-legacy",
			    (void *)&elf->uuid);
	}
}

void ta_elf_load_main(const TEE_UUID *uuid, uint32_t *is_32bit, uint64_t *sp,
		      uint32_t *ta_flags)
{
	struct ta_elf *elf = queue_elf(uuid);
	vaddr_t va = 0;
	TEE_Result res = TEE_SUCCESS;

	assert(elf);
	elf->is_main = true;

	load_main(elf);

	*is_32bit = elf->is_32bit;
	res = sys_map_zi(elf->head->stack_size, 0, &va, 0, 0);
	if (res)
		err(res, "sys_map_zi stack");

	if (elf->head->flags & ~TA_FLAGS_MASK)
		err(TEE_ERROR_BAD_FORMAT, "Invalid TA flag(s) %#"PRIx32,
		    elf->head->flags & ~TA_FLAGS_MASK);

	*ta_flags = elf->head->flags;
	*sp = va + elf->head->stack_size;
	ta_stack = va;
	ta_stack_size = elf->head->stack_size;
}

void ta_elf_finalize_load_main(uint64_t *entry)
{
	struct ta_elf *elf = TAILQ_FIRST(&main_elf_queue);
	TEE_Result res = TEE_SUCCESS;

	assert(elf->is_main);

	res = ta_elf_set_init_fini_info(elf->is_32bit);
	if (res)
		err(res, "ta_elf_set_init_fini_info");

	if (elf->is_legacy)
		*entry = elf->head->depr_entry;
	else
		*entry = elf->e_entry + elf->load_addr;
}

void ta_elf_load_dependency(struct ta_elf *elf, bool is_32bit)
{
	if (elf->is_main)
		return;

	init_elf(elf);
	if (elf->is_32bit != is_32bit)
		err(TEE_ERROR_BAD_FORMAT, "ELF %pUl is %sbit (expected %sbit)",
		    (void *)&elf->uuid, elf->is_32bit ? "32" : "64",
		    is_32bit ? "32" : "64");

	map_segments(elf);
	populate_segments(elf);
	add_dependencies(elf);
	copy_section_headers(elf);
	save_symtab(elf);
	close_handle(elf);
}

void ta_elf_finalize_mappings(struct ta_elf *elf)
{
	TEE_Result res = TEE_SUCCESS;
	struct segment *seg = NULL;

	if (!elf->is_legacy)
		return;

	TAILQ_FOREACH(seg, &elf->segs, link) {
		vaddr_t va = elf->load_addr + seg->vaddr;
		uint32_t flags = 0;

		if (seg->flags & PF_W)
			flags |= PTA_SYSTEM_MAP_FLAG_WRITEABLE;
		if (seg->flags & PF_X)
			flags |= PTA_SYSTEM_MAP_FLAG_EXECUTABLE;

		res = sys_set_prot(va, seg->memsz, flags);
		if (res)
			err(res, "sys_set_prot");
	}
}

static void __printf(3, 4) print_wrapper(void *pctx, print_func_t print_func,
					 const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	print_func(pctx, fmt, ap);
	va_end(ap);
}

static void print_seg(void *pctx, print_func_t print_func,
		      size_t idx __maybe_unused, int elf_idx __maybe_unused,
		      vaddr_t va __maybe_unused, paddr_t pa __maybe_unused,
		      size_t sz __maybe_unused, uint32_t flags)
{
	int width __maybe_unused = 8;
	char desc[14] __maybe_unused = "";
	char flags_str[] __maybe_unused = "----";

	if (elf_idx > -1) {
		snprintf(desc, sizeof(desc), " [%d]", elf_idx);
	} else {
		if (flags & DUMP_MAP_EPHEM)
			snprintf(desc, sizeof(desc), " (param)");
		if (flags & DUMP_MAP_LDELF)
			snprintf(desc, sizeof(desc), " (ldelf)");
		if (va == ta_stack)
			snprintf(desc, sizeof(desc), " (stack)");
	}

	if (flags & DUMP_MAP_READ)
		flags_str[0] = 'r';
	if (flags & DUMP_MAP_WRITE)
		flags_str[1] = 'w';
	if (flags & DUMP_MAP_EXEC)
		flags_str[2] = 'x';
	if (flags & DUMP_MAP_SECURE)
		flags_str[3] = 's';

	print_wrapper(pctx, print_func,
		      "region %2zu: va 0x%0*"PRIxVA" pa 0x%0*"PRIxPA" size 0x%06zx flags %s%s\n",
		      idx, width, va, width, pa, sz, flags_str, desc);
}

static bool get_next_in_order(struct ta_elf_queue *elf_queue,
			      struct ta_elf **elf, struct segment **seg,
			      size_t *elf_idx)
{
	struct ta_elf *e = NULL;
	struct segment *s = NULL;
	size_t idx = 0;
	vaddr_t va = 0;
	struct ta_elf *e2 = NULL;
	size_t i2 = 0;

	assert(elf && seg && elf_idx);
	e = *elf;
	s = *seg;
	assert((e == NULL && s == NULL) || (e != NULL && s != NULL));

	if (s) {
		s = TAILQ_NEXT(s, link);
		if (s) {
			*seg = s;
			return true;
		}
	}

	if (e)
		va = e->load_addr;

	/* Find the ELF with the next load address */
	e = NULL;
	TAILQ_FOREACH(e2, elf_queue, link) {
		if (e2->load_addr > va) {
			if (!e || e2->load_addr < e->load_addr) {
				e = e2;
				idx = i2;
			}
		}
		i2++;
	}
	if (!e)
		return false;

	*elf = e;
	*seg = TAILQ_FIRST(&e->segs);
	*elf_idx = idx;
	return true;
}

void ta_elf_print_mappings(void *pctx, print_func_t print_func,
			   struct ta_elf_queue *elf_queue, size_t num_maps,
			   struct dump_map *maps, vaddr_t mpool_base)
{
	struct segment *seg = NULL;
	struct ta_elf *elf = NULL;
	size_t elf_idx = 0;
	size_t idx = 0;
	size_t map_idx = 0;

	/*
	 * Loop over all segments and maps, printing virtual addresses in
	 * order. A segment has priority if the virtual address is present
	 * in both a map and a segment.
	 */
	get_next_in_order(elf_queue, &elf, &seg, &elf_idx);
	while (true) {
		vaddr_t va = -1;
		size_t sz = 0;
		uint32_t flags = DUMP_MAP_SECURE;
		size_t offs = 0;

		if (seg) {
			va = rounddown(seg->vaddr + elf->load_addr);
			sz = roundup(seg->vaddr + seg->memsz) -
			     rounddown(seg->vaddr);
		}

		while (map_idx < num_maps && maps[map_idx].va <= va) {
			uint32_t f = 0;

			/* If there's a match, it should be the same map */
			if (maps[map_idx].va == va) {
				/*
				 * In shared libraries the first page is
				 * mapped separately with the rest of that
				 * segment following back to back in a
				 * separate entry.
				 */
				if (map_idx + 1 < num_maps &&
				    maps[map_idx].sz == SMALL_PAGE_SIZE) {
					vaddr_t next_va = maps[map_idx].va +
							  maps[map_idx].sz;
					size_t comb_sz = maps[map_idx].sz +
							 maps[map_idx + 1].sz;

					if (next_va == maps[map_idx + 1].va &&
					    comb_sz == sz &&
					    maps[map_idx].flags ==
					    maps[map_idx + 1].flags) {
						/* Skip this and next entry */
						map_idx += 2;
						continue;
					}
				}
				assert(maps[map_idx].sz == sz);
			} else if (maps[map_idx].va < va) {
				if (maps[map_idx].va == mpool_base)
					f |= DUMP_MAP_LDELF;
				print_seg(pctx, print_func, idx, -1,
					  maps[map_idx].va, maps[map_idx].pa,
					  maps[map_idx].sz,
					  maps[map_idx].flags | f);
				idx++;
			}
			map_idx++;
		}

		if (!seg)
			break;

		offs = rounddown(seg->offset);
		if (seg->flags & PF_R)
			flags |= DUMP_MAP_READ;
		if (seg->flags & PF_W)
			flags |= DUMP_MAP_WRITE;
		if (seg->flags & PF_X)
			flags |= DUMP_MAP_EXEC;

		print_seg(pctx, print_func, idx, elf_idx, va, offs, sz, flags);
		idx++;

		if (!get_next_in_order(elf_queue, &elf, &seg, &elf_idx))
			seg = NULL;
	}

	elf_idx = 0;
	TAILQ_FOREACH(elf, elf_queue, link) {
		print_wrapper(pctx, print_func,
			      " [%zu] %pUl @ 0x%0*"PRIxVA"\n",
			      elf_idx, (void *)&elf->uuid, 8, elf->load_addr);
		elf_idx++;
	}
}

#ifdef CFG_UNWIND
void ta_elf_stack_trace_a32(uint32_t regs[16])
{
	struct unwind_state_arm32 state = { };

	memcpy(state.registers, regs, sizeof(state.registers));
	print_stack_arm32(&state, ta_stack, ta_stack_size);
}

void ta_elf_stack_trace_a64(uint64_t fp, uint64_t sp, uint64_t pc)
{
	struct unwind_state_arm64 state = { .fp = fp, .sp = sp, .pc = pc };

	print_stack_arm64(&state, ta_stack, ta_stack_size);
}
#endif

TEE_Result ta_elf_add_library(const TEE_UUID *uuid)
{
	struct ta_elf *ta = TAILQ_FIRST(&main_elf_queue);
	struct ta_elf *lib = ta_elf_find_elf(uuid);
	struct ta_elf *elf = NULL;

	if (lib)
		return TEE_SUCCESS; /* Already mapped */

	lib = queue_elf_helper(uuid);
	if (!lib)
		return TEE_ERROR_OUT_OF_MEMORY;

	for (elf = lib; elf; elf = TAILQ_NEXT(elf, link))
		ta_elf_load_dependency(elf, ta->is_32bit);

	for (elf = lib; elf; elf = TAILQ_NEXT(elf, link)) {
		ta_elf_relocate(elf);
		ta_elf_finalize_mappings(elf);
	}

	for (elf = lib; elf; elf = TAILQ_NEXT(elf, link))
		DMSG("ELF (%pUl) at %#"PRIxVA,
		     (void *)&elf->uuid, elf->load_addr);

	return ta_elf_set_init_fini_info(ta->is_32bit);
}

/* Get address/size of .init_array and .fini_array from the dynamic segment */
static void get_init_fini_array(struct ta_elf *elf, unsigned int type,
				vaddr_t addr, size_t memsz, vaddr_t *init,
				size_t *init_cnt, vaddr_t *fini,
				size_t *fini_cnt)
{
	size_t addrsz = 0;
	size_t dyn_entsize = 0;
	size_t num_dyns = 0;
	size_t n = 0;
	unsigned int tag = 0;
	size_t val = 0;

	assert(type == PT_DYNAMIC);

	check_phdr_in_range(elf, type, addr, memsz);

	if (elf->is_32bit) {
		dyn_entsize = sizeof(Elf32_Dyn);
		addrsz = 4;
	} else {
		dyn_entsize = sizeof(Elf64_Dyn);
		addrsz = 8;
	}

	assert(!(memsz % dyn_entsize));
	num_dyns = memsz / dyn_entsize;

	for (n = 0; n < num_dyns; n++) {
		read_dyn(elf, addr, n, &tag, &val);
		if (tag == DT_INIT_ARRAY)
			*init = val + elf->load_addr;
		else if (tag == DT_FINI_ARRAY)
			*fini = val + elf->load_addr;
		else if (tag == DT_INIT_ARRAYSZ)
			*init_cnt = val / addrsz;
		else if (tag == DT_FINI_ARRAYSZ)
			*fini_cnt = val / addrsz;
	}
}

/* Get address/size of .init_array and .fini_array in @elf (if present) */
static void elf_get_init_fini_array(struct ta_elf *elf, vaddr_t *init,
				    size_t *init_cnt, vaddr_t *fini,
				    size_t *fini_cnt)
{
	size_t n = 0;

	if (elf->is_32bit) {
		Elf32_Phdr *phdr = elf->phdr;

		for (n = 0; n < elf->e_phnum; n++) {
			if (phdr[n].p_type == PT_DYNAMIC) {
				get_init_fini_array(elf, phdr[n].p_type,
						    phdr[n].p_vaddr,
						    phdr[n].p_memsz,
						    init, init_cnt, fini,
						    fini_cnt);
				return;
			}
		}
	} else {
		Elf64_Phdr *phdr = elf->phdr;

		for (n = 0; n < elf->e_phnum; n++) {
			if (phdr[n].p_type == PT_DYNAMIC) {
				get_init_fini_array(elf, phdr[n].p_type,
						    phdr[n].p_vaddr,
						    phdr[n].p_memsz,
						    init, init_cnt, fini,
						    fini_cnt);
				return;
			}
		}
	}
}

static TEE_Result realloc_ifs(vaddr_t va, size_t cnt, bool is_32bit)
{
	struct __init_fini_info32 *info32 = (struct __init_fini_info32 *)va;
	struct __init_fini_info *info = (struct __init_fini_info *)va;
	struct __init_fini32 *ifs32 = NULL;
	struct __init_fini *ifs = NULL;
	size_t prev_cnt = 0;
	void *ptr = NULL;

	if (is_32bit) {
		ptr = (void *)(vaddr_t)info32->ifs;
		ptr = realloc(ptr, cnt * sizeof(struct __init_fini32));
		if (!ptr)
			return TEE_ERROR_OUT_OF_MEMORY;
		ifs32 = ptr;
		prev_cnt = info32->size;
		if (cnt > prev_cnt)
			memset(ifs32 + prev_cnt, 0,
			       (cnt - prev_cnt) * sizeof(*ifs32));
		info32->ifs = (uint32_t)(vaddr_t)ifs32;
		info32->size = cnt;
	} else {
		ptr = realloc(info->ifs, cnt * sizeof(struct __init_fini));
		if (!ptr)
			return TEE_ERROR_OUT_OF_MEMORY;
		ifs = ptr;
		prev_cnt = info->size;
		if (cnt > prev_cnt)
			memset(ifs + prev_cnt, 0,
			       (cnt - prev_cnt) * sizeof(*ifs));
		info->ifs = ifs;
		info->size = cnt;
	}

	return TEE_SUCCESS;
}

static void fill_ifs(vaddr_t va, size_t idx, struct ta_elf *elf, bool is_32bit)
{
	struct __init_fini_info32 *info32 = (struct __init_fini_info32 *)va;
	struct __init_fini_info *info = (struct __init_fini_info *)va;
	struct __init_fini32 *ifs32 = NULL;
	struct __init_fini *ifs = NULL;
	size_t init_cnt = 0;
	size_t fini_cnt = 0;
	vaddr_t init = 0;
	vaddr_t fini = 0;

	if (is_32bit) {
		assert(idx < info32->size);
		ifs32 = &((struct __init_fini32 *)(vaddr_t)info32->ifs)[idx];

		if (ifs32->flags & __IFS_VALID)
			return;

		elf_get_init_fini_array(elf, &init, &init_cnt, &fini,
					&fini_cnt);

		ifs32->init = (uint32_t)init;
		ifs32->init_size = init_cnt;

		ifs32->fini = (uint32_t)fini;
		ifs32->fini_size = fini_cnt;

		ifs32->flags |= __IFS_VALID;
	} else {
		assert(idx < info->size);
		ifs = &info->ifs[idx];

		if (ifs->flags & __IFS_VALID)
			return;

		elf_get_init_fini_array(elf, &init, &init_cnt, &fini,
					&fini_cnt);

		ifs->init = (void (**)(void))init;
		ifs->init_size = init_cnt;

		ifs->fini = (void (**)(void))fini;
		ifs->fini_size = fini_cnt;

		ifs->flags |= __IFS_VALID;
	}
}
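
/*
 * Illustrative sketch (not part of ldelf): the __init_fini_info filled in
 * below is expected to be consumed on the TA side, which walks the array
 * and calls each constructor before the TA entry point runs (and the
 * destructors at exit), roughly like this. The consumer code and variable
 * names here are assumptions for illustration only; the fields (size, ifs,
 * init, init_size) match the structures used by realloc_ifs()/fill_ifs().
 *
 *	struct __init_fini_info *info = &__init_fini_info;
 *	size_t n = 0;
 *	size_t m = 0;
 *
 *	for (n = 0; n < info->size; n++)
 *		for (m = 0; m < info->ifs[n].init_size; m++)
 *			info->ifs[n].init[m]();
 */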
/*
 * Set or update __init_fini_info in the TA with information from the ELF
 * queue
 */
TEE_Result ta_elf_set_init_fini_info(bool is_32bit)
{
	struct __init_fini_info *info = NULL;
	TEE_Result res = TEE_SUCCESS;
	struct ta_elf *elf = NULL;
	vaddr_t info_va = 0;
	size_t cnt = 0;

	res = ta_elf_resolve_sym("__init_fini_info", &info_va, NULL, NULL);
	if (res) {
		if (res == TEE_ERROR_ITEM_NOT_FOUND) {
			/* Older TA */
			return TEE_SUCCESS;
		}
		return res;
	}
	assert(info_va);

	info = (struct __init_fini_info *)info_va;
	if (info->reserved)
		return TEE_ERROR_NOT_SUPPORTED;

	TAILQ_FOREACH(elf, &main_elf_queue, link)
		cnt++;

	/* Queue has at least one file (main) */
	assert(cnt);

	res = realloc_ifs(info_va, cnt, is_32bit);
	if (res)
		goto err;

	cnt = 0;
	TAILQ_FOREACH(elf, &main_elf_queue, link) {
		fill_ifs(info_va, cnt, elf, is_32bit);
		cnt++;
	}

	return TEE_SUCCESS;
err:
	free(info);
	return res;
}
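
/*
 * Illustrative call sequence (an assumption for orientation, not code from
 * this file): the exported entry points above are driven by ldelf's main
 * loop roughly as follows when loading a TA and its shared library
 * dependencies. Details of the real driver may differ.
 *
 *	ta_elf_load_main(&uuid, &is_32bit, &sp, &ta_flags);
 *	TAILQ_FOREACH(elf, &main_elf_queue, link)
 *		ta_elf_load_dependency(elf, is_32bit);
 *	TAILQ_FOREACH(elf, &main_elf_queue, link) {
 *		ta_elf_relocate(elf);
 *		ta_elf_finalize_mappings(elf);
 *	}
 *	ta_elf_finalize_load_main(&entry);
 */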