1 // SPDX-License-Identifier: BSD-2-Clause 2 /* 3 * Copyright (c) 2019, Linaro Limited 4 * Copyright (c) 2020, Arm Limited 5 */ 6 7 #include <assert.h> 8 #include <config.h> 9 #include <confine_array_index.h> 10 #include <ctype.h> 11 #include <elf32.h> 12 #include <elf64.h> 13 #include <elf_common.h> 14 #include <ldelf.h> 15 #include <link.h> 16 #include <stdio.h> 17 #include <stdlib.h> 18 #include <string_ext.h> 19 #include <string.h> 20 #include <tee_api_types.h> 21 #include <tee_internal_api_extensions.h> 22 #include <unw/unwind.h> 23 #include <user_ta_header.h> 24 #include <util.h> 25 26 #include "sys.h" 27 #include "ta_elf.h" 28 29 /* 30 * Layout of a 32-bit struct dl_phdr_info for a 64-bit ldelf to access a 32-bit 31 * TA 32 */ 33 struct dl_phdr_info32 { 34 uint32_t dlpi_addr; 35 uint32_t dlpi_name; 36 uint32_t dlpi_phdr; 37 uint16_t dlpi_phnum; 38 uint64_t dlpi_adds; 39 uint64_t dlpi_subs; 40 uint32_t dlpi_tls_modid; 41 uint32_t dlpi_tls_data; 42 }; 43 44 static vaddr_t ta_stack; 45 static vaddr_t ta_stack_size; 46 47 struct ta_elf_queue main_elf_queue = TAILQ_HEAD_INITIALIZER(main_elf_queue); 48 49 /* 50 * Main application is always ID 1, shared libraries with TLS take IDs 2 and 51 * above 52 */ 53 static void assign_tls_mod_id(struct ta_elf *elf) 54 { 55 static size_t last_tls_mod_id = 1; 56 57 if (elf->is_main) 58 assert(last_tls_mod_id == 1); /* Main always comes first */ 59 elf->tls_mod_id = last_tls_mod_id++; 60 } 61 62 static struct ta_elf *queue_elf_helper(const TEE_UUID *uuid) 63 { 64 struct ta_elf *elf = calloc(1, sizeof(*elf)); 65 66 if (!elf) 67 return NULL; 68 69 TAILQ_INIT(&elf->segs); 70 71 elf->uuid = *uuid; 72 TAILQ_INSERT_TAIL(&main_elf_queue, elf, link); 73 return elf; 74 } 75 76 static struct ta_elf *queue_elf(const TEE_UUID *uuid) 77 { 78 struct ta_elf *elf = ta_elf_find_elf(uuid); 79 80 if (elf) 81 return NULL; 82 83 elf = queue_elf_helper(uuid); 84 if (!elf) 85 err(TEE_ERROR_OUT_OF_MEMORY, "queue_elf_helper"); 86 87 return elf; 88 } 89 90 struct ta_elf *ta_elf_find_elf(const TEE_UUID *uuid) 91 { 92 struct ta_elf *elf = NULL; 93 94 TAILQ_FOREACH(elf, &main_elf_queue, link) 95 if (!memcmp(uuid, &elf->uuid, sizeof(*uuid))) 96 return elf; 97 98 return NULL; 99 } 100 101 static TEE_Result e32_parse_ehdr(struct ta_elf *elf, Elf32_Ehdr *ehdr) 102 { 103 if (ehdr->e_ident[EI_VERSION] != EV_CURRENT || 104 ehdr->e_ident[EI_CLASS] != ELFCLASS32 || 105 ehdr->e_ident[EI_DATA] != ELFDATA2LSB || 106 ehdr->e_ident[EI_OSABI] != ELFOSABI_NONE || 107 ehdr->e_type != ET_DYN || ehdr->e_machine != EM_ARM || 108 (ehdr->e_flags & EF_ARM_ABIMASK) != EF_ARM_ABI_VERSION || 109 #ifndef CFG_WITH_VFP 110 (ehdr->e_flags & EF_ARM_ABI_FLOAT_HARD) || 111 #endif 112 ehdr->e_phentsize != sizeof(Elf32_Phdr) || 113 ehdr->e_shentsize != sizeof(Elf32_Shdr)) 114 return TEE_ERROR_BAD_FORMAT; 115 116 elf->is_32bit = true; 117 elf->e_entry = ehdr->e_entry; 118 elf->e_phoff = ehdr->e_phoff; 119 elf->e_shoff = ehdr->e_shoff; 120 elf->e_phnum = ehdr->e_phnum; 121 elf->e_shnum = ehdr->e_shnum; 122 elf->e_phentsize = ehdr->e_phentsize; 123 elf->e_shentsize = ehdr->e_shentsize; 124 125 return TEE_SUCCESS; 126 } 127 128 #ifdef ARM64 129 static TEE_Result e64_parse_ehdr(struct ta_elf *elf, Elf64_Ehdr *ehdr) 130 { 131 if (ehdr->e_ident[EI_VERSION] != EV_CURRENT || 132 ehdr->e_ident[EI_CLASS] != ELFCLASS64 || 133 ehdr->e_ident[EI_DATA] != ELFDATA2LSB || 134 ehdr->e_ident[EI_OSABI] != ELFOSABI_NONE || 135 ehdr->e_type != ET_DYN || ehdr->e_machine != EM_AARCH64 || 136 ehdr->e_flags || ehdr->e_phentsize != sizeof(Elf64_Phdr) || 137 ehdr->e_shentsize != sizeof(Elf64_Shdr)) 138 return TEE_ERROR_BAD_FORMAT; 139 140 141 elf->is_32bit = false; 142 elf->e_entry = ehdr->e_entry; 143 elf->e_phoff = ehdr->e_phoff; 144 elf->e_shoff = ehdr->e_shoff; 145 elf->e_phnum = ehdr->e_phnum; 146 elf->e_shnum = ehdr->e_shnum; 147 elf->e_phentsize = ehdr->e_phentsize; 148 elf->e_shentsize = ehdr->e_shentsize; 149 150 return TEE_SUCCESS; 151 } 152 #else /*ARM64*/ 153 static TEE_Result e64_parse_ehdr(struct ta_elf *elf __unused, 154 Elf64_Ehdr *ehdr __unused) 155 { 156 return TEE_ERROR_NOT_SUPPORTED; 157 } 158 #endif /*ARM64*/ 159 160 static void check_phdr_in_range(struct ta_elf *elf, unsigned int type, 161 vaddr_t addr, size_t memsz) 162 { 163 vaddr_t max_addr = 0; 164 165 if (ADD_OVERFLOW(addr, memsz, &max_addr)) 166 err(TEE_ERROR_BAD_FORMAT, "Program header %#x overflow", type); 167 168 /* 169 * elf->load_addr and elf->max_addr are both using the 170 * final virtual addresses, while this program header is 171 * relative to 0. 172 */ 173 if (max_addr > elf->max_addr - elf->load_addr) 174 err(TEE_ERROR_BAD_FORMAT, "Program header %#x out of bounds", 175 type); 176 } 177 178 static void read_dyn(struct ta_elf *elf, vaddr_t addr, 179 size_t idx, unsigned int *tag, size_t *val) 180 { 181 if (elf->is_32bit) { 182 Elf32_Dyn *dyn = (Elf32_Dyn *)(addr + elf->load_addr); 183 184 *tag = dyn[idx].d_tag; 185 *val = dyn[idx].d_un.d_val; 186 } else { 187 Elf64_Dyn *dyn = (Elf64_Dyn *)(addr + elf->load_addr); 188 189 *tag = dyn[idx].d_tag; 190 *val = dyn[idx].d_un.d_val; 191 } 192 } 193 194 static void check_range(struct ta_elf *elf, const char *name, const void *ptr, 195 size_t sz) 196 { 197 size_t max_addr = 0; 198 199 if ((vaddr_t)ptr < elf->load_addr) 200 err(TEE_ERROR_BAD_FORMAT, "%s %p out of range", name, ptr); 201 202 if (ADD_OVERFLOW((vaddr_t)ptr, sz, &max_addr)) 203 err(TEE_ERROR_BAD_FORMAT, "%s range overflow", name); 204 205 if (max_addr > elf->max_addr) 206 err(TEE_ERROR_BAD_FORMAT, 207 "%s %p..%#zx out of range", name, ptr, max_addr); 208 } 209 210 static void check_hashtab(struct ta_elf *elf, void *ptr, size_t num_buckets, 211 size_t num_chains) 212 { 213 /* 214 * Starting from 2 as the first two words are mandatory and hold 215 * num_buckets and num_chains. So this function is called twice, 216 * first to see that there's indeed room for num_buckets and 217 * num_chains and then to see that all of it fits. 218 * See http://www.sco.com/developers/gabi/latest/ch5.dynamic.html#hash 219 */ 220 size_t num_words = 2; 221 size_t sz = 0; 222 223 if (!IS_ALIGNED_WITH_TYPE(ptr, uint32_t)) 224 err(TEE_ERROR_BAD_FORMAT, "Bad alignment of DT_HASH %p", ptr); 225 226 if (ADD_OVERFLOW(num_words, num_buckets, &num_words) || 227 ADD_OVERFLOW(num_words, num_chains, &num_words) || 228 MUL_OVERFLOW(num_words, sizeof(uint32_t), &sz)) 229 err(TEE_ERROR_BAD_FORMAT, "DT_HASH overflow"); 230 231 check_range(elf, "DT_HASH", ptr, sz); 232 } 233 234 static void save_hashtab(struct ta_elf *elf) 235 { 236 uint32_t *hashtab = NULL; 237 size_t n = 0; 238 239 if (elf->is_32bit) { 240 Elf32_Shdr *shdr = elf->shdr; 241 242 for (n = 0; n < elf->e_shnum; n++) { 243 if (shdr[n].sh_type == SHT_HASH) { 244 elf->hashtab = (void *)(shdr[n].sh_addr + 245 elf->load_addr); 246 break; 247 } 248 } 249 } else { 250 Elf64_Shdr *shdr = elf->shdr; 251 252 for (n = 0; n < elf->e_shnum; n++) { 253 if (shdr[n].sh_type == SHT_HASH) { 254 elf->hashtab = (void *)(shdr[n].sh_addr + 255 elf->load_addr); 256 break; 257 } 258 } 259 } 260 261 check_hashtab(elf, elf->hashtab, 0, 0); 262 hashtab = elf->hashtab; 263 check_hashtab(elf, elf->hashtab, hashtab[0], hashtab[1]); 264 } 265 266 static void save_soname_from_segment(struct ta_elf *elf, unsigned int type, 267 vaddr_t addr, size_t memsz) 268 { 269 size_t dyn_entsize = 0; 270 size_t num_dyns = 0; 271 size_t n = 0; 272 unsigned int tag = 0; 273 size_t val = 0; 274 char *str_tab = NULL; 275 276 if (type != PT_DYNAMIC) 277 return; 278 279 if (elf->is_32bit) 280 dyn_entsize = sizeof(Elf32_Dyn); 281 else 282 dyn_entsize = sizeof(Elf64_Dyn); 283 284 assert(!(memsz % dyn_entsize)); 285 num_dyns = memsz / dyn_entsize; 286 287 for (n = 0; n < num_dyns; n++) { 288 read_dyn(elf, addr, n, &tag, &val); 289 if (tag == DT_STRTAB) { 290 str_tab = (char *)(val + elf->load_addr); 291 break; 292 } 293 } 294 for (n = 0; n < num_dyns; n++) { 295 read_dyn(elf, addr, n, &tag, &val); 296 if (tag == DT_SONAME) { 297 elf->soname = str_tab + val; 298 break; 299 } 300 } 301 } 302 303 static void save_soname(struct ta_elf *elf) 304 { 305 size_t n = 0; 306 307 if (elf->is_32bit) { 308 Elf32_Phdr *phdr = elf->phdr; 309 310 for (n = 0; n < elf->e_phnum; n++) 311 save_soname_from_segment(elf, phdr[n].p_type, 312 phdr[n].p_vaddr, 313 phdr[n].p_memsz); 314 } else { 315 Elf64_Phdr *phdr = elf->phdr; 316 317 for (n = 0; n < elf->e_phnum; n++) 318 save_soname_from_segment(elf, phdr[n].p_type, 319 phdr[n].p_vaddr, 320 phdr[n].p_memsz); 321 } 322 } 323 324 static void e32_save_symtab(struct ta_elf *elf, size_t tab_idx) 325 { 326 Elf32_Shdr *shdr = elf->shdr; 327 size_t str_idx = shdr[tab_idx].sh_link; 328 329 elf->dynsymtab = (void *)(shdr[tab_idx].sh_addr + elf->load_addr); 330 if (!IS_ALIGNED_WITH_TYPE(elf->dynsymtab, Elf32_Sym)) 331 err(TEE_ERROR_BAD_FORMAT, "Bad alignment of dynsymtab %p", 332 elf->dynsymtab); 333 check_range(elf, "Dynsymtab", elf->dynsymtab, shdr[tab_idx].sh_size); 334 335 if (shdr[tab_idx].sh_size % sizeof(Elf32_Sym)) 336 err(TEE_ERROR_BAD_FORMAT, 337 "Size of dynsymtab not an even multiple of Elf32_Sym"); 338 elf->num_dynsyms = shdr[tab_idx].sh_size / sizeof(Elf32_Sym); 339 340 if (str_idx >= elf->e_shnum) 341 err(TEE_ERROR_BAD_FORMAT, "Dynstr section index out of range"); 342 elf->dynstr = (void *)(shdr[str_idx].sh_addr + elf->load_addr); 343 check_range(elf, "Dynstr", elf->dynstr, shdr[str_idx].sh_size); 344 345 elf->dynstr_size = shdr[str_idx].sh_size; 346 } 347 348 static void e64_save_symtab(struct ta_elf *elf, size_t tab_idx) 349 { 350 Elf64_Shdr *shdr = elf->shdr; 351 size_t str_idx = shdr[tab_idx].sh_link; 352 353 elf->dynsymtab = (void *)(vaddr_t)(shdr[tab_idx].sh_addr + 354 elf->load_addr); 355 356 if (!IS_ALIGNED_WITH_TYPE(elf->dynsymtab, Elf64_Sym)) 357 err(TEE_ERROR_BAD_FORMAT, "Bad alignment of .dynsym/DYNSYM %p", 358 elf->dynsymtab); 359 check_range(elf, ".dynsym/DYNSYM", elf->dynsymtab, 360 shdr[tab_idx].sh_size); 361 362 if (shdr[tab_idx].sh_size % sizeof(Elf64_Sym)) 363 err(TEE_ERROR_BAD_FORMAT, 364 "Size of .dynsym/DYNSYM not an even multiple of Elf64_Sym"); 365 elf->num_dynsyms = shdr[tab_idx].sh_size / sizeof(Elf64_Sym); 366 367 if (str_idx >= elf->e_shnum) 368 err(TEE_ERROR_BAD_FORMAT, 369 ".dynstr/STRTAB section index out of range"); 370 elf->dynstr = (void *)(vaddr_t)(shdr[str_idx].sh_addr + elf->load_addr); 371 check_range(elf, ".dynstr/STRTAB", elf->dynstr, shdr[str_idx].sh_size); 372 373 elf->dynstr_size = shdr[str_idx].sh_size; 374 } 375 376 static void save_symtab(struct ta_elf *elf) 377 { 378 size_t n = 0; 379 380 if (elf->is_32bit) { 381 Elf32_Shdr *shdr = elf->shdr; 382 383 for (n = 0; n < elf->e_shnum; n++) { 384 if (shdr[n].sh_type == SHT_DYNSYM) { 385 e32_save_symtab(elf, n); 386 break; 387 } 388 } 389 } else { 390 Elf64_Shdr *shdr = elf->shdr; 391 392 for (n = 0; n < elf->e_shnum; n++) { 393 if (shdr[n].sh_type == SHT_DYNSYM) { 394 e64_save_symtab(elf, n); 395 break; 396 } 397 } 398 399 } 400 401 save_hashtab(elf); 402 save_soname(elf); 403 } 404 405 static void init_elf(struct ta_elf *elf) 406 { 407 TEE_Result res = TEE_SUCCESS; 408 vaddr_t va = 0; 409 uint32_t flags = LDELF_MAP_FLAG_SHAREABLE; 410 size_t sz = 0; 411 412 res = sys_open_ta_bin(&elf->uuid, &elf->handle); 413 if (res) 414 err(res, "sys_open_ta_bin(%pUl)", (void *)&elf->uuid); 415 416 /* 417 * Map it read-only executable when we're loading a library where 418 * the ELF header is included in a load segment. 419 */ 420 if (!elf->is_main) 421 flags |= LDELF_MAP_FLAG_EXECUTABLE; 422 res = sys_map_ta_bin(&va, SMALL_PAGE_SIZE, flags, elf->handle, 0, 0, 0); 423 if (res) 424 err(res, "sys_map_ta_bin"); 425 elf->ehdr_addr = va; 426 if (!elf->is_main) { 427 elf->load_addr = va; 428 elf->max_addr = va + SMALL_PAGE_SIZE; 429 elf->max_offs = SMALL_PAGE_SIZE; 430 } 431 432 if (!IS_ELF(*(Elf32_Ehdr *)va)) 433 err(TEE_ERROR_BAD_FORMAT, "TA is not an ELF"); 434 435 res = e32_parse_ehdr(elf, (void *)va); 436 if (res == TEE_ERROR_BAD_FORMAT) 437 res = e64_parse_ehdr(elf, (void *)va); 438 if (res) 439 err(res, "Cannot parse ELF"); 440 441 if (MUL_OVERFLOW(elf->e_phnum, elf->e_phentsize, &sz) || 442 ADD_OVERFLOW(sz, elf->e_phoff, &sz)) 443 err(TEE_ERROR_BAD_FORMAT, "Program headers size overflow"); 444 445 if (sz > SMALL_PAGE_SIZE) 446 err(TEE_ERROR_NOT_SUPPORTED, "Cannot read program headers"); 447 448 elf->phdr = (void *)(va + elf->e_phoff); 449 } 450 451 static size_t roundup(size_t v) 452 { 453 return ROUNDUP(v, SMALL_PAGE_SIZE); 454 } 455 456 static size_t rounddown(size_t v) 457 { 458 return ROUNDDOWN(v, SMALL_PAGE_SIZE); 459 } 460 461 static void add_segment(struct ta_elf *elf, size_t offset, size_t vaddr, 462 size_t filesz, size_t memsz, size_t flags, size_t align) 463 { 464 struct segment *seg = calloc(1, sizeof(*seg)); 465 466 if (!seg) 467 err(TEE_ERROR_OUT_OF_MEMORY, "calloc"); 468 469 if (memsz < filesz) 470 err(TEE_ERROR_BAD_FORMAT, "Memsz smaller than filesz"); 471 472 seg->offset = offset; 473 seg->vaddr = vaddr; 474 seg->filesz = filesz; 475 seg->memsz = memsz; 476 seg->flags = flags; 477 seg->align = align; 478 479 TAILQ_INSERT_TAIL(&elf->segs, seg, link); 480 } 481 482 static void parse_load_segments(struct ta_elf *elf) 483 { 484 size_t n = 0; 485 486 if (elf->is_32bit) { 487 Elf32_Phdr *phdr = elf->phdr; 488 489 for (n = 0; n < elf->e_phnum; n++) 490 if (phdr[n].p_type == PT_LOAD) { 491 add_segment(elf, phdr[n].p_offset, 492 phdr[n].p_vaddr, phdr[n].p_filesz, 493 phdr[n].p_memsz, phdr[n].p_flags, 494 phdr[n].p_align); 495 } else if (phdr[n].p_type == PT_ARM_EXIDX) { 496 elf->exidx_start = phdr[n].p_vaddr; 497 elf->exidx_size = phdr[n].p_filesz; 498 } else if (phdr[n].p_type == PT_TLS) { 499 assign_tls_mod_id(elf); 500 } 501 } else { 502 Elf64_Phdr *phdr = elf->phdr; 503 504 for (n = 0; n < elf->e_phnum; n++) 505 if (phdr[n].p_type == PT_LOAD) { 506 add_segment(elf, phdr[n].p_offset, 507 phdr[n].p_vaddr, phdr[n].p_filesz, 508 phdr[n].p_memsz, phdr[n].p_flags, 509 phdr[n].p_align); 510 } else if (phdr[n].p_type == PT_TLS) { 511 elf->tls_start = phdr[n].p_vaddr; 512 elf->tls_filesz = phdr[n].p_filesz; 513 elf->tls_memsz = phdr[n].p_memsz; 514 } else if (IS_ENABLED(CFG_TA_BTI) && 515 phdr[n].p_type == PT_GNU_PROPERTY) { 516 elf->prop_start = phdr[n].p_vaddr; 517 elf->prop_align = phdr[n].p_align; 518 elf->prop_memsz = phdr[n].p_memsz; 519 } 520 } 521 } 522 523 static void copy_remapped_to(struct ta_elf *elf, const struct segment *seg) 524 { 525 uint8_t *dst = (void *)(seg->vaddr + elf->load_addr); 526 size_t n = 0; 527 size_t offs = seg->offset; 528 size_t num_bytes = seg->filesz; 529 530 if (offs < elf->max_offs) { 531 n = MIN(elf->max_offs - offs, num_bytes); 532 memcpy(dst, (void *)(elf->max_addr + offs - elf->max_offs), n); 533 dst += n; 534 offs += n; 535 num_bytes -= n; 536 } 537 538 if (num_bytes) { 539 TEE_Result res = sys_copy_from_ta_bin(dst, num_bytes, 540 elf->handle, offs); 541 542 if (res) 543 err(res, "sys_copy_from_ta_bin"); 544 elf->max_offs += offs; 545 } 546 } 547 548 static void adjust_segments(struct ta_elf *elf) 549 { 550 struct segment *seg = NULL; 551 struct segment *prev_seg = NULL; 552 size_t prev_end_addr = 0; 553 size_t align = 0; 554 size_t mask = 0; 555 556 /* Sanity check */ 557 TAILQ_FOREACH(seg, &elf->segs, link) { 558 size_t dummy __maybe_unused = 0; 559 560 assert(seg->align >= SMALL_PAGE_SIZE); 561 assert(!ADD_OVERFLOW(seg->vaddr, seg->memsz, &dummy)); 562 assert(seg->filesz <= seg->memsz); 563 assert((seg->offset & SMALL_PAGE_MASK) == 564 (seg->vaddr & SMALL_PAGE_MASK)); 565 566 prev_seg = TAILQ_PREV(seg, segment_head, link); 567 if (prev_seg) { 568 assert(seg->vaddr >= prev_seg->vaddr + prev_seg->memsz); 569 assert(seg->offset >= 570 prev_seg->offset + prev_seg->filesz); 571 } 572 if (!align) 573 align = seg->align; 574 assert(align == seg->align); 575 } 576 577 mask = align - 1; 578 579 seg = TAILQ_FIRST(&elf->segs); 580 if (seg) 581 seg = TAILQ_NEXT(seg, link); 582 while (seg) { 583 prev_seg = TAILQ_PREV(seg, segment_head, link); 584 prev_end_addr = prev_seg->vaddr + prev_seg->memsz; 585 586 /* 587 * This segment may overlap with the last "page" in the 588 * previous segment in two different ways: 589 * 1. Virtual address (and offset) overlaps => 590 * Permissions needs to be merged. The offset must have 591 * the SMALL_PAGE_MASK bits set as vaddr and offset must 592 * add up with prevsion segment. 593 * 594 * 2. Only offset overlaps => 595 * The same page in the ELF is mapped at two different 596 * virtual addresses. As a limitation this segment must 597 * be mapped as writeable. 598 */ 599 600 /* Case 1. */ 601 if (rounddown(seg->vaddr) < prev_end_addr) { 602 assert((seg->vaddr & mask) == (seg->offset & mask)); 603 assert(prev_seg->memsz == prev_seg->filesz); 604 605 /* 606 * Merge the segments and their permissions. 607 * Note that the may be a small hole between the 608 * two sections. 609 */ 610 prev_seg->filesz = seg->vaddr + seg->filesz - 611 prev_seg->vaddr; 612 prev_seg->memsz = seg->vaddr + seg->memsz - 613 prev_seg->vaddr; 614 prev_seg->flags |= seg->flags; 615 616 TAILQ_REMOVE(&elf->segs, seg, link); 617 free(seg); 618 seg = TAILQ_NEXT(prev_seg, link); 619 continue; 620 } 621 622 /* Case 2. */ 623 if ((seg->offset & mask) && 624 rounddown(seg->offset) < 625 (prev_seg->offset + prev_seg->filesz)) { 626 627 assert(seg->flags & PF_W); 628 seg->remapped_writeable = true; 629 } 630 631 /* 632 * No overlap, but we may need to align address, offset and 633 * size. 634 */ 635 seg->filesz += seg->vaddr - rounddown(seg->vaddr); 636 seg->memsz += seg->vaddr - rounddown(seg->vaddr); 637 seg->vaddr = rounddown(seg->vaddr); 638 seg->offset = rounddown(seg->offset); 639 seg = TAILQ_NEXT(seg, link); 640 } 641 642 } 643 644 static void populate_segments_legacy(struct ta_elf *elf) 645 { 646 TEE_Result res = TEE_SUCCESS; 647 struct segment *seg = NULL; 648 vaddr_t va = 0; 649 650 assert(elf->is_legacy); 651 TAILQ_FOREACH(seg, &elf->segs, link) { 652 struct segment *last_seg = TAILQ_LAST(&elf->segs, segment_head); 653 size_t pad_end = roundup(last_seg->vaddr + last_seg->memsz - 654 seg->vaddr - seg->memsz); 655 size_t num_bytes = roundup(seg->memsz); 656 657 if (!elf->load_addr) 658 va = 0; 659 else 660 va = seg->vaddr + elf->load_addr; 661 662 663 if (!(seg->flags & PF_R)) 664 err(TEE_ERROR_NOT_SUPPORTED, 665 "Segment must be readable"); 666 667 res = sys_map_zi(num_bytes, 0, &va, 0, pad_end); 668 if (res) 669 err(res, "sys_map_zi"); 670 res = sys_copy_from_ta_bin((void *)va, seg->filesz, 671 elf->handle, seg->offset); 672 if (res) 673 err(res, "sys_copy_from_ta_bin"); 674 675 if (!elf->load_addr) 676 elf->load_addr = va; 677 elf->max_addr = va + num_bytes; 678 elf->max_offs = seg->offset + seg->filesz; 679 } 680 } 681 682 static size_t get_pad_begin(void) 683 { 684 #ifdef CFG_TA_ASLR 685 size_t min = CFG_TA_ASLR_MIN_OFFSET_PAGES; 686 size_t max = CFG_TA_ASLR_MAX_OFFSET_PAGES; 687 TEE_Result res = TEE_SUCCESS; 688 uint32_t rnd32 = 0; 689 size_t rnd = 0; 690 691 COMPILE_TIME_ASSERT(CFG_TA_ASLR_MIN_OFFSET_PAGES < 692 CFG_TA_ASLR_MAX_OFFSET_PAGES); 693 if (max > min) { 694 res = sys_gen_random_num(&rnd32, sizeof(rnd32)); 695 if (res) { 696 DMSG("Random read failed: %#"PRIx32, res); 697 return min * SMALL_PAGE_SIZE; 698 } 699 rnd = rnd32 % (max - min); 700 } 701 702 return (min + rnd) * SMALL_PAGE_SIZE; 703 #else /*!CFG_TA_ASLR*/ 704 return 0; 705 #endif /*!CFG_TA_ASLR*/ 706 } 707 708 static void populate_segments(struct ta_elf *elf) 709 { 710 TEE_Result res = TEE_SUCCESS; 711 struct segment *seg = NULL; 712 vaddr_t va = 0; 713 size_t pad_begin = 0; 714 715 assert(!elf->is_legacy); 716 TAILQ_FOREACH(seg, &elf->segs, link) { 717 struct segment *last_seg = TAILQ_LAST(&elf->segs, segment_head); 718 size_t pad_end = roundup(last_seg->vaddr + last_seg->memsz - 719 seg->vaddr - seg->memsz); 720 721 if (seg->remapped_writeable) { 722 size_t num_bytes = roundup(seg->vaddr + seg->memsz) - 723 rounddown(seg->vaddr); 724 725 assert(elf->load_addr); 726 va = rounddown(elf->load_addr + seg->vaddr); 727 assert(va >= elf->max_addr); 728 res = sys_map_zi(num_bytes, 0, &va, 0, pad_end); 729 if (res) 730 err(res, "sys_map_zi"); 731 732 copy_remapped_to(elf, seg); 733 elf->max_addr = va + num_bytes; 734 } else { 735 uint32_t flags = 0; 736 size_t filesz = seg->filesz; 737 size_t memsz = seg->memsz; 738 size_t offset = seg->offset; 739 size_t vaddr = seg->vaddr; 740 741 if (offset < elf->max_offs) { 742 /* 743 * We're in a load segment which overlaps 744 * with (or is covered by) the first page 745 * of a shared library. 746 */ 747 if (vaddr + filesz < SMALL_PAGE_SIZE) { 748 size_t num_bytes = 0; 749 750 /* 751 * If this segment is completely 752 * covered, take next. 753 */ 754 if (vaddr + memsz <= SMALL_PAGE_SIZE) 755 continue; 756 757 /* 758 * All data of the segment is 759 * loaded, but we need to zero 760 * extend it. 761 */ 762 va = elf->max_addr; 763 num_bytes = roundup(vaddr + memsz) - 764 roundup(vaddr) - 765 SMALL_PAGE_SIZE; 766 assert(num_bytes); 767 res = sys_map_zi(num_bytes, 0, &va, 0, 768 0); 769 if (res) 770 err(res, "sys_map_zi"); 771 elf->max_addr = roundup(va + num_bytes); 772 continue; 773 } 774 775 /* Partial overlap, remove the first page. */ 776 vaddr += SMALL_PAGE_SIZE; 777 filesz -= SMALL_PAGE_SIZE; 778 memsz -= SMALL_PAGE_SIZE; 779 offset += SMALL_PAGE_SIZE; 780 } 781 782 if (!elf->load_addr) { 783 va = 0; 784 pad_begin = get_pad_begin(); 785 /* 786 * If mapping with pad_begin fails we'll 787 * retry without pad_begin, effectively 788 * disabling ASLR for the current ELF file. 789 */ 790 } else { 791 va = vaddr + elf->load_addr; 792 pad_begin = 0; 793 } 794 795 if (seg->flags & PF_W) 796 flags |= LDELF_MAP_FLAG_WRITEABLE; 797 else 798 flags |= LDELF_MAP_FLAG_SHAREABLE; 799 if (seg->flags & PF_X) 800 flags |= LDELF_MAP_FLAG_EXECUTABLE; 801 if (!(seg->flags & PF_R)) 802 err(TEE_ERROR_NOT_SUPPORTED, 803 "Segment must be readable"); 804 if (flags & LDELF_MAP_FLAG_WRITEABLE) { 805 res = sys_map_zi(memsz, 0, &va, pad_begin, 806 pad_end); 807 if (pad_begin && res == TEE_ERROR_OUT_OF_MEMORY) 808 res = sys_map_zi(memsz, 0, &va, 0, 809 pad_end); 810 if (res) 811 err(res, "sys_map_zi"); 812 res = sys_copy_from_ta_bin((void *)va, filesz, 813 elf->handle, offset); 814 if (res) 815 err(res, "sys_copy_from_ta_bin"); 816 } else { 817 if (filesz != memsz) 818 err(TEE_ERROR_BAD_FORMAT, 819 "Filesz and memsz mismatch"); 820 res = sys_map_ta_bin(&va, filesz, flags, 821 elf->handle, offset, 822 pad_begin, pad_end); 823 if (pad_begin && res == TEE_ERROR_OUT_OF_MEMORY) 824 res = sys_map_ta_bin(&va, filesz, flags, 825 elf->handle, 826 offset, 0, 827 pad_end); 828 if (res) 829 err(res, "sys_map_ta_bin"); 830 } 831 832 if (!elf->load_addr) 833 elf->load_addr = va; 834 elf->max_addr = roundup(va + memsz); 835 elf->max_offs += filesz; 836 } 837 } 838 } 839 840 static void ta_elf_add_bti(struct ta_elf *elf) 841 { 842 TEE_Result res = TEE_SUCCESS; 843 struct segment *seg = NULL; 844 uint32_t flags = LDELF_MAP_FLAG_EXECUTABLE | LDELF_MAP_FLAG_BTI; 845 846 TAILQ_FOREACH(seg, &elf->segs, link) { 847 vaddr_t va = elf->load_addr + seg->vaddr; 848 849 if (seg->flags & PF_X) { 850 res = sys_set_prot(va, seg->memsz, flags); 851 if (res) 852 err(res, "sys_set_prot"); 853 } 854 } 855 } 856 857 static void parse_property_segment(struct ta_elf *elf) 858 { 859 char *desc = NULL; 860 size_t align = elf->prop_align; 861 size_t desc_offset = 0; 862 size_t prop_offset = 0; 863 vaddr_t va = 0; 864 Elf_Note *note = NULL; 865 char *name = NULL; 866 867 if (!IS_ENABLED(CFG_TA_BTI) || !elf->prop_start) 868 return; 869 870 check_phdr_in_range(elf, PT_GNU_PROPERTY, elf->prop_start, 871 elf->prop_memsz); 872 873 va = elf->load_addr + elf->prop_start; 874 note = (void *)va; 875 name = (char *)(note + 1); 876 877 if (elf->prop_memsz < sizeof(*note) + sizeof(ELF_NOTE_GNU)) 878 return; 879 880 if (note->n_type != NT_GNU_PROPERTY_TYPE_0 || 881 note->n_namesz != sizeof(ELF_NOTE_GNU) || 882 memcmp(name, ELF_NOTE_GNU, sizeof(ELF_NOTE_GNU)) || 883 !IS_POWER_OF_TWO(align)) 884 return; 885 886 desc_offset = ROUNDUP(sizeof(*note) + sizeof(ELF_NOTE_GNU), align); 887 888 if (desc_offset > elf->prop_memsz || 889 ROUNDUP(desc_offset + note->n_descsz, align) > elf->prop_memsz) 890 return; 891 892 desc = (char *)(va + desc_offset); 893 894 do { 895 Elf_Prop *prop = (void *)(desc + prop_offset); 896 size_t data_offset = prop_offset + sizeof(*prop); 897 898 if (note->n_descsz < data_offset) 899 return; 900 901 data_offset = confine_array_index(data_offset, note->n_descsz); 902 903 if (prop->pr_type == GNU_PROPERTY_AARCH64_FEATURE_1_AND) { 904 uint32_t *pr_data = (void *)(desc + data_offset); 905 906 if (note->n_descsz < (data_offset + sizeof(*pr_data)) && 907 prop->pr_datasz != sizeof(*pr_data)) 908 return; 909 910 if (*pr_data & GNU_PROPERTY_AARCH64_FEATURE_1_BTI) { 911 DMSG("BTI Feature present in note property"); 912 elf->bti_enabled = true; 913 } 914 } 915 916 prop_offset += ROUNDUP(sizeof(*prop) + prop->pr_datasz, align); 917 } while (prop_offset < note->n_descsz); 918 } 919 920 static void map_segments(struct ta_elf *elf) 921 { 922 TEE_Result res = TEE_SUCCESS; 923 924 parse_load_segments(elf); 925 adjust_segments(elf); 926 if (TAILQ_FIRST(&elf->segs)->offset < SMALL_PAGE_SIZE) { 927 vaddr_t va = 0; 928 size_t sz = elf->max_addr - elf->load_addr; 929 struct segment *seg = TAILQ_LAST(&elf->segs, segment_head); 930 size_t pad_begin = get_pad_begin(); 931 932 /* 933 * We're loading a library, if not other parts of the code 934 * need to be updated too. 935 */ 936 assert(!elf->is_main); 937 938 /* 939 * Now that we know how much virtual memory is needed move 940 * the already mapped part to a location which can 941 * accommodate us. 942 */ 943 res = sys_remap(elf->load_addr, &va, sz, pad_begin, 944 roundup(seg->vaddr + seg->memsz)); 945 if (res == TEE_ERROR_OUT_OF_MEMORY) 946 res = sys_remap(elf->load_addr, &va, sz, 0, 947 roundup(seg->vaddr + seg->memsz)); 948 if (res) 949 err(res, "sys_remap"); 950 elf->ehdr_addr = va; 951 elf->load_addr = va; 952 elf->max_addr = va + sz; 953 elf->phdr = (void *)(va + elf->e_phoff); 954 } 955 } 956 957 static void add_deps_from_segment(struct ta_elf *elf, unsigned int type, 958 vaddr_t addr, size_t memsz) 959 { 960 size_t dyn_entsize = 0; 961 size_t num_dyns = 0; 962 size_t n = 0; 963 unsigned int tag = 0; 964 size_t val = 0; 965 TEE_UUID uuid = { }; 966 char *str_tab = NULL; 967 size_t str_tab_sz = 0; 968 969 if (type != PT_DYNAMIC) 970 return; 971 972 check_phdr_in_range(elf, type, addr, memsz); 973 974 if (elf->is_32bit) 975 dyn_entsize = sizeof(Elf32_Dyn); 976 else 977 dyn_entsize = sizeof(Elf64_Dyn); 978 979 assert(!(memsz % dyn_entsize)); 980 num_dyns = memsz / dyn_entsize; 981 982 for (n = 0; n < num_dyns && !(str_tab && str_tab_sz); n++) { 983 read_dyn(elf, addr, n, &tag, &val); 984 if (tag == DT_STRTAB) 985 str_tab = (char *)(val + elf->load_addr); 986 else if (tag == DT_STRSZ) 987 str_tab_sz = val; 988 } 989 check_range(elf, ".dynstr/STRTAB", str_tab, str_tab_sz); 990 991 for (n = 0; n < num_dyns; n++) { 992 read_dyn(elf, addr, n, &tag, &val); 993 if (tag != DT_NEEDED) 994 continue; 995 if (val >= str_tab_sz) 996 err(TEE_ERROR_BAD_FORMAT, 997 "Offset into .dynstr/STRTAB out of range"); 998 tee_uuid_from_str(&uuid, str_tab + val); 999 queue_elf(&uuid); 1000 } 1001 } 1002 1003 static void add_dependencies(struct ta_elf *elf) 1004 { 1005 size_t n = 0; 1006 1007 if (elf->is_32bit) { 1008 Elf32_Phdr *phdr = elf->phdr; 1009 1010 for (n = 0; n < elf->e_phnum; n++) 1011 add_deps_from_segment(elf, phdr[n].p_type, 1012 phdr[n].p_vaddr, phdr[n].p_memsz); 1013 } else { 1014 Elf64_Phdr *phdr = elf->phdr; 1015 1016 for (n = 0; n < elf->e_phnum; n++) 1017 add_deps_from_segment(elf, phdr[n].p_type, 1018 phdr[n].p_vaddr, phdr[n].p_memsz); 1019 } 1020 } 1021 1022 static void copy_section_headers(struct ta_elf *elf) 1023 { 1024 TEE_Result res = TEE_SUCCESS; 1025 size_t sz = 0; 1026 size_t offs = 0; 1027 1028 if (MUL_OVERFLOW(elf->e_shnum, elf->e_shentsize, &sz)) 1029 err(TEE_ERROR_BAD_FORMAT, "Section headers size overflow"); 1030 1031 elf->shdr = malloc(sz); 1032 if (!elf->shdr) 1033 err(TEE_ERROR_OUT_OF_MEMORY, "malloc"); 1034 1035 /* 1036 * We're assuming that section headers comes after the load segments, 1037 * but if it's a very small dynamically linked library the section 1038 * headers can still end up (partially?) in the first mapped page. 1039 */ 1040 if (elf->e_shoff < SMALL_PAGE_SIZE) { 1041 assert(!elf->is_main); 1042 offs = MIN(SMALL_PAGE_SIZE - elf->e_shoff, sz); 1043 memcpy(elf->shdr, (void *)(elf->load_addr + elf->e_shoff), 1044 offs); 1045 } 1046 1047 if (offs < sz) { 1048 res = sys_copy_from_ta_bin((uint8_t *)elf->shdr + offs, 1049 sz - offs, elf->handle, 1050 elf->e_shoff + offs); 1051 if (res) 1052 err(res, "sys_copy_from_ta_bin"); 1053 } 1054 } 1055 1056 static void close_handle(struct ta_elf *elf) 1057 { 1058 TEE_Result res = sys_close_ta_bin(elf->handle); 1059 1060 if (res) 1061 err(res, "sys_close_ta_bin"); 1062 elf->handle = -1; 1063 } 1064 1065 static void clean_elf_load_main(struct ta_elf *elf) 1066 { 1067 TEE_Result res = TEE_SUCCESS; 1068 1069 /* 1070 * Clean up from last attempt to load 1071 */ 1072 res = sys_unmap(elf->ehdr_addr, SMALL_PAGE_SIZE); 1073 if (res) 1074 err(res, "sys_unmap"); 1075 1076 while (!TAILQ_EMPTY(&elf->segs)) { 1077 struct segment *seg = TAILQ_FIRST(&elf->segs); 1078 vaddr_t va = 0; 1079 size_t num_bytes = 0; 1080 1081 va = rounddown(elf->load_addr + seg->vaddr); 1082 if (seg->remapped_writeable) 1083 num_bytes = roundup(seg->vaddr + seg->memsz) - 1084 rounddown(seg->vaddr); 1085 else 1086 num_bytes = seg->memsz; 1087 1088 res = sys_unmap(va, num_bytes); 1089 if (res) 1090 err(res, "sys_unmap"); 1091 1092 TAILQ_REMOVE(&elf->segs, seg, link); 1093 free(seg); 1094 } 1095 1096 free(elf->shdr); 1097 memset(&elf->is_32bit, 0, 1098 (vaddr_t)&elf->uuid - (vaddr_t)&elf->is_32bit); 1099 1100 TAILQ_INIT(&elf->segs); 1101 } 1102 1103 #ifdef ARM64 1104 /* 1105 * Allocates an offset in the TA's Thread Control Block for the TLS segment of 1106 * the @elf module. 1107 */ 1108 #define TCB_HEAD_SIZE (2 * sizeof(long)) 1109 static void set_tls_offset(struct ta_elf *elf) 1110 { 1111 static size_t next_offs = TCB_HEAD_SIZE; 1112 1113 if (!elf->tls_start) 1114 return; 1115 1116 /* Module has a TLS segment */ 1117 elf->tls_tcb_offs = next_offs; 1118 next_offs += elf->tls_memsz; 1119 } 1120 #else 1121 static void set_tls_offset(struct ta_elf *elf __unused) {} 1122 #endif 1123 1124 static void load_main(struct ta_elf *elf) 1125 { 1126 init_elf(elf); 1127 map_segments(elf); 1128 populate_segments(elf); 1129 add_dependencies(elf); 1130 copy_section_headers(elf); 1131 save_symtab(elf); 1132 close_handle(elf); 1133 set_tls_offset(elf); 1134 parse_property_segment(elf); 1135 if (elf->bti_enabled) 1136 ta_elf_add_bti(elf); 1137 1138 elf->head = (struct ta_head *)elf->load_addr; 1139 if (elf->head->depr_entry != UINT64_MAX) { 1140 /* 1141 * Legacy TAs sets their entry point in ta_head. For 1142 * non-legacy TAs the entry point of the ELF is set instead 1143 * and leaving the ta_head entry point set to UINT64_MAX to 1144 * indicate that it's not used. 1145 * 1146 * NB, everything before the commit a73b5878c89d ("Replace 1147 * ta_head.entry with elf entry") is considered legacy TAs 1148 * for ldelf. 1149 * 1150 * Legacy TAs cannot be mapped with shared memory segments 1151 * so restart the mapping if it turned out we're loading a 1152 * legacy TA. 1153 */ 1154 1155 DMSG("Reloading TA %pUl as legacy TA", (void *)&elf->uuid); 1156 clean_elf_load_main(elf); 1157 elf->is_legacy = true; 1158 init_elf(elf); 1159 map_segments(elf); 1160 populate_segments_legacy(elf); 1161 add_dependencies(elf); 1162 copy_section_headers(elf); 1163 save_symtab(elf); 1164 close_handle(elf); 1165 elf->head = (struct ta_head *)elf->load_addr; 1166 /* 1167 * Check that the TA is still a legacy TA, if it isn't give 1168 * up now since we're likely under attack. 1169 */ 1170 if (elf->head->depr_entry == UINT64_MAX) 1171 err(TEE_ERROR_GENERIC, 1172 "TA %pUl was changed on disk to non-legacy", 1173 (void *)&elf->uuid); 1174 } 1175 1176 } 1177 1178 void ta_elf_load_main(const TEE_UUID *uuid, uint32_t *is_32bit, uint64_t *sp, 1179 uint32_t *ta_flags) 1180 { 1181 struct ta_elf *elf = queue_elf(uuid); 1182 vaddr_t va = 0; 1183 TEE_Result res = TEE_SUCCESS; 1184 1185 assert(elf); 1186 elf->is_main = true; 1187 1188 load_main(elf); 1189 1190 *is_32bit = elf->is_32bit; 1191 res = sys_map_zi(elf->head->stack_size, 0, &va, 0, 0); 1192 if (res) 1193 err(res, "sys_map_zi stack"); 1194 1195 if (elf->head->flags & ~TA_FLAGS_MASK) 1196 err(TEE_ERROR_BAD_FORMAT, "Invalid TA flags(s) %#"PRIx32, 1197 elf->head->flags & ~TA_FLAGS_MASK); 1198 1199 *ta_flags = elf->head->flags; 1200 *sp = va + elf->head->stack_size; 1201 ta_stack = va; 1202 ta_stack_size = elf->head->stack_size; 1203 } 1204 1205 void ta_elf_finalize_load_main(uint64_t *entry) 1206 { 1207 struct ta_elf *elf = TAILQ_FIRST(&main_elf_queue); 1208 TEE_Result res = TEE_SUCCESS; 1209 1210 assert(elf->is_main); 1211 1212 res = ta_elf_set_init_fini_info_compat(elf->is_32bit); 1213 if (res) 1214 err(res, "ta_elf_set_init_fini_info_compat"); 1215 res = ta_elf_set_elf_phdr_info(elf->is_32bit); 1216 if (res) 1217 err(res, "ta_elf_set_elf_phdr_info"); 1218 1219 if (elf->is_legacy) 1220 *entry = elf->head->depr_entry; 1221 else 1222 *entry = elf->e_entry + elf->load_addr; 1223 } 1224 1225 1226 void ta_elf_load_dependency(struct ta_elf *elf, bool is_32bit) 1227 { 1228 if (elf->is_main) 1229 return; 1230 1231 init_elf(elf); 1232 if (elf->is_32bit != is_32bit) 1233 err(TEE_ERROR_BAD_FORMAT, "ELF %pUl is %sbit (expected %sbit)", 1234 (void *)&elf->uuid, elf->is_32bit ? "32" : "64", 1235 is_32bit ? "32" : "64"); 1236 1237 map_segments(elf); 1238 populate_segments(elf); 1239 add_dependencies(elf); 1240 copy_section_headers(elf); 1241 save_symtab(elf); 1242 close_handle(elf); 1243 set_tls_offset(elf); 1244 parse_property_segment(elf); 1245 if (elf->bti_enabled) 1246 ta_elf_add_bti(elf); 1247 } 1248 1249 void ta_elf_finalize_mappings(struct ta_elf *elf) 1250 { 1251 TEE_Result res = TEE_SUCCESS; 1252 struct segment *seg = NULL; 1253 1254 if (!elf->is_legacy) 1255 return; 1256 1257 TAILQ_FOREACH(seg, &elf->segs, link) { 1258 vaddr_t va = elf->load_addr + seg->vaddr; 1259 uint32_t flags = 0; 1260 1261 if (seg->flags & PF_W) 1262 flags |= LDELF_MAP_FLAG_WRITEABLE; 1263 if (seg->flags & PF_X) 1264 flags |= LDELF_MAP_FLAG_EXECUTABLE; 1265 1266 res = sys_set_prot(va, seg->memsz, flags); 1267 if (res) 1268 err(res, "sys_set_prot"); 1269 } 1270 } 1271 1272 static void __printf(3, 4) print_wrapper(void *pctx, print_func_t print_func, 1273 const char *fmt, ...) 1274 { 1275 va_list ap; 1276 1277 va_start(ap, fmt); 1278 print_func(pctx, fmt, ap); 1279 va_end(ap); 1280 } 1281 1282 static void print_seg(void *pctx, print_func_t print_func, 1283 size_t idx __maybe_unused, int elf_idx __maybe_unused, 1284 vaddr_t va __maybe_unused, paddr_t pa __maybe_unused, 1285 size_t sz __maybe_unused, uint32_t flags) 1286 { 1287 int rc __maybe_unused = 0; 1288 int width __maybe_unused = 8; 1289 char desc[14] __maybe_unused = ""; 1290 char flags_str[] __maybe_unused = "----"; 1291 1292 if (elf_idx > -1) { 1293 rc = snprintf(desc, sizeof(desc), " [%d]", elf_idx); 1294 assert(rc >= 0); 1295 } else { 1296 if (flags & DUMP_MAP_EPHEM) { 1297 rc = snprintf(desc, sizeof(desc), " (param)"); 1298 assert(rc >= 0); 1299 } 1300 if (flags & DUMP_MAP_LDELF) { 1301 rc = snprintf(desc, sizeof(desc), " (ldelf)"); 1302 assert(rc >= 0); 1303 } 1304 if (va == ta_stack) { 1305 rc = snprintf(desc, sizeof(desc), " (stack)"); 1306 assert(rc >= 0); 1307 } 1308 } 1309 1310 if (flags & DUMP_MAP_READ) 1311 flags_str[0] = 'r'; 1312 if (flags & DUMP_MAP_WRITE) 1313 flags_str[1] = 'w'; 1314 if (flags & DUMP_MAP_EXEC) 1315 flags_str[2] = 'x'; 1316 if (flags & DUMP_MAP_SECURE) 1317 flags_str[3] = 's'; 1318 1319 print_wrapper(pctx, print_func, 1320 "region %2zu: va 0x%0*"PRIxVA" pa 0x%0*"PRIxPA" size 0x%06zx flags %s%s\n", 1321 idx, width, va, width, pa, sz, flags_str, desc); 1322 } 1323 1324 static bool get_next_in_order(struct ta_elf_queue *elf_queue, 1325 struct ta_elf **elf, struct segment **seg, 1326 size_t *elf_idx) 1327 { 1328 struct ta_elf *e = NULL; 1329 struct segment *s = NULL; 1330 size_t idx = 0; 1331 vaddr_t va = 0; 1332 struct ta_elf *e2 = NULL; 1333 size_t i2 = 0; 1334 1335 assert(elf && seg && elf_idx); 1336 e = *elf; 1337 s = *seg; 1338 assert((e == NULL && s == NULL) || (e != NULL && s != NULL)); 1339 1340 if (s) { 1341 s = TAILQ_NEXT(s, link); 1342 if (s) { 1343 *seg = s; 1344 return true; 1345 } 1346 } 1347 1348 if (e) 1349 va = e->load_addr; 1350 1351 /* Find the ELF with next load address */ 1352 e = NULL; 1353 TAILQ_FOREACH(e2, elf_queue, link) { 1354 if (e2->load_addr > va) { 1355 if (!e || e2->load_addr < e->load_addr) { 1356 e = e2; 1357 idx = i2; 1358 } 1359 } 1360 i2++; 1361 } 1362 if (!e) 1363 return false; 1364 1365 *elf = e; 1366 *seg = TAILQ_FIRST(&e->segs); 1367 *elf_idx = idx; 1368 return true; 1369 } 1370 1371 void ta_elf_print_mappings(void *pctx, print_func_t print_func, 1372 struct ta_elf_queue *elf_queue, size_t num_maps, 1373 struct dump_map *maps, vaddr_t mpool_base) 1374 { 1375 struct segment *seg = NULL; 1376 struct ta_elf *elf = NULL; 1377 size_t elf_idx = 0; 1378 size_t idx = 0; 1379 size_t map_idx = 0; 1380 1381 /* 1382 * Loop over all segments and maps, printing virtual address in 1383 * order. Segment has priority if the virtual address is present 1384 * in both map and segment. 1385 */ 1386 get_next_in_order(elf_queue, &elf, &seg, &elf_idx); 1387 while (true) { 1388 vaddr_t va = -1; 1389 size_t sz = 0; 1390 uint32_t flags = DUMP_MAP_SECURE; 1391 size_t offs = 0; 1392 1393 if (seg) { 1394 va = rounddown(seg->vaddr + elf->load_addr); 1395 sz = roundup(seg->vaddr + seg->memsz) - 1396 rounddown(seg->vaddr); 1397 } 1398 1399 while (map_idx < num_maps && maps[map_idx].va <= va) { 1400 uint32_t f = 0; 1401 1402 /* If there's a match, it should be the same map */ 1403 if (maps[map_idx].va == va) { 1404 /* 1405 * In shared libraries the first page is 1406 * mapped separately with the rest of that 1407 * segment following back to back in a 1408 * separate entry. 1409 */ 1410 if (map_idx + 1 < num_maps && 1411 maps[map_idx].sz == SMALL_PAGE_SIZE) { 1412 vaddr_t next_va = maps[map_idx].va + 1413 maps[map_idx].sz; 1414 size_t comb_sz = maps[map_idx].sz + 1415 maps[map_idx + 1].sz; 1416 1417 if (next_va == maps[map_idx + 1].va && 1418 comb_sz == sz && 1419 maps[map_idx].flags == 1420 maps[map_idx + 1].flags) { 1421 /* Skip this and next entry */ 1422 map_idx += 2; 1423 continue; 1424 } 1425 } 1426 assert(maps[map_idx].sz == sz); 1427 } else if (maps[map_idx].va < va) { 1428 if (maps[map_idx].va == mpool_base) 1429 f |= DUMP_MAP_LDELF; 1430 print_seg(pctx, print_func, idx, -1, 1431 maps[map_idx].va, maps[map_idx].pa, 1432 maps[map_idx].sz, 1433 maps[map_idx].flags | f); 1434 idx++; 1435 } 1436 map_idx++; 1437 } 1438 1439 if (!seg) 1440 break; 1441 1442 offs = rounddown(seg->offset); 1443 if (seg->flags & PF_R) 1444 flags |= DUMP_MAP_READ; 1445 if (seg->flags & PF_W) 1446 flags |= DUMP_MAP_WRITE; 1447 if (seg->flags & PF_X) 1448 flags |= DUMP_MAP_EXEC; 1449 1450 print_seg(pctx, print_func, idx, elf_idx, va, offs, sz, flags); 1451 idx++; 1452 1453 if (!get_next_in_order(elf_queue, &elf, &seg, &elf_idx)) 1454 seg = NULL; 1455 } 1456 1457 elf_idx = 0; 1458 TAILQ_FOREACH(elf, elf_queue, link) { 1459 print_wrapper(pctx, print_func, 1460 " [%zu] %pUl @ 0x%0*"PRIxVA"\n", 1461 elf_idx, (void *)&elf->uuid, 8, elf->load_addr); 1462 elf_idx++; 1463 } 1464 } 1465 1466 #ifdef CFG_UNWIND 1467 /* Called by libunw */ 1468 bool find_exidx(vaddr_t addr, vaddr_t *idx_start, vaddr_t *idx_end) 1469 { 1470 struct segment *seg = NULL; 1471 struct ta_elf *elf = NULL; 1472 vaddr_t a = 0; 1473 1474 TAILQ_FOREACH(elf, &main_elf_queue, link) { 1475 if (addr < elf->load_addr) 1476 continue; 1477 a = addr - elf->load_addr; 1478 TAILQ_FOREACH(seg, &elf->segs, link) { 1479 if (a < seg->vaddr) 1480 continue; 1481 if (a - seg->vaddr < seg->filesz) { 1482 *idx_start = elf->exidx_start + elf->load_addr; 1483 *idx_end = elf->exidx_start + elf->load_addr + 1484 elf->exidx_size; 1485 return true; 1486 } 1487 } 1488 } 1489 1490 return false; 1491 } 1492 1493 void ta_elf_stack_trace_a32(uint32_t regs[16]) 1494 { 1495 struct unwind_state_arm32 state = { }; 1496 1497 memcpy(state.registers, regs, sizeof(state.registers)); 1498 print_stack_arm32(&state, ta_stack, ta_stack_size); 1499 } 1500 1501 void ta_elf_stack_trace_a64(uint64_t fp, uint64_t sp, uint64_t pc) 1502 { 1503 struct unwind_state_arm64 state = { .fp = fp, .sp = sp, .pc = pc }; 1504 1505 print_stack_arm64(&state, ta_stack, ta_stack_size); 1506 } 1507 #endif 1508 1509 TEE_Result ta_elf_add_library(const TEE_UUID *uuid) 1510 { 1511 TEE_Result res = TEE_ERROR_GENERIC; 1512 struct ta_elf *ta = TAILQ_FIRST(&main_elf_queue); 1513 struct ta_elf *lib = ta_elf_find_elf(uuid); 1514 struct ta_elf *elf = NULL; 1515 1516 if (lib) 1517 return TEE_SUCCESS; /* Already mapped */ 1518 1519 lib = queue_elf_helper(uuid); 1520 if (!lib) 1521 return TEE_ERROR_OUT_OF_MEMORY; 1522 1523 for (elf = lib; elf; elf = TAILQ_NEXT(elf, link)) 1524 ta_elf_load_dependency(elf, ta->is_32bit); 1525 1526 for (elf = lib; elf; elf = TAILQ_NEXT(elf, link)) { 1527 ta_elf_relocate(elf); 1528 ta_elf_finalize_mappings(elf); 1529 } 1530 1531 for (elf = lib; elf; elf = TAILQ_NEXT(elf, link)) 1532 DMSG("ELF (%pUl) at %#"PRIxVA, 1533 (void *)&elf->uuid, elf->load_addr); 1534 1535 res = ta_elf_set_init_fini_info_compat(ta->is_32bit); 1536 if (res) 1537 return res; 1538 1539 return ta_elf_set_elf_phdr_info(ta->is_32bit); 1540 } 1541 1542 /* Get address/size of .init_array and .fini_array from the dynamic segment */ 1543 static void get_init_fini_array(struct ta_elf *elf, unsigned int type, 1544 vaddr_t addr, size_t memsz, vaddr_t *init, 1545 size_t *init_cnt, vaddr_t *fini, 1546 size_t *fini_cnt) 1547 { 1548 size_t addrsz = 0; 1549 size_t dyn_entsize = 0; 1550 size_t num_dyns = 0; 1551 size_t n = 0; 1552 unsigned int tag = 0; 1553 size_t val = 0; 1554 1555 assert(type == PT_DYNAMIC); 1556 1557 check_phdr_in_range(elf, type, addr, memsz); 1558 1559 if (elf->is_32bit) { 1560 dyn_entsize = sizeof(Elf32_Dyn); 1561 addrsz = 4; 1562 } else { 1563 dyn_entsize = sizeof(Elf64_Dyn); 1564 addrsz = 8; 1565 } 1566 1567 assert(!(memsz % dyn_entsize)); 1568 num_dyns = memsz / dyn_entsize; 1569 1570 for (n = 0; n < num_dyns; n++) { 1571 read_dyn(elf, addr, n, &tag, &val); 1572 if (tag == DT_INIT_ARRAY) 1573 *init = val + elf->load_addr; 1574 else if (tag == DT_FINI_ARRAY) 1575 *fini = val + elf->load_addr; 1576 else if (tag == DT_INIT_ARRAYSZ) 1577 *init_cnt = val / addrsz; 1578 else if (tag == DT_FINI_ARRAYSZ) 1579 *fini_cnt = val / addrsz; 1580 } 1581 } 1582 1583 /* Get address/size of .init_array and .fini_array in @elf (if present) */ 1584 static void elf_get_init_fini_array(struct ta_elf *elf, vaddr_t *init, 1585 size_t *init_cnt, vaddr_t *fini, 1586 size_t *fini_cnt) 1587 { 1588 size_t n = 0; 1589 1590 if (elf->is_32bit) { 1591 Elf32_Phdr *phdr = elf->phdr; 1592 1593 for (n = 0; n < elf->e_phnum; n++) { 1594 if (phdr[n].p_type == PT_DYNAMIC) { 1595 get_init_fini_array(elf, phdr[n].p_type, 1596 phdr[n].p_vaddr, 1597 phdr[n].p_memsz, 1598 init, init_cnt, fini, 1599 fini_cnt); 1600 return; 1601 } 1602 } 1603 } else { 1604 Elf64_Phdr *phdr = elf->phdr; 1605 1606 for (n = 0; n < elf->e_phnum; n++) { 1607 if (phdr[n].p_type == PT_DYNAMIC) { 1608 get_init_fini_array(elf, phdr[n].p_type, 1609 phdr[n].p_vaddr, 1610 phdr[n].p_memsz, 1611 init, init_cnt, fini, 1612 fini_cnt); 1613 return; 1614 } 1615 } 1616 } 1617 } 1618 1619 /* 1620 * Deprecated by __elf_phdr_info below. Kept for compatibility. 1621 * 1622 * Pointers to ELF initialization and finalization functions are extracted by 1623 * ldelf and stored on the TA heap, then exported to the TA via the global 1624 * symbol __init_fini_info. libutee in OP-TEE 3.9.0 uses this mechanism. 1625 */ 1626 1627 struct __init_fini { 1628 uint32_t flags; 1629 uint16_t init_size; 1630 uint16_t fini_size; 1631 1632 void (**init)(void); /* @init_size entries */ 1633 void (**fini)(void); /* @fini_size entries */ 1634 }; 1635 1636 #define __IFS_VALID BIT(0) 1637 #define __IFS_INIT_HAS_RUN BIT(1) 1638 #define __IFS_FINI_HAS_RUN BIT(2) 1639 1640 struct __init_fini_info { 1641 uint32_t reserved; 1642 uint16_t size; 1643 uint16_t pad; 1644 struct __init_fini *ifs; /* @size entries */ 1645 }; 1646 1647 /* 32-bit variants for a 64-bit ldelf to access a 32-bit TA */ 1648 1649 struct __init_fini32 { 1650 uint32_t flags; 1651 uint16_t init_size; 1652 uint16_t fini_size; 1653 uint32_t init; 1654 uint32_t fini; 1655 }; 1656 1657 struct __init_fini_info32 { 1658 uint32_t reserved; 1659 uint16_t size; 1660 uint16_t pad; 1661 uint32_t ifs; 1662 }; 1663 1664 static TEE_Result realloc_ifs(vaddr_t va, size_t cnt, bool is_32bit) 1665 { 1666 struct __init_fini_info32 *info32 = (struct __init_fini_info32 *)va; 1667 struct __init_fini_info *info = (struct __init_fini_info *)va; 1668 struct __init_fini32 *ifs32 = NULL; 1669 struct __init_fini *ifs = NULL; 1670 size_t prev_cnt = 0; 1671 void *ptr = NULL; 1672 1673 if (is_32bit) { 1674 ptr = (void *)(vaddr_t)info32->ifs; 1675 ptr = realloc(ptr, cnt * sizeof(struct __init_fini32)); 1676 if (!ptr) 1677 return TEE_ERROR_OUT_OF_MEMORY; 1678 ifs32 = ptr; 1679 prev_cnt = info32->size; 1680 if (cnt > prev_cnt) 1681 memset(ifs32 + prev_cnt, 0, 1682 (cnt - prev_cnt) * sizeof(*ifs32)); 1683 info32->ifs = (uint32_t)(vaddr_t)ifs32; 1684 info32->size = cnt; 1685 } else { 1686 ptr = realloc(info->ifs, cnt * sizeof(struct __init_fini)); 1687 if (!ptr) 1688 return TEE_ERROR_OUT_OF_MEMORY; 1689 ifs = ptr; 1690 prev_cnt = info->size; 1691 if (cnt > prev_cnt) 1692 memset(ifs + prev_cnt, 0, 1693 (cnt - prev_cnt) * sizeof(*ifs)); 1694 info->ifs = ifs; 1695 info->size = cnt; 1696 } 1697 1698 return TEE_SUCCESS; 1699 } 1700 1701 static void fill_ifs(vaddr_t va, size_t idx, struct ta_elf *elf, bool is_32bit) 1702 { 1703 struct __init_fini_info32 *info32 = (struct __init_fini_info32 *)va; 1704 struct __init_fini_info *info = (struct __init_fini_info *)va; 1705 struct __init_fini32 *ifs32 = NULL; 1706 struct __init_fini *ifs = NULL; 1707 size_t init_cnt = 0; 1708 size_t fini_cnt = 0; 1709 vaddr_t init = 0; 1710 vaddr_t fini = 0; 1711 1712 if (is_32bit) { 1713 assert(idx < info32->size); 1714 ifs32 = &((struct __init_fini32 *)(vaddr_t)info32->ifs)[idx]; 1715 1716 if (ifs32->flags & __IFS_VALID) 1717 return; 1718 1719 elf_get_init_fini_array(elf, &init, &init_cnt, &fini, 1720 &fini_cnt); 1721 1722 ifs32->init = (uint32_t)init; 1723 ifs32->init_size = init_cnt; 1724 1725 ifs32->fini = (uint32_t)fini; 1726 ifs32->fini_size = fini_cnt; 1727 1728 ifs32->flags |= __IFS_VALID; 1729 } else { 1730 assert(idx < info->size); 1731 ifs = &info->ifs[idx]; 1732 1733 if (ifs->flags & __IFS_VALID) 1734 return; 1735 1736 elf_get_init_fini_array(elf, &init, &init_cnt, &fini, 1737 &fini_cnt); 1738 1739 ifs->init = (void (**)(void))init; 1740 ifs->init_size = init_cnt; 1741 1742 ifs->fini = (void (**)(void))fini; 1743 ifs->fini_size = fini_cnt; 1744 1745 ifs->flags |= __IFS_VALID; 1746 } 1747 } 1748 1749 /* 1750 * Set or update __init_fini_info in the TA with information from the ELF 1751 * queue 1752 */ 1753 TEE_Result ta_elf_set_init_fini_info_compat(bool is_32bit) 1754 { 1755 struct __init_fini_info *info = NULL; 1756 TEE_Result res = TEE_SUCCESS; 1757 struct ta_elf *elf = NULL; 1758 vaddr_t info_va = 0; 1759 size_t cnt = 0; 1760 1761 res = ta_elf_resolve_sym("__init_fini_info", &info_va, NULL, NULL); 1762 if (res) { 1763 if (res == TEE_ERROR_ITEM_NOT_FOUND) { 1764 /* 1765 * Not an error, only TAs linked against libutee from 1766 * OP-TEE 3.9.0 have this symbol. 1767 */ 1768 return TEE_SUCCESS; 1769 } 1770 return res; 1771 } 1772 assert(info_va); 1773 1774 info = (struct __init_fini_info *)info_va; 1775 if (info->reserved) 1776 return TEE_ERROR_NOT_SUPPORTED; 1777 1778 TAILQ_FOREACH(elf, &main_elf_queue, link) 1779 cnt++; 1780 1781 /* Queue has at least one file (main) */ 1782 assert(cnt); 1783 1784 res = realloc_ifs(info_va, cnt, is_32bit); 1785 if (res) 1786 goto err; 1787 1788 cnt = 0; 1789 TAILQ_FOREACH(elf, &main_elf_queue, link) { 1790 fill_ifs(info_va, cnt, elf, is_32bit); 1791 cnt++; 1792 } 1793 1794 return TEE_SUCCESS; 1795 err: 1796 free(info); 1797 return res; 1798 } 1799 1800 static TEE_Result realloc_elf_phdr_info(vaddr_t va, size_t cnt, bool is_32bit) 1801 { 1802 struct __elf_phdr_info32 *info32 = (struct __elf_phdr_info32 *)va; 1803 struct __elf_phdr_info *info = (struct __elf_phdr_info *)va; 1804 struct dl_phdr_info32 *dlpi32 = NULL; 1805 struct dl_phdr_info *dlpi = NULL; 1806 size_t prev_cnt = 0; 1807 void *ptr = NULL; 1808 1809 if (is_32bit) { 1810 ptr = (void *)(vaddr_t)info32->dlpi; 1811 ptr = realloc(ptr, cnt * sizeof(*dlpi32)); 1812 if (!ptr) 1813 return TEE_ERROR_OUT_OF_MEMORY; 1814 dlpi32 = ptr; 1815 prev_cnt = info32->count; 1816 if (cnt > prev_cnt) 1817 memset(dlpi32 + prev_cnt, 0, 1818 (cnt - prev_cnt) * sizeof(*dlpi32)); 1819 info32->dlpi = (uint32_t)(vaddr_t)dlpi32; 1820 info32->count = cnt; 1821 } else { 1822 ptr = realloc(info->dlpi, cnt * sizeof(*dlpi)); 1823 if (!ptr) 1824 return TEE_ERROR_OUT_OF_MEMORY; 1825 dlpi = ptr; 1826 prev_cnt = info->count; 1827 if (cnt > prev_cnt) 1828 memset(dlpi + prev_cnt, 0, 1829 (cnt - prev_cnt) * sizeof(*dlpi)); 1830 info->dlpi = dlpi; 1831 info->count = cnt; 1832 } 1833 1834 return TEE_SUCCESS; 1835 } 1836 1837 static void fill_elf_phdr_info(vaddr_t va, size_t idx, struct ta_elf *elf, 1838 bool is_32bit) 1839 { 1840 struct __elf_phdr_info32 *info32 = (struct __elf_phdr_info32 *)va; 1841 struct __elf_phdr_info *info = (struct __elf_phdr_info *)va; 1842 struct dl_phdr_info32 *dlpi32 = NULL; 1843 struct dl_phdr_info *dlpi = NULL; 1844 1845 if (is_32bit) { 1846 assert(idx < info32->count); 1847 dlpi32 = (struct dl_phdr_info32 *)(vaddr_t)info32->dlpi + idx; 1848 1849 dlpi32->dlpi_addr = elf->load_addr; 1850 if (elf->soname) 1851 dlpi32->dlpi_name = (vaddr_t)elf->soname; 1852 else 1853 dlpi32->dlpi_name = (vaddr_t)&info32->zero; 1854 dlpi32->dlpi_phdr = (vaddr_t)elf->phdr; 1855 dlpi32->dlpi_phnum = elf->e_phnum; 1856 dlpi32->dlpi_adds = 1; /* No unloading on dlclose() currently */ 1857 dlpi32->dlpi_subs = 0; /* No unloading on dlclose() currently */ 1858 dlpi32->dlpi_tls_modid = elf->tls_mod_id; 1859 dlpi32->dlpi_tls_data = elf->tls_start; 1860 } else { 1861 assert(idx < info->count); 1862 dlpi = info->dlpi + idx; 1863 1864 dlpi->dlpi_addr = elf->load_addr; 1865 if (elf->soname) 1866 dlpi->dlpi_name = elf->soname; 1867 else 1868 dlpi->dlpi_name = &info32->zero; 1869 dlpi->dlpi_phdr = elf->phdr; 1870 dlpi->dlpi_phnum = elf->e_phnum; 1871 dlpi->dlpi_adds = 1; /* No unloading on dlclose() currently */ 1872 dlpi->dlpi_subs = 0; /* No unloading on dlclose() currently */ 1873 dlpi->dlpi_tls_modid = elf->tls_mod_id; 1874 dlpi->dlpi_tls_data = (void *)elf->tls_start; 1875 } 1876 } 1877 1878 /* Set or update __elf_hdr_info in the TA with information from the ELF queue */ 1879 TEE_Result ta_elf_set_elf_phdr_info(bool is_32bit) 1880 { 1881 struct __elf_phdr_info *info = NULL; 1882 TEE_Result res = TEE_SUCCESS; 1883 struct ta_elf *elf = NULL; 1884 vaddr_t info_va = 0; 1885 size_t cnt = 0; 1886 1887 res = ta_elf_resolve_sym("__elf_phdr_info", &info_va, NULL, NULL); 1888 if (res) { 1889 if (res == TEE_ERROR_ITEM_NOT_FOUND) { 1890 /* Older TA */ 1891 return TEE_SUCCESS; 1892 } 1893 return res; 1894 } 1895 assert(info_va); 1896 1897 info = (struct __elf_phdr_info *)info_va; 1898 if (info->reserved) 1899 return TEE_ERROR_NOT_SUPPORTED; 1900 1901 TAILQ_FOREACH(elf, &main_elf_queue, link) 1902 cnt++; 1903 1904 res = realloc_elf_phdr_info(info_va, cnt, is_32bit); 1905 if (res) 1906 return res; 1907 1908 cnt = 0; 1909 TAILQ_FOREACH(elf, &main_elf_queue, link) { 1910 fill_elf_phdr_info(info_va, cnt, elf, is_32bit); 1911 cnt++; 1912 } 1913 1914 return TEE_SUCCESS; 1915 } 1916