// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2019, Linaro Limited
 * Copyright (c) 2020, Arm Limited
 */

#include <assert.h>
#include <config.h>
#include <confine_array_index.h>
#include <ctype.h>
#include <elf32.h>
#include <elf64.h>
#include <elf_common.h>
#include <ldelf.h>
#include <link.h>
#include <stdio.h>
#include <stdlib.h>
#include <string_ext.h>
#include <string.h>
#include <tee_api_types.h>
#include <tee_internal_api_extensions.h>
#include <unw/unwind.h>
#include <user_ta_header.h>
#include <util.h>

#include "sys.h"
#include "ta_elf.h"

/*
 * Layout of a 32-bit struct dl_phdr_info for a 64-bit ldelf to access a 32-bit
 * TA
 */
struct dl_phdr_info32 {
	uint32_t dlpi_addr;
	uint32_t dlpi_name;
	uint32_t dlpi_phdr;
	uint16_t dlpi_phnum;
	uint64_t dlpi_adds;
	uint64_t dlpi_subs;
	uint32_t dlpi_tls_modid;
	uint32_t dlpi_tls_data;
};

static vaddr_t ta_stack;
static vaddr_t ta_stack_size;

struct ta_elf_queue main_elf_queue = TAILQ_HEAD_INITIALIZER(main_elf_queue);

/*
 * Main application is always ID 1, shared libraries with TLS take IDs 2 and
 * above
 */
static void assign_tls_mod_id(struct ta_elf *elf)
{
	static size_t last_tls_mod_id = 1;

	if (elf->is_main)
		assert(last_tls_mod_id == 1); /* Main always comes first */
	elf->tls_mod_id = last_tls_mod_id++;
}

static struct ta_elf *queue_elf_helper(const TEE_UUID *uuid)
{
	struct ta_elf *elf = calloc(1, sizeof(*elf));

	if (!elf)
		return NULL;

	TAILQ_INIT(&elf->segs);

	elf->uuid = *uuid;
	TAILQ_INSERT_TAIL(&main_elf_queue, elf, link);
	return elf;
}

static struct ta_elf *queue_elf(const TEE_UUID *uuid)
{
	struct ta_elf *elf = ta_elf_find_elf(uuid);

	if (elf)
		return NULL;

	elf = queue_elf_helper(uuid);
	if (!elf)
		err(TEE_ERROR_OUT_OF_MEMORY, "queue_elf_helper");

	return elf;
}

struct ta_elf *ta_elf_find_elf(const TEE_UUID *uuid)
{
	struct ta_elf *elf = NULL;

	TAILQ_FOREACH(elf, &main_elf_queue, link)
		if (!memcmp(uuid, &elf->uuid, sizeof(*uuid)))
			return elf;

	return NULL;
}

static TEE_Result e32_parse_ehdr(struct ta_elf *elf, Elf32_Ehdr *ehdr)
{
	if (ehdr->e_ident[EI_VERSION] != EV_CURRENT ||
	    ehdr->e_ident[EI_CLASS] != ELFCLASS32 ||
	    ehdr->e_ident[EI_DATA] != ELFDATA2LSB ||
	    ehdr->e_ident[EI_OSABI] != ELFOSABI_NONE ||
	    ehdr->e_type != ET_DYN || ehdr->e_machine != EM_ARM ||
	    (ehdr->e_flags & EF_ARM_ABIMASK) != EF_ARM_ABI_VERSION ||
#ifndef CFG_WITH_VFP
	    (ehdr->e_flags & EF_ARM_ABI_FLOAT_HARD) ||
#endif
	    ehdr->e_phentsize != sizeof(Elf32_Phdr) ||
	    ehdr->e_shentsize != sizeof(Elf32_Shdr))
		return TEE_ERROR_BAD_FORMAT;

	elf->is_32bit = true;
	elf->e_entry = ehdr->e_entry;
	elf->e_phoff = ehdr->e_phoff;
	elf->e_shoff = ehdr->e_shoff;
	elf->e_phnum = ehdr->e_phnum;
	elf->e_shnum = ehdr->e_shnum;
	elf->e_phentsize = ehdr->e_phentsize;
	elf->e_shentsize = ehdr->e_shentsize;

	return TEE_SUCCESS;
}

#ifdef ARM64
static TEE_Result e64_parse_ehdr(struct ta_elf *elf, Elf64_Ehdr *ehdr)
{
	if (ehdr->e_ident[EI_VERSION] != EV_CURRENT ||
	    ehdr->e_ident[EI_CLASS] != ELFCLASS64 ||
	    ehdr->e_ident[EI_DATA] != ELFDATA2LSB ||
	    ehdr->e_ident[EI_OSABI] != ELFOSABI_NONE ||
	    ehdr->e_type != ET_DYN || ehdr->e_machine != EM_AARCH64 ||
	    ehdr->e_flags || ehdr->e_phentsize != sizeof(Elf64_Phdr) ||
	    ehdr->e_shentsize != sizeof(Elf64_Shdr))
		return TEE_ERROR_BAD_FORMAT;

	elf->is_32bit = false;
	elf->e_entry = ehdr->e_entry;
	elf->e_phoff = ehdr->e_phoff;
	elf->e_shoff = ehdr->e_shoff;
	elf->e_phnum = ehdr->e_phnum;
	elf->e_shnum = ehdr->e_shnum;
	elf->e_phentsize = ehdr->e_phentsize;
	elf->e_shentsize = ehdr->e_shentsize;

	return TEE_SUCCESS;
}
#else /*ARM64*/
static TEE_Result e64_parse_ehdr(struct ta_elf *elf __unused,
				 Elf64_Ehdr *ehdr __unused)
{
	return TEE_ERROR_NOT_SUPPORTED;
}
#endif /*ARM64*/

static void check_phdr_in_range(struct ta_elf *elf, unsigned int type,
				vaddr_t addr, size_t memsz)
{
	vaddr_t max_addr = 0;

	if (ADD_OVERFLOW(addr, memsz, &max_addr))
		err(TEE_ERROR_BAD_FORMAT, "Program header %#x overflow", type);

	/*
	 * elf->load_addr and elf->max_addr are both using the
	 * final virtual addresses, while this program header is
	 * relative to 0.
	 */
	if (max_addr > elf->max_addr - elf->load_addr)
		err(TEE_ERROR_BAD_FORMAT, "Program header %#x out of bounds",
		    type);
}

static void read_dyn(struct ta_elf *elf, vaddr_t addr,
		     size_t idx, unsigned int *tag, size_t *val)
{
	if (elf->is_32bit) {
		Elf32_Dyn *dyn = (Elf32_Dyn *)(addr + elf->load_addr);

		*tag = dyn[idx].d_tag;
		*val = dyn[idx].d_un.d_val;
	} else {
		Elf64_Dyn *dyn = (Elf64_Dyn *)(addr + elf->load_addr);

		*tag = dyn[idx].d_tag;
		*val = dyn[idx].d_un.d_val;
	}
}

static void save_hashtab_from_segment(struct ta_elf *elf, unsigned int type,
				      vaddr_t addr, size_t memsz)
{
	size_t dyn_entsize = 0;
	size_t num_dyns = 0;
	size_t n = 0;
	unsigned int tag = 0;
	size_t val = 0;

	if (type != PT_DYNAMIC)
		return;

	check_phdr_in_range(elf, type, addr, memsz);

	if (elf->is_32bit)
		dyn_entsize = sizeof(Elf32_Dyn);
	else
		dyn_entsize = sizeof(Elf64_Dyn);

	assert(!(memsz % dyn_entsize));
	num_dyns = memsz / dyn_entsize;

	for (n = 0; n < num_dyns; n++) {
		read_dyn(elf, addr, n, &tag, &val);
		if (tag == DT_HASH) {
			elf->hashtab = (void *)(val + elf->load_addr);
			break;
		}
	}
}

static void check_range(struct ta_elf *elf, const char *name, const void *ptr,
			size_t sz)
{
	size_t max_addr = 0;

	if ((vaddr_t)ptr < elf->load_addr)
		err(TEE_ERROR_BAD_FORMAT, "%s %p out of range", name, ptr);

	if (ADD_OVERFLOW((vaddr_t)ptr, sz, &max_addr))
		err(TEE_ERROR_BAD_FORMAT, "%s range overflow", name);

	if (max_addr > elf->max_addr)
		err(TEE_ERROR_BAD_FORMAT,
		    "%s %p..%#zx out of range", name, ptr, max_addr);
}

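/*
 * Illustrative sketch (not part of the loader): the DT_HASH table checked
 * below follows the SysV gABI layout, i.e. two counter words followed by
 * the bucket and chain arrays, all 32-bit words:
 *
 *	uint32_t num_buckets;
 *	uint32_t num_chains;
 *	uint32_t buckets[num_buckets];
 *	uint32_t chains[num_chains];
 *
 * which is why check_hashtab() sizes the table as
 * (2 + num_buckets + num_chains) * sizeof(uint32_t).
 */
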
static void check_hashtab(struct ta_elf *elf, void *ptr, size_t num_buckets,
			  size_t num_chains)
{
	/*
	 * Starting from 2 as the first two words are mandatory and hold
	 * num_buckets and num_chains. So this function is called twice,
	 * first to see that there's indeed room for num_buckets and
	 * num_chains and then to see that all of it fits.
	 * See http://www.sco.com/developers/gabi/latest/ch5.dynamic.html#hash
	 */
	size_t num_words = 2;
	size_t sz = 0;

	if (!IS_ALIGNED_WITH_TYPE(ptr, uint32_t))
		err(TEE_ERROR_BAD_FORMAT, "Bad alignment of DT_HASH %p", ptr);

	if (ADD_OVERFLOW(num_words, num_buckets, &num_words) ||
	    ADD_OVERFLOW(num_words, num_chains, &num_words) ||
	    MUL_OVERFLOW(num_words, sizeof(uint32_t), &sz))
		err(TEE_ERROR_BAD_FORMAT, "DT_HASH overflow");

	check_range(elf, "DT_HASH", ptr, sz);
}

static void save_hashtab(struct ta_elf *elf)
{
	uint32_t *hashtab = NULL;
	size_t n = 0;

	if (elf->is_32bit) {
		Elf32_Phdr *phdr = elf->phdr;

		for (n = 0; n < elf->e_phnum; n++)
			save_hashtab_from_segment(elf, phdr[n].p_type,
						  phdr[n].p_vaddr,
						  phdr[n].p_memsz);
	} else {
		Elf64_Phdr *phdr = elf->phdr;

		for (n = 0; n < elf->e_phnum; n++)
			save_hashtab_from_segment(elf, phdr[n].p_type,
						  phdr[n].p_vaddr,
						  phdr[n].p_memsz);
	}

	check_hashtab(elf, elf->hashtab, 0, 0);
	hashtab = elf->hashtab;
	check_hashtab(elf, elf->hashtab, hashtab[0], hashtab[1]);
}

static void save_soname_from_segment(struct ta_elf *elf, unsigned int type,
				     vaddr_t addr, size_t memsz)
{
	size_t dyn_entsize = 0;
	size_t num_dyns = 0;
	size_t n = 0;
	unsigned int tag = 0;
	size_t val = 0;
	char *str_tab = NULL;

	if (type != PT_DYNAMIC)
		return;

	if (elf->is_32bit)
		dyn_entsize = sizeof(Elf32_Dyn);
	else
		dyn_entsize = sizeof(Elf64_Dyn);

	assert(!(memsz % dyn_entsize));
	num_dyns = memsz / dyn_entsize;

	for (n = 0; n < num_dyns; n++) {
		read_dyn(elf, addr, n, &tag, &val);
		if (tag == DT_STRTAB) {
			str_tab = (char *)(val + elf->load_addr);
			break;
		}
	}
	for (n = 0; n < num_dyns; n++) {
		read_dyn(elf, addr, n, &tag, &val);
		if (tag == DT_SONAME) {
			elf->soname = str_tab + val;
			break;
		}
	}
}

static void save_soname(struct ta_elf *elf)
{
	size_t n = 0;

	if (elf->is_32bit) {
		Elf32_Phdr *phdr = elf->phdr;

		for (n = 0; n < elf->e_phnum; n++)
			save_soname_from_segment(elf, phdr[n].p_type,
						 phdr[n].p_vaddr,
						 phdr[n].p_memsz);
	} else {
		Elf64_Phdr *phdr = elf->phdr;

		for (n = 0; n < elf->e_phnum; n++)
			save_soname_from_segment(elf, phdr[n].p_type,
						 phdr[n].p_vaddr,
						 phdr[n].p_memsz);
	}
}

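/*
 * For an SHT_DYNSYM section the sh_link field holds the section header
 * index of the associated string table (.dynstr); the helpers below rely
 * on that to locate and validate both tables.
 */
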
static void e32_save_symtab(struct ta_elf *elf, size_t tab_idx)
{
	Elf32_Shdr *shdr = elf->shdr;
	size_t str_idx = shdr[tab_idx].sh_link;

	elf->dynsymtab = (void *)(shdr[tab_idx].sh_addr + elf->load_addr);
	if (!IS_ALIGNED_WITH_TYPE(elf->dynsymtab, Elf32_Sym))
		err(TEE_ERROR_BAD_FORMAT, "Bad alignment of dynsymtab %p",
		    elf->dynsymtab);
	check_range(elf, "Dynsymtab", elf->dynsymtab, shdr[tab_idx].sh_size);

	if (shdr[tab_idx].sh_size % sizeof(Elf32_Sym))
		err(TEE_ERROR_BAD_FORMAT,
		    "Size of dynsymtab not an even multiple of Elf32_Sym");
	elf->num_dynsyms = shdr[tab_idx].sh_size / sizeof(Elf32_Sym);

	if (str_idx >= elf->e_shnum)
		err(TEE_ERROR_BAD_FORMAT, "Dynstr section index out of range");
	elf->dynstr = (void *)(shdr[str_idx].sh_addr + elf->load_addr);
	check_range(elf, "Dynstr", elf->dynstr, shdr[str_idx].sh_size);

	elf->dynstr_size = shdr[str_idx].sh_size;
}

static void e64_save_symtab(struct ta_elf *elf, size_t tab_idx)
{
	Elf64_Shdr *shdr = elf->shdr;
	size_t str_idx = shdr[tab_idx].sh_link;

	elf->dynsymtab = (void *)(vaddr_t)(shdr[tab_idx].sh_addr +
					   elf->load_addr);

	if (!IS_ALIGNED_WITH_TYPE(elf->dynsymtab, Elf64_Sym))
		err(TEE_ERROR_BAD_FORMAT, "Bad alignment of .dynsym/DYNSYM %p",
		    elf->dynsymtab);
	check_range(elf, ".dynsym/DYNSYM", elf->dynsymtab,
		    shdr[tab_idx].sh_size);

	if (shdr[tab_idx].sh_size % sizeof(Elf64_Sym))
		err(TEE_ERROR_BAD_FORMAT,
		    "Size of .dynsym/DYNSYM not an even multiple of Elf64_Sym");
	elf->num_dynsyms = shdr[tab_idx].sh_size / sizeof(Elf64_Sym);

	if (str_idx >= elf->e_shnum)
		err(TEE_ERROR_BAD_FORMAT,
		    ".dynstr/STRTAB section index out of range");
	elf->dynstr = (void *)(vaddr_t)(shdr[str_idx].sh_addr + elf->load_addr);
	check_range(elf, ".dynstr/STRTAB", elf->dynstr, shdr[str_idx].sh_size);

	elf->dynstr_size = shdr[str_idx].sh_size;
}

static void save_symtab(struct ta_elf *elf)
{
	size_t n = 0;

	if (elf->is_32bit) {
		Elf32_Shdr *shdr = elf->shdr;

		for (n = 0; n < elf->e_shnum; n++) {
			if (shdr[n].sh_type == SHT_DYNSYM) {
				e32_save_symtab(elf, n);
				break;
			}
		}
	} else {
		Elf64_Shdr *shdr = elf->shdr;

		for (n = 0; n < elf->e_shnum; n++) {
			if (shdr[n].sh_type == SHT_DYNSYM) {
				e64_save_symtab(elf, n);
				break;
			}
		}
	}

	save_hashtab(elf);
	save_soname(elf);
}

static void init_elf(struct ta_elf *elf)
{
	TEE_Result res = TEE_SUCCESS;
	vaddr_t va = 0;
	uint32_t flags = LDELF_MAP_FLAG_SHAREABLE;
	size_t sz = 0;

	res = sys_open_ta_bin(&elf->uuid, &elf->handle);
	if (res)
		err(res, "sys_open_ta_bin(%pUl)", (void *)&elf->uuid);

	/*
	 * Map it read-only executable when we're loading a library where
	 * the ELF header is included in a load segment.
	 */
	if (!elf->is_main)
		flags |= LDELF_MAP_FLAG_EXECUTABLE;
	res = sys_map_ta_bin(&va, SMALL_PAGE_SIZE, flags, elf->handle, 0, 0, 0);
	if (res)
		err(res, "sys_map_ta_bin");
	elf->ehdr_addr = va;
	if (!elf->is_main) {
		elf->load_addr = va;
		elf->max_addr = va + SMALL_PAGE_SIZE;
		elf->max_offs = SMALL_PAGE_SIZE;
	}

	if (!IS_ELF(*(Elf32_Ehdr *)va))
		err(TEE_ERROR_BAD_FORMAT, "TA is not an ELF");

	res = e32_parse_ehdr(elf, (void *)va);
	if (res == TEE_ERROR_BAD_FORMAT)
		res = e64_parse_ehdr(elf, (void *)va);
	if (res)
		err(res, "Cannot parse ELF");

	if (MUL_OVERFLOW(elf->e_phnum, elf->e_phentsize, &sz) ||
	    ADD_OVERFLOW(sz, elf->e_phoff, &sz))
		err(TEE_ERROR_BAD_FORMAT, "Program headers size overflow");

	if (sz > SMALL_PAGE_SIZE)
		err(TEE_ERROR_NOT_SUPPORTED, "Cannot read program headers");

	elf->phdr = (void *)(va + elf->e_phoff);
}

static size_t roundup(size_t v)
{
	return ROUNDUP(v, SMALL_PAGE_SIZE);
}

static size_t rounddown(size_t v)
{
	return ROUNDDOWN(v, SMALL_PAGE_SIZE);
}

static void add_segment(struct ta_elf *elf, size_t offset, size_t vaddr,
			size_t filesz, size_t memsz, size_t flags, size_t align)
{
	struct segment *seg = calloc(1, sizeof(*seg));

	if (!seg)
		err(TEE_ERROR_OUT_OF_MEMORY, "calloc");

	if (memsz < filesz)
		err(TEE_ERROR_BAD_FORMAT, "Memsz smaller than filesz");

	seg->offset = offset;
	seg->vaddr = vaddr;
	seg->filesz = filesz;
	seg->memsz = memsz;
	seg->flags = flags;
	seg->align = align;

	TAILQ_INSERT_TAIL(&elf->segs, seg, link);
}

static void parse_load_segments(struct ta_elf *elf)
{
	size_t n = 0;

	if (elf->is_32bit) {
		Elf32_Phdr *phdr = elf->phdr;

		for (n = 0; n < elf->e_phnum; n++)
			if (phdr[n].p_type == PT_LOAD) {
				add_segment(elf, phdr[n].p_offset,
					    phdr[n].p_vaddr, phdr[n].p_filesz,
					    phdr[n].p_memsz, phdr[n].p_flags,
					    phdr[n].p_align);
			} else if (phdr[n].p_type == PT_ARM_EXIDX) {
				elf->exidx_start = phdr[n].p_vaddr;
				elf->exidx_size = phdr[n].p_filesz;
			} else if (phdr[n].p_type == PT_TLS) {
				assign_tls_mod_id(elf);
			}
	} else {
		Elf64_Phdr *phdr = elf->phdr;

		for (n = 0; n < elf->e_phnum; n++)
			if (phdr[n].p_type == PT_LOAD) {
				add_segment(elf, phdr[n].p_offset,
					    phdr[n].p_vaddr, phdr[n].p_filesz,
					    phdr[n].p_memsz, phdr[n].p_flags,
					    phdr[n].p_align);
			} else if (phdr[n].p_type == PT_TLS) {
				elf->tls_start = phdr[n].p_vaddr;
				elf->tls_filesz = phdr[n].p_filesz;
				elf->tls_memsz = phdr[n].p_memsz;
			} else if (IS_ENABLED(CFG_TA_BTI) &&
				   phdr[n].p_type == PT_GNU_PROPERTY) {
				elf->prop_start = phdr[n].p_vaddr;
				elf->prop_align = phdr[n].p_align;
				elf->prop_memsz = phdr[n].p_memsz;
			}
	}
}

static void copy_remapped_to(struct ta_elf *elf, const struct segment *seg)
{
	uint8_t *dst = (void *)(seg->vaddr + elf->load_addr);
	size_t n = 0;
	size_t offs = seg->offset;
	size_t num_bytes = seg->filesz;

	if (offs < elf->max_offs) {
		n = MIN(elf->max_offs - offs, num_bytes);
		memcpy(dst, (void *)(elf->max_addr + offs - elf->max_offs), n);
		dst += n;
		offs += n;
		num_bytes -= n;
	}

	if (num_bytes) {
		TEE_Result res = sys_copy_from_ta_bin(dst, num_bytes,
						      elf->handle, offs);

		if (res)
			err(res, "sys_copy_from_ta_bin");
		elf->max_offs += offs;
	}
}

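/*
 * Illustrative example (hypothetical numbers, 4 KiB pages) of what
 * adjust_segments() below has to deal with: with a text segment covering
 * [0x0000, 0x5a30) and a data segment at vaddr 0x5e20 / offset 0x5e20,
 * rounddown(0x5e20) = 0x5000 lies inside the text segment, so the two
 * segments share the page at 0x5000 and are merged with combined
 * permissions (case 1 below). If instead the data segment had been linked
 * at a far-away vaddr while still sharing file offset bits with the text
 * segment, the same file page would have to appear at two virtual
 * addresses and the segment is marked remapped_writeable (case 2 below).
 */
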
static void adjust_segments(struct ta_elf *elf)
{
	struct segment *seg = NULL;
	struct segment *prev_seg = NULL;
	size_t prev_end_addr = 0;
	size_t align = 0;
	size_t mask = 0;

	/* Sanity check */
	TAILQ_FOREACH(seg, &elf->segs, link) {
		size_t dummy __maybe_unused = 0;

		assert(seg->align >= SMALL_PAGE_SIZE);
		assert(!ADD_OVERFLOW(seg->vaddr, seg->memsz, &dummy));
		assert(seg->filesz <= seg->memsz);
		assert((seg->offset & SMALL_PAGE_MASK) ==
		       (seg->vaddr & SMALL_PAGE_MASK));

		prev_seg = TAILQ_PREV(seg, segment_head, link);
		if (prev_seg) {
			assert(seg->vaddr >= prev_seg->vaddr + prev_seg->memsz);
			assert(seg->offset >=
			       prev_seg->offset + prev_seg->filesz);
		}
		if (!align)
			align = seg->align;
		assert(align == seg->align);
	}

	mask = align - 1;

	seg = TAILQ_FIRST(&elf->segs);
	if (seg)
		seg = TAILQ_NEXT(seg, link);
	while (seg) {
		prev_seg = TAILQ_PREV(seg, segment_head, link);
		prev_end_addr = prev_seg->vaddr + prev_seg->memsz;

		/*
		 * This segment may overlap with the last "page" in the
		 * previous segment in two different ways:
		 * 1. Virtual address (and offset) overlaps =>
		 *    Permissions need to be merged. The offset must have
		 *    the same SMALL_PAGE_MASK bits as the vaddr, and both
		 *    must line up with the previous segment.
		 *
		 * 2. Only offset overlaps =>
		 *    The same page in the ELF is mapped at two different
		 *    virtual addresses. As a limitation this segment must
		 *    be mapped as writeable.
		 */

		/* Case 1. */
		if (rounddown(seg->vaddr) < prev_end_addr) {
			assert((seg->vaddr & mask) == (seg->offset & mask));
			assert(prev_seg->memsz == prev_seg->filesz);

			/*
			 * Merge the segments and their permissions.
			 * Note that there may be a small hole between the
			 * two segments.
			 */
			prev_seg->filesz = seg->vaddr + seg->filesz -
					   prev_seg->vaddr;
			prev_seg->memsz = seg->vaddr + seg->memsz -
					  prev_seg->vaddr;
			prev_seg->flags |= seg->flags;

			TAILQ_REMOVE(&elf->segs, seg, link);
			free(seg);
			seg = TAILQ_NEXT(prev_seg, link);
			continue;
		}

		/* Case 2. */
		if ((seg->offset & mask) &&
		    rounddown(seg->offset) <
		    (prev_seg->offset + prev_seg->filesz)) {
			assert(seg->flags & PF_W);
			seg->remapped_writeable = true;
		}

		/*
		 * No overlap, but we may need to align address, offset and
		 * size.
		 */
		seg->filesz += seg->vaddr - rounddown(seg->vaddr);
		seg->memsz += seg->vaddr - rounddown(seg->vaddr);
		seg->vaddr = rounddown(seg->vaddr);
		seg->offset = rounddown(seg->offset);
		seg = TAILQ_NEXT(seg, link);
	}
}

static void populate_segments_legacy(struct ta_elf *elf)
{
	TEE_Result res = TEE_SUCCESS;
	struct segment *seg = NULL;
	vaddr_t va = 0;

	assert(elf->is_legacy);
	TAILQ_FOREACH(seg, &elf->segs, link) {
		struct segment *last_seg = TAILQ_LAST(&elf->segs, segment_head);
		size_t pad_end = roundup(last_seg->vaddr + last_seg->memsz -
					 seg->vaddr - seg->memsz);
		size_t num_bytes = roundup(seg->memsz);

		if (!elf->load_addr)
			va = 0;
		else
			va = seg->vaddr + elf->load_addr;

		if (!(seg->flags & PF_R))
			err(TEE_ERROR_NOT_SUPPORTED,
			    "Segment must be readable");

		res = sys_map_zi(num_bytes, 0, &va, 0, pad_end);
		if (res)
			err(res, "sys_map_zi");
		res = sys_copy_from_ta_bin((void *)va, seg->filesz,
					   elf->handle, seg->offset);
		if (res)
			err(res, "sys_copy_from_ta_bin");

		if (!elf->load_addr)
			elf->load_addr = va;
		elf->max_addr = va + num_bytes;
		elf->max_offs = seg->offset + seg->filesz;
	}
}

static size_t get_pad_begin(void)
{
#ifdef CFG_TA_ASLR
	size_t min = CFG_TA_ASLR_MIN_OFFSET_PAGES;
	size_t max = CFG_TA_ASLR_MAX_OFFSET_PAGES;
	TEE_Result res = TEE_SUCCESS;
	uint32_t rnd32 = 0;
	size_t rnd = 0;

	COMPILE_TIME_ASSERT(CFG_TA_ASLR_MIN_OFFSET_PAGES <
			    CFG_TA_ASLR_MAX_OFFSET_PAGES);
	if (max > min) {
		res = sys_gen_random_num(&rnd32, sizeof(rnd32));
		if (res) {
			DMSG("Random read failed: %#"PRIx32, res);
			return min * SMALL_PAGE_SIZE;
		}
		rnd = rnd32 % (max - min);
	}

	return (min + rnd) * SMALL_PAGE_SIZE;
#else /*!CFG_TA_ASLR*/
	return 0;
#endif /*!CFG_TA_ASLR*/
}

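/*
 * Illustrative example (hypothetical configuration values): with
 * CFG_TA_ASLR_MIN_OFFSET_PAGES = 0 and CFG_TA_ASLR_MAX_OFFSET_PAGES = 128,
 * get_pad_begin() returns a random pad of 0..127 pages, so the mappings
 * created below are shifted by up to 508 KiB with 4 KiB pages.
 */
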
static void populate_segments(struct ta_elf *elf)
{
	TEE_Result res = TEE_SUCCESS;
	struct segment *seg = NULL;
	vaddr_t va = 0;
	size_t pad_begin = 0;

	assert(!elf->is_legacy);
	TAILQ_FOREACH(seg, &elf->segs, link) {
		struct segment *last_seg = TAILQ_LAST(&elf->segs, segment_head);
		size_t pad_end = roundup(last_seg->vaddr + last_seg->memsz -
					 seg->vaddr - seg->memsz);

		if (seg->remapped_writeable) {
			size_t num_bytes = roundup(seg->vaddr + seg->memsz) -
					   rounddown(seg->vaddr);

			assert(elf->load_addr);
			va = rounddown(elf->load_addr + seg->vaddr);
			assert(va >= elf->max_addr);
			res = sys_map_zi(num_bytes, 0, &va, 0, pad_end);
			if (res)
				err(res, "sys_map_zi");

			copy_remapped_to(elf, seg);
			elf->max_addr = va + num_bytes;
		} else {
			uint32_t flags = 0;
			size_t filesz = seg->filesz;
			size_t memsz = seg->memsz;
			size_t offset = seg->offset;
			size_t vaddr = seg->vaddr;

			if (offset < elf->max_offs) {
				/*
				 * We're in a load segment which overlaps
				 * with (or is covered by) the first page
				 * of a shared library.
				 */
				if (vaddr + filesz < SMALL_PAGE_SIZE) {
					size_t num_bytes = 0;

					/*
					 * If this segment is completely
					 * covered, take next.
					 */
					if (vaddr + memsz <= SMALL_PAGE_SIZE)
						continue;

					/*
					 * All data of the segment is
					 * loaded, but we need to zero
					 * extend it.
					 */
					va = elf->max_addr;
					num_bytes = roundup(vaddr + memsz) -
						    roundup(vaddr) -
						    SMALL_PAGE_SIZE;
					assert(num_bytes);
					res = sys_map_zi(num_bytes, 0, &va, 0,
							 0);
					if (res)
						err(res, "sys_map_zi");
					elf->max_addr = roundup(va + num_bytes);
					continue;
				}

				/* Partial overlap, remove the first page. */
				vaddr += SMALL_PAGE_SIZE;
				filesz -= SMALL_PAGE_SIZE;
				memsz -= SMALL_PAGE_SIZE;
				offset += SMALL_PAGE_SIZE;
			}

			if (!elf->load_addr) {
				va = 0;
				pad_begin = get_pad_begin();
				/*
				 * If mapping with pad_begin fails we'll
				 * retry without pad_begin, effectively
				 * disabling ASLR for the current ELF file.
				 */
			} else {
				va = vaddr + elf->load_addr;
				pad_begin = 0;
			}

			if (seg->flags & PF_W)
				flags |= LDELF_MAP_FLAG_WRITEABLE;
			else
				flags |= LDELF_MAP_FLAG_SHAREABLE;
			if (seg->flags & PF_X)
				flags |= LDELF_MAP_FLAG_EXECUTABLE;
			if (!(seg->flags & PF_R))
				err(TEE_ERROR_NOT_SUPPORTED,
				    "Segment must be readable");
			if (flags & LDELF_MAP_FLAG_WRITEABLE) {
				res = sys_map_zi(memsz, 0, &va, pad_begin,
						 pad_end);
				if (pad_begin && res == TEE_ERROR_OUT_OF_MEMORY)
					res = sys_map_zi(memsz, 0, &va, 0,
							 pad_end);
				if (res)
					err(res, "sys_map_zi");
				res = sys_copy_from_ta_bin((void *)va, filesz,
							   elf->handle, offset);
				if (res)
					err(res, "sys_copy_from_ta_bin");
			} else {
				if (filesz != memsz)
					err(TEE_ERROR_BAD_FORMAT,
					    "Filesz and memsz mismatch");
				res = sys_map_ta_bin(&va, filesz, flags,
						     elf->handle, offset,
						     pad_begin, pad_end);
				if (pad_begin && res == TEE_ERROR_OUT_OF_MEMORY)
					res = sys_map_ta_bin(&va, filesz, flags,
							     elf->handle,
							     offset, 0,
							     pad_end);
				if (res)
					err(res, "sys_map_ta_bin");
			}

			if (!elf->load_addr)
				elf->load_addr = va;
			elf->max_addr = roundup(va + memsz);
			elf->max_offs += filesz;
		}
	}
}

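/*
 * Illustrative layout (standard ELF note format) of the PT_GNU_PROPERTY
 * segment parsed below, shown for a single AArch64 feature property:
 *
 *	Elf_Note { n_namesz = 4, n_descsz, n_type = NT_GNU_PROPERTY_TYPE_0 }
 *	char name[4] = "GNU\0"
 *	...descriptor, aligned to the segment's p_align...
 *	Elf_Prop { pr_type = GNU_PROPERTY_AARCH64_FEATURE_1_AND,
 *		   pr_datasz = 4 }
 *	uint32_t pr_data holding GNU_PROPERTY_AARCH64_FEATURE_1_BTI etc.
 */
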
static void parse_property_segment(struct ta_elf *elf)
{
	char *desc = NULL;
	size_t align = elf->prop_align;
	size_t desc_offset = 0;
	size_t prop_offset = 0;
	vaddr_t va = 0;
	Elf_Note *note = NULL;
	char *name = NULL;

	if (!IS_ENABLED(CFG_TA_BTI) || !elf->prop_start)
		return;

	check_phdr_in_range(elf, PT_GNU_PROPERTY, elf->prop_start,
			    elf->prop_memsz);

	va = elf->load_addr + elf->prop_start;
	note = (void *)va;
	name = (char *)(note + 1);

	if (elf->prop_memsz < sizeof(*note) + sizeof(ELF_NOTE_GNU))
		return;

	if (note->n_type != NT_GNU_PROPERTY_TYPE_0 ||
	    note->n_namesz != sizeof(ELF_NOTE_GNU) ||
	    memcmp(name, ELF_NOTE_GNU, sizeof(ELF_NOTE_GNU)) ||
	    !IS_POWER_OF_TWO(align))
		return;

	desc_offset = ROUNDUP(sizeof(*note) + sizeof(ELF_NOTE_GNU), align);

	if (desc_offset > elf->prop_memsz ||
	    ROUNDUP(desc_offset + note->n_descsz, align) > elf->prop_memsz)
		return;

	desc = (char *)(va + desc_offset);

	do {
		Elf_Prop *prop = (void *)(desc + prop_offset);
		size_t data_offset = prop_offset + sizeof(*prop);

		if (note->n_descsz < data_offset)
			return;

		data_offset = confine_array_index(data_offset, note->n_descsz);

		if (prop->pr_type == GNU_PROPERTY_AARCH64_FEATURE_1_AND) {
			uint32_t *pr_data = (void *)(desc + data_offset);

			if (note->n_descsz < (data_offset + sizeof(*pr_data)) &&
			    prop->pr_datasz != sizeof(*pr_data))
				return;

			if (*pr_data & GNU_PROPERTY_AARCH64_FEATURE_1_BTI) {
				DMSG("BTI Feature present in note property");
				elf->bti_enabled = true;
			}
		}

		prop_offset += ROUNDUP(sizeof(*prop) + prop->pr_datasz, align);
	} while (prop_offset < note->n_descsz);
}

static void map_segments(struct ta_elf *elf)
{
	TEE_Result res = TEE_SUCCESS;

	parse_load_segments(elf);
	adjust_segments(elf);
	if (TAILQ_FIRST(&elf->segs)->offset < SMALL_PAGE_SIZE) {
		vaddr_t va = 0;
		size_t sz = elf->max_addr - elf->load_addr;
		struct segment *seg = TAILQ_LAST(&elf->segs, segment_head);
		size_t pad_begin = get_pad_begin();

		/*
		 * We're loading a library; if that assumption ever changes,
		 * other parts of the code need to be updated too.
		 */
		assert(!elf->is_main);

		/*
		 * Now that we know how much virtual memory is needed, move
		 * the already mapped part to a location which can
		 * accommodate us.
		 */
		res = sys_remap(elf->load_addr, &va, sz, pad_begin,
				roundup(seg->vaddr + seg->memsz));
		if (res == TEE_ERROR_OUT_OF_MEMORY)
			res = sys_remap(elf->load_addr, &va, sz, 0,
					roundup(seg->vaddr + seg->memsz));
		if (res)
			err(res, "sys_remap");
		elf->ehdr_addr = va;
		elf->load_addr = va;
		elf->max_addr = va + sz;
		elf->phdr = (void *)(va + elf->e_phoff);
	}
}

static void add_deps_from_segment(struct ta_elf *elf, unsigned int type,
				  vaddr_t addr, size_t memsz)
{
	size_t dyn_entsize = 0;
	size_t num_dyns = 0;
	size_t n = 0;
	unsigned int tag = 0;
	size_t val = 0;
	TEE_UUID uuid = { };
	char *str_tab = NULL;
	size_t str_tab_sz = 0;

	if (type != PT_DYNAMIC)
		return;

	check_phdr_in_range(elf, type, addr, memsz);

	if (elf->is_32bit)
		dyn_entsize = sizeof(Elf32_Dyn);
	else
		dyn_entsize = sizeof(Elf64_Dyn);

	assert(!(memsz % dyn_entsize));
	num_dyns = memsz / dyn_entsize;

	for (n = 0; n < num_dyns && !(str_tab && str_tab_sz); n++) {
		read_dyn(elf, addr, n, &tag, &val);
		if (tag == DT_STRTAB)
			str_tab = (char *)(val + elf->load_addr);
		else if (tag == DT_STRSZ)
			str_tab_sz = val;
	}
	check_range(elf, ".dynstr/STRTAB", str_tab, str_tab_sz);

	for (n = 0; n < num_dyns; n++) {
		read_dyn(elf, addr, n, &tag, &val);
		if (tag != DT_NEEDED)
			continue;
		if (val >= str_tab_sz)
			err(TEE_ERROR_BAD_FORMAT,
			    "Offset into .dynstr/STRTAB out of range");
		tee_uuid_from_str(&uuid, str_tab + val);
		queue_elf(&uuid);
	}
}

static void add_dependencies(struct ta_elf *elf)
{
	size_t n = 0;

	if (elf->is_32bit) {
		Elf32_Phdr *phdr = elf->phdr;

		for (n = 0; n < elf->e_phnum; n++)
			add_deps_from_segment(elf, phdr[n].p_type,
					      phdr[n].p_vaddr, phdr[n].p_memsz);
	} else {
		Elf64_Phdr *phdr = elf->phdr;

		for (n = 0; n < elf->e_phnum; n++)
			add_deps_from_segment(elf, phdr[n].p_type,
					      phdr[n].p_vaddr, phdr[n].p_memsz);
	}
}

static void copy_section_headers(struct ta_elf *elf)
{
	TEE_Result res = TEE_SUCCESS;
	size_t sz = 0;
	size_t offs = 0;

	if (MUL_OVERFLOW(elf->e_shnum, elf->e_shentsize, &sz))
		err(TEE_ERROR_BAD_FORMAT, "Section headers size overflow");

	elf->shdr = malloc(sz);
	if (!elf->shdr)
		err(TEE_ERROR_OUT_OF_MEMORY, "malloc");

	/*
	 * We're assuming that the section headers come after the load
	 * segments, but if it's a very small dynamically linked library
	 * the section headers can still end up (partially?) in the first
	 * mapped page.
	 */
	if (elf->e_shoff < SMALL_PAGE_SIZE) {
		assert(!elf->is_main);
		offs = MIN(SMALL_PAGE_SIZE - elf->e_shoff, sz);
		memcpy(elf->shdr, (void *)(elf->load_addr + elf->e_shoff),
		       offs);
	}

	if (offs < sz) {
		res = sys_copy_from_ta_bin((uint8_t *)elf->shdr + offs,
					   sz - offs, elf->handle,
					   elf->e_shoff + offs);
		if (res)
			err(res, "sys_copy_from_ta_bin");
	}
}

static void close_handle(struct ta_elf *elf)
{
	TEE_Result res = sys_close_ta_bin(elf->handle);

	if (res)
		err(res, "sys_close_ta_bin");
	elf->handle = -1;
}

static void clean_elf_load_main(struct ta_elf *elf)
{
	TEE_Result res = TEE_SUCCESS;

	/*
	 * Clean up from the last attempt to load
	 */
	res = sys_unmap(elf->ehdr_addr, SMALL_PAGE_SIZE);
	if (res)
		err(res, "sys_unmap");

	while (!TAILQ_EMPTY(&elf->segs)) {
		struct segment *seg = TAILQ_FIRST(&elf->segs);
		vaddr_t va = 0;
		size_t num_bytes = 0;

		va = rounddown(elf->load_addr + seg->vaddr);
		if (seg->remapped_writeable)
			num_bytes = roundup(seg->vaddr + seg->memsz) -
				    rounddown(seg->vaddr);
		else
			num_bytes = seg->memsz;

		res = sys_unmap(va, num_bytes);
		if (res)
			err(res, "sys_unmap");

		TAILQ_REMOVE(&elf->segs, seg, link);
		free(seg);
	}

	free(elf->shdr);
	memset(&elf->is_32bit, 0,
	       (vaddr_t)&elf->uuid - (vaddr_t)&elf->is_32bit);

	TAILQ_INIT(&elf->segs);
}

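/*
 * Illustrative example (hypothetical sizes): on AArch64 the thread pointer
 * refers to the TCB, whose first TCB_HEAD_SIZE bytes are reserved, so the
 * first module with a PT_TLS segment of tls_memsz 0x40 gets tls_tcb_offs 16
 * and the next one 16 + 0x40, as computed by set_tls_offset() below.
 */
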
#ifdef ARM64
/*
 * Allocates an offset in the TA's Thread Control Block for the TLS segment of
 * the @elf module.
 */
#define TCB_HEAD_SIZE (2 * sizeof(long))
static void set_tls_offset(struct ta_elf *elf)
{
	static size_t next_offs = TCB_HEAD_SIZE;

	if (!elf->tls_start)
		return;

	/* Module has a TLS segment */
	elf->tls_tcb_offs = next_offs;
	next_offs += elf->tls_memsz;
}
#else
static void set_tls_offset(struct ta_elf *elf __unused) {}
#endif

static void load_main(struct ta_elf *elf)
{
	init_elf(elf);
	map_segments(elf);
	populate_segments(elf);
	add_dependencies(elf);
	copy_section_headers(elf);
	save_symtab(elf);
	close_handle(elf);
	set_tls_offset(elf);

	elf->head = (struct ta_head *)elf->load_addr;
	if (elf->head->depr_entry != UINT64_MAX) {
		/*
		 * Legacy TAs set their entry point in ta_head. For
		 * non-legacy TAs the ELF entry point is used instead and
		 * the ta_head entry point is left set to UINT64_MAX to
		 * indicate that it's not used.
		 *
		 * NB, everything before the commit a73b5878c89d ("Replace
		 * ta_head.entry with elf entry") is considered legacy TAs
		 * for ldelf.
		 *
		 * Legacy TAs cannot be mapped with shared memory segments
		 * so restart the mapping if it turned out we're loading a
		 * legacy TA.
		 */
		DMSG("Reloading TA %pUl as legacy TA", (void *)&elf->uuid);
		clean_elf_load_main(elf);
		elf->is_legacy = true;
		init_elf(elf);
		map_segments(elf);
		populate_segments_legacy(elf);
		add_dependencies(elf);
		copy_section_headers(elf);
		save_symtab(elf);
		close_handle(elf);
		elf->head = (struct ta_head *)elf->load_addr;
		/*
		 * Check that the TA is still a legacy TA, if it isn't give
		 * up now since we're likely under attack.
		 */
		if (elf->head->depr_entry == UINT64_MAX)
			err(TEE_ERROR_GENERIC,
			    "TA %pUl was changed on disk to non-legacy",
			    (void *)&elf->uuid);
	}
}

void ta_elf_load_main(const TEE_UUID *uuid, uint32_t *is_32bit, uint64_t *sp,
		      uint32_t *ta_flags)
{
	struct ta_elf *elf = queue_elf(uuid);
	vaddr_t va = 0;
	TEE_Result res = TEE_SUCCESS;

	assert(elf);
	elf->is_main = true;

	load_main(elf);

	*is_32bit = elf->is_32bit;
	res = sys_map_zi(elf->head->stack_size, 0, &va, 0, 0);
	if (res)
		err(res, "sys_map_zi stack");

	if (elf->head->flags & ~TA_FLAGS_MASK)
		err(TEE_ERROR_BAD_FORMAT, "Invalid TA flag(s) %#"PRIx32,
		    elf->head->flags & ~TA_FLAGS_MASK);

	*ta_flags = elf->head->flags;
	*sp = va + elf->head->stack_size;
	ta_stack = va;
	ta_stack_size = elf->head->stack_size;
}

void ta_elf_finalize_load_main(uint64_t *entry)
{
	struct ta_elf *elf = TAILQ_FIRST(&main_elf_queue);
	TEE_Result res = TEE_SUCCESS;

	assert(elf->is_main);

	res = ta_elf_set_init_fini_info_compat(elf->is_32bit);
	if (res)
		err(res, "ta_elf_set_init_fini_info_compat");
	res = ta_elf_set_elf_phdr_info(elf->is_32bit);
	if (res)
		err(res, "ta_elf_set_elf_phdr_info");

	if (elf->is_legacy)
		*entry = elf->head->depr_entry;
	else
		*entry = elf->e_entry + elf->load_addr;
}

void ta_elf_load_dependency(struct ta_elf *elf, bool is_32bit)
{
	if (elf->is_main)
		return;

	init_elf(elf);
	if (elf->is_32bit != is_32bit)
		err(TEE_ERROR_BAD_FORMAT, "ELF %pUl is %sbit (expected %sbit)",
		    (void *)&elf->uuid, elf->is_32bit ? "32" : "64",
		    is_32bit ? "32" : "64");

	map_segments(elf);
	populate_segments(elf);
	add_dependencies(elf);
	copy_section_headers(elf);
	save_symtab(elf);
	close_handle(elf);
	set_tls_offset(elf);
}

void ta_elf_finalize_mappings(struct ta_elf *elf)
{
	TEE_Result res = TEE_SUCCESS;
	struct segment *seg = NULL;

	if (!elf->is_legacy)
		return;

	TAILQ_FOREACH(seg, &elf->segs, link) {
		vaddr_t va = elf->load_addr + seg->vaddr;
		uint32_t flags = 0;

		if (seg->flags & PF_W)
			flags |= LDELF_MAP_FLAG_WRITEABLE;
		if (seg->flags & PF_X)
			flags |= LDELF_MAP_FLAG_EXECUTABLE;

		res = sys_set_prot(va, seg->memsz, flags);
		if (res)
			err(res, "sys_set_prot");
	}
}

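/*
 * The helpers below produce the mapping dump printed for instance in TA
 * abort reports. An illustrative (made-up) line looks like:
 *
 *	region  4: va 0x40014000 pa 0x00001000 size 0x001000 flags rw-s (stack)
 *
 * where the pa column for ELF segments actually shows the file offset and
 * the trailing tag identifies the stack, ldelf, or parameter mappings, or
 * the index of the owning ELF in the list printed at the end.
 */
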
static void __printf(3, 4) print_wrapper(void *pctx, print_func_t print_func,
					 const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	print_func(pctx, fmt, ap);
	va_end(ap);
}

static void print_seg(void *pctx, print_func_t print_func,
		      size_t idx __maybe_unused, int elf_idx __maybe_unused,
		      vaddr_t va __maybe_unused, paddr_t pa __maybe_unused,
		      size_t sz __maybe_unused, uint32_t flags)
{
	int rc __maybe_unused = 0;
	int width __maybe_unused = 8;
	char desc[14] __maybe_unused = "";
	char flags_str[] __maybe_unused = "----";

	if (elf_idx > -1) {
		rc = snprintf(desc, sizeof(desc), " [%d]", elf_idx);
		assert(rc >= 0);
	} else {
		if (flags & DUMP_MAP_EPHEM) {
			rc = snprintf(desc, sizeof(desc), " (param)");
			assert(rc >= 0);
		}
		if (flags & DUMP_MAP_LDELF) {
			rc = snprintf(desc, sizeof(desc), " (ldelf)");
			assert(rc >= 0);
		}
		if (va == ta_stack) {
			rc = snprintf(desc, sizeof(desc), " (stack)");
			assert(rc >= 0);
		}
	}

	if (flags & DUMP_MAP_READ)
		flags_str[0] = 'r';
	if (flags & DUMP_MAP_WRITE)
		flags_str[1] = 'w';
	if (flags & DUMP_MAP_EXEC)
		flags_str[2] = 'x';
	if (flags & DUMP_MAP_SECURE)
		flags_str[3] = 's';

	print_wrapper(pctx, print_func,
		      "region %2zu: va 0x%0*"PRIxVA" pa 0x%0*"PRIxPA" size 0x%06zx flags %s%s\n",
		      idx, width, va, width, pa, sz, flags_str, desc);
}

static bool get_next_in_order(struct ta_elf_queue *elf_queue,
			      struct ta_elf **elf, struct segment **seg,
			      size_t *elf_idx)
{
	struct ta_elf *e = NULL;
	struct segment *s = NULL;
	size_t idx = 0;
	vaddr_t va = 0;
	struct ta_elf *e2 = NULL;
	size_t i2 = 0;

	assert(elf && seg && elf_idx);
	e = *elf;
	s = *seg;
	assert((e == NULL && s == NULL) || (e != NULL && s != NULL));

	if (s) {
		s = TAILQ_NEXT(s, link);
		if (s) {
			*seg = s;
			return true;
		}
	}

	if (e)
		va = e->load_addr;

	/* Find the ELF with next load address */
	e = NULL;
	TAILQ_FOREACH(e2, elf_queue, link) {
		if (e2->load_addr > va) {
			if (!e || e2->load_addr < e->load_addr) {
				e = e2;
				idx = i2;
			}
		}
		i2++;
	}
	if (!e)
		return false;

	*elf = e;
	*seg = TAILQ_FIRST(&e->segs);
	*elf_idx = idx;
	return true;
}

void ta_elf_print_mappings(void *pctx, print_func_t print_func,
			   struct ta_elf_queue *elf_queue, size_t num_maps,
			   struct dump_map *maps, vaddr_t mpool_base)
{
	struct segment *seg = NULL;
	struct ta_elf *elf = NULL;
	size_t elf_idx = 0;
	size_t idx = 0;
	size_t map_idx = 0;

	/*
	 * Loop over all segments and maps, printing virtual addresses in
	 * order. A segment has priority if the virtual address is present
	 * in both a map and a segment.
	 */
	get_next_in_order(elf_queue, &elf, &seg, &elf_idx);
	while (true) {
		vaddr_t va = -1;
		size_t sz = 0;
		uint32_t flags = DUMP_MAP_SECURE;
		size_t offs = 0;

		if (seg) {
			va = rounddown(seg->vaddr + elf->load_addr);
			sz = roundup(seg->vaddr + seg->memsz) -
			     rounddown(seg->vaddr);
		}

		while (map_idx < num_maps && maps[map_idx].va <= va) {
			uint32_t f = 0;

			/* If there's a match, it should be the same map */
			if (maps[map_idx].va == va) {
				/*
				 * In shared libraries the first page is
				 * mapped separately with the rest of that
				 * segment following back to back in a
				 * separate entry.
				 */
				if (map_idx + 1 < num_maps &&
				    maps[map_idx].sz == SMALL_PAGE_SIZE) {
					vaddr_t next_va = maps[map_idx].va +
							  maps[map_idx].sz;
					size_t comb_sz = maps[map_idx].sz +
							 maps[map_idx + 1].sz;

					if (next_va == maps[map_idx + 1].va &&
					    comb_sz == sz &&
					    maps[map_idx].flags ==
					    maps[map_idx + 1].flags) {
						/* Skip this and next entry */
						map_idx += 2;
						continue;
					}
				}
				assert(maps[map_idx].sz == sz);
			} else if (maps[map_idx].va < va) {
				if (maps[map_idx].va == mpool_base)
					f |= DUMP_MAP_LDELF;
				print_seg(pctx, print_func, idx, -1,
					  maps[map_idx].va, maps[map_idx].pa,
					  maps[map_idx].sz,
					  maps[map_idx].flags | f);
				idx++;
			}
			map_idx++;
		}

		if (!seg)
			break;

		offs = rounddown(seg->offset);
		if (seg->flags & PF_R)
			flags |= DUMP_MAP_READ;
		if (seg->flags & PF_W)
			flags |= DUMP_MAP_WRITE;
		if (seg->flags & PF_X)
			flags |= DUMP_MAP_EXEC;

		print_seg(pctx, print_func, idx, elf_idx, va, offs, sz, flags);
		idx++;

		if (!get_next_in_order(elf_queue, &elf, &seg, &elf_idx))
			seg = NULL;
	}

	elf_idx = 0;
	TAILQ_FOREACH(elf, elf_queue, link) {
		print_wrapper(pctx, print_func,
			      " [%zu] %pUl @ 0x%0*"PRIxVA"\n",
			      elf_idx, (void *)&elf->uuid, 8, elf->load_addr);
		elf_idx++;
	}
}

#ifdef CFG_UNWIND
/* Called by libunw */
bool find_exidx(vaddr_t addr, vaddr_t *idx_start, vaddr_t *idx_end)
{
	struct segment *seg = NULL;
	struct ta_elf *elf = NULL;
	vaddr_t a = 0;

	TAILQ_FOREACH(elf, &main_elf_queue, link) {
		if (addr < elf->load_addr)
			continue;
		a = addr - elf->load_addr;
		TAILQ_FOREACH(seg, &elf->segs, link) {
			if (a < seg->vaddr)
				continue;
			if (a - seg->vaddr < seg->filesz) {
				*idx_start = elf->exidx_start + elf->load_addr;
				*idx_end = elf->exidx_start + elf->load_addr +
					   elf->exidx_size;
				return true;
			}
		}
	}

	return false;
}

void ta_elf_stack_trace_a32(uint32_t regs[16])
{
	struct unwind_state_arm32 state = { };

	memcpy(state.registers, regs, sizeof(state.registers));
	print_stack_arm32(&state, ta_stack, ta_stack_size);
}

void ta_elf_stack_trace_a64(uint64_t fp, uint64_t sp, uint64_t pc)
{
	struct unwind_state_arm64 state = { .fp = fp, .sp = sp, .pc = pc };

	print_stack_arm64(&state, ta_stack, ta_stack_size);
}
#endif

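/*
 * Loads an additional library at runtime on behalf of the TA (the dlopen()
 * path), as opposed to the DT_NEEDED dependencies that are queued while the
 * main ELF is loaded above.
 */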
TEE_Result ta_elf_add_library(const TEE_UUID *uuid)
{
	TEE_Result res = TEE_ERROR_GENERIC;
	struct ta_elf *ta = TAILQ_FIRST(&main_elf_queue);
	struct ta_elf *lib = ta_elf_find_elf(uuid);
	struct ta_elf *elf = NULL;

	if (lib)
		return TEE_SUCCESS; /* Already mapped */

	lib = queue_elf_helper(uuid);
	if (!lib)
		return TEE_ERROR_OUT_OF_MEMORY;

	for (elf = lib; elf; elf = TAILQ_NEXT(elf, link))
		ta_elf_load_dependency(elf, ta->is_32bit);

	for (elf = lib; elf; elf = TAILQ_NEXT(elf, link)) {
		ta_elf_relocate(elf);
		ta_elf_finalize_mappings(elf);
	}

	for (elf = lib; elf; elf = TAILQ_NEXT(elf, link))
		DMSG("ELF (%pUl) at %#"PRIxVA,
		     (void *)&elf->uuid, elf->load_addr);

	res = ta_elf_set_init_fini_info_compat(ta->is_32bit);
	if (res)
		return res;

	return ta_elf_set_elf_phdr_info(ta->is_32bit);
}

/* Get address/size of .init_array and .fini_array from the dynamic segment */
static void get_init_fini_array(struct ta_elf *elf, unsigned int type,
				vaddr_t addr, size_t memsz, vaddr_t *init,
				size_t *init_cnt, vaddr_t *fini,
				size_t *fini_cnt)
{
	size_t addrsz = 0;
	size_t dyn_entsize = 0;
	size_t num_dyns = 0;
	size_t n = 0;
	unsigned int tag = 0;
	size_t val = 0;

	assert(type == PT_DYNAMIC);

	check_phdr_in_range(elf, type, addr, memsz);

	if (elf->is_32bit) {
		dyn_entsize = sizeof(Elf32_Dyn);
		addrsz = 4;
	} else {
		dyn_entsize = sizeof(Elf64_Dyn);
		addrsz = 8;
	}

	assert(!(memsz % dyn_entsize));
	num_dyns = memsz / dyn_entsize;

	for (n = 0; n < num_dyns; n++) {
		read_dyn(elf, addr, n, &tag, &val);
		if (tag == DT_INIT_ARRAY)
			*init = val + elf->load_addr;
		else if (tag == DT_FINI_ARRAY)
			*fini = val + elf->load_addr;
		else if (tag == DT_INIT_ARRAYSZ)
			*init_cnt = val / addrsz;
		else if (tag == DT_FINI_ARRAYSZ)
			*fini_cnt = val / addrsz;
	}
}

/* Get address/size of .init_array and .fini_array in @elf (if present) */
static void elf_get_init_fini_array(struct ta_elf *elf, vaddr_t *init,
				    size_t *init_cnt, vaddr_t *fini,
				    size_t *fini_cnt)
{
	size_t n = 0;

	if (elf->is_32bit) {
		Elf32_Phdr *phdr = elf->phdr;

		for (n = 0; n < elf->e_phnum; n++) {
			if (phdr[n].p_type == PT_DYNAMIC) {
				get_init_fini_array(elf, phdr[n].p_type,
						    phdr[n].p_vaddr,
						    phdr[n].p_memsz,
						    init, init_cnt, fini,
						    fini_cnt);
				return;
			}
		}
	} else {
		Elf64_Phdr *phdr = elf->phdr;

		for (n = 0; n < elf->e_phnum; n++) {
			if (phdr[n].p_type == PT_DYNAMIC) {
				get_init_fini_array(elf, phdr[n].p_type,
						    phdr[n].p_vaddr,
						    phdr[n].p_memsz,
						    init, init_cnt, fini,
						    fini_cnt);
				return;
			}
		}
	}
}

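/*
 * Worked example (hypothetical values): DT_INIT_ARRAYSZ is a byte count, so
 * a 64-bit TA with DT_INIT_ARRAYSZ = 24 has 24 / 8 = 3 constructor pointers
 * in .init_array; for a 32-bit TA the divisor is 4, as computed in
 * get_init_fini_array() above.
 */
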
/*
 * Deprecated by __elf_phdr_info below. Kept for compatibility.
 *
 * Pointers to ELF initialization and finalization functions are extracted by
 * ldelf and stored on the TA heap, then exported to the TA via the global
 * symbol __init_fini_info. libutee in OP-TEE 3.9.0 uses this mechanism.
 */

struct __init_fini {
	uint32_t flags;
	uint16_t init_size;
	uint16_t fini_size;

	void (**init)(void); /* @init_size entries */
	void (**fini)(void); /* @fini_size entries */
};

#define __IFS_VALID		BIT(0)
#define __IFS_INIT_HAS_RUN	BIT(1)
#define __IFS_FINI_HAS_RUN	BIT(2)

struct __init_fini_info {
	uint32_t reserved;
	uint16_t size;
	uint16_t pad;
	struct __init_fini *ifs; /* @size entries */
};

/* 32-bit variants for a 64-bit ldelf to access a 32-bit TA */

struct __init_fini32 {
	uint32_t flags;
	uint16_t init_size;
	uint16_t fini_size;
	uint32_t init;
	uint32_t fini;
};

struct __init_fini_info32 {
	uint32_t reserved;
	uint16_t size;
	uint16_t pad;
	uint32_t ifs;
};

static TEE_Result realloc_ifs(vaddr_t va, size_t cnt, bool is_32bit)
{
	struct __init_fini_info32 *info32 = (struct __init_fini_info32 *)va;
	struct __init_fini_info *info = (struct __init_fini_info *)va;
	struct __init_fini32 *ifs32 = NULL;
	struct __init_fini *ifs = NULL;
	size_t prev_cnt = 0;
	void *ptr = NULL;

	if (is_32bit) {
		ptr = (void *)(vaddr_t)info32->ifs;
		ptr = realloc(ptr, cnt * sizeof(struct __init_fini32));
		if (!ptr)
			return TEE_ERROR_OUT_OF_MEMORY;
		ifs32 = ptr;
		prev_cnt = info32->size;
		if (cnt > prev_cnt)
			memset(ifs32 + prev_cnt, 0,
			       (cnt - prev_cnt) * sizeof(*ifs32));
		info32->ifs = (uint32_t)(vaddr_t)ifs32;
		info32->size = cnt;
	} else {
		ptr = realloc(info->ifs, cnt * sizeof(struct __init_fini));
		if (!ptr)
			return TEE_ERROR_OUT_OF_MEMORY;
		ifs = ptr;
		prev_cnt = info->size;
		if (cnt > prev_cnt)
			memset(ifs + prev_cnt, 0,
			       (cnt - prev_cnt) * sizeof(*ifs));
		info->ifs = ifs;
		info->size = cnt;
	}

	return TEE_SUCCESS;
}

static void fill_ifs(vaddr_t va, size_t idx, struct ta_elf *elf, bool is_32bit)
{
	struct __init_fini_info32 *info32 = (struct __init_fini_info32 *)va;
	struct __init_fini_info *info = (struct __init_fini_info *)va;
	struct __init_fini32 *ifs32 = NULL;
	struct __init_fini *ifs = NULL;
	size_t init_cnt = 0;
	size_t fini_cnt = 0;
	vaddr_t init = 0;
	vaddr_t fini = 0;

	if (is_32bit) {
		assert(idx < info32->size);
		ifs32 = &((struct __init_fini32 *)(vaddr_t)info32->ifs)[idx];

		if (ifs32->flags & __IFS_VALID)
			return;

		elf_get_init_fini_array(elf, &init, &init_cnt, &fini,
					&fini_cnt);

		ifs32->init = (uint32_t)init;
		ifs32->init_size = init_cnt;

		ifs32->fini = (uint32_t)fini;
		ifs32->fini_size = fini_cnt;

		ifs32->flags |= __IFS_VALID;
	} else {
		assert(idx < info->size);
		ifs = &info->ifs[idx];

		if (ifs->flags & __IFS_VALID)
			return;

		elf_get_init_fini_array(elf, &init, &init_cnt, &fini,
					&fini_cnt);

		ifs->init = (void (**)(void))init;
		ifs->init_size = init_cnt;

		ifs->fini = (void (**)(void))fini;
		ifs->fini_size = fini_cnt;

		ifs->flags |= __IFS_VALID;
	}
}

/*
 * Set or update __init_fini_info in the TA with information from the ELF
 * queue
 */
TEE_Result ta_elf_set_init_fini_info_compat(bool is_32bit)
{
	struct __init_fini_info *info = NULL;
	TEE_Result res = TEE_SUCCESS;
	struct ta_elf *elf = NULL;
	vaddr_t info_va = 0;
	size_t cnt = 0;

	res = ta_elf_resolve_sym("__init_fini_info", &info_va, NULL, NULL);
	if (res) {
		if (res == TEE_ERROR_ITEM_NOT_FOUND) {
			/*
			 * Not an error, only TAs linked against libutee from
			 * OP-TEE 3.9.0 have this symbol.
			 */
			return TEE_SUCCESS;
		}
		return res;
	}
	assert(info_va);

	info = (struct __init_fini_info *)info_va;
	if (info->reserved)
		return TEE_ERROR_NOT_SUPPORTED;

	TAILQ_FOREACH(elf, &main_elf_queue, link)
		cnt++;

	/* Queue has at least one file (main) */
	assert(cnt);

	res = realloc_ifs(info_va, cnt, is_32bit);
	if (res)
		goto err;

	cnt = 0;
	TAILQ_FOREACH(elf, &main_elf_queue, link) {
		fill_ifs(info_va, cnt, elf, is_32bit);
		cnt++;
	}

	return TEE_SUCCESS;
err:
	free(info);
	return res;
}

static TEE_Result realloc_elf_phdr_info(vaddr_t va, size_t cnt, bool is_32bit)
{
	struct __elf_phdr_info32 *info32 = (struct __elf_phdr_info32 *)va;
	struct __elf_phdr_info *info = (struct __elf_phdr_info *)va;
	struct dl_phdr_info32 *dlpi32 = NULL;
	struct dl_phdr_info *dlpi = NULL;
	size_t prev_cnt = 0;
	void *ptr = NULL;

	if (is_32bit) {
		ptr = (void *)(vaddr_t)info32->dlpi;
		ptr = realloc(ptr, cnt * sizeof(*dlpi32));
		if (!ptr)
			return TEE_ERROR_OUT_OF_MEMORY;
		dlpi32 = ptr;
		prev_cnt = info32->count;
		if (cnt > prev_cnt)
			memset(dlpi32 + prev_cnt, 0,
			       (cnt - prev_cnt) * sizeof(*dlpi32));
		info32->dlpi = (uint32_t)(vaddr_t)dlpi32;
		info32->count = cnt;
	} else {
		ptr = realloc(info->dlpi, cnt * sizeof(*dlpi));
		if (!ptr)
			return TEE_ERROR_OUT_OF_MEMORY;
		dlpi = ptr;
		prev_cnt = info->count;
		if (cnt > prev_cnt)
			memset(dlpi + prev_cnt, 0,
			       (cnt - prev_cnt) * sizeof(*dlpi));
		info->dlpi = dlpi;
		info->count = cnt;
	}

	return TEE_SUCCESS;
}

static void fill_elf_phdr_info(vaddr_t va, size_t idx, struct ta_elf *elf,
			       bool is_32bit)
{
	struct __elf_phdr_info32 *info32 = (struct __elf_phdr_info32 *)va;
	struct __elf_phdr_info *info = (struct __elf_phdr_info *)va;
	struct dl_phdr_info32 *dlpi32 = NULL;
	struct dl_phdr_info *dlpi = NULL;

	if (is_32bit) {
		assert(idx < info32->count);
		dlpi32 = (struct dl_phdr_info32 *)(vaddr_t)info32->dlpi + idx;

		dlpi32->dlpi_addr = elf->load_addr;
		if (elf->soname)
			dlpi32->dlpi_name = (vaddr_t)elf->soname;
		else
			dlpi32->dlpi_name = (vaddr_t)&info32->zero;
		dlpi32->dlpi_phdr = (vaddr_t)elf->phdr;
		dlpi32->dlpi_phnum = elf->e_phnum;
		dlpi32->dlpi_adds = 1; /* No unloading on dlclose() currently */
		dlpi32->dlpi_subs = 0; /* No unloading on dlclose() currently */
		dlpi32->dlpi_tls_modid = elf->tls_mod_id;
		dlpi32->dlpi_tls_data = elf->tls_start;
	} else {
		assert(idx < info->count);
		dlpi = info->dlpi + idx;

		dlpi->dlpi_addr = elf->load_addr;
		if (elf->soname)
			dlpi->dlpi_name = elf->soname;
		else
			dlpi->dlpi_name = &info32->zero;
		dlpi->dlpi_phdr = elf->phdr;
		dlpi->dlpi_phnum = elf->e_phnum;
		dlpi->dlpi_adds = 1; /* No unloading on dlclose() currently */
		dlpi->dlpi_subs = 0; /* No unloading on dlclose() currently */
		dlpi->dlpi_tls_modid = elf->tls_mod_id;
		dlpi->dlpi_tls_data = (void *)elf->tls_start;
	}
}

/* Set or update __elf_phdr_info in the TA with information from the ELF queue */
TEE_Result ta_elf_set_elf_phdr_info(bool is_32bit)
{
	struct __elf_phdr_info *info = NULL;
	TEE_Result res = TEE_SUCCESS;
	struct ta_elf *elf = NULL;
	vaddr_t info_va = 0;
	size_t cnt = 0;

	res = ta_elf_resolve_sym("__elf_phdr_info", &info_va, NULL, NULL);
	if (res) {
		if (res == TEE_ERROR_ITEM_NOT_FOUND) {
			/* Older TA */
			return TEE_SUCCESS;
		}
		return res;
	}
	assert(info_va);

	info = (struct __elf_phdr_info *)info_va;
	if (info->reserved)
		return TEE_ERROR_NOT_SUPPORTED;

	TAILQ_FOREACH(elf, &main_elf_queue, link)
		cnt++;

	res = realloc_elf_phdr_info(info_va, cnt, is_32bit);
	if (res)
		return res;

	cnt = 0;
	TAILQ_FOREACH(elf, &main_elf_queue, link) {
		fill_elf_phdr_info(info_va, cnt, elf, is_32bit);
		cnt++;
	}

	return TEE_SUCCESS;
}