// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2019, Linaro Limited
 * Copyright (c) 2020-2023, Arm Limited
 */

#include <assert.h>
#include <config.h>
#include <confine_array_index.h>
#include <ctype.h>
#include <elf32.h>
#include <elf64.h>
#include <elf_common.h>
#include <ldelf.h>
#include <link.h>
#include <stdio.h>
#include <stdlib.h>
#include <string_ext.h>
#include <string.h>
#include <tee_api_types.h>
#include <tee_internal_api_extensions.h>
#include <unw/unwind.h>
#include <user_ta_header.h>
#include <util.h>

#include "sys.h"
#include "ta_elf.h"

/*
 * Layout of a 32-bit struct dl_phdr_info for a 64-bit ldelf to access a
 * 32-bit TA
 */
struct dl_phdr_info32 {
	uint32_t dlpi_addr;
	uint32_t dlpi_name;
	uint32_t dlpi_phdr;
	uint16_t dlpi_phnum;
	uint64_t dlpi_adds;
	uint64_t dlpi_subs;
	uint32_t dlpi_tls_modid;
	uint32_t dlpi_tls_data;
};

static vaddr_t ta_stack;
static vaddr_t ta_stack_size;

struct ta_elf_queue main_elf_queue = TAILQ_HEAD_INITIALIZER(main_elf_queue);

/*
 * Main application is always ID 1, shared libraries with TLS take IDs 2 and
 * above
 */
static void assign_tls_mod_id(struct ta_elf *elf)
{
	static size_t last_tls_mod_id = 1;

	if (elf->is_main)
		assert(last_tls_mod_id == 1); /* Main always comes first */
	elf->tls_mod_id = last_tls_mod_id++;
}

static struct ta_elf *queue_elf_helper(const TEE_UUID *uuid)
{
	struct ta_elf *elf = calloc(1, sizeof(*elf));

	if (!elf)
		return NULL;

	TAILQ_INIT(&elf->segs);

	elf->uuid = *uuid;
	TAILQ_INSERT_TAIL(&main_elf_queue, elf, link);
	return elf;
}

static struct ta_elf *queue_elf(const TEE_UUID *uuid)
{
	struct ta_elf *elf = ta_elf_find_elf(uuid);

	if (elf)
		return NULL;

	elf = queue_elf_helper(uuid);
	if (!elf)
		err(TEE_ERROR_OUT_OF_MEMORY, "queue_elf_helper");

	return elf;
}

struct ta_elf *ta_elf_find_elf(const TEE_UUID *uuid)
{
	struct ta_elf *elf = NULL;

	TAILQ_FOREACH(elf, &main_elf_queue, link)
		if (!memcmp(uuid, &elf->uuid, sizeof(*uuid)))
			return elf;

	return NULL;
}

#if defined(ARM32) || defined(ARM64)
static TEE_Result e32_parse_ehdr(struct ta_elf *elf, Elf32_Ehdr *ehdr)
{
	if (ehdr->e_ident[EI_VERSION] != EV_CURRENT ||
	    ehdr->e_ident[EI_CLASS] != ELFCLASS32 ||
	    ehdr->e_ident[EI_DATA] != ELFDATA2LSB ||
	    ehdr->e_ident[EI_OSABI] != ELFOSABI_NONE ||
	    ehdr->e_type != ET_DYN || ehdr->e_machine != EM_ARM ||
	    (ehdr->e_flags & EF_ARM_ABIMASK) != EF_ARM_ABI_VERSION ||
#ifndef CFG_WITH_VFP
	    (ehdr->e_flags & EF_ARM_ABI_FLOAT_HARD) ||
#endif
	    ehdr->e_phentsize != sizeof(Elf32_Phdr) ||
	    ehdr->e_shentsize != sizeof(Elf32_Shdr))
		return TEE_ERROR_BAD_FORMAT;

	elf->is_32bit = true;
	elf->e_entry = ehdr->e_entry;
	elf->e_phoff = ehdr->e_phoff;
	elf->e_shoff = ehdr->e_shoff;
	elf->e_phnum = ehdr->e_phnum;
	elf->e_shnum = ehdr->e_shnum;
	elf->e_phentsize = ehdr->e_phentsize;
	elf->e_shentsize = ehdr->e_shentsize;

	return TEE_SUCCESS;
}
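
/*
 * For reference (informative, mirroring the checks above): the identity
 * bytes accepted for a 32-bit Arm TA are
 *
 *   e_ident[EI_CLASS]   == ELFCLASS32
 *   e_ident[EI_DATA]    == ELFDATA2LSB   (little-endian)
 *   e_ident[EI_VERSION] == EV_CURRENT
 *   e_ident[EI_OSABI]   == ELFOSABI_NONE
 *
 * while the ELF magic itself (e_ident[EI_MAG0..EI_MAG3]) is checked
 * separately with IS_ELF() in init_elf() below.
 */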

#ifdef ARM64
static TEE_Result e64_parse_ehdr(struct ta_elf *elf, Elf64_Ehdr *ehdr)
{
	if (ehdr->e_ident[EI_VERSION] != EV_CURRENT ||
	    ehdr->e_ident[EI_CLASS] != ELFCLASS64 ||
	    ehdr->e_ident[EI_DATA] != ELFDATA2LSB ||
	    ehdr->e_ident[EI_OSABI] != ELFOSABI_NONE ||
	    ehdr->e_type != ET_DYN || ehdr->e_machine != EM_AARCH64 ||
	    ehdr->e_flags || ehdr->e_phentsize != sizeof(Elf64_Phdr) ||
	    ehdr->e_shentsize != sizeof(Elf64_Shdr))
		return TEE_ERROR_BAD_FORMAT;

	elf->is_32bit = false;
	elf->e_entry = ehdr->e_entry;
	elf->e_phoff = ehdr->e_phoff;
	elf->e_shoff = ehdr->e_shoff;
	elf->e_phnum = ehdr->e_phnum;
	elf->e_shnum = ehdr->e_shnum;
	elf->e_phentsize = ehdr->e_phentsize;
	elf->e_shentsize = ehdr->e_shentsize;

	return TEE_SUCCESS;
}
#else /*ARM64*/
static TEE_Result e64_parse_ehdr(struct ta_elf *elf __unused,
				 Elf64_Ehdr *ehdr __unused)
{
	return TEE_ERROR_NOT_SUPPORTED;
}
#endif /*ARM64*/
#endif /* ARM32 || ARM64 */

#if defined(RV64)
static TEE_Result e32_parse_ehdr(struct ta_elf *elf __unused,
				 Elf32_Ehdr *ehdr __unused)
{
	return TEE_ERROR_BAD_FORMAT;
}

static TEE_Result e64_parse_ehdr(struct ta_elf *elf, Elf64_Ehdr *ehdr)
{
	if (ehdr->e_ident[EI_VERSION] != EV_CURRENT ||
	    ehdr->e_ident[EI_CLASS] != ELFCLASS64 ||
	    ehdr->e_ident[EI_DATA] != ELFDATA2LSB ||
	    ehdr->e_ident[EI_OSABI] != ELFOSABI_NONE ||
	    ehdr->e_type != ET_DYN || ehdr->e_machine != EM_RISCV ||
	    ehdr->e_phentsize != sizeof(Elf64_Phdr) ||
	    ehdr->e_shentsize != sizeof(Elf64_Shdr))
		return TEE_ERROR_BAD_FORMAT;

	elf->is_32bit = false;
	elf->e_entry = ehdr->e_entry;
	elf->e_phoff = ehdr->e_phoff;
	elf->e_shoff = ehdr->e_shoff;
	elf->e_phnum = ehdr->e_phnum;
	elf->e_shnum = ehdr->e_shnum;
	elf->e_phentsize = ehdr->e_phentsize;
	elf->e_shentsize = ehdr->e_shentsize;

	return TEE_SUCCESS;
}
#endif /* RV64 */

static void check_phdr_in_range(struct ta_elf *elf, unsigned int type,
				vaddr_t addr, size_t memsz)
{
	vaddr_t max_addr = 0;

	if (ADD_OVERFLOW(addr, memsz, &max_addr))
		err(TEE_ERROR_BAD_FORMAT, "Program header %#x overflow", type);

	/*
	 * elf->load_addr and elf->max_addr are both using the
	 * final virtual addresses, while this program header is
	 * relative to 0.
	 */
	if (max_addr > elf->max_addr - elf->load_addr)
		err(TEE_ERROR_BAD_FORMAT, "Program header %#x out of bounds",
		    type);
}

static void read_dyn(struct ta_elf *elf, vaddr_t addr,
		     size_t idx, unsigned int *tag, size_t *val)
{
	if (elf->is_32bit) {
		Elf32_Dyn *dyn = (Elf32_Dyn *)(addr + elf->load_addr);

		*tag = dyn[idx].d_tag;
		*val = dyn[idx].d_un.d_val;
	} else {
		Elf64_Dyn *dyn = (Elf64_Dyn *)(addr + elf->load_addr);

		*tag = dyn[idx].d_tag;
		*val = dyn[idx].d_un.d_val;
	}
}

static void check_range(struct ta_elf *elf, const char *name, const void *ptr,
			size_t sz)
{
	size_t max_addr = 0;

	if ((vaddr_t)ptr < elf->load_addr)
		err(TEE_ERROR_BAD_FORMAT, "%s %p out of range", name, ptr);

	if (ADD_OVERFLOW((vaddr_t)ptr, sz, &max_addr))
		err(TEE_ERROR_BAD_FORMAT, "%s range overflow", name);

	if (max_addr > elf->max_addr)
		err(TEE_ERROR_BAD_FORMAT,
		    "%s %p..%#zx out of range", name, ptr, max_addr);
}
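
/*
 * Illustrative pattern (not a new check): any table parsed out of the
 * binary is validated with overflow-checked arithmetic before use, e.g.
 * a hypothetical table of @nmemb entries of @entsize bytes at @ptr:
 *
 *   size_t sz = 0;
 *
 *   if (MUL_OVERFLOW(nmemb, entsize, &sz))
 *           err(TEE_ERROR_BAD_FORMAT, "table size overflow");
 *   check_range(elf, "table", ptr, sz);
 *
 * check_hashtab() below is an instance of this pattern.
 */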

static void check_hashtab(struct ta_elf *elf, void *ptr, size_t num_buckets,
			  size_t num_chains)
{
	/*
	 * Starting from 2 as the first two words are mandatory and hold
	 * num_buckets and num_chains. So this function is called twice,
	 * first to see that there's indeed room for num_buckets and
	 * num_chains and then to see that all of it fits.
	 * See http://www.sco.com/developers/gabi/latest/ch5.dynamic.html#hash
	 */
	size_t num_words = 2;
	size_t sz = 0;

	if (!IS_ALIGNED_WITH_TYPE(ptr, uint32_t))
		err(TEE_ERROR_BAD_FORMAT, "Bad alignment of DT_HASH %p", ptr);

	if (ADD_OVERFLOW(num_words, num_buckets, &num_words) ||
	    ADD_OVERFLOW(num_words, num_chains, &num_words) ||
	    MUL_OVERFLOW(num_words, sizeof(uint32_t), &sz))
		err(TEE_ERROR_BAD_FORMAT, "DT_HASH overflow");

	check_range(elf, "DT_HASH", ptr, sz);
}

static void check_gnu_hashtab(struct ta_elf *elf, void *ptr)
{
	struct gnu_hashtab *h = ptr;
	size_t num_words = 4; /* nbuckets, symoffset, bloom_size, bloom_shift */
	size_t bloom_words = 0;
	size_t sz = 0;

	if (!IS_ALIGNED_WITH_TYPE(ptr, uint32_t))
		err(TEE_ERROR_BAD_FORMAT, "Bad alignment of DT_GNU_HASH %p",
		    ptr);

	if (elf->gnu_hashtab_size < sizeof(*h))
		err(TEE_ERROR_BAD_FORMAT, "DT_GNU_HASH too small");

	/* Check validity of h->nbuckets and h->bloom_size */

	if (elf->is_32bit)
		bloom_words = h->bloom_size;
	else
		bloom_words = h->bloom_size * 2;
	if (ADD_OVERFLOW(num_words, h->nbuckets, &num_words) ||
	    ADD_OVERFLOW(num_words, bloom_words, &num_words) ||
	    MUL_OVERFLOW(num_words, sizeof(uint32_t), &sz) ||
	    sz > elf->gnu_hashtab_size)
		err(TEE_ERROR_BAD_FORMAT, "DT_GNU_HASH overflow");
}

static void save_hashtab(struct ta_elf *elf)
{
	uint32_t *hashtab = NULL;
	size_t n = 0;

	if (elf->is_32bit) {
		Elf32_Shdr *shdr = elf->shdr;

		for (n = 0; n < elf->e_shnum; n++) {
			void *addr = (void *)(vaddr_t)(shdr[n].sh_addr +
						       elf->load_addr);

			if (shdr[n].sh_type == SHT_HASH) {
				elf->hashtab = addr;
			} else if (shdr[n].sh_type == SHT_GNU_HASH) {
				elf->gnu_hashtab = addr;
				elf->gnu_hashtab_size = shdr[n].sh_size;
			}
		}
	} else {
		Elf64_Shdr *shdr = elf->shdr;

		for (n = 0; n < elf->e_shnum; n++) {
			void *addr = (void *)(vaddr_t)(shdr[n].sh_addr +
						       elf->load_addr);

			if (shdr[n].sh_type == SHT_HASH) {
				elf->hashtab = addr;
			} else if (shdr[n].sh_type == SHT_GNU_HASH) {
				elf->gnu_hashtab = addr;
				elf->gnu_hashtab_size = shdr[n].sh_size;
			}
		}
	}

	if (elf->hashtab) {
		check_hashtab(elf, elf->hashtab, 0, 0);
		hashtab = elf->hashtab;
		check_hashtab(elf, elf->hashtab, hashtab[0], hashtab[1]);
	}
	if (elf->gnu_hashtab)
		check_gnu_hashtab(elf, elf->gnu_hashtab);
}
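
/*
 * Illustrative sketch, not used by ldelf: the classic SysV hash that the
 * DT_HASH table validated above is indexed with. A lookup would start at
 * bucket[elf_sysv_hash(name) % nbuckets] where nbuckets is hashtab[0];
 * the real symbol resolution lives elsewhere in ldelf.
 */
static unsigned long __maybe_unused elf_sysv_hash(const char *name)
{
	const unsigned char *p = (const unsigned char *)name;
	unsigned long h = 0;
	unsigned long g = 0;

	while (*p) {
		h = (h << 4) + *p++;
		g = h & 0xf0000000;
		if (g)
			h ^= g >> 24;
		h &= ~g;
	}

	return h;
}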

static void save_soname_from_segment(struct ta_elf *elf, unsigned int type,
				     vaddr_t addr, size_t memsz)
{
	size_t dyn_entsize = 0;
	size_t num_dyns = 0;
	size_t n = 0;
	unsigned int tag = 0;
	size_t val = 0;
	char *str_tab = NULL;

	if (type != PT_DYNAMIC)
		return;

	if (elf->is_32bit)
		dyn_entsize = sizeof(Elf32_Dyn);
	else
		dyn_entsize = sizeof(Elf64_Dyn);

	assert(!(memsz % dyn_entsize));
	num_dyns = memsz / dyn_entsize;

	for (n = 0; n < num_dyns; n++) {
		read_dyn(elf, addr, n, &tag, &val);
		if (tag == DT_STRTAB) {
			str_tab = (char *)(val + elf->load_addr);
			break;
		}
	}
	for (n = 0; n < num_dyns; n++) {
		read_dyn(elf, addr, n, &tag, &val);
		if (tag == DT_SONAME) {
			elf->soname = str_tab + val;
			break;
		}
	}
}

static void save_soname(struct ta_elf *elf)
{
	size_t n = 0;

	if (elf->is_32bit) {
		Elf32_Phdr *phdr = elf->phdr;

		for (n = 0; n < elf->e_phnum; n++)
			save_soname_from_segment(elf, phdr[n].p_type,
						 phdr[n].p_vaddr,
						 phdr[n].p_memsz);
	} else {
		Elf64_Phdr *phdr = elf->phdr;

		for (n = 0; n < elf->e_phnum; n++)
			save_soname_from_segment(elf, phdr[n].p_type,
						 phdr[n].p_vaddr,
						 phdr[n].p_memsz);
	}
}

static void e32_save_symtab(struct ta_elf *elf, size_t tab_idx)
{
	Elf32_Shdr *shdr = elf->shdr;
	size_t str_idx = shdr[tab_idx].sh_link;

	elf->dynsymtab = (void *)(shdr[tab_idx].sh_addr + elf->load_addr);
	if (!IS_ALIGNED_WITH_TYPE(elf->dynsymtab, Elf32_Sym))
		err(TEE_ERROR_BAD_FORMAT, "Bad alignment of dynsymtab %p",
		    elf->dynsymtab);
	check_range(elf, "Dynsymtab", elf->dynsymtab, shdr[tab_idx].sh_size);

	if (shdr[tab_idx].sh_size % sizeof(Elf32_Sym))
		err(TEE_ERROR_BAD_FORMAT,
		    "Size of dynsymtab not an even multiple of Elf32_Sym");
	elf->num_dynsyms = shdr[tab_idx].sh_size / sizeof(Elf32_Sym);

	if (str_idx >= elf->e_shnum)
		err(TEE_ERROR_BAD_FORMAT, "Dynstr section index out of range");
	elf->dynstr = (void *)(shdr[str_idx].sh_addr + elf->load_addr);
	check_range(elf, "Dynstr", elf->dynstr, shdr[str_idx].sh_size);

	elf->dynstr_size = shdr[str_idx].sh_size;
}

static void e64_save_symtab(struct ta_elf *elf, size_t tab_idx)
{
	Elf64_Shdr *shdr = elf->shdr;
	size_t str_idx = shdr[tab_idx].sh_link;

	elf->dynsymtab = (void *)(vaddr_t)(shdr[tab_idx].sh_addr +
					   elf->load_addr);

	if (!IS_ALIGNED_WITH_TYPE(elf->dynsymtab, Elf64_Sym))
		err(TEE_ERROR_BAD_FORMAT, "Bad alignment of .dynsym/DYNSYM %p",
		    elf->dynsymtab);
	check_range(elf, ".dynsym/DYNSYM", elf->dynsymtab,
		    shdr[tab_idx].sh_size);

	if (shdr[tab_idx].sh_size % sizeof(Elf64_Sym))
		err(TEE_ERROR_BAD_FORMAT,
		    "Size of .dynsym/DYNSYM not an even multiple of Elf64_Sym");
	elf->num_dynsyms = shdr[tab_idx].sh_size / sizeof(Elf64_Sym);

	if (str_idx >= elf->e_shnum)
		err(TEE_ERROR_BAD_FORMAT,
		    ".dynstr/STRTAB section index out of range");
	elf->dynstr = (void *)(vaddr_t)(shdr[str_idx].sh_addr + elf->load_addr);
	check_range(elf, ".dynstr/STRTAB", elf->dynstr, shdr[str_idx].sh_size);

	elf->dynstr_size = shdr[str_idx].sh_size;
}

static void save_symtab(struct ta_elf *elf)
{
	size_t n = 0;

	if (elf->is_32bit) {
		Elf32_Shdr *shdr = elf->shdr;

		for (n = 0; n < elf->e_shnum; n++) {
			if (shdr[n].sh_type == SHT_DYNSYM) {
				e32_save_symtab(elf, n);
				break;
			}
		}
	} else {
		Elf64_Shdr *shdr = elf->shdr;

		for (n = 0; n < elf->e_shnum; n++) {
			if (shdr[n].sh_type == SHT_DYNSYM) {
				e64_save_symtab(elf, n);
				break;
			}
		}
	}

	save_hashtab(elf);
	save_soname(elf);
}
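
/*
 * Illustrative sketch, not used by ldelf: how the tables saved by
 * save_symtab() relate. A brute-force lookup scans dynsymtab and compares
 * names through dynstr (the real resolver uses the hash tables instead).
 * Assumes a 64-bit TA.
 */
static Elf64_Sym * __maybe_unused find_sym_linear(struct ta_elf *elf,
						  const char *name)
{
	Elf64_Sym *sym = elf->dynsymtab;
	size_t n = 0;

	assert(!elf->is_32bit);
	for (n = 0; n < elf->num_dynsyms; n++)
		if (sym[n].st_name < elf->dynstr_size &&
		    !strcmp(elf->dynstr + sym[n].st_name, name))
			return sym + n;

	return NULL;
}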
491 */ 492 if (!elf->is_main) 493 flags |= LDELF_MAP_FLAG_EXECUTABLE; 494 res = sys_map_ta_bin(&va, SMALL_PAGE_SIZE, flags, elf->handle, 0, 0, 0); 495 if (res) 496 err(res, "sys_map_ta_bin"); 497 elf->ehdr_addr = va; 498 if (!elf->is_main) { 499 elf->load_addr = va; 500 elf->max_addr = va + SMALL_PAGE_SIZE; 501 elf->max_offs = SMALL_PAGE_SIZE; 502 } 503 504 if (!IS_ELF(*(Elf32_Ehdr *)va)) 505 err(TEE_ERROR_BAD_FORMAT, "TA is not an ELF"); 506 507 res = e32_parse_ehdr(elf, (void *)va); 508 if (res == TEE_ERROR_BAD_FORMAT) 509 res = e64_parse_ehdr(elf, (void *)va); 510 if (res) 511 err(res, "Cannot parse ELF"); 512 513 if (MUL_OVERFLOW(elf->e_phnum, elf->e_phentsize, &sz) || 514 ADD_OVERFLOW(sz, elf->e_phoff, &sz)) 515 err(TEE_ERROR_BAD_FORMAT, "Program headers size overflow"); 516 517 if (sz > SMALL_PAGE_SIZE) 518 err(TEE_ERROR_NOT_SUPPORTED, "Cannot read program headers"); 519 520 elf->phdr = (void *)(va + elf->e_phoff); 521 } 522 523 static size_t roundup(size_t v) 524 { 525 return ROUNDUP(v, SMALL_PAGE_SIZE); 526 } 527 528 static size_t rounddown(size_t v) 529 { 530 return ROUNDDOWN(v, SMALL_PAGE_SIZE); 531 } 532 533 static void add_segment(struct ta_elf *elf, size_t offset, size_t vaddr, 534 size_t filesz, size_t memsz, size_t flags, size_t align) 535 { 536 struct segment *seg = calloc(1, sizeof(*seg)); 537 538 if (!seg) 539 err(TEE_ERROR_OUT_OF_MEMORY, "calloc"); 540 541 if (memsz < filesz) 542 err(TEE_ERROR_BAD_FORMAT, "Memsz smaller than filesz"); 543 544 seg->offset = offset; 545 seg->vaddr = vaddr; 546 seg->filesz = filesz; 547 seg->memsz = memsz; 548 seg->flags = flags; 549 seg->align = align; 550 551 TAILQ_INSERT_TAIL(&elf->segs, seg, link); 552 } 553 554 static void parse_load_segments(struct ta_elf *elf) 555 { 556 size_t n = 0; 557 558 if (elf->is_32bit) { 559 Elf32_Phdr *phdr = elf->phdr; 560 561 for (n = 0; n < elf->e_phnum; n++) 562 if (phdr[n].p_type == PT_LOAD) { 563 add_segment(elf, phdr[n].p_offset, 564 phdr[n].p_vaddr, phdr[n].p_filesz, 565 phdr[n].p_memsz, phdr[n].p_flags, 566 phdr[n].p_align); 567 } else if (phdr[n].p_type == PT_ARM_EXIDX) { 568 elf->exidx_start = phdr[n].p_vaddr; 569 elf->exidx_size = phdr[n].p_filesz; 570 } else if (phdr[n].p_type == PT_TLS) { 571 assign_tls_mod_id(elf); 572 } 573 } else { 574 Elf64_Phdr *phdr = elf->phdr; 575 576 for (n = 0; n < elf->e_phnum; n++) 577 if (phdr[n].p_type == PT_LOAD) { 578 add_segment(elf, phdr[n].p_offset, 579 phdr[n].p_vaddr, phdr[n].p_filesz, 580 phdr[n].p_memsz, phdr[n].p_flags, 581 phdr[n].p_align); 582 } else if (phdr[n].p_type == PT_TLS) { 583 elf->tls_start = phdr[n].p_vaddr; 584 elf->tls_filesz = phdr[n].p_filesz; 585 elf->tls_memsz = phdr[n].p_memsz; 586 } else if (IS_ENABLED(CFG_TA_BTI) && 587 phdr[n].p_type == PT_GNU_PROPERTY) { 588 elf->prop_start = phdr[n].p_vaddr; 589 elf->prop_align = phdr[n].p_align; 590 elf->prop_memsz = phdr[n].p_memsz; 591 } 592 } 593 } 594 595 static void copy_remapped_to(struct ta_elf *elf, const struct segment *seg) 596 { 597 uint8_t *dst = (void *)(seg->vaddr + elf->load_addr); 598 size_t n = 0; 599 size_t offs = seg->offset; 600 size_t num_bytes = seg->filesz; 601 602 if (offs < elf->max_offs) { 603 n = MIN(elf->max_offs - offs, num_bytes); 604 memcpy(dst, (void *)(elf->max_addr + offs - elf->max_offs), n); 605 dst += n; 606 offs += n; 607 num_bytes -= n; 608 } 609 610 if (num_bytes) { 611 TEE_Result res = sys_copy_from_ta_bin(dst, num_bytes, 612 elf->handle, offs); 613 614 if (res) 615 err(res, "sys_copy_from_ta_bin"); 616 elf->max_offs += offs; 617 } 618 } 619 620 static 

static void adjust_segments(struct ta_elf *elf)
{
	struct segment *seg = NULL;
	struct segment *prev_seg = NULL;
	size_t prev_end_addr = 0;
	size_t align = 0;
	size_t mask = 0;

	/* Sanity check */
	TAILQ_FOREACH(seg, &elf->segs, link) {
		size_t dummy __maybe_unused = 0;

		assert(seg->align >= SMALL_PAGE_SIZE);
		assert(!ADD_OVERFLOW(seg->vaddr, seg->memsz, &dummy));
		assert(seg->filesz <= seg->memsz);
		assert((seg->offset & SMALL_PAGE_MASK) ==
		       (seg->vaddr & SMALL_PAGE_MASK));

		prev_seg = TAILQ_PREV(seg, segment_head, link);
		if (prev_seg) {
			assert(seg->vaddr >= prev_seg->vaddr + prev_seg->memsz);
			assert(seg->offset >=
			       prev_seg->offset + prev_seg->filesz);
		}
		if (!align)
			align = seg->align;
		assert(align == seg->align);
	}

	mask = align - 1;

	seg = TAILQ_FIRST(&elf->segs);
	if (seg)
		seg = TAILQ_NEXT(seg, link);
	while (seg) {
		prev_seg = TAILQ_PREV(seg, segment_head, link);
		prev_end_addr = prev_seg->vaddr + prev_seg->memsz;

		/*
		 * This segment may overlap with the last "page" in the
		 * previous segment in two different ways:
		 * 1. Virtual address (and offset) overlaps =>
		 *    Permissions need to be merged. The SMALL_PAGE_MASK
		 *    bits of vaddr and offset must be equal so that the
		 *    segment lines up with the previous segment.
		 *
		 * 2. Only offset overlaps =>
		 *    The same page in the ELF is mapped at two different
		 *    virtual addresses. As a limitation this segment must
		 *    be mapped as writeable.
		 */

		/* Case 1. */
		if (rounddown(seg->vaddr) < prev_end_addr) {
			assert((seg->vaddr & mask) == (seg->offset & mask));
			assert(prev_seg->memsz == prev_seg->filesz);

			/*
			 * Merge the segments and their permissions.
			 * Note that there may be a small hole between the
			 * two segments.
			 */
			prev_seg->filesz = seg->vaddr + seg->filesz -
					   prev_seg->vaddr;
			prev_seg->memsz = seg->vaddr + seg->memsz -
					  prev_seg->vaddr;
			prev_seg->flags |= seg->flags;

			TAILQ_REMOVE(&elf->segs, seg, link);
			free(seg);
			seg = TAILQ_NEXT(prev_seg, link);
			continue;
		}

		/* Case 2. */
		if ((seg->offset & mask) &&
		    rounddown(seg->offset) <
		    (prev_seg->offset + prev_seg->filesz)) {
			assert(seg->flags & PF_W);
			seg->remapped_writeable = true;
		}

		/*
		 * No overlap, but we may need to align address, offset and
		 * size.
		 */
		seg->filesz += seg->vaddr - rounddown(seg->vaddr);
		seg->memsz += seg->vaddr - rounddown(seg->vaddr);
		seg->vaddr = rounddown(seg->vaddr);
		seg->offset = rounddown(seg->offset);
		seg = TAILQ_NEXT(seg, link);
	}
}
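
/*
 * Worked example for case 1 above (illustrative numbers, 4 KiB pages):
 * a previous segment with vaddr 0x1000 and memsz 0x1800 ends at 0x2800,
 * so a following segment with vaddr 0x2900 and offset 0x2900 shares that
 * last page since rounddown(0x2900) = 0x2000 < 0x2800. The two are merged
 * into one segment based at 0x1000 with the permission bits OR'ed; the
 * 0x100 byte hole at 0x2800..0x2900 simply becomes part of the merged
 * segment.
 */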
706 */ 707 seg->filesz += seg->vaddr - rounddown(seg->vaddr); 708 seg->memsz += seg->vaddr - rounddown(seg->vaddr); 709 seg->vaddr = rounddown(seg->vaddr); 710 seg->offset = rounddown(seg->offset); 711 seg = TAILQ_NEXT(seg, link); 712 } 713 714 } 715 716 static void populate_segments_legacy(struct ta_elf *elf) 717 { 718 TEE_Result res = TEE_SUCCESS; 719 struct segment *seg = NULL; 720 vaddr_t va = 0; 721 722 assert(elf->is_legacy); 723 TAILQ_FOREACH(seg, &elf->segs, link) { 724 struct segment *last_seg = TAILQ_LAST(&elf->segs, segment_head); 725 size_t pad_end = roundup(last_seg->vaddr + last_seg->memsz - 726 seg->vaddr - seg->memsz); 727 size_t num_bytes = roundup(seg->memsz); 728 729 if (!elf->load_addr) 730 va = 0; 731 else 732 va = seg->vaddr + elf->load_addr; 733 734 735 if (!(seg->flags & PF_R)) 736 err(TEE_ERROR_NOT_SUPPORTED, 737 "Segment must be readable"); 738 739 res = sys_map_zi(num_bytes, 0, &va, 0, pad_end); 740 if (res) 741 err(res, "sys_map_zi"); 742 res = sys_copy_from_ta_bin((void *)va, seg->filesz, 743 elf->handle, seg->offset); 744 if (res) 745 err(res, "sys_copy_from_ta_bin"); 746 747 if (!elf->load_addr) 748 elf->load_addr = va; 749 elf->max_addr = va + num_bytes; 750 elf->max_offs = seg->offset + seg->filesz; 751 } 752 } 753 754 static size_t get_pad_begin(void) 755 { 756 #ifdef CFG_TA_ASLR 757 size_t min = CFG_TA_ASLR_MIN_OFFSET_PAGES; 758 size_t max = CFG_TA_ASLR_MAX_OFFSET_PAGES; 759 TEE_Result res = TEE_SUCCESS; 760 uint32_t rnd32 = 0; 761 size_t rnd = 0; 762 763 COMPILE_TIME_ASSERT(CFG_TA_ASLR_MIN_OFFSET_PAGES < 764 CFG_TA_ASLR_MAX_OFFSET_PAGES); 765 if (max > min) { 766 res = sys_gen_random_num(&rnd32, sizeof(rnd32)); 767 if (res) { 768 DMSG("Random read failed: %#"PRIx32, res); 769 return min * SMALL_PAGE_SIZE; 770 } 771 rnd = rnd32 % (max - min); 772 } 773 774 return (min + rnd) * SMALL_PAGE_SIZE; 775 #else /*!CFG_TA_ASLR*/ 776 return 0; 777 #endif /*!CFG_TA_ASLR*/ 778 } 779 780 static void populate_segments(struct ta_elf *elf) 781 { 782 TEE_Result res = TEE_SUCCESS; 783 struct segment *seg = NULL; 784 vaddr_t va = 0; 785 size_t pad_begin = 0; 786 787 assert(!elf->is_legacy); 788 TAILQ_FOREACH(seg, &elf->segs, link) { 789 struct segment *last_seg = TAILQ_LAST(&elf->segs, segment_head); 790 size_t pad_end = roundup(last_seg->vaddr + last_seg->memsz - 791 seg->vaddr - seg->memsz); 792 793 if (seg->remapped_writeable) { 794 size_t num_bytes = roundup(seg->vaddr + seg->memsz) - 795 rounddown(seg->vaddr); 796 797 assert(elf->load_addr); 798 va = rounddown(elf->load_addr + seg->vaddr); 799 assert(va >= elf->max_addr); 800 res = sys_map_zi(num_bytes, 0, &va, 0, pad_end); 801 if (res) 802 err(res, "sys_map_zi"); 803 804 copy_remapped_to(elf, seg); 805 elf->max_addr = va + num_bytes; 806 } else { 807 uint32_t flags = 0; 808 size_t filesz = seg->filesz; 809 size_t memsz = seg->memsz; 810 size_t offset = seg->offset; 811 size_t vaddr = seg->vaddr; 812 813 if (offset < elf->max_offs) { 814 /* 815 * We're in a load segment which overlaps 816 * with (or is covered by) the first page 817 * of a shared library. 818 */ 819 if (vaddr + filesz < SMALL_PAGE_SIZE) { 820 size_t num_bytes = 0; 821 822 /* 823 * If this segment is completely 824 * covered, take next. 825 */ 826 if (vaddr + memsz <= SMALL_PAGE_SIZE) 827 continue; 828 829 /* 830 * All data of the segment is 831 * loaded, but we need to zero 832 * extend it. 
833 */ 834 va = elf->max_addr; 835 num_bytes = roundup(vaddr + memsz) - 836 roundup(vaddr) - 837 SMALL_PAGE_SIZE; 838 assert(num_bytes); 839 res = sys_map_zi(num_bytes, 0, &va, 0, 840 0); 841 if (res) 842 err(res, "sys_map_zi"); 843 elf->max_addr = roundup(va + num_bytes); 844 continue; 845 } 846 847 /* Partial overlap, remove the first page. */ 848 vaddr += SMALL_PAGE_SIZE; 849 filesz -= SMALL_PAGE_SIZE; 850 memsz -= SMALL_PAGE_SIZE; 851 offset += SMALL_PAGE_SIZE; 852 } 853 854 if (!elf->load_addr) { 855 va = 0; 856 pad_begin = get_pad_begin(); 857 /* 858 * If mapping with pad_begin fails we'll 859 * retry without pad_begin, effectively 860 * disabling ASLR for the current ELF file. 861 */ 862 } else { 863 va = vaddr + elf->load_addr; 864 pad_begin = 0; 865 } 866 867 if (seg->flags & PF_W) 868 flags |= LDELF_MAP_FLAG_WRITEABLE; 869 else 870 flags |= LDELF_MAP_FLAG_SHAREABLE; 871 if (seg->flags & PF_X) 872 flags |= LDELF_MAP_FLAG_EXECUTABLE; 873 if (!(seg->flags & PF_R)) 874 err(TEE_ERROR_NOT_SUPPORTED, 875 "Segment must be readable"); 876 if (flags & LDELF_MAP_FLAG_WRITEABLE) { 877 res = sys_map_zi(memsz, 0, &va, pad_begin, 878 pad_end); 879 if (pad_begin && res == TEE_ERROR_OUT_OF_MEMORY) 880 res = sys_map_zi(memsz, 0, &va, 0, 881 pad_end); 882 if (res) 883 err(res, "sys_map_zi"); 884 res = sys_copy_from_ta_bin((void *)va, filesz, 885 elf->handle, offset); 886 if (res) 887 err(res, "sys_copy_from_ta_bin"); 888 } else { 889 if (filesz != memsz) 890 err(TEE_ERROR_BAD_FORMAT, 891 "Filesz and memsz mismatch"); 892 res = sys_map_ta_bin(&va, filesz, flags, 893 elf->handle, offset, 894 pad_begin, pad_end); 895 if (pad_begin && res == TEE_ERROR_OUT_OF_MEMORY) 896 res = sys_map_ta_bin(&va, filesz, flags, 897 elf->handle, 898 offset, 0, 899 pad_end); 900 if (res) 901 err(res, "sys_map_ta_bin"); 902 } 903 904 if (!elf->load_addr) 905 elf->load_addr = va; 906 elf->max_addr = roundup(va + memsz); 907 elf->max_offs += filesz; 908 } 909 } 910 } 911 912 static void ta_elf_add_bti(struct ta_elf *elf) 913 { 914 TEE_Result res = TEE_SUCCESS; 915 struct segment *seg = NULL; 916 uint32_t flags = LDELF_MAP_FLAG_EXECUTABLE | LDELF_MAP_FLAG_BTI; 917 918 TAILQ_FOREACH(seg, &elf->segs, link) { 919 vaddr_t va = elf->load_addr + seg->vaddr; 920 921 if (seg->flags & PF_X) { 922 res = sys_set_prot(va, seg->memsz, flags); 923 if (res) 924 err(res, "sys_set_prot"); 925 } 926 } 927 } 928 929 static void parse_property_segment(struct ta_elf *elf) 930 { 931 char *desc = NULL; 932 size_t align = elf->prop_align; 933 size_t desc_offset = 0; 934 size_t prop_offset = 0; 935 vaddr_t va = 0; 936 Elf_Note *note = NULL; 937 char *name = NULL; 938 939 if (!IS_ENABLED(CFG_TA_BTI) || !elf->prop_start) 940 return; 941 942 check_phdr_in_range(elf, PT_GNU_PROPERTY, elf->prop_start, 943 elf->prop_memsz); 944 945 va = elf->load_addr + elf->prop_start; 946 note = (void *)va; 947 name = (char *)(note + 1); 948 949 if (elf->prop_memsz < sizeof(*note) + sizeof(ELF_NOTE_GNU)) 950 return; 951 952 if (note->n_type != NT_GNU_PROPERTY_TYPE_0 || 953 note->n_namesz != sizeof(ELF_NOTE_GNU) || 954 memcmp(name, ELF_NOTE_GNU, sizeof(ELF_NOTE_GNU)) || 955 !IS_POWER_OF_TWO(align)) 956 return; 957 958 desc_offset = ROUNDUP(sizeof(*note) + sizeof(ELF_NOTE_GNU), align); 959 960 if (desc_offset > elf->prop_memsz || 961 ROUNDUP(desc_offset + note->n_descsz, align) > elf->prop_memsz) 962 return; 963 964 desc = (char *)(va + desc_offset); 965 966 do { 967 Elf_Prop *prop = (void *)(desc + prop_offset); 968 size_t data_offset = prop_offset + sizeof(*prop); 969 

static void parse_property_segment(struct ta_elf *elf)
{
	char *desc = NULL;
	size_t align = elf->prop_align;
	size_t desc_offset = 0;
	size_t prop_offset = 0;
	vaddr_t va = 0;
	Elf_Note *note = NULL;
	char *name = NULL;

	if (!IS_ENABLED(CFG_TA_BTI) || !elf->prop_start)
		return;

	check_phdr_in_range(elf, PT_GNU_PROPERTY, elf->prop_start,
			    elf->prop_memsz);

	va = elf->load_addr + elf->prop_start;
	note = (void *)va;
	name = (char *)(note + 1);

	if (elf->prop_memsz < sizeof(*note) + sizeof(ELF_NOTE_GNU))
		return;

	if (note->n_type != NT_GNU_PROPERTY_TYPE_0 ||
	    note->n_namesz != sizeof(ELF_NOTE_GNU) ||
	    memcmp(name, ELF_NOTE_GNU, sizeof(ELF_NOTE_GNU)) ||
	    !IS_POWER_OF_TWO(align))
		return;

	desc_offset = ROUNDUP(sizeof(*note) + sizeof(ELF_NOTE_GNU), align);

	if (desc_offset > elf->prop_memsz ||
	    ROUNDUP(desc_offset + note->n_descsz, align) > elf->prop_memsz)
		return;

	desc = (char *)(va + desc_offset);

	do {
		Elf_Prop *prop = (void *)(desc + prop_offset);
		size_t data_offset = prop_offset + sizeof(*prop);

		if (note->n_descsz < data_offset)
			return;

		data_offset = confine_array_index(data_offset, note->n_descsz);

		if (prop->pr_type == GNU_PROPERTY_AARCH64_FEATURE_1_AND) {
			uint32_t *pr_data = (void *)(desc + data_offset);

			if (note->n_descsz < (data_offset + sizeof(*pr_data)) &&
			    prop->pr_datasz != sizeof(*pr_data))
				return;

			if (*pr_data & GNU_PROPERTY_AARCH64_FEATURE_1_BTI) {
				DMSG("BTI Feature present in note property");
				elf->bti_enabled = true;
			}
		}

		prop_offset += ROUNDUP(sizeof(*prop) + prop->pr_datasz, align);
	} while (prop_offset < note->n_descsz);
}

static void map_segments(struct ta_elf *elf)
{
	TEE_Result res = TEE_SUCCESS;

	parse_load_segments(elf);
	adjust_segments(elf);
	if (TAILQ_FIRST(&elf->segs)->offset < SMALL_PAGE_SIZE) {
		vaddr_t va = 0;
		size_t sz = elf->max_addr - elf->load_addr;
		struct segment *seg = TAILQ_LAST(&elf->segs, segment_head);
		size_t pad_begin = get_pad_begin();

		/*
		 * We're loading a library; if that's ever not the case,
		 * other parts of the code need to be updated too.
		 */
		assert(!elf->is_main);

		/*
		 * Now that we know how much virtual memory is needed, move
		 * the already mapped part to a location which can
		 * accommodate us.
		 */
		res = sys_remap(elf->load_addr, &va, sz, pad_begin,
				roundup(seg->vaddr + seg->memsz));
		if (res == TEE_ERROR_OUT_OF_MEMORY)
			res = sys_remap(elf->load_addr, &va, sz, 0,
					roundup(seg->vaddr + seg->memsz));
		if (res)
			err(res, "sys_remap");
		elf->ehdr_addr = va;
		elf->load_addr = va;
		elf->max_addr = va + sz;
		elf->phdr = (void *)(va + elf->e_phoff);
	}
}

static void add_deps_from_segment(struct ta_elf *elf, unsigned int type,
				  vaddr_t addr, size_t memsz)
{
	size_t dyn_entsize = 0;
	size_t num_dyns = 0;
	size_t n = 0;
	unsigned int tag = 0;
	size_t val = 0;
	TEE_UUID uuid = { };
	char *str_tab = NULL;
	size_t str_tab_sz = 0;

	if (type != PT_DYNAMIC)
		return;

	check_phdr_in_range(elf, type, addr, memsz);

	if (elf->is_32bit)
		dyn_entsize = sizeof(Elf32_Dyn);
	else
		dyn_entsize = sizeof(Elf64_Dyn);

	assert(!(memsz % dyn_entsize));
	num_dyns = memsz / dyn_entsize;

	for (n = 0; n < num_dyns && !(str_tab && str_tab_sz); n++) {
		read_dyn(elf, addr, n, &tag, &val);
		if (tag == DT_STRTAB)
			str_tab = (char *)(val + elf->load_addr);
		else if (tag == DT_STRSZ)
			str_tab_sz = val;
	}
	check_range(elf, ".dynstr/STRTAB", str_tab, str_tab_sz);

	for (n = 0; n < num_dyns; n++) {
		read_dyn(elf, addr, n, &tag, &val);
		if (tag != DT_NEEDED)
			continue;
		if (val >= str_tab_sz)
			err(TEE_ERROR_BAD_FORMAT,
			    "Offset into .dynstr/STRTAB out of range");
		tee_uuid_from_str(&uuid, str_tab + val);
		queue_elf(&uuid);
	}
}

static void add_dependencies(struct ta_elf *elf)
{
	size_t n = 0;

	if (elf->is_32bit) {
		Elf32_Phdr *phdr = elf->phdr;

		for (n = 0; n < elf->e_phnum; n++)
			add_deps_from_segment(elf, phdr[n].p_type,
					      phdr[n].p_vaddr, phdr[n].p_memsz);
	} else {
		Elf64_Phdr *phdr = elf->phdr;

		for (n = 0; n < elf->e_phnum; n++)
			add_deps_from_segment(elf, phdr[n].p_type,
					      phdr[n].p_vaddr, phdr[n].p_memsz);
	}
}
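
/*
 * Example (illustrative UUID): shared library TAs are named by UUID, so a
 * DT_NEEDED entry carrying the string
 * "b3091a65-9751-4784-abf7-0298a7cc35ba" makes add_deps_from_segment()
 * convert it with tee_uuid_from_str() and queue that library for loading,
 * unless it is already on main_elf_queue.
 */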

static void copy_section_headers(struct ta_elf *elf)
{
	TEE_Result res = TEE_SUCCESS;
	size_t sz = 0;
	size_t offs = 0;

	if (MUL_OVERFLOW(elf->e_shnum, elf->e_shentsize, &sz))
		err(TEE_ERROR_BAD_FORMAT, "Section headers size overflow");

	elf->shdr = malloc(sz);
	if (!elf->shdr)
		err(TEE_ERROR_OUT_OF_MEMORY, "malloc");

	/*
	 * We're assuming that the section headers come after the load
	 * segments, but if it's a very small dynamically linked library
	 * the section headers can still end up (partially?) in the first
	 * mapped page.
	 */
	if (elf->e_shoff < SMALL_PAGE_SIZE) {
		assert(!elf->is_main);
		offs = MIN(SMALL_PAGE_SIZE - elf->e_shoff, sz);
		memcpy(elf->shdr, (void *)(elf->load_addr + elf->e_shoff),
		       offs);
	}

	if (offs < sz) {
		res = sys_copy_from_ta_bin((uint8_t *)elf->shdr + offs,
					   sz - offs, elf->handle,
					   elf->e_shoff + offs);
		if (res)
			err(res, "sys_copy_from_ta_bin");
	}
}

static void close_handle(struct ta_elf *elf)
{
	TEE_Result res = sys_close_ta_bin(elf->handle);

	if (res)
		err(res, "sys_close_ta_bin");
	elf->handle = -1;
}

static void clean_elf_load_main(struct ta_elf *elf)
{
	TEE_Result res = TEE_SUCCESS;

	/*
	 * Clean up from the last attempt to load
	 */
	res = sys_unmap(elf->ehdr_addr, SMALL_PAGE_SIZE);
	if (res)
		err(res, "sys_unmap");

	while (!TAILQ_EMPTY(&elf->segs)) {
		struct segment *seg = TAILQ_FIRST(&elf->segs);
		vaddr_t va = 0;
		size_t num_bytes = 0;

		va = rounddown(elf->load_addr + seg->vaddr);
		if (seg->remapped_writeable)
			num_bytes = roundup(seg->vaddr + seg->memsz) -
				    rounddown(seg->vaddr);
		else
			num_bytes = seg->memsz;

		res = sys_unmap(va, num_bytes);
		if (res)
			err(res, "sys_unmap");

		TAILQ_REMOVE(&elf->segs, seg, link);
		free(seg);
	}

	free(elf->shdr);
	memset(&elf->is_32bit, 0,
	       (vaddr_t)&elf->uuid - (vaddr_t)&elf->is_32bit);

	TAILQ_INIT(&elf->segs);
}

#ifdef ARM64
/*
 * Allocates an offset in the TA's Thread Control Block for the TLS segment
 * of the @elf module.
 */
#define TCB_HEAD_SIZE (2 * sizeof(long))
static void set_tls_offset(struct ta_elf *elf)
{
	static size_t next_offs = TCB_HEAD_SIZE;

	if (!elf->tls_start)
		return;

	/* Module has a TLS segment */
	elf->tls_tcb_offs = next_offs;
	next_offs += elf->tls_memsz;
}
#else
static void set_tls_offset(struct ta_elf *elf __unused) {}
#endif
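
/*
 * Resulting TCB layout (informal sketch, assuming the conventional
 * AArch64 "variant 1" TLS arrangement where the thread pointer refers to
 * the TCB):
 *
 *   TCB + 0:              two reserved longs (TCB_HEAD_SIZE)
 *   TCB + TCB_HEAD_SIZE:  TLS block of the main executable
 *   TCB + ...:            TLS blocks of libraries, in assignment order
 *
 * set_tls_offset() above only reserves the offsets; populating the TCB
 * itself is outside this file.
 */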
1221 * 1222 * Legacy TAs cannot be mapped with shared memory segments 1223 * so restart the mapping if it turned out we're loading a 1224 * legacy TA. 1225 */ 1226 1227 DMSG("Reloading TA %pUl as legacy TA", (void *)&elf->uuid); 1228 clean_elf_load_main(elf); 1229 elf->is_legacy = true; 1230 init_elf(elf); 1231 map_segments(elf); 1232 populate_segments_legacy(elf); 1233 add_dependencies(elf); 1234 copy_section_headers(elf); 1235 save_symtab(elf); 1236 close_handle(elf); 1237 elf->head = (struct ta_head *)elf->load_addr; 1238 /* 1239 * Check that the TA is still a legacy TA, if it isn't give 1240 * up now since we're likely under attack. 1241 */ 1242 if (elf->head->depr_entry == UINT64_MAX) 1243 err(TEE_ERROR_GENERIC, 1244 "TA %pUl was changed on disk to non-legacy", 1245 (void *)&elf->uuid); 1246 } 1247 1248 } 1249 1250 void ta_elf_load_main(const TEE_UUID *uuid, uint32_t *is_32bit, uint64_t *sp, 1251 uint32_t *ta_flags) 1252 { 1253 struct ta_elf *elf = queue_elf(uuid); 1254 vaddr_t va = 0; 1255 TEE_Result res = TEE_SUCCESS; 1256 1257 assert(elf); 1258 elf->is_main = true; 1259 1260 load_main(elf); 1261 1262 *is_32bit = elf->is_32bit; 1263 res = sys_map_zi(elf->head->stack_size, 0, &va, 0, 0); 1264 if (res) 1265 err(res, "sys_map_zi stack"); 1266 1267 if (elf->head->flags & ~TA_FLAGS_MASK) 1268 err(TEE_ERROR_BAD_FORMAT, "Invalid TA flags(s) %#"PRIx32, 1269 elf->head->flags & ~TA_FLAGS_MASK); 1270 1271 *ta_flags = elf->head->flags; 1272 *sp = va + elf->head->stack_size; 1273 ta_stack = va; 1274 ta_stack_size = elf->head->stack_size; 1275 } 1276 1277 void ta_elf_finalize_load_main(uint64_t *entry, uint64_t *load_addr) 1278 { 1279 struct ta_elf *elf = TAILQ_FIRST(&main_elf_queue); 1280 TEE_Result res = TEE_SUCCESS; 1281 1282 assert(elf->is_main); 1283 1284 res = ta_elf_set_init_fini_info_compat(elf->is_32bit); 1285 if (res) 1286 err(res, "ta_elf_set_init_fini_info_compat"); 1287 res = ta_elf_set_elf_phdr_info(elf->is_32bit); 1288 if (res) 1289 err(res, "ta_elf_set_elf_phdr_info"); 1290 1291 if (elf->is_legacy) 1292 *entry = elf->head->depr_entry; 1293 else 1294 *entry = elf->e_entry + elf->load_addr; 1295 1296 *load_addr = elf->load_addr; 1297 } 1298 1299 1300 void ta_elf_load_dependency(struct ta_elf *elf, bool is_32bit) 1301 { 1302 if (elf->is_main) 1303 return; 1304 1305 init_elf(elf); 1306 if (elf->is_32bit != is_32bit) 1307 err(TEE_ERROR_BAD_FORMAT, "ELF %pUl is %sbit (expected %sbit)", 1308 (void *)&elf->uuid, elf->is_32bit ? "32" : "64", 1309 is_32bit ? "32" : "64"); 1310 1311 map_segments(elf); 1312 populate_segments(elf); 1313 add_dependencies(elf); 1314 copy_section_headers(elf); 1315 save_symtab(elf); 1316 close_handle(elf); 1317 set_tls_offset(elf); 1318 parse_property_segment(elf); 1319 if (elf->bti_enabled) 1320 ta_elf_add_bti(elf); 1321 } 1322 1323 void ta_elf_finalize_mappings(struct ta_elf *elf) 1324 { 1325 TEE_Result res = TEE_SUCCESS; 1326 struct segment *seg = NULL; 1327 1328 if (!elf->is_legacy) 1329 return; 1330 1331 TAILQ_FOREACH(seg, &elf->segs, link) { 1332 vaddr_t va = elf->load_addr + seg->vaddr; 1333 uint32_t flags = 0; 1334 1335 if (seg->flags & PF_W) 1336 flags |= LDELF_MAP_FLAG_WRITEABLE; 1337 if (seg->flags & PF_X) 1338 flags |= LDELF_MAP_FLAG_EXECUTABLE; 1339 1340 res = sys_set_prot(va, seg->memsz, flags); 1341 if (res) 1342 err(res, "sys_set_prot"); 1343 } 1344 } 1345 1346 static void __printf(3, 4) print_wrapper(void *pctx, print_func_t print_func, 1347 const char *fmt, ...) 
1348 { 1349 va_list ap; 1350 1351 va_start(ap, fmt); 1352 print_func(pctx, fmt, ap); 1353 va_end(ap); 1354 } 1355 1356 static void print_seg(void *pctx, print_func_t print_func, 1357 size_t idx __maybe_unused, int elf_idx __maybe_unused, 1358 vaddr_t va __maybe_unused, paddr_t pa __maybe_unused, 1359 size_t sz __maybe_unused, uint32_t flags) 1360 { 1361 int rc __maybe_unused = 0; 1362 int width __maybe_unused = 8; 1363 char desc[14] __maybe_unused = ""; 1364 char flags_str[] __maybe_unused = "----"; 1365 1366 if (elf_idx > -1) { 1367 rc = snprintf(desc, sizeof(desc), " [%d]", elf_idx); 1368 assert(rc >= 0); 1369 } else { 1370 if (flags & DUMP_MAP_EPHEM) { 1371 rc = snprintf(desc, sizeof(desc), " (param)"); 1372 assert(rc >= 0); 1373 } 1374 if (flags & DUMP_MAP_LDELF) { 1375 rc = snprintf(desc, sizeof(desc), " (ldelf)"); 1376 assert(rc >= 0); 1377 } 1378 if (va == ta_stack) { 1379 rc = snprintf(desc, sizeof(desc), " (stack)"); 1380 assert(rc >= 0); 1381 } 1382 } 1383 1384 if (flags & DUMP_MAP_READ) 1385 flags_str[0] = 'r'; 1386 if (flags & DUMP_MAP_WRITE) 1387 flags_str[1] = 'w'; 1388 if (flags & DUMP_MAP_EXEC) 1389 flags_str[2] = 'x'; 1390 if (flags & DUMP_MAP_SECURE) 1391 flags_str[3] = 's'; 1392 1393 print_wrapper(pctx, print_func, 1394 "region %2zu: va 0x%0*"PRIxVA" pa 0x%0*"PRIxPA" size 0x%06zx flags %s%s\n", 1395 idx, width, va, width, pa, sz, flags_str, desc); 1396 } 1397 1398 static bool get_next_in_order(struct ta_elf_queue *elf_queue, 1399 struct ta_elf **elf, struct segment **seg, 1400 size_t *elf_idx) 1401 { 1402 struct ta_elf *e = NULL; 1403 struct segment *s = NULL; 1404 size_t idx = 0; 1405 vaddr_t va = 0; 1406 struct ta_elf *e2 = NULL; 1407 size_t i2 = 0; 1408 1409 assert(elf && seg && elf_idx); 1410 e = *elf; 1411 s = *seg; 1412 assert((e == NULL && s == NULL) || (e != NULL && s != NULL)); 1413 1414 if (s) { 1415 s = TAILQ_NEXT(s, link); 1416 if (s) { 1417 *seg = s; 1418 return true; 1419 } 1420 } 1421 1422 if (e) 1423 va = e->load_addr; 1424 1425 /* Find the ELF with next load address */ 1426 e = NULL; 1427 TAILQ_FOREACH(e2, elf_queue, link) { 1428 if (e2->load_addr > va) { 1429 if (!e || e2->load_addr < e->load_addr) { 1430 e = e2; 1431 idx = i2; 1432 } 1433 } 1434 i2++; 1435 } 1436 if (!e) 1437 return false; 1438 1439 *elf = e; 1440 *seg = TAILQ_FIRST(&e->segs); 1441 *elf_idx = idx; 1442 return true; 1443 } 1444 1445 void ta_elf_print_mappings(void *pctx, print_func_t print_func, 1446 struct ta_elf_queue *elf_queue, size_t num_maps, 1447 struct dump_map *maps, vaddr_t mpool_base) 1448 { 1449 struct segment *seg = NULL; 1450 struct ta_elf *elf = NULL; 1451 size_t elf_idx = 0; 1452 size_t idx = 0; 1453 size_t map_idx = 0; 1454 1455 /* 1456 * Loop over all segments and maps, printing virtual address in 1457 * order. Segment has priority if the virtual address is present 1458 * in both map and segment. 
1459 */ 1460 get_next_in_order(elf_queue, &elf, &seg, &elf_idx); 1461 while (true) { 1462 vaddr_t va = -1; 1463 size_t sz = 0; 1464 uint32_t flags = DUMP_MAP_SECURE; 1465 size_t offs = 0; 1466 1467 if (seg) { 1468 va = rounddown(seg->vaddr + elf->load_addr); 1469 sz = roundup(seg->vaddr + seg->memsz) - 1470 rounddown(seg->vaddr); 1471 } 1472 1473 while (map_idx < num_maps && maps[map_idx].va <= va) { 1474 uint32_t f = 0; 1475 1476 /* If there's a match, it should be the same map */ 1477 if (maps[map_idx].va == va) { 1478 /* 1479 * In shared libraries the first page is 1480 * mapped separately with the rest of that 1481 * segment following back to back in a 1482 * separate entry. 1483 */ 1484 if (map_idx + 1 < num_maps && 1485 maps[map_idx].sz == SMALL_PAGE_SIZE) { 1486 vaddr_t next_va = maps[map_idx].va + 1487 maps[map_idx].sz; 1488 size_t comb_sz = maps[map_idx].sz + 1489 maps[map_idx + 1].sz; 1490 1491 if (next_va == maps[map_idx + 1].va && 1492 comb_sz == sz && 1493 maps[map_idx].flags == 1494 maps[map_idx + 1].flags) { 1495 /* Skip this and next entry */ 1496 map_idx += 2; 1497 continue; 1498 } 1499 } 1500 assert(maps[map_idx].sz == sz); 1501 } else if (maps[map_idx].va < va) { 1502 if (maps[map_idx].va == mpool_base) 1503 f |= DUMP_MAP_LDELF; 1504 print_seg(pctx, print_func, idx, -1, 1505 maps[map_idx].va, maps[map_idx].pa, 1506 maps[map_idx].sz, 1507 maps[map_idx].flags | f); 1508 idx++; 1509 } 1510 map_idx++; 1511 } 1512 1513 if (!seg) 1514 break; 1515 1516 offs = rounddown(seg->offset); 1517 if (seg->flags & PF_R) 1518 flags |= DUMP_MAP_READ; 1519 if (seg->flags & PF_W) 1520 flags |= DUMP_MAP_WRITE; 1521 if (seg->flags & PF_X) 1522 flags |= DUMP_MAP_EXEC; 1523 1524 print_seg(pctx, print_func, idx, elf_idx, va, offs, sz, flags); 1525 idx++; 1526 1527 if (!get_next_in_order(elf_queue, &elf, &seg, &elf_idx)) 1528 seg = NULL; 1529 } 1530 1531 elf_idx = 0; 1532 TAILQ_FOREACH(elf, elf_queue, link) { 1533 print_wrapper(pctx, print_func, 1534 " [%zu] %pUl @ 0x%0*"PRIxVA"\n", 1535 elf_idx, (void *)&elf->uuid, 8, elf->load_addr); 1536 elf_idx++; 1537 } 1538 } 1539 1540 #ifdef CFG_UNWIND 1541 1542 #if defined(ARM32) || defined(ARM64) 1543 /* Called by libunw */ 1544 bool find_exidx(vaddr_t addr, vaddr_t *idx_start, vaddr_t *idx_end) 1545 { 1546 struct segment *seg = NULL; 1547 struct ta_elf *elf = NULL; 1548 vaddr_t a = 0; 1549 1550 TAILQ_FOREACH(elf, &main_elf_queue, link) { 1551 if (addr < elf->load_addr) 1552 continue; 1553 a = addr - elf->load_addr; 1554 TAILQ_FOREACH(seg, &elf->segs, link) { 1555 if (a < seg->vaddr) 1556 continue; 1557 if (a - seg->vaddr < seg->filesz) { 1558 *idx_start = elf->exidx_start + elf->load_addr; 1559 *idx_end = elf->exidx_start + elf->load_addr + 1560 elf->exidx_size; 1561 return true; 1562 } 1563 } 1564 } 1565 1566 return false; 1567 } 1568 1569 void ta_elf_stack_trace_a32(uint32_t regs[16]) 1570 { 1571 struct unwind_state_arm32 state = { }; 1572 1573 memcpy(state.registers, regs, sizeof(state.registers)); 1574 print_stack_arm32(&state, ta_stack, ta_stack_size); 1575 } 1576 1577 void ta_elf_stack_trace_a64(uint64_t fp, uint64_t sp, uint64_t pc) 1578 { 1579 struct unwind_state_arm64 state = { .fp = fp, .sp = sp, .pc = pc }; 1580 1581 print_stack_arm64(&state, ta_stack, ta_stack_size); 1582 } 1583 #elif defined(RV32) || defined(RV64) 1584 void ta_elf_stack_trace_riscv(uint64_t fp, uint64_t pc) 1585 { 1586 struct unwind_state_riscv state = { .fp = fp, .pc = pc }; 1587 1588 print_stack_riscv(&state, ta_stack, ta_stack_size); 1589 } 1590 #endif 1591 1592 #endif /* 

TEE_Result ta_elf_add_library(const TEE_UUID *uuid)
{
	TEE_Result res = TEE_ERROR_GENERIC;
	struct ta_elf *ta = TAILQ_FIRST(&main_elf_queue);
	struct ta_elf *lib = ta_elf_find_elf(uuid);
	struct ta_elf *elf = NULL;

	if (lib)
		return TEE_SUCCESS; /* Already mapped */

	lib = queue_elf_helper(uuid);
	if (!lib)
		return TEE_ERROR_OUT_OF_MEMORY;

	for (elf = lib; elf; elf = TAILQ_NEXT(elf, link))
		ta_elf_load_dependency(elf, ta->is_32bit);

	for (elf = lib; elf; elf = TAILQ_NEXT(elf, link)) {
		ta_elf_relocate(elf);
		ta_elf_finalize_mappings(elf);
	}

	for (elf = lib; elf; elf = TAILQ_NEXT(elf, link))
		DMSG("ELF (%pUl) at %#"PRIxVA,
		     (void *)&elf->uuid, elf->load_addr);

	res = ta_elf_set_init_fini_info_compat(ta->is_32bit);
	if (res)
		return res;

	return ta_elf_set_elf_phdr_info(ta->is_32bit);
}

/* Get address/size of .init_array and .fini_array from the dynamic segment */
static void get_init_fini_array(struct ta_elf *elf, unsigned int type,
				vaddr_t addr, size_t memsz, vaddr_t *init,
				size_t *init_cnt, vaddr_t *fini,
				size_t *fini_cnt)
{
	size_t addrsz = 0;
	size_t dyn_entsize = 0;
	size_t num_dyns = 0;
	size_t n = 0;
	unsigned int tag = 0;
	size_t val = 0;

	assert(type == PT_DYNAMIC);

	check_phdr_in_range(elf, type, addr, memsz);

	if (elf->is_32bit) {
		dyn_entsize = sizeof(Elf32_Dyn);
		addrsz = 4;
	} else {
		dyn_entsize = sizeof(Elf64_Dyn);
		addrsz = 8;
	}

	assert(!(memsz % dyn_entsize));
	num_dyns = memsz / dyn_entsize;

	for (n = 0; n < num_dyns; n++) {
		read_dyn(elf, addr, n, &tag, &val);
		if (tag == DT_INIT_ARRAY)
			*init = val + elf->load_addr;
		else if (tag == DT_FINI_ARRAY)
			*fini = val + elf->load_addr;
		else if (tag == DT_INIT_ARRAYSZ)
			*init_cnt = val / addrsz;
		else if (tag == DT_FINI_ARRAYSZ)
			*fini_cnt = val / addrsz;
	}
}

/* Get address/size of .init_array and .fini_array in @elf (if present) */
static void elf_get_init_fini_array(struct ta_elf *elf, vaddr_t *init,
				    size_t *init_cnt, vaddr_t *fini,
				    size_t *fini_cnt)
{
	size_t n = 0;

	if (elf->is_32bit) {
		Elf32_Phdr *phdr = elf->phdr;

		for (n = 0; n < elf->e_phnum; n++) {
			if (phdr[n].p_type == PT_DYNAMIC) {
				get_init_fini_array(elf, phdr[n].p_type,
						    phdr[n].p_vaddr,
						    phdr[n].p_memsz,
						    init, init_cnt, fini,
						    fini_cnt);
				return;
			}
		}
	} else {
		Elf64_Phdr *phdr = elf->phdr;

		for (n = 0; n < elf->e_phnum; n++) {
			if (phdr[n].p_type == PT_DYNAMIC) {
				get_init_fini_array(elf, phdr[n].p_type,
						    phdr[n].p_vaddr,
						    phdr[n].p_memsz,
						    init, init_cnt, fini,
						    fini_cnt);
				return;
			}
		}
	}
}
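
/*
 * Illustrative sketch, not part of ldelf: how a runtime could invoke the
 * constructors located by elf_get_init_fini_array(). libutee does the
 * equivalent based on the info published below.
 */
static void __maybe_unused call_init_array(vaddr_t init, size_t init_cnt)
{
	void (**fn)(void) = (void (**)(void))init;
	size_t n = 0;

	for (n = 0; n < init_cnt; n++)
		fn[n]();
}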
1710 */ 1711 1712 struct __init_fini { 1713 uint32_t flags; 1714 uint16_t init_size; 1715 uint16_t fini_size; 1716 1717 void (**init)(void); /* @init_size entries */ 1718 void (**fini)(void); /* @fini_size entries */ 1719 }; 1720 1721 #define __IFS_VALID BIT(0) 1722 #define __IFS_INIT_HAS_RUN BIT(1) 1723 #define __IFS_FINI_HAS_RUN BIT(2) 1724 1725 struct __init_fini_info { 1726 uint32_t reserved; 1727 uint16_t size; 1728 uint16_t pad; 1729 struct __init_fini *ifs; /* @size entries */ 1730 }; 1731 1732 /* 32-bit variants for a 64-bit ldelf to access a 32-bit TA */ 1733 1734 struct __init_fini32 { 1735 uint32_t flags; 1736 uint16_t init_size; 1737 uint16_t fini_size; 1738 uint32_t init; 1739 uint32_t fini; 1740 }; 1741 1742 struct __init_fini_info32 { 1743 uint32_t reserved; 1744 uint16_t size; 1745 uint16_t pad; 1746 uint32_t ifs; 1747 }; 1748 1749 static TEE_Result realloc_ifs(vaddr_t va, size_t cnt, bool is_32bit) 1750 { 1751 struct __init_fini_info32 *info32 = (struct __init_fini_info32 *)va; 1752 struct __init_fini_info *info = (struct __init_fini_info *)va; 1753 struct __init_fini32 *ifs32 = NULL; 1754 struct __init_fini *ifs = NULL; 1755 size_t prev_cnt = 0; 1756 void *ptr = NULL; 1757 1758 if (is_32bit) { 1759 ptr = (void *)(vaddr_t)info32->ifs; 1760 ptr = realloc(ptr, cnt * sizeof(struct __init_fini32)); 1761 if (!ptr) 1762 return TEE_ERROR_OUT_OF_MEMORY; 1763 ifs32 = ptr; 1764 prev_cnt = info32->size; 1765 if (cnt > prev_cnt) 1766 memset(ifs32 + prev_cnt, 0, 1767 (cnt - prev_cnt) * sizeof(*ifs32)); 1768 info32->ifs = (uint32_t)(vaddr_t)ifs32; 1769 info32->size = cnt; 1770 } else { 1771 ptr = realloc(info->ifs, cnt * sizeof(struct __init_fini)); 1772 if (!ptr) 1773 return TEE_ERROR_OUT_OF_MEMORY; 1774 ifs = ptr; 1775 prev_cnt = info->size; 1776 if (cnt > prev_cnt) 1777 memset(ifs + prev_cnt, 0, 1778 (cnt - prev_cnt) * sizeof(*ifs)); 1779 info->ifs = ifs; 1780 info->size = cnt; 1781 } 1782 1783 return TEE_SUCCESS; 1784 } 1785 1786 static void fill_ifs(vaddr_t va, size_t idx, struct ta_elf *elf, bool is_32bit) 1787 { 1788 struct __init_fini_info32 *info32 = (struct __init_fini_info32 *)va; 1789 struct __init_fini_info *info = (struct __init_fini_info *)va; 1790 struct __init_fini32 *ifs32 = NULL; 1791 struct __init_fini *ifs = NULL; 1792 size_t init_cnt = 0; 1793 size_t fini_cnt = 0; 1794 vaddr_t init = 0; 1795 vaddr_t fini = 0; 1796 1797 if (is_32bit) { 1798 assert(idx < info32->size); 1799 ifs32 = &((struct __init_fini32 *)(vaddr_t)info32->ifs)[idx]; 1800 1801 if (ifs32->flags & __IFS_VALID) 1802 return; 1803 1804 elf_get_init_fini_array(elf, &init, &init_cnt, &fini, 1805 &fini_cnt); 1806 1807 ifs32->init = (uint32_t)init; 1808 ifs32->init_size = init_cnt; 1809 1810 ifs32->fini = (uint32_t)fini; 1811 ifs32->fini_size = fini_cnt; 1812 1813 ifs32->flags |= __IFS_VALID; 1814 } else { 1815 assert(idx < info->size); 1816 ifs = &info->ifs[idx]; 1817 1818 if (ifs->flags & __IFS_VALID) 1819 return; 1820 1821 elf_get_init_fini_array(elf, &init, &init_cnt, &fini, 1822 &fini_cnt); 1823 1824 ifs->init = (void (**)(void))init; 1825 ifs->init_size = init_cnt; 1826 1827 ifs->fini = (void (**)(void))fini; 1828 ifs->fini_size = fini_cnt; 1829 1830 ifs->flags |= __IFS_VALID; 1831 } 1832 } 1833 1834 /* 1835 * Set or update __init_fini_info in the TA with information from the ELF 1836 * queue 1837 */ 1838 TEE_Result ta_elf_set_init_fini_info_compat(bool is_32bit) 1839 { 1840 struct __init_fini_info *info = NULL; 1841 TEE_Result res = TEE_SUCCESS; 1842 struct ta_elf *elf = NULL; 1843 vaddr_t info_va = 

/*
 * Set or update __init_fini_info in the TA with information from the ELF
 * queue
 */
TEE_Result ta_elf_set_init_fini_info_compat(bool is_32bit)
{
	struct __init_fini_info *info = NULL;
	TEE_Result res = TEE_SUCCESS;
	struct ta_elf *elf = NULL;
	vaddr_t info_va = 0;
	size_t cnt = 0;

	res = ta_elf_resolve_sym("__init_fini_info", &info_va, NULL, NULL);
	if (res) {
		if (res == TEE_ERROR_ITEM_NOT_FOUND) {
			/*
			 * Not an error, only TAs linked against libutee from
			 * OP-TEE 3.9.0 have this symbol.
			 */
			return TEE_SUCCESS;
		}
		return res;
	}
	assert(info_va);

	info = (struct __init_fini_info *)info_va;
	if (info->reserved)
		return TEE_ERROR_NOT_SUPPORTED;

	TAILQ_FOREACH(elf, &main_elf_queue, link)
		cnt++;

	/* Queue has at least one file (main) */
	assert(cnt);

	res = realloc_ifs(info_va, cnt, is_32bit);
	if (res)
		goto err;

	cnt = 0;
	TAILQ_FOREACH(elf, &main_elf_queue, link) {
		fill_ifs(info_va, cnt, elf, is_32bit);
		cnt++;
	}

	return TEE_SUCCESS;
err:
	free(info);
	return res;
}

static TEE_Result realloc_elf_phdr_info(vaddr_t va, size_t cnt, bool is_32bit)
{
	struct __elf_phdr_info32 *info32 = (struct __elf_phdr_info32 *)va;
	struct __elf_phdr_info *info = (struct __elf_phdr_info *)va;
	struct dl_phdr_info32 *dlpi32 = NULL;
	struct dl_phdr_info *dlpi = NULL;
	size_t prev_cnt = 0;
	void *ptr = NULL;

	if (is_32bit) {
		ptr = (void *)(vaddr_t)info32->dlpi;
		ptr = realloc(ptr, cnt * sizeof(*dlpi32));
		if (!ptr)
			return TEE_ERROR_OUT_OF_MEMORY;
		dlpi32 = ptr;
		prev_cnt = info32->count;
		if (cnt > prev_cnt)
			memset(dlpi32 + prev_cnt, 0,
			       (cnt - prev_cnt) * sizeof(*dlpi32));
		info32->dlpi = (uint32_t)(vaddr_t)dlpi32;
		info32->count = cnt;
	} else {
		ptr = realloc(info->dlpi, cnt * sizeof(*dlpi));
		if (!ptr)
			return TEE_ERROR_OUT_OF_MEMORY;
		dlpi = ptr;
		prev_cnt = info->count;
		if (cnt > prev_cnt)
			memset(dlpi + prev_cnt, 0,
			       (cnt - prev_cnt) * sizeof(*dlpi));
		info->dlpi = dlpi;
		info->count = cnt;
	}

	return TEE_SUCCESS;
}

static void fill_elf_phdr_info(vaddr_t va, size_t idx, struct ta_elf *elf,
			       bool is_32bit)
{
	struct __elf_phdr_info32 *info32 = (struct __elf_phdr_info32 *)va;
	struct __elf_phdr_info *info = (struct __elf_phdr_info *)va;
	struct dl_phdr_info32 *dlpi32 = NULL;
	struct dl_phdr_info *dlpi = NULL;

	if (is_32bit) {
		assert(idx < info32->count);
		dlpi32 = (struct dl_phdr_info32 *)(vaddr_t)info32->dlpi + idx;

		dlpi32->dlpi_addr = elf->load_addr;
		if (elf->soname)
			dlpi32->dlpi_name = (vaddr_t)elf->soname;
		else
			dlpi32->dlpi_name = (vaddr_t)&info32->zero;
		dlpi32->dlpi_phdr = (vaddr_t)elf->phdr;
		dlpi32->dlpi_phnum = elf->e_phnum;
		dlpi32->dlpi_adds = 1; /* No unloading on dlclose() currently */
		dlpi32->dlpi_subs = 0; /* No unloading on dlclose() currently */
		dlpi32->dlpi_tls_modid = elf->tls_mod_id;
		dlpi32->dlpi_tls_data = elf->tls_start;
	} else {
		assert(idx < info->count);
		dlpi = info->dlpi + idx;

		dlpi->dlpi_addr = elf->load_addr;
		if (elf->soname)
			dlpi->dlpi_name = elf->soname;
		else
			dlpi->dlpi_name = &info->zero;
		dlpi->dlpi_phdr = elf->phdr;
		dlpi->dlpi_phnum = elf->e_phnum;
		dlpi->dlpi_adds = 1; /* No unloading on dlclose() currently */
		dlpi->dlpi_subs = 0; /* No unloading on dlclose() currently */
		dlpi->dlpi_tls_modid = elf->tls_mod_id;
		dlpi->dlpi_tls_data = (void *)elf->tls_start;
	}
}
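
/*
 * Illustrative sketch, not part of ldelf: a dl_iterate_phdr()-style walk
 * over the 64-bit info filled in above (libutee implements the real
 * iteration on top of __elf_phdr_info).
 */
static void __maybe_unused walk_phdr_info(struct __elf_phdr_info *info)
{
	size_t n = 0;

	for (n = 0; n < info->count; n++)
		DMSG("module %zu: \"%s\" at %#lx", n,
		     info->dlpi[n].dlpi_name,
		     (unsigned long)info->dlpi[n].dlpi_addr);
}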

/*
 * Set or update __elf_phdr_info in the TA with information from the ELF
 * queue
 */
TEE_Result ta_elf_set_elf_phdr_info(bool is_32bit)
{
	struct __elf_phdr_info *info = NULL;
	TEE_Result res = TEE_SUCCESS;
	struct ta_elf *elf = NULL;
	vaddr_t info_va = 0;
	size_t cnt = 0;

	res = ta_elf_resolve_sym("__elf_phdr_info", &info_va, NULL, NULL);
	if (res) {
		if (res == TEE_ERROR_ITEM_NOT_FOUND) {
			/* Older TA */
			return TEE_SUCCESS;
		}
		return res;
	}
	assert(info_va);

	info = (struct __elf_phdr_info *)info_va;
	if (info->reserved)
		return TEE_ERROR_NOT_SUPPORTED;

	TAILQ_FOREACH(elf, &main_elf_queue, link)
		cnt++;

	res = realloc_elf_phdr_info(info_va, cnt, is_32bit);
	if (res)
		return res;

	cnt = 0;
	TAILQ_FOREACH(elf, &main_elf_queue, link) {
		fill_elf_phdr_info(info_va, cnt, elf, is_32bit);
		cnt++;
	}

	return TEE_SUCCESS;
}