1 // SPDX-License-Identifier: BSD-2-Clause 2 /* 3 * Copyright (c) 2019, Linaro Limited 4 * Copyright (c) 2020-2023, Arm Limited 5 */ 6 7 #include <assert.h> 8 #include <config.h> 9 #include <confine_array_index.h> 10 #include <ctype.h> 11 #include <elf32.h> 12 #include <elf64.h> 13 #include <elf_common.h> 14 #include <ldelf.h> 15 #include <link.h> 16 #include <stdio.h> 17 #include <stdlib.h> 18 #include <string_ext.h> 19 #include <string.h> 20 #include <tee_api_types.h> 21 #include <tee_internal_api_extensions.h> 22 #include <unw/unwind.h> 23 #include <user_ta_header.h> 24 #include <util.h> 25 26 #include "sys.h" 27 #include "ta_elf.h" 28 29 /* 30 * Layout of a 32-bit struct dl_phdr_info for a 64-bit ldelf to access a 32-bit 31 * TA 32 */ 33 struct dl_phdr_info32 { 34 uint32_t dlpi_addr; 35 uint32_t dlpi_name; 36 uint32_t dlpi_phdr; 37 uint16_t dlpi_phnum; 38 uint64_t dlpi_adds; 39 uint64_t dlpi_subs; 40 uint32_t dlpi_tls_modid; 41 uint32_t dlpi_tls_data; 42 }; 43 44 static vaddr_t ta_stack; 45 static vaddr_t ta_stack_size; 46 47 struct ta_elf_queue main_elf_queue = TAILQ_HEAD_INITIALIZER(main_elf_queue); 48 49 /* 50 * Main application is always ID 1, shared libraries with TLS take IDs 2 and 51 * above 52 */ 53 static void assign_tls_mod_id(struct ta_elf *elf) 54 { 55 static size_t last_tls_mod_id = 1; 56 57 if (elf->is_main) 58 assert(last_tls_mod_id == 1); /* Main always comes first */ 59 elf->tls_mod_id = last_tls_mod_id++; 60 } 61 62 static struct ta_elf *queue_elf_helper(const TEE_UUID *uuid) 63 { 64 struct ta_elf *elf = calloc(1, sizeof(*elf)); 65 66 if (!elf) 67 return NULL; 68 69 TAILQ_INIT(&elf->segs); 70 71 elf->uuid = *uuid; 72 TAILQ_INSERT_TAIL(&main_elf_queue, elf, link); 73 return elf; 74 } 75 76 static struct ta_elf *queue_elf(const TEE_UUID *uuid) 77 { 78 struct ta_elf *elf = ta_elf_find_elf(uuid); 79 80 if (elf) 81 return NULL; 82 83 elf = queue_elf_helper(uuid); 84 if (!elf) 85 err(TEE_ERROR_OUT_OF_MEMORY, "queue_elf_helper"); 86 87 return elf; 88 } 89 90 struct ta_elf *ta_elf_find_elf(const TEE_UUID *uuid) 91 { 92 struct ta_elf *elf = NULL; 93 94 TAILQ_FOREACH(elf, &main_elf_queue, link) 95 if (!memcmp(uuid, &elf->uuid, sizeof(*uuid))) 96 return elf; 97 98 return NULL; 99 } 100 101 #if defined(ARM32) || defined(ARM64) 102 static TEE_Result e32_parse_ehdr(struct ta_elf *elf, Elf32_Ehdr *ehdr) 103 { 104 if (ehdr->e_ident[EI_VERSION] != EV_CURRENT || 105 ehdr->e_ident[EI_CLASS] != ELFCLASS32 || 106 ehdr->e_ident[EI_DATA] != ELFDATA2LSB || 107 ehdr->e_ident[EI_OSABI] != ELFOSABI_NONE || 108 ehdr->e_type != ET_DYN || ehdr->e_machine != EM_ARM || 109 (ehdr->e_flags & EF_ARM_ABIMASK) != EF_ARM_ABI_VERSION || 110 #ifndef CFG_WITH_VFP 111 (ehdr->e_flags & EF_ARM_ABI_FLOAT_HARD) || 112 #endif 113 ehdr->e_phentsize != sizeof(Elf32_Phdr) || 114 ehdr->e_shentsize != sizeof(Elf32_Shdr)) 115 return TEE_ERROR_BAD_FORMAT; 116 117 elf->is_32bit = true; 118 elf->e_entry = ehdr->e_entry; 119 elf->e_phoff = ehdr->e_phoff; 120 elf->e_shoff = ehdr->e_shoff; 121 elf->e_phnum = ehdr->e_phnum; 122 elf->e_shnum = ehdr->e_shnum; 123 elf->e_phentsize = ehdr->e_phentsize; 124 elf->e_shentsize = ehdr->e_shentsize; 125 126 return TEE_SUCCESS; 127 } 128 129 #ifdef ARM64 130 static TEE_Result e64_parse_ehdr(struct ta_elf *elf, Elf64_Ehdr *ehdr) 131 { 132 if (ehdr->e_ident[EI_VERSION] != EV_CURRENT || 133 ehdr->e_ident[EI_CLASS] != ELFCLASS64 || 134 ehdr->e_ident[EI_DATA] != ELFDATA2LSB || 135 ehdr->e_ident[EI_OSABI] != ELFOSABI_NONE || 136 ehdr->e_type != ET_DYN || ehdr->e_machine != EM_AARCH64 || 137 ehdr->e_flags || ehdr->e_phentsize != sizeof(Elf64_Phdr) || 138 ehdr->e_shentsize != sizeof(Elf64_Shdr)) 139 return TEE_ERROR_BAD_FORMAT; 140 141 142 elf->is_32bit = false; 143 elf->e_entry = ehdr->e_entry; 144 elf->e_phoff = ehdr->e_phoff; 145 elf->e_shoff = ehdr->e_shoff; 146 elf->e_phnum = ehdr->e_phnum; 147 elf->e_shnum = ehdr->e_shnum; 148 elf->e_phentsize = ehdr->e_phentsize; 149 elf->e_shentsize = ehdr->e_shentsize; 150 151 return TEE_SUCCESS; 152 } 153 #else /*ARM64*/ 154 static TEE_Result e64_parse_ehdr(struct ta_elf *elf __unused, 155 Elf64_Ehdr *ehdr __unused) 156 { 157 return TEE_ERROR_NOT_SUPPORTED; 158 } 159 #endif /*ARM64*/ 160 #endif /* ARM32 || ARM64 */ 161 162 #if defined(RV64) 163 static TEE_Result e32_parse_ehdr(struct ta_elf *elf __unused, 164 Elf32_Ehdr *ehdr __unused) 165 { 166 return TEE_ERROR_BAD_FORMAT; 167 } 168 169 static TEE_Result e64_parse_ehdr(struct ta_elf *elf, Elf64_Ehdr *ehdr) 170 { 171 if (ehdr->e_ident[EI_VERSION] != EV_CURRENT || 172 ehdr->e_ident[EI_CLASS] != ELFCLASS64 || 173 ehdr->e_ident[EI_DATA] != ELFDATA2LSB || 174 ehdr->e_ident[EI_OSABI] != ELFOSABI_NONE || 175 ehdr->e_type != ET_DYN || ehdr->e_machine != EM_RISCV || 176 ehdr->e_phentsize != sizeof(Elf64_Phdr) || 177 ehdr->e_shentsize != sizeof(Elf64_Shdr)) 178 return TEE_ERROR_BAD_FORMAT; 179 180 elf->is_32bit = false; 181 elf->e_entry = ehdr->e_entry; 182 elf->e_phoff = ehdr->e_phoff; 183 elf->e_shoff = ehdr->e_shoff; 184 elf->e_phnum = ehdr->e_phnum; 185 elf->e_shnum = ehdr->e_shnum; 186 elf->e_phentsize = ehdr->e_phentsize; 187 elf->e_shentsize = ehdr->e_shentsize; 188 189 return TEE_SUCCESS; 190 } 191 #endif /* RV64 */ 192 193 static void check_phdr_in_range(struct ta_elf *elf, unsigned int type, 194 vaddr_t addr, size_t memsz) 195 { 196 vaddr_t max_addr = 0; 197 198 if (ADD_OVERFLOW(addr, memsz, &max_addr)) 199 err(TEE_ERROR_BAD_FORMAT, "Program header %#x overflow", type); 200 201 /* 202 * elf->load_addr and elf->max_addr are both using the 203 * final virtual addresses, while this program header is 204 * relative to 0. 205 */ 206 if (max_addr > elf->max_addr - elf->load_addr) 207 err(TEE_ERROR_BAD_FORMAT, "Program header %#x out of bounds", 208 type); 209 } 210 211 static void read_dyn(struct ta_elf *elf, vaddr_t addr, 212 size_t idx, unsigned int *tag, size_t *val) 213 { 214 if (elf->is_32bit) { 215 Elf32_Dyn *dyn = (Elf32_Dyn *)(addr + elf->load_addr); 216 217 *tag = dyn[idx].d_tag; 218 *val = dyn[idx].d_un.d_val; 219 } else { 220 Elf64_Dyn *dyn = (Elf64_Dyn *)(addr + elf->load_addr); 221 222 *tag = dyn[idx].d_tag; 223 *val = dyn[idx].d_un.d_val; 224 } 225 } 226 227 static void check_range(struct ta_elf *elf, const char *name, const void *ptr, 228 size_t sz) 229 { 230 size_t max_addr = 0; 231 232 if ((vaddr_t)ptr < elf->load_addr) 233 err(TEE_ERROR_BAD_FORMAT, "%s %p out of range", name, ptr); 234 235 if (ADD_OVERFLOW((vaddr_t)ptr, sz, &max_addr)) 236 err(TEE_ERROR_BAD_FORMAT, "%s range overflow", name); 237 238 if (max_addr > elf->max_addr) 239 err(TEE_ERROR_BAD_FORMAT, 240 "%s %p..%#zx out of range", name, ptr, max_addr); 241 } 242 243 static void check_hashtab(struct ta_elf *elf, void *ptr, size_t num_buckets, 244 size_t num_chains) 245 { 246 /* 247 * Starting from 2 as the first two words are mandatory and hold 248 * num_buckets and num_chains. So this function is called twice, 249 * first to see that there's indeed room for num_buckets and 250 * num_chains and then to see that all of it fits. 251 * See http://www.sco.com/developers/gabi/latest/ch5.dynamic.html#hash 252 */ 253 size_t num_words = 2; 254 size_t sz = 0; 255 256 if (!IS_ALIGNED_WITH_TYPE(ptr, uint32_t)) 257 err(TEE_ERROR_BAD_FORMAT, "Bad alignment of DT_HASH %p", ptr); 258 259 if (ADD_OVERFLOW(num_words, num_buckets, &num_words) || 260 ADD_OVERFLOW(num_words, num_chains, &num_words) || 261 MUL_OVERFLOW(num_words, sizeof(uint32_t), &sz)) 262 err(TEE_ERROR_BAD_FORMAT, "DT_HASH overflow"); 263 264 check_range(elf, "DT_HASH", ptr, sz); 265 } 266 267 static void check_gnu_hashtab(struct ta_elf *elf, void *ptr) 268 { 269 struct gnu_hashtab *h = ptr; 270 size_t num_words = 4; /* nbuckets, symoffset, bloom_size, bloom_shift */ 271 size_t bloom_words = 0; 272 size_t sz = 0; 273 274 if (!IS_ALIGNED_WITH_TYPE(ptr, uint32_t)) 275 err(TEE_ERROR_BAD_FORMAT, "Bad alignment of DT_GNU_HASH %p", 276 ptr); 277 278 if (elf->gnu_hashtab_size < sizeof(*h)) 279 err(TEE_ERROR_BAD_FORMAT, "DT_GNU_HASH too small"); 280 281 /* Check validity of h->nbuckets and h->bloom_size */ 282 283 if (elf->is_32bit) 284 bloom_words = h->bloom_size; 285 else 286 bloom_words = h->bloom_size * 2; 287 if (ADD_OVERFLOW(num_words, h->nbuckets, &num_words) || 288 ADD_OVERFLOW(num_words, bloom_words, &num_words) || 289 MUL_OVERFLOW(num_words, sizeof(uint32_t), &sz) || 290 sz > elf->gnu_hashtab_size) 291 err(TEE_ERROR_BAD_FORMAT, "DT_GNU_HASH overflow"); 292 } 293 294 static void save_hashtab(struct ta_elf *elf) 295 { 296 uint32_t *hashtab = NULL; 297 size_t n = 0; 298 299 if (elf->is_32bit) { 300 Elf32_Shdr *shdr = elf->shdr; 301 302 for (n = 0; n < elf->e_shnum; n++) { 303 void *addr = (void *)(vaddr_t)(shdr[n].sh_addr + 304 elf->load_addr); 305 306 if (shdr[n].sh_type == SHT_HASH) { 307 elf->hashtab = addr; 308 } else if (shdr[n].sh_type == SHT_GNU_HASH) { 309 elf->gnu_hashtab = addr; 310 elf->gnu_hashtab_size = shdr[n].sh_size; 311 } 312 } 313 } else { 314 Elf64_Shdr *shdr = elf->shdr; 315 316 for (n = 0; n < elf->e_shnum; n++) { 317 void *addr = (void *)(vaddr_t)(shdr[n].sh_addr + 318 elf->load_addr); 319 320 if (shdr[n].sh_type == SHT_HASH) { 321 elf->hashtab = addr; 322 } else if (shdr[n].sh_type == SHT_GNU_HASH) { 323 elf->gnu_hashtab = addr; 324 elf->gnu_hashtab_size = shdr[n].sh_size; 325 } 326 } 327 } 328 329 if (elf->hashtab) { 330 check_hashtab(elf, elf->hashtab, 0, 0); 331 hashtab = elf->hashtab; 332 check_hashtab(elf, elf->hashtab, hashtab[0], hashtab[1]); 333 } 334 if (elf->gnu_hashtab) 335 check_gnu_hashtab(elf, elf->gnu_hashtab); 336 } 337 338 static void save_soname_from_segment(struct ta_elf *elf, unsigned int type, 339 vaddr_t addr, size_t memsz) 340 { 341 size_t dyn_entsize = 0; 342 size_t num_dyns = 0; 343 size_t n = 0; 344 unsigned int tag = 0; 345 size_t val = 0; 346 char *str_tab = NULL; 347 348 if (type != PT_DYNAMIC) 349 return; 350 351 if (elf->is_32bit) 352 dyn_entsize = sizeof(Elf32_Dyn); 353 else 354 dyn_entsize = sizeof(Elf64_Dyn); 355 356 assert(!(memsz % dyn_entsize)); 357 num_dyns = memsz / dyn_entsize; 358 359 for (n = 0; n < num_dyns; n++) { 360 read_dyn(elf, addr, n, &tag, &val); 361 if (tag == DT_STRTAB) { 362 str_tab = (char *)(val + elf->load_addr); 363 break; 364 } 365 } 366 for (n = 0; n < num_dyns; n++) { 367 read_dyn(elf, addr, n, &tag, &val); 368 if (tag == DT_SONAME) { 369 elf->soname = str_tab + val; 370 break; 371 } 372 } 373 } 374 375 static void save_soname(struct ta_elf *elf) 376 { 377 size_t n = 0; 378 379 if (elf->is_32bit) { 380 Elf32_Phdr *phdr = elf->phdr; 381 382 for (n = 0; n < elf->e_phnum; n++) 383 save_soname_from_segment(elf, phdr[n].p_type, 384 phdr[n].p_vaddr, 385 phdr[n].p_memsz); 386 } else { 387 Elf64_Phdr *phdr = elf->phdr; 388 389 for (n = 0; n < elf->e_phnum; n++) 390 save_soname_from_segment(elf, phdr[n].p_type, 391 phdr[n].p_vaddr, 392 phdr[n].p_memsz); 393 } 394 } 395 396 static void e32_save_symtab(struct ta_elf *elf, size_t tab_idx) 397 { 398 Elf32_Shdr *shdr = elf->shdr; 399 size_t str_idx = shdr[tab_idx].sh_link; 400 401 elf->dynsymtab = (void *)(shdr[tab_idx].sh_addr + elf->load_addr); 402 if (!IS_ALIGNED_WITH_TYPE(elf->dynsymtab, Elf32_Sym)) 403 err(TEE_ERROR_BAD_FORMAT, "Bad alignment of dynsymtab %p", 404 elf->dynsymtab); 405 check_range(elf, "Dynsymtab", elf->dynsymtab, shdr[tab_idx].sh_size); 406 407 if (shdr[tab_idx].sh_size % sizeof(Elf32_Sym)) 408 err(TEE_ERROR_BAD_FORMAT, 409 "Size of dynsymtab not an even multiple of Elf32_Sym"); 410 elf->num_dynsyms = shdr[tab_idx].sh_size / sizeof(Elf32_Sym); 411 412 if (str_idx >= elf->e_shnum) 413 err(TEE_ERROR_BAD_FORMAT, "Dynstr section index out of range"); 414 elf->dynstr = (void *)(shdr[str_idx].sh_addr + elf->load_addr); 415 check_range(elf, "Dynstr", elf->dynstr, shdr[str_idx].sh_size); 416 417 elf->dynstr_size = shdr[str_idx].sh_size; 418 } 419 420 static void e64_save_symtab(struct ta_elf *elf, size_t tab_idx) 421 { 422 Elf64_Shdr *shdr = elf->shdr; 423 size_t str_idx = shdr[tab_idx].sh_link; 424 425 elf->dynsymtab = (void *)(vaddr_t)(shdr[tab_idx].sh_addr + 426 elf->load_addr); 427 428 if (!IS_ALIGNED_WITH_TYPE(elf->dynsymtab, Elf64_Sym)) 429 err(TEE_ERROR_BAD_FORMAT, "Bad alignment of .dynsym/DYNSYM %p", 430 elf->dynsymtab); 431 check_range(elf, ".dynsym/DYNSYM", elf->dynsymtab, 432 shdr[tab_idx].sh_size); 433 434 if (shdr[tab_idx].sh_size % sizeof(Elf64_Sym)) 435 err(TEE_ERROR_BAD_FORMAT, 436 "Size of .dynsym/DYNSYM not an even multiple of Elf64_Sym"); 437 elf->num_dynsyms = shdr[tab_idx].sh_size / sizeof(Elf64_Sym); 438 439 if (str_idx >= elf->e_shnum) 440 err(TEE_ERROR_BAD_FORMAT, 441 ".dynstr/STRTAB section index out of range"); 442 elf->dynstr = (void *)(vaddr_t)(shdr[str_idx].sh_addr + elf->load_addr); 443 check_range(elf, ".dynstr/STRTAB", elf->dynstr, shdr[str_idx].sh_size); 444 445 elf->dynstr_size = shdr[str_idx].sh_size; 446 } 447 448 static void save_symtab(struct ta_elf *elf) 449 { 450 size_t n = 0; 451 452 if (elf->is_32bit) { 453 Elf32_Shdr *shdr = elf->shdr; 454 455 for (n = 0; n < elf->e_shnum; n++) { 456 if (shdr[n].sh_type == SHT_DYNSYM) { 457 e32_save_symtab(elf, n); 458 break; 459 } 460 } 461 } else { 462 Elf64_Shdr *shdr = elf->shdr; 463 464 for (n = 0; n < elf->e_shnum; n++) { 465 if (shdr[n].sh_type == SHT_DYNSYM) { 466 e64_save_symtab(elf, n); 467 break; 468 } 469 } 470 471 } 472 473 save_hashtab(elf); 474 save_soname(elf); 475 } 476 477 static void init_elf(struct ta_elf *elf) 478 { 479 TEE_Result res = TEE_SUCCESS; 480 vaddr_t va = 0; 481 uint32_t flags = LDELF_MAP_FLAG_SHAREABLE; 482 size_t sz = 0; 483 484 res = sys_open_ta_bin(&elf->uuid, &elf->handle); 485 if (res) 486 err(res, "sys_open_ta_bin(%pUl)", (void *)&elf->uuid); 487 488 /* 489 * Map it read-only executable when we're loading a library where 490 * the ELF header is included in a load segment. 491 */ 492 if (!elf->is_main) 493 flags |= LDELF_MAP_FLAG_EXECUTABLE; 494 res = sys_map_ta_bin(&va, SMALL_PAGE_SIZE, flags, elf->handle, 0, 0, 0); 495 if (res) 496 err(res, "sys_map_ta_bin"); 497 elf->ehdr_addr = va; 498 if (!elf->is_main) { 499 elf->load_addr = va; 500 elf->max_addr = va + SMALL_PAGE_SIZE; 501 elf->max_offs = SMALL_PAGE_SIZE; 502 } 503 504 if (!IS_ELF(*(Elf32_Ehdr *)va)) 505 err(TEE_ERROR_BAD_FORMAT, "TA is not an ELF"); 506 507 res = e32_parse_ehdr(elf, (void *)va); 508 if (res == TEE_ERROR_BAD_FORMAT) 509 res = e64_parse_ehdr(elf, (void *)va); 510 if (res) 511 err(res, "Cannot parse ELF"); 512 513 if (MUL_OVERFLOW(elf->e_phnum, elf->e_phentsize, &sz) || 514 ADD_OVERFLOW(sz, elf->e_phoff, &sz)) 515 err(TEE_ERROR_BAD_FORMAT, "Program headers size overflow"); 516 517 if (sz > SMALL_PAGE_SIZE) 518 err(TEE_ERROR_NOT_SUPPORTED, "Cannot read program headers"); 519 520 elf->phdr = (void *)(va + elf->e_phoff); 521 } 522 523 static size_t roundup(size_t v) 524 { 525 return ROUNDUP(v, SMALL_PAGE_SIZE); 526 } 527 528 static size_t rounddown(size_t v) 529 { 530 return ROUNDDOWN(v, SMALL_PAGE_SIZE); 531 } 532 533 static void add_segment(struct ta_elf *elf, size_t offset, size_t vaddr, 534 size_t filesz, size_t memsz, size_t flags, size_t align) 535 { 536 struct segment *seg = calloc(1, sizeof(*seg)); 537 538 if (!seg) 539 err(TEE_ERROR_OUT_OF_MEMORY, "calloc"); 540 541 if (memsz < filesz) 542 err(TEE_ERROR_BAD_FORMAT, "Memsz smaller than filesz"); 543 544 seg->offset = offset; 545 seg->vaddr = vaddr; 546 seg->filesz = filesz; 547 seg->memsz = memsz; 548 seg->flags = flags; 549 seg->align = align; 550 551 TAILQ_INSERT_TAIL(&elf->segs, seg, link); 552 } 553 554 static void parse_load_segments(struct ta_elf *elf) 555 { 556 size_t n = 0; 557 558 if (elf->is_32bit) { 559 Elf32_Phdr *phdr = elf->phdr; 560 561 for (n = 0; n < elf->e_phnum; n++) 562 if (phdr[n].p_type == PT_LOAD) { 563 add_segment(elf, phdr[n].p_offset, 564 phdr[n].p_vaddr, phdr[n].p_filesz, 565 phdr[n].p_memsz, phdr[n].p_flags, 566 phdr[n].p_align); 567 } else if (phdr[n].p_type == PT_ARM_EXIDX) { 568 elf->exidx_start = phdr[n].p_vaddr; 569 elf->exidx_size = phdr[n].p_filesz; 570 } else if (phdr[n].p_type == PT_TLS) { 571 assign_tls_mod_id(elf); 572 } 573 } else { 574 Elf64_Phdr *phdr = elf->phdr; 575 576 for (n = 0; n < elf->e_phnum; n++) 577 if (phdr[n].p_type == PT_LOAD) { 578 add_segment(elf, phdr[n].p_offset, 579 phdr[n].p_vaddr, phdr[n].p_filesz, 580 phdr[n].p_memsz, phdr[n].p_flags, 581 phdr[n].p_align); 582 } else if (phdr[n].p_type == PT_TLS) { 583 elf->tls_start = phdr[n].p_vaddr; 584 elf->tls_filesz = phdr[n].p_filesz; 585 elf->tls_memsz = phdr[n].p_memsz; 586 } else if (IS_ENABLED(CFG_TA_BTI) && 587 phdr[n].p_type == PT_GNU_PROPERTY) { 588 elf->prop_start = phdr[n].p_vaddr; 589 elf->prop_align = phdr[n].p_align; 590 elf->prop_memsz = phdr[n].p_memsz; 591 } 592 } 593 } 594 595 static void copy_remapped_to(struct ta_elf *elf, const struct segment *seg) 596 { 597 uint8_t *dst = (void *)(seg->vaddr + elf->load_addr); 598 size_t n = 0; 599 size_t offs = seg->offset; 600 size_t num_bytes = seg->filesz; 601 602 if (offs < elf->max_offs) { 603 n = MIN(elf->max_offs - offs, num_bytes); 604 memcpy(dst, (void *)(elf->max_addr + offs - elf->max_offs), n); 605 dst += n; 606 offs += n; 607 num_bytes -= n; 608 } 609 610 if (num_bytes) { 611 TEE_Result res = sys_copy_from_ta_bin(dst, num_bytes, 612 elf->handle, offs); 613 614 if (res) 615 err(res, "sys_copy_from_ta_bin"); 616 elf->max_offs += offs; 617 } 618 } 619 620 static void adjust_segments(struct ta_elf *elf) 621 { 622 struct segment *seg = NULL; 623 struct segment *prev_seg = NULL; 624 size_t prev_end_addr = 0; 625 size_t align = 0; 626 size_t mask = 0; 627 628 /* Sanity check */ 629 TAILQ_FOREACH(seg, &elf->segs, link) { 630 size_t dummy __maybe_unused = 0; 631 632 assert(seg->align >= SMALL_PAGE_SIZE); 633 assert(!ADD_OVERFLOW(seg->vaddr, seg->memsz, &dummy)); 634 assert(seg->filesz <= seg->memsz); 635 assert((seg->offset & SMALL_PAGE_MASK) == 636 (seg->vaddr & SMALL_PAGE_MASK)); 637 638 prev_seg = TAILQ_PREV(seg, segment_head, link); 639 if (prev_seg) { 640 assert(seg->vaddr >= prev_seg->vaddr + prev_seg->memsz); 641 assert(seg->offset >= 642 prev_seg->offset + prev_seg->filesz); 643 } 644 if (!align) 645 align = seg->align; 646 assert(align == seg->align); 647 } 648 649 mask = align - 1; 650 651 seg = TAILQ_FIRST(&elf->segs); 652 if (seg) 653 seg = TAILQ_NEXT(seg, link); 654 while (seg) { 655 prev_seg = TAILQ_PREV(seg, segment_head, link); 656 prev_end_addr = prev_seg->vaddr + prev_seg->memsz; 657 658 /* 659 * This segment may overlap with the last "page" in the 660 * previous segment in two different ways: 661 * 1. Virtual address (and offset) overlaps => 662 * Permissions needs to be merged. The offset must have 663 * the SMALL_PAGE_MASK bits set as vaddr and offset must 664 * add up with prevsion segment. 665 * 666 * 2. Only offset overlaps => 667 * The same page in the ELF is mapped at two different 668 * virtual addresses. As a limitation this segment must 669 * be mapped as writeable. 670 */ 671 672 /* Case 1. */ 673 if (rounddown(seg->vaddr) < prev_end_addr) { 674 assert((seg->vaddr & mask) == (seg->offset & mask)); 675 assert(prev_seg->memsz == prev_seg->filesz); 676 677 /* 678 * Merge the segments and their permissions. 679 * Note that the may be a small hole between the 680 * two sections. 681 */ 682 prev_seg->filesz = seg->vaddr + seg->filesz - 683 prev_seg->vaddr; 684 prev_seg->memsz = seg->vaddr + seg->memsz - 685 prev_seg->vaddr; 686 prev_seg->flags |= seg->flags; 687 688 TAILQ_REMOVE(&elf->segs, seg, link); 689 free(seg); 690 seg = TAILQ_NEXT(prev_seg, link); 691 continue; 692 } 693 694 /* Case 2. */ 695 if ((seg->offset & mask) && 696 rounddown(seg->offset) < 697 (prev_seg->offset + prev_seg->filesz)) { 698 699 assert(seg->flags & PF_W); 700 seg->remapped_writeable = true; 701 } 702 703 /* 704 * No overlap, but we may need to align address, offset and 705 * size. 706 */ 707 seg->filesz += seg->vaddr - rounddown(seg->vaddr); 708 seg->memsz += seg->vaddr - rounddown(seg->vaddr); 709 seg->vaddr = rounddown(seg->vaddr); 710 seg->offset = rounddown(seg->offset); 711 seg = TAILQ_NEXT(seg, link); 712 } 713 714 } 715 716 static void populate_segments_legacy(struct ta_elf *elf) 717 { 718 TEE_Result res = TEE_SUCCESS; 719 struct segment *seg = NULL; 720 vaddr_t va = 0; 721 722 assert(elf->is_legacy); 723 TAILQ_FOREACH(seg, &elf->segs, link) { 724 struct segment *last_seg = TAILQ_LAST(&elf->segs, segment_head); 725 size_t pad_end = roundup(last_seg->vaddr + last_seg->memsz - 726 seg->vaddr - seg->memsz); 727 size_t num_bytes = roundup(seg->memsz); 728 729 if (!elf->load_addr) 730 va = 0; 731 else 732 va = seg->vaddr + elf->load_addr; 733 734 735 if (!(seg->flags & PF_R)) 736 err(TEE_ERROR_NOT_SUPPORTED, 737 "Segment must be readable"); 738 739 res = sys_map_zi(num_bytes, 0, &va, 0, pad_end); 740 if (res) 741 err(res, "sys_map_zi"); 742 res = sys_copy_from_ta_bin((void *)va, seg->filesz, 743 elf->handle, seg->offset); 744 if (res) 745 err(res, "sys_copy_from_ta_bin"); 746 747 if (!elf->load_addr) 748 elf->load_addr = va; 749 elf->max_addr = va + num_bytes; 750 elf->max_offs = seg->offset + seg->filesz; 751 } 752 } 753 754 static size_t get_pad_begin(void) 755 { 756 #ifdef CFG_TA_ASLR 757 size_t min = CFG_TA_ASLR_MIN_OFFSET_PAGES; 758 size_t max = CFG_TA_ASLR_MAX_OFFSET_PAGES; 759 TEE_Result res = TEE_SUCCESS; 760 uint32_t rnd32 = 0; 761 size_t rnd = 0; 762 763 COMPILE_TIME_ASSERT(CFG_TA_ASLR_MIN_OFFSET_PAGES < 764 CFG_TA_ASLR_MAX_OFFSET_PAGES); 765 if (max > min) { 766 res = sys_gen_random_num(&rnd32, sizeof(rnd32)); 767 if (res) { 768 DMSG("Random read failed: %#"PRIx32, res); 769 return min * SMALL_PAGE_SIZE; 770 } 771 rnd = rnd32 % (max - min); 772 } 773 774 return (min + rnd) * SMALL_PAGE_SIZE; 775 #else /*!CFG_TA_ASLR*/ 776 return 0; 777 #endif /*!CFG_TA_ASLR*/ 778 } 779 780 static void populate_segments(struct ta_elf *elf) 781 { 782 TEE_Result res = TEE_SUCCESS; 783 struct segment *seg = NULL; 784 vaddr_t va = 0; 785 size_t pad_begin = 0; 786 787 assert(!elf->is_legacy); 788 TAILQ_FOREACH(seg, &elf->segs, link) { 789 struct segment *last_seg = TAILQ_LAST(&elf->segs, segment_head); 790 size_t pad_end = roundup(last_seg->vaddr + last_seg->memsz - 791 seg->vaddr - seg->memsz); 792 793 if (seg->remapped_writeable) { 794 size_t num_bytes = roundup(seg->vaddr + seg->memsz) - 795 rounddown(seg->vaddr); 796 797 assert(elf->load_addr); 798 va = rounddown(elf->load_addr + seg->vaddr); 799 assert(va >= elf->max_addr); 800 res = sys_map_zi(num_bytes, 0, &va, 0, pad_end); 801 if (res) 802 err(res, "sys_map_zi"); 803 804 copy_remapped_to(elf, seg); 805 elf->max_addr = va + num_bytes; 806 } else { 807 uint32_t flags = 0; 808 size_t filesz = seg->filesz; 809 size_t memsz = seg->memsz; 810 size_t offset = seg->offset; 811 size_t vaddr = seg->vaddr; 812 813 if (offset < elf->max_offs) { 814 /* 815 * We're in a load segment which overlaps 816 * with (or is covered by) the first page 817 * of a shared library. 818 */ 819 if (vaddr + filesz < SMALL_PAGE_SIZE) { 820 size_t num_bytes = 0; 821 822 /* 823 * If this segment is completely 824 * covered, take next. 825 */ 826 if (vaddr + memsz <= SMALL_PAGE_SIZE) 827 continue; 828 829 /* 830 * All data of the segment is 831 * loaded, but we need to zero 832 * extend it. 833 */ 834 va = elf->max_addr; 835 num_bytes = roundup(vaddr + memsz) - 836 roundup(vaddr) - 837 SMALL_PAGE_SIZE; 838 assert(num_bytes); 839 res = sys_map_zi(num_bytes, 0, &va, 0, 840 0); 841 if (res) 842 err(res, "sys_map_zi"); 843 elf->max_addr = roundup(va + num_bytes); 844 continue; 845 } 846 847 /* Partial overlap, remove the first page. */ 848 vaddr += SMALL_PAGE_SIZE; 849 filesz -= SMALL_PAGE_SIZE; 850 memsz -= SMALL_PAGE_SIZE; 851 offset += SMALL_PAGE_SIZE; 852 } 853 854 if (!elf->load_addr) { 855 va = 0; 856 pad_begin = get_pad_begin(); 857 /* 858 * If mapping with pad_begin fails we'll 859 * retry without pad_begin, effectively 860 * disabling ASLR for the current ELF file. 861 */ 862 } else { 863 va = vaddr + elf->load_addr; 864 pad_begin = 0; 865 } 866 867 if (seg->flags & PF_W) 868 flags |= LDELF_MAP_FLAG_WRITEABLE; 869 else 870 flags |= LDELF_MAP_FLAG_SHAREABLE; 871 if (seg->flags & PF_X) 872 flags |= LDELF_MAP_FLAG_EXECUTABLE; 873 if (!(seg->flags & PF_R)) 874 err(TEE_ERROR_NOT_SUPPORTED, 875 "Segment must be readable"); 876 if (flags & LDELF_MAP_FLAG_WRITEABLE) { 877 res = sys_map_zi(memsz, 0, &va, pad_begin, 878 pad_end); 879 if (pad_begin && res == TEE_ERROR_OUT_OF_MEMORY) 880 res = sys_map_zi(memsz, 0, &va, 0, 881 pad_end); 882 if (res) 883 err(res, "sys_map_zi"); 884 res = sys_copy_from_ta_bin((void *)va, filesz, 885 elf->handle, offset); 886 if (res) 887 err(res, "sys_copy_from_ta_bin"); 888 } else { 889 if (filesz != memsz) 890 err(TEE_ERROR_BAD_FORMAT, 891 "Filesz and memsz mismatch"); 892 res = sys_map_ta_bin(&va, filesz, flags, 893 elf->handle, offset, 894 pad_begin, pad_end); 895 if (pad_begin && res == TEE_ERROR_OUT_OF_MEMORY) 896 res = sys_map_ta_bin(&va, filesz, flags, 897 elf->handle, 898 offset, 0, 899 pad_end); 900 if (res) 901 err(res, "sys_map_ta_bin"); 902 } 903 904 if (!elf->load_addr) 905 elf->load_addr = va; 906 elf->max_addr = roundup(va + memsz); 907 elf->max_offs += filesz; 908 } 909 } 910 } 911 912 static void ta_elf_add_bti(struct ta_elf *elf) 913 { 914 TEE_Result res = TEE_SUCCESS; 915 struct segment *seg = NULL; 916 uint32_t flags = LDELF_MAP_FLAG_EXECUTABLE | LDELF_MAP_FLAG_BTI; 917 918 TAILQ_FOREACH(seg, &elf->segs, link) { 919 vaddr_t va = elf->load_addr + seg->vaddr; 920 921 if (seg->flags & PF_X) { 922 res = sys_set_prot(va, seg->memsz, flags); 923 if (res) 924 err(res, "sys_set_prot"); 925 } 926 } 927 } 928 929 static void parse_property_segment(struct ta_elf *elf) 930 { 931 char *desc = NULL; 932 size_t align = elf->prop_align; 933 size_t desc_offset = 0; 934 size_t prop_offset = 0; 935 vaddr_t va = 0; 936 Elf_Note *note = NULL; 937 char *name = NULL; 938 939 if (!IS_ENABLED(CFG_TA_BTI) || !elf->prop_start) 940 return; 941 942 check_phdr_in_range(elf, PT_GNU_PROPERTY, elf->prop_start, 943 elf->prop_memsz); 944 945 va = elf->load_addr + elf->prop_start; 946 note = (void *)va; 947 name = (char *)(note + 1); 948 949 if (elf->prop_memsz < sizeof(*note) + sizeof(ELF_NOTE_GNU)) 950 return; 951 952 if (note->n_type != NT_GNU_PROPERTY_TYPE_0 || 953 note->n_namesz != sizeof(ELF_NOTE_GNU) || 954 memcmp(name, ELF_NOTE_GNU, sizeof(ELF_NOTE_GNU)) || 955 !IS_POWER_OF_TWO(align)) 956 return; 957 958 desc_offset = ROUNDUP(sizeof(*note) + sizeof(ELF_NOTE_GNU), align); 959 960 if (desc_offset > elf->prop_memsz || 961 ROUNDUP(desc_offset + note->n_descsz, align) > elf->prop_memsz) 962 return; 963 964 desc = (char *)(va + desc_offset); 965 966 do { 967 Elf_Prop *prop = (void *)(desc + prop_offset); 968 size_t data_offset = prop_offset + sizeof(*prop); 969 970 if (note->n_descsz < data_offset) 971 return; 972 973 data_offset = confine_array_index(data_offset, note->n_descsz); 974 975 if (prop->pr_type == GNU_PROPERTY_AARCH64_FEATURE_1_AND) { 976 uint32_t *pr_data = (void *)(desc + data_offset); 977 978 if (note->n_descsz < (data_offset + sizeof(*pr_data)) && 979 prop->pr_datasz != sizeof(*pr_data)) 980 return; 981 982 if (*pr_data & GNU_PROPERTY_AARCH64_FEATURE_1_BTI) { 983 DMSG("BTI Feature present in note property"); 984 elf->bti_enabled = true; 985 } 986 } 987 988 prop_offset += ROUNDUP(sizeof(*prop) + prop->pr_datasz, align); 989 } while (prop_offset < note->n_descsz); 990 } 991 992 static void map_segments(struct ta_elf *elf) 993 { 994 TEE_Result res = TEE_SUCCESS; 995 996 parse_load_segments(elf); 997 adjust_segments(elf); 998 if (TAILQ_FIRST(&elf->segs)->offset < SMALL_PAGE_SIZE) { 999 vaddr_t va = 0; 1000 size_t sz = elf->max_addr - elf->load_addr; 1001 struct segment *seg = TAILQ_LAST(&elf->segs, segment_head); 1002 size_t pad_begin = get_pad_begin(); 1003 1004 /* 1005 * We're loading a library, if not other parts of the code 1006 * need to be updated too. 1007 */ 1008 assert(!elf->is_main); 1009 1010 /* 1011 * Now that we know how much virtual memory is needed move 1012 * the already mapped part to a location which can 1013 * accommodate us. 1014 */ 1015 res = sys_remap(elf->load_addr, &va, sz, pad_begin, 1016 roundup(seg->vaddr + seg->memsz)); 1017 if (res == TEE_ERROR_OUT_OF_MEMORY) 1018 res = sys_remap(elf->load_addr, &va, sz, 0, 1019 roundup(seg->vaddr + seg->memsz)); 1020 if (res) 1021 err(res, "sys_remap"); 1022 elf->ehdr_addr = va; 1023 elf->load_addr = va; 1024 elf->max_addr = va + sz; 1025 elf->phdr = (void *)(va + elf->e_phoff); 1026 } 1027 } 1028 1029 static void add_deps_from_segment(struct ta_elf *elf, unsigned int type, 1030 vaddr_t addr, size_t memsz) 1031 { 1032 size_t dyn_entsize = 0; 1033 size_t num_dyns = 0; 1034 size_t n = 0; 1035 unsigned int tag = 0; 1036 size_t val = 0; 1037 TEE_UUID uuid = { }; 1038 char *str_tab = NULL; 1039 size_t str_tab_sz = 0; 1040 1041 if (type != PT_DYNAMIC) 1042 return; 1043 1044 check_phdr_in_range(elf, type, addr, memsz); 1045 1046 if (elf->is_32bit) 1047 dyn_entsize = sizeof(Elf32_Dyn); 1048 else 1049 dyn_entsize = sizeof(Elf64_Dyn); 1050 1051 assert(!(memsz % dyn_entsize)); 1052 num_dyns = memsz / dyn_entsize; 1053 1054 for (n = 0; n < num_dyns && !(str_tab && str_tab_sz); n++) { 1055 read_dyn(elf, addr, n, &tag, &val); 1056 if (tag == DT_STRTAB) 1057 str_tab = (char *)(val + elf->load_addr); 1058 else if (tag == DT_STRSZ) 1059 str_tab_sz = val; 1060 } 1061 check_range(elf, ".dynstr/STRTAB", str_tab, str_tab_sz); 1062 1063 for (n = 0; n < num_dyns; n++) { 1064 read_dyn(elf, addr, n, &tag, &val); 1065 if (tag != DT_NEEDED) 1066 continue; 1067 if (val >= str_tab_sz) 1068 err(TEE_ERROR_BAD_FORMAT, 1069 "Offset into .dynstr/STRTAB out of range"); 1070 tee_uuid_from_str(&uuid, str_tab + val); 1071 queue_elf(&uuid); 1072 } 1073 } 1074 1075 static void add_dependencies(struct ta_elf *elf) 1076 { 1077 size_t n = 0; 1078 1079 if (elf->is_32bit) { 1080 Elf32_Phdr *phdr = elf->phdr; 1081 1082 for (n = 0; n < elf->e_phnum; n++) 1083 add_deps_from_segment(elf, phdr[n].p_type, 1084 phdr[n].p_vaddr, phdr[n].p_memsz); 1085 } else { 1086 Elf64_Phdr *phdr = elf->phdr; 1087 1088 for (n = 0; n < elf->e_phnum; n++) 1089 add_deps_from_segment(elf, phdr[n].p_type, 1090 phdr[n].p_vaddr, phdr[n].p_memsz); 1091 } 1092 } 1093 1094 static void copy_section_headers(struct ta_elf *elf) 1095 { 1096 TEE_Result res = TEE_SUCCESS; 1097 size_t sz = 0; 1098 size_t offs = 0; 1099 1100 if (MUL_OVERFLOW(elf->e_shnum, elf->e_shentsize, &sz)) 1101 err(TEE_ERROR_BAD_FORMAT, "Section headers size overflow"); 1102 1103 elf->shdr = malloc(sz); 1104 if (!elf->shdr) 1105 err(TEE_ERROR_OUT_OF_MEMORY, "malloc"); 1106 1107 /* 1108 * We're assuming that section headers comes after the load segments, 1109 * but if it's a very small dynamically linked library the section 1110 * headers can still end up (partially?) in the first mapped page. 1111 */ 1112 if (elf->e_shoff < SMALL_PAGE_SIZE) { 1113 assert(!elf->is_main); 1114 offs = MIN(SMALL_PAGE_SIZE - elf->e_shoff, sz); 1115 memcpy(elf->shdr, (void *)(elf->load_addr + elf->e_shoff), 1116 offs); 1117 } 1118 1119 if (offs < sz) { 1120 res = sys_copy_from_ta_bin((uint8_t *)elf->shdr + offs, 1121 sz - offs, elf->handle, 1122 elf->e_shoff + offs); 1123 if (res) 1124 err(res, "sys_copy_from_ta_bin"); 1125 } 1126 } 1127 1128 static void close_handle(struct ta_elf *elf) 1129 { 1130 TEE_Result res = sys_close_ta_bin(elf->handle); 1131 1132 if (res) 1133 err(res, "sys_close_ta_bin"); 1134 elf->handle = -1; 1135 } 1136 1137 static void clean_elf_load_main(struct ta_elf *elf) 1138 { 1139 TEE_Result res = TEE_SUCCESS; 1140 1141 /* 1142 * Clean up from last attempt to load 1143 */ 1144 res = sys_unmap(elf->ehdr_addr, SMALL_PAGE_SIZE); 1145 if (res) 1146 err(res, "sys_unmap"); 1147 1148 while (!TAILQ_EMPTY(&elf->segs)) { 1149 struct segment *seg = TAILQ_FIRST(&elf->segs); 1150 vaddr_t va = 0; 1151 size_t num_bytes = 0; 1152 1153 va = rounddown(elf->load_addr + seg->vaddr); 1154 if (seg->remapped_writeable) 1155 num_bytes = roundup(seg->vaddr + seg->memsz) - 1156 rounddown(seg->vaddr); 1157 else 1158 num_bytes = seg->memsz; 1159 1160 res = sys_unmap(va, num_bytes); 1161 if (res) 1162 err(res, "sys_unmap"); 1163 1164 TAILQ_REMOVE(&elf->segs, seg, link); 1165 free(seg); 1166 } 1167 1168 free(elf->shdr); 1169 memset(&elf->is_32bit, 0, 1170 (vaddr_t)&elf->uuid - (vaddr_t)&elf->is_32bit); 1171 1172 TAILQ_INIT(&elf->segs); 1173 } 1174 1175 #ifdef ARM64 1176 /* 1177 * Allocates an offset in the TA's Thread Control Block for the TLS segment of 1178 * the @elf module. 1179 */ 1180 #define TCB_HEAD_SIZE (2 * sizeof(long)) 1181 static void set_tls_offset(struct ta_elf *elf) 1182 { 1183 static size_t next_offs = TCB_HEAD_SIZE; 1184 1185 if (!elf->tls_start) 1186 return; 1187 1188 /* Module has a TLS segment */ 1189 elf->tls_tcb_offs = next_offs; 1190 next_offs += elf->tls_memsz; 1191 } 1192 #else 1193 static void set_tls_offset(struct ta_elf *elf __unused) {} 1194 #endif 1195 1196 static void load_main(struct ta_elf *elf) 1197 { 1198 vaddr_t va = 0; 1199 1200 init_elf(elf); 1201 map_segments(elf); 1202 populate_segments(elf); 1203 add_dependencies(elf); 1204 copy_section_headers(elf); 1205 save_symtab(elf); 1206 close_handle(elf); 1207 set_tls_offset(elf); 1208 parse_property_segment(elf); 1209 if (elf->bti_enabled) 1210 ta_elf_add_bti(elf); 1211 1212 if (!ta_elf_resolve_sym("ta_head", &va, NULL, elf)) 1213 elf->head = (struct ta_head *)va; 1214 else 1215 elf->head = (struct ta_head *)elf->load_addr; 1216 if (elf->head->depr_entry != UINT64_MAX) { 1217 /* 1218 * Legacy TAs sets their entry point in ta_head. For 1219 * non-legacy TAs the entry point of the ELF is set instead 1220 * and leaving the ta_head entry point set to UINT64_MAX to 1221 * indicate that it's not used. 1222 * 1223 * NB, everything before the commit a73b5878c89d ("Replace 1224 * ta_head.entry with elf entry") is considered legacy TAs 1225 * for ldelf. 1226 * 1227 * Legacy TAs cannot be mapped with shared memory segments 1228 * so restart the mapping if it turned out we're loading a 1229 * legacy TA. 1230 */ 1231 1232 DMSG("Reloading TA %pUl as legacy TA", (void *)&elf->uuid); 1233 clean_elf_load_main(elf); 1234 elf->is_legacy = true; 1235 init_elf(elf); 1236 map_segments(elf); 1237 populate_segments_legacy(elf); 1238 add_dependencies(elf); 1239 copy_section_headers(elf); 1240 save_symtab(elf); 1241 close_handle(elf); 1242 elf->head = (struct ta_head *)elf->load_addr; 1243 /* 1244 * Check that the TA is still a legacy TA, if it isn't give 1245 * up now since we're likely under attack. 1246 */ 1247 if (elf->head->depr_entry == UINT64_MAX) 1248 err(TEE_ERROR_GENERIC, 1249 "TA %pUl was changed on disk to non-legacy", 1250 (void *)&elf->uuid); 1251 } 1252 1253 } 1254 1255 void ta_elf_load_main(const TEE_UUID *uuid, uint32_t *is_32bit, uint64_t *sp, 1256 uint32_t *ta_flags) 1257 { 1258 struct ta_elf *elf = queue_elf(uuid); 1259 vaddr_t va = 0; 1260 TEE_Result res = TEE_SUCCESS; 1261 1262 assert(elf); 1263 elf->is_main = true; 1264 1265 load_main(elf); 1266 1267 *is_32bit = elf->is_32bit; 1268 res = sys_map_zi(elf->head->stack_size, 0, &va, 0, 0); 1269 if (res) 1270 err(res, "sys_map_zi stack"); 1271 1272 if (elf->head->flags & ~TA_FLAGS_MASK) 1273 err(TEE_ERROR_BAD_FORMAT, "Invalid TA flags(s) %#"PRIx32, 1274 elf->head->flags & ~TA_FLAGS_MASK); 1275 1276 *ta_flags = elf->head->flags; 1277 *sp = va + elf->head->stack_size; 1278 ta_stack = va; 1279 ta_stack_size = elf->head->stack_size; 1280 } 1281 1282 void ta_elf_finalize_load_main(uint64_t *entry, uint64_t *load_addr) 1283 { 1284 struct ta_elf *elf = TAILQ_FIRST(&main_elf_queue); 1285 TEE_Result res = TEE_SUCCESS; 1286 1287 assert(elf->is_main); 1288 1289 res = ta_elf_set_init_fini_info_compat(elf->is_32bit); 1290 if (res) 1291 err(res, "ta_elf_set_init_fini_info_compat"); 1292 res = ta_elf_set_elf_phdr_info(elf->is_32bit); 1293 if (res) 1294 err(res, "ta_elf_set_elf_phdr_info"); 1295 1296 if (elf->is_legacy) 1297 *entry = elf->head->depr_entry; 1298 else 1299 *entry = elf->e_entry + elf->load_addr; 1300 1301 *load_addr = elf->load_addr; 1302 } 1303 1304 1305 void ta_elf_load_dependency(struct ta_elf *elf, bool is_32bit) 1306 { 1307 if (elf->is_main) 1308 return; 1309 1310 init_elf(elf); 1311 if (elf->is_32bit != is_32bit) 1312 err(TEE_ERROR_BAD_FORMAT, "ELF %pUl is %sbit (expected %sbit)", 1313 (void *)&elf->uuid, elf->is_32bit ? "32" : "64", 1314 is_32bit ? "32" : "64"); 1315 1316 map_segments(elf); 1317 populate_segments(elf); 1318 add_dependencies(elf); 1319 copy_section_headers(elf); 1320 save_symtab(elf); 1321 close_handle(elf); 1322 set_tls_offset(elf); 1323 parse_property_segment(elf); 1324 if (elf->bti_enabled) 1325 ta_elf_add_bti(elf); 1326 } 1327 1328 void ta_elf_finalize_mappings(struct ta_elf *elf) 1329 { 1330 TEE_Result res = TEE_SUCCESS; 1331 struct segment *seg = NULL; 1332 1333 if (!elf->is_legacy) 1334 return; 1335 1336 TAILQ_FOREACH(seg, &elf->segs, link) { 1337 vaddr_t va = elf->load_addr + seg->vaddr; 1338 uint32_t flags = 0; 1339 1340 if (seg->flags & PF_W) 1341 flags |= LDELF_MAP_FLAG_WRITEABLE; 1342 if (seg->flags & PF_X) 1343 flags |= LDELF_MAP_FLAG_EXECUTABLE; 1344 1345 res = sys_set_prot(va, seg->memsz, flags); 1346 if (res) 1347 err(res, "sys_set_prot"); 1348 } 1349 } 1350 1351 static void __printf(3, 4) print_wrapper(void *pctx, print_func_t print_func, 1352 const char *fmt, ...) 1353 { 1354 va_list ap; 1355 1356 va_start(ap, fmt); 1357 print_func(pctx, fmt, ap); 1358 va_end(ap); 1359 } 1360 1361 static void print_seg(void *pctx, print_func_t print_func, 1362 size_t idx __maybe_unused, int elf_idx __maybe_unused, 1363 vaddr_t va __maybe_unused, paddr_t pa __maybe_unused, 1364 size_t sz __maybe_unused, uint32_t flags) 1365 { 1366 int rc __maybe_unused = 0; 1367 int width __maybe_unused = 8; 1368 char desc[14] __maybe_unused = ""; 1369 char flags_str[] __maybe_unused = "----"; 1370 1371 if (elf_idx > -1) { 1372 rc = snprintf(desc, sizeof(desc), " [%d]", elf_idx); 1373 assert(rc >= 0); 1374 } else { 1375 if (flags & DUMP_MAP_EPHEM) { 1376 rc = snprintf(desc, sizeof(desc), " (param)"); 1377 assert(rc >= 0); 1378 } 1379 if (flags & DUMP_MAP_LDELF) { 1380 rc = snprintf(desc, sizeof(desc), " (ldelf)"); 1381 assert(rc >= 0); 1382 } 1383 if (va == ta_stack) { 1384 rc = snprintf(desc, sizeof(desc), " (stack)"); 1385 assert(rc >= 0); 1386 } 1387 } 1388 1389 if (flags & DUMP_MAP_READ) 1390 flags_str[0] = 'r'; 1391 if (flags & DUMP_MAP_WRITE) 1392 flags_str[1] = 'w'; 1393 if (flags & DUMP_MAP_EXEC) 1394 flags_str[2] = 'x'; 1395 if (flags & DUMP_MAP_SECURE) 1396 flags_str[3] = 's'; 1397 1398 print_wrapper(pctx, print_func, 1399 "region %2zu: va 0x%0*"PRIxVA" pa 0x%0*"PRIxPA" size 0x%06zx flags %s%s\n", 1400 idx, width, va, width, pa, sz, flags_str, desc); 1401 } 1402 1403 static bool get_next_in_order(struct ta_elf_queue *elf_queue, 1404 struct ta_elf **elf, struct segment **seg, 1405 size_t *elf_idx) 1406 { 1407 struct ta_elf *e = NULL; 1408 struct segment *s = NULL; 1409 size_t idx = 0; 1410 vaddr_t va = 0; 1411 struct ta_elf *e2 = NULL; 1412 size_t i2 = 0; 1413 1414 assert(elf && seg && elf_idx); 1415 e = *elf; 1416 s = *seg; 1417 assert((e == NULL && s == NULL) || (e != NULL && s != NULL)); 1418 1419 if (s) { 1420 s = TAILQ_NEXT(s, link); 1421 if (s) { 1422 *seg = s; 1423 return true; 1424 } 1425 } 1426 1427 if (e) 1428 va = e->load_addr; 1429 1430 /* Find the ELF with next load address */ 1431 e = NULL; 1432 TAILQ_FOREACH(e2, elf_queue, link) { 1433 if (e2->load_addr > va) { 1434 if (!e || e2->load_addr < e->load_addr) { 1435 e = e2; 1436 idx = i2; 1437 } 1438 } 1439 i2++; 1440 } 1441 if (!e) 1442 return false; 1443 1444 *elf = e; 1445 *seg = TAILQ_FIRST(&e->segs); 1446 *elf_idx = idx; 1447 return true; 1448 } 1449 1450 void ta_elf_print_mappings(void *pctx, print_func_t print_func, 1451 struct ta_elf_queue *elf_queue, size_t num_maps, 1452 struct dump_map *maps, vaddr_t mpool_base) 1453 { 1454 struct segment *seg = NULL; 1455 struct ta_elf *elf = NULL; 1456 size_t elf_idx = 0; 1457 size_t idx = 0; 1458 size_t map_idx = 0; 1459 1460 /* 1461 * Loop over all segments and maps, printing virtual address in 1462 * order. Segment has priority if the virtual address is present 1463 * in both map and segment. 1464 */ 1465 get_next_in_order(elf_queue, &elf, &seg, &elf_idx); 1466 while (true) { 1467 vaddr_t va = -1; 1468 size_t sz = 0; 1469 uint32_t flags = DUMP_MAP_SECURE; 1470 size_t offs = 0; 1471 1472 if (seg) { 1473 va = rounddown(seg->vaddr + elf->load_addr); 1474 sz = roundup(seg->vaddr + seg->memsz) - 1475 rounddown(seg->vaddr); 1476 } 1477 1478 while (map_idx < num_maps && maps[map_idx].va <= va) { 1479 uint32_t f = 0; 1480 1481 /* If there's a match, it should be the same map */ 1482 if (maps[map_idx].va == va) { 1483 /* 1484 * In shared libraries the first page is 1485 * mapped separately with the rest of that 1486 * segment following back to back in a 1487 * separate entry. 1488 */ 1489 if (map_idx + 1 < num_maps && 1490 maps[map_idx].sz == SMALL_PAGE_SIZE) { 1491 vaddr_t next_va = maps[map_idx].va + 1492 maps[map_idx].sz; 1493 size_t comb_sz = maps[map_idx].sz + 1494 maps[map_idx + 1].sz; 1495 1496 if (next_va == maps[map_idx + 1].va && 1497 comb_sz == sz && 1498 maps[map_idx].flags == 1499 maps[map_idx + 1].flags) { 1500 /* Skip this and next entry */ 1501 map_idx += 2; 1502 continue; 1503 } 1504 } 1505 assert(maps[map_idx].sz == sz); 1506 } else if (maps[map_idx].va < va) { 1507 if (maps[map_idx].va == mpool_base) 1508 f |= DUMP_MAP_LDELF; 1509 print_seg(pctx, print_func, idx, -1, 1510 maps[map_idx].va, maps[map_idx].pa, 1511 maps[map_idx].sz, 1512 maps[map_idx].flags | f); 1513 idx++; 1514 } 1515 map_idx++; 1516 } 1517 1518 if (!seg) 1519 break; 1520 1521 offs = rounddown(seg->offset); 1522 if (seg->flags & PF_R) 1523 flags |= DUMP_MAP_READ; 1524 if (seg->flags & PF_W) 1525 flags |= DUMP_MAP_WRITE; 1526 if (seg->flags & PF_X) 1527 flags |= DUMP_MAP_EXEC; 1528 1529 print_seg(pctx, print_func, idx, elf_idx, va, offs, sz, flags); 1530 idx++; 1531 1532 if (!get_next_in_order(elf_queue, &elf, &seg, &elf_idx)) 1533 seg = NULL; 1534 } 1535 1536 elf_idx = 0; 1537 TAILQ_FOREACH(elf, elf_queue, link) { 1538 print_wrapper(pctx, print_func, 1539 " [%zu] %pUl @ 0x%0*"PRIxVA"\n", 1540 elf_idx, (void *)&elf->uuid, 8, elf->load_addr); 1541 elf_idx++; 1542 } 1543 } 1544 1545 #ifdef CFG_UNWIND 1546 1547 #if defined(ARM32) || defined(ARM64) 1548 /* Called by libunw */ 1549 bool find_exidx(vaddr_t addr, vaddr_t *idx_start, vaddr_t *idx_end) 1550 { 1551 struct segment *seg = NULL; 1552 struct ta_elf *elf = NULL; 1553 vaddr_t a = 0; 1554 1555 TAILQ_FOREACH(elf, &main_elf_queue, link) { 1556 if (addr < elf->load_addr) 1557 continue; 1558 a = addr - elf->load_addr; 1559 TAILQ_FOREACH(seg, &elf->segs, link) { 1560 if (a < seg->vaddr) 1561 continue; 1562 if (a - seg->vaddr < seg->filesz) { 1563 *idx_start = elf->exidx_start + elf->load_addr; 1564 *idx_end = elf->exidx_start + elf->load_addr + 1565 elf->exidx_size; 1566 return true; 1567 } 1568 } 1569 } 1570 1571 return false; 1572 } 1573 1574 void ta_elf_stack_trace_a32(uint32_t regs[16]) 1575 { 1576 struct unwind_state_arm32 state = { }; 1577 1578 memcpy(state.registers, regs, sizeof(state.registers)); 1579 print_stack_arm32(&state, ta_stack, ta_stack_size); 1580 } 1581 1582 void ta_elf_stack_trace_a64(uint64_t fp, uint64_t sp, uint64_t pc) 1583 { 1584 struct unwind_state_arm64 state = { .fp = fp, .sp = sp, .pc = pc }; 1585 1586 print_stack_arm64(&state, ta_stack, ta_stack_size); 1587 } 1588 #elif defined(RV32) || defined(RV64) 1589 void ta_elf_stack_trace_riscv(uint64_t fp, uint64_t pc) 1590 { 1591 struct unwind_state_riscv state = { .fp = fp, .pc = pc }; 1592 1593 print_stack_riscv(&state, ta_stack, ta_stack_size); 1594 } 1595 #endif 1596 1597 #endif /* CFG_UNWIND */ 1598 1599 TEE_Result ta_elf_add_library(const TEE_UUID *uuid) 1600 { 1601 TEE_Result res = TEE_ERROR_GENERIC; 1602 struct ta_elf *ta = TAILQ_FIRST(&main_elf_queue); 1603 struct ta_elf *lib = ta_elf_find_elf(uuid); 1604 struct ta_elf *elf = NULL; 1605 1606 if (lib) 1607 return TEE_SUCCESS; /* Already mapped */ 1608 1609 lib = queue_elf_helper(uuid); 1610 if (!lib) 1611 return TEE_ERROR_OUT_OF_MEMORY; 1612 1613 for (elf = lib; elf; elf = TAILQ_NEXT(elf, link)) 1614 ta_elf_load_dependency(elf, ta->is_32bit); 1615 1616 for (elf = lib; elf; elf = TAILQ_NEXT(elf, link)) { 1617 ta_elf_relocate(elf); 1618 ta_elf_finalize_mappings(elf); 1619 } 1620 1621 for (elf = lib; elf; elf = TAILQ_NEXT(elf, link)) 1622 DMSG("ELF (%pUl) at %#"PRIxVA, 1623 (void *)&elf->uuid, elf->load_addr); 1624 1625 res = ta_elf_set_init_fini_info_compat(ta->is_32bit); 1626 if (res) 1627 return res; 1628 1629 return ta_elf_set_elf_phdr_info(ta->is_32bit); 1630 } 1631 1632 /* Get address/size of .init_array and .fini_array from the dynamic segment */ 1633 static void get_init_fini_array(struct ta_elf *elf, unsigned int type, 1634 vaddr_t addr, size_t memsz, vaddr_t *init, 1635 size_t *init_cnt, vaddr_t *fini, 1636 size_t *fini_cnt) 1637 { 1638 size_t addrsz = 0; 1639 size_t dyn_entsize = 0; 1640 size_t num_dyns = 0; 1641 size_t n = 0; 1642 unsigned int tag = 0; 1643 size_t val = 0; 1644 1645 assert(type == PT_DYNAMIC); 1646 1647 check_phdr_in_range(elf, type, addr, memsz); 1648 1649 if (elf->is_32bit) { 1650 dyn_entsize = sizeof(Elf32_Dyn); 1651 addrsz = 4; 1652 } else { 1653 dyn_entsize = sizeof(Elf64_Dyn); 1654 addrsz = 8; 1655 } 1656 1657 assert(!(memsz % dyn_entsize)); 1658 num_dyns = memsz / dyn_entsize; 1659 1660 for (n = 0; n < num_dyns; n++) { 1661 read_dyn(elf, addr, n, &tag, &val); 1662 if (tag == DT_INIT_ARRAY) 1663 *init = val + elf->load_addr; 1664 else if (tag == DT_FINI_ARRAY) 1665 *fini = val + elf->load_addr; 1666 else if (tag == DT_INIT_ARRAYSZ) 1667 *init_cnt = val / addrsz; 1668 else if (tag == DT_FINI_ARRAYSZ) 1669 *fini_cnt = val / addrsz; 1670 } 1671 } 1672 1673 /* Get address/size of .init_array and .fini_array in @elf (if present) */ 1674 static void elf_get_init_fini_array(struct ta_elf *elf, vaddr_t *init, 1675 size_t *init_cnt, vaddr_t *fini, 1676 size_t *fini_cnt) 1677 { 1678 size_t n = 0; 1679 1680 if (elf->is_32bit) { 1681 Elf32_Phdr *phdr = elf->phdr; 1682 1683 for (n = 0; n < elf->e_phnum; n++) { 1684 if (phdr[n].p_type == PT_DYNAMIC) { 1685 get_init_fini_array(elf, phdr[n].p_type, 1686 phdr[n].p_vaddr, 1687 phdr[n].p_memsz, 1688 init, init_cnt, fini, 1689 fini_cnt); 1690 return; 1691 } 1692 } 1693 } else { 1694 Elf64_Phdr *phdr = elf->phdr; 1695 1696 for (n = 0; n < elf->e_phnum; n++) { 1697 if (phdr[n].p_type == PT_DYNAMIC) { 1698 get_init_fini_array(elf, phdr[n].p_type, 1699 phdr[n].p_vaddr, 1700 phdr[n].p_memsz, 1701 init, init_cnt, fini, 1702 fini_cnt); 1703 return; 1704 } 1705 } 1706 } 1707 } 1708 1709 /* 1710 * Deprecated by __elf_phdr_info below. Kept for compatibility. 1711 * 1712 * Pointers to ELF initialization and finalization functions are extracted by 1713 * ldelf and stored on the TA heap, then exported to the TA via the global 1714 * symbol __init_fini_info. libutee in OP-TEE 3.9.0 uses this mechanism. 1715 */ 1716 1717 struct __init_fini { 1718 uint32_t flags; 1719 uint16_t init_size; 1720 uint16_t fini_size; 1721 1722 void (**init)(void); /* @init_size entries */ 1723 void (**fini)(void); /* @fini_size entries */ 1724 }; 1725 1726 #define __IFS_VALID BIT(0) 1727 #define __IFS_INIT_HAS_RUN BIT(1) 1728 #define __IFS_FINI_HAS_RUN BIT(2) 1729 1730 struct __init_fini_info { 1731 uint32_t reserved; 1732 uint16_t size; 1733 uint16_t pad; 1734 struct __init_fini *ifs; /* @size entries */ 1735 }; 1736 1737 /* 32-bit variants for a 64-bit ldelf to access a 32-bit TA */ 1738 1739 struct __init_fini32 { 1740 uint32_t flags; 1741 uint16_t init_size; 1742 uint16_t fini_size; 1743 uint32_t init; 1744 uint32_t fini; 1745 }; 1746 1747 struct __init_fini_info32 { 1748 uint32_t reserved; 1749 uint16_t size; 1750 uint16_t pad; 1751 uint32_t ifs; 1752 }; 1753 1754 static TEE_Result realloc_ifs(vaddr_t va, size_t cnt, bool is_32bit) 1755 { 1756 struct __init_fini_info32 *info32 = (struct __init_fini_info32 *)va; 1757 struct __init_fini_info *info = (struct __init_fini_info *)va; 1758 struct __init_fini32 *ifs32 = NULL; 1759 struct __init_fini *ifs = NULL; 1760 size_t prev_cnt = 0; 1761 void *ptr = NULL; 1762 1763 if (is_32bit) { 1764 ptr = (void *)(vaddr_t)info32->ifs; 1765 ptr = realloc(ptr, cnt * sizeof(struct __init_fini32)); 1766 if (!ptr) 1767 return TEE_ERROR_OUT_OF_MEMORY; 1768 ifs32 = ptr; 1769 prev_cnt = info32->size; 1770 if (cnt > prev_cnt) 1771 memset(ifs32 + prev_cnt, 0, 1772 (cnt - prev_cnt) * sizeof(*ifs32)); 1773 info32->ifs = (uint32_t)(vaddr_t)ifs32; 1774 info32->size = cnt; 1775 } else { 1776 ptr = realloc(info->ifs, cnt * sizeof(struct __init_fini)); 1777 if (!ptr) 1778 return TEE_ERROR_OUT_OF_MEMORY; 1779 ifs = ptr; 1780 prev_cnt = info->size; 1781 if (cnt > prev_cnt) 1782 memset(ifs + prev_cnt, 0, 1783 (cnt - prev_cnt) * sizeof(*ifs)); 1784 info->ifs = ifs; 1785 info->size = cnt; 1786 } 1787 1788 return TEE_SUCCESS; 1789 } 1790 1791 static void fill_ifs(vaddr_t va, size_t idx, struct ta_elf *elf, bool is_32bit) 1792 { 1793 struct __init_fini_info32 *info32 = (struct __init_fini_info32 *)va; 1794 struct __init_fini_info *info = (struct __init_fini_info *)va; 1795 struct __init_fini32 *ifs32 = NULL; 1796 struct __init_fini *ifs = NULL; 1797 size_t init_cnt = 0; 1798 size_t fini_cnt = 0; 1799 vaddr_t init = 0; 1800 vaddr_t fini = 0; 1801 1802 if (is_32bit) { 1803 assert(idx < info32->size); 1804 ifs32 = &((struct __init_fini32 *)(vaddr_t)info32->ifs)[idx]; 1805 1806 if (ifs32->flags & __IFS_VALID) 1807 return; 1808 1809 elf_get_init_fini_array(elf, &init, &init_cnt, &fini, 1810 &fini_cnt); 1811 1812 ifs32->init = (uint32_t)init; 1813 ifs32->init_size = init_cnt; 1814 1815 ifs32->fini = (uint32_t)fini; 1816 ifs32->fini_size = fini_cnt; 1817 1818 ifs32->flags |= __IFS_VALID; 1819 } else { 1820 assert(idx < info->size); 1821 ifs = &info->ifs[idx]; 1822 1823 if (ifs->flags & __IFS_VALID) 1824 return; 1825 1826 elf_get_init_fini_array(elf, &init, &init_cnt, &fini, 1827 &fini_cnt); 1828 1829 ifs->init = (void (**)(void))init; 1830 ifs->init_size = init_cnt; 1831 1832 ifs->fini = (void (**)(void))fini; 1833 ifs->fini_size = fini_cnt; 1834 1835 ifs->flags |= __IFS_VALID; 1836 } 1837 } 1838 1839 /* 1840 * Set or update __init_fini_info in the TA with information from the ELF 1841 * queue 1842 */ 1843 TEE_Result ta_elf_set_init_fini_info_compat(bool is_32bit) 1844 { 1845 struct __init_fini_info *info = NULL; 1846 TEE_Result res = TEE_SUCCESS; 1847 struct ta_elf *elf = NULL; 1848 vaddr_t info_va = 0; 1849 size_t cnt = 0; 1850 1851 res = ta_elf_resolve_sym("__init_fini_info", &info_va, NULL, NULL); 1852 if (res) { 1853 if (res == TEE_ERROR_ITEM_NOT_FOUND) { 1854 /* 1855 * Not an error, only TAs linked against libutee from 1856 * OP-TEE 3.9.0 have this symbol. 1857 */ 1858 return TEE_SUCCESS; 1859 } 1860 return res; 1861 } 1862 assert(info_va); 1863 1864 info = (struct __init_fini_info *)info_va; 1865 if (info->reserved) 1866 return TEE_ERROR_NOT_SUPPORTED; 1867 1868 TAILQ_FOREACH(elf, &main_elf_queue, link) 1869 cnt++; 1870 1871 /* Queue has at least one file (main) */ 1872 assert(cnt); 1873 1874 res = realloc_ifs(info_va, cnt, is_32bit); 1875 if (res) 1876 goto err; 1877 1878 cnt = 0; 1879 TAILQ_FOREACH(elf, &main_elf_queue, link) { 1880 fill_ifs(info_va, cnt, elf, is_32bit); 1881 cnt++; 1882 } 1883 1884 return TEE_SUCCESS; 1885 err: 1886 free(info); 1887 return res; 1888 } 1889 1890 static TEE_Result realloc_elf_phdr_info(vaddr_t va, size_t cnt, bool is_32bit) 1891 { 1892 struct __elf_phdr_info32 *info32 = (struct __elf_phdr_info32 *)va; 1893 struct __elf_phdr_info *info = (struct __elf_phdr_info *)va; 1894 struct dl_phdr_info32 *dlpi32 = NULL; 1895 struct dl_phdr_info *dlpi = NULL; 1896 size_t prev_cnt = 0; 1897 void *ptr = NULL; 1898 1899 if (is_32bit) { 1900 ptr = (void *)(vaddr_t)info32->dlpi; 1901 ptr = realloc(ptr, cnt * sizeof(*dlpi32)); 1902 if (!ptr) 1903 return TEE_ERROR_OUT_OF_MEMORY; 1904 dlpi32 = ptr; 1905 prev_cnt = info32->count; 1906 if (cnt > prev_cnt) 1907 memset(dlpi32 + prev_cnt, 0, 1908 (cnt - prev_cnt) * sizeof(*dlpi32)); 1909 info32->dlpi = (uint32_t)(vaddr_t)dlpi32; 1910 info32->count = cnt; 1911 } else { 1912 ptr = realloc(info->dlpi, cnt * sizeof(*dlpi)); 1913 if (!ptr) 1914 return TEE_ERROR_OUT_OF_MEMORY; 1915 dlpi = ptr; 1916 prev_cnt = info->count; 1917 if (cnt > prev_cnt) 1918 memset(dlpi + prev_cnt, 0, 1919 (cnt - prev_cnt) * sizeof(*dlpi)); 1920 info->dlpi = dlpi; 1921 info->count = cnt; 1922 } 1923 1924 return TEE_SUCCESS; 1925 } 1926 1927 static void fill_elf_phdr_info(vaddr_t va, size_t idx, struct ta_elf *elf, 1928 bool is_32bit) 1929 { 1930 struct __elf_phdr_info32 *info32 = (struct __elf_phdr_info32 *)va; 1931 struct __elf_phdr_info *info = (struct __elf_phdr_info *)va; 1932 struct dl_phdr_info32 *dlpi32 = NULL; 1933 struct dl_phdr_info *dlpi = NULL; 1934 1935 if (is_32bit) { 1936 assert(idx < info32->count); 1937 dlpi32 = (struct dl_phdr_info32 *)(vaddr_t)info32->dlpi + idx; 1938 1939 dlpi32->dlpi_addr = elf->load_addr; 1940 if (elf->soname) 1941 dlpi32->dlpi_name = (vaddr_t)elf->soname; 1942 else 1943 dlpi32->dlpi_name = (vaddr_t)&info32->zero; 1944 dlpi32->dlpi_phdr = (vaddr_t)elf->phdr; 1945 dlpi32->dlpi_phnum = elf->e_phnum; 1946 dlpi32->dlpi_adds = 1; /* No unloading on dlclose() currently */ 1947 dlpi32->dlpi_subs = 0; /* No unloading on dlclose() currently */ 1948 dlpi32->dlpi_tls_modid = elf->tls_mod_id; 1949 dlpi32->dlpi_tls_data = elf->tls_start; 1950 } else { 1951 assert(idx < info->count); 1952 dlpi = info->dlpi + idx; 1953 1954 dlpi->dlpi_addr = elf->load_addr; 1955 if (elf->soname) 1956 dlpi->dlpi_name = elf->soname; 1957 else 1958 dlpi->dlpi_name = &info32->zero; 1959 dlpi->dlpi_phdr = elf->phdr; 1960 dlpi->dlpi_phnum = elf->e_phnum; 1961 dlpi->dlpi_adds = 1; /* No unloading on dlclose() currently */ 1962 dlpi->dlpi_subs = 0; /* No unloading on dlclose() currently */ 1963 dlpi->dlpi_tls_modid = elf->tls_mod_id; 1964 dlpi->dlpi_tls_data = (void *)elf->tls_start; 1965 } 1966 } 1967 1968 /* Set or update __elf_hdr_info in the TA with information from the ELF queue */ 1969 TEE_Result ta_elf_set_elf_phdr_info(bool is_32bit) 1970 { 1971 struct __elf_phdr_info *info = NULL; 1972 TEE_Result res = TEE_SUCCESS; 1973 struct ta_elf *elf = NULL; 1974 vaddr_t info_va = 0; 1975 size_t cnt = 0; 1976 1977 res = ta_elf_resolve_sym("__elf_phdr_info", &info_va, NULL, NULL); 1978 if (res) { 1979 if (res == TEE_ERROR_ITEM_NOT_FOUND) { 1980 /* Older TA */ 1981 return TEE_SUCCESS; 1982 } 1983 return res; 1984 } 1985 assert(info_va); 1986 1987 info = (struct __elf_phdr_info *)info_va; 1988 if (info->reserved) 1989 return TEE_ERROR_NOT_SUPPORTED; 1990 1991 TAILQ_FOREACH(elf, &main_elf_queue, link) 1992 cnt++; 1993 1994 res = realloc_elf_phdr_info(info_va, cnt, is_32bit); 1995 if (res) 1996 return res; 1997 1998 cnt = 0; 1999 TAILQ_FOREACH(elf, &main_elf_queue, link) { 2000 fill_elf_phdr_info(info_va, cnt, elf, is_32bit); 2001 cnt++; 2002 } 2003 2004 return TEE_SUCCESS; 2005 } 2006