xref: /optee_os/ldelf/ta_elf.c (revision bdf82531f5ce53f04ab265f81aeb76815fe095d2)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2019, Linaro Limited
4  * Copyright (c) 2020, Arm Limited
5  */
6 
7 #include <assert.h>
8 #include <config.h>
9 #include <confine_array_index.h>
10 #include <ctype.h>
11 #include <elf32.h>
12 #include <elf64.h>
13 #include <elf_common.h>
14 #include <ldelf.h>
15 #include <link.h>
16 #include <stdio.h>
17 #include <stdlib.h>
18 #include <string_ext.h>
19 #include <string.h>
20 #include <tee_api_types.h>
21 #include <tee_internal_api_extensions.h>
22 #include <unw/unwind.h>
23 #include <user_ta_header.h>
24 #include <util.h>
25 
26 #include "sys.h"
27 #include "ta_elf.h"
28 
29 /*
30  * Layout of a 32-bit struct dl_phdr_info for a 64-bit ldelf to access a 32-bit
31  * TA
32  */
33 struct dl_phdr_info32 {
34 	uint32_t dlpi_addr;
35 	uint32_t dlpi_name;
36 	uint32_t dlpi_phdr;
37 	uint16_t dlpi_phnum;
38 	uint64_t dlpi_adds;
39 	uint64_t dlpi_subs;
40 	uint32_t dlpi_tls_modid;
41 	uint32_t dlpi_tls_data;
42 };
43 
44 static vaddr_t ta_stack;
45 static vaddr_t ta_stack_size;
46 
47 struct ta_elf_queue main_elf_queue = TAILQ_HEAD_INITIALIZER(main_elf_queue);
48 
49 /*
50  * Main application is always ID 1, shared libraries with TLS take IDs 2 and
51  * above
52  */
53 static void assign_tls_mod_id(struct ta_elf *elf)
54 {
55 	static size_t last_tls_mod_id = 1;
56 
57 	if (elf->is_main)
58 		assert(last_tls_mod_id == 1); /* Main always comes first */
59 	elf->tls_mod_id = last_tls_mod_id++;
60 }
61 
62 static struct ta_elf *queue_elf_helper(const TEE_UUID *uuid)
63 {
64 	struct ta_elf *elf = calloc(1, sizeof(*elf));
65 
66 	if (!elf)
67 		return NULL;
68 
69 	TAILQ_INIT(&elf->segs);
70 
71 	elf->uuid = *uuid;
72 	TAILQ_INSERT_TAIL(&main_elf_queue, elf, link);
73 	return elf;
74 }
75 
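/*
 * Queue an ELF for loading unless it is already in main_elf_queue;
 * returns NULL if the UUID is already queued.
 */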
76 static struct ta_elf *queue_elf(const TEE_UUID *uuid)
77 {
78 	struct ta_elf *elf = ta_elf_find_elf(uuid);
79 
80 	if (elf)
81 		return NULL;
82 
83 	elf = queue_elf_helper(uuid);
84 	if (!elf)
85 		err(TEE_ERROR_OUT_OF_MEMORY, "queue_elf_helper");
86 
87 	return elf;
88 }
89 
90 struct ta_elf *ta_elf_find_elf(const TEE_UUID *uuid)
91 {
92 	struct ta_elf *elf = NULL;
93 
94 	TAILQ_FOREACH(elf, &main_elf_queue, link)
95 		if (!memcmp(uuid, &elf->uuid, sizeof(*uuid)))
96 			return elf;
97 
98 	return NULL;
99 }
100 
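/*
 * Validate the 32-bit ELF header (little-endian Arm EABI, ET_DYN) and copy
 * the fields needed later into @elf.
 */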
101 static TEE_Result e32_parse_ehdr(struct ta_elf *elf, Elf32_Ehdr *ehdr)
102 {
103 	if (ehdr->e_ident[EI_VERSION] != EV_CURRENT ||
104 	    ehdr->e_ident[EI_CLASS] != ELFCLASS32 ||
105 	    ehdr->e_ident[EI_DATA] != ELFDATA2LSB ||
106 	    ehdr->e_ident[EI_OSABI] != ELFOSABI_NONE ||
107 	    ehdr->e_type != ET_DYN || ehdr->e_machine != EM_ARM ||
108 	    (ehdr->e_flags & EF_ARM_ABIMASK) != EF_ARM_ABI_VERSION ||
109 #ifndef CFG_WITH_VFP
110 	    (ehdr->e_flags & EF_ARM_ABI_FLOAT_HARD) ||
111 #endif
112 	    ehdr->e_phentsize != sizeof(Elf32_Phdr) ||
113 	    ehdr->e_shentsize != sizeof(Elf32_Shdr))
114 		return TEE_ERROR_BAD_FORMAT;
115 
116 	elf->is_32bit = true;
117 	elf->e_entry = ehdr->e_entry;
118 	elf->e_phoff = ehdr->e_phoff;
119 	elf->e_shoff = ehdr->e_shoff;
120 	elf->e_phnum = ehdr->e_phnum;
121 	elf->e_shnum = ehdr->e_shnum;
122 	elf->e_phentsize = ehdr->e_phentsize;
123 	elf->e_shentsize = ehdr->e_shentsize;
124 
125 	return TEE_SUCCESS;
126 }
127 
128 #ifdef ARM64
129 static TEE_Result e64_parse_ehdr(struct ta_elf *elf, Elf64_Ehdr *ehdr)
130 {
131 	if (ehdr->e_ident[EI_VERSION] != EV_CURRENT ||
132 	    ehdr->e_ident[EI_CLASS] != ELFCLASS64 ||
133 	    ehdr->e_ident[EI_DATA] != ELFDATA2LSB ||
134 	    ehdr->e_ident[EI_OSABI] != ELFOSABI_NONE ||
135 	    ehdr->e_type != ET_DYN || ehdr->e_machine != EM_AARCH64 ||
136 	    ehdr->e_flags || ehdr->e_phentsize != sizeof(Elf64_Phdr) ||
137 	    ehdr->e_shentsize != sizeof(Elf64_Shdr))
138 		return TEE_ERROR_BAD_FORMAT;
139 
140 
141 	elf->is_32bit = false;
142 	elf->e_entry = ehdr->e_entry;
143 	elf->e_phoff = ehdr->e_phoff;
144 	elf->e_shoff = ehdr->e_shoff;
145 	elf->e_phnum = ehdr->e_phnum;
146 	elf->e_shnum = ehdr->e_shnum;
147 	elf->e_phentsize = ehdr->e_phentsize;
148 	elf->e_shentsize = ehdr->e_shentsize;
149 
150 	return TEE_SUCCESS;
151 }
152 #else /*ARM64*/
153 static TEE_Result e64_parse_ehdr(struct ta_elf *elf __unused,
154 				 Elf64_Ehdr *ehdr __unused)
155 {
156 	return TEE_ERROR_NOT_SUPPORTED;
157 }
158 #endif /*ARM64*/
159 
160 static void check_phdr_in_range(struct ta_elf *elf, unsigned int type,
161 				vaddr_t addr, size_t memsz)
162 {
163 	vaddr_t max_addr = 0;
164 
165 	if (ADD_OVERFLOW(addr, memsz, &max_addr))
166 		err(TEE_ERROR_BAD_FORMAT, "Program header %#x overflow", type);
167 
168 	/*
169 	 * elf->load_addr and elf->max_addr both refer to final
170 	 * virtual addresses, while this program header is
171 	 * relative to 0.
172 	 */
173 	if (max_addr > elf->max_addr - elf->load_addr)
174 		err(TEE_ERROR_BAD_FORMAT, "Program header %#x out of bounds",
175 		    type);
176 }
177 
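/*
 * Read entry @idx of the dynamic array at link-time address @addr,
 * returning its tag and value independently of the 32/64-bit ELF format.
 */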
178 static void read_dyn(struct ta_elf *elf, vaddr_t addr,
179 		     size_t idx, unsigned int *tag, size_t *val)
180 {
181 	if (elf->is_32bit) {
182 		Elf32_Dyn *dyn = (Elf32_Dyn *)(addr + elf->load_addr);
183 
184 		*tag = dyn[idx].d_tag;
185 		*val = dyn[idx].d_un.d_val;
186 	} else {
187 		Elf64_Dyn *dyn = (Elf64_Dyn *)(addr + elf->load_addr);
188 
189 		*tag = dyn[idx].d_tag;
190 		*val = dyn[idx].d_un.d_val;
191 	}
192 }
193 
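/*
 * Check that @ptr .. @ptr + @sz stays within the mapped range
 * [load_addr, max_addr] of @elf; anything else is a fatal format error.
 */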
194 static void check_range(struct ta_elf *elf, const char *name, const void *ptr,
195 			size_t sz)
196 {
197 	size_t max_addr = 0;
198 
199 	if ((vaddr_t)ptr < elf->load_addr)
200 		err(TEE_ERROR_BAD_FORMAT, "%s %p out of range", name, ptr);
201 
202 	if (ADD_OVERFLOW((vaddr_t)ptr, sz, &max_addr))
203 		err(TEE_ERROR_BAD_FORMAT, "%s range overflow", name);
204 
205 	if (max_addr > elf->max_addr)
206 		err(TEE_ERROR_BAD_FORMAT,
207 		    "%s %p..%#zx out of range", name, ptr, max_addr);
208 }
209 
210 static void check_hashtab(struct ta_elf *elf, void *ptr, size_t num_buckets,
211 			  size_t num_chains)
212 {
213 	/*
214 	 * Start from 2 since the first two words are mandatory and hold
215 	 * num_buckets and num_chains. This function is called twice:
216 	 * first to check that there's indeed room for num_buckets and
217 	 * num_chains, then to check that the whole table fits.
218 	 * See http://www.sco.com/developers/gabi/latest/ch5.dynamic.html#hash
219 	 */
220 	size_t num_words = 2;
221 	size_t sz = 0;
222 
223 	if (!IS_ALIGNED_WITH_TYPE(ptr, uint32_t))
224 		err(TEE_ERROR_BAD_FORMAT, "Bad alignment of DT_HASH %p", ptr);
225 
226 	if (ADD_OVERFLOW(num_words, num_buckets, &num_words) ||
227 	    ADD_OVERFLOW(num_words, num_chains, &num_words) ||
228 	    MUL_OVERFLOW(num_words, sizeof(uint32_t), &sz))
229 		err(TEE_ERROR_BAD_FORMAT, "DT_HASH overflow");
230 
231 	check_range(elf, "DT_HASH", ptr, sz);
232 }
233 
234 static void check_gnu_hashtab(struct ta_elf *elf, void *ptr)
235 {
236 	struct gnu_hashtab *h = ptr;
237 	size_t num_words = 4; /* nbuckets, symoffset, bloom_size, bloom_shift */
238 	size_t bloom_words = 0;
239 	size_t sz = 0;
240 
241 	if (!IS_ALIGNED_WITH_TYPE(ptr, uint32_t))
242 		err(TEE_ERROR_BAD_FORMAT, "Bad alignment of DT_GNU_HASH %p",
243 		    ptr);
244 
245 	if (elf->gnu_hashtab_size < sizeof(*h))
246 		err(TEE_ERROR_BAD_FORMAT, "DT_GNU_HASH too small");
247 
248 	/* Check validity of h->nbuckets and h->bloom_size */
249 
250 	if (elf->is_32bit)
251 		bloom_words = h->bloom_size;
252 	else
253 		bloom_words = h->bloom_size * 2;
254 	if (ADD_OVERFLOW(num_words, h->nbuckets, &num_words) ||
255 	    ADD_OVERFLOW(num_words, bloom_words, &num_words) ||
256 	    MUL_OVERFLOW(num_words, sizeof(uint32_t), &sz) ||
257 	    sz > elf->gnu_hashtab_size)
258 		err(TEE_ERROR_BAD_FORMAT, "DT_GNU_HASH overflow");
259 }
260 
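/*
 * Find the SHT_HASH and SHT_GNU_HASH tables via the section headers and
 * sanity-check them before they are used for symbol lookups.
 */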
261 static void save_hashtab(struct ta_elf *elf)
262 {
263 	uint32_t *hashtab = NULL;
264 	size_t n = 0;
265 
266 	if (elf->is_32bit) {
267 		Elf32_Shdr *shdr = elf->shdr;
268 
269 		for (n = 0; n < elf->e_shnum; n++) {
270 			void *addr = (void *)(vaddr_t)(shdr[n].sh_addr +
271 						       elf->load_addr);
272 
273 			if (shdr[n].sh_type == SHT_HASH) {
274 				elf->hashtab = addr;
275 			} else if (shdr[n].sh_type == SHT_GNU_HASH) {
276 				elf->gnu_hashtab = addr;
277 				elf->gnu_hashtab_size = shdr[n].sh_size;
278 			}
279 		}
280 	} else {
281 		Elf64_Shdr *shdr = elf->shdr;
282 
283 		for (n = 0; n < elf->e_shnum; n++) {
284 			void *addr = (void *)(vaddr_t)(shdr[n].sh_addr +
285 						       elf->load_addr);
286 
287 			if (shdr[n].sh_type == SHT_HASH) {
288 				elf->hashtab = addr;
289 			} else if (shdr[n].sh_type == SHT_GNU_HASH) {
290 				elf->gnu_hashtab = addr;
291 				elf->gnu_hashtab_size = shdr[n].sh_size;
292 			}
293 		}
294 	}
295 
296 	if (elf->hashtab) {
297 		check_hashtab(elf, elf->hashtab, 0, 0);
298 		hashtab = elf->hashtab;
299 		check_hashtab(elf, elf->hashtab, hashtab[0], hashtab[1]);
300 	}
301 	if (elf->gnu_hashtab)
302 		check_gnu_hashtab(elf, elf->gnu_hashtab);
303 }
304 
305 static void save_soname_from_segment(struct ta_elf *elf, unsigned int type,
306 				     vaddr_t addr, size_t memsz)
307 {
308 	size_t dyn_entsize = 0;
309 	size_t num_dyns = 0;
310 	size_t n = 0;
311 	unsigned int tag = 0;
312 	size_t val = 0;
313 	char *str_tab = NULL;
314 
315 	if (type != PT_DYNAMIC)
316 		return;
317 
318 	if (elf->is_32bit)
319 		dyn_entsize = sizeof(Elf32_Dyn);
320 	else
321 		dyn_entsize = sizeof(Elf64_Dyn);
322 
323 	assert(!(memsz % dyn_entsize));
324 	num_dyns = memsz / dyn_entsize;
325 
326 	for (n = 0; n < num_dyns; n++) {
327 		read_dyn(elf, addr, n, &tag, &val);
328 		if (tag == DT_STRTAB) {
329 			str_tab = (char *)(val + elf->load_addr);
330 			break;
331 		}
332 	}
333 	for (n = 0; n < num_dyns; n++) {
334 		read_dyn(elf, addr, n, &tag, &val);
335 		if (tag == DT_SONAME) {
336 			elf->soname = str_tab + val;
337 			break;
338 		}
339 	}
340 }
341 
342 static void save_soname(struct ta_elf *elf)
343 {
344 	size_t n = 0;
345 
346 	if (elf->is_32bit) {
347 		Elf32_Phdr *phdr = elf->phdr;
348 
349 		for (n = 0; n < elf->e_phnum; n++)
350 			save_soname_from_segment(elf, phdr[n].p_type,
351 						 phdr[n].p_vaddr,
352 						 phdr[n].p_memsz);
353 	} else {
354 		Elf64_Phdr *phdr = elf->phdr;
355 
356 		for (n = 0; n < elf->e_phnum; n++)
357 			save_soname_from_segment(elf, phdr[n].p_type,
358 						 phdr[n].p_vaddr,
359 						 phdr[n].p_memsz);
360 	}
361 }
362 
363 static void e32_save_symtab(struct ta_elf *elf, size_t tab_idx)
364 {
365 	Elf32_Shdr *shdr = elf->shdr;
366 	size_t str_idx = shdr[tab_idx].sh_link;
367 
368 	elf->dynsymtab = (void *)(shdr[tab_idx].sh_addr + elf->load_addr);
369 	if (!IS_ALIGNED_WITH_TYPE(elf->dynsymtab, Elf32_Sym))
370 		err(TEE_ERROR_BAD_FORMAT, "Bad alignment of dynsymtab %p",
371 		    elf->dynsymtab);
372 	check_range(elf, "Dynsymtab", elf->dynsymtab, shdr[tab_idx].sh_size);
373 
374 	if (shdr[tab_idx].sh_size % sizeof(Elf32_Sym))
375 		err(TEE_ERROR_BAD_FORMAT,
376 		    "Size of dynsymtab not an even multiple of Elf32_Sym");
377 	elf->num_dynsyms = shdr[tab_idx].sh_size / sizeof(Elf32_Sym);
378 
379 	if (str_idx >= elf->e_shnum)
380 		err(TEE_ERROR_BAD_FORMAT, "Dynstr section index out of range");
381 	elf->dynstr = (void *)(shdr[str_idx].sh_addr + elf->load_addr);
382 	check_range(elf, "Dynstr", elf->dynstr, shdr[str_idx].sh_size);
383 
384 	elf->dynstr_size = shdr[str_idx].sh_size;
385 }
386 
387 static void e64_save_symtab(struct ta_elf *elf, size_t tab_idx)
388 {
389 	Elf64_Shdr *shdr = elf->shdr;
390 	size_t str_idx = shdr[tab_idx].sh_link;
391 
392 	elf->dynsymtab = (void *)(vaddr_t)(shdr[tab_idx].sh_addr +
393 					   elf->load_addr);
394 
395 	if (!IS_ALIGNED_WITH_TYPE(elf->dynsymtab, Elf64_Sym))
396 		err(TEE_ERROR_BAD_FORMAT, "Bad alignment of .dynsym/DYNSYM %p",
397 		    elf->dynsymtab);
398 	check_range(elf, ".dynsym/DYNSYM", elf->dynsymtab,
399 		    shdr[tab_idx].sh_size);
400 
401 	if (shdr[tab_idx].sh_size % sizeof(Elf64_Sym))
402 		err(TEE_ERROR_BAD_FORMAT,
403 		    "Size of .dynsym/DYNSYM not an even multiple of Elf64_Sym");
404 	elf->num_dynsyms = shdr[tab_idx].sh_size / sizeof(Elf64_Sym);
405 
406 	if (str_idx >= elf->e_shnum)
407 		err(TEE_ERROR_BAD_FORMAT,
408 		    ".dynstr/STRTAB section index out of range");
409 	elf->dynstr = (void *)(vaddr_t)(shdr[str_idx].sh_addr + elf->load_addr);
410 	check_range(elf, ".dynstr/STRTAB", elf->dynstr, shdr[str_idx].sh_size);
411 
412 	elf->dynstr_size = shdr[str_idx].sh_size;
413 }
414 
415 static void save_symtab(struct ta_elf *elf)
416 {
417 	size_t n = 0;
418 
419 	if (elf->is_32bit) {
420 		Elf32_Shdr *shdr = elf->shdr;
421 
422 		for (n = 0; n < elf->e_shnum; n++) {
423 			if (shdr[n].sh_type == SHT_DYNSYM) {
424 				e32_save_symtab(elf, n);
425 				break;
426 			}
427 		}
428 	} else {
429 		Elf64_Shdr *shdr = elf->shdr;
430 
431 		for (n = 0; n < elf->e_shnum; n++) {
432 			if (shdr[n].sh_type == SHT_DYNSYM) {
433 				e64_save_symtab(elf, n);
434 				break;
435 			}
436 		}
437 
438 	}
439 
440 	save_hashtab(elf);
441 	save_soname(elf);
442 }
443 
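/*
 * Open the TA binary, map its first page (holding the ELF header) and
 * parse the header so the program headers can be located.
 */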
444 static void init_elf(struct ta_elf *elf)
445 {
446 	TEE_Result res = TEE_SUCCESS;
447 	vaddr_t va = 0;
448 	uint32_t flags = LDELF_MAP_FLAG_SHAREABLE;
449 	size_t sz = 0;
450 
451 	res = sys_open_ta_bin(&elf->uuid, &elf->handle);
452 	if (res)
453 		err(res, "sys_open_ta_bin(%pUl)", (void *)&elf->uuid);
454 
455 	/*
456 	 * Map it read-only executable when we're loading a library where
457 	 * the ELF header is included in a load segment.
458 	 */
459 	if (!elf->is_main)
460 		flags |= LDELF_MAP_FLAG_EXECUTABLE;
461 	res = sys_map_ta_bin(&va, SMALL_PAGE_SIZE, flags, elf->handle, 0, 0, 0);
462 	if (res)
463 		err(res, "sys_map_ta_bin");
464 	elf->ehdr_addr = va;
465 	if (!elf->is_main) {
466 		elf->load_addr = va;
467 		elf->max_addr = va + SMALL_PAGE_SIZE;
468 		elf->max_offs = SMALL_PAGE_SIZE;
469 	}
470 
471 	if (!IS_ELF(*(Elf32_Ehdr *)va))
472 		err(TEE_ERROR_BAD_FORMAT, "TA is not an ELF");
473 
474 	res = e32_parse_ehdr(elf, (void *)va);
475 	if (res == TEE_ERROR_BAD_FORMAT)
476 		res = e64_parse_ehdr(elf, (void *)va);
477 	if (res)
478 		err(res, "Cannot parse ELF");
479 
480 	if (MUL_OVERFLOW(elf->e_phnum, elf->e_phentsize, &sz) ||
481 	    ADD_OVERFLOW(sz, elf->e_phoff, &sz))
482 		err(TEE_ERROR_BAD_FORMAT, "Program headers size overflow");
483 
484 	if (sz > SMALL_PAGE_SIZE)
485 		err(TEE_ERROR_NOT_SUPPORTED, "Cannot read program headers");
486 
487 	elf->phdr = (void *)(va + elf->e_phoff);
488 }
489 
490 static size_t roundup(size_t v)
491 {
492 	return ROUNDUP(v, SMALL_PAGE_SIZE);
493 }
494 
495 static size_t rounddown(size_t v)
496 {
497 	return ROUNDDOWN(v, SMALL_PAGE_SIZE);
498 }
499 
500 static void add_segment(struct ta_elf *elf, size_t offset, size_t vaddr,
501 			size_t filesz, size_t memsz, size_t flags, size_t align)
502 {
503 	struct segment *seg = calloc(1, sizeof(*seg));
504 
505 	if (!seg)
506 		err(TEE_ERROR_OUT_OF_MEMORY, "calloc");
507 
508 	if (memsz < filesz)
509 		err(TEE_ERROR_BAD_FORMAT, "Memsz smaller than filesz");
510 
511 	seg->offset = offset;
512 	seg->vaddr = vaddr;
513 	seg->filesz = filesz;
514 	seg->memsz = memsz;
515 	seg->flags = flags;
516 	seg->align = align;
517 
518 	TAILQ_INSERT_TAIL(&elf->segs, seg, link);
519 }
520 
521 static void parse_load_segments(struct ta_elf *elf)
522 {
523 	size_t n = 0;
524 
525 	if (elf->is_32bit) {
526 		Elf32_Phdr *phdr = elf->phdr;
527 
528 		for (n = 0; n < elf->e_phnum; n++)
529 			if (phdr[n].p_type == PT_LOAD) {
530 				add_segment(elf, phdr[n].p_offset,
531 					    phdr[n].p_vaddr, phdr[n].p_filesz,
532 					    phdr[n].p_memsz, phdr[n].p_flags,
533 					    phdr[n].p_align);
534 			} else if (phdr[n].p_type == PT_ARM_EXIDX) {
535 				elf->exidx_start = phdr[n].p_vaddr;
536 				elf->exidx_size = phdr[n].p_filesz;
537 			} else if (phdr[n].p_type == PT_TLS) {
538 				assign_tls_mod_id(elf);
539 			}
540 	} else {
541 		Elf64_Phdr *phdr = elf->phdr;
542 
543 		for (n = 0; n < elf->e_phnum; n++)
544 			if (phdr[n].p_type == PT_LOAD) {
545 				add_segment(elf, phdr[n].p_offset,
546 					    phdr[n].p_vaddr, phdr[n].p_filesz,
547 					    phdr[n].p_memsz, phdr[n].p_flags,
548 					    phdr[n].p_align);
549 			} else if (phdr[n].p_type == PT_TLS) {
550 				elf->tls_start = phdr[n].p_vaddr;
551 				elf->tls_filesz = phdr[n].p_filesz;
552 				elf->tls_memsz = phdr[n].p_memsz;
553 			} else if (IS_ENABLED(CFG_TA_BTI) &&
554 				   phdr[n].p_type == PT_GNU_PROPERTY) {
555 				elf->prop_start = phdr[n].p_vaddr;
556 				elf->prop_align = phdr[n].p_align;
557 				elf->prop_memsz = phdr[n].p_memsz;
558 			}
559 	}
560 }
561 
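/*
 * Fill a remapped writeable segment with its file content: bytes already
 * present in the previously mapped pages are copied directly, the rest is
 * read from the TA binary.
 */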
562 static void copy_remapped_to(struct ta_elf *elf, const struct segment *seg)
563 {
564 	uint8_t *dst = (void *)(seg->vaddr + elf->load_addr);
565 	size_t n = 0;
566 	size_t offs = seg->offset;
567 	size_t num_bytes = seg->filesz;
568 
569 	if (offs < elf->max_offs) {
570 		n = MIN(elf->max_offs - offs, num_bytes);
571 		memcpy(dst, (void *)(elf->max_addr + offs - elf->max_offs), n);
572 		dst += n;
573 		offs += n;
574 		num_bytes -= n;
575 	}
576 
577 	if (num_bytes) {
578 		TEE_Result res = sys_copy_from_ta_bin(dst, num_bytes,
579 						      elf->handle, offs);
580 
581 		if (res)
582 			err(res, "sys_copy_from_ta_bin");
583 		elf->max_offs += offs;
584 	}
585 }
586 
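/*
 * Sanity-check the segment list and adjust it so every segment can be
 * mapped with page granularity: merge segments that share a page and
 * page-align the remaining ones.
 */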
587 static void adjust_segments(struct ta_elf *elf)
588 {
589 	struct segment *seg = NULL;
590 	struct segment *prev_seg = NULL;
591 	size_t prev_end_addr = 0;
592 	size_t align = 0;
593 	size_t mask = 0;
594 
595 	/* Sanity check */
596 	TAILQ_FOREACH(seg, &elf->segs, link) {
597 		size_t dummy __maybe_unused = 0;
598 
599 		assert(seg->align >= SMALL_PAGE_SIZE);
600 		assert(!ADD_OVERFLOW(seg->vaddr, seg->memsz, &dummy));
601 		assert(seg->filesz <= seg->memsz);
602 		assert((seg->offset & SMALL_PAGE_MASK) ==
603 		       (seg->vaddr & SMALL_PAGE_MASK));
604 
605 		prev_seg = TAILQ_PREV(seg, segment_head, link);
606 		if (prev_seg) {
607 			assert(seg->vaddr >= prev_seg->vaddr + prev_seg->memsz);
608 			assert(seg->offset >=
609 			       prev_seg->offset + prev_seg->filesz);
610 		}
611 		if (!align)
612 			align = seg->align;
613 		assert(align == seg->align);
614 	}
615 
616 	mask = align - 1;
617 
618 	seg = TAILQ_FIRST(&elf->segs);
619 	if (seg)
620 		seg = TAILQ_NEXT(seg, link);
621 	while (seg) {
622 		prev_seg = TAILQ_PREV(seg, segment_head, link);
623 		prev_end_addr = prev_seg->vaddr + prev_seg->memsz;
624 
625 		/*
626 		 * This segment may overlap with the last "page" in the
627 		 * previous segment in two different ways:
628 		 * 1. Virtual address (and offset) overlaps =>
629 		 *    Permissions need to be merged. The offset must have
630 		 *    the same SMALL_PAGE_MASK bits as the vaddr so that
631 		 *    vaddr and offset add up with the previous segment.
632 		 *
633 		 * 2. Only the offset overlaps =>
634 		 *    The same page in the ELF is mapped at two different
635 		 *    virtual addresses. As a limitation, this segment must
636 		 *    be mapped as writeable.
637 		 */
638 
639 		/* Case 1. */
640 		if (rounddown(seg->vaddr) < prev_end_addr) {
641 			assert((seg->vaddr & mask) == (seg->offset & mask));
642 			assert(prev_seg->memsz == prev_seg->filesz);
643 
644 			/*
645 			 * Merge the segments and their permissions.
646 			 * Note that there may be a small hole between the
647 			 * two segments.
648 			 */
649 			prev_seg->filesz = seg->vaddr + seg->filesz -
650 					   prev_seg->vaddr;
651 			prev_seg->memsz = seg->vaddr + seg->memsz -
652 					   prev_seg->vaddr;
653 			prev_seg->flags |= seg->flags;
654 
655 			TAILQ_REMOVE(&elf->segs, seg, link);
656 			free(seg);
657 			seg = TAILQ_NEXT(prev_seg, link);
658 			continue;
659 		}
660 
661 		/* Case 2. */
662 		if ((seg->offset & mask) &&
663 		    rounddown(seg->offset) <
664 		    (prev_seg->offset + prev_seg->filesz)) {
665 
666 			assert(seg->flags & PF_W);
667 			seg->remapped_writeable = true;
668 		}
669 
670 		/*
671 		 * No overlap, but we may need to align address, offset and
672 		 * size.
673 		 */
674 		seg->filesz += seg->vaddr - rounddown(seg->vaddr);
675 		seg->memsz += seg->vaddr - rounddown(seg->vaddr);
676 		seg->vaddr = rounddown(seg->vaddr);
677 		seg->offset = rounddown(seg->offset);
678 		seg = TAILQ_NEXT(seg, link);
679 	}
680 
681 }
682 
683 static void populate_segments_legacy(struct ta_elf *elf)
684 {
685 	TEE_Result res = TEE_SUCCESS;
686 	struct segment *seg = NULL;
687 	vaddr_t va = 0;
688 
689 	assert(elf->is_legacy);
690 	TAILQ_FOREACH(seg, &elf->segs, link) {
691 		struct segment *last_seg = TAILQ_LAST(&elf->segs, segment_head);
692 		size_t pad_end = roundup(last_seg->vaddr + last_seg->memsz -
693 					 seg->vaddr - seg->memsz);
694 		size_t num_bytes = roundup(seg->memsz);
695 
696 		if (!elf->load_addr)
697 			va = 0;
698 		else
699 			va = seg->vaddr + elf->load_addr;
700 
701 
702 		if (!(seg->flags & PF_R))
703 			err(TEE_ERROR_NOT_SUPPORTED,
704 			    "Segment must be readable");
705 
706 		res = sys_map_zi(num_bytes, 0, &va, 0, pad_end);
707 		if (res)
708 			err(res, "sys_map_zi");
709 		res = sys_copy_from_ta_bin((void *)va, seg->filesz,
710 					   elf->handle, seg->offset);
711 		if (res)
712 			err(res, "sys_copy_from_ta_bin");
713 
714 		if (!elf->load_addr)
715 			elf->load_addr = va;
716 		elf->max_addr = va + num_bytes;
717 		elf->max_offs = seg->offset + seg->filesz;
718 	}
719 }
720 
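/*
 * Number of bytes to reserve before the first mapping. With CFG_TA_ASLR
 * this is a random number of pages in the range
 * [CFG_TA_ASLR_MIN_OFFSET_PAGES, CFG_TA_ASLR_MAX_OFFSET_PAGES), else 0.
 */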
721 static size_t get_pad_begin(void)
722 {
723 #ifdef CFG_TA_ASLR
724 	size_t min = CFG_TA_ASLR_MIN_OFFSET_PAGES;
725 	size_t max = CFG_TA_ASLR_MAX_OFFSET_PAGES;
726 	TEE_Result res = TEE_SUCCESS;
727 	uint32_t rnd32 = 0;
728 	size_t rnd = 0;
729 
730 	COMPILE_TIME_ASSERT(CFG_TA_ASLR_MIN_OFFSET_PAGES <
731 			    CFG_TA_ASLR_MAX_OFFSET_PAGES);
732 	if (max > min) {
733 		res = sys_gen_random_num(&rnd32, sizeof(rnd32));
734 		if (res) {
735 			DMSG("Random read failed: %#"PRIx32, res);
736 			return min * SMALL_PAGE_SIZE;
737 		}
738 		rnd = rnd32 % (max - min);
739 	}
740 
741 	return (min + rnd) * SMALL_PAGE_SIZE;
742 #else /*!CFG_TA_ASLR*/
743 	return 0;
744 #endif /*!CFG_TA_ASLR*/
745 }
746 
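/*
 * Map the load segments of a non-legacy ELF: writeable segments get
 * zero-initialized memory filled from the TA binary, read-only segments
 * are mapped (shareable) directly from the TA binary. Segments overlapping
 * the already mapped first page are trimmed or skipped.
 */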
747 static void populate_segments(struct ta_elf *elf)
748 {
749 	TEE_Result res = TEE_SUCCESS;
750 	struct segment *seg = NULL;
751 	vaddr_t va = 0;
752 	size_t pad_begin = 0;
753 
754 	assert(!elf->is_legacy);
755 	TAILQ_FOREACH(seg, &elf->segs, link) {
756 		struct segment *last_seg = TAILQ_LAST(&elf->segs, segment_head);
757 		size_t pad_end = roundup(last_seg->vaddr + last_seg->memsz -
758 					 seg->vaddr - seg->memsz);
759 
760 		if (seg->remapped_writeable) {
761 			size_t num_bytes = roundup(seg->vaddr + seg->memsz) -
762 					   rounddown(seg->vaddr);
763 
764 			assert(elf->load_addr);
765 			va = rounddown(elf->load_addr + seg->vaddr);
766 			assert(va >= elf->max_addr);
767 			res = sys_map_zi(num_bytes, 0, &va, 0, pad_end);
768 			if (res)
769 				err(res, "sys_map_zi");
770 
771 			copy_remapped_to(elf, seg);
772 			elf->max_addr = va + num_bytes;
773 		} else {
774 			uint32_t flags =  0;
775 			size_t filesz = seg->filesz;
776 			size_t memsz = seg->memsz;
777 			size_t offset = seg->offset;
778 			size_t vaddr = seg->vaddr;
779 
780 			if (offset < elf->max_offs) {
781 				/*
782 				 * We're in a load segment which overlaps
783 				 * with (or is covered by) the first page
784 				 * of a shared library.
785 				 */
786 				if (vaddr + filesz < SMALL_PAGE_SIZE) {
787 					size_t num_bytes = 0;
788 
789 					/*
790 					 * If this segment is completely
791 					 * covered, take next.
792 					 */
793 					if (vaddr + memsz <= SMALL_PAGE_SIZE)
794 						continue;
795 
796 					/*
797 					 * All data of the segment is
798 					 * loaded, but we need to zero
799 					 * extend it.
800 					 */
801 					va = elf->max_addr;
802 					num_bytes = roundup(vaddr + memsz) -
803 						    roundup(vaddr) -
804 						    SMALL_PAGE_SIZE;
805 					assert(num_bytes);
806 					res = sys_map_zi(num_bytes, 0, &va, 0,
807 							 0);
808 					if (res)
809 						err(res, "sys_map_zi");
810 					elf->max_addr = roundup(va + num_bytes);
811 					continue;
812 				}
813 
814 				/* Partial overlap, remove the first page. */
815 				vaddr += SMALL_PAGE_SIZE;
816 				filesz -= SMALL_PAGE_SIZE;
817 				memsz -= SMALL_PAGE_SIZE;
818 				offset += SMALL_PAGE_SIZE;
819 			}
820 
821 			if (!elf->load_addr) {
822 				va = 0;
823 				pad_begin = get_pad_begin();
824 				/*
825 				 * If mapping with pad_begin fails we'll
826 				 * retry without pad_begin, effectively
827 				 * disabling ASLR for the current ELF file.
828 				 */
829 			} else {
830 				va = vaddr + elf->load_addr;
831 				pad_begin = 0;
832 			}
833 
834 			if (seg->flags & PF_W)
835 				flags |= LDELF_MAP_FLAG_WRITEABLE;
836 			else
837 				flags |= LDELF_MAP_FLAG_SHAREABLE;
838 			if (seg->flags & PF_X)
839 				flags |= LDELF_MAP_FLAG_EXECUTABLE;
840 			if (!(seg->flags & PF_R))
841 				err(TEE_ERROR_NOT_SUPPORTED,
842 				    "Segment must be readable");
843 			if (flags & LDELF_MAP_FLAG_WRITEABLE) {
844 				res = sys_map_zi(memsz, 0, &va, pad_begin,
845 						 pad_end);
846 				if (pad_begin && res == TEE_ERROR_OUT_OF_MEMORY)
847 					res = sys_map_zi(memsz, 0, &va, 0,
848 							 pad_end);
849 				if (res)
850 					err(res, "sys_map_zi");
851 				res = sys_copy_from_ta_bin((void *)va, filesz,
852 							   elf->handle, offset);
853 				if (res)
854 					err(res, "sys_copy_from_ta_bin");
855 			} else {
856 				if (filesz != memsz)
857 					err(TEE_ERROR_BAD_FORMAT,
858 					    "Filesz and memsz mismatch");
859 				res = sys_map_ta_bin(&va, filesz, flags,
860 						     elf->handle, offset,
861 						     pad_begin, pad_end);
862 				if (pad_begin && res == TEE_ERROR_OUT_OF_MEMORY)
863 					res = sys_map_ta_bin(&va, filesz, flags,
864 							     elf->handle,
865 							     offset, 0,
866 							     pad_end);
867 				if (res)
868 					err(res, "sys_map_ta_bin");
869 			}
870 
871 			if (!elf->load_addr)
872 				elf->load_addr = va;
873 			elf->max_addr = roundup(va + memsz);
874 			elf->max_offs += filesz;
875 		}
876 	}
877 }
878 
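/* Re-protect all executable segments with the BTI attribute set */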
879 static void ta_elf_add_bti(struct ta_elf *elf)
880 {
881 	TEE_Result res = TEE_SUCCESS;
882 	struct segment *seg = NULL;
883 	uint32_t flags = LDELF_MAP_FLAG_EXECUTABLE | LDELF_MAP_FLAG_BTI;
884 
885 	TAILQ_FOREACH(seg, &elf->segs, link) {
886 		vaddr_t va = elf->load_addr + seg->vaddr;
887 
888 		if (seg->flags & PF_X) {
889 			res = sys_set_prot(va, seg->memsz, flags);
890 			if (res)
891 				err(res, "sys_set_prot");
892 		}
893 	}
894 }
895 
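/*
 * Parse the PT_GNU_PROPERTY segment (an NT_GNU_PROPERTY_TYPE_0 note) and
 * record whether the AArch64 BTI feature property is present.
 */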
896 static void parse_property_segment(struct ta_elf *elf)
897 {
898 	char *desc = NULL;
899 	size_t align = elf->prop_align;
900 	size_t desc_offset = 0;
901 	size_t prop_offset = 0;
902 	vaddr_t va = 0;
903 	Elf_Note *note = NULL;
904 	char *name = NULL;
905 
906 	if (!IS_ENABLED(CFG_TA_BTI) || !elf->prop_start)
907 		return;
908 
909 	check_phdr_in_range(elf, PT_GNU_PROPERTY, elf->prop_start,
910 			    elf->prop_memsz);
911 
912 	va = elf->load_addr + elf->prop_start;
913 	note = (void *)va;
914 	name = (char *)(note + 1);
915 
916 	if (elf->prop_memsz < sizeof(*note) + sizeof(ELF_NOTE_GNU))
917 		return;
918 
919 	if (note->n_type != NT_GNU_PROPERTY_TYPE_0 ||
920 	    note->n_namesz != sizeof(ELF_NOTE_GNU) ||
921 	    memcmp(name, ELF_NOTE_GNU, sizeof(ELF_NOTE_GNU)) ||
922 	    !IS_POWER_OF_TWO(align))
923 		return;
924 
925 	desc_offset = ROUNDUP(sizeof(*note) + sizeof(ELF_NOTE_GNU), align);
926 
927 	if (desc_offset > elf->prop_memsz ||
928 	    ROUNDUP(desc_offset + note->n_descsz, align) > elf->prop_memsz)
929 		return;
930 
931 	desc = (char *)(va + desc_offset);
932 
933 	do {
934 		Elf_Prop *prop = (void *)(desc + prop_offset);
935 		size_t data_offset = prop_offset + sizeof(*prop);
936 
937 		if (note->n_descsz < data_offset)
938 			return;
939 
940 		data_offset = confine_array_index(data_offset, note->n_descsz);
941 
942 		if (prop->pr_type == GNU_PROPERTY_AARCH64_FEATURE_1_AND) {
943 			uint32_t *pr_data = (void *)(desc + data_offset);
944 
945 			if (note->n_descsz < (data_offset + sizeof(*pr_data)) &&
946 			    prop->pr_datasz != sizeof(*pr_data))
947 				return;
948 
949 			if (*pr_data & GNU_PROPERTY_AARCH64_FEATURE_1_BTI) {
950 				DMSG("BTI Feature present in note property");
951 				elf->bti_enabled = true;
952 			}
953 		}
954 
955 		prop_offset += ROUNDUP(sizeof(*prop) + prop->pr_datasz, align);
956 	} while (prop_offset < note->n_descsz);
957 }
958 
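/*
 * Parse and adjust the load segments. For a library whose first load
 * segment includes the ELF header, remap the already mapped first page to
 * a region large enough to hold the whole ELF.
 */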
959 static void map_segments(struct ta_elf *elf)
960 {
961 	TEE_Result res = TEE_SUCCESS;
962 
963 	parse_load_segments(elf);
964 	adjust_segments(elf);
965 	if (TAILQ_FIRST(&elf->segs)->offset < SMALL_PAGE_SIZE) {
966 		vaddr_t va = 0;
967 		size_t sz = elf->max_addr - elf->load_addr;
968 		struct segment *seg = TAILQ_LAST(&elf->segs, segment_head);
969 		size_t pad_begin = get_pad_begin();
970 
971 		/*
972 		 * We're loading a library. If that assumption changes,
973 		 * other parts of the code need to be updated too.
974 		 */
975 		assert(!elf->is_main);
976 
977 		/*
978 		 * Now that we know how much virtual memory is needed move
979 		 * the already mapped part to a location which can
980 		 * accommodate us.
981 		 */
982 		res = sys_remap(elf->load_addr, &va, sz, pad_begin,
983 				roundup(seg->vaddr + seg->memsz));
984 		if (res == TEE_ERROR_OUT_OF_MEMORY)
985 			res = sys_remap(elf->load_addr, &va, sz, 0,
986 					roundup(seg->vaddr + seg->memsz));
987 		if (res)
988 			err(res, "sys_remap");
989 		elf->ehdr_addr = va;
990 		elf->load_addr = va;
991 		elf->max_addr = va + sz;
992 		elf->phdr = (void *)(va + elf->e_phoff);
993 	}
994 }
995 
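/*
 * Scan a PT_DYNAMIC segment and queue each DT_NEEDED dependency (named by
 * a UUID string in the dynamic string table) for loading.
 */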
996 static void add_deps_from_segment(struct ta_elf *elf, unsigned int type,
997 				  vaddr_t addr, size_t memsz)
998 {
999 	size_t dyn_entsize = 0;
1000 	size_t num_dyns = 0;
1001 	size_t n = 0;
1002 	unsigned int tag = 0;
1003 	size_t val = 0;
1004 	TEE_UUID uuid = { };
1005 	char *str_tab = NULL;
1006 	size_t str_tab_sz = 0;
1007 
1008 	if (type != PT_DYNAMIC)
1009 		return;
1010 
1011 	check_phdr_in_range(elf, type, addr, memsz);
1012 
1013 	if (elf->is_32bit)
1014 		dyn_entsize = sizeof(Elf32_Dyn);
1015 	else
1016 		dyn_entsize = sizeof(Elf64_Dyn);
1017 
1018 	assert(!(memsz % dyn_entsize));
1019 	num_dyns = memsz / dyn_entsize;
1020 
1021 	for (n = 0; n < num_dyns && !(str_tab && str_tab_sz); n++) {
1022 		read_dyn(elf, addr, n, &tag, &val);
1023 		if (tag == DT_STRTAB)
1024 			str_tab = (char *)(val + elf->load_addr);
1025 		else if (tag == DT_STRSZ)
1026 			str_tab_sz = val;
1027 	}
1028 	check_range(elf, ".dynstr/STRTAB", str_tab, str_tab_sz);
1029 
1030 	for (n = 0; n < num_dyns; n++) {
1031 		read_dyn(elf, addr, n, &tag, &val);
1032 		if (tag != DT_NEEDED)
1033 			continue;
1034 		if (val >= str_tab_sz)
1035 			err(TEE_ERROR_BAD_FORMAT,
1036 			    "Offset into .dynstr/STRTAB out of range");
1037 		tee_uuid_from_str(&uuid, str_tab + val);
1038 		queue_elf(&uuid);
1039 	}
1040 }
1041 
1042 static void add_dependencies(struct ta_elf *elf)
1043 {
1044 	size_t n = 0;
1045 
1046 	if (elf->is_32bit) {
1047 		Elf32_Phdr *phdr = elf->phdr;
1048 
1049 		for (n = 0; n < elf->e_phnum; n++)
1050 			add_deps_from_segment(elf, phdr[n].p_type,
1051 					      phdr[n].p_vaddr, phdr[n].p_memsz);
1052 	} else {
1053 		Elf64_Phdr *phdr = elf->phdr;
1054 
1055 		for (n = 0; n < elf->e_phnum; n++)
1056 			add_deps_from_segment(elf, phdr[n].p_type,
1057 					      phdr[n].p_vaddr, phdr[n].p_memsz);
1058 	}
1059 }
1060 
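/*
 * Copy the section header table into heap memory, partly from the already
 * mapped first page when the table starts there.
 */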
1061 static void copy_section_headers(struct ta_elf *elf)
1062 {
1063 	TEE_Result res = TEE_SUCCESS;
1064 	size_t sz = 0;
1065 	size_t offs = 0;
1066 
1067 	if (MUL_OVERFLOW(elf->e_shnum, elf->e_shentsize, &sz))
1068 		err(TEE_ERROR_BAD_FORMAT, "Section headers size overflow");
1069 
1070 	elf->shdr = malloc(sz);
1071 	if (!elf->shdr)
1072 		err(TEE_ERROR_OUT_OF_MEMORY, "malloc");
1073 
1074 	/*
1075 	 * We're assuming that section headers come after the load segments,
1076 	 * but if it's a very small dynamically linked library the section
1077 	 * headers can still end up (at least partially) in the first mapped page.
1078 	 */
1079 	if (elf->e_shoff < SMALL_PAGE_SIZE) {
1080 		assert(!elf->is_main);
1081 		offs = MIN(SMALL_PAGE_SIZE - elf->e_shoff, sz);
1082 		memcpy(elf->shdr, (void *)(elf->load_addr + elf->e_shoff),
1083 		       offs);
1084 	}
1085 
1086 	if (offs < sz) {
1087 		res = sys_copy_from_ta_bin((uint8_t *)elf->shdr + offs,
1088 					   sz - offs, elf->handle,
1089 					   elf->e_shoff + offs);
1090 		if (res)
1091 			err(res, "sys_copy_from_ta_bin");
1092 	}
1093 }
1094 
1095 static void close_handle(struct ta_elf *elf)
1096 {
1097 	TEE_Result res = sys_close_ta_bin(elf->handle);
1098 
1099 	if (res)
1100 		err(res, "sys_close_ta_bin");
1101 	elf->handle = -1;
1102 }
1103 
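/*
 * Unmap everything mapped so far and reset the load state so the ELF can
 * be loaded again, for instance as a legacy TA.
 */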
1104 static void clean_elf_load_main(struct ta_elf *elf)
1105 {
1106 	TEE_Result res = TEE_SUCCESS;
1107 
1108 	/*
1109 	 * Clean up from last attempt to load
1110 	 */
1111 	res = sys_unmap(elf->ehdr_addr, SMALL_PAGE_SIZE);
1112 	if (res)
1113 		err(res, "sys_unmap");
1114 
1115 	while (!TAILQ_EMPTY(&elf->segs)) {
1116 		struct segment *seg = TAILQ_FIRST(&elf->segs);
1117 		vaddr_t va = 0;
1118 		size_t num_bytes = 0;
1119 
1120 		va = rounddown(elf->load_addr + seg->vaddr);
1121 		if (seg->remapped_writeable)
1122 			num_bytes = roundup(seg->vaddr + seg->memsz) -
1123 				    rounddown(seg->vaddr);
1124 		else
1125 			num_bytes = seg->memsz;
1126 
1127 		res = sys_unmap(va, num_bytes);
1128 		if (res)
1129 			err(res, "sys_unmap");
1130 
1131 		TAILQ_REMOVE(&elf->segs, seg, link);
1132 		free(seg);
1133 	}
1134 
1135 	free(elf->shdr);
1136 	memset(&elf->is_32bit, 0,
1137 	       (vaddr_t)&elf->uuid - (vaddr_t)&elf->is_32bit);
1138 
1139 	TAILQ_INIT(&elf->segs);
1140 }
1141 
1142 #ifdef ARM64
1143 /*
1144  * Allocates an offset in the TA's Thread Control Block for the TLS segment of
1145  * the @elf module.
1146  */
1147 #define TCB_HEAD_SIZE (2 * sizeof(long))
1148 static void set_tls_offset(struct ta_elf *elf)
1149 {
1150 	static size_t next_offs = TCB_HEAD_SIZE;
1151 
1152 	if (!elf->tls_start)
1153 		return;
1154 
1155 	/* Module has a TLS segment */
1156 	elf->tls_tcb_offs = next_offs;
1157 	next_offs += elf->tls_memsz;
1158 }
1159 #else
1160 static void set_tls_offset(struct ta_elf *elf __unused) {}
1161 #endif
1162 
1163 static void load_main(struct ta_elf *elf)
1164 {
1165 	init_elf(elf);
1166 	map_segments(elf);
1167 	populate_segments(elf);
1168 	add_dependencies(elf);
1169 	copy_section_headers(elf);
1170 	save_symtab(elf);
1171 	close_handle(elf);
1172 	set_tls_offset(elf);
1173 	parse_property_segment(elf);
1174 	if (elf->bti_enabled)
1175 		ta_elf_add_bti(elf);
1176 
1177 	elf->head = (struct ta_head *)elf->load_addr;
1178 	if (elf->head->depr_entry != UINT64_MAX) {
1179 		/*
1180 		 * Legacy TAs set their entry point in ta_head. For
1181 		 * non-legacy TAs the ELF entry point is used instead,
1182 		 * leaving the ta_head entry point set to UINT64_MAX to
1183 		 * indicate that it's not used.
1184 		 *
1185 		 * NB, everything built before commit a73b5878c89d ("Replace
1186 		 * ta_head.entry with elf entry") is considered a legacy TA
1187 		 * by ldelf.
1188 		 *
1189 		 * Legacy TAs cannot be mapped with shared memory segments
1190 		 * so restart the mapping if it turned out we're loading a
1191 		 * legacy TA.
1192 		 */
1193 
1194 		DMSG("Reloading TA %pUl as legacy TA", (void *)&elf->uuid);
1195 		clean_elf_load_main(elf);
1196 		elf->is_legacy = true;
1197 		init_elf(elf);
1198 		map_segments(elf);
1199 		populate_segments_legacy(elf);
1200 		add_dependencies(elf);
1201 		copy_section_headers(elf);
1202 		save_symtab(elf);
1203 		close_handle(elf);
1204 		elf->head = (struct ta_head *)elf->load_addr;
1205 		/*
1206 		 * Check that the TA is still a legacy TA, if it isn't give
1207 		 * up now since we're likely under attack.
1208 		 */
1209 		if (elf->head->depr_entry == UINT64_MAX)
1210 			err(TEE_ERROR_GENERIC,
1211 			    "TA %pUl was changed on disk to non-legacy",
1212 			    (void *)&elf->uuid);
1213 	}
1214 
1215 }
1216 
1217 void ta_elf_load_main(const TEE_UUID *uuid, uint32_t *is_32bit, uint64_t *sp,
1218 		      uint32_t *ta_flags)
1219 {
1220 	struct ta_elf *elf = queue_elf(uuid);
1221 	vaddr_t va = 0;
1222 	TEE_Result res = TEE_SUCCESS;
1223 
1224 	assert(elf);
1225 	elf->is_main = true;
1226 
1227 	load_main(elf);
1228 
1229 	*is_32bit = elf->is_32bit;
1230 	res = sys_map_zi(elf->head->stack_size, 0, &va, 0, 0);
1231 	if (res)
1232 		err(res, "sys_map_zi stack");
1233 
1234 	if (elf->head->flags & ~TA_FLAGS_MASK)
1235 		err(TEE_ERROR_BAD_FORMAT, "Invalid TA flag(s) %#"PRIx32,
1236 		    elf->head->flags & ~TA_FLAGS_MASK);
1237 
1238 	*ta_flags = elf->head->flags;
1239 	*sp = va + elf->head->stack_size;
1240 	ta_stack = va;
1241 	ta_stack_size = elf->head->stack_size;
1242 }
1243 
1244 void ta_elf_finalize_load_main(uint64_t *entry)
1245 {
1246 	struct ta_elf *elf = TAILQ_FIRST(&main_elf_queue);
1247 	TEE_Result res = TEE_SUCCESS;
1248 
1249 	assert(elf->is_main);
1250 
1251 	res = ta_elf_set_init_fini_info_compat(elf->is_32bit);
1252 	if (res)
1253 		err(res, "ta_elf_set_init_fini_info_compat");
1254 	res = ta_elf_set_elf_phdr_info(elf->is_32bit);
1255 	if (res)
1256 		err(res, "ta_elf_set_elf_phdr_info");
1257 
1258 	if (elf->is_legacy)
1259 		*entry = elf->head->depr_entry;
1260 	else
1261 		*entry = elf->e_entry + elf->load_addr;
1262 }
1263 
1264 
1265 void ta_elf_load_dependency(struct ta_elf *elf, bool is_32bit)
1266 {
1267 	if (elf->is_main)
1268 		return;
1269 
1270 	init_elf(elf);
1271 	if (elf->is_32bit != is_32bit)
1272 		err(TEE_ERROR_BAD_FORMAT, "ELF %pUl is %sbit (expected %sbit)",
1273 		    (void *)&elf->uuid, elf->is_32bit ? "32" : "64",
1274 		    is_32bit ? "32" : "64");
1275 
1276 	map_segments(elf);
1277 	populate_segments(elf);
1278 	add_dependencies(elf);
1279 	copy_section_headers(elf);
1280 	save_symtab(elf);
1281 	close_handle(elf);
1282 	set_tls_offset(elf);
1283 	parse_property_segment(elf);
1284 	if (elf->bti_enabled)
1285 		ta_elf_add_bti(elf);
1286 }
1287 
1288 void ta_elf_finalize_mappings(struct ta_elf *elf)
1289 {
1290 	TEE_Result res = TEE_SUCCESS;
1291 	struct segment *seg = NULL;
1292 
1293 	if (!elf->is_legacy)
1294 		return;
1295 
1296 	TAILQ_FOREACH(seg, &elf->segs, link) {
1297 		vaddr_t va = elf->load_addr + seg->vaddr;
1298 		uint32_t flags =  0;
1299 
1300 		if (seg->flags & PF_W)
1301 			flags |= LDELF_MAP_FLAG_WRITEABLE;
1302 		if (seg->flags & PF_X)
1303 			flags |= LDELF_MAP_FLAG_EXECUTABLE;
1304 
1305 		res = sys_set_prot(va, seg->memsz, flags);
1306 		if (res)
1307 			err(res, "sys_set_prot");
1308 	}
1309 }
1310 
1311 static void __printf(3, 4) print_wrapper(void *pctx, print_func_t print_func,
1312 					 const char *fmt, ...)
1313 {
1314 	va_list ap;
1315 
1316 	va_start(ap, fmt);
1317 	print_func(pctx, fmt, ap);
1318 	va_end(ap);
1319 }
1320 
1321 static void print_seg(void *pctx, print_func_t print_func,
1322 		      size_t idx __maybe_unused, int elf_idx __maybe_unused,
1323 		      vaddr_t va __maybe_unused, paddr_t pa __maybe_unused,
1324 		      size_t sz __maybe_unused, uint32_t flags)
1325 {
1326 	int rc __maybe_unused = 0;
1327 	int width __maybe_unused = 8;
1328 	char desc[14] __maybe_unused = "";
1329 	char flags_str[] __maybe_unused = "----";
1330 
1331 	if (elf_idx > -1) {
1332 		rc = snprintf(desc, sizeof(desc), " [%d]", elf_idx);
1333 		assert(rc >= 0);
1334 	} else {
1335 		if (flags & DUMP_MAP_EPHEM) {
1336 			rc = snprintf(desc, sizeof(desc), " (param)");
1337 			assert(rc >= 0);
1338 		}
1339 		if (flags & DUMP_MAP_LDELF) {
1340 			rc = snprintf(desc, sizeof(desc), " (ldelf)");
1341 			assert(rc >= 0);
1342 		}
1343 		if (va == ta_stack) {
1344 			rc = snprintf(desc, sizeof(desc), " (stack)");
1345 			assert(rc >= 0);
1346 		}
1347 	}
1348 
1349 	if (flags & DUMP_MAP_READ)
1350 		flags_str[0] = 'r';
1351 	if (flags & DUMP_MAP_WRITE)
1352 		flags_str[1] = 'w';
1353 	if (flags & DUMP_MAP_EXEC)
1354 		flags_str[2] = 'x';
1355 	if (flags & DUMP_MAP_SECURE)
1356 		flags_str[3] = 's';
1357 
1358 	print_wrapper(pctx, print_func,
1359 		      "region %2zu: va 0x%0*"PRIxVA" pa 0x%0*"PRIxPA" size 0x%06zx flags %s%s\n",
1360 		      idx, width, va, width, pa, sz, flags_str, desc);
1361 }
1362 
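/*
 * Iterate over all segments of all ELFs in @elf_queue in increasing load
 * address order, one segment per call. Returns false when the last
 * segment has been visited.
 */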
1363 static bool get_next_in_order(struct ta_elf_queue *elf_queue,
1364 			      struct ta_elf **elf, struct segment **seg,
1365 			      size_t *elf_idx)
1366 {
1367 	struct ta_elf *e = NULL;
1368 	struct segment *s = NULL;
1369 	size_t idx = 0;
1370 	vaddr_t va = 0;
1371 	struct ta_elf *e2 = NULL;
1372 	size_t i2 = 0;
1373 
1374 	assert(elf && seg && elf_idx);
1375 	e = *elf;
1376 	s = *seg;
1377 	assert((e == NULL && s == NULL) || (e != NULL && s != NULL));
1378 
1379 	if (s) {
1380 		s = TAILQ_NEXT(s, link);
1381 		if (s) {
1382 			*seg = s;
1383 			return true;
1384 		}
1385 	}
1386 
1387 	if (e)
1388 		va = e->load_addr;
1389 
1390 	/* Find the ELF with next load address */
1391 	e = NULL;
1392 	TAILQ_FOREACH(e2, elf_queue, link) {
1393 		if (e2->load_addr > va) {
1394 			if (!e || e2->load_addr < e->load_addr) {
1395 				e = e2;
1396 				idx = i2;
1397 			}
1398 		}
1399 		i2++;
1400 	}
1401 	if (!e)
1402 		return false;
1403 
1404 	*elf = e;
1405 	*seg = TAILQ_FIRST(&e->segs);
1406 	*elf_idx = idx;
1407 	return true;
1408 }
1409 
1410 void ta_elf_print_mappings(void *pctx, print_func_t print_func,
1411 			   struct ta_elf_queue *elf_queue, size_t num_maps,
1412 			   struct dump_map *maps, vaddr_t mpool_base)
1413 {
1414 	struct segment *seg = NULL;
1415 	struct ta_elf *elf = NULL;
1416 	size_t elf_idx = 0;
1417 	size_t idx = 0;
1418 	size_t map_idx = 0;
1419 
1420 	/*
1421 	 * Loop over all segments and maps, printing virtual addresses in
1422 	 * order. A segment has priority when the same virtual address is
1423 	 * present in both a map and a segment.
1424 	 */
1425 	get_next_in_order(elf_queue, &elf, &seg, &elf_idx);
1426 	while (true) {
1427 		vaddr_t va = -1;
1428 		size_t sz = 0;
1429 		uint32_t flags = DUMP_MAP_SECURE;
1430 		size_t offs = 0;
1431 
1432 		if (seg) {
1433 			va = rounddown(seg->vaddr + elf->load_addr);
1434 			sz = roundup(seg->vaddr + seg->memsz) -
1435 				     rounddown(seg->vaddr);
1436 		}
1437 
1438 		while (map_idx < num_maps && maps[map_idx].va <= va) {
1439 			uint32_t f = 0;
1440 
1441 			/* If there's a match, it should be the same map */
1442 			if (maps[map_idx].va == va) {
1443 				/*
1444 				 * In shared libraries the first page is
1445 				 * mapped separately, with the rest of that
1446 				 * segment following back-to-back as a
1447 				 * separate entry.
1448 				 */
1449 				if (map_idx + 1 < num_maps &&
1450 				    maps[map_idx].sz == SMALL_PAGE_SIZE) {
1451 					vaddr_t next_va = maps[map_idx].va +
1452 							  maps[map_idx].sz;
1453 					size_t comb_sz = maps[map_idx].sz +
1454 							 maps[map_idx + 1].sz;
1455 
1456 					if (next_va == maps[map_idx + 1].va &&
1457 					    comb_sz == sz &&
1458 					    maps[map_idx].flags ==
1459 					    maps[map_idx + 1].flags) {
1460 						/* Skip this and next entry */
1461 						map_idx += 2;
1462 						continue;
1463 					}
1464 				}
1465 				assert(maps[map_idx].sz == sz);
1466 			} else if (maps[map_idx].va < va) {
1467 				if (maps[map_idx].va == mpool_base)
1468 					f |= DUMP_MAP_LDELF;
1469 				print_seg(pctx, print_func, idx, -1,
1470 					  maps[map_idx].va, maps[map_idx].pa,
1471 					  maps[map_idx].sz,
1472 					  maps[map_idx].flags | f);
1473 				idx++;
1474 			}
1475 			map_idx++;
1476 		}
1477 
1478 		if (!seg)
1479 			break;
1480 
1481 		offs = rounddown(seg->offset);
1482 		if (seg->flags & PF_R)
1483 			flags |= DUMP_MAP_READ;
1484 		if (seg->flags & PF_W)
1485 			flags |= DUMP_MAP_WRITE;
1486 		if (seg->flags & PF_X)
1487 			flags |= DUMP_MAP_EXEC;
1488 
1489 		print_seg(pctx, print_func, idx, elf_idx, va, offs, sz, flags);
1490 		idx++;
1491 
1492 		if (!get_next_in_order(elf_queue, &elf, &seg, &elf_idx))
1493 			seg = NULL;
1494 	}
1495 
1496 	elf_idx = 0;
1497 	TAILQ_FOREACH(elf, elf_queue, link) {
1498 		print_wrapper(pctx, print_func,
1499 			      " [%zu] %pUl @ 0x%0*"PRIxVA"\n",
1500 			      elf_idx, (void *)&elf->uuid, 8, elf->load_addr);
1501 		elf_idx++;
1502 	}
1503 }
1504 
1505 #ifdef CFG_UNWIND
1506 /* Called by libunw */
1507 bool find_exidx(vaddr_t addr, vaddr_t *idx_start, vaddr_t *idx_end)
1508 {
1509 	struct segment *seg = NULL;
1510 	struct ta_elf *elf = NULL;
1511 	vaddr_t a = 0;
1512 
1513 	TAILQ_FOREACH(elf, &main_elf_queue, link) {
1514 		if (addr < elf->load_addr)
1515 			continue;
1516 		a = addr - elf->load_addr;
1517 		TAILQ_FOREACH(seg, &elf->segs, link) {
1518 			if (a < seg->vaddr)
1519 				continue;
1520 			if (a - seg->vaddr < seg->filesz) {
1521 				*idx_start = elf->exidx_start + elf->load_addr;
1522 				*idx_end = elf->exidx_start + elf->load_addr +
1523 					   elf->exidx_size;
1524 				return true;
1525 			}
1526 		}
1527 	}
1528 
1529 	return false;
1530 }
1531 
1532 void ta_elf_stack_trace_a32(uint32_t regs[16])
1533 {
1534 	struct unwind_state_arm32 state = { };
1535 
1536 	memcpy(state.registers, regs, sizeof(state.registers));
1537 	print_stack_arm32(&state, ta_stack, ta_stack_size);
1538 }
1539 
1540 void ta_elf_stack_trace_a64(uint64_t fp, uint64_t sp, uint64_t pc)
1541 {
1542 	struct unwind_state_arm64 state = { .fp = fp, .sp = sp, .pc = pc };
1543 
1544 	print_stack_arm64(&state, ta_stack, ta_stack_size);
1545 }
1546 #endif
1547 
1548 TEE_Result ta_elf_add_library(const TEE_UUID *uuid)
1549 {
1550 	TEE_Result res = TEE_ERROR_GENERIC;
1551 	struct ta_elf *ta = TAILQ_FIRST(&main_elf_queue);
1552 	struct ta_elf *lib = ta_elf_find_elf(uuid);
1553 	struct ta_elf *elf = NULL;
1554 
1555 	if (lib)
1556 		return TEE_SUCCESS; /* Already mapped */
1557 
1558 	lib = queue_elf_helper(uuid);
1559 	if (!lib)
1560 		return TEE_ERROR_OUT_OF_MEMORY;
1561 
1562 	for (elf = lib; elf; elf = TAILQ_NEXT(elf, link))
1563 		ta_elf_load_dependency(elf, ta->is_32bit);
1564 
1565 	for (elf = lib; elf; elf = TAILQ_NEXT(elf, link)) {
1566 		ta_elf_relocate(elf);
1567 		ta_elf_finalize_mappings(elf);
1568 	}
1569 
1570 	for (elf = lib; elf; elf = TAILQ_NEXT(elf, link))
1571 		DMSG("ELF (%pUl) at %#"PRIxVA,
1572 		     (void *)&elf->uuid, elf->load_addr);
1573 
1574 	res = ta_elf_set_init_fini_info_compat(ta->is_32bit);
1575 	if (res)
1576 		return res;
1577 
1578 	return ta_elf_set_elf_phdr_info(ta->is_32bit);
1579 }
1580 
1581 /* Get address/size of .init_array and .fini_array from the dynamic segment */
1582 static void get_init_fini_array(struct ta_elf *elf, unsigned int type,
1583 				vaddr_t addr, size_t memsz, vaddr_t *init,
1584 				size_t *init_cnt, vaddr_t *fini,
1585 				size_t *fini_cnt)
1586 {
1587 	size_t addrsz = 0;
1588 	size_t dyn_entsize = 0;
1589 	size_t num_dyns = 0;
1590 	size_t n = 0;
1591 	unsigned int tag = 0;
1592 	size_t val = 0;
1593 
1594 	assert(type == PT_DYNAMIC);
1595 
1596 	check_phdr_in_range(elf, type, addr, memsz);
1597 
1598 	if (elf->is_32bit) {
1599 		dyn_entsize = sizeof(Elf32_Dyn);
1600 		addrsz = 4;
1601 	} else {
1602 		dyn_entsize = sizeof(Elf64_Dyn);
1603 		addrsz = 8;
1604 	}
1605 
1606 	assert(!(memsz % dyn_entsize));
1607 	num_dyns = memsz / dyn_entsize;
1608 
1609 	for (n = 0; n < num_dyns; n++) {
1610 		read_dyn(elf, addr, n, &tag, &val);
1611 		if (tag == DT_INIT_ARRAY)
1612 			*init = val + elf->load_addr;
1613 		else if (tag == DT_FINI_ARRAY)
1614 			*fini = val + elf->load_addr;
1615 		else if (tag == DT_INIT_ARRAYSZ)
1616 			*init_cnt = val / addrsz;
1617 		else if (tag == DT_FINI_ARRAYSZ)
1618 			*fini_cnt = val / addrsz;
1619 	}
1620 }
1621 
1622 /* Get address/size of .init_array and .fini_array in @elf (if present) */
1623 static void elf_get_init_fini_array(struct ta_elf *elf, vaddr_t *init,
1624 				    size_t *init_cnt, vaddr_t *fini,
1625 				    size_t *fini_cnt)
1626 {
1627 	size_t n = 0;
1628 
1629 	if (elf->is_32bit) {
1630 		Elf32_Phdr *phdr = elf->phdr;
1631 
1632 		for (n = 0; n < elf->e_phnum; n++) {
1633 			if (phdr[n].p_type == PT_DYNAMIC) {
1634 				get_init_fini_array(elf, phdr[n].p_type,
1635 						    phdr[n].p_vaddr,
1636 						    phdr[n].p_memsz,
1637 						    init, init_cnt, fini,
1638 						    fini_cnt);
1639 				return;
1640 			}
1641 		}
1642 	} else {
1643 		Elf64_Phdr *phdr = elf->phdr;
1644 
1645 		for (n = 0; n < elf->e_phnum; n++) {
1646 			if (phdr[n].p_type == PT_DYNAMIC) {
1647 				get_init_fini_array(elf, phdr[n].p_type,
1648 						    phdr[n].p_vaddr,
1649 						    phdr[n].p_memsz,
1650 						    init, init_cnt, fini,
1651 						    fini_cnt);
1652 				return;
1653 			}
1654 		}
1655 	}
1656 }
1657 
1658 /*
1659  * Deprecated by __elf_phdr_info below. Kept for compatibility.
1660  *
1661  * Pointers to ELF initialization and finalization functions are extracted by
1662  * ldelf and stored on the TA heap, then exported to the TA via the global
1663  * symbol __init_fini_info. libutee in OP-TEE 3.9.0 uses this mechanism.
1664  */
1665 
1666 struct __init_fini {
1667 	uint32_t flags;
1668 	uint16_t init_size;
1669 	uint16_t fini_size;
1670 
1671 	void (**init)(void); /* @init_size entries */
1672 	void (**fini)(void); /* @fini_size entries */
1673 };
1674 
1675 #define __IFS_VALID            BIT(0)
1676 #define __IFS_INIT_HAS_RUN     BIT(1)
1677 #define __IFS_FINI_HAS_RUN     BIT(2)
1678 
1679 struct __init_fini_info {
1680 	uint32_t reserved;
1681 	uint16_t size;
1682 	uint16_t pad;
1683 	struct __init_fini *ifs; /* @size entries */
1684 };
1685 
1686 /* 32-bit variants for a 64-bit ldelf to access a 32-bit TA */
1687 
1688 struct __init_fini32 {
1689 	uint32_t flags;
1690 	uint16_t init_size;
1691 	uint16_t fini_size;
1692 	uint32_t init;
1693 	uint32_t fini;
1694 };
1695 
1696 struct __init_fini_info32 {
1697 	uint32_t reserved;
1698 	uint16_t size;
1699 	uint16_t pad;
1700 	uint32_t ifs;
1701 };
1702 
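/*
 * Resize the ifs array in __init_fini_info (32- or 64-bit layout) at @va
 * to hold @cnt entries, zero-initializing any new entries.
 */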
1703 static TEE_Result realloc_ifs(vaddr_t va, size_t cnt, bool is_32bit)
1704 {
1705 	struct __init_fini_info32 *info32 = (struct __init_fini_info32 *)va;
1706 	struct __init_fini_info *info = (struct __init_fini_info *)va;
1707 	struct __init_fini32 *ifs32 = NULL;
1708 	struct __init_fini *ifs = NULL;
1709 	size_t prev_cnt = 0;
1710 	void *ptr = NULL;
1711 
1712 	if (is_32bit) {
1713 		ptr = (void *)(vaddr_t)info32->ifs;
1714 		ptr = realloc(ptr, cnt * sizeof(struct __init_fini32));
1715 		if (!ptr)
1716 			return TEE_ERROR_OUT_OF_MEMORY;
1717 		ifs32 = ptr;
1718 		prev_cnt = info32->size;
1719 		if (cnt > prev_cnt)
1720 			memset(ifs32 + prev_cnt, 0,
1721 			       (cnt - prev_cnt) * sizeof(*ifs32));
1722 		info32->ifs = (uint32_t)(vaddr_t)ifs32;
1723 		info32->size = cnt;
1724 	} else {
1725 		ptr = realloc(info->ifs, cnt * sizeof(struct __init_fini));
1726 		if (!ptr)
1727 			return TEE_ERROR_OUT_OF_MEMORY;
1728 		ifs = ptr;
1729 		prev_cnt = info->size;
1730 		if (cnt > prev_cnt)
1731 			memset(ifs + prev_cnt, 0,
1732 			       (cnt - prev_cnt) * sizeof(*ifs));
1733 		info->ifs = ifs;
1734 		info->size = cnt;
1735 	}
1736 
1737 	return TEE_SUCCESS;
1738 }
1739 
1740 static void fill_ifs(vaddr_t va, size_t idx, struct ta_elf *elf, bool is_32bit)
1741 {
1742 	struct __init_fini_info32 *info32 = (struct __init_fini_info32 *)va;
1743 	struct __init_fini_info *info = (struct __init_fini_info *)va;
1744 	struct __init_fini32 *ifs32 = NULL;
1745 	struct __init_fini *ifs = NULL;
1746 	size_t init_cnt = 0;
1747 	size_t fini_cnt = 0;
1748 	vaddr_t init = 0;
1749 	vaddr_t fini = 0;
1750 
1751 	if (is_32bit) {
1752 		assert(idx < info32->size);
1753 		ifs32 = &((struct __init_fini32 *)(vaddr_t)info32->ifs)[idx];
1754 
1755 		if (ifs32->flags & __IFS_VALID)
1756 			return;
1757 
1758 		elf_get_init_fini_array(elf, &init, &init_cnt, &fini,
1759 					&fini_cnt);
1760 
1761 		ifs32->init = (uint32_t)init;
1762 		ifs32->init_size = init_cnt;
1763 
1764 		ifs32->fini = (uint32_t)fini;
1765 		ifs32->fini_size = fini_cnt;
1766 
1767 		ifs32->flags |= __IFS_VALID;
1768 	} else {
1769 		assert(idx < info->size);
1770 		ifs = &info->ifs[idx];
1771 
1772 		if (ifs->flags & __IFS_VALID)
1773 			return;
1774 
1775 		elf_get_init_fini_array(elf, &init, &init_cnt, &fini,
1776 					&fini_cnt);
1777 
1778 		ifs->init = (void (**)(void))init;
1779 		ifs->init_size = init_cnt;
1780 
1781 		ifs->fini = (void (**)(void))fini;
1782 		ifs->fini_size = fini_cnt;
1783 
1784 		ifs->flags |= __IFS_VALID;
1785 	}
1786 }
1787 
1788 /*
1789  * Set or update __init_fini_info in the TA with information from the ELF
1790  * queue
1791  */
1792 TEE_Result ta_elf_set_init_fini_info_compat(bool is_32bit)
1793 {
1794 	struct __init_fini_info *info = NULL;
1795 	TEE_Result res = TEE_SUCCESS;
1796 	struct ta_elf *elf = NULL;
1797 	vaddr_t info_va = 0;
1798 	size_t cnt = 0;
1799 
1800 	res = ta_elf_resolve_sym("__init_fini_info", &info_va, NULL, NULL);
1801 	if (res) {
1802 		if (res == TEE_ERROR_ITEM_NOT_FOUND) {
1803 			/*
1804 			 * Not an error, only TAs linked against libutee from
1805 			 * OP-TEE 3.9.0 have this symbol.
1806 			 */
1807 			return TEE_SUCCESS;
1808 		}
1809 		return res;
1810 	}
1811 	assert(info_va);
1812 
1813 	info = (struct __init_fini_info *)info_va;
1814 	if (info->reserved)
1815 		return TEE_ERROR_NOT_SUPPORTED;
1816 
1817 	TAILQ_FOREACH(elf, &main_elf_queue, link)
1818 		cnt++;
1819 
1820 	/* Queue has at least one file (main) */
1821 	assert(cnt);
1822 
1823 	res = realloc_ifs(info_va, cnt, is_32bit);
1824 	if (res)
1825 		goto err;
1826 
1827 	cnt = 0;
1828 	TAILQ_FOREACH(elf, &main_elf_queue, link) {
1829 		fill_ifs(info_va, cnt, elf, is_32bit);
1830 		cnt++;
1831 	}
1832 
1833 	return TEE_SUCCESS;
1834 err:
1835 	free(info);
1836 	return res;
1837 }
1838 
1839 static TEE_Result realloc_elf_phdr_info(vaddr_t va, size_t cnt, bool is_32bit)
1840 {
1841 	struct __elf_phdr_info32 *info32 = (struct __elf_phdr_info32 *)va;
1842 	struct __elf_phdr_info *info = (struct __elf_phdr_info *)va;
1843 	struct dl_phdr_info32 *dlpi32 = NULL;
1844 	struct dl_phdr_info *dlpi = NULL;
1845 	size_t prev_cnt = 0;
1846 	void *ptr = NULL;
1847 
1848 	if (is_32bit) {
1849 		ptr = (void *)(vaddr_t)info32->dlpi;
1850 		ptr = realloc(ptr, cnt * sizeof(*dlpi32));
1851 		if (!ptr)
1852 			return TEE_ERROR_OUT_OF_MEMORY;
1853 		dlpi32 = ptr;
1854 		prev_cnt = info32->count;
1855 		if (cnt > prev_cnt)
1856 			memset(dlpi32 + prev_cnt, 0,
1857 			       (cnt - prev_cnt) * sizeof(*dlpi32));
1858 		info32->dlpi = (uint32_t)(vaddr_t)dlpi32;
1859 		info32->count = cnt;
1860 	} else {
1861 		ptr = realloc(info->dlpi, cnt * sizeof(*dlpi));
1862 		if (!ptr)
1863 			return TEE_ERROR_OUT_OF_MEMORY;
1864 		dlpi = ptr;
1865 		prev_cnt = info->count;
1866 		if (cnt > prev_cnt)
1867 			memset(dlpi + prev_cnt, 0,
1868 			       (cnt - prev_cnt) * sizeof(*dlpi));
1869 		info->dlpi = dlpi;
1870 		info->count = cnt;
1871 	}
1872 
1873 	return TEE_SUCCESS;
1874 }
1875 
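/* Fill in the dl_phdr_info entry at @idx with the details of @elf */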
1876 static void fill_elf_phdr_info(vaddr_t va, size_t idx, struct ta_elf *elf,
1877 			       bool is_32bit)
1878 {
1879 	struct __elf_phdr_info32 *info32 = (struct __elf_phdr_info32 *)va;
1880 	struct __elf_phdr_info *info = (struct __elf_phdr_info *)va;
1881 	struct dl_phdr_info32 *dlpi32 = NULL;
1882 	struct dl_phdr_info *dlpi = NULL;
1883 
1884 	if (is_32bit) {
1885 		assert(idx < info32->count);
1886 		dlpi32 = (struct dl_phdr_info32 *)(vaddr_t)info32->dlpi + idx;
1887 
1888 		dlpi32->dlpi_addr = elf->load_addr;
1889 		if (elf->soname)
1890 			dlpi32->dlpi_name = (vaddr_t)elf->soname;
1891 		else
1892 			dlpi32->dlpi_name = (vaddr_t)&info32->zero;
1893 		dlpi32->dlpi_phdr = (vaddr_t)elf->phdr;
1894 		dlpi32->dlpi_phnum = elf->e_phnum;
1895 		dlpi32->dlpi_adds = 1; /* No unloading on dlclose() currently */
1896 		dlpi32->dlpi_subs = 0; /* No unloading on dlclose() currently */
1897 		dlpi32->dlpi_tls_modid = elf->tls_mod_id;
1898 		dlpi32->dlpi_tls_data = elf->tls_start;
1899 	} else {
1900 		assert(idx < info->count);
1901 		dlpi = info->dlpi + idx;
1902 
1903 		dlpi->dlpi_addr = elf->load_addr;
1904 		if (elf->soname)
1905 			dlpi->dlpi_name = elf->soname;
1906 		else
1907 			dlpi->dlpi_name = &info32->zero;
1908 		dlpi->dlpi_phdr = elf->phdr;
1909 		dlpi->dlpi_phnum = elf->e_phnum;
1910 		dlpi->dlpi_adds = 1; /* No unloading on dlclose() currently */
1911 		dlpi->dlpi_subs = 0; /* No unloading on dlclose() currently */
1912 		dlpi->dlpi_tls_modid = elf->tls_mod_id;
1913 		dlpi->dlpi_tls_data = (void *)elf->tls_start;
1914 	}
1915 }
1916 
1917 /* Set or update __elf_phdr_info in the TA with information from the ELF queue */
1918 TEE_Result ta_elf_set_elf_phdr_info(bool is_32bit)
1919 {
1920 	struct __elf_phdr_info *info = NULL;
1921 	TEE_Result res = TEE_SUCCESS;
1922 	struct ta_elf *elf = NULL;
1923 	vaddr_t info_va = 0;
1924 	size_t cnt = 0;
1925 
1926 	res = ta_elf_resolve_sym("__elf_phdr_info", &info_va, NULL, NULL);
1927 	if (res) {
1928 		if (res == TEE_ERROR_ITEM_NOT_FOUND) {
1929 			/* Older TA */
1930 			return TEE_SUCCESS;
1931 		}
1932 		return res;
1933 	}
1934 	assert(info_va);
1935 
1936 	info = (struct __elf_phdr_info *)info_va;
1937 	if (info->reserved)
1938 		return TEE_ERROR_NOT_SUPPORTED;
1939 
1940 	TAILQ_FOREACH(elf, &main_elf_queue, link)
1941 		cnt++;
1942 
1943 	res = realloc_elf_phdr_info(info_va, cnt, is_32bit);
1944 	if (res)
1945 		return res;
1946 
1947 	cnt = 0;
1948 	TAILQ_FOREACH(elf, &main_elf_queue, link) {
1949 		fill_elf_phdr_info(info_va, cnt, elf, is_32bit);
1950 		cnt++;
1951 	}
1952 
1953 	return TEE_SUCCESS;
1954 }
1955