// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2019, Linaro Limited
 */

#include <assert.h>
#include <ctype.h>
#include <elf32.h>
#include <elf64.h>
#include <elf_common.h>
#include <ldelf.h>
#include <pta_system.h>
#include <stdio.h>
#include <stdlib.h>
#include <string_ext.h>
#include <string.h>
#include <tee_api_types.h>
#include <tee_internal_api_extensions.h>
#include <user_ta_header.h>
#include <utee_syscalls.h>
#include <util.h>

#include "sys.h"
#include "ta_elf.h"
#include "unwind.h"

static vaddr_t ta_stack;
static size_t ta_stack_size;

struct ta_elf_queue main_elf_queue = TAILQ_HEAD_INITIALIZER(main_elf_queue);

static struct ta_elf *queue_elf_helper(const TEE_UUID *uuid)
{
	struct ta_elf *elf = calloc(1, sizeof(*elf));

	if (!elf)
		return NULL;

	TAILQ_INIT(&elf->segs);

	elf->uuid = *uuid;
	TAILQ_INSERT_TAIL(&main_elf_queue, elf, link);
	return elf;
}

static struct ta_elf *queue_elf(const TEE_UUID *uuid)
{
	struct ta_elf *elf = ta_elf_find_elf(uuid);

	if (elf)
		return NULL;

	elf = queue_elf_helper(uuid);
	if (!elf)
		err(TEE_ERROR_OUT_OF_MEMORY, "queue_elf_helper");

	return elf;
}

struct ta_elf *ta_elf_find_elf(const TEE_UUID *uuid)
{
	struct ta_elf *elf = NULL;

	TAILQ_FOREACH(elf, &main_elf_queue, link)
		if (!memcmp(uuid, &elf->uuid, sizeof(*uuid)))
			return elf;

	return NULL;
}

static TEE_Result e32_parse_ehdr(struct ta_elf *elf, Elf32_Ehdr *ehdr)
{
	if (ehdr->e_ident[EI_VERSION] != EV_CURRENT ||
	    ehdr->e_ident[EI_CLASS] != ELFCLASS32 ||
	    ehdr->e_ident[EI_DATA] != ELFDATA2LSB ||
	    ehdr->e_ident[EI_OSABI] != ELFOSABI_NONE ||
	    ehdr->e_type != ET_DYN || ehdr->e_machine != EM_ARM ||
	    (ehdr->e_flags & EF_ARM_ABIMASK) != EF_ARM_ABI_VERSION ||
#ifndef CFG_WITH_VFP
	    (ehdr->e_flags & EF_ARM_ABI_FLOAT_HARD) ||
#endif
	    ehdr->e_phentsize != sizeof(Elf32_Phdr) ||
	    ehdr->e_shentsize != sizeof(Elf32_Shdr))
		return TEE_ERROR_BAD_FORMAT;

	elf->is_32bit = true;
	elf->e_entry = ehdr->e_entry;
	elf->e_phoff = ehdr->e_phoff;
	elf->e_shoff = ehdr->e_shoff;
	elf->e_phnum = ehdr->e_phnum;
	elf->e_shnum = ehdr->e_shnum;
	elf->e_phentsize = ehdr->e_phentsize;
	elf->e_shentsize = ehdr->e_shentsize;

	return TEE_SUCCESS;
}

#ifdef ARM64
static TEE_Result e64_parse_ehdr(struct ta_elf *elf, Elf64_Ehdr *ehdr)
{
	if (ehdr->e_ident[EI_VERSION] != EV_CURRENT ||
	    ehdr->e_ident[EI_CLASS] != ELFCLASS64 ||
	    ehdr->e_ident[EI_DATA] != ELFDATA2LSB ||
	    ehdr->e_ident[EI_OSABI] != ELFOSABI_NONE ||
	    ehdr->e_type != ET_DYN || ehdr->e_machine != EM_AARCH64 ||
	    ehdr->e_flags || ehdr->e_phentsize != sizeof(Elf64_Phdr) ||
	    ehdr->e_shentsize != sizeof(Elf64_Shdr))
		return TEE_ERROR_BAD_FORMAT;

	elf->is_32bit = false;
	elf->e_entry = ehdr->e_entry;
	elf->e_phoff = ehdr->e_phoff;
	elf->e_shoff = ehdr->e_shoff;
	elf->e_phnum = ehdr->e_phnum;
	elf->e_shnum = ehdr->e_shnum;
	elf->e_phentsize = ehdr->e_phentsize;
	elf->e_shentsize = ehdr->e_shentsize;

	return TEE_SUCCESS;
}
#else /*ARM64*/
static TEE_Result e64_parse_ehdr(struct ta_elf *elf __unused,
				 Elf64_Ehdr *ehdr __unused)
{
	return TEE_ERROR_NOT_SUPPORTED;
}
#endif /*ARM64*/

static void check_phdr_in_range(struct ta_elf *elf, unsigned int type,
				vaddr_t addr, size_t memsz)
{
	vaddr_t max_addr = 0;

	if (ADD_OVERFLOW(addr, memsz, &max_addr))
		err(TEE_ERROR_BAD_FORMAT, "Program header %#x overflow", type);

	/*
	 * elf->load_addr and elf->max_addr are both using the
	 * final virtual addresses, while this program header is
	 * relative to 0.
	 */
	if (max_addr > elf->max_addr - elf->load_addr)
		err(TEE_ERROR_BAD_FORMAT, "Program header %#x out of bounds",
		    type);
}

static void read_dyn(struct ta_elf *elf, vaddr_t addr,
		     size_t idx, unsigned int *tag, size_t *val)
{
	if (elf->is_32bit) {
		Elf32_Dyn *dyn = (Elf32_Dyn *)(addr + elf->load_addr);

		*tag = dyn[idx].d_tag;
		*val = dyn[idx].d_un.d_val;
	} else {
		Elf64_Dyn *dyn = (Elf64_Dyn *)(addr + elf->load_addr);

		*tag = dyn[idx].d_tag;
		*val = dyn[idx].d_un.d_val;
	}
}

static void save_hashtab_from_segment(struct ta_elf *elf, unsigned int type,
				      vaddr_t addr, size_t memsz)
{
	size_t dyn_entsize = 0;
	size_t num_dyns = 0;
	size_t n = 0;
	unsigned int tag = 0;
	size_t val = 0;

	if (type != PT_DYNAMIC)
		return;

	check_phdr_in_range(elf, type, addr, memsz);

	if (elf->is_32bit)
		dyn_entsize = sizeof(Elf32_Dyn);
	else
		dyn_entsize = sizeof(Elf64_Dyn);

	assert(!(memsz % dyn_entsize));
	num_dyns = memsz / dyn_entsize;

	for (n = 0; n < num_dyns; n++) {
		read_dyn(elf, addr, n, &tag, &val);
		if (tag == DT_HASH) {
			elf->hashtab = (void *)(val + elf->load_addr);
			break;
		}
	}
}

static void check_range(struct ta_elf *elf, const char *name, const void *ptr,
			size_t sz)
{
	size_t max_addr = 0;

	if ((vaddr_t)ptr < elf->load_addr)
		err(TEE_ERROR_GENERIC, "%s %p out of range", name, ptr);

	if (ADD_OVERFLOW((vaddr_t)ptr, sz, &max_addr))
		err(TEE_ERROR_GENERIC, "%s range overflow", name);

	if (max_addr > elf->max_addr)
		err(TEE_ERROR_GENERIC,
		    "%s %p..%#zx out of range", name, ptr, max_addr);
}

static void check_hashtab(struct ta_elf *elf, void *ptr, size_t num_buckets,
			  size_t num_chains)
{
	/*
	 * num_words starts at 2 because the first two words are mandatory
	 * and hold num_buckets and num_chains. This function is therefore
	 * called twice: first to check that there's room for num_buckets
	 * and num_chains, then to check that the whole table fits.
	 * See http://www.sco.com/developers/gabi/latest/ch5.dynamic.html#hash
	 */
	size_t num_words = 2;
	size_t sz = 0;

	if (!ALIGNMENT_IS_OK(ptr, uint32_t))
		err(TEE_ERROR_GENERIC, "Bad alignment of hashtab %p", ptr);

	if (ADD_OVERFLOW(num_words, num_buckets, &num_words) ||
	    ADD_OVERFLOW(num_words, num_chains, &num_words) ||
	    MUL_OVERFLOW(num_words, sizeof(uint32_t), &sz))
		err(TEE_ERROR_GENERIC, "Hashtab overflow");

	check_range(elf, "Hashtab", ptr, sz);
}
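
/*
 * For reference, a DT_HASH table is an array of uint32_t words laid out
 * as follows (see the gABI link above):
 *
 *   word 0:                  num_buckets
 *   word 1:                  num_chains
 *   next num_buckets words:  bucket[]
 *   next num_chains words:   chain[]
 *
 * which is why check_hashtab() computes the size as
 * (2 + num_buckets + num_chains) * sizeof(uint32_t).
 */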

static void save_hashtab(struct ta_elf *elf)
{
	uint32_t *hashtab = NULL;
	size_t n = 0;

	if (elf->is_32bit) {
		Elf32_Phdr *phdr = elf->phdr;

		for (n = 0; n < elf->e_phnum; n++)
			save_hashtab_from_segment(elf, phdr[n].p_type,
						  phdr[n].p_vaddr,
						  phdr[n].p_memsz);
	} else {
		Elf64_Phdr *phdr = elf->phdr;

		for (n = 0; n < elf->e_phnum; n++)
			save_hashtab_from_segment(elf, phdr[n].p_type,
						  phdr[n].p_vaddr,
						  phdr[n].p_memsz);
	}

	check_hashtab(elf, elf->hashtab, 0, 0);
	hashtab = elf->hashtab;
	check_hashtab(elf, elf->hashtab, hashtab[0], hashtab[1]);
}
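
/*
 * Illustrative sketch, not part of ldelf: the hash function defined by
 * the gABI for DT_HASH tables such as the one saved above. Symbol lookup
 * hashes the symbol name and then walks bucket[] and chain[].
 */
#if 0
static unsigned long elf_hash(const unsigned char *name)
{
	unsigned long h = 0;
	unsigned long g = 0;

	while (*name) {
		h = (h << 4) + *name++;
		g = h & 0xf0000000;
		if (g)
			h ^= g >> 24;
		h &= ~g;
	}
	return h;
}
#endif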

static void e32_save_symtab(struct ta_elf *elf, size_t tab_idx)
{
	Elf32_Shdr *shdr = elf->shdr;
	size_t str_idx = shdr[tab_idx].sh_link;

	elf->dynsymtab = (void *)(shdr[tab_idx].sh_addr + elf->load_addr);
	if (!ALIGNMENT_IS_OK(elf->dynsymtab, Elf32_Sym))
		err(TEE_ERROR_GENERIC, "Bad alignment of dynsymtab %p",
		    elf->dynsymtab);
	check_range(elf, "Dynsymtab", elf->dynsymtab, shdr[tab_idx].sh_size);

	if (shdr[tab_idx].sh_size % sizeof(Elf32_Sym))
		err(TEE_ERROR_GENERIC,
		    "Size of dynsymtab not an even multiple of Elf32_Sym");
	elf->num_dynsyms = shdr[tab_idx].sh_size / sizeof(Elf32_Sym);

	if (str_idx >= elf->e_shnum)
		err(TEE_ERROR_GENERIC, "Dynstr section index out of range");
	elf->dynstr = (void *)(shdr[str_idx].sh_addr + elf->load_addr);
	check_range(elf, "Dynstr", elf->dynstr, shdr[str_idx].sh_size);

	elf->dynstr_size = shdr[str_idx].sh_size;
}

static void e64_save_symtab(struct ta_elf *elf, size_t tab_idx)
{
	Elf64_Shdr *shdr = elf->shdr;
	size_t str_idx = shdr[tab_idx].sh_link;

	elf->dynsymtab = (void *)(vaddr_t)(shdr[tab_idx].sh_addr +
					   elf->load_addr);

	if (!ALIGNMENT_IS_OK(elf->dynsymtab, Elf64_Sym))
		err(TEE_ERROR_GENERIC, "Bad alignment of dynsymtab %p",
		    elf->dynsymtab);
	check_range(elf, "Dynsymtab", elf->dynsymtab, shdr[tab_idx].sh_size);

	if (shdr[tab_idx].sh_size % sizeof(Elf64_Sym))
		err(TEE_ERROR_GENERIC,
		    "Size of dynsymtab not an even multiple of Elf64_Sym");
	elf->num_dynsyms = shdr[tab_idx].sh_size / sizeof(Elf64_Sym);

	if (str_idx >= elf->e_shnum)
		err(TEE_ERROR_GENERIC, "Dynstr section index out of range");
	elf->dynstr = (void *)(vaddr_t)(shdr[str_idx].sh_addr + elf->load_addr);
	check_range(elf, "Dynstr", elf->dynstr, shdr[str_idx].sh_size);

	elf->dynstr_size = shdr[str_idx].sh_size;
}

static void save_symtab(struct ta_elf *elf)
{
	size_t n = 0;

	if (elf->is_32bit) {
		Elf32_Shdr *shdr = elf->shdr;

		for (n = 0; n < elf->e_shnum; n++) {
			if (shdr[n].sh_type == SHT_DYNSYM) {
				e32_save_symtab(elf, n);
				break;
			}
		}
	} else {
		Elf64_Shdr *shdr = elf->shdr;

		for (n = 0; n < elf->e_shnum; n++) {
			if (shdr[n].sh_type == SHT_DYNSYM) {
				e64_save_symtab(elf, n);
				break;
			}
		}
	}

	save_hashtab(elf);
}

static void init_elf(struct ta_elf *elf)
{
	TEE_Result res = TEE_SUCCESS;
	vaddr_t va = 0;
	uint32_t flags = PTA_SYSTEM_MAP_FLAG_SHAREABLE;
	size_t sz = 0;

	res = sys_open_ta_bin(&elf->uuid, &elf->handle);
	if (res)
		err(res, "sys_open_ta_bin(%pUl)", (void *)&elf->uuid);

	/*
	 * Map it read-only executable when we're loading a library where
	 * the ELF header is included in a load segment.
	 */
	if (!elf->is_main)
		flags |= PTA_SYSTEM_MAP_FLAG_EXECUTABLE;
	res = sys_map_ta_bin(&va, SMALL_PAGE_SIZE, flags, elf->handle, 0, 0, 0);
	if (res)
		err(res, "sys_map_ta_bin");
	elf->ehdr_addr = va;
	if (!elf->is_main) {
		elf->load_addr = va;
		elf->max_addr = va + SMALL_PAGE_SIZE;
		elf->max_offs = SMALL_PAGE_SIZE;
	}

	if (!IS_ELF(*(Elf32_Ehdr *)va))
		err(TEE_ERROR_BAD_FORMAT, "TA is not an ELF");

	res = e32_parse_ehdr(elf, (void *)va);
	if (res == TEE_ERROR_BAD_FORMAT)
		res = e64_parse_ehdr(elf, (void *)va);
	if (res)
		err(res, "Cannot parse ELF");

	if (MUL_OVERFLOW(elf->e_phnum, elf->e_phentsize, &sz) ||
	    ADD_OVERFLOW(sz, elf->e_phoff, &sz))
		err(TEE_ERROR_BAD_FORMAT, "Program headers size overflow");

	if (sz > SMALL_PAGE_SIZE)
		err(TEE_ERROR_NOT_SUPPORTED, "Cannot read program headers");

	elf->phdr = (void *)(va + elf->e_phoff);
}

static size_t roundup(size_t v)
{
	return ROUNDUP(v, SMALL_PAGE_SIZE);
}

static size_t rounddown(size_t v)
{
	return ROUNDDOWN(v, SMALL_PAGE_SIZE);
}

static void add_segment(struct ta_elf *elf, size_t offset, size_t vaddr,
			size_t filesz, size_t memsz, size_t flags, size_t align)
{
	struct segment *seg = calloc(1, sizeof(*seg));

	if (!seg)
		err(TEE_ERROR_OUT_OF_MEMORY, "calloc");

	if (memsz < filesz)
		err(TEE_ERROR_BAD_FORMAT, "Memsz smaller than filesz");

	seg->offset = offset;
	seg->vaddr = vaddr;
	seg->filesz = filesz;
	seg->memsz = memsz;
	seg->flags = flags;
	seg->align = align;

	TAILQ_INSERT_TAIL(&elf->segs, seg, link);
}

static void parse_load_segments(struct ta_elf *elf)
{
	size_t n = 0;

	if (elf->is_32bit) {
		Elf32_Phdr *phdr = elf->phdr;

		for (n = 0; n < elf->e_phnum; n++)
			if (phdr[n].p_type == PT_LOAD) {
				add_segment(elf, phdr[n].p_offset,
					    phdr[n].p_vaddr, phdr[n].p_filesz,
					    phdr[n].p_memsz, phdr[n].p_flags,
					    phdr[n].p_align);
			} else if (phdr[n].p_type == PT_ARM_EXIDX) {
				elf->exidx_start = phdr[n].p_vaddr;
				elf->exidx_size = phdr[n].p_filesz;
			}
	} else {
		Elf64_Phdr *phdr = elf->phdr;

		for (n = 0; n < elf->e_phnum; n++)
			if (phdr[n].p_type == PT_LOAD)
				add_segment(elf, phdr[n].p_offset,
					    phdr[n].p_vaddr, phdr[n].p_filesz,
					    phdr[n].p_memsz, phdr[n].p_flags,
					    phdr[n].p_align);
	}
}
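
/*
 * For illustration, the program headers of a typical TA might look like
 * this (all numbers are made up for the example):
 *
 *   Type     Offset   VirtAddr   FileSiz   MemSiz    Flg  Align
 *   LOAD     0x0000   0x0000     0x6000    0x6000    R E  0x1000
 *   LOAD     0x6000   0x7000     0x0800    0x1000    RW   0x1000
 *   DYNAMIC  0x6100   0x7100     0x0150    0x0150    RW   0x8
 *
 * Only the PT_LOAD entries become struct segment entries here;
 * PT_ARM_EXIDX (32-bit only) is recorded for exception unwinding and
 * PT_DYNAMIC is parsed separately.
 */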

static void copy_remapped_to(struct ta_elf *elf, const struct segment *seg)
{
	uint8_t *dst = (void *)(seg->vaddr + elf->load_addr);
	size_t n = 0;
	size_t offs = seg->offset;
	size_t num_bytes = seg->filesz;

	if (offs < elf->max_offs) {
		n = MIN(elf->max_offs - offs, num_bytes);
		memcpy(dst, (void *)(elf->max_addr + offs - elf->max_offs), n);
		dst += n;
		offs += n;
		num_bytes -= n;
	}

	if (num_bytes) {
		TEE_Result res = sys_copy_from_ta_bin(dst, num_bytes,
						      elf->handle, offs);

		if (res)
			err(res, "sys_copy_from_ta_bin");
		elf->max_offs += offs;
	}
}

static void adjust_segments(struct ta_elf *elf)
{
	struct segment *seg = NULL;
	struct segment *prev_seg = NULL;
	size_t prev_end_addr = 0;
	size_t align = 0;
	size_t mask = 0;

	/* Sanity check */
	TAILQ_FOREACH(seg, &elf->segs, link) {
		size_t dummy __maybe_unused = 0;

		assert(seg->align >= SMALL_PAGE_SIZE);
		assert(!ADD_OVERFLOW(seg->vaddr, seg->memsz, &dummy));
		assert(seg->filesz <= seg->memsz);
		assert((seg->offset & SMALL_PAGE_MASK) ==
		       (seg->vaddr & SMALL_PAGE_MASK));

		prev_seg = TAILQ_PREV(seg, segment_head, link);
		if (prev_seg) {
			assert(seg->vaddr >= prev_seg->vaddr + prev_seg->memsz);
			assert(seg->offset >=
			       prev_seg->offset + prev_seg->filesz);
		}
		if (!align)
			align = seg->align;
		assert(align == seg->align);
	}

	mask = align - 1;

	seg = TAILQ_FIRST(&elf->segs);
	if (seg)
		seg = TAILQ_NEXT(seg, link);
	while (seg) {
		prev_seg = TAILQ_PREV(seg, segment_head, link);
		prev_end_addr = prev_seg->vaddr + prev_seg->memsz;

		/*
		 * This segment may overlap with the last "page" in the
		 * previous segment in two different ways:
		 * 1. Virtual address (and offset) overlaps =>
		 *    Permissions need to be merged. The offset must have
		 *    the same SMALL_PAGE_MASK bits as the vaddr, since
		 *    vaddr and offset must line up with the previous
		 *    segment.
		 *
		 * 2. Only offset overlaps =>
		 *    The same page in the ELF is mapped at two different
		 *    virtual addresses. As a limitation this segment must
		 *    be mapped as writeable.
		 */

		/* Case 1. */
		if (rounddown(seg->vaddr) < prev_end_addr) {
			assert((seg->vaddr & mask) == (seg->offset & mask));
			assert(prev_seg->memsz == prev_seg->filesz);

			/*
			 * Merge the segments and their permissions.
			 * Note that there may be a small hole between
			 * the two segments.
			 */
			prev_seg->filesz = seg->vaddr + seg->filesz -
					   prev_seg->vaddr;
			prev_seg->memsz = seg->vaddr + seg->memsz -
					   prev_seg->vaddr;
			prev_seg->flags |= seg->flags;

			TAILQ_REMOVE(&elf->segs, seg, link);
			free(seg);
			seg = TAILQ_NEXT(prev_seg, link);
			continue;
		}

		/* Case 2. */
		if ((seg->offset & mask) &&
		    rounddown(seg->offset) <
		    (prev_seg->offset + prev_seg->filesz)) {
			assert(seg->flags & PF_W);
			seg->remapped_writeable = true;
		}

		/*
		 * No overlap, but we may need to align address, offset and
		 * size.
		 */
		seg->filesz += seg->vaddr - rounddown(seg->vaddr);
		seg->memsz += seg->vaddr - rounddown(seg->vaddr);
		seg->vaddr = rounddown(seg->vaddr);
		seg->offset = rounddown(seg->offset);
		seg = TAILQ_NEXT(seg, link);
	}
}
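
/*
 * Worked example (illustrative) of case 1 above, with 4 KiB pages: a
 * previous segment at vaddr 0x0000 with memsz 0x1100 ends inside the
 * page at 0x1000. If the next segment starts at vaddr 0x1800, then
 * rounddown(0x1800) == 0x1000 < 0x1100, so the two segments share that
 * page and are merged into one segment with the union of their
 * permission flags, letting the page be mapped exactly once.
 */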

static void populate_segments_legacy(struct ta_elf *elf)
{
	TEE_Result res = TEE_SUCCESS;
	struct segment *seg = NULL;
	vaddr_t va = 0;

	assert(elf->is_legacy);
	TAILQ_FOREACH(seg, &elf->segs, link) {
		struct segment *last_seg = TAILQ_LAST(&elf->segs, segment_head);
		size_t pad_end = roundup(last_seg->vaddr + last_seg->memsz -
					 seg->vaddr - seg->memsz);
		size_t num_bytes = roundup(seg->memsz);

		if (!elf->load_addr)
			va = 0;
		else
			va = seg->vaddr + elf->load_addr;

		if (!(seg->flags & PF_R))
			err(TEE_ERROR_NOT_SUPPORTED,
			    "Segment must be readable");

		res = sys_map_zi(num_bytes, 0, &va, 0, pad_end);
		if (res)
			err(res, "sys_map_zi");
		res = sys_copy_from_ta_bin((void *)va, seg->filesz,
					   elf->handle, seg->offset);
		if (res)
			err(res, "sys_copy_from_ta_bin");

		if (!elf->load_addr)
			elf->load_addr = va;
		elf->max_addr = va + num_bytes;
		elf->max_offs = seg->offset + seg->filesz;
	}
}

static size_t get_pad_begin(void)
{
#ifdef CFG_TA_ASLR
	size_t min = CFG_TA_ASLR_MIN_OFFSET_PAGES;
	size_t max = CFG_TA_ASLR_MAX_OFFSET_PAGES;
	TEE_Result res = TEE_SUCCESS;
	uint32_t rnd32 = 0;
	size_t rnd = 0;

	COMPILE_TIME_ASSERT(CFG_TA_ASLR_MIN_OFFSET_PAGES <
			    CFG_TA_ASLR_MAX_OFFSET_PAGES);
	if (max > min) {
		res = utee_cryp_random_number_generate(&rnd32, sizeof(rnd32));
		if (res) {
			DMSG("Random read failed: %#"PRIx32, res);
			return min * SMALL_PAGE_SIZE;
		}
		rnd = rnd32 % (max - min);
	}

	return (min + rnd) * SMALL_PAGE_SIZE;
#else /*!CFG_TA_ASLR*/
	return 0;
#endif /*!CFG_TA_ASLR*/
}
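
/*
 * Example (illustrative): with CFG_TA_ASLR_MIN_OFFSET_PAGES=0 and
 * CFG_TA_ASLR_MAX_OFFSET_PAGES=128, rnd is drawn from [0, 128) so the
 * mapping is shifted by 0 to 127 pages (0 to 508 KiB with 4 KiB pages).
 * If the RNG read fails the minimum offset is used, degrading ASLR
 * rather than failing the load.
 */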

static void populate_segments(struct ta_elf *elf)
{
	TEE_Result res = TEE_SUCCESS;
	struct segment *seg = NULL;
	vaddr_t va = 0;
	size_t pad_begin = 0;

	assert(!elf->is_legacy);
	TAILQ_FOREACH(seg, &elf->segs, link) {
		struct segment *last_seg = TAILQ_LAST(&elf->segs, segment_head);
		size_t pad_end = roundup(last_seg->vaddr + last_seg->memsz -
					 seg->vaddr - seg->memsz);

		if (seg->remapped_writeable) {
			size_t num_bytes = roundup(seg->vaddr + seg->memsz) -
					   rounddown(seg->vaddr);

			assert(elf->load_addr);
			va = rounddown(elf->load_addr + seg->vaddr);
			assert(va >= elf->max_addr);
			res = sys_map_zi(num_bytes, 0, &va, 0, pad_end);
			if (res)
				err(res, "sys_map_zi");

			copy_remapped_to(elf, seg);
			elf->max_addr = va + num_bytes;
		} else {
			uint32_t flags = 0;
			size_t filesz = seg->filesz;
			size_t memsz = seg->memsz;
			size_t offset = seg->offset;
			size_t vaddr = seg->vaddr;

			if (offset < elf->max_offs) {
				/*
				 * We're in a load segment which overlaps
				 * with (or is covered by) the first page
				 * of a shared library.
				 */
				if (vaddr + filesz < SMALL_PAGE_SIZE) {
					size_t num_bytes = 0;

					/*
					 * If this segment is completely
					 * covered, take next.
					 */
					if (vaddr + memsz <= SMALL_PAGE_SIZE)
						continue;

					/*
					 * All data of the segment is
					 * loaded, but we need to zero
					 * extend it.
					 */
					va = elf->max_addr;
					num_bytes = roundup(vaddr + memsz) -
						    roundup(vaddr) -
						    SMALL_PAGE_SIZE;
					assert(num_bytes);
					res = sys_map_zi(num_bytes, 0, &va, 0,
							 0);
					if (res)
						err(res, "sys_map_zi");
					elf->max_addr = roundup(va + num_bytes);
					continue;
				}

				/* Partial overlap, remove the first page. */
				vaddr += SMALL_PAGE_SIZE;
				filesz -= SMALL_PAGE_SIZE;
				memsz -= SMALL_PAGE_SIZE;
				offset += SMALL_PAGE_SIZE;
			}

			if (!elf->load_addr) {
				va = 0;
				pad_begin = get_pad_begin();
				/*
				 * If mapping with pad_begin fails we'll
				 * retry without pad_begin, effectively
				 * disabling ASLR for the current ELF file.
				 */
			} else {
				va = vaddr + elf->load_addr;
				pad_begin = 0;
			}

			if (seg->flags & PF_W)
				flags |= PTA_SYSTEM_MAP_FLAG_WRITEABLE;
			else
				flags |= PTA_SYSTEM_MAP_FLAG_SHAREABLE;
			if (seg->flags & PF_X)
				flags |= PTA_SYSTEM_MAP_FLAG_EXECUTABLE;
			if (!(seg->flags & PF_R))
				err(TEE_ERROR_NOT_SUPPORTED,
				    "Segment must be readable");
			if (flags & PTA_SYSTEM_MAP_FLAG_WRITEABLE) {
				res = sys_map_zi(memsz, 0, &va, pad_begin,
						 pad_end);
				if (pad_begin && res == TEE_ERROR_OUT_OF_MEMORY)
					res = sys_map_zi(memsz, 0, &va, 0,
							 pad_end);
				if (res)
					err(res, "sys_map_zi");
				res = sys_copy_from_ta_bin((void *)va, filesz,
							   elf->handle, offset);
				if (res)
					err(res, "sys_copy_from_ta_bin");
			} else {
				if (filesz != memsz)
					err(TEE_ERROR_BAD_FORMAT,
					    "Filesz and memsz mismatch");
				res = sys_map_ta_bin(&va, filesz, flags,
						     elf->handle, offset,
						     pad_begin, pad_end);
				if (pad_begin && res == TEE_ERROR_OUT_OF_MEMORY)
					res = sys_map_ta_bin(&va, filesz, flags,
							     elf->handle,
							     offset, 0,
							     pad_end);
				if (res)
					err(res, "sys_map_ta_bin");
			}

			if (!elf->load_addr)
				elf->load_addr = va;
			elf->max_addr = roundup(va + memsz);
			elf->max_offs += filesz;
		}
	}
}
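
/*
 * Summary of the mapping strategy above (illustrative):
 *
 *   segment kind      syscalls used            backing
 *   writeable (PF_W)  sys_map_zi() +           anonymous zero pages with a
 *                     sys_copy_from_ta_bin()   private copy of the data
 *   read-only/exec    sys_map_ta_bin()         shareable mapping of the TA
 *                                              binary (filesz must == memsz)
 */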

static void map_segments(struct ta_elf *elf)
{
	TEE_Result res = TEE_SUCCESS;

	parse_load_segments(elf);
	adjust_segments(elf);
	if (TAILQ_FIRST(&elf->segs)->offset < SMALL_PAGE_SIZE) {
		vaddr_t va = 0;
		size_t sz = elf->max_addr - elf->load_addr;
		struct segment *seg = TAILQ_LAST(&elf->segs, segment_head);
		size_t pad_begin = get_pad_begin();

		/*
		 * We're loading a library; if that ever changes, other
		 * parts of the code need to be updated too.
		 */
		assert(!elf->is_main);

		/*
		 * Now that we know how much virtual memory is needed move
		 * the already mapped part to a location which can
		 * accommodate us.
		 */
		res = sys_remap(elf->load_addr, &va, sz, pad_begin,
				roundup(seg->vaddr + seg->memsz));
		if (res == TEE_ERROR_OUT_OF_MEMORY)
			res = sys_remap(elf->load_addr, &va, sz, 0,
					roundup(seg->vaddr + seg->memsz));
		if (res)
			err(res, "sys_remap");
		elf->ehdr_addr = va;
		elf->load_addr = va;
		elf->max_addr = va + sz;
		elf->phdr = (void *)(va + elf->e_phoff);
	}
}

static void add_deps_from_segment(struct ta_elf *elf, unsigned int type,
				  vaddr_t addr, size_t memsz)
{
	size_t dyn_entsize = 0;
	size_t num_dyns = 0;
	size_t n = 0;
	unsigned int tag = 0;
	size_t val = 0;
	TEE_UUID uuid = { };
	char *str_tab = NULL;
	size_t str_tab_sz = 0;

	if (type != PT_DYNAMIC)
		return;

	check_phdr_in_range(elf, type, addr, memsz);

	if (elf->is_32bit)
		dyn_entsize = sizeof(Elf32_Dyn);
	else
		dyn_entsize = sizeof(Elf64_Dyn);

	assert(!(memsz % dyn_entsize));
	num_dyns = memsz / dyn_entsize;

	for (n = 0; n < num_dyns && !(str_tab && str_tab_sz); n++) {
		read_dyn(elf, addr, n, &tag, &val);
		if (tag == DT_STRTAB)
			str_tab = (char *)(val + elf->load_addr);
		else if (tag == DT_STRSZ)
			str_tab_sz = val;
	}
	check_range(elf, "Strtab", str_tab, str_tab_sz);

	for (n = 0; n < num_dyns; n++) {
		read_dyn(elf, addr, n, &tag, &val);
		if (tag != DT_NEEDED)
			continue;
		if (val >= str_tab_sz)
			err(TEE_ERROR_GENERIC,
			    "Offset into strtab out of range");
		tee_uuid_from_str(&uuid, str_tab + val);
		queue_elf(&uuid);
	}
}
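
/*
 * For illustration: shared libraries for TAs are named by UUID, so the
 * DT_NEEDED entries processed above resolve (via DT_STRTAB) to strings
 * such as "b3091a65-9751-4784-abf7-0298a7cc35ba" (made-up example).
 * Each string is parsed back into a TEE_UUID and queued, which is how
 * the dependency graph is discovered recursively.
 */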

static void add_dependencies(struct ta_elf *elf)
{
	size_t n = 0;

	if (elf->is_32bit) {
		Elf32_Phdr *phdr = elf->phdr;

		for (n = 0; n < elf->e_phnum; n++)
			add_deps_from_segment(elf, phdr[n].p_type,
					      phdr[n].p_vaddr, phdr[n].p_memsz);
	} else {
		Elf64_Phdr *phdr = elf->phdr;

		for (n = 0; n < elf->e_phnum; n++)
			add_deps_from_segment(elf, phdr[n].p_type,
					      phdr[n].p_vaddr, phdr[n].p_memsz);
	}
}

static void copy_section_headers(struct ta_elf *elf)
{
	TEE_Result res = TEE_SUCCESS;
	size_t sz = 0;
	size_t offs = 0;

	if (MUL_OVERFLOW(elf->e_shnum, elf->e_shentsize, &sz))
		err(TEE_ERROR_BAD_FORMAT, "Shdr size overflow");

	elf->shdr = malloc(sz);
	if (!elf->shdr)
		err(TEE_ERROR_OUT_OF_MEMORY, "malloc");

	/*
	 * We're assuming that the section headers come after the load
	 * segments, but in a very small dynamically linked library the
	 * section headers can still end up (possibly only partially) in
	 * the first mapped page.
	 */
	if (elf->e_shoff < SMALL_PAGE_SIZE) {
		assert(!elf->is_main);
		offs = MIN(SMALL_PAGE_SIZE - elf->e_shoff, sz);
		memcpy(elf->shdr, (void *)(elf->load_addr + elf->e_shoff),
		       offs);
	}

	if (offs < sz) {
		res = sys_copy_from_ta_bin((uint8_t *)elf->shdr + offs,
					   sz - offs, elf->handle,
					   elf->e_shoff + offs);
		if (res)
			err(res, "sys_copy_from_ta_bin");
	}
}

static void close_handle(struct ta_elf *elf)
{
	TEE_Result res = sys_close_ta_bin(elf->handle);

	if (res)
		err(res, "sys_close_ta_bin");
	elf->handle = -1;
}

static void clean_elf_load_main(struct ta_elf *elf)
{
	TEE_Result res = TEE_SUCCESS;

	/*
	 * Clean up from last attempt to load
	 */
	res = sys_unmap(elf->ehdr_addr, SMALL_PAGE_SIZE);
	if (res)
		err(res, "sys_unmap");

	while (!TAILQ_EMPTY(&elf->segs)) {
		struct segment *seg = TAILQ_FIRST(&elf->segs);
		vaddr_t va = 0;
		size_t num_bytes = 0;

		va = rounddown(elf->load_addr + seg->vaddr);
		if (seg->remapped_writeable)
			num_bytes = roundup(seg->vaddr + seg->memsz) -
				    rounddown(seg->vaddr);
		else
			num_bytes = seg->memsz;

		res = sys_unmap(va, num_bytes);
		if (res)
			err(res, "sys_unmap");

		TAILQ_REMOVE(&elf->segs, seg, link);
		free(seg);
	}

	free(elf->shdr);
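	/*
	 * Note: the memset below relies on the field layout of struct
	 * ta_elf in ta_elf.h: every field from is_32bit up to, but not
	 * including, uuid is cleared, while fields outside that range
	 * (for example uuid and the queue linkage) survive for the
	 * reload.
	 */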
	memset(&elf->is_32bit, 0,
	       (vaddr_t)&elf->uuid - (vaddr_t)&elf->is_32bit);

	TAILQ_INIT(&elf->segs);
}

static void load_main(struct ta_elf *elf)
{
	init_elf(elf);
	map_segments(elf);
	populate_segments(elf);
	add_dependencies(elf);
	copy_section_headers(elf);
	save_symtab(elf);
	close_handle(elf);

	elf->head = (struct ta_head *)elf->load_addr;
	if (elf->head->depr_entry != UINT64_MAX) {
		/*
		 * Legacy TAs set their entry point in ta_head. For
		 * non-legacy TAs the ELF entry point is used instead and
		 * the ta_head entry point is left set to UINT64_MAX to
		 * indicate that it's not used.
		 *
		 * NB, everything before the commit a73b5878c89d ("Replace
		 * ta_head.entry with elf entry") is considered a legacy TA
		 * by ldelf.
		 *
		 * Legacy TAs cannot be mapped with shared memory segments
		 * so restart the mapping if it turned out we're loading a
		 * legacy TA.
		 */

		DMSG("Reloading TA %pUl as legacy TA", (void *)&elf->uuid);
		clean_elf_load_main(elf);
		elf->is_legacy = true;
		init_elf(elf);
		map_segments(elf);
		populate_segments_legacy(elf);
		add_dependencies(elf);
		copy_section_headers(elf);
		save_symtab(elf);
		close_handle(elf);
		elf->head = (struct ta_head *)elf->load_addr;
		/*
		 * Check that the TA is still a legacy TA; if it isn't,
		 * give up now since we're likely under attack.
		 */
		if (elf->head->depr_entry == UINT64_MAX)
			err(TEE_ERROR_GENERIC,
			    "TA %pUl was changed on disk to non-legacy",
			    (void *)&elf->uuid);
	}
}

void ta_elf_load_main(const TEE_UUID *uuid, uint32_t *is_32bit, uint64_t *sp,
		      uint32_t *ta_flags)
{
	struct ta_elf *elf = queue_elf(uuid);
	vaddr_t va = 0;
	TEE_Result res = TEE_SUCCESS;

	assert(elf);
	elf->is_main = true;

	load_main(elf);

	*is_32bit = elf->is_32bit;
	res = sys_map_zi(elf->head->stack_size, 0, &va, 0, 0);
	if (res)
		err(res, "sys_map_zi stack");

	if (elf->head->flags & ~TA_FLAGS_MASK)
		err(TEE_ERROR_BAD_FORMAT, "Invalid TA flag(s) %#"PRIx32,
		    elf->head->flags & ~TA_FLAGS_MASK);

	*ta_flags = elf->head->flags;
	*sp = va + elf->head->stack_size;
	ta_stack = va;
	ta_stack_size = elf->head->stack_size;
}
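
/*
 * Illustrative sketch (assumed caller, along the lines of ldelf's main
 * entry point) of how the functions in this file combine when loading a
 * TA and its dependencies:
 */
#if 0
	TEE_UUID uuid = { };	/* UUID of the TA to load */
	struct ta_elf *elf = NULL;
	uint32_t is_32bit = 0;
	uint32_t ta_flags = 0;
	uint64_t entry = 0;
	uint64_t sp = 0;

	ta_elf_load_main(&uuid, &is_32bit, &sp, &ta_flags);
	/* Loading a dependency may queue further dependencies */
	TAILQ_FOREACH(elf, &main_elf_queue, link)
		ta_elf_load_dependency(elf, is_32bit);
	TAILQ_FOREACH(elf, &main_elf_queue, link) {
		ta_elf_relocate(elf);
		ta_elf_finalize_mappings(elf);
	}
	ta_elf_finalize_load_main(&entry);
#endif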

void ta_elf_finalize_load_main(uint64_t *entry)
{
	struct ta_elf *elf = TAILQ_FIRST(&main_elf_queue);
	TEE_Result res = TEE_SUCCESS;

	assert(elf->is_main);

	res = ta_elf_set_init_fini_info(elf->is_32bit);
	if (res)
		err(res, "ta_elf_set_init_fini_info");

	if (elf->is_legacy)
		*entry = elf->head->depr_entry;
	else
		*entry = elf->e_entry + elf->load_addr;
}

void ta_elf_load_dependency(struct ta_elf *elf, bool is_32bit)
{
	if (elf->is_main)
		return;

	init_elf(elf);
	if (elf->is_32bit != is_32bit)
		err(TEE_ERROR_BAD_FORMAT, "ELF %pUl is %sbit (expected %sbit)",
		    (void *)&elf->uuid, elf->is_32bit ? "32" : "64",
		    is_32bit ? "32" : "64");

	map_segments(elf);
	populate_segments(elf);
	add_dependencies(elf);
	copy_section_headers(elf);
	save_symtab(elf);
	close_handle(elf);
}

void ta_elf_finalize_mappings(struct ta_elf *elf)
{
	TEE_Result res = TEE_SUCCESS;
	struct segment *seg = NULL;

	if (!elf->is_legacy)
		return;

	TAILQ_FOREACH(seg, &elf->segs, link) {
		vaddr_t va = elf->load_addr + seg->vaddr;
		uint32_t flags = 0;

		if (seg->flags & PF_W)
			flags |= PTA_SYSTEM_MAP_FLAG_WRITEABLE;
		if (seg->flags & PF_X)
			flags |= PTA_SYSTEM_MAP_FLAG_EXECUTABLE;

		res = sys_set_prot(va, seg->memsz, flags);
		if (res)
			err(res, "sys_set_prot");
	}
}

static void __printf(3, 4) print_wrapper(void *pctx, print_func_t print_func,
					 const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	print_func(pctx, fmt, ap);
	va_end(ap);
}

static void print_seg(void *pctx, print_func_t print_func,
		      size_t idx __maybe_unused, int elf_idx __maybe_unused,
		      vaddr_t va __maybe_unused, paddr_t pa __maybe_unused,
		      size_t sz __maybe_unused, uint32_t flags)
{
	int width __maybe_unused = 8;
	char desc[14] __maybe_unused = "";
	char flags_str[] __maybe_unused = "----";

	if (elf_idx > -1) {
		snprintf(desc, sizeof(desc), " [%d]", elf_idx);
	} else {
		if (flags & DUMP_MAP_EPHEM)
			snprintf(desc, sizeof(desc), " (param)");
		if (flags & DUMP_MAP_LDELF)
			snprintf(desc, sizeof(desc), " (ldelf)");
		if (va == ta_stack)
			snprintf(desc, sizeof(desc), " (stack)");
	}

	if (flags & DUMP_MAP_READ)
		flags_str[0] = 'r';
	if (flags & DUMP_MAP_WRITE)
		flags_str[1] = 'w';
	if (flags & DUMP_MAP_EXEC)
		flags_str[2] = 'x';
	if (flags & DUMP_MAP_SECURE)
		flags_str[3] = 's';

	print_wrapper(pctx, print_func,
		      "region %2zu: va 0x%0*"PRIxVA" pa 0x%0*"PRIxPA" size 0x%06zx flags %s%s\n",
		      idx, width, va, width, pa, sz, flags_str, desc);
}
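
/*
 * Example of a line produced by print_seg() (illustrative values):
 *
 *   region  1: va 0x00115000 pa 0x0e100000 size 0x008000 flags r-xs [0]
 */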

static bool get_next_in_order(struct ta_elf_queue *elf_queue,
			      struct ta_elf **elf, struct segment **seg,
			      size_t *elf_idx)
{
	struct ta_elf *e = NULL;
	struct segment *s = NULL;
	size_t idx = 0;
	vaddr_t va = 0;
	struct ta_elf *e2 = NULL;
	size_t i2 = 0;

	assert(elf && seg && elf_idx);
	e = *elf;
	s = *seg;
	assert((e == NULL && s == NULL) || (e != NULL && s != NULL));

	if (s) {
		s = TAILQ_NEXT(s, link);
		if (s) {
			*seg = s;
			return true;
		}
	}

	if (e)
		va = e->load_addr;

	/* Find the ELF with next load address */
	e = NULL;
	TAILQ_FOREACH(e2, elf_queue, link) {
		if (e2->load_addr > va) {
			if (!e || e2->load_addr < e->load_addr) {
				e = e2;
				idx = i2;
			}
		}
		i2++;
	}
	if (!e)
		return false;

	*elf = e;
	*seg = TAILQ_FIRST(&e->segs);
	*elf_idx = idx;
	return true;
}

void ta_elf_print_mappings(void *pctx, print_func_t print_func,
			   struct ta_elf_queue *elf_queue, size_t num_maps,
			   struct dump_map *maps, vaddr_t mpool_base)
{
	struct segment *seg = NULL;
	struct ta_elf *elf = NULL;
	size_t elf_idx = 0;
	size_t idx = 0;
	size_t map_idx = 0;

	/*
	 * Loop over all segments and maps, printing the virtual addresses
	 * in order. A segment takes priority if a virtual address is
	 * present in both a map and a segment.
	 */
	get_next_in_order(elf_queue, &elf, &seg, &elf_idx);
	while (true) {
		vaddr_t va = -1;
		size_t sz = 0;
		uint32_t flags = DUMP_MAP_SECURE;
		size_t offs = 0;

		if (seg) {
			va = rounddown(seg->vaddr + elf->load_addr);
			sz = roundup(seg->vaddr + seg->memsz) -
				     rounddown(seg->vaddr);
		}

		while (map_idx < num_maps && maps[map_idx].va <= va) {
			uint32_t f = 0;

			/* If there's a match, it should be the same map */
			if (maps[map_idx].va == va) {
				/*
				 * In shared libraries the first page is
				 * mapped separately with the rest of that
				 * segment following back to back in a
				 * separate entry.
				 */
				if (map_idx + 1 < num_maps &&
				    maps[map_idx].sz == SMALL_PAGE_SIZE) {
					vaddr_t next_va = maps[map_idx].va +
							  maps[map_idx].sz;
					size_t comb_sz = maps[map_idx].sz +
							 maps[map_idx + 1].sz;

					if (next_va == maps[map_idx + 1].va &&
					    comb_sz == sz &&
					    maps[map_idx].flags ==
					    maps[map_idx + 1].flags) {
						/* Skip this and next entry */
						map_idx += 2;
						continue;
					}
				}
				assert(maps[map_idx].sz == sz);
			} else if (maps[map_idx].va < va) {
				if (maps[map_idx].va == mpool_base)
					f |= DUMP_MAP_LDELF;
				print_seg(pctx, print_func, idx, -1,
					  maps[map_idx].va, maps[map_idx].pa,
					  maps[map_idx].sz,
					  maps[map_idx].flags | f);
				idx++;
			}
			map_idx++;
		}

		if (!seg)
			break;

		offs = rounddown(seg->offset);
		if (seg->flags & PF_R)
			flags |= DUMP_MAP_READ;
		if (seg->flags & PF_W)
			flags |= DUMP_MAP_WRITE;
		if (seg->flags & PF_X)
			flags |= DUMP_MAP_EXEC;

		print_seg(pctx, print_func, idx, elf_idx, va, offs, sz, flags);
		idx++;

		if (!get_next_in_order(elf_queue, &elf, &seg, &elf_idx))
			seg = NULL;
	}

	elf_idx = 0;
	TAILQ_FOREACH(elf, elf_queue, link) {
		print_wrapper(pctx, print_func,
			      " [%zu] %pUl @ 0x%0*"PRIxVA"\n",
			      elf_idx, (void *)&elf->uuid, 8, elf->load_addr);
		elf_idx++;
	}
}

#ifdef CFG_UNWIND
void ta_elf_stack_trace_a32(uint32_t regs[16])
{
	struct unwind_state_arm32 state = { };

	memcpy(state.registers, regs, sizeof(state.registers));
	print_stack_arm32(&state, ta_stack, ta_stack_size);
}

void ta_elf_stack_trace_a64(uint64_t fp, uint64_t sp, uint64_t pc)
{
	struct unwind_state_arm64 state = { .fp = fp, .sp = sp, .pc = pc };

	print_stack_arm64(&state, ta_stack, ta_stack_size);
}
#endif

TEE_Result ta_elf_add_library(const TEE_UUID *uuid)
{
	struct ta_elf *ta = TAILQ_FIRST(&main_elf_queue);
	struct ta_elf *lib = ta_elf_find_elf(uuid);
	struct ta_elf *elf = NULL;

	if (lib)
		return TEE_SUCCESS; /* Already mapped */

	lib = queue_elf_helper(uuid);
	if (!lib)
		return TEE_ERROR_OUT_OF_MEMORY;

	for (elf = lib; elf; elf = TAILQ_NEXT(elf, link))
		ta_elf_load_dependency(elf, ta->is_32bit);

	for (elf = lib; elf; elf = TAILQ_NEXT(elf, link)) {
		ta_elf_relocate(elf);
		ta_elf_finalize_mappings(elf);
	}

	for (elf = lib; elf; elf = TAILQ_NEXT(elf, link))
		DMSG("ELF (%pUl) at %#"PRIxVA,
		     (void *)&elf->uuid, elf->load_addr);

	return ta_elf_set_init_fini_info(ta->is_32bit);
}

/* Get address/size of .init_array and .fini_array from the dynamic segment */
static void get_init_fini_array(struct ta_elf *elf, unsigned int type,
				vaddr_t addr, size_t memsz, vaddr_t *init,
				size_t *init_cnt, vaddr_t *fini,
				size_t *fini_cnt)
{
	size_t addrsz = 0;
	size_t dyn_entsize = 0;
	size_t num_dyns = 0;
	size_t n = 0;
	unsigned int tag = 0;
	size_t val = 0;

	assert(type == PT_DYNAMIC);

	check_phdr_in_range(elf, type, addr, memsz);

	if (elf->is_32bit) {
		dyn_entsize = sizeof(Elf32_Dyn);
		addrsz = 4;
	} else {
		dyn_entsize = sizeof(Elf64_Dyn);
		addrsz = 8;
	}

	assert(!(memsz % dyn_entsize));
	num_dyns = memsz / dyn_entsize;

	for (n = 0; n < num_dyns; n++) {
		read_dyn(elf, addr, n, &tag, &val);
		if (tag == DT_INIT_ARRAY)
			*init = val + elf->load_addr;
		else if (tag == DT_FINI_ARRAY)
			*fini = val + elf->load_addr;
		else if (tag == DT_INIT_ARRAYSZ)
			*init_cnt = val / addrsz;
		else if (tag == DT_FINI_ARRAYSZ)
			*fini_cnt = val / addrsz;
	}
}
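
/*
 * Illustrative sketch: .init_array entries are emitted for instance when
 * TA code uses constructor attributes, e.g.:
 */
#if 0
static void __attribute__((constructor)) my_ctor(void)
{
	/* Runs during TA startup, before the TA entry points are called */
}
#endif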

/* Get address/size of .init_array and .fini_array in @elf (if present) */
static void elf_get_init_fini_array(struct ta_elf *elf, vaddr_t *init,
				    size_t *init_cnt, vaddr_t *fini,
				    size_t *fini_cnt)
{
	size_t n = 0;

	if (elf->is_32bit) {
		Elf32_Phdr *phdr = elf->phdr;

		for (n = 0; n < elf->e_phnum; n++) {
			if (phdr[n].p_type == PT_DYNAMIC) {
				get_init_fini_array(elf, phdr[n].p_type,
						    phdr[n].p_vaddr,
						    phdr[n].p_memsz,
						    init, init_cnt, fini,
						    fini_cnt);
				return;
			}
		}
	} else {
		Elf64_Phdr *phdr = elf->phdr;

		for (n = 0; n < elf->e_phnum; n++) {
			if (phdr[n].p_type == PT_DYNAMIC) {
				get_init_fini_array(elf, phdr[n].p_type,
						    phdr[n].p_vaddr,
						    phdr[n].p_memsz,
						    init, init_cnt, fini,
						    fini_cnt);
				return;
			}
		}
	}
}

static TEE_Result realloc_ifs(vaddr_t va, size_t cnt, bool is_32bit)
{
	struct __init_fini_info32 *info32 = (struct __init_fini_info32 *)va;
	struct __init_fini_info *info = (struct __init_fini_info *)va;
	struct __init_fini32 *ifs32 = NULL;
	struct __init_fini *ifs = NULL;
	size_t prev_cnt = 0;
	void *ptr = NULL;

	if (is_32bit) {
		ptr = (void *)(vaddr_t)info32->ifs;
		ptr = realloc(ptr, cnt * sizeof(struct __init_fini32));
		if (!ptr)
			return TEE_ERROR_OUT_OF_MEMORY;
		ifs32 = ptr;
		prev_cnt = info32->size;
		if (cnt > prev_cnt)
			memset(ifs32 + prev_cnt, 0,
			       (cnt - prev_cnt) * sizeof(*ifs32));
		info32->ifs = (uint32_t)(vaddr_t)ifs32;
		info32->size = cnt;
	} else {
		ptr = realloc(info->ifs, cnt * sizeof(struct __init_fini));
		if (!ptr)
			return TEE_ERROR_OUT_OF_MEMORY;
		ifs = ptr;
		prev_cnt = info->size;
		if (cnt > prev_cnt)
			memset(ifs + prev_cnt, 0,
			       (cnt - prev_cnt) * sizeof(*ifs));
		info->ifs = ifs;
		info->size = cnt;
	}

	return TEE_SUCCESS;
}

static void fill_ifs(vaddr_t va, size_t idx, struct ta_elf *elf, bool is_32bit)
{
	struct __init_fini_info32 *info32 = (struct __init_fini_info32 *)va;
	struct __init_fini_info *info = (struct __init_fini_info *)va;
	struct __init_fini32 *ifs32 = NULL;
	struct __init_fini *ifs = NULL;
	size_t init_cnt = 0;
	size_t fini_cnt = 0;
	vaddr_t init = 0;
	vaddr_t fini = 0;

	if (is_32bit) {
		assert(idx < info32->size);
		ifs32 = &((struct __init_fini32 *)(vaddr_t)info32->ifs)[idx];

		if (ifs32->flags & __IFS_VALID)
			return;

		elf_get_init_fini_array(elf, &init, &init_cnt, &fini,
					&fini_cnt);

		ifs32->init = (uint32_t)init;
		ifs32->init_size = init_cnt;

		ifs32->fini = (uint32_t)fini;
		ifs32->fini_size = fini_cnt;

		ifs32->flags |= __IFS_VALID;
	} else {
		assert(idx < info->size);
		ifs = &info->ifs[idx];

		if (ifs->flags & __IFS_VALID)
			return;

		elf_get_init_fini_array(elf, &init, &init_cnt, &fini,
					&fini_cnt);

		ifs->init = (void (**)(void))init;
		ifs->init_size = init_cnt;

		ifs->fini = (void (**)(void))fini;
		ifs->fini_size = fini_cnt;

		ifs->flags |= __IFS_VALID;
	}
}

/*
 * Set or update __init_fini_info in the TA with information from the ELF
 * queue
 */
TEE_Result ta_elf_set_init_fini_info(bool is_32bit)
{
	struct __init_fini_info *info = NULL;
	TEE_Result res = TEE_SUCCESS;
	struct ta_elf *elf = NULL;
	vaddr_t info_va = 0;
	size_t cnt = 0;

	res = ta_elf_resolve_sym("__init_fini_info", &info_va, NULL);
	if (res) {
		if (res == TEE_ERROR_ITEM_NOT_FOUND) {
			/* Older TA */
			return TEE_SUCCESS;
		}
		return res;
	}
	assert(info_va);

	info = (struct __init_fini_info *)info_va;
	if (info->reserved)
		return TEE_ERROR_NOT_SUPPORTED;

	TAILQ_FOREACH(elf, &main_elf_queue, link)
		cnt++;

	/* Queue has at least one file (main) */
	assert(cnt);

	/*
	 * Note: info points into the TA image (the resolved
	 * __init_fini_info symbol), so it must not be passed to free()
	 * on the error path.
	 */
	res = realloc_ifs(info_va, cnt, is_32bit);
	if (res)
		return res;

	cnt = 0;
	TAILQ_FOREACH(elf, &main_elf_queue, link) {
		fill_ifs(info_va, cnt, elf, is_32bit);
		cnt++;
	}

	return TEE_SUCCESS;
}
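
/*
 * Illustrative sketch (assumption: roughly what the TA runtime does with
 * the structure filled in above) of walking __init_fini_info to run the
 * constructors of every loaded ELF:
 */
#if 0
static void call_initializers(struct __init_fini_info *info)
{
	size_t n = 0;
	size_t m = 0;

	for (n = 0; n < info->size; n++) {
		struct __init_fini *ifs = &info->ifs[n];

		if (!(ifs->flags & __IFS_VALID))
			continue;
		for (m = 0; m < ifs->init_size; m++)
			ifs->init[m]();
	}
}
#endif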
1522