xref: /optee_os/ldelf/ta_elf.c (revision fe6849487f7845d474828af162ddd882cbd3bb99)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2019, Linaro Limited
4  */
5 
6 #include <assert.h>
7 #include <ctype.h>
8 #include <elf32.h>
9 #include <elf64.h>
10 #include <elf_common.h>
11 #include <ldelf.h>
12 #include <pta_system.h>
13 #include <stdio.h>
14 #include <stdlib.h>
15 #include <string_ext.h>
16 #include <string.h>
17 #include <tee_api_types.h>
18 #include <tee_internal_api_extensions.h>
19 #include <user_ta_header.h>
20 #include <utee_syscalls.h>
21 #include <util.h>
22 
23 #include "sys.h"
24 #include "ta_elf.h"
25 #include "unwind.h"
26 
27 static vaddr_t ta_stack;
28 static vaddr_t ta_stack_size;
29 
30 struct ta_elf_queue main_elf_queue = TAILQ_HEAD_INITIALIZER(main_elf_queue);
31 
32 /*
33  * Main application is always ID 1, shared libraries with TLS take IDs 2 and
34  * above
35  */
36 static void assign_tls_mod_id(struct ta_elf *elf)
37 {
38 	static size_t last_tls_mod_id = 1;
39 
40 	if (elf->is_main)
41 		assert(last_tls_mod_id == 1); /* Main always comes first */
42 	elf->tls_mod_id = last_tls_mod_id++;
43 }
44 
45 static struct ta_elf *queue_elf_helper(const TEE_UUID *uuid)
46 {
47 	struct ta_elf *elf = calloc(1, sizeof(*elf));
48 
49 	if (!elf)
50 		return NULL;
51 
52 	TAILQ_INIT(&elf->segs);
53 
54 	elf->uuid = *uuid;
55 	TAILQ_INSERT_TAIL(&main_elf_queue, elf, link);
56 	return elf;
57 }
58 
59 static struct ta_elf *queue_elf(const TEE_UUID *uuid)
60 {
61 	struct ta_elf *elf = ta_elf_find_elf(uuid);
62 
63 	if (elf)
64 		return NULL;
65 
66 	elf = queue_elf_helper(uuid);
67 	if (!elf)
68 		err(TEE_ERROR_OUT_OF_MEMORY, "queue_elf_helper");
69 
70 	return elf;
71 }
72 
73 struct ta_elf *ta_elf_find_elf(const TEE_UUID *uuid)
74 {
75 	struct ta_elf *elf = NULL;
76 
77 	TAILQ_FOREACH(elf, &main_elf_queue, link)
78 		if (!memcmp(uuid, &elf->uuid, sizeof(*uuid)))
79 			return elf;
80 
81 	return NULL;
82 }
83 
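/*
 * Note (added summary): validate the ELF header of a 32-bit image and
 * cache the fields needed later (entry point and program/section header
 * table geometry). Only little-endian ET_DYN objects for EM_ARM are
 * accepted, and hard-float images are rejected unless the TEE is built
 * with CFG_WITH_VFP.
 */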
84 static TEE_Result e32_parse_ehdr(struct ta_elf *elf, Elf32_Ehdr *ehdr)
85 {
86 	if (ehdr->e_ident[EI_VERSION] != EV_CURRENT ||
87 	    ehdr->e_ident[EI_CLASS] != ELFCLASS32 ||
88 	    ehdr->e_ident[EI_DATA] != ELFDATA2LSB ||
89 	    ehdr->e_ident[EI_OSABI] != ELFOSABI_NONE ||
90 	    ehdr->e_type != ET_DYN || ehdr->e_machine != EM_ARM ||
91 	    (ehdr->e_flags & EF_ARM_ABIMASK) != EF_ARM_ABI_VERSION ||
92 #ifndef CFG_WITH_VFP
93 	    (ehdr->e_flags & EF_ARM_ABI_FLOAT_HARD) ||
94 #endif
95 	    ehdr->e_phentsize != sizeof(Elf32_Phdr) ||
96 	    ehdr->e_shentsize != sizeof(Elf32_Shdr))
97 		return TEE_ERROR_BAD_FORMAT;
98 
99 	elf->is_32bit = true;
100 	elf->e_entry = ehdr->e_entry;
101 	elf->e_phoff = ehdr->e_phoff;
102 	elf->e_shoff = ehdr->e_shoff;
103 	elf->e_phnum = ehdr->e_phnum;
104 	elf->e_shnum = ehdr->e_shnum;
105 	elf->e_phentsize = ehdr->e_phentsize;
106 	elf->e_shentsize = ehdr->e_shentsize;
107 
108 	return TEE_SUCCESS;
109 }
110 
111 #ifdef ARM64
112 static TEE_Result e64_parse_ehdr(struct ta_elf *elf, Elf64_Ehdr *ehdr)
113 {
114 	if (ehdr->e_ident[EI_VERSION] != EV_CURRENT ||
115 	    ehdr->e_ident[EI_CLASS] != ELFCLASS64 ||
116 	    ehdr->e_ident[EI_DATA] != ELFDATA2LSB ||
117 	    ehdr->e_ident[EI_OSABI] != ELFOSABI_NONE ||
118 	    ehdr->e_type != ET_DYN || ehdr->e_machine != EM_AARCH64 ||
119 	    ehdr->e_flags || ehdr->e_phentsize != sizeof(Elf64_Phdr) ||
120 	    ehdr->e_shentsize != sizeof(Elf64_Shdr))
121 		return TEE_ERROR_BAD_FORMAT;
122 
123 
124 	elf->is_32bit = false;
125 	elf->e_entry = ehdr->e_entry;
126 	elf->e_phoff = ehdr->e_phoff;
127 	elf->e_shoff = ehdr->e_shoff;
128 	elf->e_phnum = ehdr->e_phnum;
129 	elf->e_shnum = ehdr->e_shnum;
130 	elf->e_phentsize = ehdr->e_phentsize;
131 	elf->e_shentsize = ehdr->e_shentsize;
132 
133 	return TEE_SUCCESS;
134 }
135 #else /*ARM64*/
136 static TEE_Result e64_parse_ehdr(struct ta_elf *elf __unused,
137 				 Elf64_Ehdr *ehdr __unused)
138 {
139 	return TEE_ERROR_NOT_SUPPORTED;
140 }
141 #endif /*ARM64*/
142 
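/*
 * Note (added summary): check that a program header of @type covering
 * [addr, addr + memsz) (addresses relative to the image start) fits in
 * what has been mapped so far, i.e. addr + memsz must not exceed
 * elf->max_addr - elf->load_addr.
 */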
143 static void check_phdr_in_range(struct ta_elf *elf, unsigned int type,
144 				vaddr_t addr, size_t memsz)
145 {
146 	vaddr_t max_addr = 0;
147 
148 	if (ADD_OVERFLOW(addr, memsz, &max_addr))
149 		err(TEE_ERROR_BAD_FORMAT, "Program header %#x overflow", type);
150 
151 	/*
152 	 * elf->load_addr and elf->max_addr both hold the
153 	 * final virtual addresses, while this program header is
154 	 * relative to 0.
155 	 */
156 	if (max_addr > elf->max_addr - elf->load_addr)
157 		err(TEE_ERROR_BAD_FORMAT, "Program header %#x out of bounds",
158 		    type);
159 }
160 
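/*
 * Note (added summary): read entry @idx of the dynamic array located at
 * image-relative address @addr, returning its tag and value for both
 * the ELF32 and the ELF64 layout.
 */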
161 static void read_dyn(struct ta_elf *elf, vaddr_t addr,
162 		     size_t idx, unsigned int *tag, size_t *val)
163 {
164 	if (elf->is_32bit) {
165 		Elf32_Dyn *dyn = (Elf32_Dyn *)(addr + elf->load_addr);
166 
167 		*tag = dyn[idx].d_tag;
168 		*val = dyn[idx].d_un.d_val;
169 	} else {
170 		Elf64_Dyn *dyn = (Elf64_Dyn *)(addr + elf->load_addr);
171 
172 		*tag = dyn[idx].d_tag;
173 		*val = dyn[idx].d_un.d_val;
174 	}
175 }
176 
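/*
 * Note (added summary): per the SysV gABI, the table referenced by
 * DT_HASH starts with two mandatory 32-bit words followed by the bucket
 * and chain arrays:
 *
 *   uint32_t nbucket;
 *   uint32_t nchain;
 *   uint32_t bucket[nbucket];
 *   uint32_t chain[nchain];
 *
 * save_hashtab_from_segment() only records the table's location from
 * the DT_HASH entry of the PT_DYNAMIC segment; check_hashtab() below
 * validates that the table fits in the mapped image.
 */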
177 static void save_hashtab_from_segment(struct ta_elf *elf, unsigned int type,
178 				      vaddr_t addr, size_t memsz)
179 {
180 	size_t dyn_entsize = 0;
181 	size_t num_dyns = 0;
182 	size_t n = 0;
183 	unsigned int tag = 0;
184 	size_t val = 0;
185 
186 	if (type != PT_DYNAMIC)
187 		return;
188 
189 	check_phdr_in_range(elf, type, addr, memsz);
190 
191 	if (elf->is_32bit)
192 		dyn_entsize = sizeof(Elf32_Dyn);
193 	else
194 		dyn_entsize = sizeof(Elf64_Dyn);
195 
196 	assert(!(memsz % dyn_entsize));
197 	num_dyns = memsz / dyn_entsize;
198 
199 	for (n = 0; n < num_dyns; n++) {
200 		read_dyn(elf, addr, n, &tag, &val);
201 		if (tag == DT_HASH) {
202 			elf->hashtab = (void *)(val + elf->load_addr);
203 			break;
204 		}
205 	}
206 }
207 
208 static void check_range(struct ta_elf *elf, const char *name, const void *ptr,
209 			size_t sz)
210 {
211 	size_t max_addr = 0;
212 
213 	if ((vaddr_t)ptr < elf->load_addr)
214 		err(TEE_ERROR_BAD_FORMAT, "%s %p out of range", name, ptr);
215 
216 	if (ADD_OVERFLOW((vaddr_t)ptr, sz, &max_addr))
217 		err(TEE_ERROR_BAD_FORMAT, "%s range overflow", name);
218 
219 	if (max_addr > elf->max_addr)
220 		err(TEE_ERROR_BAD_FORMAT,
221 		    "%s %p..%#zx out of range", name, ptr, max_addr);
222 }
223 
224 static void check_hashtab(struct ta_elf *elf, void *ptr, size_t num_buckets,
225 			  size_t num_chains)
226 {
227 	/*
228 	 * Start counting from 2 since the first two words are mandatory
229 	 * and hold num_buckets and num_chains. This function is called
230 	 * twice: first to check that there's indeed room for num_buckets
231 	 * and num_chains, and then to check that all of it fits.
232 	 * See http://www.sco.com/developers/gabi/latest/ch5.dynamic.html#hash
233 	 */
234 	size_t num_words = 2;
235 	size_t sz = 0;
236 
237 	if (!ALIGNMENT_IS_OK(ptr, uint32_t))
238 		err(TEE_ERROR_BAD_FORMAT, "Bad alignment of DT_HASH %p", ptr);
239 
240 	if (ADD_OVERFLOW(num_words, num_buckets, &num_words) ||
241 	    ADD_OVERFLOW(num_words, num_chains, &num_words) ||
242 	    MUL_OVERFLOW(num_words, sizeof(uint32_t), &sz))
243 		err(TEE_ERROR_BAD_FORMAT, "DT_HASH overflow");
244 
245 	check_range(elf, "DT_HASH", ptr, sz);
246 }
247 
248 static void save_hashtab(struct ta_elf *elf)
249 {
250 	uint32_t *hashtab = NULL;
251 	size_t n = 0;
252 
253 	if (elf->is_32bit) {
254 		Elf32_Phdr *phdr = elf->phdr;
255 
256 		for (n = 0; n < elf->e_phnum; n++)
257 			save_hashtab_from_segment(elf, phdr[n].p_type,
258 						  phdr[n].p_vaddr,
259 						  phdr[n].p_memsz);
260 	} else {
261 		Elf64_Phdr *phdr = elf->phdr;
262 
263 		for (n = 0; n < elf->e_phnum; n++)
264 			save_hashtab_from_segment(elf, phdr[n].p_type,
265 						  phdr[n].p_vaddr,
266 						  phdr[n].p_memsz);
267 	}
268 
269 	check_hashtab(elf, elf->hashtab, 0, 0);
270 	hashtab = elf->hashtab;
271 	check_hashtab(elf, elf->hashtab, hashtab[0], hashtab[1]);
272 }
273 
274 static void e32_save_symtab(struct ta_elf *elf, size_t tab_idx)
275 {
276 	Elf32_Shdr *shdr = elf->shdr;
277 	size_t str_idx = shdr[tab_idx].sh_link;
278 
279 	elf->dynsymtab = (void *)(shdr[tab_idx].sh_addr + elf->load_addr);
280 	if (!ALIGNMENT_IS_OK(elf->dynsymtab, Elf32_Sym))
281 		err(TEE_ERROR_BAD_FORMAT, "Bad alignment of dynsymtab %p",
282 		    elf->dynsymtab);
283 	check_range(elf, "Dynsymtab", elf->dynsymtab, shdr[tab_idx].sh_size);
284 
285 	if (shdr[tab_idx].sh_size % sizeof(Elf32_Sym))
286 		err(TEE_ERROR_BAD_FORMAT,
287 		    "Size of dynsymtab not an even multiple of Elf32_Sym");
288 	elf->num_dynsyms = shdr[tab_idx].sh_size / sizeof(Elf32_Sym);
289 
290 	if (str_idx >= elf->e_shnum)
291 		err(TEE_ERROR_BAD_FORMAT, "Dynstr section index out of range");
292 	elf->dynstr = (void *)(shdr[str_idx].sh_addr + elf->load_addr);
293 	check_range(elf, "Dynstr", elf->dynstr, shdr[str_idx].sh_size);
294 
295 	elf->dynstr_size = shdr[str_idx].sh_size;
296 }
297 
298 static void e64_save_symtab(struct ta_elf *elf, size_t tab_idx)
299 {
300 	Elf64_Shdr *shdr = elf->shdr;
301 	size_t str_idx = shdr[tab_idx].sh_link;
302 
303 	elf->dynsymtab = (void *)(vaddr_t)(shdr[tab_idx].sh_addr +
304 					   elf->load_addr);
305 
306 	if (!ALIGNMENT_IS_OK(elf->dynsymtab, Elf64_Sym))
307 		err(TEE_ERROR_BAD_FORMAT, "Bad alignment of .dynsym/DYNSYM %p",
308 		    elf->dynsymtab);
309 	check_range(elf, ".dynsym/DYNSYM", elf->dynsymtab,
310 		    shdr[tab_idx].sh_size);
311 
312 	if (shdr[tab_idx].sh_size % sizeof(Elf64_Sym))
313 		err(TEE_ERROR_BAD_FORMAT,
314 		    "Size of .dynsym/DYNSYM not an even multiple of Elf64_Sym");
315 	elf->num_dynsyms = shdr[tab_idx].sh_size / sizeof(Elf64_Sym);
316 
317 	if (str_idx >= elf->e_shnum)
318 		err(TEE_ERROR_BAD_FORMAT,
319 		    ".dynstr/STRTAB section index out of range");
320 	elf->dynstr = (void *)(vaddr_t)(shdr[str_idx].sh_addr + elf->load_addr);
321 	check_range(elf, ".dynstr/STRTAB", elf->dynstr, shdr[str_idx].sh_size);
322 
323 	elf->dynstr_size = shdr[str_idx].sh_size;
324 }
325 
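/*
 * Note (added summary): locate the SHT_DYNSYM section header and record
 * the dynamic symbol table and its associated string table, then the
 * DT_HASH table; these are used for symbol resolution later on.
 */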
326 static void save_symtab(struct ta_elf *elf)
327 {
328 	size_t n = 0;
329 
330 	if (elf->is_32bit) {
331 		Elf32_Shdr *shdr = elf->shdr;
332 
333 		for (n = 0; n < elf->e_shnum; n++) {
334 			if (shdr[n].sh_type == SHT_DYNSYM) {
335 				e32_save_symtab(elf, n);
336 				break;
337 			}
338 		}
339 	} else {
340 		Elf64_Shdr *shdr = elf->shdr;
341 
342 		for (n = 0; n < elf->e_shnum; n++) {
343 			if (shdr[n].sh_type == SHT_DYNSYM) {
344 				e64_save_symtab(elf, n);
345 				break;
346 			}
347 		}
348 
349 	}
350 
351 	save_hashtab(elf);
352 }
353 
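/*
 * Note (added summary): open the TA binary and map its first page so
 * the ELF and program headers can be inspected. For libraries, where
 * the ELF header is part of a load segment, the page is mapped
 * read-only executable and recorded as the initial load address. The
 * ELF header is parsed and elf->phdr set to the program headers, which
 * must fit within the first page.
 */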
354 static void init_elf(struct ta_elf *elf)
355 {
356 	TEE_Result res = TEE_SUCCESS;
357 	vaddr_t va = 0;
358 	uint32_t flags = PTA_SYSTEM_MAP_FLAG_SHAREABLE;
359 	size_t sz = 0;
360 
361 	res = sys_open_ta_bin(&elf->uuid, &elf->handle);
362 	if (res)
363 		err(res, "sys_open_ta_bin(%pUl)", (void *)&elf->uuid);
364 
365 	/*
366 	 * Map it read-only executable when we're loading a library where
367 	 * the ELF header is included in a load segment.
368 	 */
369 	if (!elf->is_main)
370 		flags |= PTA_SYSTEM_MAP_FLAG_EXECUTABLE;
371 	res = sys_map_ta_bin(&va, SMALL_PAGE_SIZE, flags, elf->handle, 0, 0, 0);
372 	if (res)
373 		err(res, "sys_map_ta_bin");
374 	elf->ehdr_addr = va;
375 	if (!elf->is_main) {
376 		elf->load_addr = va;
377 		elf->max_addr = va + SMALL_PAGE_SIZE;
378 		elf->max_offs = SMALL_PAGE_SIZE;
379 	}
380 
381 	if (!IS_ELF(*(Elf32_Ehdr *)va))
382 		err(TEE_ERROR_BAD_FORMAT, "TA is not an ELF");
383 
384 	res = e32_parse_ehdr(elf, (void *)va);
385 	if (res == TEE_ERROR_BAD_FORMAT)
386 		res = e64_parse_ehdr(elf, (void *)va);
387 	if (res)
388 		err(res, "Cannot parse ELF");
389 
390 	if (MUL_OVERFLOW(elf->e_phnum, elf->e_phentsize, &sz) ||
391 	    ADD_OVERFLOW(sz, elf->e_phoff, &sz))
392 		err(TEE_ERROR_BAD_FORMAT, "Program headers size overflow");
393 
394 	if (sz > SMALL_PAGE_SIZE)
395 		err(TEE_ERROR_NOT_SUPPORTED, "Cannot read program headers");
396 
397 	elf->phdr = (void *)(va + elf->e_phoff);
398 }
399 
400 static size_t roundup(size_t v)
401 {
402 	return ROUNDUP(v, SMALL_PAGE_SIZE);
403 }
404 
405 static size_t rounddown(size_t v)
406 {
407 	return ROUNDDOWN(v, SMALL_PAGE_SIZE);
408 }
409 
410 static void add_segment(struct ta_elf *elf, size_t offset, size_t vaddr,
411 			size_t filesz, size_t memsz, size_t flags, size_t align)
412 {
413 	struct segment *seg = calloc(1, sizeof(*seg));
414 
415 	if (!seg)
416 		err(TEE_ERROR_OUT_OF_MEMORY, "calloc");
417 
418 	if (memsz < filesz)
419 		err(TEE_ERROR_BAD_FORMAT, "Memsz smaller than filesz");
420 
421 	seg->offset = offset;
422 	seg->vaddr = vaddr;
423 	seg->filesz = filesz;
424 	seg->memsz = memsz;
425 	seg->flags = flags;
426 	seg->align = align;
427 
428 	TAILQ_INSERT_TAIL(&elf->segs, seg, link);
429 }
430 
431 static void parse_load_segments(struct ta_elf *elf)
432 {
433 	size_t n = 0;
434 
435 	if (elf->is_32bit) {
436 		Elf32_Phdr *phdr = elf->phdr;
437 
438 		for (n = 0; n < elf->e_phnum; n++)
439 			if (phdr[n].p_type == PT_LOAD) {
440 				add_segment(elf, phdr[n].p_offset,
441 					    phdr[n].p_vaddr, phdr[n].p_filesz,
442 					    phdr[n].p_memsz, phdr[n].p_flags,
443 					    phdr[n].p_align);
444 			} else if (phdr[n].p_type == PT_ARM_EXIDX) {
445 				elf->exidx_start = phdr[n].p_vaddr;
446 				elf->exidx_size = phdr[n].p_filesz;
447 			} else if (phdr[n].p_type == PT_TLS) {
448 				assign_tls_mod_id(elf);
449 			}
450 	} else {
451 		Elf64_Phdr *phdr = elf->phdr;
452 
453 		for (n = 0; n < elf->e_phnum; n++)
454 			if (phdr[n].p_type == PT_LOAD) {
455 				add_segment(elf, phdr[n].p_offset,
456 					    phdr[n].p_vaddr, phdr[n].p_filesz,
457 					    phdr[n].p_memsz, phdr[n].p_flags,
458 					    phdr[n].p_align);
459 			} else if (phdr[n].p_type == PT_TLS) {
460 				elf->tls_start = phdr[n].p_vaddr;
461 				elf->tls_filesz = phdr[n].p_filesz;
462 				elf->tls_memsz = phdr[n].p_memsz;
463 			}
464 	}
465 }
466 
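/*
 * Note (added summary): fill a remapped-writeable segment with its file
 * content. The part that is already mapped elsewhere (just below
 * elf->max_addr) is copied with memcpy(), the remainder is read from
 * the TA binary with sys_copy_from_ta_bin().
 */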
467 static void copy_remapped_to(struct ta_elf *elf, const struct segment *seg)
468 {
469 	uint8_t *dst = (void *)(seg->vaddr + elf->load_addr);
470 	size_t n = 0;
471 	size_t offs = seg->offset;
472 	size_t num_bytes = seg->filesz;
473 
474 	if (offs < elf->max_offs) {
475 		n = MIN(elf->max_offs - offs, num_bytes);
476 		memcpy(dst, (void *)(elf->max_addr + offs - elf->max_offs), n);
477 		dst += n;
478 		offs += n;
479 		num_bytes -= n;
480 	}
481 
482 	if (num_bytes) {
483 		TEE_Result res = sys_copy_from_ta_bin(dst, num_bytes,
484 						      elf->handle, offs);
485 
486 		if (res)
487 			err(res, "sys_copy_from_ta_bin");
488 		elf->max_offs += offs;
489 	}
490 }
491 
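/*
 * Note (added summary): sanity check the load segments and then adjust
 * them for mapping. Segments whose pages overlap with the previous
 * segment are merged (case 1 below) or flagged for remapping (case 2
 * below), and the remaining segments get their vaddr/offset rounded
 * down to a page boundary with filesz/memsz grown accordingly.
 */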
492 static void adjust_segments(struct ta_elf *elf)
493 {
494 	struct segment *seg = NULL;
495 	struct segment *prev_seg = NULL;
496 	size_t prev_end_addr = 0;
497 	size_t align = 0;
498 	size_t mask = 0;
499 
500 	/* Sanity check */
501 	TAILQ_FOREACH(seg, &elf->segs, link) {
502 		size_t dummy __maybe_unused = 0;
503 
504 		assert(seg->align >= SMALL_PAGE_SIZE);
505 		assert(!ADD_OVERFLOW(seg->vaddr, seg->memsz, &dummy));
506 		assert(seg->filesz <= seg->memsz);
507 		assert((seg->offset & SMALL_PAGE_MASK) ==
508 		       (seg->vaddr & SMALL_PAGE_MASK));
509 
510 		prev_seg = TAILQ_PREV(seg, segment_head, link);
511 		if (prev_seg) {
512 			assert(seg->vaddr >= prev_seg->vaddr + prev_seg->memsz);
513 			assert(seg->offset >=
514 			       prev_seg->offset + prev_seg->filesz);
515 		}
516 		if (!align)
517 			align = seg->align;
518 		assert(align == seg->align);
519 	}
520 
521 	mask = align - 1;
522 
523 	seg = TAILQ_FIRST(&elf->segs);
524 	if (seg)
525 		seg = TAILQ_NEXT(seg, link);
526 	while (seg) {
527 		prev_seg = TAILQ_PREV(seg, segment_head, link);
528 		prev_end_addr = prev_seg->vaddr + prev_seg->memsz;
529 
530 		/*
531 		 * This segment may overlap with the last "page" in the
532 		 * previous segment in two different ways:
533 		 * 1. Virtual address (and offset) overlaps =>
534 		 *    Permissions need to be merged. The low SMALL_PAGE_MASK
535 		 *    bits of vaddr and offset must match so the segment
536 		 *    lines up with the previous segment.
537 		 *
538 		 * 2. Only offset overlaps =>
539 		 *    The same page in the ELF is mapped at two different
540 		 *    virtual addresses. As a limitation this segment must
541 		 *    be mapped as writeable.
542 		 */
543 
544 		/* Case 1. */
545 		if (rounddown(seg->vaddr) < prev_end_addr) {
546 			assert((seg->vaddr & mask) == (seg->offset & mask));
547 			assert(prev_seg->memsz == prev_seg->filesz);
548 
549 			/*
550 			 * Merge the segments and their permissions.
551 			 * Note that there may be a small hole between the
552 			 * two sections.
553 			 */
554 			prev_seg->filesz = seg->vaddr + seg->filesz -
555 					   prev_seg->vaddr;
556 			prev_seg->memsz = seg->vaddr + seg->memsz -
557 					   prev_seg->vaddr;
558 			prev_seg->flags |= seg->flags;
559 
560 			TAILQ_REMOVE(&elf->segs, seg, link);
561 			free(seg);
562 			seg = TAILQ_NEXT(prev_seg, link);
563 			continue;
564 		}
565 
566 		/* Case 2. */
567 		if ((seg->offset & mask) &&
568 		    rounddown(seg->offset) <
569 		    (prev_seg->offset + prev_seg->filesz)) {
570 
571 			assert(seg->flags & PF_W);
572 			seg->remapped_writeable = true;
573 		}
574 
575 		/*
576 		 * No overlap, but we may need to align address, offset and
577 		 * size.
578 		 */
579 		seg->filesz += seg->vaddr - rounddown(seg->vaddr);
580 		seg->memsz += seg->vaddr - rounddown(seg->vaddr);
581 		seg->vaddr = rounddown(seg->vaddr);
582 		seg->offset = rounddown(seg->offset);
583 		seg = TAILQ_NEXT(seg, link);
584 	}
585 
586 }
587 
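/*
 * Note (added summary): legacy TAs have each load segment mapped as
 * zero-initialized memory with sys_map_zi() and the file content copied
 * in with sys_copy_from_ta_bin(); the final segment protections are
 * applied later by ta_elf_finalize_mappings().
 */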
588 static void populate_segments_legacy(struct ta_elf *elf)
589 {
590 	TEE_Result res = TEE_SUCCESS;
591 	struct segment *seg = NULL;
592 	vaddr_t va = 0;
593 
594 	assert(elf->is_legacy);
595 	TAILQ_FOREACH(seg, &elf->segs, link) {
596 		struct segment *last_seg = TAILQ_LAST(&elf->segs, segment_head);
597 		size_t pad_end = roundup(last_seg->vaddr + last_seg->memsz -
598 					 seg->vaddr - seg->memsz);
599 		size_t num_bytes = roundup(seg->memsz);
600 
601 		if (!elf->load_addr)
602 			va = 0;
603 		else
604 			va = seg->vaddr + elf->load_addr;
605 
606 
607 		if (!(seg->flags & PF_R))
608 			err(TEE_ERROR_NOT_SUPPORTED,
609 			    "Segment must be readable");
610 
611 		res = sys_map_zi(num_bytes, 0, &va, 0, pad_end);
612 		if (res)
613 			err(res, "sys_map_zi");
614 		res = sys_copy_from_ta_bin((void *)va, seg->filesz,
615 					   elf->handle, seg->offset);
616 		if (res)
617 			err(res, "sys_copy_from_ta_bin");
618 
619 		if (!elf->load_addr)
620 			elf->load_addr = va;
621 		elf->max_addr = va + num_bytes;
622 		elf->max_offs = seg->offset + seg->filesz;
623 	}
624 }
625 
626 static size_t get_pad_begin(void)
627 {
628 #ifdef CFG_TA_ASLR
629 	size_t min = CFG_TA_ASLR_MIN_OFFSET_PAGES;
630 	size_t max = CFG_TA_ASLR_MAX_OFFSET_PAGES;
631 	TEE_Result res = TEE_SUCCESS;
632 	uint32_t rnd32 = 0;
633 	size_t rnd = 0;
634 
635 	COMPILE_TIME_ASSERT(CFG_TA_ASLR_MIN_OFFSET_PAGES <
636 			    CFG_TA_ASLR_MAX_OFFSET_PAGES);
637 	if (max > min) {
638 		res = _utee_cryp_random_number_generate(&rnd32, sizeof(rnd32));
639 		if (res) {
640 			DMSG("Random read failed: %#"PRIx32, res);
641 			return min * SMALL_PAGE_SIZE;
642 		}
643 		rnd = rnd32 % (max - min);
644 	}
645 
646 	return (min + rnd) * SMALL_PAGE_SIZE;
647 #else /*!CFG_TA_ASLR*/
648 	return 0;
649 #endif /*!CFG_TA_ASLR*/
650 }
651 
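/*
 * Note (added summary): map the load segments of a non-legacy ELF.
 * Writeable (PF_W) segments are mapped as zero-initialized memory and
 * filled with sys_copy_from_ta_bin(), other segments are mapped
 * directly from the TA binary as shareable (and possibly executable)
 * pages. The part of a segment already covered by the first mapped page
 * of a library is skipped or trimmed.
 */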
652 static void populate_segments(struct ta_elf *elf)
653 {
654 	TEE_Result res = TEE_SUCCESS;
655 	struct segment *seg = NULL;
656 	vaddr_t va = 0;
657 	size_t pad_begin = 0;
658 
659 	assert(!elf->is_legacy);
660 	TAILQ_FOREACH(seg, &elf->segs, link) {
661 		struct segment *last_seg = TAILQ_LAST(&elf->segs, segment_head);
662 		size_t pad_end = roundup(last_seg->vaddr + last_seg->memsz -
663 					 seg->vaddr - seg->memsz);
664 
665 		if (seg->remapped_writeable) {
666 			size_t num_bytes = roundup(seg->vaddr + seg->memsz) -
667 					   rounddown(seg->vaddr);
668 
669 			assert(elf->load_addr);
670 			va = rounddown(elf->load_addr + seg->vaddr);
671 			assert(va >= elf->max_addr);
672 			res = sys_map_zi(num_bytes, 0, &va, 0, pad_end);
673 			if (res)
674 				err(res, "sys_map_zi");
675 
676 			copy_remapped_to(elf, seg);
677 			elf->max_addr = va + num_bytes;
678 		} else {
679 			uint32_t flags = 0;
680 			size_t filesz = seg->filesz;
681 			size_t memsz = seg->memsz;
682 			size_t offset = seg->offset;
683 			size_t vaddr = seg->vaddr;
684 
685 			if (offset < elf->max_offs) {
686 				/*
687 				 * We're in a load segment which overlaps
688 				 * with (or is covered by) the first page
689 				 * of a shared library.
690 				 */
691 				if (vaddr + filesz < SMALL_PAGE_SIZE) {
692 					size_t num_bytes = 0;
693 
694 					/*
695 					 * If this segment is completely
696 					 * covered, take next.
697 					 */
698 					if (vaddr + memsz <= SMALL_PAGE_SIZE)
699 						continue;
700 
701 					/*
702 					 * All data of the segment is
703 					 * loaded, but we need to zero
704 					 * extend it.
705 					 */
706 					va = elf->max_addr;
707 					num_bytes = roundup(vaddr + memsz) -
708 						    roundup(vaddr) -
709 						    SMALL_PAGE_SIZE;
710 					assert(num_bytes);
711 					res = sys_map_zi(num_bytes, 0, &va, 0,
712 							 0);
713 					if (res)
714 						err(res, "sys_map_zi");
715 					elf->max_addr = roundup(va + num_bytes);
716 					continue;
717 				}
718 
719 				/* Partial overlap, remove the first page. */
720 				vaddr += SMALL_PAGE_SIZE;
721 				filesz -= SMALL_PAGE_SIZE;
722 				memsz -= SMALL_PAGE_SIZE;
723 				offset += SMALL_PAGE_SIZE;
724 			}
725 
726 			if (!elf->load_addr) {
727 				va = 0;
728 				pad_begin = get_pad_begin();
729 				/*
730 				 * If mapping with pad_begin fails we'll
731 				 * retry without pad_begin, effectively
732 				 * disabling ASLR for the current ELF file.
733 				 */
734 			} else {
735 				va = vaddr + elf->load_addr;
736 				pad_begin = 0;
737 			}
738 
739 			if (seg->flags & PF_W)
740 				flags |= PTA_SYSTEM_MAP_FLAG_WRITEABLE;
741 			else
742 				flags |= PTA_SYSTEM_MAP_FLAG_SHAREABLE;
743 			if (seg->flags & PF_X)
744 				flags |= PTA_SYSTEM_MAP_FLAG_EXECUTABLE;
745 			if (!(seg->flags & PF_R))
746 				err(TEE_ERROR_NOT_SUPPORTED,
747 				    "Segment must be readable");
748 			if (flags & PTA_SYSTEM_MAP_FLAG_WRITEABLE) {
749 				res = sys_map_zi(memsz, 0, &va, pad_begin,
750 						 pad_end);
751 				if (pad_begin && res == TEE_ERROR_OUT_OF_MEMORY)
752 					res = sys_map_zi(memsz, 0, &va, 0,
753 							 pad_end);
754 				if (res)
755 					err(res, "sys_map_zi");
756 				res = sys_copy_from_ta_bin((void *)va, filesz,
757 							   elf->handle, offset);
758 				if (res)
759 					err(res, "sys_copy_from_ta_bin");
760 			} else {
761 				if (filesz != memsz)
762 					err(TEE_ERROR_BAD_FORMAT,
763 					    "Filesz and memsz mismatch");
764 				res = sys_map_ta_bin(&va, filesz, flags,
765 						     elf->handle, offset,
766 						     pad_begin, pad_end);
767 				if (pad_begin && res == TEE_ERROR_OUT_OF_MEMORY)
768 					res = sys_map_ta_bin(&va, filesz, flags,
769 							     elf->handle,
770 							     offset, 0,
771 							     pad_end);
772 				if (res)
773 					err(res, "sys_map_ta_bin");
774 			}
775 
776 			if (!elf->load_addr)
777 				elf->load_addr = va;
778 			elf->max_addr = roundup(va + memsz);
779 			elf->max_offs += filesz;
780 		}
781 	}
782 }
783 
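/*
 * Note (added summary): parse and adjust the load segments. If the
 * first segment starts within the first page of the binary (i.e. the
 * ELF header is part of a load segment, as for libraries), the page
 * already mapped by init_elf() is moved with sys_remap() to a region
 * large enough for the whole image, with ASLR padding when possible.
 */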
784 static void map_segments(struct ta_elf *elf)
785 {
786 	TEE_Result res = TEE_SUCCESS;
787 
788 	parse_load_segments(elf);
789 	adjust_segments(elf);
790 	if (TAILQ_FIRST(&elf->segs)->offset < SMALL_PAGE_SIZE) {
791 		vaddr_t va = 0;
792 		size_t sz = elf->max_addr - elf->load_addr;
793 		struct segment *seg = TAILQ_LAST(&elf->segs, segment_head);
794 		size_t pad_begin = get_pad_begin();
795 
796 		/*
797 		 * We're loading a library here; if that ever changes, other
798 		 * parts of the code need to be updated too.
799 		 */
800 		assert(!elf->is_main);
801 
802 		/*
803 		 * Now that we know how much virtual memory is needed, move
804 		 * the already mapped part to a location which can
805 		 * accommodate us.
806 		 */
807 		res = sys_remap(elf->load_addr, &va, sz, pad_begin,
808 				roundup(seg->vaddr + seg->memsz));
809 		if (res == TEE_ERROR_OUT_OF_MEMORY)
810 			res = sys_remap(elf->load_addr, &va, sz, 0,
811 					roundup(seg->vaddr + seg->memsz));
812 		if (res)
813 			err(res, "sys_remap");
814 		elf->ehdr_addr = va;
815 		elf->load_addr = va;
816 		elf->max_addr = va + sz;
817 		elf->phdr = (void *)(va + elf->e_phoff);
818 	}
819 }
820 
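/*
 * Note (added summary): scan a PT_DYNAMIC segment for DT_NEEDED
 * entries. Each needed name is looked up in the dynamic string table,
 * parsed as a UUID string and queued for loading unless it's already in
 * the queue.
 */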
821 static void add_deps_from_segment(struct ta_elf *elf, unsigned int type,
822 				  vaddr_t addr, size_t memsz)
823 {
824 	size_t dyn_entsize = 0;
825 	size_t num_dyns = 0;
826 	size_t n = 0;
827 	unsigned int tag = 0;
828 	size_t val = 0;
829 	TEE_UUID uuid = { };
830 	char *str_tab = NULL;
831 	size_t str_tab_sz = 0;
832 
833 	if (type != PT_DYNAMIC)
834 		return;
835 
836 	check_phdr_in_range(elf, type, addr, memsz);
837 
838 	if (elf->is_32bit)
839 		dyn_entsize = sizeof(Elf32_Dyn);
840 	else
841 		dyn_entsize = sizeof(Elf64_Dyn);
842 
843 	assert(!(memsz % dyn_entsize));
844 	num_dyns = memsz / dyn_entsize;
845 
846 	for (n = 0; n < num_dyns && !(str_tab && str_tab_sz); n++) {
847 		read_dyn(elf, addr, n, &tag, &val);
848 		if (tag == DT_STRTAB)
849 			str_tab = (char *)(val + elf->load_addr);
850 		else if (tag == DT_STRSZ)
851 			str_tab_sz = val;
852 	}
853 	check_range(elf, ".dynstr/STRTAB", str_tab, str_tab_sz);
854 
855 	for (n = 0; n < num_dyns; n++) {
856 		read_dyn(elf, addr, n, &tag, &val);
857 		if (tag != DT_NEEDED)
858 			continue;
859 		if (val >= str_tab_sz)
860 			err(TEE_ERROR_BAD_FORMAT,
861 			    "Offset into .dynstr/STRTAB out of range");
862 		tee_uuid_from_str(&uuid, str_tab + val);
863 		queue_elf(&uuid);
864 	}
865 }
866 
867 static void add_dependencies(struct ta_elf *elf)
868 {
869 	size_t n = 0;
870 
871 	if (elf->is_32bit) {
872 		Elf32_Phdr *phdr = elf->phdr;
873 
874 		for (n = 0; n < elf->e_phnum; n++)
875 			add_deps_from_segment(elf, phdr[n].p_type,
876 					      phdr[n].p_vaddr, phdr[n].p_memsz);
877 	} else {
878 		Elf64_Phdr *phdr = elf->phdr;
879 
880 		for (n = 0; n < elf->e_phnum; n++)
881 			add_deps_from_segment(elf, phdr[n].p_type,
882 					      phdr[n].p_vaddr, phdr[n].p_memsz);
883 	}
884 }
885 
886 static void copy_section_headers(struct ta_elf *elf)
887 {
888 	TEE_Result res = TEE_SUCCESS;
889 	size_t sz = 0;
890 	size_t offs = 0;
891 
892 	if (MUL_OVERFLOW(elf->e_shnum, elf->e_shentsize, &sz))
893 		err(TEE_ERROR_BAD_FORMAT, "Section headers size overflow");
894 
895 	elf->shdr = malloc(sz);
896 	if (!elf->shdr)
897 		err(TEE_ERROR_OUT_OF_MEMORY, "malloc");
898 
899 	/*
900 	 * We're assuming that the section headers come after the load segments,
901 	 * but if it's a very small dynamically linked library the section
902 	 * headers can still end up (at least partially) in the first mapped page.
903 	 */
904 	if (elf->e_shoff < SMALL_PAGE_SIZE) {
905 		assert(!elf->is_main);
906 		offs = MIN(SMALL_PAGE_SIZE - elf->e_shoff, sz);
907 		memcpy(elf->shdr, (void *)(elf->load_addr + elf->e_shoff),
908 		       offs);
909 	}
910 
911 	if (offs < sz) {
912 		res = sys_copy_from_ta_bin((uint8_t *)elf->shdr + offs,
913 					   sz - offs, elf->handle,
914 					   elf->e_shoff + offs);
915 		if (res)
916 			err(res, "sys_copy_from_ta_bin");
917 	}
918 }
919 
920 static void close_handle(struct ta_elf *elf)
921 {
922 	TEE_Result res = sys_close_ta_bin(elf->handle);
923 
924 	if (res)
925 		err(res, "sys_close_ta_bin");
926 	elf->handle = -1;
927 }
928 
929 static void clean_elf_load_main(struct ta_elf *elf)
930 {
931 	TEE_Result res = TEE_SUCCESS;
932 
933 	/*
934 	 * Clean up from last attempt to load
935 	 */
936 	res = sys_unmap(elf->ehdr_addr, SMALL_PAGE_SIZE);
937 	if (res)
938 		err(res, "sys_unmap");
939 
940 	while (!TAILQ_EMPTY(&elf->segs)) {
941 		struct segment *seg = TAILQ_FIRST(&elf->segs);
942 		vaddr_t va = 0;
943 		size_t num_bytes = 0;
944 
945 		va = rounddown(elf->load_addr + seg->vaddr);
946 		if (seg->remapped_writeable)
947 			num_bytes = roundup(seg->vaddr + seg->memsz) -
948 				    rounddown(seg->vaddr);
949 		else
950 			num_bytes = seg->memsz;
951 
952 		res = sys_unmap(va, num_bytes);
953 		if (res)
954 			err(res, "sys_unmap");
955 
956 		TAILQ_REMOVE(&elf->segs, seg, link);
957 		free(seg);
958 	}
959 
960 	free(elf->shdr);
961 	memset(&elf->is_32bit, 0,
962 	       (vaddr_t)&elf->uuid - (vaddr_t)&elf->is_32bit);
963 
964 	TAILQ_INIT(&elf->segs);
965 }
966 
967 #ifdef ARM64
968 /*
969  * Allocates an offset in the TA's Thread Control Block for the TLS segment of
970  * the @elf module.
971  */
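/*
 * Note (added, assuming the usual AArch64 TLS ABI): TCB_HEAD_SIZE
 * corresponds to the two-word TCB header of the variant 1 TLS layout,
 * where the thread pointer addresses the TCB and the modules' TLS
 * blocks are laid out back to back after it.
 */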
972 #define TCB_HEAD_SIZE (2 * sizeof(long))
973 static void set_tls_offset(struct ta_elf *elf)
974 {
975 	static size_t next_offs = TCB_HEAD_SIZE;
976 
977 	if (!elf->tls_start)
978 		return;
979 
980 	/* Module has a TLS segment */
981 	elf->tls_tcb_offs = next_offs;
982 	next_offs += elf->tls_memsz;
983 }
984 #else
985 static void set_tls_offset(struct ta_elf *elf __unused) {}
986 #endif
987 
988 static void load_main(struct ta_elf *elf)
989 {
990 	init_elf(elf);
991 	map_segments(elf);
992 	populate_segments(elf);
993 	add_dependencies(elf);
994 	copy_section_headers(elf);
995 	save_symtab(elf);
996 	close_handle(elf);
997 	set_tls_offset(elf);
998 
999 	elf->head = (struct ta_head *)elf->load_addr;
1000 	if (elf->head->depr_entry != UINT64_MAX) {
1001 		/*
1002 		 * Legacy TAs set their entry point in ta_head. For
1003 		 * non-legacy TAs the ELF entry point is used instead and
1004 		 * the ta_head entry point is left set to UINT64_MAX to
1005 		 * indicate that it's not used.
1006 		 *
1007 		 * NB, everything before the commit a73b5878c89d ("Replace
1008 		 * ta_head.entry with elf entry") is considered a legacy TA
1009 		 * by ldelf.
1010 		 *
1011 		 * Legacy TAs cannot be mapped with shared memory segments
1012 		 * so restart the mapping if it turned out we're loading a
1013 		 * legacy TA.
1014 		 */
1015 
1016 		DMSG("Reloading TA %pUl as legacy TA", (void *)&elf->uuid);
1017 		clean_elf_load_main(elf);
1018 		elf->is_legacy = true;
1019 		init_elf(elf);
1020 		map_segments(elf);
1021 		populate_segments_legacy(elf);
1022 		add_dependencies(elf);
1023 		copy_section_headers(elf);
1024 		save_symtab(elf);
1025 		close_handle(elf);
1026 		elf->head = (struct ta_head *)elf->load_addr;
1027 		/*
1028 		 * Check that the TA is still a legacy TA; if it isn't, give
1029 		 * up now since we're likely under attack.
1030 		 */
1031 		if (elf->head->depr_entry == UINT64_MAX)
1032 			err(TEE_ERROR_GENERIC,
1033 			    "TA %pUl was changed on disk to non-legacy",
1034 			    (void *)&elf->uuid);
1035 	}
1036 
1037 }
1038 
1039 void ta_elf_load_main(const TEE_UUID *uuid, uint32_t *is_32bit, uint64_t *sp,
1040 		      uint32_t *ta_flags)
1041 {
1042 	struct ta_elf *elf = queue_elf(uuid);
1043 	vaddr_t va = 0;
1044 	TEE_Result res = TEE_SUCCESS;
1045 
1046 	assert(elf);
1047 	elf->is_main = true;
1048 
1049 	load_main(elf);
1050 
1051 	*is_32bit = elf->is_32bit;
1052 	res = sys_map_zi(elf->head->stack_size, 0, &va, 0, 0);
1053 	if (res)
1054 		err(res, "sys_map_zi stack");
1055 
1056 	if (elf->head->flags & ~TA_FLAGS_MASK)
1057 		err(TEE_ERROR_BAD_FORMAT, "Invalid TA flag(s) %#"PRIx32,
1058 		    elf->head->flags & ~TA_FLAGS_MASK);
1059 
1060 	*ta_flags = elf->head->flags;
1061 	*sp = va + elf->head->stack_size;
1062 	ta_stack = va;
1063 	ta_stack_size = elf->head->stack_size;
1064 }
1065 
1066 void ta_elf_finalize_load_main(uint64_t *entry)
1067 {
1068 	struct ta_elf *elf = TAILQ_FIRST(&main_elf_queue);
1069 	TEE_Result res = TEE_SUCCESS;
1070 
1071 	assert(elf->is_main);
1072 
1073 	res = ta_elf_set_init_fini_info(elf->is_32bit);
1074 	if (res)
1075 		err(res, "ta_elf_set_init_fini_info");
1076 
1077 	if (elf->is_legacy)
1078 		*entry = elf->head->depr_entry;
1079 	else
1080 		*entry = elf->e_entry + elf->load_addr;
1081 }
1082 
1083 
1084 void ta_elf_load_dependency(struct ta_elf *elf, bool is_32bit)
1085 {
1086 	if (elf->is_main)
1087 		return;
1088 
1089 	init_elf(elf);
1090 	if (elf->is_32bit != is_32bit)
1091 		err(TEE_ERROR_BAD_FORMAT, "ELF %pUl is %sbit (expected %sbit)",
1092 		    (void *)&elf->uuid, elf->is_32bit ? "32" : "64",
1093 		    is_32bit ? "32" : "64");
1094 
1095 	map_segments(elf);
1096 	populate_segments(elf);
1097 	add_dependencies(elf);
1098 	copy_section_headers(elf);
1099 	save_symtab(elf);
1100 	close_handle(elf);
1101 	set_tls_offset(elf);
1102 }
1103 
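/*
 * Note (added summary): legacy ELFs have their segments mapped without
 * the final permissions (see populate_segments_legacy()); once loading
 * and relocation are done, the protections derived from the PF_* flags
 * are applied here with sys_set_prot().
 */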
1104 void ta_elf_finalize_mappings(struct ta_elf *elf)
1105 {
1106 	TEE_Result res = TEE_SUCCESS;
1107 	struct segment *seg = NULL;
1108 
1109 	if (!elf->is_legacy)
1110 		return;
1111 
1112 	TAILQ_FOREACH(seg, &elf->segs, link) {
1113 		vaddr_t va = elf->load_addr + seg->vaddr;
1114 		uint32_t flags = 0;
1115 
1116 		if (seg->flags & PF_W)
1117 			flags |= PTA_SYSTEM_MAP_FLAG_WRITEABLE;
1118 		if (seg->flags & PF_X)
1119 			flags |= PTA_SYSTEM_MAP_FLAG_EXECUTABLE;
1120 
1121 		res = sys_set_prot(va, seg->memsz, flags);
1122 		if (res)
1123 			err(res, "sys_set_prot");
1124 	}
1125 }
1126 
1127 static void __printf(3, 4) print_wrapper(void *pctx, print_func_t print_func,
1128 					 const char *fmt, ...)
1129 {
1130 	va_list ap;
1131 
1132 	va_start(ap, fmt);
1133 	print_func(pctx, fmt, ap);
1134 	va_end(ap);
1135 }
1136 
1137 static void print_seg(void *pctx, print_func_t print_func,
1138 		      size_t idx __maybe_unused, int elf_idx __maybe_unused,
1139 		      vaddr_t va __maybe_unused, paddr_t pa __maybe_unused,
1140 		      size_t sz __maybe_unused, uint32_t flags)
1141 {
1142 	int width __maybe_unused = 8;
1143 	char desc[14] __maybe_unused = "";
1144 	char flags_str[] __maybe_unused = "----";
1145 
1146 	if (elf_idx > -1) {
1147 		snprintf(desc, sizeof(desc), " [%d]", elf_idx);
1148 	} else {
1149 		if (flags & DUMP_MAP_EPHEM)
1150 			snprintf(desc, sizeof(desc), " (param)");
1151 		if (flags & DUMP_MAP_LDELF)
1152 			snprintf(desc, sizeof(desc), " (ldelf)");
1153 		if (va == ta_stack)
1154 			snprintf(desc, sizeof(desc), " (stack)");
1155 	}
1156 
1157 	if (flags & DUMP_MAP_READ)
1158 		flags_str[0] = 'r';
1159 	if (flags & DUMP_MAP_WRITE)
1160 		flags_str[1] = 'w';
1161 	if (flags & DUMP_MAP_EXEC)
1162 		flags_str[2] = 'x';
1163 	if (flags & DUMP_MAP_SECURE)
1164 		flags_str[3] = 's';
1165 
1166 	print_wrapper(pctx, print_func,
1167 		      "region %2zu: va 0x%0*"PRIxVA" pa 0x%0*"PRIxPA" size 0x%06zx flags %s%s\n",
1168 		      idx, width, va, width, pa, sz, flags_str, desc);
1169 }
1170 
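/*
 * Note (added summary): helper for ta_elf_print_mappings(). Advance to
 * the next segment, visiting the ELFs in the queue in increasing load
 * address order. Returns false once all segments of all ELFs have been
 * visited.
 */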
1171 static bool get_next_in_order(struct ta_elf_queue *elf_queue,
1172 			      struct ta_elf **elf, struct segment **seg,
1173 			      size_t *elf_idx)
1174 {
1175 	struct ta_elf *e = NULL;
1176 	struct segment *s = NULL;
1177 	size_t idx = 0;
1178 	vaddr_t va = 0;
1179 	struct ta_elf *e2 = NULL;
1180 	size_t i2 = 0;
1181 
1182 	assert(elf && seg && elf_idx);
1183 	e = *elf;
1184 	s = *seg;
1185 	assert((e == NULL && s == NULL) || (e != NULL && s != NULL));
1186 
1187 	if (s) {
1188 		s = TAILQ_NEXT(s, link);
1189 		if (s) {
1190 			*seg = s;
1191 			return true;
1192 		}
1193 	}
1194 
1195 	if (e)
1196 		va = e->load_addr;
1197 
1198 	/* Find the ELF with next load address */
1199 	e = NULL;
1200 	TAILQ_FOREACH(e2, elf_queue, link) {
1201 		if (e2->load_addr > va) {
1202 			if (!e || e2->load_addr < e->load_addr) {
1203 				e = e2;
1204 				idx = i2;
1205 			}
1206 		}
1207 		i2++;
1208 	}
1209 	if (!e)
1210 		return false;
1211 
1212 	*elf = e;
1213 	*seg = TAILQ_FIRST(&e->segs);
1214 	*elf_idx = idx;
1215 	return true;
1216 }
1217 
1218 void ta_elf_print_mappings(void *pctx, print_func_t print_func,
1219 			   struct ta_elf_queue *elf_queue, size_t num_maps,
1220 			   struct dump_map *maps, vaddr_t mpool_base)
1221 {
1222 	struct segment *seg = NULL;
1223 	struct ta_elf *elf = NULL;
1224 	size_t elf_idx = 0;
1225 	size_t idx = 0;
1226 	size_t map_idx = 0;
1227 
1228 	/*
1229 	 * Loop over all segments and maps, printing them in order of
1230 	 * virtual address. A segment has priority if the same virtual
1231 	 * address is present in both a map and a segment.
1232 	 */
1233 	get_next_in_order(elf_queue, &elf, &seg, &elf_idx);
1234 	while (true) {
1235 		vaddr_t va = -1;
1236 		size_t sz = 0;
1237 		uint32_t flags = DUMP_MAP_SECURE;
1238 		size_t offs = 0;
1239 
1240 		if (seg) {
1241 			va = rounddown(seg->vaddr + elf->load_addr);
1242 			sz = roundup(seg->vaddr + seg->memsz) -
1243 				     rounddown(seg->vaddr);
1244 		}
1245 
1246 		while (map_idx < num_maps && maps[map_idx].va <= va) {
1247 			uint32_t f = 0;
1248 
1249 			/* If there's a match, it should be the same map */
1250 			if (maps[map_idx].va == va) {
1251 				/*
1252 				 * In shared libraries the first page is
1253 				 * mapped separately with the rest of that
1254 				 * segment following back to back in a
1255 				 * separate entry.
1256 				 */
1257 				if (map_idx + 1 < num_maps &&
1258 				    maps[map_idx].sz == SMALL_PAGE_SIZE) {
1259 					vaddr_t next_va = maps[map_idx].va +
1260 							  maps[map_idx].sz;
1261 					size_t comb_sz = maps[map_idx].sz +
1262 							 maps[map_idx + 1].sz;
1263 
1264 					if (next_va == maps[map_idx + 1].va &&
1265 					    comb_sz == sz &&
1266 					    maps[map_idx].flags ==
1267 					    maps[map_idx + 1].flags) {
1268 						/* Skip this and next entry */
1269 						map_idx += 2;
1270 						continue;
1271 					}
1272 				}
1273 				assert(maps[map_idx].sz == sz);
1274 			} else if (maps[map_idx].va < va) {
1275 				if (maps[map_idx].va == mpool_base)
1276 					f |= DUMP_MAP_LDELF;
1277 				print_seg(pctx, print_func, idx, -1,
1278 					  maps[map_idx].va, maps[map_idx].pa,
1279 					  maps[map_idx].sz,
1280 					  maps[map_idx].flags | f);
1281 				idx++;
1282 			}
1283 			map_idx++;
1284 		}
1285 
1286 		if (!seg)
1287 			break;
1288 
1289 		offs = rounddown(seg->offset);
1290 		if (seg->flags & PF_R)
1291 			flags |= DUMP_MAP_READ;
1292 		if (seg->flags & PF_W)
1293 			flags |= DUMP_MAP_WRITE;
1294 		if (seg->flags & PF_X)
1295 			flags |= DUMP_MAP_EXEC;
1296 
1297 		print_seg(pctx, print_func, idx, elf_idx, va, offs, sz, flags);
1298 		idx++;
1299 
1300 		if (!get_next_in_order(elf_queue, &elf, &seg, &elf_idx))
1301 			seg = NULL;
1302 	}
1303 
1304 	elf_idx = 0;
1305 	TAILQ_FOREACH(elf, elf_queue, link) {
1306 		print_wrapper(pctx, print_func,
1307 			      " [%zu] %pUl @ 0x%0*"PRIxVA"\n",
1308 			      elf_idx, (void *)&elf->uuid, 8, elf->load_addr);
1309 		elf_idx++;
1310 	}
1311 }
1312 
1313 #ifdef CFG_UNWIND
1314 void ta_elf_stack_trace_a32(uint32_t regs[16])
1315 {
1316 	struct unwind_state_arm32 state = { };
1317 
1318 	memcpy(state.registers, regs, sizeof(state.registers));
1319 	print_stack_arm32(&state, ta_stack, ta_stack_size);
1320 }
1321 
1322 void ta_elf_stack_trace_a64(uint64_t fp, uint64_t sp, uint64_t pc)
1323 {
1324 	struct unwind_state_arm64 state = { .fp = fp, .sp = sp, .pc = pc };
1325 
1326 	print_stack_arm64(&state, ta_stack, ta_stack_size);
1327 }
1328 #endif
1329 
1330 TEE_Result ta_elf_add_library(const TEE_UUID *uuid)
1331 {
1332 	struct ta_elf *ta = TAILQ_FIRST(&main_elf_queue);
1333 	struct ta_elf *lib = ta_elf_find_elf(uuid);
1334 	struct ta_elf *elf = NULL;
1335 
1336 	if (lib)
1337 		return TEE_SUCCESS; /* Already mapped */
1338 
1339 	lib = queue_elf_helper(uuid);
1340 	if (!lib)
1341 		return TEE_ERROR_OUT_OF_MEMORY;
1342 
1343 	for (elf = lib; elf; elf = TAILQ_NEXT(elf, link))
1344 		ta_elf_load_dependency(elf, ta->is_32bit);
1345 
1346 	for (elf = lib; elf; elf = TAILQ_NEXT(elf, link)) {
1347 		ta_elf_relocate(elf);
1348 		ta_elf_finalize_mappings(elf);
1349 	}
1350 
1351 	for (elf = lib; elf; elf = TAILQ_NEXT(elf, link))
1352 		DMSG("ELF (%pUl) at %#"PRIxVA,
1353 		     (void *)&elf->uuid, elf->load_addr);
1354 
1355 	return ta_elf_set_init_fini_info(ta->is_32bit);
1356 }
1357 
1358 /* Get address/size of .init_array and .fini_array from the dynamic segment */
1359 static void get_init_fini_array(struct ta_elf *elf, unsigned int type,
1360 				vaddr_t addr, size_t memsz, vaddr_t *init,
1361 				size_t *init_cnt, vaddr_t *fini,
1362 				size_t *fini_cnt)
1363 {
1364 	size_t addrsz = 0;
1365 	size_t dyn_entsize = 0;
1366 	size_t num_dyns = 0;
1367 	size_t n = 0;
1368 	unsigned int tag = 0;
1369 	size_t val = 0;
1370 
1371 	assert(type == PT_DYNAMIC);
1372 
1373 	check_phdr_in_range(elf, type, addr, memsz);
1374 
1375 	if (elf->is_32bit) {
1376 		dyn_entsize = sizeof(Elf32_Dyn);
1377 		addrsz = 4;
1378 	} else {
1379 		dyn_entsize = sizeof(Elf64_Dyn);
1380 		addrsz = 8;
1381 	}
1382 
1383 	assert(!(memsz % dyn_entsize));
1384 	num_dyns = memsz / dyn_entsize;
1385 
1386 	for (n = 0; n < num_dyns; n++) {
1387 		read_dyn(elf, addr, n, &tag, &val);
1388 		if (tag == DT_INIT_ARRAY)
1389 			*init = val + elf->load_addr;
1390 		else if (tag == DT_FINI_ARRAY)
1391 			*fini = val + elf->load_addr;
1392 		else if (tag == DT_INIT_ARRAYSZ)
1393 			*init_cnt = val / addrsz;
1394 		else if (tag == DT_FINI_ARRAYSZ)
1395 			*fini_cnt = val / addrsz;
1396 	}
1397 }
1398 
1399 /* Get address/size of .init_array and .fini_array in @elf (if present) */
1400 static void elf_get_init_fini_array(struct ta_elf *elf, vaddr_t *init,
1401 				    size_t *init_cnt, vaddr_t *fini,
1402 				    size_t *fini_cnt)
1403 {
1404 	size_t n = 0;
1405 
1406 	if (elf->is_32bit) {
1407 		Elf32_Phdr *phdr = elf->phdr;
1408 
1409 		for (n = 0; n < elf->e_phnum; n++) {
1410 			if (phdr[n].p_type == PT_DYNAMIC) {
1411 				get_init_fini_array(elf, phdr[n].p_type,
1412 						    phdr[n].p_vaddr,
1413 						    phdr[n].p_memsz,
1414 						    init, init_cnt, fini,
1415 						    fini_cnt);
1416 				return;
1417 			}
1418 		}
1419 	} else {
1420 		Elf64_Phdr *phdr = elf->phdr;
1421 
1422 		for (n = 0; n < elf->e_phnum; n++) {
1423 			if (phdr[n].p_type == PT_DYNAMIC) {
1424 				get_init_fini_array(elf, phdr[n].p_type,
1425 						    phdr[n].p_vaddr,
1426 						    phdr[n].p_memsz,
1427 						    init, init_cnt, fini,
1428 						    fini_cnt);
1429 				return;
1430 			}
1431 		}
1432 	}
1433 }
1434 
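/*
 * Note (added summary): grow (or allocate) the ifs array pointed to by
 * the __init_fini_info structure at @va to @cnt entries, zeroing any
 * new entries, for either the 32-bit or the 64-bit layout.
 */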
1435 static TEE_Result realloc_ifs(vaddr_t va, size_t cnt, bool is_32bit)
1436 {
1437 	struct __init_fini_info32 *info32 = (struct __init_fini_info32 *)va;
1438 	struct __init_fini_info *info = (struct __init_fini_info *)va;
1439 	struct __init_fini32 *ifs32 = NULL;
1440 	struct __init_fini *ifs = NULL;
1441 	size_t prev_cnt = 0;
1442 	void *ptr = NULL;
1443 
1444 	if (is_32bit) {
1445 		ptr = (void *)(vaddr_t)info32->ifs;
1446 		ptr = realloc(ptr, cnt * sizeof(struct __init_fini32));
1447 		if (!ptr)
1448 			return TEE_ERROR_OUT_OF_MEMORY;
1449 		ifs32 = ptr;
1450 		prev_cnt = info32->size;
1451 		if (cnt > prev_cnt)
1452 			memset(ifs32 + prev_cnt, 0,
1453 			       (cnt - prev_cnt) * sizeof(*ifs32));
1454 		info32->ifs = (uint32_t)(vaddr_t)ifs32;
1455 		info32->size = cnt;
1456 	} else {
1457 		ptr = realloc(info->ifs, cnt * sizeof(struct __init_fini));
1458 		if (!ptr)
1459 			return TEE_ERROR_OUT_OF_MEMORY;
1460 		ifs = ptr;
1461 		prev_cnt = info->size;
1462 		if (cnt > prev_cnt)
1463 			memset(ifs + prev_cnt, 0,
1464 			       (cnt - prev_cnt) * sizeof(*ifs));
1465 		info->ifs = ifs;
1466 		info->size = cnt;
1467 	}
1468 
1469 	return TEE_SUCCESS;
1470 }
1471 
1472 static void fill_ifs(vaddr_t va, size_t idx, struct ta_elf *elf, bool is_32bit)
1473 {
1474 	struct __init_fini_info32 *info32 = (struct __init_fini_info32 *)va;
1475 	struct __init_fini_info *info = (struct __init_fini_info *)va;
1476 	struct __init_fini32 *ifs32 = NULL;
1477 	struct __init_fini *ifs = NULL;
1478 	size_t init_cnt = 0;
1479 	size_t fini_cnt = 0;
1480 	vaddr_t init = 0;
1481 	vaddr_t fini = 0;
1482 
1483 	if (is_32bit) {
1484 		assert(idx < info32->size);
1485 		ifs32 = &((struct __init_fini32 *)(vaddr_t)info32->ifs)[idx];
1486 
1487 		if (ifs32->flags & __IFS_VALID)
1488 			return;
1489 
1490 		elf_get_init_fini_array(elf, &init, &init_cnt, &fini,
1491 					&fini_cnt);
1492 
1493 		ifs32->init = (uint32_t)init;
1494 		ifs32->init_size = init_cnt;
1495 
1496 		ifs32->fini = (uint32_t)fini;
1497 		ifs32->fini_size = fini_cnt;
1498 
1499 		ifs32->flags |= __IFS_VALID;
1500 	} else {
1501 		assert(idx < info->size);
1502 		ifs = &info->ifs[idx];
1503 
1504 		if (ifs->flags & __IFS_VALID)
1505 			return;
1506 
1507 		elf_get_init_fini_array(elf, &init, &init_cnt, &fini,
1508 					&fini_cnt);
1509 
1510 		ifs->init = (void (**)(void))init;
1511 		ifs->init_size = init_cnt;
1512 
1513 		ifs->fini = (void (**)(void))fini;
1514 		ifs->fini_size = fini_cnt;
1515 
1516 		ifs->flags |= __IFS_VALID;
1517 	}
1518 }
1519 
1520 /*
1521  * Set or update __init_fini_info in the TA with information from the ELF
1522  * queue
1523  */
1524 TEE_Result ta_elf_set_init_fini_info(bool is_32bit)
1525 {
1526 	struct __init_fini_info *info = NULL;
1527 	TEE_Result res = TEE_SUCCESS;
1528 	struct ta_elf *elf = NULL;
1529 	vaddr_t info_va = 0;
1530 	size_t cnt = 0;
1531 
1532 	res = ta_elf_resolve_sym("__init_fini_info", &info_va, NULL, NULL);
1533 	if (res) {
1534 		if (res == TEE_ERROR_ITEM_NOT_FOUND) {
1535 			/* Older TA */
1536 			return TEE_SUCCESS;
1537 		}
1538 		return res;
1539 	}
1540 	assert(info_va);
1541 
1542 	info = (struct __init_fini_info *)info_va;
1543 	if (info->reserved)
1544 		return TEE_ERROR_NOT_SUPPORTED;
1545 
1546 	TAILQ_FOREACH(elf, &main_elf_queue, link)
1547 		cnt++;
1548 
1549 	/* Queue has at least one file (main) */
1550 	assert(cnt);
1551 
1552 	res = realloc_ifs(info_va, cnt, is_32bit);
1553 	if (res)
1554 		goto err;
1555 
1556 	cnt = 0;
1557 	TAILQ_FOREACH(elf, &main_elf_queue, link) {
1558 		fill_ifs(info_va, cnt, elf, is_32bit);
1559 		cnt++;
1560 	}
1561 
1562 	return TEE_SUCCESS;
1563 err:
1564 	free(info);
1565 	return res;
1566 }
1567