xref: /optee_os/ldelf/ta_elf.c (revision cfd9b9f7e8ff6baa705a66dc9c56fd2941079cd6)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2019, Linaro Limited
4  */
5 
6 #include <assert.h>
7 #include <ctype.h>
8 #include <elf32.h>
9 #include <elf64.h>
10 #include <elf_common.h>
11 #include <ldelf.h>
12 #include <pta_system.h>
13 #include <stdio.h>
14 #include <stdlib.h>
15 #include <string_ext.h>
16 #include <string.h>
17 #include <tee_api_types.h>
18 #include <tee_internal_api_extensions.h>
19 #include <user_ta_header.h>
20 #include <utee_syscalls.h>
21 #include <util.h>
22 
23 #include "sys.h"
24 #include "ta_elf.h"
25 #include "unwind.h"
26 
27 static vaddr_t ta_stack;
28 static vaddr_t ta_stack_size;
29 
30 struct ta_elf_queue main_elf_queue = TAILQ_HEAD_INITIALIZER(main_elf_queue);
31 
32 static struct ta_elf *queue_elf_helper(const TEE_UUID *uuid)
33 {
34 	struct ta_elf *elf = calloc(1, sizeof(*elf));
35 
36 	if (!elf)
37 		return NULL;
38 
39 	TAILQ_INIT(&elf->segs);
40 
41 	elf->uuid = *uuid;
42 	TAILQ_INSERT_TAIL(&main_elf_queue, elf, link);
43 	return elf;
44 }
45 
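/*
 * Queue the ELF with @uuid for loading. Returns NULL if the UUID is
 * already queued; on allocation failure loading is aborted with err().
 */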
46 static struct ta_elf *queue_elf(const TEE_UUID *uuid)
47 {
48 	struct ta_elf *elf = ta_elf_find_elf(uuid);
49 
50 	if (elf)
51 		return NULL;
52 
53 	elf = queue_elf_helper(uuid);
54 	if (!elf)
55 		err(TEE_ERROR_OUT_OF_MEMORY, "queue_elf_helper");
56 
57 	return elf;
58 }
59 
60 struct ta_elf *ta_elf_find_elf(const TEE_UUID *uuid)
61 {
62 	struct ta_elf *elf = NULL;
63 
64 	TAILQ_FOREACH(elf, &main_elf_queue, link)
65 		if (!memcmp(uuid, &elf->uuid, sizeof(*uuid)))
66 			return elf;
67 
68 	return NULL;
69 }
70 
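/*
 * Check that the header describes a little-endian 32-bit ARM ET_DYN ELF
 * with the expected ABI (hard-float is rejected unless CFG_WITH_VFP is
 * set) and cache the header fields needed later.
 */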
71 static TEE_Result e32_parse_ehdr(struct ta_elf *elf, Elf32_Ehdr *ehdr)
72 {
73 	if (ehdr->e_ident[EI_VERSION] != EV_CURRENT ||
74 	    ehdr->e_ident[EI_CLASS] != ELFCLASS32 ||
75 	    ehdr->e_ident[EI_DATA] != ELFDATA2LSB ||
76 	    ehdr->e_ident[EI_OSABI] != ELFOSABI_NONE ||
77 	    ehdr->e_type != ET_DYN || ehdr->e_machine != EM_ARM ||
78 	    (ehdr->e_flags & EF_ARM_ABIMASK) != EF_ARM_ABI_VERSION ||
79 #ifndef CFG_WITH_VFP
80 	    (ehdr->e_flags & EF_ARM_ABI_FLOAT_HARD) ||
81 #endif
82 	    ehdr->e_phentsize != sizeof(Elf32_Phdr) ||
83 	    ehdr->e_shentsize != sizeof(Elf32_Shdr))
84 		return TEE_ERROR_BAD_FORMAT;
85 
86 	elf->is_32bit = true;
87 	elf->e_entry = ehdr->e_entry;
88 	elf->e_phoff = ehdr->e_phoff;
89 	elf->e_shoff = ehdr->e_shoff;
90 	elf->e_phnum = ehdr->e_phnum;
91 	elf->e_shnum = ehdr->e_shnum;
92 	elf->e_phentsize = ehdr->e_phentsize;
93 	elf->e_shentsize = ehdr->e_shentsize;
94 
95 	return TEE_SUCCESS;
96 }
97 
98 #ifdef ARM64
99 static TEE_Result e64_parse_ehdr(struct ta_elf *elf, Elf64_Ehdr *ehdr)
100 {
101 	if (ehdr->e_ident[EI_VERSION] != EV_CURRENT ||
102 	    ehdr->e_ident[EI_CLASS] != ELFCLASS64 ||
103 	    ehdr->e_ident[EI_DATA] != ELFDATA2LSB ||
104 	    ehdr->e_ident[EI_OSABI] != ELFOSABI_NONE ||
105 	    ehdr->e_type != ET_DYN || ehdr->e_machine != EM_AARCH64 ||
106 	    ehdr->e_flags || ehdr->e_phentsize != sizeof(Elf64_Phdr) ||
107 	    ehdr->e_shentsize != sizeof(Elf64_Shdr))
108 		return TEE_ERROR_BAD_FORMAT;
109 
110 
111 	elf->is_32bit = false;
112 	elf->e_entry = ehdr->e_entry;
113 	elf->e_phoff = ehdr->e_phoff;
114 	elf->e_shoff = ehdr->e_shoff;
115 	elf->e_phnum = ehdr->e_phnum;
116 	elf->e_shnum = ehdr->e_shnum;
117 	elf->e_phentsize = ehdr->e_phentsize;
118 	elf->e_shentsize = ehdr->e_shentsize;
119 
120 	return TEE_SUCCESS;
121 }
122 #else /*ARM64*/
123 static TEE_Result e64_parse_ehdr(struct ta_elf *elf __unused,
124 				 Elf64_Ehdr *ehdr __unused)
125 {
126 	return TEE_ERROR_NOT_SUPPORTED;
127 }
128 #endif /*ARM64*/
129 
130 static void check_phdr_in_range(struct ta_elf *elf, unsigned int type,
131 				vaddr_t addr, size_t memsz)
132 {
133 	vaddr_t max_addr = 0;
134 
135 	if (ADD_OVERFLOW(addr, memsz, &max_addr))
136 		err(TEE_ERROR_BAD_FORMAT, "Program header %#x overflow", type);
137 
138 	/*
139 	 * elf->load_addr and elf->max_addr both hold final virtual
140 	 * addresses, while the addresses in this program header are
141 	 * relative to 0.
142 	 */
143 	if (max_addr > elf->max_addr - elf->load_addr)
144 		err(TEE_ERROR_BAD_FORMAT, "Program header %#x out of bounds",
145 		    type);
146 }
147 
148 static void read_dyn(struct ta_elf *elf, vaddr_t addr,
149 		     size_t idx, unsigned int *tag, size_t *val)
150 {
151 	if (elf->is_32bit) {
152 		Elf32_Dyn *dyn = (Elf32_Dyn *)(addr + elf->load_addr);
153 
154 		*tag = dyn[idx].d_tag;
155 		*val = dyn[idx].d_un.d_val;
156 	} else {
157 		Elf64_Dyn *dyn = (Elf64_Dyn *)(addr + elf->load_addr);
158 
159 		*tag = dyn[idx].d_tag;
160 		*val = dyn[idx].d_un.d_val;
161 	}
162 }
163 
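/*
 * If this is the PT_DYNAMIC segment, record the virtual address of the
 * DT_HASH table used for symbol lookup.
 */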
164 static void save_hashtab_from_segment(struct ta_elf *elf, unsigned int type,
165 				      vaddr_t addr, size_t memsz)
166 {
167 	size_t dyn_entsize = 0;
168 	size_t num_dyns = 0;
169 	size_t n = 0;
170 	unsigned int tag = 0;
171 	size_t val = 0;
172 
173 	if (type != PT_DYNAMIC)
174 		return;
175 
176 	check_phdr_in_range(elf, type, addr, memsz);
177 
178 	if (elf->is_32bit)
179 		dyn_entsize = sizeof(Elf32_Dyn);
180 	else
181 		dyn_entsize = sizeof(Elf64_Dyn);
182 
183 	assert(!(memsz % dyn_entsize));
184 	num_dyns = memsz / dyn_entsize;
185 
186 	for (n = 0; n < num_dyns; n++) {
187 		read_dyn(elf, addr, n, &tag, &val);
188 		if (tag == DT_HASH) {
189 			elf->hashtab = (void *)(val + elf->load_addr);
190 			break;
191 		}
192 	}
193 }
194 
195 static void check_hashtab(struct ta_elf *elf, void *ptr, size_t num_buckets,
196 			  size_t num_chains)
197 {
198 	/*
199 	 * Start from 2 since the first two words are mandatory and hold
200 	 * num_buckets and num_chains. This function is called twice:
201 	 * first to check that there's indeed room for num_buckets and
202 	 * num_chains, then to check that all of it fits.
203 	 * See http://www.sco.com/developers/gabi/latest/ch5.dynamic.html#hash
204 	 */
205 	size_t num_words = 2;
206 	vaddr_t max_addr = 0;
207 	size_t sz = 0;
208 
209 	if ((vaddr_t)ptr < elf->load_addr)
210 		err(TEE_ERROR_GENERIC, "Hashtab %p out of range", ptr);
211 
212 	if (!ALIGNMENT_IS_OK(ptr, uint32_t))
213 		err(TEE_ERROR_GENERIC, "Bad alignment of hashtab %p", ptr);
214 
215 	if (ADD_OVERFLOW(num_words, num_buckets, &num_words) ||
216 	    ADD_OVERFLOW(num_words, num_chains, &num_words) ||
217 	    MUL_OVERFLOW(num_words, sizeof(uint32_t), &sz) ||
218 	    ADD_OVERFLOW((vaddr_t)ptr, sz, &max_addr))
219 		err(TEE_ERROR_GENERIC, "Hashtab overflow");
220 
221 	if (max_addr > elf->max_addr)
222 		err(TEE_ERROR_GENERIC, "Hashtab %p out of range", ptr);
223 }
224 
225 static void save_hashtab(struct ta_elf *elf)
226 {
227 	uint32_t *hashtab = NULL;
228 	size_t n = 0;
229 
230 	if (elf->is_32bit) {
231 		Elf32_Phdr *phdr = elf->phdr;
232 
233 		for (n = 0; n < elf->e_phnum; n++)
234 			save_hashtab_from_segment(elf, phdr[n].p_type,
235 						  phdr[n].p_vaddr,
236 						  phdr[n].p_memsz);
237 	} else {
238 		Elf64_Phdr *phdr = elf->phdr;
239 
240 		for (n = 0; n < elf->e_phnum; n++)
241 			save_hashtab_from_segment(elf, phdr[n].p_type,
242 						  phdr[n].p_vaddr,
243 						  phdr[n].p_memsz);
244 	}
245 
246 	check_hashtab(elf, elf->hashtab, 0, 0);
247 	hashtab = elf->hashtab;
248 	check_hashtab(elf, elf->hashtab, hashtab[0], hashtab[1]);
249 }
250 
251 static void e32_save_symtab(struct ta_elf *elf, size_t tab_idx)
252 {
253 	Elf32_Shdr *shdr = elf->shdr;
254 	size_t str_idx = shdr[tab_idx].sh_link;
255 
256 	elf->dynsymtab = (void *)(shdr[tab_idx].sh_addr + elf->load_addr);
257 	assert(!(shdr[tab_idx].sh_size % sizeof(Elf32_Sym)));
258 	elf->num_dynsyms = shdr[tab_idx].sh_size / sizeof(Elf32_Sym);
259 
260 	elf->dynstr = (void *)(shdr[str_idx].sh_addr + elf->load_addr);
261 	elf->dynstr_size = shdr[str_idx].sh_size;
262 }
263 
264 static void e64_save_symtab(struct ta_elf *elf, size_t tab_idx)
265 {
266 	Elf64_Shdr *shdr = elf->shdr;
267 	size_t str_idx = shdr[tab_idx].sh_link;
268 
269 	elf->dynsymtab = (void *)(vaddr_t)(shdr[tab_idx].sh_addr +
270 					   elf->load_addr);
271 	assert(!(shdr[tab_idx].sh_size % sizeof(Elf64_Sym)));
272 	elf->num_dynsyms = shdr[tab_idx].sh_size / sizeof(Elf64_Sym);
273 
274 	elf->dynstr = (void *)(vaddr_t)(shdr[str_idx].sh_addr + elf->load_addr);
275 	elf->dynstr_size = shdr[str_idx].sh_size;
276 }
277 
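/*
 * Record the dynamic symbol table (SHT_DYNSYM) and its string table
 * from the section headers, then locate and validate the hash table.
 */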
278 static void save_symtab(struct ta_elf *elf)
279 {
280 	size_t n = 0;
281 
282 	if (elf->is_32bit) {
283 		Elf32_Shdr *shdr = elf->shdr;
284 
285 		for (n = 0; n < elf->e_shnum; n++) {
286 			if (shdr[n].sh_type == SHT_DYNSYM) {
287 				e32_save_symtab(elf, n);
288 				break;
289 			}
290 		}
291 	} else {
292 		Elf64_Shdr *shdr = elf->shdr;
293 
294 		for (n = 0; n < elf->e_shnum; n++) {
295 			if (shdr[n].sh_type == SHT_DYNSYM) {
296 				e64_save_symtab(elf, n);
297 				break;
298 			}
299 		}
300 
301 	}
302 
303 	save_hashtab(elf);
304 }
305 
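/*
 * Open the TA binary and map its first page so that the ELF header and
 * the program headers can be parsed.
 */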
306 static void init_elf(struct ta_elf *elf)
307 {
308 	TEE_Result res = TEE_SUCCESS;
309 	vaddr_t va = 0;
310 	uint32_t flags = PTA_SYSTEM_MAP_FLAG_SHAREABLE;
311 
312 	res = sys_open_ta_bin(&elf->uuid, &elf->handle);
313 	if (res)
314 		err(res, "sys_open_ta_bin(%pUl)", (void *)&elf->uuid);
315 
316 	/*
317 	 * Map it read-only executable when we're loading a library where
318 	 * the ELF header is included in a load segment.
319 	 */
320 	if (!elf->is_main)
321 		flags |= PTA_SYSTEM_MAP_FLAG_EXECUTABLE;
322 	res = sys_map_ta_bin(&va, SMALL_PAGE_SIZE, flags, elf->handle, 0, 0, 0);
323 	if (res)
324 		err(res, "sys_map_ta_bin");
325 	elf->ehdr_addr = va;
326 	if (!elf->is_main) {
327 		elf->load_addr = va;
328 		elf->max_addr = va + SMALL_PAGE_SIZE;
329 		elf->max_offs = SMALL_PAGE_SIZE;
330 	}
331 
332 	if (!IS_ELF(*(Elf32_Ehdr *)va))
333 		err(TEE_ERROR_BAD_FORMAT, "TA is not an ELF");
334 
335 	res = e32_parse_ehdr(elf, (void *)va);
336 	if (res == TEE_ERROR_BAD_FORMAT)
337 		res = e64_parse_ehdr(elf, (void *)va);
338 	if (res)
339 		err(res, "Cannot parse ELF");
340 
341 	if (elf->e_phoff + elf->e_phnum * elf->e_phentsize > SMALL_PAGE_SIZE)
342 		err(TEE_ERROR_NOT_SUPPORTED, "Cannot read program headers");
343 
344 	elf->phdr = (void *)(va + elf->e_phoff);
345 }
346 
347 static size_t roundup(size_t v)
348 {
349 	return ROUNDUP(v, SMALL_PAGE_SIZE);
350 }
351 
352 static size_t rounddown(size_t v)
353 {
354 	return ROUNDDOWN(v, SMALL_PAGE_SIZE);
355 }
356 
357 static void add_segment(struct ta_elf *elf, size_t offset, size_t vaddr,
358 			size_t filesz, size_t memsz, size_t flags, size_t align)
359 {
360 	struct segment *seg = calloc(1, sizeof(*seg));
361 
362 	if (!seg)
363 		err(TEE_ERROR_OUT_OF_MEMORY, "calloc");
364 
365 	seg->offset = offset;
366 	seg->vaddr = vaddr;
367 	seg->filesz = filesz;
368 	seg->memsz = memsz;
369 	seg->flags = flags;
370 	seg->align = align;
371 
372 	TAILQ_INSERT_TAIL(&elf->segs, seg, link);
373 }
374 
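/*
 * Record each PT_LOAD program header as a segment to be mapped. For
 * 32-bit TAs the PT_ARM_EXIDX header (exception index table, used for
 * stack unwinding) is recorded as well.
 */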
375 static void parse_load_segments(struct ta_elf *elf)
376 {
377 	size_t n = 0;
378 
379 	if (elf->is_32bit) {
380 		Elf32_Phdr *phdr = elf->phdr;
381 
382 		for (n = 0; n < elf->e_phnum; n++)
383 			if (phdr[n].p_type == PT_LOAD) {
384 				add_segment(elf, phdr[n].p_offset,
385 					    phdr[n].p_vaddr, phdr[n].p_filesz,
386 					    phdr[n].p_memsz, phdr[n].p_flags,
387 					    phdr[n].p_align);
388 			} else if (phdr[n].p_type == PT_ARM_EXIDX) {
389 				elf->exidx_start = phdr[n].p_vaddr;
390 				elf->exidx_size = phdr[n].p_filesz;
391 			}
392 	} else {
393 		Elf64_Phdr *phdr = elf->phdr;
394 
395 		for (n = 0; n < elf->e_phnum; n++)
396 			if (phdr[n].p_type == PT_LOAD)
397 				add_segment(elf, phdr[n].p_offset,
398 					    phdr[n].p_vaddr, phdr[n].p_filesz,
399 					    phdr[n].p_memsz, phdr[n].p_flags,
400 					    phdr[n].p_align);
401 	}
402 }
403 
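/*
 * Fill a remapped writeable segment with its file content: bytes
 * already present in previously mapped pages are copied from there,
 * the rest is read from the TA binary.
 */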
404 static void copy_remapped_to(struct ta_elf *elf, const struct segment *seg)
405 {
406 	uint8_t *dst = (void *)(seg->vaddr + elf->load_addr);
407 	size_t n = 0;
408 	size_t offs = seg->offset;
409 	size_t num_bytes = seg->filesz;
410 
411 	if (offs < elf->max_offs) {
412 		n = MIN(elf->max_offs - offs, num_bytes);
413 		memcpy(dst, (void *)(elf->max_addr + offs - elf->max_offs), n);
414 		dst += n;
415 		offs += n;
416 		num_bytes -= n;
417 	}
418 
419 	if (num_bytes) {
420 		TEE_Result res = sys_copy_from_ta_bin(dst, num_bytes,
421 						      elf->handle, offs);
422 
423 		if (res)
424 			err(res, "sys_copy_from_ta_bin");
425 		elf->max_offs += offs;
426 	}
427 }
428 
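/*
 * Massage the segment list so it can be mapped with small-page
 * granularity: segments sharing a page are merged with combined
 * permissions, segments whose file offset overlaps a previous segment
 * are marked remapped_writeable, and the rest are page-aligned.
 */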
429 static void adjust_segments(struct ta_elf *elf)
430 {
431 	struct segment *seg = NULL;
432 	struct segment *prev_seg = NULL;
433 	size_t prev_end_addr = 0;
434 	size_t align = 0;
435 	size_t mask = 0;
436 
437 	/* Sanity check */
438 	TAILQ_FOREACH(seg, &elf->segs, link) {
439 		size_t dummy __maybe_unused = 0;
440 
441 		assert(seg->align >= SMALL_PAGE_SIZE);
442 		assert(!ADD_OVERFLOW(seg->vaddr, seg->memsz, &dummy));
443 		assert(seg->filesz <= seg->memsz);
444 		assert((seg->offset & SMALL_PAGE_MASK) ==
445 		       (seg->vaddr & SMALL_PAGE_MASK));
446 
447 		prev_seg = TAILQ_PREV(seg, segment_head, link);
448 		if (prev_seg) {
449 			assert(seg->vaddr >= prev_seg->vaddr + prev_seg->memsz);
450 			assert(seg->offset >=
451 			       prev_seg->offset + prev_seg->filesz);
452 		}
453 		if (!align)
454 			align = seg->align;
455 		assert(align == seg->align);
456 	}
457 
458 	mask = align - 1;
459 
460 	seg = TAILQ_FIRST(&elf->segs);
461 	if (seg)
462 		seg = TAILQ_NEXT(seg, link);
463 	while (seg) {
464 		prev_seg = TAILQ_PREV(seg, segment_head, link);
465 		prev_end_addr = prev_seg->vaddr + prev_seg->memsz;
466 
467 		/*
468 		 * This segment may overlap with the last "page" in the
469 		 * previous segment in two different ways:
470 		 * 1. Virtual address (and offset) overlaps =>
471 		 *    Permissions need to be merged. The offset must have
472 		 *    the same SMALL_PAGE_MASK bits as vaddr, and vaddr and
473 		 *    offset must add up with the previous segment.
474 		 *
475 		 * 2. Only offset overlaps =>
476 		 *    The same page in the ELF is mapped at two different
477 		 *    virtual addresses. As a limitation this segment must
478 		 *    be mapped as writeable.
479 		 */
480 
481 		/* Case 1. */
482 		if (rounddown(seg->vaddr) < prev_end_addr) {
483 			assert((seg->vaddr & mask) == (seg->offset & mask));
484 			assert(prev_seg->memsz == prev_seg->filesz);
485 
486 			/*
487 			 * Merge the segments and their permissions.
488 			 * Note that there may be a small hole between the
489 			 * two sections.
490 			 */
491 			prev_seg->filesz = seg->vaddr + seg->filesz -
492 					   prev_seg->vaddr;
493 			prev_seg->memsz = seg->vaddr + seg->memsz -
494 					   prev_seg->vaddr;
495 			prev_seg->flags |= seg->flags;
496 
497 			TAILQ_REMOVE(&elf->segs, seg, link);
498 			free(seg);
499 			seg = TAILQ_NEXT(prev_seg, link);
500 			continue;
501 		}
502 
503 		/* Case 2. */
504 		if ((seg->offset & mask) &&
505 		    rounddown(seg->offset) <
506 		    (prev_seg->offset + prev_seg->filesz)) {
507 
508 			assert(seg->flags & PF_W);
509 			seg->remapped_writeable = true;
510 		}
511 
512 		/*
513 		 * No overlap, but we may need to align address, offset and
514 		 * size.
515 		 */
516 		seg->filesz += seg->vaddr - rounddown(seg->vaddr);
517 		seg->memsz += seg->vaddr - rounddown(seg->vaddr);
518 		seg->vaddr = rounddown(seg->vaddr);
519 		seg->offset = rounddown(seg->offset);
520 		seg = TAILQ_NEXT(seg, link);
521 	}
522 
523 }
524 
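/*
 * Map the segments of a legacy TA: every segment is mapped as
 * zero-initialized memory and its file content is copied in; nothing
 * is mapped directly from the TA binary.
 */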
525 static void populate_segments_legacy(struct ta_elf *elf)
526 {
527 	TEE_Result res = TEE_SUCCESS;
528 	struct segment *seg = NULL;
529 	vaddr_t va = 0;
530 
531 	assert(elf->is_legacy);
532 	TAILQ_FOREACH(seg, &elf->segs, link) {
533 		struct segment *last_seg = TAILQ_LAST(&elf->segs, segment_head);
534 		size_t pad_end = roundup(last_seg->vaddr + last_seg->memsz -
535 					 seg->vaddr - seg->memsz);
536 		size_t num_bytes = roundup(seg->memsz);
537 
538 		if (!elf->load_addr)
539 			va = 0;
540 		else
541 			va = seg->vaddr + elf->load_addr;
542 
543 
544 		if (!(seg->flags & PF_R))
545 			err(TEE_ERROR_NOT_SUPPORTED,
546 			    "Segment must be readable");
547 
548 		res = sys_map_zi(num_bytes, 0, &va, 0, pad_end);
549 		if (res)
550 			err(res, "sys_map_zi");
551 		res = sys_copy_from_ta_bin((void *)va, seg->filesz,
552 					   elf->handle, seg->offset);
553 		if (res)
554 			err(res, "sys_copy_from_ta_bin");
555 
556 		if (!elf->load_addr)
557 			elf->load_addr = va;
558 		elf->max_addr = va + num_bytes;
559 		elf->max_offs = seg->offset + seg->filesz;
560 	}
561 }
562 
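/*
 * Number of padding bytes to request before a new mapping. With
 * CFG_TA_ASLR the value is randomized between the configured minimum
 * and maximum page offsets; without it no padding is requested.
 */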
563 static size_t get_pad_begin(void)
564 {
565 #ifdef CFG_TA_ASLR
566 	size_t min = CFG_TA_ASLR_MIN_OFFSET_PAGES;
567 	size_t max = CFG_TA_ASLR_MAX_OFFSET_PAGES;
568 	TEE_Result res = TEE_SUCCESS;
569 	uint32_t rnd32 = 0;
570 	size_t rnd = 0;
571 
572 	COMPILE_TIME_ASSERT(CFG_TA_ASLR_MIN_OFFSET_PAGES <
573 			    CFG_TA_ASLR_MAX_OFFSET_PAGES);
574 	if (max > min) {
575 		res = utee_cryp_random_number_generate(&rnd32, sizeof(rnd32));
576 		if (res) {
577 			DMSG("Random read failed: %#"PRIx32, res);
578 			return min * SMALL_PAGE_SIZE;
579 		}
580 		rnd = rnd32 % (max - min);
581 	}
582 
583 	return (min + rnd) * SMALL_PAGE_SIZE;
584 #else /*!CFG_TA_ASLR*/
585 	return 0;
586 #endif /*!CFG_TA_ASLR*/
587 }
588 
589 static void populate_segments(struct ta_elf *elf)
590 {
591 	TEE_Result res = TEE_SUCCESS;
592 	struct segment *seg = NULL;
593 	vaddr_t va = 0;
594 	size_t pad_begin = 0;
595 
596 	assert(!elf->is_legacy);
597 	TAILQ_FOREACH(seg, &elf->segs, link) {
598 		struct segment *last_seg = TAILQ_LAST(&elf->segs, segment_head);
599 		size_t pad_end = roundup(last_seg->vaddr + last_seg->memsz -
600 					 seg->vaddr - seg->memsz);
601 
602 		if (seg->remapped_writeable) {
603 			size_t num_bytes = roundup(seg->vaddr + seg->memsz) -
604 					   rounddown(seg->vaddr);
605 
606 			assert(elf->load_addr);
607 			va = rounddown(elf->load_addr + seg->vaddr);
608 			assert(va >= elf->max_addr);
609 			res = sys_map_zi(num_bytes, 0, &va, 0, pad_end);
610 			if (res)
611 				err(res, "sys_map_zi");
612 
613 			copy_remapped_to(elf, seg);
614 			elf->max_addr = va + num_bytes;
615 		} else {
616 			uint32_t flags = 0;
617 			size_t filesz = seg->filesz;
618 			size_t memsz = seg->memsz;
619 			size_t offset = seg->offset;
620 			size_t vaddr = seg->vaddr;
621 
622 			if (offset < elf->max_offs) {
623 				/*
624 				 * We're in a load segment which overlaps
625 				 * with (or is covered by) the first page
626 				 * of a shared library.
627 				 */
628 				if (vaddr + filesz < SMALL_PAGE_SIZE) {
629 					size_t num_bytes = 0;
630 
631 					/*
632 					 * If this segment is completely
633 					 * covered, take next.
634 					 */
635 					if (vaddr + memsz <= SMALL_PAGE_SIZE)
636 						continue;
637 
638 					/*
639 					 * All data of the segment is
640 					 * loaded, but we need to zero
641 					 * extend it.
642 					 */
643 					va = elf->max_addr;
644 					num_bytes = roundup(vaddr + memsz) -
645 						    roundup(vaddr) -
646 						    SMALL_PAGE_SIZE;
647 					assert(num_bytes);
648 					res = sys_map_zi(num_bytes, 0, &va, 0,
649 							 0);
650 					if (res)
651 						err(res, "sys_map_zi");
652 					elf->max_addr = roundup(va + num_bytes);
653 					continue;
654 				}
655 
656 				/* Partial overlap, remove the first page. */
657 				vaddr += SMALL_PAGE_SIZE;
658 				filesz -= SMALL_PAGE_SIZE;
659 				memsz -= SMALL_PAGE_SIZE;
660 				offset += SMALL_PAGE_SIZE;
661 			}
662 
663 			if (!elf->load_addr) {
664 				va = 0;
665 				pad_begin = get_pad_begin();
666 				/*
667 				 * If mapping with pad_begin fails we'll
668 				 * retry without pad_begin, effectively
669 				 * disabling ASLR for the current ELF file.
670 				 */
671 			} else {
672 				va = vaddr + elf->load_addr;
673 				pad_begin = 0;
674 			}
675 
676 			if (seg->flags & PF_W)
677 				flags |= PTA_SYSTEM_MAP_FLAG_WRITEABLE;
678 			else
679 				flags |= PTA_SYSTEM_MAP_FLAG_SHAREABLE;
680 			if (seg->flags & PF_X)
681 				flags |= PTA_SYSTEM_MAP_FLAG_EXECUTABLE;
682 			if (!(seg->flags & PF_R))
683 				err(TEE_ERROR_NOT_SUPPORTED,
684 				    "Segment must be readable");
685 			if (flags & PTA_SYSTEM_MAP_FLAG_WRITEABLE) {
686 				res = sys_map_zi(memsz, 0, &va, pad_begin,
687 						 pad_end);
688 				if (pad_begin && res == TEE_ERROR_OUT_OF_MEMORY)
689 					res = sys_map_zi(memsz, 0, &va, 0,
690 							 pad_end);
691 				if (res)
692 					err(res, "sys_map_zi");
693 				res = sys_copy_from_ta_bin((void *)va, filesz,
694 							   elf->handle, offset);
695 				if (res)
696 					err(res, "sys_copy_from_ta_bin");
697 			} else {
698 				res = sys_map_ta_bin(&va, filesz, flags,
699 						     elf->handle, offset,
700 						     pad_begin, pad_end);
701 				if (pad_begin && res == TEE_ERROR_OUT_OF_MEMORY)
702 					res = sys_map_ta_bin(&va, filesz, flags,
703 							     elf->handle,
704 							     offset, 0,
705 							     pad_end);
706 				if (res)
707 					err(res, "sys_map_ta_bin");
708 			}
709 
710 			if (!elf->load_addr)
711 				elf->load_addr = va;
712 			elf->max_addr = roundup(va + filesz);
713 			elf->max_offs += filesz;
714 		}
715 	}
716 }
717 
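/*
 * Parse and adjust the load segments. If the ELF header is part of the
 * first load segment (a shared library) the page mapped by init_elf()
 * is moved to a virtual address range large enough for the whole image.
 */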
718 static void map_segments(struct ta_elf *elf)
719 {
720 	TEE_Result res = TEE_SUCCESS;
721 
722 	parse_load_segments(elf);
723 	adjust_segments(elf);
724 	if (TAILQ_FIRST(&elf->segs)->offset < SMALL_PAGE_SIZE) {
725 		vaddr_t va = 0;
726 		size_t sz = elf->max_addr - elf->load_addr;
727 		struct segment *seg = TAILQ_LAST(&elf->segs, segment_head);
728 		size_t pad_begin = get_pad_begin();
729 
730 		/*
731 		 * We're loading a library; if not, other parts of the code
732 		 * need to be updated too.
733 		 */
734 		assert(!elf->is_main);
735 
736 		/*
737 		 * Now that we know how much virtual memory is needed, move
738 		 * the already mapped part to a location which can
739 		 * accommodate us.
740 		 */
741 		res = sys_remap(elf->load_addr, &va, sz, pad_begin,
742 				roundup(seg->vaddr + seg->memsz));
743 		if (res == TEE_ERROR_OUT_OF_MEMORY)
744 			res = sys_remap(elf->load_addr, &va, sz, 0,
745 					roundup(seg->vaddr + seg->memsz));
746 		if (res)
747 			err(res, "sys_remap");
748 		elf->ehdr_addr = va;
749 		elf->load_addr = va;
750 		elf->max_addr = va + sz;
751 		elf->phdr = (void *)(va + elf->e_phoff);
752 	}
753 }
754 
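/*
 * Scan a PT_DYNAMIC segment for DT_NEEDED entries. Each needed library
 * is identified by a UUID string in the dynamic string table and is
 * queued for loading.
 */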
755 static void add_deps_from_segment(struct ta_elf *elf, unsigned int type,
756 				  vaddr_t addr, size_t memsz)
757 {
758 	size_t dyn_entsize = 0;
759 	size_t num_dyns = 0;
760 	size_t n = 0;
761 	unsigned int tag = 0;
762 	size_t val = 0;
763 	TEE_UUID uuid = { };
764 	char *str_tab = NULL;
765 
766 	if (type != PT_DYNAMIC)
767 		return;
768 
769 	check_phdr_in_range(elf, type, addr, memsz);
770 
771 	if (elf->is_32bit)
772 		dyn_entsize = sizeof(Elf32_Dyn);
773 	else
774 		dyn_entsize = sizeof(Elf64_Dyn);
775 
776 	assert(!(memsz % dyn_entsize));
777 	num_dyns = memsz / dyn_entsize;
778 
779 	for (n = 0; n < num_dyns; n++) {
780 		read_dyn(elf, addr, n, &tag, &val);
781 		if (tag == DT_STRTAB) {
782 			str_tab = (char *)(val + elf->load_addr);
783 			break;
784 		}
785 	}
786 
787 	for (n = 0; n < num_dyns; n++) {
788 		read_dyn(elf, addr, n, &tag, &val);
789 		if (tag != DT_NEEDED)
790 			continue;
791 		tee_uuid_from_str(&uuid, str_tab + val);
792 		queue_elf(&uuid);
793 	}
794 }
795 
796 static void add_dependencies(struct ta_elf *elf)
797 {
798 	size_t n = 0;
799 
800 	if (elf->is_32bit) {
801 		Elf32_Phdr *phdr = elf->phdr;
802 
803 		for (n = 0; n < elf->e_phnum; n++)
804 			add_deps_from_segment(elf, phdr[n].p_type,
805 					      phdr[n].p_vaddr, phdr[n].p_memsz);
806 	} else {
807 		Elf64_Phdr *phdr = elf->phdr;
808 
809 		for (n = 0; n < elf->e_phnum; n++)
810 			add_deps_from_segment(elf, phdr[n].p_type,
811 					      phdr[n].p_vaddr, phdr[n].p_memsz);
812 	}
813 }
814 
815 static void copy_section_headers(struct ta_elf *elf)
816 {
817 	TEE_Result res = TEE_SUCCESS;
818 	size_t sz = 0;
819 	size_t offs = 0;
820 
821 	if (MUL_OVERFLOW(elf->e_shnum, elf->e_shentsize, &sz))
822 		err(TEE_ERROR_BAD_FORMAT, "Shdr size overflow");
823 
824 	elf->shdr = malloc(sz);
825 	if (!elf->shdr)
826 		err(TEE_ERROR_OUT_OF_MEMORY, "malloc");
827 
828 	/*
829 	 * We're assuming that the section headers come after the load segments,
830 	 * but if it's a very small dynamically linked library the section
831 	 * headers can still end up (partially?) in the first mapped page.
832 	 */
833 	if (elf->e_shoff < SMALL_PAGE_SIZE) {
834 		assert(!elf->is_main);
835 		offs = MIN(SMALL_PAGE_SIZE - elf->e_shoff, sz);
836 		memcpy(elf->shdr, (void *)(elf->load_addr + elf->e_shoff),
837 		       offs);
838 	}
839 
840 	if (offs < sz) {
841 		res = sys_copy_from_ta_bin((uint8_t *)elf->shdr + offs,
842 					   sz - offs, elf->handle,
843 					   elf->e_shoff + offs);
844 		if (res)
845 			err(res, "sys_copy_from_ta_bin");
846 	}
847 }
848 
849 static void close_handle(struct ta_elf *elf)
850 {
851 	TEE_Result res = sys_close_ta_bin(elf->handle);
852 
853 	if (res)
854 		err(res, "sys_close_ta_bin");
855 	elf->handle = -1;
856 }
857 
858 static void clean_elf_load_main(struct ta_elf *elf)
859 {
860 	TEE_Result res = TEE_SUCCESS;
861 
862 	/*
863 	 * Clean up from the last attempt to load.
864 	 */
865 	res = sys_unmap(elf->ehdr_addr, SMALL_PAGE_SIZE);
866 	if (res)
867 		err(res, "sys_unmap");
868 
869 	while (!TAILQ_EMPTY(&elf->segs)) {
870 		struct segment *seg = TAILQ_FIRST(&elf->segs);
871 		vaddr_t va = 0;
872 		size_t num_bytes = 0;
873 
874 		va = rounddown(elf->load_addr + seg->vaddr);
875 		if (seg->remapped_writeable)
876 			num_bytes = roundup(seg->vaddr + seg->memsz) -
877 				    rounddown(seg->vaddr);
878 		else
879 			num_bytes = seg->memsz;
880 
881 		res = sys_unmap(va, num_bytes);
882 		if (res)
883 			err(res, "sys_unmap");
884 
885 		TAILQ_REMOVE(&elf->segs, seg, link);
886 		free(seg);
887 	}
888 
889 	free(elf->shdr);
890 	memset(&elf->is_32bit, 0,
891 	       (vaddr_t)&elf->uuid - (vaddr_t)&elf->is_32bit);
892 
893 	TAILQ_INIT(&elf->segs);
894 }
895 
896 static void load_main(struct ta_elf *elf)
897 {
898 	init_elf(elf);
899 	map_segments(elf);
900 	populate_segments(elf);
901 	add_dependencies(elf);
902 	copy_section_headers(elf);
903 	save_symtab(elf);
904 	close_handle(elf);
905 
906 	elf->head = (struct ta_head *)elf->load_addr;
907 	if (elf->head->depr_entry != UINT64_MAX) {
908 		/*
909 		 * Legacy TAs set their entry point in ta_head. For
910 		 * non-legacy TAs the ELF entry point is used instead,
911 		 * leaving the ta_head entry point set to UINT64_MAX to
912 		 * indicate that it's not used.
913 		 *
914 		 * NB, everything before the commit a73b5878c89d ("Replace
915 		 * ta_head.entry with elf entry") is considered a legacy TA
916 		 * by ldelf.
917 		 *
918 		 * Legacy TAs cannot be mapped with shared memory segments,
919 		 * so restart the mapping if it turns out we're loading a
920 		 * legacy TA.
921 		 */
922 
923 		DMSG("Reloading TA %pUl as legacy TA", (void *)&elf->uuid);
924 		clean_elf_load_main(elf);
925 		elf->is_legacy = true;
926 		init_elf(elf);
927 		map_segments(elf);
928 		populate_segments_legacy(elf);
929 		add_dependencies(elf);
930 		copy_section_headers(elf);
931 		save_symtab(elf);
932 		close_handle(elf);
933 		elf->head = (struct ta_head *)elf->load_addr;
934 		/*
935 		 * Check that the TA is still a legacy TA; if it isn't, give
936 		 * up now since we're likely under attack.
937 		 */
938 		if (elf->head->depr_entry == UINT64_MAX)
939 			err(TEE_ERROR_GENERIC,
940 			    "TA %pUl was changed on disk to non-legacy",
941 			    (void *)&elf->uuid);
942 	}
943 
944 }
945 
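/*
 * Load the main TA ELF, map its stack and report bitness, TA flags and
 * the initial stack pointer back to the caller.
 */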
946 void ta_elf_load_main(const TEE_UUID *uuid, uint32_t *is_32bit, uint64_t *sp,
947 		      uint32_t *ta_flags)
948 {
949 	struct ta_elf *elf = queue_elf(uuid);
950 	vaddr_t va = 0;
951 	TEE_Result res = TEE_SUCCESS;
952 
953 	assert(elf);
954 	elf->is_main = true;
955 
956 	load_main(elf);
957 
958 	*is_32bit = elf->is_32bit;
959 	res = sys_map_zi(elf->head->stack_size, 0, &va, 0, 0);
960 	if (res)
961 		err(res, "sys_map_zi stack");
962 
963 	if (elf->head->flags & ~TA_FLAGS_MASK)
964 		err(TEE_ERROR_BAD_FORMAT, "Invalid TA flags(s) %#"PRIx32,
965 		    elf->head->flags & ~TA_FLAGS_MASK);
966 
967 	*ta_flags = elf->head->flags;
968 	*sp = va + elf->head->stack_size;
969 	ta_stack = va;
970 	ta_stack_size = elf->head->stack_size;
971 }
972 
973 void ta_elf_finalize_load_main(uint64_t *entry)
974 {
975 	struct ta_elf *elf = TAILQ_FIRST(&main_elf_queue);
976 	TEE_Result res = TEE_SUCCESS;
977 
978 	assert(elf->is_main);
979 
980 	res = ta_elf_set_init_fini_info(elf->is_32bit);
981 	if (res)
982 		err(res, "ta_elf_set_init_fini_info");
983 
984 	if (elf->is_legacy)
985 		*entry = elf->head->depr_entry;
986 	else
987 		*entry = elf->e_entry + elf->load_addr;
988 }
989 
990 
991 void ta_elf_load_dependency(struct ta_elf *elf, bool is_32bit)
992 {
993 	if (elf->is_main)
994 		return;
995 
996 	init_elf(elf);
997 	if (elf->is_32bit != is_32bit)
998 		err(TEE_ERROR_BAD_FORMAT, "ELF %pUl is %sbit (expected %sbit)",
999 		    (void *)&elf->uuid, elf->is_32bit ? "32" : "64",
1000 		    is_32bit ? "32" : "64");
1001 
1002 	map_segments(elf);
1003 	populate_segments(elf);
1004 	add_dependencies(elf);
1005 	copy_section_headers(elf);
1006 	save_symtab(elf);
1007 	close_handle(elf);
1008 }
1009 
1010 void ta_elf_finalize_mappings(struct ta_elf *elf)
1011 {
1012 	TEE_Result res = TEE_SUCCESS;
1013 	struct segment *seg = NULL;
1014 
1015 	if (!elf->is_legacy)
1016 		return;
1017 
1018 	TAILQ_FOREACH(seg, &elf->segs, link) {
1019 		vaddr_t va = elf->load_addr + seg->vaddr;
1020 		uint32_t flags = 0;
1021 
1022 		if (seg->flags & PF_W)
1023 			flags |= PTA_SYSTEM_MAP_FLAG_WRITEABLE;
1024 		if (seg->flags & PF_X)
1025 			flags |= PTA_SYSTEM_MAP_FLAG_EXECUTABLE;
1026 
1027 		res = sys_set_prot(va, seg->memsz, flags);
1028 		if (res)
1029 			err(res, "sys_set_prot");
1030 	}
1031 }
1032 
1033 static void __printf(3, 4) print_wrapper(void *pctx, print_func_t print_func,
1034 					 const char *fmt, ...)
1035 {
1036 	va_list ap;
1037 
1038 	va_start(ap, fmt);
1039 	print_func(pctx, fmt, ap);
1040 	va_end(ap);
1041 }
1042 
1043 static void print_seg(void *pctx, print_func_t print_func,
1044 		      size_t idx __maybe_unused, int elf_idx __maybe_unused,
1045 		      vaddr_t va __maybe_unused, paddr_t pa __maybe_unused,
1046 		      size_t sz __maybe_unused, uint32_t flags)
1047 {
1048 	int width __maybe_unused = 8;
1049 	char desc[14] __maybe_unused = "";
1050 	char flags_str[] __maybe_unused = "----";
1051 
1052 	if (elf_idx > -1) {
1053 		snprintf(desc, sizeof(desc), " [%d]", elf_idx);
1054 	} else {
1055 		if (flags & DUMP_MAP_EPHEM)
1056 			snprintf(desc, sizeof(desc), " (param)");
1057 		if (flags & DUMP_MAP_LDELF)
1058 			snprintf(desc, sizeof(desc), " (ldelf)");
1059 		if (va == ta_stack)
1060 			snprintf(desc, sizeof(desc), " (stack)");
1061 	}
1062 
1063 	if (flags & DUMP_MAP_READ)
1064 		flags_str[0] = 'r';
1065 	if (flags & DUMP_MAP_WRITE)
1066 		flags_str[1] = 'w';
1067 	if (flags & DUMP_MAP_EXEC)
1068 		flags_str[2] = 'x';
1069 	if (flags & DUMP_MAP_SECURE)
1070 		flags_str[3] = 's';
1071 
1072 	print_wrapper(pctx, print_func,
1073 		      "region %2zu: va 0x%0*"PRIxVA" pa 0x%0*"PRIxPA" size 0x%06zx flags %s%s\n",
1074 		      idx, width, va, width, pa, sz, flags_str, desc);
1075 }
1076 
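/*
 * Advance to the next segment of the current ELF, or to the first
 * segment of the ELF with the next higher load address. Returns false
 * when all segments have been visited.
 */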
1077 static bool get_next_in_order(struct ta_elf_queue *elf_queue,
1078 			      struct ta_elf **elf, struct segment **seg,
1079 			      size_t *elf_idx)
1080 {
1081 	struct ta_elf *e = NULL;
1082 	struct segment *s = NULL;
1083 	size_t idx = 0;
1084 	vaddr_t va = 0;
1085 	struct ta_elf *e2 = NULL;
1086 	size_t i2 = 0;
1087 
1088 	assert(elf && seg && elf_idx);
1089 	e = *elf;
1090 	s = *seg;
1091 	assert((e == NULL && s == NULL) || (e != NULL && s != NULL));
1092 
1093 	if (s) {
1094 		s = TAILQ_NEXT(s, link);
1095 		if (s) {
1096 			*seg = s;
1097 			return true;
1098 		}
1099 	}
1100 
1101 	if (e)
1102 		va = e->load_addr;
1103 
1104 	/* Find the ELF with next load address */
1105 	e = NULL;
1106 	TAILQ_FOREACH(e2, elf_queue, link) {
1107 		if (e2->load_addr > va) {
1108 			if (!e || e2->load_addr < e->load_addr) {
1109 				e = e2;
1110 				idx = i2;
1111 			}
1112 		}
1113 		i2++;
1114 	}
1115 	if (!e)
1116 		return false;
1117 
1118 	*elf = e;
1119 	*seg = TAILQ_FIRST(&e->segs);
1120 	*elf_idx = idx;
1121 	return true;
1122 }
1123 
1124 void ta_elf_print_mappings(void *pctx, print_func_t print_func,
1125 			   struct ta_elf_queue *elf_queue, size_t num_maps,
1126 			   struct dump_map *maps, vaddr_t mpool_base)
1127 {
1128 	struct segment *seg = NULL;
1129 	struct ta_elf *elf = NULL;
1130 	size_t elf_idx = 0;
1131 	size_t idx = 0;
1132 	size_t map_idx = 0;
1133 
1134 	/*
1135 	 * Loop over all segments and maps, printing virtual addresses in
1136 	 * order. A segment has priority if the virtual address is present
1137 	 * in both a map and a segment.
1138 	 */
1139 	get_next_in_order(elf_queue, &elf, &seg, &elf_idx);
1140 	while (true) {
1141 		vaddr_t va = -1;
1142 		size_t sz = 0;
1143 		uint32_t flags = DUMP_MAP_SECURE;
1144 		size_t offs = 0;
1145 
1146 		if (seg) {
1147 			va = rounddown(seg->vaddr + elf->load_addr);
1148 			sz = roundup(seg->vaddr + seg->memsz) -
1149 				     rounddown(seg->vaddr);
1150 		}
1151 
1152 		while (map_idx < num_maps && maps[map_idx].va <= va) {
1153 			uint32_t f = 0;
1154 
1155 			/* If there's a match, it should be the same map */
1156 			if (maps[map_idx].va == va) {
1157 				/*
1158 				 * In shared libraries the first page is
1159 				 * mapped separately with the rest of that
1160 				 * segment following back to back in a
1161 				 * separate entry.
1162 				 */
1163 				if (map_idx + 1 < num_maps &&
1164 				    maps[map_idx].sz == SMALL_PAGE_SIZE) {
1165 					vaddr_t next_va = maps[map_idx].va +
1166 							  maps[map_idx].sz;
1167 					size_t comb_sz = maps[map_idx].sz +
1168 							 maps[map_idx + 1].sz;
1169 
1170 					if (next_va == maps[map_idx + 1].va &&
1171 					    comb_sz == sz &&
1172 					    maps[map_idx].flags ==
1173 					    maps[map_idx + 1].flags) {
1174 						/* Skip this and next entry */
1175 						map_idx += 2;
1176 						continue;
1177 					}
1178 				}
1179 				assert(maps[map_idx].sz == sz);
1180 			} else if (maps[map_idx].va < va) {
1181 				if (maps[map_idx].va == mpool_base)
1182 					f |= DUMP_MAP_LDELF;
1183 				print_seg(pctx, print_func, idx, -1,
1184 					  maps[map_idx].va, maps[map_idx].pa,
1185 					  maps[map_idx].sz,
1186 					  maps[map_idx].flags | f);
1187 				idx++;
1188 			}
1189 			map_idx++;
1190 		}
1191 
1192 		if (!seg)
1193 			break;
1194 
1195 		offs = rounddown(seg->offset);
1196 		if (seg->flags & PF_R)
1197 			flags |= DUMP_MAP_READ;
1198 		if (seg->flags & PF_W)
1199 			flags |= DUMP_MAP_WRITE;
1200 		if (seg->flags & PF_X)
1201 			flags |= DUMP_MAP_EXEC;
1202 
1203 		print_seg(pctx, print_func, idx, elf_idx, va, offs, sz, flags);
1204 		idx++;
1205 
1206 		if (!get_next_in_order(elf_queue, &elf, &seg, &elf_idx))
1207 			seg = NULL;
1208 	}
1209 
1210 	elf_idx = 0;
1211 	TAILQ_FOREACH(elf, elf_queue, link) {
1212 		print_wrapper(pctx, print_func,
1213 			      " [%zu] %pUl @ 0x%0*"PRIxVA"\n",
1214 			      elf_idx, (void *)&elf->uuid, 8, elf->load_addr);
1215 		elf_idx++;
1216 	}
1217 }
1218 
1219 #ifdef CFG_UNWIND
1220 void ta_elf_stack_trace_a32(uint32_t regs[16])
1221 {
1222 	struct unwind_state_arm32 state = { };
1223 
1224 	memcpy(state.registers, regs, sizeof(state.registers));
1225 	print_stack_arm32(&state, ta_stack, ta_stack_size);
1226 }
1227 
1228 void ta_elf_stack_trace_a64(uint64_t fp, uint64_t sp, uint64_t pc)
1229 {
1230 	struct unwind_state_arm64 state = { .fp = fp, .sp = sp, .pc = pc };
1231 
1232 	print_stack_arm64(&state, ta_stack, ta_stack_size);
1233 }
1234 #endif
1235 
1236 TEE_Result ta_elf_add_library(const TEE_UUID *uuid)
1237 {
1238 	struct ta_elf *ta = TAILQ_FIRST(&main_elf_queue);
1239 	struct ta_elf *lib = ta_elf_find_elf(uuid);
1240 	struct ta_elf *elf = NULL;
1241 
1242 	if (lib)
1243 		return TEE_SUCCESS; /* Already mapped */
1244 
1245 	lib = queue_elf_helper(uuid);
1246 	if (!lib)
1247 		return TEE_ERROR_OUT_OF_MEMORY;
1248 
1249 	for (elf = lib; elf; elf = TAILQ_NEXT(elf, link))
1250 		ta_elf_load_dependency(elf, ta->is_32bit);
1251 
1252 	for (elf = lib; elf; elf = TAILQ_NEXT(elf, link)) {
1253 		ta_elf_relocate(elf);
1254 		ta_elf_finalize_mappings(elf);
1255 	}
1256 
1257 	for (elf = lib; elf; elf = TAILQ_NEXT(elf, link))
1258 		DMSG("ELF (%pUl) at %#"PRIxVA,
1259 		     (void *)&elf->uuid, elf->load_addr);
1260 
1261 	return ta_elf_set_init_fini_info(ta->is_32bit);
1262 }
1263 
1264 /* Get address/size of .init_array and .fini_array from the dynamic segment */
1265 static void get_init_fini_array(struct ta_elf *elf, unsigned int type,
1266 				vaddr_t addr, size_t memsz, vaddr_t *init,
1267 				size_t *init_cnt, vaddr_t *fini,
1268 				size_t *fini_cnt)
1269 {
1270 	size_t addrsz = 0;
1271 	size_t dyn_entsize = 0;
1272 	size_t num_dyns = 0;
1273 	size_t n = 0;
1274 	unsigned int tag = 0;
1275 	size_t val = 0;
1276 
1277 	assert(type == PT_DYNAMIC);
1278 
1279 	check_phdr_in_range(elf, type, addr, memsz);
1280 
1281 	if (elf->is_32bit) {
1282 		dyn_entsize = sizeof(Elf32_Dyn);
1283 		addrsz = 4;
1284 	} else {
1285 		dyn_entsize = sizeof(Elf64_Dyn);
1286 		addrsz = 8;
1287 	}
1288 
1289 	assert(!(memsz % dyn_entsize));
1290 	num_dyns = memsz / dyn_entsize;
1291 
1292 	for (n = 0; n < num_dyns; n++) {
1293 		read_dyn(elf, addr, n, &tag, &val);
1294 		if (tag == DT_INIT_ARRAY)
1295 			*init = val + elf->load_addr;
1296 		else if (tag == DT_FINI_ARRAY)
1297 			*fini = val + elf->load_addr;
1298 		else if (tag == DT_INIT_ARRAYSZ)
1299 			*init_cnt = val / addrsz;
1300 		else if (tag == DT_FINI_ARRAYSZ)
1301 			*fini_cnt = val / addrsz;
1302 	}
1303 }
1304 
1305 /* Get address/size of .init_array and .fini_array in @elf (if present) */
1306 static void elf_get_init_fini_array(struct ta_elf *elf, vaddr_t *init,
1307 				    size_t *init_cnt, vaddr_t *fini,
1308 				    size_t *fini_cnt)
1309 {
1310 	size_t n = 0;
1311 
1312 	if (elf->is_32bit) {
1313 		Elf32_Phdr *phdr = elf->phdr;
1314 
1315 		for (n = 0; n < elf->e_phnum; n++) {
1316 			if (phdr[n].p_type == PT_DYNAMIC) {
1317 				get_init_fini_array(elf, phdr[n].p_type,
1318 						    phdr[n].p_vaddr,
1319 						    phdr[n].p_memsz,
1320 						    init, init_cnt, fini,
1321 						    fini_cnt);
1322 				return;
1323 			}
1324 		}
1325 	} else {
1326 		Elf64_Phdr *phdr = elf->phdr;
1327 
1328 		for (n = 0; n < elf->e_phnum; n++) {
1329 			if (phdr[n].p_type == PT_DYNAMIC) {
1330 				get_init_fini_array(elf, phdr[n].p_type,
1331 						    phdr[n].p_vaddr,
1332 						    phdr[n].p_memsz,
1333 						    init, init_cnt, fini,
1334 						    fini_cnt);
1335 				return;
1336 			}
1337 		}
1338 	}
1339 }
1340 
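/*
 * Resize the __init_fini_info array in the TA to @cnt entries,
 * zero-initializing any newly added slots. Handles both the 32-bit and
 * the 64-bit layout of the structure.
 */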
1341 static TEE_Result realloc_ifs(vaddr_t va, size_t cnt, bool is_32bit)
1342 {
1343 	struct __init_fini_info32 *info32 = (struct __init_fini_info32 *)va;
1344 	struct __init_fini_info *info = (struct __init_fini_info *)va;
1345 	struct __init_fini32 *ifs32 = NULL;
1346 	struct __init_fini *ifs = NULL;
1347 	size_t prev_cnt = 0;
1348 	void *ptr = NULL;
1349 
1350 	if (is_32bit) {
1351 		ptr = (void *)(vaddr_t)info32->ifs;
1352 		ptr = realloc(ptr, cnt * sizeof(struct __init_fini32));
1353 		if (!ptr)
1354 			return TEE_ERROR_OUT_OF_MEMORY;
1355 		ifs32 = ptr;
1356 		prev_cnt = info32->size;
1357 		if (cnt > prev_cnt)
1358 			memset(ifs32 + prev_cnt, 0,
1359 			       (cnt - prev_cnt) * sizeof(*ifs32));
1360 		info32->ifs = (uint32_t)(vaddr_t)ifs32;
1361 		info32->size = cnt;
1362 	} else {
1363 		ptr = realloc(info->ifs, cnt * sizeof(struct __init_fini));
1364 		if (!ptr)
1365 			return TEE_ERROR_OUT_OF_MEMORY;
1366 		ifs = ptr;
1367 		prev_cnt = info->size;
1368 		if (cnt > prev_cnt)
1369 			memset(ifs + prev_cnt, 0,
1370 			       (cnt - prev_cnt) * sizeof(*ifs));
1371 		info->ifs = ifs;
1372 		info->size = cnt;
1373 	}
1374 
1375 	return TEE_SUCCESS;
1376 }
1377 
1378 static void fill_ifs(vaddr_t va, size_t idx, struct ta_elf *elf, bool is_32bit)
1379 {
1380 	struct __init_fini_info32 *info32 = (struct __init_fini_info32 *)va;
1381 	struct __init_fini_info *info = (struct __init_fini_info *)va;
1382 	struct __init_fini32 *ifs32 = NULL;
1383 	struct __init_fini *ifs = NULL;
1384 	size_t init_cnt = 0;
1385 	size_t fini_cnt = 0;
1386 	vaddr_t init = 0;
1387 	vaddr_t fini = 0;
1388 
1389 	if (is_32bit) {
1390 		assert(idx < info32->size);
1391 		ifs32 = &((struct __init_fini32 *)(vaddr_t)info32->ifs)[idx];
1392 
1393 		if (ifs32->flags & __IFS_VALID)
1394 			return;
1395 
1396 		elf_get_init_fini_array(elf, &init, &init_cnt, &fini,
1397 					&fini_cnt);
1398 
1399 		ifs32->init = (uint32_t)init;
1400 		ifs32->init_size = init_cnt;
1401 
1402 		ifs32->fini = (uint32_t)fini;
1403 		ifs32->fini_size = fini_cnt;
1404 
1405 		ifs32->flags |= __IFS_VALID;
1406 	} else {
1407 		assert(idx < info->size);
1408 		ifs = &info->ifs[idx];
1409 
1410 		if (ifs->flags & __IFS_VALID)
1411 			return;
1412 
1413 		elf_get_init_fini_array(elf, &init, &init_cnt, &fini,
1414 					&fini_cnt);
1415 
1416 		ifs->init = (void (**)(void))init;
1417 		ifs->init_size = init_cnt;
1418 
1419 		ifs->fini = (void (**)(void))fini;
1420 		ifs->fini_size = fini_cnt;
1421 
1422 		ifs->flags |= __IFS_VALID;
1423 	}
1424 }
1425 
1426 /*
1427  * Set or update __init_fini_info in the TA with information from the ELF
1428  * queue
1429  */
1430 TEE_Result ta_elf_set_init_fini_info(bool is_32bit)
1431 {
1432 	struct __init_fini_info *info = NULL;
1433 	TEE_Result res = TEE_SUCCESS;
1434 	struct ta_elf *elf = NULL;
1435 	vaddr_t info_va = 0;
1436 	size_t cnt = 0;
1437 
1438 	res = ta_elf_resolve_sym("__init_fini_info", &info_va, NULL);
1439 	if (res) {
1440 		if (res == TEE_ERROR_ITEM_NOT_FOUND) {
1441 			/* Older TA */
1442 			return TEE_SUCCESS;
1443 		}
1444 		return res;
1445 	}
1446 	assert(info_va);
1447 
1448 	info = (struct __init_fini_info *)info_va;
1449 	if (info->reserved)
1450 		return TEE_ERROR_NOT_SUPPORTED;
1451 
1452 	TAILQ_FOREACH(elf, &main_elf_queue, link)
1453 		cnt++;
1454 
1455 	/* Queue has at least one file (main) */
1456 	assert(cnt);
1457 
1458 	res = realloc_ifs(info_va, cnt, is_32bit);
1459 	if (res)
1460 		goto err;
1461 
1462 	cnt = 0;
1463 	TAILQ_FOREACH(elf, &main_elf_queue, link) {
1464 		fill_ifs(info_va, cnt, elf, is_32bit);
1465 		cnt++;
1466 	}
1467 
1468 	return TEE_SUCCESS;
1469 err:
1470 	free(info);
1471 	return res;
1472 }
1473