xref: /optee_os/ldelf/ta_elf.c (revision f5a70e3efb80be4b9bff2c9c811ddc139058e05a)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2019, Linaro Limited
4  */
5 
6 #include <assert.h>
7 #include <ctype.h>
8 #include <elf32.h>
9 #include <elf64.h>
10 #include <elf_common.h>
11 #include <ldelf.h>
12 #include <pta_system.h>
13 #include <stdio.h>
14 #include <stdlib.h>
15 #include <string_ext.h>
16 #include <string.h>
17 #include <tee_api_types.h>
18 #include <tee_internal_api_extensions.h>
19 #include <user_ta_header.h>
20 #include <utee_syscalls.h>
21 
22 #include "sys.h"
23 #include "ta_elf.h"
24 #include "unwind.h"
25 
26 static vaddr_t ta_stack;
27 static vaddr_t ta_stack_size;
28 
29 struct ta_elf_queue main_elf_queue = TAILQ_HEAD_INITIALIZER(main_elf_queue);
30 
31 static struct ta_elf *queue_elf_helper(const TEE_UUID *uuid)
32 {
33 	struct ta_elf *elf = calloc(1, sizeof(*elf));
34 
35 	if (!elf)
36 		return NULL;
37 
38 	TAILQ_INIT(&elf->segs);
39 
40 	elf->uuid = *uuid;
41 	TAILQ_INSERT_TAIL(&main_elf_queue, elf, link);
42 	return elf;
43 }
44 
45 static struct ta_elf *queue_elf(const TEE_UUID *uuid)
46 {
47 	struct ta_elf *elf = ta_elf_find_elf(uuid);
48 
49 	if (elf)
50 		return NULL;
51 
52 	elf = queue_elf_helper(uuid);
53 	if (!elf)
54 		err(TEE_ERROR_OUT_OF_MEMORY, "queue_elf_helper");
55 
56 	return elf;
57 }
58 
59 struct ta_elf *ta_elf_find_elf(const TEE_UUID *uuid)
60 {
61 	struct ta_elf *elf = NULL;
62 
63 	TAILQ_FOREACH(elf, &main_elf_queue, link)
64 		if (!memcmp(uuid, &elf->uuid, sizeof(*uuid)))
65 			return elf;
66 
67 	return NULL;
68 }
69 
70 static TEE_Result e32_parse_ehdr(struct ta_elf *elf, Elf32_Ehdr *ehdr)
71 {
72 	if (ehdr->e_ident[EI_VERSION] != EV_CURRENT ||
73 	    ehdr->e_ident[EI_CLASS] != ELFCLASS32 ||
74 	    ehdr->e_ident[EI_DATA] != ELFDATA2LSB ||
75 	    ehdr->e_ident[EI_OSABI] != ELFOSABI_NONE ||
76 	    ehdr->e_type != ET_DYN || ehdr->e_machine != EM_ARM ||
77 	    (ehdr->e_flags & EF_ARM_ABIMASK) != EF_ARM_ABI_VERSION ||
78 #ifndef CFG_WITH_VFP
79 	    (ehdr->e_flags & EF_ARM_ABI_FLOAT_HARD) ||
80 #endif
81 	    ehdr->e_phentsize != sizeof(Elf32_Phdr) ||
82 	    ehdr->e_shentsize != sizeof(Elf32_Shdr))
83 		return TEE_ERROR_BAD_FORMAT;
84 
85 	elf->is_32bit = true;
86 	elf->e_entry = ehdr->e_entry;
87 	elf->e_phoff = ehdr->e_phoff;
88 	elf->e_shoff = ehdr->e_shoff;
89 	elf->e_phnum = ehdr->e_phnum;
90 	elf->e_shnum = ehdr->e_shnum;
91 	elf->e_phentsize = ehdr->e_phentsize;
92 	elf->e_shentsize = ehdr->e_shentsize;
93 
94 	return TEE_SUCCESS;
95 }
96 
97 #ifdef ARM64
98 static TEE_Result e64_parse_ehdr(struct ta_elf *elf, Elf64_Ehdr *ehdr)
99 {
100 	if (ehdr->e_ident[EI_VERSION] != EV_CURRENT ||
101 	    ehdr->e_ident[EI_CLASS] != ELFCLASS64 ||
102 	    ehdr->e_ident[EI_DATA] != ELFDATA2LSB ||
103 	    ehdr->e_ident[EI_OSABI] != ELFOSABI_NONE ||
104 	    ehdr->e_type != ET_DYN || ehdr->e_machine != EM_AARCH64 ||
105 	    ehdr->e_flags || ehdr->e_phentsize != sizeof(Elf64_Phdr) ||
106 	    ehdr->e_shentsize != sizeof(Elf64_Shdr))
107 		return TEE_ERROR_BAD_FORMAT;
108 
109 
110 	elf->is_32bit = false;
111 	elf->e_entry = ehdr->e_entry;
112 	elf->e_phoff = ehdr->e_phoff;
113 	elf->e_shoff = ehdr->e_shoff;
114 	elf->e_phnum = ehdr->e_phnum;
115 	elf->e_shnum = ehdr->e_shnum;
116 	elf->e_phentsize = ehdr->e_phentsize;
117 	elf->e_shentsize = ehdr->e_shentsize;
118 
119 	return TEE_SUCCESS;
120 }
121 #else /*ARM64*/
122 static TEE_Result e64_parse_ehdr(struct ta_elf *elf __unused,
123 				 Elf64_Ehdr *ehdr __unused)
124 {
125 	return TEE_ERROR_NOT_SUPPORTED;
126 }
127 #endif /*ARM64*/
128 
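/*
 * Descriptive comment added for clarity: read entry @idx of the dynamic
 * section located at @addr (an address relative to the ELF load address)
 * and return its tag and value. 32-bit and 64-bit ELFs use different
 * entry layouts, hence the two branches below.
 */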
129 static void read_dyn(struct ta_elf *elf, vaddr_t addr,
130 		     size_t idx, unsigned int *tag, size_t *val)
131 {
132 	if (elf->is_32bit) {
133 		Elf32_Dyn *dyn = (Elf32_Dyn *)(addr + elf->load_addr);
134 
135 		*tag = dyn[idx].d_tag;
136 		*val = dyn[idx].d_un.d_val;
137 	} else {
138 		Elf64_Dyn *dyn = (Elf64_Dyn *)(addr + elf->load_addr);
139 
140 		*tag = dyn[idx].d_tag;
141 		*val = dyn[idx].d_un.d_val;
142 	}
143 }
144 
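/*
 * Descriptive comment added for clarity: if @type identifies a PT_DYNAMIC
 * segment, scan its entries for DT_HASH and record the hash table address
 * in elf->hashtab for later symbol lookups.
 */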
145 static void save_hashtab_from_segment(struct ta_elf *elf, unsigned int type,
146 				      vaddr_t addr, size_t memsz)
147 {
148 	size_t dyn_entsize = 0;
149 	size_t num_dyns = 0;
150 	size_t n = 0;
151 	unsigned int tag = 0;
152 	size_t val = 0;
153 
154 	if (type != PT_DYNAMIC)
155 		return;
156 
157 	if (elf->is_32bit)
158 		dyn_entsize = sizeof(Elf32_Dyn);
159 	else
160 		dyn_entsize = sizeof(Elf64_Dyn);
161 
162 	assert(!(memsz % dyn_entsize));
163 	num_dyns = memsz / dyn_entsize;
164 
165 	for (n = 0; n < num_dyns; n++) {
166 		read_dyn(elf, addr, n, &tag, &val);
167 		if (tag == DT_HASH) {
168 			elf->hashtab = (void *)(val + elf->load_addr);
169 			break;
170 		}
171 	}
172 }
173 
174 static void save_hashtab(struct ta_elf *elf)
175 {
176 	size_t n = 0;
177 
178 	if (elf->is_32bit) {
179 		Elf32_Phdr *phdr = elf->phdr;
180 
181 		for (n = 0; n < elf->e_phnum; n++)
182 			save_hashtab_from_segment(elf, phdr[n].p_type,
183 						  phdr[n].p_vaddr,
184 						  phdr[n].p_memsz);
185 	} else {
186 		Elf64_Phdr *phdr = elf->phdr;
187 
188 		for (n = 0; n < elf->e_phnum; n++)
189 			save_hashtab_from_segment(elf, phdr[n].p_type,
190 						  phdr[n].p_vaddr,
191 						  phdr[n].p_memsz);
192 	}
193 	assert(elf->hashtab);
194 }
195 
196 static void e32_save_symtab(struct ta_elf *elf, size_t tab_idx)
197 {
198 	Elf32_Shdr *shdr = elf->shdr;
199 	size_t str_idx = shdr[tab_idx].sh_link;
200 
201 	elf->dynsymtab = (void *)(shdr[tab_idx].sh_addr + elf->load_addr);
202 	assert(!(shdr[tab_idx].sh_size % sizeof(Elf32_Sym)));
203 	elf->num_dynsyms = shdr[tab_idx].sh_size / sizeof(Elf32_Sym);
204 
205 	elf->dynstr = (void *)(shdr[str_idx].sh_addr + elf->load_addr);
206 	elf->dynstr_size = shdr[str_idx].sh_size;
207 }
208 
209 static void e64_save_symtab(struct ta_elf *elf, size_t tab_idx)
210 {
211 	Elf64_Shdr *shdr = elf->shdr;
212 	size_t str_idx = shdr[tab_idx].sh_link;
213 
214 	elf->dynsymtab = (void *)(vaddr_t)(shdr[tab_idx].sh_addr +
215 					   elf->load_addr);
216 	assert(!(shdr[tab_idx].sh_size % sizeof(Elf64_Sym)));
217 	elf->num_dynsyms = shdr[tab_idx].sh_size / sizeof(Elf64_Sym);
218 
219 	elf->dynstr = (void *)(vaddr_t)(shdr[str_idx].sh_addr + elf->load_addr);
220 	elf->dynstr_size = shdr[str_idx].sh_size;
221 }
222 
223 static void save_symtab(struct ta_elf *elf)
224 {
225 	size_t n = 0;
226 
227 	if (elf->is_32bit) {
228 		Elf32_Shdr *shdr = elf->shdr;
229 
230 		for (n = 0; n < elf->e_shnum; n++) {
231 			if (shdr[n].sh_type == SHT_DYNSYM) {
232 				e32_save_symtab(elf, n);
233 				break;
234 			}
235 		}
236 	} else {
237 		Elf64_Shdr *shdr = elf->shdr;
238 
239 		for (n = 0; n < elf->e_shnum; n++) {
240 			if (shdr[n].sh_type == SHT_DYNSYM) {
241 				e64_save_symtab(elf, n);
242 				break;
243 			}
244 		}
245 
246 	}
247 
248 	save_hashtab(elf);
249 }
250 
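/*
 * Descriptive comment added for clarity: open the TA binary and map its
 * first page so that the ELF header and the program headers (which must
 * fit within that first page) can be parsed. For libraries the mapped
 * page is part of a load segment and becomes the start of the final
 * mapping.
 */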
251 static void init_elf(struct ta_elf *elf)
252 {
253 	TEE_Result res = TEE_SUCCESS;
254 	vaddr_t va = 0;
255 	uint32_t flags = PTA_SYSTEM_MAP_FLAG_SHAREABLE;
256 
257 	res = sys_open_ta_bin(&elf->uuid, &elf->handle);
258 	if (res)
259 		err(res, "sys_open_ta_bin(%pUl)", (void *)&elf->uuid);
260 
261 	/*
262 	 * Map it read-only executable when we're loading a library where
263 	 * the ELF header is included in a load segment.
264 	 */
265 	if (!elf->is_main)
266 		flags |= PTA_SYSTEM_MAP_FLAG_EXECUTABLE;
267 	res = sys_map_ta_bin(&va, SMALL_PAGE_SIZE, flags, elf->handle, 0, 0, 0);
268 	if (res)
269 		err(res, "sys_map_ta_bin");
270 	elf->ehdr_addr = va;
271 	if (!elf->is_main) {
272 		elf->load_addr = va;
273 		elf->max_addr = va + SMALL_PAGE_SIZE;
274 		elf->max_offs = SMALL_PAGE_SIZE;
275 	}
276 
277 	if (!IS_ELF(*(Elf32_Ehdr *)va))
278 		err(TEE_ERROR_BAD_FORMAT, "TA is not an ELF");
279 
280 	res = e32_parse_ehdr(elf, (void *)va);
281 	if (res == TEE_ERROR_BAD_FORMAT)
282 		res = e64_parse_ehdr(elf, (void *)va);
283 	if (res)
284 		err(res, "Cannot parse ELF");
285 
286 	if (elf->e_phoff + elf->e_phnum * elf->e_phentsize > SMALL_PAGE_SIZE)
287 		err(TEE_ERROR_NOT_SUPPORTED, "Cannot read program headers");
288 
289 	elf->phdr = (void *)(va + elf->e_phoff);
290 }
291 
292 static size_t roundup(size_t v)
293 {
294 	return ROUNDUP(v, SMALL_PAGE_SIZE);
295 }
296 
297 static size_t rounddown(size_t v)
298 {
299 	return ROUNDDOWN(v, SMALL_PAGE_SIZE);
300 }
301 
302 static void add_segment(struct ta_elf *elf, size_t offset, size_t vaddr,
303 			size_t filesz, size_t memsz, size_t flags, size_t align)
304 {
305 	struct segment *seg = calloc(1, sizeof(*seg));
306 
307 	if (!seg)
308 		err(TEE_ERROR_OUT_OF_MEMORY, "calloc");
309 
310 	seg->offset = offset;
311 	seg->vaddr = vaddr;
312 	seg->filesz = filesz;
313 	seg->memsz = memsz;
314 	seg->flags = flags;
315 	seg->align = align;
316 
317 	TAILQ_INSERT_TAIL(&elf->segs, seg, link);
318 }
319 
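/*
 * Descriptive comment added for clarity: record each PT_LOAD program
 * header as a struct segment in elf->segs. For 32-bit ELFs the
 * PT_ARM_EXIDX range is recorded as well, since it is used for stack
 * unwinding.
 */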
320 static void parse_load_segments(struct ta_elf *elf)
321 {
322 	size_t n = 0;
323 
324 	if (elf->is_32bit) {
325 		Elf32_Phdr *phdr = elf->phdr;
326 
327 		for (n = 0; n < elf->e_phnum; n++)
328 			if (phdr[n].p_type == PT_LOAD) {
329 				add_segment(elf, phdr[n].p_offset,
330 					    phdr[n].p_vaddr, phdr[n].p_filesz,
331 					    phdr[n].p_memsz, phdr[n].p_flags,
332 					    phdr[n].p_align);
333 			} else if (phdr[n].p_type == PT_ARM_EXIDX) {
334 				elf->exidx_start = phdr[n].p_vaddr;
335 				elf->exidx_size = phdr[n].p_filesz;
336 			}
337 	} else {
338 		Elf64_Phdr *phdr = elf->phdr;
339 
340 		for (n = 0; n < elf->e_phnum; n++)
341 			if (phdr[n].p_type == PT_LOAD)
342 				add_segment(elf, phdr[n].p_offset,
343 					    phdr[n].p_vaddr, phdr[n].p_filesz,
344 					    phdr[n].p_memsz, phdr[n].p_flags,
345 					    phdr[n].p_align);
346 	}
347 }
348 
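/*
 * Descriptive comment added for clarity: copy the file data of a
 * remapped writeable segment into place. Bytes already present in the
 * pages mapped so far are copied from memory, the rest is read from the
 * TA binary.
 */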
349 static void copy_remapped_to(struct ta_elf *elf, const struct segment *seg)
350 {
351 	uint8_t *dst = (void *)(seg->vaddr + elf->load_addr);
352 	size_t n = 0;
353 	size_t offs = seg->offset;
354 	size_t num_bytes = seg->filesz;
355 
356 	if (offs < elf->max_offs) {
357 		n = MIN(elf->max_offs - offs, num_bytes);
358 		memcpy(dst, (void *)(elf->max_addr + offs - elf->max_offs), n);
359 		dst += n;
360 		offs += n;
361 		num_bytes -= n;
362 	}
363 
364 	if (num_bytes) {
365 		TEE_Result res = sys_copy_from_ta_bin(dst, num_bytes,
366 						      elf->handle, offs);
367 
368 		if (res)
369 			err(res, "sys_copy_from_ta_bin");
370 		elf->max_offs += offs;
371 	}
372 }
373 
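/*
 * Descriptive comment added for clarity: sanity check the parsed load
 * segments and adjust them for mapping. A segment sharing a page with
 * the previous segment is either merged with it (case 1 below) or marked
 * remapped_writeable (case 2 below), and segment addresses and offsets
 * are rounded down to page boundaries.
 */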
374 static void adjust_segments(struct ta_elf *elf)
375 {
376 	struct segment *seg = NULL;
377 	struct segment *prev_seg = NULL;
378 	size_t prev_end_addr = 0;
379 	size_t align = 0;
380 	size_t mask = 0;
381 
382 	/* Sanity check */
383 	TAILQ_FOREACH(seg, &elf->segs, link) {
384 		size_t dummy __maybe_unused = 0;
385 
386 		assert(seg->align >= SMALL_PAGE_SIZE);
387 		assert(!ADD_OVERFLOW(seg->vaddr, seg->memsz, &dummy));
388 		assert(seg->filesz <= seg->memsz);
389 		assert((seg->offset & SMALL_PAGE_MASK) ==
390 		       (seg->vaddr & SMALL_PAGE_MASK));
391 
392 		prev_seg = TAILQ_PREV(seg, segment_head, link);
393 		if (prev_seg) {
394 			assert(seg->vaddr >= prev_seg->vaddr + prev_seg->memsz);
395 			assert(seg->offset >=
396 			       prev_seg->offset + prev_seg->filesz);
397 		}
398 		if (!align)
399 			align = seg->align;
400 		assert(align == seg->align);
401 	}
402 
403 	mask = align - 1;
404 
405 	seg = TAILQ_FIRST(&elf->segs);
406 	if (seg)
407 		seg = TAILQ_NEXT(seg, link);
408 	while (seg) {
409 		prev_seg = TAILQ_PREV(seg, segment_head, link);
410 		prev_end_addr = prev_seg->vaddr + prev_seg->memsz;
411 
412 		/*
413 		 * This segment may overlap with the last "page" in the
414 		 * previous segment in two different ways:
415 		 * 1. Virtual address (and offset) overlaps =>
416 		 *    Permissions need to be merged. The SMALL_PAGE_MASK bits
417 		 *    of the offset must match those of the vaddr, and both
418 		 *    must line up with the previous segment.
419 		 *
420 		 * 2. Only offset overlaps =>
421 		 *    The same page in the ELF is mapped at two different
422 		 *    virtual addresses. As a limitation this segment must
423 		 *    be mapped as writeable.
424 		 */
425 
426 		/* Case 1. */
427 		if (rounddown(seg->vaddr) < prev_end_addr) {
428 			assert((seg->vaddr & mask) == (seg->offset & mask));
429 			assert(prev_seg->memsz == prev_seg->filesz);
430 
431 			/*
432 			 * Merge the segments and their permissions.
433 			 * Note that there may be a small hole between the
434 			 * two segments.
435 			 */
436 			prev_seg->filesz = seg->vaddr + seg->filesz -
437 					   prev_seg->vaddr;
438 			prev_seg->memsz = seg->vaddr + seg->memsz -
439 					   prev_seg->vaddr;
440 			prev_seg->flags |= seg->flags;
441 
442 			TAILQ_REMOVE(&elf->segs, seg, link);
443 			free(seg);
444 			seg = TAILQ_NEXT(prev_seg, link);
445 			continue;
446 		}
447 
448 		/* Case 2. */
449 		if ((seg->offset & mask) &&
450 		    rounddown(seg->offset) <
451 		    (prev_seg->offset + prev_seg->filesz)) {
452 
453 			assert(seg->flags & PF_W);
454 			seg->remapped_writeable = true;
455 		}
456 
457 		/*
458 		 * No overlap, but we may need to align address, offset and
459 		 * size.
460 		 */
461 		seg->filesz += seg->vaddr - rounddown(seg->vaddr);
462 		seg->memsz += seg->vaddr - rounddown(seg->vaddr);
463 		seg->vaddr = rounddown(seg->vaddr);
464 		seg->offset = rounddown(seg->offset);
465 		seg = TAILQ_NEXT(seg, link);
466 	}
467 
468 }
469 
470 static void populate_segments_legacy(struct ta_elf *elf)
471 {
472 	TEE_Result res = TEE_SUCCESS;
473 	struct segment *seg = NULL;
474 	vaddr_t va = 0;
475 
476 	assert(elf->is_legacy);
477 	TAILQ_FOREACH(seg, &elf->segs, link) {
478 		struct segment *last_seg = TAILQ_LAST(&elf->segs, segment_head);
479 		size_t pad_end = roundup(last_seg->vaddr + last_seg->memsz -
480 					 seg->vaddr - seg->memsz);
481 		size_t num_bytes = roundup(seg->memsz);
482 
483 		if (!elf->load_addr)
484 			va = 0;
485 		else
486 			va = seg->vaddr + elf->load_addr;
487 
488 
489 		if (!(seg->flags & PF_R))
490 			err(TEE_ERROR_NOT_SUPPORTED,
491 			    "Segment must be readable");
492 
493 		res = sys_map_zi(num_bytes, 0, &va, 0, pad_end);
494 		if (res)
495 			err(res, "sys_map_zi");
496 		res = sys_copy_from_ta_bin((void *)va, seg->filesz,
497 					   elf->handle, seg->offset);
498 		if (res)
499 			err(res, "sys_copy_from_ta_bin");
500 
501 		if (!elf->load_addr)
502 			elf->load_addr = va;
503 		elf->max_addr = va + num_bytes;
504 		elf->max_offs = seg->offset + seg->filesz;
505 	}
506 }
507 
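/*
 * Descriptive comment added for clarity: return the amount of padding,
 * in bytes, to place before a new mapping. With CFG_TA_ASLR this is a
 * random number of pages in the range [CFG_TA_ASLR_MIN_OFFSET_PAGES,
 * CFG_TA_ASLR_MAX_OFFSET_PAGES), falling back to the minimum if no
 * random number can be obtained; without CFG_TA_ASLR it is zero.
 */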
508 static size_t get_pad_begin(void)
509 {
510 #ifdef CFG_TA_ASLR
511 	size_t min = CFG_TA_ASLR_MIN_OFFSET_PAGES;
512 	size_t max = CFG_TA_ASLR_MAX_OFFSET_PAGES;
513 	TEE_Result res = TEE_SUCCESS;
514 	uint32_t rnd32 = 0;
515 	size_t rnd = 0;
516 
517 	COMPILE_TIME_ASSERT(CFG_TA_ASLR_MIN_OFFSET_PAGES <
518 			    CFG_TA_ASLR_MAX_OFFSET_PAGES);
519 	if (max > min) {
520 		res = utee_cryp_random_number_generate(&rnd32, sizeof(rnd32));
521 		if (res) {
522 			DMSG("Random read failed: %#"PRIx32, res);
523 			return min * SMALL_PAGE_SIZE;
524 		}
525 		rnd = rnd32 % (max - min);
526 	}
527 
528 	return (min + rnd) * SMALL_PAGE_SIZE;
529 #else /*!CFG_TA_ASLR*/
530 	return 0;
531 #endif /*!CFG_TA_ASLR*/
532 }
533 
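/*
 * Descriptive comment added for clarity: map each segment according to
 * its ELF flags. Writeable segments are mapped as zero-initialized
 * memory and then filled with sys_copy_from_ta_bin(), read-only segments
 * are mapped directly from the TA binary. Segments overlapping the
 * already mapped first page of a library get special treatment, and
 * ASLR padding is applied when the first mapping is created.
 */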
534 static void populate_segments(struct ta_elf *elf)
535 {
536 	TEE_Result res = TEE_SUCCESS;
537 	struct segment *seg = NULL;
538 	vaddr_t va = 0;
539 	size_t pad_begin = 0;
540 
541 	assert(!elf->is_legacy);
542 	TAILQ_FOREACH(seg, &elf->segs, link) {
543 		struct segment *last_seg = TAILQ_LAST(&elf->segs, segment_head);
544 		size_t pad_end = roundup(last_seg->vaddr + last_seg->memsz -
545 					 seg->vaddr - seg->memsz);
546 
547 		if (seg->remapped_writeable) {
548 			size_t num_bytes = roundup(seg->vaddr + seg->memsz) -
549 					   rounddown(seg->vaddr);
550 
551 			assert(elf->load_addr);
552 			va = rounddown(elf->load_addr + seg->vaddr);
553 			assert(va >= elf->max_addr);
554 			res = sys_map_zi(num_bytes, 0, &va, 0, pad_end);
555 			if (res)
556 				err(res, "sys_map_zi");
557 
558 			copy_remapped_to(elf, seg);
559 			elf->max_addr = va + num_bytes;
560 		} else {
561 			uint32_t flags = 0;
562 			size_t filesz = seg->filesz;
563 			size_t memsz = seg->memsz;
564 			size_t offset = seg->offset;
565 			size_t vaddr = seg->vaddr;
566 
567 			if (offset < elf->max_offs) {
568 				/*
569 				 * We're in a load segment which overlaps
570 				 * with (or is covered by) the first page
571 				 * of a shared library.
572 				 */
573 				if (vaddr + filesz < SMALL_PAGE_SIZE) {
574 					size_t num_bytes = 0;
575 
576 					/*
577 					 * If this segment is completely
578 					 * covered, take the next one.
579 					 */
580 					if (vaddr + memsz <= SMALL_PAGE_SIZE)
581 						continue;
582 
583 					/*
584 					 * All data of the segment is
585 					 * loaded, but we need to zero
586 					 * extend it.
587 					 */
588 					va = elf->max_addr;
589 					num_bytes = roundup(vaddr + memsz) -
590 						    roundup(vaddr) -
591 						    SMALL_PAGE_SIZE;
592 					assert(num_bytes);
593 					res = sys_map_zi(num_bytes, 0, &va, 0,
594 							 0);
595 					if (res)
596 						err(res, "sys_map_zi");
597 					elf->max_addr = roundup(va + num_bytes);
598 					continue;
599 				}
600 
601 				/* Partial overlap, remove the first page. */
602 				vaddr += SMALL_PAGE_SIZE;
603 				filesz -= SMALL_PAGE_SIZE;
604 				memsz -= SMALL_PAGE_SIZE;
605 				offset += SMALL_PAGE_SIZE;
606 			}
607 
608 			if (!elf->load_addr) {
609 				va = 0;
610 				pad_begin = get_pad_begin();
611 				/*
612 				 * If mapping with pad_begin fails we'll
613 				 * retry without pad_begin, effectively
614 				 * disabling ASLR for the current ELF file.
615 				 */
616 			} else {
617 				va = vaddr + elf->load_addr;
618 				pad_begin = 0;
619 			}
620 
621 			if (seg->flags & PF_W)
622 				flags |= PTA_SYSTEM_MAP_FLAG_WRITEABLE;
623 			else
624 				flags |= PTA_SYSTEM_MAP_FLAG_SHAREABLE;
625 			if (seg->flags & PF_X)
626 				flags |= PTA_SYSTEM_MAP_FLAG_EXECUTABLE;
627 			if (!(seg->flags & PF_R))
628 				err(TEE_ERROR_NOT_SUPPORTED,
629 				    "Segment must be readable");
630 			if (flags & PTA_SYSTEM_MAP_FLAG_WRITEABLE) {
631 				res = sys_map_zi(memsz, 0, &va, pad_begin,
632 						 pad_end);
633 				if (pad_begin && res == TEE_ERROR_OUT_OF_MEMORY)
634 					res = sys_map_zi(memsz, 0, &va, 0,
635 							 pad_end);
636 				if (res)
637 					err(res, "sys_map_zi");
638 				res = sys_copy_from_ta_bin((void *)va, filesz,
639 							   elf->handle, offset);
640 				if (res)
641 					err(res, "sys_copy_from_ta_bin");
642 			} else {
643 				res = sys_map_ta_bin(&va, filesz, flags,
644 						     elf->handle, offset,
645 						     pad_begin, pad_end);
646 				if (pad_begin && res == TEE_ERROR_OUT_OF_MEMORY)
647 					res = sys_map_ta_bin(&va, filesz, flags,
648 							     elf->handle,
649 							     offset, 0,
650 							     pad_end);
651 				if (res)
652 					err(res, "sys_map_ta_bin");
653 			}
654 
655 			if (!elf->load_addr)
656 				elf->load_addr = va;
657 			elf->max_addr = roundup(va + filesz);
658 			elf->max_offs += filesz;
659 		}
660 	}
661 }
662 
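/*
 * Descriptive comment added for clarity: parse and adjust the load
 * segments. If the first load segment includes the ELF header (that is,
 * we're loading a library), the page mapped by init_elf() is remapped to
 * a location with enough room reserved for the entire ELF, applying the
 * ASLR padding before it.
 */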
663 static void map_segments(struct ta_elf *elf)
664 {
665 	TEE_Result res = TEE_SUCCESS;
666 
667 	parse_load_segments(elf);
668 	adjust_segments(elf);
669 	if (TAILQ_FIRST(&elf->segs)->offset < SMALL_PAGE_SIZE) {
670 		vaddr_t va = 0;
671 		size_t sz = elf->max_addr - elf->load_addr;
672 		struct segment *seg = TAILQ_LAST(&elf->segs, segment_head);
673 		size_t pad_begin = get_pad_begin();
674 
675 		/*
676 		 * We're expected to be loading a library here; if that ever
677 		 * changes, other parts of the code need to be updated too.
678 		 */
679 		assert(!elf->is_main);
680 
681 		/*
682 		 * Now that we know how much virtual memory is needed, move
683 		 * the already mapped part to a location which can
684 		 * accommodate us.
685 		 */
686 		res = sys_remap(elf->load_addr, &va, sz, pad_begin,
687 				roundup(seg->vaddr + seg->memsz));
688 		if (res == TEE_ERROR_OUT_OF_MEMORY)
689 			res = sys_remap(elf->load_addr, &va, sz, 0,
690 					roundup(seg->vaddr + seg->memsz));
691 		if (res)
692 			err(res, "sys_remap");
693 		elf->ehdr_addr = va;
694 		elf->load_addr = va;
695 		elf->max_addr = va + sz;
696 		elf->phdr = (void *)(va + elf->e_phoff);
697 	}
698 }
699 
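/*
 * Descriptive comment added for clarity: if @type identifies a
 * PT_DYNAMIC segment, locate its string table via DT_STRTAB and queue a
 * dependency ELF for each DT_NEEDED entry; the needed names are parsed
 * as UUID strings.
 */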
700 static void add_deps_from_segment(struct ta_elf *elf, unsigned int type,
701 				  vaddr_t addr, size_t memsz)
702 {
703 	size_t dyn_entsize = 0;
704 	size_t num_dyns = 0;
705 	size_t n = 0;
706 	unsigned int tag = 0;
707 	size_t val = 0;
708 	TEE_UUID uuid = { };
709 	char *str_tab = NULL;
710 
711 	if (type != PT_DYNAMIC)
712 		return;
713 
714 	if (elf->is_32bit)
715 		dyn_entsize = sizeof(Elf32_Dyn);
716 	else
717 		dyn_entsize = sizeof(Elf64_Dyn);
718 
719 	assert(!(memsz % dyn_entsize));
720 	num_dyns = memsz / dyn_entsize;
721 
722 	for (n = 0; n < num_dyns; n++) {
723 		read_dyn(elf, addr, n, &tag, &val);
724 		if (tag == DT_STRTAB) {
725 			str_tab = (char *)(val + elf->load_addr);
726 			break;
727 		}
728 	}
729 
730 	for (n = 0; n < num_dyns; n++) {
731 		read_dyn(elf, addr, n, &tag, &val);
732 		if (tag != DT_NEEDED)
733 			continue;
734 		tee_uuid_from_str(&uuid, str_tab + val);
735 		queue_elf(&uuid);
736 	}
737 }
738 
739 static void add_dependencies(struct ta_elf *elf)
740 {
741 	size_t n = 0;
742 
743 	if (elf->is_32bit) {
744 		Elf32_Phdr *phdr = elf->phdr;
745 
746 		for (n = 0; n < elf->e_phnum; n++)
747 			add_deps_from_segment(elf, phdr[n].p_type,
748 					      phdr[n].p_vaddr, phdr[n].p_memsz);
749 	} else {
750 		Elf64_Phdr *phdr = elf->phdr;
751 
752 		for (n = 0; n < elf->e_phnum; n++)
753 			add_deps_from_segment(elf, phdr[n].p_type,
754 					      phdr[n].p_vaddr, phdr[n].p_memsz);
755 	}
756 }
757 
758 static void copy_section_headers(struct ta_elf *elf)
759 {
760 	TEE_Result res = TEE_SUCCESS;
761 	size_t sz = elf->e_shnum * elf->e_shentsize;
762 	size_t offs = 0;
763 
764 	elf->shdr = malloc(sz);
765 	if (!elf->shdr)
766 		err(TEE_ERROR_OUT_OF_MEMORY, "malloc");
767 
768 	/*
769 	 * We're assuming that the section headers come after the load segments,
770 	 * but if it's a very small dynamically linked library the section
771 	 * headers can still end up (at least partially) in the first mapped page.
772 	 */
773 	if (elf->e_shoff < SMALL_PAGE_SIZE) {
774 		assert(!elf->is_main);
775 		offs = MIN(SMALL_PAGE_SIZE - elf->e_shoff, sz);
776 		memcpy(elf->shdr, (void *)(elf->load_addr + elf->e_shoff),
777 		       offs);
778 	}
779 
780 	if (offs < sz) {
781 		res = sys_copy_from_ta_bin((uint8_t *)elf->shdr + offs,
782 					   sz - offs, elf->handle,
783 					   elf->e_shoff + offs);
784 		if (res)
785 			err(res, "sys_copy_from_ta_bin");
786 	}
787 }
788 
789 static void close_handle(struct ta_elf *elf)
790 {
791 	TEE_Result res = sys_close_ta_bin(elf->handle);
792 
793 	if (res)
794 		err(res, "sys_close_ta_bin");
795 	elf->handle = -1;
796 }
797 
798 static void clean_elf_load_main(struct ta_elf *elf)
799 {
800 	TEE_Result res = TEE_SUCCESS;
801 
802 	/*
803 	 * Clean up from last attempt to load
804 	 */
805 	res = sys_unmap(elf->ehdr_addr, SMALL_PAGE_SIZE);
806 	if (res)
807 		err(res, "sys_unmap");
808 
809 	while (!TAILQ_EMPTY(&elf->segs)) {
810 		struct segment *seg = TAILQ_FIRST(&elf->segs);
811 		vaddr_t va = 0;
812 		size_t num_bytes = 0;
813 
814 		va = rounddown(elf->load_addr + seg->vaddr);
815 		if (seg->remapped_writeable)
816 			num_bytes = roundup(seg->vaddr + seg->memsz) -
817 				    rounddown(seg->vaddr);
818 		else
819 			num_bytes = seg->memsz;
820 
821 		res = sys_unmap(va, num_bytes);
822 		if (res)
823 			err(res, "sys_unmap");
824 
825 		TAILQ_REMOVE(&elf->segs, seg, link);
826 		free(seg);
827 	}
828 
829 	free(elf->shdr);
830 	memset(&elf->is_32bit, 0,
831 	       (vaddr_t)&elf->uuid - (vaddr_t)&elf->is_32bit);
832 
833 	TAILQ_INIT(&elf->segs);
834 }
835 
836 static void load_main(struct ta_elf *elf)
837 {
838 	init_elf(elf);
839 	map_segments(elf);
840 	populate_segments(elf);
841 	add_dependencies(elf);
842 	copy_section_headers(elf);
843 	save_symtab(elf);
844 	close_handle(elf);
845 
846 	elf->head = (struct ta_head *)elf->load_addr;
847 	if (elf->head->depr_entry != UINT64_MAX) {
848 		/*
849 		 * Legacy TAs set their entry point in ta_head. Non-legacy
850 		 * TAs use the entry point of the ELF instead, leaving the
851 		 * ta_head entry point set to UINT64_MAX to indicate that
852 		 * it's not used.
853 		 *
854 		 * NB, TAs built before commit a73b5878c89d ("Replace
855 		 * ta_head.entry with elf entry") are considered legacy TAs
856 		 * by ldelf.
857 		 *
858 		 * Legacy TAs cannot be mapped with shared memory segments,
859 		 * so restart the mapping now that it has turned out we're
860 		 * loading a legacy TA.
861 		 */
862 
863 		DMSG("Reloading TA %pUl as legacy TA", (void *)&elf->uuid);
864 		clean_elf_load_main(elf);
865 		elf->is_legacy = true;
866 		init_elf(elf);
867 		map_segments(elf);
868 		populate_segments_legacy(elf);
869 		add_dependencies(elf);
870 		copy_section_headers(elf);
871 		save_symtab(elf);
872 		close_handle(elf);
873 		elf->head = (struct ta_head *)elf->load_addr;
874 		/*
875 		 * Check that the TA is still a legacy TA; if it isn't, give
876 		 * up now since we're likely under attack.
877 		 */
878 		if (elf->head->depr_entry == UINT64_MAX)
879 			err(TEE_ERROR_GENERIC,
880 			    "TA %pUl was changed on disk to non-legacy",
881 			    (void *)&elf->uuid);
882 	}
883 
884 }
885 
886 void ta_elf_load_main(const TEE_UUID *uuid, uint32_t *is_32bit, uint64_t *sp,
887 		      uint32_t *ta_flags)
888 {
889 	struct ta_elf *elf = queue_elf(uuid);
890 	vaddr_t va = 0;
891 	TEE_Result res = TEE_SUCCESS;
892 
893 	assert(elf);
894 	elf->is_main = true;
895 
896 	load_main(elf);
897 
898 	*is_32bit = elf->is_32bit;
899 	res = sys_map_zi(elf->head->stack_size, 0, &va, 0, 0);
900 	if (res)
901 		err(res, "sys_map_zi stack");
902 
903 	if (elf->head->flags & ~TA_FLAGS_MASK)
904 		err(TEE_ERROR_BAD_FORMAT, "Invalid TA flag(s) %#"PRIx32,
905 		    elf->head->flags & ~TA_FLAGS_MASK);
906 
907 	*ta_flags = elf->head->flags;
908 	*sp = va + elf->head->stack_size;
909 	ta_stack = va;
910 	ta_stack_size = elf->head->stack_size;
911 }
912 
913 void ta_elf_finalize_load_main(uint64_t *entry)
914 {
915 	struct ta_elf *elf = TAILQ_FIRST(&main_elf_queue);
916 	TEE_Result res = TEE_SUCCESS;
917 
918 	assert(elf->is_main);
919 
920 	res = ta_elf_set_init_fini_info(elf->is_32bit);
921 	if (res)
922 		err(res, "ta_elf_set_init_fini_info");
923 
924 	if (elf->is_legacy)
925 		*entry = elf->head->depr_entry;
926 	else
927 		*entry = elf->e_entry + elf->load_addr;
928 }
929 
930 
931 void ta_elf_load_dependency(struct ta_elf *elf, bool is_32bit)
932 {
933 	if (elf->is_main)
934 		return;
935 
936 	init_elf(elf);
937 	if (elf->is_32bit != is_32bit)
938 		err(TEE_ERROR_BAD_FORMAT, "ELF %pUl is %sbit (expected %sbit)",
939 		    (void *)&elf->uuid, elf->is_32bit ? "32" : "64",
940 		    is_32bit ? "32" : "64");
941 
942 	map_segments(elf);
943 	populate_segments(elf);
944 	add_dependencies(elf);
945 	copy_section_headers(elf);
946 	save_symtab(elf);
947 	close_handle(elf);
948 }
949 
950 void ta_elf_finalize_mappings(struct ta_elf *elf)
951 {
952 	TEE_Result res = TEE_SUCCESS;
953 	struct segment *seg = NULL;
954 
955 	if (!elf->is_legacy)
956 		return;
957 
958 	TAILQ_FOREACH(seg, &elf->segs, link) {
959 		vaddr_t va = elf->load_addr + seg->vaddr;
960 		uint32_t flags = 0;
961 
962 		if (seg->flags & PF_W)
963 			flags |= PTA_SYSTEM_MAP_FLAG_WRITEABLE;
964 		if (seg->flags & PF_X)
965 			flags |= PTA_SYSTEM_MAP_FLAG_EXECUTABLE;
966 
967 		res = sys_set_prot(va, seg->memsz, flags);
968 		if (res)
969 			err(res, "sys_set_prot");
970 	}
971 }
972 
973 static void __printf(3, 4) print_wrapper(void *pctx, print_func_t print_func,
974 					 const char *fmt, ...)
975 {
976 	va_list ap;
977 
978 	va_start(ap, fmt);
979 	print_func(pctx, fmt, ap);
980 	va_end(ap);
981 }
982 
983 static void print_seg(void *pctx, print_func_t print_func,
984 		      size_t idx __maybe_unused, int elf_idx __maybe_unused,
985 		      vaddr_t va __maybe_unused, paddr_t pa __maybe_unused,
986 		      size_t sz __maybe_unused, uint32_t flags)
987 {
988 	int width __maybe_unused = 8;
989 	char desc[14] __maybe_unused = "";
990 	char flags_str[] __maybe_unused = "----";
991 
992 	if (elf_idx > -1) {
993 		snprintf(desc, sizeof(desc), " [%d]", elf_idx);
994 	} else {
995 		if (flags & DUMP_MAP_EPHEM)
996 			snprintf(desc, sizeof(desc), " (param)");
997 		if (flags & DUMP_MAP_LDELF)
998 			snprintf(desc, sizeof(desc), " (ldelf)");
999 		if (va == ta_stack)
1000 			snprintf(desc, sizeof(desc), " (stack)");
1001 	}
1002 
1003 	if (flags & DUMP_MAP_READ)
1004 		flags_str[0] = 'r';
1005 	if (flags & DUMP_MAP_WRITE)
1006 		flags_str[1] = 'w';
1007 	if (flags & DUMP_MAP_EXEC)
1008 		flags_str[2] = 'x';
1009 	if (flags & DUMP_MAP_SECURE)
1010 		flags_str[3] = 's';
1011 
1012 	print_wrapper(pctx, print_func,
1013 		      "region %2zu: va 0x%0*"PRIxVA" pa 0x%0*"PRIxPA" size 0x%06zx flags %s%s\n",
1014 		      idx, width, va, width, pa, sz, flags_str, desc);
1015 }
1016 
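/*
 * Descriptive comment added for clarity: iterate over all segments of
 * all ELFs in load address order. Advances *seg within the current ELF
 * if possible, otherwise moves on to the first segment of the ELF with
 * the next higher load address and sets *elf_idx to that ELF's position
 * in the queue. Returns false when there are no more segments to visit.
 */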
1017 static bool get_next_in_order(struct ta_elf_queue *elf_queue,
1018 			      struct ta_elf **elf, struct segment **seg,
1019 			      size_t *elf_idx)
1020 {
1021 	struct ta_elf *e = NULL;
1022 	struct segment *s = NULL;
1023 	size_t idx = 0;
1024 	vaddr_t va = 0;
1025 	struct ta_elf *e2 = NULL;
1026 	size_t i2 = 0;
1027 
1028 	assert(elf && seg && elf_idx);
1029 	e = *elf;
1030 	s = *seg;
1031 	assert((e == NULL && s == NULL) || (e != NULL && s != NULL));
1032 
1033 	if (s) {
1034 		s = TAILQ_NEXT(s, link);
1035 		if (s) {
1036 			*seg = s;
1037 			return true;
1038 		}
1039 	}
1040 
1041 	if (e)
1042 		va = e->load_addr;
1043 
1044 	/* Find the ELF with next load address */
1045 	e = NULL;
1046 	TAILQ_FOREACH(e2, elf_queue, link) {
1047 		if (e2->load_addr > va) {
1048 			if (!e || e2->load_addr < e->load_addr) {
1049 				e = e2;
1050 				idx = i2;
1051 			}
1052 		}
1053 		i2++;
1054 	}
1055 	if (!e)
1056 		return false;
1057 
1058 	*elf = e;
1059 	*seg = TAILQ_FIRST(&e->segs);
1060 	*elf_idx = idx;
1061 	return true;
1062 }
1063 
1064 void ta_elf_print_mappings(void *pctx, print_func_t print_func,
1065 			   struct ta_elf_queue *elf_queue, size_t num_maps,
1066 			   struct dump_map *maps, vaddr_t mpool_base)
1067 {
1068 	struct segment *seg = NULL;
1069 	struct ta_elf *elf = NULL;
1070 	size_t elf_idx = 0;
1071 	size_t idx = 0;
1072 	size_t map_idx = 0;
1073 
1074 	/*
1075 	 * Loop over all segments and maps, printing virtual addresses in
1076 	 * order. A segment has priority if the virtual address is present
1077 	 * in both a map and a segment.
1078 	 */
1079 	get_next_in_order(elf_queue, &elf, &seg, &elf_idx);
1080 	while (true) {
1081 		vaddr_t va = -1;
1082 		size_t sz = 0;
1083 		uint32_t flags = DUMP_MAP_SECURE;
1084 		size_t offs = 0;
1085 
1086 		if (seg) {
1087 			va = rounddown(seg->vaddr + elf->load_addr);
1088 			sz = roundup(seg->vaddr + seg->memsz) -
1089 				     rounddown(seg->vaddr);
1090 		}
1091 
1092 		while (map_idx < num_maps && maps[map_idx].va <= va) {
1093 			uint32_t f = 0;
1094 
1095 			/* If there's a match, it should be the same map */
1096 			if (maps[map_idx].va == va) {
1097 				/*
1098 				 * In shared libraries the first page is
1099 				 * mapped separately with the rest of that
1100 				 * segment following back to back in a
1101 				 * separate entry.
1102 				 */
1103 				if (map_idx + 1 < num_maps &&
1104 				    maps[map_idx].sz == SMALL_PAGE_SIZE) {
1105 					vaddr_t next_va = maps[map_idx].va +
1106 							  maps[map_idx].sz;
1107 					size_t comb_sz = maps[map_idx].sz +
1108 							 maps[map_idx + 1].sz;
1109 
1110 					if (next_va == maps[map_idx + 1].va &&
1111 					    comb_sz == sz &&
1112 					    maps[map_idx].flags ==
1113 					    maps[map_idx + 1].flags) {
1114 						/* Skip this and next entry */
1115 						map_idx += 2;
1116 						continue;
1117 					}
1118 				}
1119 				assert(maps[map_idx].sz == sz);
1120 			} else if (maps[map_idx].va < va) {
1121 				if (maps[map_idx].va == mpool_base)
1122 					f |= DUMP_MAP_LDELF;
1123 				print_seg(pctx, print_func, idx, -1,
1124 					  maps[map_idx].va, maps[map_idx].pa,
1125 					  maps[map_idx].sz,
1126 					  maps[map_idx].flags | f);
1127 				idx++;
1128 			}
1129 			map_idx++;
1130 		}
1131 
1132 		if (!seg)
1133 			break;
1134 
1135 		offs = rounddown(seg->offset);
1136 		if (seg->flags & PF_R)
1137 			flags |= DUMP_MAP_READ;
1138 		if (seg->flags & PF_W)
1139 			flags |= DUMP_MAP_WRITE;
1140 		if (seg->flags & PF_X)
1141 			flags |= DUMP_MAP_EXEC;
1142 
1143 		print_seg(pctx, print_func, idx, elf_idx, va, offs, sz, flags);
1144 		idx++;
1145 
1146 		if (!get_next_in_order(elf_queue, &elf, &seg, &elf_idx))
1147 			seg = NULL;
1148 	}
1149 
1150 	elf_idx = 0;
1151 	TAILQ_FOREACH(elf, elf_queue, link) {
1152 		print_wrapper(pctx, print_func,
1153 			      " [%zu] %pUl @ 0x%0*"PRIxVA"\n",
1154 			      elf_idx, (void *)&elf->uuid, 8, elf->load_addr);
1155 		elf_idx++;
1156 	}
1157 }
1158 
1159 #ifdef CFG_UNWIND
1160 void ta_elf_stack_trace_a32(uint32_t regs[16])
1161 {
1162 	struct unwind_state_arm32 state = { };
1163 
1164 	memcpy(state.registers, regs, sizeof(state.registers));
1165 	print_stack_arm32(&state, ta_stack, ta_stack_size);
1166 }
1167 
1168 void ta_elf_stack_trace_a64(uint64_t fp, uint64_t sp, uint64_t pc)
1169 {
1170 	struct unwind_state_arm64 state = { .fp = fp, .sp = sp, .pc = pc };
1171 
1172 	print_stack_arm64(&state, ta_stack, ta_stack_size);
1173 }
1174 #endif
1175 
1176 TEE_Result ta_elf_add_library(const TEE_UUID *uuid)
1177 {
1178 	struct ta_elf *ta = TAILQ_FIRST(&main_elf_queue);
1179 	struct ta_elf *lib = ta_elf_find_elf(uuid);
1180 	struct ta_elf *elf = NULL;
1181 
1182 	if (lib)
1183 		return TEE_SUCCESS; /* Already mapped */
1184 
1185 	lib = queue_elf_helper(uuid);
1186 	if (!lib)
1187 		return TEE_ERROR_OUT_OF_MEMORY;
1188 
1189 	for (elf = lib; elf; elf = TAILQ_NEXT(elf, link))
1190 		ta_elf_load_dependency(elf, ta->is_32bit);
1191 
1192 	for (elf = lib; elf; elf = TAILQ_NEXT(elf, link)) {
1193 		ta_elf_relocate(elf);
1194 		ta_elf_finalize_mappings(elf);
1195 	}
1196 
1197 	for (elf = lib; elf; elf = TAILQ_NEXT(elf, link))
1198 		DMSG("ELF (%pUl) at %#"PRIxVA,
1199 		     (void *)&elf->uuid, elf->load_addr);
1200 
1201 	return ta_elf_set_init_fini_info(ta->is_32bit);
1202 }
1203 
1204 /* Get address/size of .init_array and .fini_array from the dynamic segment */
1205 static void get_init_fini_array(struct ta_elf *elf, unsigned int type,
1206 				vaddr_t addr, size_t memsz, vaddr_t *init,
1207 				size_t *init_cnt, vaddr_t *fini,
1208 				size_t *fini_cnt)
1209 {
1210 	size_t addrsz = 0;
1211 	size_t dyn_entsize = 0;
1212 	size_t num_dyns = 0;
1213 	size_t n = 0;
1214 	unsigned int tag = 0;
1215 	size_t val = 0;
1216 
1217 	assert(type == PT_DYNAMIC);
1218 
1219 	if (elf->is_32bit) {
1220 		dyn_entsize = sizeof(Elf32_Dyn);
1221 		addrsz = 4;
1222 	} else {
1223 		dyn_entsize = sizeof(Elf64_Dyn);
1224 		addrsz = 8;
1225 	}
1226 
1227 	assert(!(memsz % dyn_entsize));
1228 	num_dyns = memsz / dyn_entsize;
1229 
1230 	for (n = 0; n < num_dyns; n++) {
1231 		read_dyn(elf, addr, n, &tag, &val);
1232 		if (tag == DT_INIT_ARRAY)
1233 			*init = val + elf->load_addr;
1234 		else if (tag == DT_FINI_ARRAY)
1235 			*fini = val + elf->load_addr;
1236 		else if (tag == DT_INIT_ARRAYSZ)
1237 			*init_cnt = val / addrsz;
1238 		else if (tag == DT_FINI_ARRAYSZ)
1239 			*fini_cnt = val / addrsz;
1240 	}
1241 }
1242 
1243 /* Get address/size of .init_array and .fini_array in @elf (if present) */
1244 static void elf_get_init_fini_array(struct ta_elf *elf, vaddr_t *init,
1245 				    size_t *init_cnt, vaddr_t *fini,
1246 				    size_t *fini_cnt)
1247 {
1248 	size_t n = 0;
1249 
1250 	if (elf->is_32bit) {
1251 		Elf32_Phdr *phdr = elf->phdr;
1252 
1253 		for (n = 0; n < elf->e_phnum; n++) {
1254 			if (phdr[n].p_type == PT_DYNAMIC) {
1255 				get_init_fini_array(elf, phdr[n].p_type,
1256 						    phdr[n].p_vaddr,
1257 						    phdr[n].p_memsz,
1258 						    init, init_cnt, fini,
1259 						    fini_cnt);
1260 				return;
1261 			}
1262 		}
1263 	} else {
1264 		Elf64_Phdr *phdr = elf->phdr;
1265 
1266 		for (n = 0; n < elf->e_phnum; n++) {
1267 			if (phdr[n].p_type == PT_DYNAMIC) {
1268 				get_init_fini_array(elf, phdr[n].p_type,
1269 						    phdr[n].p_vaddr,
1270 						    phdr[n].p_memsz,
1271 						    init, init_cnt, fini,
1272 						    fini_cnt);
1273 				return;
1274 			}
1275 		}
1276 	}
1277 }
1278 
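/*
 * Descriptive comment added for clarity: resize the init/fini array
 * referenced by the __init_fini_info structure at @va to hold @cnt
 * entries, zero-initializing any newly added entries. Handles both the
 * 32-bit and the native layout of the structure.
 */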
1279 static TEE_Result realloc_ifs(vaddr_t va, size_t cnt, bool is_32bit)
1280 {
1281 	struct __init_fini_info32 *info32 = (struct __init_fini_info32 *)va;
1282 	struct __init_fini_info *info = (struct __init_fini_info *)va;
1283 	struct __init_fini32 *ifs32 = NULL;
1284 	struct __init_fini *ifs = NULL;
1285 	size_t prev_cnt = 0;
1286 	void *ptr = NULL;
1287 
1288 	if (is_32bit) {
1289 		ptr = (void *)(vaddr_t)info32->ifs;
1290 		ptr = realloc(ptr, cnt * sizeof(struct __init_fini32));
1291 		if (!ptr)
1292 			return TEE_ERROR_OUT_OF_MEMORY;
1293 		ifs32 = ptr;
1294 		prev_cnt = info32->size;
1295 		if (cnt > prev_cnt)
1296 			memset(ifs32 + prev_cnt, 0,
1297 			       (cnt - prev_cnt) * sizeof(*ifs32));
1298 		info32->ifs = (uint32_t)(vaddr_t)ifs32;
1299 		info32->size = cnt;
1300 	} else {
1301 		ptr = realloc(info->ifs, cnt * sizeof(struct __init_fini));
1302 		if (!ptr)
1303 			return TEE_ERROR_OUT_OF_MEMORY;
1304 		ifs = ptr;
1305 		prev_cnt = info->size;
1306 		if (cnt > prev_cnt)
1307 			memset(ifs + prev_cnt, 0,
1308 			       (cnt - prev_cnt) * sizeof(*ifs));
1309 		info->ifs = ifs;
1310 		info->size = cnt;
1311 	}
1312 
1313 	return TEE_SUCCESS;
1314 }
1315 
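/*
 * Descriptive comment added for clarity: fill entry @idx of the
 * __init_fini array at @va with the .init_array/.fini_array addresses
 * and counts from @elf, unless the entry has already been marked
 * __IFS_VALID. Handles both the 32-bit and the native layout.
 */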
1316 static void fill_ifs(vaddr_t va, size_t idx, struct ta_elf *elf, bool is_32bit)
1317 {
1318 	struct __init_fini_info32 *info32 = (struct __init_fini_info32 *)va;
1319 	struct __init_fini_info *info = (struct __init_fini_info *)va;
1320 	struct __init_fini32 *ifs32 = NULL;
1321 	struct __init_fini *ifs = NULL;
1322 	size_t init_cnt = 0;
1323 	size_t fini_cnt = 0;
1324 	vaddr_t init = 0;
1325 	vaddr_t fini = 0;
1326 
1327 	if (is_32bit) {
1328 		assert(idx < info32->size);
1329 		ifs32 = &((struct __init_fini32 *)(vaddr_t)info32->ifs)[idx];
1330 
1331 		if (ifs32->flags & __IFS_VALID)
1332 			return;
1333 
1334 		elf_get_init_fini_array(elf, &init, &init_cnt, &fini,
1335 					&fini_cnt);
1336 
1337 		ifs32->init = (uint32_t)init;
1338 		ifs32->init_size = init_cnt;
1339 
1340 		ifs32->fini = (uint32_t)fini;
1341 		ifs32->fini_size = fini_cnt;
1342 
1343 		ifs32->flags |= __IFS_VALID;
1344 	} else {
1345 		assert(idx < info->size);
1346 		ifs = &info->ifs[idx];
1347 
1348 		if (ifs->flags & __IFS_VALID)
1349 			return;
1350 
1351 		elf_get_init_fini_array(elf, &init, &init_cnt, &fini,
1352 					&fini_cnt);
1353 
1354 		ifs->init = (void (**)(void))init;
1355 		ifs->init_size = init_cnt;
1356 
1357 		ifs->fini = (void (**)(void))fini;
1358 		ifs->fini_size = fini_cnt;
1359 
1360 		ifs->flags |= __IFS_VALID;
1361 	}
1362 }
1363 
1364 /*
1365  * Set or update __init_fini_info in the TA with information from the ELF
1366  * queue
1367  */
1368 TEE_Result ta_elf_set_init_fini_info(bool is_32bit)
1369 {
1370 	struct __init_fini_info *info = NULL;
1371 	TEE_Result res = TEE_SUCCESS;
1372 	struct ta_elf *elf = NULL;
1373 	vaddr_t info_va = 0;
1374 	size_t cnt = 0;
1375 
1376 	res = ta_elf_resolve_sym("__init_fini_info", &info_va, NULL);
1377 	if (res) {
1378 		if (res == TEE_ERROR_ITEM_NOT_FOUND) {
1379 			/* Older TA */
1380 			return TEE_SUCCESS;
1381 		}
1382 		return res;
1383 	}
1384 	assert(info_va);
1385 
1386 	info = (struct __init_fini_info *)info_va;
1387 	if (info->reserved)
1388 		return TEE_ERROR_NOT_SUPPORTED;
1389 
1390 	TAILQ_FOREACH(elf, &main_elf_queue, link)
1391 		cnt++;
1392 
1393 	/* Queue has at least one file (main) */
1394 	assert(cnt);
1395 
1396 	res = realloc_ifs(info_va, cnt, is_32bit);
1397 	if (res)
1398 		goto err;
1399 
1400 	cnt = 0;
1401 	TAILQ_FOREACH(elf, &main_elf_queue, link) {
1402 		fill_ifs(info_va, cnt, elf, is_32bit);
1403 		cnt++;
1404 	}
1405 
1406 	return TEE_SUCCESS;
1407 err:
1408 	free(info);
1409 	return res;
1410 }
1411