xref: /optee_os/ldelf/ta_elf.c (revision bc1d13c122371e8d16aa154f2bde93e5f28a2df9)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2019, Linaro Limited
4  */
5 
6 #include <assert.h>
7 #include <ctype.h>
8 #include <elf32.h>
9 #include <elf64.h>
10 #include <elf_common.h>
11 #include <ldelf.h>
12 #include <pta_system.h>
13 #include <stdio.h>
14 #include <stdlib.h>
15 #include <string_ext.h>
16 #include <string.h>
17 #include <tee_api_types.h>
18 #include <tee_internal_api_extensions.h>
19 #include <user_ta_header.h>
20 #include <utee_syscalls.h>
21 #include <util.h>
22 
23 #include "sys.h"
24 #include "ta_elf.h"
25 #include "unwind.h"
26 
27 static vaddr_t ta_stack;
28 static vaddr_t ta_stack_size;
29 
30 struct ta_elf_queue main_elf_queue = TAILQ_HEAD_INITIALIZER(main_elf_queue);
31 
32 static struct ta_elf *queue_elf_helper(const TEE_UUID *uuid)
33 {
34 	struct ta_elf *elf = calloc(1, sizeof(*elf));
35 
36 	if (!elf)
37 		return NULL;
38 
39 	TAILQ_INIT(&elf->segs);
40 
41 	elf->uuid = *uuid;
42 	TAILQ_INSERT_TAIL(&main_elf_queue, elf, link);
43 	return elf;
44 }
45 
46 static struct ta_elf *queue_elf(const TEE_UUID *uuid)
47 {
48 	struct ta_elf *elf = ta_elf_find_elf(uuid);
49 
50 	if (elf)
51 		return NULL;
52 
53 	elf = queue_elf_helper(uuid);
54 	if (!elf)
55 		err(TEE_ERROR_OUT_OF_MEMORY, "queue_elf_helper");
56 
57 	return elf;
58 }
59 
60 struct ta_elf *ta_elf_find_elf(const TEE_UUID *uuid)
61 {
62 	struct ta_elf *elf = NULL;
63 
64 	TAILQ_FOREACH(elf, &main_elf_queue, link)
65 		if (!memcmp(uuid, &elf->uuid, sizeof(*uuid)))
66 			return elf;
67 
68 	return NULL;
69 }
70 
71 static TEE_Result e32_parse_ehdr(struct ta_elf *elf, Elf32_Ehdr *ehdr)
72 {
73 	if (ehdr->e_ident[EI_VERSION] != EV_CURRENT ||
74 	    ehdr->e_ident[EI_CLASS] != ELFCLASS32 ||
75 	    ehdr->e_ident[EI_DATA] != ELFDATA2LSB ||
76 	    ehdr->e_ident[EI_OSABI] != ELFOSABI_NONE ||
77 	    ehdr->e_type != ET_DYN || ehdr->e_machine != EM_ARM ||
78 	    (ehdr->e_flags & EF_ARM_ABIMASK) != EF_ARM_ABI_VERSION ||
79 #ifndef CFG_WITH_VFP
80 	    (ehdr->e_flags & EF_ARM_ABI_FLOAT_HARD) ||
81 #endif
82 	    ehdr->e_phentsize != sizeof(Elf32_Phdr) ||
83 	    ehdr->e_shentsize != sizeof(Elf32_Shdr))
84 		return TEE_ERROR_BAD_FORMAT;
85 
86 	elf->is_32bit = true;
87 	elf->e_entry = ehdr->e_entry;
88 	elf->e_phoff = ehdr->e_phoff;
89 	elf->e_shoff = ehdr->e_shoff;
90 	elf->e_phnum = ehdr->e_phnum;
91 	elf->e_shnum = ehdr->e_shnum;
92 	elf->e_phentsize = ehdr->e_phentsize;
93 	elf->e_shentsize = ehdr->e_shentsize;
94 
95 	return TEE_SUCCESS;
96 }
97 
98 #ifdef ARM64
99 static TEE_Result e64_parse_ehdr(struct ta_elf *elf, Elf64_Ehdr *ehdr)
100 {
101 	if (ehdr->e_ident[EI_VERSION] != EV_CURRENT ||
102 	    ehdr->e_ident[EI_CLASS] != ELFCLASS64 ||
103 	    ehdr->e_ident[EI_DATA] != ELFDATA2LSB ||
104 	    ehdr->e_ident[EI_OSABI] != ELFOSABI_NONE ||
105 	    ehdr->e_type != ET_DYN || ehdr->e_machine != EM_AARCH64 ||
106 	    ehdr->e_flags || ehdr->e_phentsize != sizeof(Elf64_Phdr) ||
107 	    ehdr->e_shentsize != sizeof(Elf64_Shdr))
108 		return TEE_ERROR_BAD_FORMAT;
109 
110 
111 	elf->is_32bit = false;
112 	elf->e_entry = ehdr->e_entry;
113 	elf->e_phoff = ehdr->e_phoff;
114 	elf->e_shoff = ehdr->e_shoff;
115 	elf->e_phnum = ehdr->e_phnum;
116 	elf->e_shnum = ehdr->e_shnum;
117 	elf->e_phentsize = ehdr->e_phentsize;
118 	elf->e_shentsize = ehdr->e_shentsize;
119 
120 	return TEE_SUCCESS;
121 }
122 #else /*ARM64*/
123 static TEE_Result e64_parse_ehdr(struct ta_elf *elf __unused,
124 				 Elf64_Ehdr *ehdr __unused)
125 {
126 	return TEE_ERROR_NOT_SUPPORTED;
127 }
128 #endif /*ARM64*/
129 
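/*
 * Check that the range [addr, addr + memsz) described by a program header
 * of type @type lies within the mapped ELF image. @addr is relative to
 * the image start while elf->load_addr and elf->max_addr hold final
 * virtual addresses. Fails with err() on overflow or out-of-range headers.
 */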
130 static void check_phdr_in_range(struct ta_elf *elf, unsigned int type,
131 				vaddr_t addr, size_t memsz)
132 {
133 	vaddr_t max_addr = 0;
134 
135 	if (ADD_OVERFLOW(addr, memsz, &max_addr))
136 		err(TEE_ERROR_BAD_FORMAT, "Program header %#x overflow", type);
137 
138 	/*
139 	 * elf->load_addr and elf->max_addr both hold final virtual
140 	 * addresses, while the addresses in this program header are
141 	 * relative to 0.
142 	 */
143 	if (max_addr > elf->max_addr - elf->load_addr)
144 		err(TEE_ERROR_BAD_FORMAT, "Program header %#x out of bounds",
145 		    type);
146 }
147 
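/*
 * Read entry @idx of the dynamic section at image-relative address @addr
 * and return its tag and value, using the Elf32_Dyn or Elf64_Dyn layout
 * depending on the bitness of @elf.
 */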
148 static void read_dyn(struct ta_elf *elf, vaddr_t addr,
149 		     size_t idx, unsigned int *tag, size_t *val)
150 {
151 	if (elf->is_32bit) {
152 		Elf32_Dyn *dyn = (Elf32_Dyn *)(addr + elf->load_addr);
153 
154 		*tag = dyn[idx].d_tag;
155 		*val = dyn[idx].d_un.d_val;
156 	} else {
157 		Elf64_Dyn *dyn = (Elf64_Dyn *)(addr + elf->load_addr);
158 
159 		*tag = dyn[idx].d_tag;
160 		*val = dyn[idx].d_un.d_val;
161 	}
162 }
163 
164 static void save_hashtab_from_segment(struct ta_elf *elf, unsigned int type,
165 				      vaddr_t addr, size_t memsz)
166 {
167 	size_t dyn_entsize = 0;
168 	size_t num_dyns = 0;
169 	size_t n = 0;
170 	unsigned int tag = 0;
171 	size_t val = 0;
172 
173 	if (type != PT_DYNAMIC)
174 		return;
175 
176 	check_phdr_in_range(elf, type, addr, memsz);
177 
178 	if (elf->is_32bit)
179 		dyn_entsize = sizeof(Elf32_Dyn);
180 	else
181 		dyn_entsize = sizeof(Elf64_Dyn);
182 
183 	assert(!(memsz % dyn_entsize));
184 	num_dyns = memsz / dyn_entsize;
185 
186 	for (n = 0; n < num_dyns; n++) {
187 		read_dyn(elf, addr, n, &tag, &val);
188 		if (tag == DT_HASH) {
189 			elf->hashtab = (void *)(val + elf->load_addr);
190 			break;
191 		}
192 	}
193 }
194 
195 static void save_hashtab(struct ta_elf *elf)
196 {
197 	size_t n = 0;
198 
199 	if (elf->is_32bit) {
200 		Elf32_Phdr *phdr = elf->phdr;
201 
202 		for (n = 0; n < elf->e_phnum; n++)
203 			save_hashtab_from_segment(elf, phdr[n].p_type,
204 						  phdr[n].p_vaddr,
205 						  phdr[n].p_memsz);
206 	} else {
207 		Elf64_Phdr *phdr = elf->phdr;
208 
209 		for (n = 0; n < elf->e_phnum; n++)
210 			save_hashtab_from_segment(elf, phdr[n].p_type,
211 						  phdr[n].p_vaddr,
212 						  phdr[n].p_memsz);
213 	}
214 	assert(elf->hashtab);
215 }
216 
217 static void e32_save_symtab(struct ta_elf *elf, size_t tab_idx)
218 {
219 	Elf32_Shdr *shdr = elf->shdr;
220 	size_t str_idx = shdr[tab_idx].sh_link;
221 
222 	elf->dynsymtab = (void *)(shdr[tab_idx].sh_addr + elf->load_addr);
223 	assert(!(shdr[tab_idx].sh_size % sizeof(Elf32_Sym)));
224 	elf->num_dynsyms = shdr[tab_idx].sh_size / sizeof(Elf32_Sym);
225 
226 	elf->dynstr = (void *)(shdr[str_idx].sh_addr + elf->load_addr);
227 	elf->dynstr_size = shdr[str_idx].sh_size;
228 }
229 
230 static void e64_save_symtab(struct ta_elf *elf, size_t tab_idx)
231 {
232 	Elf64_Shdr *shdr = elf->shdr;
233 	size_t str_idx = shdr[tab_idx].sh_link;
234 
235 	elf->dynsymtab = (void *)(vaddr_t)(shdr[tab_idx].sh_addr +
236 					   elf->load_addr);
237 	assert(!(shdr[tab_idx].sh_size % sizeof(Elf64_Sym)));
238 	elf->num_dynsyms = shdr[tab_idx].sh_size / sizeof(Elf64_Sym);
239 
240 	elf->dynstr = (void *)(vaddr_t)(shdr[str_idx].sh_addr + elf->load_addr);
241 	elf->dynstr_size = shdr[str_idx].sh_size;
242 }
243 
244 static void save_symtab(struct ta_elf *elf)
245 {
246 	size_t n = 0;
247 
248 	if (elf->is_32bit) {
249 		Elf32_Shdr *shdr = elf->shdr;
250 
251 		for (n = 0; n < elf->e_shnum; n++) {
252 			if (shdr[n].sh_type == SHT_DYNSYM) {
253 				e32_save_symtab(elf, n);
254 				break;
255 			}
256 		}
257 	} else {
258 		Elf64_Shdr *shdr = elf->shdr;
259 
260 		for (n = 0; n < elf->e_shnum; n++) {
261 			if (shdr[n].sh_type == SHT_DYNSYM) {
262 				e64_save_symtab(elf, n);
263 				break;
264 			}
265 		}
266 
267 	}
268 
269 	save_hashtab(elf);
270 }
271 
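/*
 * Open the TA binary matching elf->uuid and map its first page, which
 * must contain the ELF header and all program headers. The header is
 * parsed as a 32-bit ELF first, then as a 64-bit ELF, and elf->phdr is
 * set to point into the mapped page. For libraries the page is also
 * mapped executable and becomes the start of the load address range.
 */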
272 static void init_elf(struct ta_elf *elf)
273 {
274 	TEE_Result res = TEE_SUCCESS;
275 	vaddr_t va = 0;
276 	uint32_t flags = PTA_SYSTEM_MAP_FLAG_SHAREABLE;
277 
278 	res = sys_open_ta_bin(&elf->uuid, &elf->handle);
279 	if (res)
280 		err(res, "sys_open_ta_bin(%pUl)", (void *)&elf->uuid);
281 
282 	/*
283 	 * Map it read-only executable when we're loading a library where
284 	 * the ELF header is included in a load segment.
285 	 */
286 	if (!elf->is_main)
287 		flags |= PTA_SYSTEM_MAP_FLAG_EXECUTABLE;
288 	res = sys_map_ta_bin(&va, SMALL_PAGE_SIZE, flags, elf->handle, 0, 0, 0);
289 	if (res)
290 		err(res, "sys_map_ta_bin");
291 	elf->ehdr_addr = va;
292 	if (!elf->is_main) {
293 		elf->load_addr = va;
294 		elf->max_addr = va + SMALL_PAGE_SIZE;
295 		elf->max_offs = SMALL_PAGE_SIZE;
296 	}
297 
298 	if (!IS_ELF(*(Elf32_Ehdr *)va))
299 		err(TEE_ERROR_BAD_FORMAT, "TA is not an ELF");
300 
301 	res = e32_parse_ehdr(elf, (void *)va);
302 	if (res == TEE_ERROR_BAD_FORMAT)
303 		res = e64_parse_ehdr(elf, (void *)va);
304 	if (res)
305 		err(res, "Cannot parse ELF");
306 
307 	if (elf->e_phoff + elf->e_phnum * elf->e_phentsize > SMALL_PAGE_SIZE)
308 		err(TEE_ERROR_NOT_SUPPORTED, "Cannot read program headers");
309 
310 	elf->phdr = (void *)(va + elf->e_phoff);
311 }
312 
313 static size_t roundup(size_t v)
314 {
315 	return ROUNDUP(v, SMALL_PAGE_SIZE);
316 }
317 
318 static size_t rounddown(size_t v)
319 {
320 	return ROUNDDOWN(v, SMALL_PAGE_SIZE);
321 }
322 
323 static void add_segment(struct ta_elf *elf, size_t offset, size_t vaddr,
324 			size_t filesz, size_t memsz, size_t flags, size_t align)
325 {
326 	struct segment *seg = calloc(1, sizeof(*seg));
327 
328 	if (!seg)
329 		err(TEE_ERROR_OUT_OF_MEMORY, "calloc");
330 
331 	seg->offset = offset;
332 	seg->vaddr = vaddr;
333 	seg->filesz = filesz;
334 	seg->memsz = memsz;
335 	seg->flags = flags;
336 	seg->align = align;
337 
338 	TAILQ_INSERT_TAIL(&elf->segs, seg, link);
339 }
340 
341 static void parse_load_segments(struct ta_elf *elf)
342 {
343 	size_t n = 0;
344 
345 	if (elf->is_32bit) {
346 		Elf32_Phdr *phdr = elf->phdr;
347 
348 		for (n = 0; n < elf->e_phnum; n++)
349 			if (phdr[n].p_type == PT_LOAD) {
350 				add_segment(elf, phdr[n].p_offset,
351 					    phdr[n].p_vaddr, phdr[n].p_filesz,
352 					    phdr[n].p_memsz, phdr[n].p_flags,
353 					    phdr[n].p_align);
354 			} else if (phdr[n].p_type == PT_ARM_EXIDX) {
355 				elf->exidx_start = phdr[n].p_vaddr;
356 				elf->exidx_size = phdr[n].p_filesz;
357 			}
358 	} else {
359 		Elf64_Phdr *phdr = elf->phdr;
360 
361 		for (n = 0; n < elf->e_phnum; n++)
362 			if (phdr[n].p_type == PT_LOAD)
363 				add_segment(elf, phdr[n].p_offset,
364 					    phdr[n].p_vaddr, phdr[n].p_filesz,
365 					    phdr[n].p_memsz, phdr[n].p_flags,
366 					    phdr[n].p_align);
367 	}
368 }
369 
370 static void copy_remapped_to(struct ta_elf *elf, const struct segment *seg)
371 {
372 	uint8_t *dst = (void *)(seg->vaddr + elf->load_addr);
373 	size_t n = 0;
374 	size_t offs = seg->offset;
375 	size_t num_bytes = seg->filesz;
376 
377 	if (offs < elf->max_offs) {
378 		n = MIN(elf->max_offs - offs, num_bytes);
379 		memcpy(dst, (void *)(elf->max_addr + offs - elf->max_offs), n);
380 		dst += n;
381 		offs += n;
382 		num_bytes -= n;
383 	}
384 
385 	if (num_bytes) {
386 		TEE_Result res = sys_copy_from_ta_bin(dst, num_bytes,
387 						      elf->handle, offs);
388 
389 		if (res)
390 			err(res, "sys_copy_from_ta_bin");
391 		elf->max_offs += offs;
392 	}
393 }
394 
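/*
 * Sanity check the parsed load segments and adjust them so that they can
 * be mapped with SMALL_PAGE_SIZE granularity. A segment starting in the
 * last page of the previous segment is merged into it (case 1 below), a
 * segment that only overlaps the previous segment's file offsets is
 * marked remapped_writeable (case 2), and segments that are not merged
 * are finally aligned downwards to a page boundary.
 *
 * Illustrative example with made-up numbers (4 KiB pages): a previous
 * segment with vaddr 0x0000 and memsz 0x2400 followed by a segment at
 * vaddr 0x2a00 gives rounddown(0x2a00) = 0x2000 < 0x2400, so case 1
 * applies and the previous segment is extended to cover the second one.
 */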
395 static void adjust_segments(struct ta_elf *elf)
396 {
397 	struct segment *seg = NULL;
398 	struct segment *prev_seg = NULL;
399 	size_t prev_end_addr = 0;
400 	size_t align = 0;
401 	size_t mask = 0;
402 
403 	/* Sanity check */
404 	TAILQ_FOREACH(seg, &elf->segs, link) {
405 		size_t dummy __maybe_unused = 0;
406 
407 		assert(seg->align >= SMALL_PAGE_SIZE);
408 		assert(!ADD_OVERFLOW(seg->vaddr, seg->memsz, &dummy));
409 		assert(seg->filesz <= seg->memsz);
410 		assert((seg->offset & SMALL_PAGE_MASK) ==
411 		       (seg->vaddr & SMALL_PAGE_MASK));
412 
413 		prev_seg = TAILQ_PREV(seg, segment_head, link);
414 		if (prev_seg) {
415 			assert(seg->vaddr >= prev_seg->vaddr + prev_seg->memsz);
416 			assert(seg->offset >=
417 			       prev_seg->offset + prev_seg->filesz);
418 		}
419 		if (!align)
420 			align = seg->align;
421 		assert(align == seg->align);
422 	}
423 
424 	mask = align - 1;
425 
426 	seg = TAILQ_FIRST(&elf->segs);
427 	if (seg)
428 		seg = TAILQ_NEXT(seg, link);
429 	while (seg) {
430 		prev_seg = TAILQ_PREV(seg, segment_head, link);
431 		prev_end_addr = prev_seg->vaddr + prev_seg->memsz;
432 
433 		/*
434 		 * This segment may overlap with the last "page" in the
435 		 * previous segment in two different ways:
436 		 * 1. Virtual address (and offset) overlaps =>
437 		 *    Permissions need to be merged. The offset must have
438 		 *    the same SMALL_PAGE_MASK bits as the vaddr, and vaddr
439 		 *    and offset must add up with the previous segment.
440 		 *
441 		 * 2. Only offset overlaps =>
442 		 *    The same page in the ELF is mapped at two different
443 		 *    virtual addresses. As a limitation this segment must
444 		 *    be mapped as writeable.
445 		 */
446 
447 		/* Case 1. */
448 		if (rounddown(seg->vaddr) < prev_end_addr) {
449 			assert((seg->vaddr & mask) == (seg->offset & mask));
450 			assert(prev_seg->memsz == prev_seg->filesz);
451 
452 			/*
453 			 * Merge the segments and their permissions.
454 			 * Note that there may be a small hole between the
455 			 * two segments.
456 			 */
457 			prev_seg->filesz = seg->vaddr + seg->filesz -
458 					   prev_seg->vaddr;
459 			prev_seg->memsz = seg->vaddr + seg->memsz -
460 					   prev_seg->vaddr;
461 			prev_seg->flags |= seg->flags;
462 
463 			TAILQ_REMOVE(&elf->segs, seg, link);
464 			free(seg);
465 			seg = TAILQ_NEXT(prev_seg, link);
466 			continue;
467 		}
468 
469 		/* Case 2. */
470 		if ((seg->offset & mask) &&
471 		    rounddown(seg->offset) <
472 		    (prev_seg->offset + prev_seg->filesz)) {
473 
474 			assert(seg->flags & PF_W);
475 			seg->remapped_writeable = true;
476 		}
477 
478 		/*
479 		 * No overlap, but we may need to align address, offset and
480 		 * size.
481 		 */
482 		seg->filesz += seg->vaddr - rounddown(seg->vaddr);
483 		seg->memsz += seg->vaddr - rounddown(seg->vaddr);
484 		seg->vaddr = rounddown(seg->vaddr);
485 		seg->offset = rounddown(seg->offset);
486 		seg = TAILQ_NEXT(seg, link);
487 	}
488 
489 }
490 
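/*
 * Map the segments of a legacy TA: each segment is mapped as anonymous
 * zero-initialized memory with sys_map_zi() and filled from the TA binary
 * with sys_copy_from_ta_bin(), since legacy TAs cannot be mapped with
 * shared memory segments.
 */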
491 static void populate_segments_legacy(struct ta_elf *elf)
492 {
493 	TEE_Result res = TEE_SUCCESS;
494 	struct segment *seg = NULL;
495 	vaddr_t va = 0;
496 
497 	assert(elf->is_legacy);
498 	TAILQ_FOREACH(seg, &elf->segs, link) {
499 		struct segment *last_seg = TAILQ_LAST(&elf->segs, segment_head);
500 		size_t pad_end = roundup(last_seg->vaddr + last_seg->memsz -
501 					 seg->vaddr - seg->memsz);
502 		size_t num_bytes = roundup(seg->memsz);
503 
504 		if (!elf->load_addr)
505 			va = 0;
506 		else
507 			va = seg->vaddr + elf->load_addr;
508 
509 
510 		if (!(seg->flags & PF_R))
511 			err(TEE_ERROR_NOT_SUPPORTED,
512 			    "Segment must be readable");
513 
514 		res = sys_map_zi(num_bytes, 0, &va, 0, pad_end);
515 		if (res)
516 			err(res, "sys_map_zi");
517 		res = sys_copy_from_ta_bin((void *)va, seg->filesz,
518 					   elf->handle, seg->offset);
519 		if (res)
520 			err(res, "sys_copy_from_ta_bin");
521 
522 		if (!elf->load_addr)
523 			elf->load_addr = va;
524 		elf->max_addr = va + num_bytes;
525 		elf->max_offs = seg->offset + seg->filesz;
526 	}
527 }
528 
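/*
 * Return the size of the unmapped padding to request before a new
 * mapping. With CFG_TA_ASLR this is a random number of pages in the range
 * [CFG_TA_ASLR_MIN_OFFSET_PAGES, CFG_TA_ASLR_MAX_OFFSET_PAGES), or the
 * minimum if no random data could be obtained. Without CFG_TA_ASLR it is
 * always 0.
 */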
529 static size_t get_pad_begin(void)
530 {
531 #ifdef CFG_TA_ASLR
532 	size_t min = CFG_TA_ASLR_MIN_OFFSET_PAGES;
533 	size_t max = CFG_TA_ASLR_MAX_OFFSET_PAGES;
534 	TEE_Result res = TEE_SUCCESS;
535 	uint32_t rnd32 = 0;
536 	size_t rnd = 0;
537 
538 	COMPILE_TIME_ASSERT(CFG_TA_ASLR_MIN_OFFSET_PAGES <
539 			    CFG_TA_ASLR_MAX_OFFSET_PAGES);
540 	if (max > min) {
541 		res = utee_cryp_random_number_generate(&rnd32, sizeof(rnd32));
542 		if (res) {
543 			DMSG("Random read failed: %#"PRIx32, res);
544 			return min * SMALL_PAGE_SIZE;
545 		}
546 		rnd = rnd32 % (max - min);
547 	}
548 
549 	return (min + rnd) * SMALL_PAGE_SIZE;
550 #else /*!CFG_TA_ASLR*/
551 	return 0;
552 #endif /*!CFG_TA_ASLR*/
553 }
554 
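/*
 * Map the adjusted segments of a non-legacy ELF. Writeable segments are
 * mapped as zero-initialized memory and filled with
 * sys_copy_from_ta_bin(), read-only segments are mapped directly from the
 * TA binary with sys_map_ta_bin(). Segments overlapping the already
 * mapped first page of a library are trimmed or zero-extended instead,
 * and if mapping with ASLR padding (pad_begin) runs out of memory the
 * mapping is retried without it.
 */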
555 static void populate_segments(struct ta_elf *elf)
556 {
557 	TEE_Result res = TEE_SUCCESS;
558 	struct segment *seg = NULL;
559 	vaddr_t va = 0;
560 	size_t pad_begin = 0;
561 
562 	assert(!elf->is_legacy);
563 	TAILQ_FOREACH(seg, &elf->segs, link) {
564 		struct segment *last_seg = TAILQ_LAST(&elf->segs, segment_head);
565 		size_t pad_end = roundup(last_seg->vaddr + last_seg->memsz -
566 					 seg->vaddr - seg->memsz);
567 
568 		if (seg->remapped_writeable) {
569 			size_t num_bytes = roundup(seg->vaddr + seg->memsz) -
570 					   rounddown(seg->vaddr);
571 
572 			assert(elf->load_addr);
573 			va = rounddown(elf->load_addr + seg->vaddr);
574 			assert(va >= elf->max_addr);
575 			res = sys_map_zi(num_bytes, 0, &va, 0, pad_end);
576 			if (res)
577 				err(res, "sys_map_zi");
578 
579 			copy_remapped_to(elf, seg);
580 			elf->max_addr = va + num_bytes;
581 		} else {
582 			uint32_t flags = 0;
583 			size_t filesz = seg->filesz;
584 			size_t memsz = seg->memsz;
585 			size_t offset = seg->offset;
586 			size_t vaddr = seg->vaddr;
587 
588 			if (offset < elf->max_offs) {
589 				/*
590 				 * We're in a load segment which overlaps
591 				 * with (or is covered by) the first page
592 				 * of a shared library.
593 				 */
594 				if (vaddr + filesz < SMALL_PAGE_SIZE) {
595 					size_t num_bytes = 0;
596 
597 					/*
598 					 * If this segment is completely
599 					 * covered, take next.
600 					 */
601 					if (vaddr + memsz <= SMALL_PAGE_SIZE)
602 						continue;
603 
604 					/*
605 					 * All data of the segment is
606 					 * loaded, but we need to zero
607 					 * extend it.
608 					 */
609 					va = elf->max_addr;
610 					num_bytes = roundup(vaddr + memsz) -
611 						    roundup(vaddr) -
612 						    SMALL_PAGE_SIZE;
613 					assert(num_bytes);
614 					res = sys_map_zi(num_bytes, 0, &va, 0,
615 							 0);
616 					if (res)
617 						err(res, "sys_map_zi");
618 					elf->max_addr = roundup(va + num_bytes);
619 					continue;
620 				}
621 
622 				/* Partial overlap, remove the first page. */
623 				vaddr += SMALL_PAGE_SIZE;
624 				filesz -= SMALL_PAGE_SIZE;
625 				memsz -= SMALL_PAGE_SIZE;
626 				offset += SMALL_PAGE_SIZE;
627 			}
628 
629 			if (!elf->load_addr) {
630 				va = 0;
631 				pad_begin = get_pad_begin();
632 				/*
633 				 * If mapping with pad_begin fails we'll
634 				 * retry without pad_begin, effectively
635 				 * disabling ASLR for the current ELF file.
636 				 */
637 			} else {
638 				va = vaddr + elf->load_addr;
639 				pad_begin = 0;
640 			}
641 
642 			if (seg->flags & PF_W)
643 				flags |= PTA_SYSTEM_MAP_FLAG_WRITEABLE;
644 			else
645 				flags |= PTA_SYSTEM_MAP_FLAG_SHAREABLE;
646 			if (seg->flags & PF_X)
647 				flags |= PTA_SYSTEM_MAP_FLAG_EXECUTABLE;
648 			if (!(seg->flags & PF_R))
649 				err(TEE_ERROR_NOT_SUPPORTED,
650 				    "Segment must be readable");
651 			if (flags & PTA_SYSTEM_MAP_FLAG_WRITEABLE) {
652 				res = sys_map_zi(memsz, 0, &va, pad_begin,
653 						 pad_end);
654 				if (pad_begin && res == TEE_ERROR_OUT_OF_MEMORY)
655 					res = sys_map_zi(memsz, 0, &va, 0,
656 							 pad_end);
657 				if (res)
658 					err(res, "sys_map_zi");
659 				res = sys_copy_from_ta_bin((void *)va, filesz,
660 							   elf->handle, offset);
661 				if (res)
662 					err(res, "sys_copy_from_ta_bin");
663 			} else {
664 				res = sys_map_ta_bin(&va, filesz, flags,
665 						     elf->handle, offset,
666 						     pad_begin, pad_end);
667 				if (pad_begin && res == TEE_ERROR_OUT_OF_MEMORY)
668 					res = sys_map_ta_bin(&va, filesz, flags,
669 							     elf->handle,
670 							     offset, 0,
671 							     pad_end);
672 				if (res)
673 					err(res, "sys_map_ta_bin");
674 			}
675 
676 			if (!elf->load_addr)
677 				elf->load_addr = va;
678 			elf->max_addr = roundup(va + filesz);
679 			elf->max_offs += filesz;
680 		}
681 	}
682 }
683 
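/*
 * Parse and adjust the load segments. If the ELF header itself is part of
 * the first load segment (i.e. we're loading a library) the single page
 * mapped by init_elf() is remapped with sys_remap() to a region large
 * enough for the whole image, with ASLR padding when possible.
 */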
684 static void map_segments(struct ta_elf *elf)
685 {
686 	TEE_Result res = TEE_SUCCESS;
687 
688 	parse_load_segments(elf);
689 	adjust_segments(elf);
690 	if (TAILQ_FIRST(&elf->segs)->offset < SMALL_PAGE_SIZE) {
691 		vaddr_t va = 0;
692 		size_t sz = elf->max_addr - elf->load_addr;
693 		struct segment *seg = TAILQ_LAST(&elf->segs, segment_head);
694 		size_t pad_begin = get_pad_begin();
695 
696 		/*
697 		 * We're loading a library; if that assumption ever changes,
698 		 * other parts of the code need to be updated too.
699 		 */
700 		assert(!elf->is_main);
701 
702 		/*
703 		 * Now that we know how much virtual memory is needed, move
704 		 * the already mapped part to a location which can
705 		 * accommodate us.
706 		 */
707 		res = sys_remap(elf->load_addr, &va, sz, pad_begin,
708 				roundup(seg->vaddr + seg->memsz));
709 		if (res == TEE_ERROR_OUT_OF_MEMORY)
710 			res = sys_remap(elf->load_addr, &va, sz, 0,
711 					roundup(seg->vaddr + seg->memsz));
712 		if (res)
713 			err(res, "sys_remap");
714 		elf->ehdr_addr = va;
715 		elf->load_addr = va;
716 		elf->max_addr = va + sz;
717 		elf->phdr = (void *)(va + elf->e_phoff);
718 	}
719 }
720 
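/*
 * If this program header describes the PT_DYNAMIC segment, look up the
 * dynamic string table (DT_STRTAB) and queue each DT_NEEDED dependency
 * for loading. A DT_NEEDED value is an offset into the string table where
 * the UUID of the needed library is stored as a string.
 */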
721 static void add_deps_from_segment(struct ta_elf *elf, unsigned int type,
722 				  vaddr_t addr, size_t memsz)
723 {
724 	size_t dyn_entsize = 0;
725 	size_t num_dyns = 0;
726 	size_t n = 0;
727 	unsigned int tag = 0;
728 	size_t val = 0;
729 	TEE_UUID uuid = { };
730 	char *str_tab = NULL;
731 
732 	if (type != PT_DYNAMIC)
733 		return;
734 
735 	check_phdr_in_range(elf, type, addr, memsz);
736 
737 	if (elf->is_32bit)
738 		dyn_entsize = sizeof(Elf32_Dyn);
739 	else
740 		dyn_entsize = sizeof(Elf64_Dyn);
741 
742 	assert(!(memsz % dyn_entsize));
743 	num_dyns = memsz / dyn_entsize;
744 
745 	for (n = 0; n < num_dyns; n++) {
746 		read_dyn(elf, addr, n, &tag, &val);
747 		if (tag == DT_STRTAB) {
748 			str_tab = (char *)(val + elf->load_addr);
749 			break;
750 		}
751 	}
752 
753 	for (n = 0; n < num_dyns; n++) {
754 		read_dyn(elf, addr, n, &tag, &val);
755 		if (tag != DT_NEEDED)
756 			continue;
757 		tee_uuid_from_str(&uuid, str_tab + val);
758 		queue_elf(&uuid);
759 	}
760 }
761 
762 static void add_dependencies(struct ta_elf *elf)
763 {
764 	size_t n = 0;
765 
766 	if (elf->is_32bit) {
767 		Elf32_Phdr *phdr = elf->phdr;
768 
769 		for (n = 0; n < elf->e_phnum; n++)
770 			add_deps_from_segment(elf, phdr[n].p_type,
771 					      phdr[n].p_vaddr, phdr[n].p_memsz);
772 	} else {
773 		Elf64_Phdr *phdr = elf->phdr;
774 
775 		for (n = 0; n < elf->e_phnum; n++)
776 			add_deps_from_segment(elf, phdr[n].p_type,
777 					      phdr[n].p_vaddr, phdr[n].p_memsz);
778 	}
779 }
780 
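/*
 * Copy the section headers into a malloc()ed buffer at elf->shdr. Any
 * part already present in the first mapped page of a library is copied
 * from there, the remainder is read from the TA binary.
 */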
781 static void copy_section_headers(struct ta_elf *elf)
782 {
783 	TEE_Result res = TEE_SUCCESS;
784 	size_t sz = elf->e_shnum * elf->e_shentsize;
785 	size_t offs = 0;
786 
787 	elf->shdr = malloc(sz);
788 	if (!elf->shdr)
789 		err(TEE_ERROR_OUT_OF_MEMORY, "malloc");
790 
791 	/*
792 	 * We're assuming that section headers come after the load segments,
793 	 * but if it's a very small dynamically linked library the section
794 	 * headers can still end up (partially?) in the first mapped page.
795 	 */
796 	if (elf->e_shoff < SMALL_PAGE_SIZE) {
797 		assert(!elf->is_main);
798 		offs = MIN(SMALL_PAGE_SIZE - elf->e_shoff, sz);
799 		memcpy(elf->shdr, (void *)(elf->load_addr + elf->e_shoff),
800 		       offs);
801 	}
802 
803 	if (offs < sz) {
804 		res = sys_copy_from_ta_bin((uint8_t *)elf->shdr + offs,
805 					   sz - offs, elf->handle,
806 					   elf->e_shoff + offs);
807 		if (res)
808 			err(res, "sys_copy_from_ta_bin");
809 	}
810 }
811 
812 static void close_handle(struct ta_elf *elf)
813 {
814 	TEE_Result res = sys_close_ta_bin(elf->handle);
815 
816 	if (res)
817 		err(res, "sys_close_ta_bin");
818 	elf->handle = -1;
819 }
820 
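/*
 * Undo a load attempt: unmap the page holding the ELF header and every
 * mapped segment, free the copied section headers and zero the ta_elf
 * fields from is_32bit up to (but not including) uuid so that the ELF
 * can be loaded again, this time as a legacy TA.
 */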
821 static void clean_elf_load_main(struct ta_elf *elf)
822 {
823 	TEE_Result res = TEE_SUCCESS;
824 
825 	/*
826 	 * Clean up from last attempt to load
827 	 */
828 	res = sys_unmap(elf->ehdr_addr, SMALL_PAGE_SIZE);
829 	if (res)
830 		err(res, "sys_unmap");
831 
832 	while (!TAILQ_EMPTY(&elf->segs)) {
833 		struct segment *seg = TAILQ_FIRST(&elf->segs);
834 		vaddr_t va = 0;
835 		size_t num_bytes = 0;
836 
837 		va = rounddown(elf->load_addr + seg->vaddr);
838 		if (seg->remapped_writeable)
839 			num_bytes = roundup(seg->vaddr + seg->memsz) -
840 				    rounddown(seg->vaddr);
841 		else
842 			num_bytes = seg->memsz;
843 
844 		res = sys_unmap(va, num_bytes);
845 		if (res)
846 			err(res, "sys_unmap");
847 
848 		TAILQ_REMOVE(&elf->segs, seg, link);
849 		free(seg);
850 	}
851 
852 	free(elf->shdr);
853 	memset(&elf->is_32bit, 0,
854 	       (vaddr_t)&elf->uuid - (vaddr_t)&elf->is_32bit);
855 
856 	TAILQ_INIT(&elf->segs);
857 }
858 
859 static void load_main(struct ta_elf *elf)
860 {
861 	init_elf(elf);
862 	map_segments(elf);
863 	populate_segments(elf);
864 	add_dependencies(elf);
865 	copy_section_headers(elf);
866 	save_symtab(elf);
867 	close_handle(elf);
868 
869 	elf->head = (struct ta_head *)elf->load_addr;
870 	if (elf->head->depr_entry != UINT64_MAX) {
871 		/*
872 		 * Legacy TAs set their entry point in ta_head. Non-legacy
873 		 * TAs use the ELF entry point instead, leaving the ta_head
874 		 * entry point set to UINT64_MAX to indicate that it's not
875 		 * used.
876 		 *
877 		 * NB, everything before the commit a73b5878c89d ("Replace
878 		 * ta_head.entry with elf entry") is considered a legacy TA
879 		 * by ldelf.
880 		 *
881 		 * Legacy TAs cannot be mapped with shared memory segments
882 		 * so restart the mapping if it turns out we're loading a
883 		 * legacy TA.
884 		 */
885 
886 		DMSG("Reloading TA %pUl as legacy TA", (void *)&elf->uuid);
887 		clean_elf_load_main(elf);
888 		elf->is_legacy = true;
889 		init_elf(elf);
890 		map_segments(elf);
891 		populate_segments_legacy(elf);
892 		add_dependencies(elf);
893 		copy_section_headers(elf);
894 		save_symtab(elf);
895 		close_handle(elf);
896 		elf->head = (struct ta_head *)elf->load_addr;
897 		/*
898 		 * Check that the TA is still a legacy TA; if it isn't, give
899 		 * up now since we're likely under attack.
900 		 */
901 		if (elf->head->depr_entry == UINT64_MAX)
902 			err(TEE_ERROR_GENERIC,
903 			    "TA %pUl was changed on disk to non-legacy",
904 			    (void *)&elf->uuid);
905 	}
906 
907 }
908 
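/*
 * Load the main TA ELF referenced by @uuid: map and populate its segments
 * (restarting in legacy mode if ta_head indicates a legacy TA) and map
 * its stack. The bitness, initial stack pointer and TA flags are returned
 * through the out parameters.
 */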
909 void ta_elf_load_main(const TEE_UUID *uuid, uint32_t *is_32bit, uint64_t *sp,
910 		      uint32_t *ta_flags)
911 {
912 	struct ta_elf *elf = queue_elf(uuid);
913 	vaddr_t va = 0;
914 	TEE_Result res = TEE_SUCCESS;
915 
916 	assert(elf);
917 	elf->is_main = true;
918 
919 	load_main(elf);
920 
921 	*is_32bit = elf->is_32bit;
922 	res = sys_map_zi(elf->head->stack_size, 0, &va, 0, 0);
923 	if (res)
924 		err(res, "sys_map_zi stack");
925 
926 	if (elf->head->flags & ~TA_FLAGS_MASK)
927 		err(TEE_ERROR_BAD_FORMAT, "Invalid TA flag(s) %#"PRIx32,
928 		    elf->head->flags & ~TA_FLAGS_MASK);
929 
930 	*ta_flags = elf->head->flags;
931 	*sp = va + elf->head->stack_size;
932 	ta_stack = va;
933 	ta_stack_size = elf->head->stack_size;
934 }
935 
936 void ta_elf_finalize_load_main(uint64_t *entry)
937 {
938 	struct ta_elf *elf = TAILQ_FIRST(&main_elf_queue);
939 	TEE_Result res = TEE_SUCCESS;
940 
941 	assert(elf->is_main);
942 
943 	res = ta_elf_set_init_fini_info(elf->is_32bit);
944 	if (res)
945 		err(res, "ta_elf_set_init_fini_info");
946 
947 	if (elf->is_legacy)
948 		*entry = elf->head->depr_entry;
949 	else
950 		*entry = elf->e_entry + elf->load_addr;
951 }
952 
953 
954 void ta_elf_load_dependency(struct ta_elf *elf, bool is_32bit)
955 {
956 	if (elf->is_main)
957 		return;
958 
959 	init_elf(elf);
960 	if (elf->is_32bit != is_32bit)
961 		err(TEE_ERROR_BAD_FORMAT, "ELF %pUl is %sbit (expected %sbit)",
962 		    (void *)&elf->uuid, elf->is_32bit ? "32" : "64",
963 		    is_32bit ? "32" : "64");
964 
965 	map_segments(elf);
966 	populate_segments(elf);
967 	add_dependencies(elf);
968 	copy_section_headers(elf);
969 	save_symtab(elf);
970 	close_handle(elf);
971 }
972 
973 void ta_elf_finalize_mappings(struct ta_elf *elf)
974 {
975 	TEE_Result res = TEE_SUCCESS;
976 	struct segment *seg = NULL;
977 
978 	if (!elf->is_legacy)
979 		return;
980 
981 	TAILQ_FOREACH(seg, &elf->segs, link) {
982 		vaddr_t va = elf->load_addr + seg->vaddr;
983 		uint32_t flags = 0;
984 
985 		if (seg->flags & PF_W)
986 			flags |= PTA_SYSTEM_MAP_FLAG_WRITEABLE;
987 		if (seg->flags & PF_X)
988 			flags |= PTA_SYSTEM_MAP_FLAG_EXECUTABLE;
989 
990 		res = sys_set_prot(va, seg->memsz, flags);
991 		if (res)
992 			err(res, "sys_set_prot");
993 	}
994 }
995 
996 static void __printf(3, 4) print_wrapper(void *pctx, print_func_t print_func,
997 					 const char *fmt, ...)
998 {
999 	va_list ap;
1000 
1001 	va_start(ap, fmt);
1002 	print_func(pctx, fmt, ap);
1003 	va_end(ap);
1004 }
1005 
1006 static void print_seg(void *pctx, print_func_t print_func,
1007 		      size_t idx __maybe_unused, int elf_idx __maybe_unused,
1008 		      vaddr_t va __maybe_unused, paddr_t pa __maybe_unused,
1009 		      size_t sz __maybe_unused, uint32_t flags)
1010 {
1011 	int width __maybe_unused = 8;
1012 	char desc[14] __maybe_unused = "";
1013 	char flags_str[] __maybe_unused = "----";
1014 
1015 	if (elf_idx > -1) {
1016 		snprintf(desc, sizeof(desc), " [%d]", elf_idx);
1017 	} else {
1018 		if (flags & DUMP_MAP_EPHEM)
1019 			snprintf(desc, sizeof(desc), " (param)");
1020 		if (flags & DUMP_MAP_LDELF)
1021 			snprintf(desc, sizeof(desc), " (ldelf)");
1022 		if (va == ta_stack)
1023 			snprintf(desc, sizeof(desc), " (stack)");
1024 	}
1025 
1026 	if (flags & DUMP_MAP_READ)
1027 		flags_str[0] = 'r';
1028 	if (flags & DUMP_MAP_WRITE)
1029 		flags_str[1] = 'w';
1030 	if (flags & DUMP_MAP_EXEC)
1031 		flags_str[2] = 'x';
1032 	if (flags & DUMP_MAP_SECURE)
1033 		flags_str[3] = 's';
1034 
1035 	print_wrapper(pctx, print_func,
1036 		      "region %2zu: va 0x%0*"PRIxVA" pa 0x%0*"PRIxPA" size 0x%06zx flags %s%s\n",
1037 		      idx, width, va, width, pa, sz, flags_str, desc);
1038 }
1039 
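/*
 * Step to the next segment when walking all ELFs in increasing load
 * address order: first the remaining segments of the current ELF, then
 * the first segment of the ELF with the next higher load address.
 * Returns false when everything has been visited. @elf_idx is set to the
 * position of the returned ELF in @elf_queue.
 */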
1040 static bool get_next_in_order(struct ta_elf_queue *elf_queue,
1041 			      struct ta_elf **elf, struct segment **seg,
1042 			      size_t *elf_idx)
1043 {
1044 	struct ta_elf *e = NULL;
1045 	struct segment *s = NULL;
1046 	size_t idx = 0;
1047 	vaddr_t va = 0;
1048 	struct ta_elf *e2 = NULL;
1049 	size_t i2 = 0;
1050 
1051 	assert(elf && seg && elf_idx);
1052 	e = *elf;
1053 	s = *seg;
1054 	assert((e == NULL && s == NULL) || (e != NULL && s != NULL));
1055 
1056 	if (s) {
1057 		s = TAILQ_NEXT(s, link);
1058 		if (s) {
1059 			*seg = s;
1060 			return true;
1061 		}
1062 	}
1063 
1064 	if (e)
1065 		va = e->load_addr;
1066 
1067 	/* Find the ELF with next load address */
1068 	e = NULL;
1069 	TAILQ_FOREACH(e2, elf_queue, link) {
1070 		if (e2->load_addr > va) {
1071 			if (!e || e2->load_addr < e->load_addr) {
1072 				e = e2;
1073 				idx = i2;
1074 			}
1075 		}
1076 		i2++;
1077 	}
1078 	if (!e)
1079 		return false;
1080 
1081 	*elf = e;
1082 	*seg = TAILQ_FIRST(&e->segs);
1083 	*elf_idx = idx;
1084 	return true;
1085 }
1086 
1087 void ta_elf_print_mappings(void *pctx, print_func_t print_func,
1088 			   struct ta_elf_queue *elf_queue, size_t num_maps,
1089 			   struct dump_map *maps, vaddr_t mpool_base)
1090 {
1091 	struct segment *seg = NULL;
1092 	struct ta_elf *elf = NULL;
1093 	size_t elf_idx = 0;
1094 	size_t idx = 0;
1095 	size_t map_idx = 0;
1096 
1097 	/*
1098 	 * Loop over all segments and maps, printing them in virtual
1099 	 * address order. The segment takes priority when a virtual
1100 	 * address appears in both a map entry and a segment.
1101 	 */
1102 	get_next_in_order(elf_queue, &elf, &seg, &elf_idx);
1103 	while (true) {
1104 		vaddr_t va = -1;
1105 		size_t sz = 0;
1106 		uint32_t flags = DUMP_MAP_SECURE;
1107 		size_t offs = 0;
1108 
1109 		if (seg) {
1110 			va = rounddown(seg->vaddr + elf->load_addr);
1111 			sz = roundup(seg->vaddr + seg->memsz) -
1112 				     rounddown(seg->vaddr);
1113 		}
1114 
1115 		while (map_idx < num_maps && maps[map_idx].va <= va) {
1116 			uint32_t f = 0;
1117 
1118 			/* If there's a match, it should be the same map */
1119 			if (maps[map_idx].va == va) {
1120 				/*
1121 				 * In shared libraries the first page is
1122 				 * mapped separately with the rest of that
1123 				 * segment following back to back in a
1124 				 * separate entry.
1125 				 */
1126 				if (map_idx + 1 < num_maps &&
1127 				    maps[map_idx].sz == SMALL_PAGE_SIZE) {
1128 					vaddr_t next_va = maps[map_idx].va +
1129 							  maps[map_idx].sz;
1130 					size_t comb_sz = maps[map_idx].sz +
1131 							 maps[map_idx + 1].sz;
1132 
1133 					if (next_va == maps[map_idx + 1].va &&
1134 					    comb_sz == sz &&
1135 					    maps[map_idx].flags ==
1136 					    maps[map_idx + 1].flags) {
1137 						/* Skip this and next entry */
1138 						map_idx += 2;
1139 						continue;
1140 					}
1141 				}
1142 				assert(maps[map_idx].sz == sz);
1143 			} else if (maps[map_idx].va < va) {
1144 				if (maps[map_idx].va == mpool_base)
1145 					f |= DUMP_MAP_LDELF;
1146 				print_seg(pctx, print_func, idx, -1,
1147 					  maps[map_idx].va, maps[map_idx].pa,
1148 					  maps[map_idx].sz,
1149 					  maps[map_idx].flags | f);
1150 				idx++;
1151 			}
1152 			map_idx++;
1153 		}
1154 
1155 		if (!seg)
1156 			break;
1157 
1158 		offs = rounddown(seg->offset);
1159 		if (seg->flags & PF_R)
1160 			flags |= DUMP_MAP_READ;
1161 		if (seg->flags & PF_W)
1162 			flags |= DUMP_MAP_WRITE;
1163 		if (seg->flags & PF_X)
1164 			flags |= DUMP_MAP_EXEC;
1165 
1166 		print_seg(pctx, print_func, idx, elf_idx, va, offs, sz, flags);
1167 		idx++;
1168 
1169 		if (!get_next_in_order(elf_queue, &elf, &seg, &elf_idx))
1170 			seg = NULL;
1171 	}
1172 
1173 	elf_idx = 0;
1174 	TAILQ_FOREACH(elf, elf_queue, link) {
1175 		print_wrapper(pctx, print_func,
1176 			      " [%zu] %pUl @ 0x%0*"PRIxVA"\n",
1177 			      elf_idx, (void *)&elf->uuid, 8, elf->load_addr);
1178 		elf_idx++;
1179 	}
1180 }
1181 
1182 #ifdef CFG_UNWIND
1183 void ta_elf_stack_trace_a32(uint32_t regs[16])
1184 {
1185 	struct unwind_state_arm32 state = { };
1186 
1187 	memcpy(state.registers, regs, sizeof(state.registers));
1188 	print_stack_arm32(&state, ta_stack, ta_stack_size);
1189 }
1190 
1191 void ta_elf_stack_trace_a64(uint64_t fp, uint64_t sp, uint64_t pc)
1192 {
1193 	struct unwind_state_arm64 state = { .fp = fp, .sp = sp, .pc = pc };
1194 
1195 	print_stack_arm64(&state, ta_stack, ta_stack_size);
1196 }
1197 #endif
1198 
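/*
 * Load the library @uuid and any libraries it depends on after the main
 * TA has already been loaded, relocate and finalize the new mappings and
 * refresh the __init_fini_info of the TA. Returns TEE_SUCCESS without
 * doing anything if the library is already loaded.
 */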
1199 TEE_Result ta_elf_add_library(const TEE_UUID *uuid)
1200 {
1201 	struct ta_elf *ta = TAILQ_FIRST(&main_elf_queue);
1202 	struct ta_elf *lib = ta_elf_find_elf(uuid);
1203 	struct ta_elf *elf = NULL;
1204 
1205 	if (lib)
1206 		return TEE_SUCCESS; /* Already mapped */
1207 
1208 	lib = queue_elf_helper(uuid);
1209 	if (!lib)
1210 		return TEE_ERROR_OUT_OF_MEMORY;
1211 
1212 	for (elf = lib; elf; elf = TAILQ_NEXT(elf, link))
1213 		ta_elf_load_dependency(elf, ta->is_32bit);
1214 
1215 	for (elf = lib; elf; elf = TAILQ_NEXT(elf, link)) {
1216 		ta_elf_relocate(elf);
1217 		ta_elf_finalize_mappings(elf);
1218 	}
1219 
1220 	for (elf = lib; elf; elf = TAILQ_NEXT(elf, link))
1221 		DMSG("ELF (%pUl) at %#"PRIxVA,
1222 		     (void *)&elf->uuid, elf->load_addr);
1223 
1224 	return ta_elf_set_init_fini_info(ta->is_32bit);
1225 }
1226 
1227 /* Get address/size of .init_array and .fini_array from the dynamic segment */
1228 static void get_init_fini_array(struct ta_elf *elf, unsigned int type,
1229 				vaddr_t addr, size_t memsz, vaddr_t *init,
1230 				size_t *init_cnt, vaddr_t *fini,
1231 				size_t *fini_cnt)
1232 {
1233 	size_t addrsz = 0;
1234 	size_t dyn_entsize = 0;
1235 	size_t num_dyns = 0;
1236 	size_t n = 0;
1237 	unsigned int tag = 0;
1238 	size_t val = 0;
1239 
1240 	assert(type == PT_DYNAMIC);
1241 
1242 	check_phdr_in_range(elf, type, addr, memsz);
1243 
1244 	if (elf->is_32bit) {
1245 		dyn_entsize = sizeof(Elf32_Dyn);
1246 		addrsz = 4;
1247 	} else {
1248 		dyn_entsize = sizeof(Elf64_Dyn);
1249 		addrsz = 8;
1250 	}
1251 
1252 	assert(!(memsz % dyn_entsize));
1253 	num_dyns = memsz / dyn_entsize;
1254 
1255 	for (n = 0; n < num_dyns; n++) {
1256 		read_dyn(elf, addr, n, &tag, &val);
1257 		if (tag == DT_INIT_ARRAY)
1258 			*init = val + elf->load_addr;
1259 		else if (tag == DT_FINI_ARRAY)
1260 			*fini = val + elf->load_addr;
1261 		else if (tag == DT_INIT_ARRAYSZ)
1262 			*init_cnt = val / addrsz;
1263 		else if (tag == DT_FINI_ARRAYSZ)
1264 			*fini_cnt = val / addrsz;
1265 	}
1266 }
1267 
1268 /* Get address/size of .init_array and .fini_array in @elf (if present) */
1269 static void elf_get_init_fini_array(struct ta_elf *elf, vaddr_t *init,
1270 				    size_t *init_cnt, vaddr_t *fini,
1271 				    size_t *fini_cnt)
1272 {
1273 	size_t n = 0;
1274 
1275 	if (elf->is_32bit) {
1276 		Elf32_Phdr *phdr = elf->phdr;
1277 
1278 		for (n = 0; n < elf->e_phnum; n++) {
1279 			if (phdr[n].p_type == PT_DYNAMIC) {
1280 				get_init_fini_array(elf, phdr[n].p_type,
1281 						    phdr[n].p_vaddr,
1282 						    phdr[n].p_memsz,
1283 						    init, init_cnt, fini,
1284 						    fini_cnt);
1285 				return;
1286 			}
1287 		}
1288 	} else {
1289 		Elf64_Phdr *phdr = elf->phdr;
1290 
1291 		for (n = 0; n < elf->e_phnum; n++) {
1292 			if (phdr[n].p_type == PT_DYNAMIC) {
1293 				get_init_fini_array(elf, phdr[n].p_type,
1294 						    phdr[n].p_vaddr,
1295 						    phdr[n].p_memsz,
1296 						    init, init_cnt, fini,
1297 						    fini_cnt);
1298 				return;
1299 			}
1300 		}
1301 	}
1302 }
1303 
1304 static TEE_Result realloc_ifs(vaddr_t va, size_t cnt, bool is_32bit)
1305 {
1306 	struct __init_fini_info32 *info32 = (struct __init_fini_info32 *)va;
1307 	struct __init_fini_info *info = (struct __init_fini_info *)va;
1308 	struct __init_fini32 *ifs32 = NULL;
1309 	struct __init_fini *ifs = NULL;
1310 	size_t prev_cnt = 0;
1311 	void *ptr = NULL;
1312 
1313 	if (is_32bit) {
1314 		ptr = (void *)(vaddr_t)info32->ifs;
1315 		ptr = realloc(ptr, cnt * sizeof(struct __init_fini32));
1316 		if (!ptr)
1317 			return TEE_ERROR_OUT_OF_MEMORY;
1318 		ifs32 = ptr;
1319 		prev_cnt = info32->size;
1320 		if (cnt > prev_cnt)
1321 			memset(ifs32 + prev_cnt, 0,
1322 			       (cnt - prev_cnt) * sizeof(*ifs32));
1323 		info32->ifs = (uint32_t)(vaddr_t)ifs32;
1324 		info32->size = cnt;
1325 	} else {
1326 		ptr = realloc(info->ifs, cnt * sizeof(struct __init_fini));
1327 		if (!ptr)
1328 			return TEE_ERROR_OUT_OF_MEMORY;
1329 		ifs = ptr;
1330 		prev_cnt = info->size;
1331 		if (cnt > prev_cnt)
1332 			memset(ifs + prev_cnt, 0,
1333 			       (cnt - prev_cnt) * sizeof(*ifs));
1334 		info->ifs = ifs;
1335 		info->size = cnt;
1336 	}
1337 
1338 	return TEE_SUCCESS;
1339 }
1340 
1341 static void fill_ifs(vaddr_t va, size_t idx, struct ta_elf *elf, bool is_32bit)
1342 {
1343 	struct __init_fini_info32 *info32 = (struct __init_fini_info32 *)va;
1344 	struct __init_fini_info *info = (struct __init_fini_info *)va;
1345 	struct __init_fini32 *ifs32 = NULL;
1346 	struct __init_fini *ifs = NULL;
1347 	size_t init_cnt = 0;
1348 	size_t fini_cnt = 0;
1349 	vaddr_t init = 0;
1350 	vaddr_t fini = 0;
1351 
1352 	if (is_32bit) {
1353 		assert(idx < info32->size);
1354 		ifs32 = &((struct __init_fini32 *)(vaddr_t)info32->ifs)[idx];
1355 
1356 		if (ifs32->flags & __IFS_VALID)
1357 			return;
1358 
1359 		elf_get_init_fini_array(elf, &init, &init_cnt, &fini,
1360 					&fini_cnt);
1361 
1362 		ifs32->init = (uint32_t)init;
1363 		ifs32->init_size = init_cnt;
1364 
1365 		ifs32->fini = (uint32_t)fini;
1366 		ifs32->fini_size = fini_cnt;
1367 
1368 		ifs32->flags |= __IFS_VALID;
1369 	} else {
1370 		assert(idx < info->size);
1371 		ifs = &info->ifs[idx];
1372 
1373 		if (ifs->flags & __IFS_VALID)
1374 			return;
1375 
1376 		elf_get_init_fini_array(elf, &init, &init_cnt, &fini,
1377 					&fini_cnt);
1378 
1379 		ifs->init = (void (**)(void))init;
1380 		ifs->init_size = init_cnt;
1381 
1382 		ifs->fini = (void (**)(void))fini;
1383 		ifs->fini_size = fini_cnt;
1384 
1385 		ifs->flags |= __IFS_VALID;
1386 	}
1387 }
1388 
1389 /*
1390  * Set or update __init_fini_info in the TA with information from the ELF
1391  * queue
1392  */
1393 TEE_Result ta_elf_set_init_fini_info(bool is_32bit)
1394 {
1395 	struct __init_fini_info *info = NULL;
1396 	TEE_Result res = TEE_SUCCESS;
1397 	struct ta_elf *elf = NULL;
1398 	vaddr_t info_va = 0;
1399 	size_t cnt = 0;
1400 
1401 	res = ta_elf_resolve_sym("__init_fini_info", &info_va, NULL);
1402 	if (res) {
1403 		if (res == TEE_ERROR_ITEM_NOT_FOUND) {
1404 			/* Older TA */
1405 			return TEE_SUCCESS;
1406 		}
1407 		return res;
1408 	}
1409 	assert(info_va);
1410 
1411 	info = (struct __init_fini_info *)info_va;
1412 	if (info->reserved)
1413 		return TEE_ERROR_NOT_SUPPORTED;
1414 
1415 	TAILQ_FOREACH(elf, &main_elf_queue, link)
1416 		cnt++;
1417 
1418 	/* Queue has at least one file (main) */
1419 	assert(cnt);
1420 
1421 	res = realloc_ifs(info_va, cnt, is_32bit);
1422 	if (res)
1423 		goto err;
1424 
1425 	cnt = 0;
1426 	TAILQ_FOREACH(elf, &main_elf_queue, link) {
1427 		fill_ifs(info_va, cnt, elf, is_32bit);
1428 		cnt++;
1429 	}
1430 
1431 	return TEE_SUCCESS;
1432 err:
1433 	free(info);
1434 	return res;
1435 }
1436