xref: /optee_os/ldelf/ta_elf.c (revision 4f5bc11d48c3d1a314e520f9472f83687a27c9cb)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2019, Linaro Limited
4  */
5 
6 #include <assert.h>
7 #include <ctype.h>
8 #include <elf32.h>
9 #include <elf64.h>
10 #include <elf_common.h>
11 #include <ldelf.h>
12 #include <pta_system.h>
13 #include <stdio.h>
14 #include <stdlib.h>
15 #include <string_ext.h>
16 #include <string.h>
17 #include <tee_api_types.h>
18 #include <tee_internal_api_extensions.h>
19 #include <user_ta_header.h>
20 #include <utee_syscalls.h>
21 #include <util.h>
22 
23 #include "sys.h"
24 #include "ta_elf.h"
25 #include "unwind.h"
26 
27 static vaddr_t ta_stack;
28 static vaddr_t ta_stack_size;
29 
30 struct ta_elf_queue main_elf_queue = TAILQ_HEAD_INITIALIZER(main_elf_queue);
31 
32 static struct ta_elf *queue_elf_helper(const TEE_UUID *uuid)
33 {
34 	struct ta_elf *elf = calloc(1, sizeof(*elf));
35 
36 	if (!elf)
37 		return NULL;
38 
39 	TAILQ_INIT(&elf->segs);
40 
41 	elf->uuid = *uuid;
42 	TAILQ_INSERT_TAIL(&main_elf_queue, elf, link);
43 	return elf;
44 }
45 
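/*
 * Queue a new ELF for @uuid unless it's already queued. Returns the new
 * element, or NULL if @uuid was already present in the queue.
 */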
46 static struct ta_elf *queue_elf(const TEE_UUID *uuid)
47 {
48 	struct ta_elf *elf = ta_elf_find_elf(uuid);
49 
50 	if (elf)
51 		return NULL;
52 
53 	elf = queue_elf_helper(uuid);
54 	if (!elf)
55 		err(TEE_ERROR_OUT_OF_MEMORY, "queue_elf_helper");
56 
57 	return elf;
58 }
59 
60 struct ta_elf *ta_elf_find_elf(const TEE_UUID *uuid)
61 {
62 	struct ta_elf *elf = NULL;
63 
64 	TAILQ_FOREACH(elf, &main_elf_queue, link)
65 		if (!memcmp(uuid, &elf->uuid, sizeof(*uuid)))
66 			return elf;
67 
68 	return NULL;
69 }
70 
71 static TEE_Result e32_parse_ehdr(struct ta_elf *elf, Elf32_Ehdr *ehdr)
72 {
73 	if (ehdr->e_ident[EI_VERSION] != EV_CURRENT ||
74 	    ehdr->e_ident[EI_CLASS] != ELFCLASS32 ||
75 	    ehdr->e_ident[EI_DATA] != ELFDATA2LSB ||
76 	    ehdr->e_ident[EI_OSABI] != ELFOSABI_NONE ||
77 	    ehdr->e_type != ET_DYN || ehdr->e_machine != EM_ARM ||
78 	    (ehdr->e_flags & EF_ARM_ABIMASK) != EF_ARM_ABI_VERSION ||
79 #ifndef CFG_WITH_VFP
80 	    (ehdr->e_flags & EF_ARM_ABI_FLOAT_HARD) ||
81 #endif
82 	    ehdr->e_phentsize != sizeof(Elf32_Phdr) ||
83 	    ehdr->e_shentsize != sizeof(Elf32_Shdr))
84 		return TEE_ERROR_BAD_FORMAT;
85 
86 	elf->is_32bit = true;
87 	elf->e_entry = ehdr->e_entry;
88 	elf->e_phoff = ehdr->e_phoff;
89 	elf->e_shoff = ehdr->e_shoff;
90 	elf->e_phnum = ehdr->e_phnum;
91 	elf->e_shnum = ehdr->e_shnum;
92 	elf->e_phentsize = ehdr->e_phentsize;
93 	elf->e_shentsize = ehdr->e_shentsize;
94 
95 	return TEE_SUCCESS;
96 }
97 
98 #ifdef ARM64
99 static TEE_Result e64_parse_ehdr(struct ta_elf *elf, Elf64_Ehdr *ehdr)
100 {
101 	if (ehdr->e_ident[EI_VERSION] != EV_CURRENT ||
102 	    ehdr->e_ident[EI_CLASS] != ELFCLASS64 ||
103 	    ehdr->e_ident[EI_DATA] != ELFDATA2LSB ||
104 	    ehdr->e_ident[EI_OSABI] != ELFOSABI_NONE ||
105 	    ehdr->e_type != ET_DYN || ehdr->e_machine != EM_AARCH64 ||
106 	    ehdr->e_flags || ehdr->e_phentsize != sizeof(Elf64_Phdr) ||
107 	    ehdr->e_shentsize != sizeof(Elf64_Shdr))
108 		return TEE_ERROR_BAD_FORMAT;
109 
110 
111 	elf->is_32bit = false;
112 	elf->e_entry = ehdr->e_entry;
113 	elf->e_phoff = ehdr->e_phoff;
114 	elf->e_shoff = ehdr->e_shoff;
115 	elf->e_phnum = ehdr->e_phnum;
116 	elf->e_shnum = ehdr->e_shnum;
117 	elf->e_phentsize = ehdr->e_phentsize;
118 	elf->e_shentsize = ehdr->e_shentsize;
119 
120 	return TEE_SUCCESS;
121 }
122 #else /*ARM64*/
123 static TEE_Result e64_parse_ehdr(struct ta_elf *elf __unused,
124 				 Elf64_Ehdr *ehdr __unused)
125 {
126 	return TEE_ERROR_NOT_SUPPORTED;
127 }
128 #endif /*ARM64*/
129 
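/*
 * Check that a program header of @type covering [@addr, @addr + @memsz)
 * fits inside the mapped ELF image. @addr is relative to the load address.
 */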
130 static void check_phdr_in_range(struct ta_elf *elf, unsigned int type,
131 				vaddr_t addr, size_t memsz)
132 {
133 	vaddr_t max_addr = 0;
134 
135 	if (ADD_OVERFLOW(addr, memsz, &max_addr))
136 		err(TEE_ERROR_BAD_FORMAT, "Program header %#x overflow", type);
137 
138 	/*
139 	 * elf->load_addr and elf->max_addr both use the final
140 	 * virtual addresses, while this program header is
141 	 * relative to 0.
142 	 */
143 	if (max_addr > elf->max_addr - elf->load_addr)
144 		err(TEE_ERROR_BAD_FORMAT, "Program header %#x out of bounds",
145 		    type);
146 }
147 
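/*
 * Read the tag and value of dynamic entry @idx from the PT_DYNAMIC segment
 * at @addr (relative to the load address).
 */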
148 static void read_dyn(struct ta_elf *elf, vaddr_t addr,
149 		     size_t idx, unsigned int *tag, size_t *val)
150 {
151 	if (elf->is_32bit) {
152 		Elf32_Dyn *dyn = (Elf32_Dyn *)(addr + elf->load_addr);
153 
154 		*tag = dyn[idx].d_tag;
155 		*val = dyn[idx].d_un.d_val;
156 	} else {
157 		Elf64_Dyn *dyn = (Elf64_Dyn *)(addr + elf->load_addr);
158 
159 		*tag = dyn[idx].d_tag;
160 		*val = dyn[idx].d_un.d_val;
161 	}
162 }
163 
164 static void save_hashtab_from_segment(struct ta_elf *elf, unsigned int type,
165 				      vaddr_t addr, size_t memsz)
166 {
167 	size_t dyn_entsize = 0;
168 	size_t num_dyns = 0;
169 	size_t n = 0;
170 	unsigned int tag = 0;
171 	size_t val = 0;
172 
173 	if (type != PT_DYNAMIC)
174 		return;
175 
176 	check_phdr_in_range(elf, type, addr, memsz);
177 
178 	if (elf->is_32bit)
179 		dyn_entsize = sizeof(Elf32_Dyn);
180 	else
181 		dyn_entsize = sizeof(Elf64_Dyn);
182 
183 	assert(!(memsz % dyn_entsize));
184 	num_dyns = memsz / dyn_entsize;
185 
186 	for (n = 0; n < num_dyns; n++) {
187 		read_dyn(elf, addr, n, &tag, &val);
188 		if (tag == DT_HASH) {
189 			elf->hashtab = (void *)(val + elf->load_addr);
190 			break;
191 		}
192 	}
193 }
194 
195 static void check_hashtab(struct ta_elf *elf, void *ptr, size_t num_buckets,
196 			  size_t num_chains)
197 {
198 	/*
199 	 * num_words starts at 2 since the first two words are mandatory and
200 	 * hold num_buckets and num_chains. This function is called twice:
201 	 * first to check that there's room for num_buckets and num_chains,
202 	 * then to check that the whole table fits.
203 	 * See http://www.sco.com/developers/gabi/latest/ch5.dynamic.html#hash
204 	 */
205 	size_t num_words = 2;
206 	vaddr_t max_addr = 0;
207 	size_t sz = 0;
208 
209 	if ((vaddr_t)ptr < elf->load_addr)
210 		err(TEE_ERROR_GENERIC, "Hashtab %p out of range", ptr);
211 
212 	if (!ALIGNMENT_IS_OK(ptr, uint32_t))
213 		err(TEE_ERROR_GENERIC, "Bad alignment of hashtab %p", ptr);
214 
215 	if (ADD_OVERFLOW(num_words, num_buckets, &num_words) ||
216 	    ADD_OVERFLOW(num_words, num_chains, &num_words) ||
217 	    MUL_OVERFLOW(num_words, sizeof(uint32_t), &sz) ||
218 	    ADD_OVERFLOW((vaddr_t)ptr, sz, &max_addr))
219 		err(TEE_ERROR_GENERIC, "Hashtab overflow");
220 
221 	if (max_addr > elf->max_addr)
222 		err(TEE_ERROR_GENERIC, "Hashtab %p out of range", ptr);
223 }
224 
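/*
 * Locate DT_HASH in the PT_DYNAMIC segment and record the ELF hash table,
 * checking both that its header and that the full table fit within the
 * mapped image.
 */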
225 static void save_hashtab(struct ta_elf *elf)
226 {
227 	uint32_t *hashtab = NULL;
228 	size_t n = 0;
229 
230 	if (elf->is_32bit) {
231 		Elf32_Phdr *phdr = elf->phdr;
232 
233 		for (n = 0; n < elf->e_phnum; n++)
234 			save_hashtab_from_segment(elf, phdr[n].p_type,
235 						  phdr[n].p_vaddr,
236 						  phdr[n].p_memsz);
237 	} else {
238 		Elf64_Phdr *phdr = elf->phdr;
239 
240 		for (n = 0; n < elf->e_phnum; n++)
241 			save_hashtab_from_segment(elf, phdr[n].p_type,
242 						  phdr[n].p_vaddr,
243 						  phdr[n].p_memsz);
244 	}
245 
246 	check_hashtab(elf, elf->hashtab, 0, 0);
247 	hashtab = elf->hashtab;
248 	check_hashtab(elf, elf->hashtab, hashtab[0], hashtab[1]);
249 }
250 
251 static void e32_save_symtab(struct ta_elf *elf, size_t tab_idx)
252 {
253 	Elf32_Shdr *shdr = elf->shdr;
254 	size_t str_idx = shdr[tab_idx].sh_link;
255 
256 	elf->dynsymtab = (void *)(shdr[tab_idx].sh_addr + elf->load_addr);
257 	assert(!(shdr[tab_idx].sh_size % sizeof(Elf32_Sym)));
258 	elf->num_dynsyms = shdr[tab_idx].sh_size / sizeof(Elf32_Sym);
259 
260 	elf->dynstr = (void *)(shdr[str_idx].sh_addr + elf->load_addr);
261 	elf->dynstr_size = shdr[str_idx].sh_size;
262 }
263 
264 static void e64_save_symtab(struct ta_elf *elf, size_t tab_idx)
265 {
266 	Elf64_Shdr *shdr = elf->shdr;
267 	size_t str_idx = shdr[tab_idx].sh_link;
268 
269 	elf->dynsymtab = (void *)(vaddr_t)(shdr[tab_idx].sh_addr +
270 					   elf->load_addr);
271 	assert(!(shdr[tab_idx].sh_size % sizeof(Elf64_Sym)));
272 	elf->num_dynsyms = shdr[tab_idx].sh_size / sizeof(Elf64_Sym);
273 
274 	elf->dynstr = (void *)(vaddr_t)(shdr[str_idx].sh_addr + elf->load_addr);
275 	elf->dynstr_size = shdr[str_idx].sh_size;
276 }
277 
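/*
 * Record the dynamic symbol table (SHT_DYNSYM) and its string table from
 * the section headers, then locate the hash table used for symbol lookup.
 */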
278 static void save_symtab(struct ta_elf *elf)
279 {
280 	size_t n = 0;
281 
282 	if (elf->is_32bit) {
283 		Elf32_Shdr *shdr = elf->shdr;
284 
285 		for (n = 0; n < elf->e_shnum; n++) {
286 			if (shdr[n].sh_type == SHT_DYNSYM) {
287 				e32_save_symtab(elf, n);
288 				break;
289 			}
290 		}
291 	} else {
292 		Elf64_Shdr *shdr = elf->shdr;
293 
294 		for (n = 0; n < elf->e_shnum; n++) {
295 			if (shdr[n].sh_type == SHT_DYNSYM) {
296 				e64_save_symtab(elf, n);
297 				break;
298 			}
299 		}
300 
301 	}
302 
303 	save_hashtab(elf);
304 }
305 
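/*
 * Open the TA binary, map its first page, verify and parse the ELF header
 * (32- or 64-bit) and locate the program headers within that page.
 */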
306 static void init_elf(struct ta_elf *elf)
307 {
308 	TEE_Result res = TEE_SUCCESS;
309 	vaddr_t va = 0;
310 	uint32_t flags = PTA_SYSTEM_MAP_FLAG_SHAREABLE;
311 
312 	res = sys_open_ta_bin(&elf->uuid, &elf->handle);
313 	if (res)
314 		err(res, "sys_open_ta_bin(%pUl)", (void *)&elf->uuid);
315 
316 	/*
317 	 * Map it read-only executable when we're loading a library where
318 	 * the ELF header is included in a load segment.
319 	 */
320 	if (!elf->is_main)
321 		flags |= PTA_SYSTEM_MAP_FLAG_EXECUTABLE;
322 	res = sys_map_ta_bin(&va, SMALL_PAGE_SIZE, flags, elf->handle, 0, 0, 0);
323 	if (res)
324 		err(res, "sys_map_ta_bin");
325 	elf->ehdr_addr = va;
326 	if (!elf->is_main) {
327 		elf->load_addr = va;
328 		elf->max_addr = va + SMALL_PAGE_SIZE;
329 		elf->max_offs = SMALL_PAGE_SIZE;
330 	}
331 
332 	if (!IS_ELF(*(Elf32_Ehdr *)va))
333 		err(TEE_ERROR_BAD_FORMAT, "TA is not an ELF");
334 
335 	res = e32_parse_ehdr(elf, (void *)va);
336 	if (res == TEE_ERROR_BAD_FORMAT)
337 		res = e64_parse_ehdr(elf, (void *)va);
338 	if (res)
339 		err(res, "Cannot parse ELF");
340 
341 	if (elf->e_phoff + elf->e_phnum * elf->e_phentsize > SMALL_PAGE_SIZE)
342 		err(TEE_ERROR_NOT_SUPPORTED, "Cannot read program headers");
343 
344 	elf->phdr = (void *)(va + elf->e_phoff);
345 }
346 
347 static size_t roundup(size_t v)
348 {
349 	return ROUNDUP(v, SMALL_PAGE_SIZE);
350 }
351 
352 static size_t rounddown(size_t v)
353 {
354 	return ROUNDDOWN(v, SMALL_PAGE_SIZE);
355 }
356 
357 static void add_segment(struct ta_elf *elf, size_t offset, size_t vaddr,
358 			size_t filesz, size_t memsz, size_t flags, size_t align)
359 {
360 	struct segment *seg = calloc(1, sizeof(*seg));
361 
362 	if (!seg)
363 		err(TEE_ERROR_OUT_OF_MEMORY, "calloc");
364 
365 	seg->offset = offset;
366 	seg->vaddr = vaddr;
367 	seg->filesz = filesz;
368 	seg->memsz = memsz;
369 	seg->flags = flags;
370 	seg->align = align;
371 
372 	TAILQ_INSERT_TAIL(&elf->segs, seg, link);
373 }
374 
375 static void parse_load_segments(struct ta_elf *elf)
376 {
377 	size_t n = 0;
378 
379 	if (elf->is_32bit) {
380 		Elf32_Phdr *phdr = elf->phdr;
381 
382 		for (n = 0; n < elf->e_phnum; n++)
383 			if (phdr[n].p_type == PT_LOAD) {
384 				add_segment(elf, phdr[n].p_offset,
385 					    phdr[n].p_vaddr, phdr[n].p_filesz,
386 					    phdr[n].p_memsz, phdr[n].p_flags,
387 					    phdr[n].p_align);
388 			} else if (phdr[n].p_type == PT_ARM_EXIDX) {
389 				elf->exidx_start = phdr[n].p_vaddr;
390 				elf->exidx_size = phdr[n].p_filesz;
391 			}
392 	} else {
393 		Elf64_Phdr *phdr = elf->phdr;
394 
395 		for (n = 0; n < elf->e_phnum; n++)
396 			if (phdr[n].p_type == PT_LOAD)
397 				add_segment(elf, phdr[n].p_offset,
398 					    phdr[n].p_vaddr, phdr[n].p_filesz,
399 					    phdr[n].p_memsz, phdr[n].p_flags,
400 					    phdr[n].p_align);
401 	}
402 }
403 
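/*
 * Copy the file content of @seg into its writable mapping: first whatever
 * is already present in the mapped pages, then the rest directly from the
 * TA binary.
 */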
404 static void copy_remapped_to(struct ta_elf *elf, const struct segment *seg)
405 {
406 	uint8_t *dst = (void *)(seg->vaddr + elf->load_addr);
407 	size_t n = 0;
408 	size_t offs = seg->offset;
409 	size_t num_bytes = seg->filesz;
410 
411 	if (offs < elf->max_offs) {
412 		n = MIN(elf->max_offs - offs, num_bytes);
413 		memcpy(dst, (void *)(elf->max_addr + offs - elf->max_offs), n);
414 		dst += n;
415 		offs += n;
416 		num_bytes -= n;
417 	}
418 
419 	if (num_bytes) {
420 		TEE_Result res = sys_copy_from_ta_bin(dst, num_bytes,
421 						      elf->handle, offs);
422 
423 		if (res)
424 			err(res, "sys_copy_from_ta_bin");
425 		elf->max_offs += offs;
426 	}
427 }
428 
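/*
 * Merge overlapping load segments and page-align addresses, offsets and
 * sizes so that each remaining segment can be mapped with page granularity.
 */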
429 static void adjust_segments(struct ta_elf *elf)
430 {
431 	struct segment *seg = NULL;
432 	struct segment *prev_seg = NULL;
433 	size_t prev_end_addr = 0;
434 	size_t align = 0;
435 	size_t mask = 0;
436 
437 	/* Sanity check */
438 	TAILQ_FOREACH(seg, &elf->segs, link) {
439 		size_t dummy __maybe_unused = 0;
440 
441 		assert(seg->align >= SMALL_PAGE_SIZE);
442 		assert(!ADD_OVERFLOW(seg->vaddr, seg->memsz, &dummy));
443 		assert(seg->filesz <= seg->memsz);
444 		assert((seg->offset & SMALL_PAGE_MASK) ==
445 		       (seg->vaddr & SMALL_PAGE_MASK));
446 
447 		prev_seg = TAILQ_PREV(seg, segment_head, link);
448 		if (prev_seg) {
449 			assert(seg->vaddr >= prev_seg->vaddr + prev_seg->memsz);
450 			assert(seg->offset >=
451 			       prev_seg->offset + prev_seg->filesz);
452 		}
453 		if (!align)
454 			align = seg->align;
455 		assert(align == seg->align);
456 	}
457 
458 	mask = align - 1;
459 
460 	seg = TAILQ_FIRST(&elf->segs);
461 	if (seg)
462 		seg = TAILQ_NEXT(seg, link);
463 	while (seg) {
464 		prev_seg = TAILQ_PREV(seg, segment_head, link);
465 		prev_end_addr = prev_seg->vaddr + prev_seg->memsz;
466 
467 		/*
468 		 * This segment may overlap with the last "page" in the
469 		 * previous segment in two different ways:
470 		 * 1. Virtual address (and offset) overlaps =>
471 		 *    Permissions need to be merged. The offset must have
472 		 *    the same SMALL_PAGE_MASK bits as the vaddr, and both
473 		 *    must follow on from the previous segment.
474 		 *
475 		 * 2. Only offset overlaps =>
476 		 *    The same page in the ELF is mapped at two different
477 		 *    virtual addresses. As a limitation this segment must
478 		 *    be mapped as writeable.
479 		 */
480 
481 		/* Case 1. */
482 		if (rounddown(seg->vaddr) < prev_end_addr) {
483 			assert((seg->vaddr & mask) == (seg->offset & mask));
484 			assert(prev_seg->memsz == prev_seg->filesz);
485 
486 			/*
487 			 * Merge the segments and their permissions.
488 			 * Note that there may be a small hole between the
489 			 * two segments.
490 			 */
491 			prev_seg->filesz = seg->vaddr + seg->filesz -
492 					   prev_seg->vaddr;
493 			prev_seg->memsz = seg->vaddr + seg->memsz -
494 					   prev_seg->vaddr;
495 			prev_seg->flags |= seg->flags;
496 
497 			TAILQ_REMOVE(&elf->segs, seg, link);
498 			free(seg);
499 			seg = TAILQ_NEXT(prev_seg, link);
500 			continue;
501 		}
502 
503 		/* Case 2. */
504 		if ((seg->offset & mask) &&
505 		    rounddown(seg->offset) <
506 		    (prev_seg->offset + prev_seg->filesz)) {
507 
508 			assert(seg->flags & PF_W);
509 			seg->remapped_writeable = true;
510 		}
511 
512 		/*
513 		 * No overlap, but we may need to align address, offset and
514 		 * size.
515 		 */
516 		seg->filesz += seg->vaddr - rounddown(seg->vaddr);
517 		seg->memsz += seg->vaddr - rounddown(seg->vaddr);
518 		seg->vaddr = rounddown(seg->vaddr);
519 		seg->offset = rounddown(seg->offset);
520 		seg = TAILQ_NEXT(seg, link);
521 	}
522 
523 }
524 
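/*
 * Map the segments of a legacy TA: each segment gets zero-initialized
 * memory which is then filled with a copy from the TA binary, since legacy
 * TAs cannot use shared mappings.
 */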
525 static void populate_segments_legacy(struct ta_elf *elf)
526 {
527 	TEE_Result res = TEE_SUCCESS;
528 	struct segment *seg = NULL;
529 	vaddr_t va = 0;
530 
531 	assert(elf->is_legacy);
532 	TAILQ_FOREACH(seg, &elf->segs, link) {
533 		struct segment *last_seg = TAILQ_LAST(&elf->segs, segment_head);
534 		size_t pad_end = roundup(last_seg->vaddr + last_seg->memsz -
535 					 seg->vaddr - seg->memsz);
536 		size_t num_bytes = roundup(seg->memsz);
537 
538 		if (!elf->load_addr)
539 			va = 0;
540 		else
541 			va = seg->vaddr + elf->load_addr;
542 
543 
544 		if (!(seg->flags & PF_R))
545 			err(TEE_ERROR_NOT_SUPPORTED,
546 			    "Segment must be readable");
547 
548 		res = sys_map_zi(num_bytes, 0, &va, 0, pad_end);
549 		if (res)
550 			err(res, "sys_map_zi");
551 		res = sys_copy_from_ta_bin((void *)va, seg->filesz,
552 					   elf->handle, seg->offset);
553 		if (res)
554 			err(res, "sys_copy_from_ta_bin");
555 
556 		if (!elf->load_addr)
557 			elf->load_addr = va;
558 		elf->max_addr = va + num_bytes;
559 		elf->max_offs = seg->offset + seg->filesz;
560 	}
561 }
562 
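/*
 * Return the number of padding bytes to reserve before a new mapping. With
 * CFG_TA_ASLR this is a random number of pages within the configured ASLR
 * offset range, otherwise 0.
 */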
563 static size_t get_pad_begin(void)
564 {
565 #ifdef CFG_TA_ASLR
566 	size_t min = CFG_TA_ASLR_MIN_OFFSET_PAGES;
567 	size_t max = CFG_TA_ASLR_MAX_OFFSET_PAGES;
568 	TEE_Result res = TEE_SUCCESS;
569 	uint32_t rnd32 = 0;
570 	size_t rnd = 0;
571 
572 	COMPILE_TIME_ASSERT(CFG_TA_ASLR_MIN_OFFSET_PAGES <
573 			    CFG_TA_ASLR_MAX_OFFSET_PAGES);
574 	if (max > min) {
575 		res = utee_cryp_random_number_generate(&rnd32, sizeof(rnd32));
576 		if (res) {
577 			DMSG("Random read failed: %#"PRIx32, res);
578 			return min * SMALL_PAGE_SIZE;
579 		}
580 		rnd = rnd32 % (max - min);
581 	}
582 
583 	return (min + rnd) * SMALL_PAGE_SIZE;
584 #else /*!CFG_TA_ASLR*/
585 	return 0;
586 #endif /*!CFG_TA_ASLR*/
587 }
588 
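/*
 * Map each adjusted load segment: writable segments get zero-initialized
 * memory filled from the TA binary, read-only segments are mapped shareable
 * directly from the binary. Segments overlapping the already mapped first
 * page of a library are trimmed or skipped. If no load address is set yet,
 * the first mapping decides it, possibly with ASLR padding from
 * get_pad_begin().
 */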
589 static void populate_segments(struct ta_elf *elf)
590 {
591 	TEE_Result res = TEE_SUCCESS;
592 	struct segment *seg = NULL;
593 	vaddr_t va = 0;
594 	size_t pad_begin = 0;
595 
596 	assert(!elf->is_legacy);
597 	TAILQ_FOREACH(seg, &elf->segs, link) {
598 		struct segment *last_seg = TAILQ_LAST(&elf->segs, segment_head);
599 		size_t pad_end = roundup(last_seg->vaddr + last_seg->memsz -
600 					 seg->vaddr - seg->memsz);
601 
602 		if (seg->remapped_writeable) {
603 			size_t num_bytes = roundup(seg->vaddr + seg->memsz) -
604 					   rounddown(seg->vaddr);
605 
606 			assert(elf->load_addr);
607 			va = rounddown(elf->load_addr + seg->vaddr);
608 			assert(va >= elf->max_addr);
609 			res = sys_map_zi(num_bytes, 0, &va, 0, pad_end);
610 			if (res)
611 				err(res, "sys_map_zi");
612 
613 			copy_remapped_to(elf, seg);
614 			elf->max_addr = va + num_bytes;
615 		} else {
616 			uint32_t flags =  0;
617 			size_t filesz = seg->filesz;
618 			size_t memsz = seg->memsz;
619 			size_t offset = seg->offset;
620 			size_t vaddr = seg->vaddr;
621 
622 			if (offset < elf->max_offs) {
623 				/*
624 				 * We're in a load segment which overlaps
625 				 * with (or is covered by) the first page
626 				 * of a shared library.
627 				 */
628 				if (vaddr + filesz < SMALL_PAGE_SIZE) {
629 					size_t num_bytes = 0;
630 
631 					/*
632 					 * If this segment is completely
633 					 * covered, take next.
634 					 */
635 					if (vaddr + memsz <= SMALL_PAGE_SIZE)
636 						continue;
637 
638 					/*
639 					 * All data of the segment is
640 					 * loaded, but we need to zero
641 					 * extend it.
642 					 */
643 					va = elf->max_addr;
644 					num_bytes = roundup(vaddr + memsz) -
645 						    roundup(vaddr) -
646 						    SMALL_PAGE_SIZE;
647 					assert(num_bytes);
648 					res = sys_map_zi(num_bytes, 0, &va, 0,
649 							 0);
650 					if (res)
651 						err(res, "sys_map_zi");
652 					elf->max_addr = roundup(va + num_bytes);
653 					continue;
654 				}
655 
656 				/* Partial overlap, remove the first page. */
657 				vaddr += SMALL_PAGE_SIZE;
658 				filesz -= SMALL_PAGE_SIZE;
659 				memsz -= SMALL_PAGE_SIZE;
660 				offset += SMALL_PAGE_SIZE;
661 			}
662 
663 			if (!elf->load_addr) {
664 				va = 0;
665 				pad_begin = get_pad_begin();
666 				/*
667 				 * If mapping with pad_begin fails we'll
668 				 * retry without pad_begin, effectively
669 				 * disabling ASLR for the current ELF file.
670 				 */
671 			} else {
672 				va = vaddr + elf->load_addr;
673 				pad_begin = 0;
674 			}
675 
676 			if (seg->flags & PF_W)
677 				flags |= PTA_SYSTEM_MAP_FLAG_WRITEABLE;
678 			else
679 				flags |= PTA_SYSTEM_MAP_FLAG_SHAREABLE;
680 			if (seg->flags & PF_X)
681 				flags |= PTA_SYSTEM_MAP_FLAG_EXECUTABLE;
682 			if (!(seg->flags & PF_R))
683 				err(TEE_ERROR_NOT_SUPPORTED,
684 				    "Segment must be readable");
685 			if (flags & PTA_SYSTEM_MAP_FLAG_WRITEABLE) {
686 				res = sys_map_zi(memsz, 0, &va, pad_begin,
687 						 pad_end);
688 				if (pad_begin && res == TEE_ERROR_OUT_OF_MEMORY)
689 					res = sys_map_zi(memsz, 0, &va, 0,
690 							 pad_end);
691 				if (res)
692 					err(res, "sys_map_zi");
693 				res = sys_copy_from_ta_bin((void *)va, filesz,
694 							   elf->handle, offset);
695 				if (res)
696 					err(res, "sys_copy_from_ta_bin");
697 			} else {
698 				res = sys_map_ta_bin(&va, filesz, flags,
699 						     elf->handle, offset,
700 						     pad_begin, pad_end);
701 				if (pad_begin && res == TEE_ERROR_OUT_OF_MEMORY)
702 					res = sys_map_ta_bin(&va, filesz, flags,
703 							     elf->handle,
704 							     offset, 0,
705 							     pad_end);
706 				if (res)
707 					err(res, "sys_map_ta_bin");
708 			}
709 
710 			if (!elf->load_addr)
711 				elf->load_addr = va;
712 			elf->max_addr = roundup(va + filesz);
713 			elf->max_offs += filesz;
714 		}
715 	}
716 }
717 
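/*
 * Parse and adjust the load segments. If the first segment starts within
 * the already mapped first page (a shared library with the ELF header in a
 * load segment), remap that page to a region large enough for the whole ELF.
 */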
718 static void map_segments(struct ta_elf *elf)
719 {
720 	TEE_Result res = TEE_SUCCESS;
721 
722 	parse_load_segments(elf);
723 	adjust_segments(elf);
724 	if (TAILQ_FIRST(&elf->segs)->offset < SMALL_PAGE_SIZE) {
725 		vaddr_t va = 0;
726 		size_t sz = elf->max_addr - elf->load_addr;
727 		struct segment *seg = TAILQ_LAST(&elf->segs, segment_head);
728 		size_t pad_begin = get_pad_begin();
729 
730 		/*
731 		 * We're loading a library; if we're not, other parts of the
732 		 * code need to be updated too.
733 		 */
734 		assert(!elf->is_main);
735 
736 		/*
737 		 * Now that we know how much virtual memory is needed, move
738 		 * the already mapped part to a location which can
739 		 * accommodate us.
740 		 */
741 		res = sys_remap(elf->load_addr, &va, sz, pad_begin,
742 				roundup(seg->vaddr + seg->memsz));
743 		if (res == TEE_ERROR_OUT_OF_MEMORY)
744 			res = sys_remap(elf->load_addr, &va, sz, 0,
745 					roundup(seg->vaddr + seg->memsz));
746 		if (res)
747 			err(res, "sys_remap");
748 		elf->ehdr_addr = va;
749 		elf->load_addr = va;
750 		elf->max_addr = va + sz;
751 		elf->phdr = (void *)(va + elf->e_phoff);
752 	}
753 }
754 
755 static void add_deps_from_segment(struct ta_elf *elf, unsigned int type,
756 				  vaddr_t addr, size_t memsz)
757 {
758 	size_t dyn_entsize = 0;
759 	size_t num_dyns = 0;
760 	size_t n = 0;
761 	unsigned int tag = 0;
762 	size_t val = 0;
763 	TEE_UUID uuid = { };
764 	char *str_tab = NULL;
765 
766 	if (type != PT_DYNAMIC)
767 		return;
768 
769 	check_phdr_in_range(elf, type, addr, memsz);
770 
771 	if (elf->is_32bit)
772 		dyn_entsize = sizeof(Elf32_Dyn);
773 	else
774 		dyn_entsize = sizeof(Elf64_Dyn);
775 
776 	assert(!(memsz % dyn_entsize));
777 	num_dyns = memsz / dyn_entsize;
778 
779 	for (n = 0; n < num_dyns; n++) {
780 		read_dyn(elf, addr, n, &tag, &val);
781 		if (tag == DT_STRTAB) {
782 			str_tab = (char *)(val + elf->load_addr);
783 			break;
784 		}
785 	}
786 
787 	for (n = 0; n < num_dyns; n++) {
788 		read_dyn(elf, addr, n, &tag, &val);
789 		if (tag != DT_NEEDED)
790 			continue;
791 		tee_uuid_from_str(&uuid, str_tab + val);
792 		queue_elf(&uuid);
793 	}
794 }
795 
796 static void add_dependencies(struct ta_elf *elf)
797 {
798 	size_t n = 0;
799 
800 	if (elf->is_32bit) {
801 		Elf32_Phdr *phdr = elf->phdr;
802 
803 		for (n = 0; n < elf->e_phnum; n++)
804 			add_deps_from_segment(elf, phdr[n].p_type,
805 					      phdr[n].p_vaddr, phdr[n].p_memsz);
806 	} else {
807 		Elf64_Phdr *phdr = elf->phdr;
808 
809 		for (n = 0; n < elf->e_phnum; n++)
810 			add_deps_from_segment(elf, phdr[n].p_type,
811 					      phdr[n].p_vaddr, phdr[n].p_memsz);
812 	}
813 }
814 
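/*
 * Copy the section headers into a heap buffer, partly from the already
 * mapped first page if they (partially) reside there and the rest from the
 * TA binary.
 */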
815 static void copy_section_headers(struct ta_elf *elf)
816 {
817 	TEE_Result res = TEE_SUCCESS;
818 	size_t sz = elf->e_shnum * elf->e_shentsize;
819 	size_t offs = 0;
820 
821 	elf->shdr = malloc(sz);
822 	if (!elf->shdr)
823 		err(TEE_ERROR_OUT_OF_MEMORY, "malloc");
824 
825 	/*
826 	 * We're assuming that the section headers come after the load segments,
827 	 * but if it's a very small dynamically linked library the section
828 	 * headers can still end up (partially?) in the first mapped page.
829 	 */
830 	if (elf->e_shoff < SMALL_PAGE_SIZE) {
831 		assert(!elf->is_main);
832 		offs = MIN(SMALL_PAGE_SIZE - elf->e_shoff, sz);
833 		memcpy(elf->shdr, (void *)(elf->load_addr + elf->e_shoff),
834 		       offs);
835 	}
836 
837 	if (offs < sz) {
838 		res = sys_copy_from_ta_bin((uint8_t *)elf->shdr + offs,
839 					   sz - offs, elf->handle,
840 					   elf->e_shoff + offs);
841 		if (res)
842 			err(res, "sys_copy_from_ta_bin");
843 	}
844 }
845 
846 static void close_handle(struct ta_elf *elf)
847 {
848 	TEE_Result res = sys_close_ta_bin(elf->handle);
849 
850 	if (res)
851 		err(res, "sys_close_ta_bin");
852 	elf->handle = -1;
853 }
854 
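/*
 * Undo a previous load attempt: unmap the ELF header page and all segment
 * mappings, free the copied section headers and reset the parsed state.
 */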
855 static void clean_elf_load_main(struct ta_elf *elf)
856 {
857 	TEE_Result res = TEE_SUCCESS;
858 
859 	/*
860 	 * Clean up from last attempt to load
861 	 */
862 	res = sys_unmap(elf->ehdr_addr, SMALL_PAGE_SIZE);
863 	if (res)
864 		err(res, "sys_unmap");
865 
866 	while (!TAILQ_EMPTY(&elf->segs)) {
867 		struct segment *seg = TAILQ_FIRST(&elf->segs);
868 		vaddr_t va = 0;
869 		size_t num_bytes = 0;
870 
871 		va = rounddown(elf->load_addr + seg->vaddr);
872 		if (seg->remapped_writeable)
873 			num_bytes = roundup(seg->vaddr + seg->memsz) -
874 				    rounddown(seg->vaddr);
875 		else
876 			num_bytes = seg->memsz;
877 
878 		res = sys_unmap(va, num_bytes);
879 		if (res)
880 			err(res, "sys_unmap");
881 
882 		TAILQ_REMOVE(&elf->segs, seg, link);
883 		free(seg);
884 	}
885 
886 	free(elf->shdr);
887 	memset(&elf->is_32bit, 0,
888 	       (vaddr_t)&elf->uuid - (vaddr_t)&elf->is_32bit);
889 
890 	TAILQ_INIT(&elf->segs);
891 }
892 
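/*
 * Load the main TA ELF. If it turns out to be a legacy TA (entry point in
 * ta_head rather than in the ELF header) the whole load is redone with
 * legacy mappings.
 */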
893 static void load_main(struct ta_elf *elf)
894 {
895 	init_elf(elf);
896 	map_segments(elf);
897 	populate_segments(elf);
898 	add_dependencies(elf);
899 	copy_section_headers(elf);
900 	save_symtab(elf);
901 	close_handle(elf);
902 
903 	elf->head = (struct ta_head *)elf->load_addr;
904 	if (elf->head->depr_entry != UINT64_MAX) {
905 		/*
906 		 * Legacy TAs set their entry point in ta_head. Non-legacy
907 		 * TAs use the ELF entry point instead and leave the ta_head
908 		 * entry point set to UINT64_MAX to indicate that it's not
909 		 * used.
910 		 *
911 		 * NB, everything before the commit a73b5878c89d ("Replace
912 		 * ta_head.entry with elf entry") is considered a legacy TA
913 		 * by ldelf.
914 		 *
915 		 * Legacy TAs cannot be mapped with shared memory segments,
916 		 * so restart the mapping if it turns out we're loading a
917 		 * legacy TA.
918 		 */
919 
920 		DMSG("Reloading TA %pUl as legacy TA", (void *)&elf->uuid);
921 		clean_elf_load_main(elf);
922 		elf->is_legacy = true;
923 		init_elf(elf);
924 		map_segments(elf);
925 		populate_segments_legacy(elf);
926 		add_dependencies(elf);
927 		copy_section_headers(elf);
928 		save_symtab(elf);
929 		close_handle(elf);
930 		elf->head = (struct ta_head *)elf->load_addr;
931 		/*
932 		 * Check that the TA is still a legacy TA, if it isn't give
933 		 * up now since we're likely under attack.
934 		 */
935 		if (elf->head->depr_entry == UINT64_MAX)
936 			err(TEE_ERROR_GENERIC,
937 			    "TA %pUl was changed on disk to non-legacy",
938 			    (void *)&elf->uuid);
939 	}
940 
941 }
942 
943 void ta_elf_load_main(const TEE_UUID *uuid, uint32_t *is_32bit, uint64_t *sp,
944 		      uint32_t *ta_flags)
945 {
946 	struct ta_elf *elf = queue_elf(uuid);
947 	vaddr_t va = 0;
948 	TEE_Result res = TEE_SUCCESS;
949 
950 	assert(elf);
951 	elf->is_main = true;
952 
953 	load_main(elf);
954 
955 	*is_32bit = elf->is_32bit;
956 	res = sys_map_zi(elf->head->stack_size, 0, &va, 0, 0);
957 	if (res)
958 		err(res, "sys_map_zi stack");
959 
960 	if (elf->head->flags & ~TA_FLAGS_MASK)
961 		err(TEE_ERROR_BAD_FORMAT, "Invalid TA flag(s) %#"PRIx32,
962 		    elf->head->flags & ~TA_FLAGS_MASK);
963 
964 	*ta_flags = elf->head->flags;
965 	*sp = va + elf->head->stack_size;
966 	ta_stack = va;
967 	ta_stack_size = elf->head->stack_size;
968 }
969 
970 void ta_elf_finalize_load_main(uint64_t *entry)
971 {
972 	struct ta_elf *elf = TAILQ_FIRST(&main_elf_queue);
973 	TEE_Result res = TEE_SUCCESS;
974 
975 	assert(elf->is_main);
976 
977 	res = ta_elf_set_init_fini_info(elf->is_32bit);
978 	if (res)
979 		err(res, "ta_elf_set_init_fini_info");
980 
981 	if (elf->is_legacy)
982 		*entry = elf->head->depr_entry;
983 	else
984 		*entry = elf->e_entry + elf->load_addr;
985 }
986 
987 
988 void ta_elf_load_dependency(struct ta_elf *elf, bool is_32bit)
989 {
990 	if (elf->is_main)
991 		return;
992 
993 	init_elf(elf);
994 	if (elf->is_32bit != is_32bit)
995 		err(TEE_ERROR_BAD_FORMAT, "ELF %pUl is %sbit (expected %sbit)",
996 		    (void *)&elf->uuid, elf->is_32bit ? "32" : "64",
997 		    is_32bit ? "32" : "64");
998 
999 	map_segments(elf);
1000 	populate_segments(elf);
1001 	add_dependencies(elf);
1002 	copy_section_headers(elf);
1003 	save_symtab(elf);
1004 	close_handle(elf);
1005 }
1006 
1007 void ta_elf_finalize_mappings(struct ta_elf *elf)
1008 {
1009 	TEE_Result res = TEE_SUCCESS;
1010 	struct segment *seg = NULL;
1011 
1012 	if (!elf->is_legacy)
1013 		return;
1014 
1015 	TAILQ_FOREACH(seg, &elf->segs, link) {
1016 		vaddr_t va = elf->load_addr + seg->vaddr;
1017 		uint32_t flags =  0;
1018 
1019 		if (seg->flags & PF_W)
1020 			flags |= PTA_SYSTEM_MAP_FLAG_WRITEABLE;
1021 		if (seg->flags & PF_X)
1022 			flags |= PTA_SYSTEM_MAP_FLAG_EXECUTABLE;
1023 
1024 		res = sys_set_prot(va, seg->memsz, flags);
1025 		if (res)
1026 			err(res, "sys_set_prot");
1027 	}
1028 }
1029 
1030 static void __printf(3, 4) print_wrapper(void *pctx, print_func_t print_func,
1031 					 const char *fmt, ...)
1032 {
1033 	va_list ap;
1034 
1035 	va_start(ap, fmt);
1036 	print_func(pctx, fmt, ap);
1037 	va_end(ap);
1038 }
1039 
1040 static void print_seg(void *pctx, print_func_t print_func,
1041 		      size_t idx __maybe_unused, int elf_idx __maybe_unused,
1042 		      vaddr_t va __maybe_unused, paddr_t pa __maybe_unused,
1043 		      size_t sz __maybe_unused, uint32_t flags)
1044 {
1045 	int width __maybe_unused = 8;
1046 	char desc[14] __maybe_unused = "";
1047 	char flags_str[] __maybe_unused = "----";
1048 
1049 	if (elf_idx > -1) {
1050 		snprintf(desc, sizeof(desc), " [%d]", elf_idx);
1051 	} else {
1052 		if (flags & DUMP_MAP_EPHEM)
1053 			snprintf(desc, sizeof(desc), " (param)");
1054 		if (flags & DUMP_MAP_LDELF)
1055 			snprintf(desc, sizeof(desc), " (ldelf)");
1056 		if (va == ta_stack)
1057 			snprintf(desc, sizeof(desc), " (stack)");
1058 	}
1059 
1060 	if (flags & DUMP_MAP_READ)
1061 		flags_str[0] = 'r';
1062 	if (flags & DUMP_MAP_WRITE)
1063 		flags_str[1] = 'w';
1064 	if (flags & DUMP_MAP_EXEC)
1065 		flags_str[2] = 'x';
1066 	if (flags & DUMP_MAP_SECURE)
1067 		flags_str[3] = 's';
1068 
1069 	print_wrapper(pctx, print_func,
1070 		      "region %2zu: va 0x%0*"PRIxVA" pa 0x%0*"PRIxPA" size 0x%06zx flags %s%s\n",
1071 		      idx, width, va, width, pa, sz, flags_str, desc);
1072 }
1073 
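/*
 * Advance to the next segment, walking the ELFs in @elf_queue in order of
 * increasing load address. Returns false when all segments have been
 * visited.
 */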
1074 static bool get_next_in_order(struct ta_elf_queue *elf_queue,
1075 			      struct ta_elf **elf, struct segment **seg,
1076 			      size_t *elf_idx)
1077 {
1078 	struct ta_elf *e = NULL;
1079 	struct segment *s = NULL;
1080 	size_t idx = 0;
1081 	vaddr_t va = 0;
1082 	struct ta_elf *e2 = NULL;
1083 	size_t i2 = 0;
1084 
1085 	assert(elf && seg && elf_idx);
1086 	e = *elf;
1087 	s = *seg;
1088 	assert((e == NULL && s == NULL) || (e != NULL && s != NULL));
1089 
1090 	if (s) {
1091 		s = TAILQ_NEXT(s, link);
1092 		if (s) {
1093 			*seg = s;
1094 			return true;
1095 		}
1096 	}
1097 
1098 	if (e)
1099 		va = e->load_addr;
1100 
1101 	/* Find the ELF with next load address */
1102 	e = NULL;
1103 	TAILQ_FOREACH(e2, elf_queue, link) {
1104 		if (e2->load_addr > va) {
1105 			if (!e || e2->load_addr < e->load_addr) {
1106 				e = e2;
1107 				idx = i2;
1108 			}
1109 		}
1110 		i2++;
1111 	}
1112 	if (!e)
1113 		return false;
1114 
1115 	*elf = e;
1116 	*seg = TAILQ_FIRST(&e->segs);
1117 	*elf_idx = idx;
1118 	return true;
1119 }
1120 
1121 void ta_elf_print_mappings(void *pctx, print_func_t print_func,
1122 			   struct ta_elf_queue *elf_queue, size_t num_maps,
1123 			   struct dump_map *maps, vaddr_t mpool_base)
1124 {
1125 	struct segment *seg = NULL;
1126 	struct ta_elf *elf = NULL;
1127 	size_t elf_idx = 0;
1128 	size_t idx = 0;
1129 	size_t map_idx = 0;
1130 
1131 	/*
1132 	 * Loop over all segments and maps, printing virtual addresses in
1133 	 * order. A segment has priority if its virtual address is present
1134 	 * in both a map and a segment.
1135 	 */
1136 	get_next_in_order(elf_queue, &elf, &seg, &elf_idx);
1137 	while (true) {
1138 		vaddr_t va = -1;
1139 		size_t sz = 0;
1140 		uint32_t flags = DUMP_MAP_SECURE;
1141 		size_t offs = 0;
1142 
1143 		if (seg) {
1144 			va = rounddown(seg->vaddr + elf->load_addr);
1145 			sz = roundup(seg->vaddr + seg->memsz) -
1146 				     rounddown(seg->vaddr);
1147 		}
1148 
1149 		while (map_idx < num_maps && maps[map_idx].va <= va) {
1150 			uint32_t f = 0;
1151 
1152 			/* If there's a match, it should be the same map */
1153 			if (maps[map_idx].va == va) {
1154 				/*
1155 				 * In shared libraries the first page is
1156 				 * mapped separately with the rest of that
1157 				 * segment following back to back in a
1158 				 * separate entry.
1159 				 */
1160 				if (map_idx + 1 < num_maps &&
1161 				    maps[map_idx].sz == SMALL_PAGE_SIZE) {
1162 					vaddr_t next_va = maps[map_idx].va +
1163 							  maps[map_idx].sz;
1164 					size_t comb_sz = maps[map_idx].sz +
1165 							 maps[map_idx + 1].sz;
1166 
1167 					if (next_va == maps[map_idx + 1].va &&
1168 					    comb_sz == sz &&
1169 					    maps[map_idx].flags ==
1170 					    maps[map_idx + 1].flags) {
1171 						/* Skip this and next entry */
1172 						map_idx += 2;
1173 						continue;
1174 					}
1175 				}
1176 				assert(maps[map_idx].sz == sz);
1177 			} else if (maps[map_idx].va < va) {
1178 				if (maps[map_idx].va == mpool_base)
1179 					f |= DUMP_MAP_LDELF;
1180 				print_seg(pctx, print_func, idx, -1,
1181 					  maps[map_idx].va, maps[map_idx].pa,
1182 					  maps[map_idx].sz,
1183 					  maps[map_idx].flags | f);
1184 				idx++;
1185 			}
1186 			map_idx++;
1187 		}
1188 
1189 		if (!seg)
1190 			break;
1191 
1192 		offs = rounddown(seg->offset);
1193 		if (seg->flags & PF_R)
1194 			flags |= DUMP_MAP_READ;
1195 		if (seg->flags & PF_W)
1196 			flags |= DUMP_MAP_WRITE;
1197 		if (seg->flags & PF_X)
1198 			flags |= DUMP_MAP_EXEC;
1199 
1200 		print_seg(pctx, print_func, idx, elf_idx, va, offs, sz, flags);
1201 		idx++;
1202 
1203 		if (!get_next_in_order(elf_queue, &elf, &seg, &elf_idx))
1204 			seg = NULL;
1205 	}
1206 
1207 	elf_idx = 0;
1208 	TAILQ_FOREACH(elf, elf_queue, link) {
1209 		print_wrapper(pctx, print_func,
1210 			      " [%zu] %pUl @ 0x%0*"PRIxVA"\n",
1211 			      elf_idx, (void *)&elf->uuid, 8, elf->load_addr);
1212 		elf_idx++;
1213 	}
1214 }
1215 
1216 #ifdef CFG_UNWIND
1217 void ta_elf_stack_trace_a32(uint32_t regs[16])
1218 {
1219 	struct unwind_state_arm32 state = { };
1220 
1221 	memcpy(state.registers, regs, sizeof(state.registers));
1222 	print_stack_arm32(&state, ta_stack, ta_stack_size);
1223 }
1224 
1225 void ta_elf_stack_trace_a64(uint64_t fp, uint64_t sp, uint64_t pc)
1226 {
1227 	struct unwind_state_arm64 state = { .fp = fp, .sp = sp, .pc = pc };
1228 
1229 	print_stack_arm64(&state, ta_stack, ta_stack_size);
1230 }
1231 #endif
1232 
1233 TEE_Result ta_elf_add_library(const TEE_UUID *uuid)
1234 {
1235 	struct ta_elf *ta = TAILQ_FIRST(&main_elf_queue);
1236 	struct ta_elf *lib = ta_elf_find_elf(uuid);
1237 	struct ta_elf *elf = NULL;
1238 
1239 	if (lib)
1240 		return TEE_SUCCESS; /* Already mapped */
1241 
1242 	lib = queue_elf_helper(uuid);
1243 	if (!lib)
1244 		return TEE_ERROR_OUT_OF_MEMORY;
1245 
1246 	for (elf = lib; elf; elf = TAILQ_NEXT(elf, link))
1247 		ta_elf_load_dependency(elf, ta->is_32bit);
1248 
1249 	for (elf = lib; elf; elf = TAILQ_NEXT(elf, link)) {
1250 		ta_elf_relocate(elf);
1251 		ta_elf_finalize_mappings(elf);
1252 	}
1253 
1254 	for (elf = lib; elf; elf = TAILQ_NEXT(elf, link))
1255 		DMSG("ELF (%pUl) at %#"PRIxVA,
1256 		     (void *)&elf->uuid, elf->load_addr);
1257 
1258 	return ta_elf_set_init_fini_info(ta->is_32bit);
1259 }
1260 
1261 /* Get address/size of .init_array and .fini_array from the dynamic segment */
1262 static void get_init_fini_array(struct ta_elf *elf, unsigned int type,
1263 				vaddr_t addr, size_t memsz, vaddr_t *init,
1264 				size_t *init_cnt, vaddr_t *fini,
1265 				size_t *fini_cnt)
1266 {
1267 	size_t addrsz = 0;
1268 	size_t dyn_entsize = 0;
1269 	size_t num_dyns = 0;
1270 	size_t n = 0;
1271 	unsigned int tag = 0;
1272 	size_t val = 0;
1273 
1274 	assert(type == PT_DYNAMIC);
1275 
1276 	check_phdr_in_range(elf, type, addr, memsz);
1277 
1278 	if (elf->is_32bit) {
1279 		dyn_entsize = sizeof(Elf32_Dyn);
1280 		addrsz = 4;
1281 	} else {
1282 		dyn_entsize = sizeof(Elf64_Dyn);
1283 		addrsz = 8;
1284 	}
1285 
1286 	assert(!(memsz % dyn_entsize));
1287 	num_dyns = memsz / dyn_entsize;
1288 
1289 	for (n = 0; n < num_dyns; n++) {
1290 		read_dyn(elf, addr, n, &tag, &val);
1291 		if (tag == DT_INIT_ARRAY)
1292 			*init = val + elf->load_addr;
1293 		else if (tag == DT_FINI_ARRAY)
1294 			*fini = val + elf->load_addr;
1295 		else if (tag == DT_INIT_ARRAYSZ)
1296 			*init_cnt = val / addrsz;
1297 		else if (tag == DT_FINI_ARRAYSZ)
1298 			*fini_cnt = val / addrsz;
1299 	}
1300 }
1301 
1302 /* Get address/size of .init_array and .fini_array in @elf (if present) */
1303 static void elf_get_init_fini_array(struct ta_elf *elf, vaddr_t *init,
1304 				    size_t *init_cnt, vaddr_t *fini,
1305 				    size_t *fini_cnt)
1306 {
1307 	size_t n = 0;
1308 
1309 	if (elf->is_32bit) {
1310 		Elf32_Phdr *phdr = elf->phdr;
1311 
1312 		for (n = 0; n < elf->e_phnum; n++) {
1313 			if (phdr[n].p_type == PT_DYNAMIC) {
1314 				get_init_fini_array(elf, phdr[n].p_type,
1315 						    phdr[n].p_vaddr,
1316 						    phdr[n].p_memsz,
1317 						    init, init_cnt, fini,
1318 						    fini_cnt);
1319 				return;
1320 			}
1321 		}
1322 	} else {
1323 		Elf64_Phdr *phdr = elf->phdr;
1324 
1325 		for (n = 0; n < elf->e_phnum; n++) {
1326 			if (phdr[n].p_type == PT_DYNAMIC) {
1327 				get_init_fini_array(elf, phdr[n].p_type,
1328 						    phdr[n].p_vaddr,
1329 						    phdr[n].p_memsz,
1330 						    init, init_cnt, fini,
1331 						    fini_cnt);
1332 				return;
1333 			}
1334 		}
1335 	}
1336 }
1337 
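/*
 * Resize the __init_fini_info array at @va to hold @cnt entries, zeroing
 * any newly added entries. Handles both the 32-bit and 64-bit layouts.
 */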
1338 static TEE_Result realloc_ifs(vaddr_t va, size_t cnt, bool is_32bit)
1339 {
1340 	struct __init_fini_info32 *info32 = (struct __init_fini_info32 *)va;
1341 	struct __init_fini_info *info = (struct __init_fini_info *)va;
1342 	struct __init_fini32 *ifs32 = NULL;
1343 	struct __init_fini *ifs = NULL;
1344 	size_t prev_cnt = 0;
1345 	void *ptr = NULL;
1346 
1347 	if (is_32bit) {
1348 		ptr = (void *)(vaddr_t)info32->ifs;
1349 		ptr = realloc(ptr, cnt * sizeof(struct __init_fini32));
1350 		if (!ptr)
1351 			return TEE_ERROR_OUT_OF_MEMORY;
1352 		ifs32 = ptr;
1353 		prev_cnt = info32->size;
1354 		if (cnt > prev_cnt)
1355 			memset(ifs32 + prev_cnt, 0,
1356 			       (cnt - prev_cnt) * sizeof(*ifs32));
1357 		info32->ifs = (uint32_t)(vaddr_t)ifs32;
1358 		info32->size = cnt;
1359 	} else {
1360 		ptr = realloc(info->ifs, cnt * sizeof(struct __init_fini));
1361 		if (!ptr)
1362 			return TEE_ERROR_OUT_OF_MEMORY;
1363 		ifs = ptr;
1364 		prev_cnt = info->size;
1365 		if (cnt > prev_cnt)
1366 			memset(ifs + prev_cnt, 0,
1367 			       (cnt - prev_cnt) * sizeof(*ifs));
1368 		info->ifs = ifs;
1369 		info->size = cnt;
1370 	}
1371 
1372 	return TEE_SUCCESS;
1373 }
1374 
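/*
 * Fill entry @idx of the __init_fini_info array at @va with the
 * .init_array/.fini_array of @elf, unless the entry is already marked valid.
 */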
1375 static void fill_ifs(vaddr_t va, size_t idx, struct ta_elf *elf, bool is_32bit)
1376 {
1377 	struct __init_fini_info32 *info32 = (struct __init_fini_info32 *)va;
1378 	struct __init_fini_info *info = (struct __init_fini_info *)va;
1379 	struct __init_fini32 *ifs32 = NULL;
1380 	struct __init_fini *ifs = NULL;
1381 	size_t init_cnt = 0;
1382 	size_t fini_cnt = 0;
1383 	vaddr_t init = 0;
1384 	vaddr_t fini = 0;
1385 
1386 	if (is_32bit) {
1387 		assert(idx < info32->size);
1388 		ifs32 = &((struct __init_fini32 *)(vaddr_t)info32->ifs)[idx];
1389 
1390 		if (ifs32->flags & __IFS_VALID)
1391 			return;
1392 
1393 		elf_get_init_fini_array(elf, &init, &init_cnt, &fini,
1394 					&fini_cnt);
1395 
1396 		ifs32->init = (uint32_t)init;
1397 		ifs32->init_size = init_cnt;
1398 
1399 		ifs32->fini = (uint32_t)fini;
1400 		ifs32->fini_size = fini_cnt;
1401 
1402 		ifs32->flags |= __IFS_VALID;
1403 	} else {
1404 		assert(idx < info->size);
1405 		ifs = &info->ifs[idx];
1406 
1407 		if (ifs->flags & __IFS_VALID)
1408 			return;
1409 
1410 		elf_get_init_fini_array(elf, &init, &init_cnt, &fini,
1411 					&fini_cnt);
1412 
1413 		ifs->init = (void (**)(void))init;
1414 		ifs->init_size = init_cnt;
1415 
1416 		ifs->fini = (void (**)(void))fini;
1417 		ifs->fini_size = fini_cnt;
1418 
1419 		ifs->flags |= __IFS_VALID;
1420 	}
1421 }
1422 
1423 /*
1424  * Set or update __init_fini_info in the TA with information from the ELF
1425  * queue
1426  */
1427 TEE_Result ta_elf_set_init_fini_info(bool is_32bit)
1428 {
1429 	struct __init_fini_info *info = NULL;
1430 	TEE_Result res = TEE_SUCCESS;
1431 	struct ta_elf *elf = NULL;
1432 	vaddr_t info_va = 0;
1433 	size_t cnt = 0;
1434 
1435 	res = ta_elf_resolve_sym("__init_fini_info", &info_va, NULL);
1436 	if (res) {
1437 		if (res == TEE_ERROR_ITEM_NOT_FOUND) {
1438 			/* Older TA */
1439 			return TEE_SUCCESS;
1440 		}
1441 		return res;
1442 	}
1443 	assert(info_va);
1444 
1445 	info = (struct __init_fini_info *)info_va;
1446 	if (info->reserved)
1447 		return TEE_ERROR_NOT_SUPPORTED;
1448 
1449 	TAILQ_FOREACH(elf, &main_elf_queue, link)
1450 		cnt++;
1451 
1452 	/* Queue has at least one file (main) */
1453 	assert(cnt);
1454 
1455 	res = realloc_ifs(info_va, cnt, is_32bit);
1456 	if (res)
1457 		goto err;
1458 
1459 	cnt = 0;
1460 	TAILQ_FOREACH(elf, &main_elf_queue, link) {
1461 		fill_ifs(info_va, cnt, elf, is_32bit);
1462 		cnt++;
1463 	}
1464 
1465 	return TEE_SUCCESS;
1466 err:
1467 	free(info);
1468 	return res;
1469 }
1470