xref: /optee_os/ldelf/ta_elf.c (revision 65137432d3a42d885777bf65d65952e3bae53e80)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2019, Linaro Limited
4  */
5 
6 #include <assert.h>
7 #include <ctype.h>
8 #include <elf32.h>
9 #include <elf64.h>
10 #include <elf_common.h>
11 #include <ldelf.h>
12 #include <pta_system.h>
13 #include <stdio.h>
14 #include <stdlib.h>
15 #include <string_ext.h>
16 #include <string.h>
17 #include <tee_api_types.h>
18 #include <user_ta_header.h>
19 
20 #include "sys.h"
21 #include "ta_elf.h"
22 
23 static vaddr_t ta_stack;
24 
25 struct ta_elf_queue main_elf_queue = TAILQ_HEAD_INITIALIZER(main_elf_queue);
26 
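/*
 * Allocate a struct ta_elf for @uuid and append it to main_elf_queue,
 * unless an ELF with that UUID is already queued, in which case NULL is
 * returned (this is what de-duplicates dependencies).
 */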
27 static struct ta_elf *queue_elf(const TEE_UUID *uuid)
28 {
29 	struct ta_elf *elf = NULL;
30 
31 	TAILQ_FOREACH(elf, &main_elf_queue, link)
32 		if (!memcmp(uuid, &elf->uuid, sizeof(*uuid)))
33 			return NULL;
34 
35 	elf = calloc(1, sizeof(*elf));
36 	if (!elf)
37 		err(TEE_ERROR_OUT_OF_MEMORY, "calloc");
38 
39 	TAILQ_INIT(&elf->segs);
40 
41 	elf->uuid = *uuid;
42 	TAILQ_INSERT_TAIL(&main_elf_queue, elf, link);
43 	return elf;
44 }
45 
46 static TEE_Result e32_parse_ehdr(struct ta_elf *elf, Elf32_Ehdr *ehdr)
47 {
48 	if (ehdr->e_ident[EI_VERSION] != EV_CURRENT ||
49 	    ehdr->e_ident[EI_CLASS] != ELFCLASS32 ||
50 	    ehdr->e_ident[EI_DATA] != ELFDATA2LSB ||
51 	    ehdr->e_ident[EI_OSABI] != ELFOSABI_NONE ||
52 	    ehdr->e_type != ET_DYN || ehdr->e_machine != EM_ARM ||
53 	    (ehdr->e_flags & EF_ARM_ABIMASK) != EF_ARM_ABI_VERSION ||
54 #ifndef CFG_WITH_VFP
55 	    (ehdr->e_flags & EF_ARM_ABI_FLOAT_HARD) ||
56 #endif
57 	    ehdr->e_phentsize != sizeof(Elf32_Phdr) ||
58 	    ehdr->e_shentsize != sizeof(Elf32_Shdr))
59 		return TEE_ERROR_BAD_FORMAT;
60 
61 	elf->is_32bit = true;
62 	elf->e_entry = ehdr->e_entry;
63 	elf->e_phoff = ehdr->e_phoff;
64 	elf->e_shoff = ehdr->e_shoff;
65 	elf->e_phnum = ehdr->e_phnum;
66 	elf->e_shnum = ehdr->e_shnum;
67 	elf->e_phentsize = ehdr->e_phentsize;
68 	elf->e_shentsize = ehdr->e_shentsize;
69 
70 	return TEE_SUCCESS;
71 }
72 
73 #ifdef ARM64
74 static TEE_Result e64_parse_ehdr(struct ta_elf *elf, Elf64_Ehdr *ehdr)
75 {
76 	if (ehdr->e_ident[EI_VERSION] != EV_CURRENT ||
77 	    ehdr->e_ident[EI_CLASS] != ELFCLASS64 ||
78 	    ehdr->e_ident[EI_DATA] != ELFDATA2LSB ||
79 	    ehdr->e_ident[EI_OSABI] != ELFOSABI_NONE ||
80 	    ehdr->e_type != ET_DYN || ehdr->e_machine != EM_AARCH64 ||
81 	    ehdr->e_flags || ehdr->e_phentsize != sizeof(Elf64_Phdr) ||
82 	    ehdr->e_shentsize != sizeof(Elf64_Shdr))
83 		return TEE_ERROR_BAD_FORMAT;
84 
85 
86 	elf->is_32bit = false;
87 	elf->e_entry = ehdr->e_entry;
88 	elf->e_phoff = ehdr->e_phoff;
89 	elf->e_shoff = ehdr->e_shoff;
90 	elf->e_phnum = ehdr->e_phnum;
91 	elf->e_shnum = ehdr->e_shnum;
92 	elf->e_phentsize = ehdr->e_phentsize;
93 	elf->e_shentsize = ehdr->e_shentsize;
94 
95 	return TEE_SUCCESS;
96 }
97 #else /*ARM64*/
98 static TEE_Result e64_parse_ehdr(struct ta_elf *elf __unused,
99 				 Elf64_Ehdr *ehdr __unused)
100 {
101 	return TEE_ERROR_NOT_SUPPORTED;
102 }
103 #endif /*ARM64*/
104 
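/*
 * Read entry @idx of the dynamic section located at the unrelocated
 * address @addr and return its tag and value, handling both the
 * Elf32_Dyn and Elf64_Dyn layouts depending on the bitness of @elf.
 */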
105 static void read_dyn(struct ta_elf *elf, vaddr_t addr,
106 		     size_t idx, unsigned int *tag, size_t *val)
107 {
108 	if (elf->is_32bit) {
109 		Elf32_Dyn *dyn = (Elf32_Dyn *)(addr + elf->load_addr);
110 
111 		*tag = dyn[idx].d_tag;
112 		*val = dyn[idx].d_un.d_val;
113 	} else {
114 		Elf64_Dyn *dyn = (Elf64_Dyn *)(addr + elf->load_addr);
115 
116 		*tag = dyn[idx].d_tag;
117 		*val = dyn[idx].d_un.d_val;
118 	}
119 }
120 
121 static void e32_save_symtab(struct ta_elf *elf, size_t tab_idx)
122 {
123 	Elf32_Shdr *shdr = elf->shdr;
124 	size_t str_idx = shdr[tab_idx].sh_link;
125 
126 	elf->dynsymtab = (void *)(shdr[tab_idx].sh_addr + elf->load_addr);
127 	assert(!(shdr[tab_idx].sh_size % sizeof(Elf32_Sym)));
128 	elf->num_dynsyms = shdr[tab_idx].sh_size / sizeof(Elf32_Sym);
129 
130 	elf->dynstr = (void *)(shdr[str_idx].sh_addr + elf->load_addr);
131 	elf->dynstr_size = shdr[str_idx].sh_size;
132 }
133 
134 static void e64_save_symtab(struct ta_elf *elf, size_t tab_idx)
135 {
136 	Elf64_Shdr *shdr = elf->shdr;
137 	size_t str_idx = shdr[tab_idx].sh_link;
138 
139 	elf->dynsymtab = (void *)(vaddr_t)(shdr[tab_idx].sh_addr +
140 					   elf->load_addr);
141 	assert(!(shdr[tab_idx].sh_size % sizeof(Elf64_Sym)));
142 	elf->num_dynsyms = shdr[tab_idx].sh_size / sizeof(Elf64_Sym);
143 
144 	elf->dynstr = (void *)(vaddr_t)(shdr[str_idx].sh_addr + elf->load_addr);
145 	elf->dynstr_size = shdr[str_idx].sh_size;
146 }
147 
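/*
 * Find the SHT_DYNSYM section among the copied section headers and
 * record the dynamic symbol table together with its string table
 * (found via sh_link) for later symbol resolution.
 */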
148 static void save_symtab(struct ta_elf *elf)
149 {
150 	size_t n = 0;
151 
152 	if (elf->is_32bit) {
153 		Elf32_Shdr *shdr = elf->shdr;
154 
155 		for (n = 0; n < elf->e_shnum; n++) {
156 			if (shdr[n].sh_type == SHT_DYNSYM) {
157 				e32_save_symtab(elf, n);
158 				break;
159 			}
160 		}
161 	} else {
162 		Elf64_Shdr *shdr = elf->shdr;
163 
164 		for (n = 0; n < elf->e_shnum; n++) {
165 			if (shdr[n].sh_type == SHT_DYNSYM) {
166 				e64_save_symtab(elf, n);
167 				break;
168 			}
169 		}
170 
171 	}
172 }
173 
174 static void init_elf(struct ta_elf *elf)
175 {
176 	TEE_Result res = TEE_SUCCESS;
177 	vaddr_t va = 0;
178 	uint32_t flags = PTA_SYSTEM_MAP_FLAG_SHAREABLE;
179 	const size_t max_align = 0x10000;
180 
181 	res = sys_open_ta_bin(&elf->uuid, &elf->handle);
182 	if (res)
183 		err(res, "sys_open_ta_bin(%pUl)", (void *)&elf->uuid);
184 
185 	/*
186 	 * Map it read-only executable when we're loading a library where
187 	 * the ELF header is included in a load segment.
188 	 */
189 	if (!elf->is_main)
190 		flags |= PTA_SYSTEM_MAP_FLAG_EXECUTABLE;
191 	/*
192 	 * Pad the end with max_align (64 KiB) in case a library with this
193 	 * large an alignment has been mapped before. We want to avoid
194 	 * ending up in a hole in the mapping of a library.
195 	 */
196 	res = sys_map_ta_bin(&va, SMALL_PAGE_SIZE, flags, elf->handle, 0, 0,
197 			     max_align);
198 	if (res)
199 		err(res, "sys_map_ta_bin");
200 	elf->ehdr_addr = va;
201 	if (!elf->is_main) {
202 		elf->load_addr = va;
203 		elf->max_addr = va + SMALL_PAGE_SIZE;
204 		elf->max_offs = SMALL_PAGE_SIZE;
205 	}
206 
207 	if (!IS_ELF(*(Elf32_Ehdr *)va))
208 		err(TEE_ERROR_BAD_FORMAT, "TA is not an ELF");
209 
210 	res = e32_parse_ehdr(elf, (void *)va);
211 	if (res == TEE_ERROR_BAD_FORMAT)
212 		res = e64_parse_ehdr(elf, (void *)va);
213 	if (res)
214 		err(res, "Cannot parse ELF");
215 
216 	if (elf->e_phoff + elf->e_phnum * elf->e_phentsize > SMALL_PAGE_SIZE)
217 		err(TEE_ERROR_NOT_SUPPORTED, "Cannot read program headers");
218 
219 	elf->phdr = (void *)(va + elf->e_phoff);
220 }
221 
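/*
 * Page-granularity rounding helpers. With SMALL_PAGE_SIZE == 0x1000,
 * roundup(0x1234) == 0x2000 and rounddown(0x1234) == 0x1000.
 */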
222 static size_t roundup(size_t v)
223 {
224 	return ROUNDUP(v, SMALL_PAGE_SIZE);
225 }
226 
227 static size_t rounddown(size_t v)
228 {
229 	return ROUNDDOWN(v, SMALL_PAGE_SIZE);
230 }
231 
232 static void add_segment(struct ta_elf *elf, size_t offset, size_t vaddr,
233 			size_t filesz, size_t memsz, size_t flags, size_t align)
234 {
235 	struct segment *seg = calloc(1, sizeof(*seg));
236 
237 	if (!seg)
238 		err(TEE_ERROR_OUT_OF_MEMORY, "calloc");
239 
240 	seg->offset = offset;
241 	seg->vaddr = vaddr;
242 	seg->filesz = filesz;
243 	seg->memsz = memsz;
244 	seg->flags = flags;
245 	seg->align = align;
246 
247 	TAILQ_INSERT_TAIL(&elf->segs, seg, link);
248 }
249 
250 static void parse_load_segments(struct ta_elf *elf)
251 {
252 	size_t n = 0;
253 
254 	if (elf->is_32bit) {
255 		Elf32_Phdr *phdr = elf->phdr;
256 
257 		for (n = 0; n < elf->e_phnum; n++)
258 			if (phdr[n].p_type == PT_LOAD)
259 				add_segment(elf, phdr[n].p_offset,
260 					    phdr[n].p_vaddr, phdr[n].p_filesz,
261 					    phdr[n].p_memsz, phdr[n].p_flags,
262 					    phdr[n].p_align);
263 	} else {
264 		Elf64_Phdr *phdr = elf->phdr;
265 
266 		for (n = 0; n < elf->e_phnum; n++)
267 			if (phdr[n].p_type == PT_LOAD)
268 				add_segment(elf, phdr[n].p_offset,
269 					    phdr[n].p_vaddr, phdr[n].p_filesz,
270 					    phdr[n].p_memsz, phdr[n].p_flags,
271 					    phdr[n].p_align);
272 	}
273 }
274 
275 static void copy_remapped_to(struct ta_elf *elf, const struct segment *seg)
276 {
277 	uint8_t *dst = (void *)(seg->vaddr + elf->load_addr);
278 	size_t n = 0;
279 	size_t offs = seg->offset;
280 	size_t num_bytes = seg->filesz;
281 
282 	if (offs < elf->max_offs) {
283 		n = MIN(elf->max_offs - offs, num_bytes);
284 		memcpy(dst, (void *)(elf->max_addr + offs - elf->max_offs), n);
285 		dst += n;
286 		offs += n;
287 		num_bytes -= n;
288 	}
289 
290 	if (num_bytes) {
291 		TEE_Result res = sys_copy_from_ta_bin(dst, num_bytes,
292 						      elf->handle, offs);
293 
294 		if (res)
295 			err(res, "sys_copy_from_ta_bin");
296 		elf->max_offs += offs;
297 	}
298 }
299 
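/*
 * Merge and page-align the PT_LOAD segments queued by
 * parse_load_segments() so they can be mapped with page granularity.
 *
 * Illustrative example (SMALL_PAGE_SIZE == 0x1000): a r-x segment
 * covering [0x0000, 0x0f00) followed by a rw- segment at vaddr 0x0f40
 * (with matching offset bits) shares its first page with the previous
 * segment, so case 1 below merges the two into one rwx segment.
 */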
300 static void adjust_segments(struct ta_elf *elf)
301 {
302 	struct segment *seg = NULL;
303 	struct segment *prev_seg = NULL;
304 	size_t prev_end_addr = 0;
305 	size_t align = 0;
306 	size_t mask = 0;
307 
308 	/* Sanity check */
309 	TAILQ_FOREACH(seg, &elf->segs, link) {
310 		size_t dummy __maybe_unused = 0;
311 
312 		assert(seg->align >= SMALL_PAGE_SIZE);
313 		assert(!ADD_OVERFLOW(seg->vaddr, seg->memsz, &dummy));
314 		assert(seg->filesz <= seg->memsz);
315 		assert((seg->offset & SMALL_PAGE_MASK) ==
316 		       (seg->vaddr & SMALL_PAGE_MASK));
317 
318 		prev_seg = TAILQ_PREV(seg, segment_head, link);
319 		if (prev_seg) {
320 			assert(seg->vaddr >= prev_seg->vaddr + prev_seg->memsz);
321 			assert(seg->offset >=
322 			       prev_seg->offset + prev_seg->filesz);
323 		}
324 		if (!align)
325 			align = seg->align;
326 		assert(align == seg->align);
327 	}
328 
329 	mask = align - 1;
330 
331 	seg = TAILQ_FIRST(&elf->segs);
332 	if (seg)
333 		seg = TAILQ_NEXT(seg, link);
334 	while (seg) {
335 		prev_seg = TAILQ_PREV(seg, segment_head, link);
336 		prev_end_addr = prev_seg->vaddr + prev_seg->memsz;
337 
338 		/*
339 		 * This segment may overlap with the last "page" in the
340 		 * previous segment in two different ways:
341 		 * 1. Virtual address (and offset) overlaps =>
342 		 *    Permissions need to be merged. The SMALL_PAGE_MASK
343 		 *    bits of the offset must match those of the vaddr, and
344 		 *    both must line up with the previous segment.
345 		 *
346 		 * 2. Only offset overlaps =>
347 		 *    The same page in the ELF is mapped at two different
348 		 *    virtual addresses. As a limitation this segment must
349 		 *    be mapped as writeable.
350 		 */
351 
352 		/* Case 1. */
353 		if (rounddown(seg->vaddr) < prev_end_addr) {
354 			assert((seg->vaddr & mask) == (seg->offset & mask));
355 			assert(prev_seg->memsz == prev_seg->filesz);
356 
357 			/*
358 			 * Merge the segments and their permissions.
359 			 * Note that there may be a small hole between the
360 			 * two segments.
361 			 */
362 			prev_seg->filesz = seg->vaddr + seg->filesz -
363 					   prev_seg->vaddr;
364 			prev_seg->memsz = seg->vaddr + seg->memsz -
365 					   prev_seg->vaddr;
366 			prev_seg->flags |= seg->flags;
367 
368 			TAILQ_REMOVE(&elf->segs, seg, link);
369 			free(seg);
370 			seg = TAILQ_NEXT(prev_seg, link);
371 			continue;
372 		}
373 
374 		/* Case 2. */
375 		if ((seg->offset & mask) &&
376 		    rounddown(seg->offset) <
377 		    (prev_seg->offset + prev_seg->filesz)) {
378 
379 			assert(seg->flags & PF_W);
380 			seg->remapped_writeable = true;
381 		}
382 
383 		/*
384 		 * No overlap, but we may need to align address, offset and
385 		 * size.
386 		 */
387 		seg->filesz += seg->vaddr - rounddown(seg->vaddr);
388 		seg->memsz += seg->vaddr - rounddown(seg->vaddr);
389 		seg->vaddr = rounddown(seg->vaddr);
390 		seg->offset = rounddown(seg->offset);
391 		seg = TAILQ_NEXT(seg, link);
392 	}
393 
394 }
395 
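/*
 * Legacy mapping: each segment is mapped with sys_map_zi() (zero
 * initialized and writeable) and the file contents are copied in with
 * sys_copy_from_ta_bin(). The final permissions are applied later by
 * ta_elf_finalize_mappings().
 */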
396 static void populate_segments_legacy(struct ta_elf *elf)
397 {
398 	TEE_Result res = TEE_SUCCESS;
399 	struct segment *seg = NULL;
400 	vaddr_t va = 0;
401 
402 	TAILQ_FOREACH(seg, &elf->segs, link) {
403 		struct segment *last_seg = TAILQ_LAST(&elf->segs, segment_head);
404 		size_t pad_end = roundup(last_seg->vaddr + last_seg->memsz -
405 					 seg->vaddr - seg->memsz);
406 		size_t num_bytes = roundup(seg->memsz);
407 
408 		if (!elf->load_addr)
409 			va = 0;
410 		else
411 			va = seg->vaddr + elf->load_addr;
412 
413 
414 		if (!(seg->flags & PF_R))
415 			err(TEE_ERROR_NOT_SUPPORTED,
416 			    "Segment must be readable");
417 
418 		res = sys_map_zi(num_bytes, 0, &va, 0, pad_end);
419 		if (res)
420 			err(res, "sys_map_zi");
421 		res = sys_copy_from_ta_bin((void *)va, seg->filesz,
422 					   elf->handle, seg->offset);
423 		if (res)
424 			err(res, "sys_copy_from_ta_bin");
425 
426 		if (!elf->load_addr)
427 			elf->load_addr = va;
428 		elf->max_addr = va + num_bytes;
429 		elf->max_offs = seg->offset + seg->filesz;
430 	}
431 }
432 
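/*
 * Non-legacy mapping: writeable segments get zero-initialized memory
 * filled from the TA binary, while read-only segments are mapped
 * directly from the binary with their final flags. For shared
 * libraries the first page was already mapped by init_elf() and is
 * accounted for via max_offs/max_addr.
 */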
433 static void populate_segments(struct ta_elf *elf)
434 {
435 	TEE_Result res = TEE_SUCCESS;
436 	struct segment *seg = NULL;
437 	vaddr_t va = 0;
438 
439 	TAILQ_FOREACH(seg, &elf->segs, link) {
440 		struct segment *last_seg = TAILQ_LAST(&elf->segs, segment_head);
441 		size_t pad_end = roundup(last_seg->vaddr + last_seg->memsz -
442 					 seg->vaddr - seg->memsz);
443 
444 		if (seg->remapped_writeable) {
445 			size_t num_bytes = roundup(seg->vaddr + seg->memsz) -
446 					   rounddown(seg->vaddr);
447 
448 			assert(elf->load_addr);
449 			va = rounddown(elf->load_addr + seg->vaddr);
450 			assert(va >= elf->max_addr);
451 			res = sys_map_zi(num_bytes, 0, &va, 0, pad_end);
452 			if (res)
453 				err(res, "sys_map_zi");
454 
455 			copy_remapped_to(elf, seg);
456 			elf->max_addr = va + num_bytes;
457 		} else {
458 			uint32_t flags = 0;
459 			size_t filesz = seg->filesz;
460 			size_t memsz = seg->memsz;
461 			size_t offset = seg->offset;
462 			size_t vaddr = seg->vaddr;
463 
464 			if (offset < elf->max_offs) {
465 				/*
466 				 * We're in a load segment which overlaps
467 				 * with (or is covered by) the first page
468 				 * of a shared library.
469 				 */
470 				if (vaddr + filesz < SMALL_PAGE_SIZE) {
471 					size_t num_bytes = 0;
472 
473 					/*
474 					 * If this segment is completely
475 					 * covered, take next.
476 					 */
477 					if (vaddr + memsz <= SMALL_PAGE_SIZE)
478 						continue;
479 
480 					/*
481 					 * All data of the segment is
482 					 * loaded, but we need to zero
483 					 * extend it.
484 					 */
485 					va = elf->max_addr;
486 					num_bytes = roundup(vaddr + memsz) -
487 						    roundup(vaddr) -
488 						    SMALL_PAGE_SIZE;
489 					assert(num_bytes);
490 					res = sys_map_zi(num_bytes, 0, &va, 0,
491 							 0);
492 					if (res)
493 						err(res, "sys_map_zi");
494 					elf->max_addr = roundup(va + num_bytes);
495 					continue;
496 				}
497 
498 				/* Partial overlap, remove the first page. */
499 				vaddr += SMALL_PAGE_SIZE;
500 				filesz -= SMALL_PAGE_SIZE;
501 				memsz -= SMALL_PAGE_SIZE;
502 				offset += SMALL_PAGE_SIZE;
503 			}
504 
505 			if (!elf->load_addr)
506 				va = 0;
507 			else
508 				va = vaddr + elf->load_addr;
509 
510 			if (seg->flags & PF_W)
511 				flags |= PTA_SYSTEM_MAP_FLAG_WRITEABLE;
512 			else
513 				flags |= PTA_SYSTEM_MAP_FLAG_SHAREABLE;
514 			if (seg->flags & PF_X)
515 				flags |= PTA_SYSTEM_MAP_FLAG_EXECUTABLE;
516 			if (!(seg->flags & PF_R))
517 				err(TEE_ERROR_NOT_SUPPORTED,
518 				    "Segment must be readable");
519 			if (flags & PTA_SYSTEM_MAP_FLAG_WRITEABLE) {
520 				res = sys_map_zi(memsz, 0, &va, 0, pad_end);
521 				if (res)
522 					err(res, "sys_map_zi");
523 				res = sys_copy_from_ta_bin((void *)va, filesz,
524 							   elf->handle, offset);
525 				if (res)
526 					err(res, "sys_copy_from_ta_bin");
527 			} else {
528 				res = sys_map_ta_bin(&va, filesz, flags,
529 						     elf->handle, offset,
530 						     0, pad_end);
531 				if (res)
532 					err(res, "sys_map_ta_bin");
533 			}
534 
535 			if (!elf->load_addr)
536 				elf->load_addr = va;
537 			elf->max_addr = roundup(va + filesz);
538 			elf->max_offs += filesz;
539 		}
540 	}
541 }
542 
543 static void map_segments(struct ta_elf *elf)
544 {
545 	parse_load_segments(elf);
546 	adjust_segments(elf);
547 	if (elf->is_legacy)
548 		populate_segments_legacy(elf);
549 	else
550 		populate_segments(elf);
551 }
552 
553 static int hex(char c)
554 {
555 	char lc = tolower(c);
556 
557 	if (isdigit(lc))
558 		return lc - '0';
559 	if (isxdigit(lc))
560 		return lc - 'a' + 10;
561 	return -1;
562 }
563 
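/*
 * Parse @nchars hexadecimal characters from @s and return their value.
 * *@res is set to TEE_SUCCESS, or to TEE_ERROR_BAD_FORMAT if a
 * non-hexadecimal character is found (the returned value is then only
 * partially accumulated). For example, parse_hex("2450", 4, &res)
 * returns 0x2450.
 */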
564 static uint32_t parse_hex(const char *s, size_t nchars, uint32_t *res)
565 {
566 	uint32_t v = 0;
567 	size_t n;
568 	int c;
569 
570 	for (n = 0; n < nchars; n++) {
571 		c = hex(s[n]);
572 		if (c < 0) {
573 			*res = TEE_ERROR_BAD_FORMAT;
574 			goto out;
575 		}
576 		v = (v << 4) + c;
577 	}
578 	*res = TEE_SUCCESS;
579 out:
580 	return v;
581 }
582 
583 /*
584  * Convert a UUID string @s into a TEE_UUID @uuid
585  * Expected format for @s is: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
586  * 'x' being any hexadecimal digit (0-9a-fA-F)
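 *
 * For example, "8aaaf200-2450-11e4-abe2-0002a5d5c51b" parses into
 * { .timeLow = 0x8aaaf200, .timeMid = 0x2450, .timeHiAndVersion = 0x11e4,
 *   .clockSeqAndNode = { 0xab, 0xe2, 0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b } }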
587  */
588 static TEE_Result parse_uuid(const char *s, TEE_UUID *uuid)
589 {
590 	TEE_Result res = TEE_SUCCESS;
591 	TEE_UUID u = { 0 };
592 	const char *p = s;
593 	size_t i;
594 
595 	if (strlen(p) != 36)
596 		return TEE_ERROR_BAD_FORMAT;
597 	if (p[8] != '-' || p[13] != '-' || p[18] != '-' || p[23] != '-')
598 		return TEE_ERROR_BAD_FORMAT;
599 
600 	u.timeLow = parse_hex(p, 8, &res);
601 	if (res)
602 		goto out;
603 	p += 9;
604 	u.timeMid = parse_hex(p, 4, &res);
605 	if (res)
606 		goto out;
607 	p += 5;
608 	u.timeHiAndVersion = parse_hex(p, 4, &res);
609 	if (res)
610 		goto out;
611 	p += 5;
612 	for (i = 0; i < 8; i++) {
613 		u.clockSeqAndNode[i] = parse_hex(p, 2, &res);
614 		if (res)
615 			goto out;
616 		if (i == 1)
617 			p += 3;
618 		else
619 			p += 2;
620 	}
621 	*uuid = u;
622 out:
623 	return res;
624 }
625 
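/*
 * If the segment is PT_DYNAMIC, locate the DT_STRTAB string table and
 * queue one dependency per DT_NEEDED entry. Each DT_NEEDED string is
 * expected to be the UUID of the needed shared library.
 */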
626 static void add_deps_from_segment(struct ta_elf *elf, unsigned int type,
627 				  vaddr_t addr, size_t memsz)
628 {
629 	size_t dyn_entsize = 0;
630 	size_t num_dyns = 0;
631 	size_t n = 0;
632 	unsigned int tag = 0;
633 	size_t val = 0;
634 	TEE_UUID uuid = { };
635 	char *str_tab = NULL;
636 
637 	if (type != PT_DYNAMIC)
638 		return;
639 
640 	if (elf->is_32bit)
641 		dyn_entsize = sizeof(Elf32_Dyn);
642 	else
643 		dyn_entsize = sizeof(Elf64_Dyn);
644 
645 	assert(!(memsz % dyn_entsize));
646 	num_dyns = memsz / dyn_entsize;
647 
648 	for (n = 0; n < num_dyns; n++) {
649 		read_dyn(elf, addr, n, &tag, &val);
650 		if (tag == DT_STRTAB) {
651 			str_tab = (char *)(val + elf->load_addr);
652 			break;
653 		}
654 	}
655 
656 	for (n = 0; n < num_dyns; n++) {
657 		read_dyn(elf, addr, n, &tag, &val);
658 		if (tag != DT_NEEDED)
659 			continue;
660 		parse_uuid(str_tab + val, &uuid);
661 		queue_elf(&uuid);
662 	}
663 }
664 
665 static void add_dependencies(struct ta_elf *elf)
666 {
667 	size_t n = 0;
668 
669 	if (elf->is_32bit) {
670 		Elf32_Phdr *phdr = elf->phdr;
671 
672 		for (n = 0; n < elf->e_phnum; n++)
673 			add_deps_from_segment(elf, phdr[n].p_type,
674 					      phdr[n].p_vaddr, phdr[n].p_memsz);
675 	} else {
676 		Elf64_Phdr *phdr = elf->phdr;
677 
678 		for (n = 0; n < elf->e_phnum; n++)
679 			add_deps_from_segment(elf, phdr[n].p_type,
680 					      phdr[n].p_vaddr, phdr[n].p_memsz);
681 	}
682 }
683 
684 static void copy_section_headers(struct ta_elf *elf)
685 {
686 	TEE_Result res = TEE_SUCCESS;
687 	size_t sz = elf->e_shnum * elf->e_shentsize;
688 	size_t offs = 0;
689 
690 	elf->shdr = malloc(sz);
691 	if (!elf->shdr)
692 		err(TEE_ERROR_OUT_OF_MEMORY, "malloc");
693 
694 	/*
695 	 * We're assuming that the section headers come after the load
696 	 * segments, but if it's a very small dynamically linked library they
697 	 * can still end up (possibly only partially) in the first mapped page.
698 	 */
699 	if (elf->e_shoff < SMALL_PAGE_SIZE) {
700 		assert(!elf->is_main);
701 		offs = MIN(SMALL_PAGE_SIZE - elf->e_shoff, sz);
702 		memcpy(elf->shdr, (void *)(elf->load_addr + elf->e_shoff),
703 		       offs);
704 	}
705 
706 	if (offs < sz) {
707 		res = sys_copy_from_ta_bin((uint8_t *)elf->shdr + offs,
708 					   sz - offs, elf->handle,
709 					   elf->e_shoff + offs);
710 		if (res)
711 			err(res, "sys_copy_from_ta_bin");
712 	}
713 }
714 
715 static void close_handle(struct ta_elf *elf)
716 {
717 	TEE_Result res = sys_close_ta_bin(elf->handle);
718 
719 	if (res)
720 		err(res, "sys_close_ta_bin");
721 	elf->handle = -1;
722 }
723 
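/*
 * Load the main TA ELF: map its segments, queue its dependencies, copy
 * the section headers and dynamic symbol table, map the stack, and
 * report entry point, stack pointer, bitness and TA flags back to the
 * caller.
 */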
724 void ta_elf_load_main(const TEE_UUID *uuid, uint32_t *is_32bit,
725 		      uint64_t *entry, uint64_t *sp, uint32_t *ta_flags)
726 {
727 	struct ta_elf *elf = queue_elf(uuid);
728 	struct ta_head *head;
729 	vaddr_t va = 0;
730 	TEE_Result res = TEE_SUCCESS;
731 
732 	assert(elf);
733 	elf->is_main = true;
734 
735 	init_elf(elf);
736 
737 	/*
738 	 * Legacy TAs don't set the ELF entry point, it's instead kept in
739 	 * ta_head. If the entry point isn't set explicitly the linker sets
740 	 * it to the start of the first executable section. Since ta_head
741 	 * always comes first in a legacy TA, the entry point ends up equal
742 	 * to sizeof(struct ta_head), that is, 0x20.
743 	 *
744 	 * NB, everything built before commit a73b5878c89d ("Replace
745 	 * ta_head.entry with elf entry") is considered a legacy TA by
746 	 * ldelf.
747 	 */
748 	if (elf->e_entry == sizeof(*head))
749 		elf->is_legacy = true;
750 
751 	map_segments(elf);
752 	add_dependencies(elf);
753 	copy_section_headers(elf);
754 	save_symtab(elf);
755 	close_handle(elf);
756 
757 	head = (struct ta_head *)elf->load_addr;
758 
759 	*is_32bit = elf->is_32bit;
760 	if (elf->is_legacy) {
761 		assert(head->depr_entry != UINT64_MAX);
762 		*entry = head->depr_entry + elf->load_addr;
763 	} else {
764 		assert(head->depr_entry == UINT64_MAX);
765 		*entry = elf->e_entry + elf->load_addr;
766 	}
767 
768 	res = sys_map_zi(head->stack_size, 0, &va, 0, 0);
769 	if (res)
770 		err(res, "sys_map_zi stack");
771 
772 	if (head->flags & ~TA_FLAGS_MASK)
773 		err(TEE_ERROR_BAD_FORMAT, "Invalid TA flag(s) %#"PRIx32,
774 		    head->flags & ~TA_FLAGS_MASK);
775 
776 	*ta_flags = head->flags;
777 	*sp = va + head->stack_size;
778 	ta_stack = va;
779 }
780 
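/*
 * Load a queued dependency (shared library). The main ELF is skipped
 * since it has already been loaded, and the dependency must have the
 * same bitness as the main ELF.
 */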
781 void ta_elf_load_dependency(struct ta_elf *elf, bool is_32bit)
782 {
783 	if (elf->is_main)
784 		return;
785 
786 	init_elf(elf);
787 	if (elf->is_32bit != is_32bit)
788 		err(TEE_ERROR_BAD_FORMAT, "ELF %pUl is %sbit (expected %sbit)",
789 		    (void *)&elf->uuid, elf->is_32bit ? "32" : "64",
790 		    is_32bit ? "32" : "64");
791 
792 	map_segments(elf);
793 	add_dependencies(elf);
794 	copy_section_headers(elf);
795 	save_symtab(elf);
796 	close_handle(elf);
797 }
798 
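/*
 * Only relevant for legacy TAs: populate_segments_legacy() mapped all
 * segments writeable, so apply the permissions given by the program
 * headers now that loading is complete.
 */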
799 void ta_elf_finalize_mappings(struct ta_elf *elf)
800 {
801 	TEE_Result res = TEE_SUCCESS;
802 	struct segment *seg = NULL;
803 
804 	if (!elf->is_legacy)
805 		return;
806 
807 	TAILQ_FOREACH(seg, &elf->segs, link) {
808 		vaddr_t va = elf->load_addr + seg->vaddr;
809 		uint32_t flags = 0;
810 
811 		if (seg->flags & PF_W)
812 			flags |= PTA_SYSTEM_MAP_FLAG_WRITEABLE;
813 		if (seg->flags & PF_X)
814 			flags |= PTA_SYSTEM_MAP_FLAG_EXECUTABLE;
815 
816 		res = sys_set_prot(va, seg->memsz, flags);
817 		if (res)
818 			err(res, "sys_set_prot");
819 	}
820 }
821 
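/*
 * Print one mapping line: index, virtual address, physical address
 * (for ELF segments the file offset is passed as @pa), size, "rwxs"
 * flags and a tag identifying the owner (ELF index, stack, ldelf or
 * parameter mapping).
 */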
822 static void print_seg(size_t idx __maybe_unused, int elf_idx __maybe_unused,
823 		      vaddr_t va __maybe_unused, paddr_t pa __maybe_unused,
824 		      size_t sz __maybe_unused, uint32_t flags)
825 {
826 	int width __maybe_unused = 8;
827 	char desc[14] __maybe_unused = "";
828 	char flags_str[] __maybe_unused = "----";
829 
830 	if (elf_idx > -1) {
831 		snprintf(desc, sizeof(desc), " [%d]", elf_idx);
832 	} else {
833 		if (flags & DUMP_MAP_EPHEM)
834 			snprintf(desc, sizeof(desc), " (param)");
835 		if (flags & DUMP_MAP_LDELF)
836 			snprintf(desc, sizeof(desc), " (ldelf)");
837 		if (va == ta_stack)
838 			snprintf(desc, sizeof(desc), " (stack)");
839 	}
840 
841 	if (flags & DUMP_MAP_READ)
842 		flags_str[0] = 'r';
843 	if (flags & DUMP_MAP_WRITE)
844 		flags_str[1] = 'w';
845 	if (flags & DUMP_MAP_EXEC)
846 		flags_str[2] = 'x';
847 	if (flags & DUMP_MAP_SECURE)
848 		flags_str[3] = 's';
849 
850 	EMSG_RAW("region %2zu: va 0x%0*"PRIxVA" pa 0x%0*"PRIxPA" size 0x%06zx flags %s%s",
851 		 idx, width, va, width, pa, sz, flags_str, desc);
852 }
853 
854 void ta_elf_print_mappings(struct ta_elf_queue *elf_queue, size_t num_maps,
855 			   struct dump_map *maps, vaddr_t mpool_base)
856 {
857 	struct segment *seg = NULL;
858 	struct ta_elf *elf = NULL;
859 	size_t elf_idx = 0;
860 	size_t idx = 0;
861 	size_t map_idx = 0;
862 
863 	/*
864 	 * Loop over all segments and maps, printing virtual addresses in
865 	 * order. A segment has priority if the virtual address is present
866 	 * in both a map and a segment.
867 	 */
868 	elf = TAILQ_FIRST(elf_queue);
869 	if (elf)
870 		seg = TAILQ_FIRST(&elf->segs);
871 	while (true) {
872 		vaddr_t va = -1;
873 		size_t sz = 0;
874 		uint32_t flags = DUMP_MAP_SECURE;
875 		size_t offs = 0;
876 
877 		if (seg) {
878 			va = rounddown(seg->vaddr + elf->load_addr);
879 			sz = roundup(seg->vaddr + seg->memsz) -
880 				     rounddown(seg->vaddr);
881 		}
882 
883 		while (map_idx < num_maps && maps[map_idx].va <= va) {
884 			uint32_t f = 0;
885 
886 			/* If there's a match, it should be the same map */
887 			if (maps[map_idx].va == va) {
888 				/*
889 				 * In shared libraries the first page is
890 				 * mapped separately with the rest of that
891 				 * segment following back to back in a
892 				 * separate entry.
893 				 */
894 				if (map_idx + 1 < num_maps &&
895 				    maps[map_idx].sz == SMALL_PAGE_SIZE) {
896 					vaddr_t next_va = maps[map_idx].va +
897 							  maps[map_idx].sz;
898 					size_t comb_sz = maps[map_idx].sz +
899 							 maps[map_idx + 1].sz;
900 
901 					if (next_va == maps[map_idx + 1].va &&
902 					    comb_sz == sz &&
903 					    maps[map_idx].flags ==
904 					    maps[map_idx + 1].flags) {
905 						/* Skip this and next entry */
906 						map_idx += 2;
907 						continue;
908 					}
909 				}
910 				assert(maps[map_idx].sz == sz);
911 			} else if (maps[map_idx].va < va) {
912 				if (maps[map_idx].va == mpool_base)
913 					f |= DUMP_MAP_LDELF;
914 				print_seg(idx, -1, maps[map_idx].va,
915 					  maps[map_idx].pa, maps[map_idx].sz,
916 					  maps[map_idx].flags | f);
917 				idx++;
918 			}
919 			map_idx++;
920 		}
921 
922 		if (!seg)
923 			break;
924 
925 		offs = rounddown(seg->offset);
926 		if (seg->flags & PF_R)
927 			flags |= DUMP_MAP_READ;
928 		if (seg->flags & PF_W)
929 			flags |= DUMP_MAP_WRITE;
930 		if (seg->flags & PF_X)
931 			flags |= DUMP_MAP_EXEC;
932 
933 		print_seg(idx, elf_idx, va, offs, sz, flags);
934 		idx++;
935 
936 		seg = TAILQ_NEXT(seg, link);
937 		if (!seg) {
938 			elf = TAILQ_NEXT(elf, link);
939 			if (elf)
940 				seg = TAILQ_FIRST(&elf->segs);
941 			elf_idx++;
942 		}
943 	}
944 
945 	elf_idx = 0;
946 	TAILQ_FOREACH(elf, elf_queue, link) {
947 		EMSG_RAW(" [%zu] %pUl @ 0x%0*" PRIxVA,
948 			 elf_idx, (void *)&elf->uuid, 8, elf->load_addr);
949 		elf_idx++;
950 	}
951 }
952