xref: /optee_os/ldelf/ta_elf.c (revision 34db7172f2379ebbe772eaf10c21e9814076afc1)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2019, Linaro Limited
4  */
5 
6 #include <assert.h>
7 #include <ctype.h>
8 #include <elf32.h>
9 #include <elf64.h>
10 #include <elf_common.h>
11 #include <ldelf.h>
12 #include <pta_system.h>
13 #include <stdio.h>
14 #include <stdlib.h>
15 #include <string_ext.h>
16 #include <string.h>
17 #include <tee_api_types.h>
18 #include <user_ta_header.h>
19 
20 #include "sys.h"
21 #include "ta_elf.h"
22 #include "unwind.h"
23 
24 static vaddr_t ta_stack;
25 static vaddr_t ta_stack_size;
26 
27 struct ta_elf_queue main_elf_queue = TAILQ_HEAD_INITIALIZER(main_elf_queue);
28 
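/*
 * Allocate a new ta_elf element for @uuid and append it to main_elf_queue,
 * or return NULL if an element with that UUID is already queued (each
 * TA/library is only loaded once).
 */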
29 static struct ta_elf *queue_elf(const TEE_UUID *uuid)
30 {
31 	struct ta_elf *elf = NULL;
32 
33 	TAILQ_FOREACH(elf, &main_elf_queue, link)
34 		if (!memcmp(uuid, &elf->uuid, sizeof(*uuid)))
35 			return NULL;
36 
37 	elf = calloc(1, sizeof(*elf));
38 	if (!elf)
39 		err(TEE_ERROR_OUT_OF_MEMORY, "calloc");
40 
41 	TAILQ_INIT(&elf->segs);
42 
43 	elf->uuid = *uuid;
44 	TAILQ_INSERT_TAIL(&main_elf_queue, elf, link);
45 	return elf;
46 }
47 
48 static TEE_Result e32_parse_ehdr(struct ta_elf *elf, Elf32_Ehdr *ehdr)
49 {
50 	if (ehdr->e_ident[EI_VERSION] != EV_CURRENT ||
51 	    ehdr->e_ident[EI_CLASS] != ELFCLASS32 ||
52 	    ehdr->e_ident[EI_DATA] != ELFDATA2LSB ||
53 	    ehdr->e_ident[EI_OSABI] != ELFOSABI_NONE ||
54 	    ehdr->e_type != ET_DYN || ehdr->e_machine != EM_ARM ||
55 	    (ehdr->e_flags & EF_ARM_ABIMASK) != EF_ARM_ABI_VERSION ||
56 #ifndef CFG_WITH_VFP
57 	    (ehdr->e_flags & EF_ARM_ABI_FLOAT_HARD) ||
58 #endif
59 	    ehdr->e_phentsize != sizeof(Elf32_Phdr) ||
60 	    ehdr->e_shentsize != sizeof(Elf32_Shdr))
61 		return TEE_ERROR_BAD_FORMAT;
62 
63 	elf->is_32bit = true;
64 	elf->e_entry = ehdr->e_entry;
65 	elf->e_phoff = ehdr->e_phoff;
66 	elf->e_shoff = ehdr->e_shoff;
67 	elf->e_phnum = ehdr->e_phnum;
68 	elf->e_shnum = ehdr->e_shnum;
69 	elf->e_phentsize = ehdr->e_phentsize;
70 	elf->e_shentsize = ehdr->e_shentsize;
71 
72 	return TEE_SUCCESS;
73 }
74 
75 #ifdef ARM64
76 static TEE_Result e64_parse_ehdr(struct ta_elf *elf, Elf64_Ehdr *ehdr)
77 {
78 	if (ehdr->e_ident[EI_VERSION] != EV_CURRENT ||
79 	    ehdr->e_ident[EI_CLASS] != ELFCLASS64 ||
80 	    ehdr->e_ident[EI_DATA] != ELFDATA2LSB ||
81 	    ehdr->e_ident[EI_OSABI] != ELFOSABI_NONE ||
82 	    ehdr->e_type != ET_DYN || ehdr->e_machine != EM_AARCH64 ||
83 	    ehdr->e_flags || ehdr->e_phentsize != sizeof(Elf64_Phdr) ||
84 	    ehdr->e_shentsize != sizeof(Elf64_Shdr))
85 		return TEE_ERROR_BAD_FORMAT;
86 
87 
88 	elf->is_32bit = false;
89 	elf->e_entry = ehdr->e_entry;
90 	elf->e_phoff = ehdr->e_phoff;
91 	elf->e_shoff = ehdr->e_shoff;
92 	elf->e_phnum = ehdr->e_phnum;
93 	elf->e_shnum = ehdr->e_shnum;
94 	elf->e_phentsize = ehdr->e_phentsize;
95 	elf->e_shentsize = ehdr->e_shentsize;
96 
97 	return TEE_SUCCESS;
98 }
99 #else /*ARM64*/
100 static TEE_Result e64_parse_ehdr(struct ta_elf *elf __unused,
101 				 Elf64_Ehdr *ehdr __unused)
102 {
103 	return TEE_ERROR_NOT_SUPPORTED;
104 }
105 #endif /*ARM64*/
106 
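/*
 * Read dynamic section entry @idx from the PT_DYNAMIC segment whose
 * link-time address is @addr (relocated by load_addr), returning the
 * d_tag/d_val pair in a width-independent form for both ELF classes.
 */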
107 static void read_dyn(struct ta_elf *elf, vaddr_t addr,
108 		     size_t idx, unsigned int *tag, size_t *val)
109 {
110 	if (elf->is_32bit) {
111 		Elf32_Dyn *dyn = (Elf32_Dyn *)(addr + elf->load_addr);
112 
113 		*tag = dyn[idx].d_tag;
114 		*val = dyn[idx].d_un.d_val;
115 	} else {
116 		Elf64_Dyn *dyn = (Elf64_Dyn *)(addr + elf->load_addr);
117 
118 		*tag = dyn[idx].d_tag;
119 		*val = dyn[idx].d_un.d_val;
120 	}
121 }
122 
123 static void e32_save_symtab(struct ta_elf *elf, size_t tab_idx)
124 {
125 	Elf32_Shdr *shdr = elf->shdr;
126 	size_t str_idx = shdr[tab_idx].sh_link;
127 
128 	elf->dynsymtab = (void *)(shdr[tab_idx].sh_addr + elf->load_addr);
129 	assert(!(shdr[tab_idx].sh_size % sizeof(Elf32_Sym)));
130 	elf->num_dynsyms = shdr[tab_idx].sh_size / sizeof(Elf32_Sym);
131 
132 	elf->dynstr = (void *)(shdr[str_idx].sh_addr + elf->load_addr);
133 	elf->dynstr_size = shdr[str_idx].sh_size;
134 }
135 
136 static void e64_save_symtab(struct ta_elf *elf, size_t tab_idx)
137 {
138 	Elf64_Shdr *shdr = elf->shdr;
139 	size_t str_idx = shdr[tab_idx].sh_link;
140 
141 	elf->dynsymtab = (void *)(vaddr_t)(shdr[tab_idx].sh_addr +
142 					   elf->load_addr);
143 	assert(!(shdr[tab_idx].sh_size % sizeof(Elf64_Sym)));
144 	elf->num_dynsyms = shdr[tab_idx].sh_size / sizeof(Elf64_Sym);
145 
146 	elf->dynstr = (void *)(vaddr_t)(shdr[str_idx].sh_addr + elf->load_addr);
147 	elf->dynstr_size = shdr[str_idx].sh_size;
148 }
149 
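/*
 * Locate the SHT_DYNSYM section and record the dynamic symbol table and
 * its associated string table (sh_link) so symbols can be looked up later.
 */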
150 static void save_symtab(struct ta_elf *elf)
151 {
152 	size_t n = 0;
153 
154 	if (elf->is_32bit) {
155 		Elf32_Shdr *shdr = elf->shdr;
156 
157 		for (n = 0; n < elf->e_shnum; n++) {
158 			if (shdr[n].sh_type == SHT_DYNSYM) {
159 				e32_save_symtab(elf, n);
160 				break;
161 			}
162 		}
163 	} else {
164 		Elf64_Shdr *shdr = elf->shdr;
165 
166 		for (n = 0; n < elf->e_shnum; n++) {
167 			if (shdr[n].sh_type == SHT_DYNSYM) {
168 				e64_save_symtab(elf, n);
169 				break;
170 			}
171 		}
172 
173 	}
174 }
175 
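/*
 * Open the TA binary and map its first page, which is expected to hold the
 * ELF header and the program headers, then parse the ELF header (32-bit
 * first, falling back to 64-bit).
 */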
176 static void init_elf(struct ta_elf *elf)
177 {
178 	TEE_Result res = TEE_SUCCESS;
179 	vaddr_t va = 0;
180 	uint32_t flags = PTA_SYSTEM_MAP_FLAG_SHAREABLE;
181 	const size_t max_align = 0x10000;
182 
183 	res = sys_open_ta_bin(&elf->uuid, &elf->handle);
184 	if (res)
185 		err(res, "sys_open_ta_bin(%pUl)", (void *)&elf->uuid);
186 
187 	/*
188 	 * Map it read-only executable when we're loading a library where
189 	 * the ELF header is included in a load segment.
190 	 */
191 	if (!elf->is_main)
192 		flags |= PTA_SYSTEM_MAP_FLAG_EXECUTABLE;
193 	/*
194 	 * Add a max_align (64kB) pad at the end in case a library with
195 	 * this large alignment has been mapped before. We want to avoid
196 	 * ending up in a hole in the mapping of a library.
197 	 */
198 	res = sys_map_ta_bin(&va, SMALL_PAGE_SIZE, flags, elf->handle, 0, 0,
199 			     max_align);
200 	if (res)
201 		err(res, "sys_map_ta_bin");
202 	elf->ehdr_addr = va;
203 	if (!elf->is_main) {
204 		elf->load_addr = va;
205 		elf->max_addr = va + SMALL_PAGE_SIZE;
206 		elf->max_offs = SMALL_PAGE_SIZE;
207 	}
208 
209 	if (!IS_ELF(*(Elf32_Ehdr *)va))
210 		err(TEE_ERROR_BAD_FORMAT, "TA is not an ELF");
211 
212 	res = e32_parse_ehdr(elf, (void *)va);
213 	if (res == TEE_ERROR_BAD_FORMAT)
214 		res = e64_parse_ehdr(elf, (void *)va);
215 	if (res)
216 		err(res, "Cannot parse ELF");
217 
218 	if (elf->e_phoff + elf->e_phnum * elf->e_phentsize > SMALL_PAGE_SIZE)
219 		err(TEE_ERROR_NOT_SUPPORTED, "Cannot read program headers");
220 
221 	elf->phdr = (void *)(va + elf->e_phoff);
222 }
223 
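/*
 * Page-granularity rounding helpers. Assuming the usual 4kB
 * SMALL_PAGE_SIZE (0x1000): roundup(0x1234) == 0x2000 and
 * rounddown(0x1234) == 0x1000.
 */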
224 static size_t roundup(size_t v)
225 {
226 	return ROUNDUP(v, SMALL_PAGE_SIZE);
227 }
228 
229 static size_t rounddown(size_t v)
230 {
231 	return ROUNDDOWN(v, SMALL_PAGE_SIZE);
232 }
233 
234 static void add_segment(struct ta_elf *elf, size_t offset, size_t vaddr,
235 			size_t filesz, size_t memsz, size_t flags, size_t align)
236 {
237 	struct segment *seg = calloc(1, sizeof(*seg));
238 
239 	if (!seg)
240 		err(TEE_ERROR_OUT_OF_MEMORY, "calloc");
241 
242 	seg->offset = offset;
243 	seg->vaddr = vaddr;
244 	seg->filesz = filesz;
245 	seg->memsz = memsz;
246 	seg->flags = flags;
247 	seg->align = align;
248 
249 	TAILQ_INSERT_TAIL(&elf->segs, seg, link);
250 }
251 
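/*
 * Record every PT_LOAD program header as a segment to map. For 32-bit TAs
 * the PT_ARM_EXIDX segment (the exception index table used for unwinding)
 * is noted as well.
 */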
252 static void parse_load_segments(struct ta_elf *elf)
253 {
254 	size_t n = 0;
255 
256 	if (elf->is_32bit) {
257 		Elf32_Phdr *phdr = elf->phdr;
258 
259 		for (n = 0; n < elf->e_phnum; n++)
260 			if (phdr[n].p_type == PT_LOAD) {
261 				add_segment(elf, phdr[n].p_offset,
262 					    phdr[n].p_vaddr, phdr[n].p_filesz,
263 					    phdr[n].p_memsz, phdr[n].p_flags,
264 					    phdr[n].p_align);
265 			} else if (phdr[n].p_type == PT_ARM_EXIDX) {
266 				elf->exidx_start = phdr[n].p_vaddr;
267 				elf->exidx_size = phdr[n].p_filesz;
268 			}
269 	} else {
270 		Elf64_Phdr *phdr = elf->phdr;
271 
272 		for (n = 0; n < elf->e_phnum; n++)
273 			if (phdr[n].p_type == PT_LOAD)
274 				add_segment(elf, phdr[n].p_offset,
275 					    phdr[n].p_vaddr, phdr[n].p_filesz,
276 					    phdr[n].p_memsz, phdr[n].p_flags,
277 					    phdr[n].p_align);
278 	}
279 }
280 
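/*
 * Fill a remapped-writeable segment: bytes that are already present in
 * previously mapped pages (offset < max_offs) are copied directly, the
 * remainder is read from the TA binary with sys_copy_from_ta_bin().
 */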
281 static void copy_remapped_to(struct ta_elf *elf, const struct segment *seg)
282 {
283 	uint8_t *dst = (void *)(seg->vaddr + elf->load_addr);
284 	size_t n = 0;
285 	size_t offs = seg->offset;
286 	size_t num_bytes = seg->filesz;
287 
288 	if (offs < elf->max_offs) {
289 		n = MIN(elf->max_offs - offs, num_bytes);
290 		memcpy(dst, (void *)(elf->max_addr + offs - elf->max_offs), n);
291 		dst += n;
292 		offs += n;
293 		num_bytes -= n;
294 	}
295 
296 	if (num_bytes) {
297 		TEE_Result res = sys_copy_from_ta_bin(dst, num_bytes,
298 						      elf->handle, offs);
299 
300 		if (res)
301 			err(res, "sys_copy_from_ta_bin");
302 		elf->max_offs += offs;
303 	}
304 }
305 
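/*
 * Sanity check the recorded segments and adjust them for mapping with
 * small-page granularity: segments overlapping the previous segment's last
 * page are merged (permissions combined), segments that only overlap in
 * file offset are flagged as remapped_writeable, and remaining segments
 * are rounded down to page boundaries.
 */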
306 static void adjust_segments(struct ta_elf *elf)
307 {
308 	struct segment *seg = NULL;
309 	struct segment *prev_seg = NULL;
310 	size_t prev_end_addr = 0;
311 	size_t align = 0;
312 	size_t mask = 0;
313 
314 	/* Sanity check */
315 	TAILQ_FOREACH(seg, &elf->segs, link) {
316 		size_t dummy __maybe_unused = 0;
317 
318 		assert(seg->align >= SMALL_PAGE_SIZE);
319 		assert(!ADD_OVERFLOW(seg->vaddr, seg->memsz, &dummy));
320 		assert(seg->filesz <= seg->memsz);
321 		assert((seg->offset & SMALL_PAGE_MASK) ==
322 		       (seg->vaddr & SMALL_PAGE_MASK));
323 
324 		prev_seg = TAILQ_PREV(seg, segment_head, link);
325 		if (prev_seg) {
326 			assert(seg->vaddr >= prev_seg->vaddr + prev_seg->memsz);
327 			assert(seg->offset >=
328 			       prev_seg->offset + prev_seg->filesz);
329 		}
330 		if (!align)
331 			align = seg->align;
332 		assert(align == seg->align);
333 	}
334 
335 	mask = align - 1;
336 
337 	seg = TAILQ_FIRST(&elf->segs);
338 	if (seg)
339 		seg = TAILQ_NEXT(seg, link);
340 	while (seg) {
341 		prev_seg = TAILQ_PREV(seg, segment_head, link);
342 		prev_end_addr = prev_seg->vaddr + prev_seg->memsz;
343 
344 		/*
345 		 * This segment may overlap with the last "page" in the
346 		 * previous segment in two different ways:
347 		 * 1. Virtual address (and offset) overlaps =>
348 		 *    Permissions need to be merged. The SMALL_PAGE_MASK bits
349 		 *    of the offset must match those of the vaddr, and both
350 		 *    must line up with the previous segment.
351 		 *
352 		 * 2. Only offset overlaps =>
353 		 *    The same page in the ELF is mapped at two different
354 		 *    virtual addresses. As a limitation this segment must
355 		 *    be mapped as writeable.
356 		 */
357 
358 		/* Case 1. */
359 		if (rounddown(seg->vaddr) < prev_end_addr) {
360 			assert((seg->vaddr & mask) == (seg->offset & mask));
361 			assert(prev_seg->memsz == prev_seg->filesz);
362 
363 			/*
364 			 * Merge the segments and their permissions.
365 			 * Note that there may be a small hole between the
366 			 * two segments.
367 			 */
368 			prev_seg->filesz = seg->vaddr + seg->filesz -
369 					   prev_seg->vaddr;
370 			prev_seg->memsz = seg->vaddr + seg->memsz -
371 					   prev_seg->vaddr;
372 			prev_seg->flags |= seg->flags;
373 
374 			TAILQ_REMOVE(&elf->segs, seg, link);
375 			free(seg);
376 			seg = TAILQ_NEXT(prev_seg, link);
377 			continue;
378 		}
379 
380 		/* Case 2. */
381 		if ((seg->offset & mask) &&
382 		    rounddown(seg->offset) <
383 		    (prev_seg->offset + prev_seg->filesz)) {
384 
385 			assert(seg->flags & PF_W);
386 			seg->remapped_writeable = true;
387 		}
388 
389 		/*
390 		 * No overlap, but we may need to align address, offset and
391 		 * size.
392 		 */
393 		seg->filesz += seg->vaddr - rounddown(seg->vaddr);
394 		seg->memsz += seg->vaddr - rounddown(seg->vaddr);
395 		seg->vaddr = rounddown(seg->vaddr);
396 		seg->offset = rounddown(seg->offset);
397 		seg = TAILQ_NEXT(seg, link);
398 	}
399 
400 }
401 
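/*
 * Map segments for a legacy TA: each segment is mapped zero-initialized
 * with sys_map_zi() and filled with sys_copy_from_ta_bin(). Final
 * protections are applied later in ta_elf_finalize_mappings().
 */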
402 static void populate_segments_legacy(struct ta_elf *elf)
403 {
404 	TEE_Result res = TEE_SUCCESS;
405 	struct segment *seg = NULL;
406 	vaddr_t va = 0;
407 
408 	TAILQ_FOREACH(seg, &elf->segs, link) {
409 		struct segment *last_seg = TAILQ_LAST(&elf->segs, segment_head);
410 		size_t pad_end = roundup(last_seg->vaddr + last_seg->memsz -
411 					 seg->vaddr - seg->memsz);
412 		size_t num_bytes = roundup(seg->memsz);
413 
414 		if (!elf->load_addr)
415 			va = 0;
416 		else
417 			va = seg->vaddr + elf->load_addr;
418 
419 
420 		if (!(seg->flags & PF_R))
421 			err(TEE_ERROR_NOT_SUPPORTED,
422 			    "Segment must be readable");
423 
424 		res = sys_map_zi(num_bytes, 0, &va, 0, pad_end);
425 		if (res)
426 			err(res, "sys_map_zi");
427 		res = sys_copy_from_ta_bin((void *)va, seg->filesz,
428 					   elf->handle, seg->offset);
429 		if (res)
430 			err(res, "sys_copy_from_ta_bin");
431 
432 		if (!elf->load_addr)
433 			elf->load_addr = va;
434 		elf->max_addr = va + num_bytes;
435 		elf->max_offs = seg->offset + seg->filesz;
436 	}
437 }
438 
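/*
 * Map segments for a non-legacy ELF: writeable segments are mapped
 * zero-initialized and filled from the TA binary, read-only segments are
 * mapped directly from the binary with sys_map_ta_bin(). Segments
 * overlapping the already mapped first page of a shared library are
 * trimmed or skipped as needed.
 */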
439 static void populate_segments(struct ta_elf *elf)
440 {
441 	TEE_Result res = TEE_SUCCESS;
442 	struct segment *seg = NULL;
443 	vaddr_t va = 0;
444 
445 	TAILQ_FOREACH(seg, &elf->segs, link) {
446 		struct segment *last_seg = TAILQ_LAST(&elf->segs, segment_head);
447 		size_t pad_end = roundup(last_seg->vaddr + last_seg->memsz -
448 					 seg->vaddr - seg->memsz);
449 
450 		if (seg->remapped_writeable) {
451 			size_t num_bytes = roundup(seg->vaddr + seg->memsz) -
452 					   rounddown(seg->vaddr);
453 
454 			assert(elf->load_addr);
455 			va = rounddown(elf->load_addr + seg->vaddr);
456 			assert(va >= elf->max_addr);
457 			res = sys_map_zi(num_bytes, 0, &va, 0, pad_end);
458 			if (res)
459 				err(res, "sys_map_zi");
460 
461 			copy_remapped_to(elf, seg);
462 			elf->max_addr = va + num_bytes;
463 		} else {
464 			uint32_t flags = 0;
465 			size_t filesz = seg->filesz;
466 			size_t memsz = seg->memsz;
467 			size_t offset = seg->offset;
468 			size_t vaddr = seg->vaddr;
469 
470 			if (offset < elf->max_offs) {
471 				/*
472 				 * We're in a load segment which overlaps
473 				 * with (or is covered by) the first page
474 				 * of a shared library.
475 				 */
476 				if (vaddr + filesz < SMALL_PAGE_SIZE) {
477 					size_t num_bytes = 0;
478 
479 					/*
480 					 * If this segment is completely
481 					 * covered, take next.
482 					 */
483 					if (vaddr + memsz <= SMALL_PAGE_SIZE)
484 						continue;
485 
486 					/*
487 					 * All data of the segment is
488 					 * loaded, but we need to zero
489 					 * extend it.
490 					 */
491 					va = elf->max_addr;
492 					num_bytes = roundup(vaddr + memsz) -
493 						    roundup(vaddr) -
494 						    SMALL_PAGE_SIZE;
495 					assert(num_bytes);
496 					res = sys_map_zi(num_bytes, 0, &va, 0,
497 							 0);
498 					if (res)
499 						err(res, "sys_map_zi");
500 					elf->max_addr = roundup(va + num_bytes);
501 					continue;
502 				}
503 
504 				/* Partial overlap, remove the first page. */
505 				vaddr += SMALL_PAGE_SIZE;
506 				filesz -= SMALL_PAGE_SIZE;
507 				memsz -= SMALL_PAGE_SIZE;
508 				offset += SMALL_PAGE_SIZE;
509 			}
510 
511 			if (!elf->load_addr)
512 				va = 0;
513 			else
514 				va = vaddr + elf->load_addr;
515 
516 			if (seg->flags & PF_W)
517 				flags |= PTA_SYSTEM_MAP_FLAG_WRITEABLE;
518 			else
519 				flags |= PTA_SYSTEM_MAP_FLAG_SHAREABLE;
520 			if (seg->flags & PF_X)
521 				flags |= PTA_SYSTEM_MAP_FLAG_EXECUTABLE;
522 			if (!(seg->flags & PF_R))
523 				err(TEE_ERROR_NOT_SUPPORTED,
524 				    "Segment must be readable");
525 			if (flags & PTA_SYSTEM_MAP_FLAG_WRITEABLE) {
526 				res = sys_map_zi(memsz, 0, &va, 0, pad_end);
527 				if (res)
528 					err(res, "sys_map_zi");
529 				res = sys_copy_from_ta_bin((void *)va, filesz,
530 							   elf->handle, offset);
531 				if (res)
532 					err(res, "sys_copy_from_ta_bin");
533 			} else {
534 				res = sys_map_ta_bin(&va, filesz, flags,
535 						     elf->handle, offset,
536 						     0, pad_end);
537 				if (res)
538 					err(res, "sys_map_ta_bin");
539 			}
540 
541 			if (!elf->load_addr)
542 				elf->load_addr = va;
543 			elf->max_addr = roundup(va + filesz);
544 			elf->max_offs += filesz;
545 		}
546 	}
547 }
548 
549 static void map_segments(struct ta_elf *elf)
550 {
551 	parse_load_segments(elf);
552 	adjust_segments(elf);
553 	if (elf->is_legacy)
554 		populate_segments_legacy(elf);
555 	else
556 		populate_segments(elf);
557 }
558 
559 static int hex(char c)
560 {
561 	char lc = tolower(c);
562 
563 	if (isdigit(lc))
564 		return lc - '0';
565 	if (isxdigit(lc))
566 		return lc - 'a' + 10;
567 	return -1;
568 }
569 
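/*
 * Parse @nchars hexadecimal characters from @s, most significant digit
 * first. For example parse_hex("89ab", 4, &res) returns 0x89ab and sets
 * res to TEE_SUCCESS.
 */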
570 static uint32_t parse_hex(const char *s, size_t nchars, uint32_t *res)
571 {
572 	uint32_t v = 0;
573 	size_t n;
574 	int c;
575 
576 	for (n = 0; n < nchars; n++) {
577 		c = hex(s[n]);
578 		if (c == -1) {
579 			*res = TEE_ERROR_BAD_FORMAT;
580 			goto out;
581 		}
582 		v = (v << 4) + c;
583 	}
584 	*res = TEE_SUCCESS;
585 out:
586 	return v;
587 }
588 
589 /*
590  * Convert a UUID string @s into a TEE_UUID @uuid
591  * Expected format for @s is: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
592  * 'x' being any hexadecimal digit (0-9a-fA-F)
593  */
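/*
 * Illustrative example: "01234567-89ab-cdef-0123-456789abcdef" parses to
 * timeLow = 0x01234567, timeMid = 0x89ab, timeHiAndVersion = 0xcdef and
 * clockSeqAndNode = { 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef }.
 */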
594 static TEE_Result parse_uuid(const char *s, TEE_UUID *uuid)
595 {
596 	TEE_Result res = TEE_SUCCESS;
597 	TEE_UUID u = { 0 };
598 	const char *p = s;
599 	size_t i;
600 
601 	if (strlen(p) != 36)
602 		return TEE_ERROR_BAD_FORMAT;
603 	if (p[8] != '-' || p[13] != '-' || p[18] != '-' || p[23] != '-')
604 		return TEE_ERROR_BAD_FORMAT;
605 
606 	u.timeLow = parse_hex(p, 8, &res);
607 	if (res)
608 		goto out;
609 	p += 9;
610 	u.timeMid = parse_hex(p, 4, &res);
611 	if (res)
612 		goto out;
613 	p += 5;
614 	u.timeHiAndVersion = parse_hex(p, 4, &res);
615 	if (res)
616 		goto out;
617 	p += 5;
618 	for (i = 0; i < 8; i++) {
619 		u.clockSeqAndNode[i] = parse_hex(p, 2, &res);
620 		if (res)
621 			goto out;
622 		if (i == 1)
623 			p += 3;
624 		else
625 			p += 2;
626 	}
627 	*uuid = u;
628 out:
629 	return res;
630 }
631 
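/*
 * Scan a PT_DYNAMIC segment for DT_NEEDED entries. Each entry holds an
 * offset into the dynamic string table (DT_STRTAB) naming a dependency by
 * UUID string; the UUID is parsed and the dependency queued for loading.
 */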
632 static void add_deps_from_segment(struct ta_elf *elf, unsigned int type,
633 				  vaddr_t addr, size_t memsz)
634 {
635 	size_t dyn_entsize = 0;
636 	size_t num_dyns = 0;
637 	size_t n = 0;
638 	unsigned int tag = 0;
639 	size_t val = 0;
640 	TEE_UUID uuid = { };
641 	char *str_tab = NULL;
642 
643 	if (type != PT_DYNAMIC)
644 		return;
645 
646 	if (elf->is_32bit)
647 		dyn_entsize = sizeof(Elf32_Dyn);
648 	else
649 		dyn_entsize = sizeof(Elf64_Dyn);
650 
651 	assert(!(memsz % dyn_entsize));
652 	num_dyns = memsz / dyn_entsize;
653 
654 	for (n = 0; n < num_dyns; n++) {
655 		read_dyn(elf, addr, n, &tag, &val);
656 		if (tag == DT_STRTAB) {
657 			str_tab = (char *)(val + elf->load_addr);
658 			break;
659 		}
660 	}
661 
662 	for (n = 0; n < num_dyns; n++) {
663 		read_dyn(elf, addr, n, &tag, &val);
664 		if (tag != DT_NEEDED)
665 			continue;
666 		parse_uuid(str_tab + val, &uuid);
667 		queue_elf(&uuid);
668 	}
669 }
670 
671 static void add_dependencies(struct ta_elf *elf)
672 {
673 	size_t n = 0;
674 
675 	if (elf->is_32bit) {
676 		Elf32_Phdr *phdr = elf->phdr;
677 
678 		for (n = 0; n < elf->e_phnum; n++)
679 			add_deps_from_segment(elf, phdr[n].p_type,
680 					      phdr[n].p_vaddr, phdr[n].p_memsz);
681 	} else {
682 		Elf64_Phdr *phdr = elf->phdr;
683 
684 		for (n = 0; n < elf->e_phnum; n++)
685 			add_deps_from_segment(elf, phdr[n].p_type,
686 					      phdr[n].p_vaddr, phdr[n].p_memsz);
687 	}
688 }
689 
690 static void copy_section_headers(struct ta_elf *elf)
691 {
692 	TEE_Result res = TEE_SUCCESS;
693 	size_t sz = elf->e_shnum * elf->e_shentsize;
694 	size_t offs = 0;
695 
696 	elf->shdr = malloc(sz);
697 	if (!elf->shdr)
698 		err(TEE_ERROR_OUT_OF_MEMORY, "malloc");
699 
700 	/*
701 	 * We're assuming that the section headers come after the load segments,
702 	 * but if it's a very small dynamically linked library the section
703 	 * headers can still end up (at least partially) in the first mapped page.
704 	 */
705 	if (elf->e_shoff < SMALL_PAGE_SIZE) {
706 		assert(!elf->is_main);
707 		offs = MIN(SMALL_PAGE_SIZE - elf->e_shoff, sz);
708 		memcpy(elf->shdr, (void *)(elf->load_addr + elf->e_shoff),
709 		       offs);
710 	}
711 
712 	if (offs < sz) {
713 		res = sys_copy_from_ta_bin((uint8_t *)elf->shdr + offs,
714 					   sz - offs, elf->handle,
715 					   elf->e_shoff + offs);
716 		if (res)
717 			err(res, "sys_copy_from_ta_bin");
718 	}
719 }
720 
721 static void close_handle(struct ta_elf *elf)
722 {
723 	TEE_Result res = sys_close_ta_bin(elf->handle);
724 
725 	if (res)
726 		err(res, "sys_close_ta_bin");
727 	elf->handle = -1;
728 }
729 
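/*
 * Load the main TA: map and parse its ELF, queue its dependencies, and
 * allocate the stack described by ta_head. The entry point, initial stack
 * pointer and TA flags are returned to the caller.
 */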
730 void ta_elf_load_main(const TEE_UUID *uuid, uint32_t *is_32bit,
731 		      uint64_t *entry, uint64_t *sp, uint32_t *ta_flags)
732 {
733 	struct ta_elf *elf = queue_elf(uuid);
734 	struct ta_head *head;
735 	vaddr_t va = 0;
736 	TEE_Result res = TEE_SUCCESS;
737 
738 	assert(elf);
739 	elf->is_main = true;
740 
741 	init_elf(elf);
742 
743 	/*
744 	 * Legacy TAs don't set the ELF entry point; instead it's given in
745 	 * ta_head. When the entry point isn't set explicitly, the linker
746 	 * sets it to the start of the first executable section. Since
747 	 * ta_head always comes first in a legacy TA, this means the entry
748 	 * point ends up at 0x20, that is, sizeof(struct ta_head).
749 	 *
750 	 * NB, everything before commit a73b5878c89d ("Replace
751 	 * ta_head.entry with elf entry") is considered a legacy TA by
752 	 * ldelf.
753 	 */
754 	if (elf->e_entry == sizeof(*head))
755 		elf->is_legacy = true;
756 
757 	map_segments(elf);
758 	add_dependencies(elf);
759 	copy_section_headers(elf);
760 	save_symtab(elf);
761 	close_handle(elf);
762 
763 	head = (struct ta_head *)elf->load_addr;
764 
765 	*is_32bit = elf->is_32bit;
766 	if (elf->is_legacy) {
767 		assert(head->depr_entry != UINT64_MAX);
768 		*entry = head->depr_entry + elf->load_addr;
769 	} else {
770 		assert(head->depr_entry == UINT64_MAX);
771 		*entry = elf->e_entry + elf->load_addr;
772 	}
773 
774 	res = sys_map_zi(head->stack_size, 0, &va, 0, 0);
775 	if (res)
776 		err(res, "sys_map_zi stack");
777 
778 	if (head->flags & ~TA_FLAGS_MASK)
779 		err(TEE_ERROR_BAD_FORMAT, "Invalid TA flag(s) %#"PRIx32,
780 		    head->flags & ~TA_FLAGS_MASK);
781 
782 	*ta_flags = head->flags;
783 	*sp = va + head->stack_size;
784 	ta_stack = va;
785 	ta_stack_size = head->stack_size;
786 }
787 
788 void ta_elf_load_dependency(struct ta_elf *elf, bool is_32bit)
789 {
790 	if (elf->is_main)
791 		return;
792 
793 	init_elf(elf);
794 	if (elf->is_32bit != is_32bit)
795 		err(TEE_ERROR_BAD_FORMAT, "ELF %pUl is %sbit (expected %sbit)",
796 		    (void *)&elf->uuid, elf->is_32bit ? "32" : "64",
797 		    is_32bit ? "32" : "64");
798 
799 	map_segments(elf);
800 	add_dependencies(elf);
801 	copy_section_headers(elf);
802 	save_symtab(elf);
803 	close_handle(elf);
804 }
805 
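/*
 * For legacy TAs the segments were mapped writeable by
 * populate_segments_legacy(); apply the final per-segment protections
 * derived from the ELF segment flags. Non-legacy ELFs already have the
 * correct protections and are left untouched.
 */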
806 void ta_elf_finalize_mappings(struct ta_elf *elf)
807 {
808 	TEE_Result res = TEE_SUCCESS;
809 	struct segment *seg = NULL;
810 
811 	if (!elf->is_legacy)
812 		return;
813 
814 	TAILQ_FOREACH(seg, &elf->segs, link) {
815 		vaddr_t va = elf->load_addr + seg->vaddr;
816 		uint32_t flags = 0;
817 
818 		if (seg->flags & PF_W)
819 			flags |= PTA_SYSTEM_MAP_FLAG_WRITEABLE;
820 		if (seg->flags & PF_X)
821 			flags |= PTA_SYSTEM_MAP_FLAG_EXECUTABLE;
822 
823 		res = sys_set_prot(va, seg->memsz, flags);
824 		if (res)
825 			err(res, "sys_set_prot");
826 	}
827 }
828 
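/*
 * Print a single line of the mapping dump, for example (illustrative
 * values):
 *   region  2: va 0x00104000 pa 0x00001000 size 0x002000 flags rw-s [0]
 * where "[0]" refers to the ELF index listed at the end of
 * ta_elf_print_mappings() and "(stack)", "(param)" or "(ldelf)" mark
 * special regions.
 */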
829 static void print_seg(size_t idx __maybe_unused, int elf_idx __maybe_unused,
830 		      vaddr_t va __maybe_unused, paddr_t pa __maybe_unused,
831 		      size_t sz __maybe_unused, uint32_t flags)
832 {
833 	int width __maybe_unused = 8;
834 	char desc[14] __maybe_unused = "";
835 	char flags_str[] __maybe_unused = "----";
836 
837 	if (elf_idx > -1) {
838 		snprintf(desc, sizeof(desc), " [%d]", elf_idx);
839 	} else {
840 		if (flags & DUMP_MAP_EPHEM)
841 			snprintf(desc, sizeof(desc), " (param)");
842 		if (flags & DUMP_MAP_LDELF)
843 			snprintf(desc, sizeof(desc), " (ldelf)");
844 		if (va == ta_stack)
845 			snprintf(desc, sizeof(desc), " (stack)");
846 	}
847 
848 	if (flags & DUMP_MAP_READ)
849 		flags_str[0] = 'r';
850 	if (flags & DUMP_MAP_WRITE)
851 		flags_str[1] = 'w';
852 	if (flags & DUMP_MAP_EXEC)
853 		flags_str[2] = 'x';
854 	if (flags & DUMP_MAP_SECURE)
855 		flags_str[3] = 's';
856 
857 	EMSG_RAW("region %2zu: va 0x%0*"PRIxVA" pa 0x%0*"PRIxPA" size 0x%06zx flags %s%s",
858 		 idx, width, va, width, pa, sz, flags_str, desc);
859 }
860 
861 void ta_elf_print_mappings(struct ta_elf_queue *elf_queue, size_t num_maps,
862 			   struct dump_map *maps, vaddr_t mpool_base)
863 {
864 	struct segment *seg = NULL;
865 	struct ta_elf *elf = NULL;
866 	size_t elf_idx = 0;
867 	size_t idx = 0;
868 	size_t map_idx = 0;
869 
870 	/*
871 	 * Loop over all segments and maps, printing them in virtual address
872 	 * order. A segment takes priority if the virtual address is present
873 	 * in both a map and a segment.
874 	 */
875 	elf = TAILQ_FIRST(elf_queue);
876 	if (elf)
877 		seg = TAILQ_FIRST(&elf->segs);
878 	while (true) {
879 		vaddr_t va = -1;
880 		size_t sz = 0;
881 		uint32_t flags = DUMP_MAP_SECURE;
882 		size_t offs = 0;
883 
884 		if (seg) {
885 			va = rounddown(seg->vaddr + elf->load_addr);
886 			sz = roundup(seg->vaddr + seg->memsz) -
887 				     rounddown(seg->vaddr);
888 		}
889 
890 		while (map_idx < num_maps && maps[map_idx].va <= va) {
891 			uint32_t f = 0;
892 
893 			/* If there's a match, it should be the same map */
894 			if (maps[map_idx].va == va) {
895 				/*
896 				 * In shared libraries the first page is
897 				 * mapped separately with the rest of that
898 				 * segment following back to back in a
899 				 * separate entry.
900 				 */
901 				if (map_idx + 1 < num_maps &&
902 				    maps[map_idx].sz == SMALL_PAGE_SIZE) {
903 					vaddr_t next_va = maps[map_idx].va +
904 							  maps[map_idx].sz;
905 					size_t comb_sz = maps[map_idx].sz +
906 							 maps[map_idx + 1].sz;
907 
908 					if (next_va == maps[map_idx + 1].va &&
909 					    comb_sz == sz &&
910 					    maps[map_idx].flags ==
911 					    maps[map_idx + 1].flags) {
912 						/* Skip this and next entry */
913 						map_idx += 2;
914 						continue;
915 					}
916 				}
917 				assert(maps[map_idx].sz == sz);
918 			} else if (maps[map_idx].va < va) {
919 				if (maps[map_idx].va == mpool_base)
920 					f |= DUMP_MAP_LDELF;
921 				print_seg(idx, -1, maps[map_idx].va,
922 					  maps[map_idx].pa, maps[map_idx].sz,
923 					  maps[map_idx].flags | f);
924 				idx++;
925 			}
926 			map_idx++;
927 		}
928 
929 		if (!seg)
930 			break;
931 
932 		offs = rounddown(seg->offset);
933 		if (seg->flags & PF_R)
934 			flags |= DUMP_MAP_READ;
935 		if (seg->flags & PF_W)
936 			flags |= DUMP_MAP_WRITE;
937 		if (seg->flags & PF_X)
938 			flags |= DUMP_MAP_EXEC;
939 
940 		print_seg(idx, elf_idx, va, offs, sz, flags);
941 		idx++;
942 
943 		seg = TAILQ_NEXT(seg, link);
944 		if (!seg) {
945 			elf = TAILQ_NEXT(elf, link);
946 			if (elf)
947 				seg = TAILQ_FIRST(&elf->segs);
948 			elf_idx++;
949 		}
950 	};
951 
952 	elf_idx = 0;
953 	TAILQ_FOREACH(elf, elf_queue, link) {
954 		EMSG_RAW(" [%zu] %pUl @ 0x%0*" PRIxVA,
955 			 elf_idx, (void *)&elf->uuid, 8, elf->load_addr);
956 		elf_idx++;
957 	}
958 }
959 
960 #ifdef CFG_UNWIND
961 void ta_elf_stack_trace_a32(uint32_t regs[16])
962 {
963 	struct unwind_state_arm32 state = { };
964 
965 	memcpy(state.registers, regs, sizeof(state.registers));
966 	print_stack_arm32(&state, ta_stack, ta_stack_size);
967 }
968 
969 void ta_elf_stack_trace_a64(uint64_t fp, uint64_t sp, uint64_t pc)
970 {
971 	struct unwind_state_arm64 state = { .fp = fp, .sp = sp, .pc = pc };
972 
973 	print_stack_arm64(&state, ta_stack, ta_stack_size);
974 }
975 #endif
976