xref: /optee_os/ldelf/ta_elf.c (revision 88796f89484340777b3a507cd78b0896b7ab105b)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2019, Linaro Limited
4  */
5 
6 #include <assert.h>
7 #include <ctype.h>
8 #include <elf32.h>
9 #include <elf64.h>
10 #include <elf_common.h>
11 #include <ldelf.h>
12 #include <pta_system.h>
13 #include <stdio.h>
14 #include <stdlib.h>
15 #include <string_ext.h>
16 #include <string.h>
17 #include <tee_api_types.h>
18 #include <user_ta_header.h>
19 
20 #include "sys.h"
21 #include "ta_elf.h"
22 #include "unwind.h"
23 
24 static vaddr_t ta_stack;
25 static vaddr_t ta_stack_size;
26 
27 struct ta_elf_queue main_elf_queue = TAILQ_HEAD_INITIALIZER(main_elf_queue);
28 
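/*
 * Allocate and queue a new ta_elf element for @uuid on main_elf_queue.
 * Returns NULL if the UUID is already queued, that is, the ELF is
 * already loaded or about to be loaded.
 */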
29 static struct ta_elf *queue_elf(const TEE_UUID *uuid)
30 {
31 	struct ta_elf *elf = NULL;
32 
33 	TAILQ_FOREACH(elf, &main_elf_queue, link)
34 		if (!memcmp(uuid, &elf->uuid, sizeof(*uuid)))
35 			return NULL;
36 
37 	elf = calloc(1, sizeof(*elf));
38 	if (!elf)
39 		err(TEE_ERROR_OUT_OF_MEMORY, "calloc");
40 
41 	TAILQ_INIT(&elf->segs);
42 
43 	elf->uuid = *uuid;
44 	TAILQ_INSERT_TAIL(&main_elf_queue, elf, link);
45 	return elf;
46 }
47 
48 static TEE_Result e32_parse_ehdr(struct ta_elf *elf, Elf32_Ehdr *ehdr)
49 {
50 	if (ehdr->e_ident[EI_VERSION] != EV_CURRENT ||
51 	    ehdr->e_ident[EI_CLASS] != ELFCLASS32 ||
52 	    ehdr->e_ident[EI_DATA] != ELFDATA2LSB ||
53 	    ehdr->e_ident[EI_OSABI] != ELFOSABI_NONE ||
54 	    ehdr->e_type != ET_DYN || ehdr->e_machine != EM_ARM ||
55 	    (ehdr->e_flags & EF_ARM_ABIMASK) != EF_ARM_ABI_VERSION ||
56 #ifndef CFG_WITH_VFP
57 	    (ehdr->e_flags & EF_ARM_ABI_FLOAT_HARD) ||
58 #endif
59 	    ehdr->e_phentsize != sizeof(Elf32_Phdr) ||
60 	    ehdr->e_shentsize != sizeof(Elf32_Shdr))
61 		return TEE_ERROR_BAD_FORMAT;
62 
63 	elf->is_32bit = true;
64 	elf->e_entry = ehdr->e_entry;
65 	elf->e_phoff = ehdr->e_phoff;
66 	elf->e_shoff = ehdr->e_shoff;
67 	elf->e_phnum = ehdr->e_phnum;
68 	elf->e_shnum = ehdr->e_shnum;
69 	elf->e_phentsize = ehdr->e_phentsize;
70 	elf->e_shentsize = ehdr->e_shentsize;
71 
72 	return TEE_SUCCESS;
73 }
74 
75 #ifdef ARM64
76 static TEE_Result e64_parse_ehdr(struct ta_elf *elf, Elf64_Ehdr *ehdr)
77 {
78 	if (ehdr->e_ident[EI_VERSION] != EV_CURRENT ||
79 	    ehdr->e_ident[EI_CLASS] != ELFCLASS64 ||
80 	    ehdr->e_ident[EI_DATA] != ELFDATA2LSB ||
81 	    ehdr->e_ident[EI_OSABI] != ELFOSABI_NONE ||
82 	    ehdr->e_type != ET_DYN || ehdr->e_machine != EM_AARCH64 ||
83 	    ehdr->e_flags || ehdr->e_phentsize != sizeof(Elf64_Phdr) ||
84 	    ehdr->e_shentsize != sizeof(Elf64_Shdr))
85 		return TEE_ERROR_BAD_FORMAT;
86 
87 
88 	elf->is_32bit = false;
89 	elf->e_entry = ehdr->e_entry;
90 	elf->e_phoff = ehdr->e_phoff;
91 	elf->e_shoff = ehdr->e_shoff;
92 	elf->e_phnum = ehdr->e_phnum;
93 	elf->e_shnum = ehdr->e_shnum;
94 	elf->e_phentsize = ehdr->e_phentsize;
95 	elf->e_shentsize = ehdr->e_shentsize;
96 
97 	return TEE_SUCCESS;
98 }
99 #else /*ARM64*/
100 static TEE_Result e64_parse_ehdr(struct ta_elf *elf __unused,
101 				 Elf64_Ehdr *ehdr __unused)
102 {
103 	return TEE_ERROR_NOT_SUPPORTED;
104 }
105 #endif /*ARM64*/
106 
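/*
 * Read the tag and value of dynamic section entry @idx at virtual
 * address @addr (relative to the load address), taking the 32-bit or
 * 64-bit layout of the ELF into account.
 */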
107 static void read_dyn(struct ta_elf *elf, vaddr_t addr,
108 		     size_t idx, unsigned int *tag, size_t *val)
109 {
110 	if (elf->is_32bit) {
111 		Elf32_Dyn *dyn = (Elf32_Dyn *)(addr + elf->load_addr);
112 
113 		*tag = dyn[idx].d_tag;
114 		*val = dyn[idx].d_un.d_val;
115 	} else {
116 		Elf64_Dyn *dyn = (Elf64_Dyn *)(addr + elf->load_addr);
117 
118 		*tag = dyn[idx].d_tag;
119 		*val = dyn[idx].d_un.d_val;
120 	}
121 }
122 
123 static void e32_save_symtab(struct ta_elf *elf, size_t tab_idx)
124 {
125 	Elf32_Shdr *shdr = elf->shdr;
126 	size_t str_idx = shdr[tab_idx].sh_link;
127 
128 	elf->dynsymtab = (void *)(shdr[tab_idx].sh_addr + elf->load_addr);
129 	assert(!(shdr[tab_idx].sh_size % sizeof(Elf32_Sym)));
130 	elf->num_dynsyms = shdr[tab_idx].sh_size / sizeof(Elf32_Sym);
131 
132 	elf->dynstr = (void *)(shdr[str_idx].sh_addr + elf->load_addr);
133 	elf->dynstr_size = shdr[str_idx].sh_size;
134 }
135 
136 static void e64_save_symtab(struct ta_elf *elf, size_t tab_idx)
137 {
138 	Elf64_Shdr *shdr = elf->shdr;
139 	size_t str_idx = shdr[tab_idx].sh_link;
140 
141 	elf->dynsymtab = (void *)(vaddr_t)(shdr[tab_idx].sh_addr +
142 					   elf->load_addr);
143 	assert(!(shdr[tab_idx].sh_size % sizeof(Elf64_Sym)));
144 	elf->num_dynsyms = shdr[tab_idx].sh_size / sizeof(Elf64_Sym);
145 
146 	elf->dynstr = (void *)(vaddr_t)(shdr[str_idx].sh_addr + elf->load_addr);
147 	elf->dynstr_size = shdr[str_idx].sh_size;
148 }
149 
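/*
 * Find the dynamic symbol table (SHT_DYNSYM) among the section headers
 * and record its location together with the associated string table.
 */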
150 static void save_symtab(struct ta_elf *elf)
151 {
152 	size_t n = 0;
153 
154 	if (elf->is_32bit) {
155 		Elf32_Shdr *shdr = elf->shdr;
156 
157 		for (n = 0; n < elf->e_shnum; n++) {
158 			if (shdr[n].sh_type == SHT_DYNSYM) {
159 				e32_save_symtab(elf, n);
160 				break;
161 			}
162 		}
163 	} else {
164 		Elf64_Shdr *shdr = elf->shdr;
165 
166 		for (n = 0; n < elf->e_shnum; n++) {
167 			if (shdr[n].sh_type == SHT_DYNSYM) {
168 				e64_save_symtab(elf, n);
169 				break;
170 			}
171 		}
172 
173 	}
174 }
175 
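/*
 * Open the TA binary, map the first page shareable (and executable for
 * libraries), verify and parse the ELF header and locate the program
 * headers within the mapped page.
 */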
176 static void init_elf(struct ta_elf *elf)
177 {
178 	TEE_Result res = TEE_SUCCESS;
179 	vaddr_t va = 0;
180 	uint32_t flags = PTA_SYSTEM_MAP_FLAG_SHAREABLE;
181 
182 	res = sys_open_ta_bin(&elf->uuid, &elf->handle);
183 	if (res)
184 		err(res, "sys_open_ta_bin(%pUl)", (void *)&elf->uuid);
185 
186 	/*
187 	 * Map it read-only executable when we're loading a library where
188 	 * the ELF header is included in a load segment.
189 	 */
190 	if (!elf->is_main)
191 		flags |= PTA_SYSTEM_MAP_FLAG_EXECUTABLE;
192 	res = sys_map_ta_bin(&va, SMALL_PAGE_SIZE, flags, elf->handle, 0, 0, 0);
193 	if (res)
194 		err(res, "sys_map_ta_bin");
195 	elf->ehdr_addr = va;
196 	if (!elf->is_main) {
197 		elf->load_addr = va;
198 		elf->max_addr = va + SMALL_PAGE_SIZE;
199 		elf->max_offs = SMALL_PAGE_SIZE;
200 	}
201 
202 	if (!IS_ELF(*(Elf32_Ehdr *)va))
203 		err(TEE_ERROR_BAD_FORMAT, "TA is not an ELF");
204 
205 	res = e32_parse_ehdr(elf, (void *)va);
206 	if (res == TEE_ERROR_BAD_FORMAT)
207 		res = e64_parse_ehdr(elf, (void *)va);
208 	if (res)
209 		err(res, "Cannot parse ELF");
210 
211 	if (elf->e_phoff + elf->e_phnum * elf->e_phentsize > SMALL_PAGE_SIZE)
212 		err(TEE_ERROR_NOT_SUPPORTED, "Cannot read program headers");
213 
214 	elf->phdr = (void *)(va + elf->e_phoff);
215 }
216 
217 static size_t roundup(size_t v)
218 {
219 	return ROUNDUP(v, SMALL_PAGE_SIZE);
220 }
221 
222 static size_t rounddown(size_t v)
223 {
224 	return ROUNDDOWN(v, SMALL_PAGE_SIZE);
225 }
226 
227 static void add_segment(struct ta_elf *elf, size_t offset, size_t vaddr,
228 			size_t filesz, size_t memsz, size_t flags, size_t align)
229 {
230 	struct segment *seg = calloc(1, sizeof(*seg));
231 
232 	if (!seg)
233 		err(TEE_ERROR_OUT_OF_MEMORY, "calloc");
234 
235 	seg->offset = offset;
236 	seg->vaddr = vaddr;
237 	seg->filesz = filesz;
238 	seg->memsz = memsz;
239 	seg->flags = flags;
240 	seg->align = align;
241 
242 	TAILQ_INSERT_TAIL(&elf->segs, seg, link);
243 }
244 
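/*
 * Collect all PT_LOAD program headers as segments on elf->segs. For
 * 32-bit ELFs also record the PT_ARM_EXIDX exception index table used
 * for stack unwinding.
 */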
245 static void parse_load_segments(struct ta_elf *elf)
246 {
247 	size_t n = 0;
248 
249 	if (elf->is_32bit) {
250 		Elf32_Phdr *phdr = elf->phdr;
251 
252 		for (n = 0; n < elf->e_phnum; n++)
253 			if (phdr[n].p_type == PT_LOAD) {
254 				add_segment(elf, phdr[n].p_offset,
255 					    phdr[n].p_vaddr, phdr[n].p_filesz,
256 					    phdr[n].p_memsz, phdr[n].p_flags,
257 					    phdr[n].p_align);
258 			} else if (phdr[n].p_type == PT_ARM_EXIDX) {
259 				elf->exidx_start = phdr[n].p_vaddr;
260 				elf->exidx_size = phdr[n].p_filesz;
261 			}
262 	} else {
263 		Elf64_Phdr *phdr = elf->phdr;
264 
265 		for (n = 0; n < elf->e_phnum; n++)
266 			if (phdr[n].p_type == PT_LOAD)
267 				add_segment(elf, phdr[n].p_offset,
268 					    phdr[n].p_vaddr, phdr[n].p_filesz,
269 					    phdr[n].p_memsz, phdr[n].p_flags,
270 					    phdr[n].p_align);
271 	}
272 }
273 
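/*
 * Copy the file data of a remapped writeable segment into place: first
 * whatever is already available in the mapped part of the binary, then
 * the remainder directly from the TA binary.
 */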
274 static void copy_remapped_to(struct ta_elf *elf, const struct segment *seg)
275 {
276 	uint8_t *dst = (void *)(seg->vaddr + elf->load_addr);
277 	size_t n = 0;
278 	size_t offs = seg->offset;
279 	size_t num_bytes = seg->filesz;
280 
281 	if (offs < elf->max_offs) {
282 		n = MIN(elf->max_offs - offs, num_bytes);
283 		memcpy(dst, (void *)(elf->max_addr + offs - elf->max_offs), n);
284 		dst += n;
285 		offs += n;
286 		num_bytes -= n;
287 	}
288 
289 	if (num_bytes) {
290 		TEE_Result res = sys_copy_from_ta_bin(dst, num_bytes,
291 						      elf->handle, offs);
292 
293 		if (res)
294 			err(res, "sys_copy_from_ta_bin");
295 		elf->max_offs += offs;
296 	}
297 }
298 
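/*
 * Sanity check the segments and align them to page boundaries. Segments
 * sharing a page with the previous segment are either merged (when the
 * virtual addresses overlap) or flagged for remapping as writeable
 * (when only the file offsets overlap).
 */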
299 static void adjust_segments(struct ta_elf *elf)
300 {
301 	struct segment *seg = NULL;
302 	struct segment *prev_seg = NULL;
303 	size_t prev_end_addr = 0;
304 	size_t align = 0;
305 	size_t mask = 0;
306 
307 	/* Sanity check */
308 	TAILQ_FOREACH(seg, &elf->segs, link) {
309 		size_t dummy __maybe_unused = 0;
310 
311 		assert(seg->align >= SMALL_PAGE_SIZE);
312 		assert(!ADD_OVERFLOW(seg->vaddr, seg->memsz, &dummy));
313 		assert(seg->filesz <= seg->memsz);
314 		assert((seg->offset & SMALL_PAGE_MASK) ==
315 		       (seg->vaddr & SMALL_PAGE_MASK));
316 
317 		prev_seg = TAILQ_PREV(seg, segment_head, link);
318 		if (prev_seg) {
319 			assert(seg->vaddr >= prev_seg->vaddr + prev_seg->memsz);
320 			assert(seg->offset >=
321 			       prev_seg->offset + prev_seg->filesz);
322 		}
323 		if (!align)
324 			align = seg->align;
325 		assert(align == seg->align);
326 	}
327 
328 	mask = align - 1;
329 
330 	seg = TAILQ_FIRST(&elf->segs);
331 	if (seg)
332 		seg = TAILQ_NEXT(seg, link);
333 	while (seg) {
334 		prev_seg = TAILQ_PREV(seg, segment_head, link);
335 		prev_end_addr = prev_seg->vaddr + prev_seg->memsz;
336 
337 		/*
338 		 * This segment may overlap with the last "page" in the
339 		 * previous segment in two different ways:
340 		 * 1. Virtual address (and offset) overlaps =>
341 		 *    Permissions need to be merged. The offset must have
342 		 *    the same SMALL_PAGE_MASK bits as the vaddr, and both
343 		 *    must line up with the previous segment.
344 		 *
345 		 * 2. Only offset overlaps =>
346 		 *    The same page in the ELF is mapped at two different
347 		 *    virtual addresses. As a limitation this segment must
348 		 *    be mapped as writeable.
349 		 */
350 
351 		/* Case 1. */
352 		if (rounddown(seg->vaddr) < prev_end_addr) {
353 			assert((seg->vaddr & mask) == (seg->offset & mask));
354 			assert(prev_seg->memsz == prev_seg->filesz);
355 
356 			/*
357 			 * Merge the segments and their permissions.
358 			 * Note that there may be a small hole between the
359 			 * two segments.
360 			 */
361 			prev_seg->filesz = seg->vaddr + seg->filesz -
362 					   prev_seg->vaddr;
363 			prev_seg->memsz = seg->vaddr + seg->memsz -
364 					   prev_seg->vaddr;
365 			prev_seg->flags |= seg->flags;
366 
367 			TAILQ_REMOVE(&elf->segs, seg, link);
368 			free(seg);
369 			seg = TAILQ_NEXT(prev_seg, link);
370 			continue;
371 		}
372 
373 		/* Case 2. */
374 		if ((seg->offset & mask) &&
375 		    rounddown(seg->offset) <
376 		    (prev_seg->offset + prev_seg->filesz)) {
377 
378 			assert(seg->flags & PF_W);
379 			seg->remapped_writeable = true;
380 		}
381 
382 		/*
383 		 * No overlap, but we may need to align address, offset and
384 		 * size.
385 		 */
386 		seg->filesz += seg->vaddr - rounddown(seg->vaddr);
387 		seg->memsz += seg->vaddr - rounddown(seg->vaddr);
388 		seg->vaddr = rounddown(seg->vaddr);
389 		seg->offset = rounddown(seg->offset);
390 		seg = TAILQ_NEXT(seg, link);
391 	}
392 
393 }
394 
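/*
 * Map the segments of a legacy TA. Everything is initially mapped as
 * zero-initialized writeable memory and then filled from the TA binary;
 * the final protections are applied later by ta_elf_finalize_mappings().
 */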
395 static void populate_segments_legacy(struct ta_elf *elf)
396 {
397 	TEE_Result res = TEE_SUCCESS;
398 	struct segment *seg = NULL;
399 	vaddr_t va = 0;
400 
401 	TAILQ_FOREACH(seg, &elf->segs, link) {
402 		struct segment *last_seg = TAILQ_LAST(&elf->segs, segment_head);
403 		size_t pad_end = roundup(last_seg->vaddr + last_seg->memsz -
404 					 seg->vaddr - seg->memsz);
405 		size_t num_bytes = roundup(seg->memsz);
406 
407 		if (!elf->load_addr)
408 			va = 0;
409 		else
410 			va = seg->vaddr + elf->load_addr;
411 
412 
413 		if (!(seg->flags & PF_R))
414 			err(TEE_ERROR_NOT_SUPPORTED,
415 			    "Segment must be readable");
416 
417 		res = sys_map_zi(num_bytes, 0, &va, 0, pad_end);
418 		if (res)
419 			err(res, "sys_map_zi");
420 		res = sys_copy_from_ta_bin((void *)va, seg->filesz,
421 					   elf->handle, seg->offset);
422 		if (res)
423 			err(res, "sys_copy_from_ta_bin");
424 
425 		if (!elf->load_addr)
426 			elf->load_addr = va;
427 		elf->max_addr = va + num_bytes;
428 		elf->max_offs = seg->offset + seg->filesz;
429 	}
430 }
431 
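/*
 * Map the segments of a non-legacy ELF with their final permissions.
 * Read-only segments are mapped directly from the TA binary while
 * writeable segments are mapped as zero-initialized memory and filled
 * by copying. Segments overlapping the already mapped first page of a
 * shared library are trimmed or skipped as needed.
 */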
432 static void populate_segments(struct ta_elf *elf)
433 {
434 	TEE_Result res = TEE_SUCCESS;
435 	struct segment *seg = NULL;
436 	vaddr_t va = 0;
437 
438 	TAILQ_FOREACH(seg, &elf->segs, link) {
439 		struct segment *last_seg = TAILQ_LAST(&elf->segs, segment_head);
440 		size_t pad_end = roundup(last_seg->vaddr + last_seg->memsz -
441 					 seg->vaddr - seg->memsz);
442 
443 		if (seg->remapped_writeable) {
444 			size_t num_bytes = roundup(seg->vaddr + seg->memsz) -
445 					   rounddown(seg->vaddr);
446 
447 			assert(elf->load_addr);
448 			va = rounddown(elf->load_addr + seg->vaddr);
449 			assert(va >= elf->max_addr);
450 			res = sys_map_zi(num_bytes, 0, &va, 0, pad_end);
451 			if (res)
452 				err(res, "sys_map_zi");
453 
454 			copy_remapped_to(elf, seg);
455 			elf->max_addr = va + num_bytes;
456 		} else {
457 			uint32_t flags = 0;
458 			size_t filesz = seg->filesz;
459 			size_t memsz = seg->memsz;
460 			size_t offset = seg->offset;
461 			size_t vaddr = seg->vaddr;
462 
463 			if (offset < elf->max_offs) {
464 				/*
465 				 * We're in a load segment which overlaps
466 				 * with (or is covered by) the first page
467 				 * of a shared library.
468 				 */
469 				if (vaddr + filesz < SMALL_PAGE_SIZE) {
470 					size_t num_bytes = 0;
471 
472 					/*
473 					 * If this segment is completely
474 					 * covered, take next.
475 					 */
476 					if (vaddr + memsz <= SMALL_PAGE_SIZE)
477 						continue;
478 
479 					/*
480 					 * All data of the segment is
481 					 * loaded, but we need to zero
482 					 * extend it.
483 					 */
484 					va = elf->max_addr;
485 					num_bytes = roundup(vaddr + memsz) -
486 						    roundup(vaddr) -
487 						    SMALL_PAGE_SIZE;
488 					assert(num_bytes);
489 					res = sys_map_zi(num_bytes, 0, &va, 0,
490 							 0);
491 					if (res)
492 						err(res, "sys_map_zi");
493 					elf->max_addr = roundup(va + num_bytes);
494 					continue;
495 				}
496 
497 				/* Partial overlap, remove the first page. */
498 				vaddr += SMALL_PAGE_SIZE;
499 				filesz -= SMALL_PAGE_SIZE;
500 				memsz -= SMALL_PAGE_SIZE;
501 				offset += SMALL_PAGE_SIZE;
502 			}
503 
504 			if (!elf->load_addr)
505 				va = 0;
506 			else
507 				va = vaddr + elf->load_addr;
508 
509 			if (seg->flags & PF_W)
510 				flags |= PTA_SYSTEM_MAP_FLAG_WRITEABLE;
511 			else
512 				flags |= PTA_SYSTEM_MAP_FLAG_SHAREABLE;
513 			if (seg->flags & PF_X)
514 				flags |= PTA_SYSTEM_MAP_FLAG_EXECUTABLE;
515 			if (!(seg->flags & PF_R))
516 				err(TEE_ERROR_NOT_SUPPORTED,
517 				    "Segment must be readable");
518 			if (flags & PTA_SYSTEM_MAP_FLAG_WRITEABLE) {
519 				res = sys_map_zi(memsz, 0, &va, 0, pad_end);
520 				if (res)
521 					err(res, "sys_map_zi");
522 				res = sys_copy_from_ta_bin((void *)va, filesz,
523 							   elf->handle, offset);
524 				if (res)
525 					err(res, "sys_copy_from_ta_bin");
526 			} else {
527 				res = sys_map_ta_bin(&va, filesz, flags,
528 						     elf->handle, offset,
529 						     0, pad_end);
530 				if (res)
531 					err(res, "sys_map_ta_bin");
532 			}
533 
534 			if (!elf->load_addr)
535 				elf->load_addr = va;
536 			elf->max_addr = roundup(va + filesz);
537 			elf->max_offs += filesz;
538 		}
539 	}
540 }
541 
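/*
 * Parse, adjust and map all load segments. If the ELF header itself is
 * part of the first load segment (a shared library) the initial mapping
 * is moved with sys_remap() to a region large enough for the whole ELF
 * before the remaining segments are populated.
 */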
542 static void map_segments(struct ta_elf *elf)
543 {
544 	TEE_Result res = TEE_SUCCESS;
545 
546 	parse_load_segments(elf);
547 	adjust_segments(elf);
548 	if (TAILQ_FIRST(&elf->segs)->offset < SMALL_PAGE_SIZE) {
549 		vaddr_t va = 0;
550 		size_t sz = elf->max_addr - elf->load_addr;
551 		struct segment *seg = TAILQ_LAST(&elf->segs, segment_head);
552 
553 		/*
554 		 * We're loading a library; if that ever changes, other
555 		 * parts of the code need to be updated too.
556 		 */
557 		assert(!elf->is_main);
558 
559 		/*
560 		 * Now that we know how much virtual memory is needed, move
561 		 * the already mapped part to a location which can
562 		 * accommodate us.
563 		 */
564 		res = sys_remap(elf->load_addr, &va, sz, 0,
565 				roundup(seg->vaddr + seg->memsz));
566 		if (res)
567 			err(res, "sys_remap");
568 		elf->ehdr_addr = va;
569 		elf->load_addr = va;
570 		elf->max_addr = va + sz;
571 		elf->phdr = (void *)(va + elf->e_phoff);
572 	}
573 	if (elf->is_legacy)
574 		populate_segments_legacy(elf);
575 	else
576 		populate_segments(elf);
577 }
578 
579 static int hex(char c)
580 {
581 	char lc = tolower(c);
582 
583 	if (isdigit(lc))
584 		return lc - '0';
585 	if (isxdigit(lc))
586 		return lc - 'a' + 10;
587 	return -1;
588 }
589 
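/*
 * Parse @nchars hexadecimal characters from @s and return the value.
 * *@res is set to TEE_SUCCESS on success, or TEE_ERROR_BAD_FORMAT if a
 * non-hexadecimal character is found.
 */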
590 static uint32_t parse_hex(const char *s, size_t nchars, uint32_t *res)
591 {
592 	uint32_t v = 0;
593 	size_t n = 0;
594 	int c = 0;
595 
596 	for (n = 0; n < nchars; n++) {
597 		c = hex(s[n]);
598 		if (c < 0) {
599 			*res = TEE_ERROR_BAD_FORMAT;
600 			goto out;
601 		}
602 		v = (v << 4) + c;
603 	}
604 	*res = TEE_SUCCESS;
605 out:
606 	return v;
607 }
608 
609 /*
610  * Convert a UUID string @s into a TEE_UUID @uuid
611  * Expected format for @s is: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
612  * 'x' being any hexadecimal digit (0-9a-fA-F)
613  */
614 static TEE_Result parse_uuid(const char *s, TEE_UUID *uuid)
615 {
616 	TEE_Result res = TEE_SUCCESS;
617 	TEE_UUID u = { 0 };
618 	const char *p = s;
619 	size_t i;
620 
621 	if (strlen(p) != 36)
622 		return TEE_ERROR_BAD_FORMAT;
623 	if (p[8] != '-' || p[13] != '-' || p[18] != '-' || p[23] != '-')
624 		return TEE_ERROR_BAD_FORMAT;
625 
626 	u.timeLow = parse_hex(p, 8, &res);
627 	if (res)
628 		goto out;
629 	p += 9;
630 	u.timeMid = parse_hex(p, 4, &res);
631 	if (res)
632 		goto out;
633 	p += 5;
634 	u.timeHiAndVersion = parse_hex(p, 4, &res);
635 	if (res)
636 		goto out;
637 	p += 5;
638 	for (i = 0; i < 8; i++) {
639 		u.clockSeqAndNode[i] = parse_hex(p, 2, &res);
640 		if (res)
641 			goto out;
642 		if (i == 1)
643 			p += 3;
644 		else
645 			p += 2;
646 	}
647 	*uuid = u;
648 out:
649 	return res;
650 }
651 
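/*
 * If @type is PT_DYNAMIC, scan the dynamic section for DT_NEEDED
 * entries. Each entry names a dependency by UUID string, which is
 * parsed and queued for loading with queue_elf().
 */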
652 static void add_deps_from_segment(struct ta_elf *elf, unsigned int type,
653 				  vaddr_t addr, size_t memsz)
654 {
655 	size_t dyn_entsize = 0;
656 	size_t num_dyns = 0;
657 	size_t n = 0;
658 	unsigned int tag = 0;
659 	size_t val = 0;
660 	TEE_UUID uuid = { };
661 	char *str_tab = NULL;
662 
663 	if (type != PT_DYNAMIC)
664 		return;
665 
666 	if (elf->is_32bit)
667 		dyn_entsize = sizeof(Elf32_Dyn);
668 	else
669 		dyn_entsize = sizeof(Elf64_Dyn);
670 
671 	assert(!(memsz % dyn_entsize));
672 	num_dyns = memsz / dyn_entsize;
673 
674 	for (n = 0; n < num_dyns; n++) {
675 		read_dyn(elf, addr, n, &tag, &val);
676 		if (tag == DT_STRTAB) {
677 			str_tab = (char *)(val + elf->load_addr);
678 			break;
679 		}
680 	}
681 
682 	for (n = 0; n < num_dyns; n++) {
683 		read_dyn(elf, addr, n, &tag, &val);
684 		if (tag != DT_NEEDED)
685 			continue;
686 		parse_uuid(str_tab + val, &uuid);
687 		queue_elf(&uuid);
688 	}
689 }
690 
691 static void add_dependencies(struct ta_elf *elf)
692 {
693 	size_t n = 0;
694 
695 	if (elf->is_32bit) {
696 		Elf32_Phdr *phdr = elf->phdr;
697 
698 		for (n = 0; n < elf->e_phnum; n++)
699 			add_deps_from_segment(elf, phdr[n].p_type,
700 					      phdr[n].p_vaddr, phdr[n].p_memsz);
701 	} else {
702 		Elf64_Phdr *phdr = elf->phdr;
703 
704 		for (n = 0; n < elf->e_phnum; n++)
705 			add_deps_from_segment(elf, phdr[n].p_type,
706 					      phdr[n].p_vaddr, phdr[n].p_memsz);
707 	}
708 }
709 
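/*
 * Copy the section header table into a heap buffer, partly from the
 * already mapped first page if the table starts there and the rest
 * from the TA binary.
 */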
710 static void copy_section_headers(struct ta_elf *elf)
711 {
712 	TEE_Result res = TEE_SUCCESS;
713 	size_t sz = elf->e_shnum * elf->e_shentsize;
714 	size_t offs = 0;
715 
716 	elf->shdr = malloc(sz);
717 	if (!elf->shdr)
718 		err(TEE_ERROR_OUT_OF_MEMORY, "malloc");
719 
720 	/*
721 	 * We're assuming that section headers come after the load segments,
722 	 * but if it's a very small dynamically linked library the section
723 	 * headers can still end up (partially?) in the first mapped page.
724 	 */
725 	if (elf->e_shoff < SMALL_PAGE_SIZE) {
726 		assert(!elf->is_main);
727 		offs = MIN(SMALL_PAGE_SIZE - elf->e_shoff, sz);
728 		memcpy(elf->shdr, (void *)(elf->load_addr + elf->e_shoff),
729 		       offs);
730 	}
731 
732 	if (offs < sz) {
733 		res = sys_copy_from_ta_bin((uint8_t *)elf->shdr + offs,
734 					   sz - offs, elf->handle,
735 					   elf->e_shoff + offs);
736 		if (res)
737 			err(res, "sys_copy_from_ta_bin");
738 	}
739 }
740 
741 static void close_handle(struct ta_elf *elf)
742 {
743 	TEE_Result res = sys_close_ta_bin(elf->handle);
744 
745 	if (res)
746 		err(res, "sys_close_ta_bin");
747 	elf->handle = -1;
748 }
749 
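/*
 * Load the main TA ELF: map its segments, queue its dependencies, save
 * the section headers and symbol table, determine the entry point
 * (ta_head for legacy TAs, e_entry otherwise) and map the stack.
 */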
750 void ta_elf_load_main(const TEE_UUID *uuid, uint32_t *is_32bit,
751 		      uint64_t *entry, uint64_t *sp, uint32_t *ta_flags)
752 {
753 	struct ta_elf *elf = queue_elf(uuid);
754 	struct ta_head *head;
755 	vaddr_t va = 0;
756 	TEE_Result res = TEE_SUCCESS;
757 
758 	assert(elf);
759 	elf->is_main = true;
760 
761 	init_elf(elf);
762 
763 	/*
764 	 * Legacy TAs don't set the entry point; instead it's provided in
765 	 * ta_head. If the entry point isn't set explicitly, the linker
766 	 * sets it to the start of the first executable section. Since
767 	 * ta_head always comes first in a legacy TA, the entry point of
768 	 * such a TA ends up at 0x20.
769 	 *
770 	 * NB, every TA built before commit a73b5878c89d ("Replace
771 	 * ta_head.entry with elf entry") is considered a legacy TA by
772 	 * ldelf.
773 	 */
774 	if (elf->e_entry == sizeof(*head))
775 		elf->is_legacy = true;
776 
777 	map_segments(elf);
778 	add_dependencies(elf);
779 	copy_section_headers(elf);
780 	save_symtab(elf);
781 	close_handle(elf);
782 
783 	head = (struct ta_head *)elf->load_addr;
784 
785 	*is_32bit = elf->is_32bit;
786 	if (elf->is_legacy) {
787 		assert(head->depr_entry != UINT64_MAX);
788 		*entry = head->depr_entry + elf->load_addr;
789 	} else {
790 		assert(head->depr_entry == UINT64_MAX);
791 		*entry = elf->e_entry + elf->load_addr;
792 	}
793 
794 	res = sys_map_zi(head->stack_size, 0, &va, 0, 0);
795 	if (res)
796 		err(res, "sys_map_zi stack");
797 
798 	if (head->flags & ~TA_FLAGS_MASK)
799 		err(TEE_ERROR_BAD_FORMAT, "Invalid TA flag(s) %#"PRIx32,
800 		    head->flags & ~TA_FLAGS_MASK);
801 
802 	*ta_flags = head->flags;
803 	*sp = va + head->stack_size;
804 	ta_stack = va;
805 	ta_stack_size = head->stack_size;
806 }
807 
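/*
 * Load a dependency (shared library) queued by add_dependencies(),
 * verifying that it matches the bitness of the main TA.
 */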
808 void ta_elf_load_dependency(struct ta_elf *elf, bool is_32bit)
809 {
810 	if (elf->is_main)
811 		return;
812 
813 	init_elf(elf);
814 	if (elf->is_32bit != is_32bit)
815 		err(TEE_ERROR_BAD_FORMAT, "ELF %pUl is %sbit (expected %sbit)",
816 		    (void *)&elf->uuid, elf->is_32bit ? "32" : "64",
817 		    is_32bit ? "32" : "64");
818 
819 	map_segments(elf);
820 	add_dependencies(elf);
821 	copy_section_headers(elf);
822 	save_symtab(elf);
823 	close_handle(elf);
824 }
825 
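/*
 * For legacy TAs all segments were initially mapped writeable, so apply
 * the final protection flags from the program headers now that the
 * binary has been fully copied in.
 */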
826 void ta_elf_finalize_mappings(struct ta_elf *elf)
827 {
828 	TEE_Result res = TEE_SUCCESS;
829 	struct segment *seg = NULL;
830 
831 	if (!elf->is_legacy)
832 		return;
833 
834 	TAILQ_FOREACH(seg, &elf->segs, link) {
835 		vaddr_t va = elf->load_addr + seg->vaddr;
836 		uint32_t flags = 0;
837 
838 		if (seg->flags & PF_W)
839 			flags |= PTA_SYSTEM_MAP_FLAG_WRITEABLE;
840 		if (seg->flags & PF_X)
841 			flags |= PTA_SYSTEM_MAP_FLAG_EXECUTABLE;
842 
843 		res = sys_set_prot(va, seg->memsz, flags);
844 		if (res)
845 			err(res, "sys_set_prot");
846 	}
847 }
848 
849 static void print_seg(size_t idx __maybe_unused, int elf_idx __maybe_unused,
850 		      vaddr_t va __maybe_unused, paddr_t pa __maybe_unused,
851 		      size_t sz __maybe_unused, uint32_t flags)
852 {
853 	int width __maybe_unused = 8;
854 	char desc[14] __maybe_unused = "";
855 	char flags_str[] __maybe_unused = "----";
856 
857 	if (elf_idx > -1) {
858 		snprintf(desc, sizeof(desc), " [%d]", elf_idx);
859 	} else {
860 		if (flags & DUMP_MAP_EPHEM)
861 			snprintf(desc, sizeof(desc), " (param)");
862 		if (flags & DUMP_MAP_LDELF)
863 			snprintf(desc, sizeof(desc), " (ldelf)");
864 		if (va == ta_stack)
865 			snprintf(desc, sizeof(desc), " (stack)");
866 	}
867 
868 	if (flags & DUMP_MAP_READ)
869 		flags_str[0] = 'r';
870 	if (flags & DUMP_MAP_WRITE)
871 		flags_str[1] = 'w';
872 	if (flags & DUMP_MAP_EXEC)
873 		flags_str[2] = 'x';
874 	if (flags & DUMP_MAP_SECURE)
875 		flags_str[3] = 's';
876 
877 	EMSG_RAW("region %2zu: va 0x%0*"PRIxVA" pa 0x%0*"PRIxPA" size 0x%06zx flags %s%s",
878 		 idx, width, va, width, pa, sz, flags_str, desc);
879 }
880 
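/*
 * Print all memory regions of the TA: ELF segments interleaved with the
 * other mappings (parameters, stack, ldelf itself) in virtual address
 * order, followed by a list of the loaded ELFs and their load addresses.
 */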
881 void ta_elf_print_mappings(struct ta_elf_queue *elf_queue, size_t num_maps,
882 			   struct dump_map *maps, vaddr_t mpool_base)
883 {
884 	struct segment *seg = NULL;
885 	struct ta_elf *elf = NULL;
886 	size_t elf_idx = 0;
887 	size_t idx = 0;
888 	size_t map_idx = 0;
889 
890 	/*
891 	 * Loop over all segments and maps, printing them in virtual
892 	 * address order. A segment has priority if the virtual address
893 	 * is present in both a map and a segment.
894 	 */
895 	elf = TAILQ_FIRST(elf_queue);
896 	if (elf)
897 		seg = TAILQ_FIRST(&elf->segs);
898 	while (true) {
899 		vaddr_t va = -1;
900 		size_t sz = 0;
901 		uint32_t flags = DUMP_MAP_SECURE;
902 		size_t offs = 0;
903 
904 		if (seg) {
905 			va = rounddown(seg->vaddr + elf->load_addr);
906 			sz = roundup(seg->vaddr + seg->memsz) -
907 				     rounddown(seg->vaddr);
908 		}
909 
910 		while (map_idx < num_maps && maps[map_idx].va <= va) {
911 			uint32_t f = 0;
912 
913 			/* If there's a match, it should be the same map */
914 			if (maps[map_idx].va == va) {
915 				/*
916 				 * In shared libraries the first page is
917 				 * mapped separately with the rest of that
918 				 * segment following back to back in a
919 				 * separate entry.
920 				 */
921 				if (map_idx + 1 < num_maps &&
922 				    maps[map_idx].sz == SMALL_PAGE_SIZE) {
923 					vaddr_t next_va = maps[map_idx].va +
924 							  maps[map_idx].sz;
925 					size_t comb_sz = maps[map_idx].sz +
926 							 maps[map_idx + 1].sz;
927 
928 					if (next_va == maps[map_idx + 1].va &&
929 					    comb_sz == sz &&
930 					    maps[map_idx].flags ==
931 					    maps[map_idx + 1].flags) {
932 						/* Skip this and next entry */
933 						map_idx += 2;
934 						continue;
935 					}
936 				}
937 				assert(maps[map_idx].sz == sz);
938 			} else if (maps[map_idx].va < va) {
939 				if (maps[map_idx].va == mpool_base)
940 					f |= DUMP_MAP_LDELF;
941 				print_seg(idx, -1, maps[map_idx].va,
942 					  maps[map_idx].pa, maps[map_idx].sz,
943 					  maps[map_idx].flags | f);
944 				idx++;
945 			}
946 			map_idx++;
947 		}
948 
949 		if (!seg)
950 			break;
951 
952 		offs = rounddown(seg->offset);
953 		if (seg->flags & PF_R)
954 			flags |= DUMP_MAP_READ;
955 		if (seg->flags & PF_W)
956 			flags |= DUMP_MAP_WRITE;
957 		if (seg->flags & PF_X)
958 			flags |= DUMP_MAP_EXEC;
959 
960 		print_seg(idx, elf_idx, va, offs, sz, flags);
961 		idx++;
962 
963 		seg = TAILQ_NEXT(seg, link);
964 		if (!seg) {
965 			elf = TAILQ_NEXT(elf, link);
966 			if (elf)
967 				seg = TAILQ_FIRST(&elf->segs);
968 			elf_idx++;
969 		}
970 	};
971 
972 	elf_idx = 0;
973 	TAILQ_FOREACH(elf, elf_queue, link) {
974 		EMSG_RAW(" [%zu] %pUl @ 0x%0*" PRIxVA,
975 			 elf_idx, (void *)&elf->uuid, 8, elf->load_addr);
976 		elf_idx++;
977 	}
978 }
979 
980 #ifdef CFG_UNWIND
981 void ta_elf_stack_trace_a32(uint32_t regs[16])
982 {
983 	struct unwind_state_arm32 state = { };
984 
985 	memcpy(state.registers, regs, sizeof(state.registers));
986 	print_stack_arm32(&state, ta_stack, ta_stack_size);
987 }
988 
989 void ta_elf_stack_trace_a64(uint64_t fp, uint64_t sp, uint64_t pc)
990 {
991 	struct unwind_state_arm64 state = { .fp = fp, .sp = sp, .pc = pc };
992 
993 	print_stack_arm64(&state, ta_stack, ta_stack_size);
994 }
995 #endif
996