xref: /optee_os/ldelf/ta_elf.c (revision 35bf26309c2d88cc9b291a6720e2ac3af0c487b8)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2019, Linaro Limited
4  */
5 
6 #include <assert.h>
7 #include <ctype.h>
8 #include <elf32.h>
9 #include <elf64.h>
10 #include <elf_common.h>
11 #include <ldelf.h>
12 #include <pta_system.h>
13 #include <stdio.h>
14 #include <stdlib.h>
15 #include <string_ext.h>
16 #include <string.h>
17 #include <tee_api_types.h>
18 #include <user_ta_header.h>
19 #include <utee_syscalls.h>
20 
21 #include "sys.h"
22 #include "ta_elf.h"
23 #include "unwind.h"
24 
25 static vaddr_t ta_stack;
26 static vaddr_t ta_stack_size;
27 
28 struct ta_elf_queue main_elf_queue = TAILQ_HEAD_INITIALIZER(main_elf_queue);
29 
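/*
 * Return a new ta_elf queued for the given UUID, or NULL if an ELF with
 * that UUID is already queued. The new entry is appended to
 * main_elf_queue with an empty segment list.
 */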
30 static struct ta_elf *queue_elf(const TEE_UUID *uuid)
31 {
32 	struct ta_elf *elf = NULL;
33 
34 	TAILQ_FOREACH(elf, &main_elf_queue, link)
35 		if (!memcmp(uuid, &elf->uuid, sizeof(*uuid)))
36 			return NULL;
37 
38 	elf = calloc(1, sizeof(*elf));
39 	if (!elf)
40 		err(TEE_ERROR_OUT_OF_MEMORY, "calloc");
41 
42 	TAILQ_INIT(&elf->segs);
43 
44 	elf->uuid = *uuid;
45 	TAILQ_INSERT_TAIL(&main_elf_queue, elf, link);
46 	return elf;
47 }
48 
49 static TEE_Result e32_parse_ehdr(struct ta_elf *elf, Elf32_Ehdr *ehdr)
50 {
51 	if (ehdr->e_ident[EI_VERSION] != EV_CURRENT ||
52 	    ehdr->e_ident[EI_CLASS] != ELFCLASS32 ||
53 	    ehdr->e_ident[EI_DATA] != ELFDATA2LSB ||
54 	    ehdr->e_ident[EI_OSABI] != ELFOSABI_NONE ||
55 	    ehdr->e_type != ET_DYN || ehdr->e_machine != EM_ARM ||
56 	    (ehdr->e_flags & EF_ARM_ABIMASK) != EF_ARM_ABI_VERSION ||
57 #ifndef CFG_WITH_VFP
58 	    (ehdr->e_flags & EF_ARM_ABI_FLOAT_HARD) ||
59 #endif
60 	    ehdr->e_phentsize != sizeof(Elf32_Phdr) ||
61 	    ehdr->e_shentsize != sizeof(Elf32_Shdr))
62 		return TEE_ERROR_BAD_FORMAT;
63 
64 	elf->is_32bit = true;
65 	elf->e_entry = ehdr->e_entry;
66 	elf->e_phoff = ehdr->e_phoff;
67 	elf->e_shoff = ehdr->e_shoff;
68 	elf->e_phnum = ehdr->e_phnum;
69 	elf->e_shnum = ehdr->e_shnum;
70 	elf->e_phentsize = ehdr->e_phentsize;
71 	elf->e_shentsize = ehdr->e_shentsize;
72 
73 	return TEE_SUCCESS;
74 }
75 
76 #ifdef ARM64
77 static TEE_Result e64_parse_ehdr(struct ta_elf *elf, Elf64_Ehdr *ehdr)
78 {
79 	if (ehdr->e_ident[EI_VERSION] != EV_CURRENT ||
80 	    ehdr->e_ident[EI_CLASS] != ELFCLASS64 ||
81 	    ehdr->e_ident[EI_DATA] != ELFDATA2LSB ||
82 	    ehdr->e_ident[EI_OSABI] != ELFOSABI_NONE ||
83 	    ehdr->e_type != ET_DYN || ehdr->e_machine != EM_AARCH64 ||
84 	    ehdr->e_flags || ehdr->e_phentsize != sizeof(Elf64_Phdr) ||
85 	    ehdr->e_shentsize != sizeof(Elf64_Shdr))
86 		return TEE_ERROR_BAD_FORMAT;
87 
88 
89 	elf->is_32bit = false;
90 	elf->e_entry = ehdr->e_entry;
91 	elf->e_phoff = ehdr->e_phoff;
92 	elf->e_shoff = ehdr->e_shoff;
93 	elf->e_phnum = ehdr->e_phnum;
94 	elf->e_shnum = ehdr->e_shnum;
95 	elf->e_phentsize = ehdr->e_phentsize;
96 	elf->e_shentsize = ehdr->e_shentsize;
97 
98 	return TEE_SUCCESS;
99 }
100 #else /*ARM64*/
101 static TEE_Result e64_parse_ehdr(struct ta_elf *elf __unused,
102 				 Elf64_Ehdr *ehdr __unused)
103 {
104 	return TEE_ERROR_NOT_SUPPORTED;
105 }
106 #endif /*ARM64*/
107 
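/*
 * Read the tag and value of dynamic entry @idx in the PT_DYNAMIC segment
 * at the unrelocated address @addr, for both 32-bit and 64-bit ELFs.
 */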
108 static void read_dyn(struct ta_elf *elf, vaddr_t addr,
109 		     size_t idx, unsigned int *tag, size_t *val)
110 {
111 	if (elf->is_32bit) {
112 		Elf32_Dyn *dyn = (Elf32_Dyn *)(addr + elf->load_addr);
113 
114 		*tag = dyn[idx].d_tag;
115 		*val = dyn[idx].d_un.d_val;
116 	} else {
117 		Elf64_Dyn *dyn = (Elf64_Dyn *)(addr + elf->load_addr);
118 
119 		*tag = dyn[idx].d_tag;
120 		*val = dyn[idx].d_un.d_val;
121 	}
122 }
123 
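/*
 * If @type is PT_DYNAMIC, scan the dynamic entries for DT_HASH and
 * record the relocated address of the hash table in elf->hashtab.
 */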
124 static void save_hashtab_from_segment(struct ta_elf *elf, unsigned int type,
125 				      vaddr_t addr, size_t memsz)
126 {
127 	size_t dyn_entsize = 0;
128 	size_t num_dyns = 0;
129 	size_t n = 0;
130 	unsigned int tag = 0;
131 	size_t val = 0;
132 
133 	if (type != PT_DYNAMIC)
134 		return;
135 
136 	if (elf->is_32bit)
137 		dyn_entsize = sizeof(Elf32_Dyn);
138 	else
139 		dyn_entsize = sizeof(Elf64_Dyn);
140 
141 	assert(!(memsz % dyn_entsize));
142 	num_dyns = memsz / dyn_entsize;
143 
144 	for (n = 0; n < num_dyns; n++) {
145 		read_dyn(elf, addr, n, &tag, &val);
146 		if (tag == DT_HASH) {
147 			elf->hashtab = (void *)(val + elf->load_addr);
148 			break;
149 		}
150 	}
151 }
152 
153 static void save_hashtab(struct ta_elf *elf)
154 {
155 	size_t n = 0;
156 
157 	if (elf->is_32bit) {
158 		Elf32_Phdr *phdr = elf->phdr;
159 
160 		for (n = 0; n < elf->e_phnum; n++)
161 			save_hashtab_from_segment(elf, phdr[n].p_type,
162 						  phdr[n].p_vaddr,
163 						  phdr[n].p_memsz);
164 	} else {
165 		Elf64_Phdr *phdr = elf->phdr;
166 
167 		for (n = 0; n < elf->e_phnum; n++)
168 			save_hashtab_from_segment(elf, phdr[n].p_type,
169 						  phdr[n].p_vaddr,
170 						  phdr[n].p_memsz);
171 	}
172 	assert(elf->hashtab);
173 }
174 
175 static void e32_save_symtab(struct ta_elf *elf, size_t tab_idx)
176 {
177 	Elf32_Shdr *shdr = elf->shdr;
178 	size_t str_idx = shdr[tab_idx].sh_link;
179 
180 	elf->dynsymtab = (void *)(shdr[tab_idx].sh_addr + elf->load_addr);
181 	assert(!(shdr[tab_idx].sh_size % sizeof(Elf32_Sym)));
182 	elf->num_dynsyms = shdr[tab_idx].sh_size / sizeof(Elf32_Sym);
183 
184 	elf->dynstr = (void *)(shdr[str_idx].sh_addr + elf->load_addr);
185 	elf->dynstr_size = shdr[str_idx].sh_size;
186 }
187 
188 static void e64_save_symtab(struct ta_elf *elf, size_t tab_idx)
189 {
190 	Elf64_Shdr *shdr = elf->shdr;
191 	size_t str_idx = shdr[tab_idx].sh_link;
192 
193 	elf->dynsymtab = (void *)(vaddr_t)(shdr[tab_idx].sh_addr +
194 					   elf->load_addr);
195 	assert(!(shdr[tab_idx].sh_size % sizeof(Elf64_Sym)));
196 	elf->num_dynsyms = shdr[tab_idx].sh_size / sizeof(Elf64_Sym);
197 
198 	elf->dynstr = (void *)(vaddr_t)(shdr[str_idx].sh_addr + elf->load_addr);
199 	elf->dynstr_size = shdr[str_idx].sh_size;
200 }
201 
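/*
 * Record the dynamic symbol table (SHT_DYNSYM) and its associated string
 * table from the section headers, then locate the DT_HASH table via the
 * program headers.
 */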
202 static void save_symtab(struct ta_elf *elf)
203 {
204 	size_t n = 0;
205 
206 	if (elf->is_32bit) {
207 		Elf32_Shdr *shdr = elf->shdr;
208 
209 		for (n = 0; n < elf->e_shnum; n++) {
210 			if (shdr[n].sh_type == SHT_DYNSYM) {
211 				e32_save_symtab(elf, n);
212 				break;
213 			}
214 		}
215 	} else {
216 		Elf64_Shdr *shdr = elf->shdr;
217 
218 		for (n = 0; n < elf->e_shnum; n++) {
219 			if (shdr[n].sh_type == SHT_DYNSYM) {
220 				e64_save_symtab(elf, n);
221 				break;
222 			}
223 		}
224 
225 	}
226 
227 	save_hashtab(elf);
228 }
229 
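/*
 * Open the TA binary, map its first page and parse the ELF header
 * (32-bit or 64-bit). The program headers must fit within that first
 * page; elf->phdr is set to point at them.
 */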
230 static void init_elf(struct ta_elf *elf)
231 {
232 	TEE_Result res = TEE_SUCCESS;
233 	vaddr_t va = 0;
234 	uint32_t flags = PTA_SYSTEM_MAP_FLAG_SHAREABLE;
235 
236 	res = sys_open_ta_bin(&elf->uuid, &elf->handle);
237 	if (res)
238 		err(res, "sys_open_ta_bin(%pUl)", (void *)&elf->uuid);
239 
240 	/*
241 	 * Map it read-only executable when we're loading a library where
242 	 * the ELF header is included in a load segment.
243 	 */
244 	if (!elf->is_main)
245 		flags |= PTA_SYSTEM_MAP_FLAG_EXECUTABLE;
246 	res = sys_map_ta_bin(&va, SMALL_PAGE_SIZE, flags, elf->handle, 0, 0, 0);
247 	if (res)
248 		err(res, "sys_map_ta_bin");
249 	elf->ehdr_addr = va;
250 	if (!elf->is_main) {
251 		elf->load_addr = va;
252 		elf->max_addr = va + SMALL_PAGE_SIZE;
253 		elf->max_offs = SMALL_PAGE_SIZE;
254 	}
255 
256 	if (!IS_ELF(*(Elf32_Ehdr *)va))
257 		err(TEE_ERROR_BAD_FORMAT, "TA is not an ELF");
258 
259 	res = e32_parse_ehdr(elf, (void *)va);
260 	if (res == TEE_ERROR_BAD_FORMAT)
261 		res = e64_parse_ehdr(elf, (void *)va);
262 	if (res)
263 		err(res, "Cannot parse ELF");
264 
265 	if (elf->e_phoff + elf->e_phnum * elf->e_phentsize > SMALL_PAGE_SIZE)
266 		err(TEE_ERROR_NOT_SUPPORTED, "Cannot read program headers");
267 
268 	elf->phdr = (void *)(va + elf->e_phoff);
269 }
270 
271 static size_t roundup(size_t v)
272 {
273 	return ROUNDUP(v, SMALL_PAGE_SIZE);
274 }
275 
276 static size_t rounddown(size_t v)
277 {
278 	return ROUNDDOWN(v, SMALL_PAGE_SIZE);
279 }
280 
281 static void add_segment(struct ta_elf *elf, size_t offset, size_t vaddr,
282 			size_t filesz, size_t memsz, size_t flags, size_t align)
283 {
284 	struct segment *seg = calloc(1, sizeof(*seg));
285 
286 	if (!seg)
287 		err(TEE_ERROR_OUT_OF_MEMORY, "calloc");
288 
289 	seg->offset = offset;
290 	seg->vaddr = vaddr;
291 	seg->filesz = filesz;
292 	seg->memsz = memsz;
293 	seg->flags = flags;
294 	seg->align = align;
295 
296 	TAILQ_INSERT_TAIL(&elf->segs, seg, link);
297 }
298 
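/*
 * Collect all PT_LOAD program headers into elf->segs. For 32-bit ELFs
 * the PT_ARM_EXIDX segment (exception index table used for unwinding)
 * is recorded as well.
 */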
299 static void parse_load_segments(struct ta_elf *elf)
300 {
301 	size_t n = 0;
302 
303 	if (elf->is_32bit) {
304 		Elf32_Phdr *phdr = elf->phdr;
305 
306 		for (n = 0; n < elf->e_phnum; n++)
307 			if (phdr[n].p_type == PT_LOAD) {
308 				add_segment(elf, phdr[n].p_offset,
309 					    phdr[n].p_vaddr, phdr[n].p_filesz,
310 					    phdr[n].p_memsz, phdr[n].p_flags,
311 					    phdr[n].p_align);
312 			} else if (phdr[n].p_type == PT_ARM_EXIDX) {
313 				elf->exidx_start = phdr[n].p_vaddr;
314 				elf->exidx_size = phdr[n].p_filesz;
315 			}
316 	} else {
317 		Elf64_Phdr *phdr = elf->phdr;
318 
319 		for (n = 0; n < elf->e_phnum; n++)
320 			if (phdr[n].p_type == PT_LOAD)
321 				add_segment(elf, phdr[n].p_offset,
322 					    phdr[n].p_vaddr, phdr[n].p_filesz,
323 					    phdr[n].p_memsz, phdr[n].p_flags,
324 					    phdr[n].p_align);
325 	}
326 }
327 
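/*
 * Fill a remapped writeable segment with its file content: copy the part
 * already available in the previously mapped pages and read the rest
 * from the TA binary.
 */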
328 static void copy_remapped_to(struct ta_elf *elf, const struct segment *seg)
329 {
330 	uint8_t *dst = (void *)(seg->vaddr + elf->load_addr);
331 	size_t n = 0;
332 	size_t offs = seg->offset;
333 	size_t num_bytes = seg->filesz;
334 
335 	if (offs < elf->max_offs) {
336 		n = MIN(elf->max_offs - offs, num_bytes);
337 		memcpy(dst, (void *)(elf->max_addr + offs - elf->max_offs), n);
338 		dst += n;
339 		offs += n;
340 		num_bytes -= n;
341 	}
342 
343 	if (num_bytes) {
344 		TEE_Result res = sys_copy_from_ta_bin(dst, num_bytes,
345 						      elf->handle, offs);
346 
347 		if (res)
348 			err(res, "sys_copy_from_ta_bin");
349 		elf->max_offs += offs;
350 	}
351 }
352 
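/*
 * Sanity check the parsed load segments and prepare them for mapping:
 * merge segments sharing a page with the previous segment (combining
 * their permissions), mark segments whose file offset overlaps the
 * previous segment as remapped_writeable, and round segment addresses
 * and offsets down to page boundaries, extending the sizes accordingly.
 */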
353 static void adjust_segments(struct ta_elf *elf)
354 {
355 	struct segment *seg = NULL;
356 	struct segment *prev_seg = NULL;
357 	size_t prev_end_addr = 0;
358 	size_t align = 0;
359 	size_t mask = 0;
360 
361 	/* Sanity check */
362 	TAILQ_FOREACH(seg, &elf->segs, link) {
363 		size_t dummy __maybe_unused = 0;
364 
365 		assert(seg->align >= SMALL_PAGE_SIZE);
366 		assert(!ADD_OVERFLOW(seg->vaddr, seg->memsz, &dummy));
367 		assert(seg->filesz <= seg->memsz);
368 		assert((seg->offset & SMALL_PAGE_MASK) ==
369 		       (seg->vaddr & SMALL_PAGE_MASK));
370 
371 		prev_seg = TAILQ_PREV(seg, segment_head, link);
372 		if (prev_seg) {
373 			assert(seg->vaddr >= prev_seg->vaddr + prev_seg->memsz);
374 			assert(seg->offset >=
375 			       prev_seg->offset + prev_seg->filesz);
376 		}
377 		if (!align)
378 			align = seg->align;
379 		assert(align == seg->align);
380 	}
381 
382 	mask = align - 1;
383 
384 	seg = TAILQ_FIRST(&elf->segs);
385 	if (seg)
386 		seg = TAILQ_NEXT(seg, link);
387 	while (seg) {
388 		prev_seg = TAILQ_PREV(seg, segment_head, link);
389 		prev_end_addr = prev_seg->vaddr + prev_seg->memsz;
390 
391 		/*
392 		 * This segment may overlap with the last "page" in the
393 		 * previous segment in two different ways:
394 		 * 1. Virtual address (and offset) overlaps =>
395 		 *    Permissions need to be merged. The offset must have the
396 		 *    same SMALL_PAGE_MASK bits set as the vaddr, and both
397 		 *    must line up with the previous segment.
398 		 *
399 		 * 2. Only offset overlaps =>
400 		 *    The same page in the ELF is mapped at two different
401 		 *    virtual addresses. As a limitation this segment must
402 		 *    be mapped as writeable.
403 		 */
404 
405 		/* Case 1. */
406 		if (rounddown(seg->vaddr) < prev_end_addr) {
407 			assert((seg->vaddr & mask) == (seg->offset & mask));
408 			assert(prev_seg->memsz == prev_seg->filesz);
409 
410 			/*
411 			 * Merge the segments and their permissions.
412 			 * Note that there may be a small hole between the
413 			 * two segments.
414 			 */
415 			prev_seg->filesz = seg->vaddr + seg->filesz -
416 					   prev_seg->vaddr;
417 			prev_seg->memsz = seg->vaddr + seg->memsz -
418 					   prev_seg->vaddr;
419 			prev_seg->flags |= seg->flags;
420 
421 			TAILQ_REMOVE(&elf->segs, seg, link);
422 			free(seg);
423 			seg = TAILQ_NEXT(prev_seg, link);
424 			continue;
425 		}
426 
427 		/* Case 2. */
428 		if ((seg->offset & mask) &&
429 		    rounddown(seg->offset) <
430 		    (prev_seg->offset + prev_seg->filesz)) {
431 
432 			assert(seg->flags & PF_W);
433 			seg->remapped_writeable = true;
434 		}
435 
436 		/*
437 		 * No overlap, but we may need to align address, offset and
438 		 * size.
439 		 */
440 		seg->filesz += seg->vaddr - rounddown(seg->vaddr);
441 		seg->memsz += seg->vaddr - rounddown(seg->vaddr);
442 		seg->vaddr = rounddown(seg->vaddr);
443 		seg->offset = rounddown(seg->offset);
444 		seg = TAILQ_NEXT(seg, link);
445 	}
446 
447 }
448 
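/*
 * Map each segment of a legacy TA as zero-initialized memory and copy
 * its file content from the TA binary. Final protections for legacy TAs
 * are applied later by ta_elf_finalize_mappings().
 */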
449 static void populate_segments_legacy(struct ta_elf *elf)
450 {
451 	TEE_Result res = TEE_SUCCESS;
452 	struct segment *seg = NULL;
453 	vaddr_t va = 0;
454 
455 	assert(elf->is_legacy);
456 	TAILQ_FOREACH(seg, &elf->segs, link) {
457 		struct segment *last_seg = TAILQ_LAST(&elf->segs, segment_head);
458 		size_t pad_end = roundup(last_seg->vaddr + last_seg->memsz -
459 					 seg->vaddr - seg->memsz);
460 		size_t num_bytes = roundup(seg->memsz);
461 
462 		if (!elf->load_addr)
463 			va = 0;
464 		else
465 			va = seg->vaddr + elf->load_addr;
466 
467 
468 		if (!(seg->flags & PF_R))
469 			err(TEE_ERROR_NOT_SUPPORTED,
470 			    "Segment must be readable");
471 
472 		res = sys_map_zi(num_bytes, 0, &va, 0, pad_end);
473 		if (res)
474 			err(res, "sys_map_zi");
475 		res = sys_copy_from_ta_bin((void *)va, seg->filesz,
476 					   elf->handle, seg->offset);
477 		if (res)
478 			err(res, "sys_copy_from_ta_bin");
479 
480 		if (!elf->load_addr)
481 			elf->load_addr = va;
482 		elf->max_addr = va + num_bytes;
483 		elf->max_offs = seg->offset + seg->filesz;
484 	}
485 }
486 
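/*
 * Return the padding, in bytes, to reserve in front of the next mapping.
 * With CFG_TA_ASLR this is a random number of pages in
 * [CFG_TA_ASLR_MIN_OFFSET_PAGES, CFG_TA_ASLR_MAX_OFFSET_PAGES), falling
 * back to the minimum if the RNG call fails; without ASLR it's 0.
 */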
487 static size_t get_pad_begin(void)
488 {
489 #ifdef CFG_TA_ASLR
490 	size_t min = CFG_TA_ASLR_MIN_OFFSET_PAGES;
491 	size_t max = CFG_TA_ASLR_MAX_OFFSET_PAGES;
492 	TEE_Result res = TEE_SUCCESS;
493 	uint32_t rnd32 = 0;
494 	size_t rnd = 0;
495 
496 	COMPILE_TIME_ASSERT(CFG_TA_ASLR_MIN_OFFSET_PAGES <
497 			    CFG_TA_ASLR_MAX_OFFSET_PAGES);
498 	if (max > min) {
499 		res = utee_cryp_random_number_generate(&rnd32, sizeof(rnd32));
500 		if (res) {
501 			DMSG("Random read failed: %#"PRIx32, res);
502 			return min * SMALL_PAGE_SIZE;
503 		}
504 		rnd = rnd32 % (max - min);
505 	}
506 
507 	return (min + rnd) * SMALL_PAGE_SIZE;
508 #else /*!CFG_TA_ASLR*/
509 	return 0;
510 #endif /*!CFG_TA_ASLR*/
511 }
512 
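/*
 * Map each adjusted segment of a non-legacy ELF. Writeable segments are
 * mapped as zero-initialized memory and filled from the TA binary,
 * read-only segments are mapped (shareable) directly from the binary.
 * The first mapping may be preceded by ASLR padding, and segments
 * overlapping the already mapped first page of a library are trimmed or
 * skipped.
 */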
513 static void populate_segments(struct ta_elf *elf)
514 {
515 	TEE_Result res = TEE_SUCCESS;
516 	struct segment *seg = NULL;
517 	vaddr_t va = 0;
518 	size_t pad_begin = 0;
519 
520 	assert(!elf->is_legacy);
521 	TAILQ_FOREACH(seg, &elf->segs, link) {
522 		struct segment *last_seg = TAILQ_LAST(&elf->segs, segment_head);
523 		size_t pad_end = roundup(last_seg->vaddr + last_seg->memsz -
524 					 seg->vaddr - seg->memsz);
525 
526 		if (seg->remapped_writeable) {
527 			size_t num_bytes = roundup(seg->vaddr + seg->memsz) -
528 					   rounddown(seg->vaddr);
529 
530 			assert(elf->load_addr);
531 			va = rounddown(elf->load_addr + seg->vaddr);
532 			assert(va >= elf->max_addr);
533 			res = sys_map_zi(num_bytes, 0, &va, 0, pad_end);
534 			if (res)
535 				err(res, "sys_map_zi");
536 
537 			copy_remapped_to(elf, seg);
538 			elf->max_addr = va + num_bytes;
539 		} else {
540 			uint32_t flags =  0;
541 			size_t filesz = seg->filesz;
542 			size_t memsz = seg->memsz;
543 			size_t offset = seg->offset;
544 			size_t vaddr = seg->vaddr;
545 
546 			if (offset < elf->max_offs) {
547 				/*
548 				 * We're in a load segment which overlaps
549 				 * with (or is covered by) the first page
550 				 * of a shared library.
551 				 */
552 				if (vaddr + filesz < SMALL_PAGE_SIZE) {
553 					size_t num_bytes = 0;
554 
555 					/*
556 					 * If this segment is completely
557 					 * covered, take next.
558 					 */
559 					if (vaddr + memsz <= SMALL_PAGE_SIZE)
560 						continue;
561 
562 					/*
563 					 * All data of the segment is
564 					 * loaded, but we need to zero
565 					 * extend it.
566 					 */
567 					va = elf->max_addr;
568 					num_bytes = roundup(vaddr + memsz) -
569 						    roundup(vaddr) -
570 						    SMALL_PAGE_SIZE;
571 					assert(num_bytes);
572 					res = sys_map_zi(num_bytes, 0, &va, 0,
573 							 0);
574 					if (res)
575 						err(res, "sys_map_zi");
576 					elf->max_addr = roundup(va + num_bytes);
577 					continue;
578 				}
579 
580 				/* Partial overlap, remove the first page. */
581 				vaddr += SMALL_PAGE_SIZE;
582 				filesz -= SMALL_PAGE_SIZE;
583 				memsz -= SMALL_PAGE_SIZE;
584 				offset += SMALL_PAGE_SIZE;
585 			}
586 
587 			if (!elf->load_addr) {
588 				va = 0;
589 				pad_begin = get_pad_begin();
590 				/*
591 				 * If mapping with pad_begin fails we'll
592 				 * retry without pad_begin, effectively
593 				 * disabling ASLR for the current ELF file.
594 				 */
595 			} else {
596 				va = vaddr + elf->load_addr;
597 				pad_begin = 0;
598 			}
599 
600 			if (seg->flags & PF_W)
601 				flags |= PTA_SYSTEM_MAP_FLAG_WRITEABLE;
602 			else
603 				flags |= PTA_SYSTEM_MAP_FLAG_SHAREABLE;
604 			if (seg->flags & PF_X)
605 				flags |= PTA_SYSTEM_MAP_FLAG_EXECUTABLE;
606 			if (!(seg->flags & PF_R))
607 				err(TEE_ERROR_NOT_SUPPORTED,
608 				    "Segment must be readable");
609 			if (flags & PTA_SYSTEM_MAP_FLAG_WRITEABLE) {
610 				res = sys_map_zi(memsz, 0, &va, pad_begin,
611 						 pad_end);
612 				if (pad_begin && res == TEE_ERROR_OUT_OF_MEMORY)
613 					res = sys_map_zi(memsz, 0, &va, 0,
614 							 pad_end);
615 				if (res)
616 					err(res, "sys_map_zi");
617 				res = sys_copy_from_ta_bin((void *)va, filesz,
618 							   elf->handle, offset);
619 				if (res)
620 					err(res, "sys_copy_from_ta_bin");
621 			} else {
622 				res = sys_map_ta_bin(&va, filesz, flags,
623 						     elf->handle, offset,
624 						     pad_begin, pad_end);
625 				if (pad_begin && res == TEE_ERROR_OUT_OF_MEMORY)
626 					res = sys_map_ta_bin(&va, filesz, flags,
627 							     elf->handle,
628 							     offset, 0,
629 							     pad_end);
630 				if (res)
631 					err(res, "sys_map_ta_bin");
632 			}
633 
634 			if (!elf->load_addr)
635 				elf->load_addr = va;
636 			elf->max_addr = roundup(va + filesz);
637 			elf->max_offs += filesz;
638 		}
639 	}
640 }
641 
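/*
 * Parse and adjust the load segments. If the first segment covers the
 * ELF header (we're loading a library), remap the page mapped by
 * init_elf() to a region large enough to hold the whole ELF, with
 * optional ASLR padding.
 */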
642 static void map_segments(struct ta_elf *elf)
643 {
644 	TEE_Result res = TEE_SUCCESS;
645 
646 	parse_load_segments(elf);
647 	adjust_segments(elf);
648 	if (TAILQ_FIRST(&elf->segs)->offset < SMALL_PAGE_SIZE) {
649 		vaddr_t va = 0;
650 		size_t sz = elf->max_addr - elf->load_addr;
651 		struct segment *seg = TAILQ_LAST(&elf->segs, segment_head);
652 		size_t pad_begin = get_pad_begin();
653 
654 		/*
655 		 * We're loading a library; if that ever changes, other
656 		 * parts of the code need to be updated too.
657 		 */
658 		assert(!elf->is_main);
659 
660 		/*
661 		 * Now that we know how much virtual memory is needed, move
662 		 * the already mapped part to a location which can
663 		 * accommodate us.
664 		 */
665 		res = sys_remap(elf->load_addr, &va, sz, pad_begin,
666 				roundup(seg->vaddr + seg->memsz));
667 		if (res == TEE_ERROR_OUT_OF_MEMORY)
668 			res = sys_remap(elf->load_addr, &va, sz, 0,
669 					roundup(seg->vaddr + seg->memsz));
670 		if (res)
671 			err(res, "sys_remap");
672 		elf->ehdr_addr = va;
673 		elf->load_addr = va;
674 		elf->max_addr = va + sz;
675 		elf->phdr = (void *)(va + elf->e_phoff);
676 	}
677 }
678 
679 static int hex(char c)
680 {
681 	char lc = tolower(c);
682 
683 	if (isdigit(lc))
684 		return lc - '0';
685 	if (isxdigit(lc))
686 		return lc - 'a' + 10;
687 	return -1;
688 }
689 
690 static uint32_t parse_hex(const char *s, size_t nchars, uint32_t *res)
691 {
692 	uint32_t v = 0;
693 	size_t n;
694 	int c;
695 
696 	for (n = 0; n < nchars; n++) {
697 		c = hex(s[n]);
698 		if (c < 0) {
699 			*res = TEE_ERROR_BAD_FORMAT;
700 			goto out;
701 		}
702 		v = (v << 4) + c;
703 	}
704 	*res = TEE_SUCCESS;
705 out:
706 	return v;
707 }
708 
709 /*
710  * Convert a UUID string @s into a TEE_UUID @uuid
711  * Expected format for @s is: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
712  * 'x' being any hexadecimal digit (0-9a-fA-F)
713  */
714 static TEE_Result parse_uuid(const char *s, TEE_UUID *uuid)
715 {
716 	TEE_Result res = TEE_SUCCESS;
717 	TEE_UUID u = { 0 };
718 	const char *p = s;
719 	size_t i;
720 
721 	if (strlen(p) != 36)
722 		return TEE_ERROR_BAD_FORMAT;
723 	if (p[8] != '-' || p[13] != '-' || p[18] != '-' || p[23] != '-')
724 		return TEE_ERROR_BAD_FORMAT;
725 
726 	u.timeLow = parse_hex(p, 8, &res);
727 	if (res)
728 		goto out;
729 	p += 9;
730 	u.timeMid = parse_hex(p, 4, &res);
731 	if (res)
732 		goto out;
733 	p += 5;
734 	u.timeHiAndVersion = parse_hex(p, 4, &res);
735 	if (res)
736 		goto out;
737 	p += 5;
738 	for (i = 0; i < 8; i++) {
739 		u.clockSeqAndNode[i] = parse_hex(p, 2, &res);
740 		if (res)
741 			goto out;
742 		if (i == 1)
743 			p += 3;
744 		else
745 			p += 2;
746 	}
747 	*uuid = u;
748 out:
749 	return res;
750 }
751 
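/*
 * For a PT_DYNAMIC segment, locate the string table via DT_STRTAB and
 * queue a dependency for every DT_NEEDED entry. The needed names are
 * expected to be UUID strings as accepted by parse_uuid().
 */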
752 static void add_deps_from_segment(struct ta_elf *elf, unsigned int type,
753 				  vaddr_t addr, size_t memsz)
754 {
755 	size_t dyn_entsize = 0;
756 	size_t num_dyns = 0;
757 	size_t n = 0;
758 	unsigned int tag = 0;
759 	size_t val = 0;
760 	TEE_UUID uuid = { };
761 	char *str_tab = NULL;
762 
763 	if (type != PT_DYNAMIC)
764 		return;
765 
766 	if (elf->is_32bit)
767 		dyn_entsize = sizeof(Elf32_Dyn);
768 	else
769 		dyn_entsize = sizeof(Elf64_Dyn);
770 
771 	assert(!(memsz % dyn_entsize));
772 	num_dyns = memsz / dyn_entsize;
773 
774 	for (n = 0; n < num_dyns; n++) {
775 		read_dyn(elf, addr, n, &tag, &val);
776 		if (tag == DT_STRTAB) {
777 			str_tab = (char *)(val + elf->load_addr);
778 			break;
779 		}
780 	}
781 
782 	for (n = 0; n < num_dyns; n++) {
783 		read_dyn(elf, addr, n, &tag, &val);
784 		if (tag != DT_NEEDED)
785 			continue;
786 		parse_uuid(str_tab + val, &uuid);
787 		queue_elf(&uuid);
788 	}
789 }
790 
791 static void add_dependencies(struct ta_elf *elf)
792 {
793 	size_t n = 0;
794 
795 	if (elf->is_32bit) {
796 		Elf32_Phdr *phdr = elf->phdr;
797 
798 		for (n = 0; n < elf->e_phnum; n++)
799 			add_deps_from_segment(elf, phdr[n].p_type,
800 					      phdr[n].p_vaddr, phdr[n].p_memsz);
801 	} else {
802 		Elf64_Phdr *phdr = elf->phdr;
803 
804 		for (n = 0; n < elf->e_phnum; n++)
805 			add_deps_from_segment(elf, phdr[n].p_type,
806 					      phdr[n].p_vaddr, phdr[n].p_memsz);
807 	}
808 }
809 
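/*
 * Allocate a buffer for the section headers and fill it, either from the
 * already mapped first page (when the headers start there) or by copying
 * from the TA binary, or both.
 */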
810 static void copy_section_headers(struct ta_elf *elf)
811 {
812 	TEE_Result res = TEE_SUCCESS;
813 	size_t sz = elf->e_shnum * elf->e_shentsize;
814 	size_t offs = 0;
815 
816 	elf->shdr = malloc(sz);
817 	if (!elf->shdr)
818 		err(TEE_ERROR_OUT_OF_MEMORY, "malloc");
819 
820 	/*
821 	 * We're assuming that the section headers come after the load segments,
822 	 * but if it's a very small dynamically linked library the section
823 	 * headers can still end up (at least partially) in the first mapped page.
824 	 */
825 	if (elf->e_shoff < SMALL_PAGE_SIZE) {
826 		assert(!elf->is_main);
827 		offs = MIN(SMALL_PAGE_SIZE - elf->e_shoff, sz);
828 		memcpy(elf->shdr, (void *)(elf->load_addr + elf->e_shoff),
829 		       offs);
830 	}
831 
832 	if (offs < sz) {
833 		res = sys_copy_from_ta_bin((uint8_t *)elf->shdr + offs,
834 					   sz - offs, elf->handle,
835 					   elf->e_shoff + offs);
836 		if (res)
837 			err(res, "sys_copy_from_ta_bin");
838 	}
839 }
840 
841 static void close_handle(struct ta_elf *elf)
842 {
843 	TEE_Result res = sys_close_ta_bin(elf->handle);
844 
845 	if (res)
846 		err(res, "sys_close_ta_bin");
847 	elf->handle = -1;
848 }
849 
850 static void clean_elf_load_main(struct ta_elf *elf)
851 {
852 	TEE_Result res = TEE_SUCCESS;
853 
854 	/*
855 	 * Clean up from last attempt to load
856 	 */
857 	res = sys_unmap(elf->ehdr_addr, SMALL_PAGE_SIZE);
858 	if (res)
859 		err(res, "sys_unmap");
860 
861 	while (!TAILQ_EMPTY(&elf->segs)) {
862 		struct segment *seg = TAILQ_FIRST(&elf->segs);
863 		vaddr_t va = 0;
864 		size_t num_bytes = 0;
865 
866 		va = rounddown(elf->load_addr + seg->vaddr);
867 		if (seg->remapped_writeable)
868 			num_bytes = roundup(seg->vaddr + seg->memsz) -
869 				    rounddown(seg->vaddr);
870 		else
871 			num_bytes = seg->memsz;
872 
873 		res = sys_unmap(va, num_bytes);
874 		if (res)
875 			err(res, "sys_unmap");
876 
877 		TAILQ_REMOVE(&elf->segs, seg, link);
878 		free(seg);
879 	}
880 
881 	free(elf->shdr);
882 	memset(&elf->is_32bit, 0,
883 	       (vaddr_t)&elf->uuid - (vaddr_t)&elf->is_32bit);
884 
885 	TAILQ_INIT(&elf->segs);
886 }
887 
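/*
 * Load the main TA. If ta_head turns out to describe a legacy TA
 * (depr_entry != UINT64_MAX), undo the mappings and reload it with the
 * legacy, non-shared mapping strategy.
 */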
888 static void load_main(struct ta_elf *elf)
889 {
890 	init_elf(elf);
891 	map_segments(elf);
892 	populate_segments(elf);
893 	add_dependencies(elf);
894 	copy_section_headers(elf);
895 	save_symtab(elf);
896 	close_handle(elf);
897 
898 	elf->head = (struct ta_head *)elf->load_addr;
899 	if (elf->head->depr_entry != UINT64_MAX) {
900 		/*
901 		 * Legacy TAs set their entry point in ta_head. For
902 		 * non-legacy TAs the ELF entry point is used instead,
903 		 * leaving the ta_head entry point set to UINT64_MAX to
904 		 * indicate that it's not used.
905 		 *
906 		 * NB, everything before commit a73b5878c89d ("Replace
907 		 * ta_head.entry with elf entry") is considered a legacy TA
908 		 * by ldelf.
909 		 *
910 		 * Legacy TAs cannot be mapped with shared memory segments,
911 		 * so restart the mapping if it turns out we're loading a
912 		 * legacy TA.
913 		 */
914 
915 		DMSG("Reloading TA %pUl as legacy TA", (void *)&elf->uuid);
916 		clean_elf_load_main(elf);
917 		elf->is_legacy = true;
918 		init_elf(elf);
919 		map_segments(elf);
920 		populate_segments_legacy(elf);
921 		add_dependencies(elf);
922 		copy_section_headers(elf);
923 		save_symtab(elf);
924 		close_handle(elf);
925 		elf->head = (struct ta_head *)elf->load_addr;
926 		/*
927 		 * Check that the TA is still a legacy TA; if it isn't, give
928 		 * up now since we're likely under attack.
929 		 */
930 		if (elf->head->depr_entry == UINT64_MAX)
931 			err(TEE_ERROR_GENERIC,
932 			    "TA %pUl was changed on disk to non-legacy",
933 			    (void *)&elf->uuid);
934 	}
935 
936 }
937 
938 void ta_elf_load_main(const TEE_UUID *uuid, uint32_t *is_32bit, uint64_t *sp,
939 		      uint32_t *ta_flags)
940 {
941 	struct ta_elf *elf = queue_elf(uuid);
942 	vaddr_t va = 0;
943 	TEE_Result res = TEE_SUCCESS;
944 
945 	assert(elf);
946 	elf->is_main = true;
947 
948 	load_main(elf);
949 
950 	*is_32bit = elf->is_32bit;
951 	res = sys_map_zi(elf->head->stack_size, 0, &va, 0, 0);
952 	if (res)
953 		err(res, "sys_map_zi stack");
954 
955 	if (elf->head->flags & ~TA_FLAGS_MASK)
956 		err(TEE_ERROR_BAD_FORMAT, "Invalid TA flags(s) %#"PRIx32,
957 		    elf->head->flags & ~TA_FLAGS_MASK);
958 
959 	*ta_flags = elf->head->flags;
960 	*sp = va + elf->head->stack_size;
961 	ta_stack = va;
962 	ta_stack_size = elf->head->stack_size;
963 }
964 
965 void ta_elf_finalize_load_main(uint64_t *entry)
966 {
967 	struct ta_elf *elf = TAILQ_FIRST(&main_elf_queue);
968 
969 	assert(elf->is_main);
970 
971 	if (elf->is_legacy)
972 		*entry = elf->head->depr_entry;
973 	else
974 		*entry = elf->e_entry + elf->load_addr;
975 }
976 
977 
978 void ta_elf_load_dependency(struct ta_elf *elf, bool is_32bit)
979 {
980 	if (elf->is_main)
981 		return;
982 
983 	init_elf(elf);
984 	if (elf->is_32bit != is_32bit)
985 		err(TEE_ERROR_BAD_FORMAT, "ELF %pUl is %sbit (expected %sbit)",
986 		    (void *)&elf->uuid, elf->is_32bit ? "32" : "64",
987 		    is_32bit ? "32" : "64");
988 
989 	map_segments(elf);
990 	populate_segments(elf);
991 	add_dependencies(elf);
992 	copy_section_headers(elf);
993 	save_symtab(elf);
994 	close_handle(elf);
995 }
996 
997 void ta_elf_finalize_mappings(struct ta_elf *elf)
998 {
999 	TEE_Result res = TEE_SUCCESS;
1000 	struct segment *seg = NULL;
1001 
1002 	if (!elf->is_legacy)
1003 		return;
1004 
1005 	TAILQ_FOREACH(seg, &elf->segs, link) {
1006 		vaddr_t va = elf->load_addr + seg->vaddr;
1007 		uint32_t flags =  0;
1008 
1009 		if (seg->flags & PF_W)
1010 			flags |= PTA_SYSTEM_MAP_FLAG_WRITEABLE;
1011 		if (seg->flags & PF_X)
1012 			flags |= PTA_SYSTEM_MAP_FLAG_EXECUTABLE;
1013 
1014 		res = sys_set_prot(va, seg->memsz, flags);
1015 		if (res)
1016 			err(res, "sys_set_prot");
1017 	}
1018 }
1019 
1020 static void __printf(3, 4) print_wrapper(void *pctx, print_func_t print_func,
1021 					 const char *fmt, ...)
1022 {
1023 	va_list ap;
1024 
1025 	va_start(ap, fmt);
1026 	print_func(pctx, fmt, ap);
1027 	va_end(ap);
1028 }
1029 
1030 static void print_seg(void *pctx, print_func_t print_func,
1031 		      size_t idx __maybe_unused, int elf_idx __maybe_unused,
1032 		      vaddr_t va __maybe_unused, paddr_t pa __maybe_unused,
1033 		      size_t sz __maybe_unused, uint32_t flags)
1034 {
1035 	int width __maybe_unused = 8;
1036 	char desc[14] __maybe_unused = "";
1037 	char flags_str[] __maybe_unused = "----";
1038 
1039 	if (elf_idx > -1) {
1040 		snprintf(desc, sizeof(desc), " [%d]", elf_idx);
1041 	} else {
1042 		if (flags & DUMP_MAP_EPHEM)
1043 			snprintf(desc, sizeof(desc), " (param)");
1044 		if (flags & DUMP_MAP_LDELF)
1045 			snprintf(desc, sizeof(desc), " (ldelf)");
1046 		if (va == ta_stack)
1047 			snprintf(desc, sizeof(desc), " (stack)");
1048 	}
1049 
1050 	if (flags & DUMP_MAP_READ)
1051 		flags_str[0] = 'r';
1052 	if (flags & DUMP_MAP_WRITE)
1053 		flags_str[1] = 'w';
1054 	if (flags & DUMP_MAP_EXEC)
1055 		flags_str[2] = 'x';
1056 	if (flags & DUMP_MAP_SECURE)
1057 		flags_str[3] = 's';
1058 
1059 	print_wrapper(pctx, print_func,
1060 		      "region %2zu: va 0x%0*"PRIxVA" pa 0x%0*"PRIxPA" size 0x%06zx flags %s%s\n",
1061 		      idx, width, va, width, pa, sz, flags_str, desc);
1062 }
1063 
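/*
 * Advance to the next segment in load-address order: the next segment of
 * the current ELF, or the first segment of the ELF with the lowest load
 * address above the current one. Returns false when all segments have
 * been visited.
 */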
1064 static bool get_next_in_order(struct ta_elf_queue *elf_queue,
1065 			      struct ta_elf **elf, struct segment **seg,
1066 			      size_t *elf_idx)
1067 {
1068 	struct ta_elf *e = NULL;
1069 	struct segment *s = NULL;
1070 	size_t idx = 0;
1071 	vaddr_t va = 0;
1072 	struct ta_elf *e2 = NULL;
1073 	size_t i2 = 0;
1074 
1075 	assert(elf && seg && elf_idx);
1076 	e = *elf;
1077 	s = *seg;
1078 	assert((e == NULL && s == NULL) || (e != NULL && s != NULL));
1079 
1080 	if (s) {
1081 		s = TAILQ_NEXT(s, link);
1082 		if (s) {
1083 			*seg = s;
1084 			return true;
1085 		}
1086 	}
1087 
1088 	if (e)
1089 		va = e->load_addr;
1090 
1091 	/* Find the ELF with next load address */
1092 	e = NULL;
1093 	TAILQ_FOREACH(e2, elf_queue, link) {
1094 		if (e2->load_addr > va) {
1095 			if (!e || e2->load_addr < e->load_addr) {
1096 				e = e2;
1097 				idx = i2;
1098 			}
1099 		}
1100 		i2++;
1101 	}
1102 	if (!e)
1103 		return false;
1104 
1105 	*elf = e;
1106 	*seg = TAILQ_FIRST(&e->segs);
1107 	*elf_idx = idx;
1108 	return true;
1109 }
1110 
1111 void ta_elf_print_mappings(void *pctx, print_func_t print_func,
1112 			   struct ta_elf_queue *elf_queue, size_t num_maps,
1113 			   struct dump_map *maps, vaddr_t mpool_base)
1114 {
1115 	struct segment *seg = NULL;
1116 	struct ta_elf *elf = NULL;
1117 	size_t elf_idx = 0;
1118 	size_t idx = 0;
1119 	size_t map_idx = 0;
1120 
1121 	/*
1122 	 * Loop over all segments and maps, printing them in virtual
1123 	 * address order. A segment has priority if the virtual address
1124 	 * is present in both a map entry and a segment.
1125 	 */
1126 	get_next_in_order(elf_queue, &elf, &seg, &elf_idx);
1127 	while (true) {
1128 		vaddr_t va = -1;
1129 		size_t sz = 0;
1130 		uint32_t flags = DUMP_MAP_SECURE;
1131 		size_t offs = 0;
1132 
1133 		if (seg) {
1134 			va = rounddown(seg->vaddr + elf->load_addr);
1135 			sz = roundup(seg->vaddr + seg->memsz) -
1136 				     rounddown(seg->vaddr);
1137 		}
1138 
1139 		while (map_idx < num_maps && maps[map_idx].va <= va) {
1140 			uint32_t f = 0;
1141 
1142 			/* If there's a match, it should be the same map */
1143 			if (maps[map_idx].va == va) {
1144 				/*
1145 				 * For shared libraries the first page is
1146 				 * mapped separately, with the rest of the
1147 				 * segment following back-to-back in a
1148 				 * separate map entry.
1149 				 */
1150 				if (map_idx + 1 < num_maps &&
1151 				    maps[map_idx].sz == SMALL_PAGE_SIZE) {
1152 					vaddr_t next_va = maps[map_idx].va +
1153 							  maps[map_idx].sz;
1154 					size_t comb_sz = maps[map_idx].sz +
1155 							 maps[map_idx + 1].sz;
1156 
1157 					if (next_va == maps[map_idx + 1].va &&
1158 					    comb_sz == sz &&
1159 					    maps[map_idx].flags ==
1160 					    maps[map_idx + 1].flags) {
1161 						/* Skip this and next entry */
1162 						map_idx += 2;
1163 						continue;
1164 					}
1165 				}
1166 				assert(maps[map_idx].sz == sz);
1167 			} else if (maps[map_idx].va < va) {
1168 				if (maps[map_idx].va == mpool_base)
1169 					f |= DUMP_MAP_LDELF;
1170 				print_seg(pctx, print_func, idx, -1,
1171 					  maps[map_idx].va, maps[map_idx].pa,
1172 					  maps[map_idx].sz,
1173 					  maps[map_idx].flags | f);
1174 				idx++;
1175 			}
1176 			map_idx++;
1177 		}
1178 
1179 		if (!seg)
1180 			break;
1181 
1182 		offs = rounddown(seg->offset);
1183 		if (seg->flags & PF_R)
1184 			flags |= DUMP_MAP_READ;
1185 		if (seg->flags & PF_W)
1186 			flags |= DUMP_MAP_WRITE;
1187 		if (seg->flags & PF_X)
1188 			flags |= DUMP_MAP_EXEC;
1189 
1190 		print_seg(pctx, print_func, idx, elf_idx, va, offs, sz, flags);
1191 		idx++;
1192 
1193 		if (!get_next_in_order(elf_queue, &elf, &seg, &elf_idx))
1194 			seg = NULL;
1195 	}
1196 
1197 	elf_idx = 0;
1198 	TAILQ_FOREACH(elf, elf_queue, link) {
1199 		print_wrapper(pctx, print_func,
1200 			      " [%zu] %pUl @ 0x%0*"PRIxVA"\n",
1201 			      elf_idx, (void *)&elf->uuid, 8, elf->load_addr);
1202 		elf_idx++;
1203 	}
1204 }
1205 
1206 #ifdef CFG_UNWIND
1207 void ta_elf_stack_trace_a32(uint32_t regs[16])
1208 {
1209 	struct unwind_state_arm32 state = { };
1210 
1211 	memcpy(state.registers, regs, sizeof(state.registers));
1212 	print_stack_arm32(&state, ta_stack, ta_stack_size);
1213 }
1214 
1215 void ta_elf_stack_trace_a64(uint64_t fp, uint64_t sp, uint64_t pc)
1216 {
1217 	struct unwind_state_arm64 state = { .fp = fp, .sp = sp, .pc = pc };
1218 
1219 	print_stack_arm64(&state, ta_stack, ta_stack_size);
1220 }
1221 #endif
1222