xref: /optee_os/ldelf/ta_elf.c (revision 6720dd495966ac82823854aecea59f844e73cf81)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2019, Linaro Limited
4  */
5 
6 #include <assert.h>
7 #include <ctype.h>
8 #include <elf32.h>
9 #include <elf64.h>
10 #include <elf_common.h>
11 #include <ldelf.h>
12 #include <pta_system.h>
13 #include <stdio.h>
14 #include <stdlib.h>
15 #include <string_ext.h>
16 #include <string.h>
17 #include <tee_api_types.h>
18 #include <user_ta_header.h>
19 #include <utee_syscalls.h>
20 
21 #include "sys.h"
22 #include "ta_elf.h"
23 #include "unwind.h"
24 
25 static vaddr_t ta_stack;
26 static size_t ta_stack_size;
27 
28 struct ta_elf_queue main_elf_queue = TAILQ_HEAD_INITIALIZER(main_elf_queue);
29 
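/*
 * Queue a new ELF for loading. Returns NULL if @uuid is already queued,
 * otherwise a freshly allocated entry appended to main_elf_queue.
 * Allocation failure is treated as fatal via err().
 */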
30 static struct ta_elf *queue_elf(const TEE_UUID *uuid)
31 {
32 	struct ta_elf *elf = NULL;
33 
34 	TAILQ_FOREACH(elf, &main_elf_queue, link)
35 		if (!memcmp(uuid, &elf->uuid, sizeof(*uuid)))
36 			return NULL;
37 
38 	elf = calloc(1, sizeof(*elf));
39 	if (!elf)
40 		err(TEE_ERROR_OUT_OF_MEMORY, "calloc");
41 
42 	TAILQ_INIT(&elf->segs);
43 
44 	elf->uuid = *uuid;
45 	TAILQ_INSERT_TAIL(&main_elf_queue, elf, link);
46 	return elf;
47 }
48 
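/*
 * Sanity check and cache the fields of a 32-bit ELF header. Only
 * little-endian ELFCLASS32 ET_DYN objects for EM_ARM with the expected ABI
 * version and header entry sizes are accepted; without CFG_WITH_VFP,
 * hard-float binaries are rejected too.
 */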
49 static TEE_Result e32_parse_ehdr(struct ta_elf *elf, Elf32_Ehdr *ehdr)
50 {
51 	if (ehdr->e_ident[EI_VERSION] != EV_CURRENT ||
52 	    ehdr->e_ident[EI_CLASS] != ELFCLASS32 ||
53 	    ehdr->e_ident[EI_DATA] != ELFDATA2LSB ||
54 	    ehdr->e_ident[EI_OSABI] != ELFOSABI_NONE ||
55 	    ehdr->e_type != ET_DYN || ehdr->e_machine != EM_ARM ||
56 	    (ehdr->e_flags & EF_ARM_ABIMASK) != EF_ARM_ABI_VERSION ||
57 #ifndef CFG_WITH_VFP
58 	    (ehdr->e_flags & EF_ARM_ABI_FLOAT_HARD) ||
59 #endif
60 	    ehdr->e_phentsize != sizeof(Elf32_Phdr) ||
61 	    ehdr->e_shentsize != sizeof(Elf32_Shdr))
62 		return TEE_ERROR_BAD_FORMAT;
63 
64 	elf->is_32bit = true;
65 	elf->e_entry = ehdr->e_entry;
66 	elf->e_phoff = ehdr->e_phoff;
67 	elf->e_shoff = ehdr->e_shoff;
68 	elf->e_phnum = ehdr->e_phnum;
69 	elf->e_shnum = ehdr->e_shnum;
70 	elf->e_phentsize = ehdr->e_phentsize;
71 	elf->e_shentsize = ehdr->e_shentsize;
72 
73 	return TEE_SUCCESS;
74 }
75 
76 #ifdef ARM64
77 static TEE_Result e64_parse_ehdr(struct ta_elf *elf, Elf64_Ehdr *ehdr)
78 {
79 	if (ehdr->e_ident[EI_VERSION] != EV_CURRENT ||
80 	    ehdr->e_ident[EI_CLASS] != ELFCLASS64 ||
81 	    ehdr->e_ident[EI_DATA] != ELFDATA2LSB ||
82 	    ehdr->e_ident[EI_OSABI] != ELFOSABI_NONE ||
83 	    ehdr->e_type != ET_DYN || ehdr->e_machine != EM_AARCH64 ||
84 	    ehdr->e_flags || ehdr->e_phentsize != sizeof(Elf64_Phdr) ||
85 	    ehdr->e_shentsize != sizeof(Elf64_Shdr))
86 		return TEE_ERROR_BAD_FORMAT;
87 
88 
89 	elf->is_32bit = false;
90 	elf->e_entry = ehdr->e_entry;
91 	elf->e_phoff = ehdr->e_phoff;
92 	elf->e_shoff = ehdr->e_shoff;
93 	elf->e_phnum = ehdr->e_phnum;
94 	elf->e_shnum = ehdr->e_shnum;
95 	elf->e_phentsize = ehdr->e_phentsize;
96 	elf->e_shentsize = ehdr->e_shentsize;
97 
98 	return TEE_SUCCESS;
99 }
100 #else /*ARM64*/
101 static TEE_Result e64_parse_ehdr(struct ta_elf *elf __unused,
102 				 Elf64_Ehdr *ehdr __unused)
103 {
104 	return TEE_ERROR_NOT_SUPPORTED;
105 }
106 #endif /*ARM64*/
107 
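/*
 * Read dynamic entry @idx from the PT_DYNAMIC segment at (unrelocated)
 * address @addr, hiding whether the ELF is 32- or 64-bit from the caller.
 */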
108 static void read_dyn(struct ta_elf *elf, vaddr_t addr,
109 		     size_t idx, unsigned int *tag, size_t *val)
110 {
111 	if (elf->is_32bit) {
112 		Elf32_Dyn *dyn = (Elf32_Dyn *)(addr + elf->load_addr);
113 
114 		*tag = dyn[idx].d_tag;
115 		*val = dyn[idx].d_un.d_val;
116 	} else {
117 		Elf64_Dyn *dyn = (Elf64_Dyn *)(addr + elf->load_addr);
118 
119 		*tag = dyn[idx].d_tag;
120 		*val = dyn[idx].d_un.d_val;
121 	}
122 }
123 
124 static void e32_save_symtab(struct ta_elf *elf, size_t tab_idx)
125 {
126 	Elf32_Shdr *shdr = elf->shdr;
127 	size_t str_idx = shdr[tab_idx].sh_link;
128 
129 	elf->dynsymtab = (void *)(shdr[tab_idx].sh_addr + elf->load_addr);
130 	assert(!(shdr[tab_idx].sh_size % sizeof(Elf32_Sym)));
131 	elf->num_dynsyms = shdr[tab_idx].sh_size / sizeof(Elf32_Sym);
132 
133 	elf->dynstr = (void *)(shdr[str_idx].sh_addr + elf->load_addr);
134 	elf->dynstr_size = shdr[str_idx].sh_size;
135 }
136 
137 static void e64_save_symtab(struct ta_elf *elf, size_t tab_idx)
138 {
139 	Elf64_Shdr *shdr = elf->shdr;
140 	size_t str_idx = shdr[tab_idx].sh_link;
141 
142 	elf->dynsymtab = (void *)(vaddr_t)(shdr[tab_idx].sh_addr +
143 					   elf->load_addr);
144 	assert(!(shdr[tab_idx].sh_size % sizeof(Elf64_Sym)));
145 	elf->num_dynsyms = shdr[tab_idx].sh_size / sizeof(Elf64_Sym);
146 
147 	elf->dynstr = (void *)(vaddr_t)(shdr[str_idx].sh_addr + elf->load_addr);
148 	elf->dynstr_size = shdr[str_idx].sh_size;
149 }
150 
151 static void save_symtab(struct ta_elf *elf)
152 {
153 	size_t n = 0;
154 
155 	if (elf->is_32bit) {
156 		Elf32_Shdr *shdr = elf->shdr;
157 
158 		for (n = 0; n < elf->e_shnum; n++) {
159 			if (shdr[n].sh_type == SHT_DYNSYM) {
160 				e32_save_symtab(elf, n);
161 				break;
162 			}
163 		}
164 	} else {
165 		Elf64_Shdr *shdr = elf->shdr;
166 
167 		for (n = 0; n < elf->e_shnum; n++) {
168 			if (shdr[n].sh_type == SHT_DYNSYM) {
169 				e64_save_symtab(elf, n);
170 				break;
171 			}
172 		}
173 
174 	}
175 }
176 
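/*
 * Open the TA binary and map its first page so that the ELF header and the
 * program headers can be parsed. For a library (!is_main) this first page
 * also becomes the start of the load address range. The program headers
 * must fit within the first SMALL_PAGE_SIZE bytes of the file.
 */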
177 static void init_elf(struct ta_elf *elf)
178 {
179 	TEE_Result res = TEE_SUCCESS;
180 	vaddr_t va = 0;
181 	uint32_t flags = PTA_SYSTEM_MAP_FLAG_SHAREABLE;
182 
183 	res = sys_open_ta_bin(&elf->uuid, &elf->handle);
184 	if (res)
185 		err(res, "sys_open_ta_bin(%pUl)", (void *)&elf->uuid);
186 
187 	/*
188 	 * When we're loading a library the ELF header is included in a load
189 	 * segment, so map it read-only executable in that case.
190 	 */
191 	if (!elf->is_main)
192 		flags |= PTA_SYSTEM_MAP_FLAG_EXECUTABLE;
193 	res = sys_map_ta_bin(&va, SMALL_PAGE_SIZE, flags, elf->handle, 0, 0, 0);
194 	if (res)
195 		err(res, "sys_map_ta_bin");
196 	elf->ehdr_addr = va;
197 	if (!elf->is_main) {
198 		elf->load_addr = va;
199 		elf->max_addr = va + SMALL_PAGE_SIZE;
200 		elf->max_offs = SMALL_PAGE_SIZE;
201 	}
202 
203 	if (!IS_ELF(*(Elf32_Ehdr *)va))
204 		err(TEE_ERROR_BAD_FORMAT, "TA is not an ELF");
205 
206 	res = e32_parse_ehdr(elf, (void *)va);
207 	if (res == TEE_ERROR_BAD_FORMAT)
208 		res = e64_parse_ehdr(elf, (void *)va);
209 	if (res)
210 		err(res, "Cannot parse ELF");
211 
212 	if (elf->e_phoff + elf->e_phnum * elf->e_phentsize > SMALL_PAGE_SIZE)
213 		err(TEE_ERROR_NOT_SUPPORTED, "Cannot read program headers");
214 
215 	elf->phdr = (void *)(va + elf->e_phoff);
216 }
217 
218 static size_t roundup(size_t v)
219 {
220 	return ROUNDUP(v, SMALL_PAGE_SIZE);
221 }
222 
223 static size_t rounddown(size_t v)
224 {
225 	return ROUNDDOWN(v, SMALL_PAGE_SIZE);
226 }
227 
228 static void add_segment(struct ta_elf *elf, size_t offset, size_t vaddr,
229 			size_t filesz, size_t memsz, size_t flags, size_t align)
230 {
231 	struct segment *seg = calloc(1, sizeof(*seg));
232 
233 	if (!seg)
234 		err(TEE_ERROR_OUT_OF_MEMORY, "calloc");
235 
236 	seg->offset = offset;
237 	seg->vaddr = vaddr;
238 	seg->filesz = filesz;
239 	seg->memsz = memsz;
240 	seg->flags = flags;
241 	seg->align = align;
242 
243 	TAILQ_INSERT_TAIL(&elf->segs, seg, link);
244 }
245 
246 static void parse_load_segments(struct ta_elf *elf)
247 {
248 	size_t n = 0;
249 
250 	if (elf->is_32bit) {
251 		Elf32_Phdr *phdr = elf->phdr;
252 
253 		for (n = 0; n < elf->e_phnum; n++)
254 			if (phdr[n].p_type == PT_LOAD) {
255 				add_segment(elf, phdr[n].p_offset,
256 					    phdr[n].p_vaddr, phdr[n].p_filesz,
257 					    phdr[n].p_memsz, phdr[n].p_flags,
258 					    phdr[n].p_align);
259 			} else if (phdr[n].p_type == PT_ARM_EXIDX) {
260 				elf->exidx_start = phdr[n].p_vaddr;
261 				elf->exidx_size = phdr[n].p_filesz;
262 			}
263 	} else {
264 		Elf64_Phdr *phdr = elf->phdr;
265 
266 		for (n = 0; n < elf->e_phnum; n++)
267 			if (phdr[n].p_type == PT_LOAD)
268 				add_segment(elf, phdr[n].p_offset,
269 					    phdr[n].p_vaddr, phdr[n].p_filesz,
270 					    phdr[n].p_memsz, phdr[n].p_flags,
271 					    phdr[n].p_align);
272 	}
273 }
274 
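/*
 * Copy the file contents of a remapped-writeable segment into place: bytes
 * that are already present in the mapped first page are copied from there,
 * the rest is fetched from the TA binary.
 */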
275 static void copy_remapped_to(struct ta_elf *elf, const struct segment *seg)
276 {
277 	uint8_t *dst = (void *)(seg->vaddr + elf->load_addr);
278 	size_t n = 0;
279 	size_t offs = seg->offset;
280 	size_t num_bytes = seg->filesz;
281 
282 	if (offs < elf->max_offs) {
283 		n = MIN(elf->max_offs - offs, num_bytes);
284 		memcpy(dst, (void *)(elf->max_addr + offs - elf->max_offs), n);
285 		dst += n;
286 		offs += n;
287 		num_bytes -= n;
288 	}
289 
290 	if (num_bytes) {
291 		TEE_Result res = sys_copy_from_ta_bin(dst, num_bytes,
292 						      elf->handle, offs);
293 
294 		if (res)
295 			err(res, "sys_copy_from_ta_bin");
296 		elf->max_offs += offs;
297 	}
298 }
299 
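/*
 * Normalize the parsed segments to SMALL_PAGE_SIZE granularity before
 * mapping them.
 *
 * Hypothetical example (made-up numbers, for illustration only): an RX
 * segment covering vaddr 0x0000..0x2f00 followed by an RW segment at vaddr
 * 0x2f80 (file offset 0x2f80) overlaps the last page of the RX segment, so
 * the two are merged into one segment with flags PF_R | PF_W | PF_X
 * (case 1 below). If instead only the file offsets share a page, that page
 * is remapped writeable at the second virtual address (case 2 below).
 * Segments that don't overlap are simply rounded down to page boundaries.
 */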
300 static void adjust_segments(struct ta_elf *elf)
301 {
302 	struct segment *seg = NULL;
303 	struct segment *prev_seg = NULL;
304 	size_t prev_end_addr = 0;
305 	size_t align = 0;
306 	size_t mask = 0;
307 
308 	/* Sanity check */
309 	TAILQ_FOREACH(seg, &elf->segs, link) {
310 		size_t dummy __maybe_unused = 0;
311 
312 		assert(seg->align >= SMALL_PAGE_SIZE);
313 		assert(!ADD_OVERFLOW(seg->vaddr, seg->memsz, &dummy));
314 		assert(seg->filesz <= seg->memsz);
315 		assert((seg->offset & SMALL_PAGE_MASK) ==
316 		       (seg->vaddr & SMALL_PAGE_MASK));
317 
318 		prev_seg = TAILQ_PREV(seg, segment_head, link);
319 		if (prev_seg) {
320 			assert(seg->vaddr >= prev_seg->vaddr + prev_seg->memsz);
321 			assert(seg->offset >=
322 			       prev_seg->offset + prev_seg->filesz);
323 		}
324 		if (!align)
325 			align = seg->align;
326 		assert(align == seg->align);
327 	}
328 
329 	mask = align - 1;
330 
331 	seg = TAILQ_FIRST(&elf->segs);
332 	if (seg)
333 		seg = TAILQ_NEXT(seg, link);
334 	while (seg) {
335 		prev_seg = TAILQ_PREV(seg, segment_head, link);
336 		prev_end_addr = prev_seg->vaddr + prev_seg->memsz;
337 
338 		/*
339 		 * This segment may overlap with the last "page" in the
340 		 * previous segment in two different ways:
341 		 * 1. Virtual address (and offset) overlaps =>
342 		 *    Permissions need to be merged. The offset must have
343 		 *    the same SMALL_PAGE_MASK bits as the vaddr so that it
344 		 *    lines up with the previous segment.
345 		 *
346 		 * 2. Only offset overlaps =>
347 		 *    The same page in the ELF is mapped at two different
348 		 *    virtual addresses. As a limitation this segment must
349 		 *    be mapped as writeable.
350 		 */
351 
352 		/* Case 1. */
353 		if (rounddown(seg->vaddr) < prev_end_addr) {
354 			assert((seg->vaddr & mask) == (seg->offset & mask));
355 			assert(prev_seg->memsz == prev_seg->filesz);
356 
357 			/*
358 			 * Merge the segments and their permissions.
359 			 * Note that there may be a small hole between the
360 			 * two segments.
361 			 */
362 			prev_seg->filesz = seg->vaddr + seg->filesz -
363 					   prev_seg->vaddr;
364 			prev_seg->memsz = seg->vaddr + seg->memsz -
365 					   prev_seg->vaddr;
366 			prev_seg->flags |= seg->flags;
367 
368 			TAILQ_REMOVE(&elf->segs, seg, link);
369 			free(seg);
370 			seg = TAILQ_NEXT(prev_seg, link);
371 			continue;
372 		}
373 
374 		/* Case 2. */
375 		if ((seg->offset & mask) &&
376 		    rounddown(seg->offset) <
377 		    (prev_seg->offset + prev_seg->filesz)) {
378 
379 			assert(seg->flags & PF_W);
380 			seg->remapped_writeable = true;
381 		}
382 
383 		/*
384 		 * No overlap, but we may need to align address, offset and
385 		 * size.
386 		 */
387 		seg->filesz += seg->vaddr - rounddown(seg->vaddr);
388 		seg->memsz += seg->vaddr - rounddown(seg->vaddr);
389 		seg->vaddr = rounddown(seg->vaddr);
390 		seg->offset = rounddown(seg->offset);
391 		seg = TAILQ_NEXT(seg, link);
392 	}
393 
394 }
395 
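/*
 * Legacy mapping scheme: every segment is mapped as zero-initialized
 * (writeable) memory and the file contents are copied in with
 * sys_copy_from_ta_bin(). The final permissions are applied later by
 * ta_elf_finalize_mappings().
 */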
396 static void populate_segments_legacy(struct ta_elf *elf)
397 {
398 	TEE_Result res = TEE_SUCCESS;
399 	struct segment *seg = NULL;
400 	vaddr_t va = 0;
401 
402 	TAILQ_FOREACH(seg, &elf->segs, link) {
403 		struct segment *last_seg = TAILQ_LAST(&elf->segs, segment_head);
404 		size_t pad_end = roundup(last_seg->vaddr + last_seg->memsz -
405 					 seg->vaddr - seg->memsz);
406 		size_t num_bytes = roundup(seg->memsz);
407 
408 		if (!elf->load_addr)
409 			va = 0;
410 		else
411 			va = seg->vaddr + elf->load_addr;
412 
413 
414 		if (!(seg->flags & PF_R))
415 			err(TEE_ERROR_NOT_SUPPORTED,
416 			    "Segment must be readable");
417 
418 		res = sys_map_zi(num_bytes, 0, &va, 0, pad_end);
419 		if (res)
420 			err(res, "sys_map_zi");
421 		res = sys_copy_from_ta_bin((void *)va, seg->filesz,
422 					   elf->handle, seg->offset);
423 		if (res)
424 			err(res, "sys_copy_from_ta_bin");
425 
426 		if (!elf->load_addr)
427 			elf->load_addr = va;
428 		elf->max_addr = va + num_bytes;
429 		elf->max_offs = seg->offset + seg->filesz;
430 	}
431 }
432 
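/*
 * ASLR: return a random number of whole pages of padding to place before
 * the first mapping. As an illustration, with hypothetical values
 * CFG_TA_ASLR_MIN_OFFSET_PAGES=0 and CFG_TA_ASLR_MAX_OFFSET_PAGES=128 the
 * result is somewhere in [0, 127] * SMALL_PAGE_SIZE. If the RNG fails, the
 * minimum offset is used. Without CFG_TA_ASLR no padding is added.
 */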
433 static size_t get_pad_begin(void)
434 {
435 #ifdef CFG_TA_ASLR
436 	size_t min = CFG_TA_ASLR_MIN_OFFSET_PAGES;
437 	size_t max = CFG_TA_ASLR_MAX_OFFSET_PAGES;
438 	TEE_Result res = TEE_SUCCESS;
439 	uint32_t rnd32 = 0;
440 	size_t rnd = 0;
441 
442 	COMPILE_TIME_ASSERT(CFG_TA_ASLR_MIN_OFFSET_PAGES <
443 			    CFG_TA_ASLR_MAX_OFFSET_PAGES);
444 	if (max > min) {
445 		res = utee_cryp_random_number_generate(&rnd32, sizeof(rnd32));
446 		if (res) {
447 			DMSG("Random read failed: %#"PRIx32, res);
448 			return min * SMALL_PAGE_SIZE;
449 		}
450 		rnd = rnd32 % (max - min);
451 	}
452 
453 	return (min + rnd) * SMALL_PAGE_SIZE;
454 #else /*!CFG_TA_ASLR*/
455 	return 0;
456 #endif /*!CFG_TA_ASLR*/
457 }
458 
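/*
 * Map each page-aligned segment: writeable segments are mapped as
 * zero-initialized memory and filled with sys_copy_from_ta_bin(), while
 * read-only and executable segments are mapped directly from the TA binary.
 * For shared libraries, segments overlapping the already mapped first page
 * are trimmed or skipped as needed.
 */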
459 static void populate_segments(struct ta_elf *elf)
460 {
461 	TEE_Result res = TEE_SUCCESS;
462 	struct segment *seg = NULL;
463 	vaddr_t va = 0;
464 	size_t pad_begin = 0;
465 
466 	TAILQ_FOREACH(seg, &elf->segs, link) {
467 		struct segment *last_seg = TAILQ_LAST(&elf->segs, segment_head);
468 		size_t pad_end = roundup(last_seg->vaddr + last_seg->memsz -
469 					 seg->vaddr - seg->memsz);
470 
471 		if (seg->remapped_writeable) {
472 			size_t num_bytes = roundup(seg->vaddr + seg->memsz) -
473 					   rounddown(seg->vaddr);
474 
475 			assert(elf->load_addr);
476 			va = rounddown(elf->load_addr + seg->vaddr);
477 			assert(va >= elf->max_addr);
478 			res = sys_map_zi(num_bytes, 0, &va, 0, pad_end);
479 			if (res)
480 				err(res, "sys_map_zi");
481 
482 			copy_remapped_to(elf, seg);
483 			elf->max_addr = va + num_bytes;
484 		} else {
485 			uint32_t flags = 0;
486 			size_t filesz = seg->filesz;
487 			size_t memsz = seg->memsz;
488 			size_t offset = seg->offset;
489 			size_t vaddr = seg->vaddr;
490 
491 			if (offset < elf->max_offs) {
492 				/*
493 				 * We're in a load segment which overlaps
494 				 * with (or is covered by) the first page
495 				 * of a shared library.
496 				 */
497 				if (vaddr + filesz < SMALL_PAGE_SIZE) {
498 					size_t num_bytes = 0;
499 
500 					/*
501 					 * If this segment is completely
502 					 * covered, take next.
503 					 */
504 					if (vaddr + memsz <= SMALL_PAGE_SIZE)
505 						continue;
506 
507 					/*
508 					 * All data of the segment is
509 					 * loaded, but we need to zero
510 					 * extend it.
511 					 */
512 					va = elf->max_addr;
513 					num_bytes = roundup(vaddr + memsz) -
514 						    roundup(vaddr) -
515 						    SMALL_PAGE_SIZE;
516 					assert(num_bytes);
517 					res = sys_map_zi(num_bytes, 0, &va, 0,
518 							 0);
519 					if (res)
520 						err(res, "sys_map_zi");
521 					elf->max_addr = roundup(va + num_bytes);
522 					continue;
523 				}
524 
525 				/* Partial overlap, remove the first page. */
526 				vaddr += SMALL_PAGE_SIZE;
527 				filesz -= SMALL_PAGE_SIZE;
528 				memsz -= SMALL_PAGE_SIZE;
529 				offset += SMALL_PAGE_SIZE;
530 			}
531 
532 			if (!elf->load_addr) {
533 				va = 0;
534 				pad_begin = get_pad_begin();
535 				/*
536 				 * If mapping with pad_begin fails we'll
537 				 * retry without pad_begin, effectively
538 				 * disabling ASLR for the current ELF file.
539 				 */
540 			} else {
541 				va = vaddr + elf->load_addr;
542 				pad_begin = 0;
543 			}
544 
545 			if (seg->flags & PF_W)
546 				flags |= PTA_SYSTEM_MAP_FLAG_WRITEABLE;
547 			else
548 				flags |= PTA_SYSTEM_MAP_FLAG_SHAREABLE;
549 			if (seg->flags & PF_X)
550 				flags |= PTA_SYSTEM_MAP_FLAG_EXECUTABLE;
551 			if (!(seg->flags & PF_R))
552 				err(TEE_ERROR_NOT_SUPPORTED,
553 				    "Segment must be readable");
554 			if (flags & PTA_SYSTEM_MAP_FLAG_WRITEABLE) {
555 				res = sys_map_zi(memsz, 0, &va, pad_begin,
556 						 pad_end);
557 				if (pad_begin && res == TEE_ERROR_OUT_OF_MEMORY)
558 					res = sys_map_zi(memsz, 0, &va, 0,
559 							 pad_end);
560 				if (res)
561 					err(res, "sys_map_zi");
562 				res = sys_copy_from_ta_bin((void *)va, filesz,
563 							   elf->handle, offset);
564 				if (res)
565 					err(res, "sys_copy_from_ta_bin");
566 			} else {
567 				res = sys_map_ta_bin(&va, filesz, flags,
568 						     elf->handle, offset,
569 						     pad_begin, pad_end);
570 				if (pad_begin && res == TEE_ERROR_OUT_OF_MEMORY)
571 					res = sys_map_ta_bin(&va, filesz, flags,
572 							     elf->handle,
573 							     offset, 0,
574 							     pad_end);
575 				if (res)
576 					err(res, "sys_map_ta_bin");
577 			}
578 
579 			if (!elf->load_addr)
580 				elf->load_addr = va;
581 			elf->max_addr = roundup(va + filesz);
582 			elf->max_offs += filesz;
583 		}
584 	}
585 }
586 
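/*
 * Establish the final memory layout for @elf: parse and page-align the load
 * segments, move the initially mapped first page if it is itself part of a
 * load segment (the shared library case), then populate the segments using
 * either the legacy or the current scheme.
 */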
587 static void map_segments(struct ta_elf *elf)
588 {
589 	TEE_Result res = TEE_SUCCESS;
590 
591 	parse_load_segments(elf);
592 	adjust_segments(elf);
593 	if (TAILQ_FIRST(&elf->segs)->offset < SMALL_PAGE_SIZE) {
594 		vaddr_t va = 0;
595 		size_t sz = elf->max_addr - elf->load_addr;
596 		struct segment *seg = TAILQ_LAST(&elf->segs, segment_head);
597 		size_t pad_begin = get_pad_begin();
598 
599 		/*
600 		 * We should only get here when loading a library; if that
601 		 * changes, other parts of the code need to be updated too.
602 		 */
603 		assert(!elf->is_main);
604 
605 		/*
606 		 * Now that we know how much virtual memory is needed, move
607 		 * the already mapped part to a location which can
608 		 * accommodate us.
609 		 */
610 		res = sys_remap(elf->load_addr, &va, sz, pad_begin,
611 				roundup(seg->vaddr + seg->memsz));
612 		if (res == TEE_ERROR_OUT_OF_MEMORY)
613 			res = sys_remap(elf->load_addr, &va, sz, 0,
614 					roundup(seg->vaddr + seg->memsz));
615 		if (res)
616 			err(res, "sys_remap");
617 		elf->ehdr_addr = va;
618 		elf->load_addr = va;
619 		elf->max_addr = va + sz;
620 		elf->phdr = (void *)(va + elf->e_phoff);
621 	}
622 	if (elf->is_legacy)
623 		populate_segments_legacy(elf);
624 	else
625 		populate_segments(elf);
626 }
627 
628 static int hex(char c)
629 {
630 	char lc = tolower(c);
631 
632 	if (isdigit(lc))
633 		return lc - '0';
634 	if (isxdigit(lc))
635 		return lc - 'a' + 10;
636 	return -1;
637 }
638 
639 static uint32_t parse_hex(const char *s, size_t nchars, uint32_t *res)
640 {
641 	uint32_t v = 0;
642 	size_t n = 0;
643 	int c = 0;
644 
645 	for (n = 0; n < nchars; n++) {
646 		c = hex(s[n]);
647 		if (c == -1) {
648 			*res = TEE_ERROR_BAD_FORMAT;
649 			goto out;
650 		}
651 		v = (v << 4) + c;
652 	}
653 	*res = TEE_SUCCESS;
654 out:
655 	return v;
656 }
657 
658 /*
659  * Convert a UUID string @s into a TEE_UUID @uuid
660  * Expected format for @s is: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
661  * 'x' being any hexadecimal digit (0-9a-fA-F)
662  */
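/*
 * Worked example: "01234567-89ab-cdef-0123-456789abcdef" parses to
 * timeLow 0x01234567, timeMid 0x89ab, timeHiAndVersion 0xcdef and
 * clockSeqAndNode { 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef }.
 */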
663 static TEE_Result parse_uuid(const char *s, TEE_UUID *uuid)
664 {
665 	TEE_Result res = TEE_SUCCESS;
666 	TEE_UUID u = { 0 };
667 	const char *p = s;
668 	size_t i = 0;
669 
670 	if (strlen(p) != 36)
671 		return TEE_ERROR_BAD_FORMAT;
672 	if (p[8] != '-' || p[13] != '-' || p[18] != '-' || p[23] != '-')
673 		return TEE_ERROR_BAD_FORMAT;
674 
675 	u.timeLow = parse_hex(p, 8, &res);
676 	if (res)
677 		goto out;
678 	p += 9;
679 	u.timeMid = parse_hex(p, 4, &res);
680 	if (res)
681 		goto out;
682 	p += 5;
683 	u.timeHiAndVersion = parse_hex(p, 4, &res);
684 	if (res)
685 		goto out;
686 	p += 5;
687 	for (i = 0; i < 8; i++) {
688 		u.clockSeqAndNode[i] = parse_hex(p, 2, &res);
689 		if (res)
690 			goto out;
691 		if (i == 1)
692 			p += 3;
693 		else
694 			p += 2;
695 	}
696 	*uuid = u;
697 out:
698 	return res;
699 }
700 
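/*
 * Scan a PT_DYNAMIC segment for dependencies: first find DT_STRTAB, then
 * queue one ELF per DT_NEEDED entry, interpreting each entry's value as the
 * offset of a UUID string in the dynamic string table.
 */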
701 static void add_deps_from_segment(struct ta_elf *elf, unsigned int type,
702 				  vaddr_t addr, size_t memsz)
703 {
704 	size_t dyn_entsize = 0;
705 	size_t num_dyns = 0;
706 	size_t n = 0;
707 	unsigned int tag = 0;
708 	size_t val = 0;
709 	TEE_UUID uuid = { };
710 	char *str_tab = NULL;
711 
712 	if (type != PT_DYNAMIC)
713 		return;
714 
715 	if (elf->is_32bit)
716 		dyn_entsize = sizeof(Elf32_Dyn);
717 	else
718 		dyn_entsize = sizeof(Elf64_Dyn);
719 
720 	assert(!(memsz % dyn_entsize));
721 	num_dyns = memsz / dyn_entsize;
722 
723 	for (n = 0; n < num_dyns; n++) {
724 		read_dyn(elf, addr, n, &tag, &val);
725 		if (tag == DT_STRTAB) {
726 			str_tab = (char *)(val + elf->load_addr);
727 			break;
728 		}
729 	}
730 
731 	for (n = 0; n < num_dyns; n++) {
732 		read_dyn(elf, addr, n, &tag, &val);
733 		if (tag != DT_NEEDED)
734 			continue;
735 		parse_uuid(str_tab + val, &uuid);
736 		queue_elf(&uuid);
737 	}
738 }
739 
740 static void add_dependencies(struct ta_elf *elf)
741 {
742 	size_t n = 0;
743 
744 	if (elf->is_32bit) {
745 		Elf32_Phdr *phdr = elf->phdr;
746 
747 		for (n = 0; n < elf->e_phnum; n++)
748 			add_deps_from_segment(elf, phdr[n].p_type,
749 					      phdr[n].p_vaddr, phdr[n].p_memsz);
750 	} else {
751 		Elf64_Phdr *phdr = elf->phdr;
752 
753 		for (n = 0; n < elf->e_phnum; n++)
754 			add_deps_from_segment(elf, phdr[n].p_type,
755 					      phdr[n].p_vaddr, phdr[n].p_memsz);
756 	}
757 }
758 
759 static void copy_section_headers(struct ta_elf *elf)
760 {
761 	TEE_Result res = TEE_SUCCESS;
762 	size_t sz = elf->e_shnum * elf->e_shentsize;
763 	size_t offs = 0;
764 
765 	elf->shdr = malloc(sz);
766 	if (!elf->shdr)
767 		err(TEE_ERROR_OUT_OF_MEMORY, "malloc");
768 
769 	/*
770 	 * We're assuming that section headers come after the load segments,
771 	 * but if it's a very small dynamically linked library the section
772 	 * headers can still end up (at least partially) in the first mapped page.
773 	 */
774 	if (elf->e_shoff < SMALL_PAGE_SIZE) {
775 		assert(!elf->is_main);
776 		offs = MIN(SMALL_PAGE_SIZE - elf->e_shoff, sz);
777 		memcpy(elf->shdr, (void *)(elf->load_addr + elf->e_shoff),
778 		       offs);
779 	}
780 
781 	if (offs < sz) {
782 		res = sys_copy_from_ta_bin((uint8_t *)elf->shdr + offs,
783 					   sz - offs, elf->handle,
784 					   elf->e_shoff + offs);
785 		if (res)
786 			err(res, "sys_copy_from_ta_bin");
787 	}
788 }
789 
790 static void close_handle(struct ta_elf *elf)
791 {
792 	TEE_Result res = sys_close_ta_bin(elf->handle);
793 
794 	if (res)
795 		err(res, "sys_close_ta_bin");
796 	elf->handle = -1;
797 }
798 
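/*
 * Load the main TA: map its segments, queue any DT_NEEDED dependencies,
 * save the dynamic symbol table and allocate the stack. The entry point,
 * initial stack pointer, TA flags and whether the TA is 32-bit are returned
 * through the output parameters.
 */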
799 void ta_elf_load_main(const TEE_UUID *uuid, uint32_t *is_32bit,
800 		      uint64_t *entry, uint64_t *sp, uint32_t *ta_flags)
801 {
802 	struct ta_elf *elf = queue_elf(uuid);
803 	struct ta_head *head = NULL;
804 	vaddr_t va = 0;
805 	TEE_Result res = TEE_SUCCESS;
806 
807 	assert(elf);
808 	elf->is_main = true;
809 
810 	init_elf(elf);
811 
812 	/*
813 	 * Legacy TAs don't set the ELF entry point, instead it's provided
814 	 * in ta_head. When the entry point isn't set explicitly, the linker
815 	 * sets it to the start of the first executable section. Since
816 	 * ta_head always comes first in a legacy TA, the entry point ends
817 	 * up at 0x20, that is, sizeof(struct ta_head).
818 	 *
819 	 * NB, everything before commit a73b5878c89d ("Replace
820 	 * ta_head.entry with elf entry") is considered a legacy TA by
821 	 * ldelf.
822 	 */
823 	if (elf->e_entry == sizeof(*head))
824 		elf->is_legacy = true;
825 
826 	map_segments(elf);
827 	add_dependencies(elf);
828 	copy_section_headers(elf);
829 	save_symtab(elf);
830 	close_handle(elf);
831 
832 	head = (struct ta_head *)elf->load_addr;
833 
834 	*is_32bit = elf->is_32bit;
835 	if (elf->is_legacy) {
836 		assert(head->depr_entry != UINT64_MAX);
837 		*entry = head->depr_entry + elf->load_addr;
838 	} else {
839 		assert(head->depr_entry == UINT64_MAX);
840 		*entry = elf->e_entry + elf->load_addr;
841 	}
842 
843 	res = sys_map_zi(head->stack_size, 0, &va, 0, 0);
844 	if (res)
845 		err(res, "sys_map_zi stack");
846 
847 	if (head->flags & ~TA_FLAGS_MASK)
848 		err(TEE_ERROR_BAD_FORMAT, "Invalid TA flag(s) %#"PRIx32,
849 		    head->flags & ~TA_FLAGS_MASK);
850 
851 	*ta_flags = head->flags;
852 	*sp = va + head->stack_size;
853 	ta_stack = va;
854 	ta_stack_size = head->stack_size;
855 }
856 
857 void ta_elf_load_dependency(struct ta_elf *elf, bool is_32bit)
858 {
859 	if (elf->is_main)
860 		return;
861 
862 	init_elf(elf);
863 	if (elf->is_32bit != is_32bit)
864 		err(TEE_ERROR_BAD_FORMAT, "ELF %pUl is %sbit (expected %sbit)",
865 		    (void *)&elf->uuid, elf->is_32bit ? "32" : "64",
866 		    is_32bit ? "32" : "64");
867 
868 	map_segments(elf);
869 	add_dependencies(elf);
870 	copy_section_headers(elf);
871 	save_symtab(elf);
872 	close_handle(elf);
873 }
874 
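/*
 * For legacy TAs all segments were mapped writeable during loading, so the
 * intended permissions are applied here once loading is done. For
 * non-legacy TAs this is a no-op.
 */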
875 void ta_elf_finalize_mappings(struct ta_elf *elf)
876 {
877 	TEE_Result res = TEE_SUCCESS;
878 	struct segment *seg = NULL;
879 
880 	if (!elf->is_legacy)
881 		return;
882 
883 	TAILQ_FOREACH(seg, &elf->segs, link) {
884 		vaddr_t va = elf->load_addr + seg->vaddr;
885 		uint32_t flags = 0;
886 
887 		if (seg->flags & PF_W)
888 			flags |= PTA_SYSTEM_MAP_FLAG_WRITEABLE;
889 		if (seg->flags & PF_X)
890 			flags |= PTA_SYSTEM_MAP_FLAG_EXECUTABLE;
891 
892 		res = sys_set_prot(va, seg->memsz, flags);
893 		if (res)
894 			err(res, "sys_set_prot");
895 	}
896 }
897 
898 static void print_seg(size_t idx __maybe_unused, int elf_idx __maybe_unused,
899 		      vaddr_t va __maybe_unused, paddr_t pa __maybe_unused,
900 		      size_t sz __maybe_unused, uint32_t flags)
901 {
902 	int width __maybe_unused = 8;
903 	char desc[14] __maybe_unused = "";
904 	char flags_str[] __maybe_unused = "----";
905 
906 	if (elf_idx > -1) {
907 		snprintf(desc, sizeof(desc), " [%d]", elf_idx);
908 	} else {
909 		if (flags & DUMP_MAP_EPHEM)
910 			snprintf(desc, sizeof(desc), " (param)");
911 		if (flags & DUMP_MAP_LDELF)
912 			snprintf(desc, sizeof(desc), " (ldelf)");
913 		if (va == ta_stack)
914 			snprintf(desc, sizeof(desc), " (stack)");
915 	}
916 
917 	if (flags & DUMP_MAP_READ)
918 		flags_str[0] = 'r';
919 	if (flags & DUMP_MAP_WRITE)
920 		flags_str[1] = 'w';
921 	if (flags & DUMP_MAP_EXEC)
922 		flags_str[2] = 'x';
923 	if (flags & DUMP_MAP_SECURE)
924 		flags_str[3] = 's';
925 
926 	EMSG_RAW("region %2zu: va 0x%0*"PRIxVA" pa 0x%0*"PRIxPA" size 0x%06zx flags %s%s",
927 		 idx, width, va, width, pa, sz, flags_str, desc);
928 }
929 
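/*
 * Walk all segments of all ELFs in @elf_queue in increasing load address
 * order: advance to the next segment of the current ELF if there is one,
 * otherwise pick the ELF with the next higher load address and start over
 * with its first segment. Returns false when everything has been visited.
 */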
930 static bool get_next_in_order(struct ta_elf_queue *elf_queue,
931 			      struct ta_elf **elf, struct segment **seg,
932 			      size_t *elf_idx)
933 {
934 	struct ta_elf *e = NULL;
935 	struct segment *s = NULL;
936 	size_t idx = 0;
937 	vaddr_t va = 0;
938 	struct ta_elf *e2 = NULL;
939 	size_t i2 = 0;
940 
941 	assert(elf && seg && elf_idx);
942 	e = *elf;
943 	s = *seg;
944 	assert((e == NULL && s == NULL) || (e != NULL && s != NULL));
945 
946 	if (s) {
947 		s = TAILQ_NEXT(s, link);
948 		if (s) {
949 			*seg = s;
950 			return true;
951 		}
952 	}
953 
954 	if (e)
955 		va = e->load_addr;
956 
957 	/* Find the ELF with next load address */
958 	e = NULL;
959 	TAILQ_FOREACH(e2, elf_queue, link) {
960 		if (e2->load_addr > va) {
961 			if (!e || e2->load_addr < e->load_addr) {
962 				e = e2;
963 				idx = i2;
964 			}
965 		}
966 		i2++;
967 	}
968 	if (!e)
969 		return false;
970 
971 	*elf = e;
972 	*seg = TAILQ_FIRST(&e->segs);
973 	*elf_idx = idx;
974 	return true;
975 }
976 
977 void ta_elf_print_mappings(struct ta_elf_queue *elf_queue, size_t num_maps,
978 			   struct dump_map *maps, vaddr_t mpool_base)
979 {
980 	struct segment *seg = NULL;
981 	struct ta_elf *elf = NULL;
982 	size_t elf_idx = 0;
983 	size_t idx = 0;
984 	size_t map_idx = 0;
985 
986 	/*
987 	 * Loop over all segments and maps, printing virtual addresses in
988 	 * order. A segment has priority if the virtual address is present
989 	 * in both a map and a segment.
990 	 */
991 	get_next_in_order(elf_queue, &elf, &seg, &elf_idx);
992 	while (true) {
993 		vaddr_t va = -1;
994 		size_t sz = 0;
995 		uint32_t flags = DUMP_MAP_SECURE;
996 		size_t offs = 0;
997 
998 		if (seg) {
999 			va = rounddown(seg->vaddr + elf->load_addr);
1000 			sz = roundup(seg->vaddr + seg->memsz) -
1001 				     rounddown(seg->vaddr);
1002 		}
1003 
1004 		while (map_idx < num_maps && maps[map_idx].va <= va) {
1005 			uint32_t f = 0;
1006 
1007 			/* If there's a match, it should be the same map */
1008 			if (maps[map_idx].va == va) {
1009 				/*
1010 				 * In shared libraries the first page is
1011 				 * mapped separately with the rest of that
1012 				 * segment following back to back in a
1013 				 * separate entry.
1014 				 */
1015 				if (map_idx + 1 < num_maps &&
1016 				    maps[map_idx].sz == SMALL_PAGE_SIZE) {
1017 					vaddr_t next_va = maps[map_idx].va +
1018 							  maps[map_idx].sz;
1019 					size_t comb_sz = maps[map_idx].sz +
1020 							 maps[map_idx + 1].sz;
1021 
1022 					if (next_va == maps[map_idx + 1].va &&
1023 					    comb_sz == sz &&
1024 					    maps[map_idx].flags ==
1025 					    maps[map_idx + 1].flags) {
1026 						/* Skip this and next entry */
1027 						map_idx += 2;
1028 						continue;
1029 					}
1030 				}
1031 				assert(maps[map_idx].sz == sz);
1032 			} else if (maps[map_idx].va < va) {
1033 				if (maps[map_idx].va == mpool_base)
1034 					f |= DUMP_MAP_LDELF;
1035 				print_seg(idx, -1, maps[map_idx].va,
1036 					  maps[map_idx].pa, maps[map_idx].sz,
1037 					  maps[map_idx].flags | f);
1038 				idx++;
1039 			}
1040 			map_idx++;
1041 		}
1042 
1043 		if (!seg)
1044 			break;
1045 
1046 		offs = rounddown(seg->offset);
1047 		if (seg->flags & PF_R)
1048 			flags |= DUMP_MAP_READ;
1049 		if (seg->flags & PF_W)
1050 			flags |= DUMP_MAP_WRITE;
1051 		if (seg->flags & PF_X)
1052 			flags |= DUMP_MAP_EXEC;
1053 
1054 		print_seg(idx, elf_idx, va, offs, sz, flags);
1055 		idx++;
1056 
1057 		if (!get_next_in_order(elf_queue, &elf, &seg, &elf_idx))
1058 			seg = NULL;
1059 	}
1060 
1061 	elf_idx = 0;
1062 	TAILQ_FOREACH(elf, elf_queue, link) {
1063 		EMSG_RAW(" [%zu] %pUl @ 0x%0*" PRIxVA,
1064 			 elf_idx, (void *)&elf->uuid, 8, elf->load_addr);
1065 		elf_idx++;
1066 	}
1067 }
1068 
1069 #ifdef CFG_UNWIND
1070 void ta_elf_stack_trace_a32(uint32_t regs[16])
1071 {
1072 	struct unwind_state_arm32 state = { };
1073 
1074 	memcpy(state.registers, regs, sizeof(state.registers));
1075 	print_stack_arm32(&state, ta_stack, ta_stack_size);
1076 }
1077 
1078 void ta_elf_stack_trace_a64(uint64_t fp, uint64_t sp, uint64_t pc)
1079 {
1080 	struct unwind_state_arm64 state = { .fp = fp, .sp = sp, .pc = pc };
1081 
1082 	print_stack_arm64(&state, ta_stack, ta_stack_size);
1083 }
1084 #endif
1085