xref: /optee_os/ldelf/ta_elf.c (revision d1911a85142da16fef5ebdcdac0348d29ce37cd8)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2019, Linaro Limited
4  */
5 
6 #include <assert.h>
7 #include <ctype.h>
8 #include <elf32.h>
9 #include <elf64.h>
10 #include <elf_common.h>
11 #include <pta_system.h>
12 #include <stdlib.h>
13 #include <string_ext.h>
14 #include <string.h>
15 #include <tee_api_types.h>
16 #include <user_ta_header.h>
17 
18 #include "sys.h"
19 #include "ta_elf.h"
20 
21 struct ta_elf_queue main_elf_queue = TAILQ_HEAD_INITIALIZER(main_elf_queue);
22 
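/*
 * Allocate a ta_elf for @uuid and append it to main_elf_queue, or return
 * NULL if an ELF with that UUID is already queued. err() is called if the
 * allocation fails.
 */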
23 static struct ta_elf *queue_elf(const TEE_UUID *uuid)
24 {
25 	struct ta_elf *elf = NULL;
26 
27 	TAILQ_FOREACH(elf, &main_elf_queue, link)
28 		if (!memcmp(uuid, &elf->uuid, sizeof(*uuid)))
29 			return NULL;
30 
31 	elf = calloc(1, sizeof(*elf));
32 	if (!elf)
33 		err(TEE_ERROR_OUT_OF_MEMORY, "calloc");
34 
35 	TAILQ_INIT(&elf->segs);
36 
37 	elf->uuid = *uuid;
38 	TAILQ_INSERT_TAIL(&main_elf_queue, elf, link);
39 	return elf;
40 }
41 
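/*
 * Check that @ehdr describes a little-endian ELF32 ET_DYN image for EM_ARM
 * with the expected ABI and header entry sizes, and save the header fields
 * needed later in @elf.
 */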
42 static TEE_Result e32_parse_ehdr(struct ta_elf *elf, Elf32_Ehdr *ehdr)
43 {
44 	if (ehdr->e_ident[EI_VERSION] != EV_CURRENT ||
45 	    ehdr->e_ident[EI_CLASS] != ELFCLASS32 ||
46 	    ehdr->e_ident[EI_DATA] != ELFDATA2LSB ||
47 	    ehdr->e_ident[EI_OSABI] != ELFOSABI_NONE ||
48 	    ehdr->e_type != ET_DYN || ehdr->e_machine != EM_ARM ||
49 	    (ehdr->e_flags & EF_ARM_ABIMASK) != EF_ARM_ABI_VERSION ||
50 #ifndef CFG_WITH_VFP
51 	    (ehdr->e_flags & EF_ARM_ABI_FLOAT_HARD) ||
52 #endif
53 	    ehdr->e_phentsize != sizeof(Elf32_Phdr) ||
54 	    ehdr->e_shentsize != sizeof(Elf32_Shdr))
55 		return TEE_ERROR_BAD_FORMAT;
56 
57 	elf->is_32bit = true;
58 	elf->e_entry = ehdr->e_entry;
59 	elf->e_phoff = ehdr->e_phoff;
60 	elf->e_shoff = ehdr->e_shoff;
61 	elf->e_phnum = ehdr->e_phnum;
62 	elf->e_shnum = ehdr->e_shnum;
63 	elf->e_phentsize = ehdr->e_phentsize;
64 	elf->e_shentsize = ehdr->e_shentsize;
65 
66 	return TEE_SUCCESS;
67 }
68 
69 #ifdef ARM64
70 static TEE_Result e64_parse_ehdr(struct ta_elf *elf, Elf64_Ehdr *ehdr)
71 {
72 	if (ehdr->e_ident[EI_VERSION] != EV_CURRENT ||
73 	    ehdr->e_ident[EI_CLASS] != ELFCLASS64 ||
74 	    ehdr->e_ident[EI_DATA] != ELFDATA2LSB ||
75 	    ehdr->e_ident[EI_OSABI] != ELFOSABI_NONE ||
76 	    ehdr->e_type != ET_DYN || ehdr->e_machine != EM_AARCH64 ||
77 	    ehdr->e_flags || ehdr->e_phentsize != sizeof(Elf64_Phdr) ||
78 	    ehdr->e_shentsize != sizeof(Elf64_Shdr))
79 		return TEE_ERROR_BAD_FORMAT;
80 
81 
82 	elf->is_32bit = false;
83 	elf->e_entry = ehdr->e_entry;
84 	elf->e_phoff = ehdr->e_phoff;
85 	elf->e_shoff = ehdr->e_shoff;
86 	elf->e_phnum = ehdr->e_phnum;
87 	elf->e_shnum = ehdr->e_shnum;
88 	elf->e_phentsize = ehdr->e_phentsize;
89 	elf->e_shentsize = ehdr->e_shentsize;
90 
91 	return TEE_SUCCESS;
92 }
93 #else /*ARM64*/
94 static TEE_Result e64_parse_ehdr(struct ta_elf *elf __unused,
95 				 Elf64_Ehdr *ehdr __unused)
96 {
97 	return TEE_ERROR_NOT_SUPPORTED;
98 }
99 #endif /*ARM64*/
100 
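/*
 * Read tag and value of dynamic section entry @idx at unrelocated address
 * @addr, for both 32-bit and 64-bit ELFs.
 */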
101 static void read_dyn(struct ta_elf *elf, vaddr_t addr,
102 		     size_t idx, unsigned int *tag, size_t *val)
103 {
104 	if (elf->is_32bit) {
105 		Elf32_Dyn *dyn = (Elf32_Dyn *)(addr + elf->load_addr);
106 
107 		*tag = dyn[idx].d_tag;
108 		*val = dyn[idx].d_un.d_val;
109 	} else {
110 		Elf64_Dyn *dyn = (Elf64_Dyn *)(addr + elf->load_addr);
111 
112 		*tag = dyn[idx].d_tag;
113 		*val = dyn[idx].d_un.d_val;
114 	}
115 }
116 
117 static void e32_save_symtab(struct ta_elf *elf, size_t tab_idx)
118 {
119 	Elf32_Shdr *shdr = elf->shdr;
120 	size_t str_idx = shdr[tab_idx].sh_link;
121 
122 	elf->dynsymtab = (void *)(shdr[tab_idx].sh_addr + elf->load_addr);
123 	assert(!(shdr[tab_idx].sh_size % sizeof(Elf32_Sym)));
124 	elf->num_dynsyms = shdr[tab_idx].sh_size / sizeof(Elf32_Sym);
125 
126 	elf->dynstr = (void *)(shdr[str_idx].sh_addr + elf->load_addr);
127 	elf->dynstr_size = shdr[str_idx].sh_size;
128 }
129 
130 static void e64_save_symtab(struct ta_elf *elf, size_t tab_idx)
131 {
132 	Elf64_Shdr *shdr = elf->shdr;
133 	size_t str_idx = shdr[tab_idx].sh_link;
134 
135 	elf->dynsymtab = (void *)(vaddr_t)(shdr[tab_idx].sh_addr +
136 					   elf->load_addr);
137 	assert(!(shdr[tab_idx].sh_size % sizeof(Elf64_Sym)));
138 	elf->num_dynsyms = shdr[tab_idx].sh_size / sizeof(Elf64_Sym);
139 
140 	elf->dynstr = (void *)(vaddr_t)(shdr[str_idx].sh_addr + elf->load_addr);
141 	elf->dynstr_size = shdr[str_idx].sh_size;
142 }
143 
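/*
 * Find the first SHT_DYNSYM section header and save the location of the
 * dynamic symbol table and its string table.
 */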
144 static void save_symtab(struct ta_elf *elf)
145 {
146 	size_t n = 0;
147 
148 	if (elf->is_32bit) {
149 		Elf32_Shdr *shdr = elf->shdr;
150 
151 		for (n = 0; n < elf->e_shnum; n++) {
152 			if (shdr[n].sh_type == SHT_DYNSYM) {
153 				e32_save_symtab(elf, n);
154 				break;
155 			}
156 		}
157 	} else {
158 		Elf64_Shdr *shdr = elf->shdr;
159 
160 		for (n = 0; n < elf->e_shnum; n++) {
161 			if (shdr[n].sh_type == SHT_DYNSYM) {
162 				e64_save_symtab(elf, n);
163 				break;
164 			}
165 		}
166 
167 	}
168 }
169 
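/*
 * Open the TA binary and map the first page so the ELF header can be
 * parsed. For libraries this first mapping also becomes the start of the
 * load mapping.
 */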
170 static void init_elf(struct ta_elf *elf)
171 {
172 	TEE_Result res = TEE_SUCCESS;
173 	vaddr_t va = 0;
174 	uint32_t flags = PTA_SYSTEM_MAP_FLAG_SHAREABLE;
175 	const size_t max_align = 0x10000;
176 
177 	res = sys_open_ta_bin(&elf->uuid, &elf->handle);
178 	if (res)
179 		err(res, "sys_open_ta_bin(%pUl)", (void *)&elf->uuid);
180 
181 	/*
182 	 * Map it read-only executable when we're loading a library where
183 	 * the ELF header is included in a load segment.
184 	 */
185 	if (!elf->is_main)
186 		flags |= PTA_SYSTEM_MAP_FLAG_EXECUTABLE;
187 	/*
188 	 * Add a 64 kB (max_align) pad at the end in case a library with
189 	 * this large alignment has been mapped before. We want to avoid
190 	 * ending up in a hole in the mapping of a library.
191 	 */
192 	res = sys_map_ta_bin(&va, SMALL_PAGE_SIZE, flags, elf->handle, 0, 0,
193 			     max_align);
194 	if (res)
195 		err(res, "sys_map_ta_bin");
196 	elf->ehdr_addr = va;
197 	if (!elf->is_main) {
198 		elf->load_addr = va;
199 		elf->max_addr = va + SMALL_PAGE_SIZE;
200 		elf->max_offs = SMALL_PAGE_SIZE;
201 	}
202 
203 	if (!IS_ELF(*(Elf32_Ehdr *)va))
204 		err(TEE_ERROR_BAD_FORMAT, "TA is not an ELF");
205 
206 	res = e32_parse_ehdr(elf, (void *)va);
207 	if (res == TEE_ERROR_BAD_FORMAT)
208 		res = e64_parse_ehdr(elf, (void *)va);
209 	if (res)
210 		err(res, "Cannot parse ELF");
211 
212 	if (elf->e_phoff + elf->e_phnum * elf->e_phentsize > SMALL_PAGE_SIZE)
213 		err(TEE_ERROR_NOT_SUPPORTED, "Cannot read program headers");
214 
215 	elf->phdr = (void *)(va + elf->e_phoff);
216 }
217 
218 static size_t roundup(size_t v)
219 {
220 	return ROUNDUP(v, SMALL_PAGE_SIZE);
221 }
222 
223 static size_t rounddown(size_t v)
224 {
225 	return ROUNDDOWN(v, SMALL_PAGE_SIZE);
226 }
227 
228 static void add_segment(struct ta_elf *elf, size_t offset, size_t vaddr,
229 			size_t filesz, size_t memsz, size_t flags, size_t align)
230 {
231 	struct segment *seg = calloc(1, sizeof(*seg));
232 
233 	if (!seg)
234 		err(TEE_ERROR_OUT_OF_MEMORY, "calloc");
235 
236 	seg->offset = offset;
237 	seg->vaddr = vaddr;
238 	seg->filesz = filesz;
239 	seg->memsz = memsz;
240 	seg->flags = flags;
241 	seg->align = align;
242 
243 	TAILQ_INSERT_TAIL(&elf->segs, seg, link);
244 }
245 
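/* Queue one struct segment for each PT_LOAD program header. */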
246 static void parse_load_segments(struct ta_elf *elf)
247 {
248 	size_t n = 0;
249 
250 	if (elf->is_32bit) {
251 		Elf32_Phdr *phdr = elf->phdr;
252 
253 		for (n = 0; n < elf->e_phnum; n++)
254 			if (phdr[n].p_type == PT_LOAD)
255 				add_segment(elf, phdr[n].p_offset,
256 					    phdr[n].p_vaddr, phdr[n].p_filesz,
257 					    phdr[n].p_memsz, phdr[n].p_flags,
258 					    phdr[n].p_align);
259 	} else {
260 		Elf64_Phdr *phdr = elf->phdr;
261 
262 		for (n = 0; n < elf->e_phnum; n++)
263 			if (phdr[n].p_type == PT_LOAD)
264 				add_segment(elf, phdr[n].p_offset,
265 					    phdr[n].p_vaddr, phdr[n].p_filesz,
266 					    phdr[n].p_memsz, phdr[n].p_flags,
267 					    phdr[n].p_align);
268 	}
269 }
270 
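/*
 * Fill a remapped writeable segment with its file data: first the part
 * that is already available in the previously mapped pages, then the
 * remainder directly from the TA binary.
 */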
271 static void copy_remapped_to(struct ta_elf *elf, const struct segment *seg)
272 {
273 	uint8_t *dst = (void *)(seg->vaddr + elf->load_addr);
274 	size_t n = 0;
275 	size_t offs = seg->offset;
276 	size_t num_bytes = seg->filesz;
277 
278 	if (offs < elf->max_offs) {
279 		n = MIN(elf->max_offs - offs, num_bytes);
280 		memcpy(dst, (void *)(elf->max_addr + offs - elf->max_offs), n);
281 		dst += n;
282 		offs += n;
283 		num_bytes -= n;
284 	}
285 
286 	if (num_bytes) {
287 		TEE_Result res = sys_copy_from_ta_bin(dst, num_bytes,
288 						      elf->handle, offs);
289 
290 		if (res)
291 			err(res, "sys_copy_from_ta_bin");
292 		elf->max_offs += offs;
293 	}
294 }
295 
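/*
 * Sanity check the queued segments and adjust them for mapping: merge
 * segments whose virtual addresses share a page (case 1 below), flag
 * segments that remap an already loaded file page (case 2) and page align
 * the remaining addresses, offsets and sizes.
 */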
296 static void adjust_segments(struct ta_elf *elf)
297 {
298 	struct segment *seg = NULL;
299 	struct segment *prev_seg = NULL;
300 	size_t prev_end_addr = 0;
301 	size_t align = 0;
302 	size_t mask = 0;
303 
304 	/* Sanity check */
305 	TAILQ_FOREACH(seg, &elf->segs, link) {
306 		size_t dummy __maybe_unused = 0;
307 
308 		assert(seg->align >= SMALL_PAGE_SIZE);
309 		assert(!ADD_OVERFLOW(seg->vaddr, seg->memsz, &dummy));
310 		assert(seg->filesz <= seg->memsz);
311 		assert((seg->offset & SMALL_PAGE_MASK) ==
312 		       (seg->vaddr & SMALL_PAGE_MASK));
313 
314 		prev_seg = TAILQ_PREV(seg, segment_head, link);
315 		if (prev_seg) {
316 			assert(seg->vaddr >= prev_seg->vaddr + prev_seg->memsz);
317 			assert(seg->offset >=
318 			       prev_seg->offset + prev_seg->filesz);
319 		}
320 		if (!align)
321 			align = seg->align;
322 		assert(align == seg->align);
323 	}
324 
325 	mask = align - 1;
326 
327 	seg = TAILQ_FIRST(&elf->segs);
328 	if (seg)
329 		seg = TAILQ_NEXT(seg, link);
330 	while (seg) {
331 		prev_seg = TAILQ_PREV(seg, segment_head, link);
332 		prev_end_addr = prev_seg->vaddr + prev_seg->memsz;
333 
334 		/*
335 		 * This segment may overlap with the last "page" in the
336 		 * previous segment in two different ways:
337 		 * 1. Virtual address (and offset) overlaps =>
338 		 *    Permissions need to be merged. The offset must have
339 		 *    the same SMALL_PAGE_MASK bits as the vaddr, and both
340 		 *    must line up with the previous segment.
341 		 *
342 		 * 2. Only offset overlaps =>
343 		 *    The same page in the ELF is mapped at two different
344 		 *    virtual addresses. As a limitation this segment must
345 		 *    be mapped as writeable.
346 		 */
347 
348 		/* Case 1. */
349 		if (rounddown(seg->vaddr) < prev_end_addr) {
350 			assert((seg->vaddr & mask) == (seg->offset & mask));
351 			assert(prev_seg->memsz == prev_seg->filesz);
352 
353 			/*
354 			 * Merge the segments and their permissions.
355 			 * Note that there may be a small hole between the
356 			 * two segments.
357 			 */
358 			prev_seg->filesz = seg->vaddr + seg->filesz -
359 					   prev_seg->vaddr;
360 			prev_seg->memsz = seg->vaddr + seg->memsz -
361 					   prev_seg->vaddr;
362 			prev_seg->flags |= seg->flags;
363 
364 			TAILQ_REMOVE(&elf->segs, seg, link);
365 			free(seg);
366 			seg = TAILQ_NEXT(prev_seg, link);
367 			continue;
368 		}
369 
370 		/* Case 2. */
371 		if ((seg->offset & mask) &&
372 		    rounddown(seg->offset) <
373 		    (prev_seg->offset + prev_seg->filesz)) {
374 
375 			assert(seg->flags & PF_W);
376 			seg->remapped_writeable = true;
377 		}
378 
379 		/*
380 		 * No overlap, but we may need to align address, offset and
381 		 * size.
382 		 */
383 		seg->filesz += seg->vaddr - rounddown(seg->vaddr);
384 		seg->memsz += seg->vaddr - rounddown(seg->vaddr);
385 		seg->vaddr = rounddown(seg->vaddr);
386 		seg->offset = rounddown(seg->offset);
387 		seg = TAILQ_NEXT(seg, link);
388 	}
389 
390 }
391 
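/*
 * For legacy TAs: map zero-initialized memory for every segment and copy
 * the file contents into it. Final permissions are applied later by
 * ta_elf_finalize_mappings().
 */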
392 static void populate_segments_legacy(struct ta_elf *elf)
393 {
394 	TEE_Result res = TEE_SUCCESS;
395 	struct segment *seg = NULL;
396 	vaddr_t va = 0;
397 
398 	TAILQ_FOREACH(seg, &elf->segs, link) {
399 		struct segment *last_seg = TAILQ_LAST(&elf->segs, segment_head);
400 		size_t pad_end = roundup(last_seg->vaddr + last_seg->memsz -
401 					 seg->vaddr - seg->memsz);
402 		size_t num_bytes = roundup(seg->memsz);
403 
404 		if (!elf->load_addr)
405 			va = 0;
406 		else
407 			va = seg->vaddr + elf->load_addr;
408 
409 
410 		if (!(seg->flags & PF_R))
411 			err(TEE_ERROR_NOT_SUPPORTED,
412 			    "Segment must be readable");
413 
414 		res = sys_map_zi(num_bytes, 0, &va, 0, pad_end);
415 		if (res)
416 			err(res, "sys_map_zi");
417 		res = sys_copy_from_ta_bin((void *)va, seg->filesz,
418 					   elf->handle, seg->offset);
419 		if (res)
420 			err(res, "sys_copy_from_ta_bin");
421 
422 		if (!elf->load_addr)
423 			elf->load_addr = va;
424 		elf->max_addr = va + num_bytes;
425 		elf->max_offs = seg->offset + seg->filesz;
426 	}
427 }
428 
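/*
 * Map each segment with its final flags: writeable segments are mapped as
 * zero-initialized memory and filled from the TA binary, read-only
 * segments are mapped directly from the binary. Overlap with the page
 * already mapped by init_elf() is handled as a special case.
 */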
429 static void populate_segments(struct ta_elf *elf)
430 {
431 	TEE_Result res = TEE_SUCCESS;
432 	struct segment *seg = NULL;
433 	vaddr_t va = 0;
434 
435 	TAILQ_FOREACH(seg, &elf->segs, link) {
436 		struct segment *last_seg = TAILQ_LAST(&elf->segs, segment_head);
437 		size_t pad_end = roundup(last_seg->vaddr + last_seg->memsz -
438 					 seg->vaddr - seg->memsz);
439 
440 		if (seg->remapped_writeable) {
441 			size_t num_bytes = roundup(seg->vaddr + seg->memsz) -
442 					   rounddown(seg->vaddr);
443 
444 			assert(elf->load_addr);
445 			va = rounddown(elf->load_addr + seg->vaddr);
446 			assert(va >= elf->max_addr);
447 			res = sys_map_zi(num_bytes, 0, &va, 0, pad_end);
448 			if (res)
449 				err(res, "sys_map_zi");
450 
451 			copy_remapped_to(elf, seg);
452 			elf->max_addr = va + num_bytes;
453 		} else {
454 			uint32_t flags = 0;
455 			size_t filesz = seg->filesz;
456 			size_t memsz = seg->memsz;
457 			size_t offset = seg->offset;
458 			size_t vaddr = seg->vaddr;
459 
460 			if (offset < elf->max_offs) {
461 				/*
462 				 * We're in a load segment which overlaps
463 				 * with (or is covered by) the first page
464 				 * of a shared library.
465 				 */
466 				if (vaddr + filesz < SMALL_PAGE_SIZE) {
467 					size_t num_bytes = 0;
468 
469 					/*
470 					 * If this segment is completely
471 					 * covered, take next.
472 					 */
473 					if (vaddr + memsz <= SMALL_PAGE_SIZE)
474 						continue;
475 
476 					/*
477 					 * All data of the segment is
478 					 * loaded, but we need to zero
479 					 * extend it.
480 					 */
481 					va = elf->max_addr;
482 					num_bytes = roundup(vaddr + memsz) -
483 						    roundup(vaddr) -
484 						    SMALL_PAGE_SIZE;
485 					assert(num_bytes);
486 					res = sys_map_zi(num_bytes, 0, &va, 0,
487 							 0);
488 					if (res)
489 						err(res, "sys_map_zi");
490 					elf->max_addr = roundup(va + num_bytes);
491 					continue;
492 				}
493 
494 				/* Partial overlap, remove the first page. */
495 				vaddr += SMALL_PAGE_SIZE;
496 				filesz -= SMALL_PAGE_SIZE;
497 				memsz -= SMALL_PAGE_SIZE;
498 				offset += SMALL_PAGE_SIZE;
499 			}
500 
501 			if (!elf->load_addr)
502 				va = 0;
503 			else
504 				va = vaddr + elf->load_addr;
505 
506 			if (seg->flags & PF_W)
507 				flags |= PTA_SYSTEM_MAP_FLAG_WRITEABLE;
508 			else
509 				flags |= PTA_SYSTEM_MAP_FLAG_SHAREABLE;
510 			if (seg->flags & PF_X)
511 				flags |= PTA_SYSTEM_MAP_FLAG_EXECUTABLE;
512 			if (!(seg->flags & PF_R))
513 				err(TEE_ERROR_NOT_SUPPORTED,
514 				    "Segment must be readable");
515 			if (flags & PTA_SYSTEM_MAP_FLAG_WRITEABLE) {
516 				res = sys_map_zi(memsz, 0, &va, 0, pad_end);
517 				if (res)
518 					err(res, "sys_map_zi");
519 				res = sys_copy_from_ta_bin((void *)va, filesz,
520 							   elf->handle, offset);
521 				if (res)
522 					err(res, "sys_copy_from_ta_bin");
523 			} else {
524 				res = sys_map_ta_bin(&va, filesz, flags,
525 						     elf->handle, offset,
526 						     0, pad_end);
527 				if (res)
528 					err(res, "sys_map_ta_bin");
529 			}
530 
531 			if (!elf->load_addr)
532 				elf->load_addr = va;
533 			elf->max_addr = roundup(va + filesz);
534 			elf->max_offs += filesz;
535 		}
536 	}
537 }
538 
539 static void map_segments(struct ta_elf *elf)
540 {
541 	parse_load_segments(elf);
542 	adjust_segments(elf);
543 	if (elf->is_legacy)
544 		populate_segments_legacy(elf);
545 	else
546 		populate_segments(elf);
547 }
548 
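/* Return the value of hex digit @c or -1 if @c isn't a hex digit. */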
549 static int hex(char c)
550 {
551 	char lc = tolower(c);
552 
553 	if (isdigit(lc))
554 		return lc - '0';
555 	if (isxdigit(lc))
556 		return lc - 'a' + 10;
557 	return -1;
558 }
559 
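/*
 * Parse @nchars hexadecimal characters from @s. The status is returned in
 * @res and the parsed value is the return value.
 */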
560 static uint32_t parse_hex(const char *s, size_t nchars, uint32_t *res)
561 {
562 	uint32_t v = 0;
563 	size_t n;
564 	int c;
565 
566 	for (n = 0; n < nchars; n++) {
567 		c = hex(s[n]);
568 		if (c < 0) {
569 			*res = TEE_ERROR_BAD_FORMAT;
570 			goto out;
571 		}
572 		v = (v << 4) + c;
573 	}
574 	*res = TEE_SUCCESS;
575 out:
576 	return v;
577 }
578 
579 /*
580  * Convert a UUID string @s into a TEE_UUID @uuid
581  * Expected format for @s is: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
582  * 'x' being any hexadecimal digit (0-9a-fA-F)
583  */
584 static TEE_Result parse_uuid(const char *s, TEE_UUID *uuid)
585 {
586 	TEE_Result res = TEE_SUCCESS;
587 	TEE_UUID u = { 0 };
588 	const char *p = s;
589 	size_t i;
590 
591 	if (strlen(p) != 36)
592 		return TEE_ERROR_BAD_FORMAT;
593 	if (p[8] != '-' || p[13] != '-' || p[18] != '-' || p[23] != '-')
594 		return TEE_ERROR_BAD_FORMAT;
595 
596 	u.timeLow = parse_hex(p, 8, &res);
597 	if (res)
598 		goto out;
599 	p += 9;
600 	u.timeMid = parse_hex(p, 4, &res);
601 	if (res)
602 		goto out;
603 	p += 5;
604 	u.timeHiAndVersion = parse_hex(p, 4, &res);
605 	if (res)
606 		goto out;
607 	p += 5;
608 	for (i = 0; i < 8; i++) {
609 		u.clockSeqAndNode[i] = parse_hex(p, 2, &res);
610 		if (res)
611 			goto out;
612 		if (i == 1)
613 			p += 3;
614 		else
615 			p += 2;
616 	}
617 	*uuid = u;
618 out:
619 	return res;
620 }
621 
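/*
 * If the program header describes a PT_DYNAMIC segment, queue each
 * DT_NEEDED dependency, whose name in the dynamic string table is a UUID
 * string, for loading.
 */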
622 static void add_deps_from_segment(struct ta_elf *elf, unsigned int type,
623 				  vaddr_t addr, size_t memsz)
624 {
625 	size_t dyn_entsize = 0;
626 	size_t num_dyns = 0;
627 	size_t n = 0;
628 	unsigned int tag = 0;
629 	size_t val = 0;
630 	TEE_UUID uuid = { };
631 	char *str_tab = NULL;
632 
633 	if (type != PT_DYNAMIC)
634 		return;
635 
636 	if (elf->is_32bit)
637 		dyn_entsize = sizeof(Elf32_Dyn);
638 	else
639 		dyn_entsize = sizeof(Elf64_Dyn);
640 
641 	assert(!(memsz % dyn_entsize));
642 	num_dyns = memsz / dyn_entsize;
643 
644 	for (n = 0; n < num_dyns; n++) {
645 		read_dyn(elf, addr, n, &tag, &val);
646 		if (tag == DT_STRTAB) {
647 			str_tab = (char *)(val + elf->load_addr);
648 			break;
649 		}
650 	}
651 
652 	for (n = 0; n < num_dyns; n++) {
653 		read_dyn(elf, addr, n, &tag, &val);
654 		if (tag != DT_NEEDED)
655 			continue;
656 		parse_uuid(str_tab + val, &uuid);
657 		queue_elf(&uuid);
658 	}
659 }
660 
661 static void add_dependencies(struct ta_elf *elf)
662 {
663 	size_t n = 0;
664 
665 	if (elf->is_32bit) {
666 		Elf32_Phdr *phdr = elf->phdr;
667 
668 		for (n = 0; n < elf->e_phnum; n++)
669 			add_deps_from_segment(elf, phdr[n].p_type,
670 					      phdr[n].p_vaddr, phdr[n].p_memsz);
671 	} else {
672 		Elf64_Phdr *phdr = elf->phdr;
673 
674 		for (n = 0; n < elf->e_phnum; n++)
675 			add_deps_from_segment(elf, phdr[n].p_type,
676 					      phdr[n].p_vaddr, phdr[n].p_memsz);
677 	}
678 }
679 
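/*
 * Copy the section header table into allocated memory, taking the part
 * that may already be mapped in the first page from there and the rest
 * from the TA binary.
 */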
680 static void copy_section_headers(struct ta_elf *elf)
681 {
682 	TEE_Result res = TEE_SUCCESS;
683 	size_t sz = elf->e_shnum * elf->e_shentsize;
684 	size_t offs = 0;
685 
686 	elf->shdr = malloc(sz);
687 	if (!elf->shdr)
688 		err(TEE_ERROR_OUT_OF_MEMORY, "malloc");
689 
690 	/*
691 	 * We're assuming that the section headers come after the load segments,
692 	 * but if it's a very small dynamically linked library the section
693 	 * headers can still end up (partially?) in the first mapped page.
694 	 */
695 	if (elf->e_shoff < SMALL_PAGE_SIZE) {
696 		assert(!elf->is_main);
697 		offs = MIN(SMALL_PAGE_SIZE - elf->e_shoff, sz);
698 		memcpy(elf->shdr, (void *)(elf->load_addr + elf->e_shoff),
699 		       offs);
700 	}
701 
702 	if (offs < sz) {
703 		res = sys_copy_from_ta_bin((uint8_t *)elf->shdr + offs,
704 					   sz - offs, elf->handle,
705 					   elf->e_shoff + offs);
706 		if (res)
707 			err(res, "sys_copy_from_ta_bin");
708 	}
709 }
710 
711 static void close_handle(struct ta_elf *elf)
712 {
713 	TEE_Result res = sys_close_ta_bin(elf->handle);
714 
715 	if (res)
716 		err(res, "sys_close_ta_bin");
717 	elf->handle = -1;
718 }
719 
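/*
 * Load the main TA: map its segments, queue its dependencies, save the
 * symbol table and allocate the stack. Entry point, stack pointer, bitness
 * and TA flags are returned to the caller.
 */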
720 void ta_elf_load_main(const TEE_UUID *uuid, uint32_t *is_32bit,
721 		      uint64_t *entry, uint64_t *sp, uint32_t *ta_flags)
722 {
723 	struct ta_elf *elf = queue_elf(uuid);
724 	struct ta_head *head;
725 	vaddr_t va = 0;
726 	TEE_Result res = TEE_SUCCESS;
727 
728 	assert(elf);
729 	elf->is_main = true;
730 
731 	init_elf(elf);
732 
733 	/*
734 	 * Legacy TAs don't set the ELF entry point; instead it's set in
735 	 * ta_head. If the entry point isn't set explicitly, the linker sets
736 	 * it to the start of the first executable section. Since ta_head
737 	 * always comes first in a legacy TA, the entry point then ends up
738 	 * at 0x20, that is, sizeof(struct ta_head).
739 	 *
740 	 * NB, everything before the commit a73b5878c89d ("Replace
741 	 * ta_head.entry with elf entry") is considered legacy TAs for
742 	 * ldelf.
743 	 */
744 	if (elf->e_entry == sizeof(*head))
745 		elf->is_legacy = true;
746 
747 	map_segments(elf);
748 	add_dependencies(elf);
749 	copy_section_headers(elf);
750 	save_symtab(elf);
751 	close_handle(elf);
752 
753 	head = (struct ta_head *)elf->load_addr;
754 
755 	*is_32bit = elf->is_32bit;
756 	if (elf->is_legacy) {
757 		assert(head->depr_entry != UINT64_MAX);
758 		*entry = head->depr_entry + elf->load_addr;
759 	} else {
760 		assert(head->depr_entry == UINT64_MAX);
761 		*entry = elf->e_entry + elf->load_addr;
762 	}
763 
764 	res = sys_map_zi(head->stack_size, 0, &va, 0, 0);
765 	if (res)
766 		err(res, "sys_map_zi stack");
767 
768 	if (head->flags & ~TA_FLAGS_MASK)
769 		err(TEE_ERROR_BAD_FORMAT, "Invalid TA flag(s) %#"PRIx32,
770 		    head->flags & ~TA_FLAGS_MASK);
771 
772 	*ta_flags = head->flags;
773 	*sp = va + head->stack_size;
774 }
775 
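/*
 * Load a queued dependency the same way as the main ELF, except that no
 * stack is allocated and the bitness must match @is_32bit.
 */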
776 void ta_elf_load_dependency(struct ta_elf *elf, bool is_32bit)
777 {
778 	if (elf->is_main)
779 		return;
780 
781 	init_elf(elf);
782 	if (elf->is_32bit != is_32bit)
783 		err(TEE_ERROR_BAD_FORMAT, "ELF %pUl is %sbit (expected %sbit)",
784 		    (void *)&elf->uuid, elf->is_32bit ? "32" : "64",
785 		    is_32bit ? "32" : "64");
786 
787 	map_segments(elf);
788 	add_dependencies(elf);
789 	copy_section_headers(elf);
790 	save_symtab(elf);
791 	close_handle(elf);
792 }
793 
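/*
 * For legacy TAs the segments were mapped writeable by
 * populate_segments_legacy(); apply the permissions from the program
 * headers now.
 */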
794 void ta_elf_finalize_mappings(struct ta_elf *elf)
795 {
796 	TEE_Result res = TEE_SUCCESS;
797 	struct segment *seg = NULL;
798 
799 	if (!elf->is_legacy)
800 		return;
801 
802 	TAILQ_FOREACH(seg, &elf->segs, link) {
803 		vaddr_t va = elf->load_addr + seg->vaddr;
804 		uint32_t flags = 0;
805 
806 		if (seg->flags & PF_W)
807 			flags |= PTA_SYSTEM_MAP_FLAG_WRITEABLE;
808 		if (seg->flags & PF_X)
809 			flags |= PTA_SYSTEM_MAP_FLAG_EXECUTABLE;
810 
811 		res = sys_set_prot(va, seg->memsz, flags);
812 		if (res)
813 			err(res, "sys_set_prot");
814 	}
815 }
816