// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2019, Linaro Limited
 */

#include <assert.h>
#include <ctype.h>
#include <elf32.h>
#include <elf64.h>
#include <elf_common.h>
#include <pta_system.h>
#include <stdlib.h>
#include <string_ext.h>
#include <string.h>
#include <tee_api_types.h>
#include <user_ta_header.h>

#include "sys.h"
#include "ta_elf.h"

struct ta_elf_queue main_elf_queue = TAILQ_HEAD_INITIALIZER(main_elf_queue);

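/*
 * Allocate a new ta_elf for @uuid and append it to main_elf_queue.
 * Returns NULL if an ELF with this UUID is already queued, so each
 * binary is only loaded once.
 */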
static struct ta_elf *queue_elf(const TEE_UUID *uuid)
{
	struct ta_elf *elf = NULL;

	TAILQ_FOREACH(elf, &main_elf_queue, link)
		if (!memcmp(uuid, &elf->uuid, sizeof(*uuid)))
			return NULL;

	elf = calloc(1, sizeof(*elf));
	if (!elf)
		err(TEE_ERROR_OUT_OF_MEMORY, "calloc");

	TAILQ_INIT(&elf->segs);

	elf->uuid = *uuid;
	TAILQ_INSERT_TAIL(&main_elf_queue, elf, link);
	return elf;
}

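/*
 * Validate a 32-bit ELF header (little-endian ET_DYN for EM_ARM) and
 * cache the header fields needed later in @elf.
 */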
static TEE_Result e32_parse_ehdr(struct ta_elf *elf, Elf32_Ehdr *ehdr)
{
	if (ehdr->e_ident[EI_VERSION] != EV_CURRENT ||
	    ehdr->e_ident[EI_CLASS] != ELFCLASS32 ||
	    ehdr->e_ident[EI_DATA] != ELFDATA2LSB ||
	    ehdr->e_ident[EI_OSABI] != ELFOSABI_NONE ||
	    ehdr->e_type != ET_DYN || ehdr->e_machine != EM_ARM ||
	    (ehdr->e_flags & EF_ARM_ABIMASK) != EF_ARM_ABI_VERSION ||
#ifndef CFG_WITH_VFP
	    (ehdr->e_flags & EF_ARM_ABI_FLOAT_HARD) ||
#endif
	    ehdr->e_phentsize != sizeof(Elf32_Phdr) ||
	    ehdr->e_shentsize != sizeof(Elf32_Shdr))
		return TEE_ERROR_BAD_FORMAT;

	elf->is_32bit = true;
	elf->e_entry = ehdr->e_entry;
	elf->e_phoff = ehdr->e_phoff;
	elf->e_shoff = ehdr->e_shoff;
	elf->e_phnum = ehdr->e_phnum;
	elf->e_shnum = ehdr->e_shnum;
	elf->e_phentsize = ehdr->e_phentsize;
	elf->e_shentsize = ehdr->e_shentsize;

	return TEE_SUCCESS;
}

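/*
 * 64-bit counterpart of e32_parse_ehdr(). Only available when ldelf
 * itself is built for ARM64; otherwise 64-bit TAs are rejected with
 * TEE_ERROR_NOT_SUPPORTED.
 */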
#ifdef ARM64
static TEE_Result e64_parse_ehdr(struct ta_elf *elf, Elf64_Ehdr *ehdr)
{
	if (ehdr->e_ident[EI_VERSION] != EV_CURRENT ||
	    ehdr->e_ident[EI_CLASS] != ELFCLASS64 ||
	    ehdr->e_ident[EI_DATA] != ELFDATA2LSB ||
	    ehdr->e_ident[EI_OSABI] != ELFOSABI_NONE ||
	    ehdr->e_type != ET_DYN || ehdr->e_machine != EM_AARCH64 ||
	    ehdr->e_flags || ehdr->e_phentsize != sizeof(Elf64_Phdr) ||
	    ehdr->e_shentsize != sizeof(Elf64_Shdr))
		return TEE_ERROR_BAD_FORMAT;

	elf->is_32bit = false;
	elf->e_entry = ehdr->e_entry;
	elf->e_phoff = ehdr->e_phoff;
	elf->e_shoff = ehdr->e_shoff;
	elf->e_phnum = ehdr->e_phnum;
	elf->e_shnum = ehdr->e_shnum;
	elf->e_phentsize = ehdr->e_phentsize;
	elf->e_shentsize = ehdr->e_shentsize;

	return TEE_SUCCESS;
}
#else /*ARM64*/
static TEE_Result e64_parse_ehdr(struct ta_elf *elf __unused,
				 Elf64_Ehdr *ehdr __unused)
{
	return TEE_ERROR_NOT_SUPPORTED;
}
#endif /*ARM64*/

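/*
 * Read entry @idx of the dynamic section at unrelocated address @addr,
 * returning its tag and value. Handles both Elf32_Dyn and Elf64_Dyn
 * layouts.
 */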
static void read_dyn(struct ta_elf *elf, vaddr_t addr,
		     size_t idx, unsigned int *tag, size_t *val)
{
	if (elf->is_32bit) {
		Elf32_Dyn *dyn = (Elf32_Dyn *)(addr + elf->load_addr);

		*tag = dyn[idx].d_tag;
		*val = dyn[idx].d_un.d_val;
	} else {
		Elf64_Dyn *dyn = (Elf64_Dyn *)(addr + elf->load_addr);

		*tag = dyn[idx].d_tag;
		*val = dyn[idx].d_un.d_val;
	}
}

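/*
 * Record the location and size of the dynamic symbol table (section
 * @tab_idx) and its associated string table (sh_link) in @elf.
 */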
static void e32_save_symtab(struct ta_elf *elf, size_t tab_idx)
{
	Elf32_Shdr *shdr = elf->shdr;
	size_t str_idx = shdr[tab_idx].sh_link;

	elf->dynsymtab = (void *)(shdr[tab_idx].sh_addr + elf->load_addr);
	assert(!(shdr[tab_idx].sh_size % sizeof(Elf32_Sym)));
	elf->num_dynsyms = shdr[tab_idx].sh_size / sizeof(Elf32_Sym);

	elf->dynstr = (void *)(shdr[str_idx].sh_addr + elf->load_addr);
	elf->dynstr_size = shdr[str_idx].sh_size;
}

static void e64_save_symtab(struct ta_elf *elf, size_t tab_idx)
{
	Elf64_Shdr *shdr = elf->shdr;
	size_t str_idx = shdr[tab_idx].sh_link;

	elf->dynsymtab = (void *)(vaddr_t)(shdr[tab_idx].sh_addr +
					   elf->load_addr);
	assert(!(shdr[tab_idx].sh_size % sizeof(Elf64_Sym)));
	elf->num_dynsyms = shdr[tab_idx].sh_size / sizeof(Elf64_Sym);

	elf->dynstr = (void *)(vaddr_t)(shdr[str_idx].sh_addr + elf->load_addr);
	elf->dynstr_size = shdr[str_idx].sh_size;
}

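/*
 * Find the first SHT_DYNSYM section header and save its symbol and
 * string tables for later symbol lookup.
 */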
static void save_symtab(struct ta_elf *elf)
{
	size_t n = 0;

	if (elf->is_32bit) {
		Elf32_Shdr *shdr = elf->shdr;

		for (n = 0; n < elf->e_shnum; n++) {
			if (shdr[n].sh_type == SHT_DYNSYM) {
				e32_save_symtab(elf, n);
				break;
			}
		}
	} else {
		Elf64_Shdr *shdr = elf->shdr;

		for (n = 0; n < elf->e_shnum; n++) {
			if (shdr[n].sh_type == SHT_DYNSYM) {
				e64_save_symtab(elf, n);
				break;
			}
		}
	}
}

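/*
 * Open the TA binary and map its first page so the ELF and program
 * headers can be parsed. For libraries the first page is kept mapped
 * read-only executable since the ELF header may be part of a load
 * segment.
 */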
static void init_elf(struct ta_elf *elf)
{
	TEE_Result res = TEE_SUCCESS;
	vaddr_t va = 0;
	uint32_t flags = PTA_SYSTEM_MAP_FLAG_SHAREABLE;
	const size_t max_align = 0x10000;

	res = sys_open_ta_bin(&elf->uuid, &elf->handle);
	if (res)
		err(res, "sys_open_ta_bin(%pUl)", (void *)&elf->uuid);

	/*
	 * Map it read-only executable when we're loading a library where
	 * the ELF header is included in a load segment.
	 */
	if (!elf->is_main)
		flags |= PTA_SYSTEM_MAP_FLAG_EXECUTABLE;
	/*
	 * Add a max_align (64KiB) pad at the end in case a library with
	 * this large an alignment has been mapped before. We want to
	 * avoid ending up in a hole in the mapping of a library.
	 */
	res = sys_map_ta_bin(&va, SMALL_PAGE_SIZE, flags, elf->handle, 0, 0,
			     max_align);
	if (res)
		err(res, "sys_map_ta_bin");
	elf->ehdr_addr = va;
	if (!elf->is_main) {
		elf->load_addr = va;
		elf->max_addr = va + SMALL_PAGE_SIZE;
		elf->max_offs = SMALL_PAGE_SIZE;
	}

	if (!IS_ELF(*(Elf32_Ehdr *)va))
		err(TEE_ERROR_BAD_FORMAT, "TA is not an ELF");

	res = e32_parse_ehdr(elf, (void *)va);
	if (res == TEE_ERROR_BAD_FORMAT)
		res = e64_parse_ehdr(elf, (void *)va);
	if (res)
		err(res, "Cannot parse ELF");

	if (elf->e_phoff + elf->e_phnum * elf->e_phentsize > SMALL_PAGE_SIZE)
		err(TEE_ERROR_NOT_SUPPORTED, "Cannot read program headers");

	elf->phdr = (void *)(va + elf->e_phoff);
}

static size_t roundup(size_t v)
{
	return ROUNDUP(v, SMALL_PAGE_SIZE);
}

static size_t rounddown(size_t v)
{
	return ROUNDDOWN(v, SMALL_PAGE_SIZE);
}

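/*
 * Append a segment descriptor to @elf->segs; called once per PT_LOAD
 * program header, in file order.
 */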
static void add_segment(struct ta_elf *elf, size_t offset, size_t vaddr,
			size_t filesz, size_t memsz, size_t flags, size_t align)
{
	struct segment *seg = calloc(1, sizeof(*seg));

	if (!seg)
		err(TEE_ERROR_OUT_OF_MEMORY, "calloc");

	seg->offset = offset;
	seg->vaddr = vaddr;
	seg->filesz = filesz;
	seg->memsz = memsz;
	seg->flags = flags;
	seg->align = align;

	TAILQ_INSERT_TAIL(&elf->segs, seg, link);
}

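/*
 * Walk the program headers (32- or 64-bit) and queue one segment per
 * PT_LOAD entry.
 */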
static void parse_load_segments(struct ta_elf *elf)
{
	size_t n = 0;

	if (elf->is_32bit) {
		Elf32_Phdr *phdr = elf->phdr;

		for (n = 0; n < elf->e_phnum; n++)
			if (phdr[n].p_type == PT_LOAD)
				add_segment(elf, phdr[n].p_offset,
					    phdr[n].p_vaddr, phdr[n].p_filesz,
					    phdr[n].p_memsz, phdr[n].p_flags,
					    phdr[n].p_align);
	} else {
		Elf64_Phdr *phdr = elf->phdr;

		for (n = 0; n < elf->e_phnum; n++)
			if (phdr[n].p_type == PT_LOAD)
				add_segment(elf, phdr[n].p_offset,
					    phdr[n].p_vaddr, phdr[n].p_filesz,
					    phdr[n].p_memsz, phdr[n].p_flags,
					    phdr[n].p_align);
	}
}

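/*
 * Fill a remapped-writeable segment with its file content: bytes
 * already present in the previously mapped pages are copied from
 * memory, the rest is read from the TA binary.
 */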
static void copy_remapped_to(struct ta_elf *elf, const struct segment *seg)
{
	uint8_t *dst = (void *)(seg->vaddr + elf->load_addr);
	size_t n = 0;
	size_t offs = seg->offset;
	size_t num_bytes = seg->filesz;

	if (offs < elf->max_offs) {
		n = MIN(elf->max_offs - offs, num_bytes);
		memcpy(dst, (void *)(elf->max_addr + offs - elf->max_offs), n);
		dst += n;
		offs += n;
		num_bytes -= n;
	}

	if (num_bytes) {
		TEE_Result res = sys_copy_from_ta_bin(dst, num_bytes,
						      elf->handle, offs);

		if (res)
			err(res, "sys_copy_from_ta_bin");
		elf->max_offs += offs;
	}
}

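/*
 * Sanity check the queued segments and adjust them to page
 * granularity, merging segments that share a page and flagging
 * segments that remap already-loaded file content.
 */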
static void adjust_segments(struct ta_elf *elf)
{
	struct segment *seg = NULL;
	struct segment *prev_seg = NULL;
	size_t prev_end_addr = 0;
	size_t align = 0;
	size_t mask = 0;

	/* Sanity check */
	TAILQ_FOREACH(seg, &elf->segs, link) {
		size_t dummy __maybe_unused = 0;

		assert(seg->align >= SMALL_PAGE_SIZE);
		assert(!ADD_OVERFLOW(seg->vaddr, seg->memsz, &dummy));
		assert(seg->filesz <= seg->memsz);
		assert((seg->offset & SMALL_PAGE_MASK) ==
		       (seg->vaddr & SMALL_PAGE_MASK));

		prev_seg = TAILQ_PREV(seg, segment_head, link);
		if (prev_seg) {
			assert(seg->vaddr >= prev_seg->vaddr + prev_seg->memsz);
			assert(seg->offset >=
			       prev_seg->offset + prev_seg->filesz);
		}
		if (!align)
			align = seg->align;
		assert(align == seg->align);
	}

	mask = align - 1;

	seg = TAILQ_FIRST(&elf->segs);
	if (seg)
		seg = TAILQ_NEXT(seg, link);
	while (seg) {
		prev_seg = TAILQ_PREV(seg, segment_head, link);
		prev_end_addr = prev_seg->vaddr + prev_seg->memsz;

		/*
		 * This segment may overlap with the last "page" in the
		 * previous segment in two different ways:
		 * 1. Virtual address (and offset) overlaps =>
		 *    Permissions need to be merged. The offset must have
		 *    the same SMALL_PAGE_MASK bits as the vaddr, and
		 *    vaddr and offset must add up with the previous
		 *    segment.
		 *
		 * 2. Only offset overlaps =>
		 *    The same page in the ELF is mapped at two different
		 *    virtual addresses. As a limitation this segment must
		 *    be mapped as writeable.
		 */

		/* Case 1. */
		if (rounddown(seg->vaddr) < prev_end_addr) {
			assert((seg->vaddr & mask) == (seg->offset & mask));
			assert(prev_seg->memsz == prev_seg->filesz);

			/*
			 * Merge the segments and their permissions.
			 * Note that there may be a small hole between
			 * the two segments.
			 */
			prev_seg->filesz = seg->vaddr + seg->filesz -
					   prev_seg->vaddr;
			prev_seg->memsz = seg->vaddr + seg->memsz -
					   prev_seg->vaddr;
			prev_seg->flags |= seg->flags;

			TAILQ_REMOVE(&elf->segs, seg, link);
			free(seg);
			seg = TAILQ_NEXT(prev_seg, link);
			continue;
		}

		/* Case 2. */
		if ((seg->offset & mask) &&
		    rounddown(seg->offset) <
		    (prev_seg->offset + prev_seg->filesz)) {

			assert(seg->flags & PF_W);
			seg->remapped_writeable = true;
		}

		/*
		 * No overlap, but we may need to align address, offset and
		 * size.
		 */
		seg->filesz += seg->vaddr - rounddown(seg->vaddr);
		seg->memsz += seg->vaddr - rounddown(seg->vaddr);
		seg->vaddr = rounddown(seg->vaddr);
		seg->offset = rounddown(seg->offset);
		seg = TAILQ_NEXT(seg, link);
	}
}

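/*
 * Legacy mapping: each segment is mapped zero-initialized writeable
 * and then filled from the TA binary. Final permissions are applied
 * later by ta_elf_finalize_mappings().
 */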
static void populate_segments_legacy(struct ta_elf *elf)
{
	TEE_Result res = TEE_SUCCESS;
	struct segment *seg = NULL;
	vaddr_t va = 0;

	TAILQ_FOREACH(seg, &elf->segs, link) {
		struct segment *last_seg = TAILQ_LAST(&elf->segs, segment_head);
		size_t pad_end = roundup(last_seg->vaddr + last_seg->memsz -
					 seg->vaddr - seg->memsz);
		size_t num_bytes = roundup(seg->memsz);

		if (!elf->load_addr)
			va = 0;
		else
			va = seg->vaddr + elf->load_addr;

		if (!(seg->flags & PF_R))
			err(TEE_ERROR_NOT_SUPPORTED,
			    "Segment must be readable");

		res = sys_map_zi(num_bytes, 0, &va, 0, pad_end);
		if (res)
			err(res, "sys_map_zi");
		res = sys_copy_from_ta_bin((void *)va, seg->filesz,
					   elf->handle, seg->offset);
		if (res)
			err(res, "sys_copy_from_ta_bin");

		if (!elf->load_addr)
			elf->load_addr = va;
		elf->max_addr = va + num_bytes;
		elf->max_offs = seg->offset + seg->filesz;
	}
}

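/*
 * Map each segment with its final permissions: writeable segments are
 * mapped zero-initialized and filled from the TA binary, read-only
 * segments are mapped directly from the binary. Overlaps with the
 * already mapped first page of a library are trimmed away first.
 */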
static void populate_segments(struct ta_elf *elf)
{
	TEE_Result res = TEE_SUCCESS;
	struct segment *seg = NULL;
	vaddr_t va = 0;

	TAILQ_FOREACH(seg, &elf->segs, link) {
		struct segment *last_seg = TAILQ_LAST(&elf->segs, segment_head);
		size_t pad_end = roundup(last_seg->vaddr + last_seg->memsz -
					 seg->vaddr - seg->memsz);

		if (seg->remapped_writeable) {
			size_t num_bytes = roundup(seg->vaddr + seg->memsz) -
					   rounddown(seg->vaddr);

			assert(elf->load_addr);
			va = rounddown(elf->load_addr + seg->vaddr);
			assert(va >= elf->max_addr);
			res = sys_map_zi(num_bytes, 0, &va, 0, pad_end);
			if (res)
				err(res, "sys_map_zi");

			copy_remapped_to(elf, seg);
			elf->max_addr = va + num_bytes;
		} else {
			uint32_t flags = 0;
			size_t filesz = seg->filesz;
			size_t memsz = seg->memsz;
			size_t offset = seg->offset;
			size_t vaddr = seg->vaddr;

			if (offset < elf->max_offs) {
				/*
				 * We're in a load segment which overlaps
				 * with (or is covered by) the first page
				 * of a shared library.
				 */
				if (vaddr + filesz < SMALL_PAGE_SIZE) {
					size_t num_bytes = 0;

					/*
					 * If this segment is completely
					 * covered, take next.
					 */
					if (vaddr + memsz <= SMALL_PAGE_SIZE)
						continue;

					/*
					 * All data of the segment is
					 * loaded, but we need to zero
					 * extend it.
					 */
					va = elf->max_addr;
					num_bytes = roundup(vaddr + memsz) -
						    roundup(vaddr) -
						    SMALL_PAGE_SIZE;
					assert(num_bytes);
					res = sys_map_zi(num_bytes, 0, &va, 0,
							 0);
					if (res)
						err(res, "sys_map_zi");
					elf->max_addr = roundup(va + num_bytes);
					continue;
				}

				/* Partial overlap, remove the first page. */
				vaddr += SMALL_PAGE_SIZE;
				filesz -= SMALL_PAGE_SIZE;
				memsz -= SMALL_PAGE_SIZE;
				offset += SMALL_PAGE_SIZE;
			}

			if (!elf->load_addr)
				va = 0;
			else
				va = vaddr + elf->load_addr;

			if (seg->flags & PF_W)
				flags |= PTA_SYSTEM_MAP_FLAG_WRITEABLE;
			else
				flags |= PTA_SYSTEM_MAP_FLAG_SHAREABLE;
			if (seg->flags & PF_X)
				flags |= PTA_SYSTEM_MAP_FLAG_EXECUTABLE;
			if (!(seg->flags & PF_R))
				err(TEE_ERROR_NOT_SUPPORTED,
				    "Segment must be readable");
			if (flags & PTA_SYSTEM_MAP_FLAG_WRITEABLE) {
				res = sys_map_zi(memsz, 0, &va, 0, pad_end);
				if (res)
					err(res, "sys_map_zi");
				res = sys_copy_from_ta_bin((void *)va, filesz,
							   elf->handle, offset);
				if (res)
					err(res, "sys_copy_from_ta_bin");
			} else {
				res = sys_map_ta_bin(&va, filesz, flags,
						     elf->handle, offset,
						     0, pad_end);
				if (res)
					err(res, "sys_map_ta_bin");
			}

			if (!elf->load_addr)
				elf->load_addr = va;
			elf->max_addr = roundup(va + filesz);
			elf->max_offs += filesz;
		}
	}
}

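/*
 * Map all PT_LOAD segments of @elf, selecting the legacy or current
 * population strategy.
 */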
static void map_segments(struct ta_elf *elf)
{
	parse_load_segments(elf);
	adjust_segments(elf);
	if (elf->is_legacy)
		populate_segments_legacy(elf);
	else
		populate_segments(elf);
}

static int hex(char c)
{
	char lc = tolower(c);

	if (isdigit(lc))
		return lc - '0';
	if (isxdigit(lc))
		return lc - 'a' + 10;
	return -1;
}

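/*
 * Parse @nchars hexadecimal characters from @s into a value. On bad
 * input *@res is set to TEE_ERROR_BAD_FORMAT and the partial value
 * is returned.
 */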
static uint32_t parse_hex(const char *s, size_t nchars, uint32_t *res)
{
	uint32_t v = 0;
	size_t n = 0;
	int c = 0;

	for (n = 0; n < nchars; n++) {
		c = hex(s[n]);
		if (c < 0) {
			*res = TEE_ERROR_BAD_FORMAT;
			goto out;
		}
		v = (v << 4) + c;
	}
	*res = TEE_SUCCESS;
out:
	return v;
}

/*
 * Convert a UUID string @s into a TEE_UUID @uuid
 * Expected format for @s is: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
 * 'x' being any hexadecimal digit (0-9a-fA-F)
 */
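/*
 * For example (illustrative only, UUID value made up):
 *
 *   TEE_UUID uuid = { };
 *
 *   if (parse_uuid("8aaaf200-2450-11e4-abe2-0002a5d5c51b", &uuid))
 *           return TEE_ERROR_BAD_FORMAT;
 */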
static TEE_Result parse_uuid(const char *s, TEE_UUID *uuid)
{
	TEE_Result res = TEE_SUCCESS;
	TEE_UUID u = { 0 };
	const char *p = s;
	size_t i = 0;

	if (strlen(p) != 36)
		return TEE_ERROR_BAD_FORMAT;
	if (p[8] != '-' || p[13] != '-' || p[18] != '-' || p[23] != '-')
		return TEE_ERROR_BAD_FORMAT;

	u.timeLow = parse_hex(p, 8, &res);
	if (res)
		goto out;
	p += 9;
	u.timeMid = parse_hex(p, 4, &res);
	if (res)
		goto out;
	p += 5;
	u.timeHiAndVersion = parse_hex(p, 4, &res);
	if (res)
		goto out;
	p += 5;
	for (i = 0; i < 8; i++) {
		u.clockSeqAndNode[i] = parse_hex(p, 2, &res);
		if (res)
			goto out;
		if (i == 1)
			p += 3;
		else
			p += 2;
	}
	*uuid = u;
out:
	return res;
}

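/*
 * Scan a PT_DYNAMIC segment for DT_NEEDED entries and queue each
 * dependency for loading; the name in the string table is expected
 * to be a UUID string.
 */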
static void add_deps_from_segment(struct ta_elf *elf, unsigned int type,
				  vaddr_t addr, size_t memsz)
{
	size_t dyn_entsize = 0;
	size_t num_dyns = 0;
	size_t n = 0;
	unsigned int tag = 0;
	size_t val = 0;
	TEE_UUID uuid = { };
	char *str_tab = NULL;

	if (type != PT_DYNAMIC)
		return;

	if (elf->is_32bit)
		dyn_entsize = sizeof(Elf32_Dyn);
	else
		dyn_entsize = sizeof(Elf64_Dyn);

	assert(!(memsz % dyn_entsize));
	num_dyns = memsz / dyn_entsize;

	for (n = 0; n < num_dyns; n++) {
		read_dyn(elf, addr, n, &tag, &val);
		if (tag == DT_STRTAB) {
			str_tab = (char *)(val + elf->load_addr);
			break;
		}
	}

	for (n = 0; n < num_dyns; n++) {
		read_dyn(elf, addr, n, &tag, &val);
		if (tag != DT_NEEDED)
			continue;
		parse_uuid(str_tab + val, &uuid);
		queue_elf(&uuid);
	}
}

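/*
 * Find dependencies in all program headers of @elf, adding them to
 * main_elf_queue via add_deps_from_segment().
 */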
static void add_dependencies(struct ta_elf *elf)
{
	size_t n = 0;

	if (elf->is_32bit) {
		Elf32_Phdr *phdr = elf->phdr;

		for (n = 0; n < elf->e_phnum; n++)
			add_deps_from_segment(elf, phdr[n].p_type,
					      phdr[n].p_vaddr, phdr[n].p_memsz);
	} else {
		Elf64_Phdr *phdr = elf->phdr;

		for (n = 0; n < elf->e_phnum; n++)
			add_deps_from_segment(elf, phdr[n].p_type,
					      phdr[n].p_vaddr, phdr[n].p_memsz);
	}
}

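/*
 * Copy the section header table into heap memory, partly from the
 * already mapped first page when needed and the rest straight from
 * the TA binary.
 */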
static void copy_section_headers(struct ta_elf *elf)
{
	TEE_Result res = TEE_SUCCESS;
	size_t sz = elf->e_shnum * elf->e_shentsize;
	size_t offs = 0;

	elf->shdr = malloc(sz);
	if (!elf->shdr)
		err(TEE_ERROR_OUT_OF_MEMORY, "malloc");

	/*
	 * We're assuming that the section headers come after the load
	 * segments, but if it's a very small dynamically linked library
	 * the section headers can still end up (partially?) in the first
	 * mapped page.
	 */
	if (elf->e_shoff < SMALL_PAGE_SIZE) {
		assert(!elf->is_main);
		offs = MIN(SMALL_PAGE_SIZE - elf->e_shoff, sz);
		memcpy(elf->shdr, (void *)(elf->load_addr + elf->e_shoff),
		       offs);
	}

	if (offs < sz) {
		res = sys_copy_from_ta_bin((uint8_t *)elf->shdr + offs,
					   sz - offs, elf->handle,
					   elf->e_shoff + offs);
		if (res)
			err(res, "sys_copy_from_ta_bin");
	}
}

static void close_handle(struct ta_elf *elf)
{
	TEE_Result res = sys_close_ta_bin(elf->handle);

	if (res)
		err(res, "sys_close_ta_bin");
	elf->handle = -1;
}

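/*
 * Load the main TA ELF: map its segments, queue its dependencies,
 * save the symbol table and allocate the stack. Returns the bitness,
 * entry point, initial stack pointer and TA flags to the caller.
 */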
void ta_elf_load_main(const TEE_UUID *uuid, uint32_t *is_32bit,
		      uint64_t *entry, uint64_t *sp, uint32_t *ta_flags)
{
	struct ta_elf *elf = queue_elf(uuid);
	struct ta_head *head = NULL;
	vaddr_t va = 0;
	TEE_Result res = TEE_SUCCESS;

	assert(elf);
	elf->is_main = true;

	init_elf(elf);

	/*
	 * Legacy TAs don't set the ELF entry point, instead it's set in
	 * ta_head. When the entry point isn't set explicitly the linker
	 * sets it to the start of the first executable section. Since
	 * ta_head always comes first in a legacy TA this means that the
	 * entry point will be set to 0x20 (sizeof(struct ta_head)).
	 *
	 * NB, everything before the commit a73b5878c89d ("Replace
	 * ta_head.entry with elf entry") is considered legacy TAs for
	 * ldelf.
	 */
	if (elf->e_entry == sizeof(*head))
		elf->is_legacy = true;

	map_segments(elf);
	add_dependencies(elf);
	copy_section_headers(elf);
	save_symtab(elf);
	close_handle(elf);

	head = (struct ta_head *)elf->load_addr;

	*is_32bit = elf->is_32bit;
	if (elf->is_legacy) {
		assert(head->depr_entry != UINT64_MAX);
		*entry = head->depr_entry + elf->load_addr;
	} else {
		assert(head->depr_entry == UINT64_MAX);
		*entry = elf->e_entry + elf->load_addr;
	}

	res = sys_map_zi(head->stack_size, 0, &va, 0, 0);
	if (res)
		err(res, "sys_map_zi stack");

	if (head->flags & ~TA_FLAGS_MASK)
		err(TEE_ERROR_BAD_FORMAT, "Invalid TA flag(s) %#"PRIx32,
		    head->flags & ~TA_FLAGS_MASK);

	*ta_flags = head->flags;
	*sp = va + head->stack_size;
}

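/*
 * Load a queued dependency the same way as the main ELF, except that
 * no stack is allocated and the bitness must match @is_32bit.
 */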
void ta_elf_load_dependency(struct ta_elf *elf, bool is_32bit)
{
	if (elf->is_main)
		return;

	init_elf(elf);
	if (elf->is_32bit != is_32bit)
		err(TEE_ERROR_BAD_FORMAT, "ELF %pUl is %sbit (expected %sbit)",
		    (void *)&elf->uuid, elf->is_32bit ? "32" : "64",
		    is_32bit ? "32" : "64");

	map_segments(elf);
	add_dependencies(elf);
	copy_section_headers(elf);
	save_symtab(elf);
	close_handle(elf);
}

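/*
 * For legacy TAs only: apply the final segment permissions with
 * sys_set_prot() now that all segments have been populated.
 */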
void ta_elf_finalize_mappings(struct ta_elf *elf)
{
	TEE_Result res = TEE_SUCCESS;
	struct segment *seg = NULL;

	if (!elf->is_legacy)
		return;

	TAILQ_FOREACH(seg, &elf->segs, link) {
		vaddr_t va = elf->load_addr + seg->vaddr;
		uint32_t flags = 0;

		if (seg->flags & PF_W)
			flags |= PTA_SYSTEM_MAP_FLAG_WRITEABLE;
		if (seg->flags & PF_X)
			flags |= PTA_SYSTEM_MAP_FLAG_EXECUTABLE;

		res = sys_set_prot(va, seg->memsz, flags);
		if (res)
			err(res, "sys_set_prot");
	}
}