xref: /optee_os/ldelf/ta_elf.c (revision b8a0c52c847baf133e08f19f69759eb8a5de1a2c)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2019, Linaro Limited
4  * Copyright (c) 2020-2023, Arm Limited
5  */
6 
7 #include <asan.h>
8 #include <assert.h>
9 #include <config.h>
10 #include <confine_array_index.h>
11 #include <elf32.h>
12 #include <elf64.h>
13 #include <elf_common.h>
14 #include <ldelf.h>
15 #include <link.h>
16 #include <stdio.h>
17 #include <stdlib.h>
18 #include <string_ext.h>
19 #include <string.h>
20 #include <tee_api_types.h>
21 #include <tee_internal_api_extensions.h>
22 #include <unw/unwind.h>
23 #include <user_ta_header.h>
24 #include <util.h>
25 
26 #include "sys.h"
27 #include "ta_elf.h"
28 
29 /*
30  * Layout of a 32-bit struct dl_phdr_info for a 64-bit ldelf to access a 32-bit
31  * TA
32  */
33 struct dl_phdr_info32 {
34 	uint32_t dlpi_addr;
35 	uint32_t dlpi_name;
36 	uint32_t dlpi_phdr;
37 	uint16_t dlpi_phnum;
38 	uint64_t dlpi_adds;
39 	uint64_t dlpi_subs;
40 	uint32_t dlpi_tls_modid;
41 	uint32_t dlpi_tls_data;
42 };
43 
44 static vaddr_t ta_stack;
45 static vaddr_t ta_stack_size;
46 
47 struct ta_elf_queue main_elf_queue = TAILQ_HEAD_INITIALIZER(main_elf_queue);
48 
49 /*
50  * Main application is always ID 1, shared libraries with TLS take IDs 2 and
51  * above
52  */
53 static void assign_tls_mod_id(struct ta_elf *elf)
54 {
55 	static size_t last_tls_mod_id = 1;
56 
57 	if (elf->is_main)
58 		assert(last_tls_mod_id == 1); /* Main always comes first */
59 	elf->tls_mod_id = last_tls_mod_id++;
60 }
61 
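/*
 * Annotation (illustrative, not part of the source): with a main TA and
 * two TLS-carrying shared libraries loaded in order, assign_tls_mod_id()
 * above hands out
 *
 *   main TA           -> tls_mod_id 1
 *   first shared lib  -> tls_mod_id 2
 *   second shared lib -> tls_mod_id 3
 *
 * matching the comment above: the main application always claims ID 1.
 */
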
62 static struct ta_elf *queue_elf_helper(const TEE_UUID *uuid)
63 {
64 	struct ta_elf *elf = calloc(1, sizeof(*elf));
65 
66 	if (!elf)
67 		return NULL;
68 
69 	TAILQ_INIT(&elf->segs);
70 
71 	elf->uuid = *uuid;
72 	TAILQ_INSERT_TAIL(&main_elf_queue, elf, link);
73 	return elf;
74 }
75 
76 static struct ta_elf *queue_elf(const TEE_UUID *uuid)
77 {
78 	struct ta_elf *elf = ta_elf_find_elf(uuid);
79 
80 	if (elf)
81 		return NULL;
82 
83 	elf = queue_elf_helper(uuid);
84 	if (!elf)
85 		err(TEE_ERROR_OUT_OF_MEMORY, "queue_elf_helper");
86 
87 	return elf;
88 }
89 
90 struct ta_elf *ta_elf_find_elf(const TEE_UUID *uuid)
91 {
92 	struct ta_elf *elf = NULL;
93 
94 	TAILQ_FOREACH(elf, &main_elf_queue, link)
95 		if (!memcmp(uuid, &elf->uuid, sizeof(*uuid)))
96 			return elf;
97 
98 	return NULL;
99 }
100 
101 #if defined(ARM32) || defined(ARM64)
102 static TEE_Result e32_parse_ehdr(struct ta_elf *elf, Elf32_Ehdr *ehdr)
103 {
104 	if (ehdr->e_ident[EI_VERSION] != EV_CURRENT ||
105 	    ehdr->e_ident[EI_CLASS] != ELFCLASS32 ||
106 	    ehdr->e_ident[EI_DATA] != ELFDATA2LSB ||
107 	    (ehdr->e_ident[EI_OSABI] != ELFOSABI_NONE &&
108 	     ehdr->e_ident[EI_OSABI] != ELFOSABI_ARM) ||
109 	    ehdr->e_type != ET_DYN || ehdr->e_machine != EM_ARM ||
110 #ifndef CFG_WITH_VFP
111 	    (ehdr->e_flags & EF_ARM_ABI_FLOAT_HARD) ||
112 #endif
113 	    ehdr->e_phentsize != sizeof(Elf32_Phdr) ||
114 	    ehdr->e_shentsize != sizeof(Elf32_Shdr))
115 		return TEE_ERROR_BAD_FORMAT;
116 
117 	if (ehdr->e_ident[EI_OSABI] == ELFOSABI_NONE &&
118 	    (ehdr->e_flags & EF_ARM_ABIMASK) != EF_ARM_ABI_V5)
119 		return TEE_ERROR_BAD_FORMAT;
120 
121 	if (ehdr->e_ident[EI_OSABI] == ELFOSABI_ARM &&
122 	    (ehdr->e_flags & EF_ARM_ABIMASK) != EF_ARM_ABI_UNKNOWN)
123 		return TEE_ERROR_BAD_FORMAT;
124 
125 	elf->is_32bit = true;
126 	elf->e_entry = ehdr->e_entry;
127 	elf->e_phoff = ehdr->e_phoff;
128 	elf->e_shoff = ehdr->e_shoff;
129 	elf->e_phnum = ehdr->e_phnum;
130 	elf->e_shnum = ehdr->e_shnum;
131 	elf->e_phentsize = ehdr->e_phentsize;
132 	elf->e_shentsize = ehdr->e_shentsize;
133 
134 	return TEE_SUCCESS;
135 }
136 
137 #ifdef ARM64
138 static TEE_Result e64_parse_ehdr(struct ta_elf *elf, Elf64_Ehdr *ehdr)
139 {
140 	if (ehdr->e_ident[EI_VERSION] != EV_CURRENT ||
141 	    ehdr->e_ident[EI_CLASS] != ELFCLASS64 ||
142 	    ehdr->e_ident[EI_DATA] != ELFDATA2LSB ||
143 	    ehdr->e_ident[EI_OSABI] != ELFOSABI_NONE ||
144 	    ehdr->e_type != ET_DYN || ehdr->e_machine != EM_AARCH64 ||
145 	    ehdr->e_flags || ehdr->e_phentsize != sizeof(Elf64_Phdr) ||
146 	    ehdr->e_shentsize != sizeof(Elf64_Shdr))
147 		return TEE_ERROR_BAD_FORMAT;
148 
149 
150 	elf->is_32bit = false;
151 	elf->e_entry = ehdr->e_entry;
152 	elf->e_phoff = ehdr->e_phoff;
153 	elf->e_shoff = ehdr->e_shoff;
154 	elf->e_phnum = ehdr->e_phnum;
155 	elf->e_shnum = ehdr->e_shnum;
156 	elf->e_phentsize = ehdr->e_phentsize;
157 	elf->e_shentsize = ehdr->e_shentsize;
158 
159 	return TEE_SUCCESS;
160 }
161 #else /*ARM64*/
162 static TEE_Result e64_parse_ehdr(struct ta_elf *elf __unused,
163 				 Elf64_Ehdr *ehdr __unused)
164 {
165 	return TEE_ERROR_NOT_SUPPORTED;
166 }
167 #endif /*ARM64*/
168 #endif /* ARM32 || ARM64 */
169 
170 #if defined(RV64)
171 static TEE_Result e32_parse_ehdr(struct ta_elf *elf __unused,
172 				 Elf32_Ehdr *ehdr __unused)
173 {
174 	return TEE_ERROR_BAD_FORMAT;
175 }
176 
177 static TEE_Result e64_parse_ehdr(struct ta_elf *elf, Elf64_Ehdr *ehdr)
178 {
179 	if (ehdr->e_ident[EI_VERSION] != EV_CURRENT ||
180 	    ehdr->e_ident[EI_CLASS] != ELFCLASS64 ||
181 	    ehdr->e_ident[EI_DATA] != ELFDATA2LSB ||
182 	    ehdr->e_ident[EI_OSABI] != ELFOSABI_NONE ||
183 	    ehdr->e_type != ET_DYN || ehdr->e_machine != EM_RISCV ||
184 	    ehdr->e_phentsize != sizeof(Elf64_Phdr) ||
185 	    ehdr->e_shentsize != sizeof(Elf64_Shdr))
186 		return TEE_ERROR_BAD_FORMAT;
187 
188 	elf->is_32bit = false;
189 	elf->e_entry = ehdr->e_entry;
190 	elf->e_phoff = ehdr->e_phoff;
191 	elf->e_shoff = ehdr->e_shoff;
192 	elf->e_phnum = ehdr->e_phnum;
193 	elf->e_shnum = ehdr->e_shnum;
194 	elf->e_phentsize = ehdr->e_phentsize;
195 	elf->e_shentsize = ehdr->e_shentsize;
196 
197 	return TEE_SUCCESS;
198 }
199 #endif /* RV64 */
200 
201 static void check_phdr_in_range(struct ta_elf *elf, unsigned int type,
202 				vaddr_t addr, size_t memsz)
203 {
204 	vaddr_t max_addr = 0;
205 
206 	if (ADD_OVERFLOW(addr, memsz, &max_addr))
207 		err(TEE_ERROR_BAD_FORMAT, "Program header %#x overflow", type);
208 
209 	/*
210 	 * elf->load_addr and elf->max_addr are both using the
211 	 * final virtual addresses, while this program header is
212 	 * relative to 0.
213 	 */
214 	if (max_addr > elf->max_addr - elf->load_addr)
215 		err(TEE_ERROR_BAD_FORMAT, "Program header %#x out of bounds",
216 		    type);
217 }
218 
219 static void read_dyn(struct ta_elf *elf, vaddr_t addr,
220 		     size_t idx, unsigned int *tag, size_t *val)
221 {
222 	if (elf->is_32bit) {
223 		Elf32_Dyn *dyn = (Elf32_Dyn *)(addr + elf->load_addr);
224 
225 		*tag = dyn[idx].d_tag;
226 		*val = dyn[idx].d_un.d_val;
227 	} else {
228 		Elf64_Dyn *dyn = (Elf64_Dyn *)(addr + elf->load_addr);
229 
230 		*tag = dyn[idx].d_tag;
231 		*val = dyn[idx].d_un.d_val;
232 	}
233 }
234 
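/*
 * Annotation (illustrative sketch, values hypothetical): a PT_DYNAMIC
 * segment is an array of tag/value pairs terminated by DT_NULL, e.g. for
 * a 64-bit TA roughly
 *
 *   Elf64_Dyn dyn[] = {
 *       { .d_tag = DT_STRTAB, .d_un.d_val = 0x1000 }, // unrelocated vaddr
 *       { .d_tag = DT_NEEDED, .d_un.d_val = 0x23 },   // .dynstr offset
 *       { .d_tag = DT_NULL },
 *   };
 *
 * read_dyn() above hides the Elf32_Dyn/Elf64_Dyn layout difference behind
 * one accessor; callers such as save_soname_from_segment() and
 * add_deps_from_segment() below interpret the tags.
 */
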
235 static void check_range(struct ta_elf *elf, const char *name, const void *ptr,
236 			size_t sz)
237 {
238 	size_t max_addr = 0;
239 
240 	if ((vaddr_t)ptr < elf->load_addr)
241 		err(TEE_ERROR_BAD_FORMAT, "%s %p out of range", name, ptr);
242 
243 	if (ADD_OVERFLOW((vaddr_t)ptr, sz, &max_addr))
244 		err(TEE_ERROR_BAD_FORMAT, "%s range overflow", name);
245 
246 	if (max_addr > elf->max_addr)
247 		err(TEE_ERROR_BAD_FORMAT,
248 		    "%s %p..%#zx out of range", name, ptr, max_addr);
249 }
250 
251 static void check_hashtab(struct ta_elf *elf, void *ptr, size_t num_buckets,
252 			  size_t num_chains)
253 {
254 	/*
255 	 * Start from 2 as the first two words are mandatory and hold
256 	 * num_buckets and num_chains. This function is called twice:
257 	 * first to check that there's indeed room for num_buckets and
258 	 * num_chains, and then to check that all of it fits.
259 	 * See http://www.sco.com/developers/gabi/latest/ch5.dynamic.html#hash
260 	 */
261 	size_t num_words = 2;
262 	size_t sz = 0;
263 
264 	if (!IS_ALIGNED_WITH_TYPE(ptr, uint32_t))
265 		err(TEE_ERROR_BAD_FORMAT, "Bad alignment of DT_HASH %p", ptr);
266 
267 	if (ADD_OVERFLOW(num_words, num_buckets, &num_words) ||
268 	    ADD_OVERFLOW(num_words, num_chains, &num_words) ||
269 	    MUL_OVERFLOW(num_words, sizeof(uint32_t), &sz))
270 		err(TEE_ERROR_BAD_FORMAT, "DT_HASH overflow");
271 
272 	check_range(elf, "DT_HASH", ptr, sz);
273 }
274 
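/*
 * Annotation (illustrative, per the gABI): the SysV DT_HASH table
 * validated by check_hashtab() above is an array of 32-bit words:
 *
 *   word 0                      nbucket
 *   word 1                      nchain
 *   words 2 .. 1 + nbucket      bucket[]
 *   words 2 + nbucket .. end    chain[], nchain entries
 *
 * so the total size is (2 + nbucket + nchain) * sizeof(uint32_t), which
 * is exactly the bound computed above.
 */
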
275 static void check_gnu_hashtab(struct ta_elf *elf, void *ptr)
276 {
277 	struct gnu_hashtab *h = ptr;
278 	size_t num_words = 4; /* nbuckets, symoffset, bloom_size, bloom_shift */
279 	size_t bloom_words = 0;
280 	size_t sz = 0;
281 
282 	if (!IS_ALIGNED_WITH_TYPE(ptr, uint32_t))
283 		err(TEE_ERROR_BAD_FORMAT, "Bad alignment of DT_GNU_HASH %p",
284 		    ptr);
285 
286 	if (elf->gnu_hashtab_size < sizeof(*h))
287 		err(TEE_ERROR_BAD_FORMAT, "DT_GNU_HASH too small");
288 
289 	/* Check validity of h->nbuckets and h->bloom_size */
290 
291 	if (elf->is_32bit)
292 		bloom_words = h->bloom_size;
293 	else
294 		bloom_words = h->bloom_size * 2;
295 	if (ADD_OVERFLOW(num_words, h->nbuckets, &num_words) ||
296 	    ADD_OVERFLOW(num_words, bloom_words, &num_words) ||
297 	    MUL_OVERFLOW(num_words, sizeof(uint32_t), &sz) ||
298 	    sz > elf->gnu_hashtab_size)
299 		err(TEE_ERROR_BAD_FORMAT, "DT_GNU_HASH overflow");
300 }
301 
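/*
 * Annotation (illustrative): the DT_GNU_HASH layout checked above starts
 * with four 32-bit words (nbuckets, symoffset, bloom_size, bloom_shift),
 * followed by a Bloom filter of bloom_size ELFCLASS-sized words (64-bit
 * words for a 64-bit ELF, hence the doubling above) and nbuckets 32-bit
 * bucket entries. The chain array that follows cannot be size-checked
 * here since its length is not stored in the header.
 */
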
302 static void save_hashtab(struct ta_elf *elf)
303 {
304 	uint32_t *hashtab = NULL;
305 	size_t n = 0;
306 
307 	if (elf->is_32bit) {
308 		Elf32_Shdr *shdr = elf->shdr;
309 
310 		for (n = 0; n < elf->e_shnum; n++) {
311 			void *addr = (void *)(vaddr_t)(shdr[n].sh_addr +
312 						       elf->load_addr);
313 
314 			if (shdr[n].sh_type == SHT_HASH) {
315 				elf->hashtab = addr;
316 			} else if (shdr[n].sh_type == SHT_GNU_HASH) {
317 				elf->gnu_hashtab = addr;
318 				elf->gnu_hashtab_size = shdr[n].sh_size;
319 			}
320 		}
321 	} else {
322 		Elf64_Shdr *shdr = elf->shdr;
323 
324 		for (n = 0; n < elf->e_shnum; n++) {
325 			void *addr = (void *)(vaddr_t)(shdr[n].sh_addr +
326 						       elf->load_addr);
327 
328 			if (shdr[n].sh_type == SHT_HASH) {
329 				elf->hashtab = addr;
330 			} else if (shdr[n].sh_type == SHT_GNU_HASH) {
331 				elf->gnu_hashtab = addr;
332 				elf->gnu_hashtab_size = shdr[n].sh_size;
333 			}
334 		}
335 	}
336 
337 	if (elf->hashtab) {
338 		check_hashtab(elf, elf->hashtab, 0, 0);
339 		hashtab = elf->hashtab;
340 		check_hashtab(elf, elf->hashtab, hashtab[0], hashtab[1]);
341 	}
342 	if (elf->gnu_hashtab)
343 		check_gnu_hashtab(elf, elf->gnu_hashtab);
344 }
345 
346 static void save_soname_from_segment(struct ta_elf *elf, unsigned int type,
347 				     vaddr_t addr, size_t memsz)
348 {
349 	size_t dyn_entsize = 0;
350 	size_t num_dyns = 0;
351 	size_t n = 0;
352 	unsigned int tag = 0;
353 	size_t val = 0;
354 	char *str_tab = NULL;
355 
356 	if (type != PT_DYNAMIC)
357 		return;
358 
359 	if (elf->is_32bit)
360 		dyn_entsize = sizeof(Elf32_Dyn);
361 	else
362 		dyn_entsize = sizeof(Elf64_Dyn);
363 
364 	assert(!(memsz % dyn_entsize));
365 	num_dyns = memsz / dyn_entsize;
366 
367 	for (n = 0; n < num_dyns; n++) {
368 		read_dyn(elf, addr, n, &tag, &val);
369 		if (tag == DT_STRTAB) {
370 			str_tab = (char *)(val + elf->load_addr);
371 			break;
372 		}
373 	}
374 	for (n = 0; n < num_dyns; n++) {
375 		read_dyn(elf, addr, n, &tag, &val);
376 		if (tag == DT_SONAME) {
377 			elf->soname = str_tab + val;
378 			break;
379 		}
380 	}
381 }
382 
383 static void save_soname(struct ta_elf *elf)
384 {
385 	size_t n = 0;
386 
387 	if (elf->is_32bit) {
388 		Elf32_Phdr *phdr = elf->phdr;
389 
390 		for (n = 0; n < elf->e_phnum; n++)
391 			save_soname_from_segment(elf, phdr[n].p_type,
392 						 phdr[n].p_vaddr,
393 						 phdr[n].p_memsz);
394 	} else {
395 		Elf64_Phdr *phdr = elf->phdr;
396 
397 		for (n = 0; n < elf->e_phnum; n++)
398 			save_soname_from_segment(elf, phdr[n].p_type,
399 						 phdr[n].p_vaddr,
400 						 phdr[n].p_memsz);
401 	}
402 }
403 
404 static void e32_save_symtab(struct ta_elf *elf, size_t tab_idx)
405 {
406 	Elf32_Shdr *shdr = elf->shdr;
407 	size_t str_idx = shdr[tab_idx].sh_link;
408 
409 	elf->dynsymtab = (void *)(shdr[tab_idx].sh_addr + elf->load_addr);
410 	if (!IS_ALIGNED_WITH_TYPE(elf->dynsymtab, Elf32_Sym))
411 		err(TEE_ERROR_BAD_FORMAT, "Bad alignment of dynsymtab %p",
412 		    elf->dynsymtab);
413 	check_range(elf, "Dynsymtab", elf->dynsymtab, shdr[tab_idx].sh_size);
414 
415 	if (shdr[tab_idx].sh_size % sizeof(Elf32_Sym))
416 		err(TEE_ERROR_BAD_FORMAT,
417 		    "Size of dynsymtab not an even multiple of Elf32_Sym");
418 	elf->num_dynsyms = shdr[tab_idx].sh_size / sizeof(Elf32_Sym);
419 
420 	if (str_idx >= elf->e_shnum)
421 		err(TEE_ERROR_BAD_FORMAT, "Dynstr section index out of range");
422 	elf->dynstr = (void *)(shdr[str_idx].sh_addr + elf->load_addr);
423 	check_range(elf, "Dynstr", elf->dynstr, shdr[str_idx].sh_size);
424 
425 	elf->dynstr_size = shdr[str_idx].sh_size;
426 }
427 
428 static void e64_save_symtab(struct ta_elf *elf, size_t tab_idx)
429 {
430 	Elf64_Shdr *shdr = elf->shdr;
431 	size_t str_idx = shdr[tab_idx].sh_link;
432 
433 	elf->dynsymtab = (void *)(vaddr_t)(shdr[tab_idx].sh_addr +
434 					   elf->load_addr);
435 
436 	if (!IS_ALIGNED_WITH_TYPE(elf->dynsymtab, Elf64_Sym))
437 		err(TEE_ERROR_BAD_FORMAT, "Bad alignment of .dynsym/DYNSYM %p",
438 		    elf->dynsymtab);
439 	check_range(elf, ".dynsym/DYNSYM", elf->dynsymtab,
440 		    shdr[tab_idx].sh_size);
441 
442 	if (shdr[tab_idx].sh_size % sizeof(Elf64_Sym))
443 		err(TEE_ERROR_BAD_FORMAT,
444 		    "Size of .dynsym/DYNSYM not an even multiple of Elf64_Sym");
445 	elf->num_dynsyms = shdr[tab_idx].sh_size / sizeof(Elf64_Sym);
446 
447 	if (str_idx >= elf->e_shnum)
448 		err(TEE_ERROR_BAD_FORMAT,
449 		    ".dynstr/STRTAB section index out of range");
450 	elf->dynstr = (void *)(vaddr_t)(shdr[str_idx].sh_addr + elf->load_addr);
451 	check_range(elf, ".dynstr/STRTAB", elf->dynstr, shdr[str_idx].sh_size);
452 
453 	elf->dynstr_size = shdr[str_idx].sh_size;
454 }
455 
456 static void save_symtab(struct ta_elf *elf)
457 {
458 	size_t n = 0;
459 
460 	if (elf->is_32bit) {
461 		Elf32_Shdr *shdr = elf->shdr;
462 
463 		for (n = 0; n < elf->e_shnum; n++) {
464 			if (shdr[n].sh_type == SHT_DYNSYM) {
465 				e32_save_symtab(elf, n);
466 				break;
467 			}
468 		}
469 	} else {
470 		Elf64_Shdr *shdr = elf->shdr;
471 
472 		for (n = 0; n < elf->e_shnum; n++) {
473 			if (shdr[n].sh_type == SHT_DYNSYM) {
474 				e64_save_symtab(elf, n);
475 				break;
476 			}
477 		}
478 
479 	}
480 
481 	save_hashtab(elf);
482 	save_soname(elf);
483 }
484 
485 static void init_elf(struct ta_elf *elf)
486 {
487 	TEE_Result res = TEE_SUCCESS;
488 	vaddr_t va = 0;
489 	uint32_t flags = LDELF_MAP_FLAG_SHAREABLE;
490 	size_t sz = 0;
491 
492 	res = sys_open_ta_bin(&elf->uuid, &elf->handle);
493 	if (res)
494 		err(res, "sys_open_ta_bin(%pUl)", (void *)&elf->uuid);
495 
496 	/*
497 	 * Map it read-only executable when we're loading a library where
498 	 * the ELF header is included in a load segment.
499 	 */
500 	if (!elf->is_main)
501 		flags |= LDELF_MAP_FLAG_EXECUTABLE;
502 	res = sys_map_ta_bin(&va, SMALL_PAGE_SIZE, flags, elf->handle, 0, 0, 0);
503 	if (res)
504 		err(res, "sys_map_ta_bin");
505 	elf->ehdr_addr = va;
506 	if (!elf->is_main) {
507 		elf->load_addr = va;
508 		elf->max_addr = va + SMALL_PAGE_SIZE;
509 		elf->max_offs = SMALL_PAGE_SIZE;
510 	}
511 
512 	if (!IS_ELF(*(Elf32_Ehdr *)va))
513 		err(TEE_ERROR_BAD_FORMAT, "TA is not an ELF");
514 
515 	res = e32_parse_ehdr(elf, (void *)va);
516 	if (res == TEE_ERROR_BAD_FORMAT)
517 		res = e64_parse_ehdr(elf, (void *)va);
518 	if (res)
519 		err(res, "Cannot parse ELF");
520 
521 	if (MUL_OVERFLOW(elf->e_phnum, elf->e_phentsize, &sz) ||
522 	    ADD_OVERFLOW(sz, elf->e_phoff, &sz))
523 		err(TEE_ERROR_BAD_FORMAT, "Program headers size overflow");
524 
525 	if (sz > SMALL_PAGE_SIZE)
526 		err(TEE_ERROR_NOT_SUPPORTED, "Cannot read program headers");
527 
528 	elf->phdr = (void *)(va + elf->e_phoff);
529 }
530 
531 static size_t roundup(size_t v)
532 {
533 	return ROUNDUP(v, SMALL_PAGE_SIZE);
534 }
535 
536 static size_t rounddown(size_t v)
537 {
538 	return ROUNDDOWN(v, SMALL_PAGE_SIZE);
539 }
540 
541 static void add_segment(struct ta_elf *elf, size_t offset, size_t vaddr,
542 			size_t filesz, size_t memsz, size_t flags, size_t align)
543 {
544 	struct segment *seg = calloc(1, sizeof(*seg));
545 
546 	if (!seg)
547 		err(TEE_ERROR_OUT_OF_MEMORY, "calloc");
548 
549 	if (memsz < filesz)
550 		err(TEE_ERROR_BAD_FORMAT, "Memsz smaller than filesz");
551 
552 	seg->offset = offset;
553 	seg->vaddr = vaddr;
554 	seg->filesz = filesz;
555 	seg->memsz = memsz;
556 	seg->flags = flags;
557 	seg->align = align;
558 
559 	TAILQ_INSERT_TAIL(&elf->segs, seg, link);
560 }
561 
562 static void parse_load_segments(struct ta_elf *elf)
563 {
564 	size_t n = 0;
565 
566 	if (elf->is_32bit) {
567 		Elf32_Phdr *phdr = elf->phdr;
568 
569 		for (n = 0; n < elf->e_phnum; n++)
570 			if (phdr[n].p_type == PT_LOAD) {
571 				add_segment(elf, phdr[n].p_offset,
572 					    phdr[n].p_vaddr, phdr[n].p_filesz,
573 					    phdr[n].p_memsz, phdr[n].p_flags,
574 					    phdr[n].p_align);
575 			} else if (phdr[n].p_type == PT_ARM_EXIDX) {
576 				elf->exidx_start = phdr[n].p_vaddr;
577 				elf->exidx_size = phdr[n].p_filesz;
578 			} else if (phdr[n].p_type == PT_TLS) {
579 				assign_tls_mod_id(elf);
580 			}
581 	} else {
582 		Elf64_Phdr *phdr = elf->phdr;
583 
584 		for (n = 0; n < elf->e_phnum; n++)
585 			if (phdr[n].p_type == PT_LOAD) {
586 				add_segment(elf, phdr[n].p_offset,
587 					    phdr[n].p_vaddr, phdr[n].p_filesz,
588 					    phdr[n].p_memsz, phdr[n].p_flags,
589 					    phdr[n].p_align);
590 			} else if (phdr[n].p_type == PT_TLS) {
591 				elf->tls_start = phdr[n].p_vaddr;
592 				elf->tls_filesz = phdr[n].p_filesz;
593 				elf->tls_memsz = phdr[n].p_memsz;
594 			} else if (IS_ENABLED(CFG_TA_BTI) &&
595 				   phdr[n].p_type == PT_GNU_PROPERTY) {
596 				elf->prop_start = phdr[n].p_vaddr;
597 				elf->prop_align = phdr[n].p_align;
598 				elf->prop_memsz = phdr[n].p_memsz;
599 			}
600 	}
601 }
602 
603 static void copy_remapped_to(struct ta_elf *elf, const struct segment *seg)
604 {
605 	uint8_t *dst = (void *)(seg->vaddr + elf->load_addr);
606 	size_t n = 0;
607 	size_t offs = seg->offset;
608 	size_t num_bytes = seg->filesz;
609 
610 	if (offs < elf->max_offs) {
611 		n = MIN(elf->max_offs - offs, num_bytes);
612 		memcpy(dst, (void *)(elf->max_addr + offs - elf->max_offs), n);
613 		dst += n;
614 		offs += n;
615 		num_bytes -= n;
616 	}
617 
618 	if (num_bytes) {
619 		TEE_Result res = sys_copy_from_ta_bin(dst, num_bytes,
620 						      elf->handle, offs);
621 
622 		if (res)
623 			err(res, "sys_copy_from_ta_bin");
624 		elf->max_offs += offs;
625 	}
626 }
627 
628 static void adjust_segments(struct ta_elf *elf)
629 {
630 	struct segment *seg = NULL;
631 	struct segment *prev_seg = NULL;
632 	size_t prev_end_addr = 0;
633 	size_t align = 0;
634 	size_t mask = 0;
635 
636 	/* Sanity check */
637 	TAILQ_FOREACH(seg, &elf->segs, link) {
638 		size_t dummy __maybe_unused = 0;
639 
640 		assert(seg->align >= SMALL_PAGE_SIZE);
641 		assert(!ADD_OVERFLOW(seg->vaddr, seg->memsz, &dummy));
642 		assert(seg->filesz <= seg->memsz);
643 		assert((seg->offset & SMALL_PAGE_MASK) ==
644 		       (seg->vaddr & SMALL_PAGE_MASK));
645 
646 		prev_seg = TAILQ_PREV(seg, segment_head, link);
647 		if (prev_seg) {
648 			assert(seg->vaddr >= prev_seg->vaddr + prev_seg->memsz);
649 			assert(seg->offset >=
650 			       prev_seg->offset + prev_seg->filesz);
651 		}
652 		if (!align)
653 			align = seg->align;
654 		assert(align == seg->align);
655 	}
656 
657 	mask = align - 1;
658 
659 	seg = TAILQ_FIRST(&elf->segs);
660 	if (seg)
661 		seg = TAILQ_NEXT(seg, link);
662 	while (seg) {
663 		prev_seg = TAILQ_PREV(seg, segment_head, link);
664 		prev_end_addr = prev_seg->vaddr + prev_seg->memsz;
665 
666 		/*
667 		 * This segment may overlap with the last "page" in the
668 		 * previous segment in two different ways:
669 		 * 1. Virtual address (and offset) overlap =>
670 		 *    Permissions need to be merged. The offset must have
671 		 *    the same SMALL_PAGE_MASK bits as the vaddr, and both
672 		 *    must line up with the previous segment.
673 		 *
674 		 * 2. Only offset overlaps =>
675 		 *    The same page in the ELF is mapped at two different
676 		 *    virtual addresses. As a limitation this segment must
677 		 *    be mapped as writeable.
678 		 */
679 
680 		/* Case 1. */
681 		if (rounddown(seg->vaddr) < prev_end_addr) {
682 			assert((seg->vaddr & mask) == (seg->offset & mask));
683 			assert(prev_seg->memsz == prev_seg->filesz);
684 
685 			/*
686 			 * Merge the segments and their permissions.
687 			 * Note that there may be a small hole between the
688 			 * two segments.
689 			 */
690 			prev_seg->filesz = seg->vaddr + seg->filesz -
691 					   prev_seg->vaddr;
692 			prev_seg->memsz = seg->vaddr + seg->memsz -
693 					   prev_seg->vaddr;
694 			prev_seg->flags |= seg->flags;
695 
696 			TAILQ_REMOVE(&elf->segs, seg, link);
697 			free(seg);
698 			seg = TAILQ_NEXT(prev_seg, link);
699 			continue;
700 		}
701 
702 		/* Case 2. */
703 		if ((seg->offset & mask) &&
704 		    rounddown(seg->offset) <
705 		    (prev_seg->offset + prev_seg->filesz)) {
706 
707 			assert(seg->flags & PF_W);
708 			seg->remapped_writeable = true;
709 		}
710 
711 		/*
712 		 * No overlap, but we may need to align address, offset and
713 		 * size.
714 		 */
715 		seg->filesz += seg->vaddr - rounddown(seg->vaddr);
716 		seg->memsz += seg->vaddr - rounddown(seg->vaddr);
717 		seg->vaddr = rounddown(seg->vaddr);
718 		seg->offset = rounddown(seg->offset);
719 		seg = TAILQ_NEXT(seg, link);
720 	}
721 
722 }
723 
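/*
 * Annotation (illustrative sketch of the two cases handled in
 * adjust_segments() above, assuming 4KiB pages):
 *
 * Case 1 - vaddr and offset overlap within one page => merge:
 *   seg A: vaddr 0x0000..0x0f00, offset 0x0000
 *   seg B: vaddr 0x0f00..0x2000, offset 0x0f00
 *   becomes one segment 0x0000..0x2000 with the permissions OR'ed.
 *
 * Case 2 - only the file offset overlaps => the same file page backs two
 * different virtual pages, so the second mapping cannot be shared; it is
 * flagged remapped_writeable and later populated by copying
 * (see copy_remapped_to() above and populate_segments() below).
 */
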
724 static void populate_segments_legacy(struct ta_elf *elf)
725 {
726 	TEE_Result res = TEE_SUCCESS;
727 	struct segment *seg = NULL;
728 	vaddr_t va = 0;
729 
730 	assert(elf->is_legacy);
731 	TAILQ_FOREACH(seg, &elf->segs, link) {
732 		struct segment *last_seg = TAILQ_LAST(&elf->segs, segment_head);
733 		size_t pad_end = roundup(last_seg->vaddr + last_seg->memsz -
734 					 seg->vaddr - seg->memsz);
735 		size_t num_bytes = roundup(seg->memsz);
736 
737 		if (!elf->load_addr)
738 			va = 0;
739 		else
740 			va = seg->vaddr + elf->load_addr;
741 
742 
743 		if (!(seg->flags & PF_R))
744 			err(TEE_ERROR_NOT_SUPPORTED,
745 			    "Segment must be readable");
746 
747 		res = sys_map_zi(num_bytes, 0, &va, 0, pad_end);
748 		if (res)
749 			err(res, "sys_map_zi");
750 		res = sys_copy_from_ta_bin((void *)va, seg->filesz,
751 					   elf->handle, seg->offset);
752 		if (res)
753 			err(res, "sys_copy_from_ta_bin");
754 
755 		if (!elf->load_addr)
756 			elf->load_addr = va;
757 		elf->max_addr = va + num_bytes;
758 		elf->max_offs = seg->offset + seg->filesz;
759 	}
760 }
761 
762 static size_t get_pad_begin(void)
763 {
764 #ifdef CFG_TA_ASLR
765 	size_t min = CFG_TA_ASLR_MIN_OFFSET_PAGES;
766 	size_t max = CFG_TA_ASLR_MAX_OFFSET_PAGES;
767 	TEE_Result res = TEE_SUCCESS;
768 	uint32_t rnd32 = 0;
769 	size_t rnd = 0;
770 
771 	COMPILE_TIME_ASSERT(CFG_TA_ASLR_MIN_OFFSET_PAGES <
772 			    CFG_TA_ASLR_MAX_OFFSET_PAGES);
773 	if (max > min) {
774 		res = sys_gen_random_num(&rnd32, sizeof(rnd32));
775 		if (res) {
776 			DMSG("Random read failed: %#"PRIx32, res);
777 			return min * SMALL_PAGE_SIZE;
778 		}
779 		rnd = rnd32 % (max - min);
780 	}
781 
782 	return (min + rnd) * SMALL_PAGE_SIZE;
783 #else /*!CFG_TA_ASLR*/
784 	return 0;
785 #endif /*!CFG_TA_ASLR*/
786 }
787 
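/*
 * Annotation (worked example with hypothetical values
 * CFG_TA_ASLR_MIN_OFFSET_PAGES=0 and CFG_TA_ASLR_MAX_OFFSET_PAGES=128):
 * rnd = rnd32 % 128 yields 0..127, so get_pad_begin() returns a random
 * pad of 0..127 pages, i.e. up to 508KiB with 4KiB pages, in front of
 * the first mapping. If the RNG call fails, the minimum offset is used,
 * so loading still succeeds, just without randomization.
 */
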
788 static void populate_segments(struct ta_elf *elf)
789 {
790 	TEE_Result res = TEE_SUCCESS;
791 	struct segment *seg = NULL;
792 	vaddr_t va = 0;
793 	size_t pad_begin = 0;
794 
795 	assert(!elf->is_legacy);
796 	TAILQ_FOREACH(seg, &elf->segs, link) {
797 		struct segment *last_seg = TAILQ_LAST(&elf->segs, segment_head);
798 		size_t pad_end = roundup(last_seg->vaddr + last_seg->memsz -
799 					 seg->vaddr - seg->memsz);
800 
801 		if (seg->remapped_writeable) {
802 			size_t num_bytes = roundup(seg->vaddr + seg->memsz) -
803 					   rounddown(seg->vaddr);
804 
805 			assert(elf->load_addr);
806 			va = rounddown(elf->load_addr + seg->vaddr);
807 			assert(va >= elf->max_addr);
808 			res = sys_map_zi(num_bytes, 0, &va, 0, pad_end);
809 			if (res)
810 				err(res, "sys_map_zi");
811 
812 			copy_remapped_to(elf, seg);
813 			elf->max_addr = va + num_bytes;
814 		} else {
815 			uint32_t flags = 0;
816 			size_t filesz = seg->filesz;
817 			size_t memsz = seg->memsz;
818 			size_t offset = seg->offset;
819 			size_t vaddr = seg->vaddr;
820 
821 			if (offset < elf->max_offs) {
822 				/*
823 				 * We're in a load segment which overlaps
824 				 * with (or is covered by) the first page
825 				 * of a shared library.
826 				 */
827 				if (vaddr + filesz < SMALL_PAGE_SIZE) {
828 					size_t num_bytes = 0;
829 
830 					/*
831 					 * If this segment is completely
832 					 * covered, take next.
833 					 */
834 					if (vaddr + memsz <= SMALL_PAGE_SIZE)
835 						continue;
836 
837 					/*
838 					 * All data of the segment is
839 					 * loaded, but we need to zero
840 					 * extend it.
841 					 */
842 					va = elf->max_addr;
843 					num_bytes = roundup(vaddr + memsz) -
844 						    roundup(vaddr) -
845 						    SMALL_PAGE_SIZE;
846 					assert(num_bytes);
847 					res = sys_map_zi(num_bytes, 0, &va, 0,
848 							 0);
849 					if (res)
850 						err(res, "sys_map_zi");
851 					elf->max_addr = roundup(va + num_bytes);
852 					continue;
853 				}
854 
855 				/* Partial overlap, remove the first page. */
856 				vaddr += SMALL_PAGE_SIZE;
857 				filesz -= SMALL_PAGE_SIZE;
858 				memsz -= SMALL_PAGE_SIZE;
859 				offset += SMALL_PAGE_SIZE;
860 			}
861 
862 			if (!elf->load_addr) {
863 				va = 0;
864 				pad_begin = get_pad_begin();
865 				/*
866 				 * If mapping with pad_begin fails we'll
867 				 * retry without pad_begin, effectively
868 				 * disabling ASLR for the current ELF file.
869 				 */
870 			} else {
871 				va = vaddr + elf->load_addr;
872 				pad_begin = 0;
873 			}
874 
875 			if (seg->flags & PF_W)
876 				flags |= LDELF_MAP_FLAG_WRITEABLE;
877 			else
878 				flags |= LDELF_MAP_FLAG_SHAREABLE;
879 			if (seg->flags & PF_X)
880 				flags |= LDELF_MAP_FLAG_EXECUTABLE;
881 			if (!(seg->flags & PF_R))
882 				err(TEE_ERROR_NOT_SUPPORTED,
883 				    "Segment must be readable");
884 			if (flags & LDELF_MAP_FLAG_WRITEABLE) {
885 				res = sys_map_zi(memsz, 0, &va, pad_begin,
886 						 pad_end);
887 				if (pad_begin && res == TEE_ERROR_OUT_OF_MEMORY)
888 					res = sys_map_zi(memsz, 0, &va, 0,
889 							 pad_end);
890 				if (res)
891 					err(res, "sys_map_zi");
892 				res = sys_copy_from_ta_bin((void *)va, filesz,
893 							   elf->handle, offset);
894 				if (res)
895 					err(res, "sys_copy_from_ta_bin");
896 			} else {
897 				if (filesz != memsz)
898 					err(TEE_ERROR_BAD_FORMAT,
899 					    "Filesz and memsz mismatch");
900 				res = sys_map_ta_bin(&va, filesz, flags,
901 						     elf->handle, offset,
902 						     pad_begin, pad_end);
903 				if (pad_begin && res == TEE_ERROR_OUT_OF_MEMORY)
904 					res = sys_map_ta_bin(&va, filesz, flags,
905 							     elf->handle,
906 							     offset, 0,
907 							     pad_end);
908 				if (res)
909 					err(res, "sys_map_ta_bin");
910 			}
911 
912 			if (!elf->load_addr)
913 				elf->load_addr = va;
914 			elf->max_addr = roundup(va + memsz);
915 			elf->max_offs += filesz;
916 		}
917 	}
918 }
919 
920 static void ta_elf_add_bti(struct ta_elf *elf)
921 {
922 	TEE_Result res = TEE_SUCCESS;
923 	struct segment *seg = NULL;
924 	uint32_t flags = LDELF_MAP_FLAG_EXECUTABLE | LDELF_MAP_FLAG_BTI;
925 
926 	TAILQ_FOREACH(seg, &elf->segs, link) {
927 		vaddr_t va = elf->load_addr + seg->vaddr;
928 
929 		if (seg->flags & PF_X) {
930 			res = sys_set_prot(va, seg->memsz, flags);
931 			if (res)
932 				err(res, "sys_set_prot");
933 		}
934 	}
935 }
936 
937 static void parse_property_segment(struct ta_elf *elf)
938 {
939 	char *desc = NULL;
940 	size_t align = elf->prop_align;
941 	size_t desc_offset = 0;
942 	size_t prop_offset = 0;
943 	vaddr_t va = 0;
944 	Elf_Note *note = NULL;
945 	char *name = NULL;
946 
947 	if (!IS_ENABLED(CFG_TA_BTI) || !elf->prop_start)
948 		return;
949 
950 	check_phdr_in_range(elf, PT_GNU_PROPERTY, elf->prop_start,
951 			    elf->prop_memsz);
952 
953 	va = elf->load_addr + elf->prop_start;
954 	note = (void *)va;
955 	name = (char *)(note + 1);
956 
957 	if (elf->prop_memsz < sizeof(*note) + sizeof(ELF_NOTE_GNU))
958 		return;
959 
960 	if (note->n_type != NT_GNU_PROPERTY_TYPE_0 ||
961 	    note->n_namesz != sizeof(ELF_NOTE_GNU) ||
962 	    memcmp(name, ELF_NOTE_GNU, sizeof(ELF_NOTE_GNU)) ||
963 	    !IS_POWER_OF_TWO(align))
964 		return;
965 
966 	desc_offset = ROUNDUP2(sizeof(*note) + sizeof(ELF_NOTE_GNU), align);
967 
968 	if (desc_offset > elf->prop_memsz ||
969 	    ROUNDUP2(desc_offset + note->n_descsz, align) > elf->prop_memsz)
970 		return;
971 
972 	desc = (char *)(va + desc_offset);
973 
974 	do {
975 		Elf_Prop *prop = (void *)(desc + prop_offset);
976 		size_t data_offset = prop_offset + sizeof(*prop);
977 
978 		if (note->n_descsz < data_offset)
979 			return;
980 
981 		data_offset = confine_array_index(data_offset, note->n_descsz);
982 
983 		if (prop->pr_type == GNU_PROPERTY_AARCH64_FEATURE_1_AND) {
984 			uint32_t *pr_data = (void *)(desc + data_offset);
985 
986 			if (note->n_descsz < (data_offset + sizeof(*pr_data)) &&
987 			    prop->pr_datasz != sizeof(*pr_data))
988 				return;
989 
990 			if (*pr_data & GNU_PROPERTY_AARCH64_FEATURE_1_BTI) {
991 				DMSG("BTI Feature present in note property");
992 				elf->bti_enabled = true;
993 			}
994 		}
995 
996 		prop_offset += ROUNDUP2(sizeof(*prop) + prop->pr_datasz, align);
997 	} while (prop_offset < note->n_descsz);
998 }
999 
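/*
 * Annotation (illustrative): the PT_GNU_PROPERTY segment parsed above
 * wraps an ELF note:
 *
 *   Elf_Note { n_namesz = 4, n_descsz, n_type = NT_GNU_PROPERTY_TYPE_0 }
 *   "GNU\0"
 *   <padding up to the note alignment>
 *   Elf_Prop { pr_type, pr_datasz } + pr_data, repeated, each entry
 *   padded to the alignment
 *
 * parse_property_segment() scans the properties for
 * GNU_PROPERTY_AARCH64_FEATURE_1_AND with the BTI bit set.
 */
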
1000 static void map_segments(struct ta_elf *elf)
1001 {
1002 	TEE_Result res = TEE_SUCCESS;
1003 
1004 	parse_load_segments(elf);
1005 	adjust_segments(elf);
1006 	if (TAILQ_FIRST(&elf->segs)->offset < SMALL_PAGE_SIZE) {
1007 		vaddr_t va = 0;
1008 		size_t sz = elf->max_addr - elf->load_addr;
1009 		struct segment *seg = TAILQ_LAST(&elf->segs, segment_head);
1010 		size_t pad_begin = get_pad_begin();
1011 
1012 		/*
1013 		 * We're loading a library; if that ever stops being true,
1014 		 * other parts of the code need to be updated too.
1015 		 */
1016 		assert(!elf->is_main);
1017 
1018 		/*
1019 		 * Now that we know how much virtual memory is needed, move
1020 		 * the already mapped part to a location which can
1021 		 * accommodate us.
1022 		 */
1023 		res = sys_remap(elf->load_addr, &va, sz, pad_begin,
1024 				roundup(seg->vaddr + seg->memsz));
1025 		if (res == TEE_ERROR_OUT_OF_MEMORY)
1026 			res = sys_remap(elf->load_addr, &va, sz, 0,
1027 					roundup(seg->vaddr + seg->memsz));
1028 		if (res)
1029 			err(res, "sys_remap");
1030 		elf->ehdr_addr = va;
1031 		elf->load_addr = va;
1032 		elf->max_addr = va + sz;
1033 		elf->phdr = (void *)(va + elf->e_phoff);
1034 	}
1035 }
1036 
1037 static void add_deps_from_segment(struct ta_elf *elf, unsigned int type,
1038 				  vaddr_t addr, size_t memsz)
1039 {
1040 	size_t dyn_entsize = 0;
1041 	size_t num_dyns = 0;
1042 	size_t n = 0;
1043 	unsigned int tag = 0;
1044 	size_t val = 0;
1045 	TEE_UUID uuid = { };
1046 	char *str_tab = NULL;
1047 	size_t str_tab_sz = 0;
1048 
1049 	if (type != PT_DYNAMIC)
1050 		return;
1051 
1052 	check_phdr_in_range(elf, type, addr, memsz);
1053 
1054 	if (elf->is_32bit)
1055 		dyn_entsize = sizeof(Elf32_Dyn);
1056 	else
1057 		dyn_entsize = sizeof(Elf64_Dyn);
1058 
1059 	assert(!(memsz % dyn_entsize));
1060 	num_dyns = memsz / dyn_entsize;
1061 
1062 	for (n = 0; n < num_dyns && !(str_tab && str_tab_sz); n++) {
1063 		read_dyn(elf, addr, n, &tag, &val);
1064 		if (tag == DT_STRTAB)
1065 			str_tab = (char *)(val + elf->load_addr);
1066 		else if (tag == DT_STRSZ)
1067 			str_tab_sz = val;
1068 	}
1069 	check_range(elf, ".dynstr/STRTAB", str_tab, str_tab_sz);
1070 
1071 	for (n = 0; n < num_dyns; n++) {
1072 		TEE_Result res = TEE_SUCCESS;
1073 
1074 		read_dyn(elf, addr, n, &tag, &val);
1075 		if (tag != DT_NEEDED)
1076 			continue;
1077 		if (val >= str_tab_sz)
1078 			err(TEE_ERROR_BAD_FORMAT,
1079 			    "Offset into .dynstr/STRTAB out of range");
1080 		res = tee_uuid_from_str(&uuid, str_tab + val);
1081 		if (res)
1082 			err(res, "Fail to get UUID from string");
1083 		queue_elf(&uuid);
1084 	}
1085 }
1086 
1087 static void add_dependencies(struct ta_elf *elf)
1088 {
1089 	size_t n = 0;
1090 
1091 	if (elf->is_32bit) {
1092 		Elf32_Phdr *phdr = elf->phdr;
1093 
1094 		for (n = 0; n < elf->e_phnum; n++)
1095 			add_deps_from_segment(elf, phdr[n].p_type,
1096 					      phdr[n].p_vaddr, phdr[n].p_memsz);
1097 	} else {
1098 		Elf64_Phdr *phdr = elf->phdr;
1099 
1100 		for (n = 0; n < elf->e_phnum; n++)
1101 			add_deps_from_segment(elf, phdr[n].p_type,
1102 					      phdr[n].p_vaddr, phdr[n].p_memsz);
1103 	}
1104 }
1105 
1106 static void copy_section_headers(struct ta_elf *elf)
1107 {
1108 	TEE_Result res = TEE_SUCCESS;
1109 	size_t sz = 0;
1110 	size_t offs = 0;
1111 
1112 	if (MUL_OVERFLOW(elf->e_shnum, elf->e_shentsize, &sz))
1113 		err(TEE_ERROR_BAD_FORMAT, "Section headers size overflow");
1114 
1115 	elf->shdr = malloc(sz);
1116 	if (!elf->shdr)
1117 		err(TEE_ERROR_OUT_OF_MEMORY, "malloc");
1118 
1119 	/*
1120 	 * We're assuming that the section headers come after the load segments,
1121 	 * but if it's a very small dynamically linked library the section
1122 	 * headers can still end up (partially?) in the first mapped page.
1123 	 */
1124 	if (elf->e_shoff < SMALL_PAGE_SIZE) {
1125 		assert(!elf->is_main);
1126 		offs = MIN(SMALL_PAGE_SIZE - elf->e_shoff, sz);
1127 		memcpy(elf->shdr, (void *)(elf->load_addr + elf->e_shoff),
1128 		       offs);
1129 	}
1130 
1131 	if (offs < sz) {
1132 		res = sys_copy_from_ta_bin((uint8_t *)elf->shdr + offs,
1133 					   sz - offs, elf->handle,
1134 					   elf->e_shoff + offs);
1135 		if (res)
1136 			err(res, "sys_copy_from_ta_bin");
1137 	}
1138 }
1139 
1140 static void close_handle(struct ta_elf *elf)
1141 {
1142 	TEE_Result res = sys_close_ta_bin(elf->handle);
1143 
1144 	if (res)
1145 		err(res, "sys_close_ta_bin");
1146 	elf->handle = -1;
1147 }
1148 
1149 static void clean_elf_load_main(struct ta_elf *elf)
1150 {
1151 	TEE_Result res = TEE_SUCCESS;
1152 
1153 	/*
1154 	 * Clean up from last attempt to load
1155 	 */
1156 	res = sys_unmap(elf->ehdr_addr, SMALL_PAGE_SIZE);
1157 	if (res)
1158 		err(res, "sys_unmap");
1159 
1160 	while (!TAILQ_EMPTY(&elf->segs)) {
1161 		struct segment *seg = TAILQ_FIRST(&elf->segs);
1162 		vaddr_t va = 0;
1163 		size_t num_bytes = 0;
1164 
1165 		va = rounddown(elf->load_addr + seg->vaddr);
1166 		if (seg->remapped_writeable)
1167 			num_bytes = roundup(seg->vaddr + seg->memsz) -
1168 				    rounddown(seg->vaddr);
1169 		else
1170 			num_bytes = seg->memsz;
1171 
1172 		res = sys_unmap(va, num_bytes);
1173 		if (res)
1174 			err(res, "sys_unmap");
1175 
1176 		TAILQ_REMOVE(&elf->segs, seg, link);
1177 		free(seg);
1178 	}
1179 
1180 	free(elf->shdr);
1181 	memset(&elf->is_32bit, 0,
1182 	       (vaddr_t)&elf->uuid - (vaddr_t)&elf->is_32bit);
1183 
1184 	TAILQ_INIT(&elf->segs);
1185 }
1186 
1187 #ifdef ARM64
1188 /*
1189  * Allocates an offset in the TA's Thread Control Block for the TLS segment of
1190  * the @elf module.
1191  */
1192 #define TCB_HEAD_SIZE (2 * sizeof(long))
1193 static void set_tls_offset(struct ta_elf *elf)
1194 {
1195 	static size_t next_offs = TCB_HEAD_SIZE;
1196 
1197 	if (!elf->tls_start)
1198 		return;
1199 
1200 	/* Module has a TLS segment */
1201 	elf->tls_tcb_offs = next_offs;
1202 	next_offs += elf->tls_memsz;
1203 }
1204 #else
1205 static void set_tls_offset(struct ta_elf *elf __unused) {}
1206 #endif
1207 
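/*
 * Annotation (illustrative sketch of the TLS variant 1 layout assumed by
 * set_tls_offset() above, with TPIDR_EL0 pointing at the TCB):
 *
 *   offset 0              TCB head, 2 * sizeof(long), reserved
 *   offset TCB_HEAD_SIZE  TLS block of module 1 (the main TA)
 *   ...                   TLS block of module 2 (first shared library),
 *                         and so on, each following the previous block
 *
 * Note that next_offs is simply bumped by tls_memsz; no extra per-module
 * alignment padding is inserted here.
 */
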
1208 static void load_main(struct ta_elf *elf)
1209 {
1210 	vaddr_t va = 0;
1211 
1212 	init_elf(elf);
1213 	map_segments(elf);
1214 	populate_segments(elf);
1215 	add_dependencies(elf);
1216 	copy_section_headers(elf);
1217 	save_symtab(elf);
1218 	close_handle(elf);
1219 	set_tls_offset(elf);
1220 	parse_property_segment(elf);
1221 	if (elf->bti_enabled)
1222 		ta_elf_add_bti(elf);
1223 
1224 	if (!ta_elf_resolve_sym("ta_head", &va, NULL, elf))
1225 		elf->head = (struct ta_head *)va;
1226 	else
1227 		elf->head = (struct ta_head *)elf->load_addr;
1228 	if (elf->head->depr_entry != UINT64_MAX) {
1229 		/*
1230 		 * Legacy TAs set their entry point in ta_head. Non-legacy
1231 		 * TAs use the ELF entry point instead, leaving the ta_head
1232 		 * entry point set to UINT64_MAX to indicate that it's not
1233 		 * used.
1234 		 *
1235 		 * NB, everything before the commit a73b5878c89d ("Replace
1236 		 * ta_head.entry with elf entry") is considered legacy TAs
1237 		 * for ldelf.
1238 		 *
1239 		 * Legacy TAs cannot be mapped with shared memory segments
1240 		 * so restart the mapping if it turned out we're loading a
1241 		 * legacy TA.
1242 		 */
1243 
1244 		DMSG("Reloading TA %pUl as legacy TA", (void *)&elf->uuid);
1245 		clean_elf_load_main(elf);
1246 		elf->is_legacy = true;
1247 		init_elf(elf);
1248 		map_segments(elf);
1249 		populate_segments_legacy(elf);
1250 		add_dependencies(elf);
1251 		copy_section_headers(elf);
1252 		save_symtab(elf);
1253 		close_handle(elf);
1254 		elf->head = (struct ta_head *)elf->load_addr;
1255 		/*
1256 		 * Check that the TA is still a legacy TA, if it isn't give
1257 		 * up now since we're likely under attack.
1258 		 */
1259 		if (elf->head->depr_entry == UINT64_MAX)
1260 			err(TEE_ERROR_GENERIC,
1261 			    "TA %pUl was changed on disk to non-legacy",
1262 			    (void *)&elf->uuid);
1263 	}
1264 
1265 }
1266 
1267 void ta_elf_load_main(const TEE_UUID *uuid, uint32_t *is_32bit, uint64_t *sp,
1268 		      uint32_t *ta_flags)
1269 {
1270 	struct ta_elf *elf = queue_elf(uuid);
1271 	vaddr_t va = 0;
1272 	TEE_Result res = TEE_SUCCESS;
1273 
1274 	assert(elf);
1275 	elf->is_main = true;
1276 
1277 	load_main(elf);
1278 
1279 	*is_32bit = elf->is_32bit;
1280 	res = sys_map_zi(elf->head->stack_size, 0, &va, 0, 0);
1281 	if (res)
1282 		err(res, "sys_map_zi stack");
1283 
1284 	if (elf->head->flags & ~TA_FLAGS_MASK)
1285 		err(TEE_ERROR_BAD_FORMAT, "Invalid TA flags(s) %#"PRIx32,
1286 		    elf->head->flags & ~TA_FLAGS_MASK);
1287 
1288 	*ta_flags = elf->head->flags;
1289 	*sp = va + elf->head->stack_size;
1290 	ta_stack = va;
1291 	ta_stack_size = elf->head->stack_size;
1292 
1293 	if (IS_ENABLED(CFG_TA_SANITIZE_KADDRESS)) {
1294 		res = asan_user_map_shadow((void *)ta_stack,
1295 					   (void *)(ta_stack +
1296 					   roundup(ta_stack_size)),
1297 					   ASAN_REG_STACK);
1298 		if (res) {
1299 			EMSG("Failed to map shadow stack for ELF (%pUl)",
1300 			     (void *)&elf->uuid);
1301 			panic();
1302 		}
1303 	}
1304 }
1305 
1306 void ta_elf_finalize_load_main(uint64_t *entry, uint64_t *load_addr)
1307 {
1308 	struct ta_elf *elf = TAILQ_FIRST(&main_elf_queue);
1309 	TEE_Result res = TEE_SUCCESS;
1310 
1311 	assert(elf->is_main);
1312 
1313 	res = ta_elf_set_init_fini_info_compat(elf->is_32bit);
1314 	if (res)
1315 		err(res, "ta_elf_set_init_fini_info_compat");
1316 	res = ta_elf_set_elf_phdr_info(elf->is_32bit);
1317 	if (res)
1318 		err(res, "ta_elf_set_elf_phdr_info");
1319 
1320 	if (elf->is_legacy)
1321 		*entry = elf->head->depr_entry;
1322 	else
1323 		*entry = elf->e_entry + elf->load_addr;
1324 
1325 	*load_addr = elf->load_addr;
1326 }
1327 
1328 
1329 void ta_elf_load_dependency(struct ta_elf *elf, bool is_32bit)
1330 {
1331 	if (elf->is_main)
1332 		return;
1333 
1334 	init_elf(elf);
1335 	if (elf->is_32bit != is_32bit)
1336 		err(TEE_ERROR_BAD_FORMAT, "ELF %pUl is %sbit (expected %sbit)",
1337 		    (void *)&elf->uuid, elf->is_32bit ? "32" : "64",
1338 		    is_32bit ? "32" : "64");
1339 
1340 	map_segments(elf);
1341 	populate_segments(elf);
1342 	add_dependencies(elf);
1343 	copy_section_headers(elf);
1344 	save_symtab(elf);
1345 	close_handle(elf);
1346 	set_tls_offset(elf);
1347 	parse_property_segment(elf);
1348 	if (elf->bti_enabled)
1349 		ta_elf_add_bti(elf);
1350 }
1351 
1352 void ta_elf_finalize_mappings(struct ta_elf *elf)
1353 {
1354 	TEE_Result res = TEE_SUCCESS;
1355 	struct segment *seg = NULL;
1356 
1357 	if (!elf->is_legacy)
1358 		return;
1359 
1360 	TAILQ_FOREACH(seg, &elf->segs, link) {
1361 		vaddr_t va = elf->load_addr + seg->vaddr;
1362 		uint32_t flags = 0;
1363 
1364 		if (seg->flags & PF_W)
1365 			flags |= LDELF_MAP_FLAG_WRITEABLE;
1366 		if (seg->flags & PF_X)
1367 			flags |= LDELF_MAP_FLAG_EXECUTABLE;
1368 
1369 		res = sys_set_prot(va, seg->memsz, flags);
1370 		if (res)
1371 			err(res, "sys_set_prot");
1372 	}
1373 }
1374 
1375 static void __printf(3, 4) print_wrapper(void *pctx, print_func_t print_func,
1376 					 const char *fmt, ...)
1377 {
1378 	va_list ap;
1379 
1380 	va_start(ap, fmt);
1381 	print_func(pctx, fmt, ap);
1382 	va_end(ap);
1383 }
1384 
1385 static void print_seg(void *pctx, print_func_t print_func,
1386 		      size_t idx __maybe_unused, int elf_idx __maybe_unused,
1387 		      vaddr_t va __maybe_unused, paddr_t pa __maybe_unused,
1388 		      size_t sz __maybe_unused, uint32_t flags)
1389 {
1390 	int rc __maybe_unused = 0;
1391 	int width __maybe_unused = 8;
1392 	char desc[14] __maybe_unused = "";
1393 	char flags_str[] __maybe_unused = "----";
1394 
1395 	if (elf_idx > -1) {
1396 		rc = snprintf(desc, sizeof(desc), " [%d]", elf_idx);
1397 		assert(rc >= 0);
1398 	} else {
1399 		if (flags & DUMP_MAP_EPHEM) {
1400 			rc = snprintf(desc, sizeof(desc), " (param)");
1401 			assert(rc >= 0);
1402 		}
1403 		if (flags & DUMP_MAP_LDELF) {
1404 			rc = snprintf(desc, sizeof(desc), " (ldelf)");
1405 			assert(rc >= 0);
1406 		}
1407 		if (va == ta_stack) {
1408 			rc = snprintf(desc, sizeof(desc), " (stack)");
1409 			assert(rc >= 0);
1410 		}
1411 	}
1412 
1413 	if (flags & DUMP_MAP_READ)
1414 		flags_str[0] = 'r';
1415 	if (flags & DUMP_MAP_WRITE)
1416 		flags_str[1] = 'w';
1417 	if (flags & DUMP_MAP_EXEC)
1418 		flags_str[2] = 'x';
1419 	if (flags & DUMP_MAP_SECURE)
1420 		flags_str[3] = 's';
1421 
1422 	print_wrapper(pctx, print_func,
1423 		      "region %2zu: va 0x%0*"PRIxVA" pa 0x%0*"PRIxPA" size 0x%06zx flags %s%s\n",
1424 		      idx, width, va, width, pa, sz, flags_str, desc);
1425 }
1426 
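/*
 * Annotation (example output line from print_seg() above, values
 * illustrative):
 *
 *   region  3: va 0x00104000 pa 0x0e301000 size 0x002000 flags r-xs [0]
 *
 * where [0] is an index into the ELF list printed at the end of
 * ta_elf_print_mappings() and (param)/(ldelf)/(stack) tag mappings that
 * do not belong to any ELF.
 */
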
1427 static bool get_next_in_order(struct ta_elf_queue *elf_queue,
1428 			      struct ta_elf **elf, struct segment **seg,
1429 			      size_t *elf_idx)
1430 {
1431 	struct ta_elf *e = NULL;
1432 	struct segment *s = NULL;
1433 	size_t idx = 0;
1434 	vaddr_t va = 0;
1435 	struct ta_elf *e2 = NULL;
1436 	size_t i2 = 0;
1437 
1438 	assert(elf && seg && elf_idx);
1439 	e = *elf;
1440 	s = *seg;
1441 	assert((e == NULL && s == NULL) || (e != NULL && s != NULL));
1442 
1443 	if (s) {
1444 		s = TAILQ_NEXT(s, link);
1445 		if (s) {
1446 			*seg = s;
1447 			return true;
1448 		}
1449 	}
1450 
1451 	if (e)
1452 		va = e->load_addr;
1453 
1454 	/* Find the ELF with next load address */
1455 	e = NULL;
1456 	TAILQ_FOREACH(e2, elf_queue, link) {
1457 		if (e2->load_addr > va) {
1458 			if (!e || e2->load_addr < e->load_addr) {
1459 				e = e2;
1460 				idx = i2;
1461 			}
1462 		}
1463 		i2++;
1464 	}
1465 	if (!e)
1466 		return false;
1467 
1468 	*elf = e;
1469 	*seg = TAILQ_FIRST(&e->segs);
1470 	*elf_idx = idx;
1471 	return true;
1472 }
1473 
1474 void ta_elf_print_mappings(void *pctx, print_func_t print_func,
1475 			   struct ta_elf_queue *elf_queue, size_t num_maps,
1476 			   struct dump_map *maps, vaddr_t mpool_base)
1477 {
1478 	struct segment *seg = NULL;
1479 	struct ta_elf *elf = NULL;
1480 	size_t elf_idx = 0;
1481 	size_t idx = 0;
1482 	size_t map_idx = 0;
1483 
1484 	/*
1485 	 * Loop over all segments and maps, printing virtual addresses in
1486 	 * order. A segment has priority when its virtual address is
1487 	 * present in both a map and a segment.
1488 	 */
1489 	get_next_in_order(elf_queue, &elf, &seg, &elf_idx);
1490 	while (true) {
1491 		vaddr_t va = -1;
1492 		paddr_t pa = -1;
1493 		size_t sz = 0;
1494 		uint32_t flags = DUMP_MAP_SECURE;
1495 
1496 		if (seg) {
1497 			va = rounddown(seg->vaddr + elf->load_addr);
1498 			sz = roundup(seg->vaddr + seg->memsz) -
1499 				     rounddown(seg->vaddr);
1500 		}
1501 
1502 		while (map_idx < num_maps && maps[map_idx].va <= va) {
1503 			uint32_t f = 0;
1504 
1505 			/* If there's a match, it should be the same map */
1506 			if (maps[map_idx].va == va) {
1507 				pa = maps[map_idx].pa;
1508 				/*
1509 				 * In shared libraries the first page is
1510 				 * mapped separately, with the rest of that
1511 				 * segment following back-to-back as a
1512 				 * separate entry.
1513 				 */
1514 				if (map_idx + 1 < num_maps &&
1515 				    maps[map_idx].sz == SMALL_PAGE_SIZE) {
1516 					vaddr_t next_va = maps[map_idx].va +
1517 							  maps[map_idx].sz;
1518 					size_t comb_sz = maps[map_idx].sz +
1519 							 maps[map_idx + 1].sz;
1520 
1521 					if (next_va == maps[map_idx + 1].va &&
1522 					    comb_sz == sz &&
1523 					    maps[map_idx].flags ==
1524 					    maps[map_idx + 1].flags) {
1525 						/* Skip this and next entry */
1526 						map_idx += 2;
1527 						continue;
1528 					}
1529 				}
1530 				assert(maps[map_idx].sz == sz);
1531 			} else if (maps[map_idx].va < va) {
1532 				if (maps[map_idx].va == mpool_base)
1533 					f |= DUMP_MAP_LDELF;
1534 				print_seg(pctx, print_func, idx, -1,
1535 					  maps[map_idx].va, maps[map_idx].pa,
1536 					  maps[map_idx].sz,
1537 					  maps[map_idx].flags | f);
1538 				idx++;
1539 			}
1540 			map_idx++;
1541 		}
1542 
1543 		if (!seg)
1544 			break;
1545 
1546 		if (seg->flags & PF_R)
1547 			flags |= DUMP_MAP_READ;
1548 		if (seg->flags & PF_W)
1549 			flags |= DUMP_MAP_WRITE;
1550 		if (seg->flags & PF_X)
1551 			flags |= DUMP_MAP_EXEC;
1552 
1553 		print_seg(pctx, print_func, idx, elf_idx, va, pa, sz, flags);
1554 		idx++;
1555 
1556 		if (!get_next_in_order(elf_queue, &elf, &seg, &elf_idx))
1557 			seg = NULL;
1558 	}
1559 
1560 	elf_idx = 0;
1561 	TAILQ_FOREACH(elf, elf_queue, link) {
1562 		print_wrapper(pctx, print_func,
1563 			      " [%zu] %pUl @ 0x%0*"PRIxVA"\n",
1564 			      elf_idx, (void *)&elf->uuid, 8, elf->load_addr);
1565 		elf_idx++;
1566 	}
1567 }
1568 
1569 #ifdef CFG_UNWIND
1570 
1571 #if defined(ARM32) || defined(ARM64)
1572 /* Called by libunw */
1573 bool find_exidx(vaddr_t addr, vaddr_t *idx_start, vaddr_t *idx_end)
1574 {
1575 	struct segment *seg = NULL;
1576 	struct ta_elf *elf = NULL;
1577 	vaddr_t a = 0;
1578 
1579 	TAILQ_FOREACH(elf, &main_elf_queue, link) {
1580 		if (addr < elf->load_addr)
1581 			continue;
1582 		a = addr - elf->load_addr;
1583 		TAILQ_FOREACH(seg, &elf->segs, link) {
1584 			if (a < seg->vaddr)
1585 				continue;
1586 			if (a - seg->vaddr < seg->filesz) {
1587 				*idx_start = elf->exidx_start + elf->load_addr;
1588 				*idx_end = elf->exidx_start + elf->load_addr +
1589 					   elf->exidx_size;
1590 				return true;
1591 			}
1592 		}
1593 	}
1594 
1595 	return false;
1596 }
1597 
1598 void ta_elf_stack_trace_a32(uint32_t regs[16])
1599 {
1600 	struct unwind_state_arm32 state = { };
1601 
1602 	memcpy(state.registers, regs, sizeof(state.registers));
1603 	print_stack_arm32(&state, ta_stack, ta_stack_size);
1604 }
1605 
1606 void ta_elf_stack_trace_a64(uint64_t fp, uint64_t sp, uint64_t pc)
1607 {
1608 	struct unwind_state_arm64 state = { .fp = fp, .sp = sp, .pc = pc };
1609 
1610 	print_stack_arm64(&state, ta_stack, ta_stack_size);
1611 }
1612 #elif defined(RV32) || defined(RV64)
1613 void ta_elf_stack_trace_riscv(uint64_t fp, uint64_t pc)
1614 {
1615 	struct unwind_state_riscv state = { .fp = fp, .pc = pc };
1616 
1617 	print_stack_riscv(&state, ta_stack, ta_stack_size);
1618 }
1619 #endif
1620 
1621 #endif /* CFG_UNWIND */
1622 
1623 TEE_Result ta_elf_add_library(const TEE_UUID *uuid)
1624 {
1625 	TEE_Result res = TEE_ERROR_GENERIC;
1626 	struct ta_elf *ta = TAILQ_FIRST(&main_elf_queue);
1627 	struct ta_elf *lib = ta_elf_find_elf(uuid);
1628 	struct ta_elf *elf = NULL;
1629 
1630 	if (lib)
1631 		return TEE_SUCCESS; /* Already mapped */
1632 
1633 	lib = queue_elf_helper(uuid);
1634 	if (!lib)
1635 		return TEE_ERROR_OUT_OF_MEMORY;
1636 
1637 	for (elf = lib; elf; elf = TAILQ_NEXT(elf, link))
1638 		ta_elf_load_dependency(elf, ta->is_32bit);
1639 
1640 	for (elf = lib; elf; elf = TAILQ_NEXT(elf, link)) {
1641 		ta_elf_relocate(elf);
1642 		ta_elf_finalize_mappings(elf);
1643 	}
1644 
1645 	for (elf = lib; elf; elf = TAILQ_NEXT(elf, link)) {
1646 		if (IS_ENABLED(CFG_TA_SANITIZE_KADDRESS)) {
1647 			int rc;
1648 
1649 			rc = asan_user_map_shadow((void *)elf->load_addr,
1650 						  (void *)elf->max_addr,
1651 						  ASAN_REG_ELF);
1652 			if (rc) {
1653 				EMSG("Failed to map shadow for ELF (%pUl)",
1654 				     (void *)&elf->uuid);
1655 				panic();
1656 			}
1657 		}
1658 		DMSG("ELF (%pUl) at %#"PRIxVA,
1659 		     (void *)&elf->uuid, elf->load_addr);
1660 	}
1661 
1662 	res = ta_elf_set_init_fini_info_compat(ta->is_32bit);
1663 	if (res)
1664 		return res;
1665 
1666 	return ta_elf_set_elf_phdr_info(ta->is_32bit);
1667 }
1668 
1669 /* Get address/size of .init_array and .fini_array from the dynamic segment */
1670 static void get_init_fini_array(struct ta_elf *elf, unsigned int type,
1671 				vaddr_t addr, size_t memsz, vaddr_t *init,
1672 				size_t *init_cnt, vaddr_t *fini,
1673 				size_t *fini_cnt)
1674 {
1675 	size_t addrsz = 0;
1676 	size_t dyn_entsize = 0;
1677 	size_t num_dyns = 0;
1678 	size_t n = 0;
1679 	unsigned int tag = 0;
1680 	size_t val = 0;
1681 
1682 	assert(type == PT_DYNAMIC);
1683 
1684 	check_phdr_in_range(elf, type, addr, memsz);
1685 
1686 	if (elf->is_32bit) {
1687 		dyn_entsize = sizeof(Elf32_Dyn);
1688 		addrsz = 4;
1689 	} else {
1690 		dyn_entsize = sizeof(Elf64_Dyn);
1691 		addrsz = 8;
1692 	}
1693 
1694 	assert(!(memsz % dyn_entsize));
1695 	num_dyns = memsz / dyn_entsize;
1696 
1697 	for (n = 0; n < num_dyns; n++) {
1698 		read_dyn(elf, addr, n, &tag, &val);
1699 		if (tag == DT_INIT_ARRAY)
1700 			*init = val + elf->load_addr;
1701 		else if (tag == DT_FINI_ARRAY)
1702 			*fini = val + elf->load_addr;
1703 		else if (tag == DT_INIT_ARRAYSZ)
1704 			*init_cnt = val / addrsz;
1705 		else if (tag == DT_FINI_ARRAYSZ)
1706 			*fini_cnt = val / addrsz;
1707 	}
1708 }
1709 
1710 /* Get address/size of .init_array and .fini_array in @elf (if present) */
1711 static void elf_get_init_fini_array(struct ta_elf *elf, vaddr_t *init,
1712 				    size_t *init_cnt, vaddr_t *fini,
1713 				    size_t *fini_cnt)
1714 {
1715 	size_t n = 0;
1716 
1717 	if (elf->is_32bit) {
1718 		Elf32_Phdr *phdr = elf->phdr;
1719 
1720 		for (n = 0; n < elf->e_phnum; n++) {
1721 			if (phdr[n].p_type == PT_DYNAMIC) {
1722 				get_init_fini_array(elf, phdr[n].p_type,
1723 						    phdr[n].p_vaddr,
1724 						    phdr[n].p_memsz,
1725 						    init, init_cnt, fini,
1726 						    fini_cnt);
1727 				return;
1728 			}
1729 		}
1730 	} else {
1731 		Elf64_Phdr *phdr = elf->phdr;
1732 
1733 		for (n = 0; n < elf->e_phnum; n++) {
1734 			if (phdr[n].p_type == PT_DYNAMIC) {
1735 				get_init_fini_array(elf, phdr[n].p_type,
1736 						    phdr[n].p_vaddr,
1737 						    phdr[n].p_memsz,
1738 						    init, init_cnt, fini,
1739 						    fini_cnt);
1740 				return;
1741 			}
1742 		}
1743 	}
1744 }
1745 
1746 /*
1747  * Deprecated by __elf_phdr_info below. Kept for compatibility.
1748  *
1749  * Pointers to ELF initialization and finalization functions are extracted by
1750  * ldelf and stored on the TA heap, then exported to the TA via the global
1751  * symbol __init_fini_info. libutee in OP-TEE 3.9.0 uses this mechanism.
1752  */
1753 
1754 struct __init_fini {
1755 	uint32_t flags;
1756 	uint16_t init_size;
1757 	uint16_t fini_size;
1758 
1759 	void (**init)(void); /* @init_size entries */
1760 	void (**fini)(void); /* @fini_size entries */
1761 };
1762 
1763 #define __IFS_VALID            BIT(0)
1764 #define __IFS_INIT_HAS_RUN     BIT(1)
1765 #define __IFS_FINI_HAS_RUN     BIT(2)
1766 
1767 struct __init_fini_info {
1768 	uint32_t reserved;
1769 	uint16_t size;
1770 	uint16_t pad;
1771 	struct __init_fini *ifs; /* @size entries */
1772 };
1773 
1774 /* 32-bit variants for a 64-bit ldelf to access a 32-bit TA */
1775 
1776 struct __init_fini32 {
1777 	uint32_t flags;
1778 	uint16_t init_size;
1779 	uint16_t fini_size;
1780 	uint32_t init;
1781 	uint32_t fini;
1782 };
1783 
1784 struct __init_fini_info32 {
1785 	uint32_t reserved;
1786 	uint16_t size;
1787 	uint16_t pad;
1788 	uint32_t ifs;
1789 };
1790 
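/*
 * Annotation (hypothetical sketch of how a libutee-style consumer might
 * walk the table that ldelf fills in below; the exact code in OP-TEE
 * 3.9.0 may differ):
 *
 *   struct __init_fini_info *info = &__init_fini_info;
 *
 *   for (size_t i = 0; i < info->size; i++) {
 *       struct __init_fini *ifs = &info->ifs[i];
 *
 *       if (!(ifs->flags & __IFS_VALID) ||
 *           (ifs->flags & __IFS_INIT_HAS_RUN))
 *           continue;
 *       for (size_t j = 0; j < ifs->init_size; j++)
 *           ifs->init[j]();
 *       ifs->flags |= __IFS_INIT_HAS_RUN;
 *   }
 */
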
1791 static TEE_Result realloc_ifs(vaddr_t va, size_t cnt, bool is_32bit)
1792 {
1793 	struct __init_fini_info32 *info32 = (struct __init_fini_info32 *)va;
1794 	struct __init_fini_info *info = (struct __init_fini_info *)va;
1795 	struct __init_fini32 *ifs32 = NULL;
1796 	struct __init_fini *ifs = NULL;
1797 	size_t prev_cnt = 0;
1798 	void *ptr = NULL;
1799 
1800 	if (is_32bit) {
1801 		ptr = (void *)(vaddr_t)info32->ifs;
1802 		ptr = realloc(ptr, cnt * sizeof(struct __init_fini32));
1803 		if (!ptr)
1804 			return TEE_ERROR_OUT_OF_MEMORY;
1805 		ifs32 = ptr;
1806 		prev_cnt = info32->size;
1807 		if (cnt > prev_cnt)
1808 			memset(ifs32 + prev_cnt, 0,
1809 			       (cnt - prev_cnt) * sizeof(*ifs32));
1810 		info32->ifs = (uint32_t)(vaddr_t)ifs32;
1811 		info32->size = cnt;
1812 	} else {
1813 		ptr = realloc(info->ifs, cnt * sizeof(struct __init_fini));
1814 		if (!ptr)
1815 			return TEE_ERROR_OUT_OF_MEMORY;
1816 		ifs = ptr;
1817 		prev_cnt = info->size;
1818 		if (cnt > prev_cnt)
1819 			memset(ifs + prev_cnt, 0,
1820 			       (cnt - prev_cnt) * sizeof(*ifs));
1821 		info->ifs = ifs;
1822 		info->size = cnt;
1823 	}
1824 
1825 	return TEE_SUCCESS;
1826 }
1827 
1828 static void fill_ifs(vaddr_t va, size_t idx, struct ta_elf *elf, bool is_32bit)
1829 {
1830 	struct __init_fini_info32 *info32 = (struct __init_fini_info32 *)va;
1831 	struct __init_fini_info *info = (struct __init_fini_info *)va;
1832 	struct __init_fini32 *ifs32 = NULL;
1833 	struct __init_fini *ifs = NULL;
1834 	size_t init_cnt = 0;
1835 	size_t fini_cnt = 0;
1836 	vaddr_t init = 0;
1837 	vaddr_t fini = 0;
1838 
1839 	if (is_32bit) {
1840 		assert(idx < info32->size);
1841 		ifs32 = &((struct __init_fini32 *)(vaddr_t)info32->ifs)[idx];
1842 
1843 		if (ifs32->flags & __IFS_VALID)
1844 			return;
1845 
1846 		elf_get_init_fini_array(elf, &init, &init_cnt, &fini,
1847 					&fini_cnt);
1848 
1849 		ifs32->init = (uint32_t)init;
1850 		ifs32->init_size = init_cnt;
1851 
1852 		ifs32->fini = (uint32_t)fini;
1853 		ifs32->fini_size = fini_cnt;
1854 
1855 		ifs32->flags |= __IFS_VALID;
1856 	} else {
1857 		assert(idx < info->size);
1858 		ifs = &info->ifs[idx];
1859 
1860 		if (ifs->flags & __IFS_VALID)
1861 			return;
1862 
1863 		elf_get_init_fini_array(elf, &init, &init_cnt, &fini,
1864 					&fini_cnt);
1865 
1866 		ifs->init = (void (**)(void))init;
1867 		ifs->init_size = init_cnt;
1868 
1869 		ifs->fini = (void (**)(void))fini;
1870 		ifs->fini_size = fini_cnt;
1871 
1872 		ifs->flags |= __IFS_VALID;
1873 	}
1874 }
1875 
1876 /*
1877  * Set or update __init_fini_info in the TA with information from the ELF
1878  * queue
1879  */
1880 TEE_Result ta_elf_set_init_fini_info_compat(bool is_32bit)
1881 {
1882 	struct __init_fini_info *info = NULL;
1883 	TEE_Result res = TEE_SUCCESS;
1884 	struct ta_elf *elf = NULL;
1885 	vaddr_t info_va = 0;
1886 	size_t cnt = 0;
1887 
1888 	res = ta_elf_resolve_sym("__init_fini_info", &info_va, NULL, NULL);
1889 	if (res) {
1890 		if (res == TEE_ERROR_ITEM_NOT_FOUND) {
1891 			/*
1892 			 * Not an error, only TAs linked against libutee from
1893 			 * OP-TEE 3.9.0 have this symbol.
1894 			 */
1895 			return TEE_SUCCESS;
1896 		}
1897 		return res;
1898 	}
1899 	assert(info_va);
1900 
1901 	info = (struct __init_fini_info *)info_va;
1902 	if (info->reserved)
1903 		return TEE_ERROR_NOT_SUPPORTED;
1904 
1905 	TAILQ_FOREACH(elf, &main_elf_queue, link)
1906 		cnt++;
1907 
1908 	/* Queue has at least one file (main) */
1909 	assert(cnt);
1910 
1911 	res = realloc_ifs(info_va, cnt, is_32bit);
1912 	if (res)
1913 		goto err;
1914 
1915 	cnt = 0;
1916 	TAILQ_FOREACH(elf, &main_elf_queue, link) {
1917 		fill_ifs(info_va, cnt, elf, is_32bit);
1918 		cnt++;
1919 	}
1920 
1921 	return TEE_SUCCESS;
1922 err:
1923 	free(info);
1924 	return res;
1925 }
1926 
1927 static TEE_Result realloc_elf_phdr_info(vaddr_t va, size_t cnt, bool is_32bit)
1928 {
1929 	struct __elf_phdr_info32 *info32 = (struct __elf_phdr_info32 *)va;
1930 	struct __elf_phdr_info *info = (struct __elf_phdr_info *)va;
1931 	struct dl_phdr_info32 *dlpi32 = NULL;
1932 	struct dl_phdr_info *dlpi = NULL;
1933 	size_t prev_cnt = 0;
1934 	void *ptr = NULL;
1935 
1936 	if (is_32bit) {
1937 		ptr = (void *)(vaddr_t)info32->dlpi;
1938 		ptr = realloc(ptr, cnt * sizeof(*dlpi32));
1939 		if (!ptr)
1940 			return TEE_ERROR_OUT_OF_MEMORY;
1941 		dlpi32 = ptr;
1942 		prev_cnt = info32->count;
1943 		if (cnt > prev_cnt)
1944 			memset(dlpi32 + prev_cnt, 0,
1945 			       (cnt - prev_cnt) * sizeof(*dlpi32));
1946 		info32->dlpi = (uint32_t)(vaddr_t)dlpi32;
1947 		info32->count = cnt;
1948 	} else {
1949 		ptr = realloc(info->dlpi, cnt * sizeof(*dlpi));
1950 		if (!ptr)
1951 			return TEE_ERROR_OUT_OF_MEMORY;
1952 		dlpi = ptr;
1953 		prev_cnt = info->count;
1954 		if (cnt > prev_cnt)
1955 			memset(dlpi + prev_cnt, 0,
1956 			       (cnt - prev_cnt) * sizeof(*dlpi));
1957 		info->dlpi = dlpi;
1958 		info->count = cnt;
1959 	}
1960 
1961 	return TEE_SUCCESS;
1962 }
1963 
1964 static void fill_elf_phdr_info(vaddr_t va, size_t idx, struct ta_elf *elf,
1965 			       bool is_32bit)
1966 {
1967 	struct __elf_phdr_info32 *info32 = (struct __elf_phdr_info32 *)va;
1968 	struct __elf_phdr_info *info = (struct __elf_phdr_info *)va;
1969 	struct dl_phdr_info32 *dlpi32 = NULL;
1970 	struct dl_phdr_info *dlpi = NULL;
1971 
1972 	if (is_32bit) {
1973 		assert(idx < info32->count);
1974 		dlpi32 = (struct dl_phdr_info32 *)(vaddr_t)info32->dlpi + idx;
1975 
1976 		dlpi32->dlpi_addr = elf->load_addr;
1977 		if (elf->soname)
1978 			dlpi32->dlpi_name = (vaddr_t)elf->soname;
1979 		else
1980 			dlpi32->dlpi_name = (vaddr_t)&info32->zero;
1981 		dlpi32->dlpi_phdr = (vaddr_t)elf->phdr;
1982 		dlpi32->dlpi_phnum = elf->e_phnum;
1983 		dlpi32->dlpi_adds = 1; /* No unloading on dlclose() currently */
1984 		dlpi32->dlpi_subs = 0; /* No unloading on dlclose() currently */
1985 		dlpi32->dlpi_tls_modid = elf->tls_mod_id;
1986 		dlpi32->dlpi_tls_data = elf->tls_start;
1987 	} else {
1988 		assert(idx < info->count);
1989 		dlpi = info->dlpi + idx;
1990 
1991 		dlpi->dlpi_addr = elf->load_addr;
1992 		if (elf->soname)
1993 			dlpi->dlpi_name = elf->soname;
1994 		else
1995 			dlpi->dlpi_name = &info32->zero;
1996 		dlpi->dlpi_phdr = elf->phdr;
1997 		dlpi->dlpi_phnum = elf->e_phnum;
1998 		dlpi->dlpi_adds = 1; /* No unloading on dlclose() currently */
1999 		dlpi->dlpi_subs = 0; /* No unloading on dlclose() currently */
2000 		dlpi->dlpi_tls_modid = elf->tls_mod_id;
2001 		dlpi->dlpi_tls_data = (void *)elf->tls_start;
2002 	}
2003 }
2004 
2005 /* Set or update __elf_hdr_info in the TA with information from the ELF queue */
2006 TEE_Result ta_elf_set_elf_phdr_info(bool is_32bit)
2007 {
2008 	struct __elf_phdr_info *info = NULL;
2009 	TEE_Result res = TEE_SUCCESS;
2010 	struct ta_elf *elf = NULL;
2011 	vaddr_t info_va = 0;
2012 	size_t cnt = 0;
2013 
2014 	res = ta_elf_resolve_sym("__elf_phdr_info", &info_va, NULL, NULL);
2015 	if (res) {
2016 		if (res == TEE_ERROR_ITEM_NOT_FOUND) {
2017 			/* Older TA */
2018 			return TEE_SUCCESS;
2019 		}
2020 		return res;
2021 	}
2022 	assert(info_va);
2023 
2024 	info = (struct __elf_phdr_info *)info_va;
2025 	if (info->reserved)
2026 		return TEE_ERROR_NOT_SUPPORTED;
2027 
2028 	TAILQ_FOREACH(elf, &main_elf_queue, link)
2029 		cnt++;
2030 
2031 	res = realloc_elf_phdr_info(info_va, cnt, is_32bit);
2032 	if (res)
2033 		return res;
2034 
2035 	cnt = 0;
2036 	TAILQ_FOREACH(elf, &main_elf_queue, link) {
2037 		fill_elf_phdr_info(info_va, cnt, elf, is_32bit);
2038 		cnt++;
2039 	}
2040 
2041 	return TEE_SUCCESS;
2042 }
2043