xref: /optee_os/ldelf/ta_elf_rel.c (revision 5a913ee74d3c71af2a2860ce8a4e7aeab2916f9b)
// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2019, Linaro Limited
 */

#include <assert.h>
#include <compiler.h>
#include <elf32.h>
#include <elf64.h>
#include <elf_common.h>
#include <string.h>
#include <tee_api_types.h>
#include <util.h>

#include "sys.h"
#include "ta_elf.h"

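/*
 * Classic SysV ELF hash function, used to index the module hash table
 * (elf->hashtab) when resolving symbol names below.
 */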
static uint32_t elf_hash(const char *name)
{
	const unsigned char *p = (const unsigned char *)name;
	uint32_t h = 0;
	uint32_t g = 0;

	while (*p) {
		h = (h << 4) + *p++;
		g = h & 0xf0000000;
		if (g)
			h ^= g >> 24;
		h &= ~g;
	}
	return h;
}

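/*
 * Returns true and sets @val to the symbol's virtual address if the
 * candidate symbol is a defined global whose name matches @name.
 */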
static bool __resolve_sym(struct ta_elf *elf, unsigned int bind,
			  size_t st_shndx, size_t st_name, size_t st_value,
			  const char *name, vaddr_t *val)
{
	if (bind != STB_GLOBAL)
		return false;
	if (st_shndx == SHN_UNDEF || st_shndx == SHN_XINDEX)
		return false;
	if (!st_name)
		return false;
	if (st_name > elf->dynstr_size)
		err(TEE_ERROR_BAD_FORMAT, "Symbol out of range");

	if (strcmp(name, elf->dynstr + st_name))
		return false;

	*val = st_value + elf->load_addr;
	return true;
}

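/*
 * Looks up @name in a single module by walking the chain for @hash in the
 * module's SysV-style hash table (nbuckets, nchains, buckets, chains) and
 * testing each candidate dynamic symbol with __resolve_sym().
 */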
static TEE_Result resolve_sym_helper(uint32_t hash, const char *name,
				     vaddr_t *val, struct ta_elf *elf)
{
	/*
	 * Using uint32_t here for convenience because both Elf64_Word
	 * and Elf32_Word are 32-bit types
	 */
	uint32_t *hashtab = elf->hashtab;
	uint32_t nbuckets = hashtab[0];
	uint32_t nchains = hashtab[1];
	uint32_t *bucket = &hashtab[2];
	uint32_t *chain = &bucket[nbuckets];
	size_t n = 0;

	if (elf->is_32bit) {
		Elf32_Sym *sym = elf->dynsymtab;

		for (n = bucket[hash % nbuckets]; n; n = chain[n]) {
			assert(n < nchains);
			if (__resolve_sym(elf,
					  ELF32_ST_BIND(sym[n].st_info),
					  sym[n].st_shndx,
					  sym[n].st_name,
					  sym[n].st_value, name, val))
				return TEE_SUCCESS;
		}
	} else {
		Elf64_Sym *sym = elf->dynsymtab;

		for (n = bucket[hash % nbuckets]; n; n = chain[n]) {
			assert(n < nchains);
			if (__resolve_sym(elf,
					  ELF64_ST_BIND(sym[n].st_info),
					  sym[n].st_shndx,
					  sym[n].st_name,
					  sym[n].st_value, name, val))
				return TEE_SUCCESS;
		}
	}

	return TEE_ERROR_ITEM_NOT_FOUND;
}

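/*
 * Resolves @name to an address in @elf, or, when @elf is NULL, in the
 * first module on main_elf_queue that defines the symbol.
 */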
TEE_Result ta_elf_resolve_sym(const char *name, vaddr_t *val,
			      struct ta_elf *elf)
{
	uint32_t hash = elf_hash(name);

	if (elf)
		return resolve_sym_helper(hash, name, val, elf);

	TAILQ_FOREACH(elf, &main_elf_queue, link)
		if (!resolve_sym_helper(hash, name, val, elf))
			return TEE_SUCCESS;

	return TEE_ERROR_ITEM_NOT_FOUND;
}

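/* Resolves @name across all loaded modules; terminates via err() if not found */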
static void resolve_sym(const char *name, vaddr_t *val)
{
	TEE_Result res = ta_elf_resolve_sym(name, val, NULL);

	if (res)
		err(res, "Symbol %s not found", name);
}

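/*
 * Handles a 32-bit relocation against an external (dynamic) symbol:
 * looks the symbol name up in the string table and stores the resolved
 * address at the relocated location.
 */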
static void e32_process_dyn_rel(const Elf32_Sym *sym_tab, size_t num_syms,
				const char *str_tab, size_t str_tab_size,
				Elf32_Rel *rel, Elf32_Addr *where)
{
	size_t sym_idx = 0;
	const char *name = NULL;
	vaddr_t val = 0;
	size_t name_idx = 0;

	sym_idx = ELF32_R_SYM(rel->r_info);
	assert(sym_idx < num_syms);

	name_idx = sym_tab[sym_idx].st_name;
	assert(name_idx < str_tab_size);
	name = str_tab + name_idx;

	resolve_sym(name, &val);
	*where = val;
}

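/*
 * Processes one SHT_REL section of a 32-bit (Arm) module: locates the
 * associated symbol and string tables, checks that they lie inside the
 * mapped image, then applies each relocation entry.
 */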
static void e32_relocate(struct ta_elf *elf, unsigned int rel_sidx)
{
	Elf32_Shdr *shdr = elf->shdr;
	Elf32_Rel *rel = NULL;
	Elf32_Rel *rel_end = NULL;
	size_t sym_tab_idx = 0;
	Elf32_Sym *sym_tab = NULL;
	size_t num_syms = 0;
	size_t sh_end = 0;
	const char *str_tab = NULL;
	size_t str_tab_size = 0;

	assert(shdr[rel_sidx].sh_type == SHT_REL);

	assert(shdr[rel_sidx].sh_entsize == sizeof(Elf32_Rel));

	sym_tab_idx = shdr[rel_sidx].sh_link;
	if (sym_tab_idx) {
		size_t str_tab_idx = 0;

		assert(sym_tab_idx < elf->e_shnum);

		assert(shdr[sym_tab_idx].sh_entsize == sizeof(Elf32_Sym));

		/* Check the address is inside ELF memory */
		if (ADD_OVERFLOW(shdr[sym_tab_idx].sh_addr,
				 shdr[sym_tab_idx].sh_size, &sh_end))
			err(TEE_ERROR_SECURITY, "Overflow");
		assert(sh_end < (elf->max_addr - elf->load_addr));

		sym_tab = (Elf32_Sym *)(elf->load_addr +
					shdr[sym_tab_idx].sh_addr);

		num_syms = shdr[sym_tab_idx].sh_size / sizeof(Elf32_Sym);

		str_tab_idx = shdr[sym_tab_idx].sh_link;
		if (str_tab_idx) {
			/* Check the address is inside ELF memory */
			if (ADD_OVERFLOW(shdr[str_tab_idx].sh_addr,
					 shdr[str_tab_idx].sh_size, &sh_end))
				err(TEE_ERROR_SECURITY, "Overflow");
			assert(sh_end < (elf->max_addr - elf->load_addr));

			str_tab = (const char *)(elf->load_addr +
						 shdr[str_tab_idx].sh_addr);
			str_tab_size = shdr[str_tab_idx].sh_size;
		}
	}

	/* Check the address is inside TA memory */
	assert(shdr[rel_sidx].sh_addr < (elf->max_addr - elf->load_addr));
	rel = (Elf32_Rel *)(elf->load_addr + shdr[rel_sidx].sh_addr);

	/* Check the address is inside TA memory */
	if (ADD_OVERFLOW(shdr[rel_sidx].sh_addr, shdr[rel_sidx].sh_size,
			 &sh_end))
		err(TEE_ERROR_SECURITY, "Overflow");
	assert(sh_end < (elf->max_addr - elf->load_addr));
	rel_end = rel + shdr[rel_sidx].sh_size / sizeof(Elf32_Rel);
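	/*
	 * SHT_REL entries carry no explicit addend: the addend is the
	 * value already stored at the relocated location, hence the
	 * "*where +=" updates below.
	 */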
	for (; rel < rel_end; rel++) {
		Elf32_Addr *where = NULL;
		size_t sym_idx = 0;

		/* Check the address is inside TA memory */
		assert(rel->r_offset < (elf->max_addr - elf->load_addr));
		where = (Elf32_Addr *)(elf->load_addr + rel->r_offset);

		switch (ELF32_R_TYPE(rel->r_info)) {
		case R_ARM_ABS32:
			sym_idx = ELF32_R_SYM(rel->r_info);
			assert(sym_idx < num_syms);
			if (sym_tab[sym_idx].st_shndx == SHN_UNDEF) {
				/* Symbol is external */
				e32_process_dyn_rel(sym_tab, num_syms, str_tab,
						    str_tab_size, rel, where);
			} else {
				*where += elf->load_addr +
					  sym_tab[sym_idx].st_value;
			}
			break;
		case R_ARM_REL32:
			sym_idx = ELF32_R_SYM(rel->r_info);
			assert(sym_idx < num_syms);
			*where += sym_tab[sym_idx].st_value - rel->r_offset;
			break;
		case R_ARM_RELATIVE:
			*where += elf->load_addr;
			break;
		case R_ARM_GLOB_DAT:
		case R_ARM_JUMP_SLOT:
			e32_process_dyn_rel(sym_tab, num_syms, str_tab,
					    str_tab_size, rel, where);
			break;
		default:
			err(TEE_ERROR_BAD_FORMAT, "Unknown relocation type %d",
			     ELF32_R_TYPE(rel->r_info));
		}
	}
}

#ifdef ARM64
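/*
 * Handles a 64-bit relocation against an external (dynamic) symbol:
 * looks the symbol name up in the string table and stores the resolved
 * address at the relocated location.
 */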
static void e64_process_dyn_rela(const Elf64_Sym *sym_tab, size_t num_syms,
				 const char *str_tab, size_t str_tab_size,
				 Elf64_Rela *rela, Elf64_Addr *where)
{
	size_t sym_idx = 0;
	const char *name = NULL;
	vaddr_t val = 0;
	size_t name_idx = 0;

	sym_idx = ELF64_R_SYM(rela->r_info);
	assert(sym_idx < num_syms);

	name_idx = sym_tab[sym_idx].st_name;
	assert(name_idx < str_tab_size);
	name = str_tab + name_idx;

	resolve_sym(name, &val);
	*where = val;
}

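/*
 * Processes one SHT_RELA section of a 64-bit (AArch64) module: locates
 * the associated symbol and string tables, checks that they lie inside
 * the mapped image, then applies each relocation entry.
 */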
static void e64_relocate(struct ta_elf *elf, unsigned int rel_sidx)
{
	Elf64_Shdr *shdr = elf->shdr;
	Elf64_Rela *rela = NULL;
	Elf64_Rela *rela_end = NULL;
	size_t sym_tab_idx = 0;
	Elf64_Sym *sym_tab = NULL;
	size_t num_syms = 0;
	size_t sh_end = 0;
	const char *str_tab = NULL;
	size_t str_tab_size = 0;

	assert(shdr[rel_sidx].sh_type == SHT_RELA);

	assert(shdr[rel_sidx].sh_entsize == sizeof(Elf64_Rela));

	sym_tab_idx = shdr[rel_sidx].sh_link;
	if (sym_tab_idx) {
		size_t str_tab_idx = 0;

		assert(sym_tab_idx < elf->e_shnum);

		assert(shdr[sym_tab_idx].sh_entsize == sizeof(Elf64_Sym));

		/* Check the address is inside TA memory */
		if (ADD_OVERFLOW(shdr[sym_tab_idx].sh_addr,
				 shdr[sym_tab_idx].sh_size, &sh_end))
			err(TEE_ERROR_SECURITY, "Overflow");
		assert(sh_end < (elf->max_addr - elf->load_addr));

		sym_tab = (Elf64_Sym *)(elf->load_addr +
					shdr[sym_tab_idx].sh_addr);

		num_syms = shdr[sym_tab_idx].sh_size / sizeof(Elf64_Sym);

		str_tab_idx = shdr[sym_tab_idx].sh_link;
		if (str_tab_idx) {
			/* Check the address is inside ELF memory */
			if (ADD_OVERFLOW(shdr[str_tab_idx].sh_addr,
					 shdr[str_tab_idx].sh_size, &sh_end))
				err(TEE_ERROR_SECURITY, "Overflow");
			assert(sh_end < (elf->max_addr - elf->load_addr));

			str_tab = (const char *)(elf->load_addr +
						 shdr[str_tab_idx].sh_addr);
			str_tab_size = shdr[str_tab_idx].sh_size;
		}
	}

	/* Check the address is inside TA memory */
	assert(shdr[rel_sidx].sh_addr < (elf->max_addr - elf->load_addr));
	rela = (Elf64_Rela *)(elf->load_addr + shdr[rel_sidx].sh_addr);

	/* Check the address is inside TA memory */
	if (ADD_OVERFLOW(shdr[rel_sidx].sh_addr, shdr[rel_sidx].sh_size,
			 &sh_end))
		err(TEE_ERROR_SECURITY, "Overflow");
	assert(sh_end < (elf->max_addr - elf->load_addr));
	rela_end = rela + shdr[rel_sidx].sh_size / sizeof(Elf64_Rela);
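	/*
	 * SHT_RELA entries carry an explicit addend (r_addend), so the
	 * relocated location is overwritten rather than incremented.
	 */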
	for (; rela < rela_end; rela++) {
		Elf64_Addr *where = NULL;
		size_t sym_idx = 0;

		/* Check the address is inside TA memory */
		assert(rela->r_offset < (elf->max_addr - elf->load_addr));

		where = (Elf64_Addr *)(elf->load_addr + rela->r_offset);

		switch (ELF64_R_TYPE(rela->r_info)) {
		case R_AARCH64_ABS64:
			sym_idx = ELF64_R_SYM(rela->r_info);
			assert(sym_idx < num_syms);
			if (sym_tab[sym_idx].st_shndx == SHN_UNDEF) {
				/* Symbol is external */
				e64_process_dyn_rela(sym_tab, num_syms, str_tab,
						     str_tab_size, rela, where);
			} else {
				*where = rela->r_addend + elf->load_addr +
					 sym_tab[sym_idx].st_value;
			}
			break;
		case R_AARCH64_RELATIVE:
			*where = rela->r_addend + elf->load_addr;
			break;
		case R_AARCH64_GLOB_DAT:
		case R_AARCH64_JUMP_SLOT:
			e64_process_dyn_rela(sym_tab, num_syms, str_tab,
					     str_tab_size, rela, where);
			break;
		default:
			err(TEE_ERROR_BAD_FORMAT, "Unknown relocation type %zd",
			     ELF64_R_TYPE(rela->r_info));
		}
	}
}
#else /*ARM64*/
static void __noreturn e64_relocate(struct ta_elf *elf __unused,
				    unsigned int rel_sidx __unused)
{
	err(TEE_ERROR_NOT_SUPPORTED, "arm64 not supported");
}
#endif /*ARM64*/

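/*
 * Applies all relocation sections of the module: SHT_REL sections for
 * 32-bit ELF images and SHT_RELA sections for 64-bit ones.
 */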
void ta_elf_relocate(struct ta_elf *elf)
{
	size_t n = 0;

	if (elf->is_32bit) {
		Elf32_Shdr *shdr = elf->shdr;

		for (n = 0; n < elf->e_shnum; n++)
			if (shdr[n].sh_type == SHT_REL)
				e32_relocate(elf, n);
	} else {
		Elf64_Shdr *shdr = elf->shdr;

		for (n = 0; n < elf->e_shnum; n++)
			if (shdr[n].sh_type == SHT_RELA)
				e64_relocate(elf, n);
	}
}
383