xref: /optee_os/ldelf/ta_elf_rel.c (revision 5c0860db3f473f43b18f0ec6c84ff020b6bb85b4)
// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2019, Linaro Limited
 */

#include <assert.h>
#include <compiler.h>
#include <elf32.h>
#include <elf64.h>
#include <elf_common.h>
#include <string.h>
#include <tee_api_types.h>
#include <util.h>

#include "sys.h"
#include "ta_elf.h"

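/*
 * Standard ELF (SysV) hash function, used to index the module's hash
 * table (elf->hashtab) when looking up dynamic symbols.
 */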
static uint32_t elf_hash(const char *name)
{
	const unsigned char *p = (const unsigned char *)name;
	uint32_t h = 0;
	uint32_t g = 0;

	while (*p) {
		h = (h << 4) + *p++;
		g = h & 0xf0000000;
		if (g)
			h ^= g >> 24;
		h &= ~g;
	}
	return h;
}

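/*
 * Returns true and the rebased symbol value in @val if the dynamic symbol
 * table entry described by the arguments is a defined global symbol whose
 * name matches @name.
 */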
static bool __resolve_sym(struct ta_elf *elf, unsigned int bind,
			  size_t st_shndx, size_t st_name, size_t st_value,
			  const char *name, vaddr_t *val)
{
	if (bind != STB_GLOBAL)
		return false;
	if (st_shndx == SHN_UNDEF || st_shndx == SHN_XINDEX)
		return false;
	if (!st_name)
		return false;
	if (st_name > elf->dynstr_size)
		err(TEE_ERROR_BAD_FORMAT, "Symbol out of range");

	if (strcmp(name, elf->dynstr + st_name))
		return false;

	*val = st_value + elf->load_addr;
	return true;
}

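/*
 * Looks up @name in the hash table of a single module. The table layout
 * is nbucket, nchain, bucket[nbucket], chain[nchain], where bucket[] and
 * chain[] hold indices into the dynamic symbol table.
 */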
static TEE_Result resolve_sym_helper(uint32_t hash, const char *name,
				     vaddr_t *val, struct ta_elf *elf)
{
	/*
	 * Using uint32_t here for convenience because both Elf64_Word
	 * and Elf32_Word are 32-bit types
	 */
	uint32_t *hashtab = elf->hashtab;
	uint32_t nbuckets = hashtab[0];
	uint32_t nchains = hashtab[1];
	uint32_t *bucket = &hashtab[2];
	uint32_t *chain = &bucket[nbuckets];
	size_t n = 0;

	if (elf->is_32bit) {
		Elf32_Sym *sym = elf->dynsymtab;

		for (n = bucket[hash % nbuckets]; n; n = chain[n]) {
			if (n >= nchains)
				err(TEE_ERROR_BAD_FORMAT,
				    "Index out of range");
			if (__resolve_sym(elf,
					  ELF32_ST_BIND(sym[n].st_info),
					  sym[n].st_shndx,
					  sym[n].st_name,
					  sym[n].st_value, name, val))
				return TEE_SUCCESS;
		}
	} else {
		Elf64_Sym *sym = elf->dynsymtab;

		for (n = bucket[hash % nbuckets]; n; n = chain[n]) {
			if (n >= nchains)
				err(TEE_ERROR_BAD_FORMAT,
				    "Index out of range");
			if (__resolve_sym(elf,
					  ELF64_ST_BIND(sym[n].st_info),
					  sym[n].st_shndx,
					  sym[n].st_name,
					  sym[n].st_value, name, val))
				return TEE_SUCCESS;
		}
	}

	return TEE_ERROR_ITEM_NOT_FOUND;
}

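/*
 * Resolves @name in @elf when a module is given, otherwise searches all
 * loaded modules in the order they appear in main_elf_queue.
 */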
TEE_Result ta_elf_resolve_sym(const char *name, vaddr_t *val,
			      struct ta_elf *elf)
{
	uint32_t hash = elf_hash(name);

	if (elf)
		return resolve_sym_helper(hash, name, val, elf);

	TAILQ_FOREACH(elf, &main_elf_queue, link)
		if (!resolve_sym_helper(hash, name, val, elf))
			return TEE_SUCCESS;

	return TEE_ERROR_ITEM_NOT_FOUND;
}

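/* As ta_elf_resolve_sym(), but aborts via err() on failure */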
static void resolve_sym(const char *name, vaddr_t *val)
{
	TEE_Result res = ta_elf_resolve_sym(name, val, NULL);

	if (res)
		err(res, "Symbol %s not found", name);
}

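/*
 * Resolves the symbol referenced by a 32-bit dynamic relocation entry and
 * stores its address at @where.
 */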
static void e32_process_dyn_rel(const Elf32_Sym *sym_tab, size_t num_syms,
				const char *str_tab, size_t str_tab_size,
				Elf32_Rel *rel, Elf32_Addr *where)
{
	size_t sym_idx = 0;
	const char *name = NULL;
	vaddr_t val = 0;
	size_t name_idx = 0;

	sym_idx = ELF32_R_SYM(rel->r_info);
	assert(sym_idx < num_syms);

	name_idx = sym_tab[sym_idx].st_name;
	assert(name_idx < str_tab_size);
	name = str_tab + name_idx;

	resolve_sym(name, &val);
	*where = val;
}

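/*
 * Applies one SHT_REL section of a 32-bit (ARM) module. The associated
 * symbol and string tables are located through sh_link before each
 * relocation entry is processed.
 */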
static void e32_relocate(struct ta_elf *elf, unsigned int rel_sidx)
{
	Elf32_Shdr *shdr = elf->shdr;
	Elf32_Rel *rel = NULL;
	Elf32_Rel *rel_end = NULL;
	size_t sym_tab_idx = 0;
	Elf32_Sym *sym_tab = NULL;
	size_t num_syms = 0;
	size_t sh_end = 0;
	const char *str_tab = NULL;
	size_t str_tab_size = 0;

	assert(shdr[rel_sidx].sh_type == SHT_REL);

	assert(shdr[rel_sidx].sh_entsize == sizeof(Elf32_Rel));

	sym_tab_idx = shdr[rel_sidx].sh_link;
	if (sym_tab_idx) {
		size_t str_tab_idx = 0;

		assert(sym_tab_idx < elf->e_shnum);

		assert(shdr[sym_tab_idx].sh_entsize == sizeof(Elf32_Sym));

		/* Check the address is inside ELF memory */
		if (ADD_OVERFLOW(shdr[sym_tab_idx].sh_addr,
				 shdr[sym_tab_idx].sh_size, &sh_end))
			err(TEE_ERROR_SECURITY, "Overflow");
		assert(sh_end < (elf->max_addr - elf->load_addr));

		sym_tab = (Elf32_Sym *)(elf->load_addr +
					shdr[sym_tab_idx].sh_addr);

		num_syms = shdr[sym_tab_idx].sh_size / sizeof(Elf32_Sym);

		str_tab_idx = shdr[sym_tab_idx].sh_link;
		if (str_tab_idx) {
			/* Check the address is inside ELF memory */
			if (ADD_OVERFLOW(shdr[str_tab_idx].sh_addr,
					 shdr[str_tab_idx].sh_size, &sh_end))
				err(TEE_ERROR_SECURITY, "Overflow");
			assert(sh_end < (elf->max_addr - elf->load_addr));

			str_tab = (const char *)(elf->load_addr +
						 shdr[str_tab_idx].sh_addr);
			str_tab_size = shdr[str_tab_idx].sh_size;
		}
	}

	/* Check the address is inside TA memory */
	assert(shdr[rel_sidx].sh_addr < (elf->max_addr - elf->load_addr));
	rel = (Elf32_Rel *)(elf->load_addr + shdr[rel_sidx].sh_addr);

	/* Check the address is inside TA memory */
	if (ADD_OVERFLOW(shdr[rel_sidx].sh_addr, shdr[rel_sidx].sh_size,
			 &sh_end))
		err(TEE_ERROR_SECURITY, "Overflow");
	assert(sh_end < (elf->max_addr - elf->load_addr));
	rel_end = rel + shdr[rel_sidx].sh_size / sizeof(Elf32_Rel);
	for (; rel < rel_end; rel++) {
		Elf32_Addr *where = NULL;
		size_t sym_idx = 0;

		/* Check the address is inside TA memory */
		assert(rel->r_offset < (elf->max_addr - elf->load_addr));
		where = (Elf32_Addr *)(elf->load_addr + rel->r_offset);

		switch (ELF32_R_TYPE(rel->r_info)) {
		case R_ARM_ABS32:
			sym_idx = ELF32_R_SYM(rel->r_info);
			assert(sym_idx < num_syms);
			if (sym_tab[sym_idx].st_shndx == SHN_UNDEF) {
				/* Symbol is external */
				e32_process_dyn_rel(sym_tab, num_syms, str_tab,
						    str_tab_size, rel, where);
			} else {
				*where += elf->load_addr +
					  sym_tab[sym_idx].st_value;
			}
			break;
		case R_ARM_REL32:
			sym_idx = ELF32_R_SYM(rel->r_info);
			assert(sym_idx < num_syms);
			*where += sym_tab[sym_idx].st_value - rel->r_offset;
			break;
		case R_ARM_RELATIVE:
			*where += elf->load_addr;
			break;
		case R_ARM_GLOB_DAT:
		case R_ARM_JUMP_SLOT:
			e32_process_dyn_rel(sym_tab, num_syms, str_tab,
					    str_tab_size, rel, where);
			break;
		default:
			err(TEE_ERROR_BAD_FORMAT, "Unknown relocation type %d",
			    ELF32_R_TYPE(rel->r_info));
		}
	}
}

#ifdef ARM64
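/*
 * Resolves the symbol referenced by a 64-bit dynamic relocation entry and
 * stores its address at @where.
 */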
static void e64_process_dyn_rela(const Elf64_Sym *sym_tab, size_t num_syms,
				 const char *str_tab, size_t str_tab_size,
				 Elf64_Rela *rela, Elf64_Addr *where)
{
	size_t sym_idx = 0;
	const char *name = NULL;
	uintptr_t val = 0;
	size_t name_idx = 0;

	sym_idx = ELF64_R_SYM(rela->r_info);
	assert(sym_idx < num_syms);

	name_idx = sym_tab[sym_idx].st_name;
	assert(name_idx < str_tab_size);
	name = str_tab + name_idx;

	resolve_sym(name, &val);
	*where = val;
}

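/*
 * Applies one SHT_RELA section of a 64-bit (AArch64) module, the RELA
 * counterpart of e32_relocate() above.
 */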
static void e64_relocate(struct ta_elf *elf, unsigned int rel_sidx)
{
	Elf64_Shdr *shdr = elf->shdr;
	Elf64_Rela *rela = NULL;
	Elf64_Rela *rela_end = NULL;
	size_t sym_tab_idx = 0;
	Elf64_Sym *sym_tab = NULL;
	size_t num_syms = 0;
	size_t sh_end = 0;
	const char *str_tab = NULL;
	size_t str_tab_size = 0;

	assert(shdr[rel_sidx].sh_type == SHT_RELA);

	assert(shdr[rel_sidx].sh_entsize == sizeof(Elf64_Rela));

	sym_tab_idx = shdr[rel_sidx].sh_link;
	if (sym_tab_idx) {
		size_t str_tab_idx = 0;

		assert(sym_tab_idx < elf->e_shnum);

		assert(shdr[sym_tab_idx].sh_entsize == sizeof(Elf64_Sym));

		/* Check the address is inside TA memory */
		if (ADD_OVERFLOW(shdr[sym_tab_idx].sh_addr,
				 shdr[sym_tab_idx].sh_size, &sh_end))
			err(TEE_ERROR_SECURITY, "Overflow");
		assert(sh_end < (elf->max_addr - elf->load_addr));

		sym_tab = (Elf64_Sym *)(elf->load_addr +
					shdr[sym_tab_idx].sh_addr);

		num_syms = shdr[sym_tab_idx].sh_size / sizeof(Elf64_Sym);

		str_tab_idx = shdr[sym_tab_idx].sh_link;
		if (str_tab_idx) {
			/* Check the address is inside ELF memory */
			if (ADD_OVERFLOW(shdr[str_tab_idx].sh_addr,
					 shdr[str_tab_idx].sh_size, &sh_end))
				err(TEE_ERROR_SECURITY, "Overflow");
			assert(sh_end < (elf->max_addr - elf->load_addr));

			str_tab = (const char *)(elf->load_addr +
						 shdr[str_tab_idx].sh_addr);
			str_tab_size = shdr[str_tab_idx].sh_size;
		}
	}

	/* Check the address is inside TA memory */
	assert(shdr[rel_sidx].sh_addr < (elf->max_addr - elf->load_addr));
	rela = (Elf64_Rela *)(elf->load_addr + shdr[rel_sidx].sh_addr);

	/* Check the address is inside TA memory */
	if (ADD_OVERFLOW(shdr[rel_sidx].sh_addr, shdr[rel_sidx].sh_size,
			 &sh_end))
		err(TEE_ERROR_SECURITY, "Overflow");
	assert(sh_end < (elf->max_addr - elf->load_addr));
	rela_end = rela + shdr[rel_sidx].sh_size / sizeof(Elf64_Rela);
	for (; rela < rela_end; rela++) {
		Elf64_Addr *where = NULL;
		size_t sym_idx = 0;

		/* Check the address is inside TA memory */
		assert(rela->r_offset < (elf->max_addr - elf->load_addr));

		where = (Elf64_Addr *)(elf->load_addr + rela->r_offset);

		switch (ELF64_R_TYPE(rela->r_info)) {
		case R_AARCH64_ABS64:
			sym_idx = ELF64_R_SYM(rela->r_info);
			assert(sym_idx < num_syms);
			if (sym_tab[sym_idx].st_shndx == SHN_UNDEF) {
				/* Symbol is external */
				e64_process_dyn_rela(sym_tab, num_syms, str_tab,
						     str_tab_size, rela, where);
			} else {
				*where = rela->r_addend + elf->load_addr +
					 sym_tab[sym_idx].st_value;
			}
			break;
		case R_AARCH64_RELATIVE:
			*where = rela->r_addend + elf->load_addr;
			break;
		case R_AARCH64_GLOB_DAT:
		case R_AARCH64_JUMP_SLOT:
			e64_process_dyn_rela(sym_tab, num_syms, str_tab,
					     str_tab_size, rela, where);
			break;
		default:
			err(TEE_ERROR_BAD_FORMAT, "Unknown relocation type %zd",
			    ELF64_R_TYPE(rela->r_info));
		}
	}
}
#else /*ARM64*/
static void __noreturn e64_relocate(struct ta_elf *elf __unused,
				    unsigned int rel_sidx __unused)
{
	err(TEE_ERROR_NOT_SUPPORTED, "arm64 not supported");
}
#endif /*ARM64*/

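/*
 * Applies all relocation sections of a module: SHT_REL sections for
 * 32-bit modules and SHT_RELA sections for 64-bit modules.
 */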
void ta_elf_relocate(struct ta_elf *elf)
{
	size_t n = 0;

	if (elf->is_32bit) {
		Elf32_Shdr *shdr = elf->shdr;

		for (n = 0; n < elf->e_shnum; n++)
			if (shdr[n].sh_type == SHT_REL)
				e32_relocate(elf, n);
	} else {
		Elf64_Shdr *shdr = elf->shdr;

		for (n = 0; n < elf->e_shnum; n++)
			if (shdr[n].sh_type == SHT_RELA)
				e64_relocate(elf, n);
	}
}
387