xref: /optee_os/ldelf/ta_elf_rel.c (revision dc57b1101a33ec9bf18ee3d2b88a0d8ff12d2ede)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2019, Linaro Limited
4  */
5 
6 #include <assert.h>
7 #include <elf32.h>
8 #include <elf64.h>
9 #include <elf_common.h>
10 #include <string.h>
11 #include <tee_api_types.h>
12 #include <util.h>
13 
14 #include "sys.h"
15 #include "ta_elf.h"
16 
/*
 * Classic System V ELF hash function, used to index the DT_HASH table
 * of the dynamic symbol table. Must match the hash the linker used when
 * it built the .hash section.
 */
static uint32_t elf_hash(const char *name)
{
	const unsigned char *s = (const unsigned char *)name;
	uint32_t hash = 0;

	for (; *s; s++) {
		uint32_t top = 0;

		hash = (hash << 4) + *s;
		top = hash & 0xf0000000;
		if (top)
			hash ^= top >> 24;
		hash &= ~top;
	}

	return hash;
}
32 
33 static bool __resolve_sym(struct ta_elf *elf, unsigned int bind,
34 			  size_t st_shndx, size_t st_name, size_t st_value,
35 			  const char *name, vaddr_t *val)
36 {
37 	if (bind != STB_GLOBAL)
38 		return false;
39 	if (st_shndx == SHN_UNDEF || st_shndx == SHN_XINDEX)
40 		return false;
41 	if (!st_name)
42 		return false;
43 	if (st_name > elf->dynstr_size)
44 		err(TEE_ERROR_BAD_FORMAT, "Symbol out of range");
45 
46 	if (strcmp(name, elf->dynstr + st_name))
47 		return false;
48 
49 	*val = st_value + elf->load_addr;
50 	return true;
51 }
52 
53 static TEE_Result resolve_sym_helper(uint32_t hash, const char *name,
54 				     vaddr_t *val, struct ta_elf *elf)
55 {
56 	/*
57 	 * Using uint32_t here for convenience because both Elf64_Word
58 	 * and Elf32_Word are 32-bit types
59 	 */
60 	uint32_t *hashtab = elf->hashtab;
61 	uint32_t nbuckets = hashtab[0];
62 	uint32_t nchains = hashtab[1];
63 	uint32_t *bucket = &hashtab[2];
64 	uint32_t *chain = &bucket[nbuckets];
65 	size_t n = 0;
66 
67 	if (elf->is_32bit) {
68 		Elf32_Sym *sym = elf->dynsymtab;
69 
70 		for (n = bucket[hash % nbuckets]; n; n = chain[n]) {
71 			assert(n < nchains);
72 			if (__resolve_sym(elf,
73 					  ELF32_ST_BIND(sym[n].st_info),
74 					  sym[n].st_shndx,
75 					  sym[n].st_name,
76 					  sym[n].st_value, name, val))
77 				return TEE_SUCCESS;
78 		}
79 	} else {
80 		Elf64_Sym *sym = elf->dynsymtab;
81 
82 		for (n = bucket[hash % nbuckets]; n; n = chain[n]) {
83 			assert(n < nchains);
84 			if (__resolve_sym(elf,
85 					  ELF64_ST_BIND(sym[n].st_info),
86 					  sym[n].st_shndx,
87 					  sym[n].st_name,
88 					  sym[n].st_value, name, val))
89 				return TEE_SUCCESS;
90 		}
91 	}
92 
93 	return TEE_ERROR_ITEM_NOT_FOUND;
94 }
95 
96 TEE_Result ta_elf_resolve_sym(const char *name, vaddr_t *val,
97 			      struct ta_elf *elf)
98 {
99 	uint32_t hash = elf_hash(name);
100 
101 	if (elf)
102 		return resolve_sym_helper(hash, name, val, elf);
103 
104 	TAILQ_FOREACH(elf, &main_elf_queue, link)
105 		if (!resolve_sym_helper(hash, name, val, elf))
106 			return TEE_SUCCESS;
107 
108 	return TEE_ERROR_ITEM_NOT_FOUND;
109 }
110 
111 static void resolve_sym(const char *name, vaddr_t *val)
112 {
113 	TEE_Result res = ta_elf_resolve_sym(name, val, NULL);
114 
115 	if (res)
116 		err(res, "Symbol %s not found", name);
117 }
118 
119 static void e32_process_dyn_rel(const Elf32_Sym *sym_tab, size_t num_syms,
120 				const char *str_tab, size_t str_tab_size,
121 				Elf32_Rel *rel, Elf32_Addr *where)
122 {
123 	size_t sym_idx = 0;
124 	const char *name = NULL;
125 	vaddr_t val = 0;
126 	size_t name_idx = 0;
127 
128 	sym_idx = ELF32_R_SYM(rel->r_info);
129 	assert(sym_idx < num_syms);
130 
131 	name_idx = sym_tab[sym_idx].st_name;
132 	assert(name_idx < str_tab_size);
133 	name = str_tab + name_idx;
134 
135 	resolve_sym(name, &val);
136 	*where = val;
137 }
138 
/*
 * Process one SHT_REL section (index @rel_sidx) of a 32-bit Arm ELF
 * file: locate the associated symbol and string tables, bounds-check
 * everything against the mapped image, then apply each relocation
 * entry. Any malformed or unsupported entry panics via err().
 */
static void e32_relocate(struct ta_elf *elf, unsigned int rel_sidx)
{
	Elf32_Shdr *shdr = elf->shdr;
	Elf32_Rel *rel = NULL;
	Elf32_Rel *rel_end = NULL;
	size_t sym_tab_idx = 0;
	Elf32_Sym *sym_tab = NULL;
	size_t num_syms = 0;
	size_t sh_end = 0;
	const char *str_tab = NULL;
	size_t str_tab_size = 0;

	assert(shdr[rel_sidx].sh_type == SHT_REL);

	assert(shdr[rel_sidx].sh_entsize == sizeof(Elf32_Rel));

	/* sh_link of a REL section names its symbol table section */
	sym_tab_idx = shdr[rel_sidx].sh_link;
	if (sym_tab_idx) {
		size_t str_tab_idx = 0;

		assert(sym_tab_idx < elf->e_shnum);

		assert(shdr[sym_tab_idx].sh_entsize == sizeof(Elf32_Sym));

		/* Check the address is inside ELF memory */
		if (ADD_OVERFLOW(shdr[sym_tab_idx].sh_addr,
				 shdr[sym_tab_idx].sh_size, &sh_end))
			err(TEE_ERROR_SECURITY, "Overflow");
		assert(sh_end < (elf->max_addr - elf->load_addr));

		sym_tab = (Elf32_Sym *)(elf->load_addr +
					shdr[sym_tab_idx].sh_addr);

		num_syms = shdr[sym_tab_idx].sh_size / sizeof(Elf32_Sym);

		/* sh_link of a symbol table names its string table */
		str_tab_idx = shdr[sym_tab_idx].sh_link;
		if (str_tab_idx) {
			/* Check the address is inside ELF memory */
			if (ADD_OVERFLOW(shdr[str_tab_idx].sh_addr,
					 shdr[str_tab_idx].sh_size, &sh_end))
				err(TEE_ERROR_SECURITY, "Overflow");
			assert(sh_end < (elf->max_addr - elf->load_addr));

			str_tab = (const char *)(elf->load_addr +
						 shdr[str_tab_idx].sh_addr);
			str_tab_size = shdr[str_tab_idx].sh_size;
		}
	}

	/* Check the address is inside TA memory */
	assert(shdr[rel_sidx].sh_addr < (elf->max_addr - elf->load_addr));
	rel = (Elf32_Rel *)(elf->load_addr + shdr[rel_sidx].sh_addr);

	/* Check the address is inside TA memory */
	if (ADD_OVERFLOW(shdr[rel_sidx].sh_addr, shdr[rel_sidx].sh_size,
			 &sh_end))
		err(TEE_ERROR_SECURITY, "Overflow");
	assert(sh_end < (elf->max_addr - elf->load_addr));
	rel_end = rel + shdr[rel_sidx].sh_size / sizeof(Elf32_Rel);
	for (; rel < rel_end; rel++) {
		Elf32_Addr *where = NULL;
		size_t sym_idx = 0;

		/* Check the address is inside TA memory */
		assert(rel->r_offset < (elf->max_addr - elf->load_addr));
		where = (Elf32_Addr *)(elf->load_addr + rel->r_offset);

		switch (ELF32_R_TYPE(rel->r_info)) {
		case R_ARM_ABS32:
			sym_idx = ELF32_R_SYM(rel->r_info);
			assert(sym_idx < num_syms);
			if (sym_tab[sym_idx].st_shndx == SHN_UNDEF) {
				/* Symbol is external */
				e32_process_dyn_rel(sym_tab, num_syms, str_tab,
						    str_tab_size, rel, where);
			} else {
				/* Local definition: S + A with A in *where */
				*where += elf->load_addr +
					  sym_tab[sym_idx].st_value;
			}
			break;
		case R_ARM_REL32:
			sym_idx = ELF32_R_SYM(rel->r_info);
			assert(sym_idx < num_syms);
			*where += sym_tab[sym_idx].st_value - rel->r_offset;
			break;
		case R_ARM_RELATIVE:
			/* B(S) + A: just add the load offset */
			*where += elf->load_addr;
			break;
		case R_ARM_GLOB_DAT:
		case R_ARM_JUMP_SLOT:
			/* GOT/PLT slot: store the resolved symbol address */
			e32_process_dyn_rel(sym_tab, num_syms, str_tab,
					    str_tab_size, rel, where);
			break;
		default:
			err(TEE_ERROR_BAD_FORMAT, "Unknown relocation type %d",
			     ELF32_R_TYPE(rel->r_info));
		}
	}
}
238 
239 #ifdef ARM64
240 static void e64_process_dyn_rela(const Elf64_Sym *sym_tab, size_t num_syms,
241 				 const char *str_tab, size_t str_tab_size,
242 				 Elf64_Rela *rela, Elf64_Addr *where)
243 {
244 	size_t sym_idx = 0;
245 	const char *name = NULL;
246 	uintptr_t val = 0;
247 	size_t name_idx = 0;
248 
249 	sym_idx = ELF64_R_SYM(rela->r_info);
250 	assert(sym_idx < num_syms);
251 
252 	name_idx = sym_tab[sym_idx].st_name;
253 	assert(name_idx < str_tab_size);
254 	name = str_tab + name_idx;
255 
256 	resolve_sym(name, &val);
257 	*where = val;
258 }
259 
/*
 * Process one SHT_RELA section (index @rel_sidx) of a 64-bit AArch64
 * ELF file: locate the associated symbol and string tables,
 * bounds-check everything against the mapped image, then apply each
 * relocation entry. Any malformed or unsupported entry panics via
 * err().
 */
static void e64_relocate(struct ta_elf *elf, unsigned int rel_sidx)
{
	Elf64_Shdr *shdr = elf->shdr;
	Elf64_Rela *rela = NULL;
	Elf64_Rela *rela_end = NULL;
	size_t sym_tab_idx = 0;
	Elf64_Sym *sym_tab = NULL;
	size_t num_syms = 0;
	size_t sh_end = 0;
	const char *str_tab = NULL;
	size_t str_tab_size = 0;

	assert(shdr[rel_sidx].sh_type == SHT_RELA);

	assert(shdr[rel_sidx].sh_entsize == sizeof(Elf64_Rela));

	/* sh_link of a RELA section names its symbol table section */
	sym_tab_idx = shdr[rel_sidx].sh_link;
	if (sym_tab_idx) {
		size_t str_tab_idx = 0;

		assert(sym_tab_idx < elf->e_shnum);

		assert(shdr[sym_tab_idx].sh_entsize == sizeof(Elf64_Sym));

		/* Check the address is inside TA memory */
		if (ADD_OVERFLOW(shdr[sym_tab_idx].sh_addr,
				 shdr[sym_tab_idx].sh_size, &sh_end))
			err(TEE_ERROR_SECURITY, "Overflow");
		assert(sh_end < (elf->max_addr - elf->load_addr));

		sym_tab = (Elf64_Sym *)(elf->load_addr +
					shdr[sym_tab_idx].sh_addr);

		num_syms = shdr[sym_tab_idx].sh_size / sizeof(Elf64_Sym);

		/* sh_link of a symbol table names its string table */
		str_tab_idx = shdr[sym_tab_idx].sh_link;
		if (str_tab_idx) {
			/* Check the address is inside ELF memory */
			if (ADD_OVERFLOW(shdr[str_tab_idx].sh_addr,
					 shdr[str_tab_idx].sh_size, &sh_end))
				err(TEE_ERROR_SECURITY, "Overflow");
			assert(sh_end < (elf->max_addr - elf->load_addr));

			str_tab = (const char *)(elf->load_addr +
						 shdr[str_tab_idx].sh_addr);
			str_tab_size = shdr[str_tab_idx].sh_size;
		}
	}

	/* Check the address is inside TA memory */
	assert(shdr[rel_sidx].sh_addr < (elf->max_addr - elf->load_addr));
	rela = (Elf64_Rela *)(elf->load_addr + shdr[rel_sidx].sh_addr);

	/* Check the address is inside TA memory */
	if (ADD_OVERFLOW(shdr[rel_sidx].sh_addr, shdr[rel_sidx].sh_size,
			 &sh_end))
		err(TEE_ERROR_SECURITY, "Overflow");
	assert(sh_end < (elf->max_addr - elf->load_addr));
	rela_end = rela + shdr[rel_sidx].sh_size / sizeof(Elf64_Rela);
	for (; rela < rela_end; rela++) {
		Elf64_Addr *where = NULL;
		size_t sym_idx = 0;

		/* Check the address is inside TA memory */
		assert(rela->r_offset < (elf->max_addr - elf->load_addr));

		where = (Elf64_Addr *)(elf->load_addr + rela->r_offset);

		switch (ELF64_R_TYPE(rela->r_info)) {
		case R_AARCH64_ABS64:
			sym_idx = ELF64_R_SYM(rela->r_info);
			assert(sym_idx < num_syms);
			if (sym_tab[sym_idx].st_shndx == SHN_UNDEF) {
				/* Symbol is external */
				e64_process_dyn_rela(sym_tab, num_syms, str_tab,
						     str_tab_size, rela, where);
			} else {
				/* Local definition: S + A */
				*where = rela->r_addend + elf->load_addr +
					 sym_tab[sym_idx].st_value;
			}
			break;
		case R_AARCH64_RELATIVE:
			/* Delta(S) + A: rebase by the load offset */
			*where = rela->r_addend + elf->load_addr;
			break;
		case R_AARCH64_GLOB_DAT:
		case R_AARCH64_JUMP_SLOT:
			/* GOT/PLT slot: store the resolved symbol address */
			e64_process_dyn_rela(sym_tab, num_syms, str_tab,
					     str_tab_size, rela, where);
			break;
		default:
			err(TEE_ERROR_BAD_FORMAT, "Unknown relocation type %zd",
			     ELF64_R_TYPE(rela->r_info));
		}
	}
}
355 #else /*ARM64*/
/* Stub: ldelf built without ARM64 support cannot handle 64-bit ELF files */
static void e64_relocate(struct ta_elf *elf __unused,
			 unsigned int rel_sidx __unused)
{
	err(TEE_ERROR_NOT_SUPPORTED, "arm64 not supported");
}
361 #endif /*ARM64*/
362 
363 void ta_elf_relocate(struct ta_elf *elf)
364 {
365 	size_t n = 0;
366 
367 	if (elf->is_32bit) {
368 		Elf32_Shdr *shdr = elf->shdr;
369 
370 		for (n = 0; n < elf->e_shnum; n++)
371 			if (shdr[n].sh_type == SHT_REL)
372 				e32_relocate(elf, n);
373 	} else {
374 		Elf64_Shdr *shdr = elf->shdr;
375 
376 		for (n = 0; n < elf->e_shnum; n++)
377 			if (shdr[n].sh_type == SHT_RELA)
378 				e64_relocate(elf, n);
379 
380 	}
381 }
382