xref: /optee_os/ldelf/ta_elf_rel.c (revision 3dd0e94e9ea7b3baf41bfa7f7182765ec63e02f1)
// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2019, Linaro Limited
 */

#include <assert.h>
#include <elf32.h>
#include <elf64.h>
#include <elf_common.h>
#include <string.h>
#include <tee_api_types.h>
#include <util.h>

#include "sys.h"
#include "ta_elf.h"

static uint32_t elf_hash(const char *name)
{
	const unsigned char *p = (const unsigned char *)name;
	uint32_t h = 0;
	uint32_t g = 0;

	while (*p) {
		h = (h << 4) + *p++;
		g = h & 0xf0000000;
		if (g)
			h ^= g >> 24;
		h &= ~g;
	}
	return h;
}

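/*
 * Note: elf_hash() above is the classic SysV ELF hash function, matching
 * the hashing used by the DT_HASH table that resolve_sym() below walks.
 * Per the SysV gABI the table is an array of 32-bit words laid out as:
 *
 *   hashtab[0]              nbuckets
 *   hashtab[1]              nchains (== number of dynamic symbols)
 *   hashtab[2]              bucket[0 .. nbuckets - 1]
 *   hashtab[2 + nbuckets]   chain[0 .. nchains - 1]
 *
 * Illustrative lookup sketch (not additional API in this file):
 *
 *   uint32_t *bucket = &hashtab[2];
 *   uint32_t *chain = &bucket[nbuckets];
 *
 *   for (n = bucket[elf_hash(name) % nbuckets]; n; n = chain[n])
 *           compare name with dynstr + dynsym[n].st_name
 */
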
static bool __resolve_sym(struct ta_elf *elf, unsigned int bind,
			  size_t st_shndx, size_t st_name, size_t st_value,
			  const char *name, vaddr_t *val)
{
	if (bind != STB_GLOBAL)
		return false;
	if (st_shndx == SHN_UNDEF || st_shndx == SHN_XINDEX)
		return false;
	if (!st_name)
		return false;
	if (st_name > elf->dynstr_size)
		err(TEE_ERROR_BAD_FORMAT, "Symbol out of range");

	if (strcmp(name, elf->dynstr + st_name))
		return false;

	*val = st_value + elf->load_addr;
	return true;
}

static void resolve_sym(const char *name, vaddr_t *val)
{
	uint32_t hash = elf_hash(name);
	struct ta_elf *elf = NULL;
	size_t n = 0;

	TAILQ_FOREACH(elf, &main_elf_queue, link) {
		/*
		 * Using uint32_t here for convenience because both Elf64_Word
		 * and Elf32_Word are 32-bit types
		 */
		uint32_t *hashtab = elf->hashtab;
		uint32_t nbuckets = hashtab[0];
		uint32_t nchains = hashtab[1];
		uint32_t *bucket = &hashtab[2];
		uint32_t *chain = &bucket[nbuckets];

		if (elf->is_32bit) {
			Elf32_Sym *sym = elf->dynsymtab;

			for (n = bucket[hash % nbuckets]; n; n = chain[n]) {
				assert(n < nchains);
				if (__resolve_sym(elf,
						  ELF32_ST_BIND(sym[n].st_info),
						  sym[n].st_shndx,
						  sym[n].st_name,
						  sym[n].st_value, name, val))
					return;
			}
		} else {
			Elf64_Sym *sym = elf->dynsymtab;

			for (n = bucket[hash % nbuckets]; n; n = chain[n]) {
				assert(n < nchains);
				if (__resolve_sym(elf,
						  ELF64_ST_BIND(sym[n].st_info),
						  sym[n].st_shndx,
						  sym[n].st_name,
						  sym[n].st_value, name, val))
					return;
			}
		}
	}
	err(TEE_ERROR_ITEM_NOT_FOUND, "Symbol %s not found", name);
}

static void e32_process_dyn_rel(const Elf32_Sym *sym_tab, size_t num_syms,
				const char *str_tab, size_t str_tab_size,
				Elf32_Rel *rel, Elf32_Addr *where)
{
	size_t sym_idx = 0;
	const char *name = NULL;
	vaddr_t val = 0;
	size_t name_idx = 0;

	sym_idx = ELF32_R_SYM(rel->r_info);
	assert(sym_idx < num_syms);

	name_idx = sym_tab[sym_idx].st_name;
	assert(name_idx < str_tab_size);
	name = str_tab + name_idx;

	resolve_sym(name, &val);
	*where = val;
}

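/*
 * Informal summary of the Elf32 (REL) relocation types handled below, per
 * the Arm AAELF32 ABI (ignoring the Thumb interworking bit T), where
 * S = symbol address, A = addend, P = address of the place being relocated
 * and B(S) = this module's load address. For SHT_REL sections the addend is
 * the value already stored at the relocated location:
 *
 *   R_ARM_ABS32      S + A
 *   R_ARM_REL32      S + A - P
 *   R_ARM_RELATIVE   B(S) + A
 *   R_ARM_GLOB_DAT   resolved symbol address (written via resolve_sym())
 *   R_ARM_JUMP_SLOT  resolved symbol address (written via resolve_sym())
 */
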
static void e32_relocate(struct ta_elf *elf, unsigned int rel_sidx)
{
	Elf32_Shdr *shdr = elf->shdr;
	Elf32_Rel *rel = NULL;
	Elf32_Rel *rel_end = NULL;
	size_t sym_tab_idx = 0;
	Elf32_Sym *sym_tab = NULL;
	size_t num_syms = 0;
	size_t sh_end = 0;
	const char *str_tab = NULL;
	size_t str_tab_size = 0;

	assert(shdr[rel_sidx].sh_type == SHT_REL);

	assert(shdr[rel_sidx].sh_entsize == sizeof(Elf32_Rel));

	sym_tab_idx = shdr[rel_sidx].sh_link;
	if (sym_tab_idx) {
		size_t str_tab_idx = 0;

		assert(sym_tab_idx < elf->e_shnum);

		assert(shdr[sym_tab_idx].sh_entsize == sizeof(Elf32_Sym));

		/* Check the address is inside ELF memory */
		if (ADD_OVERFLOW(shdr[sym_tab_idx].sh_addr,
				 shdr[sym_tab_idx].sh_size, &sh_end))
			err(TEE_ERROR_SECURITY, "Overflow");
		assert(sh_end < (elf->max_addr - elf->load_addr));

		sym_tab = (Elf32_Sym *)(elf->load_addr +
					shdr[sym_tab_idx].sh_addr);

		num_syms = shdr[sym_tab_idx].sh_size / sizeof(Elf32_Sym);

		str_tab_idx = shdr[sym_tab_idx].sh_link;
		if (str_tab_idx) {
			/* Check the address is inside ELF memory */
			if (ADD_OVERFLOW(shdr[str_tab_idx].sh_addr,
					 shdr[str_tab_idx].sh_size, &sh_end))
				err(TEE_ERROR_SECURITY, "Overflow");
			assert(sh_end < (elf->max_addr - elf->load_addr));

			str_tab = (const char *)(elf->load_addr +
						 shdr[str_tab_idx].sh_addr);
			str_tab_size = shdr[str_tab_idx].sh_size;
		}
	}

	/* Check the address is inside TA memory */
	assert(shdr[rel_sidx].sh_addr < (elf->max_addr - elf->load_addr));
	rel = (Elf32_Rel *)(elf->load_addr + shdr[rel_sidx].sh_addr);

	/* Check the address is inside TA memory */
	if (ADD_OVERFLOW(shdr[rel_sidx].sh_addr, shdr[rel_sidx].sh_size,
			 &sh_end))
		err(TEE_ERROR_SECURITY, "Overflow");
	assert(sh_end < (elf->max_addr - elf->load_addr));
	rel_end = rel + shdr[rel_sidx].sh_size / sizeof(Elf32_Rel);
	for (; rel < rel_end; rel++) {
		Elf32_Addr *where = NULL;
		size_t sym_idx = 0;

		/* Check the address is inside TA memory */
		assert(rel->r_offset < (elf->max_addr - elf->load_addr));
		where = (Elf32_Addr *)(elf->load_addr + rel->r_offset);

		switch (ELF32_R_TYPE(rel->r_info)) {
		case R_ARM_ABS32:
			sym_idx = ELF32_R_SYM(rel->r_info);
			assert(sym_idx < num_syms);
			if (sym_tab[sym_idx].st_shndx == SHN_UNDEF) {
				/* Symbol is external */
				e32_process_dyn_rel(sym_tab, num_syms, str_tab,
						    str_tab_size, rel, where);
			} else {
				*where += elf->load_addr +
					  sym_tab[sym_idx].st_value;
			}
			break;
		case R_ARM_REL32:
			sym_idx = ELF32_R_SYM(rel->r_info);
			assert(sym_idx < num_syms);
			*where += sym_tab[sym_idx].st_value - rel->r_offset;
			break;
		case R_ARM_RELATIVE:
			*where += elf->load_addr;
			break;
		case R_ARM_GLOB_DAT:
		case R_ARM_JUMP_SLOT:
			e32_process_dyn_rel(sym_tab, num_syms, str_tab,
					    str_tab_size, rel, where);
			break;
		default:
			err(TEE_ERROR_BAD_FORMAT, "Unknown relocation type %d",
			    ELF32_R_TYPE(rel->r_info));
		}
	}
}

#ifdef ARM64
static void e64_process_dyn_rela(const Elf64_Sym *sym_tab, size_t num_syms,
				 const char *str_tab, size_t str_tab_size,
				 Elf64_Rela *rela, Elf64_Addr *where)
{
	size_t sym_idx = 0;
	const char *name = NULL;
	vaddr_t val = 0;
	size_t name_idx = 0;

	sym_idx = ELF64_R_SYM(rela->r_info);
	assert(sym_idx < num_syms);

	name_idx = sym_tab[sym_idx].st_name;
	assert(name_idx < str_tab_size);
	name = str_tab + name_idx;

	resolve_sym(name, &val);
	*where = val;
}

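/*
 * Informal summary of the Elf64 (RELA) relocation types handled below, per
 * the Arm AAELF64 ABI, where S = symbol address, A = r_addend and
 * Delta(S) = this module's load offset:
 *
 *   R_AARCH64_ABS64      S + A
 *   R_AARCH64_RELATIVE   Delta(S) + A
 *   R_AARCH64_GLOB_DAT   resolved symbol address (A is normally 0)
 *   R_AARCH64_JUMP_SLOT  resolved symbol address (A is normally 0)
 */
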
static void e64_relocate(struct ta_elf *elf, unsigned int rel_sidx)
{
	Elf64_Shdr *shdr = elf->shdr;
	Elf64_Rela *rela = NULL;
	Elf64_Rela *rela_end = NULL;
	size_t sym_tab_idx = 0;
	Elf64_Sym *sym_tab = NULL;
	size_t num_syms = 0;
	size_t sh_end = 0;
	const char *str_tab = NULL;
	size_t str_tab_size = 0;

	assert(shdr[rel_sidx].sh_type == SHT_RELA);

	assert(shdr[rel_sidx].sh_entsize == sizeof(Elf64_Rela));

	sym_tab_idx = shdr[rel_sidx].sh_link;
	if (sym_tab_idx) {
		size_t str_tab_idx = 0;

		assert(sym_tab_idx < elf->e_shnum);

		assert(shdr[sym_tab_idx].sh_entsize == sizeof(Elf64_Sym));

		/* Check the address is inside TA memory */
		if (ADD_OVERFLOW(shdr[sym_tab_idx].sh_addr,
				 shdr[sym_tab_idx].sh_size, &sh_end))
			err(TEE_ERROR_SECURITY, "Overflow");
		assert(sh_end < (elf->max_addr - elf->load_addr));

		sym_tab = (Elf64_Sym *)(elf->load_addr +
					shdr[sym_tab_idx].sh_addr);

		num_syms = shdr[sym_tab_idx].sh_size / sizeof(Elf64_Sym);

		str_tab_idx = shdr[sym_tab_idx].sh_link;
		if (str_tab_idx) {
			/* Check the address is inside ELF memory */
			if (ADD_OVERFLOW(shdr[str_tab_idx].sh_addr,
					 shdr[str_tab_idx].sh_size, &sh_end))
				err(TEE_ERROR_SECURITY, "Overflow");
			assert(sh_end < (elf->max_addr - elf->load_addr));

			str_tab = (const char *)(elf->load_addr +
						 shdr[str_tab_idx].sh_addr);
			str_tab_size = shdr[str_tab_idx].sh_size;
		}
	}

	/* Check the address is inside TA memory */
	assert(shdr[rel_sidx].sh_addr < (elf->max_addr - elf->load_addr));
	rela = (Elf64_Rela *)(elf->load_addr + shdr[rel_sidx].sh_addr);

	/* Check the address is inside TA memory */
	if (ADD_OVERFLOW(shdr[rel_sidx].sh_addr, shdr[rel_sidx].sh_size,
			 &sh_end))
		err(TEE_ERROR_SECURITY, "Overflow");
	assert(sh_end < (elf->max_addr - elf->load_addr));
	rela_end = rela + shdr[rel_sidx].sh_size / sizeof(Elf64_Rela);
	for (; rela < rela_end; rela++) {
		Elf64_Addr *where = NULL;
		size_t sym_idx = 0;

		/* Check the address is inside TA memory */
		assert(rela->r_offset < (elf->max_addr - elf->load_addr));

		where = (Elf64_Addr *)(elf->load_addr + rela->r_offset);

		switch (ELF64_R_TYPE(rela->r_info)) {
		case R_AARCH64_ABS64:
			sym_idx = ELF64_R_SYM(rela->r_info);
			assert(sym_idx < num_syms);
			if (sym_tab[sym_idx].st_shndx == SHN_UNDEF) {
				/* Symbol is external */
				e64_process_dyn_rela(sym_tab, num_syms, str_tab,
						     str_tab_size, rela, where);
			} else {
				*where = rela->r_addend + elf->load_addr +
					 sym_tab[sym_idx].st_value;
			}
			break;
		case R_AARCH64_RELATIVE:
			*where = rela->r_addend + elf->load_addr;
			break;
		case R_AARCH64_GLOB_DAT:
		case R_AARCH64_JUMP_SLOT:
			e64_process_dyn_rela(sym_tab, num_syms, str_tab,
					     str_tab_size, rela, where);
			break;
		default:
			err(TEE_ERROR_BAD_FORMAT, "Unknown relocation type %zd",
			    ELF64_R_TYPE(rela->r_info));
		}
	}
}
#else /*ARM64*/
static void e64_relocate(struct ta_elf *elf __unused,
			 unsigned int rel_sidx __unused)
{
	err(TEE_ERROR_NOT_SUPPORTED, "arm64 not supported");
}
#endif /*ARM64*/

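/*
 * Apply all relocation sections of a loaded ELF: SHT_REL sections for a
 * 32-bit TA, SHT_RELA sections for a 64-bit TA.
 */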
void ta_elf_relocate(struct ta_elf *elf)
{
	size_t n = 0;

	if (elf->is_32bit) {
		Elf32_Shdr *shdr = elf->shdr;

		for (n = 0; n < elf->e_shnum; n++)
			if (shdr[n].sh_type == SHT_REL)
				e32_relocate(elf, n);
	} else {
		Elf64_Shdr *shdr = elf->shdr;

		for (n = 0; n < elf->e_shnum; n++)
			if (shdr[n].sh_type == SHT_RELA)
				e64_relocate(elf, n);
	}
}