// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * Copyright (c) 2015-2020, 2022 Linaro Limited
 * Copyright (c) 2020-2023, Arm Limited
 */

#include <assert.h>
#include <kernel/ldelf_loader.h>
#include <kernel/ldelf_syscalls.h>
#include <kernel/scall.h>
#include <ldelf.h>
#include <mm/mobj.h>
#include <mm/vm.h>

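/*
 * The embedded ldelf image, generated at build time: ldelf_data holds
 * the code followed by the data segment, ldelf_entry is the entry point
 * offset into the code.
 */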
extern uint8_t ldelf_data[];
extern const unsigned int ldelf_code_size;
extern const unsigned int ldelf_data_size;
extern const unsigned int ldelf_entry;

/* ldelf has the same architecture/register width as the kernel */
#if defined(ARM32) || defined(RV32)
static const bool is_32bit = true;
#else
static const bool is_32bit;
#endif

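/*
 * Allocate TA memory pages for ldelf and map them into the user mode
 * context with the given protection. On success *va holds the start of
 * the new mapping. The local fobj reference is dropped right away since
 * the mobj keeps its own reference to the fobj for as long as the
 * mapping exists.
 */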
static TEE_Result alloc_and_map_ldelf_fobj(struct user_mode_ctx *uctx,
					   size_t sz, uint32_t prot,
					   vaddr_t *va)
{
	size_t num_pgs = ROUNDUP(sz, SMALL_PAGE_SIZE) / SMALL_PAGE_SIZE;
	struct fobj *fobj = fobj_ta_mem_alloc(num_pgs);
	struct mobj *mobj = mobj_with_fobj_alloc(fobj, NULL,
						 TEE_MATTR_MEM_TYPE_TAGGED);
	TEE_Result res = TEE_SUCCESS;

	fobj_put(fobj);
	if (!mobj)
		return TEE_ERROR_OUT_OF_MEMORY;
	res = vm_map(uctx, va, num_pgs * SMALL_PAGE_SIZE,
		     prot, VM_FLAG_LDELF, mobj, 0);
	mobj_put(mobj);

	return res;
}

/*
 * This function may leave a few mappings behind on error, but that is
 * taken care of by tee_ta_init_user_ta_session() since the entire
 * context is removed at that point.
 */
TEE_Result ldelf_load_ldelf(struct user_mode_ctx *uctx)
{
	TEE_Result res = TEE_SUCCESS;
	vaddr_t stack_addr = 0;
	vaddr_t code_addr = 0;
	vaddr_t rw_addr = 0;
	uint32_t prot = 0;

	uctx->is_32bit = is_32bit;

	res = alloc_and_map_ldelf_fobj(uctx, LDELF_STACK_SIZE,
				       TEE_MATTR_URW | TEE_MATTR_PRW,
				       &stack_addr);
	if (res)
		return res;
	uctx->ldelf_stack_ptr = stack_addr + LDELF_STACK_SIZE;

	/* Map the code kernel read/write for now, it is copied in below */
	res = alloc_and_map_ldelf_fobj(uctx, ldelf_code_size, TEE_MATTR_PRW,
				       &code_addr);
	if (res)
		return res;
	uctx->entry_func = code_addr + ldelf_entry;

	/* Place the data segment on the page boundary following the code */
	rw_addr = ROUNDUP(code_addr + ldelf_code_size, SMALL_PAGE_SIZE);
	res = alloc_and_map_ldelf_fobj(uctx, ldelf_data_size,
				       TEE_MATTR_URW | TEE_MATTR_PRW, &rw_addr);
	if (res)
		return res;

	/* Activate the new mappings before copying in the image */
	vm_set_ctx(uctx->ts_ctx);

	memcpy((void *)code_addr, ldelf_data, ldelf_code_size);
	memcpy((void *)rw_addr, ldelf_data + ldelf_code_size, ldelf_data_size);

	/* Flip the code pages from writable to user executable */
	prot = TEE_MATTR_URX;
	if (IS_ENABLED(CFG_CORE_BTI))
		prot |= TEE_MATTR_GUARDED;

	res = vm_set_prot(uctx, code_addr,
			  ROUNDUP(ldelf_code_size, SMALL_PAGE_SIZE), prot);
	if (res)
		return res;

	DMSG("ldelf load address %#"PRIxVA, code_addr);

	return TEE_SUCCESS;
}

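/*
 * Enter ldelf in user mode to load the binary for the current context.
 * ldelf reports the result through a struct ldelf_arg placed on its own
 * stack, from which the entry point, load address and stack pointer of
 * the loaded binary are copied back into @uctx.
 */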
TEE_Result ldelf_init_with_ldelf(struct ts_session *sess,
				 struct user_mode_ctx *uctx)
{
	TEE_Result res = TEE_SUCCESS;
	struct ldelf_arg *arg = NULL;
	uint32_t panic_code = 0;
	uint32_t panicked = 0;
	uaddr_t usr_stack = 0;

	/* Place the ldelf_arg at the top of the ldelf stack */
	usr_stack = uctx->ldelf_stack_ptr;
	usr_stack -= ROUNDUP(sizeof(*arg), STACK_ALIGNMENT);
	arg = (struct ldelf_arg *)usr_stack;
	memset(arg, 0, sizeof(*arg));
	arg->uuid = uctx->ts_ctx->uuid;
	sess->handle_scall = scall_handle_ldelf;

	res = thread_enter_user_mode((vaddr_t)arg, 0, 0, 0,
				     usr_stack, uctx->entry_func,
				     is_32bit, &panicked, &panic_code);

	sess->handle_scall = sess->ctx->ops->handle_scall;
	thread_user_clear_vfp(uctx);
	ldelf_sess_cleanup(sess);

	if (panicked) {
		abort_print_current_ts();
		EMSG("ldelf panicked");
		return TEE_ERROR_GENERIC;
	}
	if (res) {
		EMSG("ldelf failed with res: %#"PRIx32, res);
		return res;
	}

	/* ldelf has run, check that the argument struct is still readable */
	res = vm_check_access_rights(uctx,
				     TEE_MEMORY_ACCESS_READ |
				     TEE_MEMORY_ACCESS_ANY_OWNER,
				     (uaddr_t)arg, sizeof(*arg));
	if (res)
		return res;

	if (is_user_ta_ctx(uctx->ts_ctx)) {
		/*
		 * This is already checked by the ELF loader, but since it
		 * runs in user mode we don't trust it entirely.
		 */
		if (arg->flags & ~TA_FLAGS_MASK)
			return TEE_ERROR_BAD_FORMAT;

		to_user_ta_ctx(uctx->ts_ctx)->ta_ctx.flags = arg->flags;
	}

	uctx->is_32bit = arg->is_32bit;
	uctx->entry_func = arg->entry_func;
	uctx->load_addr = arg->load_addr;
	uctx->stack_ptr = arg->stack_ptr;
	uctx->dump_entry_func = arg->dump_entry;
#ifdef CFG_FTRACE_SUPPORT
	uctx->ftrace_entry_func = arg->ftrace_entry;
	sess->fbuf = arg->fbuf;
#endif
	uctx->dl_entry_func = arg->dl_entry;

	return TEE_SUCCESS;
}

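/*
 * Enter ldelf's dump entry function to print a stack trace and other
 * state of a user mode binary that has aborted. The user memory map and
 * the register state at the time of the abort are passed in a struct
 * dump_entry_arg placed on the ldelf stack.
 */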
TEE_Result ldelf_dump_state(struct user_mode_ctx *uctx)
{
	TEE_Result res = TEE_SUCCESS;
	uaddr_t usr_stack = uctx->ldelf_stack_ptr;
	struct dump_entry_arg *arg = NULL;
	uint32_t panic_code = 0;
	uint32_t panicked = 0;
	struct thread_specific_data *tsd = thread_get_tsd();
	struct ts_session *sess = NULL;
	struct vm_region *r = NULL;
	size_t n = 0;

	/* Count the regions mapped with some user mode permission */
	TAILQ_FOREACH(r, &uctx->vm_info.regions, link)
		if (r->attr & TEE_MATTR_URWX)
			n++;

	usr_stack -= ROUNDUP(sizeof(*arg) + n * sizeof(struct dump_map),
			     STACK_ALIGNMENT);
	arg = (struct dump_entry_arg *)usr_stack;

	res = vm_check_access_rights(uctx,
				     TEE_MEMORY_ACCESS_READ |
				     TEE_MEMORY_ACCESS_WRITE |
				     TEE_MEMORY_ACCESS_ANY_OWNER,
				     (uaddr_t)arg,
				     sizeof(*arg) + n * sizeof(struct dump_map));
	if (res) {
		EMSG("ldelf stack is inaccessible!");
		return res;
	}

	memset(arg, 0, sizeof(*arg) + n * sizeof(struct dump_map));

	arg->num_maps = n;
	n = 0;
	TAILQ_FOREACH(r, &uctx->vm_info.regions, link) {
		if (r->attr & TEE_MATTR_URWX) {
			if (r->mobj)
				mobj_get_pa(r->mobj, r->offset, 0,
					    &arg->maps[n].pa);
			arg->maps[n].va = r->va;
			arg->maps[n].sz = r->size;
			if (r->attr & TEE_MATTR_UR)
				arg->maps[n].flags |= DUMP_MAP_READ;
			if (r->attr & TEE_MATTR_UW)
				arg->maps[n].flags |= DUMP_MAP_WRITE;
			if (r->attr & TEE_MATTR_UX)
				arg->maps[n].flags |= DUMP_MAP_EXEC;
			if (r->attr & TEE_MATTR_SECURE)
				arg->maps[n].flags |= DUMP_MAP_SECURE;
			if (r->flags & VM_FLAG_EPHEMERAL)
				arg->maps[n].flags |= DUMP_MAP_EPHEM;
			if (r->flags & VM_FLAG_LDELF)
				arg->maps[n].flags |= DUMP_MAP_LDELF;
			n++;
		}
	}

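	/* Pass the register state captured when the abort occurred */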
	arg->is_32bit = uctx->is_32bit;
#ifdef ARM32
	arg->arm32.regs[0] = tsd->abort_regs.r0;
	arg->arm32.regs[1] = tsd->abort_regs.r1;
	arg->arm32.regs[2] = tsd->abort_regs.r2;
	arg->arm32.regs[3] = tsd->abort_regs.r3;
	arg->arm32.regs[4] = tsd->abort_regs.r4;
	arg->arm32.regs[5] = tsd->abort_regs.r5;
	arg->arm32.regs[6] = tsd->abort_regs.r6;
	arg->arm32.regs[7] = tsd->abort_regs.r7;
	arg->arm32.regs[8] = tsd->abort_regs.r8;
	arg->arm32.regs[9] = tsd->abort_regs.r9;
	arg->arm32.regs[10] = tsd->abort_regs.r10;
	arg->arm32.regs[11] = tsd->abort_regs.r11;
	arg->arm32.regs[12] = tsd->abort_regs.ip;
	arg->arm32.regs[13] = tsd->abort_regs.usr_sp; /*SP*/
	arg->arm32.regs[14] = tsd->abort_regs.usr_lr; /*LR*/
	arg->arm32.regs[15] = tsd->abort_regs.elr; /*PC*/
#endif /*ARM32*/
#ifdef ARM64
	if (uctx->is_32bit) {
		arg->arm32.regs[0] = tsd->abort_regs.x0;
		arg->arm32.regs[1] = tsd->abort_regs.x1;
		arg->arm32.regs[2] = tsd->abort_regs.x2;
		arg->arm32.regs[3] = tsd->abort_regs.x3;
		arg->arm32.regs[4] = tsd->abort_regs.x4;
		arg->arm32.regs[5] = tsd->abort_regs.x5;
		arg->arm32.regs[6] = tsd->abort_regs.x6;
		arg->arm32.regs[7] = tsd->abort_regs.x7;
		arg->arm32.regs[8] = tsd->abort_regs.x8;
		arg->arm32.regs[9] = tsd->abort_regs.x9;
		arg->arm32.regs[10] = tsd->abort_regs.x10;
		arg->arm32.regs[11] = tsd->abort_regs.x11;
		arg->arm32.regs[12] = tsd->abort_regs.x12;
		arg->arm32.regs[13] = tsd->abort_regs.x13; /*SP*/
		arg->arm32.regs[14] = tsd->abort_regs.x14; /*LR*/
		arg->arm32.regs[15] = tsd->abort_regs.elr; /*PC*/
	} else {
		arg->arm64.fp = tsd->abort_regs.x29;
		arg->arm64.pc = tsd->abort_regs.elr;
		arg->arm64.sp = tsd->abort_regs.sp_el0;
	}
#endif /*ARM64*/
#if defined(RV64) || defined(RV32)
	arg->rv.fp = tsd->abort_regs.s0;
	arg->rv.pc = tsd->abort_regs.epc;
	arg->rv.sp = tsd->abort_regs.sp;
#endif /*RV64||RV32*/

	sess = ts_get_current_session();
	sess->handle_scall = scall_handle_ldelf;

	res = thread_enter_user_mode((vaddr_t)arg, 0, 0, 0,
				     usr_stack, uctx->dump_entry_func,
				     is_32bit, &panicked, &panic_code);

	sess->handle_scall = sess->ctx->ops->handle_scall;
	thread_user_clear_vfp(uctx);
	ldelf_sess_cleanup(sess);

	if (panicked) {
		/* Don't attempt to enter the dump entry function again */
		uctx->dump_entry_func = 0;
		EMSG("ldelf dump function panicked");
		abort_print_current_ts();
		res = TEE_ERROR_TARGET_DEAD;
	}

	return res;
}

#ifdef CFG_FTRACE_SUPPORT
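/*
 * Enter ldelf's ftrace entry function to copy the function trace buffer
 * of the user mode binary into @buf. On entry *blen is the size of @buf,
 * on return it holds the required size; TEE_ERROR_SHORT_BUFFER is
 * returned if that exceeds the supplied size.
 */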
TEE_Result ldelf_dump_ftrace(struct user_mode_ctx *uctx,
			     void *buf, size_t *blen)
{
	uaddr_t usr_stack = uctx->ldelf_stack_ptr;
	TEE_Result res = TEE_SUCCESS;
	uint32_t panic_code = 0;
	uint32_t panicked = 0;
	size_t *arg = NULL;
	struct ts_session *sess = NULL;

	if (!uctx->ftrace_entry_func)
		return TEE_ERROR_NOT_SUPPORTED;

	usr_stack -= ROUNDUP(sizeof(*arg), STACK_ALIGNMENT);
	arg = (size_t *)usr_stack;

	res = vm_check_access_rights(uctx,
				     TEE_MEMORY_ACCESS_READ |
				     TEE_MEMORY_ACCESS_WRITE |
				     TEE_MEMORY_ACCESS_ANY_OWNER,
				     (uaddr_t)arg, sizeof(*arg));
	if (res) {
		EMSG("ldelf stack is inaccessible!");
		return res;
	}

	/* Tell ldelf how large the supplied buffer is */
	*arg = *blen;

	sess = ts_get_current_session();
	sess->handle_scall = scall_handle_ldelf;

	res = thread_enter_user_mode((vaddr_t)buf, (vaddr_t)arg, 0, 0,
				     usr_stack, uctx->ftrace_entry_func,
				     is_32bit, &panicked, &panic_code);

	sess->handle_scall = sess->ctx->ops->handle_scall;
	thread_user_clear_vfp(uctx);
	ldelf_sess_cleanup(sess);

	if (panicked) {
		/* Don't attempt to enter the ftrace entry function again */
		uctx->ftrace_entry_func = 0;
		EMSG("ldelf ftrace function panicked");
		abort_print_current_ts();
		res = TEE_ERROR_TARGET_DEAD;
	}

	/* Report the required size even if the buffer was too small */
	if (!res) {
		if (*arg > *blen)
			res = TEE_ERROR_SHORT_BUFFER;
		*blen = *arg;
	}

	return res;
}
#endif /*CFG_FTRACE_SUPPORT*/

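/*
 * Enter ldelf's dynamic linking entry function with the
 * LDELF_DL_ENTRY_DLOPEN command to load the library identified by @uuid
 * into the current user mode context.
 */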
TEE_Result ldelf_dlopen(struct user_mode_ctx *uctx, TEE_UUID *uuid,
			uint32_t flags)
{
	uaddr_t usr_stack = uctx->ldelf_stack_ptr;
	TEE_Result res = TEE_ERROR_GENERIC;
	struct dl_entry_arg *arg = NULL;
	uint32_t panic_code = 0;
	uint32_t panicked = 0;
	struct ts_session *sess = NULL;

	assert(uuid);

	usr_stack -= ROUNDUP(sizeof(*arg), STACK_ALIGNMENT);
	arg = (struct dl_entry_arg *)usr_stack;

	res = vm_check_access_rights(uctx,
				     TEE_MEMORY_ACCESS_READ |
				     TEE_MEMORY_ACCESS_WRITE |
				     TEE_MEMORY_ACCESS_ANY_OWNER,
				     (uaddr_t)arg, sizeof(*arg));
	if (res) {
		EMSG("ldelf stack is inaccessible!");
		return res;
	}

	memset(arg, 0, sizeof(*arg));
	arg->cmd = LDELF_DL_ENTRY_DLOPEN;
	arg->dlopen.uuid = *uuid;
	arg->dlopen.flags = flags;

	sess = ts_get_current_session();
	sess->handle_scall = scall_handle_ldelf;

	res = thread_enter_user_mode((vaddr_t)arg, 0, 0, 0,
				     usr_stack, uctx->dl_entry_func,
				     is_32bit, &panicked, &panic_code);

	sess->handle_scall = sess->ctx->ops->handle_scall;
	ldelf_sess_cleanup(sess);

	if (panicked) {
		EMSG("ldelf dl_entry function panicked");
		abort_print_current_ts();
		res = TEE_ERROR_TARGET_DEAD;
	}
	if (!res)
		res = arg->ret;

	return res;
}

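/*
 * Enter ldelf's dynamic linking entry function with the
 * LDELF_DL_ENTRY_DLSYM command to resolve @sym (at most @maxlen bytes,
 * including the terminating NUL) in the library identified by @uuid. On
 * success the address of the symbol is returned in *val.
 */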
TEE_Result ldelf_dlsym(struct user_mode_ctx *uctx, TEE_UUID *uuid,
		       const char *sym, size_t maxlen, vaddr_t *val)
{
	uaddr_t usr_stack = uctx->ldelf_stack_ptr;
	TEE_Result res = TEE_ERROR_GENERIC;
	struct dl_entry_arg *arg = NULL;
	uint32_t panic_code = 0;
	uint32_t panicked = 0;
	size_t len = strnlen(sym, maxlen);
	struct ts_session *sess = NULL;

	if (len == maxlen)
		return TEE_ERROR_BAD_PARAMETERS;

	/* Reserve room for the argument struct and the NUL terminated name */
	usr_stack -= ROUNDUP(sizeof(*arg) + len + 1, STACK_ALIGNMENT);
	arg = (struct dl_entry_arg *)usr_stack;

	res = vm_check_access_rights(uctx,
				     TEE_MEMORY_ACCESS_READ |
				     TEE_MEMORY_ACCESS_WRITE |
				     TEE_MEMORY_ACCESS_ANY_OWNER,
				     (uaddr_t)arg, sizeof(*arg) + len + 1);
	if (res) {
		EMSG("ldelf stack is inaccessible!");
		return res;
	}

	memset(arg, 0, sizeof(*arg));
	arg->cmd = LDELF_DL_ENTRY_DLSYM;
	arg->dlsym.uuid = *uuid;
	memcpy(arg->dlsym.symbol, sym, len);
	arg->dlsym.symbol[len] = '\0';

	sess = ts_get_current_session();
	sess->handle_scall = scall_handle_ldelf;

	res = thread_enter_user_mode((vaddr_t)arg, 0, 0, 0,
				     usr_stack, uctx->dl_entry_func,
				     is_32bit, &panicked, &panic_code);

	sess->handle_scall = sess->ctx->ops->handle_scall;
	ldelf_sess_cleanup(sess);

	if (panicked) {
		EMSG("ldelf dl_entry function panicked");
		abort_print_current_ts();
		res = TEE_ERROR_TARGET_DEAD;
	}
	if (!res) {
		res = arg->ret;
		if (!res)
			*val = arg->dlsym.val;
	}

	return res;
}
457