1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3 * Copyright (c) 2014, STMicroelectronics International N.V.
4 * Copyright (c) 2015-2020, 2022 Linaro Limited
5 * Copyright (c) 2020-2023, Arm Limited
6 */
7
8 #include <assert.h>
9 #include <kernel/ldelf_loader.h>
10 #include <kernel/ldelf_syscalls.h>
11 #include <kernel/scall.h>
12 #include <kernel/user_access.h>
13 #include <ldelf.h>
14 #include <mm/mobj.h>
15 #include <mm/vm.h>
16
17 #define BOUNCE_BUFFER_SIZE 4096
18
19 extern uint8_t ldelf_data[];
20 extern const unsigned int ldelf_code_size;
21 extern const unsigned int ldelf_data_size;
22 extern const unsigned int ldelf_entry;
23
24 /* ldelf has the same architecture/register width as the kernel */
25 #if defined(ARM32) || defined(RV32)
26 static const bool is_32bit = true;
27 #else
28 static const bool is_32bit;
29 #endif
30
alloc_and_map_fobj(struct user_mode_ctx * uctx,size_t sz,uint32_t prot,uint32_t flags,vaddr_t * va)31 static TEE_Result alloc_and_map_fobj(struct user_mode_ctx *uctx, size_t sz,
32 uint32_t prot, uint32_t flags, vaddr_t *va)
33 {
34 size_t num_pgs = ROUNDUP_DIV(sz, SMALL_PAGE_SIZE);
35 struct fobj *fobj = fobj_ta_mem_alloc(num_pgs);
36 struct mobj *mobj = mobj_with_fobj_alloc(fobj, NULL,
37 TEE_MATTR_MEM_TYPE_TAGGED);
38 TEE_Result res = TEE_SUCCESS;
39
40 fobj_put(fobj);
41 if (!mobj)
42 return TEE_ERROR_OUT_OF_MEMORY;
43 res = vm_map(uctx, va, num_pgs * SMALL_PAGE_SIZE, prot, flags, mobj, 0);
44 mobj_put(mobj);
45
46 return res;
47 }
48
/*
 * Map and populate the ldelf image (bounce buffer, stack, code and data)
 * into the user mode context @uctx.
 *
 * This function may leave a few mappings behind on error, but that's taken
 * care of by tee_ta_init_user_ta_session() since the entire context is
 * removed then.
 */
TEE_Result ldelf_load_ldelf(struct user_mode_ctx *uctx)
{
	TEE_Result res = TEE_SUCCESS;
	vaddr_t stack_addr = 0;
	vaddr_t code_addr = 0;
	vaddr_t rw_addr = 0;
	vaddr_t bb_addr = 0;
	uint32_t prot = 0;

	uctx->is_32bit = is_32bit;

	/* Bounce buffer for kernel<->user data exchange, kernel RW only */
	res = alloc_and_map_fobj(uctx, BOUNCE_BUFFER_SIZE, TEE_MATTR_PRW, 0,
				 &bb_addr);
	if (res)
		return res;
	uctx->bbuf = (void *)bb_addr;
	uctx->bbuf_size = BOUNCE_BUFFER_SIZE;

	/* ldelf stack; the saved stack pointer is the top of the mapping */
	res = alloc_and_map_fobj(uctx, LDELF_STACK_SIZE,
				 TEE_MATTR_URW | TEE_MATTR_PRW, VM_FLAG_LDELF,
				 &stack_addr);
	if (res)
		return res;
	uctx->ldelf_stack_ptr = stack_addr + LDELF_STACK_SIZE;

	/*
	 * Code pages are first mapped kernel-writable so the image can be
	 * copied in; they are remapped URX further down.
	 */
	res = alloc_and_map_fobj(uctx, ldelf_code_size, TEE_MATTR_PRW,
				 VM_FLAG_LDELF, &code_addr);
	if (res)
		return res;
	uctx->entry_func = code_addr + ldelf_entry;

	/* Data (RW) segment is placed on the page following the code */
	rw_addr = ROUNDUP(code_addr + ldelf_code_size, SMALL_PAGE_SIZE);
	res = alloc_and_map_fobj(uctx, ldelf_data_size,
				 TEE_MATTR_URW | TEE_MATTR_PRW, VM_FLAG_LDELF,
				 &rw_addr);
	if (res)
		return res;

	/* Activate the new address map before copying into it */
	vm_set_ctx(uctx->ts_ctx);

	memcpy((void *)code_addr, ldelf_data, ldelf_code_size);

	res = copy_to_user((void *)rw_addr, ldelf_data + ldelf_code_size,
			   ldelf_data_size);
	if (res)
		return res;

	/* Flip code pages to user read/execute, no longer writable */
	prot = TEE_MATTR_URX;
	if (IS_ENABLED(CFG_CORE_BTI))
		prot |= TEE_MATTR_GUARDED;

	res = vm_set_prot(uctx, code_addr,
			  ROUNDUP(ldelf_code_size, SMALL_PAGE_SIZE), prot);
	if (res)
		return res;

	DMSG("ldelf load address %#"PRIxVA, code_addr);

	return TEE_SUCCESS;
}
114
/*
 * Run ldelf in user mode to load the TS binary, then read the resulting
 * entry points and load description back from the ldelf_arg struct that
 * ldelf filled in on its own stack.
 */
TEE_Result ldelf_init_with_ldelf(struct ts_session *sess,
				 struct user_mode_ctx *uctx)
{
	TEE_Result res = TEE_SUCCESS;
	struct ldelf_arg *arg = NULL;
	uint32_t panic_code = 0;
	uint32_t panicked = 0;
	uaddr_t usr_stack = 0;
	struct ldelf_arg *arg_bbuf = NULL;

	/* Reserve room for the argument struct on the ldelf user stack */
	usr_stack = uctx->ldelf_stack_ptr;
	usr_stack -= ROUNDUP(sizeof(*arg), STACK_ALIGNMENT);
	arg = (struct ldelf_arg *)usr_stack;
	/* Route syscalls from ldelf to the ldelf syscall handler */
	sess->handle_scall = scall_handle_ldelf;

	res = clear_user(arg, sizeof(*arg));
	if (res)
		return res;

	res = PUT_USER_SCALAR(uctx->ts_ctx->uuid, &arg->uuid);
	if (res)
		return res;

	res = thread_enter_user_mode((vaddr_t)arg, 0, 0, 0,
				     usr_stack, uctx->entry_func,
				     is_32bit, &panicked, &panic_code);

	/* Restore the context's normal syscall handler */
	sess->handle_scall = sess->ctx->ops->handle_scall;
	thread_user_clear_vfp(uctx);
	ldelf_sess_cleanup(sess);

	if (panicked) {
		abort_print_current_ts();
		EMSG("ldelf panicked");
		return TEE_ERROR_GENERIC;
	}
	if (res) {
		EMSG("ldelf failed with res: %#"PRIx32, res);
		return res;
	}

	/* Snapshot the user-writable struct to avoid TOCTOU on its fields */
	res = BB_MEMDUP_USER(arg, sizeof(*arg), &arg_bbuf);
	if (res)
		return res;

	if (is_user_ta_ctx(uctx->ts_ctx)) {
		/*
		 * This is already checked by the elf loader, but since it runs
		 * in user mode we're not trusting it entirely.
		 */
		if (arg_bbuf->flags & ~TA_FLAGS_MASK)
			return TEE_ERROR_BAD_FORMAT;

		to_user_ta_ctx(uctx->ts_ctx)->ta_ctx.flags = arg_bbuf->flags;
	}

	/* Adopt the entry points and addresses reported by ldelf */
	uctx->is_32bit = arg_bbuf->is_32bit;
	uctx->entry_func = arg_bbuf->entry_func;
	uctx->load_addr = arg_bbuf->load_addr;
	uctx->stack_ptr = arg_bbuf->stack_ptr;
	uctx->dump_entry_func = arg_bbuf->dump_entry;
#ifdef CFG_FTRACE_SUPPORT
	uctx->ftrace_entry_func = arg_bbuf->ftrace_entry;
	sess->fbuf = arg_bbuf->fbuf;
#endif
	uctx->dl_entry_func = arg_bbuf->dl_entry;

	bb_free(arg_bbuf, sizeof(*arg));

	return TEE_SUCCESS;
}
186
/*
 * Invoke ldelf's dump entry function to print the state of the user mode
 * context: the list of user-accessible mappings plus the register state
 * saved at abort time.
 */
TEE_Result ldelf_dump_state(struct user_mode_ctx *uctx)
{
	TEE_Result res = TEE_SUCCESS;
	uaddr_t usr_stack = uctx->ldelf_stack_ptr;
	struct dump_entry_arg *arg = NULL;
	uint32_t panic_code = 0;
	uint32_t panicked = 0;
	struct thread_specific_data *tsd = thread_get_tsd();
	struct ts_session *sess = NULL;
	struct vm_region *r = NULL;
	size_t arg_size = 0;
	size_t n = 0;

	/* First pass: count the user-accessible regions to dump */
	TAILQ_FOREACH(r, &uctx->vm_info.regions, link)
		if (r->attr & TEE_MATTR_URWX)
			n++;

	/* arg has a flexible array of n dump_map entries at its tail */
	arg_size = ROUNDUP(sizeof(*arg) + n * sizeof(struct dump_map),
			   STACK_ALIGNMENT);

	usr_stack = uctx->ldelf_stack_ptr;
	usr_stack -= arg_size;

	/* Build the argument in kernel scratch memory first */
	arg = bb_alloc(arg_size);
	if (!arg)
		return TEE_ERROR_OUT_OF_MEMORY;
	memset(arg, 0, arg_size);

	/* Second pass: fill in one map entry per user-accessible region */
	arg->num_maps = n;
	n = 0;
	TAILQ_FOREACH(r, &uctx->vm_info.regions, link) {
		if (r->attr & TEE_MATTR_URWX) {
			if (r->mobj)
				mobj_get_pa(r->mobj, r->offset, 0,
					    &arg->maps[n].pa);
			arg->maps[n].va = r->va;
			arg->maps[n].sz = r->size;
			if (r->attr & TEE_MATTR_UR)
				arg->maps[n].flags |= DUMP_MAP_READ;
			if (r->attr & TEE_MATTR_UW)
				arg->maps[n].flags |= DUMP_MAP_WRITE;
			if (r->attr & TEE_MATTR_UX)
				arg->maps[n].flags |= DUMP_MAP_EXEC;
			if (r->attr & TEE_MATTR_SECURE)
				arg->maps[n].flags |= DUMP_MAP_SECURE;
			if (r->flags & VM_FLAG_EPHEMERAL)
				arg->maps[n].flags |= DUMP_MAP_EPHEM;
			if (r->flags & VM_FLAG_LDELF)
				arg->maps[n].flags |= DUMP_MAP_LDELF;
			n++;
		}
	}

	/* Register state captured when the abort occurred */
	arg->is_32bit = uctx->is_32bit;
#ifdef ARM32
	arg->arm32.regs[0] = tsd->abort_regs.r0;
	arg->arm32.regs[1] = tsd->abort_regs.r1;
	arg->arm32.regs[2] = tsd->abort_regs.r2;
	arg->arm32.regs[3] = tsd->abort_regs.r3;
	arg->arm32.regs[4] = tsd->abort_regs.r4;
	arg->arm32.regs[5] = tsd->abort_regs.r5;
	arg->arm32.regs[6] = tsd->abort_regs.r6;
	arg->arm32.regs[7] = tsd->abort_regs.r7;
	arg->arm32.regs[8] = tsd->abort_regs.r8;
	arg->arm32.regs[9] = tsd->abort_regs.r9;
	arg->arm32.regs[10] = tsd->abort_regs.r10;
	arg->arm32.regs[11] = tsd->abort_regs.r11;
	arg->arm32.regs[12] = tsd->abort_regs.ip;
	arg->arm32.regs[13] = tsd->abort_regs.usr_sp; /*SP*/
	arg->arm32.regs[14] = tsd->abort_regs.usr_lr; /*LR*/
	arg->arm32.regs[15] = tsd->abort_regs.elr; /*PC*/
#endif /*ARM32*/
#ifdef ARM64
	if (uctx->is_32bit) {
		/* AArch32 TA on an AArch64 core: x0-x14 alias r0-r14 */
		arg->arm32.regs[0] = tsd->abort_regs.x0;
		arg->arm32.regs[1] = tsd->abort_regs.x1;
		arg->arm32.regs[2] = tsd->abort_regs.x2;
		arg->arm32.regs[3] = tsd->abort_regs.x3;
		arg->arm32.regs[4] = tsd->abort_regs.x4;
		arg->arm32.regs[5] = tsd->abort_regs.x5;
		arg->arm32.regs[6] = tsd->abort_regs.x6;
		arg->arm32.regs[7] = tsd->abort_regs.x7;
		arg->arm32.regs[8] = tsd->abort_regs.x8;
		arg->arm32.regs[9] = tsd->abort_regs.x9;
		arg->arm32.regs[10] = tsd->abort_regs.x10;
		arg->arm32.regs[11] = tsd->abort_regs.x11;
		arg->arm32.regs[12] = tsd->abort_regs.x12;
		arg->arm32.regs[13] = tsd->abort_regs.x13; /*SP*/
		arg->arm32.regs[14] = tsd->abort_regs.x14; /*LR*/
		arg->arm32.regs[15] = tsd->abort_regs.elr; /*PC*/
	} else {
		arg->arm64.fp = tsd->abort_regs.x29;
		arg->arm64.pc = tsd->abort_regs.elr;
		arg->arm64.sp = tsd->abort_regs.sp_el0;
	}
#endif /*ARM64*/
#if defined(RV64) || defined(RV32)
	arg->rv.fp = tsd->abort_regs.s0;
	arg->rv.pc = tsd->abort_regs.epc;
	arg->rv.sp = tsd->abort_regs.sp;
#endif /*RV64||RV32*/

	/* Push the finished argument onto the ldelf user stack */
	res = copy_to_user((void *)usr_stack, arg, arg_size);
	if (res)
		return res;

	sess = ts_get_current_session();
	sess->handle_scall = scall_handle_ldelf;

	res = thread_enter_user_mode(usr_stack, 0, 0, 0,
				     usr_stack, uctx->dump_entry_func,
				     is_32bit, &panicked, &panic_code);

	sess->handle_scall = sess->ctx->ops->handle_scall;
	thread_user_clear_vfp(uctx);
	ldelf_sess_cleanup(sess);

	if (panicked) {
		/* Don't attempt to use the dump entry again */
		uctx->dump_entry_func = 0;
		EMSG("ldelf dump function panicked");
		abort_print_current_ts();
		res = TEE_ERROR_TARGET_DEAD;
	}

	return res;
}
313
#ifdef CFG_FTRACE_SUPPORT
/*
 * Ask ldelf to serialize the ftrace buffer into @buf (capacity *@blen).
 * On success *@blen holds the number of bytes produced; returns
 * TEE_ERROR_SHORT_BUFFER if the data did not fit.
 */
TEE_Result ldelf_dump_ftrace(struct user_mode_ctx *uctx,
			     void *buf, size_t *blen)
{
	uaddr_t usr_stack = uctx->ldelf_stack_ptr;
	TEE_Result res = TEE_SUCCESS;
	uint32_t panic_code = 0;
	uint32_t panicked = 0;
	size_t *arg = NULL;
	struct ts_session *sess = NULL;
	struct ftrace_buf *saved_fbuf = NULL;

	if (!uctx->ftrace_entry_func)
		return TEE_ERROR_NOT_SUPPORTED;

	/* Reserve a size_t in/out slot on the ldelf user stack */
	usr_stack -= ROUNDUP(sizeof(*arg), STACK_ALIGNMENT);
	arg = (size_t *)usr_stack;

	/*
	 * NOTE(review): the slot is both written and read below through the
	 * kernel mapping, but only READ access is verified here — presumably
	 * the ldelf stack is always mapped PRW so the write is fine; confirm.
	 */
	res = vm_check_access_rights(uctx,
				     TEE_MEMORY_ACCESS_READ |
				     TEE_MEMORY_ACCESS_ANY_OWNER,
				     (uaddr_t)arg, sizeof(*arg));
	if (res) {
		EMSG("ldelf stack is inaccessible!");
		return res;
	}

	/* Pass in the capacity; ldelf replaces it with the needed size */
	*arg = *blen;

	sess = ts_get_current_session();
	sess->handle_scall = scall_handle_ldelf;

	/*
	 * This function is called twice during every ftrace dumping
	 * process (first to obtain the size of the buffer, then to
	 * get the actual data). The data is required to be the same
	 * during the two calls.
	 * Since ldelf uses syscall_sys_return() to return from the
	 * userspace, the syscall ftrace would be enabled after that,
	 * modifying the ftrace data.
	 * Therefore, we need to null the fbuf pointer before entering
	 * userspace to prevent the ftrace data is modified.
	 */
	saved_fbuf = sess->fbuf;
	sess->fbuf = NULL;

	res = thread_enter_user_mode((vaddr_t)buf, (vaddr_t)arg, 0, 0,
				     usr_stack, uctx->ftrace_entry_func,
				     is_32bit, &panicked, &panic_code);

	/* Disable syscall trace again before restoring the pointer */
	if (saved_fbuf)
		saved_fbuf->syscall_trace_enabled = false;
	sess->fbuf = saved_fbuf;
	sess->handle_scall = sess->ctx->ops->handle_scall;
	thread_user_clear_vfp(uctx);
	ldelf_sess_cleanup(sess);

	if (panicked) {
		/* Don't attempt to use the ftrace entry again */
		uctx->ftrace_entry_func = 0;
		EMSG("ldelf ftrace function panicked");
		abort_print_current_ts();
		res = TEE_ERROR_TARGET_DEAD;
	}

	if (!res) {
		/* Report required size even when the buffer was too small */
		if (*arg > *blen)
			res = TEE_ERROR_SHORT_BUFFER;
		*blen = *arg;
	}

	return res;
}
#endif /*CFG_FTRACE_SUPPORT*/
388
ldelf_dlopen(struct user_mode_ctx * uctx,TEE_UUID * uuid,uint32_t flags)389 TEE_Result ldelf_dlopen(struct user_mode_ctx *uctx, TEE_UUID *uuid,
390 uint32_t flags)
391 {
392 uaddr_t usr_stack = uctx->ldelf_stack_ptr;
393 TEE_Result res = TEE_ERROR_GENERIC;
394 struct dl_entry_arg *usr_arg = NULL;
395 struct dl_entry_arg *arg = NULL;
396 uint32_t panic_code = 0;
397 uint32_t panicked = 0;
398 struct ts_session *sess = NULL;
399
400 assert(uuid);
401
402 arg = bb_alloc(sizeof(*arg));
403 if (!arg)
404 return TEE_ERROR_OUT_OF_MEMORY;
405
406 memset(arg, 0, sizeof(*arg));
407 arg->cmd = LDELF_DL_ENTRY_DLOPEN;
408 arg->dlopen.uuid = *uuid;
409 arg->dlopen.flags = flags;
410
411 usr_stack -= ROUNDUP(sizeof(*arg), STACK_ALIGNMENT);
412 usr_arg = (void *)usr_stack;
413
414 res = copy_to_user(usr_arg, arg, sizeof(*arg));
415 if (res)
416 return res;
417
418 sess = ts_get_current_session();
419 sess->handle_scall = scall_handle_ldelf;
420
421 res = thread_enter_user_mode(usr_stack, 0, 0, 0,
422 usr_stack, uctx->dl_entry_func,
423 is_32bit, &panicked, &panic_code);
424
425 sess->handle_scall = sess->ctx->ops->handle_scall;
426 ldelf_sess_cleanup(sess);
427
428 if (panicked) {
429 EMSG("ldelf dl_entry function panicked");
430 abort_print_current_ts();
431 res = TEE_ERROR_TARGET_DEAD;
432 }
433 if (!res) {
434 TEE_Result res2 = TEE_SUCCESS;
435
436 res2 = GET_USER_SCALAR(res, &usr_arg->ret);
437 if (res2)
438 res = res2;
439 }
440
441 return res;
442 }
443
ldelf_dlsym(struct user_mode_ctx * uctx,TEE_UUID * uuid,const char * sym,size_t symlen,vaddr_t * val)444 TEE_Result ldelf_dlsym(struct user_mode_ctx *uctx, TEE_UUID *uuid,
445 const char *sym, size_t symlen, vaddr_t *val)
446 {
447 uaddr_t usr_stack = uctx->ldelf_stack_ptr;
448 TEE_Result res = TEE_ERROR_GENERIC;
449 struct dl_entry_arg *usr_arg = NULL;
450 struct dl_entry_arg *arg = NULL;
451 uint32_t panic_code = 0;
452 uint32_t panicked = 0;
453 struct ts_session *sess = NULL;
454
455 usr_stack -= ROUNDUP(sizeof(*arg) + symlen + 1, STACK_ALIGNMENT);
456 usr_arg = (void *)usr_stack;
457 arg = bb_alloc(sizeof(*arg));
458 if (!arg)
459 return TEE_ERROR_OUT_OF_MEMORY;
460 memset(arg, 0, sizeof(*arg));
461 arg->cmd = LDELF_DL_ENTRY_DLSYM;
462 arg->dlsym.uuid = *uuid;
463 res = copy_to_user(usr_arg, arg, sizeof(*arg));
464 if (res)
465 return res;
466 res = copy_to_user(usr_arg->dlsym.symbol, sym, symlen + 1);
467 if (res)
468 return res;
469
470 sess = ts_get_current_session();
471 sess->handle_scall = scall_handle_ldelf;
472
473 res = thread_enter_user_mode((vaddr_t)usr_arg, 0, 0, 0,
474 usr_stack, uctx->dl_entry_func,
475 is_32bit, &panicked, &panic_code);
476
477 sess->handle_scall = sess->ctx->ops->handle_scall;
478 ldelf_sess_cleanup(sess);
479
480 if (panicked) {
481 EMSG("ldelf dl_entry function panicked");
482 abort_print_current_ts();
483 res = TEE_ERROR_TARGET_DEAD;
484 }
485 if (!res) {
486 TEE_Result res2 = TEE_SUCCESS;
487
488 res2 = GET_USER_SCALAR(res, &usr_arg->ret);
489 if (res2)
490 res = res2;
491 if (!res)
492 res = GET_USER_SCALAR(*val, &usr_arg->dlsym.val);
493 }
494
495 return res;
496 }
497