// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * Copyright (c) 2015-2020, 2022 Linaro Limited
 * Copyright (c) 2020-2023, Arm Limited
 */

/*
 * Kernel-side support for ldelf, the user-mode ELF loader: maps the
 * embedded ldelf image into a user-mode context and provides entry points
 * that run ldelf functions (init, state dump, ftrace dump, dlopen/dlsym)
 * in user mode on behalf of the kernel.
 */

#include <assert.h>
#include <kernel/ldelf_loader.h>
#include <kernel/ldelf_syscalls.h>
#include <kernel/scall.h>
#include <kernel/user_access.h>
#include <ldelf.h>
#include <mm/mobj.h>
#include <mm/vm.h>

/* Size of the per-context kernel bounce buffer mapped in ldelf_load_ldelf() */
#define BOUNCE_BUFFER_SIZE 4096

/* The ldelf binary image and layout constants, linked into the kernel */
extern uint8_t ldelf_data[];
extern const unsigned int ldelf_code_size;
extern const unsigned int ldelf_data_size;
extern const unsigned int ldelf_entry;

/* ldelf has the same architecture/register width as the kernel */
#if defined(ARM32) || defined(RV32)
static const bool is_32bit = true;
#else
static const bool is_32bit;
#endif

/*
 * Allocate @sz bytes (rounded up to whole small pages) of TA memory backed
 * by a tagged fobj and map it into @uctx with protection @prot and VM flags
 * @flags. On success *@va holds the virtual address of the mapping.
 *
 * The local fobj reference is dropped right away (the mobj holds its own);
 * the local mobj reference is dropped after vm_map() — presumably the
 * mapping retains the mobj on success. NOTE(review): relies on
 * fobj_put(NULL) being a no-op when fobj_ta_mem_alloc() fails, in which
 * case mobj_with_fobj_alloc() is expected to return NULL — confirm against
 * the mm API.
 */
static TEE_Result alloc_and_map_fobj(struct user_mode_ctx *uctx, size_t sz,
				     uint32_t prot, uint32_t flags, vaddr_t *va)
{
	size_t num_pgs = ROUNDUP(sz, SMALL_PAGE_SIZE) / SMALL_PAGE_SIZE;
	struct fobj *fobj = fobj_ta_mem_alloc(num_pgs);
	struct mobj *mobj = mobj_with_fobj_alloc(fobj, NULL,
						 TEE_MATTR_MEM_TYPE_TAGGED);
	TEE_Result res = TEE_SUCCESS;

	fobj_put(fobj);
	if (!mobj)
		return TEE_ERROR_OUT_OF_MEMORY;
	res = vm_map(uctx, va, num_pgs * SMALL_PAGE_SIZE, prot, flags, mobj, 0);
	mobj_put(mobj);

	return res;
}

/*
 * This function may leave a few mappings behind on error, but that's taken
 * care of by tee_ta_init_user_ta_session() since the entire context is
 * removed then.
 */
/*
 * Map the ldelf image into @uctx: a kernel-only bounce buffer, the ldelf
 * user stack, the code segment and the data segment. The code pages are
 * mapped PRW first so the kernel can copy the image in, then flipped to
 * user-executable (URX, plus BTI guarding when enabled).
 */
TEE_Result ldelf_load_ldelf(struct user_mode_ctx *uctx)
{
	TEE_Result res = TEE_SUCCESS;
	vaddr_t stack_addr = 0;
	vaddr_t code_addr = 0;
	vaddr_t rw_addr = 0;
	vaddr_t bb_addr = 0;
	uint32_t prot = 0;

	uctx->is_32bit = is_32bit;

	/* Kernel-private (PRW, no user bits) bounce buffer for this context */
	res = alloc_and_map_fobj(uctx, BOUNCE_BUFFER_SIZE, TEE_MATTR_PRW, 0,
				 &bb_addr);
	if (res)
		return res;
	uctx->bbuf = (void *)bb_addr;
	uctx->bbuf_size = BOUNCE_BUFFER_SIZE;

	res = alloc_and_map_fobj(uctx, LDELF_STACK_SIZE,
				 TEE_MATTR_URW | TEE_MATTR_PRW, VM_FLAG_LDELF,
				 &stack_addr);
	if (res)
		return res;
	/* Stack grows down: initial SP is the top of the mapping */
	uctx->ldelf_stack_ptr = stack_addr + LDELF_STACK_SIZE;

	/* PRW for now so the kernel can write the code; made URX below */
	res = alloc_and_map_fobj(uctx, ldelf_code_size, TEE_MATTR_PRW,
				 VM_FLAG_LDELF, &code_addr);
	if (res)
		return res;
	uctx->entry_func = code_addr + ldelf_entry;

	/* Data segment follows the code, page aligned */
	rw_addr = ROUNDUP(code_addr + ldelf_code_size, SMALL_PAGE_SIZE);
	res = alloc_and_map_fobj(uctx, ldelf_data_size,
				 TEE_MATTR_URW | TEE_MATTR_PRW, VM_FLAG_LDELF,
				 &rw_addr);
	if (res)
		return res;

	/* Activate this context's address space before touching the maps */
	vm_set_ctx(uctx->ts_ctx);

	/* Code pages are kernel-only (PRW) here, so a direct memcpy is fine */
	memcpy((void *)code_addr, ldelf_data, ldelf_code_size);

	/* Data pages are user-accessible, go through the user-access API */
	res = copy_to_user((void *)rw_addr, ldelf_data + ldelf_code_size,
			   ldelf_data_size);
	if (res)
		return res;

	prot = TEE_MATTR_URX;
	if (IS_ENABLED(CFG_CORE_BTI))
		prot |= TEE_MATTR_GUARDED;

	/* Flip code from kernel-RW to user-RX now the image is in place */
	res = vm_set_prot(uctx, code_addr,
			  ROUNDUP(ldelf_code_size, SMALL_PAGE_SIZE), prot);
	if (res)
		return res;

	DMSG("ldelf load address %#"PRIxVA, code_addr);

	return TEE_SUCCESS;
}

/*
 * Run ldelf's main entry point in user mode to load the TA/SP, then read
 * back the struct ldelf_arg it filled in (entry point, load address, stack
 * pointer, etc.) from the user stack and store the results in @uctx.
 *
 * The argument struct lives on the ldelf user stack; it is read back via a
 * bounce-buffer copy (bb_memdup_user) since ldelf runs in user mode and is
 * not fully trusted — hence also the re-validation of the TA flags below.
 * NOTE(review): the early returns after bb_memdup_user() skip bb_free();
 * presumably bb allocations are per-thread scratch reclaimed elsewhere —
 * confirm against the bb_* API.
 */
TEE_Result ldelf_init_with_ldelf(struct ts_session *sess,
				 struct user_mode_ctx *uctx)
{
	TEE_Result res = TEE_SUCCESS;
	struct ldelf_arg *arg = NULL;
	uint32_t panic_code = 0;
	uint32_t panicked = 0;
	uaddr_t usr_stack = 0;
	struct ldelf_arg *arg_bbuf = NULL;
	void *bbuf = NULL;

	/* Reserve room for the argument struct on the ldelf user stack */
	usr_stack = uctx->ldelf_stack_ptr;
	usr_stack -= ROUNDUP(sizeof(*arg), STACK_ALIGNMENT);
	arg = (struct ldelf_arg *)usr_stack;
	/* While ldelf runs, its own syscall table handles scalls */
	sess->handle_scall = scall_handle_ldelf;

	res = clear_user(arg, sizeof(*arg));
	if (res)
		return res;

	/* Tell ldelf which TA/SP to load */
	res = PUT_USER_SCALAR(uctx->ts_ctx->uuid, &arg->uuid);
	if (res)
		return res;

	res = thread_enter_user_mode((vaddr_t)arg, 0, 0, 0,
				     usr_stack, uctx->entry_func,
				     is_32bit, &panicked, &panic_code);

	/* Restore the context's normal scall handler */
	sess->handle_scall = sess->ctx->ops->handle_scall;
	thread_user_clear_vfp(uctx);
	ldelf_sess_cleanup(sess);

	if (panicked) {
		abort_print_current_ts();
		EMSG("ldelf panicked");
		return TEE_ERROR_GENERIC;
	}
	if (res) {
		EMSG("ldelf failed with res: %#"PRIx32, res);
		return res;
	}

	/* Snapshot the user-written results into kernel memory */
	res = bb_memdup_user(arg, sizeof(*arg), &bbuf);
	if (res)
		return res;

	arg_bbuf = bbuf;

	if (is_user_ta_ctx(uctx->ts_ctx)) {
		/*
		 * This is already checked by the elf loader, but since it runs
		 * in user mode we're not trusting it entirely.
		 */
		if (arg_bbuf->flags & ~TA_FLAGS_MASK)
			return TEE_ERROR_BAD_FORMAT;

		to_user_ta_ctx(uctx->ts_ctx)->ta_ctx.flags = arg_bbuf->flags;
	}

	/* Adopt the loaded binary's entry points and layout */
	uctx->is_32bit = arg_bbuf->is_32bit;
	uctx->entry_func = arg_bbuf->entry_func;
	uctx->load_addr = arg_bbuf->load_addr;
	uctx->stack_ptr = arg_bbuf->stack_ptr;
	uctx->dump_entry_func = arg_bbuf->dump_entry;
#ifdef CFG_FTRACE_SUPPORT
	uctx->ftrace_entry_func = arg_bbuf->ftrace_entry;
	sess->fbuf = arg_bbuf->fbuf;
#endif
	uctx->dl_entry_func = arg_bbuf->dl_entry;

	bb_free(bbuf, sizeof(*arg));

	return TEE_SUCCESS;
}

/*
 * Invoke ldelf's dump entry function (registered in
 * ldelf_init_with_ldelf()) to produce an abort/crash dump. Builds a
 * struct dump_entry_arg containing every user-accessible mapping plus a
 * snapshot of the aborting thread's registers, copies it onto the ldelf
 * user stack, and enters user mode at dump_entry_func.
 */
TEE_Result ldelf_dump_state(struct user_mode_ctx *uctx)
{
	TEE_Result res = TEE_SUCCESS;
	uaddr_t usr_stack = uctx->ldelf_stack_ptr;
	struct dump_entry_arg *arg = NULL;
	uint32_t panic_code = 0;
	uint32_t panicked = 0;
	struct thread_specific_data *tsd = thread_get_tsd();
	struct ts_session *sess = NULL;
	struct vm_region *r = NULL;
	size_t arg_size = 0;
	size_t n = 0;

	/* Count regions with any user access bit set to size the map array */
	TAILQ_FOREACH(r, &uctx->vm_info.regions, link)
		if (r->attr & TEE_MATTR_URWX)
			n++;

	/* arg has a flexible map array: header plus n dump_map entries */
	arg_size = ROUNDUP(sizeof(*arg) + n * sizeof(struct dump_map),
			   STACK_ALIGNMENT);

	usr_stack = uctx->ldelf_stack_ptr;
	usr_stack -= arg_size;

	/* Stage the argument in a kernel bounce buffer first */
	arg = bb_alloc(arg_size);
	if (!arg)
		return TEE_ERROR_OUT_OF_MEMORY;
	memset(arg, 0, arg_size);

	arg->num_maps = n;
	n = 0;
	TAILQ_FOREACH(r, &uctx->vm_info.regions, link) {
		if (r->attr & TEE_MATTR_URWX) {
			if (r->mobj)
				mobj_get_pa(r->mobj, r->offset, 0,
					    &arg->maps[n].pa);
			arg->maps[n].va = r->va;
			arg->maps[n].sz = r->size;
			if (r->attr & TEE_MATTR_UR)
				arg->maps[n].flags |= DUMP_MAP_READ;
			if (r->attr & TEE_MATTR_UW)
				arg->maps[n].flags |= DUMP_MAP_WRITE;
			if (r->attr & TEE_MATTR_UX)
				arg->maps[n].flags |= DUMP_MAP_EXEC;
			if (r->attr & TEE_MATTR_SECURE)
				arg->maps[n].flags |= DUMP_MAP_SECURE;
			if (r->flags & VM_FLAG_EPHEMERAL)
				arg->maps[n].flags |= DUMP_MAP_EPHEM;
			if (r->flags & VM_FLAG_LDELF)
				arg->maps[n].flags |= DUMP_MAP_LDELF;
			n++;
		}
	}

	/* Snapshot the register state captured at abort time */
	arg->is_32bit = uctx->is_32bit;
#ifdef ARM32
	arg->arm32.regs[0] = tsd->abort_regs.r0;
	arg->arm32.regs[1] = tsd->abort_regs.r1;
	arg->arm32.regs[2] = tsd->abort_regs.r2;
	arg->arm32.regs[3] = tsd->abort_regs.r3;
	arg->arm32.regs[4] = tsd->abort_regs.r4;
	arg->arm32.regs[5] = tsd->abort_regs.r5;
	arg->arm32.regs[6] = tsd->abort_regs.r6;
	arg->arm32.regs[7] = tsd->abort_regs.r7;
	arg->arm32.regs[8] = tsd->abort_regs.r8;
	arg->arm32.regs[9] = tsd->abort_regs.r9;
	arg->arm32.regs[10] = tsd->abort_regs.r10;
	arg->arm32.regs[11] = tsd->abort_regs.r11;
	arg->arm32.regs[12] = tsd->abort_regs.ip;
	arg->arm32.regs[13] = tsd->abort_regs.usr_sp; /*SP*/
	arg->arm32.regs[14] = tsd->abort_regs.usr_lr; /*LR*/
	arg->arm32.regs[15] = tsd->abort_regs.elr; /*PC*/
#endif /*ARM32*/
#ifdef ARM64
	if (uctx->is_32bit) {
		/* AArch32 state on an AArch64 core: x0-x14 map to r0-r14 */
		arg->arm32.regs[0] = tsd->abort_regs.x0;
		arg->arm32.regs[1] = tsd->abort_regs.x1;
		arg->arm32.regs[2] = tsd->abort_regs.x2;
		arg->arm32.regs[3] = tsd->abort_regs.x3;
		arg->arm32.regs[4] = tsd->abort_regs.x4;
		arg->arm32.regs[5] = tsd->abort_regs.x5;
		arg->arm32.regs[6] = tsd->abort_regs.x6;
		arg->arm32.regs[7] = tsd->abort_regs.x7;
		arg->arm32.regs[8] = tsd->abort_regs.x8;
		arg->arm32.regs[9] = tsd->abort_regs.x9;
		arg->arm32.regs[10] = tsd->abort_regs.x10;
		arg->arm32.regs[11] = tsd->abort_regs.x11;
		arg->arm32.regs[12] = tsd->abort_regs.x12;
		arg->arm32.regs[13] = tsd->abort_regs.x13; /*SP*/
		arg->arm32.regs[14] = tsd->abort_regs.x14; /*LR*/
		arg->arm32.regs[15] = tsd->abort_regs.elr; /*PC*/
	} else {
		arg->arm64.fp = tsd->abort_regs.x29;
		arg->arm64.pc = tsd->abort_regs.elr;
		arg->arm64.sp = tsd->abort_regs.sp_el0;
	}
#endif /*ARM64*/
#if defined(RV64) || defined(RV32)
	arg->rv.fp = tsd->abort_regs.s0;
	arg->rv.pc = tsd->abort_regs.epc;
	arg->rv.sp = tsd->abort_regs.sp;
#endif /*RV64||RV32*/

	/* Publish the staged argument onto the ldelf user stack */
	res = copy_to_user((void *)usr_stack, arg, arg_size);
	if (res)
		return res;

	sess = ts_get_current_session();
	sess->handle_scall = scall_handle_ldelf;

	res = thread_enter_user_mode(usr_stack, 0, 0, 0,
				     usr_stack, uctx->dump_entry_func,
				     is_32bit, &panicked, &panic_code);

	sess->handle_scall = sess->ctx->ops->handle_scall;
	thread_user_clear_vfp(uctx);
	ldelf_sess_cleanup(sess);

	if (panicked) {
		/* Disable further dump attempts with a broken entry point */
		uctx->dump_entry_func = 0;
		EMSG("ldelf dump function panicked");
		abort_print_current_ts();
		res = TEE_ERROR_TARGET_DEAD;
	}

	return res;
}

#ifdef CFG_FTRACE_SUPPORT
/*
 * Invoke ldelf's ftrace dump function to copy the function trace into
 * @buf. On entry *@blen is the capacity of @buf; on return it holds the
 * size ldelf reported. Returns TEE_ERROR_SHORT_BUFFER when the reported
 * size exceeds the capacity.
 */
TEE_Result ldelf_dump_ftrace(struct user_mode_ctx *uctx,
			     void *buf, size_t *blen)
{
	uaddr_t usr_stack = uctx->ldelf_stack_ptr;
	TEE_Result res = TEE_SUCCESS;
	uint32_t panic_code = 0;
	uint32_t panicked = 0;
	size_t *arg = NULL;
	struct ts_session *sess = NULL;

	if (!uctx->ftrace_entry_func)
		return TEE_ERROR_NOT_SUPPORTED;

	/* In/out length parameter lives on the ldelf user stack */
	usr_stack -= ROUNDUP(sizeof(*arg), STACK_ALIGNMENT);
	arg = (size_t *)usr_stack;

	res = vm_check_access_rights(uctx,
				     TEE_MEMORY_ACCESS_READ |
				     TEE_MEMORY_ACCESS_ANY_OWNER,
				     (uaddr_t)arg, sizeof(*arg));
	if (res) {
		EMSG("ldelf stack is inaccessible!");
		return res;
	}

	/*
	 * Direct dereference of the (checked) user address — presumably the
	 * context's address space is active here, unlike the copy_to_user
	 * paths above.
	 */
	*arg = *blen;

	sess = ts_get_current_session();
	sess->handle_scall = scall_handle_ldelf;

	res = thread_enter_user_mode((vaddr_t)buf, (vaddr_t)arg, 0, 0,
				     usr_stack, uctx->ftrace_entry_func,
				     is_32bit, &panicked, &panic_code);

	sess->handle_scall = sess->ctx->ops->handle_scall;
	thread_user_clear_vfp(uctx);
	ldelf_sess_cleanup(sess);

	if (panicked) {
		uctx->ftrace_entry_func = 0;
		EMSG("ldelf ftrace function panicked");
		abort_print_current_ts();
		res = TEE_ERROR_TARGET_DEAD;
	}

	if (!res) {
		/* ldelf wrote the required size back into *arg */
		if (*arg > *blen)
			res = TEE_ERROR_SHORT_BUFFER;
		*blen = *arg;
	}

	return res;
}
#endif /*CFG_FTRACE_SUPPORT*/

/*
 * Invoke ldelf's dl entry function with LDELF_DL_ENTRY_DLOPEN to load the
 * library identified by @uuid into the context. @flags are the dlopen
 * flags passed through to ldelf. The result code written by ldelf into
 * arg->ret on the user stack is read back and returned.
 */
TEE_Result ldelf_dlopen(struct user_mode_ctx *uctx, TEE_UUID *uuid,
			uint32_t flags)
{
	uaddr_t usr_stack = uctx->ldelf_stack_ptr;
	TEE_Result res = TEE_ERROR_GENERIC;
	struct dl_entry_arg *usr_arg = NULL;
	struct dl_entry_arg *arg = NULL;
	uint32_t panic_code = 0;
	uint32_t panicked = 0;
	struct ts_session *sess = NULL;

	assert(uuid);

	/* Stage the argument in a kernel bounce buffer */
	arg = bb_alloc(sizeof(*arg));
	if (!arg)
		return TEE_ERROR_OUT_OF_MEMORY;

	memset(arg, 0, sizeof(*arg));
	arg->cmd = LDELF_DL_ENTRY_DLOPEN;
	arg->dlopen.uuid = *uuid;
	arg->dlopen.flags = flags;

	usr_stack -= ROUNDUP(sizeof(*arg), STACK_ALIGNMENT);
	usr_arg = (void *)usr_stack;

	res = copy_to_user(usr_arg, arg, sizeof(*arg));
	if (res)
		return res;

	sess = ts_get_current_session();
	sess->handle_scall = scall_handle_ldelf;

	res = thread_enter_user_mode(usr_stack, 0, 0, 0,
				     usr_stack, uctx->dl_entry_func,
				     is_32bit, &panicked, &panic_code);

	sess->handle_scall = sess->ctx->ops->handle_scall;
	ldelf_sess_cleanup(sess);

	if (panicked) {
		EMSG("ldelf dl_entry function panicked");
		abort_print_current_ts();
		res = TEE_ERROR_TARGET_DEAD;
	}
	if (!res) {
		TEE_Result res2 = TEE_SUCCESS;

		/* ldelf's own result code, written back on its stack */
		res2 = GET_USER_SCALAR(res, &usr_arg->ret);
		if (res2)
			res = res2;
	}

	return res;
}

/*
 * Invoke ldelf's dl entry function with LDELF_DL_ENTRY_DLSYM to resolve
 * symbol @sym (of length @symlen, excluding the NUL terminator) in the
 * library identified by @uuid. On success *@val receives the symbol's
 * address. The symbol string is copied onto the user stack immediately
 * after the argument struct (into arg->dlsym.symbol).
 */
TEE_Result ldelf_dlsym(struct user_mode_ctx *uctx, TEE_UUID *uuid,
		       const char *sym, size_t symlen, vaddr_t *val)
{
	uaddr_t usr_stack = uctx->ldelf_stack_ptr;
	TEE_Result res = TEE_ERROR_GENERIC;
	struct dl_entry_arg *usr_arg = NULL;
	struct dl_entry_arg *arg = NULL;
	uint32_t panic_code = 0;
	uint32_t panicked = 0;
	struct ts_session *sess = NULL;

	/* Room for the struct plus the NUL-terminated symbol name */
	usr_stack -= ROUNDUP(sizeof(*arg) + symlen + 1, STACK_ALIGNMENT);
	usr_arg = (void *)usr_stack;
	arg = bb_alloc(sizeof(*arg));
	if (!arg)
		return TEE_ERROR_OUT_OF_MEMORY;
	memset(arg, 0, sizeof(*arg));
	arg->cmd = LDELF_DL_ENTRY_DLSYM;
	arg->dlsym.uuid = *uuid;
	res = copy_to_user(usr_arg, arg, sizeof(*arg));
	if (res)
		return res;
	res = copy_to_user(usr_arg->dlsym.symbol, sym, symlen + 1);
	if (res)
		return res;

	sess = ts_get_current_session();
	sess->handle_scall = scall_handle_ldelf;

	res = thread_enter_user_mode((vaddr_t)usr_arg, 0, 0, 0,
				     usr_stack, uctx->dl_entry_func,
				     is_32bit, &panicked, &panic_code);

	sess->handle_scall = sess->ctx->ops->handle_scall;
	ldelf_sess_cleanup(sess);

	if (panicked) {
		EMSG("ldelf dl_entry function panicked");
		abort_print_current_ts();
		res = TEE_ERROR_TARGET_DEAD;
	}
	if (!res) {
		TEE_Result res2 = TEE_SUCCESS;

		/* ldelf's result code, then the resolved address */
		res2 = GET_USER_SCALAR(res, &usr_arg->ret);
		if (res2)
			res = res2;
		if (!res)
			res = GET_USER_SCALAR(*val, &usr_arg->dlsym.val);
	}

	return res;
}