// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * Copyright (c) 2015-2020, 2022 Linaro Limited
 * Copyright (c) 2020-2023, Arm Limited
 */

#include <assert.h>
#include <kernel/ldelf_loader.h>
#include <kernel/ldelf_syscalls.h>
#include <kernel/scall.h>
#include <ldelf.h>
#include <mm/mobj.h>
#include <mm/vm.h>
#include <string.h>

#define BOUNCE_BUFFER_SIZE	4096

extern uint8_t ldelf_data[];
extern const unsigned int ldelf_code_size;
extern const unsigned int ldelf_data_size;
extern const unsigned int ldelf_entry;

/* ldelf has the same architecture/register width as the kernel */
#if defined(ARM32) || defined(RV32)
static const bool is_32bit = true;
#else
static const bool is_32bit;
#endif

static TEE_Result alloc_and_map_fobj(struct user_mode_ctx *uctx, size_t sz,
				     uint32_t prot, uint32_t flags, vaddr_t *va)
{
	size_t num_pgs = ROUNDUP(sz, SMALL_PAGE_SIZE) / SMALL_PAGE_SIZE;
	struct fobj *fobj = fobj_ta_mem_alloc(num_pgs);
	struct mobj *mobj = mobj_with_fobj_alloc(fobj, NULL,
						 TEE_MATTR_MEM_TYPE_TAGGED);
	TEE_Result res = TEE_SUCCESS;

	fobj_put(fobj);
	if (!mobj)
		return TEE_ERROR_OUT_OF_MEMORY;
	res = vm_map(uctx, va, num_pgs * SMALL_PAGE_SIZE, prot, flags, mobj, 0);
	mobj_put(mobj);

	return res;
}

/*
 * This function may leave a few mappings behind on error, but that's taken
 * care of by tee_ta_init_user_ta_session() since the entire context is
 * removed then.
 */
TEE_Result ldelf_load_ldelf(struct user_mode_ctx *uctx)
{
	TEE_Result res = TEE_SUCCESS;
	vaddr_t stack_addr = 0;
	vaddr_t code_addr = 0;
	vaddr_t rw_addr = 0;
	vaddr_t bb_addr = 0;
	uint32_t prot = 0;

	uctx->is_32bit = is_32bit;

	res = alloc_and_map_fobj(uctx, BOUNCE_BUFFER_SIZE, TEE_MATTR_PRW, 0,
				 &bb_addr);
	if (res)
		return res;
	uctx->bbuf = (void *)bb_addr;
	uctx->bbuf_size = BOUNCE_BUFFER_SIZE;

	res = alloc_and_map_fobj(uctx, LDELF_STACK_SIZE,
				 TEE_MATTR_URW | TEE_MATTR_PRW, VM_FLAG_LDELF,
				 &stack_addr);
	if (res)
		return res;
	uctx->ldelf_stack_ptr = stack_addr + LDELF_STACK_SIZE;

	res = alloc_and_map_fobj(uctx, ldelf_code_size, TEE_MATTR_PRW,
				 VM_FLAG_LDELF, &code_addr);
	if (res)
		return res;
	uctx->entry_func = code_addr + ldelf_entry;

	rw_addr = ROUNDUP(code_addr + ldelf_code_size, SMALL_PAGE_SIZE);
	res = alloc_and_map_fobj(uctx, ldelf_data_size,
				 TEE_MATTR_URW | TEE_MATTR_PRW, VM_FLAG_LDELF,
				 &rw_addr);
	if (res)
		return res;

	vm_set_ctx(uctx->ts_ctx);

	memcpy((void *)code_addr, ldelf_data, ldelf_code_size);
	memcpy((void *)rw_addr, ldelf_data + ldelf_code_size, ldelf_data_size);

	prot = TEE_MATTR_URX;
	if (IS_ENABLED(CFG_CORE_BTI))
		prot |= TEE_MATTR_GUARDED;

	res = vm_set_prot(uctx, code_addr,
			  ROUNDUP(ldelf_code_size, SMALL_PAGE_SIZE), prot);
	if (res)
		return res;

	DMSG("ldelf load address %#"PRIxVA, code_addr);

	return TEE_SUCCESS;
}

TEE_Result ldelf_init_with_ldelf(struct ts_session *sess,
				 struct user_mode_ctx *uctx)
{
	TEE_Result res = TEE_SUCCESS;
	struct ldelf_arg *arg = NULL;
	uint32_t panic_code = 0;
	uint32_t panicked = 0;
	uaddr_t usr_stack = 0;

	usr_stack = uctx->ldelf_stack_ptr;
	usr_stack -= ROUNDUP(sizeof(*arg), STACK_ALIGNMENT);
	arg = (struct ldelf_arg *)usr_stack;
	memset(arg, 0, sizeof(*arg));
	arg->uuid = uctx->ts_ctx->uuid;
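	/*
	 * Route syscalls to the ldelf syscall handler while ldelf runs in
	 * user mode; the context's own handler is restored right after
	 * thread_enter_user_mode() returns.
	 */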
	sess->handle_scall = scall_handle_ldelf;

	res = thread_enter_user_mode((vaddr_t)arg, 0, 0, 0,
				     usr_stack, uctx->entry_func,
				     is_32bit, &panicked, &panic_code);

	sess->handle_scall = sess->ctx->ops->handle_scall;
	thread_user_clear_vfp(uctx);
	ldelf_sess_cleanup(sess);

	if (panicked) {
		abort_print_current_ts();
		EMSG("ldelf panicked");
		return TEE_ERROR_GENERIC;
	}
	if (res) {
		EMSG("ldelf failed with res: %#"PRIx32, res);
		return res;
	}

	res = vm_check_access_rights(uctx,
				     TEE_MEMORY_ACCESS_READ |
				     TEE_MEMORY_ACCESS_ANY_OWNER,
				     (uaddr_t)arg, sizeof(*arg));
	if (res)
		return res;

	if (is_user_ta_ctx(uctx->ts_ctx)) {
		/*
		 * This is already checked by the elf loader, but since it runs
		 * in user mode we're not trusting it entirely.
		 */
		if (arg->flags & ~TA_FLAGS_MASK)
			return TEE_ERROR_BAD_FORMAT;

		to_user_ta_ctx(uctx->ts_ctx)->ta_ctx.flags = arg->flags;
	}

	uctx->is_32bit = arg->is_32bit;
	uctx->entry_func = arg->entry_func;
	uctx->load_addr = arg->load_addr;
	uctx->stack_ptr = arg->stack_ptr;
	uctx->dump_entry_func = arg->dump_entry;
#ifdef CFG_FTRACE_SUPPORT
	uctx->ftrace_entry_func = arg->ftrace_entry;
	sess->fbuf = arg->fbuf;
#endif
	uctx->dl_entry_func = arg->dl_entry;

	return TEE_SUCCESS;
}

TEE_Result ldelf_dump_state(struct user_mode_ctx *uctx)
{
	TEE_Result res = TEE_SUCCESS;
	uaddr_t usr_stack = uctx->ldelf_stack_ptr;
	struct dump_entry_arg *arg = NULL;
	uint32_t panic_code = 0;
	uint32_t panicked = 0;
	struct thread_specific_data *tsd = thread_get_tsd();
	struct ts_session *sess = NULL;
	struct vm_region *r = NULL;
	size_t n = 0;

	TAILQ_FOREACH(r, &uctx->vm_info.regions, link)
		if (r->attr & TEE_MATTR_URWX)
			n++;

	usr_stack = uctx->ldelf_stack_ptr;
	usr_stack -= ROUNDUP(sizeof(*arg) + n * sizeof(struct dump_map),
			     STACK_ALIGNMENT);
	arg = (struct dump_entry_arg *)usr_stack;

	res = vm_check_access_rights(uctx,
				     TEE_MEMORY_ACCESS_READ |
				     TEE_MEMORY_ACCESS_ANY_OWNER,
				     (uaddr_t)arg, sizeof(*arg));
	if (res) {
		EMSG("ldelf stack is inaccessible!");
		return res;
	}

	memset(arg, 0, sizeof(*arg) + n * sizeof(struct dump_map));

	arg->num_maps = n;
	n = 0;
	TAILQ_FOREACH(r, &uctx->vm_info.regions, link) {
		if (r->attr & TEE_MATTR_URWX) {
			if (r->mobj)
				mobj_get_pa(r->mobj, r->offset, 0,
					    &arg->maps[n].pa);
			arg->maps[n].va = r->va;
			arg->maps[n].sz = r->size;
			if (r->attr & TEE_MATTR_UR)
				arg->maps[n].flags |= DUMP_MAP_READ;
			if (r->attr & TEE_MATTR_UW)
				arg->maps[n].flags |= DUMP_MAP_WRITE;
			if (r->attr & TEE_MATTR_UX)
				arg->maps[n].flags |= DUMP_MAP_EXEC;
			if (r->attr & TEE_MATTR_SECURE)
				arg->maps[n].flags |= DUMP_MAP_SECURE;
			if (r->flags & VM_FLAG_EPHEMERAL)
				arg->maps[n].flags |= DUMP_MAP_EPHEM;
			if (r->flags & VM_FLAG_LDELF)
				arg->maps[n].flags |= DUMP_MAP_LDELF;
			n++;
		}
	}

	arg->is_32bit = uctx->is_32bit;
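	/*
	 * Copy the register state saved when the abort was taken so that
	 * ldelf's dump function can print a useful call trace.
	 */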
#ifdef ARM32
	arg->arm32.regs[0] = tsd->abort_regs.r0;
	arg->arm32.regs[1] = tsd->abort_regs.r1;
	arg->arm32.regs[2] = tsd->abort_regs.r2;
	arg->arm32.regs[3] = tsd->abort_regs.r3;
	arg->arm32.regs[4] = tsd->abort_regs.r4;
	arg->arm32.regs[5] = tsd->abort_regs.r5;
	arg->arm32.regs[6] = tsd->abort_regs.r6;
	arg->arm32.regs[7] = tsd->abort_regs.r7;
	arg->arm32.regs[8] = tsd->abort_regs.r8;
	arg->arm32.regs[9] = tsd->abort_regs.r9;
	arg->arm32.regs[10] = tsd->abort_regs.r10;
	arg->arm32.regs[11] = tsd->abort_regs.r11;
	arg->arm32.regs[12] = tsd->abort_regs.ip;
	arg->arm32.regs[13] = tsd->abort_regs.usr_sp; /*SP*/
	arg->arm32.regs[14] = tsd->abort_regs.usr_lr; /*LR*/
	arg->arm32.regs[15] = tsd->abort_regs.elr; /*PC*/
#endif /*ARM32*/
#ifdef ARM64
	if (uctx->is_32bit) {
		arg->arm32.regs[0] = tsd->abort_regs.x0;
		arg->arm32.regs[1] = tsd->abort_regs.x1;
		arg->arm32.regs[2] = tsd->abort_regs.x2;
		arg->arm32.regs[3] = tsd->abort_regs.x3;
		arg->arm32.regs[4] = tsd->abort_regs.x4;
		arg->arm32.regs[5] = tsd->abort_regs.x5;
		arg->arm32.regs[6] = tsd->abort_regs.x6;
		arg->arm32.regs[7] = tsd->abort_regs.x7;
		arg->arm32.regs[8] = tsd->abort_regs.x8;
		arg->arm32.regs[9] = tsd->abort_regs.x9;
		arg->arm32.regs[10] = tsd->abort_regs.x10;
		arg->arm32.regs[11] = tsd->abort_regs.x11;
		arg->arm32.regs[12] = tsd->abort_regs.x12;
		arg->arm32.regs[13] = tsd->abort_regs.x13; /*SP*/
		arg->arm32.regs[14] = tsd->abort_regs.x14; /*LR*/
		arg->arm32.regs[15] = tsd->abort_regs.elr; /*PC*/
	} else {
		arg->arm64.fp = tsd->abort_regs.x29;
		arg->arm64.pc = tsd->abort_regs.elr;
		arg->arm64.sp = tsd->abort_regs.sp_el0;
	}
#endif /*ARM64*/
#if defined(RV64) || defined(RV32)
	arg->rv.fp = tsd->abort_regs.s0;
	arg->rv.pc = tsd->abort_regs.epc;
	arg->rv.sp = tsd->abort_regs.sp;
#endif /*RV64||RV32*/

	sess = ts_get_current_session();
	sess->handle_scall = scall_handle_ldelf;

	res = thread_enter_user_mode((vaddr_t)arg, 0, 0, 0,
				     usr_stack, uctx->dump_entry_func,
				     is_32bit, &panicked, &panic_code);

	sess->handle_scall = sess->ctx->ops->handle_scall;
	thread_user_clear_vfp(uctx);
	ldelf_sess_cleanup(sess);

	if (panicked) {
		uctx->dump_entry_func = 0;
		EMSG("ldelf dump function panicked");
		abort_print_current_ts();
		res = TEE_ERROR_TARGET_DEAD;
	}

	return res;
}

#ifdef CFG_FTRACE_SUPPORT
TEE_Result ldelf_dump_ftrace(struct user_mode_ctx *uctx,
			     void *buf, size_t *blen)
{
	uaddr_t usr_stack = uctx->ldelf_stack_ptr;
	TEE_Result res = TEE_SUCCESS;
	uint32_t panic_code = 0;
	uint32_t panicked = 0;
	size_t *arg = NULL;
	struct ts_session *sess = NULL;

	if (!uctx->ftrace_entry_func)
		return TEE_ERROR_NOT_SUPPORTED;

	usr_stack -= ROUNDUP(sizeof(*arg), STACK_ALIGNMENT);
	arg = (size_t *)usr_stack;

	res = vm_check_access_rights(uctx,
				     TEE_MEMORY_ACCESS_READ |
				     TEE_MEMORY_ACCESS_ANY_OWNER,
				     (uaddr_t)arg, sizeof(*arg));
	if (res) {
		EMSG("ldelf stack is inaccessible!");
		return res;
	}

	*arg = *blen;

	sess = ts_get_current_session();
	sess->handle_scall = scall_handle_ldelf;

	res = thread_enter_user_mode((vaddr_t)buf, (vaddr_t)arg, 0, 0,
				     usr_stack, uctx->ftrace_entry_func,
				     is_32bit, &panicked, &panic_code);

	sess->handle_scall = sess->ctx->ops->handle_scall;
	thread_user_clear_vfp(uctx);
	ldelf_sess_cleanup(sess);

	if (panicked) {
		uctx->ftrace_entry_func = 0;
		EMSG("ldelf ftrace function panicked");
		abort_print_current_ts();
		res = TEE_ERROR_TARGET_DEAD;
	}

	if (!res) {
		if (*arg > *blen)
			res = TEE_ERROR_SHORT_BUFFER;
		*blen = *arg;
	}

	return res;
}
#endif /*CFG_FTRACE_SUPPORT*/
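
/*
 * Enter ldelf's dl_entry function with an LDELF_DL_ENTRY_DLOPEN command
 * placed on the ldelf stack, asking it to load the library identified by
 * @uuid into the TA's address space.
 */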
TEE_Result ldelf_dlopen(struct user_mode_ctx *uctx, TEE_UUID *uuid,
			uint32_t flags)
{
	uaddr_t usr_stack = uctx->ldelf_stack_ptr;
	TEE_Result res = TEE_ERROR_GENERIC;
	struct dl_entry_arg *arg = NULL;
	uint32_t panic_code = 0;
	uint32_t panicked = 0;
	struct ts_session *sess = NULL;

	assert(uuid);

	usr_stack -= ROUNDUP(sizeof(*arg), STACK_ALIGNMENT);
	arg = (struct dl_entry_arg *)usr_stack;

	res = vm_check_access_rights(uctx,
				     TEE_MEMORY_ACCESS_READ |
				     TEE_MEMORY_ACCESS_WRITE |
				     TEE_MEMORY_ACCESS_ANY_OWNER,
				     (uaddr_t)arg, sizeof(*arg));
	if (res) {
		EMSG("ldelf stack is inaccessible!");
		return res;
	}

	memset(arg, 0, sizeof(*arg));
	arg->cmd = LDELF_DL_ENTRY_DLOPEN;
	arg->dlopen.uuid = *uuid;
	arg->dlopen.flags = flags;

	sess = ts_get_current_session();
	sess->handle_scall = scall_handle_ldelf;

	res = thread_enter_user_mode((vaddr_t)arg, 0, 0, 0,
				     usr_stack, uctx->dl_entry_func,
				     is_32bit, &panicked, &panic_code);

	sess->handle_scall = sess->ctx->ops->handle_scall;
	ldelf_sess_cleanup(sess);

	if (panicked) {
		EMSG("ldelf dl_entry function panicked");
		abort_print_current_ts();
		res = TEE_ERROR_TARGET_DEAD;
	}
	if (!res)
		res = arg->ret;

	return res;
}

TEE_Result ldelf_dlsym(struct user_mode_ctx *uctx, TEE_UUID *uuid,
		       const char *sym, size_t maxlen, vaddr_t *val)
{
	uaddr_t usr_stack = uctx->ldelf_stack_ptr;
	TEE_Result res = TEE_ERROR_GENERIC;
	struct dl_entry_arg *arg = NULL;
	uint32_t panic_code = 0;
	uint32_t panicked = 0;
	size_t len = strnlen(sym, maxlen);
	struct ts_session *sess = NULL;

	if (len == maxlen)
		return TEE_ERROR_BAD_PARAMETERS;

	usr_stack -= ROUNDUP(sizeof(*arg) + len + 1, STACK_ALIGNMENT);
	arg = (struct dl_entry_arg *)usr_stack;

	res = vm_check_access_rights(uctx,
				     TEE_MEMORY_ACCESS_READ |
				     TEE_MEMORY_ACCESS_WRITE |
				     TEE_MEMORY_ACCESS_ANY_OWNER,
				     (uaddr_t)arg, sizeof(*arg) + len + 1);
	if (res) {
		EMSG("ldelf stack is inaccessible!");
		return res;
	}

	memset(arg, 0, sizeof(*arg));
	arg->cmd = LDELF_DL_ENTRY_DLSYM;
	arg->dlsym.uuid = *uuid;
	memcpy(arg->dlsym.symbol, sym, len);
	arg->dlsym.symbol[len] = '\0';

	sess = ts_get_current_session();
	sess->handle_scall = scall_handle_ldelf;

	res = thread_enter_user_mode((vaddr_t)arg, 0, 0, 0,
				     usr_stack, uctx->dl_entry_func,
				     is_32bit, &panicked, &panic_code);

	sess->handle_scall = sess->ctx->ops->handle_scall;
	ldelf_sess_cleanup(sess);

	if (panicked) {
		EMSG("ldelf dl_entry function panicked");
		abort_print_current_ts();
		res = TEE_ERROR_TARGET_DEAD;
	}
	if (!res) {
		res = arg->ret;
		if (!res)
			*val = arg->dlsym.val;
	}

	return res;
}