// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2018-2019, 2022 Linaro Limited
 * Copyright (c) 2020-2021, Arm Limited
 */

#include <assert.h>
#include <crypto/crypto.h>
#include <kernel/handle.h>
#include <kernel/ldelf_syscalls.h>
#include <kernel/user_access.h>
#include <kernel/user_mode_ctx.h>
#include <ldelf.h>
#include <mm/file.h>
#include <mm/fobj.h>
#include <mm/mobj.h>
#include <mm/vm.h>
#include <stdlib.h>
#include <string.h>
#include <trace.h>
#include <util.h>

struct bin_handle {
	const struct ts_store_ops *op;
	struct ts_store_handle *h;
	struct file *f;
	size_t offs_bytes;
	size_t size_bytes;
};

TEE_Result ldelf_syscall_map_zi(vaddr_t *va, size_t num_bytes, size_t pad_begin,
				size_t pad_end, unsigned long flags)
{
	TEE_Result res = TEE_SUCCESS;
	struct ts_session *sess = ts_get_current_session();
	struct user_mode_ctx *uctx = to_user_mode_ctx(sess->ctx);
	struct fobj *f = NULL;
	struct mobj *mobj = NULL;
	uint32_t prot = TEE_MATTR_URW | TEE_MATTR_PRW;
	uint32_t vm_flags = 0;

	if (flags & ~LDELF_MAP_FLAG_SHAREABLE)
		return TEE_ERROR_BAD_PARAMETERS;

	if (flags & LDELF_MAP_FLAG_SHAREABLE)
		vm_flags |= VM_FLAG_SHAREABLE;

	f = fobj_ta_mem_alloc(ROUNDUP_DIV(num_bytes, SMALL_PAGE_SIZE));
	if (!f)
		return TEE_ERROR_OUT_OF_MEMORY;
	mobj = mobj_with_fobj_alloc(f, NULL, TEE_MATTR_MEM_TYPE_TAGGED);
	fobj_put(f);
	if (!mobj)
		return TEE_ERROR_OUT_OF_MEMORY;
	res = vm_map_pad(uctx, va, num_bytes, prot, vm_flags,
			 mobj, 0, pad_begin, pad_end, 0);
	mobj_put(mobj);

	return res;
}

TEE_Result ldelf_syscall_unmap(vaddr_t va, size_t num_bytes)
{
	TEE_Result res = TEE_SUCCESS;
	struct ts_session *sess = ts_get_current_session();
	struct user_mode_ctx *uctx = to_user_mode_ctx(sess->ctx);
	size_t sz = ROUNDUP(num_bytes, SMALL_PAGE_SIZE);
	uint32_t vm_flags = 0;
	vaddr_t end_va = 0;

	/*
	 * vm_get_flags() and vm_unmap() are supposed to detect or handle
	 * overflow directly or indirectly. However, since this function is
	 * an API function it's worth having an extra guard here. If nothing
	 * else, it increases code clarity.
	 */
	if (ADD_OVERFLOW(va, sz, &end_va))
		return TEE_ERROR_BAD_PARAMETERS;

	res = vm_get_flags(uctx, va, sz, &vm_flags);
	if (res)
		return res;
	if (vm_flags & VM_FLAG_PERMANENT)
		return TEE_ERROR_ACCESS_DENIED;

	return vm_unmap(uctx, va, sz);
}
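
/*
 * Binary handles created by ldelf_syscall_open_bin() follow a simple
 * lifecycle: open, then any number of map_bin/copy_from_bin calls at
 * increasing offsets, then close. bin_close() is the common destructor,
 * used both on the error paths below and as the handle_db_destroy()
 * callback.
 */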
static void bin_close(void *ptr)
{
	struct bin_handle *binh = ptr;

	if (binh) {
		if (binh->op && binh->h)
			binh->op->close(binh->h);
		file_put(binh->f);
	}
	free(binh);
}

TEE_Result ldelf_syscall_open_bin(const TEE_UUID *uuid, size_t uuid_size,
				  uint32_t *handle)
{
	TEE_Result res = TEE_SUCCESS;
	struct ts_session *sess = ts_get_current_session();
	struct user_mode_ctx *uctx = to_user_mode_ctx(sess->ctx);
	struct system_ctx *sys_ctx = sess->user_ctx;
	struct bin_handle *binh = NULL;
	uint8_t tag[FILE_TAG_SIZE] = { 0 };
	unsigned int tag_len = sizeof(tag);
	int h = 0;

	res = vm_check_access_rights(uctx,
				     TEE_MEMORY_ACCESS_READ |
				     TEE_MEMORY_ACCESS_ANY_OWNER,
				     (uaddr_t)uuid, sizeof(TEE_UUID));
	if (res)
		return res;

	res = vm_check_access_rights(uctx,
				     TEE_MEMORY_ACCESS_WRITE |
				     TEE_MEMORY_ACCESS_ANY_OWNER,
				     (uaddr_t)handle, sizeof(uint32_t));
	if (res)
		return res;

	if (uuid_size != sizeof(*uuid))
		return TEE_ERROR_BAD_PARAMETERS;

	if (!sys_ctx) {
		sys_ctx = calloc(1, sizeof(*sys_ctx));
		if (!sys_ctx)
			return TEE_ERROR_OUT_OF_MEMORY;
		sess->user_ctx = sys_ctx;
	}

	binh = calloc(1, sizeof(*binh));
	if (!binh)
		return TEE_ERROR_OUT_OF_MEMORY;

	if (is_user_ta_ctx(sess->ctx) || is_stmm_ctx(sess->ctx)) {
		SCATTERED_ARRAY_FOREACH(binh->op, ta_stores,
					struct ts_store_ops) {
			DMSG("Lookup user TA ELF %pUl (%s)",
			     (void *)uuid, binh->op->description);

			res = binh->op->open(uuid, &binh->h);
			DMSG("res=%#"PRIx32, res);
			if (res != TEE_ERROR_ITEM_NOT_FOUND &&
			    res != TEE_ERROR_STORAGE_NOT_AVAILABLE)
				break;
		}
	} else if (is_sp_ctx(sess->ctx)) {
		SCATTERED_ARRAY_FOREACH(binh->op, sp_stores,
					struct ts_store_ops) {
			DMSG("Lookup user SP ELF %pUl (%s)",
			     (void *)uuid, binh->op->description);

			res = binh->op->open(uuid, &binh->h);
			DMSG("res=%#"PRIx32, res);
			if (res != TEE_ERROR_ITEM_NOT_FOUND &&
			    res != TEE_ERROR_STORAGE_NOT_AVAILABLE)
				break;
		}
	} else {
		res = TEE_ERROR_ITEM_NOT_FOUND;
	}

	if (res)
		goto err;

	res = binh->op->get_size(binh->h, &binh->size_bytes);
	if (res)
		goto err;
	res = binh->op->get_tag(binh->h, tag, &tag_len);
	if (res)
		goto err;
	binh->f = file_get_by_tag(tag, tag_len);
	if (!binh->f)
		goto err_oom;

	h = handle_get(&sys_ctx->db, binh);
	if (h < 0)
		goto err_oom;
	*handle = h;

	return TEE_SUCCESS;

err_oom:
	res = TEE_ERROR_OUT_OF_MEMORY;
err:
	bin_close(binh);
	return res;
}

TEE_Result ldelf_syscall_close_bin(unsigned long handle)
{
	TEE_Result res = TEE_SUCCESS;
	struct ts_session *sess = ts_get_current_session();
	struct system_ctx *sys_ctx = sess->user_ctx;
	struct bin_handle *binh = NULL;

	if (!sys_ctx)
		return TEE_ERROR_BAD_PARAMETERS;

	binh = handle_put(&sys_ctx->db, handle);
	if (!binh)
		return TEE_ERROR_BAD_PARAMETERS;

	/*
	 * Consume any bytes that were never read (reads with NULL buffers
	 * just advance the stream) so the store can finalize, e.g. complete
	 * its integrity checking of the binary.
	 */
	if (binh->offs_bytes < binh->size_bytes)
		res = binh->op->read(binh->h, NULL, NULL,
				     binh->size_bytes - binh->offs_bytes);

	bin_close(binh);
	if (handle_db_is_empty(&sys_ctx->db)) {
		handle_db_destroy(&sys_ctx->db, bin_close);
		free(sys_ctx);
		sess->user_ctx = NULL;
	}

	return res;
}
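
/*
 * Copy data from the binary into kernel memory (va_core) and/or user
 * memory (va_user). The store interface is sequential, so offsets must
 * not go backwards: a gap before offs_bytes is skipped by reading with
 * NULL buffers, and a range extending past the end of the binary is
 * zero-filled.
 */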
static TEE_Result binh_copy_to(struct bin_handle *binh, vaddr_t va_core,
			       vaddr_t va_user, size_t offs_bytes,
			       size_t num_bytes)
{
	TEE_Result res = TEE_SUCCESS;
	size_t next_offs = 0;

	if (offs_bytes < binh->offs_bytes)
		return TEE_ERROR_BAD_STATE;

	if (ADD_OVERFLOW(offs_bytes, num_bytes, &next_offs))
		return TEE_ERROR_BAD_PARAMETERS;

	if (offs_bytes > binh->offs_bytes) {
		res = binh->op->read(binh->h, NULL, NULL,
				     offs_bytes - binh->offs_bytes);
		if (res)
			return res;
		binh->offs_bytes = offs_bytes;
	}

	if (next_offs > binh->size_bytes) {
		size_t rb = binh->size_bytes - binh->offs_bytes;

		res = binh->op->read(binh->h, (void *)va_core,
				     (void *)va_user, rb);
		if (res)
			return res;
		if (va_core)
			memset((uint8_t *)va_core + rb, 0, num_bytes - rb);
		if (va_user) {
			res = clear_user((uint8_t *)va_user + rb,
					 num_bytes - rb);
			if (res)
				return res;
		}
		binh->offs_bytes = binh->size_bytes;
	} else {
		res = binh->op->read(binh->h, (void *)va_core,
				     (void *)va_user, num_bytes);
		if (res)
			return res;
		binh->offs_bytes = next_offs;
	}

	return TEE_SUCCESS;
}

TEE_Result ldelf_syscall_map_bin(vaddr_t *va, size_t num_bytes,
				 unsigned long handle, size_t offs_bytes,
				 size_t pad_begin, size_t pad_end,
				 unsigned long flags)
{
	TEE_Result res = TEE_SUCCESS;
	struct ts_session *sess = ts_get_current_session();
	struct user_mode_ctx *uctx = to_user_mode_ctx(sess->ctx);
	struct system_ctx *sys_ctx = sess->user_ctx;
	struct bin_handle *binh = NULL;
	uint32_t num_rounded_bytes = 0;
	struct file_slice *fs = NULL;
	bool file_is_locked = false;
	struct mobj *mobj = NULL;
	uint32_t offs_pages = 0;
	size_t num_pages = 0;
	uint32_t prot = 0;
	const uint32_t accept_flags = LDELF_MAP_FLAG_SHAREABLE |
				      LDELF_MAP_FLAG_WRITEABLE |
				      LDELF_MAP_FLAG_BTI |
				      LDELF_MAP_FLAG_EXECUTABLE;

	if (!sys_ctx)
		return TEE_ERROR_BAD_PARAMETERS;

	binh = handle_lookup(&sys_ctx->db, handle);
	if (!binh)
		return TEE_ERROR_BAD_PARAMETERS;

	if ((flags & accept_flags) != flags)
		return TEE_ERROR_BAD_PARAMETERS;

	if ((flags & LDELF_MAP_FLAG_SHAREABLE) &&
	    (flags & LDELF_MAP_FLAG_WRITEABLE))
		return TEE_ERROR_BAD_PARAMETERS;

	if ((flags & LDELF_MAP_FLAG_EXECUTABLE) &&
	    (flags & LDELF_MAP_FLAG_WRITEABLE))
		return TEE_ERROR_BAD_PARAMETERS;

	if (offs_bytes & SMALL_PAGE_MASK)
		return TEE_ERROR_BAD_PARAMETERS;

	prot = TEE_MATTR_UR | TEE_MATTR_PR;
	if (flags & LDELF_MAP_FLAG_WRITEABLE)
		prot |= TEE_MATTR_UW | TEE_MATTR_PW;
	if (flags & LDELF_MAP_FLAG_EXECUTABLE)
		prot |= TEE_MATTR_UX;
	if (flags & LDELF_MAP_FLAG_BTI)
		prot |= TEE_MATTR_GUARDED;

	offs_pages = offs_bytes >> SMALL_PAGE_SHIFT;
	if (ROUNDUP_OVERFLOW(num_bytes, SMALL_PAGE_SIZE, &num_rounded_bytes))
		return TEE_ERROR_BAD_PARAMETERS;
	num_pages = num_rounded_bytes / SMALL_PAGE_SIZE;
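
	/*
	 * Under the file lock we either reuse a slice of the file that an
	 * earlier mapping already registered (which then must be mapped
	 * shareable and read-only) or allocate fresh pages, fill them from
	 * the store and, when read-only, register them as a new slice.
	 */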
	if (!file_trylock(binh->f)) {
		/*
		 * Before we can block on the file lock we must make all
		 * our page tables available for reclaiming in order to
		 * avoid a dead-lock with the other thread (which already
		 * is holding the file lock) mapping lots of memory below.
		 */
		vm_set_ctx(NULL);
		file_lock(binh->f);
		vm_set_ctx(uctx->ts_ctx);
	}
	file_is_locked = true;
	fs = file_find_slice(binh->f, offs_pages);
	if (fs) {
		/* If there's a registered slice it has to match */
		if (fs->page_offset != offs_pages ||
		    num_pages > fs->fobj->num_pages) {
			res = TEE_ERROR_BAD_PARAMETERS;
			goto err;
		}

		/* If there's a slice we must be mapping shareable */
		if (!(flags & LDELF_MAP_FLAG_SHAREABLE)) {
			res = TEE_ERROR_BAD_PARAMETERS;
			goto err;
		}

		mobj = mobj_with_fobj_alloc(fs->fobj, binh->f,
					    TEE_MATTR_MEM_TYPE_TAGGED);
		if (!mobj) {
			res = TEE_ERROR_OUT_OF_MEMORY;
			goto err;
		}
		res = vm_map_pad(uctx, va, num_rounded_bytes,
				 prot, VM_FLAG_READONLY,
				 mobj, 0, pad_begin, pad_end, 0);
		mobj_put(mobj);
		if (res)
			goto err;
	} else {
		struct fobj *f = fobj_ta_mem_alloc(num_pages);
		struct file *file = NULL;
		uint32_t vm_flags = 0;

		if (!f) {
			res = TEE_ERROR_OUT_OF_MEMORY;
			goto err;
		}
		if (!(flags & LDELF_MAP_FLAG_WRITEABLE)) {
			file = binh->f;
			vm_flags |= VM_FLAG_READONLY;
		}

		mobj = mobj_with_fobj_alloc(f, file, TEE_MATTR_MEM_TYPE_TAGGED);
		fobj_put(f);
		if (!mobj) {
			res = TEE_ERROR_OUT_OF_MEMORY;
			goto err;
		}
		res = vm_map_pad(uctx, va, num_rounded_bytes,
				 TEE_MATTR_PRW, vm_flags, mobj, 0,
				 pad_begin, pad_end, 0);
		mobj_put(mobj);
		if (res)
			goto err;
		res = binh_copy_to(binh, *va, 0, offs_bytes, num_bytes);
		if (res)
			goto err_unmap_va;
		res = vm_set_prot(uctx, *va, num_rounded_bytes, prot);
		if (res)
			goto err_unmap_va;

		/*
		 * The context is currently active, set it again to update
		 * the mapping.
		 */
		vm_set_ctx(uctx->ts_ctx);

		if (!(flags & LDELF_MAP_FLAG_WRITEABLE)) {
			res = file_add_slice(binh->f, f, offs_pages);
			if (res)
				goto err_unmap_va;
		}
	}

	file_unlock(binh->f);

	return TEE_SUCCESS;

err_unmap_va:
	if (vm_unmap(uctx, *va, num_rounded_bytes))
		panic();

	/*
	 * The context is currently active, set it again to update
	 * the mapping.
	 */
	vm_set_ctx(uctx->ts_ctx);

err:
	if (file_is_locked)
		file_unlock(binh->f);

	return res;
}
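
/*
 * Copy a part of the opened binary directly into user memory at dst,
 * for data that ldelf reads rather than maps. Offsets must not decrease
 * between calls on the same handle since binh_copy_to() streams
 * sequentially.
 */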
TEE_Result ldelf_syscall_copy_from_bin(void *dst, size_t offs, size_t num_bytes,
				       unsigned long handle)
{
	TEE_Result res = TEE_SUCCESS;
	struct ts_session *sess = ts_get_current_session();
	struct user_mode_ctx *uctx = to_user_mode_ctx(sess->ctx);
	struct system_ctx *sys_ctx = sess->user_ctx;
	struct bin_handle *binh = NULL;

	res = vm_check_access_rights(uctx,
				     TEE_MEMORY_ACCESS_WRITE |
				     TEE_MEMORY_ACCESS_ANY_OWNER,
				     (uaddr_t)dst, num_bytes);
	if (res)
		return res;

	if (!sys_ctx)
		return TEE_ERROR_BAD_PARAMETERS;

	binh = handle_lookup(&sys_ctx->db, handle);
	if (!binh)
		return TEE_ERROR_BAD_PARAMETERS;

	return binh_copy_to(binh, 0, (vaddr_t)dst, offs, num_bytes);
}

TEE_Result ldelf_syscall_set_prot(unsigned long va, size_t num_bytes,
				  unsigned long flags)
{
	TEE_Result res = TEE_SUCCESS;
	struct ts_session *sess = ts_get_current_session();
	struct user_mode_ctx *uctx = to_user_mode_ctx(sess->ctx);
	size_t sz = ROUNDUP(num_bytes, SMALL_PAGE_SIZE);
	uint32_t prot = TEE_MATTR_UR | TEE_MATTR_PR;
	uint32_t vm_flags = 0;
	vaddr_t end_va = 0;
	const uint32_t accept_flags = LDELF_MAP_FLAG_WRITEABLE |
				      LDELF_MAP_FLAG_BTI |
				      LDELF_MAP_FLAG_EXECUTABLE;

	if ((flags & accept_flags) != flags)
		return TEE_ERROR_BAD_PARAMETERS;
	if (flags & LDELF_MAP_FLAG_WRITEABLE)
		prot |= TEE_MATTR_UW | TEE_MATTR_PW;
	if (flags & LDELF_MAP_FLAG_EXECUTABLE)
		prot |= TEE_MATTR_UX;
	if (flags & LDELF_MAP_FLAG_BTI)
		prot |= TEE_MATTR_GUARDED;

	/*
	 * vm_get_flags() and vm_set_prot() are supposed to detect or handle
	 * overflow directly or indirectly. However, since this function is
	 * an API function it's worth having an extra guard here. If nothing
	 * else, it increases code clarity.
	 */
	if (ADD_OVERFLOW(va, sz, &end_va))
		return TEE_ERROR_BAD_PARAMETERS;

	res = vm_get_flags(uctx, va, sz, &vm_flags);
	if (res)
		return res;
	if (vm_flags & VM_FLAG_PERMANENT)
		return TEE_ERROR_ACCESS_DENIED;

	/*
	 * If the segment is a mapping of a part of a file (vm_flags &
	 * VM_FLAG_READONLY) it cannot be made writeable as all mapped
	 * files are mapped read-only.
	 */
	if ((vm_flags & VM_FLAG_READONLY) &&
	    (prot & (TEE_MATTR_UW | TEE_MATTR_PW)))
		return TEE_ERROR_ACCESS_DENIED;

	return vm_set_prot(uctx, va, sz, prot);
}
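
/*
 * Move an existing mapping to a new virtual address. As for unmap and
 * set_prot above, permanent mappings are refused.
 */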
TEE_Result ldelf_syscall_remap(unsigned long old_va, vaddr_t *new_va,
			       size_t num_bytes, size_t pad_begin,
			       size_t pad_end)
{
	TEE_Result res = TEE_SUCCESS;
	struct ts_session *sess = ts_get_current_session();
	struct user_mode_ctx *uctx = to_user_mode_ctx(sess->ctx);
	uint32_t vm_flags = 0;

	res = vm_get_flags(uctx, old_va, num_bytes, &vm_flags);
	if (res)
		return res;
	if (vm_flags & VM_FLAG_PERMANENT)
		return TEE_ERROR_ACCESS_DENIED;

	res = vm_remap(uctx, new_va, old_va, num_bytes, pad_begin, pad_end);

	return res;
}

TEE_Result ldelf_syscall_gen_rnd_num(void *buf, size_t num_bytes)
{
	TEE_Result res = TEE_SUCCESS;
	struct ts_session *sess = ts_get_current_session();
	struct user_mode_ctx *uctx = to_user_mode_ctx(sess->ctx);

	res = vm_check_access_rights(uctx,
				     TEE_MEMORY_ACCESS_WRITE |
				     TEE_MEMORY_ACCESS_ANY_OWNER,
				     (uaddr_t)buf, num_bytes);
	if (res)
		return res;

	return crypto_rng_read(buf, num_bytes);
}

/*
 * Should be called after returning from ldelf. If user_ctx is not NULL it
 * means that ldelf crashed or otherwise didn't complete properly. This
 * function will close the remaining handles and free the context structs
 * allocated by ldelf.
 */
void ldelf_sess_cleanup(struct ts_session *sess)
{
	struct system_ctx *sys_ctx = sess->user_ctx;

	if (sys_ctx) {
		handle_db_destroy(&sys_ctx->db, bin_close);
		free(sys_ctx);
		sess->user_ctx = NULL;
	}
}