// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2018-2019, Linaro Limited
 */

#include <assert.h>
#include <crypto/crypto.h>
#include <kernel/handle.h>
#include <kernel/huk_subkey.h>
#include <kernel/misc.h>
#include <kernel/msg_param.h>
#include <kernel/pseudo_ta.h>
#include <kernel/user_ta.h>
#include <kernel/user_ta_store.h>
#include <ldelf.h>
#include <mm/file.h>
#include <mm/fobj.h>
#include <mm/tee_mmu.h>
#include <pta_system.h>
#include <string.h>
#include <tee_api_defines_extensions.h>
#include <tee_api_defines.h>
#include <util.h>

#define MAX_ENTROPY_IN	32u

struct bin_handle {
	const struct user_ta_store_ops *op;
	struct user_ta_store_handle *h;
	struct file *f;
	size_t offs_bytes;
	size_t size_bytes;
};

struct system_ctx {
	struct handle_db db;
	const struct user_ta_store_ops *store_op;
};

static unsigned int system_pnum;

static TEE_Result system_rng_reseed(struct tee_ta_session *s __unused,
				    uint32_t param_types,
				    TEE_Param params[TEE_NUM_PARAMS])
{
	size_t entropy_sz;
	uint8_t *entropy_input;
	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_MEMREF_INPUT,
					  TEE_PARAM_TYPE_NONE,
					  TEE_PARAM_TYPE_NONE,
					  TEE_PARAM_TYPE_NONE);

	if (exp_pt != param_types)
		return TEE_ERROR_BAD_PARAMETERS;
	entropy_input = params[0].memref.buffer;
	entropy_sz = params[0].memref.size;

	/* Fortuna PRNG requires seed <= 32 bytes */
	if (!entropy_sz)
		return TEE_ERROR_BAD_PARAMETERS;

	entropy_sz = MIN(entropy_sz, MAX_ENTROPY_IN);

	crypto_rng_add_event(CRYPTO_RNG_SRC_NONSECURE, &system_pnum,
			     entropy_input, entropy_sz);
	return TEE_SUCCESS;
}

static TEE_Result system_derive_ta_unique_key(struct tee_ta_session *s,
					      uint32_t param_types,
					      TEE_Param params[TEE_NUM_PARAMS])
{
	size_t data_len = sizeof(TEE_UUID);
	TEE_Result res = TEE_ERROR_GENERIC;
	uint8_t *data = NULL;
	uint32_t access_flags = 0;
	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_MEMREF_INPUT,
					  TEE_PARAM_TYPE_MEMREF_OUTPUT,
					  TEE_PARAM_TYPE_NONE,
					  TEE_PARAM_TYPE_NONE);
	struct user_ta_ctx *utc = NULL;

	if (exp_pt != param_types)
		return TEE_ERROR_BAD_PARAMETERS;

	if (params[0].memref.size > TA_DERIVED_EXTRA_DATA_MAX_SIZE ||
	    params[1].memref.size < TA_DERIVED_KEY_MIN_SIZE ||
	    params[1].memref.size > TA_DERIVED_KEY_MAX_SIZE)
		return TEE_ERROR_BAD_PARAMETERS;

	utc = to_user_ta_ctx(s->ctx);

	/*
	 * The derived key shall not end up in non-secure memory by
	 * mistake.
	 *
	 * Note that we're allowing shared memory as long as it's
	 * secure. This is needed because a TA always uses shared memory
	 * when communicating with another TA.
	 */
	access_flags = TEE_MEMORY_ACCESS_WRITE | TEE_MEMORY_ACCESS_ANY_OWNER |
		       TEE_MEMORY_ACCESS_SECURE;
	res = tee_mmu_check_access_rights(&utc->uctx, access_flags,
					  (uaddr_t)params[1].memref.buffer,
					  params[1].memref.size);
	if (res != TEE_SUCCESS)
		return TEE_ERROR_SECURITY;

	/* Take extra data into account. */
	if (ADD_OVERFLOW(data_len, params[0].memref.size, &data_len))
		return TEE_ERROR_SECURITY;

	data = calloc(data_len, 1);
	if (!data)
		return TEE_ERROR_OUT_OF_MEMORY;

	memcpy(data, &s->ctx->uuid, sizeof(TEE_UUID));

	/* Append the user provided data */
	memcpy(data + sizeof(TEE_UUID), params[0].memref.buffer,
	       params[0].memref.size);

	res = huk_subkey_derive(HUK_SUBKEY_UNIQUE_TA, data, data_len,
				params[1].memref.buffer,
				params[1].memref.size);
	free(data);

	return res;
}

static TEE_Result system_map_zi(struct tee_ta_session *s, uint32_t param_types,
				TEE_Param params[TEE_NUM_PARAMS])
{
	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_INPUT,
					  TEE_PARAM_TYPE_VALUE_INOUT,
					  TEE_PARAM_TYPE_VALUE_INPUT,
					  TEE_PARAM_TYPE_NONE);
	struct user_ta_ctx *utc = to_user_ta_ctx(s->ctx);
	uint32_t prot = TEE_MATTR_URW | TEE_MATTR_PRW;
	TEE_Result res = TEE_ERROR_GENERIC;
	struct mobj *mobj = NULL;
	uint32_t pad_begin = 0;
	uint32_t vm_flags = 0;
	struct fobj *f = NULL;
	uint32_t pad_end = 0;
	size_t num_bytes = 0;
	vaddr_t va = 0;

	if (exp_pt != param_types)
		return TEE_ERROR_BAD_PARAMETERS;
	if (params[0].value.b & ~PTA_SYSTEM_MAP_FLAG_SHAREABLE)
		return TEE_ERROR_BAD_PARAMETERS;

	if (params[0].value.b & PTA_SYSTEM_MAP_FLAG_SHAREABLE)
		vm_flags |= VM_FLAG_SHAREABLE;

	num_bytes = params[0].value.a;
	va = reg_pair_to_64(params[1].value.a, params[1].value.b);
	pad_begin = params[2].value.a;
	pad_end = params[2].value.b;

	f = fobj_ta_mem_alloc(ROUNDUP(num_bytes, SMALL_PAGE_SIZE) /
			      SMALL_PAGE_SIZE);
	if (!f)
		return TEE_ERROR_OUT_OF_MEMORY;
	mobj = mobj_with_fobj_alloc(f, NULL);
	fobj_put(f);
	if (!mobj)
		return TEE_ERROR_OUT_OF_MEMORY;
	res = vm_map_pad(&utc->uctx, &va, num_bytes, prot, vm_flags,
			 mobj, 0, pad_begin, pad_end);
	mobj_put(mobj);
	if (!res)
		reg_pair_from_64(va, &params[1].value.a, &params[1].value.b);

	return res;
}

static TEE_Result system_unmap(struct tee_ta_session *s, uint32_t param_types,
			       TEE_Param params[TEE_NUM_PARAMS])
{
	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_INPUT,
					  TEE_PARAM_TYPE_VALUE_INPUT,
					  TEE_PARAM_TYPE_NONE,
					  TEE_PARAM_TYPE_NONE);
	struct user_ta_ctx *utc = to_user_ta_ctx(s->ctx);
	TEE_Result res = TEE_SUCCESS;
	uint32_t vm_flags = 0;
	vaddr_t va = 0;
	size_t sz = 0;

	if (exp_pt != param_types)
		return TEE_ERROR_BAD_PARAMETERS;

	if (params[0].value.b)
		return TEE_ERROR_BAD_PARAMETERS;

	va = reg_pair_to_64(params[1].value.a, params[1].value.b);
	sz = ROUNDUP(params[0].value.a, SMALL_PAGE_SIZE);

	res = vm_get_flags(&utc->uctx, va, sz, &vm_flags);
	if (res)
		return res;
	if (vm_flags & VM_FLAG_PERMANENT)
		return TEE_ERROR_ACCESS_DENIED;

	return vm_unmap(&to_user_ta_ctx(s->ctx)->uctx, va, sz);
}

static void ta_bin_close(void *ptr)
{
	struct bin_handle *binh = ptr;

	if (binh) {
		if (binh->op && binh->h)
			binh->op->close(binh->h);
		file_put(binh->f);
	}
	free(binh);
}

static TEE_Result system_open_ta_binary(struct system_ctx *ctx,
					uint32_t param_types,
					TEE_Param params[TEE_NUM_PARAMS])
{
	TEE_Result res = TEE_SUCCESS;
	struct bin_handle *binh = NULL;
	int h = 0;
	TEE_UUID *uuid = NULL;
	uint8_t tag[FILE_TAG_SIZE] = { 0 };
	unsigned int tag_len = sizeof(tag);
	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_MEMREF_INPUT,
					  TEE_PARAM_TYPE_VALUE_OUTPUT,
					  TEE_PARAM_TYPE_NONE,
					  TEE_PARAM_TYPE_NONE);

	if (exp_pt != param_types)
		return TEE_ERROR_BAD_PARAMETERS;
	if (params[0].memref.size != sizeof(*uuid))
		return TEE_ERROR_BAD_PARAMETERS;

	uuid = params[0].memref.buffer;

	binh = calloc(1, sizeof(*binh));
	if (!binh)
		return TEE_ERROR_OUT_OF_MEMORY;

	SCATTERED_ARRAY_FOREACH(binh->op, ta_stores, struct user_ta_store_ops) {
		DMSG("Lookup user TA ELF %pUl (%s)",
		     (void *)uuid, binh->op->description);

		res = binh->op->open(uuid, &binh->h);
		DMSG("res=0x%x", res);
		if (res != TEE_ERROR_ITEM_NOT_FOUND &&
		    res != TEE_ERROR_STORAGE_NOT_AVAILABLE)
			break;
	}
	if (res)
		goto err;

	res = binh->op->get_size(binh->h, &binh->size_bytes);
	if (res)
		goto err;
	res = binh->op->get_tag(binh->h, tag, &tag_len);
	if (res)
		goto err;
	binh->f = file_get_by_tag(tag, tag_len);
	if (!binh->f)
		goto err_oom;

	h = handle_get(&ctx->db, binh);
	if (h < 0)
		goto err_oom;
	params[1].value.a = h;

	return TEE_SUCCESS;
err_oom:
	res = TEE_ERROR_OUT_OF_MEMORY;
err:
	ta_bin_close(binh);
	return res;
}

static TEE_Result system_close_ta_binary(struct system_ctx *ctx,
					 uint32_t param_types,
					 TEE_Param params[TEE_NUM_PARAMS])
{
	TEE_Result res = TEE_SUCCESS;
	struct bin_handle *binh = NULL;
	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_INPUT,
					  TEE_PARAM_TYPE_NONE,
					  TEE_PARAM_TYPE_NONE,
					  TEE_PARAM_TYPE_NONE);

	if (exp_pt != param_types)
		return TEE_ERROR_BAD_PARAMETERS;

	if (params[0].value.b)
		return TEE_ERROR_BAD_PARAMETERS;

	binh = handle_put(&ctx->db, params[0].value.a);
	if (!binh)
		return TEE_ERROR_BAD_PARAMETERS;

	if (binh->offs_bytes < binh->size_bytes)
		res = binh->op->read(binh->h, NULL,
				     binh->size_bytes - binh->offs_bytes);

	ta_bin_close(binh);
	return res;
}

/*
 * Copy data from the TA binary into the buffer at @va. Reads are strictly
 * sequential: @offs_bytes must not precede the current read offset, and any
 * bytes requested beyond the end of the binary are zero-filled.
 */
static TEE_Result binh_copy_to(struct bin_handle *binh, vaddr_t va,
			       size_t offs_bytes, size_t num_bytes)
{
	TEE_Result res = TEE_SUCCESS;
	size_t l = num_bytes;

	if (offs_bytes < binh->offs_bytes)
		return TEE_ERROR_BAD_STATE;
	if (offs_bytes > binh->offs_bytes) {
		res = binh->op->read(binh->h, NULL,
				     offs_bytes - binh->offs_bytes);
		if (res)
			return res;
		binh->offs_bytes = offs_bytes;
	}

	if (binh->offs_bytes + l > binh->size_bytes) {
		size_t rb = binh->size_bytes - binh->offs_bytes;

		res = binh->op->read(binh->h, (void *)va, rb);
		if (res)
			return res;
		memset((uint8_t *)va + rb, 0, l - rb);
		binh->offs_bytes = binh->size_bytes;
	} else {
		res = binh->op->read(binh->h, (void *)va, l);
		if (res)
			return res;
		binh->offs_bytes += l;
	}

	return TEE_SUCCESS;
}

static TEE_Result system_map_ta_binary(struct system_ctx *ctx,
				       struct tee_ta_session *s,
				       uint32_t param_types,
				       TEE_Param params[TEE_NUM_PARAMS])
{
	const uint32_t accept_flags = PTA_SYSTEM_MAP_FLAG_SHAREABLE |
				      PTA_SYSTEM_MAP_FLAG_WRITEABLE |
				      PTA_SYSTEM_MAP_FLAG_EXECUTABLE;
	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_INPUT,
					  TEE_PARAM_TYPE_VALUE_INPUT,
					  TEE_PARAM_TYPE_VALUE_INOUT,
					  TEE_PARAM_TYPE_VALUE_INPUT);
	struct user_ta_ctx *utc = to_user_ta_ctx(s->ctx);
	struct bin_handle *binh = NULL;
	TEE_Result res = TEE_SUCCESS;
	struct file_slice *fs = NULL;
	bool file_is_locked = false;
	struct mobj *mobj = NULL;
	uint32_t offs_bytes = 0;
	uint32_t offs_pages = 0;
	uint32_t num_bytes = 0;
	uint32_t pad_begin = 0;
	uint32_t pad_end = 0;
	size_t num_pages = 0;
	uint32_t flags = 0;
	uint32_t prot = 0;
	vaddr_t va = 0;

	if (exp_pt != param_types)
		return TEE_ERROR_BAD_PARAMETERS;

	binh = handle_lookup(&ctx->db, params[0].value.a);
	if (!binh)
		return TEE_ERROR_BAD_PARAMETERS;
	flags = params[0].value.b;
	offs_bytes = params[1].value.a;
	num_bytes = params[1].value.b;
	va = reg_pair_to_64(params[2].value.a, params[2].value.b);
	pad_begin = params[3].value.a;
	pad_end = params[3].value.b;

	if ((flags & accept_flags) != flags)
		return TEE_ERROR_BAD_PARAMETERS;

	if ((flags & PTA_SYSTEM_MAP_FLAG_SHAREABLE) &&
	    (flags & PTA_SYSTEM_MAP_FLAG_WRITEABLE))
		return TEE_ERROR_BAD_PARAMETERS;

	if ((flags & PTA_SYSTEM_MAP_FLAG_EXECUTABLE) &&
	    (flags & PTA_SYSTEM_MAP_FLAG_WRITEABLE))
		return TEE_ERROR_BAD_PARAMETERS;

	if (offs_bytes & SMALL_PAGE_MASK)
		return TEE_ERROR_BAD_PARAMETERS;

	prot = TEE_MATTR_UR | TEE_MATTR_PR;
	if (flags & PTA_SYSTEM_MAP_FLAG_WRITEABLE)
		prot |= TEE_MATTR_UW | TEE_MATTR_PW;
	if (flags & PTA_SYSTEM_MAP_FLAG_EXECUTABLE)
		prot |= TEE_MATTR_UX;

	offs_pages = offs_bytes >> SMALL_PAGE_SHIFT;
	num_pages = ROUNDUP(num_bytes, SMALL_PAGE_SIZE) / SMALL_PAGE_SIZE;

	if (!file_trylock(binh->f)) {
		/*
		 * Before we can block on the file lock we must make all
		 * our page tables available for reclaiming in order to
		 * avoid a dead-lock with the other thread (which already
		 * is holding the file lock) mapping lots of memory below.
		 */
		tee_mmu_set_ctx(NULL);
		file_lock(binh->f);
		tee_mmu_set_ctx(s->ctx);
	}
	file_is_locked = true;
	fs = file_find_slice(binh->f, offs_pages);
	if (fs) {
		/* If there's a registered slice it has to match */
		if (fs->page_offset != offs_pages ||
		    num_pages > fs->fobj->num_pages) {
			res = TEE_ERROR_BAD_PARAMETERS;
			goto err;
		}

		/* If there's a slice we must be mapping shareable */
		if (!(flags & PTA_SYSTEM_MAP_FLAG_SHAREABLE)) {
			res = TEE_ERROR_BAD_PARAMETERS;
			goto err;
		}

		mobj = mobj_with_fobj_alloc(fs->fobj, binh->f);
		if (!mobj) {
			res = TEE_ERROR_OUT_OF_MEMORY;
			goto err;
		}
		res = vm_map_pad(&utc->uctx, &va, num_pages * SMALL_PAGE_SIZE,
				 prot, VM_FLAG_READONLY,
				 mobj, 0, pad_begin, pad_end);
		mobj_put(mobj);
		if (res)
			goto err;
	} else {
		struct fobj *f = fobj_ta_mem_alloc(num_pages);
		struct file *file = NULL;
		uint32_t vm_flags = 0;

		if (!f) {
			res = TEE_ERROR_OUT_OF_MEMORY;
			goto err;
		}
		if (!(flags & PTA_SYSTEM_MAP_FLAG_WRITEABLE)) {
			file = binh->f;
			vm_flags |= VM_FLAG_READONLY;
		}

		mobj = mobj_with_fobj_alloc(f, file);
		fobj_put(f);
		if (!mobj) {
			res = TEE_ERROR_OUT_OF_MEMORY;
			goto err;
		}
		res = vm_map_pad(&utc->uctx, &va, num_pages * SMALL_PAGE_SIZE,
				 TEE_MATTR_PRW, vm_flags, mobj, 0,
				 pad_begin, pad_end);
		mobj_put(mobj);
		if (res)
			goto err;
		res = binh_copy_to(binh, va, offs_bytes, num_bytes);
		if (res)
			goto err_unmap_va;
		res = vm_set_prot(&utc->uctx, va, num_pages * SMALL_PAGE_SIZE,
				  prot);
		if (res)
			goto err_unmap_va;

		/*
		 * The context is currently active; set it again to update
		 * the mapping.
		 */
		tee_mmu_set_ctx(s->ctx);

		if (!(flags & PTA_SYSTEM_MAP_FLAG_WRITEABLE)) {
			res = file_add_slice(binh->f, f, offs_pages);
			if (res)
				goto err_unmap_va;
		}
	}

	file_unlock(binh->f);

	reg_pair_from_64(va, &params[2].value.a, &params[2].value.b);
	return TEE_SUCCESS;

err_unmap_va:
	if (vm_unmap(&utc->uctx, va, num_pages * SMALL_PAGE_SIZE))
		panic();

	/*
	 * The context is currently active; set it again to update
	 * the mapping.
	 */
	tee_mmu_set_ctx(s->ctx);

err:
	if (file_is_locked)
		file_unlock(binh->f);

	return res;
}

static TEE_Result system_copy_from_ta_binary(struct system_ctx *ctx,
					     uint32_t param_types,
					     TEE_Param params[TEE_NUM_PARAMS])
{
	struct bin_handle *binh = NULL;
	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_INPUT,
					  TEE_PARAM_TYPE_MEMREF_OUTPUT,
					  TEE_PARAM_TYPE_NONE,
					  TEE_PARAM_TYPE_NONE);

	if (exp_pt != param_types)
		return TEE_ERROR_BAD_PARAMETERS;

	binh = handle_lookup(&ctx->db, params[0].value.a);
	if (!binh)
		return TEE_ERROR_BAD_PARAMETERS;

	return binh_copy_to(binh, (vaddr_t)params[1].memref.buffer,
			    params[0].value.b, params[1].memref.size);
}

static TEE_Result system_set_prot(struct tee_ta_session *s,
				  uint32_t param_types,
				  TEE_Param params[TEE_NUM_PARAMS])
{
	const uint32_t accept_flags = PTA_SYSTEM_MAP_FLAG_WRITEABLE |
				      PTA_SYSTEM_MAP_FLAG_EXECUTABLE;
	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_INPUT,
					  TEE_PARAM_TYPE_VALUE_INPUT,
					  TEE_PARAM_TYPE_NONE,
					  TEE_PARAM_TYPE_NONE);
	struct user_ta_ctx *utc = to_user_ta_ctx(s->ctx);
	uint32_t prot = TEE_MATTR_UR | TEE_MATTR_PR;
	TEE_Result res = TEE_SUCCESS;
	uint32_t vm_flags = 0;
	uint32_t flags = 0;
	vaddr_t va = 0;
	size_t sz = 0;

	if (exp_pt != param_types)
		return TEE_ERROR_BAD_PARAMETERS;

	flags = params[0].value.b;

	if ((flags & accept_flags) != flags)
		return TEE_ERROR_BAD_PARAMETERS;
	if (flags & PTA_SYSTEM_MAP_FLAG_WRITEABLE)
		prot |= TEE_MATTR_UW | TEE_MATTR_PW;
	if (flags & PTA_SYSTEM_MAP_FLAG_EXECUTABLE)
		prot |= TEE_MATTR_UX;

	va = reg_pair_to_64(params[1].value.a, params[1].value.b);
	sz = ROUNDUP(params[0].value.a, SMALL_PAGE_SIZE);

	res = vm_get_flags(&utc->uctx, va, sz, &vm_flags);
	if (res)
		return res;
	if (vm_flags & VM_FLAG_PERMANENT)
		return TEE_ERROR_ACCESS_DENIED;

	/*
	 * If the segment is a mapping of a part of a file (vm_flags &
	 * VM_FLAG_READONLY) it cannot be made writeable as all mapped
	 * files are mapped read-only.
	 */
	if ((vm_flags & VM_FLAG_READONLY) &&
	    (prot & (TEE_MATTR_UW | TEE_MATTR_PW)))
		return TEE_ERROR_ACCESS_DENIED;

	return vm_set_prot(&utc->uctx, va, sz, prot);
}

static TEE_Result system_remap(struct tee_ta_session *s, uint32_t param_types,
			       TEE_Param params[TEE_NUM_PARAMS])
{
	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_INPUT,
					  TEE_PARAM_TYPE_VALUE_INPUT,
					  TEE_PARAM_TYPE_VALUE_INOUT,
					  TEE_PARAM_TYPE_VALUE_INPUT);
	struct user_ta_ctx *utc = to_user_ta_ctx(s->ctx);
	TEE_Result res = TEE_SUCCESS;
	uint32_t num_bytes = 0;
	uint32_t pad_begin = 0;
	uint32_t vm_flags = 0;
	uint32_t pad_end = 0;
	vaddr_t old_va = 0;
	vaddr_t new_va = 0;

	if (exp_pt != param_types)
		return TEE_ERROR_BAD_PARAMETERS;

	num_bytes = params[0].value.a;
	old_va = reg_pair_to_64(params[1].value.a, params[1].value.b);
	new_va = reg_pair_to_64(params[2].value.a, params[2].value.b);
	pad_begin = params[3].value.a;
	pad_end = params[3].value.b;

	res = vm_get_flags(&utc->uctx, old_va, num_bytes, &vm_flags);
	if (res)
		return res;
	if (vm_flags & VM_FLAG_PERMANENT)
		return TEE_ERROR_ACCESS_DENIED;

	res = vm_remap(&utc->uctx, &new_va, old_va, num_bytes, pad_begin,
		       pad_end);
	if (!res)
		reg_pair_from_64(new_va, &params[2].value.a,
				 &params[2].value.b);

	return res;
}

/* ldelf has the same architecture/register width as the kernel */
#ifdef ARM32
static const bool is_arm32 = true;
#else
static const bool is_arm32;
#endif

static TEE_Result call_ldelf_dlopen(struct user_ta_ctx *utc, TEE_UUID *uuid,
				    uint32_t flags)
{
	uaddr_t usr_stack = utc->ldelf_stack_ptr;
	TEE_Result res = TEE_ERROR_GENERIC;
	struct dl_entry_arg *arg = NULL;
	uint32_t panic_code = 0;
	uint32_t panicked = 0;

	assert(uuid);

	usr_stack -= ROUNDUP(sizeof(*arg), STACK_ALIGNMENT);
	arg = (struct dl_entry_arg *)usr_stack;

	res = tee_mmu_check_access_rights(&utc->uctx,
					  TEE_MEMORY_ACCESS_READ |
					  TEE_MEMORY_ACCESS_WRITE |
					  TEE_MEMORY_ACCESS_ANY_OWNER,
					  (uaddr_t)arg, sizeof(*arg));
	if (res) {
		EMSG("ldelf stack is inaccessible!");
		return res;
	}

	memset(arg, 0, sizeof(*arg));
	arg->cmd = LDELF_DL_ENTRY_DLOPEN;
	arg->dlopen.uuid = *uuid;
	arg->dlopen.flags = flags;

	res = thread_enter_user_mode((vaddr_t)arg, 0, 0, 0,
				     usr_stack, utc->dl_entry_func,
				     is_arm32, &panicked, &panic_code);
	if (panicked) {
		EMSG("ldelf dl_entry function panicked");
		abort_print_current_ta();
		res = TEE_ERROR_TARGET_DEAD;
	}
	if (!res)
		res = arg->ret;

	return res;
}

static TEE_Result call_ldelf_dlsym(struct user_ta_ctx *utc, TEE_UUID *uuid,
				   const char *sym, size_t maxlen, vaddr_t *val)
{
	uaddr_t usr_stack = utc->ldelf_stack_ptr;
	TEE_Result res = TEE_ERROR_GENERIC;
	struct dl_entry_arg *arg = NULL;
	uint32_t panic_code = 0;
	uint32_t panicked = 0;
	size_t len = strnlen(sym, maxlen);

	if (len == maxlen)
		return TEE_ERROR_BAD_PARAMETERS;

	usr_stack -= ROUNDUP(sizeof(*arg) + len + 1, STACK_ALIGNMENT);
	arg = (struct dl_entry_arg *)usr_stack;

	res = tee_mmu_check_access_rights(&utc->uctx,
					  TEE_MEMORY_ACCESS_READ |
					  TEE_MEMORY_ACCESS_WRITE |
					  TEE_MEMORY_ACCESS_ANY_OWNER,
					  (uaddr_t)arg, sizeof(*arg) + len + 1);
	if (res) {
		EMSG("ldelf stack is inaccessible!");
		return res;
	}

	memset(arg, 0, sizeof(*arg));
	arg->cmd = LDELF_DL_ENTRY_DLSYM;
	arg->dlsym.uuid = *uuid;
	memcpy(arg->dlsym.symbol, sym, len);
	arg->dlsym.symbol[len] = '\0';

	res = thread_enter_user_mode((vaddr_t)arg, 0, 0, 0,
				     usr_stack, utc->dl_entry_func,
				     is_arm32, &panicked, &panic_code);
	if (panicked) {
		EMSG("ldelf dl_entry function panicked");
		abort_print_current_ta();
		res = TEE_ERROR_TARGET_DEAD;
	}
	if (!res) {
		res = arg->ret;
		if (!res)
			*val = arg->dlsym.val;
	}

	return res;
}

static TEE_Result system_dlopen(struct tee_ta_session *cs, uint32_t param_types,
				TEE_Param params[TEE_NUM_PARAMS])
{
	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_MEMREF_INPUT,
					  TEE_PARAM_TYPE_VALUE_INPUT,
					  TEE_PARAM_TYPE_NONE,
					  TEE_PARAM_TYPE_NONE);
	TEE_Result res = TEE_ERROR_GENERIC;
	struct tee_ta_session *s = NULL;
	struct user_ta_ctx *utc = NULL;
	TEE_UUID *uuid = NULL;
	uint32_t flags = 0;

	if (exp_pt != param_types)
		return TEE_ERROR_BAD_PARAMETERS;

	uuid = params[0].memref.buffer;
	if (!uuid || params[0].memref.size != sizeof(*uuid))
		return TEE_ERROR_BAD_PARAMETERS;

	flags = params[1].value.a;

	utc = to_user_ta_ctx(cs->ctx);

	s = tee_ta_pop_current_session();
	res = call_ldelf_dlopen(utc, uuid, flags);
	tee_ta_push_current_session(s);

	return res;
}

static TEE_Result system_dlsym(struct tee_ta_session *cs, uint32_t param_types,
			       TEE_Param params[TEE_NUM_PARAMS])
{
	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_MEMREF_INPUT,
					  TEE_PARAM_TYPE_MEMREF_INPUT,
					  TEE_PARAM_TYPE_VALUE_OUTPUT,
					  TEE_PARAM_TYPE_NONE);
	TEE_Result res = TEE_ERROR_GENERIC;
	struct tee_ta_session *s = NULL;
	struct user_ta_ctx *utc = NULL;
	const char *sym = NULL;
	TEE_UUID *uuid = NULL;
	size_t maxlen = 0;
	vaddr_t va = 0;

	if (exp_pt != param_types)
		return TEE_ERROR_BAD_PARAMETERS;

	uuid = params[0].memref.buffer;
	if (uuid && params[0].memref.size != sizeof(*uuid))
		return TEE_ERROR_BAD_PARAMETERS;

	sym = params[1].memref.buffer;
	if (!sym)
		return TEE_ERROR_BAD_PARAMETERS;
	maxlen = params[1].memref.size;

	utc = to_user_ta_ctx(cs->ctx);

	s = tee_ta_pop_current_session();
	res = call_ldelf_dlsym(utc, uuid, sym, maxlen, &va);
	tee_ta_push_current_session(s);

	if (!res)
		reg_pair_from_64(va, &params[2].value.a, &params[2].value.b);

	return res;
}

static TEE_Result open_session(uint32_t param_types __unused,
			       TEE_Param params[TEE_NUM_PARAMS] __unused,
			       void **sess_ctx)
{
	struct tee_ta_session *s = NULL;
	struct system_ctx *ctx = NULL;

	/* Check that we're called from a user TA */
	s = tee_ta_get_calling_session();
	if (!s)
		return TEE_ERROR_ACCESS_DENIED;
	if (!is_user_ta_ctx(s->ctx))
		return TEE_ERROR_ACCESS_DENIED;

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx)
		return TEE_ERROR_OUT_OF_MEMORY;

	*sess_ctx = ctx;

	return TEE_SUCCESS;
}

static void close_session(void *sess_ctx)
{
	struct system_ctx *ctx = sess_ctx;

	handle_db_destroy(&ctx->db, ta_bin_close);
	free(ctx);
}

static TEE_Result invoke_command(void *sess_ctx, uint32_t cmd_id,
				 uint32_t param_types,
				 TEE_Param params[TEE_NUM_PARAMS])
{
	struct tee_ta_session *s = tee_ta_get_calling_session();

	switch (cmd_id) {
	case PTA_SYSTEM_ADD_RNG_ENTROPY:
		return system_rng_reseed(s, param_types, params);
	case PTA_SYSTEM_DERIVE_TA_UNIQUE_KEY:
		return system_derive_ta_unique_key(s, param_types, params);
	case PTA_SYSTEM_MAP_ZI:
		return system_map_zi(s, param_types, params);
	case PTA_SYSTEM_UNMAP:
		return system_unmap(s, param_types, params);
	case PTA_SYSTEM_OPEN_TA_BINARY:
		return system_open_ta_binary(sess_ctx, param_types, params);
	case PTA_SYSTEM_CLOSE_TA_BINARY:
		return system_close_ta_binary(sess_ctx, param_types, params);
	case PTA_SYSTEM_MAP_TA_BINARY:
		return system_map_ta_binary(sess_ctx, s, param_types, params);
	case PTA_SYSTEM_COPY_FROM_TA_BINARY:
		return system_copy_from_ta_binary(sess_ctx, param_types,
						  params);
	case PTA_SYSTEM_SET_PROT:
		return system_set_prot(s, param_types, params);
	case PTA_SYSTEM_REMAP:
		return system_remap(s, param_types, params);
	case PTA_SYSTEM_DLOPEN:
		return system_dlopen(s, param_types, params);
	case PTA_SYSTEM_DLSYM:
		return system_dlsym(s, param_types, params);
	default:
		break;
	}

	return TEE_ERROR_NOT_IMPLEMENTED;
}

pseudo_ta_register(.uuid = PTA_SYSTEM_UUID, .name = "system.pta",
		   .flags = PTA_DEFAULT_FLAGS | TA_FLAG_CONCURRENT,
		   .open_session_entry_point = open_session,
		   .close_session_entry_point = close_session,
		   .invoke_command_entry_point = invoke_command);
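
/*
 * Illustrative caller-side sketch (kept in a comment, not compiled here):
 * the open_session() check above restricts this PTA to user TA callers,
 * which reach it through the GP Internal Core API. The hypothetical helper
 * below shows one plausible way a TA could request a TA-unique key with
 * PTA_SYSTEM_DERIVE_TA_UNIQUE_KEY. The helper name, the "extra" input data
 * and the 32-byte key size are assumptions for illustration only; the key
 * size must lie within TA_DERIVED_KEY_MIN_SIZE..TA_DERIVED_KEY_MAX_SIZE and
 * the output buffer must be writable secure memory (see
 * system_derive_ta_unique_key() above). params[0] carries optional extra
 * data that is appended to the TA UUID before the HUK subkey derivation,
 * and params[1] receives the derived key.
 *
 *	static TEE_Result derive_example_key(uint8_t key[32])
 *	{
 *		const TEE_UUID pta_uuid = PTA_SYSTEM_UUID;
 *		static const char extra[] = "example-subkey";
 *		TEE_TASessionHandle sess = TEE_HANDLE_NULL;
 *		TEE_Param params[TEE_NUM_PARAMS] = { };
 *		uint32_t pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_MEMREF_INPUT,
 *					      TEE_PARAM_TYPE_MEMREF_OUTPUT,
 *					      TEE_PARAM_TYPE_NONE,
 *					      TEE_PARAM_TYPE_NONE);
 *		uint32_t ret_orig = 0;
 *		TEE_Result res = TEE_ERROR_GENERIC;
 *
 *		res = TEE_OpenTASession(&pta_uuid, TEE_TIMEOUT_INFINITE,
 *					0, NULL, &sess, &ret_orig);
 *		if (res)
 *			return res;
 *
 *		params[0].memref.buffer = (void *)extra;
 *		params[0].memref.size = sizeof(extra);
 *		params[1].memref.buffer = key;
 *		params[1].memref.size = 32;
 *		res = TEE_InvokeTACommand(sess, TEE_TIMEOUT_INFINITE,
 *					  PTA_SYSTEM_DERIVE_TA_UNIQUE_KEY,
 *					  pt, params, &ret_orig);
 *		TEE_CloseTASession(sess);
 *		return res;
 *	}
 */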