// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2018-2019, Linaro Limited
 */

#include <assert.h>
#include <crypto/crypto.h>
#include <kernel/handle.h>
#include <kernel/huk_subkey.h>
#include <kernel/misc.h>
#include <kernel/msg_param.h>
#include <kernel/pseudo_ta.h>
#include <kernel/tpm.h>
#include <kernel/user_ta.h>
#include <kernel/user_ta_store.h>
#include <ldelf.h>
#include <mm/file.h>
#include <mm/fobj.h>
#include <mm/tee_mmu.h>
#include <pta_system.h>
#include <stdlib_ext.h>
#include <stdlib.h>
#include <string.h>
#include <tee_api_defines_extensions.h>
#include <tee_api_defines.h>
#include <util.h>

struct bin_handle {
	const struct user_ta_store_ops *op;
	struct user_ta_store_handle *h;
	struct file *f;
	size_t offs_bytes;
	size_t size_bytes;
};

struct system_ctx {
	struct handle_db db;
	const struct user_ta_store_ops *store_op;
};

static unsigned int system_pnum;

static TEE_Result system_rng_reseed(struct tee_ta_session *s __unused,
				    uint32_t param_types,
				    TEE_Param params[TEE_NUM_PARAMS])
{
	size_t entropy_sz;
	uint8_t *entropy_input;
	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_MEMREF_INPUT,
					  TEE_PARAM_TYPE_NONE,
					  TEE_PARAM_TYPE_NONE,
					  TEE_PARAM_TYPE_NONE);

	if (exp_pt != param_types)
		return TEE_ERROR_BAD_PARAMETERS;
	entropy_input = params[0].memref.buffer;
	entropy_sz = params[0].memref.size;

	if (!entropy_sz || !entropy_input)
		return TEE_ERROR_BAD_PARAMETERS;

	crypto_rng_add_event(CRYPTO_RNG_SRC_NONSECURE, &system_pnum,
			     entropy_input, entropy_sz);
	return TEE_SUCCESS;
}

static TEE_Result system_derive_ta_unique_key(struct tee_ta_session *s,
					      uint32_t param_types,
					      TEE_Param params[TEE_NUM_PARAMS])
{
	size_t data_len = sizeof(TEE_UUID);
	TEE_Result res = TEE_ERROR_GENERIC;
	uint8_t *data = NULL;
	uint32_t access_flags = 0;
	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_MEMREF_INPUT,
					  TEE_PARAM_TYPE_MEMREF_OUTPUT,
					  TEE_PARAM_TYPE_NONE,
					  TEE_PARAM_TYPE_NONE);
	struct user_ta_ctx *utc = NULL;

	if (exp_pt != param_types)
		return TEE_ERROR_BAD_PARAMETERS;

	if (params[0].memref.size > TA_DERIVED_EXTRA_DATA_MAX_SIZE ||
	    params[1].memref.size < TA_DERIVED_KEY_MIN_SIZE ||
	    params[1].memref.size > TA_DERIVED_KEY_MAX_SIZE)
		return TEE_ERROR_BAD_PARAMETERS;

	utc = to_user_ta_ctx(s->ctx);

	/*
	 * The derived key shall not end up in non-secure memory by
	 * mistake.
	 *
	 * Note that we're allowing shared memory as long as it's
	 * secure. This is needed because a TA always uses shared memory
	 * when communicating with another TA.
	 */
	access_flags = TEE_MEMORY_ACCESS_WRITE | TEE_MEMORY_ACCESS_ANY_OWNER |
		       TEE_MEMORY_ACCESS_SECURE;
	res = tee_mmu_check_access_rights(&utc->uctx, access_flags,
					  (uaddr_t)params[1].memref.buffer,
					  params[1].memref.size);
	if (res != TEE_SUCCESS)
		return TEE_ERROR_SECURITY;

	/* Take extra data into account. */
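	/*
	 * For illustration, the derivation input assembled below is:
	 *
	 *   data = [ TA UUID (16 bytes) | extra data from params[0] ]
	 */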
	if (ADD_OVERFLOW(data_len, params[0].memref.size, &data_len))
		return TEE_ERROR_SECURITY;

	data = calloc(data_len, 1);
	if (!data)
		return TEE_ERROR_OUT_OF_MEMORY;

	memcpy(data, &s->ctx->uuid, sizeof(TEE_UUID));

	/* Append the user provided data */
	memcpy(data + sizeof(TEE_UUID), params[0].memref.buffer,
	       params[0].memref.size);

	res = huk_subkey_derive(HUK_SUBKEY_UNIQUE_TA, data, data_len,
				params[1].memref.buffer,
				params[1].memref.size);
	free_wipe(data);

	return res;
}

static TEE_Result system_map_zi(struct tee_ta_session *s, uint32_t param_types,
				TEE_Param params[TEE_NUM_PARAMS])
{
	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_INPUT,
					  TEE_PARAM_TYPE_VALUE_INOUT,
					  TEE_PARAM_TYPE_VALUE_INPUT,
					  TEE_PARAM_TYPE_NONE);
	struct user_ta_ctx *utc = to_user_ta_ctx(s->ctx);
	uint32_t prot = TEE_MATTR_URW | TEE_MATTR_PRW;
	TEE_Result res = TEE_ERROR_GENERIC;
	struct mobj *mobj = NULL;
	uint32_t pad_begin = 0;
	uint32_t vm_flags = 0;
	struct fobj *f = NULL;
	uint32_t pad_end = 0;
	size_t num_bytes = 0;
	vaddr_t va = 0;

	if (exp_pt != param_types)
		return TEE_ERROR_BAD_PARAMETERS;
	if (params[0].value.b & ~PTA_SYSTEM_MAP_FLAG_SHAREABLE)
		return TEE_ERROR_BAD_PARAMETERS;

	if (params[0].value.b & PTA_SYSTEM_MAP_FLAG_SHAREABLE)
		vm_flags |= VM_FLAG_SHAREABLE;

	num_bytes = params[0].value.a;
	va = reg_pair_to_64(params[1].value.a, params[1].value.b);
	pad_begin = params[2].value.a;
	pad_end = params[2].value.b;

	f = fobj_ta_mem_alloc(ROUNDUP_DIV(num_bytes, SMALL_PAGE_SIZE));
	if (!f)
		return TEE_ERROR_OUT_OF_MEMORY;
	mobj = mobj_with_fobj_alloc(f, NULL);
	fobj_put(f);
	if (!mobj)
		return TEE_ERROR_OUT_OF_MEMORY;
	res = vm_map_pad(&utc->uctx, &va, num_bytes, prot, vm_flags,
			 mobj, 0, pad_begin, pad_end);
	mobj_put(mobj);
	if (!res)
		reg_pair_from_64(va, &params[1].value.a, &params[1].value.b);

	return res;
}

static TEE_Result system_unmap(struct tee_ta_session *s, uint32_t param_types,
			       TEE_Param params[TEE_NUM_PARAMS])
{
	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_INPUT,
					  TEE_PARAM_TYPE_VALUE_INPUT,
					  TEE_PARAM_TYPE_NONE,
					  TEE_PARAM_TYPE_NONE);
	struct user_ta_ctx *utc = to_user_ta_ctx(s->ctx);
	TEE_Result res = TEE_SUCCESS;
	uint32_t vm_flags = 0;
	vaddr_t end_va = 0;
	vaddr_t va = 0;
	size_t sz = 0;

	if (exp_pt != param_types)
		return TEE_ERROR_BAD_PARAMETERS;

	if (params[0].value.b)
		return TEE_ERROR_BAD_PARAMETERS;

	va = reg_pair_to_64(params[1].value.a, params[1].value.b);
	sz = ROUNDUP(params[0].value.a, SMALL_PAGE_SIZE);
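
	/*
	 * For illustration, a caller encodes the arguments roughly as
	 * below (a hypothetical caller-side sketch; the address is
	 * split as a register pair with the most significant half in
	 * value.a, matching reg_pair_to_64()):
	 *
	 *   params[0].value.a = num_bytes;
	 *   params[0].value.b = 0;
	 *   params[1].value.a = va >> 32;
	 *   params[1].value.b = va;
	 */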
204 */ 205 if (ADD_OVERFLOW(va, sz, &end_va)) 206 return TEE_ERROR_BAD_PARAMETERS; 207 208 res = vm_get_flags(&utc->uctx, va, sz, &vm_flags); 209 if (res) 210 return res; 211 if (vm_flags & VM_FLAG_PERMANENT) 212 return TEE_ERROR_ACCESS_DENIED; 213 214 return vm_unmap(&to_user_ta_ctx(s->ctx)->uctx, va, sz); 215 } 216 217 static void ta_bin_close(void *ptr) 218 { 219 struct bin_handle *binh = ptr; 220 221 if (binh) { 222 if (binh->op && binh->h) 223 binh->op->close(binh->h); 224 file_put(binh->f); 225 } 226 free(binh); 227 } 228 229 static TEE_Result system_open_ta_binary(struct system_ctx *ctx, 230 uint32_t param_types, 231 TEE_Param params[TEE_NUM_PARAMS]) 232 { 233 TEE_Result res = TEE_SUCCESS; 234 struct bin_handle *binh = NULL; 235 int h = 0; 236 TEE_UUID *uuid = NULL; 237 uint8_t tag[FILE_TAG_SIZE] = { 0 }; 238 unsigned int tag_len = sizeof(tag); 239 uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_MEMREF_INPUT, 240 TEE_PARAM_TYPE_VALUE_OUTPUT, 241 TEE_PARAM_TYPE_NONE, 242 TEE_PARAM_TYPE_NONE); 243 244 if (exp_pt != param_types) 245 return TEE_ERROR_BAD_PARAMETERS; 246 if (params[0].memref.size != sizeof(*uuid)) 247 return TEE_ERROR_BAD_PARAMETERS; 248 249 uuid = params[0].memref.buffer; 250 251 binh = calloc(1, sizeof(*binh)); 252 if (!binh) 253 return TEE_ERROR_OUT_OF_MEMORY; 254 255 SCATTERED_ARRAY_FOREACH(binh->op, ta_stores, struct user_ta_store_ops) { 256 DMSG("Lookup user TA ELF %pUl (%s)", 257 (void *)uuid, binh->op->description); 258 259 res = binh->op->open(uuid, &binh->h); 260 DMSG("res=0x%x", res); 261 if (res != TEE_ERROR_ITEM_NOT_FOUND && 262 res != TEE_ERROR_STORAGE_NOT_AVAILABLE) 263 break; 264 } 265 if (res) 266 goto err; 267 268 res = binh->op->get_size(binh->h, &binh->size_bytes); 269 if (res) 270 goto err; 271 res = binh->op->get_tag(binh->h, tag, &tag_len); 272 if (res) 273 goto err; 274 binh->f = file_get_by_tag(tag, tag_len); 275 if (!binh->f) 276 goto err_oom; 277 278 h = handle_get(&ctx->db, binh); 279 if (h < 0) 280 goto err_oom; 281 params[0].value.a = h; 282 283 return TEE_SUCCESS; 284 err_oom: 285 res = TEE_ERROR_OUT_OF_MEMORY; 286 err: 287 ta_bin_close(binh); 288 return res; 289 } 290 291 static TEE_Result system_close_ta_binary(struct system_ctx *ctx, 292 uint32_t param_types, 293 TEE_Param params[TEE_NUM_PARAMS]) 294 { 295 TEE_Result res = TEE_SUCCESS; 296 struct bin_handle *binh = NULL; 297 uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_INPUT, 298 TEE_PARAM_TYPE_NONE, 299 TEE_PARAM_TYPE_NONE, 300 TEE_PARAM_TYPE_NONE); 301 302 if (exp_pt != param_types) 303 return TEE_ERROR_BAD_PARAMETERS; 304 305 if (params[0].value.b) 306 return TEE_ERROR_BAD_PARAMETERS; 307 308 binh = handle_put(&ctx->db, params[0].value.a); 309 if (!binh) 310 return TEE_ERROR_BAD_PARAMETERS; 311 312 if (binh->offs_bytes < binh->size_bytes) 313 res = binh->op->read(binh->h, NULL, 314 binh->size_bytes - binh->offs_bytes); 315 316 ta_bin_close(binh); 317 return res; 318 } 319 320 static TEE_Result binh_copy_to(struct bin_handle *binh, vaddr_t va, 321 size_t offs_bytes, size_t num_bytes) 322 { 323 TEE_Result res = TEE_SUCCESS; 324 size_t next_offs = 0; 325 326 if (offs_bytes < binh->offs_bytes) 327 return TEE_ERROR_BAD_STATE; 328 329 if (ADD_OVERFLOW(offs_bytes, num_bytes, &next_offs)) 330 return TEE_ERROR_BAD_PARAMETERS; 331 332 if (offs_bytes > binh->offs_bytes) { 333 res = binh->op->read(binh->h, NULL, 334 offs_bytes - binh->offs_bytes); 335 if (res) 336 return res; 337 binh->offs_bytes = offs_bytes; 338 } 339 340 if (next_offs > binh->size_bytes) { 341 size_t rb = 
		res = binh->op->read(binh->h, (void *)va, rb);
		if (res)
			return res;
		memset((uint8_t *)va + rb, 0, num_bytes - rb);
		binh->offs_bytes = binh->size_bytes;
	} else {
		res = binh->op->read(binh->h, (void *)va, num_bytes);
		if (res)
			return res;
		binh->offs_bytes = next_offs;
	}

	return TEE_SUCCESS;
}

static TEE_Result system_map_ta_binary(struct system_ctx *ctx,
				       struct tee_ta_session *s,
				       uint32_t param_types,
				       TEE_Param params[TEE_NUM_PARAMS])
{
	const uint32_t accept_flags = PTA_SYSTEM_MAP_FLAG_SHAREABLE |
				      PTA_SYSTEM_MAP_FLAG_WRITEABLE |
				      PTA_SYSTEM_MAP_FLAG_EXECUTABLE;
	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_INPUT,
					  TEE_PARAM_TYPE_VALUE_INPUT,
					  TEE_PARAM_TYPE_VALUE_INOUT,
					  TEE_PARAM_TYPE_VALUE_INPUT);
	struct user_ta_ctx *utc = to_user_ta_ctx(s->ctx);
	struct bin_handle *binh = NULL;
	uint32_t num_rounded_bytes = 0;
	TEE_Result res = TEE_SUCCESS;
	struct file_slice *fs = NULL;
	bool file_is_locked = false;
	struct mobj *mobj = NULL;
	uint32_t offs_bytes = 0;
	uint32_t offs_pages = 0;
	uint32_t num_bytes = 0;
	uint32_t pad_begin = 0;
	uint32_t pad_end = 0;
	size_t num_pages = 0;
	uint32_t flags = 0;
	uint32_t prot = 0;
	vaddr_t va = 0;

	if (exp_pt != param_types)
		return TEE_ERROR_BAD_PARAMETERS;

	binh = handle_lookup(&ctx->db, params[0].value.a);
	if (!binh)
		return TEE_ERROR_BAD_PARAMETERS;
	flags = params[0].value.b;
	offs_bytes = params[1].value.a;
	num_bytes = params[1].value.b;
	va = reg_pair_to_64(params[2].value.a, params[2].value.b);
	pad_begin = params[3].value.a;
	pad_end = params[3].value.b;

	if ((flags & accept_flags) != flags)
		return TEE_ERROR_BAD_PARAMETERS;

	if ((flags & PTA_SYSTEM_MAP_FLAG_SHAREABLE) &&
	    (flags & PTA_SYSTEM_MAP_FLAG_WRITEABLE))
		return TEE_ERROR_BAD_PARAMETERS;

	if ((flags & PTA_SYSTEM_MAP_FLAG_EXECUTABLE) &&
	    (flags & PTA_SYSTEM_MAP_FLAG_WRITEABLE))
		return TEE_ERROR_BAD_PARAMETERS;

	if (offs_bytes & SMALL_PAGE_MASK)
		return TEE_ERROR_BAD_PARAMETERS;

	prot = TEE_MATTR_UR | TEE_MATTR_PR;
	if (flags & PTA_SYSTEM_MAP_FLAG_WRITEABLE)
		prot |= TEE_MATTR_UW | TEE_MATTR_PW;
	if (flags & PTA_SYSTEM_MAP_FLAG_EXECUTABLE)
		prot |= TEE_MATTR_UX;

	offs_pages = offs_bytes >> SMALL_PAGE_SHIFT;
	if (ROUNDUP_OVERFLOW(num_bytes, SMALL_PAGE_SIZE, &num_rounded_bytes))
		return TEE_ERROR_BAD_PARAMETERS;
	num_pages = num_rounded_bytes / SMALL_PAGE_SIZE;

	if (!file_trylock(binh->f)) {
		/*
		 * Before we can block on the file lock we must make all
		 * our page tables available for reclaiming in order to
		 * avoid a deadlock with the other thread (which is
		 * already holding the file lock) mapping lots of memory
		 * below.
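		 * Once we have acquired the lock, our mapping is
		 * restored with tee_mmu_set_ctx() below.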
431 */ 432 tee_mmu_set_ctx(NULL); 433 file_lock(binh->f); 434 tee_mmu_set_ctx(s->ctx); 435 } 436 file_is_locked = true; 437 fs = file_find_slice(binh->f, offs_pages); 438 if (fs) { 439 /* If there's registered slice it has to match */ 440 if (fs->page_offset != offs_pages || 441 num_pages > fs->fobj->num_pages) { 442 res = TEE_ERROR_BAD_PARAMETERS; 443 goto err; 444 } 445 446 /* If there's a slice we must be mapping shareable */ 447 if (!(flags & PTA_SYSTEM_MAP_FLAG_SHAREABLE)) { 448 res = TEE_ERROR_BAD_PARAMETERS; 449 goto err; 450 } 451 452 mobj = mobj_with_fobj_alloc(fs->fobj, binh->f); 453 if (!mobj) { 454 res = TEE_ERROR_OUT_OF_MEMORY; 455 goto err; 456 } 457 res = vm_map_pad(&utc->uctx, &va, num_rounded_bytes, 458 prot, VM_FLAG_READONLY, 459 mobj, 0, pad_begin, pad_end); 460 mobj_put(mobj); 461 if (res) 462 goto err; 463 } else { 464 struct fobj *f = fobj_ta_mem_alloc(num_pages); 465 struct file *file = NULL; 466 uint32_t vm_flags = 0; 467 468 if (!f) { 469 res = TEE_ERROR_OUT_OF_MEMORY; 470 goto err; 471 } 472 if (!(flags & PTA_SYSTEM_MAP_FLAG_WRITEABLE)) { 473 file = binh->f; 474 vm_flags |= VM_FLAG_READONLY; 475 } 476 477 mobj = mobj_with_fobj_alloc(f, file); 478 fobj_put(f); 479 if (!mobj) { 480 res = TEE_ERROR_OUT_OF_MEMORY; 481 goto err; 482 } 483 res = vm_map_pad(&utc->uctx, &va, num_rounded_bytes, 484 TEE_MATTR_PRW, vm_flags, mobj, 0, 485 pad_begin, pad_end); 486 mobj_put(mobj); 487 if (res) 488 goto err; 489 res = binh_copy_to(binh, va, offs_bytes, num_bytes); 490 if (res) 491 goto err_unmap_va; 492 res = vm_set_prot(&utc->uctx, va, num_rounded_bytes, 493 prot); 494 if (res) 495 goto err_unmap_va; 496 497 /* 498 * The context currently is active set it again to update 499 * the mapping. 500 */ 501 tee_mmu_set_ctx(s->ctx); 502 503 if (!(flags & PTA_SYSTEM_MAP_FLAG_WRITEABLE)) { 504 res = file_add_slice(binh->f, f, offs_pages); 505 if (res) 506 goto err_unmap_va; 507 } 508 } 509 510 file_unlock(binh->f); 511 512 reg_pair_from_64(va, ¶ms[2].value.a, ¶ms[2].value.b); 513 return TEE_SUCCESS; 514 515 err_unmap_va: 516 if (vm_unmap(&utc->uctx, va, num_rounded_bytes)) 517 panic(); 518 519 /* 520 * The context currently is active set it again to update 521 * the mapping. 
522 */ 523 tee_mmu_set_ctx(s->ctx); 524 525 err: 526 if (file_is_locked) 527 file_unlock(binh->f); 528 529 return res; 530 } 531 532 static TEE_Result system_copy_from_ta_binary(struct system_ctx *ctx, 533 uint32_t param_types, 534 TEE_Param params[TEE_NUM_PARAMS]) 535 { 536 struct bin_handle *binh = NULL; 537 uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_INPUT, 538 TEE_PARAM_TYPE_MEMREF_OUTPUT, 539 TEE_PARAM_TYPE_NONE, 540 TEE_PARAM_TYPE_NONE); 541 542 if (exp_pt != param_types) 543 return TEE_ERROR_BAD_PARAMETERS; 544 545 binh = handle_lookup(&ctx->db, params[0].value.a); 546 if (!binh) 547 return TEE_ERROR_BAD_PARAMETERS; 548 549 return binh_copy_to(binh, (vaddr_t)params[1].memref.buffer, 550 params[0].value.b, params[1].memref.size); 551 } 552 553 static TEE_Result system_set_prot(struct tee_ta_session *s, 554 uint32_t param_types, 555 TEE_Param params[TEE_NUM_PARAMS]) 556 { 557 const uint32_t accept_flags = PTA_SYSTEM_MAP_FLAG_WRITEABLE | 558 PTA_SYSTEM_MAP_FLAG_EXECUTABLE; 559 uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_INPUT, 560 TEE_PARAM_TYPE_VALUE_INPUT, 561 TEE_PARAM_TYPE_NONE, 562 TEE_PARAM_TYPE_NONE); 563 struct user_ta_ctx *utc = to_user_ta_ctx(s->ctx); 564 uint32_t prot = TEE_MATTR_UR | TEE_MATTR_PR; 565 TEE_Result res = TEE_SUCCESS; 566 uint32_t vm_flags = 0; 567 uint32_t flags = 0; 568 vaddr_t end_va = 0; 569 vaddr_t va = 0; 570 size_t sz = 0; 571 572 if (exp_pt != param_types) 573 return TEE_ERROR_BAD_PARAMETERS; 574 575 flags = params[0].value.b; 576 577 if ((flags & accept_flags) != flags) 578 return TEE_ERROR_BAD_PARAMETERS; 579 if (flags & PTA_SYSTEM_MAP_FLAG_WRITEABLE) 580 prot |= TEE_MATTR_UW | TEE_MATTR_PW; 581 if (flags & PTA_SYSTEM_MAP_FLAG_EXECUTABLE) 582 prot |= TEE_MATTR_UX; 583 584 va = reg_pair_to_64(params[1].value.a, params[1].value.b); 585 sz = ROUNDUP(params[0].value.a, SMALL_PAGE_SIZE); 586 587 /* 588 * The vm_get_flags() and vm_set_prot() are supposed to detect or 589 * handle overflow directly or indirectly. However, this function 590 * an API function so an extra guard here is in order. If nothing 591 * else to make it easier to review the code. 592 */ 593 if (ADD_OVERFLOW(va, sz, &end_va)) 594 return TEE_ERROR_BAD_PARAMETERS; 595 596 res = vm_get_flags(&utc->uctx, va, sz, &vm_flags); 597 if (res) 598 return res; 599 if (vm_flags & VM_FLAG_PERMANENT) 600 return TEE_ERROR_ACCESS_DENIED; 601 602 /* 603 * If the segment is a mapping of a part of a file (vm_flags & 604 * VM_FLAG_READONLY) it cannot be made writeable as all mapped 605 * files are mapped read-only. 
606 */ 607 if ((vm_flags & VM_FLAG_READONLY) && 608 (prot & (TEE_MATTR_UW | TEE_MATTR_PW))) 609 return TEE_ERROR_ACCESS_DENIED; 610 611 return vm_set_prot(&utc->uctx, va, sz, prot); 612 } 613 614 static TEE_Result system_remap(struct tee_ta_session *s, uint32_t param_types, 615 TEE_Param params[TEE_NUM_PARAMS]) 616 { 617 uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_INPUT, 618 TEE_PARAM_TYPE_VALUE_INPUT, 619 TEE_PARAM_TYPE_VALUE_INOUT, 620 TEE_PARAM_TYPE_VALUE_INPUT); 621 struct user_ta_ctx *utc = to_user_ta_ctx(s->ctx); 622 TEE_Result res = TEE_SUCCESS; 623 uint32_t num_bytes = 0; 624 uint32_t pad_begin = 0; 625 uint32_t vm_flags = 0; 626 uint32_t pad_end = 0; 627 vaddr_t old_va = 0; 628 vaddr_t new_va = 0; 629 630 if (exp_pt != param_types) 631 return TEE_ERROR_BAD_PARAMETERS; 632 633 num_bytes = params[0].value.a; 634 old_va = reg_pair_to_64(params[1].value.a, params[1].value.b); 635 new_va = reg_pair_to_64(params[2].value.a, params[2].value.b); 636 pad_begin = params[3].value.a; 637 pad_end = params[3].value.b; 638 639 res = vm_get_flags(&utc->uctx, old_va, num_bytes, &vm_flags); 640 if (res) 641 return res; 642 if (vm_flags & VM_FLAG_PERMANENT) 643 return TEE_ERROR_ACCESS_DENIED; 644 645 res = vm_remap(&utc->uctx, &new_va, old_va, num_bytes, pad_begin, 646 pad_end); 647 if (!res) 648 reg_pair_from_64(new_va, ¶ms[2].value.a, 649 ¶ms[2].value.b); 650 651 return res; 652 } 653 654 /* ldelf has the same architecture/register width as the kernel */ 655 #ifdef ARM32 656 static const bool is_arm32 = true; 657 #else 658 static const bool is_arm32; 659 #endif 660 661 static TEE_Result call_ldelf_dlopen(struct user_ta_ctx *utc, TEE_UUID *uuid, 662 uint32_t flags) 663 { 664 uaddr_t usr_stack = utc->ldelf_stack_ptr; 665 TEE_Result res = TEE_ERROR_GENERIC; 666 struct dl_entry_arg *arg = NULL; 667 uint32_t panic_code = 0; 668 uint32_t panicked = 0; 669 670 assert(uuid); 671 672 usr_stack -= ROUNDUP(sizeof(*arg), STACK_ALIGNMENT); 673 arg = (struct dl_entry_arg *)usr_stack; 674 675 res = tee_mmu_check_access_rights(&utc->uctx, 676 TEE_MEMORY_ACCESS_READ | 677 TEE_MEMORY_ACCESS_WRITE | 678 TEE_MEMORY_ACCESS_ANY_OWNER, 679 (uaddr_t)arg, sizeof(*arg)); 680 if (res) { 681 EMSG("ldelf stack is inaccessible!"); 682 return res; 683 } 684 685 memset(arg, 0, sizeof(*arg)); 686 arg->cmd = LDELF_DL_ENTRY_DLOPEN; 687 arg->dlopen.uuid = *uuid; 688 arg->dlopen.flags = flags; 689 690 res = thread_enter_user_mode((vaddr_t)arg, 0, 0, 0, 691 usr_stack, utc->dl_entry_func, 692 is_arm32, &panicked, &panic_code); 693 if (panicked) { 694 EMSG("ldelf dl_entry function panicked"); 695 abort_print_current_ta(); 696 res = TEE_ERROR_TARGET_DEAD; 697 } 698 if (!res) 699 res = arg->ret; 700 701 return res; 702 } 703 704 static TEE_Result call_ldelf_dlsym(struct user_ta_ctx *utc, TEE_UUID *uuid, 705 const char *sym, size_t maxlen, vaddr_t *val) 706 { 707 uaddr_t usr_stack = utc->ldelf_stack_ptr; 708 TEE_Result res = TEE_ERROR_GENERIC; 709 struct dl_entry_arg *arg = NULL; 710 uint32_t panic_code = 0; 711 uint32_t panicked = 0; 712 size_t len = strnlen(sym, maxlen); 713 714 if (len == maxlen) 715 return TEE_ERROR_BAD_PARAMETERS; 716 717 usr_stack -= ROUNDUP(sizeof(*arg) + len + 1, STACK_ALIGNMENT); 718 arg = (struct dl_entry_arg *)usr_stack; 719 720 res = tee_mmu_check_access_rights(&utc->uctx, 721 TEE_MEMORY_ACCESS_READ | 722 TEE_MEMORY_ACCESS_WRITE | 723 TEE_MEMORY_ACCESS_ANY_OWNER, 724 (uaddr_t)arg, sizeof(*arg) + len + 1); 725 if (res) { 726 EMSG("ldelf stack is inaccessible!"); 727 return res; 728 } 729 730 
	memset(arg, 0, sizeof(*arg));
	arg->cmd = LDELF_DL_ENTRY_DLSYM;
	arg->dlsym.uuid = *uuid;
	memcpy(arg->dlsym.symbol, sym, len);
	arg->dlsym.symbol[len] = '\0';

	res = thread_enter_user_mode((vaddr_t)arg, 0, 0, 0,
				     usr_stack, utc->dl_entry_func,
				     is_arm32, &panicked, &panic_code);
	if (panicked) {
		EMSG("ldelf dl_entry function panicked");
		abort_print_current_ta();
		res = TEE_ERROR_TARGET_DEAD;
	}
	if (!res) {
		res = arg->ret;
		if (!res)
			*val = arg->dlsym.val;
	}

	return res;
}

static TEE_Result system_dlopen(struct tee_ta_session *cs, uint32_t param_types,
				TEE_Param params[TEE_NUM_PARAMS])
{
	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_MEMREF_INPUT,
					  TEE_PARAM_TYPE_VALUE_INPUT,
					  TEE_PARAM_TYPE_NONE,
					  TEE_PARAM_TYPE_NONE);
	TEE_Result res = TEE_ERROR_GENERIC;
	struct tee_ta_session *s = NULL;
	struct user_ta_ctx *utc = NULL;
	TEE_UUID *uuid = NULL;
	uint32_t flags = 0;

	if (exp_pt != param_types)
		return TEE_ERROR_BAD_PARAMETERS;

	uuid = params[0].memref.buffer;
	if (!uuid || params[0].memref.size != sizeof(*uuid))
		return TEE_ERROR_BAD_PARAMETERS;

	flags = params[1].value.a;

	utc = to_user_ta_ctx(cs->ctx);

	s = tee_ta_pop_current_session();
	res = call_ldelf_dlopen(utc, uuid, flags);
	tee_ta_push_current_session(s);

	return res;
}

static TEE_Result system_dlsym(struct tee_ta_session *cs, uint32_t param_types,
			       TEE_Param params[TEE_NUM_PARAMS])
{
	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_MEMREF_INPUT,
					  TEE_PARAM_TYPE_MEMREF_INPUT,
					  TEE_PARAM_TYPE_VALUE_OUTPUT,
					  TEE_PARAM_TYPE_NONE);
	TEE_Result res = TEE_ERROR_GENERIC;
	struct tee_ta_session *s = NULL;
	struct user_ta_ctx *utc = NULL;
	const char *sym = NULL;
	TEE_UUID *uuid = NULL;
	size_t maxlen = 0;
	vaddr_t va = 0;

	if (exp_pt != param_types)
		return TEE_ERROR_BAD_PARAMETERS;

	uuid = params[0].memref.buffer;
	if (uuid && params[0].memref.size != sizeof(*uuid))
		return TEE_ERROR_BAD_PARAMETERS;

	sym = params[1].memref.buffer;
	if (!sym)
		return TEE_ERROR_BAD_PARAMETERS;
	maxlen = params[1].memref.size;

	utc = to_user_ta_ctx(cs->ctx);

	s = tee_ta_pop_current_session();
	res = call_ldelf_dlsym(utc, uuid, sym, maxlen, &va);
	tee_ta_push_current_session(s);

	if (!res)
		reg_pair_from_64(va, &params[2].value.a, &params[2].value.b);

	return res;
}

static TEE_Result system_get_tpm_event_log(uint32_t param_types,
					   TEE_Param params[TEE_NUM_PARAMS])
{
	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_MEMREF_OUTPUT,
					  TEE_PARAM_TYPE_NONE,
					  TEE_PARAM_TYPE_NONE,
					  TEE_PARAM_TYPE_NONE);
	size_t size = 0;
	TEE_Result res = TEE_SUCCESS;

	if (exp_pt != param_types)
		return TEE_ERROR_BAD_PARAMETERS;

	size = params[0].memref.size;
	res = tpm_get_event_log(params[0].memref.buffer, &size);
	params[0].memref.size = size;

	return res;
}

static TEE_Result open_session(uint32_t param_types __unused,
			       TEE_Param params[TEE_NUM_PARAMS] __unused,
			       void **sess_ctx)
{
	struct tee_ta_session *s = NULL;
	struct system_ctx *ctx = NULL;

	/* Check that we're called from a user TA */
	s = tee_ta_get_calling_session();
	if (!s)
		return TEE_ERROR_ACCESS_DENIED;
	if (!is_user_ta_ctx(s->ctx))
		return TEE_ERROR_ACCESS_DENIED;

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx)
		return TEE_ERROR_OUT_OF_MEMORY;
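
	/*
	 * The session context owns the handle database of opened TA
	 * binaries; close_session() tears it down with ta_bin_close().
	 */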
	*sess_ctx = ctx;

	return TEE_SUCCESS;
}

static void close_session(void *sess_ctx)
{
	struct system_ctx *ctx = sess_ctx;

	handle_db_destroy(&ctx->db, ta_bin_close);
	free(ctx);
}

static TEE_Result invoke_command(void *sess_ctx, uint32_t cmd_id,
				 uint32_t param_types,
				 TEE_Param params[TEE_NUM_PARAMS])
{
	struct tee_ta_session *s = tee_ta_get_calling_session();

	switch (cmd_id) {
	case PTA_SYSTEM_ADD_RNG_ENTROPY:
		return system_rng_reseed(s, param_types, params);
	case PTA_SYSTEM_DERIVE_TA_UNIQUE_KEY:
		return system_derive_ta_unique_key(s, param_types, params);
	case PTA_SYSTEM_MAP_ZI:
		return system_map_zi(s, param_types, params);
	case PTA_SYSTEM_UNMAP:
		return system_unmap(s, param_types, params);
	case PTA_SYSTEM_OPEN_TA_BINARY:
		return system_open_ta_binary(sess_ctx, param_types, params);
	case PTA_SYSTEM_CLOSE_TA_BINARY:
		return system_close_ta_binary(sess_ctx, param_types, params);
	case PTA_SYSTEM_MAP_TA_BINARY:
		return system_map_ta_binary(sess_ctx, s, param_types, params);
	case PTA_SYSTEM_COPY_FROM_TA_BINARY:
		return system_copy_from_ta_binary(sess_ctx, param_types,
						  params);
	case PTA_SYSTEM_SET_PROT:
		return system_set_prot(s, param_types, params);
	case PTA_SYSTEM_REMAP:
		return system_remap(s, param_types, params);
	case PTA_SYSTEM_DLOPEN:
		return system_dlopen(s, param_types, params);
	case PTA_SYSTEM_DLSYM:
		return system_dlsym(s, param_types, params);
	case PTA_SYSTEM_GET_TPM_EVENT_LOG:
		return system_get_tpm_event_log(param_types, params);
	default:
		break;
	}

	return TEE_ERROR_NOT_IMPLEMENTED;
}

pseudo_ta_register(.uuid = PTA_SYSTEM_UUID, .name = "system.pta",
		   .flags = PTA_DEFAULT_FLAGS | TA_FLAG_CONCURRENT,
		   .open_session_entry_point = open_session,
		   .close_session_entry_point = close_session,
		   .invoke_command_entry_point = invoke_command);