// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#include <types_ext.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <arm.h>
#include <assert.h>
#include <kernel/mutex.h>
#include <kernel/panic.h>
#include <kernel/pseudo_ta.h>
#include <kernel/tee_common.h>
#include <kernel/tee_misc.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/tee_time.h>
#include <kernel/thread.h>
#include <kernel/user_ta.h>
#include <mm/core_mmu.h>
#include <mm/core_memprot.h>
#include <mm/mobj.h>
#include <mm/tee_mmu.h>
#include <tee/entry_std.h>
#include <tee/tee_svc_cryp.h>
#include <tee/tee_obj.h>
#include <tee/tee_svc_storage.h>
#include <tee_api_types.h>
#include <trace.h>
#include <utee_types.h>
#include <util.h>

/* This mutex protects the critical section in tee_ta_init_session */
struct mutex tee_ta_mutex = MUTEX_INITIALIZER;
struct tee_ta_ctx_head tee_ctxes = TAILQ_HEAD_INITIALIZER(tee_ctxes);

#ifndef CFG_CONCURRENT_SINGLE_INSTANCE_TA
static struct condvar tee_ta_cv = CONDVAR_INITIALIZER;
static int tee_ta_single_instance_thread = THREAD_ID_INVALID;
static size_t tee_ta_single_instance_count;
#endif

#ifdef CFG_CONCURRENT_SINGLE_INSTANCE_TA
static void lock_single_instance(void)
{
}

static void unlock_single_instance(void)
{
}

static bool has_single_instance_lock(void)
{
        return false;
}
#else
static void lock_single_instance(void)
{
        /* Requires tee_ta_mutex to be held */
        if (tee_ta_single_instance_thread != thread_get_id()) {
                /* Wait until the single-instance lock is available. */
                while (tee_ta_single_instance_thread != THREAD_ID_INVALID)
                        condvar_wait(&tee_ta_cv, &tee_ta_mutex);

                tee_ta_single_instance_thread = thread_get_id();
                assert(tee_ta_single_instance_count == 0);
        }

        tee_ta_single_instance_count++;
}

static void unlock_single_instance(void)
{
        /* Requires tee_ta_mutex to be held */
        assert(tee_ta_single_instance_thread == thread_get_id());
        assert(tee_ta_single_instance_count > 0);

        tee_ta_single_instance_count--;
        if (tee_ta_single_instance_count == 0) {
                tee_ta_single_instance_thread = THREAD_ID_INVALID;
                condvar_signal(&tee_ta_cv);
        }
}

static bool has_single_instance_lock(void)
{
        /* Requires tee_ta_mutex to be held */
        return tee_ta_single_instance_thread == thread_get_id();
}
#endif
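/*
 * Illustrative sketch (not part of the build): how the single-instance
 * lock above nests recursively for one thread. Under tee_ta_mutex, a
 * thread that already owns the lock only bumps the count:
 *
 *	mutex_lock(&tee_ta_mutex);
 *	lock_single_instance();   // thread A: count 0 -> 1, takes ownership
 *	lock_single_instance();   // thread A again: count 1 -> 2
 *	unlock_single_instance(); // count 2 -> 1, ownership kept
 *	unlock_single_instance(); // count 1 -> 0, tee_ta_cv signalled
 *	mutex_unlock(&tee_ta_mutex);
 *
 * A second thread calling lock_single_instance() meanwhile blocks in
 * condvar_wait() until the count drops back to zero.
 */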
static bool tee_ta_try_set_busy(struct tee_ta_ctx *ctx)
{
        bool rc = true;

        if (ctx->flags & TA_FLAG_CONCURRENT)
                return true;

        mutex_lock(&tee_ta_mutex);

        if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
                lock_single_instance();

        if (has_single_instance_lock()) {
                if (ctx->busy) {
                        /*
                         * We're holding the single-instance lock and the
                         * TA is busy. Waiting here would deadlock, so we
                         * release the lock and return false instead.
                         */
                        rc = false;
                        if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
                                unlock_single_instance();
                }
        } else {
                /*
                 * We're not holding the single-instance lock, so we're
                 * free to wait for the TA to become available.
                 */
                while (ctx->busy)
                        condvar_wait(&ctx->busy_cv, &tee_ta_mutex);
        }

        /* Either it's already true or we should set it to true */
        ctx->busy = true;

        mutex_unlock(&tee_ta_mutex);
        return rc;
}

static void tee_ta_set_busy(struct tee_ta_ctx *ctx)
{
        if (!tee_ta_try_set_busy(ctx))
                panic();
}

static void tee_ta_clear_busy(struct tee_ta_ctx *ctx)
{
        if (ctx->flags & TA_FLAG_CONCURRENT)
                return;

        mutex_lock(&tee_ta_mutex);

        assert(ctx->busy);
        ctx->busy = false;
        condvar_signal(&ctx->busy_cv);

        if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
                unlock_single_instance();

        mutex_unlock(&tee_ta_mutex);
}

static void dec_session_ref_count(struct tee_ta_session *s)
{
        assert(s->ref_count > 0);
        s->ref_count--;
        if (s->ref_count == 1)
                condvar_signal(&s->refc_cv);
}

void tee_ta_put_session(struct tee_ta_session *s)
{
        mutex_lock(&tee_ta_mutex);

        if (s->lock_thread == thread_get_id()) {
                s->lock_thread = THREAD_ID_INVALID;
                condvar_signal(&s->lock_cv);
        }
        dec_session_ref_count(s);

        mutex_unlock(&tee_ta_mutex);
}

static struct tee_ta_session *tee_ta_find_session_nolock(uint32_t id,
                        struct tee_ta_session_head *open_sessions)
{
        struct tee_ta_session *s = NULL;
        struct tee_ta_session *found = NULL;

        TAILQ_FOREACH(s, open_sessions, link) {
                if (s->id == id) {
                        found = s;
                        break;
                }
        }

        return found;
}

struct tee_ta_session *tee_ta_find_session(uint32_t id,
                        struct tee_ta_session_head *open_sessions)
{
        struct tee_ta_session *s = NULL;

        mutex_lock(&tee_ta_mutex);

        s = tee_ta_find_session_nolock(id, open_sessions);

        mutex_unlock(&tee_ta_mutex);

        return s;
}

struct tee_ta_session *tee_ta_get_session(uint32_t id, bool exclusive,
                        struct tee_ta_session_head *open_sessions)
{
        struct tee_ta_session *s;

        mutex_lock(&tee_ta_mutex);

        while (true) {
                s = tee_ta_find_session_nolock(id, open_sessions);
                if (!s)
                        break;
                if (s->unlink) {
                        s = NULL;
                        break;
                }
                s->ref_count++;
                if (!exclusive)
                        break;

                assert(s->lock_thread != thread_get_id());

                while (s->lock_thread != THREAD_ID_INVALID && !s->unlink)
                        condvar_wait(&s->lock_cv, &tee_ta_mutex);

                if (s->unlink) {
                        dec_session_ref_count(s);
                        s = NULL;
                        break;
                }

                s->lock_thread = thread_get_id();
                break;
        }

        mutex_unlock(&tee_ta_mutex);
        return s;
}
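/*
 * Illustrative sketch (not part of the build): the intended pairing of
 * tee_ta_get_session() and tee_ta_put_session(). An exclusive get takes
 * both a reference and the per-session lock; put releases both.
 *
 *	struct tee_ta_session *s;
 *
 *	s = tee_ta_get_session(id, true, open_sessions);
 *	if (!s)
 *		return TEE_ERROR_ITEM_NOT_FOUND;
 *	// ... use the session exclusively ...
 *	tee_ta_put_session(s);
 */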
static void tee_ta_unlink_session(struct tee_ta_session *s,
                        struct tee_ta_session_head *open_sessions)
{
        mutex_lock(&tee_ta_mutex);

        assert(s->ref_count >= 1);
        assert(s->lock_thread == thread_get_id());
        assert(!s->unlink);

        s->unlink = true;
        condvar_broadcast(&s->lock_cv);

        while (s->ref_count != 1)
                condvar_wait(&s->refc_cv, &tee_ta_mutex);

        TAILQ_REMOVE(open_sessions, s, link);

        mutex_unlock(&tee_ta_mutex);
}

static void destroy_session(struct tee_ta_session *s,
                            struct tee_ta_session_head *open_sessions)
{
        tee_ta_unlink_session(s, open_sessions);
#if defined(CFG_TA_GPROF_SUPPORT)
        free(s->sbuf);
#endif
        free(s);
}

static void destroy_context(struct tee_ta_ctx *ctx)
{
        DMSG("Destroy TA ctx (0x%" PRIxVA ")", (vaddr_t)ctx);

        condvar_destroy(&ctx->busy_cv);
        pgt_flush_ctx(ctx);
        ctx->ops->destroy(ctx);
}

static void destroy_ta_ctx_from_session(struct tee_ta_session *s)
{
        struct tee_ta_session *sess = NULL;
        struct tee_ta_session_head *open_sessions = NULL;
        struct tee_ta_ctx *ctx = NULL;
        struct user_ta_ctx *utc = NULL;
        size_t count = 1; /* start counting the references to the context */

        DMSG("Remove references to context (0x%" PRIxVA ")", (vaddr_t)s->ctx);

        mutex_lock(&tee_ta_mutex);
        nsec_sessions_list_head(&open_sessions);

        /*
         * The next two loops remove all references to the context that is
         * about to be destroyed, except the one held by the current
         * session. That reference is cleared at the end of this function,
         * once the context has been properly destroyed.
         */

        /*
         * Scan the entire list of sessions opened by clients from the
         * non-secure world.
         */
        TAILQ_FOREACH(sess, open_sessions, link) {
                if (sess->ctx == s->ctx && sess != s) {
                        sess->ctx = NULL;
                        count++;
                }
        }

        /*
         * Scan all sessions opened from the secure side by searching
         * through all available TA instances and, for each context,
         * scanning all opened sessions.
         */
        TAILQ_FOREACH(ctx, &tee_ctxes, link) {
                if (is_user_ta_ctx(ctx)) {
                        utc = to_user_ta_ctx(ctx);

                        TAILQ_FOREACH(sess, &utc->open_sessions, link) {
                                if (sess->ctx == s->ctx && sess != s) {
                                        sess->ctx = NULL;
                                        count++;
                                }
                        }
                }
        }

        assert(count == s->ctx->ref_count);

        TAILQ_REMOVE(&tee_ctxes, s->ctx, link);
        mutex_unlock(&tee_ta_mutex);

        destroy_context(s->ctx);

        s->ctx = NULL;
}

/*
 * tee_ta_context_find - Find a TA context in the list of registered
 * contexts based on a UUID (input).
 * Returns a pointer to the context, or NULL if not found.
 */
static struct tee_ta_ctx *tee_ta_context_find(const TEE_UUID *uuid)
{
        struct tee_ta_ctx *ctx;

        TAILQ_FOREACH(ctx, &tee_ctxes, link) {
                if (memcmp(&ctx->uuid, uuid, sizeof(TEE_UUID)) == 0)
                        return ctx;
        }

        return NULL;
}

/* Check if the requester (client ID) matches the session's initial client */
static TEE_Result check_client(struct tee_ta_session *s, const TEE_Identity *id)
{
        if (id == KERN_IDENTITY)
                return TEE_SUCCESS;

        if (id == NSAPP_IDENTITY) {
                if (s->clnt_id.login == TEE_LOGIN_TRUSTED_APP) {
                        DMSG("nsec tries to hijack TA session");
                        return TEE_ERROR_ACCESS_DENIED;
                }
                return TEE_SUCCESS;
        }

        if (memcmp(&s->clnt_id, id, sizeof(TEE_Identity)) != 0) {
                DMSG("client id mismatch");
                return TEE_ERROR_ACCESS_DENIED;
        }
        return TEE_SUCCESS;
}

/*
 * Check if the invocation parameters match the TA properties
 *
 * @sess - current session handle
 * @param - already identified memory references hold a valid 'mobj'
 *
 * Policy:
 * - All TAs can access 'non-secure' shared memory.
 * - All TAs can access TEE private memory (seccpy).
 * - Only SDP-flagged TAs can accept SDP memory references.
 */
#ifndef CFG_SECURE_DATA_PATH
static bool check_params(struct tee_ta_session *sess __unused,
                         struct tee_ta_param *param __unused)
{
        /*
         * When CFG_SECURE_DATA_PATH is not enabled, SDP memory references
         * are rejected at the OP-TEE core entry. Hence, here, all TAs have
         * the same permissions regarding memory reference parameters.
         */
        return true;
}
#else
static bool check_params(struct tee_ta_session *sess,
                         struct tee_ta_param *param)
{
        int n;

        /*
         * When CFG_SECURE_DATA_PATH is enabled, the OP-TEE entry allows
         * SHM and SDP memory references. Only TAs flagged SDP can access
         * SDP memory.
         */
        if (sess->ctx && sess->ctx->flags & TA_FLAG_SECURE_DATA_PATH)
                return true;

        for (n = 0; n < TEE_NUM_PARAMS; n++) {
                uint32_t param_type = TEE_PARAM_TYPE_GET(param->types, n);
                struct param_mem *mem = &param->u[n].mem;

                if (param_type != TEE_PARAM_TYPE_MEMREF_INPUT &&
                    param_type != TEE_PARAM_TYPE_MEMREF_OUTPUT &&
                    param_type != TEE_PARAM_TYPE_MEMREF_INOUT)
                        continue;
                if (!mem->size)
                        continue;
                if (mobj_is_sdp_mem(mem->mobj))
                        return false;
        }
        return true;
}
#endif
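/*
 * Illustrative sketch (not part of the build): with CFG_SECURE_DATA_PATH
 * enabled, a memref parameter whose mobj is SDP memory passes
 * check_params() only if the target TA carries TA_FLAG_SECURE_DATA_PATH:
 *
 *	// TA without TA_FLAG_SECURE_DATA_PATH, param 0 backed by an SDP mobj
 *	if (!check_params(sess, param))
 *		return TEE_ERROR_BAD_PARAMETERS; // taken: SDP ref rejected
 */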
static void set_invoke_timeout(struct tee_ta_session *sess,
                               uint32_t cancel_req_to)
{
        TEE_Time current_time;
        TEE_Time cancel_time;

        if (cancel_req_to == TEE_TIMEOUT_INFINITE)
                goto infinite;

        if (tee_time_get_sys_time(&current_time) != TEE_SUCCESS)
                goto infinite;

        if (ADD_OVERFLOW(current_time.seconds, cancel_req_to / 1000,
                         &cancel_time.seconds))
                goto infinite;

        cancel_time.millis = current_time.millis + cancel_req_to % 1000;
        if (cancel_time.millis >= 1000) {
                if (ADD_OVERFLOW(cancel_time.seconds, 1,
                                 &cancel_time.seconds))
                        goto infinite;

                cancel_time.millis -= 1000;
        }

        sess->cancel_time = cancel_time;
        return;

infinite:
        sess->cancel_time.seconds = UINT32_MAX;
        sess->cancel_time.millis = UINT32_MAX;
}
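/*
 * Worked example (illustrative): cancel_req_to = 2500 ms with a current
 * system time of { seconds = 10, millis = 800 }:
 *
 *	cancel_time.seconds = 10 + 2500 / 1000 = 12
 *	cancel_time.millis  = 800 + 2500 % 1000 = 1300  (>= 1000, so carry)
 *	cancel_time         = { seconds = 13, millis = 300 }
 *
 * TEE_TIMEOUT_INFINITE, a failed time read, or an arithmetic overflow
 * all fall back to { UINT32_MAX, UINT32_MAX }, which
 * tee_ta_session_is_cancelled() treats as "never times out".
 */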
/*-----------------------------------------------------------------------------
 * Close a Trusted Application and free available resources
 *---------------------------------------------------------------------------*/
TEE_Result tee_ta_close_session(struct tee_ta_session *csess,
                                struct tee_ta_session_head *open_sessions,
                                const TEE_Identity *clnt_id)
{
        struct tee_ta_session *sess;
        struct tee_ta_ctx *ctx;
        bool keep_alive;

        if (!csess)
                return TEE_ERROR_ITEM_NOT_FOUND;

        DMSG("csess 0x%" PRIxVA " id %u", (vaddr_t)csess, csess->id);

        sess = tee_ta_get_session(csess->id, true, open_sessions);
        if (!sess) {
                EMSG("session 0x%" PRIxVA " to be removed is not found",
                     (vaddr_t)csess);
                return TEE_ERROR_ITEM_NOT_FOUND;
        }

        if (check_client(sess, clnt_id) != TEE_SUCCESS) {
                tee_ta_put_session(sess);
                return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */
        }

        ctx = sess->ctx;
        DMSG("Destroy session");

        if (!ctx) {
                destroy_session(sess, open_sessions);
                return TEE_SUCCESS;
        }

        if (ctx->panicked) {
                destroy_session(sess, open_sessions);
        } else {
                tee_ta_set_busy(ctx);
                set_invoke_timeout(sess, TEE_TIMEOUT_INFINITE);
                ctx->ops->enter_close_session(sess);
                destroy_session(sess, open_sessions);
                tee_ta_clear_busy(ctx);
        }

        mutex_lock(&tee_ta_mutex);

        if (ctx->ref_count <= 0)
                panic();

        ctx->ref_count--;
        keep_alive = (ctx->flags & TA_FLAG_INSTANCE_KEEP_ALIVE) &&
                     (ctx->flags & TA_FLAG_SINGLE_INSTANCE);
        if (!ctx->ref_count && !keep_alive) {
                TAILQ_REMOVE(&tee_ctxes, ctx, link);
                mutex_unlock(&tee_ta_mutex);

                destroy_context(ctx);
        } else
                mutex_unlock(&tee_ta_mutex);

        return TEE_SUCCESS;
}

static TEE_Result tee_ta_init_session_with_context(struct tee_ta_ctx *ctx,
                        struct tee_ta_session *s)
{
        /*
         * If the TA isn't single-instance it should be loaded as a new
         * instance instead of doing anything with this instance. So tell
         * the caller that we didn't find the TA, and the caller will load
         * a new instance.
         */
        if ((ctx->flags & TA_FLAG_SINGLE_INSTANCE) == 0)
                return TEE_ERROR_ITEM_NOT_FOUND;

        /*
         * The TA is single-instance. If it isn't multi-session we can't
         * create another session unless its reference count is zero.
         */
        if (!(ctx->flags & TA_FLAG_MULTI_SESSION) && ctx->ref_count)
                return TEE_ERROR_BUSY;

        DMSG("Re-open TA %pUl", (void *)&ctx->uuid);

        ctx->ref_count++;
        s->ctx = ctx;
        return TEE_SUCCESS;
}

static uint32_t new_session_id(struct tee_ta_session_head *open_sessions)
{
        struct tee_ta_session *last = NULL;
        uint32_t saved = 0;
        uint32_t id = 1;

        last = TAILQ_LAST(open_sessions, tee_ta_session_head);
        if (last) {
                /* This value is less likely to be already used */
                id = last->id + 1;
                if (!id)
                        id++; /* 0 is not valid */
        }

        saved = id;
        do {
                if (!tee_ta_find_session_nolock(id, open_sessions))
                        return id;
                id++;
                if (!id)
                        id++;
        } while (id != saved);

        return 0;
}
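/*
 * Illustrative walk-through (not part of the build): new_session_id()
 * starts one past the most recently allocated id and wraps around,
 * skipping 0. With open sessions { 1, 2, 3 } the candidate is 4, which
 * is free and returned. If the candidate wraps past UINT32_MAX the scan
 * continues from 1, and only if every 32-bit id is in use does it come
 * back to its starting point and return 0 (treated as an overflow by
 * the caller).
 */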
static TEE_Result tee_ta_init_session(TEE_ErrorOrigin *err,
                        struct tee_ta_session_head *open_sessions,
                        const TEE_UUID *uuid,
                        struct tee_ta_session **sess)
{
        TEE_Result res;
        struct tee_ta_ctx *ctx;
        struct tee_ta_session *s = calloc(1, sizeof(struct tee_ta_session));

        *err = TEE_ORIGIN_TEE;
        if (!s)
                return TEE_ERROR_OUT_OF_MEMORY;

        s->cancel_mask = true;
        condvar_init(&s->refc_cv);
        condvar_init(&s->lock_cv);
        s->lock_thread = THREAD_ID_INVALID;
        s->ref_count = 1;

        /*
         * We take the global TA mutex here and hold it while doing
         * RPC to load the TA. This big critical section should be broken
         * down into smaller pieces.
         */
        mutex_lock(&tee_ta_mutex);

        s->id = new_session_id(open_sessions);
        if (!s->id) {
                /*
                 * All 32-bit ids are in use: bail out before the session
                 * has been inserted into the list.
                 */
                mutex_unlock(&tee_ta_mutex);
                free(s);
                return TEE_ERROR_OVERFLOW;
        }
        TAILQ_INSERT_TAIL(open_sessions, s, link);

        /* Look for an already loaded TA */
        ctx = tee_ta_context_find(uuid);
        if (ctx) {
                res = tee_ta_init_session_with_context(ctx, s);
                if (res != TEE_ERROR_ITEM_NOT_FOUND)
                        goto out;
        }

        /* Look for a pseudo TA */
        res = tee_ta_init_pseudo_ta_session(uuid, s);
        if (res != TEE_ERROR_ITEM_NOT_FOUND)
                goto out;

        /* Look for a user TA */
        res = tee_ta_init_user_ta_session(uuid, s);

out:
        if (res == TEE_SUCCESS) {
                *sess = s;
        } else {
                TAILQ_REMOVE(open_sessions, s, link);
                free(s);
        }
        mutex_unlock(&tee_ta_mutex);
        return res;
}

TEE_Result tee_ta_open_session(TEE_ErrorOrigin *err,
                               struct tee_ta_session **sess,
                               struct tee_ta_session_head *open_sessions,
                               const TEE_UUID *uuid,
                               const TEE_Identity *clnt_id,
                               uint32_t cancel_req_to,
                               struct tee_ta_param *param)
{
        TEE_Result res;
        struct tee_ta_session *s = NULL;
        struct tee_ta_ctx *ctx;
        bool panicked;
        bool was_busy = false;

        res = tee_ta_init_session(err, open_sessions, uuid, &s);
        if (res != TEE_SUCCESS) {
                DMSG("init session failed 0x%x", res);
                return res;
        }

        if (!check_params(s, param))
                return TEE_ERROR_BAD_PARAMETERS;

        ctx = s->ctx;

        if (!ctx || ctx->panicked) {
                DMSG("panicked, call tee_ta_close_session()");
                tee_ta_close_session(s, open_sessions, KERN_IDENTITY);
                *err = TEE_ORIGIN_TEE;
                return TEE_ERROR_TARGET_DEAD;
        }

        *sess = s;
        /* Save the identity of the session's owner */
        s->clnt_id = *clnt_id;

        if (tee_ta_try_set_busy(ctx)) {
                set_invoke_timeout(s, cancel_req_to);
                res = ctx->ops->enter_open_session(s, param, err);
                tee_ta_clear_busy(ctx);
        } else {
                /* Deadlock avoided */
                res = TEE_ERROR_BUSY;
                was_busy = true;
        }

        panicked = ctx->panicked;

        tee_ta_put_session(s);
        if (panicked || (res != TEE_SUCCESS))
                tee_ta_close_session(s, open_sessions, KERN_IDENTITY);

        /*
         * The error origin is TEE_ORIGIN_TRUSTED_APP for "regular" errors;
         * panics and busy instances are reported with origin TEE_ORIGIN_TEE.
         */
        if (panicked || was_busy)
                *err = TEE_ORIGIN_TEE;
        else
                *err = TEE_ORIGIN_TRUSTED_APP;

        if (res != TEE_SUCCESS)
                EMSG("Failed. Return error 0x%x", res);

        return res;
}
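/*
 * Illustrative call sequence (not part of the build): a client's
 * open-session request reaches this manager roughly as below, with uuid,
 * clnt_id and param as provided by the caller:
 *
 *	TEE_ErrorOrigin err;
 *	TEE_Result res;
 *	struct tee_ta_session *s = NULL;
 *
 *	res = tee_ta_open_session(&err, &s, open_sessions, &uuid,
 *				  &clnt_id, TEE_TIMEOUT_INFINITE, param);
 *	// on success, s->id identifies the session in later
 *	// tee_ta_invoke_command()/tee_ta_close_session() calls
 */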
TEE_Result tee_ta_invoke_command(TEE_ErrorOrigin *err,
                                 struct tee_ta_session *sess,
                                 const TEE_Identity *clnt_id,
                                 uint32_t cancel_req_to, uint32_t cmd,
                                 struct tee_ta_param *param)
{
        TEE_Result res;

        if (check_client(sess, clnt_id) != TEE_SUCCESS)
                return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */

        if (!check_params(sess, param))
                return TEE_ERROR_BAD_PARAMETERS;

        if (!sess->ctx) {
                /* The context has already been destroyed */
                *err = TEE_ORIGIN_TEE;
                return TEE_ERROR_TARGET_DEAD;
        } else if (sess->ctx->panicked) {
                DMSG("Panicked!");
                destroy_ta_ctx_from_session(sess);
                *err = TEE_ORIGIN_TEE;
                return TEE_ERROR_TARGET_DEAD;
        }

        tee_ta_set_busy(sess->ctx);

        set_invoke_timeout(sess, cancel_req_to);
        res = sess->ctx->ops->enter_invoke_cmd(sess, cmd, param, err);

        tee_ta_clear_busy(sess->ctx);

        if (sess->ctx->panicked) {
                destroy_ta_ctx_from_session(sess);
                *err = TEE_ORIGIN_TEE;
                return TEE_ERROR_TARGET_DEAD;
        }

        /* A short buffer is not treated as a real error case */
        if (res != TEE_SUCCESS && res != TEE_ERROR_SHORT_BUFFER)
                DMSG("Error: %x of %d", res, *err);

        return res;
}

TEE_Result tee_ta_cancel_command(TEE_ErrorOrigin *err,
                                 struct tee_ta_session *sess,
                                 const TEE_Identity *clnt_id)
{
        *err = TEE_ORIGIN_TEE;

        if (check_client(sess, clnt_id) != TEE_SUCCESS)
                return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */

        sess->cancel = true;
        return TEE_SUCCESS;
}

bool tee_ta_session_is_cancelled(struct tee_ta_session *s, TEE_Time *curr_time)
{
        TEE_Time current_time;

        if (s->cancel_mask)
                return false;

        if (s->cancel)
                return true;

        if (s->cancel_time.seconds == UINT32_MAX)
                return false;

        if (curr_time != NULL)
                current_time = *curr_time;
        else if (tee_time_get_sys_time(&current_time) != TEE_SUCCESS)
                return false;

        if (current_time.seconds > s->cancel_time.seconds ||
            (current_time.seconds == s->cancel_time.seconds &&
             current_time.millis >= s->cancel_time.millis)) {
                return true;
        }

        return false;
}
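/*
 * Worked example (illustrative): with cancel_mask cleared, a cancel_time
 * of { 13, 300 } and no explicit cancel request,
 * tee_ta_session_is_cancelled() returns:
 *
 *	current_time = { 13, 299 }  ->  false (same second, millis below)
 *	current_time = { 13, 300 }  ->  true  (millis threshold reached)
 *	current_time = { 14,   0 }  ->  true  (seconds exceeded)
 *
 * A seconds value of UINT32_MAX marks an infinite timeout and always
 * yields false here, unless sess->cancel was set explicitly.
 */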
static void update_current_ctx(struct thread_specific_data *tsd)
{
        struct tee_ta_ctx *ctx = NULL;
        struct tee_ta_session *s = TAILQ_FIRST(&tsd->sess_stack);

        if (s) {
                if (is_pseudo_ta_ctx(s->ctx))
                        s = TAILQ_NEXT(s, link_tsd);

                if (s)
                        ctx = s->ctx;
        }

        if (tsd->ctx != ctx)
                tee_mmu_set_ctx(ctx);
        /*
         * If the context has no user mapping (vm_info == NULL) we must
         * not have an active user mapping, and vice versa.
         */
        if (((is_user_ta_ctx(ctx) ?
              to_user_ta_ctx(ctx)->vm_info : NULL) == NULL) ==
            core_mmu_user_mapping_is_active())
                panic("unexpected active mapping");
}

void tee_ta_push_current_session(struct tee_ta_session *sess)
{
        struct thread_specific_data *tsd = thread_get_tsd();

        TAILQ_INSERT_HEAD(&tsd->sess_stack, sess, link_tsd);
        update_current_ctx(tsd);
}

struct tee_ta_session *tee_ta_pop_current_session(void)
{
        struct thread_specific_data *tsd = thread_get_tsd();
        struct tee_ta_session *s = TAILQ_FIRST(&tsd->sess_stack);

        if (s) {
                TAILQ_REMOVE(&tsd->sess_stack, s, link_tsd);
                update_current_ctx(tsd);
        }
        return s;
}

TEE_Result tee_ta_get_current_session(struct tee_ta_session **sess)
{
        struct tee_ta_session *s = TAILQ_FIRST(&thread_get_tsd()->sess_stack);

        if (!s)
                return TEE_ERROR_BAD_STATE;
        *sess = s;
        return TEE_SUCCESS;
}

struct tee_ta_session *tee_ta_get_calling_session(void)
{
        struct tee_ta_session *s = TAILQ_FIRST(&thread_get_tsd()->sess_stack);

        if (s)
                s = TAILQ_NEXT(s, link_tsd);
        return s;
}

/*
 * dump_state - Display TA state as an error log.
 */
static void dump_state(struct tee_ta_ctx *ctx)
{
        struct tee_ta_session *s = NULL;
        bool active __maybe_unused;

        if (!ctx) {
                EMSG("No TA status: null context reference");
                return;
        }

        active = ((tee_ta_get_current_session(&s) == TEE_SUCCESS) &&
                  s && s->ctx == ctx);

        EMSG_RAW("Status of TA %pUl (%p) %s", (void *)&ctx->uuid, (void *)ctx,
                 active ? "(active)" : "");
        ctx->ops->dump_state(ctx);
}

void tee_ta_dump_current(void)
{
        struct tee_ta_session *s = NULL;

        if (tee_ta_get_current_session(&s) != TEE_SUCCESS) {
                EMSG("no valid session found, cannot log TA status");
                return;
        }

        dump_state(s->ctx);
}

#if defined(CFG_TA_GPROF_SUPPORT)
void tee_ta_gprof_sample_pc(vaddr_t pc)
{
        struct tee_ta_session *s;
        struct sample_buf *sbuf;
        size_t idx;

        if (tee_ta_get_current_session(&s) != TEE_SUCCESS)
                return;
        sbuf = s->sbuf;
        if (!sbuf || !sbuf->enabled)
                return; /* PC sampling is not enabled */

        idx = (((uint64_t)pc - sbuf->offset) / 2 * sbuf->scale) / 65536;
        if (idx < sbuf->nsamples)
                sbuf->samples[idx]++;
        sbuf->count++;
}

/*
 * Update user-mode CPU time for the current session
 * @suspend: true if the session is being suspended (leaving user mode),
 * false if it is resumed (entering user mode)
 */
static void tee_ta_update_session_utime(bool suspend)
{
        struct tee_ta_session *s;
        struct sample_buf *sbuf;
        uint64_t now;

        if (tee_ta_get_current_session(&s) != TEE_SUCCESS)
                return;
        sbuf = s->sbuf;
        if (!sbuf)
                return;
        now = read_cntpct();
        if (suspend) {
                assert(sbuf->usr_entered);
                sbuf->usr += now - sbuf->usr_entered;
                sbuf->usr_entered = 0;
        } else {
                assert(!sbuf->usr_entered);
                if (!now)
                        now++; /* 0 is reserved */
                sbuf->usr_entered = now;
        }
}

void tee_ta_update_session_utime_suspend(void)
{
        tee_ta_update_session_utime(true);
}

void tee_ta_update_session_utime_resume(void)
{
        tee_ta_update_session_utime(false);
}
#endif