// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#include <arm.h>
#include <assert.h>
#include <kernel/mutex.h>
#include <kernel/panic.h>
#include <kernel/pseudo_ta.h>
#include <kernel/secure_partition.h>
#include <kernel/tee_common.h>
#include <kernel/tee_misc.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/tee_time.h>
#include <kernel/thread.h>
#include <kernel/user_mode_ctx.h>
#include <kernel/user_ta.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <mm/mobj.h>
#include <mm/tee_mmu.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <tee_api_types.h>
#include <tee/entry_std.h>
#include <tee/tee_obj.h>
#include <tee/tee_svc_cryp.h>
#include <tee/tee_svc_storage.h>
#include <trace.h>
#include <types_ext.h>
#include <user_ta_header.h>
#include <utee_types.h>
#include <util.h>

/* This mutex protects the critical section in tee_ta_init_session */
struct mutex tee_ta_mutex = MUTEX_INITIALIZER;
/* This condvar is used when waiting for a TA context to become initialized */
struct condvar tee_ta_init_cv = CONDVAR_INITIALIZER;
struct tee_ta_ctx_head tee_ctxes = TAILQ_HEAD_INITIALIZER(tee_ctxes);

#ifndef CFG_CONCURRENT_SINGLE_INSTANCE_TA
static struct condvar tee_ta_cv = CONDVAR_INITIALIZER;
static short int tee_ta_single_instance_thread = THREAD_ID_INVALID;
static size_t tee_ta_single_instance_count;
#endif

#ifdef CFG_CONCURRENT_SINGLE_INSTANCE_TA
static void lock_single_instance(void)
{
}

static void unlock_single_instance(void)
{
}

static bool has_single_instance_lock(void)
{
	return false;
}
#else
static void lock_single_instance(void)
{
	/* Requires tee_ta_mutex to be held */
	if (tee_ta_single_instance_thread != thread_get_id()) {
		/* Wait until the single-instance lock is available. */
		while (tee_ta_single_instance_thread != THREAD_ID_INVALID)
			condvar_wait(&tee_ta_cv, &tee_ta_mutex);

		tee_ta_single_instance_thread = thread_get_id();
		assert(tee_ta_single_instance_count == 0);
	}

	tee_ta_single_instance_count++;
}

static void unlock_single_instance(void)
{
	/* Requires tee_ta_mutex to be held */
	assert(tee_ta_single_instance_thread == thread_get_id());
	assert(tee_ta_single_instance_count > 0);

	tee_ta_single_instance_count--;
	if (tee_ta_single_instance_count == 0) {
		tee_ta_single_instance_thread = THREAD_ID_INVALID;
		condvar_signal(&tee_ta_cv);
	}
}

static bool has_single_instance_lock(void)
{
	/* Requires tee_ta_mutex to be held */
	return tee_ta_single_instance_thread == thread_get_id();
}
#endif

static bool tee_ta_try_set_busy(struct tee_ta_ctx *ctx)
{
	bool rc = true;

	if (ctx->flags & TA_FLAG_CONCURRENT)
		return true;

	mutex_lock(&tee_ta_mutex);

	if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
		lock_single_instance();

	if (has_single_instance_lock()) {
		if (ctx->busy) {
			/*
			 * We're holding the single-instance lock and the
			 * TA is busy. Waiting now would only cause a
			 * deadlock, so we release the lock and return false.
			 */
			rc = false;
			if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
				unlock_single_instance();
		}
	} else {
		/*
		 * We're not holding the single-instance lock, so we're
		 * free to wait for the TA to become available.
		 */
		while (ctx->busy)
			condvar_wait(&ctx->busy_cv, &tee_ta_mutex);
	}

	/* Either it's already true or we should set it to true */
	ctx->busy = true;

	mutex_unlock(&tee_ta_mutex);
	return rc;
}

static void tee_ta_set_busy(struct tee_ta_ctx *ctx)
{
	if (!tee_ta_try_set_busy(ctx))
		panic();
}

static void tee_ta_clear_busy(struct tee_ta_ctx *ctx)
{
	if (ctx->flags & TA_FLAG_CONCURRENT)
		return;

	mutex_lock(&tee_ta_mutex);

	assert(ctx->busy);
	ctx->busy = false;
	condvar_signal(&ctx->busy_cv);

	if (!ctx->initializing && (ctx->flags & TA_FLAG_SINGLE_INSTANCE))
		unlock_single_instance();

	ctx->initializing = false;

	mutex_unlock(&tee_ta_mutex);
}

static void dec_session_ref_count(struct tee_ta_session *s)
{
	assert(s->ref_count > 0);
	s->ref_count--;
	if (s->ref_count == 1)
		condvar_signal(&s->refc_cv);
}

void tee_ta_put_session(struct tee_ta_session *s)
{
	mutex_lock(&tee_ta_mutex);

	if (s->lock_thread == thread_get_id()) {
		s->lock_thread = THREAD_ID_INVALID;
		condvar_signal(&s->lock_cv);
	}
	dec_session_ref_count(s);

	mutex_unlock(&tee_ta_mutex);
}

static struct tee_ta_session *tee_ta_find_session_nolock(uint32_t id,
			struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *s = NULL;
	struct tee_ta_session *found = NULL;

	TAILQ_FOREACH(s, open_sessions, link) {
		if (s->id == id) {
			found = s;
			break;
		}
	}

	return found;
}

struct tee_ta_session *tee_ta_find_session(uint32_t id,
			struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *s = NULL;

	mutex_lock(&tee_ta_mutex);

	s = tee_ta_find_session_nolock(id, open_sessions);

	mutex_unlock(&tee_ta_mutex);

	return s;
}

struct tee_ta_session *tee_ta_get_session(uint32_t id, bool exclusive,
			struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *s;

	mutex_lock(&tee_ta_mutex);

	while (true) {
		s = tee_ta_find_session_nolock(id, open_sessions);
		if (!s)
			break;
		if (s->unlink) {
			s = NULL;
			break;
		}
		s->ref_count++;
		if (!exclusive)
			break;

		assert(s->lock_thread != thread_get_id());

		while (s->lock_thread != THREAD_ID_INVALID && !s->unlink)
			condvar_wait(&s->lock_cv, &tee_ta_mutex);

		if (s->unlink) {
			dec_session_ref_count(s);
			s = NULL;
			break;
		}

		s->lock_thread = thread_get_id();
		break;
	}

	mutex_unlock(&tee_ta_mutex);
	return s;
}

static void tee_ta_unlink_session(struct tee_ta_session *s,
			struct tee_ta_session_head *open_sessions)
{
	mutex_lock(&tee_ta_mutex);

	assert(s->ref_count >= 1);
	assert(s->lock_thread == thread_get_id());
	assert(!s->unlink);

	s->unlink = true;
	condvar_broadcast(&s->lock_cv);

	while (s->ref_count != 1)
		condvar_wait(&s->refc_cv, &tee_ta_mutex);

	TAILQ_REMOVE(open_sessions, s, link);

	mutex_unlock(&tee_ta_mutex);
}

static void destroy_session(struct tee_ta_session *s,
			    struct tee_ta_session_head *open_sessions)
{
#if defined(CFG_FTRACE_SUPPORT)
	if (s->ts_sess.ctx && s->ts_sess.ctx->ops->dump_ftrace) {
		ts_push_current_session(&s->ts_sess);
		s->ts_sess.fbuf = NULL;
		s->ts_sess.ctx->ops->dump_ftrace(s->ts_sess.ctx);
		ts_pop_current_session();
	}
#endif

	tee_ta_unlink_session(s, open_sessions);
#if defined(CFG_TA_GPROF_SUPPORT)
	free(s->ts_sess.sbuf);
#endif
	free(s);
}

static void destroy_context(struct tee_ta_ctx *ctx)
{
	DMSG("Destroy TA ctx (0x%" PRIxVA ")", (vaddr_t)ctx);

	condvar_destroy(&ctx->busy_cv);
	pgt_flush_ctx(ctx);
	ctx->ops->destroy(ctx);
}

static void destroy_ta_ctx_from_session(struct tee_ta_session *s)
{
	struct tee_ta_session *sess = NULL;
	struct tee_ta_session_head *open_sessions = NULL;
	struct tee_ta_ctx *ctx = NULL;
	struct user_ta_ctx *utc = NULL;
	size_t count = 1; /* start counting the references to the context */

	DMSG("Remove references to context (0x%" PRIxVA ")",
	     (vaddr_t)s->ts_sess.ctx);

	mutex_lock(&tee_ta_mutex);
	nsec_sessions_list_head(&open_sessions);

	/*
	 * The next two loops remove all references to the context that is
	 * about to be destroyed, except for the reference held by the
	 * current session. That one is dropped later in this function,
	 * only after the context has been properly destroyed.
	 */

	/*
	 * Scan the entire list of sessions opened by clients from the
	 * non-secure world.
	 */
	TAILQ_FOREACH(sess, open_sessions, link) {
		if (sess->ts_sess.ctx == s->ts_sess.ctx && sess != s) {
			sess->ts_sess.ctx = NULL;
			count++;
		}
	}

	/*
	 * Scan all sessions opened from the secure side by searching
	 * through all available TA instances and, for each context,
	 * scanning all of its open sessions.
	 */
	TAILQ_FOREACH(ctx, &tee_ctxes, link) {
		if (is_user_ta_ctx(ctx)) {
			utc = to_user_ta_ctx(ctx);

			TAILQ_FOREACH(sess, &utc->open_sessions, link) {
				if (sess->ts_sess.ctx == s->ts_sess.ctx &&
				    sess != s) {
					sess->ts_sess.ctx = NULL;
					count++;
				}
			}
		}
	}

	assert(count == s->ts_sess.ctx->ref_count);

	TAILQ_REMOVE(&tee_ctxes, s->ts_sess.ctx, link);
	mutex_unlock(&tee_ta_mutex);

	destroy_context(s->ts_sess.ctx);

	s->ts_sess.ctx = NULL;
}

/*
 * tee_ta_context_find - Find a TA context in the context list based on a
 * UUID (input). Returns a pointer to the context, or NULL if not found.
 */
static struct tee_ta_ctx *tee_ta_context_find(const TEE_UUID *uuid)
{
	struct tee_ta_ctx *ctx;

	TAILQ_FOREACH(ctx, &tee_ctxes, link) {
		if (memcmp(&ctx->uuid, uuid, sizeof(TEE_UUID)) == 0)
			return ctx;
	}

	return NULL;
}

/* Check if the requester (client ID) matches the session's initial client */
static TEE_Result check_client(struct tee_ta_session *s, const TEE_Identity *id)
{
	if (id == KERN_IDENTITY)
		return TEE_SUCCESS;

	if (id == NSAPP_IDENTITY) {
		if (s->clnt_id.login == TEE_LOGIN_TRUSTED_APP) {
			DMSG("nsec tries to hijack TA session");
			return TEE_ERROR_ACCESS_DENIED;
		}
		return TEE_SUCCESS;
	}

	if (memcmp(&s->clnt_id, id, sizeof(TEE_Identity)) != 0) {
		DMSG("client id mismatch");
		return TEE_ERROR_ACCESS_DENIED;
	}
	return TEE_SUCCESS;
}

/*
 * Check if invocation parameters match the TA properties
 *
 * @s - current session handle
 * @param - already identified memory references hold a valid 'mobj'.
 *
 * Policy:
 * - All TAs can access 'non-secure' shared memory.
 * - All TAs can access TEE private memory (seccpy)
 * - Only SDP flagged TAs can accept SDP memory references.
 */
#ifndef CFG_SECURE_DATA_PATH
static bool check_params(struct tee_ta_session *sess __unused,
			 struct tee_ta_param *param __unused)
{
	/*
	 * When CFG_SECURE_DATA_PATH is not enabled, SDP memory references
	 * are rejected at OP-TEE core entry. Hence here all TAs have same
	 * permissions regarding memory reference parameters.
	 */
	return true;
}
#else
static bool check_params(struct tee_ta_session *sess,
			 struct tee_ta_param *param)
{
	int n;

	/*
	 * When CFG_SECURE_DATA_PATH is enabled, OP-TEE entry allows SHM and
	 * SDP memory references. Only TAs flagged SDP can access SDP memory.
	 */
	if (sess->ts_sess.ctx &&
	    sess->ts_sess.ctx->flags & TA_FLAG_SECURE_DATA_PATH)
		return true;

	for (n = 0; n < TEE_NUM_PARAMS; n++) {
		uint32_t param_type = TEE_PARAM_TYPE_GET(param->types, n);
		struct param_mem *mem = &param->u[n].mem;

		if (param_type != TEE_PARAM_TYPE_MEMREF_INPUT &&
		    param_type != TEE_PARAM_TYPE_MEMREF_OUTPUT &&
		    param_type != TEE_PARAM_TYPE_MEMREF_INOUT)
			continue;
		if (!mem->size)
			continue;
		if (mobj_is_sdp_mem(mem->mobj))
			return false;
	}
	return true;
}
#endif

static void set_invoke_timeout(struct tee_ta_session *sess,
			       uint32_t cancel_req_to)
{
	TEE_Time current_time;
	TEE_Time cancel_time;

	if (cancel_req_to == TEE_TIMEOUT_INFINITE)
		goto infinite;

	if (tee_time_get_sys_time(&current_time) != TEE_SUCCESS)
		goto infinite;

	if (ADD_OVERFLOW(current_time.seconds, cancel_req_to / 1000,
			 &cancel_time.seconds))
		goto infinite;

	cancel_time.millis = current_time.millis + cancel_req_to % 1000;
	if (cancel_time.millis > 1000) {
		if (ADD_OVERFLOW(current_time.seconds, 1,
				 &cancel_time.seconds))
			goto infinite;

		cancel_time.seconds++;
		cancel_time.millis -= 1000;
	}

	sess->cancel_time = cancel_time;
	return;

infinite:
	sess->cancel_time.seconds = UINT32_MAX;
	sess->cancel_time.millis = UINT32_MAX;
}

/*-----------------------------------------------------------------------------
 * Close a Trusted Application and free available resources
 *---------------------------------------------------------------------------*/
TEE_Result tee_ta_close_session(struct tee_ta_session *csess,
				struct tee_ta_session_head *open_sessions,
				const TEE_Identity *clnt_id)
{
	struct tee_ta_session *sess;
	struct tee_ta_ctx *ctx;
	bool keep_alive;

	DMSG("csess 0x%" PRIxVA " id %u",
	     (vaddr_t)csess, csess ? csess->id : UINT_MAX);

	if (!csess)
		return TEE_ERROR_ITEM_NOT_FOUND;

	sess = tee_ta_get_session(csess->id, true, open_sessions);

	if (!sess) {
		EMSG("session 0x%" PRIxVA " to be removed is not found",
		     (vaddr_t)csess);
		return TEE_ERROR_ITEM_NOT_FOUND;
	}

	if (check_client(sess, clnt_id) != TEE_SUCCESS) {
		tee_ta_put_session(sess);
		return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */
	}

	ctx = sess->ts_sess.ctx;
	DMSG("Destroy session");

	if (!ctx) {
		destroy_session(sess, open_sessions);
		return TEE_SUCCESS;
	}

	if (ctx->panicked) {
		destroy_session(sess, open_sessions);
	} else {
		tee_ta_set_busy(ctx);
		set_invoke_timeout(sess, TEE_TIMEOUT_INFINITE);
		ctx->ops->enter_close_session(sess);
		destroy_session(sess, open_sessions);
		tee_ta_clear_busy(ctx);
	}

	mutex_lock(&tee_ta_mutex);

	if (ctx->ref_count <= 0)
		panic();

	ctx->ref_count--;
	keep_alive = (ctx->flags & TA_FLAG_INSTANCE_KEEP_ALIVE) &&
		     (ctx->flags & TA_FLAG_SINGLE_INSTANCE);
	if (!ctx->ref_count && !keep_alive) {
		TAILQ_REMOVE(&tee_ctxes, ctx, link);
		mutex_unlock(&tee_ta_mutex);

		destroy_context(ctx);
	} else
		mutex_unlock(&tee_ta_mutex);

	return TEE_SUCCESS;
}

static TEE_Result tee_ta_init_session_with_context(struct tee_ta_session *s,
						   const TEE_UUID *uuid)
{
	struct tee_ta_ctx *ctx = NULL;

	while (true) {
		ctx = tee_ta_context_find(uuid);
		if (!ctx)
			return TEE_ERROR_ITEM_NOT_FOUND;

		if (!is_user_ta_ctx(ctx) ||
		    !to_user_ta_ctx(ctx)->is_initializing)
			break;
		/*
		 * Context is still initializing, wait here until it's
		 * fully initialized. Note that we're searching for the
		 * context again since it may have been removed while we
		 * were sleeping.
		 */
		condvar_wait(&tee_ta_init_cv, &tee_ta_mutex);
	}

	/*
	 * If the TA isn't single instance it should be loaded as a new
	 * instance instead of doing anything with this instance.
	 * So tell the caller that we didn't find the TA; the caller
	 * will then load a new instance.
	 */
	if ((ctx->flags & TA_FLAG_SINGLE_INSTANCE) == 0)
		return TEE_ERROR_ITEM_NOT_FOUND;

	/*
	 * The TA is single instance, if it isn't multi session we
	 * can't create another session unless its reference is zero
	 */
	if (!(ctx->flags & TA_FLAG_MULTI_SESSION) && ctx->ref_count)
		return TEE_ERROR_BUSY;

	DMSG("Re-open TA %pUl", (void *)&ctx->uuid);

	ctx->ref_count++;
	s->ts_sess.ctx = ctx;
	return TEE_SUCCESS;
}

static uint32_t new_session_id(struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *last = NULL;
	uint32_t saved = 0;
	uint32_t id = 1;

	last = TAILQ_LAST(open_sessions, tee_ta_session_head);
	if (last) {
		/* This value is less likely to be already used */
		id = last->id + 1;
		if (!id)
			id++; /* 0 is not valid */
	}

	saved = id;
	do {
		if (!tee_ta_find_session_nolock(id, open_sessions))
			return id;
		id++;
		if (!id)
			id++;
	} while (id != saved);

	return 0;
}

static TEE_Result tee_ta_init_session(TEE_ErrorOrigin *err,
			struct tee_ta_session_head *open_sessions,
			const TEE_UUID *uuid,
			struct tee_ta_session **sess)
{
	TEE_Result res;
	struct tee_ta_session *s = calloc(1, sizeof(struct tee_ta_session));

	*err = TEE_ORIGIN_TEE;
	if (!s)
		return TEE_ERROR_OUT_OF_MEMORY;

	s->cancel_mask = true;
	condvar_init(&s->refc_cv);
	condvar_init(&s->lock_cv);
	s->lock_thread = THREAD_ID_INVALID;
	s->ref_count = 1;

	mutex_lock(&tee_ta_mutex);
	s->id = new_session_id(open_sessions);
	if (!s->id) {
		res = TEE_ERROR_OVERFLOW;
		goto err_mutex_unlock;
	}

	TAILQ_INSERT_TAIL(open_sessions, s, link);

	/* Look for already loaded TA */
	res = tee_ta_init_session_with_context(s, uuid);
	mutex_unlock(&tee_ta_mutex);
	if (res == TEE_SUCCESS || res != TEE_ERROR_ITEM_NOT_FOUND)
		goto out;

	/* Look for secure partition */
	res = sec_part_init_session(uuid, s);
	if (res == TEE_SUCCESS || res != TEE_ERROR_ITEM_NOT_FOUND)
		goto out;

	/* Look for pseudo TA */
	res = tee_ta_init_pseudo_ta_session(uuid, s);
	if (res == TEE_SUCCESS || res != TEE_ERROR_ITEM_NOT_FOUND)
		goto out;

	/* Look for user TA */
	res = tee_ta_init_user_ta_session(uuid, s);

out:
	if (!res) {
		*sess = s;
		return TEE_SUCCESS;
	}

	mutex_lock(&tee_ta_mutex);
	TAILQ_REMOVE(open_sessions, s, link);
err_mutex_unlock:
	mutex_unlock(&tee_ta_mutex);
	free(s);
	return res;
}

TEE_Result tee_ta_open_session(TEE_ErrorOrigin *err,
			       struct tee_ta_session **sess,
			       struct tee_ta_session_head *open_sessions,
			       const TEE_UUID *uuid,
			       const TEE_Identity *clnt_id,
			       uint32_t cancel_req_to,
			       struct tee_ta_param *param)
{
	TEE_Result res;
	struct tee_ta_session *s = NULL;
	struct tee_ta_ctx *ctx;
	bool panicked;
	bool was_busy = false;

	res = tee_ta_init_session(err, open_sessions, uuid, &s);
	if (res != TEE_SUCCESS) {
		DMSG("init session failed 0x%x", res);
		return res;
	}

	if (!check_params(s, param))
		return TEE_ERROR_BAD_PARAMETERS;

	ctx = s->ts_sess.ctx;

	if (!ctx || ctx->panicked) {
		DMSG("panicked, call tee_ta_close_session()");
		tee_ta_close_session(s, open_sessions, KERN_IDENTITY);
		*err = TEE_ORIGIN_TEE;
		return TEE_ERROR_TARGET_DEAD;
	}

	*sess = s;
	/* Save identity of the owner of the session */
	s->clnt_id = *clnt_id;

	if (tee_ta_try_set_busy(ctx)) {
		set_invoke_timeout(s, cancel_req_to);
		res = ctx->ops->enter_open_session(s, param, err);
		tee_ta_clear_busy(ctx);
	} else {
		/* Deadlock avoided */
		res = TEE_ERROR_BUSY;
		was_busy = true;
	}

	panicked = ctx->panicked;

	tee_ta_put_session(s);
	if (panicked || (res != TEE_SUCCESS))
		tee_ta_close_session(s, open_sessions, KERN_IDENTITY);

	/*
	 * The error origin is TEE_ORIGIN_TRUSTED_APP for "regular" errors;
	 * panic and busy cases are reported with origin TEE_ORIGIN_TEE.
	 */
	if (panicked || was_busy)
		*err = TEE_ORIGIN_TEE;

	if (res != TEE_SUCCESS)
		EMSG("Failed. Return error 0x%x", res);

	return res;
}

TEE_Result tee_ta_invoke_command(TEE_ErrorOrigin *err,
				 struct tee_ta_session *sess,
				 const TEE_Identity *clnt_id,
				 uint32_t cancel_req_to, uint32_t cmd,
				 struct tee_ta_param *param)
{
	TEE_Result res;

	if (check_client(sess, clnt_id) != TEE_SUCCESS)
		return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */

	if (!check_params(sess, param))
		return TEE_ERROR_BAD_PARAMETERS;

	if (!sess->ts_sess.ctx) {
		/* The context has been already destroyed */
		*err = TEE_ORIGIN_TEE;
		return TEE_ERROR_TARGET_DEAD;
	} else if (sess->ts_sess.ctx->panicked) {
		DMSG("Panicked !");
		destroy_ta_ctx_from_session(sess);
		*err = TEE_ORIGIN_TEE;
		return TEE_ERROR_TARGET_DEAD;
	}

	tee_ta_set_busy(sess->ts_sess.ctx);

	set_invoke_timeout(sess, cancel_req_to);
	res = sess->ts_sess.ctx->ops->enter_invoke_cmd(sess, cmd, param, err);

	tee_ta_clear_busy(sess->ts_sess.ctx);

	if (sess->ts_sess.ctx->panicked) {
		destroy_ta_ctx_from_session(sess);
		*err = TEE_ORIGIN_TEE;
		return TEE_ERROR_TARGET_DEAD;
	}

	/* Short buffer is not an effective error case */
	if (res != TEE_SUCCESS && res != TEE_ERROR_SHORT_BUFFER)
		DMSG("Error: %x of %d", res, *err);

	return res;
}

TEE_Result tee_ta_cancel_command(TEE_ErrorOrigin *err,
				 struct tee_ta_session *sess,
				 const TEE_Identity *clnt_id)
{
	*err = TEE_ORIGIN_TEE;

	if (check_client(sess, clnt_id) != TEE_SUCCESS)
		return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */

	sess->cancel = true;
	return TEE_SUCCESS;
}

bool tee_ta_session_is_cancelled(struct tee_ta_session *s, TEE_Time *curr_time)
{
	TEE_Time current_time;

	if (s->cancel_mask)
		return false;

	if (s->cancel)
		return true;

	if (s->cancel_time.seconds == UINT32_MAX)
		return false;

	if (curr_time != NULL)
		current_time = *curr_time;
	else if (tee_time_get_sys_time(&current_time) != TEE_SUCCESS)
		return false;

	if (current_time.seconds > s->cancel_time.seconds ||
	    (current_time.seconds == s->cancel_time.seconds &&
	     current_time.millis >= s->cancel_time.millis)) {
		return true;
	}

	return false;
}

#if defined(CFG_TA_GPROF_SUPPORT)
void tee_ta_gprof_sample_pc(vaddr_t pc)
{
	struct ts_session *s = ts_get_current_session();
	struct user_ta_ctx *utc = NULL;
	struct sample_buf *sbuf = NULL;
	TEE_Result res = 0;
	size_t idx = 0;

	sbuf = s->sbuf;
	if (!sbuf || !sbuf->enabled)
		return; /* PC sampling is not enabled */

	idx = (((uint64_t)pc - sbuf->offset)/2 * sbuf->scale)/65536;
	if (idx < sbuf->nsamples) {
		utc = to_user_ta_ctx(s->ctx);
		res = tee_mmu_check_access_rights(&utc->uctx,
						  TEE_MEMORY_ACCESS_READ |
						  TEE_MEMORY_ACCESS_WRITE |
						  TEE_MEMORY_ACCESS_ANY_OWNER,
						  (uaddr_t)&sbuf->samples[idx],
						  sizeof(*sbuf->samples));
		if (res != TEE_SUCCESS)
			return;
		sbuf->samples[idx]++;
	}
	sbuf->count++;
}

static void gprof_update_session_utime(bool suspend, struct ts_session *s,
				       uint64_t now)
{
	struct sample_buf *sbuf = s->sbuf;

	if (!sbuf)
		return;

	if (suspend) {
		assert(sbuf->usr_entered);
		sbuf->usr += now - sbuf->usr_entered;
		sbuf->usr_entered = 0;
	} else {
		assert(!sbuf->usr_entered);
		if (!now)
			now++; /* 0 is reserved */
		sbuf->usr_entered = now;
	}
}

/*
 * Update user-mode CPU time for the current session
 * @suspend: true if session is being suspended (leaving user mode), false if
 * it is resumed (entering user mode)
 */
static void tee_ta_update_session_utime(bool suspend)
{
	struct ts_session *s = ts_get_current_session();
	uint64_t now = read_cntpct();

	gprof_update_session_utime(suspend, s, now);
}

void tee_ta_update_session_utime_suspend(void)
{
	tee_ta_update_session_utime(true);
}

void tee_ta_update_session_utime_resume(void)
{
	tee_ta_update_session_utime(false);
}
#endif

#if defined(CFG_FTRACE_SUPPORT)
static void ftrace_update_times(bool suspend)
{
	struct ts_session *s = ts_get_current_session();
	struct ftrace_buf *fbuf = NULL;
	uint64_t now = 0;
	uint32_t i = 0;

	now = read_cntpct();

	fbuf = s->fbuf;
	if (!fbuf)
		return;

	if (suspend) {
		fbuf->suspend_time = now;
	} else {
		for (i = 0; i <= fbuf->ret_idx; i++)
			fbuf->begin_time[i] += now - fbuf->suspend_time;
	}
}

void tee_ta_ftrace_update_times_suspend(void)
{
	ftrace_update_times(true);
}

void tee_ta_ftrace_update_times_resume(void)
{
	ftrace_update_times(false);
}
#endif