// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * Copyright (c) 2020, Arm Limited
 */

#include <assert.h>
#include <kernel/mutex.h>
#include <kernel/panic.h>
#include <kernel/pseudo_ta.h>
#include <kernel/stmm_sp.h>
#include <kernel/tee_common.h>
#include <kernel/tee_misc.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/tee_time.h>
#include <kernel/thread.h>
#include <kernel/user_mode_ctx.h>
#include <kernel/user_ta.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <mm/mobj.h>
#include <mm/vm.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <tee_api_types.h>
#include <tee/entry_std.h>
#include <tee/tee_obj.h>
#include <tee/tee_svc_cryp.h>
#include <tee/tee_svc_storage.h>
#include <trace.h>
#include <types_ext.h>
#include <user_ta_header.h>
#include <utee_types.h>
#include <util.h>

/* This mutex protects the critical section in tee_ta_init_session */
struct mutex tee_ta_mutex = MUTEX_INITIALIZER;
/* This condvar is used when waiting for a TA context to become initialized */
struct condvar tee_ta_init_cv = CONDVAR_INITIALIZER;
struct tee_ta_ctx_head tee_ctxes = TAILQ_HEAD_INITIALIZER(tee_ctxes);

#ifndef CFG_CONCURRENT_SINGLE_INSTANCE_TA
static struct condvar tee_ta_cv = CONDVAR_INITIALIZER;
static short int tee_ta_single_instance_thread = THREAD_ID_INVALID;
static size_t tee_ta_single_instance_count;
#endif

#ifdef CFG_CONCURRENT_SINGLE_INSTANCE_TA
static void lock_single_instance(void)
{
}

static void unlock_single_instance(void)
{
}

static bool has_single_instance_lock(void)
{
	return false;
}
#else
static void lock_single_instance(void)
{
	/* Requires tee_ta_mutex to be held */
	if (tee_ta_single_instance_thread != thread_get_id()) {
		/* Wait until the single-instance lock is available. */
		while (tee_ta_single_instance_thread != THREAD_ID_INVALID)
			condvar_wait(&tee_ta_cv, &tee_ta_mutex);

		tee_ta_single_instance_thread = thread_get_id();
		assert(tee_ta_single_instance_count == 0);
	}

	tee_ta_single_instance_count++;
}
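/*
 * Note that the single-instance lock is recursive per thread: a thread
 * that already owns it skips the wait loop above and only increments
 * tee_ta_single_instance_count, so nested lock/unlock pairs taken by the
 * same thread stay balanced instead of self-deadlocking.
 */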
static void unlock_single_instance(void)
{
	/* Requires tee_ta_mutex to be held */
	assert(tee_ta_single_instance_thread == thread_get_id());
	assert(tee_ta_single_instance_count > 0);

	tee_ta_single_instance_count--;
	if (tee_ta_single_instance_count == 0) {
		tee_ta_single_instance_thread = THREAD_ID_INVALID;
		condvar_signal(&tee_ta_cv);
	}
}

static bool has_single_instance_lock(void)
{
	/* Requires tee_ta_mutex to be held */
	return tee_ta_single_instance_thread == thread_get_id();
}
#endif

struct tee_ta_session *__noprof to_ta_session(struct ts_session *sess)
{
	assert(is_ta_ctx(sess->ctx) || is_stmm_ctx(sess->ctx));
	return container_of(sess, struct tee_ta_session, ts_sess);
}

static struct tee_ta_ctx *ts_to_ta_ctx(struct ts_ctx *ctx)
{
	if (is_ta_ctx(ctx))
		return to_ta_ctx(ctx);

	if (is_stmm_ctx(ctx))
		return &(to_stmm_ctx(ctx)->ta_ctx);

	panic("bad context");
}
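/*
 * Busy-state protocol: a TA context without TA_FLAG_CONCURRENT is entered
 * by at most one thread at a time. tee_ta_try_set_busy() fails (returns
 * false) only when the caller holds the single-instance lock while the
 * context is busy; waiting in that case could deadlock against the thread
 * that owns the busy flag, so the caller is expected to back off and
 * report TEE_ERROR_BUSY instead.
 */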
static bool tee_ta_try_set_busy(struct tee_ta_ctx *ctx)
{
	bool rc = true;

	if (ctx->flags & TA_FLAG_CONCURRENT)
		return true;

	mutex_lock(&tee_ta_mutex);

	if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
		lock_single_instance();

	if (has_single_instance_lock()) {
		if (ctx->busy) {
			/*
			 * We're holding the single-instance lock and the
			 * TA is busy. Waiting now would only cause a
			 * deadlock, so we release the lock and return
			 * false.
			 */
			rc = false;
			if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
				unlock_single_instance();
		}
	} else {
		/*
		 * We're not holding the single-instance lock, so we're
		 * free to wait for the TA to become available.
		 */
		while (ctx->busy)
			condvar_wait(&ctx->busy_cv, &tee_ta_mutex);
	}

	/* Either it's already true or we should set it to true */
	ctx->busy = true;

	mutex_unlock(&tee_ta_mutex);
	return rc;
}

static void tee_ta_set_busy(struct tee_ta_ctx *ctx)
{
	if (!tee_ta_try_set_busy(ctx))
		panic();
}

static void tee_ta_clear_busy(struct tee_ta_ctx *ctx)
{
	if (ctx->flags & TA_FLAG_CONCURRENT)
		return;

	mutex_lock(&tee_ta_mutex);

	assert(ctx->busy);
	ctx->busy = false;
	condvar_signal(&ctx->busy_cv);

	if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
		unlock_single_instance();

	mutex_unlock(&tee_ta_mutex);
}

static void dec_session_ref_count(struct tee_ta_session *s)
{
	assert(s->ref_count > 0);
	s->ref_count--;
	if (s->ref_count == 1)
		condvar_signal(&s->refc_cv);
}

void tee_ta_put_session(struct tee_ta_session *s)
{
	mutex_lock(&tee_ta_mutex);

	if (s->lock_thread == thread_get_id()) {
		s->lock_thread = THREAD_ID_INVALID;
		condvar_signal(&s->lock_cv);
	}
	dec_session_ref_count(s);

	mutex_unlock(&tee_ta_mutex);
}

static struct tee_ta_session *tee_ta_find_session_nolock(uint32_t id,
			struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *s = NULL;
	struct tee_ta_session *found = NULL;

	TAILQ_FOREACH(s, open_sessions, link) {
		if (s->id == id) {
			found = s;
			break;
		}
	}

	return found;
}

struct tee_ta_session *tee_ta_find_session(uint32_t id,
			struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *s = NULL;

	mutex_lock(&tee_ta_mutex);

	s = tee_ta_find_session_nolock(id, open_sessions);

	mutex_unlock(&tee_ta_mutex);

	return s;
}

struct tee_ta_session *tee_ta_get_session(uint32_t id, bool exclusive,
			struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *s;

	mutex_lock(&tee_ta_mutex);

	while (true) {
		s = tee_ta_find_session_nolock(id, open_sessions);
		if (!s)
			break;
		if (s->unlink) {
			s = NULL;
			break;
		}
		s->ref_count++;
		if (!exclusive)
			break;

		assert(s->lock_thread != thread_get_id());

		while (s->lock_thread != THREAD_ID_INVALID && !s->unlink)
			condvar_wait(&s->lock_cv, &tee_ta_mutex);

		if (s->unlink) {
			dec_session_ref_count(s);
			s = NULL;
			break;
		}

		s->lock_thread = thread_get_id();
		break;
	}

	mutex_unlock(&tee_ta_mutex);
	return s;
}

static void tee_ta_unlink_session(struct tee_ta_session *s,
			struct tee_ta_session_head *open_sessions)
{
	mutex_lock(&tee_ta_mutex);

	assert(s->ref_count >= 1);
	assert(s->lock_thread == thread_get_id());
	assert(!s->unlink);

	s->unlink = true;
	condvar_broadcast(&s->lock_cv);

	while (s->ref_count != 1)
		condvar_wait(&s->refc_cv, &tee_ta_mutex);

	TAILQ_REMOVE(open_sessions, s, link);

	mutex_unlock(&tee_ta_mutex);
}
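/*
 * Teardown handshake: the caller of tee_ta_unlink_session() holds the
 * session exclusively (lock_thread == current thread) and owns one
 * reference itself, so it waits for ref_count to drop to 1 rather than 0.
 * This is also why dec_session_ref_count() signals refc_cv exactly when
 * ref_count reaches 1.
 */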
static void destroy_session(struct tee_ta_session *s,
			    struct tee_ta_session_head *open_sessions)
{
#if defined(CFG_FTRACE_SUPPORT)
	if (s->ts_sess.ctx && s->ts_sess.ctx->ops->dump_ftrace) {
		ts_push_current_session(&s->ts_sess);
		s->ts_sess.fbuf = NULL;
		s->ts_sess.ctx->ops->dump_ftrace(s->ts_sess.ctx);
		ts_pop_current_session();
	}
#endif

	tee_ta_unlink_session(s, open_sessions);
#if defined(CFG_TA_GPROF_SUPPORT)
	free(s->ts_sess.sbuf);
#endif
	free(s);
}

static void destroy_context(struct tee_ta_ctx *ctx)
{
	DMSG("Destroy TA ctx (0x%" PRIxVA ")", (vaddr_t)ctx);

	condvar_destroy(&ctx->busy_cv);
	ctx->ts_ctx.ops->destroy(&ctx->ts_ctx);
}

static void destroy_ta_ctx_from_session(struct tee_ta_session *s)
{
	struct tee_ta_session *sess = NULL;
	struct tee_ta_session_head *open_sessions = NULL;
	struct tee_ta_ctx *ctx = NULL;
	struct user_ta_ctx *utc = NULL;
	struct ts_ctx *ts_ctx = s->ts_sess.ctx;
	size_t count = 1; /* start counting the references to the context */

	DMSG("Remove references to context (%#"PRIxVA")", (vaddr_t)ts_ctx);

	mutex_lock(&tee_ta_mutex);
	nsec_sessions_list_head(&open_sessions);

	/*
	 * The next two loops remove all references to the context which is
	 * about to be destroyed, except the one held by the current
	 * session. That one is removed later in this function, after the
	 * context has been properly destroyed.
	 */

	/*
	 * Scan the entire list of sessions opened by clients from the
	 * non-secure world.
	 */
	TAILQ_FOREACH(sess, open_sessions, link) {
		if (sess->ts_sess.ctx == ts_ctx && sess != s) {
			sess->ts_sess.ctx = NULL;
			count++;
		}
	}

	/*
	 * Scan all sessions opened from the secure side by searching
	 * through all available TA instances and, for each context,
	 * scanning all opened sessions.
	 */
	TAILQ_FOREACH(ctx, &tee_ctxes, link) {
		if (is_user_ta_ctx(&ctx->ts_ctx)) {
			utc = to_user_ta_ctx(&ctx->ts_ctx);

			TAILQ_FOREACH(sess, &utc->open_sessions, link) {
				if (sess->ts_sess.ctx == ts_ctx &&
				    sess != s) {
					sess->ts_sess.ctx = NULL;
					count++;
				}
			}
		}
	}

	ctx = ts_to_ta_ctx(ts_ctx);
	assert(count == ctx->ref_count);

	TAILQ_REMOVE(&tee_ctxes, ctx, link);
	mutex_unlock(&tee_ta_mutex);

	destroy_context(ctx);
	s->ts_sess.ctx = NULL;
}

/*
 * tee_ta_context_find - Find a TA context in the list of loaded TAs
 * based on a UUID (input). Returns a pointer to the context, or NULL if
 * not found.
 */
static struct tee_ta_ctx *tee_ta_context_find(const TEE_UUID *uuid)
{
	struct tee_ta_ctx *ctx;

	TAILQ_FOREACH(ctx, &tee_ctxes, link) {
		if (memcmp(&ctx->ts_ctx.uuid, uuid, sizeof(TEE_UUID)) == 0)
			return ctx;
	}

	return NULL;
}

/* Check if the requester (client ID) matches the session's initial client */
static TEE_Result check_client(struct tee_ta_session *s, const TEE_Identity *id)
{
	if (id == KERN_IDENTITY)
		return TEE_SUCCESS;

	if (id == NSAPP_IDENTITY) {
		if (s->clnt_id.login == TEE_LOGIN_TRUSTED_APP) {
			DMSG("nsec tries to hijack TA session");
			return TEE_ERROR_ACCESS_DENIED;
		}
		return TEE_SUCCESS;
	}

	if (memcmp(&s->clnt_id, id, sizeof(TEE_Identity)) != 0) {
		DMSG("client id mismatch");
		return TEE_ERROR_ACCESS_DENIED;
	}
	return TEE_SUCCESS;
}
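/*
 * Note: KERN_IDENTITY and NSAPP_IDENTITY are sentinel pointer values, so
 * check_client() compares them by address and never dereferences them. A
 * full memcmp() of the TEE_Identity is only done for real client
 * identities.
 */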
/*
 * Check if the invocation parameters match the TA properties
 *
 * @sess - current session handle
 * @param - already identified memory references hold a valid 'mobj'.
 *
 * Policy:
 * - All TAs can access 'non-secure' shared memory.
 * - All TAs can access TEE private memory (seccpy)
 * - Only SDP flagged TAs can accept SDP memory references.
 */
#ifndef CFG_SECURE_DATA_PATH
static bool check_params(struct tee_ta_session *sess __unused,
			 struct tee_ta_param *param __unused)
{
	/*
	 * When CFG_SECURE_DATA_PATH is not enabled, SDP memory references
	 * are rejected at OP-TEE core entry. Hence all TAs have the same
	 * permissions regarding memory reference parameters here.
	 */
	return true;
}
#else
static bool check_params(struct tee_ta_session *sess,
			 struct tee_ta_param *param)
{
	int n;

	/*
	 * When CFG_SECURE_DATA_PATH is enabled, OP-TEE entry allows SHM and
	 * SDP memory references. Only TAs flagged SDP can access SDP memory.
	 */
	if (sess->ts_sess.ctx &&
	    ts_to_ta_ctx(sess->ts_sess.ctx)->flags & TA_FLAG_SECURE_DATA_PATH)
		return true;

	for (n = 0; n < TEE_NUM_PARAMS; n++) {
		uint32_t param_type = TEE_PARAM_TYPE_GET(param->types, n);
		struct param_mem *mem = &param->u[n].mem;

		if (param_type != TEE_PARAM_TYPE_MEMREF_INPUT &&
		    param_type != TEE_PARAM_TYPE_MEMREF_OUTPUT &&
		    param_type != TEE_PARAM_TYPE_MEMREF_INOUT)
			continue;
		if (!mem->size)
			continue;
		if (mobj_is_sdp_mem(mem->mobj))
			return false;
	}
	return true;
}
#endif
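/*
 * Worked example for set_invoke_timeout() below: with cancel_req_to ==
 * 2500 (ms) and a current time of {seconds = 100, millis = 800}, we get
 * seconds = 100 + 2 = 102 and millis = 800 + 500 = 1300; the millisecond
 * overflow then carries into the seconds field, giving a cancel_time of
 * {seconds = 103, millis = 300}.
 */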
static void set_invoke_timeout(struct tee_ta_session *sess,
			       uint32_t cancel_req_to)
{
	TEE_Time current_time;
	TEE_Time cancel_time;

	if (cancel_req_to == TEE_TIMEOUT_INFINITE)
		goto infinite;

	if (tee_time_get_sys_time(&current_time) != TEE_SUCCESS)
		goto infinite;

	if (ADD_OVERFLOW(current_time.seconds, cancel_req_to / 1000,
			 &cancel_time.seconds))
		goto infinite;

	cancel_time.millis = current_time.millis + cancel_req_to % 1000;
	if (cancel_time.millis > 1000) {
		if (ADD_OVERFLOW(cancel_time.seconds, 1,
				 &cancel_time.seconds))
			goto infinite;

		cancel_time.millis -= 1000;
	}

	sess->cancel_time = cancel_time;
	return;

infinite:
	sess->cancel_time.seconds = UINT32_MAX;
	sess->cancel_time.millis = UINT32_MAX;
}

/*-----------------------------------------------------------------------------
 * Close a Trusted Application and free available resources
 *---------------------------------------------------------------------------*/
TEE_Result tee_ta_close_session(struct tee_ta_session *csess,
				struct tee_ta_session_head *open_sessions,
				const TEE_Identity *clnt_id)
{
	struct tee_ta_session *sess = NULL;
	struct tee_ta_ctx *ctx = NULL;
	struct ts_ctx *ts_ctx = NULL;
	bool keep_alive = false;

	DMSG("csess 0x%" PRIxVA " id %u",
	     (vaddr_t)csess, csess ? csess->id : UINT_MAX);

	if (!csess)
		return TEE_ERROR_ITEM_NOT_FOUND;

	sess = tee_ta_get_session(csess->id, true, open_sessions);

	if (!sess) {
		EMSG("session 0x%" PRIxVA " to be removed is not found",
		     (vaddr_t)csess);
		return TEE_ERROR_ITEM_NOT_FOUND;
	}

	if (check_client(sess, clnt_id) != TEE_SUCCESS) {
		tee_ta_put_session(sess);
		return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */
	}

	DMSG("Destroy session");

	ts_ctx = sess->ts_sess.ctx;
	if (!ts_ctx) {
		destroy_session(sess, open_sessions);
		return TEE_SUCCESS;
	}

	ctx = ts_to_ta_ctx(ts_ctx);
	if (ctx->panicked) {
		destroy_session(sess, open_sessions);
	} else {
		tee_ta_set_busy(ctx);
		set_invoke_timeout(sess, TEE_TIMEOUT_INFINITE);
		ts_ctx->ops->enter_close_session(&sess->ts_sess);
		destroy_session(sess, open_sessions);
		tee_ta_clear_busy(ctx);
	}

	mutex_lock(&tee_ta_mutex);

	if (ctx->ref_count <= 0)
		panic();

	ctx->ref_count--;
	keep_alive = (ctx->flags & TA_FLAG_INSTANCE_KEEP_ALIVE) &&
		     (ctx->flags & TA_FLAG_SINGLE_INSTANCE);
	if (!ctx->ref_count && !keep_alive) {
		TAILQ_REMOVE(&tee_ctxes, ctx, link);
		mutex_unlock(&tee_ta_mutex);

		destroy_context(ctx);
	} else
		mutex_unlock(&tee_ta_mutex);

	return TEE_SUCCESS;
}

static TEE_Result tee_ta_init_session_with_context(struct tee_ta_session *s,
						   const TEE_UUID *uuid)
{
	struct tee_ta_ctx *ctx = NULL;

	while (true) {
		ctx = tee_ta_context_find(uuid);
		if (!ctx)
			return TEE_ERROR_ITEM_NOT_FOUND;

		if (!is_user_ta_ctx(&ctx->ts_ctx) ||
		    !to_user_ta_ctx(&ctx->ts_ctx)->uctx.is_initializing)
			break;
		/*
		 * Context is still initializing, wait here until it's
		 * fully initialized. Note that we're searching for the
		 * context again since it may have been removed while we
		 * were sleeping.
		 */
		condvar_wait(&tee_ta_init_cv, &tee_ta_mutex);
	}

	/*
	 * If the TA isn't single instance it should be loaded as a new
	 * instance instead of doing anything with this instance. So tell
	 * the caller that we didn't find the TA, and the caller will load
	 * a new instance.
	 */
	if ((ctx->flags & TA_FLAG_SINGLE_INSTANCE) == 0)
		return TEE_ERROR_ITEM_NOT_FOUND;

	/*
	 * The TA is single instance; if it isn't multi session we can't
	 * create another session unless its reference count is zero.
	 */
	if (!(ctx->flags & TA_FLAG_MULTI_SESSION) && ctx->ref_count)
		return TEE_ERROR_BUSY;

	DMSG("Re-open TA %pUl", (void *)&ctx->ts_ctx.uuid);

	ctx->ref_count++;
	s->ts_sess.ctx = &ctx->ts_ctx;
	s->ts_sess.handle_svc = s->ts_sess.ctx->ops->handle_svc;
	return TEE_SUCCESS;
}
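/*
 * Session IDs are allocated starting from the last session's id + 1 (a
 * value unlikely to be in use) and wrap around, skipping 0 since 0 is
 * reserved as the invalid ID. The search below only returns 0 when every
 * other 32-bit ID is already taken.
 */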
static uint32_t new_session_id(struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *last = NULL;
	uint32_t saved = 0;
	uint32_t id = 1;

	last = TAILQ_LAST(open_sessions, tee_ta_session_head);
	if (last) {
		/* This value is less likely to be already used */
		id = last->id + 1;
		if (!id)
			id++; /* 0 is not valid */
	}

	saved = id;
	do {
		if (!tee_ta_find_session_nolock(id, open_sessions))
			return id;
		id++;
		if (!id)
			id++;
	} while (id != saved);

	return 0;
}

static TEE_Result tee_ta_init_session(TEE_ErrorOrigin *err,
				      struct tee_ta_session_head *open_sessions,
				      const TEE_UUID *uuid,
				      struct tee_ta_session **sess)
{
	TEE_Result res;
	struct tee_ta_session *s = calloc(1, sizeof(struct tee_ta_session));

	*err = TEE_ORIGIN_TEE;
	if (!s)
		return TEE_ERROR_OUT_OF_MEMORY;

	s->cancel_mask = true;
	condvar_init(&s->refc_cv);
	condvar_init(&s->lock_cv);
	s->lock_thread = THREAD_ID_INVALID;
	s->ref_count = 1;

	mutex_lock(&tee_ta_mutex);
	s->id = new_session_id(open_sessions);
	if (!s->id) {
		res = TEE_ERROR_OVERFLOW;
		goto err_mutex_unlock;
	}

	TAILQ_INSERT_TAIL(open_sessions, s, link);

	/* Look for an already loaded TA */
	res = tee_ta_init_session_with_context(s, uuid);
	mutex_unlock(&tee_ta_mutex);
	if (res == TEE_SUCCESS || res != TEE_ERROR_ITEM_NOT_FOUND)
		goto out;

	/* Look for a secure partition */
	res = stmm_init_session(uuid, s);
	if (res == TEE_SUCCESS || res != TEE_ERROR_ITEM_NOT_FOUND)
		goto out;

	/* Look for a pseudo TA */
	res = tee_ta_init_pseudo_ta_session(uuid, s);
	if (res == TEE_SUCCESS || res != TEE_ERROR_ITEM_NOT_FOUND)
		goto out;

	/* Look for a user TA */
	res = tee_ta_init_user_ta_session(uuid, s);

out:
	if (!res) {
		*sess = s;
		return TEE_SUCCESS;
	}

	mutex_lock(&tee_ta_mutex);
	TAILQ_REMOVE(open_sessions, s, link);
err_mutex_unlock:
	mutex_unlock(&tee_ta_mutex);
	free(s);
	return res;
}
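/*
 * Opening a session: tee_ta_init_session() above resolves the UUID in
 * order (already loaded single-instance TA, secure partition, pseudo TA,
 * user TA), then tee_ta_open_session() below validates the parameters,
 * marks the context busy and enters the TA. TEE_ERROR_BUSY from
 * tee_ta_try_set_busy() means entering now could deadlock; see the
 * busy-state protocol note earlier in this file.
 */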
TEE_Result tee_ta_open_session(TEE_ErrorOrigin *err,
			       struct tee_ta_session **sess,
			       struct tee_ta_session_head *open_sessions,
			       const TEE_UUID *uuid,
			       const TEE_Identity *clnt_id,
			       uint32_t cancel_req_to,
			       struct tee_ta_param *param)
{
	TEE_Result res = TEE_SUCCESS;
	struct tee_ta_session *s = NULL;
	struct tee_ta_ctx *ctx = NULL;
	struct ts_ctx *ts_ctx = NULL;
	bool panicked = false;
	bool was_busy = false;

	res = tee_ta_init_session(err, open_sessions, uuid, &s);
	if (res != TEE_SUCCESS) {
		DMSG("init session failed 0x%x", res);
		return res;
	}

	if (!check_params(s, param))
		return TEE_ERROR_BAD_PARAMETERS;

	ts_ctx = s->ts_sess.ctx;
	if (ts_ctx)
		ctx = ts_to_ta_ctx(ts_ctx);

	if (!ctx || ctx->panicked) {
		DMSG("panicked, call tee_ta_close_session()");
		tee_ta_close_session(s, open_sessions, KERN_IDENTITY);
		*err = TEE_ORIGIN_TEE;
		return TEE_ERROR_TARGET_DEAD;
	}

	*sess = s;
	/* Save the identity of the owner of the session */
	s->clnt_id = *clnt_id;

	if (tee_ta_try_set_busy(ctx)) {
		s->param = param;
		set_invoke_timeout(s, cancel_req_to);
		res = ts_ctx->ops->enter_open_session(&s->ts_sess);
		tee_ta_clear_busy(ctx);
	} else {
		/* Deadlock avoided */
		res = TEE_ERROR_BUSY;
		was_busy = true;
	}

	panicked = ctx->panicked;
	s->param = NULL;

	/*
	 * The error origin is TEE_ORIGIN_TRUSTED_APP for "regular" errors,
	 * apart from panicking.
	 */
	if (panicked || was_busy)
		*err = TEE_ORIGIN_TEE;
	else
		*err = s->err_origin;

	tee_ta_put_session(s);
	if (panicked || res != TEE_SUCCESS)
		tee_ta_close_session(s, open_sessions, KERN_IDENTITY);

	if (res != TEE_SUCCESS)
		EMSG("Failed. Return error 0x%x", res);

	return res;
}

TEE_Result tee_ta_invoke_command(TEE_ErrorOrigin *err,
				 struct tee_ta_session *sess,
				 const TEE_Identity *clnt_id,
				 uint32_t cancel_req_to, uint32_t cmd,
				 struct tee_ta_param *param)
{
	struct tee_ta_ctx *ta_ctx = NULL;
	struct ts_ctx *ts_ctx = NULL;
	TEE_Result res = TEE_SUCCESS;

	if (check_client(sess, clnt_id) != TEE_SUCCESS)
		return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */

	if (!check_params(sess, param))
		return TEE_ERROR_BAD_PARAMETERS;

	ts_ctx = sess->ts_sess.ctx;
	if (!ts_ctx) {
		/* The context has already been destroyed */
		*err = TEE_ORIGIN_TEE;
		return TEE_ERROR_TARGET_DEAD;
	}

	ta_ctx = ts_to_ta_ctx(ts_ctx);
	if (ta_ctx->panicked) {
		DMSG("Panicked !");
		destroy_ta_ctx_from_session(sess);
		*err = TEE_ORIGIN_TEE;
		return TEE_ERROR_TARGET_DEAD;
	}

	tee_ta_set_busy(ta_ctx);

	sess->param = param;
	set_invoke_timeout(sess, cancel_req_to);
	res = ts_ctx->ops->enter_invoke_cmd(&sess->ts_sess, cmd);

	sess->param = NULL;
	tee_ta_clear_busy(ta_ctx);

	if (ta_ctx->panicked) {
		destroy_ta_ctx_from_session(sess);
		*err = TEE_ORIGIN_TEE;
		return TEE_ERROR_TARGET_DEAD;
	}

	*err = sess->err_origin;

	/* A short buffer is not an effective error case */
	if (res != TEE_SUCCESS && res != TEE_ERROR_SHORT_BUFFER)
		DMSG("Error: %x of %d", res, *err);

	return res;
}

TEE_Result tee_ta_cancel_command(TEE_ErrorOrigin *err,
				 struct tee_ta_session *sess,
				 const TEE_Identity *clnt_id)
{
	*err = TEE_ORIGIN_TEE;

	if (check_client(sess, clnt_id) != TEE_SUCCESS)
		return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */

	sess->cancel = true;
	return TEE_SUCCESS;
}

bool tee_ta_session_is_cancelled(struct tee_ta_session *s, TEE_Time *curr_time)
{
	TEE_Time current_time;

	if (s->cancel_mask)
		return false;

	if (s->cancel)
		return true;

	if (s->cancel_time.seconds == UINT32_MAX)
		return false;

	if (curr_time != NULL)
		current_time = *curr_time;
	else if (tee_time_get_sys_time(&current_time) != TEE_SUCCESS)
		return false;

	if (current_time.seconds > s->cancel_time.seconds ||
	    (current_time.seconds == s->cancel_time.seconds &&
	     current_time.millis >= s->cancel_time.millis)) {
		return true;
	}

	return false;
}
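/*
 * PC sampling below follows the classic gmon/gprof histogram convention:
 * sbuf->offset is the lowest sampled PC, PC offsets are taken at
 * half-word granularity (the division by 2), and sbuf->scale is presumed
 * to be a 16.16 fixed-point ratio mapping those offsets onto the
 * nsamples histogram slots (hence the division by 65536).
 */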
#if defined(CFG_TA_GPROF_SUPPORT)
void tee_ta_gprof_sample_pc(vaddr_t pc)
{
	struct ts_session *s = ts_get_current_session();
	struct user_ta_ctx *utc = NULL;
	struct sample_buf *sbuf = NULL;
	TEE_Result res = 0;
	size_t idx = 0;

	sbuf = s->sbuf;
	if (!sbuf || !sbuf->enabled)
		return; /* PC sampling is not enabled */

	idx = (((uint64_t)pc - sbuf->offset) / 2 * sbuf->scale) / 65536;
	if (idx < sbuf->nsamples) {
		utc = to_user_ta_ctx(s->ctx);
		res = vm_check_access_rights(&utc->uctx,
					     TEE_MEMORY_ACCESS_READ |
					     TEE_MEMORY_ACCESS_WRITE |
					     TEE_MEMORY_ACCESS_ANY_OWNER,
					     (uaddr_t)&sbuf->samples[idx],
					     sizeof(*sbuf->samples));
		if (res != TEE_SUCCESS)
			return;
		sbuf->samples[idx]++;
	}
	sbuf->count++;
}

static void gprof_update_session_utime(bool suspend, struct ts_session *s,
				       uint64_t now)
{
	struct sample_buf *sbuf = s->sbuf;

	if (!sbuf)
		return;

	if (suspend) {
		assert(sbuf->usr_entered);
		sbuf->usr += now - sbuf->usr_entered;
		sbuf->usr_entered = 0;
	} else {
		assert(!sbuf->usr_entered);
		if (!now)
			now++; /* 0 is reserved */
		sbuf->usr_entered = now;
	}
}

/*
 * Update user-mode CPU time for the current session
 * @suspend: true if the session is being suspended (leaving user mode),
 * false if it is resumed (entering user mode)
 */
static void tee_ta_update_session_utime(bool suspend)
{
	struct ts_session *s = ts_get_current_session();
	uint64_t now = barrier_read_counter_timer();

	gprof_update_session_utime(suspend, s, now);
}

void tee_ta_update_session_utime_suspend(void)
{
	tee_ta_update_session_utime(true);
}

void tee_ta_update_session_utime_resume(void)
{
	tee_ta_update_session_utime(false);
}
#endif

#if defined(CFG_FTRACE_SUPPORT)
static void ftrace_update_times(bool suspend)
{
	struct ts_session *s = ts_get_current_session_may_fail();
	struct ftrace_buf *fbuf = NULL;
	uint64_t now = 0;
	uint32_t i = 0;

	if (!s)
		return;

	now = barrier_read_counter_timer();

	fbuf = s->fbuf;
	if (!fbuf)
		return;

	if (suspend) {
		fbuf->suspend_time = now;
	} else {
		for (i = 0; i <= fbuf->ret_idx; i++)
			fbuf->begin_time[i] += now - fbuf->suspend_time;
	}
}

void tee_ta_ftrace_update_times_suspend(void)
{
	ftrace_update_times(true);
}

void tee_ta_ftrace_update_times_resume(void)
{
	ftrace_update_times(false);
}
#endif

bool is_ta_ctx(struct ts_ctx *ctx)
{
	return is_user_ta_ctx(ctx) || is_pseudo_ta_ctx(ctx);
}