// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * Copyright (c) 2020, Arm Limited
 */

#include <assert.h>
#include <kernel/mutex.h>
#include <kernel/panic.h>
#include <kernel/pseudo_ta.h>
#include <kernel/stmm_sp.h>
#include <kernel/tee_common.h>
#include <kernel/tee_misc.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/tee_time.h>
#include <kernel/thread.h>
#include <kernel/user_mode_ctx.h>
#include <kernel/user_ta.h>
#include <malloc.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <mm/mobj.h>
#include <mm/vm.h>
#include <pta_stats.h>
#include <stdlib.h>
#include <string.h>
#include <tee_api_types.h>
#include <tee/entry_std.h>
#include <tee/tee_obj.h>
#include <trace.h>
#include <types_ext.h>
#include <user_ta_header.h>
#include <utee_types.h>
#include <util.h>

#if defined(CFG_TA_STATS)
#define MAX_DUMP_SESS_NUM	(16)

struct tee_ta_dump_ctx {
	TEE_UUID uuid;
	uint32_t panicked;
	bool is_user_ta;
	uint32_t sess_num;
	uint32_t sess_id[MAX_DUMP_SESS_NUM];
};
#endif

/* This mutex protects the critical section in tee_ta_init_session */
struct mutex tee_ta_mutex = MUTEX_INITIALIZER;
/* This condvar is used when waiting for a TA context to become initialized */
struct condvar tee_ta_init_cv = CONDVAR_INITIALIZER;
struct tee_ta_ctx_head tee_ctxes = TAILQ_HEAD_INITIALIZER(tee_ctxes);

#ifndef CFG_CONCURRENT_SINGLE_INSTANCE_TA
static struct condvar tee_ta_cv = CONDVAR_INITIALIZER;
static short int tee_ta_single_instance_thread = THREAD_ID_INVALID;
static size_t tee_ta_single_instance_count;
#endif

#ifdef CFG_CONCURRENT_SINGLE_INSTANCE_TA
static void lock_single_instance(void)
{
}

static void unlock_single_instance(void)
{
}

static bool has_single_instance_lock(void)
{
	return false;
}
#else
static void lock_single_instance(void)
{
	/* Requires tee_ta_mutex to be held */
	if (tee_ta_single_instance_thread != thread_get_id()) {
		/* Wait until the single-instance lock is available. */
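		/*
		 * Note: condvar_wait() releases tee_ta_mutex while
		 * sleeping and re-acquires it before returning, so the
		 * condition below must be re-checked in a loop.
		 */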
		while (tee_ta_single_instance_thread != THREAD_ID_INVALID)
			condvar_wait(&tee_ta_cv, &tee_ta_mutex);

		tee_ta_single_instance_thread = thread_get_id();
		assert(tee_ta_single_instance_count == 0);
	}

	tee_ta_single_instance_count++;
}

static void unlock_single_instance(void)
{
	/* Requires tee_ta_mutex to be held */
	assert(tee_ta_single_instance_thread == thread_get_id());
	assert(tee_ta_single_instance_count > 0);

	tee_ta_single_instance_count--;
	if (tee_ta_single_instance_count == 0) {
		tee_ta_single_instance_thread = THREAD_ID_INVALID;
		condvar_signal(&tee_ta_cv);
	}
}

static bool has_single_instance_lock(void)
{
	/* Requires tee_ta_mutex to be held */
	return tee_ta_single_instance_thread == thread_get_id();
}
#endif

struct tee_ta_session *__noprof to_ta_session(struct ts_session *sess)
{
	assert(is_ta_ctx(sess->ctx) || is_stmm_ctx(sess->ctx));
	return container_of(sess, struct tee_ta_session, ts_sess);
}

static struct tee_ta_ctx *ts_to_ta_ctx(struct ts_ctx *ctx)
{
	if (is_ta_ctx(ctx))
		return to_ta_ctx(ctx);

	if (is_stmm_ctx(ctx))
		return &(to_stmm_ctx(ctx)->ta_ctx);

	panic("bad context");
}

static bool tee_ta_try_set_busy(struct tee_ta_ctx *ctx)
{
	bool rc = true;

	if (ctx->flags & TA_FLAG_CONCURRENT)
		return true;

	mutex_lock(&tee_ta_mutex);

	if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
		lock_single_instance();

	if (has_single_instance_lock()) {
		if (ctx->busy) {
			/*
			 * We're holding the single-instance lock and the
			 * TA is busy. Since waiting now would only cause a
			 * deadlock, we release the lock and return false.
			 */
			rc = false;
			if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
				unlock_single_instance();
		}
	} else {
		/*
		 * We're not holding the single-instance lock, so we're
		 * free to wait for the TA to become available.
		 */
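		/* tee_ta_clear_busy() signals busy_cv when the TA is freed */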
		while (ctx->busy)
			condvar_wait(&ctx->busy_cv, &tee_ta_mutex);
	}

	/* Either it's already true or we should set it to true */
	ctx->busy = true;

	mutex_unlock(&tee_ta_mutex);
	return rc;
}

static void tee_ta_set_busy(struct tee_ta_ctx *ctx)
{
	if (!tee_ta_try_set_busy(ctx))
		panic();
}

static void tee_ta_clear_busy(struct tee_ta_ctx *ctx)
{
	if (ctx->flags & TA_FLAG_CONCURRENT)
		return;

	mutex_lock(&tee_ta_mutex);

	assert(ctx->busy);
	ctx->busy = false;
	condvar_signal(&ctx->busy_cv);

	if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
		unlock_single_instance();

	mutex_unlock(&tee_ta_mutex);
}

static void dec_session_ref_count(struct tee_ta_session *s)
{
	assert(s->ref_count > 0);
	s->ref_count--;
	if (s->ref_count == 1)
		condvar_signal(&s->refc_cv);
}

void tee_ta_put_session(struct tee_ta_session *s)
{
	mutex_lock(&tee_ta_mutex);

	if (s->lock_thread == thread_get_id()) {
		s->lock_thread = THREAD_ID_INVALID;
		condvar_signal(&s->lock_cv);
	}
	dec_session_ref_count(s);

	mutex_unlock(&tee_ta_mutex);
}

static struct tee_ta_session *tee_ta_find_session_nolock(uint32_t id,
			struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *s = NULL;
	struct tee_ta_session *found = NULL;

	TAILQ_FOREACH(s, open_sessions, link) {
		if (s->id == id) {
			found = s;
			break;
		}
	}

	return found;
}

struct tee_ta_session *tee_ta_find_session(uint32_t id,
			struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *s = NULL;

	mutex_lock(&tee_ta_mutex);

	s = tee_ta_find_session_nolock(id, open_sessions);

	mutex_unlock(&tee_ta_mutex);

	return s;
}

struct tee_ta_session *tee_ta_get_session(uint32_t id, bool exclusive,
			struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *s;

	mutex_lock(&tee_ta_mutex);

	while (true) {
		s = tee_ta_find_session_nolock(id, open_sessions);
		if (!s)
			break;
		if (s->unlink) {
			s = NULL;
			break;
		}
		s->ref_count++;
		if (!exclusive)
			break;

		assert(s->lock_thread != thread_get_id());

		while (s->lock_thread != THREAD_ID_INVALID && !s->unlink)
			condvar_wait(&s->lock_cv, &tee_ta_mutex);

		if (s->unlink) {
			dec_session_ref_count(s);
			s = NULL;
			break;
		}

		s->lock_thread = thread_get_id();
		break;
	}

	mutex_unlock(&tee_ta_mutex);
	return s;
}

static void tee_ta_unlink_session(struct tee_ta_session *s,
			struct tee_ta_session_head *open_sessions)
{
	mutex_lock(&tee_ta_mutex);

	assert(s->ref_count >= 1);
	assert(s->lock_thread == thread_get_id());
	assert(!s->unlink);

	s->unlink = true;
	condvar_broadcast(&s->lock_cv);

	while (s->ref_count != 1)
		condvar_wait(&s->refc_cv, &tee_ta_mutex);

	TAILQ_REMOVE(open_sessions, s, link);

	mutex_unlock(&tee_ta_mutex);
}

static void destroy_session(struct tee_ta_session *s,
			    struct tee_ta_session_head *open_sessions)
{
#if defined(CFG_FTRACE_SUPPORT)
	if (s->ts_sess.ctx && s->ts_sess.ctx->ops->dump_ftrace) {
		ts_push_current_session(&s->ts_sess);
		s->ts_sess.fbuf = NULL;
		s->ts_sess.ctx->ops->dump_ftrace(s->ts_sess.ctx);
		ts_pop_current_session();
	}
#endif

	tee_ta_unlink_session(s, open_sessions);
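	/* Free the gprof sample buffer, if one was allocated for the session */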
#if defined(CFG_TA_GPROF_SUPPORT)
	free(s->ts_sess.sbuf);
#endif
	free(s);
}

static void destroy_context(struct tee_ta_ctx *ctx)
{
	DMSG("Destroy TA ctx (0x%" PRIxVA ")", (vaddr_t)ctx);

	condvar_destroy(&ctx->busy_cv);
	ctx->ts_ctx.ops->destroy(&ctx->ts_ctx);
}

/*
 * tee_ta_context_find - Find a TA context in the list of loaded TAs,
 * based on a UUID (input). Returns a pointer to the TA context.
 */
static struct tee_ta_ctx *tee_ta_context_find(const TEE_UUID *uuid)
{
	struct tee_ta_ctx *ctx;

	TAILQ_FOREACH(ctx, &tee_ctxes, link) {
		if (memcmp(&ctx->ts_ctx.uuid, uuid, sizeof(TEE_UUID)) == 0)
			return ctx;
	}

	return NULL;
}

/* Check if the requester (client ID) matches the session's initial client */
static TEE_Result check_client(struct tee_ta_session *s, const TEE_Identity *id)
{
	if (id == KERN_IDENTITY)
		return TEE_SUCCESS;

	if (id == NSAPP_IDENTITY) {
		if (s->clnt_id.login == TEE_LOGIN_TRUSTED_APP) {
			DMSG("nsec tries to hijack TA session");
			return TEE_ERROR_ACCESS_DENIED;
		}
		return TEE_SUCCESS;
	}

	if (memcmp(&s->clnt_id, id, sizeof(TEE_Identity)) != 0) {
		DMSG("client id mismatch");
		return TEE_ERROR_ACCESS_DENIED;
	}
	return TEE_SUCCESS;
}

/*
 * Check if invocation parameters match the TA properties
 *
 * @s - current session handle
 * @param - already identified memory references hold a valid 'mobj'.
 *
 * Policy:
 * - All TAs can access 'non-secure' shared memory.
 * - All TAs can access TEE private memory (seccpy)
 * - Only SDP flagged TAs can accept SDP memory references.
 */
#ifndef CFG_SECURE_DATA_PATH
static bool check_params(struct tee_ta_session *sess __unused,
			 struct tee_ta_param *param __unused)
{
	/*
	 * When CFG_SECURE_DATA_PATH is not enabled, SDP memory references
	 * are rejected at OP-TEE core entry. Hence all TAs have the same
	 * permissions regarding memory reference parameters.
	 */
	return true;
}
#else
static bool check_params(struct tee_ta_session *sess,
			 struct tee_ta_param *param)
{
	int n;

	/*
	 * When CFG_SECURE_DATA_PATH is enabled, OP-TEE entry allows SHM and
	 * SDP memory references. Only TAs flagged SDP can access SDP memory.
	 */
	if (sess->ts_sess.ctx &&
	    ts_to_ta_ctx(sess->ts_sess.ctx)->flags & TA_FLAG_SECURE_DATA_PATH)
		return true;

	for (n = 0; n < TEE_NUM_PARAMS; n++) {
		uint32_t param_type = TEE_PARAM_TYPE_GET(param->types, n);
		struct param_mem *mem = &param->u[n].mem;

		if (param_type != TEE_PARAM_TYPE_MEMREF_INPUT &&
		    param_type != TEE_PARAM_TYPE_MEMREF_OUTPUT &&
		    param_type != TEE_PARAM_TYPE_MEMREF_INOUT)
			continue;
		if (!mem->size)
			continue;
		if (mobj_is_sdp_mem(mem->mobj))
			return false;
	}
	return true;
}
#endif
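/*
 * Compute the absolute system time at which an invocation is considered
 * cancelled: current time plus @cancel_req_to milliseconds.
 * TEE_TIMEOUT_INFINITE, a failed system time read and arithmetic
 * overflow all degrade to "never times out" (UINT32_MAX).
 */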
static void set_invoke_timeout(struct tee_ta_session *sess,
			       uint32_t cancel_req_to)
{
	TEE_Time current_time;
	TEE_Time cancel_time;

	if (cancel_req_to == TEE_TIMEOUT_INFINITE)
		goto infinite;

	if (tee_time_get_sys_time(&current_time) != TEE_SUCCESS)
		goto infinite;

	if (ADD_OVERFLOW(current_time.seconds, cancel_req_to / 1000,
			 &cancel_time.seconds))
		goto infinite;

	cancel_time.millis = current_time.millis + cancel_req_to % 1000;
	if (cancel_time.millis >= 1000) {
		/* Carry the extra second, still guarding against overflow */
		if (ADD_OVERFLOW(cancel_time.seconds, 1,
				 &cancel_time.seconds))
			goto infinite;

		cancel_time.millis -= 1000;
	}

	sess->cancel_time = cancel_time;
	return;

infinite:
	sess->cancel_time.seconds = UINT32_MAX;
	sess->cancel_time.millis = UINT32_MAX;
}

/*-----------------------------------------------------------------------------
 * Close a Trusted Application and free available resources
 *---------------------------------------------------------------------------*/
TEE_Result tee_ta_close_session(struct tee_ta_session *csess,
				struct tee_ta_session_head *open_sessions,
				const TEE_Identity *clnt_id)
{
	struct tee_ta_session *sess = NULL;
	struct tee_ta_ctx *ctx = NULL;
	struct ts_ctx *ts_ctx = NULL;
	bool keep_alive = false;

	DMSG("csess 0x%" PRIxVA " id %u",
	     (vaddr_t)csess, csess ? csess->id : UINT_MAX);

	if (!csess)
		return TEE_ERROR_ITEM_NOT_FOUND;

	sess = tee_ta_get_session(csess->id, true, open_sessions);

	if (!sess) {
		EMSG("session 0x%" PRIxVA " to be removed is not found",
		     (vaddr_t)csess);
		return TEE_ERROR_ITEM_NOT_FOUND;
	}

	if (check_client(sess, clnt_id) != TEE_SUCCESS) {
		tee_ta_put_session(sess);
		return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */
	}

	DMSG("Destroy session");

	ts_ctx = sess->ts_sess.ctx;
	if (!ts_ctx) {
		destroy_session(sess, open_sessions);
		return TEE_SUCCESS;
	}

	ctx = ts_to_ta_ctx(ts_ctx);
	if (ctx->panicked) {
		destroy_session(sess, open_sessions);
	} else {
		tee_ta_set_busy(ctx);
		set_invoke_timeout(sess, TEE_TIMEOUT_INFINITE);
		ts_ctx->ops->enter_close_session(&sess->ts_sess);
		destroy_session(sess, open_sessions);
		tee_ta_clear_busy(ctx);
	}
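	/*
	 * Drop the context reference taken when the session was opened.
	 * The context itself is destroyed once no sessions remain, unless
	 * the TA is a single-instance keep-alive TA that hasn't panicked.
	 */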
	mutex_lock(&tee_ta_mutex);

	if (ctx->ref_count <= 0)
		panic();

	ctx->ref_count--;
	keep_alive = (ctx->flags & TA_FLAG_INSTANCE_KEEP_ALIVE) &&
		     (ctx->flags & TA_FLAG_SINGLE_INSTANCE);
	if (!ctx->ref_count && (ctx->panicked || !keep_alive)) {
		if (!ctx->is_releasing) {
			TAILQ_REMOVE(&tee_ctxes, ctx, link);
			ctx->is_releasing = true;
		}
		mutex_unlock(&tee_ta_mutex);

		destroy_context(ctx);
	} else
		mutex_unlock(&tee_ta_mutex);

	return TEE_SUCCESS;
}
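/*
 * Try to attach the new session to an already loaded TA instance.
 * Called with tee_ta_mutex held. Returns TEE_ERROR_ITEM_NOT_FOUND
 * when no (usable) instance exists, so that the caller loads a new one.
 */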
static TEE_Result tee_ta_init_session_with_context(struct tee_ta_session *s,
						   const TEE_UUID *uuid)
{
	struct tee_ta_ctx *ctx = NULL;

	while (true) {
		ctx = tee_ta_context_find(uuid);
		if (!ctx)
			return TEE_ERROR_ITEM_NOT_FOUND;

		if (!ctx->is_initializing)
			break;
		/*
		 * Context is still initializing, wait here until it's
		 * fully initialized. Note that we're searching for the
		 * context again since it may have been removed while we
		 * were sleeping.
		 */
		condvar_wait(&tee_ta_init_cv, &tee_ta_mutex);
	}

	/*
	 * If the trusted service is not a single-instance service (e.g. is
	 * a multi-instance TA) it should be loaded as a new instance instead
	 * of doing anything with this instance. So tell the caller that we
	 * didn't find the TA, so that the caller will load a new instance.
	 */
	if ((ctx->flags & TA_FLAG_SINGLE_INSTANCE) == 0)
		return TEE_ERROR_ITEM_NOT_FOUND;

	/*
	 * The trusted service is single-instance; if it isn't multi-session
	 * we can't create another session unless its reference count is zero.
	 */
	if (!(ctx->flags & TA_FLAG_MULTI_SESSION) && ctx->ref_count)
		return TEE_ERROR_BUSY;

	DMSG("Re-open trusted service %pUl", (void *)&ctx->ts_ctx.uuid);

	ctx->ref_count++;
	s->ts_sess.ctx = &ctx->ts_ctx;
	s->ts_sess.handle_scall = s->ts_sess.ctx->ops->handle_scall;
	return TEE_SUCCESS;
}

static uint32_t new_session_id(struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *last = NULL;
	uint32_t saved = 0;
	uint32_t id = 1;

	last = TAILQ_LAST(open_sessions, tee_ta_session_head);
	if (last) {
		/* This value is less likely to be already used */
		id = last->id + 1;
		if (!id)
			id++;	/* 0 is not valid */
	}

	saved = id;
	do {
		if (!tee_ta_find_session_nolock(id, open_sessions))
			return id;
		id++;
		if (!id)
			id++;
	} while (id != saved);

	return 0;
}

static TEE_Result tee_ta_init_session(TEE_ErrorOrigin *err,
				      struct tee_ta_session_head *open_sessions,
				      const TEE_UUID *uuid,
				      struct tee_ta_session **sess)
{
	TEE_Result res;
	struct tee_ta_session *s = calloc(1, sizeof(struct tee_ta_session));

	*err = TEE_ORIGIN_TEE;
	if (!s)
		return TEE_ERROR_OUT_OF_MEMORY;

	s->cancel_mask = true;
	condvar_init(&s->refc_cv);
	condvar_init(&s->lock_cv);
	s->lock_thread = THREAD_ID_INVALID;
	s->ref_count = 1;

	mutex_lock(&tee_ta_mutex);
	s->id = new_session_id(open_sessions);
	if (!s->id) {
		res = TEE_ERROR_OVERFLOW;
		goto err_mutex_unlock;
	}

	TAILQ_INSERT_TAIL(open_sessions, s, link);

	/* Look for an already loaded TA */
	res = tee_ta_init_session_with_context(s, uuid);
	if (res == TEE_SUCCESS || res != TEE_ERROR_ITEM_NOT_FOUND) {
		mutex_unlock(&tee_ta_mutex);
		goto out;
	}

	/* Look for a secure partition */
	res = stmm_init_session(uuid, s);
	if (res == TEE_SUCCESS || res != TEE_ERROR_ITEM_NOT_FOUND) {
		mutex_unlock(&tee_ta_mutex);
		if (res == TEE_SUCCESS)
			res = stmm_complete_session(s);

		goto out;
	}

	/* Look for a pseudo TA */
	res = tee_ta_init_pseudo_ta_session(uuid, s);
	if (res == TEE_SUCCESS || res != TEE_ERROR_ITEM_NOT_FOUND) {
		mutex_unlock(&tee_ta_mutex);
		goto out;
	}

	/* Look for a user TA */
	res = tee_ta_init_user_ta_session(uuid, s);
	mutex_unlock(&tee_ta_mutex);
	if (res == TEE_SUCCESS)
		res = tee_ta_complete_user_ta_session(s);

out:
	if (!res) {
		*sess = s;
		return TEE_SUCCESS;
	}

	mutex_lock(&tee_ta_mutex);
	TAILQ_REMOVE(open_sessions, s, link);
err_mutex_unlock:
	mutex_unlock(&tee_ta_mutex);
	free(s);
	return res;
}

static void release_ta_ctx(struct tee_ta_ctx *ctx)
{
	bool was_releasing = false;

	mutex_lock(&tee_ta_mutex);
	was_releasing = ctx->is_releasing;
	ctx->is_releasing = true;
	if (!was_releasing) {
		DMSG("Releasing panicked TA ctx");
		TAILQ_REMOVE(&tee_ctxes, ctx, link);
	}
	mutex_unlock(&tee_ta_mutex);

	if (!was_releasing)
		ctx->ts_ctx.ops->release_state(&ctx->ts_ctx);
}
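/*
 * Open a session towards the TA identified by @uuid, loading a new
 * instance first if needed. A panicked TA yields TEE_ERROR_TARGET_DEAD
 * and releases the context; TEE_ERROR_BUSY signals the deadlock
 * avoidance bailout for a busy single-instance TA.
 */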
TEE_Result tee_ta_open_session(TEE_ErrorOrigin *err,
			       struct tee_ta_session **sess,
			       struct tee_ta_session_head *open_sessions,
			       const TEE_UUID *uuid,
			       const TEE_Identity *clnt_id,
			       uint32_t cancel_req_to,
			       struct tee_ta_param *param)
{
	TEE_Result res = TEE_SUCCESS;
	struct tee_ta_session *s = NULL;
	struct tee_ta_ctx *ctx = NULL;
	struct ts_ctx *ts_ctx = NULL;
	bool panicked = false;
	bool was_busy = false;

	res = tee_ta_init_session(err, open_sessions, uuid, &s);
	if (res != TEE_SUCCESS) {
		DMSG("init session failed 0x%x", res);
		return res;
	}

	if (!check_params(s, param))
		return TEE_ERROR_BAD_PARAMETERS;

	ts_ctx = s->ts_sess.ctx;
	ctx = ts_to_ta_ctx(ts_ctx);

	if (tee_ta_try_set_busy(ctx)) {
		if (!ctx->panicked) {
			/* Save the identity of the owner of the session */
			s->clnt_id = *clnt_id;
			s->param = param;
			set_invoke_timeout(s, cancel_req_to);
			res = ts_ctx->ops->enter_open_session(&s->ts_sess);
			s->param = NULL;
		}

		panicked = ctx->panicked;
		if (panicked) {
			release_ta_ctx(ctx);
			res = TEE_ERROR_TARGET_DEAD;
		}

		tee_ta_clear_busy(ctx);
	} else {
		/* Deadlock avoided */
		res = TEE_ERROR_BUSY;
		was_busy = true;
	}

	/*
	 * The error origin is TEE_ORIGIN_TRUSTED_APP for "regular" errors;
	 * panic and busy are reported with TEE_ORIGIN_TEE.
	 */
	if (panicked || was_busy)
		*err = TEE_ORIGIN_TEE;
	else
		*err = s->err_origin;

	tee_ta_put_session(s);
	if (panicked || res != TEE_SUCCESS)
		tee_ta_close_session(s, open_sessions, KERN_IDENTITY);

	if (!res)
		*sess = s;
	else
		EMSG("Failed for TA %pUl. Return error %#"PRIx32, uuid, res);

	return res;
}
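/*
 * Invoke command @cmd in an existing session. The requesting client
 * identity must match the one that opened the session; a TA panic is
 * reported as TEE_ERROR_TARGET_DEAD and releases the TA context.
 */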
TEE_Result tee_ta_invoke_command(TEE_ErrorOrigin *err,
				 struct tee_ta_session *sess,
				 const TEE_Identity *clnt_id,
				 uint32_t cancel_req_to, uint32_t cmd,
				 struct tee_ta_param *param)
{
	struct tee_ta_ctx *ta_ctx = NULL;
	struct ts_ctx *ts_ctx = NULL;
	TEE_Result res = TEE_SUCCESS;
	bool panicked = false;

	if (check_client(sess, clnt_id) != TEE_SUCCESS)
		return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */

	if (!check_params(sess, param))
		return TEE_ERROR_BAD_PARAMETERS;

	ts_ctx = sess->ts_sess.ctx;
	ta_ctx = ts_to_ta_ctx(ts_ctx);

	tee_ta_set_busy(ta_ctx);

	if (!ta_ctx->panicked) {
		sess->param = param;
		set_invoke_timeout(sess, cancel_req_to);
		res = ts_ctx->ops->enter_invoke_cmd(&sess->ts_sess, cmd);
		sess->param = NULL;
	}

	panicked = ta_ctx->panicked;
	if (panicked) {
		release_ta_ctx(ta_ctx);
		res = TEE_ERROR_TARGET_DEAD;
	}

	tee_ta_clear_busy(ta_ctx);

	/*
	 * The error origin is TEE_ORIGIN_TRUSTED_APP for "regular" errors;
	 * a panic is reported with TEE_ORIGIN_TEE.
	 */
	if (panicked)
		*err = TEE_ORIGIN_TEE;
	else
		*err = sess->err_origin;

	/* A short buffer is not an effective error case */
	if (res != TEE_SUCCESS && res != TEE_ERROR_SHORT_BUFFER)
		DMSG("Error: %x of %d", res, *err);

	return res;
}

#if defined(CFG_TA_STATS)
static TEE_Result dump_ta_memstats(struct tee_ta_session *s,
				   struct tee_ta_param *param)
{
	TEE_Result res = TEE_SUCCESS;
	struct tee_ta_ctx *ctx = NULL;
	struct ts_ctx *ts_ctx = NULL;
	bool panicked = false;

	ts_ctx = s->ts_sess.ctx;
	if (!ts_ctx)
		return TEE_ERROR_ITEM_NOT_FOUND;

	ctx = ts_to_ta_ctx(ts_ctx);

	if (ctx->is_initializing)
		return TEE_ERROR_BAD_STATE;

	if (tee_ta_try_set_busy(ctx)) {
		if (!ctx->panicked) {
			s->param = param;
			set_invoke_timeout(s, TEE_TIMEOUT_INFINITE);
			res = ts_ctx->ops->dump_mem_stats(&s->ts_sess);
			s->param = NULL;
		}

		panicked = ctx->panicked;
		if (panicked) {
			release_ta_ctx(ctx);
			res = TEE_ERROR_TARGET_DEAD;
		}

		tee_ta_clear_busy(ctx);
	} else {
		/* Deadlock avoided */
		res = TEE_ERROR_BUSY;
	}

	return res;
}

static void init_dump_ctx(struct tee_ta_dump_ctx *dump_ctx)
{
	struct tee_ta_session *sess = NULL;
	struct tee_ta_session_head *open_sessions = NULL;
	struct tee_ta_ctx *ctx = NULL;
	unsigned int n = 0;

	nsec_sessions_list_head(&open_sessions);
	/*
	 * Scan all sessions opened from the non-secure side by searching
	 * through all available TA instances and, for each context, all
	 * of its opened sessions.
	 */
	TAILQ_FOREACH(ctx, &tee_ctxes, link) {
		unsigned int cnt = 0;

		if (!is_user_ta_ctx(&ctx->ts_ctx))
			continue;

		memcpy(&dump_ctx[n].uuid, &ctx->ts_ctx.uuid,
		       sizeof(ctx->ts_ctx.uuid));
		dump_ctx[n].panicked = ctx->panicked;
		dump_ctx[n].is_user_ta = is_user_ta_ctx(&ctx->ts_ctx);
		TAILQ_FOREACH(sess, open_sessions, link) {
			if (sess->ts_sess.ctx == &ctx->ts_ctx) {
				if (cnt == MAX_DUMP_SESS_NUM)
					break;

				dump_ctx[n].sess_id[cnt] = sess->id;
				cnt++;
			}
		}

		dump_ctx[n].sess_num = cnt;
		n++;
	}
}

static TEE_Result dump_ta_stats(struct tee_ta_dump_ctx *dump_ctx,
				struct pta_stats_ta *dump_stats,
				size_t ta_count)
{
	TEE_Result res = TEE_SUCCESS;
	struct tee_ta_session *sess = NULL;
	struct tee_ta_session_head *open_sessions = NULL;
	struct tee_ta_param param = { };
	unsigned int i = 0;
	unsigned int j = 0;

	nsec_sessions_list_head(&open_sessions);

	for (i = 0; i < ta_count; i++) {
		struct pta_stats_ta *stats = &dump_stats[i];

		memcpy(&stats->uuid, &dump_ctx[i].uuid,
		       sizeof(dump_ctx[i].uuid));
		stats->panicked = dump_ctx[i].panicked;
		stats->sess_num = dump_ctx[i].sess_num;

		/* Find a session from the dump context */
		for (j = 0, sess = NULL; j < dump_ctx[i].sess_num && !sess; j++)
			sess = tee_ta_get_session(dump_ctx[i].sess_id[j], true,
						  open_sessions);

		if (!sess)
			continue;
		/* If the session exists, get its heap stats */
		memset(&param, 0, sizeof(struct tee_ta_param));
		param.types = TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_OUTPUT,
					      TEE_PARAM_TYPE_VALUE_OUTPUT,
					      TEE_PARAM_TYPE_VALUE_OUTPUT,
					      TEE_PARAM_TYPE_NONE);
		res = dump_ta_memstats(sess, &param);
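		/*
		 * The three output value parameters carry, in order:
		 * (allocated, max_allocated), (size, num_alloc_fail) and
		 * (biggest_alloc_fail, biggest_alloc_fail_used).
		 */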
		if (res == TEE_SUCCESS) {
			stats->heap.allocated = param.u[0].val.a;
			stats->heap.max_allocated = param.u[0].val.b;
			stats->heap.size = param.u[1].val.a;
			stats->heap.num_alloc_fail = param.u[1].val.b;
			stats->heap.biggest_alloc_fail = param.u[2].val.a;
			stats->heap.biggest_alloc_fail_used = param.u[2].val.b;
		} else {
			memset(&stats->heap, 0, sizeof(stats->heap));
		}
		tee_ta_put_session(sess);
	}

	return TEE_SUCCESS;
}

TEE_Result tee_ta_instance_stats(void *buf, size_t *buf_size)
{
	TEE_Result res = TEE_SUCCESS;
	struct pta_stats_ta *dump_stats = NULL;
	struct tee_ta_dump_ctx *dump_ctx = NULL;
	struct tee_ta_ctx *ctx = NULL;
	size_t sz = 0;
	size_t ta_count = 0;

	if (!buf_size)
		return TEE_ERROR_BAD_PARAMETERS;

	mutex_lock(&tee_ta_mutex);

	/* Go through all available TAs and compute the required buffer size */
	TAILQ_FOREACH(ctx, &tee_ctxes, link)
		if (is_user_ta_ctx(&ctx->ts_ctx))
			ta_count++;

	sz = sizeof(struct pta_stats_ta) * ta_count;
	if (!sz) {
		/* sz == 0 means there is no user TA, report no item found */
		res = TEE_ERROR_ITEM_NOT_FOUND;
	} else if (!buf || *buf_size < sz) {
		/*
		 * A NULL buf, or a passed size smaller than the actual
		 * size, means the caller is querying the required buffer
		 * size, so update *buf_size.
		 */
		*buf_size = sz;
		res = TEE_ERROR_SHORT_BUFFER;
	} else if (!IS_ALIGNED_WITH_TYPE(buf, uint32_t)) {
		DMSG("Data alignment");
		res = TEE_ERROR_BAD_PARAMETERS;
	} else {
		dump_stats = (struct pta_stats_ta *)buf;
		dump_ctx = malloc(sizeof(struct tee_ta_dump_ctx) * ta_count);
		if (!dump_ctx)
			res = TEE_ERROR_OUT_OF_MEMORY;
		else
			init_dump_ctx(dump_ctx);
	}
	mutex_unlock(&tee_ta_mutex);

	if (res != TEE_SUCCESS)
		return res;

	/* Dump user TA stats by iterating over dump_ctx[] */
	res = dump_ta_stats(dump_ctx, dump_stats, ta_count);
	if (res == TEE_SUCCESS)
		*buf_size = sz;

	free(dump_ctx);
	return res;
}
#endif

TEE_Result tee_ta_cancel_command(TEE_ErrorOrigin *err,
				 struct tee_ta_session *sess,
				 const TEE_Identity *clnt_id)
{
	*err = TEE_ORIGIN_TEE;

	if (check_client(sess, clnt_id) != TEE_SUCCESS)
		return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */

	sess->cancel = true;
	return TEE_SUCCESS;
}
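/*
 * Return true if the session has a pending cancellation: either an
 * explicit cancel request or an expired cancellation deadline. TAs can
 * mask cancellation, in which case this always returns false.
 */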
bool tee_ta_session_is_cancelled(struct tee_ta_session *s, TEE_Time *curr_time)
{
	TEE_Time current_time;

	if (s->cancel_mask)
		return false;

	if (s->cancel)
		return true;

	if (s->cancel_time.seconds == UINT32_MAX)
		return false;

	if (curr_time != NULL)
		current_time = *curr_time;
	else if (tee_time_get_sys_time(&current_time) != TEE_SUCCESS)
		return false;

	if (current_time.seconds > s->cancel_time.seconds ||
	    (current_time.seconds == s->cancel_time.seconds &&
	     current_time.millis >= s->cancel_time.millis)) {
		return true;
	}

	return false;
}

#if defined(CFG_TA_GPROF_SUPPORT)
void tee_ta_gprof_sample_pc(vaddr_t pc)
{
	struct ts_session *s = ts_get_current_session();
	struct user_ta_ctx *utc = NULL;
	struct sample_buf *sbuf = NULL;
	TEE_Result res = 0;
	size_t idx = 0;

	sbuf = s->sbuf;
	if (!sbuf || !sbuf->enabled)
		return; /* PC sampling is not enabled */

	idx = (((uint64_t)pc - sbuf->offset) / 2 * sbuf->scale) / 65536;
	if (idx < sbuf->nsamples) {
		utc = to_user_ta_ctx(s->ctx);
		res = vm_check_access_rights(&utc->uctx,
					     TEE_MEMORY_ACCESS_READ |
					     TEE_MEMORY_ACCESS_WRITE |
					     TEE_MEMORY_ACCESS_ANY_OWNER,
					     (uaddr_t)&sbuf->samples[idx],
					     sizeof(*sbuf->samples));
		if (res != TEE_SUCCESS)
			return;
		sbuf->samples[idx]++;
	}
	sbuf->count++;
}

static void gprof_update_session_utime(bool suspend, struct ts_session *s,
				       uint64_t now)
{
	struct sample_buf *sbuf = s->sbuf;

	if (!sbuf)
		return;

	if (suspend) {
		assert(sbuf->usr_entered);
		sbuf->usr += now - sbuf->usr_entered;
		sbuf->usr_entered = 0;
	} else {
		assert(!sbuf->usr_entered);
		if (!now)
			now++; /* 0 is reserved */
		sbuf->usr_entered = now;
	}
}

/*
 * Update user-mode CPU time for the current session
 * @suspend: true if session is being suspended (leaving user mode), false if
 * it is resumed (entering user mode)
 */
static void tee_ta_update_session_utime(bool suspend)
{
	struct ts_session *s = ts_get_current_session();
	uint64_t now = barrier_read_counter_timer();

	gprof_update_session_utime(suspend, s, now);
}

void tee_ta_update_session_utime_suspend(void)
{
	tee_ta_update_session_utime(true);
}

void tee_ta_update_session_utime_resume(void)
{
	tee_ta_update_session_utime(false);
}
#endif

#if defined(CFG_FTRACE_SUPPORT)
static void ftrace_update_times(bool suspend)
{
	struct ts_session *s = ts_get_current_session_may_fail();
	struct ftrace_buf *fbuf = NULL;
	uint64_t now = 0;
	uint32_t i = 0;

	if (!s)
		return;

	now = barrier_read_counter_timer();

	fbuf = s->fbuf;
	if (!fbuf)
		return;

	if (suspend) {
		fbuf->suspend_time = now;
	} else {
		for (i = 0; i <= fbuf->ret_idx; i++)
			fbuf->begin_time[i] += now - fbuf->suspend_time;
	}
}

void tee_ta_ftrace_update_times_suspend(void)
{
	ftrace_update_times(true);
}

void tee_ta_ftrace_update_times_resume(void)
{
	ftrace_update_times(false);
}
#endif

bool __noprof is_ta_ctx(struct ts_ctx *ctx)
{
	return is_user_ta_ctx(ctx) || is_pseudo_ta_ctx(ctx);
}