// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * Copyright (c) 2020, Arm Limited
 */

#include <assert.h>
#include <kernel/mutex.h>
#include <kernel/panic.h>
#include <kernel/pseudo_ta.h>
#include <kernel/stmm_sp.h>
#include <kernel/tee_common.h>
#include <kernel/tee_misc.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/tee_time.h>
#include <kernel/thread.h>
#include <kernel/user_mode_ctx.h>
#include <kernel/user_ta.h>
#include <malloc.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <mm/mobj.h>
#include <mm/vm.h>
#include <pta_stats.h>
#include <stdlib.h>
#include <string.h>
#include <tee_api_types.h>
#include <tee/entry_std.h>
#include <tee/tee_obj.h>
#include <trace.h>
#include <types_ext.h>
#include <user_ta_header.h>
#include <utee_types.h>
#include <util.h>

#if defined(CFG_TA_STATS)
/* Upper bound on sessions recorded per TA when dumping statistics */
#define MAX_DUMP_SESS_NUM (16)

/*
 * Snapshot of one TA context, taken under tee_ta_mutex, so that per-TA
 * statistics can be collected later without holding the mutex.
 */
struct tee_ta_dump_ctx {
	TEE_UUID uuid;			/* UUID of the TA instance */
	uint32_t panicked;		/* Non-zero if the TA has panicked */
	bool is_user_ta;		/* True for user-mode TA contexts */
	uint32_t sess_num;		/* Number of valid entries in sess_id[] */
	uint32_t sess_id[MAX_DUMP_SESS_NUM];	/* IDs of open sessions */
};
#endif

/* This mutex protects the critical section in tee_ta_init_session */
struct mutex tee_ta_mutex = MUTEX_INITIALIZER;
/* This condvar is used when waiting for a TA context to become initialized */
struct condvar tee_ta_init_cv = CONDVAR_INITIALIZER;
/* List of all loaded TA contexts, protected by tee_ta_mutex */
struct tee_ta_ctx_head tee_ctxes = TAILQ_HEAD_INITIALIZER(tee_ctxes);

#ifndef CFG_CONCURRENT_SINGLE_INSTANCE_TA
/* State of the global single-instance lock, protected by tee_ta_mutex */
static struct condvar tee_ta_cv = CONDVAR_INITIALIZER;
static short int tee_ta_single_instance_thread = THREAD_ID_INVALID;
static size_t tee_ta_single_instance_count;
#endif

#ifdef CFG_CONCURRENT_SINGLE_INSTANCE_TA
/*
 * With CFG_CONCURRENT_SINGLE_INSTANCE_TA enabled, concurrent calls into
 * single-instance TAs are allowed, so the single-instance lock degenerates
 * to a no-op and is never reported as held.
 */
static void lock_single_instance(void)
{
}

static void unlock_single_instance(void)
{
}

static bool has_single_instance_lock(void)
{
	return false;
}
#else
static void lock_single_instance(void) 75 { 76 /* Requires tee_ta_mutex to be held */ 77 if (tee_ta_single_instance_thread != thread_get_id()) { 78 /* Wait until the single-instance lock is available. */ 79 while (tee_ta_single_instance_thread != THREAD_ID_INVALID) 80 condvar_wait(&tee_ta_cv, &tee_ta_mutex); 81 82 tee_ta_single_instance_thread = thread_get_id(); 83 assert(tee_ta_single_instance_count == 0); 84 } 85 86 tee_ta_single_instance_count++; 87 } 88 89 static void unlock_single_instance(void) 90 { 91 /* Requires tee_ta_mutex to be held */ 92 assert(tee_ta_single_instance_thread == thread_get_id()); 93 assert(tee_ta_single_instance_count > 0); 94 95 tee_ta_single_instance_count--; 96 if (tee_ta_single_instance_count == 0) { 97 tee_ta_single_instance_thread = THREAD_ID_INVALID; 98 condvar_signal(&tee_ta_cv); 99 } 100 } 101 102 static bool has_single_instance_lock(void) 103 { 104 /* Requires tee_ta_mutex to be held */ 105 return tee_ta_single_instance_thread == thread_get_id(); 106 } 107 #endif 108 109 struct tee_ta_session *__noprof to_ta_session(struct ts_session *sess) 110 { 111 assert(is_ta_ctx(sess->ctx) || is_stmm_ctx(sess->ctx)); 112 return container_of(sess, struct tee_ta_session, ts_sess); 113 } 114 115 static struct tee_ta_ctx *ts_to_ta_ctx(struct ts_ctx *ctx) 116 { 117 if (is_ta_ctx(ctx)) 118 return to_ta_ctx(ctx); 119 120 if (is_stmm_ctx(ctx)) 121 return &(to_stmm_ctx(ctx)->ta_ctx); 122 123 panic("bad context"); 124 } 125 126 static bool tee_ta_try_set_busy(struct tee_ta_ctx *ctx) 127 { 128 bool rc = true; 129 130 if (ctx->flags & TA_FLAG_CONCURRENT) 131 return true; 132 133 mutex_lock(&tee_ta_mutex); 134 135 if (ctx->flags & TA_FLAG_SINGLE_INSTANCE) 136 lock_single_instance(); 137 138 if (has_single_instance_lock()) { 139 if (ctx->busy) { 140 /* 141 * We're holding the single-instance lock and the 142 * TA is busy, as waiting now would only cause a 143 * dead-lock, we release the lock and return false. 
144 */ 145 rc = false; 146 if (ctx->flags & TA_FLAG_SINGLE_INSTANCE) 147 unlock_single_instance(); 148 } 149 } else { 150 /* 151 * We're not holding the single-instance lock, we're free to 152 * wait for the TA to become available. 153 */ 154 while (ctx->busy) 155 condvar_wait(&ctx->busy_cv, &tee_ta_mutex); 156 } 157 158 /* Either it's already true or we should set it to true */ 159 ctx->busy = true; 160 161 mutex_unlock(&tee_ta_mutex); 162 return rc; 163 } 164 165 static void tee_ta_set_busy(struct tee_ta_ctx *ctx) 166 { 167 if (!tee_ta_try_set_busy(ctx)) 168 panic(); 169 } 170 171 static void tee_ta_clear_busy(struct tee_ta_ctx *ctx) 172 { 173 if (ctx->flags & TA_FLAG_CONCURRENT) 174 return; 175 176 mutex_lock(&tee_ta_mutex); 177 178 assert(ctx->busy); 179 ctx->busy = false; 180 condvar_signal(&ctx->busy_cv); 181 182 if (ctx->flags & TA_FLAG_SINGLE_INSTANCE) 183 unlock_single_instance(); 184 185 mutex_unlock(&tee_ta_mutex); 186 } 187 188 static void dec_session_ref_count(struct tee_ta_session *s) 189 { 190 assert(s->ref_count > 0); 191 s->ref_count--; 192 if (s->ref_count == 1) 193 condvar_signal(&s->refc_cv); 194 } 195 196 void tee_ta_put_session(struct tee_ta_session *s) 197 { 198 mutex_lock(&tee_ta_mutex); 199 200 if (s->lock_thread == thread_get_id()) { 201 s->lock_thread = THREAD_ID_INVALID; 202 condvar_signal(&s->lock_cv); 203 } 204 dec_session_ref_count(s); 205 206 mutex_unlock(&tee_ta_mutex); 207 } 208 209 static struct tee_ta_session *tee_ta_find_session_nolock(uint32_t id, 210 struct tee_ta_session_head *open_sessions) 211 { 212 struct tee_ta_session *s = NULL; 213 struct tee_ta_session *found = NULL; 214 215 TAILQ_FOREACH(s, open_sessions, link) { 216 if (s->id == id) { 217 found = s; 218 break; 219 } 220 } 221 222 return found; 223 } 224 225 struct tee_ta_session *tee_ta_find_session(uint32_t id, 226 struct tee_ta_session_head *open_sessions) 227 { 228 struct tee_ta_session *s = NULL; 229 230 mutex_lock(&tee_ta_mutex); 231 232 s = 
tee_ta_find_session_nolock(id, open_sessions); 233 234 mutex_unlock(&tee_ta_mutex); 235 236 return s; 237 } 238 239 struct tee_ta_session *tee_ta_get_session(uint32_t id, bool exclusive, 240 struct tee_ta_session_head *open_sessions) 241 { 242 struct tee_ta_session *s; 243 244 mutex_lock(&tee_ta_mutex); 245 246 while (true) { 247 s = tee_ta_find_session_nolock(id, open_sessions); 248 if (!s) 249 break; 250 if (s->unlink) { 251 s = NULL; 252 break; 253 } 254 s->ref_count++; 255 if (!exclusive) 256 break; 257 258 assert(s->lock_thread != thread_get_id()); 259 260 while (s->lock_thread != THREAD_ID_INVALID && !s->unlink) 261 condvar_wait(&s->lock_cv, &tee_ta_mutex); 262 263 if (s->unlink) { 264 dec_session_ref_count(s); 265 s = NULL; 266 break; 267 } 268 269 s->lock_thread = thread_get_id(); 270 break; 271 } 272 273 mutex_unlock(&tee_ta_mutex); 274 return s; 275 } 276 277 static void tee_ta_unlink_session(struct tee_ta_session *s, 278 struct tee_ta_session_head *open_sessions) 279 { 280 mutex_lock(&tee_ta_mutex); 281 282 assert(s->ref_count >= 1); 283 assert(s->lock_thread == thread_get_id()); 284 assert(!s->unlink); 285 286 s->unlink = true; 287 condvar_broadcast(&s->lock_cv); 288 289 while (s->ref_count != 1) 290 condvar_wait(&s->refc_cv, &tee_ta_mutex); 291 292 TAILQ_REMOVE(open_sessions, s, link); 293 294 mutex_unlock(&tee_ta_mutex); 295 } 296 297 static void destroy_session(struct tee_ta_session *s, 298 struct tee_ta_session_head *open_sessions) 299 { 300 #if defined(CFG_FTRACE_SUPPORT) 301 if (s->ts_sess.ctx && s->ts_sess.ctx->ops->dump_ftrace) { 302 ts_push_current_session(&s->ts_sess); 303 s->ts_sess.fbuf = NULL; 304 s->ts_sess.ctx->ops->dump_ftrace(s->ts_sess.ctx); 305 ts_pop_current_session(); 306 } 307 #endif 308 309 tee_ta_unlink_session(s, open_sessions); 310 #if defined(CFG_TA_GPROF_SUPPORT) 311 free(s->ts_sess.sbuf); 312 #endif 313 free(s); 314 } 315 316 static void destroy_context(struct tee_ta_ctx *ctx) 317 { 318 DMSG("Destroy TA ctx (0x%" PRIxVA ")", 
(vaddr_t)ctx); 319 320 condvar_destroy(&ctx->busy_cv); 321 ctx->ts_ctx.ops->destroy(&ctx->ts_ctx); 322 } 323 324 /* 325 * tee_ta_context_find - Find TA in session list based on a UUID (input) 326 * Returns a pointer to the session 327 */ 328 static struct tee_ta_ctx *tee_ta_context_find(const TEE_UUID *uuid) 329 { 330 struct tee_ta_ctx *ctx; 331 332 TAILQ_FOREACH(ctx, &tee_ctxes, link) { 333 if (memcmp(&ctx->ts_ctx.uuid, uuid, sizeof(TEE_UUID)) == 0) 334 return ctx; 335 } 336 337 return NULL; 338 } 339 340 /* check if requester (client ID) matches session initial client */ 341 static TEE_Result check_client(struct tee_ta_session *s, const TEE_Identity *id) 342 { 343 if (id == KERN_IDENTITY) 344 return TEE_SUCCESS; 345 346 if (id == NSAPP_IDENTITY) { 347 if (s->clnt_id.login == TEE_LOGIN_TRUSTED_APP) { 348 DMSG("nsec tries to hijack TA session"); 349 return TEE_ERROR_ACCESS_DENIED; 350 } 351 return TEE_SUCCESS; 352 } 353 354 if (memcmp(&s->clnt_id, id, sizeof(TEE_Identity)) != 0) { 355 DMSG("client id mismatch"); 356 return TEE_ERROR_ACCESS_DENIED; 357 } 358 return TEE_SUCCESS; 359 } 360 361 /* 362 * Check if invocation parameters matches TA properties 363 * 364 * @s - current session handle 365 * @param - already identified memory references hold a valid 'mobj'. 366 * 367 * Policy: 368 * - All TAs can access 'non-secure' shared memory. 369 * - All TAs can access TEE private memory (seccpy) 370 * - Only SDP flagged TAs can accept SDP memory references. 371 */ 372 #ifndef CFG_SECURE_DATA_PATH 373 static bool check_params(struct tee_ta_session *sess __unused, 374 struct tee_ta_param *param __unused) 375 { 376 /* 377 * When CFG_SECURE_DATA_PATH is not enabled, SDP memory references 378 * are rejected at OP-TEE core entry. Hence here all TAs have same 379 * permissions regarding memory reference parameters. 
380 */ 381 return true; 382 } 383 #else 384 static bool check_params(struct tee_ta_session *sess, 385 struct tee_ta_param *param) 386 { 387 int n; 388 389 /* 390 * When CFG_SECURE_DATA_PATH is enabled, OP-TEE entry allows SHM and 391 * SDP memory references. Only TAs flagged SDP can access SDP memory. 392 */ 393 if (sess->ts_sess.ctx && 394 ts_to_ta_ctx(sess->ts_sess.ctx)->flags & TA_FLAG_SECURE_DATA_PATH) 395 return true; 396 397 for (n = 0; n < TEE_NUM_PARAMS; n++) { 398 uint32_t param_type = TEE_PARAM_TYPE_GET(param->types, n); 399 struct param_mem *mem = ¶m->u[n].mem; 400 401 if (param_type != TEE_PARAM_TYPE_MEMREF_INPUT && 402 param_type != TEE_PARAM_TYPE_MEMREF_OUTPUT && 403 param_type != TEE_PARAM_TYPE_MEMREF_INOUT) 404 continue; 405 if (!mem->size) 406 continue; 407 if (mobj_is_sdp_mem(mem->mobj)) 408 return false; 409 } 410 return true; 411 } 412 #endif 413 414 static void set_invoke_timeout(struct tee_ta_session *sess, 415 uint32_t cancel_req_to) 416 { 417 TEE_Time current_time; 418 TEE_Time cancel_time; 419 420 if (cancel_req_to == TEE_TIMEOUT_INFINITE) 421 goto infinite; 422 423 if (tee_time_get_sys_time(¤t_time) != TEE_SUCCESS) 424 goto infinite; 425 426 if (ADD_OVERFLOW(current_time.seconds, cancel_req_to / 1000, 427 &cancel_time.seconds)) 428 goto infinite; 429 430 cancel_time.millis = current_time.millis + cancel_req_to % 1000; 431 if (cancel_time.millis > 1000) { 432 if (ADD_OVERFLOW(current_time.seconds, 1, 433 &cancel_time.seconds)) 434 goto infinite; 435 436 cancel_time.seconds++; 437 cancel_time.millis -= 1000; 438 } 439 440 sess->cancel_time = cancel_time; 441 return; 442 443 infinite: 444 sess->cancel_time.seconds = UINT32_MAX; 445 sess->cancel_time.millis = UINT32_MAX; 446 } 447 448 /*----------------------------------------------------------------------------- 449 * Close a Trusted Application and free available resources 450 *---------------------------------------------------------------------------*/ 451 TEE_Result 
tee_ta_close_session(struct tee_ta_session *csess, 452 struct tee_ta_session_head *open_sessions, 453 const TEE_Identity *clnt_id) 454 { 455 struct tee_ta_session *sess = NULL; 456 struct tee_ta_ctx *ctx = NULL; 457 struct ts_ctx *ts_ctx = NULL; 458 bool keep_alive = false; 459 460 DMSG("csess 0x%" PRIxVA " id %u", 461 (vaddr_t)csess, csess ? csess->id : UINT_MAX); 462 463 if (!csess) 464 return TEE_ERROR_ITEM_NOT_FOUND; 465 466 sess = tee_ta_get_session(csess->id, true, open_sessions); 467 468 if (!sess) { 469 EMSG("session 0x%" PRIxVA " to be removed is not found", 470 (vaddr_t)csess); 471 return TEE_ERROR_ITEM_NOT_FOUND; 472 } 473 474 if (check_client(sess, clnt_id) != TEE_SUCCESS) { 475 tee_ta_put_session(sess); 476 return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */ 477 } 478 479 DMSG("Destroy session"); 480 481 ts_ctx = sess->ts_sess.ctx; 482 if (!ts_ctx) { 483 destroy_session(sess, open_sessions); 484 return TEE_SUCCESS; 485 } 486 487 ctx = ts_to_ta_ctx(ts_ctx); 488 if (ctx->panicked) { 489 destroy_session(sess, open_sessions); 490 } else { 491 tee_ta_set_busy(ctx); 492 set_invoke_timeout(sess, TEE_TIMEOUT_INFINITE); 493 ts_ctx->ops->enter_close_session(&sess->ts_sess); 494 destroy_session(sess, open_sessions); 495 tee_ta_clear_busy(ctx); 496 } 497 498 mutex_lock(&tee_ta_mutex); 499 500 if (ctx->ref_count <= 0) 501 panic(); 502 503 ctx->ref_count--; 504 keep_alive = (ctx->flags & TA_FLAG_INSTANCE_KEEP_ALIVE) && 505 (ctx->flags & TA_FLAG_SINGLE_INSTANCE); 506 if (!ctx->ref_count && (ctx->panicked || !keep_alive)) { 507 if (!ctx->is_releasing) { 508 TAILQ_REMOVE(&tee_ctxes, ctx, link); 509 ctx->is_releasing = true; 510 } 511 mutex_unlock(&tee_ta_mutex); 512 513 destroy_context(ctx); 514 } else 515 mutex_unlock(&tee_ta_mutex); 516 517 return TEE_SUCCESS; 518 } 519 520 static TEE_Result tee_ta_init_session_with_context(struct tee_ta_session *s, 521 const TEE_UUID *uuid) 522 { 523 struct tee_ta_ctx *ctx = NULL; 524 525 while (true) { 526 ctx = 
tee_ta_context_find(uuid); 527 if (!ctx) 528 return TEE_ERROR_ITEM_NOT_FOUND; 529 530 if (!is_user_ta_ctx(&ctx->ts_ctx) || 531 !to_user_ta_ctx(&ctx->ts_ctx)->uctx.is_initializing) 532 break; 533 /* 534 * Context is still initializing, wait here until it's 535 * fully initialized. Note that we're searching for the 536 * context again since it may have been removed while we 537 * where sleeping. 538 */ 539 condvar_wait(&tee_ta_init_cv, &tee_ta_mutex); 540 } 541 542 /* 543 * If TA isn't single instance it should be loaded as new 544 * instance instead of doing anything with this instance. 545 * So tell the caller that we didn't find the TA it the 546 * caller will load a new instance. 547 */ 548 if ((ctx->flags & TA_FLAG_SINGLE_INSTANCE) == 0) 549 return TEE_ERROR_ITEM_NOT_FOUND; 550 551 /* 552 * The TA is single instance, if it isn't multi session we 553 * can't create another session unless its reference is zero 554 */ 555 if (!(ctx->flags & TA_FLAG_MULTI_SESSION) && ctx->ref_count) 556 return TEE_ERROR_BUSY; 557 558 DMSG("Re-open TA %pUl", (void *)&ctx->ts_ctx.uuid); 559 560 ctx->ref_count++; 561 s->ts_sess.ctx = &ctx->ts_ctx; 562 s->ts_sess.handle_scall = s->ts_sess.ctx->ops->handle_scall; 563 return TEE_SUCCESS; 564 } 565 566 static uint32_t new_session_id(struct tee_ta_session_head *open_sessions) 567 { 568 struct tee_ta_session *last = NULL; 569 uint32_t saved = 0; 570 uint32_t id = 1; 571 572 last = TAILQ_LAST(open_sessions, tee_ta_session_head); 573 if (last) { 574 /* This value is less likely to be already used */ 575 id = last->id + 1; 576 if (!id) 577 id++; /* 0 is not valid */ 578 } 579 580 saved = id; 581 do { 582 if (!tee_ta_find_session_nolock(id, open_sessions)) 583 return id; 584 id++; 585 if (!id) 586 id++; 587 } while (id != saved); 588 589 return 0; 590 } 591 592 static TEE_Result tee_ta_init_session(TEE_ErrorOrigin *err, 593 struct tee_ta_session_head *open_sessions, 594 const TEE_UUID *uuid, 595 struct tee_ta_session **sess) 596 { 597 TEE_Result 
res; 598 struct tee_ta_session *s = calloc(1, sizeof(struct tee_ta_session)); 599 600 *err = TEE_ORIGIN_TEE; 601 if (!s) 602 return TEE_ERROR_OUT_OF_MEMORY; 603 604 s->cancel_mask = true; 605 condvar_init(&s->refc_cv); 606 condvar_init(&s->lock_cv); 607 s->lock_thread = THREAD_ID_INVALID; 608 s->ref_count = 1; 609 610 mutex_lock(&tee_ta_mutex); 611 s->id = new_session_id(open_sessions); 612 if (!s->id) { 613 res = TEE_ERROR_OVERFLOW; 614 goto err_mutex_unlock; 615 } 616 617 TAILQ_INSERT_TAIL(open_sessions, s, link); 618 619 /* Look for already loaded TA */ 620 res = tee_ta_init_session_with_context(s, uuid); 621 mutex_unlock(&tee_ta_mutex); 622 if (res == TEE_SUCCESS || res != TEE_ERROR_ITEM_NOT_FOUND) 623 goto out; 624 625 /* Look for secure partition */ 626 res = stmm_init_session(uuid, s); 627 if (res == TEE_SUCCESS || res != TEE_ERROR_ITEM_NOT_FOUND) 628 goto out; 629 630 /* Look for pseudo TA */ 631 res = tee_ta_init_pseudo_ta_session(uuid, s); 632 if (res == TEE_SUCCESS || res != TEE_ERROR_ITEM_NOT_FOUND) 633 goto out; 634 635 /* Look for user TA */ 636 res = tee_ta_init_user_ta_session(uuid, s); 637 638 out: 639 if (!res) { 640 *sess = s; 641 return TEE_SUCCESS; 642 } 643 644 mutex_lock(&tee_ta_mutex); 645 TAILQ_REMOVE(open_sessions, s, link); 646 err_mutex_unlock: 647 mutex_unlock(&tee_ta_mutex); 648 free(s); 649 return res; 650 } 651 652 static void release_ta_ctx(struct tee_ta_ctx *ctx) 653 { 654 bool was_releasing = false; 655 656 mutex_lock(&tee_ta_mutex); 657 was_releasing = ctx->is_releasing; 658 ctx->is_releasing = true; 659 if (!was_releasing) { 660 DMSG("Releasing panicked TA ctx"); 661 TAILQ_REMOVE(&tee_ctxes, ctx, link); 662 } 663 mutex_unlock(&tee_ta_mutex); 664 665 if (!was_releasing) 666 ctx->ts_ctx.ops->release_state(&ctx->ts_ctx); 667 } 668 669 TEE_Result tee_ta_open_session(TEE_ErrorOrigin *err, 670 struct tee_ta_session **sess, 671 struct tee_ta_session_head *open_sessions, 672 const TEE_UUID *uuid, 673 const TEE_Identity *clnt_id, 674 
uint32_t cancel_req_to, 675 struct tee_ta_param *param) 676 { 677 TEE_Result res = TEE_SUCCESS; 678 struct tee_ta_session *s = NULL; 679 struct tee_ta_ctx *ctx = NULL; 680 struct ts_ctx *ts_ctx = NULL; 681 bool panicked = false; 682 bool was_busy = false; 683 684 res = tee_ta_init_session(err, open_sessions, uuid, &s); 685 if (res != TEE_SUCCESS) { 686 DMSG("init session failed 0x%x", res); 687 return res; 688 } 689 690 if (!check_params(s, param)) 691 return TEE_ERROR_BAD_PARAMETERS; 692 693 ts_ctx = s->ts_sess.ctx; 694 ctx = ts_to_ta_ctx(ts_ctx); 695 696 if (tee_ta_try_set_busy(ctx)) { 697 if (!ctx->panicked) { 698 /* Save identity of the owner of the session */ 699 s->clnt_id = *clnt_id; 700 s->param = param; 701 set_invoke_timeout(s, cancel_req_to); 702 res = ts_ctx->ops->enter_open_session(&s->ts_sess); 703 s->param = NULL; 704 } 705 706 panicked = ctx->panicked; 707 if (panicked) { 708 release_ta_ctx(ctx); 709 res = TEE_ERROR_TARGET_DEAD; 710 } 711 712 tee_ta_clear_busy(ctx); 713 } else { 714 /* Deadlock avoided */ 715 res = TEE_ERROR_BUSY; 716 was_busy = true; 717 } 718 719 /* 720 * Origin error equal to TEE_ORIGIN_TRUSTED_APP for "regular" error, 721 * apart from panicking. 722 */ 723 if (panicked || was_busy) 724 *err = TEE_ORIGIN_TEE; 725 else 726 *err = s->err_origin; 727 728 tee_ta_put_session(s); 729 if (panicked || res != TEE_SUCCESS) 730 tee_ta_close_session(s, open_sessions, KERN_IDENTITY); 731 732 if (!res) 733 *sess = s; 734 else 735 EMSG("Failed for TA %pUl. 
Return error %#"PRIx32, uuid, res); 736 737 return res; 738 } 739 740 TEE_Result tee_ta_invoke_command(TEE_ErrorOrigin *err, 741 struct tee_ta_session *sess, 742 const TEE_Identity *clnt_id, 743 uint32_t cancel_req_to, uint32_t cmd, 744 struct tee_ta_param *param) 745 { 746 struct tee_ta_ctx *ta_ctx = NULL; 747 struct ts_ctx *ts_ctx = NULL; 748 TEE_Result res = TEE_SUCCESS; 749 bool panicked = false; 750 751 if (check_client(sess, clnt_id) != TEE_SUCCESS) 752 return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */ 753 754 if (!check_params(sess, param)) 755 return TEE_ERROR_BAD_PARAMETERS; 756 757 ts_ctx = sess->ts_sess.ctx; 758 ta_ctx = ts_to_ta_ctx(ts_ctx); 759 760 tee_ta_set_busy(ta_ctx); 761 762 if (!ta_ctx->panicked) { 763 sess->param = param; 764 set_invoke_timeout(sess, cancel_req_to); 765 res = ts_ctx->ops->enter_invoke_cmd(&sess->ts_sess, cmd); 766 sess->param = NULL; 767 } 768 769 panicked = ta_ctx->panicked; 770 if (panicked) { 771 release_ta_ctx(ta_ctx); 772 res = TEE_ERROR_TARGET_DEAD; 773 } 774 775 tee_ta_clear_busy(ta_ctx); 776 777 /* 778 * Origin error equal to TEE_ORIGIN_TRUSTED_APP for "regular" error, 779 * apart from panicking. 
780 */ 781 if (panicked) 782 *err = TEE_ORIGIN_TEE; 783 else 784 *err = sess->err_origin; 785 786 /* Short buffer is not an effective error case */ 787 if (res != TEE_SUCCESS && res != TEE_ERROR_SHORT_BUFFER) 788 DMSG("Error: %x of %d", res, *err); 789 790 return res; 791 } 792 793 #if defined(CFG_TA_STATS) 794 static TEE_Result dump_ta_memstats(struct tee_ta_session *s, 795 struct tee_ta_param *param) 796 { 797 TEE_Result res = TEE_SUCCESS; 798 struct tee_ta_ctx *ctx = NULL; 799 struct ts_ctx *ts_ctx = NULL; 800 bool panicked = false; 801 802 ts_ctx = s->ts_sess.ctx; 803 if (!ts_ctx) 804 return TEE_ERROR_ITEM_NOT_FOUND; 805 806 if (is_user_ta_ctx(ts_ctx) && 807 to_user_ta_ctx(ts_ctx)->uctx.is_initializing) 808 return TEE_ERROR_BAD_STATE; 809 810 ctx = ts_to_ta_ctx(ts_ctx); 811 812 if (tee_ta_try_set_busy(ctx)) { 813 if (!ctx->panicked) { 814 s->param = param; 815 set_invoke_timeout(s, TEE_TIMEOUT_INFINITE); 816 res = ts_ctx->ops->dump_mem_stats(&s->ts_sess); 817 s->param = NULL; 818 } 819 820 panicked = ctx->panicked; 821 if (panicked) { 822 release_ta_ctx(ctx); 823 res = TEE_ERROR_TARGET_DEAD; 824 } 825 826 tee_ta_clear_busy(ctx); 827 } else { 828 /* Deadlock avoided */ 829 res = TEE_ERROR_BUSY; 830 } 831 832 return res; 833 } 834 835 static void init_dump_ctx(struct tee_ta_dump_ctx *dump_ctx) 836 { 837 struct tee_ta_session *sess = NULL; 838 struct tee_ta_session_head *open_sessions = NULL; 839 struct tee_ta_ctx *ctx = NULL; 840 unsigned int n = 0; 841 842 nsec_sessions_list_head(&open_sessions); 843 /* 844 * Scan all sessions opened from secure side by searching through 845 * all available TA instances and for each context, scan all opened 846 * sessions. 
847 */ 848 TAILQ_FOREACH(ctx, &tee_ctxes, link) { 849 unsigned int cnt = 0; 850 851 if (!is_user_ta_ctx(&ctx->ts_ctx)) 852 continue; 853 854 memcpy(&dump_ctx[n].uuid, &ctx->ts_ctx.uuid, 855 sizeof(ctx->ts_ctx.uuid)); 856 dump_ctx[n].panicked = ctx->panicked; 857 dump_ctx[n].is_user_ta = is_user_ta_ctx(&ctx->ts_ctx); 858 TAILQ_FOREACH(sess, open_sessions, link) { 859 if (sess->ts_sess.ctx == &ctx->ts_ctx) { 860 if (cnt == MAX_DUMP_SESS_NUM) 861 break; 862 863 dump_ctx[n].sess_id[cnt] = sess->id; 864 cnt++; 865 } 866 } 867 868 dump_ctx[n].sess_num = cnt; 869 n++; 870 } 871 } 872 873 static TEE_Result dump_ta_stats(struct tee_ta_dump_ctx *dump_ctx, 874 struct pta_stats_ta *dump_stats, 875 size_t ta_count) 876 { 877 TEE_Result res = TEE_SUCCESS; 878 struct tee_ta_session *sess = NULL; 879 struct tee_ta_session_head *open_sessions = NULL; 880 struct tee_ta_param param = { }; 881 unsigned int i = 0; 882 unsigned int j = 0; 883 884 nsec_sessions_list_head(&open_sessions); 885 886 for (i = 0; i < ta_count; i++) { 887 struct pta_stats_ta *stats = &dump_stats[i]; 888 889 memcpy(&stats->uuid, &dump_ctx[i].uuid, 890 sizeof(dump_ctx[i].uuid)); 891 stats->panicked = dump_ctx[i].panicked; 892 stats->sess_num = dump_ctx[i].sess_num; 893 894 /* Find a session from dump context */ 895 for (j = 0, sess = NULL; j < dump_ctx[i].sess_num && !sess; j++) 896 sess = tee_ta_get_session(dump_ctx[i].sess_id[j], true, 897 open_sessions); 898 899 if (!sess) 900 continue; 901 /* If session is existing, get its heap stats */ 902 memset(¶m, 0, sizeof(struct tee_ta_param)); 903 param.types = TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_OUTPUT, 904 TEE_PARAM_TYPE_VALUE_OUTPUT, 905 TEE_PARAM_TYPE_VALUE_OUTPUT, 906 TEE_PARAM_TYPE_NONE); 907 res = dump_ta_memstats(sess, ¶m); 908 if (res == TEE_SUCCESS) { 909 stats->heap.allocated = param.u[0].val.a; 910 stats->heap.max_allocated = param.u[0].val.b; 911 stats->heap.size = param.u[1].val.a; 912 stats->heap.num_alloc_fail = param.u[1].val.b; 913 
stats->heap.biggest_alloc_fail = param.u[2].val.a; 914 stats->heap.biggest_alloc_fail_used = param.u[2].val.b; 915 } else { 916 memset(&stats->heap, 0, sizeof(stats->heap)); 917 } 918 tee_ta_put_session(sess); 919 } 920 921 return TEE_SUCCESS; 922 } 923 924 TEE_Result tee_ta_instance_stats(void *buf, size_t *buf_size) 925 { 926 TEE_Result res = TEE_SUCCESS; 927 struct pta_stats_ta *dump_stats = NULL; 928 struct tee_ta_dump_ctx *dump_ctx = NULL; 929 struct tee_ta_ctx *ctx = NULL; 930 size_t sz = 0; 931 size_t ta_count = 0; 932 933 if (!buf_size) 934 return TEE_ERROR_BAD_PARAMETERS; 935 936 mutex_lock(&tee_ta_mutex); 937 938 /* Go through all available TA and calc out the actual buffer size. */ 939 TAILQ_FOREACH(ctx, &tee_ctxes, link) 940 if (is_user_ta_ctx(&ctx->ts_ctx)) 941 ta_count++; 942 943 sz = sizeof(struct pta_stats_ta) * ta_count; 944 if (!sz) { 945 /* sz = 0 means there is no UTA, return no item found. */ 946 res = TEE_ERROR_ITEM_NOT_FOUND; 947 } else if (!buf || *buf_size < sz) { 948 /* 949 * buf is null or pass size less than actual size 950 * means caller try to query the buffer size. 951 * update *buf_size. 
952 */ 953 *buf_size = sz; 954 res = TEE_ERROR_SHORT_BUFFER; 955 } else if (!IS_ALIGNED_WITH_TYPE(buf, uint32_t)) { 956 DMSG("Data alignment"); 957 res = TEE_ERROR_BAD_PARAMETERS; 958 } else { 959 dump_stats = (struct pta_stats_ta *)buf; 960 dump_ctx = malloc(sizeof(struct tee_ta_dump_ctx) * ta_count); 961 if (!dump_ctx) 962 res = TEE_ERROR_OUT_OF_MEMORY; 963 else 964 init_dump_ctx(dump_ctx); 965 } 966 mutex_unlock(&tee_ta_mutex); 967 968 if (res != TEE_SUCCESS) 969 return res; 970 971 /* Dump user ta stats by iterating dump_ctx[] */ 972 res = dump_ta_stats(dump_ctx, dump_stats, ta_count); 973 if (res == TEE_SUCCESS) 974 *buf_size = sz; 975 976 free(dump_ctx); 977 return res; 978 } 979 #endif 980 981 TEE_Result tee_ta_cancel_command(TEE_ErrorOrigin *err, 982 struct tee_ta_session *sess, 983 const TEE_Identity *clnt_id) 984 { 985 *err = TEE_ORIGIN_TEE; 986 987 if (check_client(sess, clnt_id) != TEE_SUCCESS) 988 return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */ 989 990 sess->cancel = true; 991 return TEE_SUCCESS; 992 } 993 994 bool tee_ta_session_is_cancelled(struct tee_ta_session *s, TEE_Time *curr_time) 995 { 996 TEE_Time current_time; 997 998 if (s->cancel_mask) 999 return false; 1000 1001 if (s->cancel) 1002 return true; 1003 1004 if (s->cancel_time.seconds == UINT32_MAX) 1005 return false; 1006 1007 if (curr_time != NULL) 1008 current_time = *curr_time; 1009 else if (tee_time_get_sys_time(¤t_time) != TEE_SUCCESS) 1010 return false; 1011 1012 if (current_time.seconds > s->cancel_time.seconds || 1013 (current_time.seconds == s->cancel_time.seconds && 1014 current_time.millis >= s->cancel_time.millis)) { 1015 return true; 1016 } 1017 1018 return false; 1019 } 1020 1021 #if defined(CFG_TA_GPROF_SUPPORT) 1022 void tee_ta_gprof_sample_pc(vaddr_t pc) 1023 { 1024 struct ts_session *s = ts_get_current_session(); 1025 struct user_ta_ctx *utc = NULL; 1026 struct sample_buf *sbuf = NULL; 1027 TEE_Result res = 0; 1028 size_t idx = 0; 1029 1030 sbuf = s->sbuf; 
1031 if (!sbuf || !sbuf->enabled) 1032 return; /* PC sampling is not enabled */ 1033 1034 idx = (((uint64_t)pc - sbuf->offset)/2 * sbuf->scale)/65536; 1035 if (idx < sbuf->nsamples) { 1036 utc = to_user_ta_ctx(s->ctx); 1037 res = vm_check_access_rights(&utc->uctx, 1038 TEE_MEMORY_ACCESS_READ | 1039 TEE_MEMORY_ACCESS_WRITE | 1040 TEE_MEMORY_ACCESS_ANY_OWNER, 1041 (uaddr_t)&sbuf->samples[idx], 1042 sizeof(*sbuf->samples)); 1043 if (res != TEE_SUCCESS) 1044 return; 1045 sbuf->samples[idx]++; 1046 } 1047 sbuf->count++; 1048 } 1049 1050 static void gprof_update_session_utime(bool suspend, struct ts_session *s, 1051 uint64_t now) 1052 { 1053 struct sample_buf *sbuf = s->sbuf; 1054 1055 if (!sbuf) 1056 return; 1057 1058 if (suspend) { 1059 assert(sbuf->usr_entered); 1060 sbuf->usr += now - sbuf->usr_entered; 1061 sbuf->usr_entered = 0; 1062 } else { 1063 assert(!sbuf->usr_entered); 1064 if (!now) 1065 now++; /* 0 is reserved */ 1066 sbuf->usr_entered = now; 1067 } 1068 } 1069 1070 /* 1071 * Update user-mode CPU time for the current session 1072 * @suspend: true if session is being suspended (leaving user mode), false if 1073 * it is resumed (entering user mode) 1074 */ 1075 static void tee_ta_update_session_utime(bool suspend) 1076 { 1077 struct ts_session *s = ts_get_current_session(); 1078 uint64_t now = barrier_read_counter_timer(); 1079 1080 gprof_update_session_utime(suspend, s, now); 1081 } 1082 1083 void tee_ta_update_session_utime_suspend(void) 1084 { 1085 tee_ta_update_session_utime(true); 1086 } 1087 1088 void tee_ta_update_session_utime_resume(void) 1089 { 1090 tee_ta_update_session_utime(false); 1091 } 1092 #endif 1093 1094 #if defined(CFG_FTRACE_SUPPORT) 1095 static void ftrace_update_times(bool suspend) 1096 { 1097 struct ts_session *s = ts_get_current_session_may_fail(); 1098 struct ftrace_buf *fbuf = NULL; 1099 uint64_t now = 0; 1100 uint32_t i = 0; 1101 1102 if (!s) 1103 return; 1104 1105 now = barrier_read_counter_timer(); 1106 1107 fbuf = s->fbuf; 1108 
if (!fbuf) 1109 return; 1110 1111 if (suspend) { 1112 fbuf->suspend_time = now; 1113 } else { 1114 for (i = 0; i <= fbuf->ret_idx; i++) 1115 fbuf->begin_time[i] += now - fbuf->suspend_time; 1116 } 1117 } 1118 1119 void tee_ta_ftrace_update_times_suspend(void) 1120 { 1121 ftrace_update_times(true); 1122 } 1123 1124 void tee_ta_ftrace_update_times_resume(void) 1125 { 1126 ftrace_update_times(false); 1127 } 1128 #endif 1129 1130 bool __noprof is_ta_ctx(struct ts_ctx *ctx) 1131 { 1132 return is_user_ta_ctx(ctx) || is_pseudo_ta_ctx(ctx); 1133 } 1134