// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * Copyright (c) 2020, Arm Limited
 */

#include <assert.h>
#include <kernel/mutex.h>
#include <kernel/panic.h>
#include <kernel/pseudo_ta.h>
#include <kernel/stmm_sp.h>
#include <kernel/tee_common.h>
#include <kernel/tee_misc.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/tee_time.h>
#include <kernel/thread.h>
#include <kernel/user_mode_ctx.h>
#include <kernel/user_ta.h>
#include <malloc.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <mm/mobj.h>
#include <mm/vm.h>
#include <pta_stats.h>
#include <stdlib.h>
#include <string.h>
#include <tee_api_types.h>
#include <tee/entry_std.h>
#include <tee/tee_obj.h>
#include <trace.h>
#include <types_ext.h>
#include <user_ta_header.h>
#include <utee_types.h>
#include <util.h>

#if defined(CFG_TA_STATS)
#define MAX_DUMP_SESS_NUM	(16)

struct tee_ta_dump_ctx {
	TEE_UUID uuid;
	uint32_t panicked;
	bool is_user_ta;
	uint32_t sess_num;
	uint32_t sess_id[MAX_DUMP_SESS_NUM];
};
#endif

/* This mutex protects the critical section in tee_ta_init_session */
struct mutex tee_ta_mutex = MUTEX_INITIALIZER;
/* This condvar is used when waiting for a TA context to become initialized */
struct condvar tee_ta_init_cv = CONDVAR_INITIALIZER;
struct tee_ta_ctx_head tee_ctxes = TAILQ_HEAD_INITIALIZER(tee_ctxes);

#ifndef CFG_CONCURRENT_SINGLE_INSTANCE_TA
static struct condvar tee_ta_cv = CONDVAR_INITIALIZER;
static short int tee_ta_single_instance_thread = THREAD_ID_INVALID;
static size_t tee_ta_single_instance_count;
#endif

#ifdef CFG_CONCURRENT_SINGLE_INSTANCE_TA
static void lock_single_instance(void)
{
}

static void unlock_single_instance(void)
{
}

static bool has_single_instance_lock(void)
{
	return false;
}
#else
static void lock_single_instance(void)
{
	/* Requires tee_ta_mutex to be held */
	if (tee_ta_single_instance_thread != thread_get_id()) {
		/* Wait until the single-instance lock is available */
		while (tee_ta_single_instance_thread != THREAD_ID_INVALID)
			condvar_wait(&tee_ta_cv, &tee_ta_mutex);

		tee_ta_single_instance_thread = thread_get_id();
		assert(tee_ta_single_instance_count == 0);
	}

	tee_ta_single_instance_count++;
}

static void unlock_single_instance(void)
{
	/* Requires tee_ta_mutex to be held */
	assert(tee_ta_single_instance_thread == thread_get_id());
	assert(tee_ta_single_instance_count > 0);

	tee_ta_single_instance_count--;
	if (tee_ta_single_instance_count == 0) {
		tee_ta_single_instance_thread = THREAD_ID_INVALID;
		condvar_signal(&tee_ta_cv);
	}
}

static bool has_single_instance_lock(void)
{
	/* Requires tee_ta_mutex to be held */
	return tee_ta_single_instance_thread == thread_get_id();
}
#endif

struct tee_ta_session *__noprof to_ta_session(struct ts_session *sess)
{
	assert(is_ta_ctx(sess->ctx) || is_stmm_ctx(sess->ctx));
	return container_of(sess, struct tee_ta_session, ts_sess);
}

static struct tee_ta_ctx *ts_to_ta_ctx(struct ts_ctx *ctx)
{
	if (is_ta_ctx(ctx))
		return to_ta_ctx(ctx);

	if (is_stmm_ctx(ctx))
		return &(to_stmm_ctx(ctx)->ta_ctx);

	panic("bad context");
}
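/*
 * Mark the TA instance busy for a non-concurrent entry, waiting if needed.
 * Returns false only when this thread holds the single-instance lock and
 * the instance is already busy: blocking in that state could deadlock, so
 * the caller must back off and report TEE_ERROR_BUSY instead.
 */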
static bool tee_ta_try_set_busy(struct tee_ta_ctx *ctx)
{
	bool rc = true;

	if (ctx->flags & TA_FLAG_CONCURRENT)
		return true;

	mutex_lock(&tee_ta_mutex);

	if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
		lock_single_instance();

	if (has_single_instance_lock()) {
		if (ctx->busy) {
			/*
			 * We're holding the single-instance lock and the
			 * TA is busy. Waiting now would only cause a
			 * deadlock, so we release the lock and return
			 * false.
			 */
			rc = false;
			if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
				unlock_single_instance();
		}
	} else {
		/*
		 * We're not holding the single-instance lock, so we're
		 * free to wait for the TA to become available.
		 */
		while (ctx->busy)
			condvar_wait(&ctx->busy_cv, &tee_ta_mutex);
	}

	/* Either it's already true or we should set it to true */
	ctx->busy = true;

	mutex_unlock(&tee_ta_mutex);
	return rc;
}

static void tee_ta_set_busy(struct tee_ta_ctx *ctx)
{
	if (!tee_ta_try_set_busy(ctx))
		panic();
}

static void tee_ta_clear_busy(struct tee_ta_ctx *ctx)
{
	if (ctx->flags & TA_FLAG_CONCURRENT)
		return;

	mutex_lock(&tee_ta_mutex);

	assert(ctx->busy);
	ctx->busy = false;
	condvar_signal(&ctx->busy_cv);

	if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
		unlock_single_instance();

	mutex_unlock(&tee_ta_mutex);
}
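/*
 * Session lifetime protocol: tee_ta_get_session() takes a reference (and
 * optionally the per-session lock), tee_ta_put_session() drops it. A drop
 * down to the last reference is signalled on refc_cv so that
 * tee_ta_unlink_session() can wait until it holds the sole reference
 * before removing the session from the list.
 */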
static void dec_session_ref_count(struct tee_ta_session *s)
{
	assert(s->ref_count > 0);
	s->ref_count--;
	if (s->ref_count == 1)
		condvar_signal(&s->refc_cv);
}

void tee_ta_put_session(struct tee_ta_session *s)
{
	mutex_lock(&tee_ta_mutex);

	if (s->lock_thread == thread_get_id()) {
		s->lock_thread = THREAD_ID_INVALID;
		condvar_signal(&s->lock_cv);
	}
	dec_session_ref_count(s);

	mutex_unlock(&tee_ta_mutex);
}

static struct tee_ta_session *tee_ta_find_session_nolock(uint32_t id,
			struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *s = NULL;
	struct tee_ta_session *found = NULL;

	TAILQ_FOREACH(s, open_sessions, link) {
		if (s->id == id) {
			found = s;
			break;
		}
	}

	return found;
}

struct tee_ta_session *tee_ta_find_session(uint32_t id,
			struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *s = NULL;

	mutex_lock(&tee_ta_mutex);

	s = tee_ta_find_session_nolock(id, open_sessions);

	mutex_unlock(&tee_ta_mutex);

	return s;
}

struct tee_ta_session *tee_ta_get_session(uint32_t id, bool exclusive,
			struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *s;

	mutex_lock(&tee_ta_mutex);

	while (true) {
		s = tee_ta_find_session_nolock(id, open_sessions);
		if (!s)
			break;
		if (s->unlink) {
			s = NULL;
			break;
		}
		s->ref_count++;
		if (!exclusive)
			break;

		assert(s->lock_thread != thread_get_id());

		while (s->lock_thread != THREAD_ID_INVALID && !s->unlink)
			condvar_wait(&s->lock_cv, &tee_ta_mutex);

		if (s->unlink) {
			dec_session_ref_count(s);
			s = NULL;
			break;
		}

		s->lock_thread = thread_get_id();
		break;
	}

	mutex_unlock(&tee_ta_mutex);
	return s;
}

static void tee_ta_unlink_session(struct tee_ta_session *s,
			struct tee_ta_session_head *open_sessions)
{
	mutex_lock(&tee_ta_mutex);

	assert(s->ref_count >= 1);
	assert(s->lock_thread == thread_get_id());
	assert(!s->unlink);

	s->unlink = true;
	condvar_broadcast(&s->lock_cv);

	while (s->ref_count != 1)
		condvar_wait(&s->refc_cv, &tee_ta_mutex);

	TAILQ_REMOVE(open_sessions, s, link);

	mutex_unlock(&tee_ta_mutex);
}
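/*
 * Flush any pending ftrace data while the session context is still valid,
 * then unlink the session from the open-session list and free it together
 * with its gprof sample buffer.
 */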
static void destroy_session(struct tee_ta_session *s,
			    struct tee_ta_session_head *open_sessions)
{
#if defined(CFG_FTRACE_SUPPORT)
	if (s->ts_sess.ctx && s->ts_sess.ctx->ops->dump_ftrace) {
		ts_push_current_session(&s->ts_sess);
		s->ts_sess.fbuf = NULL;
		s->ts_sess.ctx->ops->dump_ftrace(s->ts_sess.ctx);
		ts_pop_current_session();
	}
#endif

	tee_ta_unlink_session(s, open_sessions);
#if defined(CFG_TA_GPROF_SUPPORT)
	free(s->ts_sess.sbuf);
#endif
	free(s);
}

static void destroy_context(struct tee_ta_ctx *ctx)
{
	DMSG("Destroy TA ctx (0x%" PRIxVA ")", (vaddr_t)ctx);

	condvar_destroy(&ctx->busy_cv);
	ctx->ts_ctx.ops->destroy(&ctx->ts_ctx);
}

/*
 * tee_ta_context_find - Find a TA context in the context list based on a
 * UUID (input). Returns a pointer to the context, or NULL if not found.
 */
static struct tee_ta_ctx *tee_ta_context_find(const TEE_UUID *uuid)
{
	struct tee_ta_ctx *ctx;

	TAILQ_FOREACH(ctx, &tee_ctxes, link) {
		if (memcmp(&ctx->ts_ctx.uuid, uuid, sizeof(TEE_UUID)) == 0)
			return ctx;
	}

	return NULL;
}

/* Check whether the requester (client ID) matches the session's initial client */
static TEE_Result check_client(struct tee_ta_session *s, const TEE_Identity *id)
{
	if (id == KERN_IDENTITY)
		return TEE_SUCCESS;

	if (id == NSAPP_IDENTITY) {
		if (s->clnt_id.login == TEE_LOGIN_TRUSTED_APP) {
			DMSG("nsec tries to hijack TA session");
			return TEE_ERROR_ACCESS_DENIED;
		}
		return TEE_SUCCESS;
	}

	if (memcmp(&s->clnt_id, id, sizeof(TEE_Identity)) != 0) {
		DMSG("client id mismatch");
		return TEE_ERROR_ACCESS_DENIED;
	}
	return TEE_SUCCESS;
}

/*
 * Check whether invocation parameters match the TA properties
 *
 * @s - current session handle
 * @param - already identified memory references hold a valid 'mobj'.
 *
 * Policy:
 * - All TAs can access 'non-secure' shared memory.
 * - All TAs can access TEE private memory (seccpy)
 * - Only SDP flagged TAs can accept SDP memory references.
 */
#ifndef CFG_SECURE_DATA_PATH
static bool check_params(struct tee_ta_session *sess __unused,
			 struct tee_ta_param *param __unused)
{
	/*
	 * When CFG_SECURE_DATA_PATH is not enabled, SDP memory references
	 * are rejected at OP-TEE core entry. Hence all TAs have the same
	 * permissions regarding memory reference parameters.
	 */
	return true;
}
#else
static bool check_params(struct tee_ta_session *sess,
			 struct tee_ta_param *param)
{
	int n;

	/*
	 * When CFG_SECURE_DATA_PATH is enabled, OP-TEE entry allows SHM and
	 * SDP memory references. Only TAs flagged SDP can access SDP memory.
	 */
	if (sess->ts_sess.ctx &&
	    ts_to_ta_ctx(sess->ts_sess.ctx)->flags & TA_FLAG_SECURE_DATA_PATH)
		return true;

	for (n = 0; n < TEE_NUM_PARAMS; n++) {
		uint32_t param_type = TEE_PARAM_TYPE_GET(param->types, n);
		struct param_mem *mem = &param->u[n].mem;

		if (param_type != TEE_PARAM_TYPE_MEMREF_INPUT &&
		    param_type != TEE_PARAM_TYPE_MEMREF_OUTPUT &&
		    param_type != TEE_PARAM_TYPE_MEMREF_INOUT)
			continue;
		if (!mem->size)
			continue;
		if (mobj_is_sdp_mem(mem->mobj))
			return false;
	}
	return true;
}
#endif
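/*
 * Compute the absolute cancellation deadline for the session from a
 * relative timeout in milliseconds. For example, a current time of
 * 100 s / 800 ms and cancel_req_to of 2500 ms yields 102 s / 1300 ms,
 * normalized to 103 s / 300 ms. Any value that cannot be represented
 * degrades to "infinite", i.e. no timeout.
 */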
static void set_invoke_timeout(struct tee_ta_session *sess,
			       uint32_t cancel_req_to)
{
	TEE_Time current_time;
	TEE_Time cancel_time;

	if (cancel_req_to == TEE_TIMEOUT_INFINITE)
		goto infinite;

	if (tee_time_get_sys_time(&current_time) != TEE_SUCCESS)
		goto infinite;

	if (ADD_OVERFLOW(current_time.seconds, cancel_req_to / 1000,
			 &cancel_time.seconds))
		goto infinite;

	cancel_time.millis = current_time.millis + cancel_req_to % 1000;
	if (cancel_time.millis >= 1000) {
		/* Carry the millisecond overflow into the seconds */
		if (ADD_OVERFLOW(cancel_time.seconds, 1,
				 &cancel_time.seconds))
			goto infinite;

		cancel_time.millis -= 1000;
	}

	sess->cancel_time = cancel_time;
	return;

infinite:
	sess->cancel_time.seconds = UINT32_MAX;
	sess->cancel_time.millis = UINT32_MAX;
}

/*-----------------------------------------------------------------------------
 * Close a Trusted Application and free available resources
 *---------------------------------------------------------------------------*/
TEE_Result tee_ta_close_session(struct tee_ta_session *csess,
				struct tee_ta_session_head *open_sessions,
				const TEE_Identity *clnt_id)
{
	struct tee_ta_session *sess = NULL;
	struct tee_ta_ctx *ctx = NULL;
	struct ts_ctx *ts_ctx = NULL;
	bool keep_alive = false;

	DMSG("csess 0x%" PRIxVA " id %u",
	     (vaddr_t)csess, csess ? csess->id : UINT_MAX);

	if (!csess)
		return TEE_ERROR_ITEM_NOT_FOUND;

	sess = tee_ta_get_session(csess->id, true, open_sessions);

	if (!sess) {
		EMSG("session 0x%" PRIxVA " to be removed is not found",
		     (vaddr_t)csess);
		return TEE_ERROR_ITEM_NOT_FOUND;
	}

	if (check_client(sess, clnt_id) != TEE_SUCCESS) {
		tee_ta_put_session(sess);
		return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */
	}

	DMSG("Destroy session");

	ts_ctx = sess->ts_sess.ctx;
	if (!ts_ctx) {
		destroy_session(sess, open_sessions);
		return TEE_SUCCESS;
	}

	ctx = ts_to_ta_ctx(ts_ctx);
	if (ctx->panicked) {
		destroy_session(sess, open_sessions);
	} else {
		tee_ta_set_busy(ctx);
		set_invoke_timeout(sess, TEE_TIMEOUT_INFINITE);
		ts_ctx->ops->enter_close_session(&sess->ts_sess);
		destroy_session(sess, open_sessions);
		tee_ta_clear_busy(ctx);
	}

	mutex_lock(&tee_ta_mutex);

	if (ctx->ref_count <= 0)
		panic();

	ctx->ref_count--;
	keep_alive = (ctx->flags & TA_FLAG_INSTANCE_KEEP_ALIVE) &&
			(ctx->flags & TA_FLAG_SINGLE_INSTANCE);
	if (!ctx->ref_count && (ctx->panicked || !keep_alive)) {
		if (!ctx->is_releasing) {
			TAILQ_REMOVE(&tee_ctxes, ctx, link);
			ctx->is_releasing = true;
		}
		mutex_unlock(&tee_ta_mutex);

		destroy_context(ctx);
	} else
		mutex_unlock(&tee_ta_mutex);

	return TEE_SUCCESS;
}
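/*
 * Try to bind the new session to an already loaded single-instance TA
 * context. Returns TEE_ERROR_ITEM_NOT_FOUND when the caller should load
 * a new instance instead. Requires tee_ta_mutex to be held.
 */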
static TEE_Result tee_ta_init_session_with_context(struct tee_ta_session *s,
						   const TEE_UUID *uuid)
{
	struct tee_ta_ctx *ctx = NULL;

	while (true) {
		ctx = tee_ta_context_find(uuid);
		if (!ctx)
			return TEE_ERROR_ITEM_NOT_FOUND;

		if (!ctx->is_initializing)
			break;
		/*
		 * Context is still initializing, wait here until it's
		 * fully initialized. Note that we're searching for the
		 * context again since it may have been removed while we
		 * were sleeping.
		 */
		condvar_wait(&tee_ta_init_cv, &tee_ta_mutex);
	}

	/*
	 * If the trusted service is not a single-instance service (e.g. is
	 * a multi-instance TA) it should be loaded as a new instance instead
	 * of doing anything with this instance. So tell the caller that we
	 * didn't find the TA so the caller will load a new instance.
	 */
	if ((ctx->flags & TA_FLAG_SINGLE_INSTANCE) == 0)
		return TEE_ERROR_ITEM_NOT_FOUND;

	/*
	 * The trusted service is single-instance; if it isn't multi-session
	 * we can't create another session unless its reference is zero
	 */
	if (!(ctx->flags & TA_FLAG_MULTI_SESSION) && ctx->ref_count)
		return TEE_ERROR_BUSY;

	DMSG("Re-open trusted service %pUl", (void *)&ctx->ts_ctx.uuid);

	ctx->ref_count++;
	s->ts_sess.ctx = &ctx->ts_ctx;
	s->ts_sess.handle_scall = s->ts_sess.ctx->ops->handle_scall;
	return TEE_SUCCESS;
}
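/*
 * Allocate a non-zero session ID, starting the search just after the ID
 * of the most recently opened session and wrapping around until a free
 * one is found. Returns 0 if the whole ID space is in use.
 */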
static uint32_t new_session_id(struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *last = NULL;
	uint32_t saved = 0;
	uint32_t id = 1;

	last = TAILQ_LAST(open_sessions, tee_ta_session_head);
	if (last) {
		/* This value is less likely to be already used */
		id = last->id + 1;
		if (!id)
			id++; /* 0 is not valid */
	}

	saved = id;
	do {
		if (!tee_ta_find_session_nolock(id, open_sessions))
			return id;
		id++;
		if (!id)
			id++;
	} while (id != saved);

	return 0;
}

static TEE_Result tee_ta_init_session(TEE_ErrorOrigin *err,
				struct tee_ta_session_head *open_sessions,
				const TEE_UUID *uuid,
				struct tee_ta_session **sess)
{
	TEE_Result res;
	struct tee_ta_session *s = calloc(1, sizeof(struct tee_ta_session));

	*err = TEE_ORIGIN_TEE;
	if (!s)
		return TEE_ERROR_OUT_OF_MEMORY;

	s->cancel_mask = true;
	condvar_init(&s->refc_cv);
	condvar_init(&s->lock_cv);
	s->lock_thread = THREAD_ID_INVALID;
	s->ref_count = 1;

	mutex_lock(&tee_ta_mutex);
	s->id = new_session_id(open_sessions);
	if (!s->id) {
		res = TEE_ERROR_OVERFLOW;
		goto err_mutex_unlock;
	}

	TAILQ_INSERT_TAIL(open_sessions, s, link);

	/* Look for already loaded TA */
	res = tee_ta_init_session_with_context(s, uuid);
	mutex_unlock(&tee_ta_mutex);
	if (res == TEE_SUCCESS || res != TEE_ERROR_ITEM_NOT_FOUND)
		goto out;

	/* Look for secure partition */
	res = stmm_init_session(uuid, s);
	if (res == TEE_SUCCESS || res != TEE_ERROR_ITEM_NOT_FOUND)
		goto out;

	/* Look for pseudo TA */
	res = tee_ta_init_pseudo_ta_session(uuid, s);
	if (res == TEE_SUCCESS || res != TEE_ERROR_ITEM_NOT_FOUND)
		goto out;

	/* Look for user TA */
	res = tee_ta_init_user_ta_session(uuid, s);

out:
	if (!res) {
		*sess = s;
		return TEE_SUCCESS;
	}

	mutex_lock(&tee_ta_mutex);
	TAILQ_REMOVE(open_sessions, s, link);
err_mutex_unlock:
	mutex_unlock(&tee_ta_mutex);
	free(s);
	return res;
}

static void release_ta_ctx(struct tee_ta_ctx *ctx)
{
	bool was_releasing = false;

	mutex_lock(&tee_ta_mutex);
	was_releasing = ctx->is_releasing;
	ctx->is_releasing = true;
	if (!was_releasing) {
		DMSG("Releasing panicked TA ctx");
		TAILQ_REMOVE(&tee_ctxes, ctx, link);
	}
	mutex_unlock(&tee_ta_mutex);

	if (!was_releasing)
		ctx->ts_ctx.ops->release_state(&ctx->ts_ctx);
}
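/*
 * Open a session towards the TA identified by @uuid, loading a new TA
 * instance when no reusable single-instance context exists. *err reports
 * whether an error originated in the TEE or in the TA itself.
 */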
TEE_Result tee_ta_open_session(TEE_ErrorOrigin *err,
			       struct tee_ta_session **sess,
			       struct tee_ta_session_head *open_sessions,
			       const TEE_UUID *uuid,
			       const TEE_Identity *clnt_id,
			       uint32_t cancel_req_to,
			       struct tee_ta_param *param)
{
	TEE_Result res = TEE_SUCCESS;
	struct tee_ta_session *s = NULL;
	struct tee_ta_ctx *ctx = NULL;
	struct ts_ctx *ts_ctx = NULL;
	bool panicked = false;
	bool was_busy = false;

	res = tee_ta_init_session(err, open_sessions, uuid, &s);
	if (res != TEE_SUCCESS) {
		DMSG("init session failed 0x%x", res);
		return res;
	}

	if (!check_params(s, param))
		return TEE_ERROR_BAD_PARAMETERS;

	ts_ctx = s->ts_sess.ctx;
	ctx = ts_to_ta_ctx(ts_ctx);

	if (tee_ta_try_set_busy(ctx)) {
		if (!ctx->panicked) {
			/* Save identity of the owner of the session */
			s->clnt_id = *clnt_id;
			s->param = param;
			set_invoke_timeout(s, cancel_req_to);
			res = ts_ctx->ops->enter_open_session(&s->ts_sess);
			s->param = NULL;
		}

		panicked = ctx->panicked;
		if (panicked) {
			release_ta_ctx(ctx);
			res = TEE_ERROR_TARGET_DEAD;
		}

		tee_ta_clear_busy(ctx);
	} else {
		/* Deadlock avoided */
		res = TEE_ERROR_BUSY;
		was_busy = true;
	}

	/*
	 * The error origin is TEE_ORIGIN_TRUSTED_APP for "regular" errors,
	 * except when the TA panicked or was busy.
	 */
	if (panicked || was_busy)
		*err = TEE_ORIGIN_TEE;
	else
		*err = s->err_origin;

	tee_ta_put_session(s);
	if (panicked || res != TEE_SUCCESS)
		tee_ta_close_session(s, open_sessions, KERN_IDENTITY);

	if (!res)
		*sess = s;
	else
		EMSG("Failed for TA %pUl. Return error %#"PRIx32, uuid, res);

	return res;
}
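/*
 * Invoke command @cmd in an open session. The busy/panicked handling
 * mirrors tee_ta_open_session(): a panicking TA turns the result into
 * TEE_ERROR_TARGET_DEAD and releases the context state.
 */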
TEE_Result tee_ta_invoke_command(TEE_ErrorOrigin *err,
				 struct tee_ta_session *sess,
				 const TEE_Identity *clnt_id,
				 uint32_t cancel_req_to, uint32_t cmd,
				 struct tee_ta_param *param)
{
	struct tee_ta_ctx *ta_ctx = NULL;
	struct ts_ctx *ts_ctx = NULL;
	TEE_Result res = TEE_SUCCESS;
	bool panicked = false;

	if (check_client(sess, clnt_id) != TEE_SUCCESS)
		return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */

	if (!check_params(sess, param))
		return TEE_ERROR_BAD_PARAMETERS;

	ts_ctx = sess->ts_sess.ctx;
	ta_ctx = ts_to_ta_ctx(ts_ctx);

	tee_ta_set_busy(ta_ctx);

	if (!ta_ctx->panicked) {
		sess->param = param;
		set_invoke_timeout(sess, cancel_req_to);
		res = ts_ctx->ops->enter_invoke_cmd(&sess->ts_sess, cmd);
		sess->param = NULL;
	}

	panicked = ta_ctx->panicked;
	if (panicked) {
		release_ta_ctx(ta_ctx);
		res = TEE_ERROR_TARGET_DEAD;
	}

	tee_ta_clear_busy(ta_ctx);

	/*
	 * The error origin is TEE_ORIGIN_TRUSTED_APP for "regular" errors,
	 * except when the TA panicked.
	 */
	if (panicked)
		*err = TEE_ORIGIN_TEE;
	else
		*err = sess->err_origin;

	/* A short buffer is not an effective error case */
	if (res != TEE_SUCCESS && res != TEE_ERROR_SHORT_BUFFER)
		DMSG("Error: %x of %d", res, *err);

	return res;
}

#if defined(CFG_TA_STATS)
static TEE_Result dump_ta_memstats(struct tee_ta_session *s,
				   struct tee_ta_param *param)
{
	TEE_Result res = TEE_SUCCESS;
	struct tee_ta_ctx *ctx = NULL;
	struct ts_ctx *ts_ctx = NULL;
	bool panicked = false;

	ts_ctx = s->ts_sess.ctx;
	if (!ts_ctx)
		return TEE_ERROR_ITEM_NOT_FOUND;

	ctx = ts_to_ta_ctx(ts_ctx);

	if (ctx->is_initializing)
		return TEE_ERROR_BAD_STATE;

	if (tee_ta_try_set_busy(ctx)) {
		if (!ctx->panicked) {
			s->param = param;
			set_invoke_timeout(s, TEE_TIMEOUT_INFINITE);
			res = ts_ctx->ops->dump_mem_stats(&s->ts_sess);
			s->param = NULL;
		}

		panicked = ctx->panicked;
		if (panicked) {
			release_ta_ctx(ctx);
			res = TEE_ERROR_TARGET_DEAD;
		}

		tee_ta_clear_busy(ctx);
	} else {
		/* Deadlock avoided */
		res = TEE_ERROR_BUSY;
	}

	return res;
}

static void init_dump_ctx(struct tee_ta_dump_ctx *dump_ctx)
{
	struct tee_ta_session *sess = NULL;
	struct tee_ta_session_head *open_sessions = NULL;
	struct tee_ta_ctx *ctx = NULL;
	unsigned int n = 0;

	nsec_sessions_list_head(&open_sessions);
	/*
	 * Scan all sessions opened from the non-secure side by searching
	 * through all available TA instances and, for each context,
	 * scanning all opened sessions.
	 */
	TAILQ_FOREACH(ctx, &tee_ctxes, link) {
		unsigned int cnt = 0;

		if (!is_user_ta_ctx(&ctx->ts_ctx))
			continue;

		memcpy(&dump_ctx[n].uuid, &ctx->ts_ctx.uuid,
		       sizeof(ctx->ts_ctx.uuid));
		dump_ctx[n].panicked = ctx->panicked;
		dump_ctx[n].is_user_ta = is_user_ta_ctx(&ctx->ts_ctx);
		TAILQ_FOREACH(sess, open_sessions, link) {
			if (sess->ts_sess.ctx == &ctx->ts_ctx) {
				if (cnt == MAX_DUMP_SESS_NUM)
					break;

				dump_ctx[n].sess_id[cnt] = sess->id;
				cnt++;
			}
		}

		dump_ctx[n].sess_num = cnt;
		n++;
	}
}
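/*
 * For each dumped context, grab one of its recorded sessions and ask the
 * TA to report heap statistics through three VALUE_OUTPUT parameters:
 * allocated/max_allocated, size/num_alloc_fail and
 * biggest_alloc_fail/biggest_alloc_fail_used.
 */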
static TEE_Result dump_ta_stats(struct tee_ta_dump_ctx *dump_ctx,
				struct pta_stats_ta *dump_stats,
				size_t ta_count)
{
	TEE_Result res = TEE_SUCCESS;
	struct tee_ta_session *sess = NULL;
	struct tee_ta_session_head *open_sessions = NULL;
	struct tee_ta_param param = { };
	unsigned int i = 0;
	unsigned int j = 0;

	nsec_sessions_list_head(&open_sessions);

	for (i = 0; i < ta_count; i++) {
		struct pta_stats_ta *stats = &dump_stats[i];

		memcpy(&stats->uuid, &dump_ctx[i].uuid,
		       sizeof(dump_ctx[i].uuid));
		stats->panicked = dump_ctx[i].panicked;
		stats->sess_num = dump_ctx[i].sess_num;

		/* Find a session from the dump context */
		for (j = 0, sess = NULL; j < dump_ctx[i].sess_num && !sess; j++)
			sess = tee_ta_get_session(dump_ctx[i].sess_id[j], true,
						  open_sessions);

		if (!sess)
			continue;
		/* If the session exists, get its heap stats */
		memset(&param, 0, sizeof(struct tee_ta_param));
		param.types = TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_OUTPUT,
					      TEE_PARAM_TYPE_VALUE_OUTPUT,
					      TEE_PARAM_TYPE_VALUE_OUTPUT,
					      TEE_PARAM_TYPE_NONE);
		res = dump_ta_memstats(sess, &param);
		if (res == TEE_SUCCESS) {
			stats->heap.allocated = param.u[0].val.a;
			stats->heap.max_allocated = param.u[0].val.b;
			stats->heap.size = param.u[1].val.a;
			stats->heap.num_alloc_fail = param.u[1].val.b;
			stats->heap.biggest_alloc_fail = param.u[2].val.a;
			stats->heap.biggest_alloc_fail_used = param.u[2].val.b;
		} else {
			memset(&stats->heap, 0, sizeof(stats->heap));
		}
		tee_ta_put_session(sess);
	}

	return TEE_SUCCESS;
}

TEE_Result tee_ta_instance_stats(void *buf, size_t *buf_size)
{
	TEE_Result res = TEE_SUCCESS;
	struct pta_stats_ta *dump_stats = NULL;
	struct tee_ta_dump_ctx *dump_ctx = NULL;
	struct tee_ta_ctx *ctx = NULL;
	size_t sz = 0;
	size_t ta_count = 0;

	if (!buf_size)
		return TEE_ERROR_BAD_PARAMETERS;

	mutex_lock(&tee_ta_mutex);

	/* Walk all available TAs to compute the required buffer size */
	TAILQ_FOREACH(ctx, &tee_ctxes, link)
		if (is_user_ta_ctx(&ctx->ts_ctx))
			ta_count++;

	sz = sizeof(struct pta_stats_ta) * ta_count;
	if (!sz) {
		/* sz == 0 means there is no user TA, return no item found */
		res = TEE_ERROR_ITEM_NOT_FOUND;
	} else if (!buf || *buf_size < sz) {
		/*
		 * A NULL buf or a size smaller than the actual size means
		 * the caller is querying the required buffer size, so
		 * update *buf_size.
		 */
		*buf_size = sz;
		res = TEE_ERROR_SHORT_BUFFER;
	} else if (!IS_ALIGNED_WITH_TYPE(buf, uint32_t)) {
		DMSG("Data alignment");
		res = TEE_ERROR_BAD_PARAMETERS;
	} else {
		dump_stats = (struct pta_stats_ta *)buf;
		dump_ctx = malloc(sizeof(struct tee_ta_dump_ctx) * ta_count);
		if (!dump_ctx)
			res = TEE_ERROR_OUT_OF_MEMORY;
		else
			init_dump_ctx(dump_ctx);
	}
	mutex_unlock(&tee_ta_mutex);

	if (res != TEE_SUCCESS)
		return res;

	/* Dump user TA stats by iterating over dump_ctx[] */
	res = dump_ta_stats(dump_ctx, dump_stats, ta_count);
	if (res == TEE_SUCCESS)
		*buf_size = sz;

	free(dump_ctx);
	return res;
}
#endif

TEE_Result tee_ta_cancel_command(TEE_ErrorOrigin *err,
				 struct tee_ta_session *sess,
				 const TEE_Identity *clnt_id)
{
	*err = TEE_ORIGIN_TEE;

	if (check_client(sess, clnt_id) != TEE_SUCCESS)
		return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */

	sess->cancel = true;
	return TEE_SUCCESS;
}

bool tee_ta_session_is_cancelled(struct tee_ta_session *s, TEE_Time *curr_time)
{
	TEE_Time current_time;

	if (s->cancel_mask)
		return false;

	if (s->cancel)
		return true;

	if (s->cancel_time.seconds == UINT32_MAX)
		return false;

	if (curr_time != NULL)
		current_time = *curr_time;
	else if (tee_time_get_sys_time(&current_time) != TEE_SUCCESS)
		return false;

	if (current_time.seconds > s->cancel_time.seconds ||
	    (current_time.seconds == s->cancel_time.seconds &&
	     current_time.millis >= s->cancel_time.millis)) {
		return true;
	}

	return false;
}
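/*
 * gprof PC sampling: the sample buffer is a histogram over the TA's text
 * segment. A PC is mapped to a bucket as
 * ((pc - offset) / 2 * scale) / 65536, matching gprof's 16-bit fixed-point
 * scale convention, and the bucket is only incremented after checking that
 * the user-mode buffer is still writable.
 */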
#if defined(CFG_TA_GPROF_SUPPORT)
void tee_ta_gprof_sample_pc(vaddr_t pc)
{
	struct ts_session *s = ts_get_current_session();
	struct user_ta_ctx *utc = NULL;
	struct sample_buf *sbuf = NULL;
	TEE_Result res = 0;
	size_t idx = 0;

	sbuf = s->sbuf;
	if (!sbuf || !sbuf->enabled)
		return; /* PC sampling is not enabled */

	idx = (((uint64_t)pc - sbuf->offset)/2 * sbuf->scale)/65536;
	if (idx < sbuf->nsamples) {
		utc = to_user_ta_ctx(s->ctx);
		res = vm_check_access_rights(&utc->uctx,
					     TEE_MEMORY_ACCESS_READ |
					     TEE_MEMORY_ACCESS_WRITE |
					     TEE_MEMORY_ACCESS_ANY_OWNER,
					     (uaddr_t)&sbuf->samples[idx],
					     sizeof(*sbuf->samples));
		if (res != TEE_SUCCESS)
			return;
		sbuf->samples[idx]++;
	}
	sbuf->count++;
}

static void gprof_update_session_utime(bool suspend, struct ts_session *s,
				       uint64_t now)
{
	struct sample_buf *sbuf = s->sbuf;

	if (!sbuf)
		return;

	if (suspend) {
		assert(sbuf->usr_entered);
		sbuf->usr += now - sbuf->usr_entered;
		sbuf->usr_entered = 0;
	} else {
		assert(!sbuf->usr_entered);
		if (!now)
			now++; /* 0 is reserved */
		sbuf->usr_entered = now;
	}
}

/*
 * Update user-mode CPU time for the current session
 * @suspend: true if the session is being suspended (leaving user mode),
 * false if it is resumed (entering user mode)
 */
static void tee_ta_update_session_utime(bool suspend)
{
	struct ts_session *s = ts_get_current_session();
	uint64_t now = barrier_read_counter_timer();

	gprof_update_session_utime(suspend, s, now);
}

void tee_ta_update_session_utime_suspend(void)
{
	tee_ta_update_session_utime(true);
}

void tee_ta_update_session_utime_resume(void)
{
	tee_ta_update_session_utime(false);
}
#endif

#if defined(CFG_FTRACE_SUPPORT)
static void ftrace_update_times(bool suspend)
{
	struct ts_session *s = ts_get_current_session_may_fail();
	struct ftrace_buf *fbuf = NULL;
	uint64_t now = 0;
	uint32_t i = 0;

	if (!s)
		return;

	now = barrier_read_counter_timer();

	fbuf = s->fbuf;
	if (!fbuf)
		return;

	if (suspend) {
		fbuf->suspend_time = now;
	} else {
		for (i = 0; i <= fbuf->ret_idx; i++)
			fbuf->begin_time[i] += now - fbuf->suspend_time;
	}
}

void tee_ta_ftrace_update_times_suspend(void)
{
	ftrace_update_times(true);
}

void tee_ta_ftrace_update_times_resume(void)
{
	ftrace_update_times(false);
}
#endif

bool __noprof is_ta_ctx(struct ts_ctx *ctx)
{
	return is_user_ta_ctx(ctx) || is_pseudo_ta_ctx(ctx);
}