1 /* 2 * Copyright (c) 2014, STMicroelectronics International N.V. 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions are met: 7 * 8 * 1. Redistributions of source code must retain the above copyright notice, 9 * this list of conditions and the following disclaimer. 10 * 11 * 2. Redistributions in binary form must reproduce the above copyright notice, 12 * this list of conditions and the following disclaimer in the documentation 13 * and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 16 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE 19 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 20 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 21 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 22 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 23 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 24 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 25 * POSSIBILITY OF SUCH DAMAGE. 26 */ 27 #include <types_ext.h> 28 #include <stdbool.h> 29 #include <stdio.h> 30 #include <stdlib.h> 31 #include <string.h> 32 #include <arm.h> 33 #include <kernel/mutex.h> 34 #include <kernel/panic.h> 35 #include <kernel/static_ta.h> 36 #include <kernel/tee_common.h> 37 #include <kernel/tee_misc.h> 38 #include <kernel/tee_ta_manager.h> 39 #include <kernel/tee_time.h> 40 #include <kernel/thread.h> 41 #include <kernel/user_ta.h> 42 #include <mm/core_mmu.h> 43 #include <mm/core_memprot.h> 44 #include <mm/tee_mmu.h> 45 #include <tee/tee_svc_cryp.h> 46 #include <tee/tee_obj.h> 47 #include <tee/tee_svc_storage.h> 48 #include <tee_api_types.h> 49 #include <trace.h> 50 #include <utee_types.h> 51 #include <util.h> 52 #include <assert.h> 53 54 /* This mutex protects the critical section in tee_ta_init_session */ 55 static struct mutex tee_ta_mutex = MUTEX_INITIALIZER; 56 static struct condvar tee_ta_cv = CONDVAR_INITIALIZER; 57 static int tee_ta_single_instance_thread = THREAD_ID_INVALID; 58 static size_t tee_ta_single_instance_count; 59 struct tee_ta_ctx_head tee_ctxes = TAILQ_HEAD_INITIALIZER(tee_ctxes); 60 61 static void lock_single_instance(void) 62 { 63 /* Requires tee_ta_mutex to be held */ 64 if (tee_ta_single_instance_thread != thread_get_id()) { 65 /* Wait until the single-instance lock is available. */ 66 while (tee_ta_single_instance_thread != THREAD_ID_INVALID) 67 condvar_wait(&tee_ta_cv, &tee_ta_mutex); 68 69 tee_ta_single_instance_thread = thread_get_id(); 70 assert(tee_ta_single_instance_count == 0); 71 } 72 73 tee_ta_single_instance_count++; 74 } 75 76 static void unlock_single_instance(void) 77 { 78 /* Requires tee_ta_mutex to be held */ 79 assert(tee_ta_single_instance_thread == thread_get_id()); 80 assert(tee_ta_single_instance_count > 0); 81 82 tee_ta_single_instance_count--; 83 if (tee_ta_single_instance_count == 0) { 84 tee_ta_single_instance_thread = THREAD_ID_INVALID; 85 condvar_signal(&tee_ta_cv); 86 } 87 } 88 89 static bool has_single_instance_lock(void) 90 { 91 /* Requires tee_ta_mutex to be held */ 92 return tee_ta_single_instance_thread == thread_get_id(); 93 } 94 95 static bool tee_ta_try_set_busy(struct tee_ta_ctx *ctx) 96 { 97 bool rc = true; 98 99 mutex_lock(&tee_ta_mutex); 100 101 if (ctx->flags & TA_FLAG_SINGLE_INSTANCE) 102 lock_single_instance(); 103 104 if (has_single_instance_lock()) { 105 if (ctx->busy) { 106 /* 107 * We're holding the single-instance lock and the 108 * TA is busy, as waiting now would only cause a 109 * dead-lock, we release the lock and return false. 110 */ 111 rc = false; 112 if (ctx->flags & TA_FLAG_SINGLE_INSTANCE) 113 unlock_single_instance(); 114 } 115 } else { 116 /* 117 * We're not holding the single-instance lock, we're free to 118 * wait for the TA to become available. 119 */ 120 while (ctx->busy) 121 condvar_wait(&ctx->busy_cv, &tee_ta_mutex); 122 } 123 124 /* Either it's already true or we should set it to true */ 125 ctx->busy = true; 126 127 mutex_unlock(&tee_ta_mutex); 128 return rc; 129 } 130 131 static void tee_ta_set_busy(struct tee_ta_ctx *ctx) 132 { 133 if (!tee_ta_try_set_busy(ctx)) 134 panic(); 135 } 136 137 static void tee_ta_clear_busy(struct tee_ta_ctx *ctx) 138 { 139 mutex_lock(&tee_ta_mutex); 140 141 assert(ctx->busy); 142 ctx->busy = false; 143 condvar_signal(&ctx->busy_cv); 144 145 if (ctx->flags & TA_FLAG_SINGLE_INSTANCE) 146 unlock_single_instance(); 147 148 mutex_unlock(&tee_ta_mutex); 149 } 150 151 static void dec_session_ref_count(struct tee_ta_session *s) 152 { 153 assert(s->ref_count > 0); 154 s->ref_count--; 155 if (s->ref_count == 1) 156 condvar_signal(&s->refc_cv); 157 } 158 159 void tee_ta_put_session(struct tee_ta_session *s) 160 { 161 mutex_lock(&tee_ta_mutex); 162 163 if (s->lock_thread == thread_get_id()) { 164 s->lock_thread = THREAD_ID_INVALID; 165 condvar_signal(&s->lock_cv); 166 } 167 dec_session_ref_count(s); 168 169 mutex_unlock(&tee_ta_mutex); 170 } 171 172 static struct tee_ta_session *find_session(uint32_t id, 173 struct tee_ta_session_head *open_sessions) 174 { 175 struct tee_ta_session *s; 176 177 TAILQ_FOREACH(s, open_sessions, link) { 178 if ((vaddr_t)s == id) 179 return s; 180 } 181 return NULL; 182 } 183 184 struct tee_ta_session *tee_ta_get_session(uint32_t id, bool exclusive, 185 struct tee_ta_session_head *open_sessions) 186 { 187 struct tee_ta_session *s; 188 189 mutex_lock(&tee_ta_mutex); 190 191 while (true) { 192 s = find_session(id, open_sessions); 193 if (!s) 194 break; 195 if (s->unlink) { 196 s = NULL; 197 break; 198 } 199 s->ref_count++; 200 if (!exclusive) 201 break; 202 203 assert(s->lock_thread != thread_get_id()); 204 205 while (s->lock_thread != THREAD_ID_INVALID && !s->unlink) 206 condvar_wait(&s->lock_cv, &tee_ta_mutex); 207 208 if (s->unlink) { 209 dec_session_ref_count(s); 210 s = NULL; 211 break; 212 } 213 214 s->lock_thread = thread_get_id(); 215 break; 216 } 217 218 mutex_unlock(&tee_ta_mutex); 219 return s; 220 } 221 222 static void tee_ta_unlink_session(struct tee_ta_session *s, 223 struct tee_ta_session_head *open_sessions) 224 { 225 mutex_lock(&tee_ta_mutex); 226 227 assert(s->ref_count >= 1); 228 assert(s->lock_thread == thread_get_id()); 229 assert(!s->unlink); 230 231 s->unlink = true; 232 condvar_broadcast(&s->lock_cv); 233 234 while (s->ref_count != 1) 235 condvar_wait(&s->refc_cv, &tee_ta_mutex); 236 237 TAILQ_REMOVE(open_sessions, s, link); 238 239 mutex_unlock(&tee_ta_mutex); 240 } 241 242 /* 243 * tee_ta_context_find - Find TA in session list based on a UUID (input) 244 * Returns a pointer to the session 245 */ 246 static struct tee_ta_ctx *tee_ta_context_find(const TEE_UUID *uuid) 247 { 248 struct tee_ta_ctx *ctx; 249 250 TAILQ_FOREACH(ctx, &tee_ctxes, link) { 251 if (memcmp(&ctx->uuid, uuid, sizeof(TEE_UUID)) == 0) 252 return ctx; 253 } 254 255 return NULL; 256 } 257 258 /* check if requester (client ID) matches session initial client */ 259 static TEE_Result check_client(struct tee_ta_session *s, const TEE_Identity *id) 260 { 261 if (id == KERN_IDENTITY) 262 return TEE_SUCCESS; 263 264 if (id == NSAPP_IDENTITY) { 265 if (s->clnt_id.login == TEE_LOGIN_TRUSTED_APP) { 266 DMSG("nsec tries to hijack TA session"); 267 return TEE_ERROR_ACCESS_DENIED; 268 } 269 return TEE_SUCCESS; 270 } 271 272 if (memcmp(&s->clnt_id, id, sizeof(TEE_Identity)) != 0) { 273 DMSG("client id mismatch"); 274 return TEE_ERROR_ACCESS_DENIED; 275 } 276 return TEE_SUCCESS; 277 } 278 279 static void set_invoke_timeout(struct tee_ta_session *sess, 280 uint32_t cancel_req_to) 281 { 282 TEE_Time current_time; 283 TEE_Time cancel_time = { UINT32_MAX, UINT32_MAX }; 284 285 if (cancel_req_to == TEE_TIMEOUT_INFINITE) 286 goto out; 287 288 if (tee_time_get_sys_time(¤t_time) != TEE_SUCCESS) 289 goto out; 290 291 /* Check that it doesn't wrap */ 292 if (current_time.seconds + (cancel_req_to / 1000) >= 293 current_time.seconds) { 294 cancel_time.seconds = 295 current_time.seconds + cancel_req_to / 1000; 296 cancel_time.millis = current_time.millis + cancel_req_to % 1000; 297 if (cancel_time.millis > 1000) { 298 cancel_time.seconds++; 299 cancel_time.millis -= 1000; 300 } 301 } 302 303 out: 304 sess->cancel_time = cancel_time; 305 } 306 307 /*----------------------------------------------------------------------------- 308 * Close a Trusted Application and free available resources 309 *---------------------------------------------------------------------------*/ 310 TEE_Result tee_ta_close_session(struct tee_ta_session *csess, 311 struct tee_ta_session_head *open_sessions, 312 const TEE_Identity *clnt_id) 313 { 314 struct tee_ta_session *sess; 315 struct tee_ta_ctx *ctx; 316 317 DMSG("tee_ta_close_session(0x%" PRIxVA ")", (vaddr_t)csess); 318 319 if (!csess) 320 return TEE_ERROR_ITEM_NOT_FOUND; 321 322 sess = tee_ta_get_session((vaddr_t)csess, true, open_sessions); 323 324 if (!sess) { 325 EMSG("session 0x%" PRIxVA " to be removed is not found", 326 (vaddr_t)csess); 327 return TEE_ERROR_ITEM_NOT_FOUND; 328 } 329 330 if (check_client(sess, clnt_id) != TEE_SUCCESS) { 331 tee_ta_put_session(sess); 332 return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */ 333 } 334 335 ctx = sess->ctx; 336 DMSG(" ... Destroy session"); 337 338 tee_ta_set_busy(ctx); 339 340 if (!ctx->panicked) { 341 set_invoke_timeout(sess, TEE_TIMEOUT_INFINITE); 342 ctx->ops->enter_close_session(sess); 343 } 344 345 tee_ta_unlink_session(sess, open_sessions); 346 free(sess); 347 348 tee_ta_clear_busy(ctx); 349 350 mutex_lock(&tee_ta_mutex); 351 352 TEE_ASSERT(ctx->ref_count > 0); 353 ctx->ref_count--; 354 if (!ctx->ref_count && !(ctx->flags & TA_FLAG_INSTANCE_KEEP_ALIVE)) { 355 DMSG(" ... Destroy TA ctx"); 356 357 TAILQ_REMOVE(&tee_ctxes, ctx, link); 358 mutex_unlock(&tee_ta_mutex); 359 360 condvar_destroy(&ctx->busy_cv); 361 362 ctx->ops->destroy(ctx); 363 } else 364 mutex_unlock(&tee_ta_mutex); 365 366 return TEE_SUCCESS; 367 } 368 369 static TEE_Result tee_ta_init_session_with_context(struct tee_ta_ctx *ctx, 370 struct tee_ta_session *s) 371 { 372 /* 373 * If TA isn't single instance it should be loaded as new 374 * instance instead of doing anything with this instance. 375 * So tell the caller that we didn't find the TA it the 376 * caller will load a new instance. 377 */ 378 if ((ctx->flags & TA_FLAG_SINGLE_INSTANCE) == 0) 379 return TEE_ERROR_ITEM_NOT_FOUND; 380 381 /* 382 * The TA is single instance, if it isn't multi session we 383 * can't create another session unless it's the first 384 * new session towards a keepAlive TA. 385 */ 386 387 if (((ctx->flags & TA_FLAG_MULTI_SESSION) == 0) && 388 !(((ctx->flags & TA_FLAG_INSTANCE_KEEP_ALIVE) != 0) && 389 (ctx->ref_count == 0))) 390 return TEE_ERROR_BUSY; 391 392 DMSG(" ... Re-open TA %pUl", (void *)&ctx->uuid); 393 394 ctx->ref_count++; 395 s->ctx = ctx; 396 return TEE_SUCCESS; 397 } 398 399 400 401 static TEE_Result tee_ta_init_session(TEE_ErrorOrigin *err, 402 struct tee_ta_session_head *open_sessions, 403 const TEE_UUID *uuid, 404 struct tee_ta_session **sess) 405 { 406 TEE_Result res; 407 struct tee_ta_ctx *ctx; 408 struct tee_ta_session *s = calloc(1, sizeof(struct tee_ta_session)); 409 410 *err = TEE_ORIGIN_TEE; 411 if (!s) 412 return TEE_ERROR_OUT_OF_MEMORY; 413 414 s->cancel_mask = true; 415 condvar_init(&s->refc_cv); 416 condvar_init(&s->lock_cv); 417 s->lock_thread = THREAD_ID_INVALID; 418 s->ref_count = 1; 419 420 421 /* 422 * We take the global TA mutex here and hold it while doing 423 * RPC to load the TA. This big critical section should be broken 424 * down into smaller pieces. 425 */ 426 427 428 mutex_lock(&tee_ta_mutex); 429 TAILQ_INSERT_TAIL(open_sessions, s, link); 430 431 /* Look for already loaded TA */ 432 ctx = tee_ta_context_find(uuid); 433 if (ctx) { 434 res = tee_ta_init_session_with_context(ctx, s); 435 if (res == TEE_SUCCESS || res != TEE_ERROR_ITEM_NOT_FOUND) 436 goto out; 437 } 438 439 /* Look for static TA */ 440 res = tee_ta_init_static_ta_session(uuid, s); 441 if (res == TEE_SUCCESS || res != TEE_ERROR_ITEM_NOT_FOUND) 442 goto out; 443 444 /* Look for user TA */ 445 res = tee_ta_init_user_ta_session(uuid, s); 446 447 out: 448 if (res == TEE_SUCCESS) { 449 *sess = s; 450 } else { 451 TAILQ_REMOVE(open_sessions, s, link); 452 free(s); 453 } 454 mutex_unlock(&tee_ta_mutex); 455 return res; 456 } 457 458 TEE_Result tee_ta_open_session(TEE_ErrorOrigin *err, 459 struct tee_ta_session **sess, 460 struct tee_ta_session_head *open_sessions, 461 const TEE_UUID *uuid, 462 const TEE_Identity *clnt_id, 463 uint32_t cancel_req_to, 464 struct tee_ta_param *param) 465 { 466 TEE_Result res; 467 struct tee_ta_session *s = NULL; 468 struct tee_ta_ctx *ctx; 469 bool panicked; 470 bool was_busy = false; 471 472 res = tee_ta_init_session(err, open_sessions, uuid, &s); 473 if (res != TEE_SUCCESS) { 474 DMSG("init session failed 0x%x", res); 475 return res; 476 } 477 478 ctx = s->ctx; 479 480 if (ctx->panicked) { 481 DMSG("panicked, call tee_ta_close_session()"); 482 tee_ta_close_session(s, open_sessions, KERN_IDENTITY); 483 *err = TEE_ORIGIN_TEE; 484 return TEE_ERROR_TARGET_DEAD; 485 } 486 487 *sess = s; 488 /* Save identity of the owner of the session */ 489 s->clnt_id = *clnt_id; 490 491 res = tee_ta_verify_param(s, param); 492 if (res == TEE_SUCCESS) { 493 if (tee_ta_try_set_busy(ctx)) { 494 set_invoke_timeout(s, cancel_req_to); 495 res = ctx->ops->enter_open_session(s, param, err); 496 tee_ta_clear_busy(ctx); 497 } else { 498 /* Deadlock avoided */ 499 res = TEE_ERROR_BUSY; 500 was_busy = true; 501 } 502 } 503 504 panicked = ctx->panicked; 505 506 tee_ta_put_session(s); 507 if (panicked || (res != TEE_SUCCESS)) 508 tee_ta_close_session(s, open_sessions, KERN_IDENTITY); 509 510 /* 511 * Origin error equal to TEE_ORIGIN_TRUSTED_APP for "regular" error, 512 * apart from panicking. 513 */ 514 if (panicked || was_busy) 515 *err = TEE_ORIGIN_TEE; 516 else 517 *err = TEE_ORIGIN_TRUSTED_APP; 518 519 if (res != TEE_SUCCESS) 520 EMSG("Failed. Return error 0x%x", res); 521 522 return res; 523 } 524 525 TEE_Result tee_ta_invoke_command(TEE_ErrorOrigin *err, 526 struct tee_ta_session *sess, 527 const TEE_Identity *clnt_id, 528 uint32_t cancel_req_to, uint32_t cmd, 529 struct tee_ta_param *param) 530 { 531 TEE_Result res; 532 533 if (check_client(sess, clnt_id) != TEE_SUCCESS) 534 return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */ 535 536 if (sess->ctx->panicked) { 537 DMSG(" Panicked !"); 538 *err = TEE_ORIGIN_TEE; 539 return TEE_ERROR_TARGET_DEAD; 540 } 541 542 tee_ta_set_busy(sess->ctx); 543 544 res = tee_ta_verify_param(sess, param); 545 if (res != TEE_SUCCESS) { 546 *err = TEE_ORIGIN_TEE; 547 goto function_exit; 548 } 549 550 set_invoke_timeout(sess, cancel_req_to); 551 res = sess->ctx->ops->enter_invoke_cmd(sess, cmd, param, err); 552 553 if (sess->ctx->panicked) { 554 *err = TEE_ORIGIN_TEE; 555 res = TEE_ERROR_TARGET_DEAD; 556 } 557 558 function_exit: 559 tee_ta_clear_busy(sess->ctx); 560 if (res != TEE_SUCCESS) 561 DMSG(" => Error: %x of %d\n", res, *err); 562 return res; 563 } 564 565 TEE_Result tee_ta_cancel_command(TEE_ErrorOrigin *err, 566 struct tee_ta_session *sess, 567 const TEE_Identity *clnt_id) 568 { 569 *err = TEE_ORIGIN_TEE; 570 571 if (check_client(sess, clnt_id) != TEE_SUCCESS) 572 return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */ 573 574 sess->cancel = true; 575 return TEE_SUCCESS; 576 } 577 578 bool tee_ta_session_is_cancelled(struct tee_ta_session *s, TEE_Time *curr_time) 579 { 580 TEE_Time current_time; 581 582 if (s->cancel_mask) 583 return false; 584 585 if (s->cancel) 586 return true; 587 588 if (s->cancel_time.seconds == UINT32_MAX) 589 return false; 590 591 if (curr_time != NULL) 592 current_time = *curr_time; 593 else if (tee_time_get_sys_time(¤t_time) != TEE_SUCCESS) 594 return false; 595 596 if (current_time.seconds > s->cancel_time.seconds || 597 (current_time.seconds == s->cancel_time.seconds && 598 current_time.millis >= s->cancel_time.millis)) { 599 return true; 600 } 601 602 return false; 603 } 604 605 TEE_Result tee_ta_get_current_session(struct tee_ta_session **sess) 606 { 607 struct thread_specific_data *tsd = thread_get_tsd(); 608 609 if (!tsd->sess) 610 return TEE_ERROR_BAD_STATE; 611 *sess = tsd->sess; 612 return TEE_SUCCESS; 613 } 614 615 void tee_ta_set_current_session(struct tee_ta_session *sess) 616 { 617 struct thread_specific_data *tsd = thread_get_tsd(); 618 struct tee_ta_ctx *ctx = NULL; 619 620 if (sess) { 621 if (sess->calling_sess) 622 ctx = sess->calling_sess->ctx; 623 else 624 ctx = sess->ctx; 625 } 626 627 if (tsd->sess != sess) { 628 tsd->sess = sess; 629 tee_mmu_set_ctx(ctx); 630 } 631 /* 632 * If ctx->mmu == NULL we must not have user mapping active, 633 * if ctx->mmu != NULL we must have user mapping active. 634 */ 635 assert(((ctx && (ctx->flags & TA_FLAG_USER_MODE) ? 636 to_user_ta_ctx(ctx)->mmu : NULL) == NULL) == 637 !core_mmu_user_mapping_is_active()); 638 } 639 640 TEE_Result tee_ta_get_client_id(TEE_Identity *id) 641 { 642 TEE_Result res; 643 struct tee_ta_session *sess; 644 645 res = tee_ta_get_current_session(&sess); 646 if (res != TEE_SUCCESS) 647 return res; 648 649 if (id == NULL) 650 return TEE_ERROR_BAD_PARAMETERS; 651 652 *id = sess->clnt_id; 653 return TEE_SUCCESS; 654 } 655 656 /* 657 * dump_state - Display TA state as an error log. 658 */ 659 static void dump_state(struct tee_ta_ctx *ctx) 660 { 661 struct tee_ta_session *s = NULL; 662 bool active __maybe_unused; 663 664 active = ((tee_ta_get_current_session(&s) == TEE_SUCCESS) && 665 s && s->ctx == ctx); 666 667 EMSG_RAW("Status of TA %pUl (%p) %s", (void *)&ctx->uuid, (void *)ctx, 668 active ? "(active)" : ""); 669 ctx->ops->dump_state(ctx); 670 } 671 672 void tee_ta_dump_current(void) 673 { 674 struct tee_ta_session *s = NULL; 675 676 if (tee_ta_get_current_session(&s) != TEE_SUCCESS) { 677 EMSG("no valid session found, cannot log TA status"); 678 return; 679 } 680 681 dump_state(s->ctx); 682 } 683