/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <util.h>
#include <kernel/tee_common_otp.h>
#include <kernel/tee_common.h>
#include <kernel/tee_compat.h>
#include <tee_api_types.h>
#include <kernel/tee_ta_manager.h>
#include <utee_types.h>
#include <tee/tee_svc.h>
#include <tee/tee_cryp_utl.h>
#include <tee/abi.h>
#include <mm/tee_mmu.h>
#include <mm/tee_mm.h>
#include <kernel/tee_rpc.h>
#include <kernel/tee_rpc_types.h>
#include <kernel/tee_time.h>

#include <user_ta_header.h>
#include <trace.h>
#include <kernel/trace_ta.h>
#include <kernel/chip_services.h>

#if (TRACE_LEVEL == TRACE_FLOW) && defined(CFG_TEE_CORE_TA_TRACE)
void tee_svc_trace_syscall(int num)
{
	/* #0 is syscall return, not really interesting */
	if (num == 0)
		return;
	FMSG("syscall #%d", num);
}
#endif

void tee_svc_sys_log(const void *buf __unused, size_t len __unused)
{
#ifdef CFG_TEE_CORE_TA_TRACE
	char *kbuf;

	if (len == 0)
		return;

	kbuf = malloc(len);
	if (kbuf == NULL)
		return;
	*kbuf = '\0';

	/* log as Info/Raw traces */
	if (tee_svc_copy_from_user(NULL, kbuf, buf, len) == TEE_SUCCESS)
		TAMSG_RAW("%.*s", (int)len, kbuf);

	free(kbuf);
#endif
}

TEE_Result tee_svc_reserved(void)
{
	return TEE_ERROR_GENERIC;
}

TEE_Result tee_svc_not_supported(void)
{
	return TEE_ERROR_NOT_SUPPORTED;
}

uint32_t tee_svc_sys_dummy(uint32_t *a __unused)
{
	DMSG("tee_svc_sys_dummy: a 0x%" PRIxVA, (vaddr_t)a);
	return 0;
}

uint32_t tee_svc_sys_dummy_7args(uint32_t a1 __unused, uint32_t a2 __unused,
				 uint32_t a3 __unused, uint32_t a4 __unused,
				 uint32_t a5 __unused, uint32_t a6 __unused,
				 uint32_t a7 __unused)
{
	DMSG("tee_svc_sys_dummy_7args: 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, %x, %x\n",
	     a1, a2, a3, a4, a5, a6, a7);
	return 0;
}

uint32_t tee_svc_sys_nocall(void)
{
	DMSG("No syscall");
	return 0x1;
}

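/*
 * Copy the value of the property 'prop' to the user buffer 'buf' of size
 * 'blen'. Returns TEE_ERROR_SHORT_BUFFER if the buffer is too small and
 * TEE_ERROR_NOT_IMPLEMENTED for unknown properties.
 */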
TEE_Result tee_svc_sys_get_property(uint32_t prop, tee_uaddr_t buf, size_t blen)
{
	static const char api_vers[] = "1.0";
	static const char descr[] = "Version N.N";
	/*
	 * Value 100 means:
	 * System time based on REE-controlled timers. Can be tampered with by
	 * the REE. The implementation must still guarantee that the system
	 * time is monotonic, i.e., successive calls to TEE_GetSystemTime must
	 * return increasing values of the system time.
	 */
	static const uint32_t sys_time_prot_lvl = 100;
	static const uint32_t ta_time_prot_lvl = 100;
	struct tee_ta_session *sess;
	TEE_Result res;

	res = tee_ta_get_current_session(&sess);
	if (res != TEE_SUCCESS)
		return res;

	switch (prop) {
	case UTEE_PROP_TEE_API_VERSION:
		if (blen < sizeof(api_vers))
			return TEE_ERROR_SHORT_BUFFER;
		return tee_svc_copy_to_user(sess, (void *)buf, api_vers,
					    sizeof(api_vers));

	case UTEE_PROP_TEE_DESCR:
		if (blen < sizeof(descr))
			return TEE_ERROR_SHORT_BUFFER;
		return tee_svc_copy_to_user(sess, (void *)buf, descr,
					    sizeof(descr));

	case UTEE_PROP_TEE_DEV_ID:
	{
		TEE_UUID uuid;
		const size_t nslen = 4;
		uint8_t data[4 + FVR_DIE_ID_NUM_REGS * sizeof(uint32_t)] = {
			'S', 'T', 'E', 'E' };

		if (blen < sizeof(uuid))
			return TEE_ERROR_SHORT_BUFFER;

		if (tee_otp_get_die_id(data + nslen, sizeof(data) - nslen))
			return TEE_ERROR_BAD_STATE;

		res = tee_hash_createdigest(TEE_ALG_SHA256, data, sizeof(data),
					    (uint8_t *)&uuid, sizeof(uuid));
		if (res != TEE_SUCCESS)
			return TEE_ERROR_BAD_STATE;

		/*
		 * Change the random value into a UUID as specified in
		 * RFC 4122. The magic values are from the example code
		 * in the RFC.
		 *
		 * TEE_UUID is defined slightly differently from the RFC,
		 * but close enough for our purpose.
		 */

		uuid.timeHiAndVersion &= 0x0fff;
		uuid.timeHiAndVersion |= 5 << 12;

		/* uuid.clock_seq_hi_and_reserved in the RFC */
		uuid.clockSeqAndNode[0] &= 0x3f;
		uuid.clockSeqAndNode[0] |= 0x80;

		return tee_svc_copy_to_user(sess, (void *)buf, &uuid,
					    sizeof(TEE_UUID));
	}

	case UTEE_PROP_TEE_SYS_TIME_PROT_LEVEL:
		if (blen < sizeof(sys_time_prot_lvl))
			return TEE_ERROR_SHORT_BUFFER;
		return tee_svc_copy_to_user(sess, (void *)buf,
					    &sys_time_prot_lvl,
					    sizeof(sys_time_prot_lvl));

	case UTEE_PROP_TEE_TA_TIME_PROT_LEVEL:
		if (blen < sizeof(ta_time_prot_lvl))
			return TEE_ERROR_SHORT_BUFFER;
		return tee_svc_copy_to_user(sess, (void *)buf,
					    &ta_time_prot_lvl,
					    sizeof(ta_time_prot_lvl));

	case UTEE_PROP_CLIENT_ID:
		if (blen < sizeof(TEE_Identity))
			return TEE_ERROR_SHORT_BUFFER;

		return tee_svc_copy_to_user(sess, (void *)buf,
					    &sess->clnt_id,
					    sizeof(TEE_Identity));

	case UTEE_PROP_TA_APP_ID:
		if (blen < sizeof(TEE_UUID))
			return TEE_ERROR_SHORT_BUFFER;

		return tee_svc_copy_to_user(sess, (void *)buf,
					    &sess->ctx->head->uuid,
					    sizeof(TEE_UUID));

	default:
		break;
	}
	return TEE_ERROR_NOT_IMPLEMENTED;
}

/*
 * A TA invokes another TA with parameters.
 * If some parameters are memory references:
 * - either the memref is inside the TA private RAM: the TA is not allowed to
 *   expose its private RAM: use a temporary memory buffer and copy the data,
 * - or the memref is not in the TA private RAM:
 *   - if the memref was mapped to the TA, the TA is allowed to expose it,
 *   - if so, convert the memref virtual address into a physical address.
 */
static TEE_Result tee_svc_copy_param(struct tee_ta_session *sess,
				     struct tee_ta_session *called_sess,
				     uint32_t param_types,
				     struct abi_user32_param *callee_params,
				     struct tee_ta_param *param,
				     tee_paddr_t tmp_buf_pa[TEE_NUM_PARAMS],
				     tee_mm_entry_t **mm)
{
	size_t n;
	TEE_Result res;
	size_t req_mem = 0;
	size_t s;
	uint8_t *dst = 0;
	tee_paddr_t dst_pa, src_pa = 0;
	bool ta_private_memref[TEE_NUM_PARAMS];

	/* fill 'param' input struct with caller params description buffer */
	param->types = param_types;
	if (!callee_params) {
		if (param->types != 0)
			return TEE_ERROR_BAD_PARAMETERS;
		memset(param->params, 0, sizeof(param->params));
	} else {
		res = tee_mmu_check_access_rights(sess->ctx,
			TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_ANY_OWNER,
			(tee_uaddr_t)callee_params,
			sizeof(struct abi_user32_param));
		if (res != TEE_SUCCESS)
			return res;
		abi_user32_param_to_param(param->params, callee_params,
					  param_types);
	}

	if ((called_sess != NULL) &&
	    (called_sess->ctx->static_ta == NULL) &&
	    (called_sess->ctx->flags & TA_FLAG_USER_MODE) == 0) {
		/*
		 * Kernel TA: borrow the mapping of the calling session
		 * during this call.
		 */
		called_sess->calling_sess = sess;
		return TEE_SUCCESS;
	}

	for (n = 0; n < TEE_NUM_PARAMS; n++) {

		ta_private_memref[n] = false;

		switch (TEE_PARAM_TYPE_GET(param->types, n)) {
		case TEE_PARAM_TYPE_MEMREF_INPUT:
		case TEE_PARAM_TYPE_MEMREF_OUTPUT:
		case TEE_PARAM_TYPE_MEMREF_INOUT:
			if (param->params[n].memref.buffer == NULL) {
				if (param->params[n].memref.size != 0)
					return TEE_ERROR_BAD_PARAMETERS;
				break;
			}
			/* A user TA cannot expose its private memory */
			if (tee_mmu_is_vbuf_inside_ta_private(sess->ctx,
					param->params[n].memref.buffer,
					param->params[n].memref.size)) {

				s = ROUNDUP(param->params[n].memref.size,
					    sizeof(uint32_t));
				/* Check overflow */
				if (req_mem + s < req_mem)
					return TEE_ERROR_BAD_PARAMETERS;
				req_mem += s;
				ta_private_memref[n] = true;
				break;
			}
			if (tee_mmu_is_vbuf_intersect_ta_private(sess->ctx,
					param->params[n].memref.buffer,
					param->params[n].memref.size))
				return TEE_ERROR_BAD_PARAMETERS;

			if (tee_mmu_user_va2pa(sess->ctx,
					(void *)param->params[n].memref.buffer,
					&src_pa) != TEE_SUCCESS)
				return TEE_ERROR_BAD_PARAMETERS;

			param->param_attr[n] = tee_mmu_user_get_cache_attr(
				sess->ctx,
				(void *)param->params[n].memref.buffer);

			param->params[n].memref.buffer = (void *)src_pa;
			break;

		default:
			break;
		}
	}

	if (req_mem == 0)
		return TEE_SUCCESS;

	/* Allocate a section in secure DDR */
	*mm = tee_mm_alloc(&tee_mm_sec_ddr, req_mem);
	if (*mm == NULL) {
		DMSG("tee_mm_alloc TEE_ERROR_GENERIC");
		return TEE_ERROR_GENERIC;
	}

	/* Get the virtual address for the section in secure DDR */
	res = tee_mmu_kmap(tee_mm_get_smem(*mm), req_mem, &dst);
	if (res != TEE_SUCCESS)
		return res;
	dst_pa = tee_mm_get_smem(*mm);

	for (n = 0; n < TEE_NUM_PARAMS; n++) {

		if (ta_private_memref[n] == false)
			continue;

		s = ROUNDUP(param->params[n].memref.size, sizeof(uint32_t));

		switch (TEE_PARAM_TYPE_GET(param->types, n)) {
		case TEE_PARAM_TYPE_MEMREF_INPUT:
		case TEE_PARAM_TYPE_MEMREF_INOUT:
			if (param->params[n].memref.buffer != NULL) {
				res = tee_svc_copy_from_user(sess, dst,
						param->params[n].memref.buffer,
						param->params[n].memref.size);
				if (res != TEE_SUCCESS)
					return res;
				param->param_attr[n] =
					tee_mmu_kmap_get_cache_attr(dst);
				param->params[n].memref.buffer = (void *)dst_pa;
				tmp_buf_pa[n] = dst_pa;
				dst += s;
				dst_pa += s;
			}
			break;

		case TEE_PARAM_TYPE_MEMREF_OUTPUT:
			if (param->params[n].memref.buffer != NULL) {
				param->param_attr[n] =
					tee_mmu_kmap_get_cache_attr(dst);
				param->params[n].memref.buffer = (void *)dst_pa;
				tmp_buf_pa[n] = dst_pa;
				dst += s;
				dst_pa += s;
			}
			break;

		default:
			continue;
		}
	}

	tee_mmu_kunmap(dst, req_mem);

	return TEE_SUCCESS;
}

/*
 * Back from execution of the service: update the parameters passed from
 * the TA. If some parameters were memory references:
 * - either the memref was temporary: copy back the data and update the size,
 * - or it was the original TA memref: update only the size value.
 */
static TEE_Result tee_svc_update_out_param(
		struct tee_ta_session *sess,
		struct tee_ta_session *called_sess,
		struct tee_ta_param *param,
		tee_paddr_t tmp_buf_pa[TEE_NUM_PARAMS],
		struct abi_user32_param *usr_param)
{
	size_t n;
	TEE_Param callee_params[TEE_NUM_PARAMS];
	bool have_private_mem_map = (called_sess == NULL) ||
		(called_sess->ctx->static_ta != NULL) ||
		((called_sess->ctx->flags & TA_FLAG_USER_MODE) != 0);

	tee_ta_set_current_session(sess);
	abi_user32_param_to_param(callee_params, usr_param, param->types);

	for (n = 0; n < TEE_NUM_PARAMS; n++) {
		switch (TEE_PARAM_TYPE_GET(param->types, n)) {
		case TEE_PARAM_TYPE_MEMREF_OUTPUT:
		case TEE_PARAM_TYPE_MEMREF_INOUT:

			/* outside TA private memory => memref is valid, update size */
			if (!tee_mmu_is_vbuf_inside_ta_private(sess->ctx,
					callee_params[n].memref.buffer,
					param->params[n].memref.size)) {
				callee_params[n].memref.size =
					param->params[n].memref.size;
				break;
			}

			/*
			 * If we called a kernel TA the parameters are in
			 * shared memory and no copy is needed.
			 */
			if (have_private_mem_map &&
			    param->params[n].memref.size <=
			    callee_params[n].memref.size) {
				uint8_t *src = 0;
				TEE_Result res;

				/* FIXME: TA_RAM is already mapped! */
				res = tee_mmu_kmap(tmp_buf_pa[n],
					param->params[n].memref.size, &src);
				if (res != TEE_SUCCESS)
					return TEE_ERROR_GENERIC;

				res = tee_svc_copy_to_user(sess,
					callee_params[n].memref.buffer, src,
					param->params[n].memref.size);
				if (res != TEE_SUCCESS)
					return res;
				tee_mmu_kunmap(src,
					       param->params[n].memref.size);
			}
			callee_params[n].memref.size =
				param->params[n].memref.size;
			break;

		case TEE_PARAM_TYPE_VALUE_OUTPUT:
		case TEE_PARAM_TYPE_VALUE_INOUT:
			callee_params[n].value = param->params[n].value;
			break;

		default:
			continue;
		}
	}

	abi_param_to_user32_param(usr_param, callee_params, param->types);

	return TEE_SUCCESS;
}

/* Called when a TA calls OpenSession on another TA */
TEE_Result tee_svc_open_ta_session(const TEE_UUID *dest,
			uint32_t cancel_req_to, uint32_t param_types,
			struct abi_user32_param *usr_param,
			TEE_TASessionHandle *ta_sess,
			uint32_t *ret_orig)
{
	TEE_Result res;
	uint32_t ret_o = TEE_ORIGIN_TEE;
	struct tee_ta_session *s = NULL;
	struct tee_ta_session *sess;
	tee_mm_entry_t *mm_param = NULL;

	TEE_UUID *uuid = malloc(sizeof(TEE_UUID));
	struct tee_ta_param *param = malloc(sizeof(struct tee_ta_param));
	TEE_Identity *clnt_id = malloc(sizeof(TEE_Identity));
	tee_paddr_t tmp_buf_pa[TEE_NUM_PARAMS];

	if (uuid == NULL || param == NULL || clnt_id == NULL) {
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto out_free_only;
	}

	memset(param, 0, sizeof(struct tee_ta_param));

	res = tee_ta_get_current_session(&sess);
	if (res != TEE_SUCCESS)
		goto out_free_only;

	res = tee_svc_copy_from_user(sess, uuid, dest, sizeof(TEE_UUID));
	if (res != TEE_SUCCESS)
		goto function_exit;

	clnt_id->login = TEE_LOGIN_TRUSTED_APP;
	memcpy(&clnt_id->uuid, &sess->ctx->head->uuid, sizeof(TEE_UUID));

	res = tee_svc_copy_param(sess, NULL, param_types, usr_param, param,
				 tmp_buf_pa, &mm_param);
	if (res != TEE_SUCCESS)
		goto function_exit;

	/*
	 * Find the session of a multi-session TA or a static TA.
	 * In such a case, there is no need to ask the supplicant for the
	 * TA code.
	 */
	res = tee_ta_open_session(&ret_o, &s, &sess->ctx->open_sessions, uuid,
				  clnt_id, cancel_req_to, param);
	if (res != TEE_SUCCESS)
		goto function_exit;

	res = tee_svc_update_out_param(sess, NULL, param, tmp_buf_pa,
				       usr_param);

function_exit:
	tee_ta_set_current_session(sess);

	if (mm_param != NULL) {
		TEE_Result res2;
		void *va = 0;

		res2 = tee_mmu_kmap_pa2va((void *)tee_mm_get_smem(mm_param),
					  &va);
		if (res2 == TEE_SUCCESS)
			tee_mmu_kunmap(va, tee_mm_get_bytes(mm_param));
	}
	tee_mm_free(mm_param);
	tee_svc_copy_to_user(sess, ta_sess, &s, sizeof(s));
	tee_svc_copy_to_user(sess, ret_orig, &ret_o, sizeof(ret_o));

out_free_only:
	free(param);
	free(uuid);
	free(clnt_id);
	return res;
}

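/* Called when a TA closes a session it previously opened on another TA */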
TEE_Result tee_svc_close_ta_session(TEE_TASessionHandle ta_sess)
{
	TEE_Result res;
	struct tee_ta_session *sess;
	TEE_Identity clnt_id;

	res = tee_ta_get_current_session(&sess);
	if (res != TEE_SUCCESS)
		return res;

	clnt_id.login = TEE_LOGIN_TRUSTED_APP;
	memcpy(&clnt_id.uuid, &sess->ctx->head->uuid, sizeof(TEE_UUID));

	tee_ta_set_current_session(NULL);
	res = tee_ta_close_session((vaddr_t)ta_sess, &sess->ctx->open_sessions,
				   &clnt_id);
	tee_ta_set_current_session(sess);
	return res;
}

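/* Called when a TA invokes a command in a session it holds on another TA */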
TEE_Result tee_svc_invoke_ta_command(TEE_TASessionHandle ta_sess,
				     uint32_t cancel_req_to, uint32_t cmd_id,
				     uint32_t param_types,
				     struct abi_user32_param *usr_param,
				     uint32_t *ret_orig)
{
	TEE_Result res;
	uint32_t ret_o = TEE_ORIGIN_TEE;
	struct tee_ta_param param = { 0 };
	TEE_Identity clnt_id;
	struct tee_ta_session *sess;
	struct tee_ta_session *called_sess = (struct tee_ta_session *)ta_sess;
	tee_mm_entry_t *mm_param = NULL;
	tee_paddr_t tmp_buf_pa[TEE_NUM_PARAMS];

	res = tee_ta_get_current_session(&sess);
	if (res != TEE_SUCCESS)
		return res;

	res = tee_ta_verify_session_pointer(called_sess,
					    &sess->ctx->open_sessions);
	if (res != TEE_SUCCESS)
		return res;

	clnt_id.login = TEE_LOGIN_TRUSTED_APP;
	memcpy(&clnt_id.uuid, &sess->ctx->head->uuid, sizeof(TEE_UUID));

	res = tee_svc_copy_param(sess, called_sess, param_types, usr_param,
				 &param, tmp_buf_pa, &mm_param);
	if (res != TEE_SUCCESS)
		goto function_exit;

	res = tee_ta_invoke_command(&ret_o, called_sess, &clnt_id,
				    cancel_req_to, cmd_id, &param);
	if (res != TEE_SUCCESS)
		goto function_exit;

	res = tee_svc_update_out_param(sess, called_sess, &param, tmp_buf_pa,
				       usr_param);
	if (res != TEE_SUCCESS)
		goto function_exit;

function_exit:
	tee_ta_set_current_session(sess);
	called_sess->calling_sess = NULL; /* clear any borrowed mapping */

	if (mm_param != NULL) {
		TEE_Result res2;
		void *va = 0;

		res2 = tee_mmu_kmap_pa2va((void *)tee_mm_get_smem(mm_param),
					  &va);
		if (res2 == TEE_SUCCESS)
			tee_mmu_kunmap(va, tee_mm_get_bytes(mm_param));
	}
	tee_mm_free(mm_param);
	if (ret_orig)
		tee_svc_copy_to_user(sess, ret_orig, &ret_o, sizeof(ret_o));
	return res;
}

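/*
 * Check that the current TA has the access rights given by 'flags' on the
 * user buffer [buf, buf + len).
 */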
TEE_Result tee_svc_check_access_rights(uint32_t flags, const void *buf,
				       size_t len)
{
	TEE_Result res;
	struct tee_ta_session *s;

	res = tee_ta_get_current_session(&s);
	if (res != TEE_SUCCESS)
		return res;

	return tee_mmu_check_access_rights(s->ctx, flags, (tee_uaddr_t)buf,
					   len);
}

TEE_Result tee_svc_copy_from_user(struct tee_ta_session *sess, void *kaddr,
				  const void *uaddr, size_t len)
{
	TEE_Result res;
	struct tee_ta_session *s;

	if (sess == NULL) {
		res = tee_ta_get_current_session(&s);
		if (res != TEE_SUCCESS)
			return res;
	} else {
		s = sess;
		tee_ta_set_current_session(s);
	}

	res = tee_mmu_check_access_rights(s->ctx,
					  TEE_MEMORY_ACCESS_READ |
					  TEE_MEMORY_ACCESS_ANY_OWNER,
					  (tee_uaddr_t)uaddr, len);
	if (res != TEE_SUCCESS)
		return res;

	memcpy(kaddr, uaddr, len);
	return TEE_SUCCESS;
}

TEE_Result tee_svc_copy_to_user(struct tee_ta_session *sess, void *uaddr,
				const void *kaddr, size_t len)
{
	TEE_Result res;
	struct tee_ta_session *s;

	if (sess == NULL) {
		res = tee_ta_get_current_session(&s);
		if (res != TEE_SUCCESS)
			return res;
	} else {
		s = sess;
		tee_ta_set_current_session(s);
	}

	res = tee_mmu_check_access_rights(s->ctx,
					  TEE_MEMORY_ACCESS_WRITE |
					  TEE_MEMORY_ACCESS_ANY_OWNER,
					  (tee_uaddr_t)uaddr, len);
	if (res != TEE_SUCCESS)
		return res;

	memcpy(uaddr, kaddr, len);
	return TEE_SUCCESS;
}

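/*
 * A session is reported as cancelled when cancellation is not masked and
 * either an explicit cancel request is pending or the cancellation timeout
 * has expired. 'curr_time' may be NULL, in which case the system time is
 * fetched when needed.
 */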
static bool session_is_cancelled(struct tee_ta_session *s, TEE_Time *curr_time)
{
	TEE_Time current_time;

	if (s->cancel_mask)
		return false;

	if (s->cancel)
		return true;

	if (s->cancel_time.seconds == UINT32_MAX)
		return false;

	if (curr_time != NULL)
		current_time = *curr_time;
	else if (tee_time_get_sys_time(&current_time) != TEE_SUCCESS)
		return false;

	if (current_time.seconds > s->cancel_time.seconds ||
	    (current_time.seconds == s->cancel_time.seconds &&
	     current_time.millis >= s->cancel_time.millis)) {
		return true;
	}

	return false;
}

TEE_Result tee_svc_get_cancellation_flag(bool *cancel)
{
	TEE_Result res;
	struct tee_ta_session *s = NULL;
	bool c;

	res = tee_ta_get_current_session(&s);
	if (res != TEE_SUCCESS)
		return res;

	c = session_is_cancelled(s, NULL);

	return tee_svc_copy_to_user(s, cancel, &c, sizeof(c));
}

TEE_Result tee_svc_unmask_cancellation(bool *old_mask)
{
	TEE_Result res;
	struct tee_ta_session *s = NULL;
	bool m;

	res = tee_ta_get_current_session(&s);
	if (res != TEE_SUCCESS)
		return res;

	m = s->cancel_mask;
	s->cancel_mask = false;
	return tee_svc_copy_to_user(s, old_mask, &m, sizeof(m));
}

TEE_Result tee_svc_mask_cancellation(bool *old_mask)
{
	TEE_Result res;
	struct tee_ta_session *s = NULL;
	bool m;

	res = tee_ta_get_current_session(&s);
	if (res != TEE_SUCCESS)
		return res;

	m = s->cancel_mask;
	s->cancel_mask = true;
	return tee_svc_copy_to_user(s, old_mask, &m, sizeof(m));
}

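/*
 * Wait until 'timeout' milliseconds have elapsed. The wait is aborted with
 * TEE_ERROR_CANCEL if the session is cancelled in the meantime.
 */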
TEE_Result tee_svc_wait(uint32_t timeout)
{
	TEE_Result res = TEE_SUCCESS;
	uint32_t mytime = 0;
	struct tee_ta_session *s;
	TEE_Time base_time;
	TEE_Time current_time;

	res = tee_ta_get_current_session(&s);
	if (res != TEE_SUCCESS)
		return res;

	res = tee_time_get_sys_time(&base_time);
	if (res != TEE_SUCCESS)
		return res;

	while (true) {
		res = tee_time_get_sys_time(&current_time);
		if (res != TEE_SUCCESS)
			return res;

		if (session_is_cancelled(s, &current_time))
			return TEE_ERROR_CANCEL;

		mytime = (current_time.seconds - base_time.seconds) * 1000 +
			 (int)current_time.millis - (int)base_time.millis;
		if (mytime >= timeout)
			return TEE_SUCCESS;

		tee_time_wait(timeout - mytime);
	}

	return res;
}

TEE_Result tee_svc_get_time(enum utee_time_category cat, TEE_Time *mytime)
{
	TEE_Result res, res2;
	struct tee_ta_session *s = NULL;
	TEE_Time t;

	res = tee_ta_get_current_session(&s);
	if (res != TEE_SUCCESS)
		return res;

	switch (cat) {
	case UTEE_TIME_CAT_SYSTEM:
		res = tee_time_get_sys_time(&t);
		break;
	case UTEE_TIME_CAT_TA_PERSISTENT:
		res = tee_time_get_ta_time((const void *)&s->ctx->head->uuid,
					   &t);
		break;
	case UTEE_TIME_CAT_REE:
		res = tee_time_get_ree_time(&t);
		break;
	default:
		res = TEE_ERROR_BAD_PARAMETERS;
		break;
	}

	if (res == TEE_SUCCESS || res == TEE_ERROR_OVERFLOW) {
		res2 = tee_svc_copy_to_user(s, mytime, &t, sizeof(t));
		if (res2 != TEE_SUCCESS)
			res = res2;
	}

	return res;
}

TEE_Result tee_svc_set_ta_time(const TEE_Time *mytime)
{
	TEE_Result res;
	struct tee_ta_session *s = NULL;
	TEE_Time t;

	res = tee_ta_get_current_session(&s);
	if (res != TEE_SUCCESS)
		return res;

	res = tee_svc_copy_from_user(s, &t, mytime, sizeof(t));
	if (res != TEE_SUCCESS)
		return res;

	return tee_time_set_ta_time((const void *)&s->ctx->head->uuid, &t);
}

#ifdef CFG_CACHE_API
TEE_Result tee_svc_cache_operation(void *va, size_t len,
				   enum utee_cache_operation op)
{
	TEE_Result res;
	struct tee_ta_session *s = NULL;

	res = tee_ta_get_current_session(&s);
	if (res != TEE_SUCCESS)
		return res;

	if ((s->ctx->flags & TA_FLAG_CACHE_MAINTENANCE) == 0)
		return TEE_ERROR_NOT_SUPPORTED;

	return tee_uta_cache_operation(s, op, va, len);
}
#endif