/*
 * Copyright (c) 2016, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <platform_config.h>

#include <arm.h>
#include <assert.h>
#include <keep.h>
#include <kernel/misc.h>
#include <kernel/msg_param.h>
#include <kernel/panic.h>
#include <kernel/spinlock.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/thread_defs.h>
#include <kernel/thread.h>
#include <mm/core_memprot.h>
#include <mm/mobj.h>
#include <mm/tee_mm.h>
#include <mm/tee_mmu.h>
#include <mm/tee_pager.h>
#include <optee_msg.h>
#include <sm/optee_smc.h>
#include <sm/sm.h>
#include <tee/tee_fs_rpc.h>
#include <tee/tee_cryp_utl.h>
#include <trace.h>
#include <util.h>

#include "thread_private.h"

#ifdef CFG_WITH_ARM_TRUSTED_FW
#define STACK_TMP_OFFS		0
#else
#define STACK_TMP_OFFS		SM_STACK_TMP_RESERVE_SIZE
#endif

#ifdef ARM32
#ifdef CFG_CORE_SANITIZE_KADDRESS
#define STACK_TMP_SIZE		(3072 + STACK_TMP_OFFS)
#else
#define STACK_TMP_SIZE		(1536 + STACK_TMP_OFFS)
#endif
#define STACK_THREAD_SIZE	8192

#ifdef CFG_CORE_SANITIZE_KADDRESS
#define STACK_ABT_SIZE		3072
#else
#define STACK_ABT_SIZE		2048
#endif

#endif /*ARM32*/

#ifdef ARM64
#define STACK_TMP_SIZE		(2048 + STACK_TMP_OFFS)
#define STACK_THREAD_SIZE	8192

#if TRACE_LEVEL > 0
#define STACK_ABT_SIZE		3072
#else
#define STACK_ABT_SIZE		1024
#endif
#endif /*ARM64*/

struct thread_ctx threads[CFG_NUM_THREADS];

static struct thread_core_local thread_core_local[CFG_TEE_CORE_NB_CORE];

#ifdef CFG_WITH_STACK_CANARIES
#ifdef ARM32
#define STACK_CANARY_SIZE	(4 * sizeof(uint32_t))
#endif
#ifdef ARM64
#define STACK_CANARY_SIZE	(8 * sizeof(uint32_t))
#endif
#define START_CANARY_VALUE	0xdededede
#define END_CANARY_VALUE	0xabababab
#define GET_START_CANARY(name, stack_num) name[stack_num][0]
#define GET_END_CANARY(name, stack_num) \
	name[stack_num][sizeof(name[stack_num]) / sizeof(uint32_t) - 1]
#else
#define STACK_CANARY_SIZE	0
#endif
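/*
 * Illustrative layout sketch (added commentary, not part of the original
 * build logic): each stack declared with DECLARE_STACK() below is an
 * array of uint32_t where
 *
 *	name[n][0]      holds START_CANARY_VALUE (lowest address)
 *	name[n][N - 1]  holds END_CANARY_VALUE (highest address)
 *
 * with N = sizeof(name[n]) / sizeof(uint32_t). GET_STACK() places the
 * initial stack pointer STACK_CANARY_SIZE / 2 bytes below the end of the
 * buffer, so the end canary is never legitimately written, and the start
 * canary is only clobbered if the downward-growing stack overflows.
 */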
#define DECLARE_STACK(name, num_stacks, stack_size, linkage) \
linkage uint32_t name[num_stacks] \
		[ROUNDUP(stack_size + STACK_CANARY_SIZE, STACK_ALIGNMENT) / \
		sizeof(uint32_t)] \
		__attribute__((section(".nozi_stack"), \
			       aligned(STACK_ALIGNMENT)))

#define STACK_SIZE(stack) (sizeof(stack) - STACK_CANARY_SIZE / 2)

#define GET_STACK(stack) \
	((vaddr_t)(stack) + STACK_SIZE(stack))

DECLARE_STACK(stack_tmp, CFG_TEE_CORE_NB_CORE, STACK_TMP_SIZE, static);
DECLARE_STACK(stack_abt, CFG_TEE_CORE_NB_CORE, STACK_ABT_SIZE, static);
#ifndef CFG_WITH_PAGER
DECLARE_STACK(stack_thread, CFG_NUM_THREADS, STACK_THREAD_SIZE, static);
#endif

const void *stack_tmp_export = (uint8_t *)stack_tmp + sizeof(stack_tmp[0]) -
			       (STACK_TMP_OFFS + STACK_CANARY_SIZE / 2);
const uint32_t stack_tmp_stride = sizeof(stack_tmp[0]);

/*
 * This stack setup info is required by secondary boot cores before they
 * each locally enable the pager (the MMU). Hence it is kept in the pager
 * sections.
 */
KEEP_PAGER(stack_tmp_export);
KEEP_PAGER(stack_tmp_stride);

thread_smc_handler_t thread_std_smc_handler_ptr;
static thread_smc_handler_t thread_fast_smc_handler_ptr;
thread_nintr_handler_t thread_nintr_handler_ptr;
thread_pm_handler_t thread_cpu_on_handler_ptr;
thread_pm_handler_t thread_cpu_off_handler_ptr;
thread_pm_handler_t thread_cpu_suspend_handler_ptr;
thread_pm_handler_t thread_cpu_resume_handler_ptr;
thread_pm_handler_t thread_system_off_handler_ptr;
thread_pm_handler_t thread_system_reset_handler_ptr;

static unsigned int thread_global_lock = SPINLOCK_UNLOCK;
static bool thread_prealloc_rpc_cache;

static void init_canaries(void)
{
#ifdef CFG_WITH_STACK_CANARIES
	size_t n;
#define INIT_CANARY(name)						\
	for (n = 0; n < ARRAY_SIZE(name); n++) {			\
		uint32_t *start_canary = &GET_START_CANARY(name, n);	\
		uint32_t *end_canary = &GET_END_CANARY(name, n);	\
									\
		*start_canary = START_CANARY_VALUE;			\
		*end_canary = END_CANARY_VALUE;				\
		DMSG("#Stack canaries for %s[%zu] with top at %p\n",	\
		     #name, n, (void *)(end_canary - 1));		\
		DMSG("watch *%p\n", (void *)end_canary);		\
	}

	INIT_CANARY(stack_tmp);
	INIT_CANARY(stack_abt);
#ifndef CFG_WITH_PAGER
	INIT_CANARY(stack_thread);
#endif
#endif/*CFG_WITH_STACK_CANARIES*/
}

#define CANARY_DIED(stack, loc, n)					\
	do {								\
		EMSG_RAW("Dead canary at %s of '%s[%zu]'", #loc, #stack, n); \
		panic();						\
	} while (0)

void thread_check_canaries(void)
{
#ifdef CFG_WITH_STACK_CANARIES
	size_t n;

	for (n = 0; n < ARRAY_SIZE(stack_tmp); n++) {
		if (GET_START_CANARY(stack_tmp, n) != START_CANARY_VALUE)
			CANARY_DIED(stack_tmp, start, n);
		if (GET_END_CANARY(stack_tmp, n) != END_CANARY_VALUE)
			CANARY_DIED(stack_tmp, end, n);
	}

	for (n = 0; n < ARRAY_SIZE(stack_abt); n++) {
		if (GET_START_CANARY(stack_abt, n) != START_CANARY_VALUE)
			CANARY_DIED(stack_abt, start, n);
		if (GET_END_CANARY(stack_abt, n) != END_CANARY_VALUE)
			CANARY_DIED(stack_abt, end, n);
	}
#ifndef CFG_WITH_PAGER
	for (n = 0; n < ARRAY_SIZE(stack_thread); n++) {
		if (GET_START_CANARY(stack_thread, n) != START_CANARY_VALUE)
			CANARY_DIED(stack_thread, start, n);
		if (GET_END_CANARY(stack_thread, n) != END_CANARY_VALUE)
			CANARY_DIED(stack_thread, end, n);
	}
#endif
#endif/*CFG_WITH_STACK_CANARIES*/
}
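/*
 * Debugging aid (an illustrative assumption, not project documentation):
 * the "watch *%p" lines printed by init_canaries() are meant to be pasted
 * into a debugger, e.g. in GDB:
 *
 *	(gdb) watch *0xADDR
 *
 * A watchpoint traps the instant a canary is overwritten, whereas
 * thread_check_canaries() only detects the corruption afterwards, on the
 * next SMC entry.
 */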
static void lock_global(void)
{
	cpu_spin_lock(&thread_global_lock);
}

static void unlock_global(void)
{
	cpu_spin_unlock(&thread_global_lock);
}

#ifdef ARM32
uint32_t thread_get_exceptions(void)
{
	uint32_t cpsr = read_cpsr();

	return (cpsr >> CPSR_F_SHIFT) & THREAD_EXCP_ALL;
}

void thread_set_exceptions(uint32_t exceptions)
{
	uint32_t cpsr = read_cpsr();

	/* Foreign interrupts must not be unmasked while holding a spinlock */
	if (!(exceptions & THREAD_EXCP_FOREIGN_INTR))
		assert_have_no_spinlock();

	cpsr &= ~(THREAD_EXCP_ALL << CPSR_F_SHIFT);
	cpsr |= ((exceptions & THREAD_EXCP_ALL) << CPSR_F_SHIFT);
	write_cpsr(cpsr);
}
#endif /*ARM32*/

#ifdef ARM64
uint32_t thread_get_exceptions(void)
{
	uint32_t daif = read_daif();

	return (daif >> DAIF_F_SHIFT) & THREAD_EXCP_ALL;
}

void thread_set_exceptions(uint32_t exceptions)
{
	uint32_t daif = read_daif();

	/* Foreign interrupts must not be unmasked while holding a spinlock */
	if (!(exceptions & THREAD_EXCP_FOREIGN_INTR))
		assert_have_no_spinlock();

	daif &= ~(THREAD_EXCP_ALL << DAIF_F_SHIFT);
	daif |= ((exceptions & THREAD_EXCP_ALL) << DAIF_F_SHIFT);
	write_daif(daif);
}
#endif /*ARM64*/

uint32_t thread_mask_exceptions(uint32_t exceptions)
{
	uint32_t state = thread_get_exceptions();

	thread_set_exceptions(state | (exceptions & THREAD_EXCP_ALL));
	return state;
}

void thread_unmask_exceptions(uint32_t state)
{
	thread_set_exceptions(state & THREAD_EXCP_ALL);
}

struct thread_core_local *thread_get_core_local(void)
{
	uint32_t cpu_id = get_core_pos();

	/*
	 * Foreign interrupts must be disabled before playing with core_local
	 * since we otherwise may be rescheduled to a different core in the
	 * middle of this function.
	 */
	assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);

	assert(cpu_id < CFG_TEE_CORE_NB_CORE);
	return &thread_core_local[cpu_id];
}

static void thread_lazy_save_ns_vfp(void)
{
#ifdef CFG_WITH_VFP
	struct thread_ctx *thr = threads + thread_get_id();

	thr->vfp_state.ns_saved = false;
#if defined(ARM64) && defined(CFG_WITH_ARM_TRUSTED_FW)
	/*
	 * ARM TF saves and restores CPACR_EL1, so we must assume NS world
	 * uses VFP and always preserve the register file when secure world
	 * is about to use it.
	 */
	thr->vfp_state.ns.force_save = true;
#endif
	vfp_lazy_save_state_init(&thr->vfp_state.ns);
#endif /*CFG_WITH_VFP*/
}

static void thread_lazy_restore_ns_vfp(void)
{
#ifdef CFG_WITH_VFP
	struct thread_ctx *thr = threads + thread_get_id();
	struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp;

	assert(!thr->vfp_state.sec_lazy_saved && !thr->vfp_state.sec_saved);

	if (tuv && tuv->lazy_saved && !tuv->saved) {
		vfp_lazy_save_state_final(&tuv->vfp);
		tuv->saved = true;
	}

	vfp_lazy_restore_state(&thr->vfp_state.ns, thr->vfp_state.ns_saved);
	thr->vfp_state.ns_saved = false;
#endif /*CFG_WITH_VFP*/
}

#ifdef ARM32
static void init_regs(struct thread_ctx *thread,
		      struct thread_smc_args *args)
{
	thread->regs.pc = (uint32_t)thread_std_smc_entry;

	/*
	 * Stdcalls start in SVC mode with masked foreign interrupts, masked
	 * asynchronous aborts and unmasked native interrupts.
	 */
	thread->regs.cpsr = read_cpsr() & ARM32_CPSR_E;
	thread->regs.cpsr |= CPSR_MODE_SVC | CPSR_A |
			(THREAD_EXCP_FOREIGN_INTR << ARM32_CPSR_F_SHIFT);
	/* Enable thumb mode if it's a thumb instruction */
	if (thread->regs.pc & 1)
		thread->regs.cpsr |= CPSR_T;
	/* Reinitialize stack pointer */
	thread->regs.svc_sp = thread->stack_va_end;

	/*
	 * Copy arguments into context. This will make the
	 * arguments appear in r0-r7 when thread is started.
	 */
	thread->regs.r0 = args->a0;
	thread->regs.r1 = args->a1;
	thread->regs.r2 = args->a2;
	thread->regs.r3 = args->a3;
	thread->regs.r4 = args->a4;
	thread->regs.r5 = args->a5;
	thread->regs.r6 = args->a6;
	thread->regs.r7 = args->a7;
}
#endif /*ARM32*/

#ifdef ARM64
static void init_regs(struct thread_ctx *thread,
		      struct thread_smc_args *args)
{
	thread->regs.pc = (uint64_t)thread_std_smc_entry;

	/*
	 * Stdcalls start at EL1 with masked foreign interrupts, masked
	 * asynchronous aborts and unmasked native interrupts.
	 */
	thread->regs.cpsr = SPSR_64(SPSR_64_MODE_EL1, SPSR_64_MODE_SP_EL0,
				    THREAD_EXCP_FOREIGN_INTR | DAIFBIT_ABT);
	/* Reinitialize stack pointer */
	thread->regs.sp = thread->stack_va_end;

	/*
	 * Copy arguments into context. This will make the
	 * arguments appear in x0-x7 when thread is started.
	 */
	thread->regs.x[0] = args->a0;
	thread->regs.x[1] = args->a1;
	thread->regs.x[2] = args->a2;
	thread->regs.x[3] = args->a3;
	thread->regs.x[4] = args->a4;
	thread->regs.x[5] = args->a5;
	thread->regs.x[6] = args->a6;
	thread->regs.x[7] = args->a7;

	/* Set up frame pointer as per the AArch64 AAPCS */
	thread->regs.x[29] = 0;
}
#endif /*ARM64*/

void thread_init_boot_thread(void)
{
	struct thread_core_local *l = thread_get_core_local();
	size_t n;

	for (n = 0; n < CFG_NUM_THREADS; n++) {
		TAILQ_INIT(&threads[n].mutexes);
		TAILQ_INIT(&threads[n].tsd.sess_stack);
		SLIST_INIT(&threads[n].tsd.pgt_cache);
	}

	for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++)
		thread_core_local[n].curr_thread = -1;

	l->curr_thread = 0;
	threads[0].state = THREAD_STATE_ACTIVE;
}

void thread_clr_boot_thread(void)
{
	struct thread_core_local *l = thread_get_core_local();

	assert(l->curr_thread >= 0 && l->curr_thread < CFG_NUM_THREADS);
	assert(threads[l->curr_thread].state == THREAD_STATE_ACTIVE);
	assert(TAILQ_EMPTY(&threads[l->curr_thread].mutexes));
	threads[l->curr_thread].state = THREAD_STATE_FREE;
	l->curr_thread = -1;
}

static void thread_alloc_and_run(struct thread_smc_args *args)
{
	size_t n;
	struct thread_core_local *l = thread_get_core_local();
	bool found_thread = false;

	assert(l->curr_thread == -1);

	lock_global();

	for (n = 0; n < CFG_NUM_THREADS; n++) {
		if (threads[n].state == THREAD_STATE_FREE) {
			threads[n].state = THREAD_STATE_ACTIVE;
			found_thread = true;
			break;
		}
	}

	unlock_global();

	if (!found_thread) {
		args->a0 = OPTEE_SMC_RETURN_ETHREAD_LIMIT;
		return;
	}

	l->curr_thread = n;

	threads[n].flags = 0;
	init_regs(threads + n, args);

	/* Save Hypervisor Client ID */
	threads[n].hyp_clnt_id = args->a7;

	thread_lazy_save_ns_vfp();
	thread_resume(&threads[n].regs);
}
#ifdef ARM32
static void copy_a0_to_a5(struct thread_ctx_regs *regs,
			  struct thread_smc_args *args)
{
	/*
	 * Update returned values from RPC, values will appear in
	 * r0-r5 when thread is resumed.
	 */
	regs->r0 = args->a0;
	regs->r1 = args->a1;
	regs->r2 = args->a2;
	regs->r3 = args->a3;
	regs->r4 = args->a4;
	regs->r5 = args->a5;
}
#endif /*ARM32*/

#ifdef ARM64
static void copy_a0_to_a5(struct thread_ctx_regs *regs,
			  struct thread_smc_args *args)
{
	/*
	 * Update returned values from RPC, values will appear in
	 * x0-x5 when thread is resumed.
	 */
	regs->x[0] = args->a0;
	regs->x[1] = args->a1;
	regs->x[2] = args->a2;
	regs->x[3] = args->a3;
	regs->x[4] = args->a4;
	regs->x[5] = args->a5;
}
#endif /*ARM64*/

#ifdef ARM32
static bool is_from_user(uint32_t cpsr)
{
	return (cpsr & ARM32_CPSR_MODE_MASK) == ARM32_CPSR_MODE_USR;
}
#endif

#ifdef ARM64
static bool is_from_user(uint32_t cpsr)
{
	if (cpsr & (SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT))
		return true;
	if (((cpsr >> SPSR_64_MODE_EL_SHIFT) & SPSR_64_MODE_EL_MASK) ==
	    SPSR_64_MODE_EL0)
		return true;
	return false;
}
#endif

static bool is_user_mode(struct thread_ctx_regs *regs)
{
	return is_from_user((uint32_t)regs->cpsr);
}

static void thread_resume_from_rpc(struct thread_smc_args *args)
{
	size_t n = args->a3; /* thread id */
	struct thread_core_local *l = thread_get_core_local();
	uint32_t rv = 0;

	assert(l->curr_thread == -1);

	lock_global();

	if (n < CFG_NUM_THREADS &&
	    threads[n].state == THREAD_STATE_SUSPENDED &&
	    args->a7 == threads[n].hyp_clnt_id)
		threads[n].state = THREAD_STATE_ACTIVE;
	else
		rv = OPTEE_SMC_RETURN_ERESUME;

	unlock_global();

	if (rv) {
		args->a0 = rv;
		return;
	}

	l->curr_thread = n;

	if (is_user_mode(&threads[n].regs))
		tee_ta_update_session_utime_resume();

	if (threads[n].have_user_map)
		core_mmu_set_user_map(&threads[n].user_map);

	/*
	 * A return from RPC to request service of a foreign interrupt must
	 * not get parameters from non-secure world.
	 */
	if (threads[n].flags & THREAD_FLAGS_COPY_ARGS_ON_RETURN) {
		copy_a0_to_a5(&threads[n].regs, args);
		threads[n].flags &= ~THREAD_FLAGS_COPY_ARGS_ON_RETURN;
	}

	thread_lazy_save_ns_vfp();
	thread_resume(&threads[n].regs);
}

void thread_handle_fast_smc(struct thread_smc_args *args)
{
	thread_check_canaries();
	thread_fast_smc_handler_ptr(args);
	/* Fast handlers must not unmask any exceptions */
	assert(thread_get_exceptions() == THREAD_EXCP_ALL);
}

void thread_handle_std_smc(struct thread_smc_args *args)
{
	thread_check_canaries();

	if (args->a0 == OPTEE_SMC_CALL_RETURN_FROM_RPC)
		thread_resume_from_rpc(args);
	else
		thread_alloc_and_run(args);
}

/*
 * Helper routine for the assembly function thread_std_smc_entry()
 *
 * Note: this function is weak just to make it possible to exclude it from
 * the unpaged area.
 */
void __weak __thread_std_smc_entry(struct thread_smc_args *args)
{
	thread_std_smc_handler_ptr(args);

	if (args->a0 == OPTEE_SMC_RETURN_OK) {
		struct thread_ctx *thr = threads + thread_get_id();

		tee_fs_rpc_cache_clear(&thr->tsd);
		if (!thread_prealloc_rpc_cache) {
			thread_rpc_free_arg(thr->rpc_carg);
			mobj_free(thr->rpc_mobj);
			thr->rpc_carg = 0;
			thr->rpc_arg = NULL;
			thr->rpc_mobj = NULL;
		}
	}
}

void *thread_get_tmp_sp(void)
{
	struct thread_core_local *l = thread_get_core_local();

	return (void *)l->tmp_stack_va_end;
}

#ifdef ARM64
vaddr_t thread_get_saved_thread_sp(void)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != -1);
	return threads[ct].kern_sp;
}
#endif /*ARM64*/

vaddr_t thread_stack_start(void)
{
	struct thread_ctx *thr;
	int ct = thread_get_id_may_fail();

	if (ct == -1)
		return 0;

	thr = threads + ct;
	return thr->stack_va_end - STACK_THREAD_SIZE;
}

size_t thread_stack_size(void)
{
	return STACK_THREAD_SIZE;
}

bool thread_is_from_abort_mode(struct thread_abort_regs __maybe_unused *regs)
{
#ifdef ARM32
	return (regs->spsr & ARM32_CPSR_MODE_MASK) == ARM32_CPSR_MODE_ABT;
#endif
#ifdef ARM64
	struct thread_core_local *l = thread_get_core_local();

	return (l->flags >> THREAD_CLF_SAVED_SHIFT) & THREAD_CLF_ABORT;
#endif
}

#ifdef ARM32
bool thread_is_in_normal_mode(void)
{
	return (read_cpsr() & ARM32_CPSR_MODE_MASK) == ARM32_CPSR_MODE_SVC;
}
#endif

#ifdef ARM64
bool thread_is_in_normal_mode(void)
{
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	struct thread_core_local *l = thread_get_core_local();
	bool ret;

	/* If any bit in l->flags is set we're handling some exception. */
	ret = !l->flags;
	thread_unmask_exceptions(exceptions);

	return ret;
}
#endif

void thread_state_free(void)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != -1);
	assert(TAILQ_EMPTY(&threads[ct].mutexes));

	thread_lazy_restore_ns_vfp();
	tee_pager_release_phys(
		(void *)(threads[ct].stack_va_end - STACK_THREAD_SIZE),
		STACK_THREAD_SIZE);

	lock_global();

	assert(threads[ct].state == THREAD_STATE_ACTIVE);
	threads[ct].state = THREAD_STATE_FREE;
	threads[ct].flags = 0;
	l->curr_thread = -1;

	unlock_global();
}
#ifdef CFG_WITH_PAGER
static void release_unused_kernel_stack(struct thread_ctx *thr,
					uint32_t cpsr __maybe_unused)
{
#ifdef ARM64
	/*
	 * If we're from user mode then thr->regs.sp is the saved user
	 * stack pointer and thr->kern_sp holds the last kernel stack
	 * pointer. But if we're from kernel mode then thr->kern_sp isn't
	 * up to date so we need to read from thr->regs.sp instead.
	 */
	vaddr_t sp = is_from_user(cpsr) ? thr->kern_sp : thr->regs.sp;
#else
	vaddr_t sp = thr->regs.svc_sp;
#endif
	vaddr_t base = thr->stack_va_end - STACK_THREAD_SIZE;
	size_t len = sp - base;

	tee_pager_release_phys((void *)base, len);
}
#else
static void release_unused_kernel_stack(struct thread_ctx *thr __unused,
					uint32_t cpsr __unused)
{
}
#endif

int thread_state_suspend(uint32_t flags, uint32_t cpsr, vaddr_t pc)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != -1);

	thread_check_canaries();

	release_unused_kernel_stack(threads + ct, cpsr);

	if (is_from_user(cpsr)) {
		thread_user_save_vfp();
		tee_ta_update_session_utime_suspend();
		tee_ta_gprof_sample_pc(pc);
	}
	thread_lazy_restore_ns_vfp();

	lock_global();

	assert(threads[ct].state == THREAD_STATE_ACTIVE);
	threads[ct].flags |= flags;
	threads[ct].regs.cpsr = cpsr;
	threads[ct].regs.pc = pc;
	threads[ct].state = THREAD_STATE_SUSPENDED;

	threads[ct].have_user_map = core_mmu_user_mapping_is_active();
	if (threads[ct].have_user_map) {
		core_mmu_get_user_map(&threads[ct].user_map);
		core_mmu_set_user_map(NULL);
	}

	l->curr_thread = -1;

	unlock_global();

	return ct;
}

#ifdef ARM32
static void set_tmp_stack(struct thread_core_local *l, vaddr_t sp)
{
	l->tmp_stack_va_end = sp;
	thread_set_irq_sp(sp);
	thread_set_fiq_sp(sp);
}

static void set_abt_stack(struct thread_core_local *l __unused, vaddr_t sp)
{
	thread_set_abt_sp(sp);
}
#endif /*ARM32*/

#ifdef ARM64
static void set_tmp_stack(struct thread_core_local *l, vaddr_t sp)
{
	/*
	 * We're already using the tmp stack when this function is called
	 * so there's no need to assign it to any stack pointer. However,
	 * we'll need to restore it at different times so store it here.
	 */
	l->tmp_stack_va_end = sp;
}

static void set_abt_stack(struct thread_core_local *l, vaddr_t sp)
{
	l->abt_stack_va_end = sp;
}
#endif /*ARM64*/

bool thread_init_stack(uint32_t thread_id, vaddr_t sp)
{
	if (thread_id >= CFG_NUM_THREADS)
		return false;
	threads[thread_id].stack_va_end = sp;
	return true;
}

int thread_get_id_may_fail(void)
{
	/*
	 * thread_get_core_local() requires foreign interrupts to be disabled
	 */
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	thread_unmask_exceptions(exceptions);
	return ct;
}

int thread_get_id(void)
{
	int ct = thread_get_id_may_fail();

	assert(ct >= 0 && ct < CFG_NUM_THREADS);
	return ct;
}

static void init_handlers(const struct thread_handlers *handlers)
{
	thread_std_smc_handler_ptr = handlers->std_smc;
	thread_fast_smc_handler_ptr = handlers->fast_smc;
	thread_nintr_handler_ptr = handlers->nintr;
	thread_cpu_on_handler_ptr = handlers->cpu_on;
	thread_cpu_off_handler_ptr = handlers->cpu_off;
	thread_cpu_suspend_handler_ptr = handlers->cpu_suspend;
	thread_cpu_resume_handler_ptr = handlers->cpu_resume;
	thread_system_off_handler_ptr = handlers->system_off;
	thread_system_reset_handler_ptr = handlers->system_reset;
}

#ifdef CFG_WITH_PAGER
static void init_thread_stacks(void)
{
	size_t n;

	/*
	 * Allocate virtual memory for thread stacks.
	 */
	for (n = 0; n < CFG_NUM_THREADS; n++) {
		tee_mm_entry_t *mm;
		vaddr_t sp;

		/* Find vmem for thread stack and its protection gap */
		mm = tee_mm_alloc(&tee_mm_vcore,
				  SMALL_PAGE_SIZE + STACK_THREAD_SIZE);
		assert(mm);

		/* Claim eventual physical page */
		tee_pager_add_pages(tee_mm_get_smem(mm), tee_mm_get_size(mm),
				    true);

		/* Add the area to the pager */
		tee_pager_add_core_area(tee_mm_get_smem(mm) + SMALL_PAGE_SIZE,
					tee_mm_get_bytes(mm) - SMALL_PAGE_SIZE,
					TEE_MATTR_PRW | TEE_MATTR_LOCKED,
					NULL, NULL);

		/* init effective stack */
		sp = tee_mm_get_smem(mm) + tee_mm_get_bytes(mm);
		if (!thread_init_stack(n, sp))
			panic("init stack failed");
	}
}
#else
static void init_thread_stacks(void)
{
	size_t n;

	/* Assign the thread stacks */
	for (n = 0; n < CFG_NUM_THREADS; n++) {
		if (!thread_init_stack(n, GET_STACK(stack_thread[n])))
			panic("thread_init_stack failed");
	}
}
#endif /*CFG_WITH_PAGER*/

void thread_init_primary(const struct thread_handlers *handlers)
{
	init_handlers(handlers);

	/* Initialize canaries around the stacks */
	init_canaries();

	init_thread_stacks();
	pgt_init();
}

static void init_sec_mon(size_t pos __maybe_unused)
{
#if !defined(CFG_WITH_ARM_TRUSTED_FW)
	/* Initialize secure monitor */
	sm_init(GET_STACK(stack_tmp[pos]));
#endif
}

void thread_init_per_cpu(void)
{
	size_t pos = get_core_pos();
	struct thread_core_local *l = thread_get_core_local();

	init_sec_mon(pos);

	set_tmp_stack(l, GET_STACK(stack_tmp[pos]) - STACK_TMP_OFFS);
	set_abt_stack(l, GET_STACK(stack_abt[pos]));

	thread_init_vbar();
}
struct thread_specific_data *thread_get_tsd(void)
{
	return &threads[thread_get_id()].tsd;
}

struct thread_ctx_regs *thread_get_ctx_regs(void)
{
	struct thread_core_local *l = thread_get_core_local();

	assert(l->curr_thread != -1);
	return &threads[l->curr_thread].regs;
}

void thread_set_foreign_intr(bool enable)
{
	/* thread_get_core_local() requires foreign interrupts to be disabled */
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	struct thread_core_local *l;

	l = thread_get_core_local();

	assert(l->curr_thread != -1);

	if (enable) {
		threads[l->curr_thread].flags |=
					THREAD_FLAGS_FOREIGN_INTR_ENABLE;
		thread_set_exceptions(exceptions & ~THREAD_EXCP_FOREIGN_INTR);
	} else {
		/*
		 * No need to disable foreign interrupts here since they're
		 * already disabled above.
		 */
		threads[l->curr_thread].flags &=
					~THREAD_FLAGS_FOREIGN_INTR_ENABLE;
	}
}

void thread_restore_foreign_intr(void)
{
	/* thread_get_core_local() requires foreign interrupts to be disabled */
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	struct thread_core_local *l;

	l = thread_get_core_local();

	assert(l->curr_thread != -1);

	if (threads[l->curr_thread].flags & THREAD_FLAGS_FOREIGN_INTR_ENABLE)
		thread_set_exceptions(exceptions & ~THREAD_EXCP_FOREIGN_INTR);
}

#ifdef CFG_WITH_VFP
uint32_t thread_kernel_enable_vfp(void)
{
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	struct thread_ctx *thr = threads + thread_get_id();
	struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp;

	assert(!vfp_is_enabled());

	if (!thr->vfp_state.ns_saved) {
		vfp_lazy_save_state_final(&thr->vfp_state.ns);
		thr->vfp_state.ns_saved = true;
	} else if (thr->vfp_state.sec_lazy_saved &&
		   !thr->vfp_state.sec_saved) {
		/*
		 * This happens when we're handling an abort while the
		 * thread was using the VFP state.
		 */
		vfp_lazy_save_state_final(&thr->vfp_state.sec);
		thr->vfp_state.sec_saved = true;
	} else if (tuv && tuv->lazy_saved && !tuv->saved) {
		/*
		 * This can happen either during syscall or abort
		 * processing (while processing a syscall).
		 */
		vfp_lazy_save_state_final(&tuv->vfp);
		tuv->saved = true;
	}

	vfp_enable();
	return exceptions;
}

void thread_kernel_disable_vfp(uint32_t state)
{
	uint32_t exceptions;

	assert(vfp_is_enabled());

	vfp_disable();
	exceptions = thread_get_exceptions();
	assert(exceptions & THREAD_EXCP_FOREIGN_INTR);
	exceptions &= ~THREAD_EXCP_FOREIGN_INTR;
	exceptions |= state & THREAD_EXCP_FOREIGN_INTR;
	thread_set_exceptions(exceptions);
}

void thread_kernel_save_vfp(void)
{
	struct thread_ctx *thr = threads + thread_get_id();

	assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);
	if (vfp_is_enabled()) {
		vfp_lazy_save_state_init(&thr->vfp_state.sec);
		thr->vfp_state.sec_lazy_saved = true;
	}
}

void thread_kernel_restore_vfp(void)
{
	struct thread_ctx *thr = threads + thread_get_id();

	assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);
	assert(!vfp_is_enabled());
	if (thr->vfp_state.sec_lazy_saved) {
		vfp_lazy_restore_state(&thr->vfp_state.sec,
				       thr->vfp_state.sec_saved);
		thr->vfp_state.sec_saved = false;
		thr->vfp_state.sec_lazy_saved = false;
	}
}

void thread_user_enable_vfp(struct thread_user_vfp_state *uvfp)
{
	struct thread_ctx *thr = threads + thread_get_id();
	struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp;

	assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);
	assert(!vfp_is_enabled());

	if (!thr->vfp_state.ns_saved) {
		vfp_lazy_save_state_final(&thr->vfp_state.ns);
		thr->vfp_state.ns_saved = true;
	} else if (tuv && uvfp != tuv) {
		if (tuv->lazy_saved && !tuv->saved) {
			vfp_lazy_save_state_final(&tuv->vfp);
			tuv->saved = true;
		}
	}

	if (uvfp->lazy_saved)
		vfp_lazy_restore_state(&uvfp->vfp, uvfp->saved);
	uvfp->lazy_saved = false;
	uvfp->saved = false;

	thr->vfp_state.uvfp = uvfp;
	vfp_enable();
}

void thread_user_save_vfp(void)
{
	struct thread_ctx *thr = threads + thread_get_id();
	struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp;

	assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);
	if (!vfp_is_enabled())
		return;

	assert(tuv && !tuv->lazy_saved && !tuv->saved);
	vfp_lazy_save_state_init(&tuv->vfp);
	tuv->lazy_saved = true;
}

void thread_user_clear_vfp(struct thread_user_vfp_state *uvfp)
{
	struct thread_ctx *thr = threads + thread_get_id();

	if (uvfp == thr->vfp_state.uvfp)
		thr->vfp_state.uvfp = NULL;
	uvfp->lazy_saved = false;
	uvfp->saved = false;
}
#endif /*CFG_WITH_VFP*/
#ifdef ARM32
static bool get_spsr(bool is_32bit, unsigned long entry_func, uint32_t *spsr)
{
	uint32_t s;

	if (!is_32bit)
		return false;

	s = read_spsr();
	s &= ~(CPSR_MODE_MASK | CPSR_T | CPSR_IT_MASK1 | CPSR_IT_MASK2);
	s |= CPSR_MODE_USR;
	if (entry_func & 1)
		s |= CPSR_T;
	*spsr = s;
	return true;
}
#endif

#ifdef ARM64
static bool get_spsr(bool is_32bit, unsigned long entry_func, uint32_t *spsr)
{
	uint32_t s;

	if (is_32bit) {
		s = read_daif() & (SPSR_32_AIF_MASK << SPSR_32_AIF_SHIFT);
		s |= SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT;
		s |= (entry_func & SPSR_32_T_MASK) << SPSR_32_T_SHIFT;
	} else {
		s = read_daif() & (SPSR_64_DAIF_MASK << SPSR_64_DAIF_SHIFT);
	}

	*spsr = s;
	return true;
}
#endif

uint32_t thread_enter_user_mode(unsigned long a0, unsigned long a1,
		unsigned long a2, unsigned long a3, unsigned long user_sp,
		unsigned long entry_func, bool is_32bit,
		uint32_t *exit_status0, uint32_t *exit_status1)
{
	uint32_t spsr;

	tee_ta_update_session_utime_resume();

	if (!get_spsr(is_32bit, entry_func, &spsr)) {
		*exit_status0 = 1; /* panic */
		*exit_status1 = 0xbadbadba;
		return 0;
	}
	return __thread_enter_user_mode(a0, a1, a2, a3, user_sp, entry_func,
					spsr, exit_status0, exit_status1);
}

void thread_add_mutex(struct mutex *m)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != -1 && threads[ct].state == THREAD_STATE_ACTIVE);
	assert(m->owner_id == MUTEX_OWNER_ID_NONE);
	m->owner_id = ct;
	TAILQ_INSERT_TAIL(&threads[ct].mutexes, m, link);
}

void thread_rem_mutex(struct mutex *m)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != -1 && threads[ct].state == THREAD_STATE_ACTIVE);
	assert(m->owner_id == ct);
	m->owner_id = MUTEX_OWNER_ID_NONE;
	TAILQ_REMOVE(&threads[ct].mutexes, m, link);
}

bool thread_disable_prealloc_rpc_cache(uint64_t *cookie)
{
	bool rv;
	size_t n;
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);

	lock_global();

	for (n = 0; n < CFG_NUM_THREADS; n++) {
		if (threads[n].state != THREAD_STATE_FREE) {
			rv = false;
			goto out;
		}
	}

	rv = true;
	for (n = 0; n < CFG_NUM_THREADS; n++) {
		if (threads[n].rpc_arg) {
			*cookie = threads[n].rpc_carg;
			threads[n].rpc_carg = 0;
			threads[n].rpc_arg = NULL;
			goto out;
		}
	}

	*cookie = 0;
	thread_prealloc_rpc_cache = false;
out:
	unlock_global();
	thread_unmask_exceptions(exceptions);
	return rv;
}

bool thread_enable_prealloc_rpc_cache(void)
{
	bool rv;
	size_t n;
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);

	lock_global();

	for (n = 0; n < CFG_NUM_THREADS; n++) {
		if (threads[n].state != THREAD_STATE_FREE) {
			rv = false;
			goto out;
		}
	}

	rv = true;
	thread_prealloc_rpc_cache = true;
out:
	unlock_global();
	thread_unmask_exceptions(exceptions);
	return rv;
}

void thread_rpc_free_arg(uint64_t cookie)
{
	if (cookie) {
		uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = {
			OPTEE_SMC_RETURN_RPC_FREE
		};

		reg_pair_from_64(cookie, rpc_args + 1, rpc_args + 2);
		thread_rpc(rpc_args);
	}
}
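/*
 * A 64-bit cookie does not fit in a single 32-bit SMC argument register,
 * so thread_rpc_free_arg() above and the RPC helpers below split it
 * across rpc_args[1] and rpc_args[2] with reg_pair_from_64()
 * (illustrative):
 *
 *	reg_pair_from_64(cookie, rpc_args + 1, rpc_args + 2);
 *
 * Normal world hands the pair back the same way, and it is recombined
 * with reg_pair_to_64() as in thread_rpc_alloc_arg() below.
 */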
struct mobj *thread_rpc_alloc_arg(size_t size, uint64_t *cookie)
{
	paddr_t pa;
	uint64_t co;
	uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = {
		OPTEE_SMC_RETURN_RPC_ALLOC, size
	};
	struct mobj *mobj = NULL;

	thread_rpc(rpc_args);

	pa = reg_pair_to_64(rpc_args[1], rpc_args[2]);
	co = reg_pair_to_64(rpc_args[4], rpc_args[5]);

	if (!ALIGNMENT_IS_OK(pa, struct optee_msg_arg))
		goto err;

	mobj = mobj_shm_alloc(pa, size);
	if (!mobj)
		goto err;

	*cookie = co;
	return mobj;
err:
	thread_rpc_free_arg(co);
	mobj_free(mobj);
	*cookie = 0;
	return NULL;
}

static bool get_rpc_arg(uint32_t cmd, size_t num_params,
			struct optee_msg_arg **arg_ret, uint64_t *carg_ret)
{
	struct thread_ctx *thr = threads + thread_get_id();
	struct optee_msg_arg *arg = thr->rpc_arg;
	struct mobj *mobj;
	size_t sz = OPTEE_MSG_GET_ARG_SIZE(THREAD_RPC_MAX_NUM_PARAMS);
	uint64_t c;

	if (num_params > THREAD_RPC_MAX_NUM_PARAMS)
		return false;

	if (!arg) {
		mobj = thread_rpc_alloc_arg(sz, &c);
		if (!mobj)
			return false;

		arg = mobj_get_va(mobj, 0);
		if (!arg)
			goto bad;

		thr->rpc_arg = arg;
		thr->rpc_carg = c;
		thr->rpc_mobj = mobj;
	}

	memset(arg, 0, OPTEE_MSG_GET_ARG_SIZE(num_params));
	arg->cmd = cmd;
	arg->num_params = num_params;
	arg->ret = TEE_ERROR_GENERIC; /* in case value isn't updated */

	*arg_ret = arg;
	*carg_ret = thr->rpc_carg;
	return true;

bad:
	thread_rpc_free_arg(c);
	return false;
}

uint32_t thread_rpc_cmd(uint32_t cmd, size_t num_params,
			struct optee_msg_param *params)
{
	uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = { OPTEE_SMC_RETURN_RPC_CMD };
	struct optee_msg_arg *arg;
	uint64_t carg;
	size_t n;

	/*
	 * Break recursion in case plat_prng_add_jitter_entropy_norpc()
	 * sleeps on a mutex or unlocks a mutex with a sleeper (contended
	 * mutex).
	 */
	if (cmd != OPTEE_MSG_RPC_CMD_WAIT_QUEUE)
		plat_prng_add_jitter_entropy_norpc();

	if (!get_rpc_arg(cmd, num_params, &arg, &carg))
		return TEE_ERROR_OUT_OF_MEMORY;

	memcpy(arg->params, params, sizeof(*params) * num_params);

	reg_pair_from_64(carg, rpc_args + 1, rpc_args + 2);
	thread_rpc(rpc_args);
	for (n = 0; n < num_params; n++) {
		switch (params[n].attr & OPTEE_MSG_ATTR_TYPE_MASK) {
		case OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT:
		case OPTEE_MSG_ATTR_TYPE_VALUE_INOUT:
		case OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT:
		case OPTEE_MSG_ATTR_TYPE_RMEM_INOUT:
		case OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT:
		case OPTEE_MSG_ATTR_TYPE_TMEM_INOUT:
			params[n] = arg->params[n];
			break;
		default:
			break;
		}
	}
	return arg->ret;
}

/**
 * Free physical memory previously allocated with thread_rpc_alloc()
 *
 * @bt:		must be the same as supplied when allocating
 * @cookie:	cookie received when allocating the buffer
 * @mobj:	mobj that describes the allocated buffer
 *
 * This function also frees the corresponding mobj.
 */
static void thread_rpc_free(unsigned int bt, uint64_t cookie, struct mobj *mobj)
{
	uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = { OPTEE_SMC_RETURN_RPC_CMD };
	struct optee_msg_arg *arg;
	uint64_t carg;

	if (!get_rpc_arg(OPTEE_MSG_RPC_CMD_SHM_FREE, 1, &arg, &carg))
		return;

	arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
	arg->params[0].u.value.a = bt;
	arg->params[0].u.value.b = cookie;
	arg->params[0].u.value.c = 0;

	mobj_free(mobj);

	reg_pair_from_64(carg, rpc_args + 1, rpc_args + 2);
	thread_rpc(rpc_args);
}

/**
 * Allocates a shared memory buffer via RPC
 *
 * @size:	size in bytes of shared memory buffer
 * @align:	required alignment of buffer
 * @bt:		buffer type OPTEE_MSG_RPC_SHM_TYPE_*
 * @cookie:	returned cookie used when freeing the buffer
 *
 * Returns a mobj that describes the allocated buffer, or NULL if the
 * allocation failed.
 */
static struct mobj *thread_rpc_alloc(size_t size, size_t align, unsigned int bt,
				     uint64_t *cookie)
{
	uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = { OPTEE_SMC_RETURN_RPC_CMD };
	struct optee_msg_arg *arg;
	uint64_t carg;
	struct mobj *mobj = NULL;

	if (!get_rpc_arg(OPTEE_MSG_RPC_CMD_SHM_ALLOC, 1, &arg, &carg))
		goto fail;

	arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
	arg->params[0].u.value.a = bt;
	arg->params[0].u.value.b = size;
	arg->params[0].u.value.c = align;

	reg_pair_from_64(carg, rpc_args + 1, rpc_args + 2);
	thread_rpc(rpc_args);

	if (arg->ret != TEE_SUCCESS)
		goto fail;

	if (arg->num_params != 1)
		goto fail;

	mobj = mobj_shm_alloc(arg->params[0].u.tmem.buf_ptr,
			      arg->params[0].u.tmem.size);
	*cookie = arg->params[0].u.tmem.shm_ref;

	if (!mobj)
		goto free_first;

	assert(mobj_is_nonsec(mobj));
	return mobj;

free_first:
	thread_rpc_free(bt, *cookie, mobj);
fail:
	*cookie = 0;
	return NULL;
}

struct mobj *thread_rpc_alloc_payload(size_t size, uint64_t *cookie)
{
	return thread_rpc_alloc(size, 8, OPTEE_MSG_RPC_SHM_TYPE_APPL, cookie);
}

void thread_rpc_free_payload(uint64_t cookie, struct mobj *mobj)
{
	thread_rpc_free(OPTEE_MSG_RPC_SHM_TYPE_APPL, cookie, mobj);
}
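/*
 * Illustrative use of the payload helpers above (a sketch, not code from
 * this file):
 *
 *	uint64_t cookie = 0;
 *	struct mobj *mobj = thread_rpc_alloc_payload(size, &cookie);
 *
 *	if (mobj) {
 *		void *va = mobj_get_va(mobj, 0);
 *
 *		... exchange data with normal world through va ...
 *		thread_rpc_free_payload(cookie, mobj);
 *	}
 *
 * The cookie identifies the buffer to the normal world allocator and must
 * be kept together with the mobj until the buffer is freed.
 */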