/*
 * Copyright (c) 2016, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <platform_config.h>

#include <arm.h>
#include <assert.h>
#include <keep.h>
#include <kernel/misc.h>
#include <kernel/panic.h>
#include <kernel/spinlock.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/thread_defs.h>
#include <kernel/thread.h>
#include <mm/core_memprot.h>
#include <mm/tee_mm.h>
#include <mm/tee_mmu.h>
#include <mm/tee_pager.h>
#include <optee_msg.h>
#include <sm/optee_smc.h>
#include <sm/sm.h>
#include <tee/tee_fs_rpc.h>
#include <tee/tee_cryp_utl.h>
#include <trace.h>
#include <util.h>

#include "thread_private.h"

#ifdef CFG_WITH_ARM_TRUSTED_FW
#define STACK_TMP_OFFS		0
#else
#define STACK_TMP_OFFS		SM_STACK_TMP_RESERVE_SIZE
#endif

#ifdef ARM32
#ifdef CFG_CORE_SANITIZE_KADDRESS
#define STACK_TMP_SIZE		(3072 + STACK_TMP_OFFS)
#else
#define STACK_TMP_SIZE		(1024 + STACK_TMP_OFFS)
#endif
#define STACK_THREAD_SIZE	8192

#if TRACE_LEVEL > 0
#ifdef CFG_CORE_SANITIZE_KADDRESS
#define STACK_ABT_SIZE		3072
#else
#define STACK_ABT_SIZE		2048
#endif
#else
#define STACK_ABT_SIZE		1024
#endif

#endif /*ARM32*/

#ifdef ARM64
#define STACK_TMP_SIZE		(2048 + STACK_TMP_OFFS)
#define STACK_THREAD_SIZE	8192

#if TRACE_LEVEL > 0
#define STACK_ABT_SIZE		3072
#else
#define STACK_ABT_SIZE		1024
#endif
#endif /*ARM64*/

struct thread_ctx threads[CFG_NUM_THREADS];

static struct thread_core_local thread_core_local[CFG_TEE_CORE_NB_CORE];

#ifdef CFG_WITH_STACK_CANARIES
#ifdef ARM32
#define STACK_CANARY_SIZE	(4 * sizeof(uint32_t))
#endif
#ifdef ARM64
#define STACK_CANARY_SIZE	(8 * sizeof(uint32_t))
#endif
#define START_CANARY_VALUE	0xdededede
#define END_CANARY_VALUE	0xabababab
#define GET_START_CANARY(name, stack_num) name[stack_num][0]
#define GET_END_CANARY(name, stack_num) \
	name[stack_num][sizeof(name[stack_num]) / sizeof(uint32_t) - 1]
#else
#define STACK_CANARY_SIZE	0
#endif

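/*
 * Each stack is declared as an array of uint32_t rounded up to
 * STACK_ALIGNMENT. When CFG_WITH_STACK_CANARIES is enabled, the first and
 * last 32-bit words of each stack buffer hold the start/end canary values
 * written by init_canaries() and verified by thread_check_canaries().
 */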
#define DECLARE_STACK(name, num_stacks, stack_size, linkage) \
linkage uint32_t name[num_stacks] \
		[ROUNDUP(stack_size + STACK_CANARY_SIZE, STACK_ALIGNMENT) / \
		sizeof(uint32_t)] \
		__attribute__((section(".nozi_stack"), \
			       aligned(STACK_ALIGNMENT)))

#define STACK_SIZE(stack) (sizeof(stack) - STACK_CANARY_SIZE / 2)

#define GET_STACK(stack) \
	((vaddr_t)(stack) + STACK_SIZE(stack))

DECLARE_STACK(stack_tmp, CFG_TEE_CORE_NB_CORE, STACK_TMP_SIZE, /* global */);
DECLARE_STACK(stack_abt, CFG_TEE_CORE_NB_CORE, STACK_ABT_SIZE, static);
#ifndef CFG_WITH_PAGER
DECLARE_STACK(stack_thread, CFG_NUM_THREADS, STACK_THREAD_SIZE, static);
#endif

const uint32_t stack_tmp_stride = sizeof(stack_tmp[0]);
const uint32_t stack_tmp_offset = STACK_TMP_OFFS + STACK_CANARY_SIZE / 2;

/*
 * This stack setup info is required by secondary boot cores before they
 * each locally enable the pager (the MMU). Hence it is kept in pager
 * sections.
 */
KEEP_PAGER(stack_tmp);
KEEP_PAGER(stack_tmp_stride);
KEEP_PAGER(stack_tmp_offset);

thread_smc_handler_t thread_std_smc_handler_ptr;
static thread_smc_handler_t thread_fast_smc_handler_ptr;
thread_fiq_handler_t thread_fiq_handler_ptr;
thread_pm_handler_t thread_cpu_on_handler_ptr;
thread_pm_handler_t thread_cpu_off_handler_ptr;
thread_pm_handler_t thread_cpu_suspend_handler_ptr;
thread_pm_handler_t thread_cpu_resume_handler_ptr;
thread_pm_handler_t thread_system_off_handler_ptr;
thread_pm_handler_t thread_system_reset_handler_ptr;

static unsigned int thread_global_lock = SPINLOCK_UNLOCK;
static bool thread_prealloc_rpc_cache;

static void init_canaries(void)
{
#ifdef CFG_WITH_STACK_CANARIES
	size_t n;
#define INIT_CANARY(name)						\
	for (n = 0; n < ARRAY_SIZE(name); n++) {			\
		uint32_t *start_canary = &GET_START_CANARY(name, n);	\
		uint32_t *end_canary = &GET_END_CANARY(name, n);	\
									\
		*start_canary = START_CANARY_VALUE;			\
		*end_canary = END_CANARY_VALUE;				\
		DMSG("#Stack canaries for %s[%zu] with top at %p\n",	\
			#name, n, (void *)(end_canary - 1));		\
		DMSG("watch *%p\n", (void *)end_canary);		\
	}

	INIT_CANARY(stack_tmp);
	INIT_CANARY(stack_abt);
#ifndef CFG_WITH_PAGER
	INIT_CANARY(stack_thread);
#endif
#endif/*CFG_WITH_STACK_CANARIES*/
}

#define CANARY_DIED(stack, loc, n) \
	do { \
		EMSG_RAW("Dead canary at %s of '%s[%zu]'", #loc, #stack, n); \
		panic(); \
	} while (0)

void thread_check_canaries(void)
{
#ifdef CFG_WITH_STACK_CANARIES
	size_t n;

	for (n = 0; n < ARRAY_SIZE(stack_tmp); n++) {
		if (GET_START_CANARY(stack_tmp, n) != START_CANARY_VALUE)
			CANARY_DIED(stack_tmp, start, n);
		if (GET_END_CANARY(stack_tmp, n) != END_CANARY_VALUE)
			CANARY_DIED(stack_tmp, end, n);
	}

	for (n = 0; n < ARRAY_SIZE(stack_abt); n++) {
		if (GET_START_CANARY(stack_abt, n) != START_CANARY_VALUE)
			CANARY_DIED(stack_abt, start, n);
		if (GET_END_CANARY(stack_abt, n) != END_CANARY_VALUE)
			CANARY_DIED(stack_abt, end, n);
	}
#ifndef CFG_WITH_PAGER
	for (n = 0; n < ARRAY_SIZE(stack_thread); n++) {
		if (GET_START_CANARY(stack_thread, n) != START_CANARY_VALUE)
			CANARY_DIED(stack_thread, start, n);
		if (GET_END_CANARY(stack_thread, n) != END_CANARY_VALUE)
			CANARY_DIED(stack_thread, end, n);
	}
#endif
#endif/*CFG_WITH_STACK_CANARIES*/
}

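/*
 * thread_global_lock protects the state transitions of the threads[] array
 * (allocation, suspend/resume and freeing of threads) done by the functions
 * below.
 */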
static void lock_global(void)
{
	cpu_spin_lock(&thread_global_lock);
}

static void unlock_global(void)
{
	cpu_spin_unlock(&thread_global_lock);
}

#ifdef ARM32
uint32_t thread_get_exceptions(void)
{
	uint32_t cpsr = read_cpsr();

	return (cpsr >> CPSR_F_SHIFT) & THREAD_EXCP_ALL;
}

void thread_set_exceptions(uint32_t exceptions)
{
	uint32_t cpsr = read_cpsr();

	/* IRQ must not be unmasked while holding a spinlock */
	if (!(exceptions & THREAD_EXCP_IRQ))
		assert_have_no_spinlock();

	cpsr &= ~(THREAD_EXCP_ALL << CPSR_F_SHIFT);
	cpsr |= ((exceptions & THREAD_EXCP_ALL) << CPSR_F_SHIFT);
	write_cpsr(cpsr);
}
#endif /*ARM32*/

#ifdef ARM64
uint32_t thread_get_exceptions(void)
{
	uint32_t daif = read_daif();

	return (daif >> DAIF_F_SHIFT) & THREAD_EXCP_ALL;
}

void thread_set_exceptions(uint32_t exceptions)
{
	uint32_t daif = read_daif();

	/* IRQ must not be unmasked while holding a spinlock */
	if (!(exceptions & THREAD_EXCP_IRQ))
		assert_have_no_spinlock();

	daif &= ~(THREAD_EXCP_ALL << DAIF_F_SHIFT);
	daif |= ((exceptions & THREAD_EXCP_ALL) << DAIF_F_SHIFT);
	write_daif(daif);
}
#endif /*ARM64*/

uint32_t thread_mask_exceptions(uint32_t exceptions)
{
	uint32_t state = thread_get_exceptions();

	thread_set_exceptions(state | (exceptions & THREAD_EXCP_ALL));
	return state;
}

void thread_unmask_exceptions(uint32_t state)
{
	thread_set_exceptions(state & THREAD_EXCP_ALL);
}

struct thread_core_local *thread_get_core_local(void)
{
	uint32_t cpu_id = get_core_pos();

	/*
	 * IRQs must be disabled before playing with core_local since
	 * we otherwise may be rescheduled to a different core in the
	 * middle of this function.
	 */
	assert(thread_get_exceptions() & THREAD_EXCP_IRQ);

	assert(cpu_id < CFG_TEE_CORE_NB_CORE);
	return &thread_core_local[cpu_id];
}

static void thread_lazy_save_ns_vfp(void)
{
#ifdef CFG_WITH_VFP
	struct thread_ctx *thr = threads + thread_get_id();

	thr->vfp_state.ns_saved = false;
#if defined(ARM64) && defined(CFG_WITH_ARM_TRUSTED_FW)
	/*
	 * ARM TF saves and restores CPACR_EL1, so we must assume NS world
	 * uses VFP and always preserve the register file when secure world
	 * is about to use it.
	 */
	thr->vfp_state.ns.force_save = true;
#endif
	vfp_lazy_save_state_init(&thr->vfp_state.ns);
#endif /*CFG_WITH_VFP*/
}

static void thread_lazy_restore_ns_vfp(void)
{
#ifdef CFG_WITH_VFP
	struct thread_ctx *thr = threads + thread_get_id();
	struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp;

	assert(!thr->vfp_state.sec_lazy_saved && !thr->vfp_state.sec_saved);

	if (tuv && tuv->lazy_saved && !tuv->saved) {
		vfp_lazy_save_state_final(&tuv->vfp);
		tuv->saved = true;
	}

	vfp_lazy_restore_state(&thr->vfp_state.ns, thr->vfp_state.ns_saved);
	thr->vfp_state.ns_saved = false;
#endif /*CFG_WITH_VFP*/
}

#ifdef ARM32
static void init_regs(struct thread_ctx *thread,
		struct thread_smc_args *args)
{
	thread->regs.pc = (uint32_t)thread_std_smc_entry;

	/*
	 * Stdcalls start in SVC mode with masked IRQ, masked asynchronous
	 * abort and unmasked FIQ.
	 */
	thread->regs.cpsr = read_cpsr() & ARM32_CPSR_E;
	thread->regs.cpsr |= CPSR_MODE_SVC | CPSR_I | CPSR_A;
	/* Enable Thumb mode if it's a Thumb instruction */
	if (thread->regs.pc & 1)
		thread->regs.cpsr |= CPSR_T;
	/* Reinitialize stack pointer */
	thread->regs.svc_sp = thread->stack_va_end;

	/*
	 * Copy arguments into context. This will make the
	 * arguments appear in r0-r7 when thread is started.
	 */
	thread->regs.r0 = args->a0;
	thread->regs.r1 = args->a1;
	thread->regs.r2 = args->a2;
	thread->regs.r3 = args->a3;
	thread->regs.r4 = args->a4;
	thread->regs.r5 = args->a5;
	thread->regs.r6 = args->a6;
	thread->regs.r7 = args->a7;
}
#endif /*ARM32*/

#ifdef ARM64
static void init_regs(struct thread_ctx *thread,
		struct thread_smc_args *args)
{
	thread->regs.pc = (uint64_t)thread_std_smc_entry;

	/*
	 * Stdcalls start in EL1 (using SP_EL0) with masked IRQ, masked
	 * asynchronous abort and unmasked FIQ.
	 */
	thread->regs.cpsr = SPSR_64(SPSR_64_MODE_EL1, SPSR_64_MODE_SP_EL0,
				    DAIFBIT_IRQ | DAIFBIT_ABT);
	/* Reinitialize stack pointer */
	thread->regs.sp = thread->stack_va_end;

	/*
	 * Copy arguments into context. This will make the
	 * arguments appear in x0-x7 when thread is started.
	 */
	thread->regs.x[0] = args->a0;
	thread->regs.x[1] = args->a1;
	thread->regs.x[2] = args->a2;
	thread->regs.x[3] = args->a3;
	thread->regs.x[4] = args->a4;
	thread->regs.x[5] = args->a5;
	thread->regs.x[6] = args->a6;
	thread->regs.x[7] = args->a7;

	/* Set up frame pointer as per the AArch64 AAPCS */
	thread->regs.x[29] = 0;
}
#endif /*ARM64*/

void thread_init_boot_thread(void)
{
	struct thread_core_local *l = thread_get_core_local();
	size_t n;

	for (n = 0; n < CFG_NUM_THREADS; n++) {
		TAILQ_INIT(&threads[n].mutexes);
		TAILQ_INIT(&threads[n].tsd.sess_stack);
#ifdef CFG_SMALL_PAGE_USER_TA
		SLIST_INIT(&threads[n].tsd.pgt_cache);
#endif
	}

	for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++)
		thread_core_local[n].curr_thread = -1;

	l->curr_thread = 0;
	threads[0].state = THREAD_STATE_ACTIVE;
}

void thread_clr_boot_thread(void)
{
	struct thread_core_local *l = thread_get_core_local();

	assert(l->curr_thread >= 0 && l->curr_thread < CFG_NUM_THREADS);
	assert(threads[l->curr_thread].state == THREAD_STATE_ACTIVE);
	assert(TAILQ_EMPTY(&threads[l->curr_thread].mutexes));
	threads[l->curr_thread].state = THREAD_STATE_FREE;
	l->curr_thread = -1;
}

static void thread_alloc_and_run(struct thread_smc_args *args)
{
	size_t n;
	struct thread_core_local *l = thread_get_core_local();
	bool found_thread = false;

	assert(l->curr_thread == -1);

	lock_global();

	for (n = 0; n < CFG_NUM_THREADS; n++) {
		if (threads[n].state == THREAD_STATE_FREE) {
			threads[n].state = THREAD_STATE_ACTIVE;
			found_thread = true;
			break;
		}
	}

	unlock_global();

	if (!found_thread) {
		args->a0 = OPTEE_SMC_RETURN_ETHREAD_LIMIT;
		return;
	}

	l->curr_thread = n;

	threads[n].flags = 0;
	init_regs(threads + n, args);

	/* Save Hypervisor Client ID */
	threads[n].hyp_clnt_id = args->a7;

	thread_lazy_save_ns_vfp();
	thread_resume(&threads[n].regs);
}

#ifdef ARM32
static void copy_a0_to_a5(struct thread_ctx_regs *regs,
		struct thread_smc_args *args)
{
	/*
	 * Update returned values from RPC. The values will appear in
	 * r0-r5 when the thread is resumed.
	 */
	regs->r0 = args->a0;
	regs->r1 = args->a1;
	regs->r2 = args->a2;
	regs->r3 = args->a3;
	regs->r4 = args->a4;
	regs->r5 = args->a5;
}
#endif /*ARM32*/

#ifdef ARM64
static void copy_a0_to_a5(struct thread_ctx_regs *regs,
		struct thread_smc_args *args)
{
	/*
	 * Update returned values from RPC. The values will appear in
	 * x0-x5 when the thread is resumed.
	 */
	regs->x[0] = args->a0;
	regs->x[1] = args->a1;
	regs->x[2] = args->a2;
	regs->x[3] = args->a3;
	regs->x[4] = args->a4;
	regs->x[5] = args->a5;
}
#endif /*ARM64*/

#ifdef ARM32
static bool is_from_user(uint32_t cpsr)
{
	return (cpsr & ARM32_CPSR_MODE_MASK) == ARM32_CPSR_MODE_USR;
}
#endif

#ifdef ARM64
static bool is_from_user(uint32_t cpsr)
{
	if (cpsr & (SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT))
		return true;
	if (((cpsr >> SPSR_64_MODE_EL_SHIFT) & SPSR_64_MODE_EL_MASK) ==
	     SPSR_64_MODE_EL0)
		return true;
	return false;
}
#endif

static bool is_user_mode(struct thread_ctx_regs *regs)
{
	return is_from_user((uint32_t)regs->cpsr);
}

static void thread_resume_from_rpc(struct thread_smc_args *args)
{
	size_t n = args->a3; /* thread id */
	struct thread_core_local *l = thread_get_core_local();
	uint32_t rv = 0;

	assert(l->curr_thread == -1);

	lock_global();

	if (n < CFG_NUM_THREADS &&
	    threads[n].state == THREAD_STATE_SUSPENDED &&
	    args->a7 == threads[n].hyp_clnt_id)
		threads[n].state = THREAD_STATE_ACTIVE;
	else
		rv = OPTEE_SMC_RETURN_ERESUME;

	unlock_global();

	if (rv) {
		args->a0 = rv;
		return;
	}

	l->curr_thread = n;

	if (is_user_mode(&threads[n].regs))
		tee_ta_update_session_utime_resume();

	if (threads[n].have_user_map)
		core_mmu_set_user_map(&threads[n].user_map);

	/*
	 * Return from RPC to request service of an IRQ must not
	 * get parameters from non-secure world.
	 */
	if (threads[n].flags & THREAD_FLAGS_COPY_ARGS_ON_RETURN) {
		copy_a0_to_a5(&threads[n].regs, args);
		threads[n].flags &= ~THREAD_FLAGS_COPY_ARGS_ON_RETURN;
	}

	thread_lazy_save_ns_vfp();
	thread_resume(&threads[n].regs);
}

void thread_handle_fast_smc(struct thread_smc_args *args)
{
	thread_check_canaries();
	thread_fast_smc_handler_ptr(args);
	/* Fast handlers must not unmask any exceptions */
	assert(thread_get_exceptions() == THREAD_EXCP_ALL);
}

void thread_handle_std_smc(struct thread_smc_args *args)
{
	thread_check_canaries();

	if (args->a0 == OPTEE_SMC_CALL_RETURN_FROM_RPC)
		thread_resume_from_rpc(args);
	else
		thread_alloc_and_run(args);
}

/* Helper routine for the assembly function thread_std_smc_entry() */
void __thread_std_smc_entry(struct thread_smc_args *args)
{
	struct thread_ctx *thr = threads + thread_get_id();

	if (!thr->rpc_arg) {
		paddr_t parg;
		uint64_t carg;
		void *arg;

		thread_rpc_alloc_arg(
			OPTEE_MSG_GET_ARG_SIZE(THREAD_RPC_MAX_NUM_PARAMS),
			&parg, &carg);
		if (!parg || !ALIGNMENT_IS_OK(parg, struct optee_msg_arg) ||
		    !(arg = phys_to_virt(parg, MEM_AREA_NSEC_SHM))) {
			thread_rpc_free_arg(carg);
			args->a0 = OPTEE_SMC_RETURN_ENOMEM;
			return;
		}

		thr->rpc_arg = arg;
		thr->rpc_carg = carg;
	}

	thread_std_smc_handler_ptr(args);

	tee_fs_rpc_cache_clear(&thr->tsd);
	if (!thread_prealloc_rpc_cache) {
		thread_rpc_free_arg(thr->rpc_carg);
		thr->rpc_carg = 0;
		thr->rpc_arg = 0;
	}
}

void *thread_get_tmp_sp(void)
{
	struct thread_core_local *l = thread_get_core_local();

	return (void *)l->tmp_stack_va_end;
}

#ifdef ARM64
vaddr_t thread_get_saved_thread_sp(void)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != -1);
	return threads[ct].kern_sp;
}
#endif /*ARM64*/

bool thread_addr_is_in_stack(vaddr_t va)
{
	struct thread_ctx *thr;
	int ct = thread_get_id_may_fail();

	if (ct == -1)
		return false;

	thr = threads + ct;
	return va < thr->stack_va_end &&
	       va >= (thr->stack_va_end - STACK_THREAD_SIZE);
}

void thread_state_free(void)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != -1);
	assert(TAILQ_EMPTY(&threads[ct].mutexes));

	thread_lazy_restore_ns_vfp();
	tee_pager_release_phys(
		(void *)(threads[ct].stack_va_end - STACK_THREAD_SIZE),
		STACK_THREAD_SIZE);

	lock_global();

	assert(threads[ct].state == THREAD_STATE_ACTIVE);
	threads[ct].state = THREAD_STATE_FREE;
	threads[ct].flags = 0;
	l->curr_thread = -1;

	unlock_global();
}

#ifdef CFG_WITH_PAGER
static void release_unused_kernel_stack(struct thread_ctx *thr)
{
	vaddr_t sp = thr->regs.svc_sp;
	vaddr_t base = thr->stack_va_end - STACK_THREAD_SIZE;
	size_t len = sp - base;

	tee_pager_release_phys((void *)base, len);
}
#else
static void release_unused_kernel_stack(struct thread_ctx *thr __unused)
{
}
#endif

int thread_state_suspend(uint32_t flags, uint32_t cpsr, vaddr_t pc)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != -1);

	thread_check_canaries();

	release_unused_kernel_stack(threads + ct);

	if (is_from_user(cpsr)) {
		thread_user_save_vfp();
		tee_ta_update_session_utime_suspend();
		tee_ta_gprof_sample_pc(pc);
	}
	thread_lazy_restore_ns_vfp();

	lock_global();

	assert(threads[ct].state == THREAD_STATE_ACTIVE);
	threads[ct].flags |= flags;
	threads[ct].regs.cpsr = cpsr;
	threads[ct].regs.pc = pc;
	threads[ct].state = THREAD_STATE_SUSPENDED;

	threads[ct].have_user_map = core_mmu_user_mapping_is_active();
	if (threads[ct].have_user_map) {
		core_mmu_get_user_map(&threads[ct].user_map);
		core_mmu_set_user_map(NULL);
	}

	l->curr_thread = -1;

	unlock_global();

	return ct;
}

#ifdef ARM32
static void set_tmp_stack(struct thread_core_local *l, vaddr_t sp)
{
	l->tmp_stack_va_end = sp;
	thread_set_irq_sp(sp);
	thread_set_fiq_sp(sp);
}

static void set_abt_stack(struct thread_core_local *l __unused, vaddr_t sp)
{
	thread_set_abt_sp(sp);
}
#endif /*ARM32*/

#ifdef ARM64
static void set_tmp_stack(struct thread_core_local *l, vaddr_t sp)
{
	/*
	 * We're already using the tmp stack when this function is called
	 * so there's no need to assign it to any stack pointer. However,
	 * we'll need to restore it at different times so store it here.
	 */
	l->tmp_stack_va_end = sp;
}

static void set_abt_stack(struct thread_core_local *l, vaddr_t sp)
{
	l->abt_stack_va_end = sp;
}
#endif /*ARM64*/

bool thread_init_stack(uint32_t thread_id, vaddr_t sp)
{
	if (thread_id >= CFG_NUM_THREADS)
		return false;
	threads[thread_id].stack_va_end = sp;
	return true;
}

int thread_get_id_may_fail(void)
{
	/* thread_get_core_local() requires IRQs to be disabled */
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	thread_unmask_exceptions(exceptions);
	return ct;
}

int thread_get_id(void)
{
	int ct = thread_get_id_may_fail();

	assert(ct >= 0 && ct < CFG_NUM_THREADS);
	return ct;
}

static void init_handlers(const struct thread_handlers *handlers)
{
	thread_std_smc_handler_ptr = handlers->std_smc;
	thread_fast_smc_handler_ptr = handlers->fast_smc;
	thread_fiq_handler_ptr = handlers->fiq;
	thread_cpu_on_handler_ptr = handlers->cpu_on;
	thread_cpu_off_handler_ptr = handlers->cpu_off;
	thread_cpu_suspend_handler_ptr = handlers->cpu_suspend;
	thread_cpu_resume_handler_ptr = handlers->cpu_resume;
	thread_system_off_handler_ptr = handlers->system_off;
	thread_system_reset_handler_ptr = handlers->system_reset;
}

#ifdef CFG_WITH_PAGER
static void init_thread_stacks(void)
{
	size_t n;

	/*
	 * Allocate virtual memory for thread stacks.
	 */
	for (n = 0; n < CFG_NUM_THREADS; n++) {
		tee_mm_entry_t *mm;
		vaddr_t sp;

		/* Find vmem for thread stack and its protection gap */
		mm = tee_mm_alloc(&tee_mm_vcore,
				  SMALL_PAGE_SIZE + STACK_THREAD_SIZE);
		assert(mm);

		/* Claim any physical pages currently mapped in the range */
		tee_pager_add_pages(tee_mm_get_smem(mm), tee_mm_get_size(mm),
				    true);

		/* Add the area to the pager */
		tee_pager_add_core_area(tee_mm_get_smem(mm) + SMALL_PAGE_SIZE,
					tee_mm_get_bytes(mm) - SMALL_PAGE_SIZE,
					TEE_MATTR_PRW | TEE_MATTR_LOCKED,
					NULL, NULL);

		/* Init effective stack */
		sp = tee_mm_get_smem(mm) + tee_mm_get_bytes(mm);
		if (!thread_init_stack(n, sp))
			panic("init stack failed");
	}
}
#else
static void init_thread_stacks(void)
{
	size_t n;

	/* Assign the thread stacks */
	for (n = 0; n < CFG_NUM_THREADS; n++) {
		if (!thread_init_stack(n, GET_STACK(stack_thread[n])))
			panic("thread_init_stack failed");
	}
}
#endif /*CFG_WITH_PAGER*/

void thread_init_primary(const struct thread_handlers *handlers)
{
	init_handlers(handlers);

	/* Initialize canaries around the stacks */
	init_canaries();

	init_thread_stacks();
	pgt_init();
}

static void init_sec_mon(size_t pos __maybe_unused)
{
#if !defined(CFG_WITH_ARM_TRUSTED_FW)
	/* Initialize secure monitor */
	sm_init(GET_STACK(stack_tmp[pos]));
#endif
}

void thread_init_per_cpu(void)
{
	size_t pos = get_core_pos();
	struct thread_core_local *l = thread_get_core_local();

	init_sec_mon(pos);

	set_tmp_stack(l, GET_STACK(stack_tmp[pos]) - STACK_TMP_OFFS);
	set_abt_stack(l, GET_STACK(stack_abt[pos]));

	thread_init_vbar();
}

struct thread_specific_data *thread_get_tsd(void)
{
	return &threads[thread_get_id()].tsd;
}

struct thread_ctx_regs *thread_get_ctx_regs(void)
{
	struct thread_core_local *l = thread_get_core_local();

	assert(l->curr_thread != -1);
	return &threads[l->curr_thread].regs;
}

void thread_set_irq(bool enable)
{
	/* thread_get_core_local() requires IRQs to be disabled */
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);
	struct thread_core_local *l;

	l = thread_get_core_local();

	assert(l->curr_thread != -1);

	if (enable) {
		threads[l->curr_thread].flags |= THREAD_FLAGS_IRQ_ENABLE;
		thread_set_exceptions(exceptions & ~THREAD_EXCP_IRQ);
	} else {
		/*
		 * No need to disable IRQ here since it's already disabled
		 * above.
		 */
		threads[l->curr_thread].flags &= ~THREAD_FLAGS_IRQ_ENABLE;
	}
}

void thread_restore_irq(void)
{
	/* thread_get_core_local() requires IRQs to be disabled */
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);
	struct thread_core_local *l;

	l = thread_get_core_local();

	assert(l->curr_thread != -1);

	if (threads[l->curr_thread].flags & THREAD_FLAGS_IRQ_ENABLE)
		thread_set_exceptions(exceptions & ~THREAD_EXCP_IRQ);
}

#ifdef CFG_WITH_VFP
uint32_t thread_kernel_enable_vfp(void)
{
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);
	struct thread_ctx *thr = threads + thread_get_id();
	struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp;

	assert(!vfp_is_enabled());

	if (!thr->vfp_state.ns_saved) {
		vfp_lazy_save_state_final(&thr->vfp_state.ns);
		thr->vfp_state.ns_saved = true;
	} else if (thr->vfp_state.sec_lazy_saved &&
		   !thr->vfp_state.sec_saved) {
		/*
		 * This happens when we're handling an abort while the
		 * thread was using the VFP state.
		 */
		vfp_lazy_save_state_final(&thr->vfp_state.sec);
		thr->vfp_state.sec_saved = true;
	} else if (tuv && tuv->lazy_saved && !tuv->saved) {
		/*
		 * This can happen either during syscall or abort
		 * processing (while processing a syscall).
		 */
		vfp_lazy_save_state_final(&tuv->vfp);
		tuv->saved = true;
	}

	vfp_enable();
	return exceptions;
}

void thread_kernel_disable_vfp(uint32_t state)
{
	uint32_t exceptions;

	assert(vfp_is_enabled());

	vfp_disable();
	exceptions = thread_get_exceptions();
	assert(exceptions & THREAD_EXCP_IRQ);
	exceptions &= ~THREAD_EXCP_IRQ;
	exceptions |= state & THREAD_EXCP_IRQ;
	thread_set_exceptions(exceptions);
}

void thread_kernel_save_vfp(void)
{
	struct thread_ctx *thr = threads + thread_get_id();

	assert(thread_get_exceptions() & THREAD_EXCP_IRQ);
	if (vfp_is_enabled()) {
		vfp_lazy_save_state_init(&thr->vfp_state.sec);
		thr->vfp_state.sec_lazy_saved = true;
	}
}

void thread_kernel_restore_vfp(void)
{
	struct thread_ctx *thr = threads + thread_get_id();

	assert(thread_get_exceptions() & THREAD_EXCP_IRQ);
	assert(!vfp_is_enabled());
	if (thr->vfp_state.sec_lazy_saved) {
		vfp_lazy_restore_state(&thr->vfp_state.sec,
				       thr->vfp_state.sec_saved);
		thr->vfp_state.sec_saved = false;
		thr->vfp_state.sec_lazy_saved = false;
	}
}

void thread_user_enable_vfp(struct thread_user_vfp_state *uvfp)
{
	struct thread_ctx *thr = threads + thread_get_id();
	struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp;

	assert(thread_get_exceptions() & THREAD_EXCP_IRQ);
	assert(!vfp_is_enabled());

	if (!thr->vfp_state.ns_saved) {
		vfp_lazy_save_state_final(&thr->vfp_state.ns);
		thr->vfp_state.ns_saved = true;
	} else if (tuv && uvfp != tuv) {
		if (tuv->lazy_saved && !tuv->saved) {
			vfp_lazy_save_state_final(&tuv->vfp);
			tuv->saved = true;
		}
	}

	if (uvfp->lazy_saved)
		vfp_lazy_restore_state(&uvfp->vfp, uvfp->saved);
	uvfp->lazy_saved = false;
	uvfp->saved = false;

	thr->vfp_state.uvfp = uvfp;
	vfp_enable();
}

void thread_user_save_vfp(void)
{
	struct thread_ctx *thr = threads + thread_get_id();
	struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp;

	assert(thread_get_exceptions() & THREAD_EXCP_IRQ);
	if (!vfp_is_enabled())
		return;

	assert(tuv && !tuv->lazy_saved && !tuv->saved);
	vfp_lazy_save_state_init(&tuv->vfp);
	tuv->lazy_saved = true;
}

void thread_user_clear_vfp(struct thread_user_vfp_state *uvfp)
{
	struct thread_ctx *thr = threads + thread_get_id();

	if (uvfp == thr->vfp_state.uvfp)
		thr->vfp_state.uvfp = NULL;
	uvfp->lazy_saved = false;
	uvfp->saved = false;
}
#endif /*CFG_WITH_VFP*/

#ifdef ARM32
static bool get_spsr(bool is_32bit, unsigned long entry_func, uint32_t *spsr)
{
	uint32_t s;

	if (!is_32bit)
		return false;

	s = read_spsr();
	s &= ~(CPSR_MODE_MASK | CPSR_T | CPSR_IT_MASK1 | CPSR_IT_MASK2);
	s |= CPSR_MODE_USR;
	if (entry_func & 1)
		s |= CPSR_T;
	*spsr = s;
	return true;
}
#endif

#ifdef ARM64
static bool get_spsr(bool is_32bit, unsigned long entry_func, uint32_t *spsr)
{
	uint32_t s;

	if (is_32bit) {
		s = read_daif() & (SPSR_32_AIF_MASK << SPSR_32_AIF_SHIFT);
		s |= SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT;
		s |= (entry_func & SPSR_32_T_MASK) << SPSR_32_T_SHIFT;
	} else {
		s = read_daif() & (SPSR_64_DAIF_MASK << SPSR_64_DAIF_SHIFT);
	}

	*spsr = s;
	return true;
}
#endif

uint32_t thread_enter_user_mode(unsigned long a0, unsigned long a1,
		unsigned long a2, unsigned long a3, unsigned long user_sp,
		unsigned long entry_func, bool is_32bit,
		uint32_t *exit_status0, uint32_t *exit_status1)
{
	uint32_t spsr;

	tee_ta_update_session_utime_resume();

	if (!get_spsr(is_32bit, entry_func, &spsr)) {
		*exit_status0 = 1; /* panic */
		*exit_status1 = 0xbadbadba;
		return 0;
	}
	return __thread_enter_user_mode(a0, a1, a2, a3, user_sp, entry_func,
					spsr, exit_status0, exit_status1);
}

void thread_add_mutex(struct mutex *m)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != -1 && threads[ct].state == THREAD_STATE_ACTIVE);
	assert(m->owner_id == -1);
	m->owner_id = ct;
	TAILQ_INSERT_TAIL(&threads[ct].mutexes, m, link);
}

void thread_rem_mutex(struct mutex *m)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != -1 && threads[ct].state == THREAD_STATE_ACTIVE);
	assert(m->owner_id == ct);
	m->owner_id = -1;
	TAILQ_REMOVE(&threads[ct].mutexes, m, link);
}

bool thread_disable_prealloc_rpc_cache(uint64_t *cookie)
{
	bool rv;
	size_t n;
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);

	lock_global();

	for (n = 0; n < CFG_NUM_THREADS; n++) {
		if (threads[n].state != THREAD_STATE_FREE) {
			rv = false;
			goto out;
		}
	}

	rv = true;
	for (n = 0; n < CFG_NUM_THREADS; n++) {
		if (threads[n].rpc_arg) {
			*cookie = threads[n].rpc_carg;
			threads[n].rpc_carg = 0;
			threads[n].rpc_arg = NULL;
			goto out;
		}
	}

	*cookie = 0;
	thread_prealloc_rpc_cache = false;
out:
	unlock_global();
	thread_unmask_exceptions(exceptions);
	return rv;
}

bool thread_enable_prealloc_rpc_cache(void)
{
	bool rv;
	size_t n;
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);

	lock_global();

	for (n = 0; n < CFG_NUM_THREADS; n++) {
		if (threads[n].state != THREAD_STATE_FREE) {
			rv = false;
			goto out;
		}
	}

	rv = true;
	thread_prealloc_rpc_cache = true;
out:
	unlock_global();
	thread_unmask_exceptions(exceptions);
	return rv;
}

static uint32_t rpc_cmd_nolock(uint32_t cmd, size_t num_params,
		struct optee_msg_param *params)
{
	uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = { OPTEE_SMC_RETURN_RPC_CMD };
	struct thread_ctx *thr = threads + thread_get_id();
	struct optee_msg_arg *arg = thr->rpc_arg;
	uint64_t carg = thr->rpc_carg;
	const size_t params_size = sizeof(struct optee_msg_param) * num_params;
	size_t n;

	assert(arg && carg && num_params <= THREAD_RPC_MAX_NUM_PARAMS);

	plat_prng_add_jitter_entropy();

	memset(arg, 0, OPTEE_MSG_GET_ARG_SIZE(THREAD_RPC_MAX_NUM_PARAMS));
	arg->cmd = cmd;
	arg->ret = TEE_ERROR_GENERIC; /* in case value isn't updated */
	arg->num_params = num_params;
	memcpy(OPTEE_MSG_GET_PARAMS(arg), params, params_size);

	reg_pair_from_64(carg, rpc_args + 1, rpc_args + 2);
	thread_rpc(rpc_args);
	for (n = 0; n < num_params; n++) {
		switch (params[n].attr & OPTEE_MSG_ATTR_TYPE_MASK) {
		case OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT:
		case OPTEE_MSG_ATTR_TYPE_VALUE_INOUT:
		case OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT:
		case OPTEE_MSG_ATTR_TYPE_RMEM_INOUT:
		case OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT:
		case OPTEE_MSG_ATTR_TYPE_TMEM_INOUT:
			memcpy(params + n, OPTEE_MSG_GET_PARAMS(arg) + n,
			       sizeof(struct optee_msg_param));
			break;
		default:
			break;
		}
	}
	return arg->ret;
}

uint32_t thread_rpc_cmd(uint32_t cmd, size_t num_params,
		struct optee_msg_param *params)
{
	uint32_t ret;

	ret = rpc_cmd_nolock(cmd, num_params, params);

	return ret;
}

static bool check_alloced_shm(paddr_t pa, size_t len, size_t align)
{
	if (pa & (align - 1))
		return false;
	return core_pbuf_is(CORE_MEM_NSEC_SHM, pa, len);
}

void thread_rpc_free_arg(uint64_t cookie)
{
	if (cookie) {
		uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = {
			OPTEE_SMC_RETURN_RPC_FREE
		};

		reg_pair_from_64(cookie, rpc_args + 1, rpc_args + 2);
		thread_rpc(rpc_args);
	}
}

void thread_rpc_alloc_arg(size_t size, paddr_t *arg, uint64_t *cookie)
{
	paddr_t pa;
	uint64_t co;
	uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = {
		OPTEE_SMC_RETURN_RPC_ALLOC, size
	};

	thread_rpc(rpc_args);

	pa = reg_pair_to_64(rpc_args[1], rpc_args[2]);
	co = reg_pair_to_64(rpc_args[4], rpc_args[5]);
	if (!check_alloced_shm(pa, size, sizeof(uint64_t))) {
		thread_rpc_free_arg(co);
		pa = 0;
		co = 0;
	}

	*arg = pa;
	*cookie = co;
}

/**
 * Free physical memory previously allocated with thread_rpc_alloc()
 *
 * @cookie:	cookie received when allocating the buffer
 * @bt:		must be the same as supplied when allocating
 */
static void thread_rpc_free(unsigned int bt, uint64_t cookie)
{
	uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = { OPTEE_SMC_RETURN_RPC_CMD };
	struct thread_ctx *thr = threads + thread_get_id();
	struct optee_msg_arg *arg = thr->rpc_arg;
	uint64_t carg = thr->rpc_carg;
	struct optee_msg_param *params = OPTEE_MSG_GET_PARAMS(arg);

	memset(arg, 0, OPTEE_MSG_GET_ARG_SIZE(1));
	arg->cmd = OPTEE_MSG_RPC_CMD_SHM_FREE;
	arg->ret = TEE_ERROR_GENERIC; /* in case value isn't updated */
	arg->num_params = 1;

	params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
	params[0].u.value.a = bt;
	params[0].u.value.b = cookie;
	params[0].u.value.c = 0;

	reg_pair_from_64(carg, rpc_args + 1, rpc_args + 2);
	thread_rpc(rpc_args);
}

/**
 * Allocates shared memory buffer via RPC
 *
 * @size:	size in bytes of shared memory buffer
 * @align:	required alignment of buffer
 * @bt:		buffer type OPTEE_MSG_RPC_SHM_TYPE_*
 * @payload:	returned physical pointer to buffer, 0 if allocation
 *		failed.
 * @cookie:	returned cookie used when freeing the buffer
 */
static void thread_rpc_alloc(size_t size, size_t align, unsigned int bt,
			paddr_t *payload, uint64_t *cookie)
{
	uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = { OPTEE_SMC_RETURN_RPC_CMD };
	struct thread_ctx *thr = threads + thread_get_id();
	struct optee_msg_arg *arg = thr->rpc_arg;
	uint64_t carg = thr->rpc_carg;
	struct optee_msg_param *params = OPTEE_MSG_GET_PARAMS(arg);

	memset(arg, 0, OPTEE_MSG_GET_ARG_SIZE(1));
	arg->cmd = OPTEE_MSG_RPC_CMD_SHM_ALLOC;
	arg->ret = TEE_ERROR_GENERIC; /* in case value isn't updated */
	arg->num_params = 1;

	params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
	params[0].u.value.a = bt;
	params[0].u.value.b = size;
	params[0].u.value.c = align;

	reg_pair_from_64(carg, rpc_args + 1, rpc_args + 2);
	thread_rpc(rpc_args);
	if (arg->ret != TEE_SUCCESS)
		goto fail;

	if (arg->num_params != 1)
		goto fail;

	if (params[0].attr != OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT)
		goto fail;

	if (!check_alloced_shm(params[0].u.tmem.buf_ptr, size, align)) {
		thread_rpc_free(bt, params[0].u.tmem.shm_ref);
		goto fail;
	}

	*payload = params[0].u.tmem.buf_ptr;
	*cookie = params[0].u.tmem.shm_ref;
	return;
fail:
	*payload = 0;
	*cookie = 0;
}

void thread_rpc_alloc_payload(size_t size, paddr_t *payload, uint64_t *cookie)
{
	thread_rpc_alloc(size, 8, OPTEE_MSG_RPC_SHM_TYPE_APPL, payload, cookie);
}

void thread_rpc_free_payload(uint64_t cookie)
{
	thread_rpc_free(OPTEE_MSG_RPC_SHM_TYPE_APPL, cookie);
}