/*
 * Copyright (c) 2016, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <platform_config.h>

#include <arm.h>
#include <assert.h>
#include <keep.h>
#include <kernel/misc.h>
#include <kernel/panic.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/thread_defs.h>
#include <kernel/thread.h>
#include <kernel/tz_proc_def.h>
#include <kernel/tz_proc.h>
#include <mm/core_memprot.h>
#include <mm/tee_mm.h>
#include <mm/tee_mmu_defs.h>
#include <mm/tee_mmu.h>
#include <mm/tee_pager.h>
#include <optee_msg.h>
#include <sm/optee_smc.h>
#include <sm/sm_defs.h>
#include <sm/sm.h>
#include <tee/tee_fs_rpc.h>
#include <trace.h>
#include <util.h>

#include "thread_private.h"

#ifdef ARM32
#ifdef CFG_CORE_SANITIZE_KADDRESS
#define STACK_TMP_SIZE		3072
#else
#define STACK_TMP_SIZE		1024
#endif
#define STACK_THREAD_SIZE	8192

#if TRACE_LEVEL > 0
#ifdef CFG_CORE_SANITIZE_KADDRESS
#define STACK_ABT_SIZE		3072
#else
#define STACK_ABT_SIZE		2048
#endif
#else
#define STACK_ABT_SIZE		1024
#endif

#endif /*ARM32*/

#ifdef ARM64
#define STACK_TMP_SIZE		2048
#define STACK_THREAD_SIZE	8192

#if TRACE_LEVEL > 0
#define STACK_ABT_SIZE		3072
#else
#define STACK_ABT_SIZE		1024
#endif
#endif /*ARM64*/

#define RPC_MAX_NUM_PARAMS	2

struct thread_ctx threads[CFG_NUM_THREADS];

static struct thread_core_local thread_core_local[CFG_TEE_CORE_NB_CORE];

#ifdef CFG_WITH_STACK_CANARIES
#ifdef ARM32
#define STACK_CANARY_SIZE	(4 * sizeof(uint32_t))
#endif
#ifdef ARM64
#define STACK_CANARY_SIZE	(8 * sizeof(uint32_t))
#endif
#define START_CANARY_VALUE	0xdededede
#define END_CANARY_VALUE	0xabababab
#define GET_START_CANARY(name, stack_num) name[stack_num][0]
#define GET_END_CANARY(name, stack_num) \
	name[stack_num][sizeof(name[stack_num]) / sizeof(uint32_t) - 1]
#else
#define STACK_CANARY_SIZE	0
#endif

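/*
 * Rough stack layout implied by the macros above and below:
 * DECLARE_STACK() reserves STACK_CANARY_SIZE extra bytes, in effect
 * half at each end of the stack. The start canary occupies the lowest
 * word, where it catches overflow of the downward-growing stack; the
 * end canary occupies the highest word, just above the initial stack
 * pointer returned by GET_STACK() (hence STACK_SIZE() subtracting
 * only half of STACK_CANARY_SIZE).
 */
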
#define DECLARE_STACK(name, num_stacks, stack_size, linkage) \
linkage uint32_t name[num_stacks] \
		[ROUNDUP(stack_size + STACK_CANARY_SIZE, STACK_ALIGNMENT) / \
			sizeof(uint32_t)] \
		__attribute__((section(".nozi_stack"), \
			       aligned(STACK_ALIGNMENT)))

#define STACK_SIZE(stack) (sizeof(stack) - STACK_CANARY_SIZE / 2)

#define GET_STACK(stack) \
	((vaddr_t)(stack) + STACK_SIZE(stack))

DECLARE_STACK(stack_tmp, CFG_TEE_CORE_NB_CORE, STACK_TMP_SIZE, /* global */);
DECLARE_STACK(stack_abt, CFG_TEE_CORE_NB_CORE, STACK_ABT_SIZE, static);
#if !defined(CFG_WITH_ARM_TRUSTED_FW)
DECLARE_STACK(stack_sm, CFG_TEE_CORE_NB_CORE, SM_STACK_SIZE, static);
#endif
#ifndef CFG_WITH_PAGER
DECLARE_STACK(stack_thread, CFG_NUM_THREADS, STACK_THREAD_SIZE, static);
#endif

const uint32_t stack_tmp_stride = STACK_SIZE(stack_tmp[0]);

KEEP_PAGER(stack_tmp);
KEEP_PAGER(stack_tmp_stride);

thread_smc_handler_t thread_std_smc_handler_ptr;
static thread_smc_handler_t thread_fast_smc_handler_ptr;
thread_fiq_handler_t thread_fiq_handler_ptr;
thread_pm_handler_t thread_cpu_on_handler_ptr;
thread_pm_handler_t thread_cpu_off_handler_ptr;
thread_pm_handler_t thread_cpu_suspend_handler_ptr;
thread_pm_handler_t thread_cpu_resume_handler_ptr;
thread_pm_handler_t thread_system_off_handler_ptr;
thread_pm_handler_t thread_system_reset_handler_ptr;

static unsigned int thread_global_lock = SPINLOCK_UNLOCK;
static bool thread_prealloc_rpc_cache;

static void init_canaries(void)
{
#ifdef CFG_WITH_STACK_CANARIES
	size_t n;
#define INIT_CANARY(name)						\
	for (n = 0; n < ARRAY_SIZE(name); n++) {			\
		uint32_t *start_canary = &GET_START_CANARY(name, n);	\
		uint32_t *end_canary = &GET_END_CANARY(name, n);	\
									\
		*start_canary = START_CANARY_VALUE;			\
		*end_canary = END_CANARY_VALUE;				\
		DMSG("#Stack canaries for %s[%zu] with top at %p\n",	\
		     #name, n, (void *)(end_canary - 1));		\
		DMSG("watch *%p\n", (void *)end_canary);		\
	}

	INIT_CANARY(stack_tmp);
	INIT_CANARY(stack_abt);
#if !defined(CFG_WITH_ARM_TRUSTED_FW)
	INIT_CANARY(stack_sm);
#endif
#ifndef CFG_WITH_PAGER
	INIT_CANARY(stack_thread);
#endif
#endif/*CFG_WITH_STACK_CANARIES*/
}

#define CANARY_DIED(stack, loc, n) \
	do { \
		EMSG_RAW("Dead canary at %s of '%s[%zu]'", #loc, #stack, n); \
		panic(); \
	} while (0)

void thread_check_canaries(void)
{
#ifdef CFG_WITH_STACK_CANARIES
	size_t n;

	for (n = 0; n < ARRAY_SIZE(stack_tmp); n++) {
		if (GET_START_CANARY(stack_tmp, n) != START_CANARY_VALUE)
			CANARY_DIED(stack_tmp, start, n);
		if (GET_END_CANARY(stack_tmp, n) != END_CANARY_VALUE)
			CANARY_DIED(stack_tmp, end, n);
	}

	for (n = 0; n < ARRAY_SIZE(stack_abt); n++) {
		if (GET_START_CANARY(stack_abt, n) != START_CANARY_VALUE)
			CANARY_DIED(stack_abt, start, n);
		if (GET_END_CANARY(stack_abt, n) != END_CANARY_VALUE)
			CANARY_DIED(stack_abt, end, n);
	}
#if !defined(CFG_WITH_ARM_TRUSTED_FW)
	for (n = 0; n < ARRAY_SIZE(stack_sm); n++) {
		if (GET_START_CANARY(stack_sm, n) != START_CANARY_VALUE)
			CANARY_DIED(stack_sm, start, n);
		if (GET_END_CANARY(stack_sm, n) != END_CANARY_VALUE)
			CANARY_DIED(stack_sm, end, n);
	}
#endif
#ifndef CFG_WITH_PAGER
	for (n = 0; n < ARRAY_SIZE(stack_thread); n++) {
		if (GET_START_CANARY(stack_thread, n) != START_CANARY_VALUE)
			CANARY_DIED(stack_thread, start, n);
		if (GET_END_CANARY(stack_thread, n) != END_CANARY_VALUE)
			CANARY_DIED(stack_thread, end, n);
	}
#endif
#endif/*CFG_WITH_STACK_CANARIES*/
}

static void lock_global(void)
{
	cpu_spin_lock(&thread_global_lock);
}

static void unlock_global(void)
{
	cpu_spin_unlock(&thread_global_lock);
}

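/*
 * The accessors below operate on the THREAD_EXCP_* bits as held in
 * CPSR (ARM32) or DAIF (ARM64). The usual pattern in this file is to
 * save the current mask, mask IRQ around a critical section and then
 * restore the previous state, for example:
 *
 *	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);
 *
 *	... access thread_core_local or the global thread state ...
 *
 *	thread_unmask_exceptions(exceptions);
 */
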
#ifdef ARM32
uint32_t thread_get_exceptions(void)
{
	uint32_t cpsr = read_cpsr();

	return (cpsr >> CPSR_F_SHIFT) & THREAD_EXCP_ALL;
}

void thread_set_exceptions(uint32_t exceptions)
{
	uint32_t cpsr = read_cpsr();

	cpsr &= ~(THREAD_EXCP_ALL << CPSR_F_SHIFT);
	cpsr |= ((exceptions & THREAD_EXCP_ALL) << CPSR_F_SHIFT);
	write_cpsr(cpsr);
}
#endif /*ARM32*/

#ifdef ARM64
uint32_t thread_get_exceptions(void)
{
	uint32_t daif = read_daif();

	return (daif >> DAIF_F_SHIFT) & THREAD_EXCP_ALL;
}

void thread_set_exceptions(uint32_t exceptions)
{
	uint32_t daif = read_daif();

	daif &= ~(THREAD_EXCP_ALL << DAIF_F_SHIFT);
	daif |= ((exceptions & THREAD_EXCP_ALL) << DAIF_F_SHIFT);
	write_daif(daif);
}
#endif /*ARM64*/

uint32_t thread_mask_exceptions(uint32_t exceptions)
{
	uint32_t state = thread_get_exceptions();

	thread_set_exceptions(state | (exceptions & THREAD_EXCP_ALL));
	return state;
}

void thread_unmask_exceptions(uint32_t state)
{
	thread_set_exceptions(state & THREAD_EXCP_ALL);
}

struct thread_core_local *thread_get_core_local(void)
{
	uint32_t cpu_id = get_core_pos();

	/*
	 * IRQs must be disabled before playing with core_local since
	 * we otherwise may be rescheduled to a different core in the
	 * middle of this function.
	 */
	assert(thread_get_exceptions() & THREAD_EXCP_IRQ);

	assert(cpu_id < CFG_TEE_CORE_NB_CORE);
	return &thread_core_local[cpu_id];
}

static void thread_lazy_save_ns_vfp(void)
{
#ifdef CFG_WITH_VFP
	struct thread_ctx *thr = threads + thread_get_id();

	thr->vfp_state.ns_saved = false;
#if defined(ARM64) && defined(CFG_WITH_ARM_TRUSTED_FW)
	/*
	 * ARM TF saves and restores CPACR_EL1, so we must assume NS world
	 * uses VFP and always preserve the register file when secure world
	 * is about to use it.
	 */
	thr->vfp_state.ns.force_save = true;
#endif
	vfp_lazy_save_state_init(&thr->vfp_state.ns);
#endif /*CFG_WITH_VFP*/
}

static void thread_lazy_restore_ns_vfp(void)
{
#ifdef CFG_WITH_VFP
	struct thread_ctx *thr = threads + thread_get_id();
	struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp;

	assert(!thr->vfp_state.sec_lazy_saved && !thr->vfp_state.sec_saved);

	if (tuv && tuv->lazy_saved && !tuv->saved) {
		vfp_lazy_save_state_final(&tuv->vfp);
		tuv->saved = true;
	}

	vfp_lazy_restore_state(&thr->vfp_state.ns, thr->vfp_state.ns_saved);
	thr->vfp_state.ns_saved = false;
#endif /*CFG_WITH_VFP*/
}

#ifdef ARM32
static void init_regs(struct thread_ctx *thread,
		struct thread_smc_args *args)
{
	thread->regs.pc = (uint32_t)thread_std_smc_entry;

	/*
	 * Stdcalls start in SVC mode with masked IRQ, masked asynchronous
	 * abort and unmasked FIQ.
	 */
	thread->regs.cpsr = read_cpsr() & ARM32_CPSR_E;
	thread->regs.cpsr |= CPSR_MODE_SVC | CPSR_I | CPSR_A;
	/* Enable thumb mode if it's a thumb instruction */
	if (thread->regs.pc & 1)
		thread->regs.cpsr |= CPSR_T;
	/* Reinitialize stack pointer */
	thread->regs.svc_sp = thread->stack_va_end;

	/*
	 * Copy arguments into context. This will make the
	 * arguments appear in r0-r7 when thread is started.
	 */
	thread->regs.r0 = args->a0;
	thread->regs.r1 = args->a1;
	thread->regs.r2 = args->a2;
	thread->regs.r3 = args->a3;
	thread->regs.r4 = args->a4;
	thread->regs.r5 = args->a5;
	thread->regs.r6 = args->a6;
	thread->regs.r7 = args->a7;
}
#endif /*ARM32*/

#ifdef ARM64
static void init_regs(struct thread_ctx *thread,
		struct thread_smc_args *args)
{
	thread->regs.pc = (uint64_t)thread_std_smc_entry;

	/*
	 * Stdcalls start in SVC mode with masked IRQ, masked asynchronous
	 * abort and unmasked FIQ.
	 */
	thread->regs.cpsr = SPSR_64(SPSR_64_MODE_EL1, SPSR_64_MODE_SP_EL0,
				    DAIFBIT_IRQ | DAIFBIT_ABT);
	/* Reinitialize stack pointer */
	thread->regs.sp = thread->stack_va_end;

	/*
	 * Copy arguments into context. This will make the
	 * arguments appear in x0-x7 when thread is started.
	 */
	thread->regs.x[0] = args->a0;
	thread->regs.x[1] = args->a1;
	thread->regs.x[2] = args->a2;
	thread->regs.x[3] = args->a3;
	thread->regs.x[4] = args->a4;
	thread->regs.x[5] = args->a5;
	thread->regs.x[6] = args->a6;
	thread->regs.x[7] = args->a7;

	/* Set up frame pointer as per the AArch64 AAPCS */
	thread->regs.x[29] = 0;
}
#endif /*ARM64*/

void thread_init_boot_thread(void)
{
	struct thread_core_local *l = thread_get_core_local();
	size_t n;

	for (n = 0; n < CFG_NUM_THREADS; n++) {
		TAILQ_INIT(&threads[n].mutexes);
		TAILQ_INIT(&threads[n].tsd.sess_stack);
#ifdef CFG_SMALL_PAGE_USER_TA
		SLIST_INIT(&threads[n].tsd.pgt_cache);
#endif
	}

	for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++)
		thread_core_local[n].curr_thread = -1;

	l->curr_thread = 0;
	threads[0].state = THREAD_STATE_ACTIVE;
}

void thread_clr_boot_thread(void)
{
	struct thread_core_local *l = thread_get_core_local();

	assert(l->curr_thread >= 0 && l->curr_thread < CFG_NUM_THREADS);
	assert(threads[l->curr_thread].state == THREAD_STATE_ACTIVE);
	assert(TAILQ_EMPTY(&threads[l->curr_thread].mutexes));
	threads[l->curr_thread].state = THREAD_STATE_FREE;
	l->curr_thread = -1;
}

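/*
 * Outline of a standard call, as implemented below:
 * thread_handle_std_smc() either claims a THREAD_STATE_FREE entry
 * (thread_alloc_and_run()) and starts it at thread_std_smc_entry with
 * the SMC arguments in r0-r7/x0-x7, or, for
 * OPTEE_SMC_CALL_RETURN_FROM_RPC, revives a THREAD_STATE_SUSPENDED
 * thread (thread_resume_from_rpc()) identified by the thread id that
 * normal world passes back in a3.
 */
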
static void thread_alloc_and_run(struct thread_smc_args *args)
{
	size_t n;
	struct thread_core_local *l = thread_get_core_local();
	bool found_thread = false;

	assert(l->curr_thread == -1);

	lock_global();

	for (n = 0; n < CFG_NUM_THREADS; n++) {
		if (threads[n].state == THREAD_STATE_FREE) {
			threads[n].state = THREAD_STATE_ACTIVE;
			found_thread = true;
			break;
		}
	}

	unlock_global();

	if (!found_thread) {
		args->a0 = OPTEE_SMC_RETURN_ETHREAD_LIMIT;
		return;
	}

	l->curr_thread = n;

	threads[n].flags = 0;
	init_regs(threads + n, args);

	/* Save Hypervisor Client ID */
	threads[n].hyp_clnt_id = args->a7;

	thread_lazy_save_ns_vfp();
	thread_resume(&threads[n].regs);
}

#ifdef ARM32
static void copy_a0_to_a5(struct thread_ctx_regs *regs,
		struct thread_smc_args *args)
{
	/*
	 * Update returned values from RPC, values will appear in
	 * r0-r3 when thread is resumed.
	 */
	regs->r0 = args->a0;
	regs->r1 = args->a1;
	regs->r2 = args->a2;
	regs->r3 = args->a3;
	regs->r4 = args->a4;
	regs->r5 = args->a5;
}
#endif /*ARM32*/

#ifdef ARM64
static void copy_a0_to_a5(struct thread_ctx_regs *regs,
		struct thread_smc_args *args)
{
	/*
	 * Update returned values from RPC, values will appear in
	 * x0-x3 when thread is resumed.
	 */
	regs->x[0] = args->a0;
	regs->x[1] = args->a1;
	regs->x[2] = args->a2;
	regs->x[3] = args->a3;
	regs->x[4] = args->a4;
	regs->x[5] = args->a5;
}
#endif /*ARM64*/

static void thread_resume_from_rpc(struct thread_smc_args *args)
{
	size_t n = args->a3; /* thread id */
	struct thread_core_local *l = thread_get_core_local();
	uint32_t rv = 0;

	assert(l->curr_thread == -1);

	lock_global();

	if (n < CFG_NUM_THREADS &&
	    threads[n].state == THREAD_STATE_SUSPENDED &&
	    args->a7 == threads[n].hyp_clnt_id)
		threads[n].state = THREAD_STATE_ACTIVE;
	else
		rv = OPTEE_SMC_RETURN_ERESUME;

	unlock_global();

	if (rv) {
		args->a0 = rv;
		return;
	}

	l->curr_thread = n;

	if (threads[n].have_user_map)
		core_mmu_set_user_map(&threads[n].user_map);

	/*
	 * A return from an RPC that requested service of an IRQ must
	 * not pick up parameters from non-secure world.
	 */
	if (threads[n].flags & THREAD_FLAGS_COPY_ARGS_ON_RETURN) {
		copy_a0_to_a5(&threads[n].regs, args);
		threads[n].flags &= ~THREAD_FLAGS_COPY_ARGS_ON_RETURN;
	}

	thread_lazy_save_ns_vfp();
	thread_resume(&threads[n].regs);
}

void thread_handle_fast_smc(struct thread_smc_args *args)
{
	thread_check_canaries();
	thread_fast_smc_handler_ptr(args);
	/* Fast handlers must not unmask any exceptions */
	assert(thread_get_exceptions() == THREAD_EXCP_ALL);
}

void thread_handle_std_smc(struct thread_smc_args *args)
{
	thread_check_canaries();

	if (args->a0 == OPTEE_SMC_CALL_RETURN_FROM_RPC)
		thread_resume_from_rpc(args);
	else
		thread_alloc_and_run(args);
}

/* Helper routine for the assembly function thread_std_smc_entry() */
void __thread_std_smc_entry(struct thread_smc_args *args)
{
	struct thread_ctx *thr = threads + thread_get_id();

	if (!thr->rpc_arg) {
		paddr_t parg;
		uint64_t carg;
		void *arg;

		thread_rpc_alloc_arg(
			OPTEE_MSG_GET_ARG_SIZE(RPC_MAX_NUM_PARAMS),
			&parg, &carg);
		if (!parg || !ALIGNMENT_IS_OK(parg, struct optee_msg_arg) ||
		    !(arg = phys_to_virt(parg, CORE_MEM_NSEC_SHM))) {
			thread_rpc_free_arg(carg);
			args->a0 = OPTEE_SMC_RETURN_ENOMEM;
			return;
		}

		thr->rpc_arg = arg;
		thr->rpc_carg = carg;
	}

	thread_std_smc_handler_ptr(args);

	tee_fs_rpc_cache_clear(&thr->tsd);
	if (!thread_prealloc_rpc_cache) {
		thread_rpc_free_arg(thr->rpc_carg);
		thr->rpc_carg = 0;
		thr->rpc_arg = 0;
	}
}

void *thread_get_tmp_sp(void)
{
	struct thread_core_local *l = thread_get_core_local();

	return (void *)l->tmp_stack_va_end;
}

#ifdef ARM64
vaddr_t thread_get_saved_thread_sp(void)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != -1);
	return threads[ct].kern_sp;
}
#endif /*ARM64*/

bool thread_addr_is_in_stack(vaddr_t va)
{
	struct thread_ctx *thr = threads + thread_get_id();

	return va < thr->stack_va_end &&
	       va >= (thr->stack_va_end - STACK_THREAD_SIZE);
}

void thread_state_free(void)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != -1);
	assert(TAILQ_EMPTY(&threads[ct].mutexes));

	thread_lazy_restore_ns_vfp();
	tee_pager_release_phys(
		(void *)(threads[ct].stack_va_end - STACK_THREAD_SIZE),
		STACK_THREAD_SIZE);

	lock_global();

	assert(threads[ct].state == THREAD_STATE_ACTIVE);
	threads[ct].state = THREAD_STATE_FREE;
	threads[ct].flags = 0;
	l->curr_thread = -1;

	unlock_global();
}

#ifdef ARM32
static bool is_from_user(uint32_t cpsr)
{
	return (cpsr & ARM32_CPSR_MODE_MASK) == ARM32_CPSR_MODE_USR;
}
#endif

#ifdef ARM64
static bool is_from_user(uint32_t cpsr)
{
	if (cpsr & (SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT))
		return true;
	if (((cpsr >> SPSR_64_MODE_EL_SHIFT) & SPSR_64_MODE_EL_MASK) ==
	    SPSR_64_MODE_EL0)
		return true;
	return false;
}
#endif

#ifdef CFG_WITH_PAGER
static void release_unused_kernel_stack(struct thread_ctx *thr)
{
	vaddr_t sp = thr->regs.svc_sp;
	vaddr_t base = thr->stack_va_end - STACK_THREAD_SIZE;
	size_t len = sp - base;

	tee_pager_release_phys((void *)base, len);
}
#else
static void release_unused_kernel_stack(struct thread_ctx *thr __unused)
{
}
#endif

int thread_state_suspend(uint32_t flags, uint32_t cpsr, vaddr_t pc)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != -1);

	thread_check_canaries();

	release_unused_kernel_stack(threads + ct);

	if (is_from_user(cpsr))
		thread_user_save_vfp();
	thread_lazy_restore_ns_vfp();

	lock_global();

	assert(threads[ct].state == THREAD_STATE_ACTIVE);
	threads[ct].flags |= flags;
	threads[ct].regs.cpsr = cpsr;
	threads[ct].regs.pc = pc;
	threads[ct].state = THREAD_STATE_SUSPENDED;

	threads[ct].have_user_map = core_mmu_user_mapping_is_active();
	if (threads[ct].have_user_map) {
		core_mmu_get_user_map(&threads[ct].user_map);
		core_mmu_set_user_map(NULL);
	}

	l->curr_thread = -1;

	unlock_global();

	return ct;
}

#ifdef ARM32
static void set_tmp_stack(struct thread_core_local *l, vaddr_t sp)
{
	l->tmp_stack_va_end = sp;
	thread_set_irq_sp(sp);
	thread_set_fiq_sp(sp);
}

static void set_abt_stack(struct thread_core_local *l __unused, vaddr_t sp)
{
	thread_set_abt_sp(sp);
}
#endif /*ARM32*/

#ifdef ARM64
static void set_tmp_stack(struct thread_core_local *l, vaddr_t sp)
{
	/*
	 * We're already using the tmp stack when this function is called
	 * so there's no need to assign it to any stack pointer. However,
	 * we'll need to restore it at different times so store it here.
	 */
	l->tmp_stack_va_end = sp;
}

static void set_abt_stack(struct thread_core_local *l, vaddr_t sp)
{
	l->abt_stack_va_end = sp;
}
#endif /*ARM64*/

bool thread_init_stack(uint32_t thread_id, vaddr_t sp)
{
	if (thread_id >= CFG_NUM_THREADS)
		return false;
	threads[thread_id].stack_va_end = sp;
	return true;
}

int thread_get_id_may_fail(void)
{
	/* thread_get_core_local() requires IRQs to be disabled */
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	thread_unmask_exceptions(exceptions);
	return ct;
}

int thread_get_id(void)
{
	int ct = thread_get_id_may_fail();

	assert(ct >= 0 && ct < CFG_NUM_THREADS);
	return ct;
}

static void init_handlers(const struct thread_handlers *handlers)
{
	thread_std_smc_handler_ptr = handlers->std_smc;
	thread_fast_smc_handler_ptr = handlers->fast_smc;
	thread_fiq_handler_ptr = handlers->fiq;
	thread_cpu_on_handler_ptr = handlers->cpu_on;
	thread_cpu_off_handler_ptr = handlers->cpu_off;
	thread_cpu_suspend_handler_ptr = handlers->cpu_suspend;
	thread_cpu_resume_handler_ptr = handlers->cpu_resume;
	thread_system_off_handler_ptr = handlers->system_off;
	thread_system_reset_handler_ptr = handlers->system_reset;
}

#ifdef CFG_WITH_PAGER
static void init_thread_stacks(void)
{
	size_t n;

	/*
	 * Allocate virtual memory for thread stacks.
	 */
	for (n = 0; n < CFG_NUM_THREADS; n++) {
		tee_mm_entry_t *mm;
		vaddr_t sp;

		/* Find vmem for thread stack and its protection gap */
		mm = tee_mm_alloc(&tee_mm_vcore,
				  SMALL_PAGE_SIZE + STACK_THREAD_SIZE);
		assert(mm);

		/* Claim eventual physical page */
		tee_pager_add_pages(tee_mm_get_smem(mm), tee_mm_get_size(mm),
				    true);

		/* Add the area to the pager */
		tee_pager_add_core_area(tee_mm_get_smem(mm) + SMALL_PAGE_SIZE,
					tee_mm_get_bytes(mm) - SMALL_PAGE_SIZE,
					TEE_MATTR_PRW | TEE_MATTR_LOCKED,
					NULL, NULL);

		/* init effective stack */
		sp = tee_mm_get_smem(mm) + tee_mm_get_bytes(mm);
		if (!thread_init_stack(n, sp))
			panic("init stack failed");
	}
}
#else
static void init_thread_stacks(void)
{
	size_t n;

	/* Assign the thread stacks */
	for (n = 0; n < CFG_NUM_THREADS; n++) {
		if (!thread_init_stack(n, GET_STACK(stack_thread[n])))
			panic("thread_init_stack failed");
	}
}
#endif /*CFG_WITH_PAGER*/

void thread_init_primary(const struct thread_handlers *handlers)
{
	init_handlers(handlers);

	/* Initialize canaries around the stacks */
	init_canaries();

	init_thread_stacks();
	pgt_init();
}

static void init_sec_mon(size_t pos __maybe_unused)
{
#if !defined(CFG_WITH_ARM_TRUSTED_FW)
	/* Initialize secure monitor */
	sm_init(GET_STACK(stack_sm[pos]));
	sm_set_entry_vector(thread_vector_table);
#endif
}

void thread_init_per_cpu(void)
{
	size_t pos = get_core_pos();
	struct thread_core_local *l = thread_get_core_local();

	init_sec_mon(pos);

	set_tmp_stack(l, GET_STACK(stack_tmp[pos]));
	set_abt_stack(l, GET_STACK(stack_abt[pos]));

	thread_init_vbar();
}

struct thread_specific_data *thread_get_tsd(void)
{
	return &threads[thread_get_id()].tsd;
}

struct thread_ctx_regs *thread_get_ctx_regs(void)
{
	struct thread_core_local *l = thread_get_core_local();

	assert(l->curr_thread != -1);
	return &threads[l->curr_thread].regs;
}

void thread_set_irq(bool enable)
{
	/* thread_get_core_local() requires IRQs to be disabled */
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);
	struct thread_core_local *l;

	l = thread_get_core_local();

	assert(l->curr_thread != -1);

	if (enable) {
		threads[l->curr_thread].flags |= THREAD_FLAGS_IRQ_ENABLE;
		thread_set_exceptions(exceptions & ~THREAD_EXCP_IRQ);
	} else {
		/*
		 * No need to disable IRQ here since it's already disabled
		 * above.
		 */
		threads[l->curr_thread].flags &= ~THREAD_FLAGS_IRQ_ENABLE;
	}
}

void thread_restore_irq(void)
{
	/* thread_get_core_local() requires IRQs to be disabled */
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);
	struct thread_core_local *l;

	l = thread_get_core_local();

	assert(l->curr_thread != -1);

	if (threads[l->curr_thread].flags & THREAD_FLAGS_IRQ_ENABLE)
		thread_set_exceptions(exceptions & ~THREAD_EXCP_IRQ);
}

#ifdef CFG_WITH_VFP
uint32_t thread_kernel_enable_vfp(void)
{
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);
	struct thread_ctx *thr = threads + thread_get_id();
	struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp;

	assert(!vfp_is_enabled());

	if (!thr->vfp_state.ns_saved) {
		vfp_lazy_save_state_final(&thr->vfp_state.ns);
		thr->vfp_state.ns_saved = true;
	} else if (thr->vfp_state.sec_lazy_saved &&
		   !thr->vfp_state.sec_saved) {
		/*
		 * This happens when we're handling an abort while the
		 * thread was using the VFP state.
		 */
		vfp_lazy_save_state_final(&thr->vfp_state.sec);
		thr->vfp_state.sec_saved = true;
	} else if (tuv && tuv->lazy_saved && !tuv->saved) {
		/*
		 * This can happen either during syscall or abort
		 * processing (while processing a syscall).
		 */
		vfp_lazy_save_state_final(&tuv->vfp);
		tuv->saved = true;
	}

	vfp_enable();
	return exceptions;
}

void thread_kernel_disable_vfp(uint32_t state)
{
	uint32_t exceptions;

	assert(vfp_is_enabled());

	vfp_disable();
	exceptions = thread_get_exceptions();
	assert(exceptions & THREAD_EXCP_IRQ);
	exceptions &= ~THREAD_EXCP_IRQ;
	exceptions |= state & THREAD_EXCP_IRQ;
	thread_set_exceptions(exceptions);
}

void thread_kernel_save_vfp(void)
{
	struct thread_ctx *thr = threads + thread_get_id();

	assert(thread_get_exceptions() & THREAD_EXCP_IRQ);
	if (vfp_is_enabled()) {
		vfp_lazy_save_state_init(&thr->vfp_state.sec);
		thr->vfp_state.sec_lazy_saved = true;
	}
}

void thread_kernel_restore_vfp(void)
{
	struct thread_ctx *thr = threads + thread_get_id();

	assert(thread_get_exceptions() & THREAD_EXCP_IRQ);
	assert(!vfp_is_enabled());
	if (thr->vfp_state.sec_lazy_saved) {
		vfp_lazy_restore_state(&thr->vfp_state.sec,
				       thr->vfp_state.sec_saved);
		thr->vfp_state.sec_saved = false;
		thr->vfp_state.sec_lazy_saved = false;
	}
}

void thread_user_enable_vfp(struct thread_user_vfp_state *uvfp)
{
	struct thread_ctx *thr = threads + thread_get_id();
	struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp;

	assert(thread_get_exceptions() & THREAD_EXCP_IRQ);
	assert(!vfp_is_enabled());

	if (!thr->vfp_state.ns_saved) {
		vfp_lazy_save_state_final(&thr->vfp_state.ns);
		thr->vfp_state.ns_saved = true;
	} else if (tuv && uvfp != tuv) {
		if (tuv->lazy_saved && !tuv->saved) {
			vfp_lazy_save_state_final(&tuv->vfp);
			tuv->saved = true;
		}
	}

	if (uvfp->lazy_saved)
		vfp_lazy_restore_state(&uvfp->vfp, uvfp->saved);
	uvfp->lazy_saved = false;
	uvfp->saved = false;

	thr->vfp_state.uvfp = uvfp;
	vfp_enable();
}

void thread_user_save_vfp(void)
{
	struct thread_ctx *thr = threads + thread_get_id();
	struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp;

	assert(thread_get_exceptions() & THREAD_EXCP_IRQ);
	if (!vfp_is_enabled())
		return;

	assert(tuv && !tuv->lazy_saved && !tuv->saved);
	vfp_lazy_save_state_init(&tuv->vfp);
	tuv->lazy_saved = true;
}

void thread_user_clear_vfp(struct thread_user_vfp_state *uvfp)
{
	struct thread_ctx *thr = threads + thread_get_id();

	if (uvfp == thr->vfp_state.uvfp)
		thr->vfp_state.uvfp = NULL;
	uvfp->lazy_saved = false;
	uvfp->saved = false;
}
#endif /*CFG_WITH_VFP*/

#ifdef ARM32
static bool get_spsr(bool is_32bit, unsigned long entry_func, uint32_t *spsr)
{
	uint32_t s;

	if (!is_32bit)
		return false;

	s = read_spsr();
	s &= ~(CPSR_MODE_MASK | CPSR_T | CPSR_IT_MASK1 | CPSR_IT_MASK2);
	s |= CPSR_MODE_USR;
	if (entry_func & 1)
		s |= CPSR_T;
	*spsr = s;
	return true;
}
#endif

#ifdef ARM64
static bool get_spsr(bool is_32bit, unsigned long entry_func, uint32_t *spsr)
{
	uint32_t s;

	if (is_32bit) {
		s = read_daif() & (SPSR_32_AIF_MASK << SPSR_32_AIF_SHIFT);
		s |= SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT;
		s |= (entry_func & SPSR_32_T_MASK) << SPSR_32_T_SHIFT;
	} else {
		s = read_daif() & (SPSR_64_DAIF_MASK << SPSR_64_DAIF_SHIFT);
	}

	*spsr = s;
	return true;
}
#endif

uint32_t thread_enter_user_mode(unsigned long a0, unsigned long a1,
		unsigned long a2, unsigned long a3, unsigned long user_sp,
		unsigned long entry_func, bool is_32bit,
		uint32_t *exit_status0, uint32_t *exit_status1)
{
	uint32_t spsr;

	if (!get_spsr(is_32bit, entry_func, &spsr)) {
		*exit_status0 = 1; /* panic */
		*exit_status1 = 0xbadbadba;
		return 0;
	}
	return __thread_enter_user_mode(a0, a1, a2, a3, user_sp, entry_func,
					spsr, exit_status0, exit_status1);
}

void thread_add_mutex(struct mutex *m)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != -1 && threads[ct].state == THREAD_STATE_ACTIVE);
	assert(m->owner_id == -1);
	m->owner_id = ct;
	TAILQ_INSERT_TAIL(&threads[ct].mutexes, m, link);
}

void thread_rem_mutex(struct mutex *m)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != -1 && threads[ct].state == THREAD_STATE_ACTIVE);
	assert(m->owner_id == ct);
	m->owner_id = -1;
	TAILQ_REMOVE(&threads[ct].mutexes, m, link);
}

bool thread_disable_prealloc_rpc_cache(uint64_t *cookie)
{
	bool rv;
	size_t n;
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);

	lock_global();

	for (n = 0; n < CFG_NUM_THREADS; n++) {
		if (threads[n].state != THREAD_STATE_FREE) {
			rv = false;
			goto out;
		}
	}

	rv = true;
	for (n = 0; n < CFG_NUM_THREADS; n++) {
		if (threads[n].rpc_arg) {
			*cookie = threads[n].rpc_carg;
			threads[n].rpc_carg = 0;
			threads[n].rpc_arg = NULL;
			goto out;
		}
	}

	*cookie = 0;
	thread_prealloc_rpc_cache = false;
out:
	unlock_global();
	thread_unmask_exceptions(exceptions);
	return rv;
}

bool thread_enable_prealloc_rpc_cache(void)
{
	bool rv;
	size_t n;
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);

	lock_global();

	for (n = 0; n < CFG_NUM_THREADS; n++) {
		if (threads[n].state != THREAD_STATE_FREE) {
			rv = false;
			goto out;
		}
	}

	rv = true;
	thread_prealloc_rpc_cache = true;
out:
	unlock_global();
	thread_unmask_exceptions(exceptions);
	return rv;
}

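/*
 * RPC marshalling, as done below: the command and parameters are
 * written into this thread's preallocated struct optee_msg_arg in
 * non-secure shared memory, the 64-bit cookie naming that buffer is
 * split over two SMC argument registers with reg_pair_from_64(), and
 * thread_rpc() suspends the thread until normal world returns. Only
 * parameters of output type are copied back to the caller.
 */
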
static uint32_t rpc_cmd_nolock(uint32_t cmd, size_t num_params,
		struct optee_msg_param *params)
{
	uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = { OPTEE_SMC_RETURN_RPC_CMD };
	struct thread_ctx *thr = threads + thread_get_id();
	struct optee_msg_arg *arg = thr->rpc_arg;
	uint64_t carg = thr->rpc_carg;
	const size_t params_size = sizeof(struct optee_msg_param) * num_params;
	size_t n;

	assert(arg && carg && num_params <= RPC_MAX_NUM_PARAMS);

	memset(arg, 0, OPTEE_MSG_GET_ARG_SIZE(RPC_MAX_NUM_PARAMS));
	arg->cmd = cmd;
	arg->ret = TEE_ERROR_GENERIC; /* in case value isn't updated */
	arg->num_params = num_params;
	memcpy(OPTEE_MSG_GET_PARAMS(arg), params, params_size);

	reg_pair_from_64(carg, rpc_args + 1, rpc_args + 2);
	thread_rpc(rpc_args);
	for (n = 0; n < num_params; n++) {
		switch (params[n].attr & OPTEE_MSG_ATTR_TYPE_MASK) {
		case OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT:
		case OPTEE_MSG_ATTR_TYPE_VALUE_INOUT:
		case OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT:
		case OPTEE_MSG_ATTR_TYPE_RMEM_INOUT:
		case OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT:
		case OPTEE_MSG_ATTR_TYPE_TMEM_INOUT:
			memcpy(params + n, OPTEE_MSG_GET_PARAMS(arg) + n,
			       sizeof(struct optee_msg_param));
			break;
		default:
			break;
		}
	}
	return arg->ret;
}

uint32_t thread_rpc_cmd(uint32_t cmd, size_t num_params,
		struct optee_msg_param *params)
{
	return rpc_cmd_nolock(cmd, num_params, params);
}

static bool check_alloced_shm(paddr_t pa, size_t len, size_t align)
{
	if (pa & (align - 1))
		return false;
	return core_pbuf_is(CORE_MEM_NSEC_SHM, pa, len);
}

void thread_rpc_free_arg(uint64_t cookie)
{
	if (cookie) {
		uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = {
			OPTEE_SMC_RETURN_RPC_FREE
		};

		reg_pair_from_64(cookie, rpc_args + 1, rpc_args + 2);
		thread_rpc(rpc_args);
	}
}

void thread_rpc_alloc_arg(size_t size, paddr_t *arg, uint64_t *cookie)
{
	paddr_t pa;
	uint64_t co;
	uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = {
		OPTEE_SMC_RETURN_RPC_ALLOC, size
	};

	thread_rpc(rpc_args);

	pa = reg_pair_to_64(rpc_args[1], rpc_args[2]);
	co = reg_pair_to_64(rpc_args[4], rpc_args[5]);
	if (!check_alloced_shm(pa, size, sizeof(uint64_t))) {
		thread_rpc_free_arg(co);
		pa = 0;
		co = 0;
	}

	*arg = pa;
	*cookie = co;
}

/**
 * Free physical memory previously allocated with thread_rpc_alloc()
 *
 * @cookie:	cookie received when allocating the buffer
 * @bt:		must be the same as supplied when allocating
 */
static void thread_rpc_free(unsigned int bt, uint64_t cookie)
{
	uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = { OPTEE_SMC_RETURN_RPC_CMD };
	struct thread_ctx *thr = threads + thread_get_id();
	struct optee_msg_arg *arg = thr->rpc_arg;
	uint64_t carg = thr->rpc_carg;
	struct optee_msg_param *params = OPTEE_MSG_GET_PARAMS(arg);

	memset(arg, 0, OPTEE_MSG_GET_ARG_SIZE(1));
	arg->cmd = OPTEE_MSG_RPC_CMD_SHM_FREE;
	arg->ret = TEE_ERROR_GENERIC; /* in case value isn't updated */
	arg->num_params = 1;

	params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
	params[0].u.value.a = bt;
	params[0].u.value.b = cookie;
	params[0].u.value.c = 0;

	reg_pair_from_64(carg, rpc_args + 1, rpc_args + 2);
	thread_rpc(rpc_args);
}

/**
 * Allocates shared memory buffer via RPC
 *
 * @size:	size in bytes of shared memory buffer
 * @align:	required alignment of buffer
 * @bt:		buffer type OPTEE_MSG_RPC_SHM_TYPE_*
 * @payload:	returned physical pointer to buffer, 0 if allocation
 *		failed
 * @cookie:	returned cookie used when freeing the buffer
 */
static void thread_rpc_alloc(size_t size, size_t align, unsigned int bt,
		paddr_t *payload, uint64_t *cookie)
{
	uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = { OPTEE_SMC_RETURN_RPC_CMD };
	struct thread_ctx *thr = threads + thread_get_id();
	struct optee_msg_arg *arg = thr->rpc_arg;
	uint64_t carg = thr->rpc_carg;
	struct optee_msg_param *params = OPTEE_MSG_GET_PARAMS(arg);

	memset(arg, 0, OPTEE_MSG_GET_ARG_SIZE(1));
	arg->cmd = OPTEE_MSG_RPC_CMD_SHM_ALLOC;
	arg->ret = TEE_ERROR_GENERIC; /* in case value isn't updated */
	arg->num_params = 1;

	params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
	params[0].u.value.a = bt;
	params[0].u.value.b = size;
	params[0].u.value.c = align;

	reg_pair_from_64(carg, rpc_args + 1, rpc_args + 2);
	thread_rpc(rpc_args);
	if (arg->ret != TEE_SUCCESS)
		goto fail;

	if (arg->num_params != 1)
		goto fail;

	if (params[0].attr != OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT)
		goto fail;

	if (!check_alloced_shm(params[0].u.tmem.buf_ptr, size, align)) {
		thread_rpc_free(bt, params[0].u.tmem.shm_ref);
		goto fail;
	}

	*payload = params[0].u.tmem.buf_ptr;
	*cookie = params[0].u.tmem.shm_ref;
	return;
fail:
	*payload = 0;
	*cookie = 0;
}

void thread_rpc_alloc_payload(size_t size, paddr_t *payload, uint64_t *cookie)
{
	thread_rpc_alloc(size, 8, OPTEE_MSG_RPC_SHM_TYPE_APPL, payload, cookie);
}

void thread_rpc_free_payload(uint64_t cookie)
{
	thread_rpc_free(OPTEE_MSG_RPC_SHM_TYPE_APPL, cookie);
}