// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2016, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <platform_config.h>

#include <arm.h>
#include <assert.h>
#include <keep.h>
#include <kernel/asan.h>
#include <kernel/misc.h>
#include <kernel/msg_param.h>
#include <kernel/panic.h>
#include <kernel/spinlock.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/thread_defs.h>
#include <kernel/thread.h>
#include <mm/core_memprot.h>
#include <mm/mobj.h>
#include <mm/tee_mm.h>
#include <mm/tee_mmu.h>
#include <mm/tee_pager.h>
#include <optee_msg.h>
#include <sm/optee_smc.h>
#include <sm/sm.h>
#include <tee/tee_cryp_utl.h>
#include <tee/tee_fs_rpc.h>
#include <trace.h>
#include <util.h>

#include "thread_private.h"

#ifdef CFG_WITH_ARM_TRUSTED_FW
#define STACK_TMP_OFFS		0
#else
#define STACK_TMP_OFFS		SM_STACK_TMP_RESERVE_SIZE
#endif

#ifdef ARM32
#ifdef CFG_CORE_SANITIZE_KADDRESS
#define STACK_TMP_SIZE		(3072 + STACK_TMP_OFFS)
#else
#define STACK_TMP_SIZE		(1536 + STACK_TMP_OFFS)
#endif
#define STACK_THREAD_SIZE	8192

#ifdef CFG_CORE_SANITIZE_KADDRESS
#define STACK_ABT_SIZE		3072
#else
#define STACK_ABT_SIZE		2048
#endif

#endif /*ARM32*/

#ifdef ARM64
#define STACK_TMP_SIZE		(2048 + STACK_TMP_OFFS)
#define STACK_THREAD_SIZE	8192

#if TRACE_LEVEL > 0
#define STACK_ABT_SIZE		3072
#else
#define STACK_ABT_SIZE		1024
#endif
#endif /*ARM64*/

struct thread_ctx threads[CFG_NUM_THREADS];

struct thread_core_local thread_core_local[CFG_TEE_CORE_NB_CORE];

#ifdef CFG_WITH_STACK_CANARIES
#ifdef ARM32
#define STACK_CANARY_SIZE	(4 * sizeof(uint32_t))
#endif
#ifdef ARM64
#define STACK_CANARY_SIZE	(8 * sizeof(uint32_t))
#endif
#define START_CANARY_VALUE	0xdededede
#define END_CANARY_VALUE	0xabababab
#define GET_START_CANARY(name, stack_num) name[stack_num][0]
#define GET_END_CANARY(name, stack_num) \
	name[stack_num][sizeof(name[stack_num]) / sizeof(uint32_t) - 1]
#else
#define STACK_CANARY_SIZE	0
#endif
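/*
 * Illustrative sketch (not part of the build): with canaries enabled each
 * stack array declared below is laid out as
 *
 *	[start canary area][usable stack, grows downwards][end canary area]
 *
 * where GET_START_CANARY() names the first word of the array and
 * GET_END_CANARY() the last one. Half of STACK_CANARY_SIZE is reserved at
 * each end, which is why STACK_SIZE()/GET_STACK() below subtract
 * STACK_CANARY_SIZE / 2 when computing the initial stack pointer.
 */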

#define DECLARE_STACK(name, num_stacks, stack_size, linkage) \
linkage uint32_t name[num_stacks] \
		[ROUNDUP(stack_size + STACK_CANARY_SIZE, STACK_ALIGNMENT) / \
		sizeof(uint32_t)] \
		__attribute__((section(".nozi_stack"), \
			       aligned(STACK_ALIGNMENT)))

#define STACK_SIZE(stack) (sizeof(stack) - STACK_CANARY_SIZE / 2)

#define GET_STACK(stack) \
	((vaddr_t)(stack) + STACK_SIZE(stack))

DECLARE_STACK(stack_tmp, CFG_TEE_CORE_NB_CORE, STACK_TMP_SIZE, static);
DECLARE_STACK(stack_abt, CFG_TEE_CORE_NB_CORE, STACK_ABT_SIZE, static);
#ifndef CFG_WITH_PAGER
DECLARE_STACK(stack_thread, CFG_NUM_THREADS, STACK_THREAD_SIZE, static);
#endif

const void *stack_tmp_export = (uint8_t *)stack_tmp + sizeof(stack_tmp[0]) -
			       (STACK_TMP_OFFS + STACK_CANARY_SIZE / 2);
const uint32_t stack_tmp_stride = sizeof(stack_tmp[0]);

/*
 * This stack setup info is required by secondary boot cores before they
 * each locally enable the pager (the MMU). Hence it is kept in the pager
 * sections.
 */
KEEP_PAGER(stack_tmp_export);
KEEP_PAGER(stack_tmp_stride);

thread_smc_handler_t thread_std_smc_handler_ptr;
static thread_smc_handler_t thread_fast_smc_handler_ptr;
thread_nintr_handler_t thread_nintr_handler_ptr;
thread_pm_handler_t thread_cpu_on_handler_ptr;
thread_pm_handler_t thread_cpu_off_handler_ptr;
thread_pm_handler_t thread_cpu_suspend_handler_ptr;
thread_pm_handler_t thread_cpu_resume_handler_ptr;
thread_pm_handler_t thread_system_off_handler_ptr;
thread_pm_handler_t thread_system_reset_handler_ptr;

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
static vaddr_t thread_user_kcode_va;
long thread_user_kcode_offset;
static size_t thread_user_kcode_size;
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/

static unsigned int thread_global_lock = SPINLOCK_UNLOCK;
static bool thread_prealloc_rpc_cache;

static void init_canaries(void)
{
#ifdef CFG_WITH_STACK_CANARIES
	size_t n;
#define INIT_CANARY(name)						\
	for (n = 0; n < ARRAY_SIZE(name); n++) {			\
		uint32_t *start_canary = &GET_START_CANARY(name, n);	\
		uint32_t *end_canary = &GET_END_CANARY(name, n);	\
									\
		*start_canary = START_CANARY_VALUE;			\
		*end_canary = END_CANARY_VALUE;				\
		DMSG("#Stack canaries for %s[%zu] with top at %p\n",	\
		     #name, n, (void *)(end_canary - 1));		\
		DMSG("watch *%p\n", (void *)end_canary);		\
	}

	INIT_CANARY(stack_tmp);
	INIT_CANARY(stack_abt);
#ifndef CFG_WITH_PAGER
	INIT_CANARY(stack_thread);
#endif
#endif/*CFG_WITH_STACK_CANARIES*/
}

#define CANARY_DIED(stack, loc, n)					\
	do {								\
		EMSG_RAW("Dead canary at %s of '%s[%zu]'", #loc, #stack, n); \
		panic();						\
	} while (0)
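
/*
 * thread_check_canaries() below runs on every SMC entry (see
 * thread_handle_fast_smc() and thread_handle_std_smc()), so a stack
 * overflow is caught at the next entry into secure world rather than at
 * the moment the overwrite happens.
 */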

void thread_check_canaries(void)
{
#ifdef CFG_WITH_STACK_CANARIES
	size_t n;

	for (n = 0; n < ARRAY_SIZE(stack_tmp); n++) {
		if (GET_START_CANARY(stack_tmp, n) != START_CANARY_VALUE)
			CANARY_DIED(stack_tmp, start, n);
		if (GET_END_CANARY(stack_tmp, n) != END_CANARY_VALUE)
			CANARY_DIED(stack_tmp, end, n);
	}

	for (n = 0; n < ARRAY_SIZE(stack_abt); n++) {
		if (GET_START_CANARY(stack_abt, n) != START_CANARY_VALUE)
			CANARY_DIED(stack_abt, start, n);
		if (GET_END_CANARY(stack_abt, n) != END_CANARY_VALUE)
			CANARY_DIED(stack_abt, end, n);
	}
#ifndef CFG_WITH_PAGER
	for (n = 0; n < ARRAY_SIZE(stack_thread); n++) {
		if (GET_START_CANARY(stack_thread, n) != START_CANARY_VALUE)
			CANARY_DIED(stack_thread, start, n);
		if (GET_END_CANARY(stack_thread, n) != END_CANARY_VALUE)
			CANARY_DIED(stack_thread, end, n);
	}
#endif
#endif/*CFG_WITH_STACK_CANARIES*/
}

static void lock_global(void)
{
	cpu_spin_lock(&thread_global_lock);
}

static void unlock_global(void)
{
	cpu_spin_unlock(&thread_global_lock);
}

#ifdef ARM32
uint32_t thread_get_exceptions(void)
{
	uint32_t cpsr = read_cpsr();

	return (cpsr >> CPSR_F_SHIFT) & THREAD_EXCP_ALL;
}

void thread_set_exceptions(uint32_t exceptions)
{
	uint32_t cpsr = read_cpsr();

	/* Foreign interrupts must not be unmasked while holding a spinlock */
	if (!(exceptions & THREAD_EXCP_FOREIGN_INTR))
		assert_have_no_spinlock();

	cpsr &= ~(THREAD_EXCP_ALL << CPSR_F_SHIFT);
	cpsr |= ((exceptions & THREAD_EXCP_ALL) << CPSR_F_SHIFT);
	write_cpsr(cpsr);
}
#endif /*ARM32*/

#ifdef ARM64
uint32_t thread_get_exceptions(void)
{
	uint32_t daif = read_daif();

	return (daif >> DAIF_F_SHIFT) & THREAD_EXCP_ALL;
}

void thread_set_exceptions(uint32_t exceptions)
{
	uint32_t daif = read_daif();

	/* Foreign interrupts must not be unmasked while holding a spinlock */
	if (!(exceptions & THREAD_EXCP_FOREIGN_INTR))
		assert_have_no_spinlock();

	daif &= ~(THREAD_EXCP_ALL << DAIF_F_SHIFT);
	daif |= ((exceptions & THREAD_EXCP_ALL) << DAIF_F_SHIFT);
	write_daif(daif);
}
#endif /*ARM64*/

uint32_t thread_mask_exceptions(uint32_t exceptions)
{
	uint32_t state = thread_get_exceptions();

	thread_set_exceptions(state | (exceptions & THREAD_EXCP_ALL));
	return state;
}

void thread_unmask_exceptions(uint32_t state)
{
	thread_set_exceptions(state & THREAD_EXCP_ALL);
}
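
/*
 * Typical save/restore idiom for the two functions above, as used all over
 * this file (illustrative sketch only):
 *
 *	uint32_t exceptions;
 *
 *	exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
 *	... critical section: cannot be preempted by a foreign interrupt
 *	    and thus cannot migrate to another core ...
 *	thread_unmask_exceptions(exceptions);
 *
 * thread_mask_exceptions() returns the previous mask state, so these
 * sections nest safely.
 */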

struct thread_core_local *thread_get_core_local(void)
{
	uint32_t cpu_id = get_core_pos();

	/*
	 * Foreign interrupts must be disabled before playing with core_local
	 * since we otherwise may be rescheduled to a different core in the
	 * middle of this function.
	 */
	assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);

	assert(cpu_id < CFG_TEE_CORE_NB_CORE);
	return &thread_core_local[cpu_id];
}

static void thread_lazy_save_ns_vfp(void)
{
#ifdef CFG_WITH_VFP
	struct thread_ctx *thr = threads + thread_get_id();

	thr->vfp_state.ns_saved = false;
#if defined(ARM64) && defined(CFG_WITH_ARM_TRUSTED_FW)
	/*
	 * ARM TF saves and restores CPACR_EL1, so we must assume NS world
	 * uses VFP and always preserve the register file when secure world
	 * is about to use it
	 */
	thr->vfp_state.ns.force_save = true;
#endif
	vfp_lazy_save_state_init(&thr->vfp_state.ns);
#endif /*CFG_WITH_VFP*/
}

static void thread_lazy_restore_ns_vfp(void)
{
#ifdef CFG_WITH_VFP
	struct thread_ctx *thr = threads + thread_get_id();
	struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp;

	assert(!thr->vfp_state.sec_lazy_saved && !thr->vfp_state.sec_saved);

	if (tuv && tuv->lazy_saved && !tuv->saved) {
		vfp_lazy_save_state_final(&tuv->vfp);
		tuv->saved = true;
	}

	vfp_lazy_restore_state(&thr->vfp_state.ns, thr->vfp_state.ns_saved);
	thr->vfp_state.ns_saved = false;
#endif /*CFG_WITH_VFP*/
}
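
/*
 * Lifecycle sketch for the two helpers above: thread_lazy_save_ns_vfp()
 * runs when a thread starts or resumes on behalf of normal world and only
 * arms a lazy save; the register file is actually dumped by
 * vfp_lazy_save_state_final() the first time secure world touches VFP
 * (see thread_kernel_enable_vfp() below). thread_lazy_restore_ns_vfp()
 * undoes this when the thread is suspended or freed, i.e. before control
 * returns to normal world, restoring the NS registers only if they were
 * really saved (ns_saved).
 */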

#ifdef ARM32
static void init_regs(struct thread_ctx *thread,
		      struct thread_smc_args *args)
{
	thread->regs.pc = (uint32_t)thread_std_smc_entry;

	/*
	 * Std calls start in SVC mode with foreign interrupts masked,
	 * asynchronous aborts masked and native interrupts unmasked.
	 */
	thread->regs.cpsr = read_cpsr() & ARM32_CPSR_E;
	thread->regs.cpsr |= CPSR_MODE_SVC | CPSR_A |
			     (THREAD_EXCP_FOREIGN_INTR << ARM32_CPSR_F_SHIFT);
	/* Enable thumb mode if it's a thumb instruction */
	if (thread->regs.pc & 1)
		thread->regs.cpsr |= CPSR_T;
	/* Reinitialize stack pointer */
	thread->regs.svc_sp = thread->stack_va_end;

	/*
	 * Copy arguments into context. This will make the
	 * arguments appear in r0-r7 when thread is started.
	 */
	thread->regs.r0 = args->a0;
	thread->regs.r1 = args->a1;
	thread->regs.r2 = args->a2;
	thread->regs.r3 = args->a3;
	thread->regs.r4 = args->a4;
	thread->regs.r5 = args->a5;
	thread->regs.r6 = args->a6;
	thread->regs.r7 = args->a7;
}
#endif /*ARM32*/

#ifdef ARM64
static void init_regs(struct thread_ctx *thread,
		      struct thread_smc_args *args)
{
	thread->regs.pc = (uint64_t)thread_std_smc_entry;

	/*
	 * Std calls start in EL1 with foreign interrupts masked,
	 * asynchronous aborts masked and native interrupts unmasked.
	 */
	thread->regs.cpsr = SPSR_64(SPSR_64_MODE_EL1, SPSR_64_MODE_SP_EL0,
				    THREAD_EXCP_FOREIGN_INTR | DAIFBIT_ABT);
	/* Reinitialize stack pointer */
	thread->regs.sp = thread->stack_va_end;

	/*
	 * Copy arguments into context. This will make the
	 * arguments appear in x0-x7 when thread is started.
	 */
	thread->regs.x[0] = args->a0;
	thread->regs.x[1] = args->a1;
	thread->regs.x[2] = args->a2;
	thread->regs.x[3] = args->a3;
	thread->regs.x[4] = args->a4;
	thread->regs.x[5] = args->a5;
	thread->regs.x[6] = args->a6;
	thread->regs.x[7] = args->a7;

	/* Set up frame pointer as per the AArch64 AAPCS */
	thread->regs.x[29] = 0;
}
#endif /*ARM64*/

void thread_init_boot_thread(void)
{
	struct thread_core_local *l = thread_get_core_local();
	size_t n;

	for (n = 0; n < CFG_NUM_THREADS; n++) {
		TAILQ_INIT(&threads[n].mutexes);
		TAILQ_INIT(&threads[n].tsd.sess_stack);
		SLIST_INIT(&threads[n].tsd.pgt_cache);
	}

	for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++)
		thread_core_local[n].curr_thread = -1;

	l->curr_thread = 0;
	threads[0].state = THREAD_STATE_ACTIVE;
}

void thread_clr_boot_thread(void)
{
	struct thread_core_local *l = thread_get_core_local();

	assert(l->curr_thread >= 0 && l->curr_thread < CFG_NUM_THREADS);
	assert(threads[l->curr_thread].state == THREAD_STATE_ACTIVE);
	assert(TAILQ_EMPTY(&threads[l->curr_thread].mutexes));
	threads[l->curr_thread].state = THREAD_STATE_FREE;
	l->curr_thread = -1;
}

static void thread_alloc_and_run(struct thread_smc_args *args)
{
	size_t n;
	struct thread_core_local *l = thread_get_core_local();
	bool found_thread = false;

	assert(l->curr_thread == -1);

	lock_global();

	for (n = 0; n < CFG_NUM_THREADS; n++) {
		if (threads[n].state == THREAD_STATE_FREE) {
			threads[n].state = THREAD_STATE_ACTIVE;
			found_thread = true;
			break;
		}
	}

	unlock_global();

	if (!found_thread) {
		args->a0 = OPTEE_SMC_RETURN_ETHREAD_LIMIT;
		return;
	}

	l->curr_thread = n;

	threads[n].flags = 0;
	init_regs(threads + n, args);

	/* Save Hypervisor Client ID */
	threads[n].hyp_clnt_id = args->a7;

	thread_lazy_save_ns_vfp();
	thread_resume(&threads[n].regs);
}
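
/*
 * Flow sketch for a standard call: thread_handle_std_smc() below picks a
 * free entry via thread_alloc_and_run(), init_regs() points it at
 * thread_std_smc_entry and thread_resume() switches to the thread stack.
 * If the call makes an RPC the thread is parked with
 * thread_state_suspend() and normal world later re-enters it through
 * thread_resume_from_rpc() using the thread id it was handed back.
 */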

#ifdef ARM32
static void copy_a0_to_a5(struct thread_ctx_regs *regs,
			  struct thread_smc_args *args)
{
	/*
	 * Update returned values from RPC, values will appear in
	 * r0-r5 when thread is resumed.
	 */
	regs->r0 = args->a0;
	regs->r1 = args->a1;
	regs->r2 = args->a2;
	regs->r3 = args->a3;
	regs->r4 = args->a4;
	regs->r5 = args->a5;
}
#endif /*ARM32*/

#ifdef ARM64
static void copy_a0_to_a5(struct thread_ctx_regs *regs,
			  struct thread_smc_args *args)
{
	/*
	 * Update returned values from RPC, values will appear in
	 * x0-x5 when thread is resumed.
	 */
	regs->x[0] = args->a0;
	regs->x[1] = args->a1;
	regs->x[2] = args->a2;
	regs->x[3] = args->a3;
	regs->x[4] = args->a4;
	regs->x[5] = args->a5;
}
#endif /*ARM64*/

#ifdef ARM32
static bool is_from_user(uint32_t cpsr)
{
	return (cpsr & ARM32_CPSR_MODE_MASK) == ARM32_CPSR_MODE_USR;
}
#endif

#ifdef ARM64
static bool is_from_user(uint32_t cpsr)
{
	if (cpsr & (SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT))
		return true;
	if (((cpsr >> SPSR_64_MODE_EL_SHIFT) & SPSR_64_MODE_EL_MASK) ==
	    SPSR_64_MODE_EL0)
		return true;
	return false;
}
#endif

static bool is_user_mode(struct thread_ctx_regs *regs)
{
	return is_from_user((uint32_t)regs->cpsr);
}

static void thread_resume_from_rpc(struct thread_smc_args *args)
{
	size_t n = args->a3; /* thread id */
	struct thread_core_local *l = thread_get_core_local();
	uint32_t rv = 0;

	assert(l->curr_thread == -1);

	lock_global();

	if (n < CFG_NUM_THREADS &&
	    threads[n].state == THREAD_STATE_SUSPENDED &&
	    args->a7 == threads[n].hyp_clnt_id)
		threads[n].state = THREAD_STATE_ACTIVE;
	else
		rv = OPTEE_SMC_RETURN_ERESUME;

	unlock_global();

	if (rv) {
		args->a0 = rv;
		return;
	}

	l->curr_thread = n;

	if (is_user_mode(&threads[n].regs))
		tee_ta_update_session_utime_resume();

	if (threads[n].have_user_map)
		core_mmu_set_user_map(&threads[n].user_map);

	/*
	 * A return from an RPC that requested service of a foreign
	 * interrupt must not pick up parameters from non-secure world.
	 */
	if (threads[n].flags & THREAD_FLAGS_COPY_ARGS_ON_RETURN) {
		copy_a0_to_a5(&threads[n].regs, args);
		threads[n].flags &= ~THREAD_FLAGS_COPY_ARGS_ON_RETURN;
	}

	thread_lazy_save_ns_vfp();
	thread_resume(&threads[n].regs);
}
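
/*
 * Register convention on the resume path above, as consumed by
 * thread_resume_from_rpc() (illustrative recap): a0 holds
 * OPTEE_SMC_CALL_RETURN_FROM_RPC, a3 the thread id handed to normal world
 * when the thread was suspended, and a7 the hypervisor client id that must
 * match the one recorded when the thread was allocated. A mismatch on any
 * of these yields OPTEE_SMC_RETURN_ERESUME instead of a resume.
 */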

void thread_handle_fast_smc(struct thread_smc_args *args)
{
	thread_check_canaries();
	thread_fast_smc_handler_ptr(args);
	/* Fast handlers must not unmask any exceptions */
	assert(thread_get_exceptions() == THREAD_EXCP_ALL);
}

void thread_handle_std_smc(struct thread_smc_args *args)
{
	thread_check_canaries();

	if (args->a0 == OPTEE_SMC_CALL_RETURN_FROM_RPC)
		thread_resume_from_rpc(args);
	else
		thread_alloc_and_run(args);
}

/*
 * Helper routine for the assembly function thread_std_smc_entry()
 *
 * Note: this function is weak just to make it possible to exclude it from
 * the unpaged area.
 */
void __weak __thread_std_smc_entry(struct thread_smc_args *args)
{
	thread_std_smc_handler_ptr(args);

	if (args->a0 == OPTEE_SMC_RETURN_OK) {
		struct thread_ctx *thr = threads + thread_get_id();

		tee_fs_rpc_cache_clear(&thr->tsd);
		if (!thread_prealloc_rpc_cache) {
			thread_rpc_free_arg(thr->rpc_carg);
			mobj_free(thr->rpc_mobj);
			thr->rpc_carg = 0;
			thr->rpc_arg = NULL;
			thr->rpc_mobj = NULL;
		}
	}
}

void *thread_get_tmp_sp(void)
{
	struct thread_core_local *l = thread_get_core_local();

	return (void *)l->tmp_stack_va_end;
}

#ifdef ARM64
vaddr_t thread_get_saved_thread_sp(void)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != -1);
	return threads[ct].kern_sp;
}
#endif /*ARM64*/

vaddr_t thread_stack_start(void)
{
	struct thread_ctx *thr;
	int ct = thread_get_id_may_fail();

	if (ct == -1)
		return 0;

	thr = threads + ct;
	return thr->stack_va_end - STACK_THREAD_SIZE;
}

size_t thread_stack_size(void)
{
	return STACK_THREAD_SIZE;
}

bool thread_is_from_abort_mode(void)
{
	struct thread_core_local *l = thread_get_core_local();

	return (l->flags >> THREAD_CLF_SAVED_SHIFT) & THREAD_CLF_ABORT;
}

#ifdef ARM32
bool thread_is_in_normal_mode(void)
{
	return (read_cpsr() & ARM32_CPSR_MODE_MASK) == ARM32_CPSR_MODE_SVC;
}
#endif

#ifdef ARM64
bool thread_is_in_normal_mode(void)
{
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	struct thread_core_local *l = thread_get_core_local();
	bool ret;

	/* If any bit in l->flags is set we're handling some exception. */
	ret = !l->flags;
	thread_unmask_exceptions(exceptions);

	return ret;
}
#endif

void thread_state_free(void)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != -1);
	assert(TAILQ_EMPTY(&threads[ct].mutexes));

	thread_lazy_restore_ns_vfp();
	tee_pager_release_phys(
		(void *)(threads[ct].stack_va_end - STACK_THREAD_SIZE),
		STACK_THREAD_SIZE);

	lock_global();

	assert(threads[ct].state == THREAD_STATE_ACTIVE);
	threads[ct].state = THREAD_STATE_FREE;
	threads[ct].flags = 0;
	l->curr_thread = -1;

	unlock_global();
}
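
/*
 * State machine recap (illustrative): entries in threads[] move through
 *
 *	THREAD_STATE_FREE      -> THREAD_STATE_ACTIVE     thread_alloc_and_run()
 *	THREAD_STATE_ACTIVE    -> THREAD_STATE_SUSPENDED  thread_state_suspend()
 *	THREAD_STATE_SUSPENDED -> THREAD_STATE_ACTIVE     thread_resume_from_rpc()
 *	THREAD_STATE_ACTIVE    -> THREAD_STATE_FREE       thread_state_free()
 *
 * All transitions happen with thread_global_lock held.
 */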

#ifdef CFG_WITH_PAGER
static void release_unused_kernel_stack(struct thread_ctx *thr,
					uint32_t cpsr __maybe_unused)
{
#ifdef ARM64
	/*
	 * If we're from user mode then thr->regs.sp is the saved user
	 * stack pointer and thr->kern_sp holds the last kernel stack
	 * pointer. But if we're from kernel mode then thr->kern_sp isn't
	 * up to date so we need to read from thr->regs.sp instead.
	 */
	vaddr_t sp = is_from_user(cpsr) ? thr->kern_sp : thr->regs.sp;
#else
	vaddr_t sp = thr->regs.svc_sp;
#endif
	vaddr_t base = thr->stack_va_end - STACK_THREAD_SIZE;
	size_t len = sp - base;

	tee_pager_release_phys((void *)base, len);
}
#else
static void release_unused_kernel_stack(struct thread_ctx *thr __unused,
					uint32_t cpsr __unused)
{
}
#endif

int thread_state_suspend(uint32_t flags, uint32_t cpsr, vaddr_t pc)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != -1);

	thread_check_canaries();

	release_unused_kernel_stack(threads + ct, cpsr);

	if (is_from_user(cpsr)) {
		thread_user_save_vfp();
		tee_ta_update_session_utime_suspend();
		tee_ta_gprof_sample_pc(pc);
	}
	thread_lazy_restore_ns_vfp();

	lock_global();

	assert(threads[ct].state == THREAD_STATE_ACTIVE);
	threads[ct].flags |= flags;
	threads[ct].regs.cpsr = cpsr;
	threads[ct].regs.pc = pc;
	threads[ct].state = THREAD_STATE_SUSPENDED;

	threads[ct].have_user_map = core_mmu_user_mapping_is_active();
	if (threads[ct].have_user_map) {
		core_mmu_get_user_map(&threads[ct].user_map);
		core_mmu_set_user_map(NULL);
	}

	l->curr_thread = -1;

	unlock_global();

	return ct;
}
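
/*
 * The thread id returned by thread_state_suspend() is the one normal world
 * must later present in a3 of an OPTEE_SMC_CALL_RETURN_FROM_RPC call (see
 * thread_resume_from_rpc() above); the hand-off of that id to normal world
 * happens in the assembly paths (thread_rpc()/thread_std_smc_entry()),
 * outside this file.
 */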

#ifdef ARM32
static void set_tmp_stack(struct thread_core_local *l, vaddr_t sp)
{
	l->tmp_stack_va_end = sp;
	thread_set_irq_sp(sp);
	thread_set_fiq_sp(sp);
}

static void set_abt_stack(struct thread_core_local *l, vaddr_t sp)
{
	l->abt_stack_va_end = sp;
	thread_set_abt_sp((vaddr_t)l);
	thread_set_und_sp((vaddr_t)l);
}
#endif /*ARM32*/

#ifdef ARM64
static void set_tmp_stack(struct thread_core_local *l, vaddr_t sp)
{
	/*
	 * We're already using the tmp stack when this function is called
	 * so there's no need to assign it to any stack pointer. However,
	 * we'll need to restore it at different times so store it here.
	 */
	l->tmp_stack_va_end = sp;
}

static void set_abt_stack(struct thread_core_local *l, vaddr_t sp)
{
	l->abt_stack_va_end = sp;
}
#endif /*ARM64*/

bool thread_init_stack(uint32_t thread_id, vaddr_t sp)
{
	if (thread_id >= CFG_NUM_THREADS)
		return false;
	threads[thread_id].stack_va_end = sp;
	return true;
}

int thread_get_id_may_fail(void)
{
	/*
	 * thread_get_core_local() requires foreign interrupts to be disabled
	 */
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	thread_unmask_exceptions(exceptions);
	return ct;
}

int thread_get_id(void)
{
	int ct = thread_get_id_may_fail();

	assert(ct >= 0 && ct < CFG_NUM_THREADS);
	return ct;
}

static void init_handlers(const struct thread_handlers *handlers)
{
	thread_std_smc_handler_ptr = handlers->std_smc;
	thread_fast_smc_handler_ptr = handlers->fast_smc;
	thread_nintr_handler_ptr = handlers->nintr;
	thread_cpu_on_handler_ptr = handlers->cpu_on;
	thread_cpu_off_handler_ptr = handlers->cpu_off;
	thread_cpu_suspend_handler_ptr = handlers->cpu_suspend;
	thread_cpu_resume_handler_ptr = handlers->cpu_resume;
	thread_system_off_handler_ptr = handlers->system_off;
	thread_system_reset_handler_ptr = handlers->system_reset;
}
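
/*
 * Sketch of how boot code is expected to feed init_handlers() through
 * thread_init_primary() below; the handler names are placeholders, the
 * real ones live in the platform/entry code:
 *
 *	static const struct thread_handlers boot_handlers = {
 *		.std_smc = tee_entry_std,
 *		.fast_smc = tee_entry_fast,
 *		.nintr = main_fiq,
 *		.cpu_on = cpu_on_handler,
 *		...
 *	};
 *
 *	thread_init_primary(&boot_handlers);
 */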

#ifdef CFG_WITH_PAGER
static void init_thread_stacks(void)
{
	size_t n;

	/*
	 * Allocate virtual memory for thread stacks.
	 */
	for (n = 0; n < CFG_NUM_THREADS; n++) {
		tee_mm_entry_t *mm;
		vaddr_t sp;

		/* Find vmem for thread stack and its protection gap */
		mm = tee_mm_alloc(&tee_mm_vcore,
				  SMALL_PAGE_SIZE + STACK_THREAD_SIZE);
		assert(mm);

		/* Claim eventual physical page */
		tee_pager_add_pages(tee_mm_get_smem(mm), tee_mm_get_size(mm),
				    true);

		/* Add the area to the pager */
		tee_pager_add_core_area(tee_mm_get_smem(mm) + SMALL_PAGE_SIZE,
					tee_mm_get_bytes(mm) - SMALL_PAGE_SIZE,
					TEE_MATTR_PRW | TEE_MATTR_LOCKED,
					NULL, NULL);

		/* init effective stack */
		sp = tee_mm_get_smem(mm) + tee_mm_get_bytes(mm);
		asan_tag_access((void *)tee_mm_get_smem(mm), (void *)sp);
		if (!thread_init_stack(n, sp))
			panic("init stack failed");
	}
}
#else
static void init_thread_stacks(void)
{
	size_t n;

	/* Assign the thread stacks */
	for (n = 0; n < CFG_NUM_THREADS; n++) {
		if (!thread_init_stack(n, GET_STACK(stack_thread[n])))
			panic("thread_init_stack failed");
	}
}
#endif /*CFG_WITH_PAGER*/
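
/*
 * Resulting per-thread layout in the pager case (illustrative):
 *
 *	tee_mm_get_smem(mm)                       stack_va_end
 *	v                                         v
 *	[ guard page        ][ pageable stack     ]
 *	  SMALL_PAGE_SIZE      STACK_THREAD_SIZE
 *
 * The guard page is deliberately left out of the pager area, so running
 * off the bottom of a stack faults on an unmapped page instead of
 * silently corrupting the neighbouring allocation.
 */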

static void init_user_kcode(void)
{
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	vaddr_t v;

	v = (vaddr_t)thread_vect_table;
	thread_user_kcode_va = ROUNDDOWN(v, CORE_MMU_USER_CODE_SIZE);
	/*
	 * The maximum size of the exception vector and associated code is
	 * something slightly larger than 2 KiB. Worst case the exception
	 * vector can span two pages.
	 */
	thread_user_kcode_size = CORE_MMU_USER_CODE_SIZE * 2;

	core_mmu_get_user_va_range(&v, NULL);
	thread_user_kcode_offset = thread_user_kcode_va - v;
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
}

void thread_init_primary(const struct thread_handlers *handlers)
{
	init_handlers(handlers);

	/* Initialize canaries around the stacks */
	init_canaries();

	init_thread_stacks();
	pgt_init();

	init_user_kcode();
}

static void init_sec_mon(size_t pos __maybe_unused)
{
#if !defined(CFG_WITH_ARM_TRUSTED_FW)
	/* Initialize secure monitor */
	sm_init(GET_STACK(stack_tmp[pos]));
#endif
}

void thread_init_per_cpu(void)
{
	size_t pos = get_core_pos();
	struct thread_core_local *l = thread_get_core_local();

	init_sec_mon(pos);

	set_tmp_stack(l, GET_STACK(stack_tmp[pos]) - STACK_TMP_OFFS);
	set_abt_stack(l, GET_STACK(stack_abt[pos]));

	thread_init_vbar();
}

struct thread_specific_data *thread_get_tsd(void)
{
	return &threads[thread_get_id()].tsd;
}

struct thread_ctx_regs *thread_get_ctx_regs(void)
{
	struct thread_core_local *l = thread_get_core_local();

	assert(l->curr_thread != -1);
	return &threads[l->curr_thread].regs;
}

void thread_set_foreign_intr(bool enable)
{
	/* thread_get_core_local() requires foreign interrupts to be disabled */
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	struct thread_core_local *l;

	l = thread_get_core_local();

	assert(l->curr_thread != -1);

	if (enable) {
		threads[l->curr_thread].flags |=
					THREAD_FLAGS_FOREIGN_INTR_ENABLE;
		thread_set_exceptions(exceptions & ~THREAD_EXCP_FOREIGN_INTR);
	} else {
		/*
		 * No need to disable foreign interrupts here since they're
		 * already disabled above.
		 */
		threads[l->curr_thread].flags &=
					~THREAD_FLAGS_FOREIGN_INTR_ENABLE;
	}
}

void thread_restore_foreign_intr(void)
{
	/* thread_get_core_local() requires foreign interrupts to be disabled */
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	struct thread_core_local *l;

	l = thread_get_core_local();

	assert(l->curr_thread != -1);

	if (threads[l->curr_thread].flags & THREAD_FLAGS_FOREIGN_INTR_ENABLE)
		thread_set_exceptions(exceptions & ~THREAD_EXCP_FOREIGN_INTR);
}
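
/*
 * Sketch of the pair above: a thread that enables foreign interrupts with
 * thread_set_foreign_intr(true) records that in its flags
 * (THREAD_FLAGS_FOREIGN_INTR_ENABLE), so that after it has been suspended
 * to serve such an interrupt and is later resumed, the unmasked state can
 * be reestablished with thread_restore_foreign_intr().
 */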

#ifdef CFG_WITH_VFP
uint32_t thread_kernel_enable_vfp(void)
{
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	struct thread_ctx *thr = threads + thread_get_id();
	struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp;

	assert(!vfp_is_enabled());

	if (!thr->vfp_state.ns_saved) {
		vfp_lazy_save_state_final(&thr->vfp_state.ns);
		thr->vfp_state.ns_saved = true;
	} else if (thr->vfp_state.sec_lazy_saved &&
		   !thr->vfp_state.sec_saved) {
		/*
		 * This happens when we're handling an abort while the
		 * thread was using the VFP state.
		 */
		vfp_lazy_save_state_final(&thr->vfp_state.sec);
		thr->vfp_state.sec_saved = true;
	} else if (tuv && tuv->lazy_saved && !tuv->saved) {
		/*
		 * This can happen during syscall processing, or during
		 * abort processing that has interrupted a syscall.
		 */
		vfp_lazy_save_state_final(&tuv->vfp);
		tuv->saved = true;
	}

	vfp_enable();
	return exceptions;
}

void thread_kernel_disable_vfp(uint32_t state)
{
	uint32_t exceptions;

	assert(vfp_is_enabled());

	vfp_disable();
	exceptions = thread_get_exceptions();
	assert(exceptions & THREAD_EXCP_FOREIGN_INTR);
	exceptions &= ~THREAD_EXCP_FOREIGN_INTR;
	exceptions |= state & THREAD_EXCP_FOREIGN_INTR;
	thread_set_exceptions(exceptions);
}

void thread_kernel_save_vfp(void)
{
	struct thread_ctx *thr = threads + thread_get_id();

	assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);
	if (vfp_is_enabled()) {
		vfp_lazy_save_state_init(&thr->vfp_state.sec);
		thr->vfp_state.sec_lazy_saved = true;
	}
}

void thread_kernel_restore_vfp(void)
{
	struct thread_ctx *thr = threads + thread_get_id();

	assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);
	assert(!vfp_is_enabled());
	if (thr->vfp_state.sec_lazy_saved) {
		vfp_lazy_restore_state(&thr->vfp_state.sec,
				       thr->vfp_state.sec_saved);
		thr->vfp_state.sec_saved = false;
		thr->vfp_state.sec_lazy_saved = false;
	}
}

void thread_user_enable_vfp(struct thread_user_vfp_state *uvfp)
{
	struct thread_ctx *thr = threads + thread_get_id();
	struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp;

	assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);
	assert(!vfp_is_enabled());

	if (!thr->vfp_state.ns_saved) {
		vfp_lazy_save_state_final(&thr->vfp_state.ns);
		thr->vfp_state.ns_saved = true;
	} else if (tuv && uvfp != tuv) {
		if (tuv->lazy_saved && !tuv->saved) {
			vfp_lazy_save_state_final(&tuv->vfp);
			tuv->saved = true;
		}
	}

	if (uvfp->lazy_saved)
		vfp_lazy_restore_state(&uvfp->vfp, uvfp->saved);
	uvfp->lazy_saved = false;
	uvfp->saved = false;

	thr->vfp_state.uvfp = uvfp;
	vfp_enable();
}

void thread_user_save_vfp(void)
{
	struct thread_ctx *thr = threads + thread_get_id();
	struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp;

	assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);
	if (!vfp_is_enabled())
		return;

	assert(tuv && !tuv->lazy_saved && !tuv->saved);
	vfp_lazy_save_state_init(&tuv->vfp);
	tuv->lazy_saved = true;
}

void thread_user_clear_vfp(struct thread_user_vfp_state *uvfp)
{
	struct thread_ctx *thr = threads + thread_get_id();

	if (uvfp == thr->vfp_state.uvfp)
		thr->vfp_state.uvfp = NULL;
	uvfp->lazy_saved = false;
	uvfp->saved = false;
}
#endif /*CFG_WITH_VFP*/
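
/*
 * Usage sketch for the kernel-mode pair above (the pattern callers such as
 * the crypto code are expected to follow, illustrative only):
 *
 *	uint32_t vfp_state;
 *
 *	vfp_state = thread_kernel_enable_vfp();
 *	... use VFP/SIMD registers ...
 *	thread_kernel_disable_vfp(vfp_state);
 *
 * Foreign interrupts stay masked in between, which bounds how long the
 * VFP register file is live in kernel mode.
 */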

#ifdef ARM32
static bool get_spsr(bool is_32bit, unsigned long entry_func, uint32_t *spsr)
{
	uint32_t s;

	if (!is_32bit)
		return false;

	s = read_spsr();
	s &= ~(CPSR_MODE_MASK | CPSR_T | CPSR_IT_MASK1 | CPSR_IT_MASK2);
	s |= CPSR_MODE_USR;
	if (entry_func & 1)
		s |= CPSR_T;
	*spsr = s;
	return true;
}
#endif

#ifdef ARM64
static bool get_spsr(bool is_32bit, unsigned long entry_func, uint32_t *spsr)
{
	uint32_t s;

	if (is_32bit) {
		s = read_daif() & (SPSR_32_AIF_MASK << SPSR_32_AIF_SHIFT);
		s |= SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT;
		s |= (entry_func & SPSR_32_T_MASK) << SPSR_32_T_SHIFT;
	} else {
		s = read_daif() & (SPSR_64_DAIF_MASK << SPSR_64_DAIF_SHIFT);
	}

	*spsr = s;
	return true;
}
#endif

uint32_t thread_enter_user_mode(unsigned long a0, unsigned long a1,
		unsigned long a2, unsigned long a3, unsigned long user_sp,
		unsigned long entry_func, bool is_32bit,
		uint32_t *exit_status0, uint32_t *exit_status1)
{
	uint32_t spsr;

	tee_ta_update_session_utime_resume();

	if (!get_spsr(is_32bit, entry_func, &spsr)) {
		*exit_status0 = 1; /* panic */
		*exit_status1 = 0xbadbadba;
		return 0;
	}
	return __thread_enter_user_mode(a0, a1, a2, a3, user_sp, entry_func,
					spsr, exit_status0, exit_status1);
}

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
void thread_get_user_kcode(struct mobj **mobj, size_t *offset,
			   vaddr_t *va, size_t *sz)
{
	core_mmu_get_user_va_range(va, NULL);
	*mobj = mobj_tee_ram;
	*offset = thread_user_kcode_va - CFG_TEE_RAM_START;
	*sz = thread_user_kcode_size;
}
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/

void thread_add_mutex(struct mutex *m)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != -1 && threads[ct].state == THREAD_STATE_ACTIVE);
	assert(m->owner_id == MUTEX_OWNER_ID_NONE);
	m->owner_id = ct;
	TAILQ_INSERT_TAIL(&threads[ct].mutexes, m, link);
}

void thread_rem_mutex(struct mutex *m)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != -1 && threads[ct].state == THREAD_STATE_ACTIVE);
	assert(m->owner_id == ct);
	m->owner_id = MUTEX_OWNER_ID_NONE;
	TAILQ_REMOVE(&threads[ct].mutexes, m, link);
}

bool thread_disable_prealloc_rpc_cache(uint64_t *cookie)
{
	bool rv;
	size_t n;
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);

	lock_global();

	for (n = 0; n < CFG_NUM_THREADS; n++) {
		if (threads[n].state != THREAD_STATE_FREE) {
			rv = false;
			goto out;
		}
	}

	rv = true;
	for (n = 0; n < CFG_NUM_THREADS; n++) {
		if (threads[n].rpc_arg) {
			mobj_free(threads[n].rpc_mobj);
			*cookie = threads[n].rpc_carg;
			threads[n].rpc_carg = 0;
			threads[n].rpc_arg = NULL;
			goto out;
		}
	}

	*cookie = 0;
	thread_prealloc_rpc_cache = false;
out:
	unlock_global();
	thread_unmask_exceptions(exceptions);
	return rv;
}

bool thread_enable_prealloc_rpc_cache(void)
{
	bool rv;
	size_t n;
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);

	lock_global();

	for (n = 0; n < CFG_NUM_THREADS; n++) {
		if (threads[n].state != THREAD_STATE_FREE) {
			rv = false;
			goto out;
		}
	}

	rv = true;
	thread_prealloc_rpc_cache = true;
out:
	unlock_global();
	thread_unmask_exceptions(exceptions);
	return rv;
}

void thread_rpc_free_arg(uint64_t cookie)
{
	if (cookie) {
		uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = {
			OPTEE_SMC_RETURN_RPC_FREE
		};

		reg_pair_from_64(cookie, rpc_args + 1, rpc_args + 2);
		thread_rpc(rpc_args);
	}
}

struct mobj *thread_rpc_alloc_arg(size_t size, uint64_t *cookie)
{
	paddr_t pa;
	uint64_t co;
	uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = {
		OPTEE_SMC_RETURN_RPC_ALLOC, size
	};
	struct mobj *mobj = NULL;

	thread_rpc(rpc_args);

	pa = reg_pair_to_64(rpc_args[1], rpc_args[2]);
	co = reg_pair_to_64(rpc_args[4], rpc_args[5]);

	if (!ALIGNMENT_IS_OK(pa, struct optee_msg_arg))
		goto err;

	/* Check if this region is in static shared space */
	if (core_pbuf_is(CORE_MEM_NSEC_SHM, pa, size))
		mobj = mobj_shm_alloc(pa, size);
	else if ((!(pa & SMALL_PAGE_MASK)) && size <= SMALL_PAGE_SIZE)
		mobj = mobj_mapped_shm_alloc(&pa, 1, 0, co);

	if (!mobj)
		goto err;

	*cookie = co;
	return mobj;
err:
	thread_rpc_free_arg(co);
	mobj_free(mobj);
	*cookie = 0;
	return NULL;
}
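
/*
 * The 64-bit cookies and physical addresses above travel to normal world
 * as two 32-bit SMC arguments; reg_pair_from_64()/reg_pair_to_64() do the
 * packing, conceptually (sketch, not the actual implementation):
 *
 *	rpc_args[1] = cookie >> 32;	// high word
 *	rpc_args[2] = cookie;		// low word
 */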

static bool get_rpc_arg(uint32_t cmd, size_t num_params,
			struct optee_msg_arg **arg_ret, uint64_t *carg_ret)
{
	struct thread_ctx *thr = threads + thread_get_id();
	struct optee_msg_arg *arg = thr->rpc_arg;
	struct mobj *mobj;
	size_t sz = OPTEE_MSG_GET_ARG_SIZE(THREAD_RPC_MAX_NUM_PARAMS);
	uint64_t c;

	if (num_params > THREAD_RPC_MAX_NUM_PARAMS)
		return false;

	if (!arg) {
		mobj = thread_rpc_alloc_arg(sz, &c);
		if (!mobj)
			return false;

		arg = mobj_get_va(mobj, 0);
		if (!arg)
			goto bad;

		thr->rpc_arg = arg;
		thr->rpc_carg = c;
		thr->rpc_mobj = mobj;
	}

	memset(arg, 0, OPTEE_MSG_GET_ARG_SIZE(num_params));
	arg->cmd = cmd;
	arg->num_params = num_params;
	arg->ret = TEE_ERROR_GENERIC; /* in case value isn't updated */

	*arg_ret = arg;
	*carg_ret = thr->rpc_carg;
	return true;

bad:
	thread_rpc_free_arg(c);
	return false;
}

uint32_t thread_rpc_cmd(uint32_t cmd, size_t num_params,
			struct optee_msg_param *params)
{
	uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = { OPTEE_SMC_RETURN_RPC_CMD };
	struct optee_msg_arg *arg;
	uint64_t carg;
	size_t n;

	/*
	 * Break recursion in case plat_prng_add_jitter_entropy_norpc()
	 * sleeps on a mutex or unlocks a mutex with a sleeper (contended
	 * mutex).
	 */
	if (cmd != OPTEE_MSG_RPC_CMD_WAIT_QUEUE)
		plat_prng_add_jitter_entropy_norpc();

	if (!get_rpc_arg(cmd, num_params, &arg, &carg))
		return TEE_ERROR_OUT_OF_MEMORY;

	memcpy(arg->params, params, sizeof(*params) * num_params);

	reg_pair_from_64(carg, rpc_args + 1, rpc_args + 2);
	thread_rpc(rpc_args);
	for (n = 0; n < num_params; n++) {
		switch (params[n].attr & OPTEE_MSG_ATTR_TYPE_MASK) {
		case OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT:
		case OPTEE_MSG_ATTR_TYPE_VALUE_INOUT:
		case OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT:
		case OPTEE_MSG_ATTR_TYPE_RMEM_INOUT:
		case OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT:
		case OPTEE_MSG_ATTR_TYPE_TMEM_INOUT:
			params[n] = arg->params[n];
			break;
		default:
			break;
		}
	}
	return arg->ret;
}

/**
 * Free physical memory previously allocated with thread_rpc_alloc()
 *
 * @bt:		must be the same as supplied when allocating
 * @cookie:	cookie received when allocating the buffer
 * @mobj:	mobj that describes the allocated buffer
 *
 * This function also frees the corresponding mobj.
 */
static void thread_rpc_free(unsigned int bt, uint64_t cookie, struct mobj *mobj)
{
	uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = { OPTEE_SMC_RETURN_RPC_CMD };
	struct optee_msg_arg *arg;
	uint64_t carg;

	if (!get_rpc_arg(OPTEE_MSG_RPC_CMD_SHM_FREE, 1, &arg, &carg))
		return;

	arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
	arg->params[0].u.value.a = bt;
	arg->params[0].u.value.b = cookie;
	arg->params[0].u.value.c = 0;

	mobj_free(mobj);

	reg_pair_from_64(carg, rpc_args + 1, rpc_args + 2);
	thread_rpc(rpc_args);
}

/**
 * Allocates shared memory buffer via RPC
 *
 * @size:	size in bytes of shared memory buffer
 * @align:	required alignment of buffer
 * @bt:		buffer type OPTEE_MSG_RPC_SHM_TYPE_*
 * @cookie:	returned cookie used when freeing the buffer
 *
 * Returns a mobj that describes the allocated buffer, or NULL if the
 * allocation failed.
 */
static struct mobj *thread_rpc_alloc(size_t size, size_t align, unsigned int bt,
				     uint64_t *cookie)
{
	uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = { OPTEE_SMC_RETURN_RPC_CMD };
	struct optee_msg_arg *arg;
	uint64_t carg;
	struct mobj *mobj = NULL;

	if (!get_rpc_arg(OPTEE_MSG_RPC_CMD_SHM_ALLOC, 1, &arg, &carg))
		goto fail;

	arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
	arg->params[0].u.value.a = bt;
	arg->params[0].u.value.b = size;
	arg->params[0].u.value.c = align;

	reg_pair_from_64(carg, rpc_args + 1, rpc_args + 2);
	thread_rpc(rpc_args);

	if (arg->ret != TEE_SUCCESS)
		goto fail;

	if (arg->num_params != 1)
		goto fail;

	if (arg->params[0].attr == OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT) {
		*cookie = arg->params[0].u.tmem.shm_ref;
		mobj = mobj_shm_alloc(arg->params[0].u.tmem.buf_ptr,
				      arg->params[0].u.tmem.size);
	} else if (arg->params[0].attr == (OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
					   OPTEE_MSG_ATTR_NONCONTIG)) {
		*cookie = arg->params[0].u.tmem.shm_ref;
		mobj = msg_param_mobj_from_noncontig(
			arg->params[0].u.tmem.buf_ptr,
			arg->params[0].u.tmem.size,
			*cookie,
			true);
	} else
		goto fail;

	if (!mobj)
		goto free_first;

	assert(mobj_is_nonsec(mobj));
	return mobj;

free_first:
	thread_rpc_free(bt, *cookie, mobj);
fail:
	*cookie = 0;
	return NULL;
}

struct mobj *thread_rpc_alloc_payload(size_t size, uint64_t *cookie)
{
	return thread_rpc_alloc(size, 8, OPTEE_MSG_RPC_SHM_TYPE_APPL, cookie);
}

void thread_rpc_free_payload(uint64_t cookie, struct mobj *mobj)
{
	thread_rpc_free(OPTEE_MSG_RPC_SHM_TYPE_APPL, cookie, mobj);
}